summaryrefslogtreecommitdiff
path: root/usr/src/uts
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/uts')
-rw-r--r--usr/src/uts/common/io/hotplug/pciehpc/pciehpc.c62
-rw-r--r--usr/src/uts/common/os/chip.c9
-rw-r--r--usr/src/uts/common/os/cpu.c7
-rw-r--r--usr/src/uts/common/sys/Makefile3
-rw-r--r--usr/src/uts/common/sys/ddidmareq.h12
-rw-r--r--usr/src/uts/common/sys/fm/io/opl_mc_fm.h69
-rw-r--r--usr/src/uts/common/sys/fm/io/sun4_fire.h41
-rw-r--r--usr/src/uts/sfmmu/ml/sfmmu_asm.s129
-rw-r--r--usr/src/uts/sfmmu/vm/hat_sfmmu.c19
-rw-r--r--usr/src/uts/sfmmu/vm/hat_sfmmu.h18
-rw-r--r--usr/src/uts/sparc/fpu/fpu_simulator.c554
-rw-r--r--usr/src/uts/sparc/os/driver_aliases7
-rw-r--r--usr/src/uts/sparc/os/name_to_major7
-rw-r--r--usr/src/uts/sparc/sys/Makefile8
-rw-r--r--usr/src/uts/sparc/sys/cpu.h11
-rw-r--r--usr/src/uts/sparc/sys/fm/cpu/SPARC64-VI.h118
-rw-r--r--usr/src/uts/sparc/sys/fpu/fpu_simulator.h35
-rw-r--r--usr/src/uts/sun4/io/px/px_debug.c9
-rw-r--r--usr/src/uts/sun4/io/px/px_dma.c15
-rw-r--r--usr/src/uts/sun4/io/px/px_dma.h4
-rw-r--r--usr/src/uts/sun4/io/px/px_fdvma.c2
-rw-r--r--usr/src/uts/sun4/io/px/px_ib.c26
-rw-r--r--usr/src/uts/sun4/io/px/px_ioapi.h17
-rw-r--r--usr/src/uts/sun4/io/px/px_lib.h4
-rw-r--r--usr/src/uts/sun4/io/px/px_mmu.h5
-rw-r--r--usr/src/uts/sun4/io/px/px_space.c76
-rw-r--r--usr/src/uts/sun4/io/px/px_space.h65
-rw-r--r--usr/src/uts/sun4/io/px/px_tools.c12
-rw-r--r--usr/src/uts/sun4/io/px/px_var.h2
-rw-r--r--usr/src/uts/sun4/io/trapstat.c45
-rw-r--r--usr/src/uts/sun4/ml/interrupt.s8
-rw-r--r--usr/src/uts/sun4/os/startup.c12
-rw-r--r--usr/src/uts/sun4/sys/platform_module.h9
-rw-r--r--usr/src/uts/sun4u/Makefile.files5
-rw-r--r--usr/src/uts/sun4u/Makefile.sun4u.shared6
-rw-r--r--usr/src/uts/sun4u/Makefile.workarounds14
-rw-r--r--usr/src/uts/sun4u/cpu/common_asm.s55
-rw-r--r--usr/src/uts/sun4u/cpu/opl_kdi.c156
-rw-r--r--usr/src/uts/sun4u/cpu/opl_olympus.c2237
-rw-r--r--usr/src/uts/sun4u/cpu/opl_olympus_asm.s1991
-rw-r--r--usr/src/uts/sun4u/cpu/opl_olympus_copy.s3716
-rw-r--r--usr/src/uts/sun4u/io/opl_cfg.c2559
-rw-r--r--usr/src/uts/sun4u/io/px/oberon_regs.h183
-rw-r--r--usr/src/uts/sun4u/io/px/px_err.c340
-rw-r--r--usr/src/uts/sun4u/io/px/px_err.h38
-rw-r--r--usr/src/uts/sun4u/io/px/px_err_impl.h19
-rw-r--r--usr/src/uts/sun4u/io/px/px_hlib.c900
-rw-r--r--usr/src/uts/sun4u/io/px/px_lib4u.c354
-rw-r--r--usr/src/uts/sun4u/io/px/px_lib4u.h61
-rw-r--r--usr/src/uts/sun4u/io/todopl.c321
-rw-r--r--usr/src/uts/sun4u/ml/mach_locore.s12
-rw-r--r--usr/src/uts/sun4u/ml/mach_offsets.in8
-rw-r--r--usr/src/uts/sun4u/ml/trap_table.s100
-rw-r--r--usr/src/uts/sun4u/ngdr/io/dr.c87
-rw-r--r--usr/src/uts/sun4u/ngdr/io/dr_cpu.c69
-rw-r--r--usr/src/uts/sun4u/ngdr/io/dr_io.c10
-rw-r--r--usr/src/uts/sun4u/ngdr/io/dr_mem.c156
-rw-r--r--usr/src/uts/sun4u/ngdr/io/dr_util.c209
-rw-r--r--usr/src/uts/sun4u/ngdr/sys/dr.h60
-rw-r--r--usr/src/uts/sun4u/ngdr/sys/dr_util.h36
-rw-r--r--usr/src/uts/sun4u/opl/Makefile147
-rw-r--r--usr/src/uts/sun4u/opl/Makefile.files56
-rw-r--r--usr/src/uts/sun4u/opl/Makefile.opl150
-rw-r--r--usr/src/uts/sun4u/opl/Makefile.rules147
-rw-r--r--usr/src/uts/sun4u/opl/Makefile.targ116
-rw-r--r--usr/src/uts/sun4u/opl/dm2s/Makefile105
-rw-r--r--usr/src/uts/sun4u/opl/dr/Makefile92
-rw-r--r--usr/src/uts/sun4u/opl/drmach/Makefile116
-rw-r--r--usr/src/uts/sun4u/opl/genassym/Makefile97
-rw-r--r--usr/src/uts/sun4u/opl/io/dm2s.c1291
-rw-r--r--usr/src/uts/sun4u/opl/io/dm2s.conf28
-rw-r--r--usr/src/uts/sun4u/opl/io/dr.conf27
-rw-r--r--usr/src/uts/sun4u/opl/io/dr_mem.c2760
-rw-r--r--usr/src/uts/sun4u/opl/io/drmach.c3929
-rw-r--r--usr/src/uts/sun4u/opl/io/mc-opl.c2442
-rw-r--r--usr/src/uts/sun4u/opl/io/mc-opl.conf26
-rw-r--r--usr/src/uts/sun4u/opl/io/oplkmdrv.c1107
-rw-r--r--usr/src/uts/sun4u/opl/io/oplkmdrv.conf28
-rw-r--r--usr/src/uts/sun4u/opl/io/oplmsu/oplmsu.c2471
-rw-r--r--usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_cmn_func.c1845
-rw-r--r--usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_lrp.c780
-rw-r--r--usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_uwp.c471
-rw-r--r--usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.c414
-rw-r--r--usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.conf29
-rw-r--r--usr/src/uts/sun4u/opl/io/options.conf42
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcicmu.c2202
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_cb.c288
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_counters.c253
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ecc.c469
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ib.c747
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_intr.c340
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_pbm.c290
-rw-r--r--usr/src/uts/sun4u/opl/io/pcicmu/pcmu_util.c707
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scf_os_interface.c143
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfconf.c1124
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfd.conf52
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfdebug.c2028
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfdscp.c4450
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfhandler.c3458
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfinit.c301
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfiomp.c2286
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfkstat.c219
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfops.c3470
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfostoescf.c307
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfparam.c147
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfreg.c1977
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scfsnap.c766
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scftimer.c603
-rw-r--r--usr/src/uts/sun4u/opl/io/scfd/scftrace.c152
-rw-r--r--usr/src/uts/sun4u/opl/mc-opl/Makefile98
-rw-r--r--usr/src/uts/sun4u/opl/ml/drmach.il.cpp198
-rw-r--r--usr/src/uts/sun4u/opl/ml/drmach_asm.s507
-rw-r--r--usr/src/uts/sun4u/opl/ml/drmach_offsets.in60
-rw-r--r--usr/src/uts/sun4u/opl/olympus_c/Makefile116
-rw-r--r--usr/src/uts/sun4u/opl/oplkmdrv/Makefile100
-rw-r--r--usr/src/uts/sun4u/opl/oplmsu/Makefile88
-rw-r--r--usr/src/uts/sun4u/opl/oplpanel/Makefile87
-rw-r--r--usr/src/uts/sun4u/opl/options/Makefile86
-rw-r--r--usr/src/uts/sun4u/opl/os/opl.c855
-rw-r--r--usr/src/uts/sun4u/opl/pcicmu/Makefile93
-rw-r--r--usr/src/uts/sun4u/opl/platmod/Makefile97
-rw-r--r--usr/src/uts/sun4u/opl/scfd/Makefile88
-rw-r--r--usr/src/uts/sun4u/opl/sys/Makefile124
-rw-r--r--usr/src/uts/sun4u/opl/sys/dm2s.h110
-rw-r--r--usr/src/uts/sun4u/opl/sys/drmach.h355
-rw-r--r--usr/src/uts/sun4u/opl/sys/fiomp.h240
-rw-r--r--usr/src/uts/sun4u/opl/sys/mc-opl.h342
-rw-r--r--usr/src/uts/sun4u/opl/sys/opl_hwdesc.h511
-rw-r--r--usr/src/uts/sun4u/opl/sys/oplkm.h103
-rw-r--r--usr/src/uts/sun4u/opl/sys/oplkm_msg.h91
-rw-r--r--usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu.h358
-rw-r--r--usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu_proto.h166
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcicmu.h450
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_cb.h81
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_counters.h78
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ecc.h75
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_err.h135
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ib.h173
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_pbm.h99
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_types.h54
-rw-r--r--usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_util.h123
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/iomp_drv.h134
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/opcio.h326
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfdebug.h308
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfdscp.h468
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfdscpif.h104
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfio32.h68
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfkstat.h102
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfostoescf.h54
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfparam.h158
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfreg.h581
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfsnap.h111
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfstate.h197
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scfsys.h862
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scftimer.h123
-rw-r--r--usr/src/uts/sun4u/opl/sys/scfd/scftrace.h215
-rw-r--r--usr/src/uts/sun4u/opl/unix/Makefile191
-rw-r--r--usr/src/uts/sun4u/opl_cfg/Makefile103
-rw-r--r--usr/src/uts/sun4u/opl_pcbe/Makefile84
-rw-r--r--usr/src/uts/sun4u/os/cmp.c32
-rw-r--r--usr/src/uts/sun4u/os/cpr_impl.c18
-rw-r--r--usr/src/uts/sun4u/os/fillsysinfo.c108
-rw-r--r--usr/src/uts/sun4u/os/mach_startup.c207
-rw-r--r--usr/src/uts/sun4u/pcbe/opl_pcbe.c619
-rw-r--r--usr/src/uts/sun4u/serengeti/io/sbdp_mem.c4
-rw-r--r--usr/src/uts/sun4u/serengeti/sys/sbdp_mem.h3
-rw-r--r--usr/src/uts/sun4u/starcat/io/drmach.c72
-rw-r--r--usr/src/uts/sun4u/starcat/io/fcgp2.c14
-rw-r--r--usr/src/uts/sun4u/starcat/os/starcat.c9
-rw-r--r--usr/src/uts/sun4u/starfire/io/drmach.c68
-rw-r--r--usr/src/uts/sun4u/sys/Makefile3
-rw-r--r--usr/src/uts/sun4u/sys/cheetahregs.h66
-rw-r--r--usr/src/uts/sun4u/sys/cpu_impl.h110
-rw-r--r--usr/src/uts/sun4u/sys/cpu_module.h14
-rw-r--r--usr/src/uts/sun4u/sys/machasi.h9
-rw-r--r--usr/src/uts/sun4u/sys/machclock.h9
-rw-r--r--usr/src/uts/sun4u/sys/machparam.h7
-rw-r--r--usr/src/uts/sun4u/sys/machthread.h15
-rw-r--r--usr/src/uts/sun4u/sys/opl.h80
-rw-r--r--usr/src/uts/sun4u/sys/opl_cfg.h306
-rw-r--r--usr/src/uts/sun4u/sys/opl_module.h159
-rw-r--r--usr/src/uts/sun4u/sys/opl_olympus_regs.h311
-rw-r--r--usr/src/uts/sun4u/sys/prom_plat.h16
-rw-r--r--usr/src/uts/sun4u/sys/pte.h39
-rw-r--r--usr/src/uts/sun4u/sys/sbd_ioctl.h28
-rw-r--r--usr/src/uts/sun4u/todopl/Makefile88
-rw-r--r--usr/src/uts/sun4u/vm/mach_sfmmu.c11
-rw-r--r--usr/src/uts/sun4u/vm/mach_sfmmu.h302
-rw-r--r--usr/src/uts/sun4u/vm/mach_sfmmu_asm.s46
-rw-r--r--usr/src/uts/sun4v/Makefile.workarounds1
-rw-r--r--usr/src/uts/sun4v/io/px/px_lib4v.c14
191 files changed, 72331 insertions, 1404 deletions
diff --git a/usr/src/uts/common/io/hotplug/pciehpc/pciehpc.c b/usr/src/uts/common/io/hotplug/pciehpc/pciehpc.c
index 41435779fa..737a83ea7c 100644
--- a/usr/src/uts/common/io/hotplug/pciehpc/pciehpc.c
+++ b/usr/src/uts/common/io/hotplug/pciehpc/pciehpc.c
@@ -746,9 +746,14 @@ pciehpc_enable_intr(pciehpc_t *ctrl_p)
reg = pciehpc_reg_get16(ctrl_p,
ctrl_p->pcie_caps_reg_offset + PCIE_SLOTCTL);
- /* enable all interrupts */
- pciehpc_reg_put16(ctrl_p, ctrl_p->pcie_caps_reg_offset +
- PCIE_SLOTCTL, reg | SLOTCTL_SUPPORTED_INTRS_MASK);
+ /* enable interrupts */
+ if (ctrl_p->slot.slot_state == HPC_SLOT_CONNECTED)
+ pciehpc_reg_put16(ctrl_p, ctrl_p->pcie_caps_reg_offset +
+ PCIE_SLOTCTL, reg | SLOTCTL_SUPPORTED_INTRS_MASK);
+ else
+ pciehpc_reg_put16(ctrl_p, ctrl_p->pcie_caps_reg_offset +
+ PCIE_SLOTCTL, reg | (SLOTCTL_SUPPORTED_INTRS_MASK &
+ ~PCIE_SLOTCTL_PWR_FAULT_EN));
return (DDI_SUCCESS);
}
@@ -1075,9 +1080,45 @@ pciehpc_slot_connect(caddr_t ops_arg, hpc_slot_t slot_hdl,
control &= ~PCIE_SLOTCTL_PWR_CONTROL;
pciehpc_issue_hpc_command(ctrl_p, control);
- /* NOTE - any check to make sure power is really turned ON? */
+ /* check power is really turned ON? */
+ control = pciehpc_reg_get16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTCTL);
+ if (control & PCIE_SLOTCTL_PWR_CONTROL) {
+ PCIEHPC_DEBUG((CE_NOTE,
+ "slot %d fails to turn on power on connect\n",
+ ctrl_p->slot.slotNum));
+
+ goto cleanup1;
+ }
+
+ /* check power-fault on the slot? */
+ status = pciehpc_reg_get16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTSTS);
+ if (status & PCIE_SLOTSTS_PWR_FAULT_DETECTED) {
+ PCIEHPC_DEBUG((CE_NOTE,
+ "slot %d detects power fault on connect\n",
+ ctrl_p->slot.slotNum));
- /* NOTE - what about power-fault on the slot? */
+ /* set power control to OFF */
+ control = pciehpc_reg_get16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTCTL);
+ control |= PCIE_SLOTCTL_PWR_CONTROL;
+ pciehpc_issue_hpc_command(ctrl_p, control);
+
+ /* clear the status */
+ pciehpc_reg_put16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTSTS, status);
+ goto cleanup1;
+ }
+
+ /* enable all interrupts */
+ pciehpc_reg_put16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTSTS, status);
+
+ control = pciehpc_reg_get16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTCTL);
+ pciehpc_reg_put16(ctrl_p, ctrl_p->pcie_caps_reg_offset +
+ PCIE_SLOTCTL, control | SLOTCTL_SUPPORTED_INTRS_MASK);
/* 3. Set power LED to be ON */
pciehpc_set_led_state(ctrl_p, HPC_POWER_LED, HPC_LED_ON);
@@ -1093,6 +1134,10 @@ pciehpc_slot_connect(caddr_t ops_arg, hpc_slot_t slot_hdl,
mutex_exit(&ctrl_p->pciehpc_mutex);
return (HPC_SUCCESS);
+cleanup1:
+ /* set power led to OFF */
+ pciehpc_set_led_state(ctrl_p, HPC_POWER_LED, HPC_LED_OFF);
+
cleanup:
mutex_exit(&ctrl_p->pciehpc_mutex);
return (HPC_ERR_FAILED);
@@ -1177,6 +1222,13 @@ pciehpc_slot_disconnect(caddr_t ops_arg, hpc_slot_t slot_hdl,
pciehpc_set_led_state(ctrl_p, HPC_POWER_LED, HPC_LED_OFF);
pciehpc_set_led_state(ctrl_p, HPC_ATTN_LED, HPC_LED_OFF);
+ /* disable interrupt of power fault detection */
+ control = pciehpc_reg_get16(ctrl_p,
+ ctrl_p->pcie_caps_reg_offset + PCIE_SLOTCTL);
+ pciehpc_reg_put16(ctrl_p, ctrl_p->pcie_caps_reg_offset +
+ PCIE_SLOTCTL, control | (SLOTCTL_SUPPORTED_INTRS_MASK &
+ ~PCIE_SLOTCTL_PWR_FAULT_EN));
+
ctrl_p->slot.slot_state = HPC_SLOT_DISCONNECTED;
mutex_exit(&ctrl_p->pciehpc_mutex);
return (HPC_SUCCESS);
diff --git a/usr/src/uts/common/os/chip.c b/usr/src/uts/common/os/chip.c
index 8b0bfd765d..6a74cf4a91 100644
--- a/usr/src/uts/common/os/chip.c
+++ b/usr/src/uts/common/os/chip.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -230,11 +229,11 @@ chip_cpu_init(cpu_t *cp)
* On sun4v platforms, the chip infrastructure is currently being
* leveraged to implement core level load balancing.
*/
-#ifdef sun4v
+#ifdef DO_CORELEVEL_LOADBAL
cid = chip_plat_get_coreid(cp);
#else
cid = chip_plat_get_chipid(cp);
-#endif /* sun4v */
+#endif /* DO_CORELEVEL_LOADBAL */
chp = chip_find(cid);
if (chp == NULL) {
diff --git a/usr/src/uts/common/os/cpu.c b/usr/src/uts/common/os/cpu.c
index ea0f219b9e..cfb190b1c5 100644
--- a/usr/src/uts/common/os/cpu.c
+++ b/usr/src/uts/common/os/cpu.c
@@ -2593,8 +2593,13 @@ cpuset_find(cpuset_t *s)
/*
* Find a cpu in the cpuset
*/
- for (i = 0; (i < CPUSET_WORDS && cpu == (uint_t)-1); i++)
+ for (i = 0; i < CPUSET_WORDS; i++) {
cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
+ if (cpu != (uint_t)-1) {
+ cpu += i * BT_NBIPUL;
+ break;
+ }
+ }
return (cpu);
}
diff --git a/usr/src/uts/common/sys/Makefile b/usr/src/uts/common/sys/Makefile
index 94069abd27..5c6c23acab 100644
--- a/usr/src/uts/common/sys/Makefile
+++ b/usr/src/uts/common/sys/Makefile
@@ -663,7 +663,8 @@ FMFSHDRS= \
FMIOHDRS= \
ddi.h \
pci.h \
- sun4upci.h
+ sun4upci.h \
+ opl_mc_fm.h
FSHDRS= \
autofs.h \
diff --git a/usr/src/uts/common/sys/ddidmareq.h b/usr/src/uts/common/sys/ddidmareq.h
index af6d7d69f1..f770413ac4 100644
--- a/usr/src/uts/common/sys/ddidmareq.h
+++ b/usr/src/uts/common/sys/ddidmareq.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -388,6 +387,11 @@ typedef struct ddi_dma_lim {
*/
#define DDI_DMA_FLAGERR 0x200
+/*
+ * Enable relaxed ordering
+ */
+#define DDI_DMA_RELAXED_ORDERING 0x400
+
#define DMA_ATTR_V0 0
#define DMA_ATTR_VERSION DMA_ATTR_V0
diff --git a/usr/src/uts/common/sys/fm/io/opl_mc_fm.h b/usr/src/uts/common/sys/fm/io/opl_mc_fm.h
new file mode 100644
index 0000000000..f9f8436d7d
--- /dev/null
+++ b/usr/src/uts/common/sys/fm/io/opl_mc_fm.h
@@ -0,0 +1,69 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_FM_IO_OPL_MC_FM_H
+#define _SYS_FM_IO_OPL_MC_FM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* mc-opl ereport components */
+#define MC_OPL_ERROR_CLASS "asic.mac"
+#define MC_OPL_PTRL_SUBCLASS "ptrl"
+#define MC_OPL_MI_SUBCLASS "mi"
+
+/*
+ * ereport definition
+ */
+#define MC_OPL_UE "ue"
+#define MC_OPL_CE "ce"
+#define MC_OPL_CMPE "cmpe"
+#define MC_OPL_MUE "mue"
+#define MC_OPL_SUE "sue"
+
+/* mc-opl payload name fields */
+#define MC_OPL_BOARD "board"
+#define MC_OPL_BANK "bank"
+#define MC_OPL_STATUS "status"
+#define MC_OPL_ERR_ADD "err-add"
+#define MC_OPL_ERR_LOG "err-log"
+#define MC_OPL_ERR_SYND "syndrome"
+#define MC_OPL_ERR_DIMMSLOT "dimm-slot"
+#define MC_OPL_ERR_DRAM "dram-place"
+#define MC_OPL_PA "pa"
+#define MC_OPL_FLT_TYPE "flt-type"
+
+#define MC_OPL_RESOURCE "resource"
+
+#define MC_OPL_NO_UNUM ""
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_FM_IO_OPL_MC_FM_H */
diff --git a/usr/src/uts/common/sys/fm/io/sun4_fire.h b/usr/src/uts/common/sys/fm/io/sun4_fire.h
index 708d5ba825..4a7a8fa7eb 100644
--- a/usr/src/uts/common/sys/fm/io/sun4_fire.h
+++ b/usr/src/uts/common/sys/fm/io/sun4_fire.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -39,6 +38,7 @@ extern "C" {
*/
#define PCIEX_FIRE "fire"
+#define PCIEX_OBERON "oberon"
/* FIRE's JBUS ereport classes */
#define FIRE_JBC_MB_PEA "jbc.mb_pea"
@@ -71,6 +71,18 @@ extern "C" {
#define FIRE_JBC_UNSOL_INTR "jbc.unsol_intr"
#define FIRE_JBC_EBUS_TO "jbc.ebus_to"
+/* OBERON's UBC ereport classes */
+#define FIRE_UBC_DMARDUEA "ubc.dmarduea"
+#define FIRE_UBC_DMAWTUEA "ubc.dmawtuea"
+#define FIRE_UBC_MEMRDAXA "ubc.memrdaxa"
+#define FIRE_UBC_MEMWTAXA "ubc.memwtaxa"
+#define FIRE_UBC_DMARDUEB "ubc.dmardueb"
+#define FIRE_UBC_DMAWTUEB "ubc.dmawtueb"
+#define FIRE_UBC_MEMRDAXB "ubc.memrdaxb"
+#define FIRE_UBC_MEMWTAXB "ubc.memwtaxb"
+#define FIRE_UBC_PIOWTUE "ubc.piowtue"
+#define FIRE_UBC_PIOWBEUE "ubc.piowbeue"
+#define FIRE_UBC_PIORBEUE "ubc.piorbeue"
/* FIRE's DMC ereport classes */
#define FIRE_DMC_MSI_NOT_EN "dmc.msi_not_en"
@@ -95,6 +107,7 @@ extern "C" {
#define FIRE_DMC_TBW_ERR "dmc.tbw_err"
#define FIRE_DMC_TBW_DPE "dmc.tbw_dpe"
#define FIRE_DMC_TTC_CAE "dmc.ttc_cae"
+#define FIRE_DMC_TTC_DUE "dmc.ttc_due"
/* FIRE's PEC ereport classes */
@@ -121,7 +134,7 @@ extern "C" {
#define FIRE_PEC_UC "pec.uc"
#define FIRE_PEC_CTO "pec.cto"
#define FIRE_PEC_MFP "pec.mfp"
-#define FIRE_PEC_PP "pec.pp"
+#define FIRE_PEC_PP "pec.pois"
#define FIRE_PEC_FCP "pec.fcp"
#define FIRE_PEC_DLP "pec.dlp"
#define FIRE_PEC_TE "pec.te"
@@ -131,6 +144,13 @@ extern "C" {
#define FIRE_PEC_BDP "pec.bdp"
#define FIRE_PEC_BTP "pec.btp"
#define FIRE_PEC_RE "pec.re"
+#define FIRE_PEC_IHB_UE "pec.ihb_ue"
+#define FIRE_PEC_ECRC "pec.ecrc"
+#define FIRE_PEC_EIUE "pec.eiue"
+#define FIRE_PEC_ERBUE "pec.erbue"
+#define FIRE_PEC_EHBUE "pec.ehbue"
+#define FIRE_PEC_EDBUE "pec.edbue"
+#define FIRE_PEC_TLUEITMO "pec.tlueitmo"
/* Primary error */
#define FIRE_PRIMARY "primary"
@@ -190,6 +210,17 @@ extern "C" {
#define FIRE_JBC_JOTEL2 "jbc-jotel2"
#define FIRE_JBC_MTEL "jbc-mtel"
+/* UBC ereport payload */
+#define OBERON_UBC_ELE "ubc-ele"
+#define OBERON_UBC_IE "ubc-ie"
+#define OBERON_UBC_IS "ubc-is"
+#define OBERON_UBC_ESS "ubc-ess"
+#define OBERON_UBC_MUE "ubc-mue"
+#define OBERON_UBC_UNUM "ubc-unum"
+#define OBERON_UBC_DID "ubc-did"
+#define OBERON_UBC_CPUV "ubc-cpuv"
+#define OBERON_UBC_RESOURCE "resource"
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/sfmmu/ml/sfmmu_asm.s b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
index 13992d870a..4ed8cd2e29 100644
--- a/usr/src/uts/sfmmu/ml/sfmmu_asm.s
+++ b/usr/src/uts/sfmmu/ml/sfmmu_asm.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -279,6 +278,8 @@ label:
TSB_INSERT_UNLOCK_ENTRY(tsbep, tte, tagtarget, tmp1) ;\
label:
+#endif /* UTSB_PHYS */
+
/*
* Load a 32M/256M Panther TSB entry into the TSB at TL > 0,
* for ITLB synthesis.
@@ -356,8 +357,6 @@ label: ;\
sllx tmp, TTE_SZ_SHFT, tmp ;\
or tte, tmp, tte
-#endif /* UTSB_PHYS */
-
/*
* Load an entry into the TSB at TL=0.
*
@@ -972,10 +971,10 @@ sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
SET_SIZE(sfmmu_kpm_patch_tsbm)
ENTRY_NP(sfmmu_patch_utsb)
-#ifdef sun4v
+#ifdef UTSB_PHYS
retl
nop
-#else /* sun4v */
+#else /* UTSB_PHYS */
/*
* We need to hot patch utsb_vabase and utsb4m_vabase
*/
@@ -1044,7 +1043,7 @@ sfmmu_kpm_unload_tsb(caddr_t addr, int vpshift)
ret
restore
-#endif /* sun4v */
+#endif /* UTSB_PHYS */
SET_SIZE(sfmmu_patch_utsb)
@@ -1420,9 +1419,12 @@ hblk_add_panic2:
mov %o2, %g3
#ifndef sun4v
GET_CPU_IMPL(%g2)
- cmp %g2, CHEETAH_IMPL
- bge %icc, hblk_hash_rm_1
- and %o4, %g4, %g2
+ cmp %g2, CHEETAH_IMPL
+ bge,a,pt %icc, hblk_hash_rm_1
+ and %o4, %g4, %g2
+ cmp %g2, SPITFIRE_IMPL
+ blt %icc, hblk_hash_rm_2 /* no flushing needed for OPL */
+ and %o4, %g4, %g2
stxa %g0, [%g2]ASI_DC_TAG /* flush prev pa from dcache */
add %o4, HMEBLK_NEXT, %o4
and %o4, %g4, %g2
@@ -2111,7 +2113,7 @@ label/**/4: ;\
TTE_SUSPEND_INT_SHIFT(hmentoff) ;\
btst tte, hmentoff ;\
bz,pt %xcc, foundlabel ;\
- nop ;\
+ nop ;\
;\
/* ;\
* Mapping is suspended, so goto suspend label. ;\
@@ -2176,7 +2178,15 @@ sfmmu_kprot_patch_ktsb4m_szcode:
/* %g3 = second TSB entry ptr now, %g2 preserved */
#else /* sun4v */
+#ifdef UTSB_PHYS
+ /* g1 = first TSB entry ptr */
+ GET_2ND_TSBREG(%g3)
+ brlz,a,pt %g3, 9f /* check for 2nd TSB */
+ mov %g0, %g3 /* clear second tsbe ptr */
+ GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
+ /* %g3 = second TSB entry ptr now, %g2 preserved */
+#else /* UTSB_PHYS */
brgez,pt %g1, 9f /* check for 2nd TSB */
mov %g0, %g3 /* clear second tsbe ptr */
@@ -2185,8 +2195,8 @@ sfmmu_kprot_patch_ktsb4m_szcode:
/* %g3 = second TSB entry ptr now, %g7 clobbered */
mov %g1, %g7
GET_1ST_TSBE_PTR(%g7, %g1, %g5, sfmmu_uprot)
-
-#endif /* sun4v */
+#endif /* UTSB_PHYS */
+#endif /* sun4v */
9:
CPU_TSBMISS_AREA(%g6, %g7)
HAT_PERCPU_STAT16(%g6, TSBMISS_UPROTS, %g7)
@@ -2375,7 +2385,6 @@ dktsb4m_kpmcheck:
.align 64
ALTENTRY(sfmmu_uitlb_fastpath)
- SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
PROBE_1ST_ITSB(%g1, %g7, uitlb_fast_8k_probefail)
/* g4 - g5 = clobbered by PROBE_1ST_ITSB */
ba,pn %xcc, sfmmu_tsb_miss_tt
@@ -2395,16 +2404,13 @@ dktsb4m_kpmcheck:
.align 64
ALTENTRY(sfmmu_udtlb_fastpath)
- SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
PROBE_1ST_DTSB(%g1, %g7, udtlb_fast_8k_probefail)
/* g4 - g5 = clobbered by PROBE_1ST_DTSB */
ba,pn %xcc, sfmmu_tsb_miss_tt
mov %g0, %g3
-#endif /* sun4v */
-
/*
- * User instruction miss w/ multiple TSBs.
+ * User instruction miss w/ multiple TSBs (sun4v).
* The first probe covers 8K, 64K, and 512K page sizes,
* because 64K and 512K mappings are replicated off 8K
* pointer. Second probe covers 4M page size only.
@@ -2422,10 +2428,7 @@ dktsb4m_kpmcheck:
.align 64
ALTENTRY(sfmmu_uitlb_slowpath)
-#ifdef sun4v
- SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
GET_1ST_TSBE_PTR(%g2, %g1, %g4, %g5)
-
PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
/* g4 - g5 = clobbered here */
@@ -2434,17 +2437,47 @@ dktsb4m_kpmcheck:
srlx %g2, TAG_VALO_SHIFT, %g7
PROBE_2ND_ITSB(%g3, %g7)
/* NOT REACHED */
+
#else /* sun4v */
+
+ /*
+ * User instruction miss w/ multiple TSBs (sun4u).
+ * The first probe covers 8K, 64K, and 512K page sizes,
+ * because 64K and 512K mappings are replicated off 8K
+ * pointer. Second probe covers 4M page size only.
+ *
+ * Just like sfmmu_udtlb_slowpath, except:
+ * o Uses ASI_ITLB_IN
+ * o checks for execute permission
+ * o No ISM prediction.
+ *
+ * g1 = tsb8k pointer register
+ * g2 = tag access register
+ * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
+ * g4 - g6 = scratch registers
+ * g7 = TSB tag to match
+ */
+ .align 64
+ ALTENTRY(sfmmu_uitlb_slowpath)
+
+#ifdef UTSB_PHYS
+ /*
+ * g1 = 1st TSB entry pointer
+ * g3 = 2nd TSB base register
+ * Need 2nd TSB entry pointer for 2nd probe.
+ */
+ PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
+
+ GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
+#else /* UTSB_PHYS */
mov %g1, %g3 /* save tsb8k reg in %g3 */
- SETUP_UTSB_ATOMIC_ASI(%g4, %g5)
GET_1ST_TSBE_PTR(%g3, %g1, %g5, sfmmu_uitlb)
-
PROBE_1ST_ITSB(%g1, %g7, uitlb_8k_probefail)
- /* g4 - g5 = clobbered here */
mov %g2, %g6 /* GET_2ND_TSBE_PTR clobbers tagacc */
mov %g3, %g7 /* copy tsb8k reg in %g7 */
GET_2ND_TSBE_PTR(%g6, %g7, %g3, %g4, %g5, sfmmu_uitlb)
+#endif /* UTSB_PHYS */
/* g1 = first TSB pointer, g3 = second TSB pointer */
srlx %g2, TAG_VALO_SHIFT, %g7
PROBE_2ND_ITSB(%g3, %g7, isynth)
@@ -2462,14 +2495,13 @@ dktsb4m_kpmcheck:
*
* g1 = tsb8k pointer register
* g2 = tag access register
- * g3 - g6 = scratch registers
+ * g3 = 2nd tsbreg if defined UTSB_PHYS, else scratch
+ * g4 - g6 = scratch registers
* g7 = TSB tag to match
*/
.align 64
ALTENTRY(sfmmu_udtlb_slowpath)
- SETUP_UTSB_ATOMIC_ASI(%g4, %g6)
-
/*
* Check for ISM. If it exists, look for 4M mappings in the second TSB
* first, then probe for other mappings in the first TSB if that fails.
@@ -2499,8 +2531,10 @@ udtlb_miss_probefirst:
brgz,pn %g6, sfmmu_tsb_miss_tt
nop
#else /* sun4v */
+#ifndef UTSB_PHYS
mov %g1, %g4
GET_1ST_TSBE_PTR(%g4, %g1, %g5, sfmmu_udtlb)
+#endif UTSB_PHYS
PROBE_1ST_DTSB(%g1, %g7, udtlb_first_probefail)
/*
@@ -2511,7 +2545,9 @@ udtlb_miss_probefirst:
*/
brgz,pn %g6, sfmmu_tsb_miss_tt
nop
+#ifndef UTSB_PHYS
ldxa [%g0]ASI_DMMU_TSB_8K, %g3
+#endif UTSB_PHYS
/* fall through in 8K->4M probe order */
#endif /* sun4v */
@@ -2526,16 +2562,21 @@ udtlb_miss_probesecond:
*/
#ifdef sun4v
/* GET_2ND_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2) */
- /* tagacc (%g2) not destroyed */
GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
/* %g2 is okay, no need to reload, %g3 = second tsbe ptr */
-#else
+#else /* sun4v */
+#ifdef UTSB_PHYS
+ GET_2ND_TSBREG(%g3)
+ GET_2ND_TSBE_PTR(%g2, %g3, %g4, %g5)
+ /* tagacc (%g2) is okay, no need to reload, %g3 = second tsbe ptr */
+#else /* UTSB_PHYS */
mov %g3, %g7
GET_2ND_TSBE_PTR(%g2, %g7, %g3, %g4, %g5, sfmmu_udtlb)
/* %g2 clobbered, %g3 =second tsbe ptr */
mov MMU_TAG_ACCESS, %g2
ldxa [%g2]ASI_DMMU, %g2
-#endif
+#endif /* UTSB_PHYS */
+#endif /* sun4v */
srlx %g2, TAG_VALO_SHIFT, %g7
PROBE_2ND_DTSB(%g3, %g7, udtlb_4m_probefail)
@@ -2670,18 +2711,22 @@ tsb_4M:
tsb_32M:
#ifndef sun4v
GET_CPU_IMPL(%g5)
+ cmp %g5, OLYMPUS_C_IMPL
+ be,pn %xcc, 0f
+ nop
cmp %g5, PANTHER_IMPL
bne,pt %xcc, tsb_pagefault
nop
#endif
+0:
ldn [%g6 + (TSBMISS_SCRATCH + TSB_TAGACC)], %g3
sllx %g3, TAGACC_CTX_LSHIFT, %g5
#ifdef sun4v
- brz,pn %g5, 6f
-#else
+ brz,pn %g5, 6f
+#else
brz,pn %g5, tsb_pagefault
-#endif
+#endif
lduh [%g6 + TSBMISS_HATFLAGS], %g4
and %g4, HAT_32M_FLAG, %g5
brz,pn %g5, tsb_256M
@@ -2813,10 +2858,10 @@ tsb_user:
tsb_user8k:
ldn [%g6 + TSBMISS_TSBPTR], %g1 ! g1 = first TSB ptr
-#ifndef sun4v
- mov ASI_N, %g7 ! user TSBs always accessed by VA
+#ifndef UTSB_PHYS
+ mov ASI_N, %g7 ! user TSBs accessed by VA
mov %g7, %asi
-#endif /* sun4v */
+#endif /* UTSB_PHYS */
TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 5)
@@ -2843,10 +2888,10 @@ tsb_user4m:
brz,pn %g1, 5f /* Check to see if we have 2nd TSB programmed */
nop
-#ifndef sun4v
- mov ASI_N, %g7 ! user TSBs always accessed by VA
- mov %g7, %asi
-#endif
+#ifndef UTSB_PHYS
+ mov ASI_N, %g7 ! user TSBs accessed by VA
+ mov %g7, %asi
+#endif /* UTSB_PHYS */
TSB_UPDATE_TL(%g1, %g3, %g2, %g4, %g7, %g6, 6)
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.c b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
index 93bf6f4a6b..4e507ef2a3 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.c
@@ -1191,7 +1191,7 @@ hat_init(void)
((struct hme_blk *)hblk_reserve)->hblk_nextpa =
va_to_pa((caddr_t)hblk_reserve);
-#ifndef sun4v
+#ifndef UTSB_PHYS
/*
* Reserve some kernel virtual address space for the locked TTEs
* that allow us to probe the TSB from TL>0.
@@ -2065,7 +2065,7 @@ sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
* Construct a tte for a page:
*
* tte_valid = 1
- * tte_size2 = size & TTE_SZ2_BITS (Panther-only)
+ * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
* tte_size = size
* tte_nfo = attr & HAT_NOFAULT
* tte_ie = attr & HAT_STRUCTURE_LE
@@ -12190,7 +12190,7 @@ tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
void
chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
{
- uint_t i, j, k;
+ pfn_t i, j, k;
int cpuid = CPU->cpu_id;
gorig[cpuid] = orig_old;
@@ -12208,14 +12208,12 @@ chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
k = TTE_TO_TTEPFN(new);
if (i != j) {
/* remap error? */
- panic("chk_tte: bad pfn, 0x%x, 0x%x",
- i, j);
+ panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
}
if (i != k) {
/* remap error? */
- panic("chk_tte: bad pfn2, 0x%x, 0x%x",
- i, k);
+ panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
}
} else {
if (TTE_IS_VALID(new)) {
@@ -12225,8 +12223,7 @@ chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
i = TTE_TO_TTEPFN(orig_old);
k = TTE_TO_TTEPFN(new);
if (i != k) {
- panic("chk_tte: bad pfn3, 0x%x, 0x%x",
- i, k);
+ panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
}
}
} else {
@@ -12235,8 +12232,8 @@ chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
if (TTE_IS_VALID(new)) {
k = TTE_TO_TTEPFN(new);
if (j != k) {
- panic("chk_tte: bad pfn4, 0x%x, 0x%x",
- j, k);
+ panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
+ j, k);
}
} else {
panic("chk_tte: why here?");
diff --git a/usr/src/uts/sfmmu/vm/hat_sfmmu.h b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
index 5a81b63090..c90663408a 100644
--- a/usr/src/uts/sfmmu/vm/hat_sfmmu.h
+++ b/usr/src/uts/sfmmu/vm/hat_sfmmu.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1280,6 +1279,13 @@ extern uint_t tsb_slab_pamask;
* may be up to 4M in size. For now, only hardware supported TSB sizes
* are supported, though the slabs are usually 4M in size.
*
+ * sun4u platforms that define UTSB_PHYS use physical addressing to access
+ * the user TSBs at TL>0. The first user TSB base is in the MMU I/D TSB Base
+ * registers. The second TSB base uses a dedicated scratchpad register which
+ * requires a definition of SCRATCHPAD_UTSBREG in mach_sfmmu.h. The layout for
+ * both registers is equivalent to sun4v below, except the TSB PA range is
+ * [46..13] for sun4u.
+ *
* sun4v platforms
* ---------------
* On sun4v platforms, we use two dedicated scratchpad registers as pseudo
@@ -1516,9 +1522,9 @@ extern void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
* functions exported to machine dependent VM code
*/
extern void sfmmu_patch_ktsb(void);
-#ifndef sun4v
+#ifndef UTSB_PHYS
extern void sfmmu_patch_utsb(void);
-#endif /* sun4v */
+#endif /* UTSB_PHYS */
extern pfn_t sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *);
extern void sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *);
#ifdef DEBUG
diff --git a/usr/src/uts/sparc/fpu/fpu_simulator.c b/usr/src/uts/sparc/fpu/fpu_simulator.c
index a29c45f0da..0705a1023b 100644
--- a/usr/src/uts/sparc/fpu/fpu_simulator.c
+++ b/usr/src/uts/sparc/fpu/fpu_simulator.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -107,6 +106,15 @@ struct fpuinfo_kstat fpuinfo = {
{ "fpu_sim_fqtoi", KSTAT_DATA_UINT64},
{ "fpu_sim_fmovcc", KSTAT_DATA_UINT64},
{ "fpu_sim_fmovr", KSTAT_DATA_UINT64},
+ { "fpu_sim_fmadds", KSTAT_DATA_UINT64},
+ { "fpu_sim_fmaddd", KSTAT_DATA_UINT64},
+ { "fpu_sim_fmsubs", KSTAT_DATA_UINT64},
+ { "fpu_sim_fmsubd", KSTAT_DATA_UINT64},
+ { "fpu_sim_fnmadds", KSTAT_DATA_UINT64},
+ { "fpu_sim_fnmaddd", KSTAT_DATA_UINT64},
+ { "fpu_sim_fnmsubs", KSTAT_DATA_UINT64},
+ { "fpu_sim_fnmsubd", KSTAT_DATA_UINT64},
+ { "fpu_sim_invalid", KSTAT_DATA_UINT64},
};
struct visinfo_kstat visinfo = {
@@ -182,241 +190,315 @@ _fp_fpu_simulator(
*/
pfpsd->fp_direction = GSR_IM(gsr) ? GSR_IRND(gsr) : fsr.rnd;
pfpsd->fp_precision = fsr.rnp;
- nfcc = nrd & 0x3;
- if (inst.op3 == 0x35) { /* fpop2 */
- fsr.cexc = 0;
- *pfsr = fsr;
- if ((inst.opcode & 0xf) == 0) {
- if ((fp_notp) && (inst.prec == 0))
- return (ftt_unimplemented);
- FPUINFO_KSTAT(fpu_sim_fmovcc);
- return (fmovcc(pfpsd, inst, pfsr)); /* fmovcc */
- } else if ((inst.opcode & 0x7) == 1) {
- if ((fp_notp) && (inst.prec == 0))
- return (ftt_unimplemented);
- FPUINFO_KSTAT(fpu_sim_fmovr);
- return (fmovr(pfpsd, inst)); /* fmovr */
- }
- }
- /* ibit not valid for fpop1 instructions */
- if ((fp_notp) && (inst.ibit != 0))
- return (ftt_unimplemented);
- if ((fp_notp) && (inst.prec == 0)) { /* fxto[sdq], fito[sdq] */
- if ((inst.opcode != flltos) &&
- (inst.opcode != flltod) &&
- (inst.opcode != flltox) &&
- (inst.opcode != fitos) &&
- (inst.opcode != fitod) &&
- (inst.opcode != fitox)) {
- return (ftt_unimplemented);
- }
- }
- switch (inst.opcode) {
- case fmovs: /* also covers fmovd, fmovq */
- if (inst.prec < 2) { /* fmovs */
- _fp_unpack_word(pfpsd, &usr, nrs2);
- _fp_pack_word(pfpsd, &usr, nrd);
- FPUINFO_KSTAT(fpu_sim_fmovs);
- } else { /* fmovd */
- _fp_unpack_extword(pfpsd, &lusr, nrs2);
- _fp_pack_extword(pfpsd, &lusr, nrd);
- if (inst.prec > 2) { /* fmovq */
- _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
- _fp_pack_extword(pfpsd, &lusr, nrd+2);
- FPUINFO_KSTAT(fpu_sim_fmovq);
- } else {
- FPUINFO_KSTAT(fpu_sim_fmovd);
+
+ if (inst.op3 == 0x37) { /* IMPDEP2B FMA-fused opcode */
+ fp_fma_inst_type *fma_inst;
+ uint32_t nrs3;
+ unpacked us3;
+ unpacked ust;
+ fma_inst = (fp_fma_inst_type *) &inst;
+ nrs2 = fma_inst->rs2;
+ nrs3 = fma_inst->rs3;
+ switch (fma_inst->var) {
+ case fmadd:
+ _fp_unpack(pfpsd, &us1, nrs1, fma_inst->sz);
+ _fp_unpack(pfpsd, &us2, nrs2, fma_inst->sz);
+ _fp_mul(pfpsd, &us1, &us2, &ust);
+ if ((pfpsd->fp_current_exceptions & fsr.tem) == 0) {
+ _fp_unpack(pfpsd, &us3, nrs3, fma_inst->sz);
+ _fp_add(pfpsd, &ust, &us3, &ud);
+ _fp_pack(pfpsd, &ud, nrd, fma_inst->sz);
}
- }
- break;
- case fabss: /* also covers fabsd, fabsq */
- if (inst.prec < 2) { /* fabss */
- _fp_unpack_word(pfpsd, &usr, nrs2);
- usr &= 0x7fffffff;
- _fp_pack_word(pfpsd, &usr, nrd);
- FPUINFO_KSTAT(fpu_sim_fabss);
- } else { /* fabsd */
- _fp_unpack_extword(pfpsd, &lusr, nrs2);
- lusr &= 0x7fffffffffffffff;
- _fp_pack_extword(pfpsd, &lusr, nrd);
- if (inst.prec > 2) { /* fabsq */
- _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
- _fp_pack_extword(pfpsd, &lusr, nrd+2);
- FPUINFO_KSTAT(fpu_sim_fabsq);
- } else {
- FPUINFO_KSTAT(fpu_sim_fabsd);
+ FPUINFO_KSTAT_PREC(fma_inst->sz, fpu_sim_fmadds,
+ fpu_sim_fmaddd, fpu_sim_invalid);
+ break;
+ case fmsub:
+ _fp_unpack(pfpsd, &us1, nrs1, fma_inst->sz);
+ _fp_unpack(pfpsd, &us2, nrs2, fma_inst->sz);
+ _fp_mul(pfpsd, &us1, &us2, &ust);
+ if ((pfpsd->fp_current_exceptions & fsr.tem) == 0) {
+ _fp_unpack(pfpsd, &us3, nrs3, fma_inst->sz);
+ _fp_sub(pfpsd, &ust, &us3, &ud);
+ _fp_pack(pfpsd, &ud, nrd, fma_inst->sz);
+ }
+ FPUINFO_KSTAT_PREC(fma_inst->sz, fpu_sim_fmsubs,
+ fpu_sim_fmsubd, fpu_sim_invalid);
+ break;
+ case fnmadd:
+ _fp_unpack(pfpsd, &us1, nrs1, fma_inst->sz);
+ _fp_unpack(pfpsd, &us2, nrs2, fma_inst->sz);
+ _fp_mul(pfpsd, &us1, &us2, &ust);
+ if ((pfpsd->fp_current_exceptions & fsr.tem) == 0) {
+ if (ust.fpclass == fp_quiet ||
+ ust.fpclass == fp_signaling) {
+ _fp_pack(pfpsd, &ust, nrd, fma_inst->sz);
+ } else {
+ _fp_unpack(pfpsd, &us3, nrs3, fma_inst->sz);
+ _fp_add(pfpsd, &ust, &us3, &ud);
+ ud.sign ^= 1;
+ _fp_pack(pfpsd, &ud, nrd, fma_inst->sz);
+ }
+ }
+ FPUINFO_KSTAT_PREC(fma_inst->sz, fpu_sim_fnmadds,
+ fpu_sim_fnmaddd, fpu_sim_invalid);
+ break;
+ case fnmsub:
+ _fp_unpack(pfpsd, &us1, nrs1, fma_inst->sz);
+ _fp_unpack(pfpsd, &us2, nrs2, fma_inst->sz);
+ _fp_mul(pfpsd, &us1, &us2, &ust);
+ if ((pfpsd->fp_current_exceptions & fsr.tem) == 0) {
+ if (ust.fpclass == fp_quiet ||
+ ust.fpclass == fp_signaling) {
+ _fp_pack(pfpsd, &ust, nrd, fma_inst->sz);
+ } else {
+ _fp_unpack(pfpsd, &us3, nrs3, fma_inst->sz);
+ _fp_sub(pfpsd, &ust, &us3, &ud);
+ ud.sign ^= 1;
+ _fp_pack(pfpsd, &ud, nrd, fma_inst->sz);
+ }
}
+ FPUINFO_KSTAT_PREC(fma_inst->sz, fpu_sim_fnmsubs,
+ fpu_sim_fnmsubd, fpu_sim_invalid);
}
- break;
- case fnegs: /* also covers fnegd, fnegq */
- if (inst.prec < 2) { /* fnegs */
- _fp_unpack_word(pfpsd, &usr, nrs2);
- usr ^= 0x80000000;
- _fp_pack_word(pfpsd, &usr, nrd);
- FPUINFO_KSTAT(fpu_sim_fnegs);
- } else { /* fnegd */
- _fp_unpack_extword(pfpsd, &lusr, nrs2);
- lusr ^= 0x8000000000000000;
- _fp_pack_extword(pfpsd, &lusr, nrd);
- if (inst.prec > 2) { /* fnegq */
- _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
- lusr ^= 0x0000000000000000;
- _fp_pack_extword(pfpsd, &lusr, nrd+2);
- FPUINFO_KSTAT(fpu_sim_fnegq);
- } else {
- FPUINFO_KSTAT(fpu_sim_fnegd);
+ } else {
+ nfcc = nrd & 0x3;
+ if (inst.op3 == 0x35) { /* fpop2 */
+ fsr.cexc = 0;
+ *pfsr = fsr;
+ if ((inst.opcode & 0xf) == 0) {
+ if ((fp_notp) && (inst.prec == 0))
+ return (ftt_unimplemented);
+ FPUINFO_KSTAT(fpu_sim_fmovcc);
+ return (fmovcc(pfpsd, inst, pfsr)); /* fmovcc */
+ } else if ((inst.opcode & 0x7) == 1) {
+ if ((fp_notp) && (inst.prec == 0))
+ return (ftt_unimplemented);
+ FPUINFO_KSTAT(fpu_sim_fmovr);
+ return (fmovr(pfpsd, inst)); /* fmovr */
}
}
- break;
- case fadd:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_add(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, inst.prec);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fadds,
- fpu_sim_faddd, fpu_sim_faddq);
- break;
- case fsub:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_sub(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, inst.prec);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fsubs,
- fpu_sim_fsubd, fpu_sim_fsubq);
- break;
- case fmul:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_mul(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, inst.prec);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fmuls,
- fpu_sim_fmuld, fpu_sim_fmulq);
- break;
- case fsmuld:
- if ((fp_notp) && (inst.prec != 1))
- return (ftt_unimplemented);
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_mul(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, (enum fp_op_type) ((int)inst.prec+1));
- FPUINFO_KSTAT(fpu_sim_fsmuld);
- break;
- case fdmulx:
- if ((fp_notp) && (inst.prec != 2))
+ /* ibit not valid for fpop1 instructions */
+ if ((fp_notp) && (inst.ibit != 0))
return (ftt_unimplemented);
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_mul(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, (enum fp_op_type) ((int)inst.prec+1));
- FPUINFO_KSTAT(fpu_sim_fdmulx);
- break;
- case fdiv:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- _fp_div(pfpsd, &us1, &us2, &ud);
- _fp_pack(pfpsd, &ud, nrd, inst.prec);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fdivs,
- fpu_sim_fdivd, fpu_sim_fdivq);
- break;
- case fcmp:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- cc = _fp_compare(pfpsd, &us1, &us2, 0);
- if (!(pfpsd->fp_current_exceptions & pfpsd->fp_fsrtem))
- switch (nfcc) {
- case fcc_0:
- fsr.fcc0 = cc;
- break;
- case fcc_1:
- fsr.fcc1 = cc;
- break;
- case fcc_2:
- fsr.fcc2 = cc;
- break;
- case fcc_3:
- fsr.fcc3 = cc;
- break;
+ if ((fp_notp) && (inst.prec == 0)) { /* fxto[sdq], fito[sdq] */
+ if ((inst.opcode != flltos) &&
+ (inst.opcode != flltod) &&
+ (inst.opcode != flltox) &&
+ (inst.opcode != fitos) &&
+ (inst.opcode != fitod) &&
+ (inst.opcode != fitox)) {
+ return (ftt_unimplemented);
+ }
+ }
+ switch (inst.opcode) {
+ case fmovs: /* also covers fmovd, fmovq */
+ if (inst.prec < 2) { /* fmovs */
+ _fp_unpack_word(pfpsd, &usr, nrs2);
+ _fp_pack_word(pfpsd, &usr, nrd);
+ FPUINFO_KSTAT(fpu_sim_fmovs);
+ } else { /* fmovd */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2);
+ _fp_pack_extword(pfpsd, &lusr, nrd);
+ if (inst.prec > 2) { /* fmovq */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
+ _fp_pack_extword(pfpsd, &lusr, nrd+2);
+ FPUINFO_KSTAT(fpu_sim_fmovq);
+ } else {
+ FPUINFO_KSTAT(fpu_sim_fmovd);
+ }
}
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fcmps,
- fpu_sim_fcmpd, fpu_sim_fcmpq);
- break;
- case fcmpe:
- _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
- _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
- cc = _fp_compare(pfpsd, &us1, &us2, 1);
- if (!(pfpsd->fp_current_exceptions & pfpsd->fp_fsrtem))
- switch (nfcc) {
- case fcc_0:
- fsr.fcc0 = cc;
- break;
- case fcc_1:
- fsr.fcc1 = cc;
- break;
- case fcc_2:
- fsr.fcc2 = cc;
- break;
- case fcc_3:
- fsr.fcc3 = cc;
- break;
+ break;
+ case fabss: /* also covers fabsd, fabsq */
+ if (inst.prec < 2) { /* fabss */
+ _fp_unpack_word(pfpsd, &usr, nrs2);
+ usr &= 0x7fffffff;
+ _fp_pack_word(pfpsd, &usr, nrd);
+ FPUINFO_KSTAT(fpu_sim_fabss);
+ } else { /* fabsd */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2);
+ lusr &= 0x7fffffffffffffff;
+ _fp_pack_extword(pfpsd, &lusr, nrd);
+ if (inst.prec > 2) { /* fabsq */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
+ _fp_pack_extword(pfpsd, &lusr, nrd+2);
+ FPUINFO_KSTAT(fpu_sim_fabsq);
+ } else {
+ FPUINFO_KSTAT(fpu_sim_fabsd);
+ }
}
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fcmpes,
- fpu_sim_fcmped, fpu_sim_fcmpeq);
- break;
- case fsqrt:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- _fp_sqrt(pfpsd, &us1, &ud);
- _fp_pack(pfpsd, &ud, nrd, inst.prec);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fsqrts,
- fpu_sim_fsqrtd, fpu_sim_fsqrtq);
- break;
- case ftoi:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- pfpsd->fp_direction = fp_tozero;
- /* Force rounding toward zero. */
- _fp_pack(pfpsd, &us1, nrd, fp_op_int32);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fstoi,
- fpu_sim_fdtoi, fpu_sim_fqtoi);
- break;
- case ftoll:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- pfpsd->fp_direction = fp_tozero;
- /* Force rounding toward zero. */
- _fp_pack(pfpsd, &us1, nrd, fp_op_int64);
- FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fstox,
- fpu_sim_fdtox, fpu_sim_fqtox);
- break;
- case flltos:
- _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
- _fp_pack(pfpsd, &us1, nrd, fp_op_single);
- FPUINFO_KSTAT(fpu_sim_fxtos);
- break;
- case flltod:
- _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
- _fp_pack(pfpsd, &us1, nrd, fp_op_double);
- FPUINFO_KSTAT(fpu_sim_fxtod);
- break;
- case flltox:
- _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
- _fp_pack(pfpsd, &us1, nrd, fp_op_extended);
- FPUINFO_KSTAT(fpu_sim_fxtoq);
- break;
- case fitos:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- _fp_pack(pfpsd, &us1, nrd, fp_op_single);
- FPUINFO_KSTAT(fpu_sim_fitos);
- break;
- case fitod:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- _fp_pack(pfpsd, &us1, nrd, fp_op_double);
- FPUINFO_KSTAT(fpu_sim_fitod);
- break;
- case fitox:
- _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
- _fp_pack(pfpsd, &us1, nrd, fp_op_extended);
- FPUINFO_KSTAT(fpu_sim_fitoq);
- break;
- default:
- return (ftt_unimplemented);
+ break;
+ case fnegs: /* also covers fnegd, fnegq */
+ if (inst.prec < 2) { /* fnegs */
+ _fp_unpack_word(pfpsd, &usr, nrs2);
+ usr ^= 0x80000000;
+ _fp_pack_word(pfpsd, &usr, nrd);
+ FPUINFO_KSTAT(fpu_sim_fnegs);
+ } else { /* fnegd */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2);
+ lusr ^= 0x8000000000000000;
+ _fp_pack_extword(pfpsd, &lusr, nrd);
+ if (inst.prec > 2) { /* fnegq */
+ _fp_unpack_extword(pfpsd, &lusr, nrs2+2);
+ lusr ^= 0x0000000000000000;
+ _fp_pack_extword(pfpsd, &lusr, nrd+2);
+ FPUINFO_KSTAT(fpu_sim_fnegq);
+ } else {
+ FPUINFO_KSTAT(fpu_sim_fnegd);
+ }
+ }
+ break;
+ case fadd:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_add(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd, inst.prec);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fadds,
+ fpu_sim_faddd, fpu_sim_faddq);
+ break;
+ case fsub:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_sub(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd, inst.prec);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fsubs,
+ fpu_sim_fsubd, fpu_sim_fsubq);
+ break;
+ case fmul:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_mul(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd, inst.prec);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fmuls,
+ fpu_sim_fmuld, fpu_sim_fmulq);
+ break;
+ case fsmuld:
+ if ((fp_notp) && (inst.prec != 1))
+ return (ftt_unimplemented);
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_mul(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd,
+ (enum fp_op_type) ((int)inst.prec+1));
+ FPUINFO_KSTAT(fpu_sim_fsmuld);
+ break;
+ case fdmulx:
+ if ((fp_notp) && (inst.prec != 2))
+ return (ftt_unimplemented);
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_mul(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd,
+ (enum fp_op_type) ((int)inst.prec+1));
+ FPUINFO_KSTAT(fpu_sim_fdmulx);
+ break;
+ case fdiv:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ _fp_div(pfpsd, &us1, &us2, &ud);
+ _fp_pack(pfpsd, &ud, nrd, inst.prec);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fdivs,
+ fpu_sim_fdivd, fpu_sim_fdivq);
+ break;
+ case fcmp:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ cc = _fp_compare(pfpsd, &us1, &us2, 0);
+ if (!(pfpsd->fp_current_exceptions & pfpsd->fp_fsrtem))
+ switch (nfcc) {
+ case fcc_0:
+ fsr.fcc0 = cc;
+ break;
+ case fcc_1:
+ fsr.fcc1 = cc;
+ break;
+ case fcc_2:
+ fsr.fcc2 = cc;
+ break;
+ case fcc_3:
+ fsr.fcc3 = cc;
+ break;
+ }
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fcmps,
+ fpu_sim_fcmpd, fpu_sim_fcmpq);
+ break;
+ case fcmpe:
+ _fp_unpack(pfpsd, &us1, nrs1, inst.prec);
+ _fp_unpack(pfpsd, &us2, nrs2, inst.prec);
+ cc = _fp_compare(pfpsd, &us1, &us2, 1);
+ if (!(pfpsd->fp_current_exceptions & pfpsd->fp_fsrtem))
+ switch (nfcc) {
+ case fcc_0:
+ fsr.fcc0 = cc;
+ break;
+ case fcc_1:
+ fsr.fcc1 = cc;
+ break;
+ case fcc_2:
+ fsr.fcc2 = cc;
+ break;
+ case fcc_3:
+ fsr.fcc3 = cc;
+ break;
+ }
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fcmpes,
+ fpu_sim_fcmped, fpu_sim_fcmpeq);
+ break;
+ case fsqrt:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ _fp_sqrt(pfpsd, &us1, &ud);
+ _fp_pack(pfpsd, &ud, nrd, inst.prec);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fsqrts,
+ fpu_sim_fsqrtd, fpu_sim_fsqrtq);
+ break;
+ case ftoi:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ pfpsd->fp_direction = fp_tozero;
+ /* Force rounding toward zero. */
+ _fp_pack(pfpsd, &us1, nrd, fp_op_int32);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fstoi,
+ fpu_sim_fdtoi, fpu_sim_fqtoi);
+ break;
+ case ftoll:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ pfpsd->fp_direction = fp_tozero;
+ /* Force rounding toward zero. */
+ _fp_pack(pfpsd, &us1, nrd, fp_op_int64);
+ FPUINFO_KSTAT_PREC(inst.prec, fpu_sim_fstox,
+ fpu_sim_fdtox, fpu_sim_fqtox);
+ break;
+ case flltos:
+ _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_single);
+ FPUINFO_KSTAT(fpu_sim_fxtos);
+ break;
+ case flltod:
+ _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_double);
+ FPUINFO_KSTAT(fpu_sim_fxtod);
+ break;
+ case flltox:
+ _fp_unpack(pfpsd, &us1, nrs2, fp_op_int64);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_extended);
+ FPUINFO_KSTAT(fpu_sim_fxtoq);
+ break;
+ case fitos:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_single);
+ FPUINFO_KSTAT(fpu_sim_fitos);
+ break;
+ case fitod:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_double);
+ FPUINFO_KSTAT(fpu_sim_fitod);
+ break;
+ case fitox:
+ _fp_unpack(pfpsd, &us1, nrs2, inst.prec);
+ _fp_pack(pfpsd, &us1, nrd, fp_op_extended);
+ FPUINFO_KSTAT(fpu_sim_fitoq);
+ break;
+ default:
+ return (ftt_unimplemented);
+ }
}
fsr.cexc = pfpsd->fp_current_exceptions;
if (pfpsd->fp_current_exceptions) { /* Exception(s) occurred. */
@@ -450,6 +532,7 @@ _fp_fpu_simulator(
return (ftt_none);
}
+
/*
* fpu_vis_sim simulates fpu and vis instructions;
* It can work with both real and pcb image registers.
@@ -495,7 +578,8 @@ fpu_vis_sim(
pregs, (ulong_t *)pregs->r_sp, pfp);
return (ftt);
} else if ((fp.inst.hibits == 2) &&
- ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35))) {
+ ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35) ||
+ (fp.inst.op3 == 0x37))) {
ftt = _fp_fpu_simulator(pfpsd, fp.inst, pfsr, gsr);
if (ftt == ftt_none || ftt == ftt_ieee) {
pregs->r_pc = pregs->r_npc;
@@ -573,7 +657,8 @@ fp_emulator(
return (ftt);
if ((fp.inst.hibits == 2) &&
- ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35))) {
+ ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35) ||
+ (fp.inst.op3 == 0x37))) {
ftt = _fp_fpu_simulator(pfpsd, fp.inst, (fsr_type *)&tfsr, gsr);
/* Do not retry emulated instruction. */
pregs->r_pc = pregs->r_npc;
@@ -612,7 +697,8 @@ again:
if (ftt != ftt_none)
return (ftt);
if ((fp.inst.hibits == 2) && /* fpops */
- ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35))) {
+ ((fp.inst.op3 == 0x34) || (fp.inst.op3 == 0x35) ||
+ (fp.inst.op3 == 0x37))) {
ftt = _fp_fpu_simulator(pfpsd, fp.inst, (fsr_type *)&tfsr, gsr);
/* Do not retry emulated instruction. */
pfpu->fpu_fsr = tfsr;
diff --git a/usr/src/uts/sparc/os/driver_aliases b/usr/src/uts/sparc/os/driver_aliases
index db3ba0352c..787f3d32f0 100644
--- a/usr/src/uts/sparc/os/driver_aliases
+++ b/usr/src/uts/sparc/os/driver_aliases
@@ -144,6 +144,7 @@ ibd "ib.ipib"
px "SUNW,sun4v-pci"
px "pci108e,80f0"
px "pciex108e,80f0"
+px "pciex108e,80f8"
px_pci "pci1033,124"
px_pci "pci1033,125"
px_pci "pci8086,340"
@@ -161,3 +162,9 @@ epic "SUNW,ebus-pic18lf65j10-env"
i8042 "8042"
mouse8042 "pnpPNP,f03"
kb8042 "pnpPNP,303"
+pcicmu "pci10cf,138f"
+pcicmu "pci10cf,1390"
+oplpanel "FJSV,panel"
+oplmsu "FJSV,oplmsu"
+mc-opl "FJSV,oplmc"
+scfd "FJSV,scfc"
diff --git a/usr/src/uts/sparc/os/name_to_major b/usr/src/uts/sparc/os/name_to_major
index d38eb494a4..81708db46f 100644
--- a/usr/src/uts/sparc/os/name_to_major
+++ b/usr/src/uts/sparc/os/name_to_major
@@ -207,3 +207,10 @@ mouse8042 256
kssl 257
epic 258
adm1026 259
+pcicmu 260
+oplpanel 261
+oplmsu 262
+scfd 263
+mc-opl 264
+dm2s 265
+oplkmdrv 266
diff --git a/usr/src/uts/sparc/sys/Makefile b/usr/src/uts/sparc/sys/Makefile
index 72c122f969..b75af1c382 100644
--- a/usr/src/uts/sparc/sys/Makefile
+++ b/usr/src/uts/sparc/sys/Makefile
@@ -2,9 +2,8 @@
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
-# Common Development and Distribution License, Version 1.0 only
-# (the "License"). You may not use this file except in compliance
-# with the License.
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
@@ -68,7 +67,8 @@ FPUHDRS= \
FMCPUHDRS= \
UltraSPARC-II.h \
UltraSPARC-III.h \
- UltraSPARC-T1.h
+ UltraSPARC-T1.h \
+ SPARC64-VI.h
ROOTDIR= $(ROOT)/usr/include/sys
ROOTDIRS= \
diff --git a/usr/src/uts/sparc/sys/cpu.h b/usr/src/uts/sparc/sys/cpu.h
index be90896939..6f0fbc919d 100644
--- a/usr/src/uts/sparc/sys/cpu.h
+++ b/usr/src/uts/sparc/sys/cpu.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -69,7 +68,9 @@ extern int vac;
/*
* Use to insert cpu-dependent instructions into spin loops
*/
-#define SMT_PAUSE() /* none */
+#pragma weak cpu_smt_pause
+extern void cpu_smt_pause();
+#define SMT_PAUSE() { if (&cpu_smt_pause) cpu_smt_pause(); }
#endif /* defined(_KERNEL) && !defined(_ASM) */
diff --git a/usr/src/uts/sparc/sys/fm/cpu/SPARC64-VI.h b/usr/src/uts/sparc/sys/fm/cpu/SPARC64-VI.h
new file mode 100644
index 0000000000..c1de31063d
--- /dev/null
+++ b/usr/src/uts/sparc/sys/fm/cpu/SPARC64-VI.h
@@ -0,0 +1,118 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_FM_SPARC64_VI_H
+#define _SYS_FM_SPARC64_VI_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ereport class subcategories for SPARC64-VI */
+#define FM_EREPORT_CPU_SPARC64_VI "SPARC64-VI"
+#define FM_EREPORT_CPU_UNSUPPORTED "unsupported"
+
+/*
+ * Ereport payload definitions.
+ */
+#define FM_EREPORT_PAYLOAD_NAME_SFSR "sfsr"
+#define FM_EREPORT_PAYLOAD_NAME_SFAR "sfar"
+#define FM_EREPORT_PAYLOAD_NAME_UGESR "ugesr"
+#define FM_EREPORT_PAYLOAD_NAME_PC "pc"
+#define FM_EREPORT_PAYLOAD_NAME_TL "tl"
+#define FM_EREPORT_PAYLOAD_NAME_TT "tt"
+#define FM_EREPORT_PAYLOAD_NAME_PRIV "privileged"
+#define FM_EREPORT_PAYLOAD_NAME_RESOURCE "resource"
+#define FM_EREPORT_PAYLOAD_NAME_FLT_STATUS "flt-status"
+
+#define FM_EREPORT_PAYLOAD_FLAG_SFSR 0x00000001
+#define FM_EREPORT_PAYLOAD_FLAG_SFAR 0x00000002
+#define FM_EREPORT_PAYLOAD_FLAG_UGESR 0x00000004
+#define FM_EREPORT_PAYLOAD_FLAG_PC 0x00000008
+#define FM_EREPORT_PAYLOAD_FLAG_TL 0x00000010
+#define FM_EREPORT_PAYLOAD_FLAG_TT 0x00000020
+#define FM_EREPORT_PAYLOAD_FLAG_PRIV 0x00000040
+#define FM_EREPORT_PAYLOAD_FLAG_RESOURCE 0x00000080
+#define FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS 0x00000100
+
+#define FM_EREPORT_PAYLOAD_FLAGS_TRAP \
+ (FM_EREPORT_PAYLOAD_FLAG_TL | \
+ FM_EREPORT_PAYLOAD_FLAG_TT)
+
+#define FM_EREPORT_PAYLOAD_SYNC (FM_EREPORT_PAYLOAD_FLAG_SFSR | \
+ FM_EREPORT_PAYLOAD_FLAG_SFAR | \
+ FM_EREPORT_PAYLOAD_FLAG_PC | \
+ FM_EREPORT_PAYLOAD_FLAGS_TRAP | \
+ FM_EREPORT_PAYLOAD_FLAG_PRIV | \
+ FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS | \
+ FM_EREPORT_PAYLOAD_FLAG_RESOURCE)
+
+#define FM_EREPORT_PAYLOAD_URGENT (FM_EREPORT_PAYLOAD_FLAG_UGESR | \
+ FM_EREPORT_PAYLOAD_FLAG_PC | \
+ FM_EREPORT_PAYLOAD_FLAGS_TRAP | \
+ FM_EREPORT_PAYLOAD_FLAG_PRIV)
+
+/*
+ * FM_EREPORT_PAYLOAD_SYNC
+ */
+
+#define FM_EREPORT_CPU_UE_MEM "ue-mem"
+#define FM_EREPORT_CPU_UE_CHANNEL "ue-channel"
+#define FM_EREPORT_CPU_UE_CPU "ue-cpu"
+#define FM_EREPORT_CPU_UE_PATH "ue-path"
+#define FM_EREPORT_CPU_BERR "berr"
+#define FM_EREPORT_CPU_BTO "bto"
+#define FM_EREPORT_CPU_MTLB "mtlb"
+#define FM_EREPORT_CPU_TLBP "tlbp"
+#define FM_EREPORT_CPU_INV_SFSR "inv-sfsr"
+
+/*
+ * FM_EREPORT_PAYLOAD_URGENT
+ */
+
+#define FM_EREPORT_CPU_CRE "cre"
+#define FM_EREPORT_CPU_TSBCTX "tsb-ctx"
+#define FM_EREPORT_CPU_TSBP "tsbp"
+#define FM_EREPORT_CPU_PSTATE "pstate"
+#define FM_EREPORT_CPU_TSTATE "tstate"
+#define FM_EREPORT_CPU_IUG_F "iug-f"
+#define FM_EREPORT_CPU_IUG_R "iug-r"
+#define FM_EREPORT_CPU_SDC "sdc"
+#define FM_EREPORT_CPU_WDT "wdt"
+#define FM_EREPORT_CPU_DTLB "dtlb"
+#define FM_EREPORT_CPU_ITLB "itlb"
+#define FM_EREPORT_CPU_CORE "core-err"
+#define FM_EREPORT_CPU_DAE "dae"
+#define FM_EREPORT_CPU_IAE "iae"
+#define FM_EREPORT_CPU_UGE "uge"
+#define FM_EREPORT_CPU_INV_URG "inv-urg"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_FM_SPARC64_VI_H */
diff --git a/usr/src/uts/sparc/sys/fpu/fpu_simulator.h b/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
index 70b1bb54b7..3b46025fcf 100644
--- a/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
+++ b/usr/src/uts/sparc/sys/fpu/fpu_simulator.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -221,6 +220,25 @@ typedef /* FPU instruction. */
uint32_t rs2 : 5; /* Second operand. */
} fp_inst_type;
+enum fp_op_fma_var { /* IMPDEP2B FMA-fused instr. variations */
+ fmadd = 0,
+ fmsub = 1,
+ fnmsub = 2,
+ fnmadd = 3
+};
+
+typedef /* IMPDEP2B FPU FMA-fused instruction. */
+ struct {
+ uint32_t hibits : 2; /* Top two bits. */
+ uint32_t rd : 5; /* Destination. */
+ uint32_t op3 : 6; /* Main op code. */
+ uint32_t rs1 : 5; /* First operand. */
+ uint32_t rs3 : 5; /* Third operand. */
+ uint32_t /* enum fp_op_fma_var */ var : 2; /* Instr. variation */
+ uint32_t sz : 2; /* Size */
+ uint32_t rs2 : 5; /* Second operand. */
+} fp_fma_inst_type;
+
typedef /* Integer condition code. */
struct {
uint32_t : 28; /* the unused part */
@@ -305,6 +323,15 @@ struct fpuinfo_kstat {
struct kstat_named fpu_sim_fqtoi;
struct kstat_named fpu_sim_fmovcc;
struct kstat_named fpu_sim_fmovr;
+ struct kstat_named fpu_sim_fmadds;
+ struct kstat_named fpu_sim_fmaddd;
+ struct kstat_named fpu_sim_fmsubs;
+ struct kstat_named fpu_sim_fmsubd;
+ struct kstat_named fpu_sim_fnmadds;
+ struct kstat_named fpu_sim_fnmaddd;
+ struct kstat_named fpu_sim_fnmsubs;
+ struct kstat_named fpu_sim_fnmsubd;
+ struct kstat_named fpu_sim_invalid;
};
struct visinfo_kstat {
diff --git a/usr/src/uts/sun4/io/px/px_debug.c b/usr/src/uts/sun4/io/px/px_debug.c
index af16f7b301..10eefafb16 100644
--- a/usr/src/uts/sun4/io/px/px_debug.c
+++ b/usr/src/uts/sun4/io/px/px_debug.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -89,7 +88,7 @@ static char *px_debug_sym [] = { /* same sequence as px_debug_bit */
/* 36 */ "ilu",
/* 37 */ "tlu",
/* 38 */ "lpu",
- /* 39 */ NULL,
+ /* 39 */ "mmu",
/* 40 */ "open",
/* 41 */ "close",
diff --git a/usr/src/uts/sun4/io/px/px_dma.c b/usr/src/uts/sun4/io/px/px_dma.c
index 6451a9b32c..3a1b8569b7 100644
--- a/usr/src/uts/sun4/io/px/px_dma.c
+++ b/usr/src/uts/sun4/io/px/px_dma.c
@@ -286,7 +286,8 @@ px_dma_attr2hdl(px_t *px_p, ddi_dma_impl_t *mp)
align = 1; /* align on 1 page boundary */
/* do a range check and get the limits */
- ret = px_lib_dma_bypass_rngchk(attrp, &syslo, &syshi);
+ ret = px_lib_dma_bypass_rngchk(px_p->px_dip, attrp,
+ &syslo, &syshi);
if (ret != DDI_SUCCESS)
return (ret);
} else { /* MMU_XLATE or PEER_TO_PEER */
@@ -674,7 +675,8 @@ px_dvma_map_fast(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
{
uint_t clustsz = px_dvma_page_cache_clustsz;
uint_t entries = px_dvma_page_cache_entries;
- io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags);
+ io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
+ mp->dmai_attr.dma_attr_flags);
int i = mmu_p->mmu_dvma_addr_scan_start;
uint8_t *lock_addr = mmu_p->mmu_dvma_cache_locks + i;
px_dvma_addr_t dvma_pg;
@@ -750,7 +752,8 @@ px_dvma_map(ddi_dma_impl_t *mp, ddi_dma_req_t *dmareq, px_mmu_t *mmu_p)
uint_t npages = PX_DMA_WINNPGS(mp);
px_dvma_addr_t dvma_pg, dvma_pg_index;
void *dvma_addr;
- uint64_t tte = PX_GET_TTE_ATTR(mp->dmai_rflags);
+ uint64_t tte = PX_GET_TTE_ATTR(mp->dmai_rflags,
+ mp->dmai_attr.dma_attr_flags);
int sleep = dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP;
dev_info_t *dip = mp->dmai_rdip;
int ret = DDI_SUCCESS;
@@ -1031,7 +1034,8 @@ px_dma_newwin(dev_info_t *dip, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp,
size_t sz = cookie_no * sizeof (ddi_dma_cookie_t);
px_dma_win_t *win_p = kmem_zalloc(sizeof (px_dma_win_t) + sz,
waitfp == DDI_DMA_SLEEP ? KM_SLEEP : KM_NOSLEEP);
- io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags);
+ io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
+ mp->dmai_attr.dma_attr_flags);
if (!win_p)
goto noresource;
@@ -1193,7 +1197,8 @@ px_dma_physwin(px_t *px_p, ddi_dma_req_t *dmareq, ddi_dma_impl_t *mp)
uint64_t count_max, bypass_addr = 0;
px_dma_win_t **win_pp = (px_dma_win_t **)&mp->dmai_winlst;
ddi_dma_cookie_t *cookie0_p;
- io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags);
+ io_attributes_t attr = PX_GET_TTE_ATTR(mp->dmai_rflags,
+ mp->dmai_attr.dma_attr_flags);
dev_info_t *dip = px_p->px_dip;
ASSERT(PX_DMA_ISPTP(mp) || PX_DMA_ISBYPASS(mp));
diff --git a/usr/src/uts/sun4/io/px/px_dma.h b/usr/src/uts/sun4/io/px/px_dma.h
index ae4ed5b6ab..0bd23f9eb2 100644
--- a/usr/src/uts/sun4/io/px/px_dma.h
+++ b/usr/src/uts/sun4/io/px/px_dma.h
@@ -233,9 +233,9 @@ extern int px_dma_ctl(dev_info_t *dip, dev_info_t *rdip,
#define PX_GET_MP_TTE(tte) \
(((uint64_t)(uintptr_t)(tte) >> 5) << (32 + 5) | \
- ((uint32_t)(uintptr_t)(tte)) & 0x12)
+ ((uint32_t)(uintptr_t)(tte)) & 0x16)
#define PX_SAVE_MP_TTE(mp, tte) \
- (mp)->dmai_tte = (caddr_t)((uintptr_t)HI32(tte) | ((tte) & 0x12))
+ (mp)->dmai_tte = (caddr_t)((uintptr_t)HI32(tte) | ((tte) & 0x16))
#define PX_GET_MP_PFN1(mp, page_no) \
(((px_iopfn_t *)(mp)->dmai_pfnlst)[page_no])
diff --git a/usr/src/uts/sun4/io/px/px_fdvma.c b/usr/src/uts/sun4/io/px/px_fdvma.c
index be377cb638..24b2a57067 100644
--- a/usr/src/uts/sun4/io/px/px_fdvma.c
+++ b/usr/src/uts/sun4/io/px/px_fdvma.c
@@ -87,7 +87,7 @@ px_fdvma_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
DBG(DBG_FAST_DVMA | DBG_CONT, dip, "cookie: %x+%x\n",
cp->dmac_address, cp->dmac_size);
- attr = PX_GET_TTE_ATTR(mp->dmai_rflags);
+ attr = PX_GET_TTE_ATTR(mp->dmai_rflags, mp->dmai_attr.dma_attr_flags);
if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages, attr,
(void *)a, 0, MMU_MAP_BUF) != DDI_SUCCESS) {
diff --git a/usr/src/uts/sun4/io/px/px_ib.c b/usr/src/uts/sun4/io/px/px_ib.c
index ee3a78712e..782129718e 100644
--- a/usr/src/uts/sun4/io/px/px_ib.c
+++ b/usr/src/uts/sun4/io/px/px_ib.c
@@ -38,6 +38,7 @@
#include <sys/machsystm.h> /* intr_dist_add */
#include <sys/ddi_impldefs.h>
#include <sys/cpuvar.h>
+#include <sys/time.h>
#include "px_obj.h"
/*LINTLIBRARY*/
@@ -49,6 +50,8 @@ static uint_t px_ib_intr_reset(void *arg);
static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
char *path_name, int instance);
+extern uint64_t xc_tick_jump_limit;
+
int
px_ib_attach(px_t *px_p)
{
@@ -172,7 +175,8 @@ px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
uint32_t old_cpu_id;
sysino_t sysino;
intr_valid_state_t enabled = 0;
- hrtime_t start_time;
+ hrtime_t start_time, prev, curr, interval, jump;
+ hrtime_t intr_timeout;
intr_state_t intr_state;
int e = DDI_SUCCESS;
@@ -208,11 +212,27 @@ px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
/* Busy wait on pending interrupts */
PX_INTR_DISABLE(dip, sysino);
- for (start_time = gethrtime(); !panicstr &&
+ intr_timeout = px_intrpend_timeout;
+ jump = TICK_TO_NSEC(xc_tick_jump_limit);
+
+ for (curr = start_time = gethrtime(); !panicstr &&
((e = px_lib_intr_getstate(dip, sysino, &intr_state)) ==
DDI_SUCCESS) &&
(intr_state == INTR_DELIVERED_STATE); /* */) {
- if (gethrtime() - start_time > px_intrpend_timeout) {
+ /*
+ * If we have a really large jump in hrtime, it is most
+ * probably because we entered the debugger (or OBP,
+ * in general). So, we adjust the timeout accordingly
+ * to prevent declaring an interrupt timeout. The
+ * master-interrupt mechanism in OBP should deliver
+ * the interrupts properly.
+ */
+ prev = curr;
+ curr = gethrtime();
+ interval = curr - prev;
+ if (interval > jump)
+ intr_timeout += interval;
+ if (curr - start_time > intr_timeout) {
cmn_err(CE_WARN,
"%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) "
"from cpu id 0x%x to 0x%x timeout",
diff --git a/usr/src/uts/sun4/io/px/px_ioapi.h b/usr/src/uts/sun4/io/px/px_ioapi.h
index 79b7cffe94..e8ad722fa8 100644
--- a/usr/src/uts/sun4/io/px/px_ioapi.h
+++ b/usr/src/uts/sun4/io/px/px_ioapi.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -103,13 +102,14 @@ typedef enum intr_valid_state {
*
* PCI_MAP_ATTR_READ 0x01 - xfr direction is from memory
* PCI_MAP_ATTR_WRITE 0x02 - xfr direction is to memory
+ * PCI_MAP_ATTR_RO 0x04 - enable relaxed ordering
*
- * Bits 63:2 are unused and must be set to zero for this
+ * Bits 63:3 are unused and must be set to zero for this
* version of the specification.
*
* Note: For compatibility with future versions of this
- * specification, the caller must set 63:2 to zero.
- * The implementation shall ignore bits 63:2
+ * specification, the caller must set 63:3 to zero.
+ * The implementation shall ignore bits 63:3
*
* r_addr - 64-bit Real Address.
*
@@ -196,7 +196,8 @@ typedef uint64_t pci_device_t;
typedef enum io_attributes {
PCI_MAP_ATTR_READ = (uint32_t)0x01,
- PCI_MAP_ATTR_WRITE = (uint32_t)0x02
+ PCI_MAP_ATTR_WRITE = (uint32_t)0x02,
+ PCI_MAP_ATTR_RO = (uint32_t)0x04
} io_attributes_t;
typedef enum io_sync_direction {
diff --git a/usr/src/uts/sun4/io/px/px_lib.h b/usr/src/uts/sun4/io/px/px_lib.h
index 02553a3b37..07bd06e2a3 100644
--- a/usr/src/uts/sun4/io/px/px_lib.h
+++ b/usr/src/uts/sun4/io/px/px_lib.h
@@ -89,8 +89,8 @@ extern int px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
extern int px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages);
extern int px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
io_attributes_t *attr_p, r_addr_t *r_addr_p);
-extern int px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attr_p, uint64_t *lo_p,
- uint64_t *hi_p);
+extern int px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
+ uint64_t *lo_p, uint64_t *hi_p);
extern int px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
io_attributes_t attr, io_addr_t *io_addr_p);
extern int px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip,
diff --git a/usr/src/uts/sun4/io/px/px_mmu.h b/usr/src/uts/sun4/io/px/px_mmu.h
index c861d3ba67..660985a737 100644
--- a/usr/src/uts/sun4/io/px/px_mmu.h
+++ b/usr/src/uts/sun4/io/px/px_mmu.h
@@ -43,9 +43,10 @@ typedef uint64_t px_window_t;
/*
* boiler plate for tte (everything except the pfn)
*/
-#define PX_GET_TTE_ATTR(flags)\
+#define PX_GET_TTE_ATTR(flags, attr)\
(((flags & DDI_DMA_READ) ? PCI_MAP_ATTR_WRITE : 0) | \
- ((flags & DDI_DMA_WRITE) ? PCI_MAP_ATTR_READ : 0))
+ ((flags & DDI_DMA_WRITE) ? PCI_MAP_ATTR_READ : 0) | \
+ ((attr & DDI_DMA_RELAXED_ORDERING) ? PCI_MAP_ATTR_RO : 0))
/*
* mmu block soft state structure:
diff --git a/usr/src/uts/sun4/io/px/px_space.c b/usr/src/uts/sun4/io/px/px_space.c
index 85fb3f120e..ec5b819edf 100644
--- a/usr/src/uts/sun4/io/px/px_space.c
+++ b/usr/src/uts/sun4/io/px/px_space.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -146,72 +145,6 @@ uint_t px_preserve_mmu_tsb = 1;
*/
uintptr_t px_kmem_clid = 0;
-uint64_t px_tlu_ue_intr_mask = PX_ERR_EN_ALL;
-uint64_t px_tlu_ue_log_mask = PX_ERR_EN_ALL;
-uint64_t px_tlu_ue_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_tlu_ce_intr_mask = PX_ERR_MASK_NONE;
-uint64_t px_tlu_ce_log_mask = PX_ERR_MASK_NONE;
-uint64_t px_tlu_ce_count_mask = PX_ERR_MASK_NONE;
-
-/*
- * Do not enable Link Interrupts
- */
-uint64_t px_tlu_oe_intr_mask = PX_ERR_EN_ALL & ~0x80000000800;
-uint64_t px_tlu_oe_log_mask = PX_ERR_EN_ALL & ~0x80000000800;
-uint64_t px_tlu_oe_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_mmu_intr_mask = PX_ERR_EN_ALL;
-uint64_t px_mmu_log_mask = PX_ERR_EN_ALL;
-uint64_t px_mmu_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_imu_intr_mask = PX_ERR_EN_ALL;
-uint64_t px_imu_log_mask = PX_ERR_EN_ALL;
-uint64_t px_imu_count_mask = PX_ERR_EN_ALL;
-
-/*
- * (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_S) |
- * (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_P);
- */
-uint64_t px_ilu_intr_mask = (((uint64_t)0x10 << 32) | 0x10);
-uint64_t px_ilu_log_mask = (((uint64_t)0x10 << 32) | 0x10);
-uint64_t px_ilu_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_cb_intr_mask = PX_ERR_EN_ALL;
-uint64_t px_cb_log_mask = PX_ERR_EN_ALL;
-uint64_t px_cb_count_mask = PX_ERR_EN_ALL;
-
-/*
- * LPU Intr Registers are reverse encoding from the registers above.
- * 1 = disable
- * 0 = enable
- *
- * Log and Count are however still the same.
- */
-uint64_t px_lpul_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpul_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpul_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_lpup_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpup_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpup_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_lpur_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpur_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpur_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_lpux_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpux_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpux_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_lpus_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpus_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpus_count_mask = PX_ERR_EN_ALL;
-
-uint64_t px_lpug_intr_mask = LPU_INTR_DISABLE;
-uint64_t px_lpug_log_mask = PX_ERR_EN_ALL;
-uint64_t px_lpug_count_mask = PX_ERR_EN_ALL;
-
/* timeout in micro seconds for receiving PME_To_ACK */
uint64_t px_pme_to_ack_timeout = PX_PME_TO_ACK_TIMEOUT;
@@ -232,7 +165,8 @@ uint32_t px_fabric_die_rc_ue = PCIE_AER_UCE_UR |
PCIE_AER_UCE_TO |
PCIE_AER_UCE_RO |
PCIE_AER_UCE_FCP |
- PCIE_AER_UCE_DLP;
+ PCIE_AER_UCE_DLP |
+ PCIE_AER_UCE_ECRC;
/* Fire PCIe Error that should cause panics even under protected access */
uint32_t px_fabric_die_rc_ce_gos = 0;
diff --git a/usr/src/uts/sun4/io/px/px_space.h b/usr/src/uts/sun4/io/px/px_space.h
index dc75020673..7ca21d0641 100644
--- a/usr/src/uts/sun4/io/px/px_space.h
+++ b/usr/src/uts/sun4/io/px/px_space.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -91,64 +90,6 @@ extern uint_t px_iommu_ctx_lock_failure;
extern uint_t px_preserve_iommu_tsb;
extern uintptr_t px_kmem_clid;
-#define PX_ERR_EN_ALL -1ull
-#define PX_ERR_MASK_NONE 0ull
-
-extern uint64_t px_tlu_ue_intr_mask;
-extern uint64_t px_tlu_ue_log_mask;
-extern uint64_t px_tlu_ue_count_mask;
-
-extern uint64_t px_tlu_ce_intr_mask;
-extern uint64_t px_tlu_ce_log_mask;
-extern uint64_t px_tlu_ce_count_mask;
-
-extern uint64_t px_tlu_oe_intr_mask;
-extern uint64_t px_tlu_oe_log_mask;
-extern uint64_t px_tlu_oe_count_mask;
-
-extern uint64_t px_mmu_intr_mask;
-extern uint64_t px_mmu_log_mask;
-extern uint64_t px_mmu_count_mask;
-
-extern uint64_t px_imu_intr_mask;
-extern uint64_t px_imu_log_mask;
-extern uint64_t px_imu_count_mask;
-
-#define LPU_INTR_ENABLE 0ull
-#define LPU_INTR_DISABLE -1ull
-
-extern uint64_t px_ilu_intr_mask;
-extern uint64_t px_ilu_log_mask;
-extern uint64_t px_ilu_count_mask;
-
-extern uint64_t px_cb_intr_mask;
-extern uint64_t px_cb_log_mask;
-extern uint64_t px_cb_count_mask;
-
-extern uint64_t px_lpul_intr_mask;
-extern uint64_t px_lpul_log_mask;
-extern uint64_t px_lpul_count_mask;
-
-extern uint64_t px_lpup_intr_mask;
-extern uint64_t px_lpup_log_mask;
-extern uint64_t px_lpup_count_mask;
-
-extern uint64_t px_lpur_intr_mask;
-extern uint64_t px_lpur_log_mask;
-extern uint64_t px_lpur_count_mask;
-
-extern uint64_t px_lpux_intr_mask;
-extern uint64_t px_lpux_log_mask;
-extern uint64_t px_lpux_count_mask;
-
-extern uint64_t px_lpus_intr_mask;
-extern uint64_t px_lpus_log_mask;
-extern uint64_t px_lpus_count_mask;
-
-extern uint64_t px_lpug_intr_mask;
-extern uint64_t px_lpug_log_mask;
-extern uint64_t px_lpug_count_mask;
-
/* timeout length in micro seconds */
#define PX_MSEC_TO_USEC 1000
#define PX_PME_TO_ACK_TIMEOUT (1000 * PX_MSEC_TO_USEC)
diff --git a/usr/src/uts/sun4/io/px/px_tools.c b/usr/src/uts/sun4/io/px/px_tools.c
index 5033485058..a10978506f 100644
--- a/usr/src/uts/sun4/io/px/px_tools.c
+++ b/usr/src/uts/sun4/io/px/px_tools.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -361,6 +360,7 @@ pxtool_get_phys_addr(px_t *px_p, int space, uint64_t offset)
int rval;
dev_info_t *dip = px_p->px_dip;
uint32_t base_offset = 0;
+ extern uint64_t px_get_range_prop(px_t *, px_ranges_t *, int);
/*
* Assume that requested entity is small enough to be on the same page.
@@ -385,9 +385,7 @@ pxtool_get_phys_addr(px_t *px_p, int space, uint64_t offset)
if (rval != DDI_SUCCESS)
return (NULL);
else {
- range_base =
- (((uint64_t)(rp->parent_high & 0x7ff)) << 32) +
- rp->parent_low;
+ range_base = px_get_range_prop(px_p, rp, 0);
DBG(DBG_TOOLS, dip, "range base:0x%" PRIx64 "\n", range_base);
return (base_offset + range_base);
}
diff --git a/usr/src/uts/sun4/io/px/px_var.h b/usr/src/uts/sun4/io/px/px_var.h
index 4418b71d3a..ffd593a444 100644
--- a/usr/src/uts/sun4/io/px/px_var.h
+++ b/usr/src/uts/sun4/io/px/px_var.h
@@ -83,7 +83,7 @@ typedef enum {
PX_SUSPENDED
} px_state_t;
-enum { PX_INTR_XBC, PX_INTR_PEC };
+enum { PX_INTR_XBC, PX_INTR_PEC, PX_INTR_HOTPLUG };
#define PX_ATTACH_RETCODE(obj, op, err) \
((err) ? (obj) << 8 | (op) << 4 | (err) & 0xf : DDI_SUCCESS)
diff --git a/usr/src/uts/sun4/io/trapstat.c b/usr/src/uts/sun4/io/trapstat.c
index 65a97fbb91..bdaac735fe 100644
--- a/usr/src/uts/sun4/io/trapstat.c
+++ b/usr/src/uts/sun4/io/trapstat.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -46,6 +45,9 @@
#ifdef sun4v
#include <sys/hypervisor_api.h>
#endif
+#ifndef sun4v
+#include <sys/chip.h>
+#endif
/* BEGIN CSTYLED */
/*
@@ -1399,6 +1401,14 @@ trapstat_make_traptab(tstat_percpu_t *tcpu)
#undef TSTAT_ENABLED_BA
#undef TSTAT_DISABLED_BA
+#ifndef sun4v
+/*
+ * See Section A.6 in SPARC v9 Manual.
+ * max branch = 4*((2^21)-1) = 8388604
+ */
+#define MAX_BICC_BRANCH_DISPLACEMENT (4 * ((1 << 21) - 1))
+#endif
+
static void
trapstat_setup(processorid_t cpu)
{
@@ -1407,6 +1417,9 @@ trapstat_setup(processorid_t cpu)
int i;
caddr_t va;
pfn_t *pfn;
+ cpu_t *cp;
+ uint_t strand_idx;
+ size_t tstat_offset;
#endif
ASSERT(tcpu->tcpu_pfn == NULL);
@@ -1422,7 +1435,29 @@ trapstat_setup(processorid_t cpu)
* align our instruction base address appropriately.
*/
#ifndef sun4v
- tcpu->tcpu_ibase = (caddr_t)((KERNELBASE - tstat_total_size)
+ tstat_offset = tstat_total_size;
+
+ cp = cpu_get(cpu);
+ ASSERT(cp != NULL);
+ if ((strand_idx = cpu ^ chip_plat_get_coreid(cp)) != 0) {
+ /*
+ * On sun4u platforms with multiple CPUs sharing the MMU
+ * (Olympus-C has 2 strands per core), each CPU uses a
+ * disjoint trap table. The indexing is based on the
+ * strand id, which is obtained by XOR'ing the cpuid with
+ * the coreid.
+ */
+ tstat_offset += tstat_total_size * strand_idx;
+
+ /*
+ * Offset must be less than the maximum PC-relative branch
+ * displacement for Bicc variants. See the Implementation
+ * Details comment.
+ */
+ ASSERT(tstat_offset <= MAX_BICC_BRANCH_DISPLACEMENT);
+ }
+
+ tcpu->tcpu_ibase = (caddr_t)((KERNELBASE - tstat_offset)
& TSTAT_TBA_MASK);
tcpu->tcpu_dbase = tcpu->tcpu_ibase + TSTAT_INSTR_SIZE;
tcpu->tcpu_vabase = tcpu->tcpu_ibase;
diff --git a/usr/src/uts/sun4/ml/interrupt.s b/usr/src/uts/sun4/ml/interrupt.s
index 3a316eb8cc..e9f4863836 100644
--- a/usr/src/uts/sun4/ml/interrupt.s
+++ b/usr/src/uts/sun4/ml/interrupt.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -822,6 +821,7 @@ intr_thread(struct regs *regs, uint_t inumber, uint_t pil)
! Switch back to the interrupted thread and return
!
stn %o4, [%o2 + CPU_THREAD]
+ membar #StoreLoad ! sync with mutex_exit()
mov %o4, THREAD_REG
! If we pinned an interrupt thread, store its starting timestamp.
diff --git a/usr/src/uts/sun4/os/startup.c b/usr/src/uts/sun4/os/startup.c
index 2051840ae2..dd8b58faee 100644
--- a/usr/src/uts/sun4/os/startup.c
+++ b/usr/src/uts/sun4/os/startup.c
@@ -532,7 +532,7 @@ extern caddr_t ecache_init_scrub_flush_area(caddr_t alloc_base);
extern uint64_t ecache_flush_address(void);
#pragma weak load_platform_modules
-#pragma weak starcat_startup_memlist
+#pragma weak plat_startup_memlist
#pragma weak ecache_init_scrub_flush_area
#pragma weak ecache_flush_address
@@ -963,12 +963,12 @@ startup_memlist(void)
PRM_DEBUG(alloc_base);
/*
- * Starcat needs its special structures assigned in 32-bit virtual
- * address space because its probing routines execute FCode, and FCode
- * can't handle 64-bit virtual addresses...
+ * Platforms like Starcat and OPL need special structures assigned in
+ * 32-bit virtual address space because their probing routines execute
+ * FCode, and FCode can't handle 64-bit virtual addresses...
*/
- if (&starcat_startup_memlist) {
- alloc_base = starcat_startup_memlist(alloc_base);
+ if (&plat_startup_memlist) {
+ alloc_base = plat_startup_memlist(alloc_base);
alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
ecache_alignsize);
PRM_DEBUG(alloc_base);
diff --git a/usr/src/uts/sun4/sys/platform_module.h b/usr/src/uts/sun4/sys/platform_module.h
index f0802dc623..8f96b014c7 100644
--- a/usr/src/uts/sun4/sys/platform_module.h
+++ b/usr/src/uts/sun4/sys/platform_module.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -108,7 +107,7 @@ extern int plat_ecc_capability_sc_get(int type);
extern int plat_blacklist(int cmd, const char *scheme, nvlist_t *fmri,
const char *class);
-extern caddr_t starcat_startup_memlist(caddr_t alloc_base);
+extern caddr_t plat_startup_memlist(caddr_t alloc_base);
extern int starcat_dr_name(char *name);
#pragma weak plat_setprop_enter
diff --git a/usr/src/uts/sun4u/Makefile.files b/usr/src/uts/sun4u/Makefile.files
index c6a8ddc3d1..cc6825033b 100644
--- a/usr/src/uts/sun4u/Makefile.files
+++ b/usr/src/uts/sun4u/Makefile.files
@@ -102,6 +102,7 @@ GRBEEP_OBJS += grbeep.o
ADM1031_OBJS += adm1031.o
ICS951601_OBJS += ics951601.o
PPM_OBJS += ppm_subr.o ppm.o
+OPLCFG_OBJS += opl_cfg.o
PCF8584_OBJS += pcf8584.o
PCA9556_OBJS += pca9556.o
ADM1026_OBJS += adm1026.o
@@ -182,6 +183,7 @@ TODBLADE_OBJS += todblade.o
TODM5819_OBJS += todm5819.o
TODBQ4802_OBJS += todbq4802.o
TODSG_OBJS += todsg.o
+TODOPL_OBJS = todopl.o
#
# Misc modules
@@ -198,6 +200,7 @@ SBD_OBJS += sbd.o sbd_cpu.o sbd_mem.o sbd_io.o
# Performance Counter BackEnd (PCBE) Modules
#
US_PCBE_OBJS = us234_pcbe.o
+OPL_PCBE_OBJS = opl_pcbe.o
#
# cpu modules
@@ -209,6 +212,8 @@ US3_CMN_OBJS = us3_common.o us3_common_mmu.o us3_common_asm.o us3_kdi.o cheetah_
CHEETAH_OBJS = $(US3_CMN_OBJS) us3_cheetah.o us3_cheetah_asm.o
CHEETAHPLUS_OBJS= $(US3_CMN_OBJS) us3_cheetahplus.o us3_cheetahplus_asm.o
JALAPENO_OBJS = $(US3_CMN_OBJS) us3_jalapeno.o us3_jalapeno_asm.o
+OLYMPUS_OBJS = opl_olympus.o opl_olympus_asm.o opl_olympus_copy.o \
+ opl_kdi.o common_asm.o
#
# platform module
diff --git a/usr/src/uts/sun4u/Makefile.sun4u.shared b/usr/src/uts/sun4u/Makefile.sun4u.shared
index 61855f6096..3f91e661e5 100644
--- a/usr/src/uts/sun4u/Makefile.sun4u.shared
+++ b/usr/src/uts/sun4u/Makefile.sun4u.shared
@@ -179,6 +179,7 @@ IMPLEMENTATIONS += chicago .WAIT
IMPLEMENTATIONS += sunfire .WAIT
IMPLEMENTATIONS += lw8 .WAIT
IMPLEMENTATIONS += makaha .WAIT
+IMPLEMENTATIONS += opl .WAIT
$(CLOSED_BUILD)CLOSED_IMPLEMENTATIONS = chalupa .WAIT
$(CLOSED_BUILD)CLOSED_IMPLEMENTATIONS += ents .WAIT
@@ -299,6 +300,7 @@ $(IF_TRAPTRACE_OBJ)us3_common_asm.o := DEBUG_DEFS += -DTRAPTRACE
$(IF_TRAPTRACE_OBJ)us3_cheetah_asm.o := DEBUG_DEFS += -DTRAPTRACE
$(IF_TRAPTRACE_OBJ)us3_cheetahplus_asm.o := DEBUG_DEFS += -DTRAPTRACE
$(IF_TRAPTRACE_OBJ)us3_jalapeno_asm.o := DEBUG_DEFS += -DTRAPTRACE
+$(IF_TRAPTRACE_OBJ)opl_olympus_asm.o := DEBUG_DEFS += -DTRAPTRACE
# Comment these out if you don't want dispatcher lock statistics.
@@ -433,6 +435,7 @@ SYS_KMODS +=
MISC_KMODS += obpsym bootdev vis cpr platmod md5 sha1 i2c_svc
MISC_KMODS += sbd
+MISC_KMODS += opl_cfg
MISC_KMODS += kmech_krb5
MISC_KMODS += zuluvm
@@ -472,7 +475,7 @@ CPU_KMODS += cheetah cheetahplus jalapeno serrano spitfire hummingbird
# sun4u 'TOD' Modules (/platform/.../kernel/tod):
#
TOD_KMODS += todds1287 todds1337 todmostek todstarfire
-TOD_KMODS += todm5819 todblade todbq4802 todsg
+TOD_KMODS += todm5819 todblade todbq4802 todsg todopl
$(CLOSED_BUILD)CLOSED_TOD_KMODS += todm5819p_rmc todstarcat todm5823
@@ -480,3 +483,4 @@ $(CLOSED_BUILD)CLOSED_TOD_KMODS += todm5819p_rmc todstarcat todm5823
# Performance Counter BackEnd Modules (/usr/kernel/pcbe):
#
PCBE_KMODS += us234_pcbe
+PCBE_KMODS += opl_pcbe
diff --git a/usr/src/uts/sun4u/Makefile.workarounds b/usr/src/uts/sun4u/Makefile.workarounds
index 444d2b974d..236b726d67 100644
--- a/usr/src/uts/sun4u/Makefile.workarounds
+++ b/usr/src/uts/sun4u/Makefile.workarounds
@@ -2,9 +2,8 @@
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
-# Common Development and Distribution License, Version 1.0 only
-# (the "License"). You may not use this file except in compliance
-# with the License.
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
@@ -22,10 +21,10 @@
#
# uts/sun4u/Makefile.workarounds
#
-# Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
-#ident "%Z%%M% %I% %E% SMI"
+#pragma ident "%Z%%M% %I% %E% SMI"
#
#
# This makefile contains workaround defines to be shared
@@ -89,3 +88,8 @@ WORKAROUND_DEFS += -DJALAPENO_ERRATA_85
# Relocate ITLB t16 index 0 locked TTEs to avoid eviction.
# Will not be fixed.
WORKAROUND_DEFS += -DCHEETAHPLUS_ERRATUM_34
+
+# OLYMPUS C cross-call errata. The first revision of the CPU can
+# deliver only one cross call at a time.
+# Will be fixed in a succeeding revision of the CPU.
+WORKAROUND_DEFS += -DOLYMPUS_ERRATA_XCALL
diff --git a/usr/src/uts/sun4u/cpu/common_asm.s b/usr/src/uts/sun4u/cpu/common_asm.s
index e434e4cca6..49a2080e3a 100644
--- a/usr/src/uts/sun4u/cpu/common_asm.s
+++ b/usr/src/uts/sun4u/cpu/common_asm.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -53,7 +52,7 @@
* Do not use any instruction that modifies condition codes as the
* caller may depend on these to remain unchanged across the macro.
*/
-#if defined(CHEETAH)
+#if defined(CHEETAH) || defined(OLYMPUS_C)
#define GET_NATIVE_TIME(out, scr1, scr2) \
rd STICK, out
@@ -907,6 +906,13 @@ adj_shift:
* With UltraSPARC-III the combination of supporting mixed-speed CPUs
* and variable clock rate for power management requires that we
* use %stick to implement this routine.
+ *
+ * For OPL platforms that support the "sleep" instruction, we
+ * conditionally (ifdef'ed) insert a "sleep" instruction in
+ * the loop. Note that theoritically we should have move (duplicated)
+ * the code down to spitfire/us3/opl specific asm files - but this
+ * is alot of code duplication just to add one "sleep" instruction.
+ * We chose less code duplication for this.
*/
#if defined(lint)
@@ -935,7 +941,11 @@ usec_delay(int n)
GET_NATIVE_TIME(%o2, %o3, %o4)
add %o1, %o2, %o1
-1: cmp %o1, %o2
+1:
+#ifdef _OPL
+ .word 0x81b01060 ! insert "sleep" instruction
+#endif /* _OPL */ ! use byte code for now
+ cmp %o1, %o2
GET_NATIVE_TIME(%o2, %o3, %o4)
bgeu,pt %xcc, 1b
nop
@@ -1251,11 +1261,31 @@ prefetch_page_r(void *pp)
retl
prefetch [%o0+STRIDE2], #n_reads
SET_SIZE(prefetch_page_r)
-#else /* SPITFIRE || HUMMINGBIRD */
+
+#elif defined(OLYMPUS_C)
+ !
+ ! Prefetch strides for Olympus-C
+ !
+
+#define STRIDE1 512
+#define STRIDE2 640
+
+ ENTRY(prefetch_page_w)
+ prefetch [%o0+STRIDE1], #n_writes
+ retl
+ prefetch [%o0+STRIDE2], #n_writes
+ SET_SIZE(prefetch_page_w)
+
+ ENTRY(prefetch_page_r)
+ prefetch [%o0+STRIDE1], #n_writes
+ retl
+ prefetch [%o0+STRIDE2], #n_writes
+ SET_SIZE(prefetch_page_r)
+#else /* OLYMPUS_C */
#error "You need to fix this for your new cpu type."
-#endif /* SPITFIRE || HUMMINGBIRD */
+#endif /* OLYMPUS_C */
#endif /* lint */
@@ -1278,11 +1308,16 @@ prefetch_smap_w(void *smp)
#define PREFETCH_Q_LEN 3
-#else /* SPITFIRE || HUMMINGBIRD */
+#elif defined(OLYMPUS_C)
+ !
+ ! (TBD) Use length of one for now.
+#define PREFETCH_Q_LEN 1
+
+#else /* OLYMPUS_C */
#error You need to fix this for your new cpu type.
-#endif /* SPITFIRE || HUMMINGBIRD */
+#endif /* OLYMPUS_C */
#include <vm/kpm.h>
diff --git a/usr/src/uts/sun4u/cpu/opl_kdi.c b/usr/src/uts/sun4u/cpu/opl_kdi.c
new file mode 100644
index 0000000000..c6795bb4e8
--- /dev/null
+++ b/usr/src/uts/sun4u/cpu/opl_kdi.c
@@ -0,0 +1,156 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CPU-specific functions needed by the Kernel-Debugger Interface (KDI). These
+ * functions are invoked directly by the kernel debugger (kmdb) while the system
+ * has been stopped, and as such must not use any kernel facilities that block
+ * or otherwise rely on forward progress by other parts of the kernel.
+ *
+ * These functions may also be called before unix`_start, and as such cannot
+ * use any kernel facilities that must be initialized as part of system start.
+ * An example of such a facility is drv_usecwait(), which relies on a parameter
+ * that is initialized by the unix module. As a result, drv_usecwait() may not
+ * be used by KDI functions.
+ */
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/archsystm.h>
+#include <sys/machsystm.h>
+#include <sys/cpu_module.h>
+#include <sys/xc_impl.h>
+#include <sys/intreg.h>
+#include <sys/kdi_impl.h>
+
+/*
+ * We keep our own copies, used for cache flushing, because we can be called
+ * before cpu_fiximpl().
+ */
+static int kdi_dcache_size;
+static int kdi_dcache_linesize;
+static int kdi_icache_size;
+static int kdi_icache_linesize;
+
+/*
+ * Assembly support for SPARC64-VI modules in opl_olympus_asm.s
+ */
+extern int idsr_busy(void);
+extern void init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
+extern void shipit(int, int);
+extern void kdi_flush_idcache(int, int, int, int);
+extern int kdi_get_stick(uint64_t *);
+
+static int
+kdi_cpu_ready_iter(int (*cb)(int, void *), void *arg)
+{
+ int rc, i;
+
+ for (rc = 0, i = 0; i < NCPU; i++) {
+ if (CPU_IN_SET(cpu_ready_set, i))
+ rc += cb(i, arg);
+ }
+
+ return (rc);
+}
+
+/*
+ * Sends a cross-call to a specified processor. The caller assumes
+ * responsibility for repetition of cross-calls, as appropriate (MARSA for
+ * debugging).
+ */
+static int
+kdi_xc_one(int cpuid, void (*func)(uintptr_t, uintptr_t), uintptr_t arg1,
+ uintptr_t arg2)
+{
+ uint64_t idsr;
+ static void kdi_tickwait(clock_t);
+ uint64_t endtick, tick;
+
+ init_mondo_nocheck((xcfunc_t *)func, arg1, arg2);
+
+ shipit(cpuid, 0);
+
+ /* Spin for at most 1 second for checking */
+ endtick = gettick() + (uint64_t)sys_tick_freq;
+
+ idsr = getidsr();
+
+ if (idsr & IDSR_BUSY) {
+ do {
+ idsr = getidsr();
+ tick = gettick();
+ if (tick > endtick) {
+ return (KDI_XC_RES_BUSY);
+ }
+ } while (idsr & IDSR_BUSY);
+ }
+
+ kdi_tickwait(20000);
+
+ if (idsr & IDSR_NACK)
+ return (KDI_XC_RES_NACK);
+ else
+ return (KDI_XC_RES_OK);
+}
+
+static void
+kdi_tickwait(clock_t nticks)
+{
+ clock_t endtick = gettick() + nticks;
+
+ while (gettick() < endtick);
+}
+
+static void
+kdi_cpu_init(int dcache_size, int dcache_linesize, int icache_size,
+ int icache_linesize)
+{
+ kdi_dcache_size = dcache_size;
+ kdi_dcache_linesize = dcache_linesize;
+ kdi_icache_size = icache_size;
+ kdi_icache_linesize = icache_linesize;
+}
+
+/* used directly by kdi_read/write_phys */
+void
+kdi_flush_caches(void)
+{
+ kdi_flush_idcache(kdi_dcache_size, kdi_dcache_linesize,
+ kdi_icache_size, kdi_icache_linesize);
+}
+
+void
+cpu_kdi_init(kdi_t *kdi)
+{
+ kdi->kdi_flush_caches = kdi_flush_caches;
+ kdi->mkdi_cpu_init = kdi_cpu_init;
+ kdi->mkdi_cpu_ready_iter = kdi_cpu_ready_iter;
+ kdi->mkdi_xc_one = kdi_xc_one;
+ kdi->mkdi_tickwait = kdi_tickwait;
+ kdi->mkdi_get_stick = kdi_get_stick;
+}
diff --git a/usr/src/uts/sun4u/cpu/opl_olympus.c b/usr/src/uts/sun4u/cpu/opl_olympus.c
new file mode 100644
index 0000000000..35d54c0a0a
--- /dev/null
+++ b/usr/src/uts/sun4u/cpu/opl_olympus.c
@@ -0,0 +1,2237 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/ddi.h>
+#include <sys/sysmacros.h>
+#include <sys/archsystm.h>
+#include <sys/vmsystm.h>
+#include <sys/machparam.h>
+#include <sys/machsystm.h>
+#include <sys/machthread.h>
+#include <sys/cpu.h>
+#include <sys/cmp.h>
+#include <sys/elf_SPARC.h>
+#include <vm/vm_dep.h>
+#include <vm/hat_sfmmu.h>
+#include <vm/seg_kpm.h>
+#include <sys/cpuvar.h>
+#include <sys/opl_olympus_regs.h>
+#include <sys/opl_module.h>
+#include <sys/async.h>
+#include <sys/cmn_err.h>
+#include <sys/debug.h>
+#include <sys/dditypes.h>
+#include <sys/cpu_module.h>
+#include <sys/sysmacros.h>
+#include <sys/intreg.h>
+#include <sys/clock.h>
+#include <sys/platform_module.h>
+#include <sys/ontrap.h>
+#include <sys/panic.h>
+#include <sys/memlist.h>
+#include <sys/ndifm.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/cpu/SPARC64-VI.h>
+#include <sys/dtrace.h>
+#include <sys/watchpoint.h>
+#include <sys/promif.h>
+
+/*
+ * Internal functions.
+ */
+static int cpu_sync_log_err(void *flt);
+static void cpu_payload_add_aflt(struct async_flt *, nvlist_t *, nvlist_t *);
+static void opl_cpu_sync_error(struct regs *, ulong_t, ulong_t, uint_t, uint_t);
+static int cpu_flt_in_memory(opl_async_flt_t *, uint64_t);
+
+/*
+ * Error counters resetting interval.
+ */
+static int opl_async_check_interval = 60; /* 1 min */
+
+/*
+ * Maximum number of contexts for Olympus-C.
+ */
+#define MAX_NCTXS (1 << 13)
+
+/* Will be set !NULL for SPARC64-VI and derivatives. */
+static uchar_t ctx_pgsz_arr[MAX_NCTXS];
+uchar_t *ctx_pgsz_array = ctx_pgsz_arr;
+
+/*
+ * PA[22:0] represent Displacement in Jupiter
+ * configuration space.
+ */
+uint_t root_phys_addr_lo_mask = 0x7fffffu;
+
+/*
+ * set in /etc/system to control logging of user BERR/TO's
+ */
+int cpu_berr_to_verbose = 0;
+
+static int min_ecache_size;
+static uint_t priv_hcl_1;
+static uint_t priv_hcl_2;
+static uint_t priv_hcl_4;
+static uint_t priv_hcl_8;
+
+/*
+ * Olympus error log
+ */
+static opl_errlog_t *opl_err_log;
+
+/*
+ * UE is classified into four classes (MEM, CHANNEL, CPU, PATH).
+ * No other ecc_type_info insertions are allowed in between the following
+ * four UE classes.
+ */
+ecc_type_to_info_t ecc_type_to_info[] = {
+ SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
+ "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_UE_MEM,
+ SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
+ "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_UE_CHANNEL,
+ SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
+ "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_UE_CPU,
+ SFSR_UE, "UE ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_UE,
+ "Uncorrectable ECC", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_UE_PATH,
+ SFSR_BERR, "BERR ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
+ "Bus Error", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_BERR,
+ SFSR_TO, "TO ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
+ "Bus Timeout", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_BTO,
+ SFSR_TLB_MUL, "TLB_MUL ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
+ "TLB MultiHit", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_MTLB,
+ SFSR_TLB_PRT, "TLB_PRT ", (OPL_ECC_SYNC_TRAP), OPL_CPU_SYNC_OTHERS,
+ "TLB Parity", FM_EREPORT_PAYLOAD_SYNC,
+ FM_EREPORT_CPU_TLBP,
+
+ UGESR_IAUG_CRE, "IAUG_CRE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IAUG CRE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_CRE,
+ UGESR_IAUG_TSBCTXT, "IAUG_TSBCTXT",
+ OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IAUG TSBCTXT", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_TSBCTX,
+ UGESR_IUG_TSBP, "IUG_TSBP", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG TSBP", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_TSBP,
+ UGESR_IUG_PSTATE, "IUG_PSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG PSTATE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_PSTATE,
+ UGESR_IUG_TSTATE, "IUG_TSTATE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG TSTATE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_TSTATE,
+ UGESR_IUG_F, "IUG_F", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG FREG", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_IUG_F,
+ UGESR_IUG_R, "IUG_R", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG RREG", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_IUG_R,
+ UGESR_AUG_SDC, "AUG_SDC", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "AUG SDC", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_SDC,
+ UGESR_IUG_WDT, "IUG_WDT", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG WDT", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_WDT,
+ UGESR_IUG_DTLB, "IUG_DTLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG DTLB", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_DTLB,
+ UGESR_IUG_ITLB, "IUG_ITLB", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG ITLB", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_ITLB,
+ UGESR_IUG_COREERR, "IUG_COREERR",
+ OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "IUG COREERR", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_CORE,
+ UGESR_MULTI_DAE, "MULTI_DAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "MULTI DAE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_DAE,
+ UGESR_MULTI_IAE, "MULTI_IAE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "MULTI IAE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_IAE,
+ UGESR_MULTI_UGE, "MULTI_UGE", OPL_ECC_URGENT_TRAP, OPL_CPU_URGENT,
+ "MULTI UGE", FM_EREPORT_PAYLOAD_URGENT,
+ FM_EREPORT_CPU_UGE,
+ 0, NULL, 0, 0,
+ NULL, 0, 0,
+};
+
+int (*p2get_mem_info)(int synd_code, uint64_t paddr,
+ uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
+ int *segsp, int *banksp, int *mcidp);
+
+
+/*
+ * Setup trap handlers for 0xA, 0x32, 0x40 trap types.
+ */
+void
+cpu_init_trap(void)
+{
+ OPL_SET_TRAP(tt0_iae, opl_serr_instr);
+ OPL_SET_TRAP(tt1_iae, opl_serr_instr);
+ OPL_SET_TRAP(tt0_dae, opl_serr_instr);
+ OPL_SET_TRAP(tt1_dae, opl_serr_instr);
+ OPL_SET_TRAP(tt0_asdat, opl_ugerr_instr);
+ OPL_SET_TRAP(tt1_asdat, opl_ugerr_instr);
+}
+
+static int
+getintprop(pnode_t node, char *name, int deflt)
+{
+ int value;
+
+ switch (prom_getproplen(node, name)) {
+ case sizeof (int):
+ (void) prom_getprop(node, name, (caddr_t)&value);
+ break;
+
+ default:
+ value = deflt;
+ break;
+ }
+
+ return (value);
+}
+
+/*
+ * Set the magic constants of the implementation.
+ */
+/*ARGSUSED*/
+void
+cpu_fiximp(pnode_t dnode)
+{
+ int i, a;
+ extern int vac_size, vac_shift;
+ extern uint_t vac_mask;
+
+ static struct {
+ char *name;
+ int *var;
+ int defval;
+ } prop[] = {
+ "l1-dcache-size", &dcache_size, OPL_DCACHE_SIZE,
+ "l1-dcache-line-size", &dcache_linesize, OPL_DCACHE_LSIZE,
+ "l1-icache-size", &icache_size, OPL_ICACHE_SIZE,
+ "l1-icache-line-size", &icache_linesize, OPL_ICACHE_LSIZE,
+ "l2-cache-size", &ecache_size, OPL_ECACHE_SIZE,
+ "l2-cache-line-size", &ecache_alignsize, OPL_ECACHE_LSIZE,
+ "l2-cache-associativity", &ecache_associativity, OPL_ECACHE_NWAY
+ };
+
+ for (i = 0; i < sizeof (prop) / sizeof (prop[0]); i++)
+ *prop[i].var = getintprop(dnode, prop[i].name, prop[i].defval);
+
+ ecache_setsize = ecache_size / ecache_associativity;
+
+ vac_size = OPL_VAC_SIZE;
+ vac_mask = MMU_PAGEMASK & (vac_size - 1);
+ i = 0; a = vac_size;
+ while (a >>= 1)
+ ++i;
+ vac_shift = i;
+ shm_alignment = vac_size;
+ vac = 1;
+}
+
+void
+send_mondo_set(cpuset_t set)
+{
+ int lo, busy, nack, shipped = 0;
+ uint16_t i, cpuids[IDSR_BN_SETS];
+ uint64_t idsr, nackmask = 0, busymask, curnack, curbusy;
+ uint64_t starttick, endtick, tick, lasttick;
+#if (NCPU > IDSR_BN_SETS)
+ int index = 0;
+ int ncpuids = 0;
+#endif
+#ifdef OLYMPUS_ERRATA_XCALL
+ int bn_sets = IDSR_BN_SETS;
+ uint64_t ver;
+
+ ASSERT(NCPU > bn_sets);
+#endif
+
+ ASSERT(!CPUSET_ISNULL(set));
+ starttick = lasttick = gettick();
+
+#ifdef OLYMPUS_ERRATA_XCALL
+ ver = ultra_getver();
+ if (((ULTRA_VER_IMPL(ver)) == OLYMPUS_C_IMPL) &&
+ ((OLYMPUS_REV_MASK(ver)) == OLYMPUS_C_A))
+ bn_sets = 1;
+#endif
+
+#if (NCPU <= IDSR_BN_SETS)
+ for (i = 0; i < NCPU; i++)
+ if (CPU_IN_SET(set, i)) {
+ shipit(i, shipped);
+ nackmask |= IDSR_NACK_BIT(shipped);
+ cpuids[shipped++] = i;
+ CPUSET_DEL(set, i);
+ if (CPUSET_ISNULL(set))
+ break;
+ }
+ CPU_STATS_ADDQ(CPU, sys, xcalls, shipped);
+#else
+ for (i = 0; i < NCPU; i++)
+ if (CPU_IN_SET(set, i)) {
+ ncpuids++;
+
+ /*
+ * Ship only to the first (IDSR_BN_SETS) CPUs. If we
+ * find we have shipped to more than (IDSR_BN_SETS)
+ * CPUs, set "index" to the highest numbered CPU in
+ * the set so we can ship to other CPUs a bit later on.
+ */
+#ifdef OLYMPUS_ERRATA_XCALL
+ if (shipped < bn_sets) {
+#else
+ if (shipped < IDSR_BN_SETS) {
+#endif
+ shipit(i, shipped);
+ nackmask |= IDSR_NACK_BIT(shipped);
+ cpuids[shipped++] = i;
+ CPUSET_DEL(set, i);
+ if (CPUSET_ISNULL(set))
+ break;
+ } else
+ index = (int)i;
+ }
+
+ CPU_STATS_ADDQ(CPU, sys, xcalls, ncpuids);
+#endif
+
+ busymask = IDSR_NACK_TO_BUSY(nackmask);
+ busy = nack = 0;
+ endtick = starttick + xc_tick_limit;
+ for (;;) {
+ idsr = getidsr();
+#if (NCPU <= IDSR_BN_SETS)
+ if (idsr == 0)
+ break;
+#else
+ if (idsr == 0 && shipped == ncpuids)
+ break;
+#endif
+ tick = gettick();
+ /*
+ * If there is a big jump between the current tick
+ * count and lasttick, we have probably hit a break
+ * point. Adjust endtick accordingly to avoid panic.
+ */
+ if (tick > (lasttick + xc_tick_jump_limit))
+ endtick += (tick - lasttick);
+ lasttick = tick;
+ if (tick > endtick) {
+ if (panic_quiesce)
+ return;
+ cmn_err(CE_CONT, "send mondo timeout "
+ "[%d NACK %d BUSY]\nIDSR 0x%"
+ "" PRIx64 " cpuids:", nack, busy, idsr);
+#ifdef OLYMPUS_ERRATA_XCALL
+ for (i = 0; i < bn_sets; i++) {
+#else
+ for (i = 0; i < IDSR_BN_SETS; i++) {
+#endif
+ if (idsr & (IDSR_NACK_BIT(i) |
+ IDSR_BUSY_BIT(i))) {
+ cmn_err(CE_CONT, " 0x%x",
+ cpuids[i]);
+ }
+ }
+ cmn_err(CE_CONT, "\n");
+ cmn_err(CE_PANIC, "send_mondo_set: timeout");
+ }
+ curnack = idsr & nackmask;
+ curbusy = idsr & busymask;
+#if (NCPU > IDSR_BN_SETS)
+ if (shipped < ncpuids) {
+ uint64_t cpus_left;
+ uint16_t next = (uint16_t)index;
+
+ cpus_left = ~(IDSR_NACK_TO_BUSY(curnack) | curbusy) &
+ busymask;
+
+ if (cpus_left) {
+ do {
+ /*
+ * Sequence through and ship to the
+ * remainder of the CPUs in the system
+ * (e.g. other than the first
+ * (IDSR_BN_SETS)) in reverse order.
+ */
+ lo = lowbit(cpus_left) - 1;
+ i = IDSR_BUSY_IDX(lo);
+ shipit(next, i);
+ shipped++;
+ cpuids[i] = next;
+
+ /*
+ * If we've processed all the CPUs,
+ * exit the loop now and save
+ * instructions.
+ */
+ if (shipped == ncpuids)
+ break;
+
+ for ((index = ((int)next - 1));
+ index >= 0; index--)
+ if (CPU_IN_SET(set, index)) {
+ next = (uint16_t)index;
+ break;
+ }
+
+ cpus_left &= ~(1ull << lo);
+ } while (cpus_left);
+ continue;
+ }
+ }
+#endif
+ if (curbusy) {
+ busy++;
+ continue;
+ }
+
+#ifdef SEND_MONDO_STATS
+ {
+ int n = gettick() - starttick;
+ if (n < 8192)
+ x_nack_stimes[n >> 7]++;
+ }
+#endif
+ while (gettick() < (tick + sys_clock_mhz))
+ ;
+ do {
+ lo = lowbit(curnack) - 1;
+ i = IDSR_NACK_IDX(lo);
+ shipit(cpuids[i], i);
+ curnack &= ~(1ull << lo);
+ } while (curnack);
+ nack++;
+ busy = 0;
+ }
+#ifdef SEND_MONDO_STATS
+ {
+ int n = gettick() - starttick;
+ if (n < 8192)
+ x_set_stimes[n >> 7]++;
+ else
+ x_set_ltimes[(n >> 13) & 0xf]++;
+ }
+ x_set_cpus[shipped]++;
+#endif
+}
+
+/*
+ * Cpu private initialization.
+ */
+void
+cpu_init_private(struct cpu *cp)
+{
+ if (!(IS_OLYMPUS_C(cpunodes[cp->cpu_id].implementation))) {
+ cmn_err(CE_PANIC, "CPU%d Impl %d: Only SPARC64-VI is supported",
+ cp->cpu_id, cpunodes[cp->cpu_id].implementation);
+ }
+
+ adjust_hw_copy_limits(cpunodes[cp->cpu_id].ecache_size);
+}
+
+void
+cpu_setup(void)
+{
+ extern int at_flags;
+ extern int disable_delay_tlb_flush, delay_tlb_flush;
+ extern int cpc_has_overflow_intr;
+ extern int disable_text_largepages;
+ extern int use_text_pgsz4m;
+ uint64_t cpu0_log;
+ extern uint64_t opl_cpu0_err_log;
+
+ /*
+ * Initialize Error log Scratch register for error handling.
+ */
+
+ cpu0_log = va_to_pa(&opl_cpu0_err_log);
+ opl_error_setup(cpu0_log);
+
+ /*
+ * Enable MMU translating multiple page sizes for
+ * sITLB and sDTLB.
+ */
+ opl_mpg_enable();
+
+ /*
+ * Setup chip-specific trap handlers.
+ */
+ cpu_init_trap();
+
+ cache |= (CACHE_VAC | CACHE_PTAG | CACHE_IOCOHERENT);
+
+ at_flags = EF_SPARC_32PLUS | EF_SPARC_SUN_US1 | EF_SPARC_SUN_US3;
+
+ /*
+ * Use the maximum number of contexts available for SPARC64-VI
+ * unless it has been tuned for debugging.
+ * We are checking against 0 here since this value can be patched
+ * while booting. It can not be patched via /etc/system since it
+ * will be patched too late and thus cause the system to panic.
+ */
+ if (nctxs == 0)
+ nctxs = MAX_NCTXS;
+
+ /*
+ * Due to the number of entries in the fully-associative tlb
+ * this may have to be tuned lower than in spitfire.
+ */
+ pp_slots = MIN(8, MAXPP_SLOTS);
+
+ /*
+ * Block stores do not invalidate all pages of the d$, pagecopy
+ * et. al. need virtual translations with virtual coloring taken
+ * into consideration. prefetch/ldd will pollute the d$ on the
+ * load side.
+ */
+ pp_consistent_coloring = PPAGE_STORE_VCOLORING | PPAGE_LOADS_POLLUTE;
+
+ if (use_page_coloring) {
+ do_pg_coloring = 1;
+ if (use_virtual_coloring)
+ do_virtual_coloring = 1;
+ }
+
+ isa_list =
+ "sparcv9+vis2 sparcv9+vis sparcv9 "
+ "sparcv8plus+vis2 sparcv8plus+vis sparcv8plus "
+ "sparcv8 sparcv8-fsmuld sparcv7 sparc";
+
+ cpu_hwcap_flags = AV_SPARC_VIS | AV_SPARC_VIS2;
+
+ /*
+ * On SPARC64-VI, there's no hole in the virtual address space
+ */
+ hole_start = hole_end = 0;
+
+ /*
+ * The kpm mapping window.
+ * kpm_size:
+ * The size of a single kpm range.
+ * The overall size will be: kpm_size * vac_colors.
+ * kpm_vbase:
+ * The virtual start address of the kpm range within the kernel
+ * virtual address space. kpm_vbase has to be kpm_size aligned.
+ */
+ kpm_size = (size_t)(128ull * 1024 * 1024 * 1024 * 1024); /* 128TB */
+ kpm_size_shift = 47;
+ kpm_vbase = (caddr_t)0x8000000000000000ull; /* 8EB */
+ kpm_smallpages = 1;
+
+ /*
+ * The traptrace code uses either %tick or %stick for
+ * timestamping. We have %stick so we can use it.
+ */
+ traptrace_use_stick = 1;
+
+ /*
+ * SPARC64-VI has a performance counter overflow interrupt
+ */
+ cpc_has_overflow_intr = 1;
+
+ /*
+ * Use SPARC64-VI flush-all support
+ */
+ if (!disable_delay_tlb_flush)
+ delay_tlb_flush = 1;
+
+ /*
+ * Declare that this architecture/cpu combination does not support
+ * fpRAS.
+ */
+ fpras_implemented = 0;
+
+ /*
+ * Enable 4M pages to be used for mapping user text by default. Don't
+ * use large pages for initialized data segments since we may not know
+ * at exec() time what should be the preferred large page size for DTLB
+ * programming.
+ */
+ use_text_pgsz4m = 1;
+ disable_text_largepages = (1 << TTE64K) | (1 << TTE512K) |
+ (1 << TTE32M) | (1 << TTE256M);
+}
+
+/*
+ * Called by setcpudelay
+ */
+void
+cpu_init_tick_freq(void)
+{
+ /*
+ * For SPARC64-VI we want to use the system clock rate as
+ * the basis for low level timing, due to support of mixed
+ * speed CPUs and power management.
+ */
+ if (system_clock_freq == 0)
+ cmn_err(CE_PANIC, "setcpudelay: invalid system_clock_freq");
+
+ sys_tick_freq = system_clock_freq;
+}
+
+#ifdef SEND_MONDO_STATS
+uint32_t x_one_stimes[64];
+uint32_t x_one_ltimes[16];
+uint32_t x_set_stimes[64];
+uint32_t x_set_ltimes[16];
+uint32_t x_set_cpus[NCPU];
+uint32_t x_nack_stimes[64];
+#endif
+
+/*
+ * Note: A version of this function is used by the debugger via the KDI,
+ * and must be kept in sync with this version. Any changes made to this
+ * function to support new chips or to accommodate errata must also be included
+ * in the KDI-specific version. See opl_kdi.c.
+ */
+void
+send_one_mondo(int cpuid)
+{
+ int busy, nack;
+ uint64_t idsr, starttick, endtick, tick, lasttick;
+ uint64_t busymask;
+
+ CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
+ starttick = lasttick = gettick();
+ shipit(cpuid, 0);
+ endtick = starttick + xc_tick_limit;
+ busy = nack = 0;
+ busymask = IDSR_BUSY;
+ for (;;) {
+ idsr = getidsr();
+ if (idsr == 0)
+ break;
+
+ tick = gettick();
+ /*
+ * If there is a big jump between the current tick
+ * count and lasttick, we have probably hit a break
+ * point. Adjust endtick accordingly to avoid panic.
+ */
+ if (tick > (lasttick + xc_tick_jump_limit))
+ endtick += (tick - lasttick);
+ lasttick = tick;
+ if (tick > endtick) {
+ if (panic_quiesce)
+ return;
+ cmn_err(CE_PANIC, "send mondo timeout "
+ "(target 0x%x) [%d NACK %d BUSY]",
+ cpuid, nack, busy);
+ }
+
+ if (idsr & busymask) {
+ busy++;
+ continue;
+ }
+ drv_usecwait(1);
+ shipit(cpuid, 0);
+ nack++;
+ busy = 0;
+ }
+#ifdef SEND_MONDO_STATS
+ {
+ int n = gettick() - starttick;
+ if (n < 8192)
+ x_one_stimes[n >> 7]++;
+ else
+ x_one_ltimes[(n >> 13) & 0xf]++;
+ }
+#endif
+}
+
+/*
+ * init_mmu_page_sizes is set to one after the bootup time initialization
+ * via mmu_init_mmu_page_sizes, to indicate that mmu_page_sizes has a
+ * valid value.
+ *
+ * mmu_disable_ism_large_pages and mmu_disable_large_pages are the mmu-specific
+ * versions of disable_ism_large_pages and disable_large_pages, and feed back
+ * into those two hat variables at hat initialization time.
+ *
+ */
+int init_mmu_page_sizes = 0;
+static int mmu_disable_ism_large_pages = ((1 << TTE64K) |
+ (1 << TTE512K) | (1 << TTE256M));
+static int mmu_disable_large_pages = 0;
+
+/*
+ * Re-initialize mmu_page_sizes and friends, for SPARC64-VI mmu support.
+ * Called during very early bootup from check_cpus_set().
+ * Can be called to verify that mmu_page_sizes are set up correctly.
+ *
+ * Set Olympus defaults. We do not use the function parameter.
+ */
+/*ARGSUSED*/
+int
+mmu_init_mmu_page_sizes(int32_t not_used)
+{
+ if (!init_mmu_page_sizes) {
+ mmu_page_sizes = MMU_PAGE_SIZES;
+ mmu_hashcnt = MAX_HASHCNT;
+ mmu_ism_pagesize = MMU_PAGESIZE32M;
+ mmu_exported_pagesize_mask = (1 << TTE8K) |
+ (1 << TTE64K) | (1 << TTE512K) | (1 << TTE4M) |
+ (1 << TTE32M) | (1 << TTE256M);
+ init_mmu_page_sizes = 1;
+ return (0);
+ }
+ return (1);
+}
+
+/* SPARC64-VI worst case DTLB parameters */
+#ifndef LOCKED_DTLB_ENTRIES
+#define LOCKED_DTLB_ENTRIES 5 /* 2 user TSBs, 2 nucleus, + OBP */
+#endif
+#define TOTAL_DTLB_ENTRIES 32
+#define AVAIL_32M_ENTRIES 0
+#define AVAIL_256M_ENTRIES 0
+#define AVAIL_DTLB_ENTRIES (TOTAL_DTLB_ENTRIES - LOCKED_DTLB_ENTRIES)
+static uint64_t ttecnt_threshold[MMU_PAGE_SIZES] = {
+ AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
+ AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES,
+ AVAIL_DTLB_ENTRIES, AVAIL_DTLB_ENTRIES};
+
+size_t
+mmu_map_pgsz(size_t pgsize)
+{
+ struct proc *p = curproc;
+ struct as *as = p->p_as;
+ struct hat *hat = as->a_hat;
+ uint_t pgsz0, pgsz1;
+ size_t size0, size1;
+
+ ASSERT(mmu_page_sizes == max_mmu_page_sizes);
+ pgsz0 = hat->sfmmu_pgsz[0];
+ pgsz1 = hat->sfmmu_pgsz[1];
+ size0 = hw_page_array[pgsz0].hp_size;
+ size1 = hw_page_array[pgsz1].hp_size;
+ /* Allow use of a larger pagesize if neither TLB is reprogrammed. */
+ if ((pgsz0 == TTE8K) && (pgsz1 == TTE8K)) {
+ return (pgsize);
+ /* Allow use of requested pagesize if TLB is reprogrammed to it. */
+ } else if ((pgsize == size0) || (pgsize == size1)) {
+ return (pgsize);
+ /* Use larger reprogrammed TLB size if pgsize is at least that big. */
+ } else if (pgsz1 > pgsz0) {
+ if (pgsize >= size1)
+ return (size1);
+ /* Use smaller reprogrammed TLB size if pgsize is at least that big. */
+ } else {
+ if (pgsize >= size0)
+ return (size0);
+ }
+ return (pgsize);
+}
+
+/*
+ * The function returns the mmu-specific values for the
+ * hat's disable_large_pages and disable_ism_large_pages variables.
+ */
+int
+mmu_large_pages_disabled(uint_t flag)
+{
+ int pages_disable = 0;
+
+ if (flag == HAT_LOAD) {
+ pages_disable = mmu_disable_large_pages;
+ } else if (flag == HAT_LOAD_SHARE) {
+ pages_disable = mmu_disable_ism_large_pages;
+ }
+ return (pages_disable);
+}
+
+/*
+ * mmu_init_large_pages is called with the desired ism_pagesize parameter.
+ * It may be called from set_platform_defaults, if some value other than 32M
+ * is desired. mmu_ism_pagesize is the tunable. If it has a bad value,
+ * then only warn, since it would be bad form to panic due to a user typo.
+ *
+ * The function re-initializes the mmu_disable_ism_large_pages variable.
+ */
+void
+mmu_init_large_pages(size_t ism_pagesize)
+{
+ switch (ism_pagesize) {
+ case MMU_PAGESIZE4M:
+ mmu_disable_ism_large_pages = ((1 << TTE64K) |
+ (1 << TTE512K) | (1 << TTE32M) | (1 << TTE256M));
+ break;
+ case MMU_PAGESIZE32M:
+ mmu_disable_ism_large_pages = ((1 << TTE64K) |
+ (1 << TTE512K) | (1 << TTE256M));
+ break;
+ case MMU_PAGESIZE256M:
+ mmu_disable_ism_large_pages = ((1 << TTE64K) |
+ (1 << TTE512K) | (1 << TTE32M));
+ break;
+ default:
+ cmn_err(CE_WARN, "Unrecognized mmu_ism_pagesize value 0x%lx",
+ ism_pagesize);
+ break;
+ }
+}
+
+/*ARGSUSED*/
+uint_t
+mmu_preferred_pgsz(struct hat *hat, caddr_t addr, size_t len)
+{
+ sfmmu_t *sfmmup = (sfmmu_t *)hat;
+ uint_t pgsz0, pgsz1;
+ uint_t szc, maxszc = mmu_page_sizes - 1;
+ size_t pgsz;
+ extern int disable_large_pages;
+
+ pgsz0 = (uint_t)sfmmup->sfmmu_pgsz[0];
+ pgsz1 = (uint_t)sfmmup->sfmmu_pgsz[1];
+
+ /*
+ * If either of the TLBs are reprogrammed, choose
+ * the largest mapping size as the preferred size,
+ * if it fits the size and alignment constraints.
+ * Else return the largest mapping size that fits,
+ * if neither TLB is reprogrammed.
+ */
+ if (pgsz0 > TTE8K || pgsz1 > TTE8K) {
+ if (pgsz1 > pgsz0) { /* First try pgsz1 */
+ pgsz = hw_page_array[pgsz1].hp_size;
+ if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
+ return (pgsz1);
+ }
+ if (pgsz0 > TTE8K) { /* Then try pgsz0, if !TTE8K */
+ pgsz = hw_page_array[pgsz0].hp_size;
+ if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
+ return (pgsz0);
+ }
+ } else { /* Otherwise pick best fit if neither TLB is reprogrammed. */
+ for (szc = maxszc; szc > TTE8K; szc--) {
+ if (disable_large_pages & (1 << szc))
+ continue;
+
+ pgsz = hw_page_array[szc].hp_size;
+ if ((len >= pgsz) && IS_P2ALIGNED(addr, pgsz))
+ return (szc);
+ }
+ }
+ return (TTE8K);
+}
+
+/*
+ * Function to reprogram the TLBs when page sizes used
+ * by a process change significantly.
+ */
+void
+mmu_setup_page_sizes(struct hat *hat, uint64_t *ttecnt)
+{
+ extern int page_szc(size_t);
+ uint8_t pgsz0, pgsz1;
+
+ /*
+ * Don't program 2nd dtlb for kernel and ism hat
+ */
+ if (hat->sfmmu_ismhat || hat == ksfmmup)
+ return;
+
+ /*
+ * hat->sfmmu_pgsz[] is an array whose elements
+ * contain a sorted order of page sizes. Element
+ * 0 is the most commonly used page size, followed
+ * by element 1, and so on.
+ *
+ * ttecnt[] is an array of per-page-size page counts
+ * mapped into the process.
+ *
+ * If the HAT's choice for page sizes is unsuitable,
+ * we can override it here. The new values written
+ * to the array will be handed back to us later to
+ * do the actual programming of the TLB hardware.
+ *
+ */
+ pgsz0 = (uint8_t)MIN(hat->sfmmu_pgsz[0], hat->sfmmu_pgsz[1]);
+ pgsz1 = (uint8_t)MAX(hat->sfmmu_pgsz[0], hat->sfmmu_pgsz[1]);
+
+ /*
+ * This implements PAGESIZE programming of the sTLB
+ * if large TTE counts don't exceed the thresholds.
+ */
+ if (ttecnt[pgsz0] < ttecnt_threshold[pgsz0])
+ pgsz0 = page_szc(MMU_PAGESIZE);
+ if (ttecnt[pgsz1] < ttecnt_threshold[pgsz1])
+ pgsz1 = page_szc(MMU_PAGESIZE);
+ hat->sfmmu_pgsz[0] = pgsz0;
+ hat->sfmmu_pgsz[1] = pgsz1;
+ /* otherwise, accept what the HAT chose for us */
+}
+
+/*
+ * The HAT calls this function when an MMU context is allocated so that we
+ * can reprogram the large TLBs appropriately for the new process using
+ * the context.
+ *
+ * The caller must hold the HAT lock.
+ */
+void
+mmu_set_ctx_page_sizes(struct hat *hat)
+{
+ uint8_t pgsz0, pgsz1;
+ uint8_t new_cext;
+
+ ASSERT(sfmmu_hat_lock_held(hat));
+ /*
+ * Don't program 2nd dtlb for kernel and ism hat
+ */
+ if (hat->sfmmu_ismhat || hat == ksfmmup)
+ return;
+
+ /*
+ * If supported, reprogram the TLBs to a larger pagesize.
+ */
+ pgsz0 = hat->sfmmu_pgsz[0];
+ pgsz1 = hat->sfmmu_pgsz[1];
+ ASSERT(pgsz0 < mmu_page_sizes);
+ ASSERT(pgsz1 < mmu_page_sizes);
+ new_cext = TAGACCEXT_MKSZPAIR(pgsz1, pgsz0);
+ if (hat->sfmmu_cext != new_cext) {
+ hat->sfmmu_cext = new_cext;
+ }
+ ctx_pgsz_array[hat->sfmmu_cnum] = hat->sfmmu_cext;
+ /*
+ * sfmmu_setctx_sec() will take care of the
+ * rest of the dirty work for us.
+ */
+}
+
+/*
+ * Return processor specific async error structure
+ * size used.
+ */
+int
+cpu_aflt_size(void)
+{
+ return (sizeof (opl_async_flt_t));
+}
+
+/*
+ * The cpu_sync_log_err() function is called via the [uc]e_drain() function to
+ * post-process CPU events that are dequeued. As such, it can be invoked
+ * from softint context, from AST processing in the trap() flow, or from the
+ * panic flow. We decode the CPU-specific data, and take appropriate actions.
+ * Historically this entry point was used to log the actual cmn_err(9F) text;
+ * now with FMA it is used to prepare 'flt' to be converted into an ereport.
+ * With FMA this function now also returns a flag which indicates to the
+ * caller whether the ereport should be posted (1) or suppressed (0).
+ */
+/*ARGSUSED*/
+static int
+cpu_sync_log_err(void *flt)
+{
+ opl_async_flt_t *opl_flt = (opl_async_flt_t *)flt;
+ struct async_flt *aflt = (struct async_flt *)flt;
+
+ /*
+ * No extra processing of urgent error events.
+ * Always generate ereports for these events.
+ */
+ if (aflt->flt_status == OPL_ECC_URGENT_TRAP)
+ return (1);
+
+ /*
+ * Additional processing for synchronous errors.
+ */
+ switch (opl_flt->flt_type) {
+ case OPL_CPU_INV_SFSR:
+ /* Nothing to decode for an invalid SFSR; always post. */
+ return (1);
+
+ case OPL_CPU_SYNC_UE:
+ /*
+ * The validity: SFSR_MK_UE bit has been checked
+ * in opl_cpu_sync_error()
+ * No more check is required.
+ *
+ * opl_flt->flt_eid_mod and flt_eid_sid have been set by H/W,
+ * and they have been retrieved in cpu_queue_events()
+ */
+
+ if (opl_flt->flt_eid_mod == OPL_ERRID_MEM) {
+ ASSERT(aflt->flt_in_memory);
+ /*
+ * We want to skip logging only if ALL the following
+ * conditions are true:
+ *
+ * 1. We are not panicing already.
+ * 2. The error is a memory error.
+ * 3. There is only one error.
+ * 4. The error is on a retired page.
+ * 5. The error occurred under on_trap
+ * protection AFLT_PROT_EC
+ */
+ /*
+ * NOTE(review): only conditions 1, 4 and 5 are tested
+ * explicitly below; condition 2 is implied by the
+ * enclosing OPL_ERRID_MEM case, and condition 3
+ * ("only one error") is not checked here -- confirm
+ * it is guaranteed by the caller.
+ */
+ if (!panicstr && aflt->flt_prot == AFLT_PROT_EC &&
+ page_retire_check(aflt->flt_addr, NULL) == 0) {
+ /*
+ * Do not log an error from
+ * the retired page
+ */
+ softcall(ecc_page_zero, (void *)aflt->flt_addr);
+ return (0);
+ }
+ /* Retire the page now unless we are already panicking. */
+ if (!panicstr)
+ cpu_page_retire(opl_flt);
+ }
+ return (1);
+
+ case OPL_CPU_SYNC_OTHERS:
+ /*
+ * For the following error cases, the processor HW does
+ * not set the flt_eid_mod/flt_eid_sid. Instead, SW will attempt
+ * to assign appropriate values here to reflect what we
+ * think is the most likely cause of the problem w.r.t to
+ * the particular error event. For Buserr and timeout
+ * error event, we will assign OPL_ERRID_CHANNEL as the
+ * most likely reason. For TLB parity or multiple hit
+ * error events, we will assign the reason as
+ * OPL_ERRID_CPU (cpu related problem) and set the
+ * flt_eid_sid to point to the cpuid.
+ */
+
+ if (opl_flt->flt_bit & (SFSR_BERR|SFSR_TO)) {
+ /*
+ * flt_eid_sid will not be used for this case.
+ */
+ opl_flt->flt_eid_mod = OPL_ERRID_CHANNEL;
+ }
+ if (opl_flt->flt_bit & (SFSR_TLB_MUL|SFSR_TLB_PRT)) {
+ opl_flt->flt_eid_mod = OPL_ERRID_CPU;
+ opl_flt->flt_eid_sid = aflt->flt_inst;
+ }
+
+ /*
+ * In case of no effective error bit
+ */
+ if ((opl_flt->flt_bit & SFSR_ERRS) == 0) {
+ opl_flt->flt_eid_mod = OPL_ERRID_CPU;
+ opl_flt->flt_eid_sid = aflt->flt_inst;
+ }
+ break;
+
+ default:
+ return (1);
+ }
+ /* OPL_CPU_SYNC_OTHERS falls through here: always post the ereport. */
+ return (1);
+}
+
+/*
+ * Retire the faulty page so it is no longer handed out.  The physical
+ * address of the error is taken from the embedded common async_flt.
+ */
+void
+cpu_page_retire(opl_async_flt_t *opl_flt)
+{
+ (void) page_retire(((struct async_flt *)opl_flt)->flt_addr, PR_UE);
+}
+
+/*
+ * Invoked by error_init() early in startup and therefore before
+ * startup_errorq() is called to drain any error Q -
+ *
+ * startup()
+ * startup_end()
+ * error_init()
+ * cpu_error_init()
+ * errorq_init()
+ * errorq_drain()
+ * start_other_cpus()
+ *
+ * The purpose of this routine is to create error-related taskqs. Taskqs
+ * are used for this purpose because cpu_lock can't be grabbed from interrupt
+ * context.
+ *
+ * NOTE(review): this OPL implementation creates no taskqs; it only
+ * allocates the shared error-log buffer.  The taskq wording above is
+ * presumably inherited from other cpu modules -- confirm and trim.
+ */
+/*ARGSUSED*/
+void
+cpu_error_init(int items)
+{
+ /*
+ * Allocate the per-CPU error-log area.  The base must be page
+ * aligned; the log physical address is later handed to the HW
+ * (see opl_cpu_reg_init()), so a misaligned base is fatal.
+ */
+ opl_err_log = (opl_errlog_t *)
+ kmem_alloc(ERRLOG_ALLOC_SZ, KM_SLEEP);
+ if ((uint64_t)opl_err_log & MMU_PAGEOFFSET)
+ cmn_err(CE_PANIC, "The base address of the error log "
+ "is not page aligned");
+}
+
+/*
+ * Single dispatch point for dequeued errors.  CPU faults get
+ * class-specific post-processing and may have their ereport
+ * suppressed; bus faults are forwarded to the bus logger; any
+ * other class is diagnosed and dropped.
+ */
+void
+cpu_ue_log_err(struct async_flt *aflt)
+{
+ if (aflt->flt_class == CPU_FAULT) {
+ if (cpu_sync_log_err(aflt))
+ cpu_ereport_post(aflt);
+ } else if (aflt->flt_class == BUS_FAULT) {
+ bus_async_log_err(aflt);
+ } else {
+ cmn_err(CE_WARN, "discarding async error %p with invalid "
+ "fault class (0x%x)", (void *)aflt, aflt->flt_class);
+ }
+}
+
+/*
+ * Routine for panic hook callback from panic_idle().
+ *
+ * Nothing to do here: OPL keeps no deferred per-CPU error state that
+ * would need to be captured at panic time.
+ */
+void
+cpu_async_panic_callb(void)
+{
+}
+
+/*
+ * Return a string identifying the physical name associated with a
+ * memory/cache error.  The work is delegated to the (weak) platform
+ * hook plat_get_mem_unum(); without it we report ENOTSUP.  On any
+ * failure the caller's buffer is returned empty.
+ */
+/*ARGSUSED*/
+int
+cpu_get_mem_unum(int synd_status, ushort_t flt_synd, uint64_t flt_stat,
+ uint64_t flt_addr, int flt_bus_id, int flt_in_memory,
+ ushort_t flt_status, char *buf, int buflen, int *lenp)
+{
+ int synd_code = (int)flt_synd;
+ int err;
+
+ if (&plat_get_mem_unum == NULL) {
+ buf[0] = '\0';
+ *lenp = 0;
+ return (ENOTSUP);
+ }
+
+ err = plat_get_mem_unum(synd_code, flt_addr, flt_bus_id,
+ flt_in_memory, flt_status, buf, buflen, lenp);
+ if (err != 0) {
+ buf[0] = '\0';
+ *lenp = 0;
+ }
+ return (err);
+}
+
+/*
+ * Convenience wrapper around cpu_get_mem_unum() that pulls its
+ * arguments out of an async_flt structure.  The AFSR argument is
+ * forced to -1 so the callee treats this as a memory error.
+ */
+int
+cpu_get_mem_unum_aflt(int synd_status, struct async_flt *aflt,
+ char *buf, int buflen, int *lenp)
+{
+ int rc;
+
+ rc = cpu_get_mem_unum(synd_status, aflt->flt_synd, (uint64_t)-1,
+ aflt->flt_addr, aflt->flt_bus_id, aflt->flt_in_memory,
+ aflt->flt_status, buf, buflen, lenp);
+ return (rc);
+}
+
+/*
+ * This routine is a more generic interface to cpu_get_mem_unum()
+ * that may be used by other modules (e.g. mm).
+ *
+ * Returns 0 on success with the unum copied into 'buf' and its length
+ * in '*lenp'; ENXIO for an invalid address, ENAMETOOLONG if the name
+ * does not fit, or whatever cpu_get_mem_unum() reports.
+ */
+/*ARGSUSED*/
+int
+cpu_get_mem_name(uint64_t synd, uint64_t *afsr, uint64_t afar,
+ char *buf, int buflen, int *lenp)
+{
+ int synd_status, flt_in_memory, ret;
+ ushort_t flt_status = 0;
+ char unum[UNUM_NAMLEN];
+
+ /*
+ * Check for an invalid address.
+ */
+ if (afar == (uint64_t)-1)
+ return (ENXIO);
+
+ /* A syndrome of -1 means the caller has no valid syndrome. */
+ if (synd == (uint64_t)-1)
+ synd_status = AFLT_STAT_INVALID;
+ else
+ synd_status = AFLT_STAT_VALID;
+
+ /* "In memory" iff SFSR says so and the PFN is real memory. */
+ flt_in_memory = (*afsr & SFSR_MEMORY) &&
+ pf_is_memory(afar >> MMU_PAGESHIFT);
+
+ ret = cpu_get_mem_unum(synd_status, (ushort_t)synd, *afsr, afar,
+ CPU->cpu_id, flt_in_memory, flt_status, unum,
+ UNUM_NAMLEN, lenp);
+ if (ret != 0)
+ return (ret);
+
+ if (*lenp >= buflen)
+ return (ENAMETOOLONG);
+
+ /* *lenp < buflen here, so the copy below is NUL-terminated. */
+ (void) strncpy(buf, unum, buflen);
+
+ return (0);
+}
+
+/*
+ * Return memory information associated with a physical address and
+ * syndrome, via the platform's p2get_mem_info hook.  ENXIO for an
+ * invalid address, ENOTSUP if no hook is registered.
+ */
+/*ARGSUSED*/
+int
+cpu_get_mem_info(uint64_t synd, uint64_t afar,
+ uint64_t *mem_sizep, uint64_t *seg_sizep, uint64_t *bank_sizep,
+ int *segsp, int *banksp, int *mcidp)
+{
+ int synd_code = (int)synd;
+
+ if (afar == (uint64_t)-1)
+ return (ENXIO);
+ if (p2get_mem_info == NULL)
+ return (ENOTSUP);
+
+ return ((p2get_mem_info)(synd_code, afar, mem_sizep, seg_sizep,
+ bank_sizep, segsp, banksp, mcidp));
+}
+
+/*
+ * Routine to return a string identifying the physical
+ * name associated with a cpuid.
+ *
+ * Returns 0 on success with the unum copied into 'buf' and its length
+ * in '*lenp'; ENOTSUP if the platform provides no hook; ENAMETOOLONG
+ * if the name does not fit in 'buf'.
+ */
+int
+cpu_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
+{
+ int ret;
+ char unum[UNUM_NAMLEN];
+
+ if (&plat_get_cpu_unum) {
+ if ((ret = plat_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, lenp))
+ != 0)
+ return (ret);
+ } else {
+ return (ENOTSUP);
+ }
+
+ if (*lenp >= buflen)
+ return (ENAMETOOLONG);
+
+ /*
+ * Copy up to buflen bytes, as cpu_get_mem_name() does.  Copying
+ * only *lenp bytes (as before) would leave buf without a NUL
+ * terminator when *lenp is the string length.  *lenp < buflen
+ * guarantees unum's terminator lands inside buf.
+ */
+ (void) strncpy(buf, unum, buflen);
+
+ return (0);
+}
+
+/*
+ * This routine exports the name buffer size: callers of the unum
+ * routines above should size their buffers to at least this value.
+ */
+size_t
+cpu_get_name_bufsize()
+{
+ return (UNUM_NAMLEN);
+}
+
+/*
+ * Flush the entire ecache by ASI_L2_CNTL.U2_FLUSH, using the current
+ * CPU's cache geometry from the cpunodes table.
+ */
+void
+cpu_flush_ecache(void)
+{
+ flush_ecache(ecache_flushaddr, cpunodes[CPU->cpu_id].ecache_size,
+ cpunodes[CPU->cpu_id].ecache_linesize);
+}
+
+/*
+ * Map the flt_status trap bits to an ereport trap-type code.
+ * NOTE(review): the return type is uint8_t, so the -1 "no match"
+ * sentinel is delivered to the payload as 0xFF.
+ */
+static uint8_t
+flt_to_trap_type(struct async_flt *aflt)
+{
+ if (aflt->flt_status & OPL_ECC_ISYNC_TRAP)
+ return (TRAP_TYPE_ECC_I);
+ if (aflt->flt_status & OPL_ECC_DSYNC_TRAP)
+ return (TRAP_TYPE_ECC_D);
+ if (aflt->flt_status & OPL_ECC_URGENT_TRAP)
+ return (TRAP_TYPE_URGENT);
+ return (-1);
+}
+
+/*
+ * Encode the data saved in the opl_async_flt_t struct into
+ * the FM ereport payload.  Each payload member is gated by the
+ * flt_payload flag mask chosen when the event was queued; the
+ * resource FMRI (cpu or mem scheme) is filled in according to the
+ * error-ID module decoded from the SFSR.
+ */
+/* ARGSUSED */
+static void
+cpu_payload_add_aflt(struct async_flt *aflt, nvlist_t *payload,
+ nvlist_t *resource)
+{
+ opl_async_flt_t *opl_flt = (opl_async_flt_t *)aflt;
+ char unum[UNUM_NAMLEN];
+ char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
+ int len;
+
+
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFSR) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFSR,
+ DATA_TYPE_UINT64, aflt->flt_stat, NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_SFAR) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SFAR,
+ DATA_TYPE_UINT64, aflt->flt_addr, NULL);
+ }
+ /* Urgent errors carry UGESR in flt_stat rather than SFSR. */
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_UGESR) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UGESR,
+ DATA_TYPE_UINT64, aflt->flt_stat, NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PC) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PC,
+ DATA_TYPE_UINT64, (uint64_t)aflt->flt_pc, NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TL) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TL,
+ DATA_TYPE_UINT8, (uint8_t)aflt->flt_tl, NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_TT) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_TT,
+ DATA_TYPE_UINT8, flt_to_trap_type(aflt), NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_PRIV) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PRIV,
+ DATA_TYPE_BOOLEAN_VALUE,
+ (aflt->flt_priv ? B_TRUE : B_FALSE), NULL);
+ }
+ if (aflt->flt_payload & FM_EREPORT_PAYLOAD_FLAG_FLT_STATUS) {
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FLT_STATUS,
+ DATA_TYPE_UINT64, (uint64_t)aflt->flt_status, NULL);
+ }
+
+ switch (opl_flt->flt_eid_mod) {
+ case OPL_ERRID_CPU:
+ /* CPU-scheme resource: identified by cpuid + device id. */
+ (void) snprintf(sbuf, sizeof (sbuf), "%llX",
+ (u_longlong_t)cpunodes[opl_flt->flt_eid_sid].device_id);
+ (void) fm_fmri_cpu_set(resource, FM_CPU_SCHEME_VERSION,
+ NULL, opl_flt->flt_eid_sid,
+ (uint8_t *)&cpunodes[opl_flt->flt_eid_sid].version,
+ sbuf);
+ fm_payload_set(payload,
+ FM_EREPORT_PAYLOAD_NAME_RESOURCE,
+ DATA_TYPE_NVLIST, resource, NULL);
+ break;
+
+ case OPL_ERRID_CHANNEL:
+ /*
+ * No resource is created but the cpumem DE will find
+ * the defective path by retrieving EID from SFSR which is
+ * included in the payload.
+ */
+ break;
+
+ case OPL_ERRID_MEM:
+ /* Mem-scheme resource, named by the platform unum. */
+ (void) cpu_get_mem_unum_aflt(0, aflt, unum, UNUM_NAMLEN, &len);
+ (void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
+ NULL, unum, NULL, (uint64_t)-1);
+ fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
+ DATA_TYPE_NVLIST, resource, NULL);
+ break;
+
+ case OPL_ERRID_PATH:
+ /*
+ * No resource is created but the cpumem DE will find
+ * the defective path by retrieving EID from SFSR which is
+ * included in the payload.
+ */
+ break;
+ }
+}
+
+/*
+ * Decide whether the fault address is valid for this error bit and
+ * refers to real memory (i.e. pf_is_memory() says the PFN is RAM).
+ * Only synchronous ECC traps carry a usable address; everything else
+ * is reported as "not in memory".
+ */
+/*ARGSUSED*/
+static int
+cpu_flt_in_memory(opl_async_flt_t *opl_flt, uint64_t t_afsr_bit)
+{
+ struct async_flt *aflt = (struct async_flt *)opl_flt;
+
+ if ((aflt->flt_status & OPL_ECC_SYNC_TRAP) == 0)
+ return (0);
+
+ return ((t_afsr_bit & SFSR_MEMORY) != 0 &&
+ pf_is_memory(aflt->flt_addr >> MMU_PAGESHIFT));
+}
+
+/*
+ * In OPL SCF does the stick synchronization, so the slave side of the
+ * generic handshake is a no-op here.
+ */
+void
+sticksync_slave(void)
+{
+}
+
+/*
+ * In OPL SCF does the stick synchronization, so the master side of the
+ * generic handshake is a no-op here.
+ */
+void
+sticksync_master(void)
+{
+}
+
+/*
+ * Cpu private uninitialization. OPL cpus do not use the private area;
+ * only the CMP bookkeeping for this cpuid needs to be torn down.
+ */
+void
+cpu_uninit_private(struct cpu *cp)
+{
+ cmp_delete_cpu(cp->cpu_id);
+}
+
+/*
+ * Always flush an entire cache: OPL has no finer-grained error-time
+ * flush, so delegate to the full-ecache flush.
+ */
+void
+cpu_error_ecache_flush(void)
+{
+ cpu_flush_ecache();
+}
+
+/*
+ * Build and post the FMA ereport for a queued fault.  Two allocation
+ * regimes: on the panic path nvlists come from a pre-reserved errorq
+ * element (and are committed synchronously with it); otherwise they
+ * are created with the default allocator, posted, and destroyed here.
+ */
+void
+cpu_ereport_post(struct async_flt *aflt)
+{
+ char *cpu_type, buf[FM_MAX_CLASS];
+ nv_alloc_t *nva = NULL;
+ nvlist_t *ereport, *detector, *resource;
+ errorq_elem_t *eqep;
+ char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
+
+ if (aflt->flt_panic || panicstr) {
+ /* Panic path: reserve an errorq element; bail if exhausted. */
+ eqep = errorq_reserve(ereport_errorq);
+ if (eqep == NULL)
+ return;
+ ereport = errorq_elem_nvl(ereport_errorq, eqep);
+ nva = errorq_elem_nva(ereport_errorq, eqep);
+ } else {
+ /* nva is NULL here, so the default nvlist allocator is used. */
+ ereport = fm_nvlist_create(nva);
+ }
+
+ /*
+ * Create the scheme "cpu" FMRI.
+ */
+ detector = fm_nvlist_create(nva);
+ resource = fm_nvlist_create(nva);
+ switch (cpunodes[aflt->flt_inst].implementation) {
+ case OLYMPUS_C_IMPL:
+ cpu_type = FM_EREPORT_CPU_SPARC64_VI;
+ break;
+ default:
+ cpu_type = FM_EREPORT_CPU_UNSUPPORTED;
+ break;
+ }
+ (void) snprintf(sbuf, sizeof (sbuf), "%llX",
+ (u_longlong_t)cpunodes[aflt->flt_inst].device_id);
+ (void) fm_fmri_cpu_set(detector, FM_CPU_SCHEME_VERSION, NULL,
+ aflt->flt_inst, (uint8_t *)&cpunodes[aflt->flt_inst].version,
+ sbuf);
+
+ /*
+ * Encode all the common data into the ereport.
+ */
+ (void) snprintf(buf, FM_MAX_CLASS, "%s.%s.%s",
+ FM_ERROR_CPU, cpu_type, aflt->flt_erpt_class);
+
+ fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
+ fm_ena_generate(aflt->flt_id, FM_ENA_FMT1), detector, NULL);
+
+ /*
+ * Encode the error specific data that was saved in
+ * the async_flt structure into the ereport.
+ */
+ cpu_payload_add_aflt(aflt, ereport, resource);
+
+ if (aflt->flt_panic || panicstr) {
+ /* eqep is valid here: set on the matching branch above. */
+ errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
+ } else {
+ (void) fm_ereport_post(ereport, EVCH_TRYHARD);
+ fm_nvlist_destroy(ereport, FM_NVA_FREE);
+ fm_nvlist_destroy(detector, FM_NVA_FREE);
+ fm_nvlist_destroy(resource, FM_NVA_FREE);
+ }
+}
+
+/*
+ * Dispatch a DDI FM error to the driver error handlers rooted at the
+ * devinfo tree.  If no error protection was in effect and some handler
+ * deems the error fatal, escalate by marking the fault for panic.
+ */
+void
+cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
+{
+ ddi_fm_error_t derr;
+ int disp;
+
+ bzero(&derr, sizeof (derr));
+ derr.fme_version = DDI_FME_VERSION;
+ derr.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
+ derr.fme_flag = expected;
+ derr.fme_bus_specific = (void *)aflt->flt_addr;
+
+ disp = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &derr);
+ if (disp == DDI_FM_FATAL && aflt->flt_prot == AFLT_PROT_NONE)
+ aflt->flt_panic = 1;
+}
+
+/*
+ * Tag the fault with its ereport class and enqueue it on the given
+ * error queue; 'flag' selects sync vs. async dispatch semantics.
+ */
+void
+cpu_errorq_dispatch(char *error_class, void *payload, size_t payload_sz,
+ errorq_t *eqp, uint_t flag)
+{
+ struct async_flt *aflt = (struct async_flt *)payload;
+
+ aflt->flt_erpt_class = error_class;
+ errorq_dispatch(eqp, payload, payload_sz, flag);
+}
+
+void
+adjust_hw_copy_limits(int ecache_size)
+{
+ /*
+ * Set hw copy limits.
+ *
+ * /etc/system will be parsed later and can override one or more
+ * of these settings.
+ *
+ * At this time, ecache size seems only mildly relevant.
+ * We seem to run into issues with the d-cache and stalls
+ * we see on misses.
+ *
+ * Cycle measurement indicates that 2 byte aligned copies fare
+ * little better than doing things with VIS at around 512 bytes.
+ * 4 byte aligned shows promise until around 1024 bytes. 8 Byte
+ * aligned is faster whenever the source and destination data
+ * in cache and the total size is less than 2 Kbytes. The 2K
+ * limit seems to be driven by the 2K write cache.
+ * When more than 2K of copies are done in non-VIS mode, stores
+ * backup in the write cache. In VIS mode, the write cache is
+ * bypassed, allowing faster cache-line writes aligned on cache
+ * boundaries.
+ *
+ * In addition, in non-VIS mode, there is no prefetching, so
+ * for larger copies, the advantage of prefetching to avoid even
+ * occasional cache misses is enough to justify using the VIS code.
+ *
+ * During testing, it was discovered that netbench ran 3% slower
+ * when hw_copy_limit_8 was 2K or larger. Apparently for server
+ * applications, data is only used once (copied to the output
+ * buffer, then copied by the network device off the system). Using
+ * the VIS copy saves more L2 cache state. Network copies are
+ * around 1.3K to 1.5K in size for historical reasons.
+ *
+ * Therefore, a limit of 1K bytes will be used for the 8 byte
+ * aligned copy even for large caches and 8 MB ecache. The
+ * infrastructure to allow different limits for different sized
+ * caches is kept to allow further tuning in later releases.
+ */
+
+ if (min_ecache_size == 0 && use_hw_bcopy) {
+ /*
+ * First time through - should be before /etc/system
+ * is read.
+ * Could skip the checks for zero but this lets us
+ * preserve any debugger rewrites.
+ */
+ if (hw_copy_limit_1 == 0) {
+ hw_copy_limit_1 = VIS_COPY_THRESHOLD;
+ priv_hcl_1 = hw_copy_limit_1;
+ }
+ if (hw_copy_limit_2 == 0) {
+ hw_copy_limit_2 = 2 * VIS_COPY_THRESHOLD;
+ priv_hcl_2 = hw_copy_limit_2;
+ }
+ if (hw_copy_limit_4 == 0) {
+ hw_copy_limit_4 = 4 * VIS_COPY_THRESHOLD;
+ priv_hcl_4 = hw_copy_limit_4;
+ }
+ /*
+ * NOTE(review): unlike limits 1/2/4, the 8-byte limit is
+ * set to 4x (not 8x) the threshold -- matches the 1K cap
+ * rationale above, but confirm it is intentional.
+ */
+ if (hw_copy_limit_8 == 0) {
+ hw_copy_limit_8 = 4 * VIS_COPY_THRESHOLD;
+ priv_hcl_8 = hw_copy_limit_8;
+ }
+ min_ecache_size = ecache_size;
+ } else {
+ /*
+ * MP initialization. Called *after* /etc/system has
+ * been parsed. One CPU has already been initialized.
+ * Need to cater for /etc/system having scragged one
+ * of our values.
+ */
+ if (ecache_size == min_ecache_size) {
+ /*
+ * Same size ecache. We do nothing unless we
+ * have a pessimistic ecache setting. In that
+ * case we become more optimistic (if the cache is
+ * large enough).
+ */
+ if (hw_copy_limit_8 == 4 * VIS_COPY_THRESHOLD) {
+ /*
+ * Need to adjust hw_copy_limit* from our
+ * pessimistic uniprocessor value to a more
+ * optimistic UP value *iff* it hasn't been
+ * reset.
+ */
+ /*
+ * NOTE(review): all three branches below
+ * assign the same 4x value, so the size
+ * ladder is currently dead differentiation
+ * (kept, per the comment above, for future
+ * tuning).
+ */
+ if ((ecache_size > 1048576) &&
+ (priv_hcl_8 == hw_copy_limit_8)) {
+ if (ecache_size <= 2097152)
+ hw_copy_limit_8 = 4 *
+ VIS_COPY_THRESHOLD;
+ else if (ecache_size <= 4194304)
+ hw_copy_limit_8 = 4 *
+ VIS_COPY_THRESHOLD;
+ else
+ hw_copy_limit_8 = 4 *
+ VIS_COPY_THRESHOLD;
+ priv_hcl_8 = hw_copy_limit_8;
+ }
+ }
+ } else if (ecache_size < min_ecache_size) {
+ /*
+ * A different ecache size. Can this even happen?
+ */
+ if (priv_hcl_8 == hw_copy_limit_8) {
+ /*
+ * The previous value that we set
+ * is unchanged (i.e., it hasn't been
+ * scragged by /etc/system). Rewrite it.
+ */
+ /*
+ * NOTE(review): the first three branches
+ * below are also identical (8x); only the
+ * final else differs (10x).
+ */
+ if (ecache_size <= 1048576)
+ hw_copy_limit_8 = 8 *
+ VIS_COPY_THRESHOLD;
+ else if (ecache_size <= 2097152)
+ hw_copy_limit_8 = 8 *
+ VIS_COPY_THRESHOLD;
+ else if (ecache_size <= 4194304)
+ hw_copy_limit_8 = 8 *
+ VIS_COPY_THRESHOLD;
+ else
+ hw_copy_limit_8 = 10 *
+ VIS_COPY_THRESHOLD;
+ priv_hcl_8 = hw_copy_limit_8;
+ min_ecache_size = ecache_size;
+ }
+ }
+ }
+}
+
+#define VIS_BLOCKSIZE 64
+
+/*
+ * Retry a block store to user space with watchpoints disabled.
+ * The VIS block is temporarily unwatched so dtrace_blksuword32()
+ * (called with flag 0, i.e. no further error recursion) can run;
+ * the watchpoint is restored afterwards if one was in effect.
+ */
+int
+dtrace_blksuword32_err(uintptr_t addr, uint32_t *data)
+{
+ int ret, watched;
+
+ watched = watch_disable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
+ ret = dtrace_blksuword32(addr, data, 0);
+ if (watched)
+ watch_enable_addr((void *)addr, VIS_BLOCKSIZE, S_WRITE);
+
+ return (ret);
+}
+
+/*
+ * Per-CPU register setup for OPL: program the physical address of this
+ * CPU's slice of the shared error log into the error-log scratch
+ * register, and enable multi-page-size MMU translations.  cpu0 is
+ * assumed to have been set up already and is skipped.
+ */
+void
+opl_cpu_reg_init()
+{
+ uint64_t this_cpu_log;
+
+ /*
+ * We do not need to re-initialize cpu0 registers.
+ */
+ if (cpu[getprocessorid()] == &cpu0)
+ return;
+
+ /*
+ * Initialize Error log Scratch register for error handling.
+ */
+
+ /* Each CPU owns an ERRLOG_BUFSZ-sized slice of opl_err_log. */
+ this_cpu_log = va_to_pa((void*)(((uint64_t)opl_err_log) +
+ ERRLOG_BUFSZ * (getprocessorid())));
+ opl_error_setup(this_cpu_log);
+
+ /*
+ * Enable MMU translating multiple page sizes for
+ * sITLB and sDTLB.
+ */
+ opl_mpg_enable();
+}
+
+/*
+ * Queue one event in ue_queue based on ecc_type_to_info entry.
+ * Appends the entry's reason text to 'reason' (bounded by
+ * MAX_REASON_STRING), stamps the fault with the entry's error bit,
+ * type and payload mask, then dispatches it on ue_queue.
+ */
+static void
+cpu_queue_one_event(opl_async_flt_t *opl_flt, char *reason,
+ ecc_type_to_info_t *eccp)
+{
+ struct async_flt *aflt = (struct async_flt *)opl_flt;
+
+ /* Append only if the combined reason still fits the buffer. */
+ if (reason &&
+ strlen(reason) + strlen(eccp->ec_reason) < MAX_REASON_STRING) {
+ (void) strcat(reason, eccp->ec_reason);
+ }
+
+ opl_flt->flt_bit = eccp->ec_afsr_bit;
+ opl_flt->flt_type = eccp->ec_flt_type;
+ aflt->flt_in_memory = cpu_flt_in_memory(opl_flt, opl_flt->flt_bit);
+ aflt->flt_payload = eccp->ec_err_payload;
+
+ ASSERT(aflt->flt_status & (OPL_ECC_SYNC_TRAP|OPL_ECC_URGENT_TRAP));
+ cpu_errorq_dispatch(eccp->ec_err_class,
+ (void *)opl_flt, sizeof (opl_async_flt_t),
+ ue_queue,
+ aflt->flt_panic);
+}
+
+/*
+ * Queue events on async event queue one event per error bit.
+ * Return number of events queued.
+ */
+int
+cpu_queue_events(opl_async_flt_t *opl_flt, char *reason, uint64_t t_afsr_errs)
+{
+ struct async_flt *aflt = (struct async_flt *)opl_flt;
+ ecc_type_to_info_t *eccp;
+ int nevents = 0;
+
+ /*
+ * Queue expected errors, error bit and fault type must match
+ * in the ecc_type_to_info table.
+ */
+ for (eccp = ecc_type_to_info; t_afsr_errs != 0 && eccp->ec_desc != NULL;
+ eccp++) {
+ if ((eccp->ec_afsr_bit & t_afsr_errs) != 0 &&
+ (eccp->ec_flags & aflt->flt_status) != 0) {
+ /*
+ * UE error event can be further
+ * classified/breakdown into finer granularity
+ * based on the flt_eid_mod value set by HW. We do
+ * special handling here so that we can report UE
+ * error in finer granularity as ue_mem,
+ * ue_channel, ue_cpu or ue_path.
+ */
+ if (eccp->ec_flt_type == OPL_CPU_SYNC_UE) {
+ opl_flt->flt_eid_mod =
+ (aflt->flt_stat & SFSR_EID_MOD)
+ >> SFSR_EID_MOD_SHIFT;
+ opl_flt->flt_eid_sid =
+ (aflt->flt_stat & SFSR_EID_SID)
+ >> SFSR_EID_SID_SHIFT;
+ /*
+ * Need to advance eccp pointer by flt_eid_mod
+ * so that we get an appropriate ecc pointer
+ * (relies on the UE sub-entries being laid
+ * out consecutively in ecc_type_to_info):
+ *
+ * EID # of advances
+ * ----------------------------------
+ * OPL_ERRID_MEM 0
+ * OPL_ERRID_CHANNEL 1
+ * OPL_ERRID_CPU 2
+ * OPL_ERRID_PATH 3
+ */
+ eccp += opl_flt->flt_eid_mod;
+ }
+ cpu_queue_one_event(opl_flt, reason, eccp);
+ t_afsr_errs &= ~eccp->ec_afsr_bit;
+ nevents++;
+ }
+ }
+
+ return (nevents);
+}
+
+/*
+ * Sync. error wrapper functions.
+ * The nucleus trap handler lands in one of the four entry points
+ * below; which one encodes the trap type (instruction vs. data) and
+ * the trap level (0 vs. above 0), so those facts reach
+ * opl_cpu_sync_error() without consuming reserved SFSR bits.
+ */
+
+#define OPL_SYNC_TL0 0
+#define OPL_SYNC_TL1 1
+#define OPL_ISYNC_ERR 0
+#define OPL_DSYNC_ERR 1
+
+void
+opl_cpu_isync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
+{
+ uint64_t sfar = p_sfar;
+ uint64_t sfsr = p_sfsr;
+
+ opl_cpu_sync_error(rp, sfar, sfsr, OPL_SYNC_TL0, OPL_ISYNC_ERR);
+}
+
+void
+opl_cpu_isync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
+{
+ uint64_t sfar = p_sfar;
+ uint64_t sfsr = p_sfsr;
+
+ opl_cpu_sync_error(rp, sfar, sfsr, OPL_SYNC_TL1, OPL_ISYNC_ERR);
+}
+
+void
+opl_cpu_dsync_tl0_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
+{
+ uint64_t sfar = p_sfar;
+ uint64_t sfsr = p_sfsr;
+
+ opl_cpu_sync_error(rp, sfar, sfsr, OPL_SYNC_TL0, OPL_DSYNC_ERR);
+}
+
+void
+opl_cpu_dsync_tl1_error(struct regs *rp, ulong_t p_sfar, ulong_t p_sfsr)
+{
+ uint64_t sfar = p_sfar;
+ uint64_t sfsr = p_sfsr;
+
+ opl_cpu_sync_error(rp, sfar, sfsr, OPL_SYNC_TL1, OPL_DSYNC_ERR);
+}
+
+/*
+ * The fj sync err handler transfers control here for UE, BERR, TO, TLB_MUL
+ * and TLB_PRT.
+ * This function is designed based on cpu_deferred_error().
+ */
+
+static void
+opl_cpu_sync_error(struct regs *rp, ulong_t t_sfar, ulong_t t_sfsr,
+ uint_t tl, uint_t derr)
+{
+ opl_async_flt_t opl_flt;
+ struct async_flt *aflt;
+ int trampolined = 0;
+ char pr_reason[MAX_REASON_STRING];
+ uint64_t log_sfsr;
+ int expected = DDI_FM_ERR_UNEXPECTED;
+ ddi_acc_hdl_t *hp;
+
+ /*
+ * We need to look at p_flag to determine if the thread detected an
+ * error while dumping core. We can't grab p_lock here, but it's ok
+ * because we just need a consistent snapshot and we know that everyone
+ * else will store a consistent set of bits while holding p_lock. We
+ * don't have to worry about a race because SDOCORE is set once prior
+ * to doing i/o from the process's address space and is never cleared.
+ */
+ uint_t pflag = ttoproc(curthread)->p_flag;
+
+ pr_reason[0] = '\0';
+
+ /*
+ * handle the specific error
+ */
+ bzero(&opl_flt, sizeof (opl_async_flt_t));
+ aflt = (struct async_flt *)&opl_flt;
+ aflt->flt_id = gethrtime_waitfree();
+ aflt->flt_bus_id = getprocessorid();
+ aflt->flt_inst = CPU->cpu_id;
+ aflt->flt_stat = t_sfsr;
+ aflt->flt_addr = t_sfar;
+ aflt->flt_pc = (caddr_t)rp->r_pc;
+ aflt->flt_prot = (uchar_t)AFLT_PROT_NONE;
+ aflt->flt_class = (uchar_t)CPU_FAULT;
+ /* At TL>0 we were necessarily in privileged code. */
+ aflt->flt_priv = (uchar_t)
+ (tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ? 1 : 0));
+ aflt->flt_tl = (uchar_t)tl;
+ /* TL>0 errors and TLB multi-hit/parity errors always panic. */
+ aflt->flt_panic = (uchar_t)(tl != 0 || aft_testfatal != 0 ||
+ (t_sfsr & (SFSR_TLB_MUL|SFSR_TLB_PRT)) != 0);
+ aflt->flt_core = (pflag & SDOCORE) ? 1 : 0;
+ aflt->flt_status = (derr) ? OPL_ECC_DSYNC_TRAP : OPL_ECC_ISYNC_TRAP;
+
+ /*
+ * If SFSR.FV is not set, both SFSR and SFAR/SFPAR values are uncertain.
+ * So, clear all error bits to avoid mis-handling and force the system
+ * panicked.
+ * We skip all the procedures below down to the panic message call.
+ */
+ if (!(t_sfsr & SFSR_FV)) {
+ opl_flt.flt_type = OPL_CPU_INV_SFSR;
+ aflt->flt_panic = 1;
+ aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
+ cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
+ (void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
+ aflt->flt_panic);
+ /*
+ * NOTE(review): "%sErrors(s)" looks like a typo for
+ * "%sError(s)" (also at the panic below) -- message text
+ * left unchanged here.
+ */
+ fm_panic("%sErrors(s)", "invalid SFSR");
+ }
+
+ /*
+ * If either UE and MK bit is off, this is not valid UE error.
+ * If it is not valid UE error, clear UE & MK_UE bits to prevent
+ * mis-handling below.
+ * aflt->flt_stat keeps the original bits as a reference.
+ */
+ if ((t_sfsr & (SFSR_MK_UE|SFSR_UE)) !=
+ (SFSR_MK_UE|SFSR_UE)) {
+ t_sfsr &= ~(SFSR_MK_UE|SFSR_UE);
+ }
+
+ /*
+ * If the trap occurred in privileged mode at TL=0, we need to check to
+ * see if we were executing in the kernel under on_trap() or t_lofault
+ * protection. If so, modify the saved registers so that we return
+ * from the trap to the appropriate trampoline routine.
+ */
+ if (!aflt->flt_panic && aflt->flt_priv && tl == 0) {
+ if (curthread->t_ontrap != NULL) {
+ on_trap_data_t *otp = curthread->t_ontrap;
+
+ if (otp->ot_prot & OT_DATA_EC) {
+ aflt->flt_prot = (uchar_t)AFLT_PROT_EC;
+ otp->ot_trap |= (ushort_t)OT_DATA_EC;
+ rp->r_pc = otp->ot_trampoline;
+ rp->r_npc = rp->r_pc + 4;
+ trampolined = 1;
+ }
+
+ if ((t_sfsr & (SFSR_TO | SFSR_BERR)) &&
+ (otp->ot_prot & OT_DATA_ACCESS)) {
+ aflt->flt_prot = (uchar_t)AFLT_PROT_ACCESS;
+ otp->ot_trap |= (ushort_t)OT_DATA_ACCESS;
+ rp->r_pc = otp->ot_trampoline;
+ rp->r_npc = rp->r_pc + 4;
+ trampolined = 1;
+ /*
+ * for peeks and caut_gets errors are expected
+ */
+ hp = (ddi_acc_hdl_t *)otp->ot_handle;
+ if (!hp)
+ expected = DDI_FM_ERR_PEEK;
+ else if (hp->ah_acc.devacc_attr_access ==
+ DDI_CAUTIOUS_ACC)
+ expected = DDI_FM_ERR_EXPECTED;
+ }
+
+ } else if (curthread->t_lofault) {
+ /* Protected copy: fail the copy with EFAULT. */
+ aflt->flt_prot = AFLT_PROT_COPY;
+ rp->r_g1 = EFAULT;
+ rp->r_pc = curthread->t_lofault;
+ rp->r_npc = rp->r_pc + 4;
+ trampolined = 1;
+ }
+ }
+
+ /*
+ * If we're in user mode or we're doing a protected copy, we either
+ * want the ASTON code below to send a signal to the user process
+ * or we want to panic if aft_panic is set.
+ *
+ * If we're in privileged mode and we're not doing a copy, then we
+ * need to check if we've trampolined. If we haven't trampolined,
+ * we should panic.
+ */
+ if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
+ if (t_sfsr & (SFSR_ERRS & ~(SFSR_BERR | SFSR_TO)))
+ aflt->flt_panic |= aft_panic;
+ } else if (!trampolined) {
+ aflt->flt_panic = 1;
+ }
+
+ /*
+ * If we've trampolined due to a privileged TO or BERR, or if an
+ * unprivileged TO or BERR occurred, we don't want to enqueue an
+ * event for that TO or BERR. Queue all other events (if any) besides
+ * the TO/BERR.
+ */
+ log_sfsr = t_sfsr;
+ if (trampolined) {
+ log_sfsr &= ~(SFSR_TO | SFSR_BERR);
+ } else if (!aflt->flt_priv) {
+ /*
+ * User mode, suppress messages if
+ * cpu_berr_to_verbose is not set.
+ */
+ if (!cpu_berr_to_verbose)
+ log_sfsr &= ~(SFSR_TO | SFSR_BERR);
+ }
+
+ /*
+ * If nothing gets queued (either no loggable bits, or the table
+ * matched nothing), report the raw SFSR via an INV_SFSR event.
+ */
+ if (((log_sfsr & SFSR_ERRS) &&
+ (cpu_queue_events(&opl_flt, pr_reason, t_sfsr) == 0)) ||
+ ((t_sfsr & SFSR_ERRS) == 0)) {
+ opl_flt.flt_type = OPL_CPU_INV_SFSR;
+ aflt->flt_payload = FM_EREPORT_PAYLOAD_SYNC;
+ cpu_errorq_dispatch(FM_EREPORT_CPU_INV_SFSR,
+ (void *)&opl_flt, sizeof (opl_async_flt_t), ue_queue,
+ aflt->flt_panic);
+ }
+
+ if (t_sfsr & (SFSR_UE|SFSR_TO|SFSR_BERR)) {
+ cpu_run_bus_error_handlers(aflt, expected);
+ }
+
+ /*
+ * Panic here if aflt->flt_panic has been set. Enqueued errors will
+ * be logged as part of the panic flow.
+ */
+ if (aflt->flt_panic) {
+ /*
+ * The literal below fits in pr_reason, which is
+ * MAX_REASON_STRING bytes -- presumably > 14; confirm.
+ */
+ if (pr_reason[0] == 0)
+ strcpy(pr_reason, "invalid SFSR ");
+
+ fm_panic("%sErrors(s)", pr_reason);
+ }
+
+ /*
+ * If we queued an error and we are going to return from the trap and
+ * the error was in user mode or inside of a copy routine, set AST flag
+ * so the queue will be drained before returning to user mode. The
+ * AST processing will also act on our failure policy.
+ */
+ if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
+ int pcb_flag = 0;
+
+ if (t_sfsr & (SFSR_ERRS &
+ ~(SFSR_BERR | SFSR_TO)))
+ pcb_flag |= ASYNC_HWERR;
+
+ if (t_sfsr & SFSR_BERR)
+ pcb_flag |= ASYNC_BERR;
+
+ if (t_sfsr & SFSR_TO)
+ pcb_flag |= ASYNC_BTO;
+
+ ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
+ aston(curthread);
+ }
+}
+
+/*ARGSUSED*/
+/*
+ * Handler for urgent (UGESR) errors.  Urgent errors are always fatal:
+ * the fault is queued for the panic-time drain and fm_panic() is
+ * called unconditionally at the end.
+ */
+void
+opl_cpu_urgent_error(struct regs *rp, ulong_t p_ugesr, ulong_t tl)
+{
+ opl_async_flt_t opl_flt;
+ struct async_flt *aflt;
+ char pr_reason[MAX_REASON_STRING];
+
+ /* normalize tl: any trap level >= 2 is recorded as 1 */
+ tl = (tl >= 2 ? 1 : 0);
+ pr_reason[0] = '\0';
+
+ bzero(&opl_flt, sizeof (opl_async_flt_t));
+ aflt = (struct async_flt *)&opl_flt;
+ aflt->flt_id = gethrtime_waitfree();
+ aflt->flt_bus_id = getprocessorid();
+ aflt->flt_inst = CPU->cpu_id;
+ aflt->flt_stat = p_ugesr;
+ aflt->flt_pc = (caddr_t)rp->r_pc;
+ aflt->flt_class = (uchar_t)CPU_FAULT;
+ aflt->flt_tl = tl;
+ aflt->flt_priv = (uchar_t)
+ (tl == 1 ? 1 : ((rp->r_tstate & TSTATE_PRIV) ? 1 : 0));
+ aflt->flt_status = OPL_ECC_URGENT_TRAP;
+ aflt->flt_panic = 1;
+ /*
+ * HW does not set mod/sid in case of urgent error.
+ * So we have to set it here.
+ */
+ opl_flt.flt_eid_mod = OPL_ERRID_CPU;
+ opl_flt.flt_eid_sid = aflt->flt_inst;
+
+ /* If no table entry matched, report the raw UGESR instead. */
+ if (cpu_queue_events(&opl_flt, pr_reason, p_ugesr) == 0) {
+ opl_flt.flt_type = OPL_CPU_INV_UGESR;
+ aflt->flt_payload = FM_EREPORT_PAYLOAD_URGENT;
+ cpu_errorq_dispatch(FM_EREPORT_CPU_INV_URG,
+ (void *)&opl_flt, sizeof (opl_async_flt_t),
+ ue_queue, aflt->flt_panic);
+ }
+
+ fm_panic("Urgent Error");
+}
+
+/*
+ * Initialization error counters resetting: omni-cyclic online handler
+ * that arranges a periodic ras_cntr_reset() on each CPU.
+ */
+/* ARGSUSED */
+static void
+opl_ras_online(void *arg, cpu_t *cp, cyc_handler_t *hdlr, cyc_time_t *when)
+{
+ hdlr->cyh_func = (cyc_func_t)ras_cntr_reset;
+ hdlr->cyh_level = CY_LOW_LEVEL;
+ hdlr->cyh_arg = (void *)(uintptr_t)cp->cpu_id;
+
+ /*
+ * Stagger the first firing across CPUs (cpu_id'th slice of a
+ * 10-second window) so the resets do not all fire at once.
+ */
+ when->cyt_when = cp->cpu_id * (((hrtime_t)NANOSEC * 10)/ NCPU);
+ when->cyt_interval = (hrtime_t)NANOSEC * opl_async_check_interval;
+}
+
+/*
+ * MP startup hook: register the RAS counter-reset omni cyclic so it
+ * comes online on every CPU.  cpu_lock must be held across
+ * cyclic_add_omni().
+ */
+void
+cpu_mp_init(void)
+{
+ cyc_omni_handler_t omni;
+
+ omni.cyo_online = opl_ras_online;
+ omni.cyo_offline = NULL;
+ omni.cyo_arg = NULL;
+
+ mutex_enter(&cpu_lock);
+ (void) cyclic_add_omni(&omni);
+ mutex_exit(&cpu_lock);
+}
+
+/*ARGSUSED*/
+/* No kernel page-size initialization is needed on OPL. */
+void
+mmu_init_kernel_pgsz(struct hat *hat)
+{
+}
+
+/*
+ * Validate the segkmem large-page size.  An unset value (0) selects
+ * the 4M default; any exact TTE page size from 8K through 4M is
+ * accepted as-is; anything else falls back to 8K.
+ */
+size_t
+mmu_get_kernel_lpsize(size_t lpsize)
+{
+ uint_t szc;
+
+ if (lpsize == 0) {
+ /* no setting for segkmem_lpsize in /etc/system: use default */
+ return (MMU_PAGESIZE4M);
+ }
+
+ for (szc = TTE8K; szc <= TTE4M; szc++) {
+ if (TTEBYTES(szc) == lpsize)
+ return (lpsize);
+ }
+
+ return (TTEBYTES(TTE8K));
+}
+
+/*
+ * The following are functions that are unused in
+ * OPL cpu module. They are defined here to resolve
+ * dependencies in the "unix" module.
+ * Unused functions that should never be called in
+ * OPL are coded with ASSERT(0); the remainder are
+ * harmless no-ops or return ENOTSUP.
+ */
+
+void
+cpu_disable_errors(void)
+{}
+
+void
+cpu_enable_errors(void)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+cpu_ce_scrub_mem_err(struct async_flt *ecc, boolean_t t)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+cpu_faulted_enter(struct cpu *cp)
+{}
+
+/*ARGSUSED*/
+void
+cpu_faulted_exit(struct cpu *cp)
+{}
+
+/*ARGSUSED*/
+void
+cpu_check_allcpus(struct async_flt *aflt)
+{}
+
+/* Correctable-error and scrubber entry points: unused on OPL. */
+/*ARGSUSED*/
+void
+cpu_ce_log_err(struct async_flt *aflt, errorq_elem_t *t)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+cpu_check_ce(int flag, uint64_t pa, caddr_t va, uint_t psz)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+cpu_ce_count_unum(struct async_flt *ecc, int len, char *unum)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+cpu_busy_ecache_scrub(struct cpu *cp)
+{}
+
+/*ARGSUSED*/
+void
+cpu_idle_ecache_scrub(struct cpu *cp)
+{}
+
+/* ARGSUSED */
+void
+cpu_change_speed(uint64_t divisor, uint64_t arg2)
+{ ASSERT(0); }
+
+void
+cpu_init_cache_scrub(void)
+{}
+
+/* Memory serial-id/offset translation: not supported on OPL. */
+/* ARGSUSED */
+int
+cpu_get_mem_sid(char *unum, char *buf, int buflen, int *lenp)
+{
+ return (ENOTSUP);
+}
+
+/* ARGSUSED */
+int
+cpu_get_mem_addr(char *unum, char *sid, uint64_t offset, uint64_t *addrp)
+{
+ return (ENOTSUP);
+}
+
+/* ARGSUSED */
+int
+cpu_get_mem_offset(uint64_t flt_addr, uint64_t *offp)
+{
+ return (ENOTSUP);
+}
+
+/*ARGSUSED*/
+void
+itlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
+{ ASSERT(0); }
+
+/*ARGSUSED*/
+void
+dtlb_rd_entry(uint_t entry, tte_t *tte, uint64_t *va_tag)
+{ ASSERT(0); }
diff --git a/usr/src/uts/sun4u/cpu/opl_olympus_asm.s b/usr/src/uts/sun4u/cpu/opl_olympus_asm.s
new file mode 100644
index 0000000000..44e4983586
--- /dev/null
+++ b/usr/src/uts/sun4u/cpu/opl_olympus_asm.s
@@ -0,0 +1,1991 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Assembly code support for the Olympus-C module
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#if !defined(lint)
+#include "assym.h"
+#endif /* lint */
+
+#include <sys/asm_linkage.h>
+#include <sys/mmu.h>
+#include <vm/hat_sfmmu.h>
+#include <sys/machparam.h>
+#include <sys/machcpuvar.h>
+#include <sys/machthread.h>
+#include <sys/machtrap.h>
+#include <sys/privregs.h>
+#include <sys/asm_linkage.h>
+#include <sys/trap.h>
+#include <sys/opl_olympus_regs.h>
+#include <sys/opl_module.h>
+#include <sys/xc_impl.h>
+#include <sys/intreg.h>
+#include <sys/async.h>
+#include <sys/clock.h>
+#include <sys/cmpregs.h>
+
+#ifdef TRAPTRACE
+#include <sys/traptrace.h>
+#endif /* TRAPTRACE */
+
+/*
+ * Macro that flushes the entire Ecache.
+ *
+ * On Olympus-C the whole L2 is flushed by a single store to the
+ * L2 control register, so no displacement-flush loop is needed:
+ * arg1 and arg2 are merely clobbered as scratch registers, and
+ * arg3/tmp1 are not referenced at all.
+ *
+ * arg1 = ecache size (clobbered; input value unused)
+ * arg2 = ecache linesize (clobbered; input value unused)
+ * arg3 = ecache flush address - Not used for olympus-C
+ */
+#define ECACHE_FLUSHALL(arg1, arg2, arg3, tmp1) \
+ mov ASI_L2_CTRL_U2_FLUSH, arg1; \
+ mov ASI_L2_CTRL_RW_ADDR, arg2; \
+ stxa arg1, [arg2]ASI_L2_CTRL
+
+/*
+ * SPARC64-VI MMU and Cache operations.
+ */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vtag_flushpage(caddr_t vaddr, u_int ctxnum)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flushpage)
+ /*
+ * flush page from the tlb
+ *
+ * %o0 = vaddr
+ * %o1 = ctxnum (0 means KCONTEXT)
+ *
+ * Interrupts are disabled across the demap so MMU_PCONTEXT
+ * cannot change underneath us.
+ */
+ rdpr %pstate, %o5
+#ifdef DEBUG
+ andcc %o5, PSTATE_IE, %g0 /* if interrupts already */
+ bnz,a,pt %icc, 3f /* disabled, panic */
+ nop
+ save %sp, -SA(MINFRAME), %sp
+ sethi %hi(sfmmu_panic1), %o0
+ call panic
+ or %o0, %lo(sfmmu_panic1), %o0
+ ret
+ restore
+3:
+#endif /* DEBUG */
+ /*
+ * disable ints
+ */
+ andn %o5, PSTATE_IE, %o4
+ wrpr %o4, 0, %pstate
+
+ /*
+ * Then, blow out the tlb
+ * Interrupts are disabled to prevent the primary ctx register
+ * from changing underneath us.
+ */
+ brnz,pt %o1, 1f /* KCONTEXT? */
+ sethi %hi(FLUSH_ADDR), %o3
+ /*
+ * For KCONTEXT demaps use primary. type = page implicitly
+ */
+ stxa %g0, [%o0]ASI_DTLB_DEMAP /* dmmu flush for KCONTEXT */
+ stxa %g0, [%o0]ASI_ITLB_DEMAP /* immu flush for KCONTEXT */
+ flush %o3
+ b 5f
+ nop
+1:
+ /*
+ * User demap. We need to set the primary context properly.
+ * Secondary context cannot be used for SPARC64-VI IMMU.
+ * %o0 = vaddr
+ * %o1 = ctxnum
+ * %o3 = FLUSH_ADDR
+ */
+ sethi %hi(ctx_pgsz_array), %o4
+ ldn [%o4 + %lo(ctx_pgsz_array)], %o4
+ ldub [%o4 + %o1], %o4
+ sll %o4, CTXREG_EXT_SHIFT, %o4
+ or %o1, %o4, %o1
+ wrpr %g0, 1, %tl
+ set MMU_PCONTEXT, %o4
+ or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %o0, %o0
+ ldxa [%o4]ASI_DMMU, %o2 /* rd old ctxnum */
+ stxa %o1, [%o4]ASI_DMMU /* wr new ctxnum */
+4:
+ stxa %g0, [%o0]ASI_DTLB_DEMAP
+ stxa %g0, [%o0]ASI_ITLB_DEMAP
+ stxa %o2, [%o4]ASI_DMMU /* restore old ctxnum */
+ flush %o3
+ wrpr %g0, 0, %tl
+5:
+ retl
+ wrpr %g0, %o5, %pstate /* enable interrupts */
+ SET_SIZE(vtag_flushpage)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vtag_flushctx(u_int ctxnum)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flushctx)
+ /*
+ * flush context from the tlb
+ *
+ * %o0 = ctxnum
+ * We disable interrupts to prevent the primary ctx register changing
+ * underneath us.
+ *
+ * NOTE(review): unlike vtag_flushpage, the DEBUG path below calls
+ * panic without a save; panic never returns, so only the panic
+ * frame differs - confirm this is intentional.
+ */
+ sethi %hi(FLUSH_ADDR), %o3
+ rdpr %pstate, %o2
+
+#ifdef DEBUG
+ andcc %o2, PSTATE_IE, %g0 /* if interrupts already */
+ bnz,a,pt %icc, 1f /* disabled, panic */
+ nop
+ sethi %hi(sfmmu_panic1), %o0
+ call panic
+ or %o0, %lo(sfmmu_panic1), %o0
+1:
+#endif /* DEBUG */
+
+ sethi %hi(ctx_pgsz_array), %o4
+ ldn [%o4 + %lo(ctx_pgsz_array)], %o4
+ ldub [%o4 + %o0], %o4
+ sll %o4, CTXREG_EXT_SHIFT, %o4
+ or %o0, %o4, %o0
+ wrpr %o2, PSTATE_IE, %pstate /* disable interrupts (wrpr XORs; IE known set) */
+ set MMU_PCONTEXT, %o4
+ set DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g1
+ wrpr %g0, 1, %tl
+ ldxa [%o4]ASI_DMMU, %o5 /* rd old ctxnum */
+ stxa %o0, [%o4]ASI_DMMU /* wr new ctxnum */
+4:
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ stxa %o5, [%o4]ASI_DMMU /* restore old ctxnum */
+ flush %o3
+ wrpr %g0, 0, %tl
+5:
+ retl
+ wrpr %g0, %o2, %pstate /* enable interrupts */
+ SET_SIZE(vtag_flushctx)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+void
+vtag_flushall(void)
+{}
+
+#else /* lint */
+
+ ENTRY_NP2(vtag_flushall, demap_all)
+ /*
+ * flush the entire tlb (demap-all, I and D);
+ * also entered under the alias demap_all
+ */
+ sethi %hi(FLUSH_ADDR), %o3
+ set DEMAP_ALL_TYPE, %g1
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ flush %o3
+ retl
+ nop
+ SET_SIZE(demap_all)
+ SET_SIZE(vtag_flushall)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vtag_flushpage_tl1(uint64_t vaddr, uint64_t ctxnum)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flushpage_tl1)
+ /*
+ * x-trap to flush page from tlb and tsb
+ *
+ * %g1 = vaddr, zero-extended on 32-bit kernel
+ * %g2 = ctxnum (0 means KCONTEXT)
+ *
+ * (the srln/slln pair below clears the page-offset bits of vaddr)
+ *
+ * assumes TSBE_TAG = 0
+ */
+ srln %g1, MMU_PAGESHIFT, %g1
+ brnz,pt %g2, 1f /* KCONTEXT */
+ slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
+
+ /* We need to demap in the kernel context */
+ or DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ retry
+1:
+ /* We need to demap in a user context */
+ or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
+ sethi %hi(ctx_pgsz_array), %g4
+ ldn [%g4 + %lo(ctx_pgsz_array)], %g4
+ ldub [%g4 + %g2], %g4
+ sll %g4, CTXREG_EXT_SHIFT, %g4
+ or %g2, %g4, %g2
+
+ set MMU_PCONTEXT, %g4
+ ldxa [%g4]ASI_DMMU, %g5 /* rd old ctxnum */
+ stxa %g2, [%g4]ASI_DMMU /* wr new ctxnum */
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */
+ retry
+ SET_SIZE(vtag_flushpage_tl1)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vtag_flush_pgcnt_tl1(uint64_t vaddr, uint64_t ctx_pgcnt)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flush_pgcnt_tl1)
+ /*
+ * x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
+ *
+ * %g1 = vaddr, zero-extended on 32-bit kernel
+ * %g2 = <zero32|ctx16|pgcnt16>
+ *
+ * NOTE: this handler relies on the fact that no
+ * interrupts or traps can occur during the loop
+ * issuing the TLB_DEMAP operations. It is assumed
+ * that interrupts are disabled and this code is
+ * fetching from the kernel locked text address.
+ *
+ * assumes TSBE_TAG = 0
+ */
+ set 0xffff, %g4
+ and %g4, %g2, %g3 /* g3 = pgcnt */
+ srln %g2, 16, %g2 /* g2 = ctxnum */
+ srln %g1, MMU_PAGESHIFT, %g1
+ brnz,pt %g2, 1f /* KCONTEXT? */
+ slln %g1, MMU_PAGESHIFT, %g1 /* g1 = vaddr */
+
+ /* We need to demap in the kernel context */
+ or DEMAP_NUCLEUS | DEMAP_PAGE_TYPE, %g1, %g1
+ set MMU_PAGESIZE, %g2 /* g2 = pgsize */
+4:
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ deccc %g3 /* decr pgcnt */
+ bnz,pt %icc,4b
+ add %g1, %g2, %g1 /* next page */
+ retry
+1:
+ /* We need to demap in a user context */
+ sethi %hi(ctx_pgsz_array), %g4
+ ldn [%g4 + %lo(ctx_pgsz_array)], %g4
+ or DEMAP_PRIMARY | DEMAP_PAGE_TYPE, %g1, %g1
+ ldub [%g4 + %g2], %g4
+ sll %g4, CTXREG_EXT_SHIFT, %g4
+ or %g2, %g4, %g2
+
+ set MMU_PCONTEXT, %g4
+ ldxa [%g4]ASI_DMMU, %g5 /* rd old ctxnum */
+ stxa %g2, [%g4]ASI_DMMU /* wr new ctxnum */
+
+ set MMU_PAGESIZE, %g2 /* g2 = pgsize */
+3:
+ stxa %g0, [%g1]ASI_DTLB_DEMAP
+ stxa %g0, [%g1]ASI_ITLB_DEMAP
+ deccc %g3 /* decr pgcnt */
+ bnz,pt %icc,3b
+ add %g1, %g2, %g1 /* next page */
+
+ stxa %g5, [%g4]ASI_DMMU /* restore old ctxnum */
+ retry
+ SET_SIZE(vtag_flush_pgcnt_tl1)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vtag_flushctx_tl1(uint64_t ctxnum, uint64_t dummy)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flushctx_tl1)
+ /*
+ * x-trap to flush context from tlb
+ *
+ * %g1 = ctxnum
+ */
+ sethi %hi(ctx_pgsz_array), %g4
+ ldn [%g4 + %lo(ctx_pgsz_array)], %g4
+ ldub [%g4 + %g1], %g4
+ sll %g4, CTXREG_EXT_SHIFT, %g4
+ or %g1, %g4, %g1
+ set DEMAP_CTX_TYPE | DEMAP_PRIMARY, %g4
+ set MMU_PCONTEXT, %g3
+ ldxa [%g3]ASI_DMMU, %g5 /* rd old ctxnum */
+ stxa %g1, [%g3]ASI_DMMU /* wr new ctxnum */
+ stxa %g0, [%g4]ASI_DTLB_DEMAP
+ stxa %g0, [%g4]ASI_ITLB_DEMAP
+ stxa %g5, [%g3]ASI_DMMU /* restore old ctxnum */
+ retry
+ SET_SIZE(vtag_flushctx_tl1)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+vtag_flushall_tl1(uint64_t dummy1, uint64_t dummy2)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vtag_flushall_tl1)
+ /*
+ * x-trap to flush the entire tlb (demap-all, I and D)
+ */
+ set DEMAP_ALL_TYPE, %g4
+ stxa %g0, [%g4]ASI_DTLB_DEMAP
+ stxa %g0, [%g4]ASI_ITLB_DEMAP
+ retry
+ SET_SIZE(vtag_flushall_tl1)
+
+#endif /* lint */
+
+
+/*
+ * VAC (virtual address conflict) does not apply to OPL.
+ * VAC resolution is managed by the Olympus processor hardware.
+ * As a result, all OPL VAC flushing routines are no-ops.
+ * (The TL=0 entries simply retl; the *_tl1 entries run from an
+ * x-trap at TL>0 and therefore end with retry instead.)
+ */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vac_flushpage(pfn_t pfnum, int vcolor)
+{}
+
+#else /* lint */
+
+ ENTRY(vac_flushpage)
+ retl
+ nop
+ SET_SIZE(vac_flushpage)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vac_flushpage_tl1(uint64_t pfnum, uint64_t vcolor)
+{}
+
+#else /* lint */
+
+ ENTRY_NP(vac_flushpage_tl1)
+ retry
+ SET_SIZE(vac_flushpage_tl1)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vac_flushcolor(int vcolor, pfn_t pfnum)
+{}
+
+#else /* lint */
+
+ ENTRY(vac_flushcolor)
+ retl
+ nop
+ SET_SIZE(vac_flushcolor)
+
+#endif /* lint */
+
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+vac_flushcolor_tl1(uint64_t vcolor, uint64_t pfnum)
+{}
+
+#else /* lint */
+
+ ENTRY(vac_flushcolor_tl1)
+ retry
+ SET_SIZE(vac_flushcolor_tl1)
+
+#endif /* lint */
+
+#if defined(lint)
+
+int
+idsr_busy(void)
+{
+ return (0);
+}
+
+#else /* lint */
+
+/*
+ * Determine whether or not the IDSR is busy.
+ * Entry: no arguments
+ * Returns: 1 if busy, 0 otherwise
+ *
+ * NOTE(review): the annulled delay slot of "bz,a" executes only when
+ * the branch IS taken, i.e. when IDSR_BUSY is clear - which would set
+ * %o0 = 1 for the idle case. Verify the return sense against the
+ * comment above and against the callers.
+ */
+ ENTRY(idsr_busy)
+ ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
+ clr %o0
+ btst IDSR_BUSY, %g1
+ bz,a,pt %xcc, 1f
+ mov 1, %o0
+1:
+ retl
+ nop
+ SET_SIZE(idsr_busy)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
+{}
+
+/* ARGSUSED */
+void
+init_mondo_nocheck(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
+{}
+
+#else /* lint */
+
+ .global _dispatch_status_busy
+_dispatch_status_busy:
+ .asciz "ASI_INTR_DISPATCH_STATUS error: busy"
+ .align 4
+
+/*
+ * Setup interrupt dispatch data registers
+ * Entry:
+ * %o0 - function or inumber to call
+ * %o1, %o2 - arguments (2 uint64_t's)
+ * Clobbers: %g1, %g2, %g3.
+ * A single membar #Sync after the three stores orders them before return.
+ */
+ .seg "text"
+
+ ENTRY(init_mondo)
+#ifdef DEBUG
+ !
+ ! IDSR should not be busy at the moment
+ !
+ ldxa [%g0]ASI_INTR_DISPATCH_STATUS, %g1
+ btst IDSR_BUSY, %g1
+ bz,pt %xcc, 1f
+ nop
+ sethi %hi(_dispatch_status_busy), %o0
+ call panic
+ or %o0, %lo(_dispatch_status_busy), %o0
+#endif /* DEBUG */
+
+ ALTENTRY(init_mondo_nocheck)
+ !
+ ! interrupt vector dispatch data reg 0
+ !
+1:
+ mov IDDR_0, %g1
+ mov IDDR_1, %g2
+ mov IDDR_2, %g3
+ stxa %o0, [%g1]ASI_INTR_DISPATCH
+
+ !
+ ! interrupt vector dispatch data reg 1
+ !
+ stxa %o1, [%g2]ASI_INTR_DISPATCH
+
+ !
+ ! interrupt vector dispatch data reg 2
+ !
+ stxa %o2, [%g3]ASI_INTR_DISPATCH
+
+ membar #Sync
+ retl
+ nop
+ SET_SIZE(init_mondo_nocheck)
+ SET_SIZE(init_mondo)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+shipit(int upaid, int bn)
+{ return; }
+
+#else /* lint */
+
+/*
+ * Ship mondo to aid using busy/nack pair bn
+ * (aid = target agent id; bn selects the busy/nack pair).
+ * Clobbers %g1, %g2.
+ */
+ ENTRY_NP(shipit)
+ sll %o0, IDCR_PID_SHIFT, %g1 ! IDCR<23:14> = agent id
+ sll %o1, IDCR_BN_SHIFT, %g2 ! IDCR<28:24> = b/n pair
+ or %g1, IDCR_OFFSET, %g1 ! IDCR<13:0> = 0x70
+ or %g1, %g2, %g1
+ stxa %g0, [%g1]ASI_INTR_DISPATCH ! interrupt vector dispatch
+ membar #Sync
+ retl
+ nop
+ SET_SIZE(shipit)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+flush_instr_mem(caddr_t vaddr, size_t len)
+{}
+
+#else /* lint */
+
+/*
+ * flush_instr_mem:
+ * Flush 1 page of the I-$ starting at vaddr
+ * %o0 vaddr
+ * %o1 bytes to be flushed
+ *
+ * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
+ * the stores from all processors so that a FLUSH instruction is only needed
+ * to ensure pipeline is consistent. This means a single flush is sufficient at
+ * the end of a sequence of stores that updates the instruction stream to
+ * ensure correct operation.
+ */
+
+ ENTRY(flush_instr_mem)
+ flush %o0 ! address irrelevant
+ retl
+ nop
+ SET_SIZE(flush_instr_mem)
+
+#endif /* lint */
+
+
+/*
+ * flush_ecache:
+ * %o0 - 64 bit physical address
+ * %o1 - ecache size
+ * %o2 - ecache linesize
+ *
+ * On Olympus-C all three arguments are effectively ignored:
+ * ECACHE_FLUSHALL flushes the entire L2 with a single write to
+ * the L2 control register (see macro above).
+ */
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+flush_ecache(uint64_t physaddr, size_t ecache_size, size_t ecache_linesize)
+{}
+
+#else /* !lint */
+
+ ENTRY(flush_ecache)
+
+ /*
+ * Flush the entire Ecache.
+ */
+ ECACHE_FLUSHALL(%o1, %o2, %o0, %o4)
+ retl
+ nop
+ SET_SIZE(flush_ecache)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+kdi_flush_idcache(int dcache_size, int dcache_lsize, int icache_size,
+ int icache_lsize)
+{
+}
+
+#else /* lint */
+
+ /*
+ * I/D cache flushing is not needed for OPL processors;
+ * the hardware keeps the caches consistent (see the
+ * flush_instr_mem comment above), so this is a no-op.
+ */
+ ENTRY(kdi_flush_idcache)
+ retl
+ nop
+ SET_SIZE(kdi_flush_idcache)
+
+#endif /* lint */
+
+#ifdef TRAPTRACE
+/*
+ * Simplified trap trace macro for OPL. Adapted from us3.
+ * Records one entry (%tl, %tt, %tpc, %tstate, %sp, STICK) into the
+ * per-CPU trap trace buffer and advances the buffer offset with
+ * wraparound. Does nothing when the buffer limit is 0.
+ * Clobbers ptr, scr1, scr2; %asi is saved and restored.
+ */
+#define OPL_TRAPTRACE(ptr, scr1, scr2, label) \
+ CPU_INDEX(scr1, ptr); \
+ sll scr1, TRAPTR_SIZE_SHIFT, scr1; \
+ set trap_trace_ctl, ptr; \
+ add ptr, scr1, scr1; \
+ ld [scr1 + TRAPTR_LIMIT], ptr; \
+ tst ptr; \
+ be,pn %icc, label/**/1; \
+ ldx [scr1 + TRAPTR_PBASE], ptr; \
+ ld [scr1 + TRAPTR_OFFSET], scr1; \
+ add ptr, scr1, ptr; \
+ rd %asi, scr2; \
+ wr %g0, TRAPTR_ASI, %asi; \
+ rd STICK, scr1; \
+ stxa scr1, [ptr + TRAP_ENT_TICK]%asi; \
+ rdpr %tl, scr1; \
+ stha scr1, [ptr + TRAP_ENT_TL]%asi; \
+ rdpr %tt, scr1; \
+ stha scr1, [ptr + TRAP_ENT_TT]%asi; \
+ rdpr %tpc, scr1; \
+ stna scr1, [ptr + TRAP_ENT_TPC]%asi; \
+ rdpr %tstate, scr1; \
+ stxa scr1, [ptr + TRAP_ENT_TSTATE]%asi; \
+ stna %sp, [ptr + TRAP_ENT_SP]%asi; \
+ stna %g0, [ptr + TRAP_ENT_TR]%asi; \
+ stna %g0, [ptr + TRAP_ENT_F1]%asi; \
+ stna %g0, [ptr + TRAP_ENT_F2]%asi; \
+ stna %g0, [ptr + TRAP_ENT_F3]%asi; \
+ stna %g0, [ptr + TRAP_ENT_F4]%asi; \
+ wr %g0, scr2, %asi; \
+ CPU_INDEX(ptr, scr1); \
+ sll ptr, TRAPTR_SIZE_SHIFT, ptr; \
+ set trap_trace_ctl, scr1; \
+ add scr1, ptr, ptr; \
+ ld [ptr + TRAPTR_OFFSET], scr1; \
+ ld [ptr + TRAPTR_LIMIT], scr2; \
+ st scr1, [ptr + TRAPTR_LAST_OFFSET]; \
+ add scr1, TRAP_ENT_SIZE, scr1; \
+ sub scr2, TRAP_ENT_SIZE, scr2; \
+ cmp scr1, scr2; \
+ movge %icc, 0, scr1; \
+ st scr1, [ptr + TRAPTR_OFFSET]; \
+label/**/1:
+#endif /* TRAPTRACE */
+
+
+
+/*
+ * Macros facilitating error handling.
+ */
+
+/*
+ * Save alternative global registers reg1, reg2, reg3
+ * to scratchpad registers 1, 2, 3 respectively.
+ */
+#define OPL_SAVE_GLOBAL(reg1, reg2, reg3) \
+ stxa reg1, [%g0]ASI_SCRATCHPAD ;\
+ mov OPL_SCRATCHPAD_SAVE_AG2, reg1 ;\
+ stxa reg2, [reg1]ASI_SCRATCHPAD ;\
+ mov OPL_SCRATCHPAD_SAVE_AG3, reg1 ;\
+ stxa reg3, [reg1]ASI_SCRATCHPAD
+
+/*
+ * Restore alternative global registers reg1, reg2, reg3
+ * from scratchpad registers 1, 2, 3 respectively.
+ * (Restored in reverse order so reg1 can serve as scratch.)
+ */
+#define OPL_RESTORE_GLOBAL(reg1, reg2, reg3) \
+ mov OPL_SCRATCHPAD_SAVE_AG3, reg1 ;\
+ ldxa [reg1]ASI_SCRATCHPAD, reg3 ;\
+ mov OPL_SCRATCHPAD_SAVE_AG2, reg1 ;\
+ ldxa [reg1]ASI_SCRATCHPAD, reg2 ;\
+ ldxa [%g0]ASI_SCRATCHPAD, reg1
+
+/*
+ * Logs value `val' into the member `offset' of a structure
+ * at physical address `pa'.
+ * Note: `pa' is advanced by `offset' (clobbered).
+ */
+#define LOG_REG(pa, offset, val) \
+ add pa, offset, pa ;\
+ stxa val, [pa]ASI_MEM
+
+/*
+ * Demap the entire I- and D-TLB, then synchronize with a flush.
+ * tmp1 is clobbered.
+ */
+#define FLUSH_ALL_TLB(tmp1) \
+ set DEMAP_ALL_TYPE, tmp1 ;\
+ stxa %g0, [tmp1]ASI_ITLB_DEMAP ;\
+ stxa %g0, [tmp1]ASI_DTLB_DEMAP ;\
+ sethi %hi(FLUSH_ADDR), tmp1 ;\
+ flush tmp1
+
+/*
+ * Extracts the Physaddr to Logging Buffer field of the OPL_SCRATCHPAD_ERRLOG
+ * scratch register by zeroing all other fields. Result is in pa.
+ */
+#define LOG_ADDR(pa) \
+ mov OPL_SCRATCHPAD_ERRLOG, pa ;\
+ ldxa [pa]ASI_SCRATCHPAD, pa ;\
+ sllx pa, 64-ERRLOG_REG_EIDR_SHIFT, pa ;\
+ srlx pa, 64-ERRLOG_REG_EIDR_SHIFT+ERRLOG_REG_ERR_SHIFT, pa ;\
+ sllx pa, ERRLOG_REG_ERR_SHIFT, pa
+
+/*
+ * Advance the per-cpu error log buffer pointer to the next
+ * ERRLOG_SZ entry, making sure that it will modulo (wraparound)
+ * ERRLOG_BUFSZ boundary. The args logpa, bufmask, tmp are
+ * unused input registers for this macro.
+ *
+ * Algorithm:
+ * 1. logpa = contents of errorlog scratchpad register
+ * 2. bufmask = ERRLOG_BUFSZ - 1
+ * 3. tmp = logpa & ~(bufmask) (tmp is now logbase)
+ * 4. logpa += ERRLOG_SZ
+ * 5. logpa = logpa & bufmask (get new offset to logbase)
+ * 6. logpa = tmp | logpa
+ * 7. write logpa back into errorlog scratchpad register
+ *
+ * new logpa = (logpa & ~bufmask) | ((logpa + ERRLOG_SZ) & bufmask)
+ *
+ */
+#define UPDATE_LOGADD(logpa, bufmask, tmp) \
+ set OPL_SCRATCHPAD_ERRLOG, tmp ;\
+ ldxa [tmp]ASI_SCRATCHPAD, logpa ;\
+ set (ERRLOG_BUFSZ-1), bufmask ;\
+ andn logpa, bufmask, tmp ;\
+ add logpa, ERRLOG_SZ, logpa ;\
+ and logpa, bufmask, logpa ;\
+ or tmp, logpa, logpa ;\
+ set OPL_SCRATCHPAD_ERRLOG, tmp ;\
+ stxa logpa, [tmp]ASI_SCRATCHPAD
+
+/*
+ * Log error status registers into the log buffer.
+ * Records SFSR, SFAR, STICK, <TL|TT>, the errlog scratch register
+ * and %tpc, then advances the log pointer. All three args clobbered.
+ */
+#define LOG_SYNC_REG(sfsr, sfar, tmp) \
+ LOG_ADDR(tmp) ;\
+ LOG_REG(tmp, LOG_SFSR_OFF, sfsr) ;\
+ LOG_ADDR(tmp) ;\
+ mov tmp, sfsr ;\
+ LOG_REG(tmp, LOG_SFAR_OFF, sfar) ;\
+ rd STICK, sfar ;\
+ mov sfsr, tmp ;\
+ LOG_REG(tmp, LOG_STICK_OFF, sfar) ;\
+ rdpr %tl, tmp ;\
+ sllx tmp, 32, sfar ;\
+ rdpr %tt, tmp ;\
+ or sfar, tmp, sfar ;\
+ mov sfsr, tmp ;\
+ LOG_REG(tmp, LOG_TL_OFF, sfar) ;\
+ set OPL_SCRATCHPAD_ERRLOG, tmp ;\
+ ldxa [tmp]ASI_SCRATCHPAD, sfar ;\
+ mov sfsr, tmp ;\
+ LOG_REG(tmp, LOG_ASI3_OFF, sfar) ;\
+ rdpr %tpc, sfar ;\
+ mov sfsr, tmp ;\
+ LOG_REG(tmp, LOG_TPC_OFF, sfar) ;\
+ UPDATE_LOGADD(sfsr, sfar, tmp)
+
+/*
+ * Log an urgent-error status register (uger) plus STICK, <TL|TT>,
+ * the errlog scratch register, %tstate and %tpc, then advance the
+ * log pointer. All three args clobbered.
+ */
+#define LOG_UGER_REG(uger, tmp, tmp2) \
+ LOG_ADDR(tmp) ;\
+ mov tmp, tmp2 ;\
+ LOG_REG(tmp2, LOG_UGER_OFF, uger) ;\
+ mov tmp, uger ;\
+ rd STICK, tmp2 ;\
+ LOG_REG(tmp, LOG_STICK_OFF, tmp2) ;\
+ rdpr %tl, tmp ;\
+ sllx tmp, 32, tmp2 ;\
+ rdpr %tt, tmp ;\
+ or tmp2, tmp, tmp2 ;\
+ mov uger, tmp ;\
+ LOG_REG(tmp, LOG_TL_OFF, tmp2) ;\
+ set OPL_SCRATCHPAD_ERRLOG, tmp2 ;\
+ ldxa [tmp2]ASI_SCRATCHPAD, tmp2 ;\
+ mov uger, tmp ;\
+ LOG_REG(tmp, LOG_ASI3_OFF, tmp2) ;\
+ rdpr %tstate, tmp2 ;\
+ mov uger, tmp ;\
+ LOG_REG(tmp, LOG_TSTATE_OFF, tmp2) ;\
+ rdpr %tpc, tmp2 ;\
+ mov uger, tmp ;\
+ LOG_REG(tmp, LOG_TPC_OFF, tmp2) ;\
+ UPDATE_LOGADD(uger, tmp, tmp2)
+
+/*
+ * Scrub the STICK_COMPARE register to clear error by updating
+ * it to a reasonable value for interrupt generation.
+ * Ensure that we observe the CPU_ENABLE flag so that we
+ * don't accidentally enable TICK interrupt in STICK_COMPARE
+ * i.e. no clock interrupt will be generated if CPU_ENABLE flag
+ * is off.
+ *
+ * Note: the andcc on CPU_ENABLE sets %xcc early; the movnz at
+ * the end then selects the plain value when the CPU is enabled,
+ * otherwise the TICKINT_DIS-masked value is kept.
+ */
+#define UPDATE_STICK_COMPARE(tmp1, tmp2) \
+ CPU_ADDR(tmp1, tmp2) ;\
+ lduh [tmp1 + CPU_FLAGS], tmp2 ;\
+ andcc tmp2, CPU_ENABLE, %g0 ;\
+ set OPL_UGER_STICK_DIFF, tmp2 ;\
+ rd STICK, tmp1 ;\
+ add tmp1, tmp2, tmp1 ;\
+ mov 1, tmp2 ;\
+ sllx tmp2, TICKINT_DIS_SHFT, tmp2 ;\
+ or tmp1, tmp2, tmp2 ;\
+ movnz %xcc, tmp1, tmp2 ;\
+ wr tmp2, %g0, STICK_COMPARE
+
+/*
+ * Reset registers that may be corrupted by IAUG_CRE error.
+ * To update interrupt handling related registers force the
+ * clock interrupt.
+ */
+#define IAG_CRE(tmp1, tmp2) \
+ set OPL_SCRATCHPAD_ERRLOG, tmp1 ;\
+ ldxa [tmp1]ASI_SCRATCHPAD, tmp1 ;\
+ srlx tmp1, ERRLOG_REG_EIDR_SHIFT, tmp1 ;\
+ set ERRLOG_REG_EIDR_MASK, tmp2 ;\
+ and tmp1, tmp2, tmp1 ;\
+ stxa tmp1, [%g0]ASI_EIDR ;\
+ wr %g0, 0, SOFTINT ;\
+ sethi %hi(hres_last_tick), tmp1 ;\
+ ldx [tmp1 + %lo(hres_last_tick)], tmp1 ;\
+ set OPL_UGER_STICK_DIFF, tmp2 ;\
+ add tmp1, tmp2, tmp1 ;\
+ wr tmp1, %g0, STICK ;\
+ UPDATE_STICK_COMPARE(tmp1, tmp2)
+
+
+/*
+ * Clear all floating point state: enable the FPU (FPRS_FEF), zero
+ * %gsr, load a zeroed %fsr image from opl_clr_freg, zero every
+ * double register, then leave %fprs cleared.
+ */
+#define CLEAR_FPREGS(tmp) \
+ wr %g0, FPRS_FEF, %fprs ;\
+ wr %g0, %g0, %gsr ;\
+ sethi %hi(opl_clr_freg), tmp ;\
+ or tmp, %lo(opl_clr_freg), tmp ;\
+ ldx [tmp], %fsr ;\
+ fzero %d0 ;\
+ fzero %d2 ;\
+ fzero %d4 ;\
+ fzero %d6 ;\
+ fzero %d8 ;\
+ fzero %d10 ;\
+ fzero %d12 ;\
+ fzero %d14 ;\
+ fzero %d16 ;\
+ fzero %d18 ;\
+ fzero %d20 ;\
+ fzero %d22 ;\
+ fzero %d24 ;\
+ fzero %d26 ;\
+ fzero %d28 ;\
+ fzero %d30 ;\
+ fzero %d32 ;\
+ fzero %d34 ;\
+ fzero %d36 ;\
+ fzero %d38 ;\
+ fzero %d40 ;\
+ fzero %d42 ;\
+ fzero %d44 ;\
+ fzero %d46 ;\
+ fzero %d48 ;\
+ fzero %d50 ;\
+ fzero %d52 ;\
+ fzero %d54 ;\
+ fzero %d56 ;\
+ fzero %d58 ;\
+ fzero %d60 ;\
+ fzero %d62 ;\
+ wr %g0, %g0, %fprs
+
+/* Zero %g1-%g7 in the currently selected global register set. */
+#define CLEAR_GLOBALS() \
+ mov %g0, %g1 ;\
+ mov %g0, %g2 ;\
+ mov %g0, %g3 ;\
+ mov %g0, %g4 ;\
+ mov %g0, %g5 ;\
+ mov %g0, %g6 ;\
+ mov %g0, %g7
+
+/*
+ * We do not clear the alternative globals here because they
+ * are scratch registers, i.e. there is no code that reads from
+ * them without writing to them first. In other words, every
+ * read always follows a write, which makes an extra clear of
+ * the alternative globals unnecessary.
+ */
+#define CLEAR_GEN_REGS(tmp1, label) \
+ set TSTATE_KERN, tmp1 ;\
+ wrpr %g0, tmp1, %tstate ;\
+ mov %g0, %y ;\
+ mov %g0, %asi ;\
+ mov %g0, %ccr ;\
+ mov %g0, %l0 ;\
+ mov %g0, %l1 ;\
+ mov %g0, %l2 ;\
+ mov %g0, %l3 ;\
+ mov %g0, %l4 ;\
+ mov %g0, %l5 ;\
+ mov %g0, %l6 ;\
+ mov %g0, %l7 ;\
+ mov %g0, %i0 ;\
+ mov %g0, %i1 ;\
+ mov %g0, %i2 ;\
+ mov %g0, %i3 ;\
+ mov %g0, %i4 ;\
+ mov %g0, %i5 ;\
+ mov %g0, %i6 ;\
+ mov %g0, %i7 ;\
+ mov %g0, %o1 ;\
+ mov %g0, %o2 ;\
+ mov %g0, %o3 ;\
+ mov %g0, %o4 ;\
+ mov %g0, %o5 ;\
+ mov %g0, %o6 ;\
+ mov %g0, %o7 ;\
+ mov %g0, %o0 ;\
+ mov %g0, %g4 ;\
+ mov %g0, %g5 ;\
+ mov %g0, %g6 ;\
+ mov %g0, %g7 ;\
+ rdpr %tl, tmp1 ;\
+ cmp tmp1, 1 ;\
+ be,pt %xcc, label/**/1 ;\
+ rdpr %pstate, tmp1 ;\
+ wrpr tmp1, PSTATE_AG|PSTATE_IG, %pstate ;\
+ CLEAR_GLOBALS() ;\
+ rdpr %pstate, tmp1 ;\
+ wrpr tmp1, PSTATE_IG|PSTATE_MG, %pstate ;\
+ CLEAR_GLOBALS() ;\
+ rdpr %pstate, tmp1 ;\
+ wrpr tmp1, PSTATE_MG|PSTATE_AG, %pstate ;\
+ ba,pt %xcc, label/**/2 ;\
+ nop ;\
+label/**/1: ;\
+ wrpr tmp1, PSTATE_AG, %pstate ;\
+ CLEAR_GLOBALS() ;\
+ rdpr %pstate, tmp1 ;\
+ wrpr tmp1, PSTATE_AG, %pstate ;\
+label/**/2:
+
+
+/*
+ * Reset all window related registers
+ */
+#define RESET_WINREG(tmp) \
+ sethi %hi(nwin_minus_one), tmp ;\
+ ld [tmp + %lo(nwin_minus_one)], tmp ;\
+ wrpr %g0, tmp, %cwp ;\
+ wrpr %g0, tmp, %cleanwin ;\
+ sub tmp, 1, tmp ;\
+ wrpr %g0, tmp, %cansave ;\
+ wrpr %g0, %g0, %canrestore ;\
+ wrpr %g0, %g0, %otherwin ;\
+ wrpr %g0, PIL_MAX, %pil ;\
+ wrpr %g0, WSTATE_KERN, %wstate
+
+
+/*
+ * If TL > 1, scrub the trap state at TL-1 to a sane kernel state
+ * (TSTATE_KERN, zero %tpc/%tnpc); no-op when TL == 1.
+ */
+#define RESET_PREV_TSTATE(tmp1, tmp2, label) \
+ rdpr %tl, tmp1 ;\
+ subcc tmp1, 1, tmp1 ;\
+ bz,pt %xcc, label/**/1 ;\
+ nop ;\
+ wrpr tmp1, %g0, %tl ;\
+ set TSTATE_KERN, tmp2 ;\
+ wrpr tmp2, %g0, %tstate ;\
+ wrpr %g0, %g0, %tpc ;\
+ wrpr %g0, %g0, %tnpc ;\
+ add tmp1, 1, tmp1 ;\
+ wrpr tmp1, %g0, %tl ;\
+label/**/1:
+
+
+/*
+ * %pstate, %pc, %npc are propagated to %tstate, %tpc, %tnpc,
+ * and we reset these registers here.
+ */
+#define RESET_CUR_TSTATE(tmp) \
+ set TSTATE_KERN, tmp ;\
+ wrpr %g0, tmp, %tstate ;\
+ wrpr %g0, 0, %tpc ;\
+ wrpr %g0, 0, %tnpc ;\
+ RESET_WINREG(tmp)
+
+/*
+ * In case of urgent errors some MMU registers may be
+ * corrupted, so we set here some reasonable values for
+ * them. (Both contexts are zeroed and the TSB base registers
+ * are reloaded from ktsb_base.)
+ */
+
+#if !defined(lint)
+#define RESET_MMU_REGS(tmp1, tmp2, tmp3) \
+ set MMU_PCONTEXT, tmp1 ;\
+ stxa %g0, [tmp1]ASI_DMMU ;\
+ set MMU_SCONTEXT, tmp1 ;\
+ stxa %g0, [tmp1]ASI_DMMU ;\
+ sethi %hi(ktsb_base), tmp1 ;\
+ ldx [tmp1 + %lo(ktsb_base)], tmp2 ;\
+ mov MMU_TSB, tmp3 ;\
+ stxa tmp2, [tmp3]ASI_IMMU ;\
+ stxa tmp2, [tmp3]ASI_DMMU ;\
+ membar #Sync
+
+/* Clear the I- and D-MMU tag access registers. */
+#define RESET_TSB_TAGPTR(tmp) \
+ set MMU_TAG_ACCESS, tmp ;\
+ stxa %g0, [tmp]ASI_IMMU ;\
+ stxa %g0, [tmp]ASI_DMMU ;\
+ membar #Sync
+#endif /* lint */
+
+/*
+ * RESET_TO_PRIV()
+ *
+ * In many cases, we need to force the thread into privilege mode because
+ * privilege mode is the only mode in which the system can continue to work
+ * due to undeterminable user mode information that comes from register
+ * corruption.
+ *
+ * - opl_uger_ctxt
+ * If the error is secondary TSB related register parity, we have no idea
+ * what value is supposed to be for it.
+ *
+ * The below three cases %tstate is not accessible until it is overwritten
+ * with some value, so we have no clue if the thread was running on user mode
+ * or not
+ * - opl_uger_pstate
+ * If the error is %pstate parity, it propagates to %tstate.
+ * - opl_uger_tstate
+ * No need to say the reason
+ * - opl_uger_r
+ * If the error is %ccr or %asi parity, it propagates to %tstate
+ *
+ * For the above four cases, user mode info may not be available for
+ * sys_trap() and user_trap() to work consistently. So we have to force
+ * the thread into privilege mode.
+ *
+ * Forcing the thread to privilege mode requires forcing
+ * regular %g7 to be CPU_THREAD. Because if it was running on user mode,
+ * %g7 will be set in user_trap(). Also since the %sp may be in
+ * an inconsistent state, we need to do a stack reset and switch to
+ * something we know i.e. current thread's kernel stack.
+ * We also reset the window registers and MMU registers just to
+ * make sure.
+ *
+ * To set regular %g7, we need to clear PSTATE_AG bit and need to
+ * use one local register. Note that we are panicking and will never
+ * unwind back so it is ok to clobber a local.
+ *
+ * If the thread was running in user mode, the %tpc value itself might be
+ * within the range of OBP addresses. %tpc must be forced to be zero to prevent
+ * sys_trap() from going to prom_trap()
+ *
+ */
+#define RESET_TO_PRIV(tmp, tmp1, tmp2, local) \
+ RESET_WINREG(tmp) ;\
+ RESET_MMU_REGS(tmp, tmp1, tmp2) ;\
+ CPU_ADDR(tmp, tmp1) ;\
+ ldx [tmp + CPU_THREAD], local ;\
+ ldx [local + T_STACK], tmp ;\
+ sub tmp, STACK_BIAS, %sp ;\
+ rdpr %pstate, tmp ;\
+ wrpr tmp, PSTATE_AG, %pstate ;\
+ mov local, %g7 ;\
+ rdpr %pstate, local ;\
+ wrpr local, PSTATE_AG, %pstate ;\
+ wrpr %g0, 1, %tl ;\
+ set TSTATE_KERN, tmp ;\
+ rdpr %cwp, tmp1 ;\
+ or tmp, tmp1, tmp ;\
+ wrpr tmp, %g0, %tstate ;\
+ wrpr %g0, %tpc
+
+
+#if defined(lint)
+
+void
+ce_err(void)
+{}
+
+#else /* lint */
+
+/*
+ * We normally don't expect CE traps since we disable the
+ * 0x63 trap reporting at the start of day. There is a
+ * small window before we disable them, so let's check for
+ * it. Otherwise, panic.
+ */
+
+ .align 128
+ ENTRY_NP(ce_err)
+ mov AFSR_ECR, %g1
+ ldxa [%g1]ASI_ECR, %g1
+ andcc %g1, ASI_ECR_RTE_UE | ASI_ECR_RTE_CEDG, %g0
+ bz,pn %xcc, 1f
+ nop
+ retry
+1:
+ /*
+ * We did disable the 0x63 trap reporting.
+ * This shouldn't happen - panic.
+ */
+ set trap, %g1
+ rdpr %tt, %g3
+ sethi %hi(sys_trap), %g5
+ jmp %g5 + %lo(sys_trap)
+ sub %g0, 1, %g4
+ SET_SIZE(ce_err)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+void
+ce_err_tl1(void)
+{}
+
+#else /* lint */
+
+/*
+ * We don't use trap for CE detection; a TL>0 CE trap is
+ * unexpected, so hand off to sys_trap()/trap() unconditionally
+ * (%g4 = -1).
+ */
+ ENTRY_NP(ce_err_tl1)
+ set trap, %g1
+ rdpr %tt, %g3
+ sethi %hi(sys_trap), %g5
+ jmp %g5 + %lo(sys_trap)
+ sub %g0, 1, %g4
+ SET_SIZE(ce_err_tl1)
+
+#endif /* lint */
+
+
+#if defined(lint)
+
+void
+async_err(void)
+{}
+
+#else /* lint */
+
+/*
+ * async_err is the default handler for IAE/DAE traps.
+ * For OPL, we patch in the right handler at start of day.
+ * But if an IAE/DAE trap gets generated before the handler
+ * is patched, panic.
+ */
+ ENTRY_NP(async_err)
+ set trap, %g1
+ rdpr %tt, %g3
+ sethi %hi(sys_trap), %g5
+ jmp %g5 + %lo(sys_trap)
+ sub %g0, 1, %g4
+ SET_SIZE(async_err)
+
+#endif /* lint */
+
+#if defined(lint)
+void
+opl_sync_trap(void)
+{}
+#else /* lint */
+
+ .seg ".data"
+ .global opl_clr_freg
+ .global opl_cpu0_err_log
+
+ .align 16
+opl_clr_freg:
+ .word 0
+ .align 16
+
+ .align MMU_PAGESIZE
+opl_cpu0_err_log:
+ .skip MMU_PAGESIZE
+
+/*
+ * Common synchronous error trap handler (tt=0xA, 0x32)
+ * All TL=0 and TL>0 0xA and 0x32 traps vector to this handler.
+ * The error handling can be best summarized as follows:
+ * 0. Do TRAPTRACE if enabled.
+ * 1. Save globals %g1, %g2 & %g3 onto the scratchpad regs.
+ * 2. The SFSR register is read and verified as valid by checking
+ * SFSR.FV bit being set. If the SFSR.FV is not set, the
+ * error cases cannot be decoded/determined and the SFPAR
+ * register that contain the physical faultaddr is also
+ * not valid. Also the SPFAR is only valid for UE/TO/BERR error
+ * cases. Assuming the SFSR.FV is valid:
+ * - BERR(bus error)/TO(timeout)/UE case
+ * If any of these error cases are detected, read the SFPAR
+ * to get the faultaddress. Generate ereport.
+ * - TLB Parity case (only recoverable case)
+ * For DAE, read SFAR for the faultaddress. For IAE,
+ * use %tpc for faultaddress (SFAR is not valid in IAE)
+ * Flush all the tlbs.
+ * Subtract one from the recoverable error count stored in
+ * the error log scratch register. If the threshold limit
+ * is reached (zero) - generate ereport. Else
+ * restore globals and retry (no ereport is generated).
+ * - TLB Multiple hits
+ * For DAE, read SFAR for the faultaddress. For IAE,
+ * use %tpc for faultaddress (SFAR is not valid in IAE).
+ * Flush all tlbs and generate ereport.
+ * 3. TL=0 and TL>0 considerations
+ * - Since both TL=0 & TL>1 traps are made to vector into
+ * the same handler, the underlying assumption/design here is
+ * that any nested error condition (if happens) occurs only
+ * in the handler and the system is assumed to eventually
+ * Red-mode. With this philosophy in mind, the recoverable
+ * TLB Parity error case never check the TL level before it
+ * retry. Note that this is ok for the TL>1 case (assuming we
+ * don't have a nested error) since we always save the globals
+ * %g1, %g2 & %g3 whenever we enter this trap handler.
+ * - Additional TL=0 vs TL>1 handling includes:
+ * - For UE error occurring under TL>1, special handling
+ * is added to prevent the unlikely chance of a cpu-lockup
+ * when a UE was originally detected in user stack and
+ * the spill trap handler taken from sys_trap() so happened
+ * to reference the same UE location. Under the above
+ * condition (TL>1 and UE error), paranoid code is added
+ * to reset window regs so that spill traps can't happen
+ * during the unwind back to TL=0 handling.
+ * Note that we can do that because we are not returning
+ * back.
+ * 4. Ereport generation.
+ * - Ereport generation is performed when we unwind to the TL=0
+ * handling code via sys_trap(). on_trap()/lofault protection
+ * will apply there.
+ *
+ */
+ ENTRY_NP(opl_sync_trap)
+#ifdef TRAPTRACE
+ OPL_TRAPTRACE(%g1, %g2, %g3, opl_sync_trap_lb)
+ rdpr %tt, %g1
+#endif /* TRAPTRACE */
+ cmp %g1, T_INSTR_ERROR
+ bne,pt %xcc, 0f
+ mov MMU_SFSR, %g3
+ ldxa [%g3]ASI_IMMU, %g1 ! IAE trap case tt = 0xa
+ andcc %g1, SFSR_FV, %g0
+ bz,a,pn %xcc, 2f ! Branch if SFSR is invalid and
+ rdpr %tpc, %g2 ! use %tpc for faultaddr instead
+
+ sethi %hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
+ andcc %g1, %g3, %g0 ! Check for UE/BERR/TO errors
+ bz,a,pt %xcc, 1f ! Branch if not UE/BERR/TO and
+ rdpr %tpc, %g2 ! use %tpc as faultaddr
+ set OPL_MMU_SFPAR, %g3 ! In the UE/BERR/TO cases, use
+ ba,pt %xcc, 2f ! SFPAR as faultaddr
+ ldxa [%g3]ASI_IMMU, %g2
+0:
+ ldxa [%g3]ASI_DMMU, %g1 ! DAE trap case tt = 0x32
+ andcc %g1, SFSR_FV, %g0
+ bnz,pt %xcc, 7f ! branch if SFSR.FV is valid
+ mov MMU_SFAR, %g2 ! set %g2 to use SFAR
+ ba,pt %xcc, 2f ! SFSR.FV is not valid, read SFAR
+ ldxa [%g2]ASI_DMMU, %g2 ! for faultaddr
+7:
+ sethi %hi(SFSR_UE|SFSR_BERR|SFSR_TO), %g3
+ andcc %g1, %g3, %g0 ! Check UE/BERR/TO for valid SFPAR
+ movnz %xcc, OPL_MMU_SFPAR, %g2 ! Use SFPAR instead of SFAR for
+ ldxa [%g2]ASI_DMMU, %g2 ! faultaddr
+1:
+ sethi %hi(SFSR_TLB_PRT), %g3
+ andcc %g1, %g3, %g0
+ bz,pt %xcc, 8f ! branch for TLB multi-hit check
+ nop
+ /*
+ * This is the TLB parity error case and it is the
+ * only retryable error case.
+ * Only %g1, %g2 and %g3 are allowed
+ */
+ FLUSH_ALL_TLB(%g3)
+ set OPL_SCRATCHPAD_ERRLOG, %g3
+ ldxa [%g3]ASI_SCRATCHPAD, %g3 ! Read errlog scratchreg
+ and %g3, ERRLOG_REG_NUMERR_MASK, %g3! Extract the error count
+ subcc %g3, 1, %g0 ! Subtract one from the count
+ bz,pn %xcc, 2f ! too many TLB parity errs in a certain
+ nop ! period, branch to generate ereport
+ LOG_SYNC_REG(%g1, %g2, %g3) ! Record into the error log
+ set OPL_SCRATCHPAD_ERRLOG, %g3
+ ldxa [%g3]ASI_SCRATCHPAD, %g2
+ sub %g2, 1, %g2 ! decrement error counter by 1
+ stxa %g2, [%g3]ASI_SCRATCHPAD ! update the errlog scratchreg
+ OPL_RESTORE_GLOBAL(%g1, %g2, %g3)
+ retry
+8:
+ sethi %hi(SFSR_TLB_MUL), %g3
+ andcc %g1, %g3, %g0
+ bz,pt %xcc, 2f ! check for the TLB multi-hit errors
+ nop
+ FLUSH_ALL_TLB(%g3)
+2:
+ /*
+ * non-retryable error handling
+ * now we can use other registers since
+ * we will not be returning back
+ */
+ mov %g1, %g5 ! %g5 = SFSR
+ mov %g2, %g6 ! %g6 = SFPAR or SFAR/tpc
+ LOG_SYNC_REG(%g1, %g2, %g3) ! Record into the error log
+
+ /*
+ * Special case for UE on user stack.
+ * There is a possibility that the same error may come back here
+ * by touching the same UE in spill trap handler taken from
+ * sys_trap(). It ends up with an infinite loop causing a cpu lockup.
+ * Conditions for handling this case are:
+ * - SFSR_FV is valid and SFSR_UE is set
+ * - we are at TL > 1
+ * If the above conditions are true, we force %cansave to be a
+ * big number to prevent spill trap in sys_trap(). Note that
+ * we will not be returning back.
+ */
+ rdpr %tt, %g4 ! %g4 == ttype
+ rdpr %tl, %g1 ! %g1 == tl
+ cmp %g1, 1 ! Check if TL == 1
+ be,pt %xcc, 3f ! branch if we came from TL=0
+ nop
+ andcc %g5, SFSR_FV, %g0 ! see if SFSR.FV is valid
+ bz,pn %xcc, 4f ! branch, checking UE is meaningless
+ sethi %hi(SFSR_UE), %g2
+ andcc %g5, %g2, %g0 ! check for UE
+ bz,pt %xcc, 4f ! branch if not UE
+ nop
+ RESET_WINREG(%g1) ! reset windows to prevent spills
+4:
+ mov %g5, %g3 ! pass SFSR to the 3rd arg
+ mov %g6, %g2 ! pass SFAR to the 2nd arg
+ set opl_cpu_isync_tl1_error, %g1
+ set opl_cpu_dsync_tl1_error, %g6
+ cmp %g4, T_INSTR_ERROR
+ movne %icc, %g6, %g1
+ ba,pt %icc, 6f
+ nop
+3:
+ mov %g5, %g3 ! pass SFSR to the 3rd arg
+ mov %g6, %g2 ! pass SFAR to the 2nd arg
+ set opl_cpu_isync_tl0_error, %g1
+ set opl_cpu_dsync_tl0_error, %g6
+ cmp %g4, T_INSTR_ERROR
+ movne %icc, %g6, %g1
+ ! Common unwind point: sys_trap convention is %g1 = C handler PC,
+ ! %g2 = faultaddr (arg 2), %g3 = SFSR (arg 3), %g4 = PIL
+ ! (set to PIL_15 in the delay slot below).
+6:
+ sethi %hi(sys_trap), %g5
+ jmp %g5 + %lo(sys_trap)
+ mov PIL_15, %g4
+ SET_SIZE(opl_sync_trap)
+#endif /* lint */
+
+#if defined(lint)
+void
+opl_uger_trap(void)
+{}
+#else /* lint */
+/*
+ * Common Urgent error trap handler (tt=0x40)
+ * All TL=0 and TL>0 0x40 traps vector to this handler.
+ * The error handling can be best summarized as follows:
+ * 1. Read the Urgent error status register (UGERSR)
+ * Faultaddress is N/A here and it is not collected.
+ * 2. Check to see if we have a multiple errors case
+ * If so, we enable WEAK_ED (weak error detection) bit
+ * to prevent any potential error storms and branch directly
+ * to generate ereport. (we don't decode/handle individual
+ * error cases when we get a multiple error situation)
+ * 3. Now look for the recoverable error cases which include
+ * IUG_DTLB, IUG_ITLB or COREERR errors. If any of the
+ * recoverable errors are detected, do the following:
+ * - Flush all tlbs.
+ * - Verify that we came from TL=0, if not, generate
+ * ereport. Note that the reason we don't recover
+ * at TL>0 is because the AGs might be corrupted or
+ * inconsistent. We can't save/restore them into
+ * the scratchpad regs like we did for opl_sync_trap().
+ * - Check the INSTEND[5:4] bits in the UGERSR. If the
+ * value is 0x3 (11b), this error is not recoverable.
+ * Generate ereport.
+ * - Subtract one from the recoverable error count stored in
+ * the error log scratch register. If the threshold limit
+ * is reached (zero) - generate ereport.
+ * - If the count is within the limit, update the count
+ * in the error log register (subtract one). Log the error
+ * info in the log buffer. Capture traptrace if enabled.
+ * Retry (no ereport generated)
+ * 4. The rest of the error cases are unrecoverable and will
+ * be handled accordingly (flushing regs, etc as required).
+ * For details on these error cases (UGER_CRE, UGER_CTXT, etc..)
+ * consult the OPL cpu/mem philosophy doc.
+ * Ereport will be generated for these errors.
+ * 5. Ereport generation.
+ * - Ereport generation for urgent error trap always
+ * result in a panic when we unwind to the TL=0 handling
+ * code via sys_trap(). on_trap()/lofault protection do
+ * not apply there.
+ */
+ ENTRY_NP(opl_uger_trap)
+ set ASI_UGERSR, %g2
+ ldxa [%g2]ASI_AFSR, %g1 ! Read the UGERSR reg
+
+ set UGESR_MULTI, %g2
+ andcc %g1, %g2, %g0 ! Check for Multi-errs
+ bz,pt %xcc, opl_uger_is_recover ! branch if not Multi-errs
+ nop
+ set AFSR_ECR, %g2
+ ldxa [%g2]ASI_AFSR, %g3 ! Enable Weak error
+ or %g3, ASI_ECR_WEAK_ED, %g3 ! detect mode to prevent
+ stxa %g3, [%g2]ASI_AFSR ! potential error storms
+ ba %xcc, opl_uger_panic1
+ nop
+
+opl_uger_is_recover:
+ set UGESR_CAN_RECOVER, %g2 ! Check for recoverable
+ andcc %g1, %g2, %g0 ! errors i.e.IUG_DTLB,
+ bz,pt %xcc, opl_uger_cre ! IUG_ITLB or COREERR
+ nop
+
+ /*
+ * Fall thru to handle recoverable case
+ * Need to do the following additional checks to determine
+ * if this is indeed recoverable.
+ * 1. Error trap came from TL=0 and
+ * 2. INSTEND[5:4] bits in UGERSR is not 0x3
+ * 3. Recoverable error count limit not reached
+ *
+ */
+ FLUSH_ALL_TLB(%g3)
+ rdpr %tl, %g3 ! Read TL
+ cmp %g3, 1 ! Check if we came from TL=0
+ bne,pt %xcc, opl_uger_panic ! branch if came from TL>0
+ nop
+ srlx %g1, 4, %g2 ! shift INSTEND[5:4] -> [1:0]
+ and %g2, 3, %g2 ! extract the shifted [1:0] bits
+ cmp %g2, 3 ! check if INSTEND is recoverable
+ be,pt %xcc, opl_uger_panic ! panic if ([1:0] = 11b)
+ nop
+ set OPL_SCRATCHPAD_ERRLOG, %g3
+ ldxa [%g3]ASI_SCRATCHPAD, %g2 ! Read errlog scratch reg
+ and %g2, ERRLOG_REG_NUMERR_MASK, %g3! Extract error count and
+ subcc %g3, 1, %g3 ! subtract one from it
+ bz,pt %xcc, opl_uger_panic ! If count reached zero, too many
+ nop ! errors, branch to generate ereport
+ sub %g2, 1, %g2 ! Subtract one from the count
+ set OPL_SCRATCHPAD_ERRLOG, %g3 ! and write back the updated
+ stxa %g2, [%g3]ASI_SCRATCHPAD ! count into the errlog reg
+ LOG_UGER_REG(%g1, %g2, %g3) ! Log the error info
+#ifdef TRAPTRACE
+ OPL_TRAPTRACE(%g1, %g2, %g3, opl_uger_trap_lb)
+#endif /* TRAPTRACE */
+ retry ! retry - no ereport
+
+ /*
+ * Process the rest of the unrecoverable error cases
+ * All error cases below ultimately branch to either
+ * opl_uger_panic or opl_uger_panic1.
+ * opl_uger_panic1 is the same as opl_uger_panic except
+ * for the additional execution of the RESET_TO_PRIV()
+ * macro that does a heavy handed reset. Read the
+ * comments for RESET_TO_PRIV() macro for more info.
+ */
+opl_uger_cre:
+ set UGESR_IAUG_CRE, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_ctxt
+ nop
+ IAG_CRE(%g2, %g3)
+ set AFSR_ECR, %g2
+ ldxa [%g2]ASI_AFSR, %g3
+ or %g3, ASI_ECR_WEAK_ED, %g3
+ stxa %g3, [%g2]ASI_AFSR
+ ba %xcc, opl_uger_panic
+ nop
+
+opl_uger_ctxt:
+ set UGESR_IAUG_TSBCTXT, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_tsbp
+ nop
+ RESET_MMU_REGS(%g2, %g3, %g4)
+ ba %xcc, opl_uger_panic
+ nop
+
+opl_uger_tsbp:
+ set UGESR_IUG_TSBP, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_pstate
+ nop
+ RESET_TSB_TAGPTR(%g2)
+ ba %xcc, opl_uger_panic
+ nop
+
+opl_uger_pstate:
+ set UGESR_IUG_PSTATE, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_tstate
+ nop
+ RESET_CUR_TSTATE(%g2)
+ ba %xcc, opl_uger_panic1
+ nop
+
+opl_uger_tstate:
+ set UGESR_IUG_TSTATE, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_f
+ nop
+ RESET_PREV_TSTATE(%g2, %g3, opl_uger_tstate_1)
+ ba %xcc, opl_uger_panic1
+ nop
+
+opl_uger_f:
+ set UGESR_IUG_F, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_r
+ nop
+ CLEAR_FPREGS(%g2)
+ ba %xcc, opl_uger_panic
+ nop
+
+opl_uger_r:
+ set UGESR_IUG_R, %g2
+ andcc %g1, %g2, %g0
+ bz,pt %xcc, opl_uger_panic1
+ nop
+ CLEAR_GEN_REGS(%g2, opl_uger_r_1)
+ ba %xcc, opl_uger_panic1
+ nop
+
+ ! Unrecoverable: log the UGERSR and unwind to the TL=0 handler
+ ! (opl_cpu_urgent_error) via sys_trap; per the header comment
+ ! above, that path always results in a panic.
+opl_uger_panic:
+ mov %g1, %g2 ! %g2 = arg #1
+ LOG_UGER_REG(%g1, %g3, %g4)
+ ba %xcc, opl_uger_panic_cmn
+ nop
+
+opl_uger_panic1:
+ mov %g1, %g2 ! %g2 = arg #1
+ LOG_UGER_REG(%g1, %g3, %g4)
+ RESET_TO_PRIV(%g1, %g3, %g4, %l0)
+
+ /*
+ * Set up the argument for sys_trap.
+ * %g2 = arg #1 already set above
+ */
+opl_uger_panic_cmn:
+ rdpr %tl, %g3 ! arg #2
+ set opl_cpu_urgent_error, %g1 ! pc
+ sethi %hi(sys_trap), %g5
+ jmp %g5 + %lo(sys_trap)
+ mov PIL_15, %g4
+ SET_SIZE(opl_uger_trap)
+#endif /* lint */
+
+#if defined(lint)
+
+void
+opl_serr_instr(void)
+{}
+
+#else /* lint */
+/*
+ * The actual trap handler for tt=0x0a, and tt=0x32
+ * Saves %g1-%g3 (OPL_SAVE_GLOBAL) so that the recoverable TLB
+ * parity error path in opl_sync_trap can restore them
+ * (OPL_RESTORE_GLOBAL) and retry, then tail-jumps to the common
+ * sync error handler with the trap type in %g1.
+ */
+ ENTRY_NP(opl_serr_instr)
+ OPL_SAVE_GLOBAL(%g1,%g2,%g3)
+ sethi %hi(opl_sync_trap), %g3
+ jmp %g3 + %lo(opl_sync_trap)
+ rdpr %tt, %g1
+ .align 32
+ SET_SIZE(opl_serr_instr)
+
+#endif /* lint */
+
+#if defined(lint)
+
+void
+opl_ugerr_instr(void)
+{}
+
+#else /* lint */
+/*
+ * The actual trap handler for tt=0x40
+ * Tail-jumps to the common urgent error handler.
+ */
+ ENTRY_NP(opl_ugerr_instr)
+ sethi %hi(opl_uger_trap), %g3
+ jmp %g3 + %lo(opl_uger_trap)
+ nop
+ .align 32
+ SET_SIZE(opl_ugerr_instr)
+
+#endif /* lint */
+
+#if defined(lint)
+/*
+ * Get timestamp (stick).
+ */
+/* ARGSUSED */
+void
+stick_timestamp(int64_t *ts)
+{
+}
+
+#else /* lint */
+
+ /*
+ * Store the current %stick value, with bit 63 (the NPT bit)
+ * cleared by the sllx/srlx pair, into *ts (%o0).
+ */
+ ENTRY_NP(stick_timestamp)
+ rd STICK, %g1 ! read stick reg
+ sllx %g1, 1, %g1
+ srlx %g1, 1, %g1 ! clear npt bit
+
+ retl
+ stx %g1, [%o0] ! store the timestamp
+ SET_SIZE(stick_timestamp)
+
+#endif /* lint */
+
+
+#if defined(lint)
+/*
+ * Set STICK adjusted by skew.
+ */
+/* ARGSUSED */
+void
+stick_adj(int64_t skew)
+{
+}
+
+#else /* lint */
+
+ /*
+ * Add 'skew' (%o0) to %stick with interrupts disabled.
+ * The read/add/write sequence is branched to an aligned
+ * location (.align 16) so the instructions stay tightly
+ * grouped, minimizing the window between reading and
+ * writing back the counter.
+ */
+ ENTRY_NP(stick_adj)
+ rdpr %pstate, %g1 ! save processor state
+ andn %g1, PSTATE_IE, %g3
+ ba 1f ! cache align stick adj
+ wrpr %g0, %g3, %pstate ! turn off interrupts
+
+ .align 16
+1: nop
+
+ rd STICK, %g4 ! read stick reg
+ add %g4, %o0, %o1 ! adjust stick with skew
+ wr %o1, %g0, STICK ! write stick reg
+
+ retl
+ wrpr %g1, %pstate ! restore processor state
+ SET_SIZE(stick_adj)
+
+#endif /* lint */
+
+#if defined(lint)
+/*
+ * Debugger-specific stick retrieval
+ */
+/*ARGSUSED*/
+int
+kdi_get_stick(uint64_t *stickp)
+{
+ return (0);
+}
+
+#else /* lint */
+
+ /*
+ * Store the raw %stick value into *stickp (%o0) and return 0.
+ * NOTE(review): unlike stick_timestamp(), the NPT bit (63) is
+ * not masked here.
+ */
+ ENTRY_NP(kdi_get_stick)
+ rd STICK, %g1
+ stx %g1, [%o0]
+ retl
+ mov %g0, %o0
+ SET_SIZE(kdi_get_stick)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/*ARGSUSED*/
+int
+dtrace_blksuword32(uintptr_t addr, uint32_t *data, int tryagain)
+{ return (0); }
+
+#else
+
+ /*
+ * Store the 32-bit word *data into user address 'addr' using a
+ * committing FP block store, under lofault protection. Returns 0
+ * on success; on fault, returns -1 or tail-calls
+ * dtrace_blksuword32_err() when 'tryagain' is set.
+ */
+ ENTRY(dtrace_blksuword32)
+ save %sp, -SA(MINFRAME + 4), %sp
+
+ rdpr %pstate, %l1
+ andn %l1, PSTATE_IE, %l2 ! disable interrupts to
+ wrpr %g0, %l2, %pstate ! protect our FPU diddling
+
+ rd %fprs, %l0
+ andcc %l0, FPRS_FEF, %g0
+ bz,a,pt %xcc, 1f ! if the fpu is disabled
+ wr %g0, FPRS_FEF, %fprs ! ... enable the fpu
+
+ st %f0, [%fp + STACK_BIAS - 4] ! save %f0 to the stack
+1:
+ set 0f, %l5
+ /*
+ * We're about to write a block full of either total garbage
+ * (not kernel data, don't worry) or user floating-point data
+ * (so it only _looks_ like garbage).
+ */
+ ld [%i1], %f0 ! modify the block
+ membar #Sync
+ stn %l5, [THREAD_REG + T_LOFAULT] ! set up the lofault handler
+ stda %d0, [%i0]ASI_BLK_COMMIT_S ! store the modified block
+ membar #Sync
+ stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
+
+ ! %xcc still reflects the 'andcc %l0, FPRS_FEF' above; no
+ ! intervening instruction writes the condition codes.
+ bz,a,pt %xcc, 1f
+ wr %g0, %l0, %fprs ! restore %fprs
+
+ ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
+1:
+
+ wrpr %g0, %l1, %pstate ! restore interrupts
+
+ ret
+ restore %g0, %g0, %o0
+
+0:
+ membar #Sync
+ stn %g0, [THREAD_REG + T_LOFAULT] ! remove the lofault handler
+
+ ! As above, %xcc still holds the FPRS_FEF test result.
+ bz,a,pt %xcc, 1f
+ wr %g0, %l0, %fprs ! restore %fprs
+
+ ld [%fp + STACK_BIAS - 4], %f0 ! restore %f0
+1:
+
+ wrpr %g0, %l1, %pstate ! restore interrupts
+
+ /*
+ * If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
+ * which deals with watchpoints. Otherwise, just return -1.
+ */
+ brnz,pt %i2, 1f
+ nop
+ ret
+ restore %g0, -1, %o0
+1:
+ call dtrace_blksuword32_err
+ restore
+
+ SET_SIZE(dtrace_blksuword32)
+#endif /* lint */
+
+#if defined(lint)
+/*ARGSUSED*/
+void
+ras_cntr_reset(void *arg)
+{
+}
+#else
+ /*
+ * Re-arm the recoverable error counter in the error log
+ * scratchpad register by setting the NUMERR field back to
+ * all 1s (its maximum value).
+ */
+ ENTRY_NP(ras_cntr_reset)
+ set OPL_SCRATCHPAD_ERRLOG, %o1
+ ldxa [%o1]ASI_SCRATCHPAD, %o0
+ or %o0, ERRLOG_REG_NUMERR_MASK, %o0
+ retl
+ stxa %o0, [%o1]ASI_SCRATCHPAD
+ SET_SIZE(ras_cntr_reset)
+#endif /* lint */
+
+#if defined(lint)
+/* ARGSUSED */
+void
+opl_error_setup(uint64_t cpu_err_log_pa)
+{
+}
+
+#else /* lint */
+ ENTRY_NP(opl_error_setup)
+ /*
+ * Initialize the error log scratchpad register.
+ * Value written: (EIDR & ERRLOG_REG_EIDR_MASK) shifted into
+ * the high field, OR'd with the log buffer PA (%o0), with the
+ * NUMERR field initialized to all 1s (the recoverable error
+ * counter starts at its maximum).
+ */
+ ldxa [%g0]ASI_EIDR, %o2
+ sethi %hi(ERRLOG_REG_EIDR_MASK), %o1
+ or %o1, %lo(ERRLOG_REG_EIDR_MASK), %o1
+ and %o2, %o1, %o3
+ sllx %o3, ERRLOG_REG_EIDR_SHIFT, %o2
+ or %o2, %o0, %o3
+ or %o3, ERRLOG_REG_NUMERR_MASK, %o0
+ set OPL_SCRATCHPAD_ERRLOG, %o1
+ stxa %o0, [%o1]ASI_SCRATCHPAD
+ /*
+ * Disable all restrainable error traps
+ * (clear the RTE_UE and RTE_CEDG bits in the ECR)
+ */
+ mov AFSR_ECR, %o1
+ ldxa [%o1]ASI_AFSR, %o0
+ andn %o0, ASI_ECR_RTE_UE|ASI_ECR_RTE_CEDG, %o0
+ retl
+ stxa %o0, [%o1]ASI_AFSR
+ SET_SIZE(opl_error_setup)
+#endif /* lint */
+
+#if defined(lint)
+/* ARGSUSED */
+void
+opl_mpg_enable(void)
+{
+}
+#else /* lint */
+ ENTRY_NP(opl_mpg_enable)
+ /*
+ * Enable MMU translating multiple page sizes for
+ * sITLB and sDTLB.
+ * Read-modify-write of LSU_MCNTL (via ASI_MCNTL) setting
+ * the MPG_SITLB and MPG_SDTLB bits.
+ */
+ mov LSU_MCNTL, %o0
+ ldxa [%o0] ASI_MCNTL, %o1
+ or %o1, MCNTL_MPG_SITLB | MCNTL_MPG_SDTLB, %o1
+ retl
+ stxa %o1, [%o0] ASI_MCNTL
+ SET_SIZE(opl_mpg_enable)
+#endif /* lint */
+
+#if defined(lint)
+/*
+ * This function is called for each (enabled) CPU. We use it to
+ * initialize error handling related registers.
+ */
+/*ARGSUSED*/
+void
+cpu_feature_init(void)
+{}
+#else /* lint */
+ ENTRY(cpu_feature_init)
+ !
+ ! get the device_id and store the device_id
+ ! in the appropriate cpunodes structure
+ ! given the cpus index
+ !
+ CPU_INDEX(%o0, %o1)
+ mulx %o0, CPU_NODE_SIZE, %o0
+ set cpunodes + DEVICE_ID, %o1
+ ldxa [%g0] ASI_DEVICE_SERIAL_ID, %o2
+ stx %o2, [%o0 + %o1]
+ !
+ ! initialize CPU registers
+ !
+ ! Tail-branch (ba, not call): %o7 is unchanged, so
+ ! opl_cpu_reg_init returns directly to our caller.
+ !
+ ba opl_cpu_reg_init
+ nop
+ SET_SIZE(cpu_feature_init)
+#endif /* lint */
+
+#if defined(lint)
+
+/*
+ * Stub name fixed to match the assembly entry point below
+ * (was "cpu_cleartickpnt", which the non-lint build never
+ * defines).
+ */
+void
+cpu_clearticknpt(void)
+{}
+
+#else /* lint */
+ /*
+ * Clear the NPT (non-privileged trap) bit in the %tick/%stick
+ * registers. In an effort to make the change in the
+ * tick/stick counter as consistent as possible, we disable
+ * all interrupts while we're changing the registers. We also
+ * ensure that the read and write instructions are in the same
+ * line in the instruction cache.
+ */
+ ENTRY_NP(cpu_clearticknpt)
+ rdpr %pstate, %g1 /* save processor state */
+ andn %g1, PSTATE_IE, %g3 /* turn off */
+ wrpr %g0, %g3, %pstate /* interrupts */
+ rdpr %tick, %g2 /* get tick register */
+ brgez,pn %g2, 1f /* if NPT bit off, we're done */
+ mov 1, %g3 /* create mask */
+ sllx %g3, 63, %g3 /* for NPT bit */
+ ba,a,pt %xcc, 2f
+ .align 8 /* Ensure rd/wr in same i$ line */
+2:
+ rdpr %tick, %g2 /* get tick register */
+ wrpr %g3, %g2, %tick /* write tick register, */
+ /* clearing NPT bit */
+1:
+ rd STICK, %g2 /* get stick register */
+ brgez,pn %g2, 3f /* if NPT bit off, we're done */
+ mov 1, %g3 /* create mask */
+ sllx %g3, 63, %g3 /* for NPT bit */
+ ba,a,pt %xcc, 4f
+ .align 8 /* Ensure rd/wr in same i$ line */
+4:
+ rd STICK, %g2 /* get stick register */
+ wr %g3, %g2, STICK /* write stick register, */
+ /* clearing NPT bit */
+3:
+ jmp %g4 + 4
+ wrpr %g0, %g1, %pstate /* restore processor state */
+
+ SET_SIZE(cpu_clearticknpt)
+
+#endif /* lint */
+
+#if defined(lint)
+
+void
+cpu_halt_cpu(void)
+{}
+
+void
+cpu_smt_pause(void)
+{}
+
+#else /* lint */
+
+ /*
+ * Halt the current strand with the suspend instruction.
+ * The compiler/asm currently does not support this suspend
+ * instruction mnemonic, use byte code for now.
+ */
+ ENTRY_NP(cpu_halt_cpu)
+ .word 0x81b01040 ! hand-assembled 'suspend'
+ retl
+ nop
+ SET_SIZE(cpu_halt_cpu)
+
+ /*
+ * Pause the current strand with the sleep instruction.
+ * The compiler/asm currently does not support this sleep
+ * instruction mnemonic, use byte code for now.
+ */
+ ENTRY_NP(cpu_smt_pause)
+ .word 0x81b01060 ! hand-assembled 'sleep'
+ retl
+ nop
+ SET_SIZE(cpu_smt_pause)
+
+#endif /* lint */
diff --git a/usr/src/uts/sun4u/cpu/opl_olympus_copy.s b/usr/src/uts/sun4u/cpu/opl_olympus_copy.s
new file mode 100644
index 0000000000..6918980941
--- /dev/null
+++ b/usr/src/uts/sun4u/cpu/opl_olympus_copy.s
@@ -0,0 +1,3716 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/asm_linkage.h>
+#include <sys/vtrace.h>
+#include <sys/machthread.h>
+#include <sys/clock.h>
+#include <sys/asi.h>
+#include <sys/fsr.h>
+#include <sys/privregs.h>
+
+#if !defined(lint)
+#include "assym.h"
+#endif /* lint */
+
+/*
+ * Pseudo-code to aid in understanding the control flow of the
+ * bcopy/copyin/copyout routines.
+ *
+ * On entry:
+ *
+ * ! Determine whether to use the FP register version
+ * ! or the leaf routine version depending on size
+ * ! of copy and flags. Set up error handling accordingly.
+ * ! The transition point depends on whether the src and
+ * ! dst addresses can be aligned to long word, word,
+ * ! half word, or byte boundaries.
+ * !
+ * ! WARNING: <Register usage convention>
+ * ! For FP version, %l6 holds previous error handling and
+ * ! a flag: TRAMP_FLAG (low bits)
+ * ! for leaf routine version, %o4 holds those values.
+ * ! So either %l6 or %o4 is reserved and not available for
+ * ! any other use.
+ *
+ * if (length <= VIS_COPY_THRESHOLD) ! start with a quick test
+ * go to small_copy; ! to speed short copies
+ *
+ * ! src, dst long word alignable
+ * if (hw_copy_limit_8 == 0) ! hw_copy disabled
+ * go to small_copy;
+ * if (length <= hw_copy_limit_8)
+ * go to small_copy;
+ * go to FPBLK_copy;
+ * }
+ * if (src,dst not alignable) {
+ * if (hw_copy_limit_1 == 0) ! hw_copy disabled
+ * go to small_copy;
+ * if (length <= hw_copy_limit_1)
+ * go to small_copy;
+ * go to FPBLK_copy;
+ * }
+ * if (src,dst halfword alignable) {
+ * if (hw_copy_limit_2 == 0) ! hw_copy disabled
+ * go to small_copy;
+ * if (length <= hw_copy_limit_2)
+ * go to small_copy;
+ * go to FPBLK_copy;
+ * }
+ * if (src,dst word alignable) {
+ * if (hw_copy_limit_4 == 0) ! hw_copy disabled
+ * go to small_copy;
+ * if (length <= hw_copy_limit_4)
+ * go to small_copy;
+ * go to FPBLK_copy;
+ * }
+ *
+ * small_copy:
+ * Setup_leaf_rtn_error_handler; ! diffs for each entry point
+ *
+ * if (count <= 3) ! fast path for tiny copies
+ * go to sm_left; ! special finish up code
+ * else
+ * if (count > CHKSIZE) ! medium sized copies
+ * go to sm_med ! tuned by alignment
+ * if(src&dst not both word aligned) {
+ * sm_movebytes:
+ * move byte by byte in 4-way unrolled loop
+ * fall into sm_left;
+ * sm_left:
+ * move 0-3 bytes byte at a time as needed.
+ * restore error handler and exit.
+ *
+ * } else { ! src&dst are word aligned
+ * check for at least 8 bytes left,
+ * move word at a time, unrolled by 2
+ * when fewer than 8 bytes left,
+ * sm_half: move half word at a time while 2 or more bytes left
+ * sm_byte: move final byte if necessary
+ * sm_exit:
+ * restore error handler and exit.
+ * }
+ *
+ * ! Medium length cases with at least CHKSIZE bytes available
+ * ! method: line up src and dst as best possible, then
+ * ! move data in 4-way unrolled loops.
+ *
+ * sm_med:
+ * if(src&dst unalignable)
+ * go to sm_movebytes
+ * if(src&dst halfword alignable)
+ * go to sm_movehalf
+ * if(src&dst word alignable)
+ * go to sm_moveword
+ * ! fall into long word movement
+ * move bytes until src is word aligned
+ * if not long word aligned, move a word
+ * move long words in 4-way unrolled loop until < 32 bytes left
+ * move long words in 1-way unrolled loop until < 8 bytes left
+ * if zero bytes left, goto sm_exit
+ * if one byte left, go to sm_byte
+ * else go to sm_half
+ *
+ * sm_moveword:
+ * move bytes until src is word aligned
+ * move words in 4-way unrolled loop until < 16 bytes left
+ * move words in 1-way unrolled loop until < 4 bytes left
+ * if zero bytes left, goto sm_exit
+ * if one byte left, go to sm_byte
+ * else go to sm_half
+ *
+ * sm_movehalf:
+ * move a byte if needed to align src on halfword
+ * move halfwords in 4-way unrolled loop until < 8 bytes left
+ * if zero bytes left, goto sm_exit
+ * if one byte left, go to sm_byte
+ * else go to sm_half
+ *
+ *
+ * FPBLK_copy:
+ * %l6 = curthread->t_lofault;
+ * if (%l6 != NULL) {
+ * membar #Sync
+ * curthread->t_lofault = .copyerr;
+ * caller_error_handler = TRUE ! %l6 |= 2
+ * }
+ *
+ * ! for FPU testing we must not migrate cpus
+ * if (curthread->t_lwp == NULL) {
+ * ! Kernel threads do not have pcb's in which to store
+ * ! the floating point state, so disallow preemption during
+ * ! the copy. This also prevents cpu migration.
+ * kpreempt_disable(curthread);
+ * } else {
+ * thread_nomigrate();
+ * }
+ *
+ * old_fprs = %fprs;
+ * old_gsr = %gsr;
+ * if (%fprs.fef) {
+ * %fprs.fef = 1;
+ * save current fpregs on stack using blockstore
+ * } else {
+ * %fprs.fef = 1;
+ * }
+ *
+ *
+ * do_blockcopy_here;
+ *
+ * In lofault handler:
+ * curthread->t_lofault = .copyerr2;
+ * Continue on with the normal exit handler
+ *
+ * On normal exit:
+ * %gsr = old_gsr;
+ * if (old_fprs & FPRS_FEF)
+ * restore fpregs from stack using blockload
+ * else
+ * zero fpregs
+ * %fprs = old_fprs;
+ * membar #Sync
+ * curthread->t_lofault = (%l6 & ~3);
+ * ! following test omitted from copyin/copyout as they
+ * ! will always have a current thread
+ * if (curthread->t_lwp == NULL)
+ * kpreempt_enable(curthread);
+ * else
+ * thread_allowmigrate();
+ * return (0)
+ *
+ * In second lofault handler (.copyerr2):
+ * We've tried to restore fp state from the stack and failed. To
+ * prevent from returning with a corrupted fp state, we will panic.
+ */
+
+/*
+ * Comments about optimization choices
+ *
+ * The initial optimization decision in this code is to determine
+ * whether to use the FP registers for a copy or not. If we don't
+ * use the FP registers, we can execute the copy as a leaf routine,
+ * saving a register save and restore. Also, less elaborate setup
+ * is required, allowing short copies to be completed more quickly.
+ * For longer copies, especially unaligned ones (where the src and
+ * dst do not align to allow simple ldx,stx operation), the FP
+ * registers allow much faster copy operations.
+ *
+ * The estimated extra cost of the FP path will vary depending on
+ * src/dst alignment, dst offset from the next 64 byte FPblock store
+ * boundary, remaining src data after the last full dst cache line is
+ * moved, whether the FP registers need to be saved, and some other
+ * minor issues. The average additional overhead is estimated to be
+ * 400 clocks. Since each non-repeated/predicted tst and branch costs
+ * around 10 clocks, elaborate calculation would slow down all
+ * longer copies and only benefit a small portion of medium sized
+ * copies. Rather than incur such cost, we chose fixed transition
+ * points for each of the alignment choices.
+ *
+ * For the inner loop, here is a comparison of the per cache line
+ * costs for each alignment when src&dst are in cache:
+ *
+ * byte aligned: 108 clocks slower for non-FPBLK
+ * half aligned: 44 clocks slower for non-FPBLK
+ * word aligned: 12 clocks slower for non-FPBLK
+ * long aligned: 4 clocks >>faster<< for non-FPBLK
+ *
+ * The long aligned loop runs faster because it does no prefetching.
+ * That wins if the data is not in cache or there is too little
+ * data to gain much benefit from prefetching. But when there
+ * is more data and that data is not in cache, failing to prefetch
+ * can run much slower. In addition, there is a 2 Kbyte store queue
+ * which will cause the non-FPBLK inner loop to slow for larger copies.
+ * The exact tradeoff is strongly load and application dependent, with
+ * increasing risk of a customer visible performance regression if the
+ * non-FPBLK code is used for larger copies. Studies of synthetic in-cache
+ * vs out-of-cache copy tests in user space suggest 1024 bytes as a safe
+ * upper limit for the non-FPBLK code. To minimize performance regression
+ * risk while still gaining the primary benefits of the improvements to
+ * the non-FPBLK code, we set an upper bound of 1024 bytes for the various
+ * hw_copy_limit_*. Later experimental studies using different values
+ * of hw_copy_limit_* can be used to make further adjustments if
+ * appropriate.
+ *
+ * hw_copy_limit_1 = src and dst are byte aligned but not halfword aligned
+ * hw_copy_limit_2 = src and dst are halfword aligned but not word aligned
+ * hw_copy_limit_4 = src and dst are word aligned but not longword aligned
+ * hw_copy_limit_8 = src and dst are longword aligned
+ *
+ * To say that src and dst are word aligned means that after
+ * some initial alignment activity of moving 0 to 3 bytes,
+ * both the src and dst will be on word boundaries so that
+ * word loads and stores may be used.
+ *
+ * Default values at May,2005 are:
+ * hw_copy_limit_1 = 256
+ * hw_copy_limit_2 = 512
+ * hw_copy_limit_4 = 1024
+ * hw_copy_limit_8 = 1024 (or 1536 on some systems)
+ *
+ *
+ * If hw_copy_limit_? is set to zero, then use of FPBLK copy is
+ * disabled for that alignment choice.
+ * If hw_copy_limit_? is set to a value between 1 and VIS_COPY_THRESHOLD (256)
+ * the value of VIS_COPY_THRESHOLD is used.
+ * It is not envisioned that hw_copy_limit_? will be changed in the field
+ * It is provided to allow for disabling FPBLK copies and to allow
+ * easy testing of alternate values on future HW implementations
+ * that might have different cache sizes, clock rates or instruction
+ * timing rules.
+ *
+ * Our first test for FPBLK copies vs non-FPBLK copies checks a minimum
+ * threshold to speedup all shorter copies (less than 256). That
+ * saves an alignment test, memory reference, and enabling test
+ * for all short copies, or an estimated 24 clocks.
+ *
+ * The order in which these limits are checked does matter since each
+ * non-predicted tst and branch costs around 10 clocks.
+ * If src and dst are randomly selected addresses,
+ * 4 of 8 will not be alignable.
+ * 2 of 8 will be half word alignable.
+ * 1 of 8 will be word alignable.
+ * 1 of 8 will be long word alignable.
+ * But, tests on running kernels show that src and dst to copy code
+ * are typically not on random alignments. Structure copies and
+ * copies of larger data sizes are often on long word boundaries.
+ * So we test the long word alignment case first, then
+ * the byte alignment, then halfword, then word alignment.
+ *
+ * Several times, tests for length are made to split the code
+ * into subcases. These tests often allow later tests to be
+ * avoided. For example, within the non-FPBLK copy, we first
+ * check for tiny copies of 3 bytes or less. That allows us
+ * to use a 4-way unrolled loop for the general byte copy case
+ * without a test on loop entry.
+ * We subdivide the non-FPBLK case further into CHKSIZE bytes and less
+ * vs longer cases. For the really short case, we don't attempt
+ * align src and dst. We try to minimize special case tests in
+ * the shortest loops as each test adds a significant percentage
+ * to the total time.
+ *
+ * For the medium sized cases, we allow ourselves to adjust the
+ * src and dst alignment and provide special cases for each of
+ * the four adjusted alignment cases. The CHKSIZE that was used
+ * to decide between short and medium size was chosen to be 39
+ * as that allows for the worst case of 7 bytes of alignment
+ * shift and 4 times 8 bytes for the first long word unrolling.
+ * That knowledge saves an initial test for length on entry into
+ * the medium cases. If the general loop unrolling factor were
+ * to be increased, this number would also need to be adjusted.
+ *
+ * For all cases in the non-FPBLK code where it is known that at
+ * least 4 chunks of data are available for movement, the
+ * loop is unrolled by four. This 4-way loop runs in 8 clocks
+ * or 2 clocks per data element.
+ *
+ * Instruction alignment is forced by used of .align 16 directives
+ * and nops which are not executed in the code. This
+ * combination of operations shifts the alignment of following
+ * loops to insure that loops are aligned so that their instructions
+ * fall within the minimum number of 4 instruction fetch groups.
+ * If instructions are inserted or removed between the .align
+ * instruction and the unrolled loops, then the alignment needs
+ * to be readjusted. Misaligned loops can add a clock per loop
+ * iteration to the loop timing.
+ *
+ * In a few cases, code is duplicated to avoid a branch. Since
+ * a non-predicted tst and branch takes 10 clocks, this savings
+ * is judged an appropriate time-space tradeoff.
+ *
+ * Within the FPBLK-code, the prefetch method in the inner
+ * loop needs to be explained as it is not standard. Two
+ * prefetches are issued for each cache line instead of one.
+ * The primary one is at the maximum reach of 8 cache lines.
+ * Most of the time, that maximum prefetch reach gives the
+ * cache line more time to reach the processor for systems with
+ * higher processor clocks. But, sometimes memory interference
+ * can cause that prefetch to be dropped. Putting a second
+ * prefetch at a reach of 5 cache lines catches the drops
+ * three iterations later and shows a measured improvement
+ * in performance over any similar loop with a single prefetch.
+ * The prefetches are placed in the loop so they overlap with
+ * non-memory instructions, so that there is no extra cost
+ * when the data is already in-cache.
+ *
+ */
+
+/*
+ * Notes on preserving existing fp state and on membars.
+ *
+ * When a copyOP decides to use fp we may have to preserve existing
+ * floating point state. It is not the caller's state that we need to
+ * preserve - the rest of the kernel does not use fp and, anyway, fp
+ * registers are volatile across a call. Some examples:
+ *
+ * - userland has fp state and is interrupted (device interrupt
+ * or trap) and within the interrupt/trap handling we use
+ * bcopy()
+ * - another (higher level) interrupt or trap handler uses bcopy
+ * while a bcopy from an earlier interrupt is still active
+ * - an asynchronous error trap occurs while fp state exists (in
+ * userland or in kernel copy) and the tl0 component of the handling
+ * uses bcopy
+ * - a user process with fp state incurs a copy-on-write fault and
+ * hwblkpagecopy always uses fp
+ *
+ * We therefore need a per-call place in which to preserve fp state -
+ * using our stack is ideal (and since fp copy cannot be leaf optimized
+ * because of calls it makes, this is no hardship).
+ *
+ * When we have finished fp copy (with its repeated block stores)
+ * we must membar #Sync so that our block stores may complete before
+ * we either restore the original fp state into the fp registers or
+ * return to a caller which may initiate other fp operations that could
+ * modify the fp regs we used before the block stores complete.
+ *
+ * Synchronous faults (eg, unresolvable DMMU miss) that occur while
+ * t_lofault is not NULL will not panic but will instead trampoline
+ * to the registered lofault handler. There is no need for any
+ * membars for these - eg, our store to t_lofault will always be visible to
+ * ourselves and it is our cpu which will take any trap.
+ *
+ * Asynchronous faults (eg, uncorrectable ECC error from memory) that occur
+ * while t_lofault is not NULL will also not panic. Since we're copying
+ * to or from userland the extent of the damage is known - the destination
+ * buffer is incomplete. So trap handlers will trampoline to the lofault
+ * handler in this case which should take some form of error action to
+ * avoid using the incomplete buffer. The trap handler also flags the
+ * fault so that later return-from-trap handling (for the trap that brought
+ * this thread into the kernel in the first place) can notify the process
+ * and reboot the system (or restart the service with Greenline/Contracts).
+ *
+ * Asynchronous faults (eg, uncorrectable ECC error from memory) can
+ * result in deferred error traps - the trap is taken sometime after
+ * the event and the trap PC may not be the PC of the faulting access.
+ * Delivery of such pending traps can be forced by a membar #Sync, acting
+ * as an "error barrier" in this role. To accurately apply the user/kernel
+ * separation described in the preceding paragraph we must force delivery
+ * of deferred traps affecting kernel state before we install a lofault
+ * handler (if we interpose a new lofault handler on an existing one there
+ * is no need to repeat this), and we must force delivery of deferred
+ * errors affecting the lofault-protected region before we clear t_lofault.
+ * Failure to do so results in lost kernel state being interpreted as
+ * affecting a copyin/copyout only, or of an error that really only
+ * affects copy data being interpreted as losing kernel state.
+ *
+ * Since the copy operations may preserve and later restore floating
+ * point state that does not belong to the caller (see examples above),
+ * we must be careful in how we do this in order to prevent corruption
+ * of another program.
+ *
+ * To make sure that floating point state is always saved and restored
+ * correctly, the following "big rules" must be followed when the floating
+ * point registers will be used:
+ *
+ * 1. %l6 always holds the caller's lofault handler. Also in this register,
+ * Bit 1 (FPUSED_FLAG) indicates that the floating point registers are in
+ * use. Bit 2 (TRAMP_FLAG) indicates that the call was to bcopy, and a
+ * lofault handler was set coming in.
+ *
+ * 2. The FPUSED flag indicates that all FP state has been successfully stored
+ * on the stack. It should not be set until this save has been completed.
+ *
+ * 3. The FPUSED flag should not be cleared on exit until all FP state has
+ * been restored from the stack. If an error occurs while restoring
+ * data from the stack, the error handler can check this flag to see if
+ * a restore is necessary.
+ *
+ * 4. Code run under the new lofault handler must be kept to a minimum. In
+ * particular, any calls to FP_ALLOWMIGRATE, which could result in a call
+ * to kpreempt(), should not be made until after the lofault handler has
+ * been restored.
+ */
+
+/*
+ * VIS_COPY_THRESHOLD indicates the minimum number of bytes needed
+ * to "break even" using FP/VIS-accelerated memory operations.
+ * The FPBLK code assumes a minimum number of bytes are available
+ * to be moved on entry. Check that code carefully before
+ * reducing VIS_COPY_THRESHOLD below 256.
+ */
+/*
+ * This shadows sys/machsystm.h which can't be included due to the lack of
+ * _ASM guards in include files it references. Change it here, change it there.
+ */
+#define VIS_COPY_THRESHOLD 256
+
+/*
+ * TEST for very short copies
+ * Be aware that the maximum unroll for the short unaligned case
+ * is SHORTCOPY+1
+ */
+#define SHORTCOPY 3
+/* copies of more than CHKSIZE bytes take the medium (.bc_med) path */
+#define CHKSIZE 39
+
+/*
+ * Indicates that we're to trampoline to the error handler.
+ * Entry points bcopy, copyin_noerr, and copyout_noerr use this flag.
+ * kcopy, copyout, xcopyout, copyin, and xcopyin do not set this flag.
+ */
+#define FPUSED_FLAG 1
+#define TRAMP_FLAG 2
+/* FPUSED_FLAG | TRAMP_FLAG: both flag bits kept in the saved-lofault reg */
+#define MASK_FLAGS 3
+
+/*
+ * Number of outstanding prefetches.
+ * We may need more tuning when Olympus-C processor is available.
+ */
+#define OLYMPUS_C_PREFETCH 4
+#define OLYMPUS_C_2ND_PREFETCH 10
+
+/* a VIS block load/store (ldda/stda ASI_BLK_P) moves 64 bytes */
+#define VIS_BLOCKSIZE 64
+
+/*
+ * Size of stack frame in order to accommodate a 64-byte aligned
+ * floating-point register save area and 2 64-bit temp locations.
+ * All copy functions use two quadrants of fp registers; to assure a
+ * block-aligned two block buffer in which to save we must reserve
+ * three blocks on stack. Not all functions preserve %fprs on stack
+ * or need to preserve %gsr but we use HWCOPYFRAMESIZE for all.
+ *
+ * _______________________________________ <-- %fp + STACK_BIAS
+ * | We may need to preserve 2 quadrants |
+ * | of fp regs, but since we do so with |
+ * | BST/BLD we need room in which to |
+ * | align to VIS_BLOCKSIZE bytes. So |
+ * | this area is 3 * VIS_BLOCKSIZE. | <-- - SAVED_FPREGS_OFFSET
+ * |-------------------------------------|
+ * | 8 bytes to save %fprs | <-- - SAVED_FPRS_OFFSET
+ * |-------------------------------------|
+ * | 8 bytes to save %gsr | <-- - SAVED_GSR_OFFSET
+ * ---------------------------------------
+ */
+#define HWCOPYFRAMESIZE ((VIS_BLOCKSIZE * (2 + 1)) + (2 * 8))
+#define SAVED_FPREGS_OFFSET (VIS_BLOCKSIZE * 3)
+/* subtracted from %fp + STACK_BIAS, then rounded down to VIS_BLOCKSIZE */
+#define SAVED_FPREGS_ADJUST ((VIS_BLOCKSIZE * 2) - 1)
+#define SAVED_FPRS_OFFSET (SAVED_FPREGS_OFFSET + 8)
+#define SAVED_GSR_OFFSET (SAVED_FPRS_OFFSET + 8)
+
+/*
+ * Common macros used by the various versions of the block copy
+ * routines in this file.
+ */
+
+/*
+ * In FP copies if we do not have preserved data to restore over
+ * the fp regs we used then we must zero those regs to avoid
+ * exposing portions of the data to later threads (data security).
+ *
+ * Copy functions use either quadrants 1 and 3 or 2 and 4.
+ *
+ * FZEROQ1Q3: Zero quadrants 1 and 3, ie %f0 - %f15 and %f32 - %f47
+ * FZEROQ2Q4: Zero quadrants 2 and 4, ie %f16 - %f31 and %f48 - %f63
+ *
+ * The instructions below are quicker than repeated fzero instructions
+ * since they can dispatch down two fp pipelines.
+ *
+ * FZEROQ1Q3 zeroes %f0 first and then copies it into the remaining
+ * registers of both quadrants it owns.
+ */
+#define FZEROQ1Q3 \
+ fzero %f0 ;\
+ fmovd %f0, %f2 ;\
+ fmovd %f0, %f4 ;\
+ fmovd %f0, %f6 ;\
+ fmovd %f0, %f8 ;\
+ fmovd %f0, %f10 ;\
+ fmovd %f0, %f12 ;\
+ fmovd %f0, %f14 ;\
+ fmovd %f0, %f32 ;\
+ fmovd %f0, %f34 ;\
+ fmovd %f0, %f36 ;\
+ fmovd %f0, %f38 ;\
+ fmovd %f0, %f40 ;\
+ fmovd %f0, %f42 ;\
+ fmovd %f0, %f44 ;\
+ fmovd %f0, %f46
+
+/*
+ * Zero quadrants 2 and 4. %f16 (zeroed first) is the copy source for
+ * the remaining registers. %f0 must NOT be used as the source here:
+ * it belongs to quadrant 1, which this macro does not zero, and when
+ * the copy code enabled the FPU itself (original %fprs had FPRS_FEF
+ * clear) %f0 may still hold another context's stale fp data - copying
+ * it would leak that data instead of zeroing the registers.
+ */
+#define FZEROQ2Q4 \
+ fzero %f16 ;\
+ fmovd %f16, %f18 ;\
+ fmovd %f16, %f20 ;\
+ fmovd %f16, %f22 ;\
+ fmovd %f16, %f24 ;\
+ fmovd %f16, %f26 ;\
+ fmovd %f16, %f28 ;\
+ fmovd %f16, %f30 ;\
+ fmovd %f16, %f48 ;\
+ fmovd %f16, %f50 ;\
+ fmovd %f16, %f52 ;\
+ fmovd %f16, %f54 ;\
+ fmovd %f16, %f56 ;\
+ fmovd %f16, %f58 ;\
+ fmovd %f16, %f60 ;\
+ fmovd %f16, %f62
+
+/*
+ * Macros to save and restore quadrants 1 and 3 or 2 and 4 to/from the stack.
+ * Used to save and restore in-use fp registers when we want to use FP
+ * and find fp already in use and copy size still large enough to justify
+ * the additional overhead of this save and restore.
+ *
+ * A membar #Sync is needed before save to sync fp ops initiated before
+ * the call to the copy function (by whoever has fp in use); for example
+ * an earlier block load to the quadrant we are about to save may still be
+ * "in flight". A membar #Sync is required at the end of the save to
+ * sync our block store (the copy code is about to begin ldd's to the
+ * first quadrant).
+ *
+ * Similarly: a membar #Sync before restore allows the block stores of
+ * the copy operation to complete before we fill the quadrants with their
+ * original data, and a membar #Sync after restore lets the block loads
+ * of the restore complete before we return to whoever has the fp regs
+ * in use. To avoid repeated membar #Sync we make it the responsibility
+ * of the copy code to membar #Sync immediately after copy is complete
+ * and before using the BLD_*_FROMSTACK macro.
+ *
+ * All four macros clobber tmp1, which is used to form the
+ * VIS_BLOCKSIZE-aligned address of the on-stack save area.
+ */
+#if !defined(lint)
+#define BST_FPQ1Q3_TOSTACK(tmp1) \
+ /* membar #Sync */ ;\
+ add %fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1 ;\
+ and tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */ ;\
+ stda %f0, [tmp1]ASI_BLK_P ;\
+ add tmp1, VIS_BLOCKSIZE, tmp1 ;\
+ stda %f32, [tmp1]ASI_BLK_P ;\
+ membar #Sync
+
+#define BLD_FPQ1Q3_FROMSTACK(tmp1) \
+ /* membar #Sync - provided at copy completion */ ;\
+ add %fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1 ;\
+ and tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */ ;\
+ ldda [tmp1]ASI_BLK_P, %f0 ;\
+ add tmp1, VIS_BLOCKSIZE, tmp1 ;\
+ ldda [tmp1]ASI_BLK_P, %f32 ;\
+ membar #Sync
+
+#define BST_FPQ2Q4_TOSTACK(tmp1) \
+ /* membar #Sync */ ;\
+ add %fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1 ;\
+ and tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */ ;\
+ stda %f16, [tmp1]ASI_BLK_P ;\
+ add tmp1, VIS_BLOCKSIZE, tmp1 ;\
+ stda %f48, [tmp1]ASI_BLK_P ;\
+ membar #Sync
+
+#define BLD_FPQ2Q4_FROMSTACK(tmp1) \
+ /* membar #Sync - provided at copy completion */ ;\
+ add %fp, STACK_BIAS - SAVED_FPREGS_ADJUST, tmp1 ;\
+ and tmp1, -VIS_BLOCKSIZE, tmp1 /* block align */ ;\
+ ldda [tmp1]ASI_BLK_P, %f16 ;\
+ add tmp1, VIS_BLOCKSIZE, tmp1 ;\
+ ldda [tmp1]ASI_BLK_P, %f48 ;\
+ membar #Sync
+#endif
+
+/*
+ * FP_NOMIGRATE and FP_ALLOWMIGRATE. Prevent migration (or, stronger,
+ * prevent preemption if there is no t_lwp to save FP state to on context
+ * switch) before commencing a FP copy, and reallow it on completion or
+ * in error trampoline paths when we were using FP copy.
+ *
+ * Both macros may call other functions, so be aware that all outputs are
+ * forfeit after using these macros. For this reason we do not pass registers
+ * to use - we just use any outputs we want.
+ *
+ * label1 and label2 are distinct numeric local-label names supplied by
+ * the caller; the macro references them as label1f / label2f.
+ *
+ * Pseudo code:
+ *
+ * FP_NOMIGRATE:
+ *
+ * if (curthread->t_lwp) {
+ * thread_nomigrate();
+ * } else {
+ * kpreempt_disable();
+ * }
+ *
+ * FP_ALLOWMIGRATE:
+ *
+ * if (curthread->t_lwp) {
+ * thread_allowmigrate();
+ * } else {
+ * kpreempt_enable();
+ * }
+ */
+
+#define FP_NOMIGRATE(label1, label2) \
+ ldn [THREAD_REG + T_LWP], %o0 ;\
+ brz,a,pn %o0, label1/**/f ;\
+ ldsb [THREAD_REG + T_PREEMPT], %o1 /* annulled: lwp-less only */ ;\
+ call thread_nomigrate ;\
+ nop ;\
+ ba label2/**/f ;\
+ nop ;\
+label1: ;\
+ inc %o1 /* inline kpreempt_disable: t_preempt++ */ ;\
+ stb %o1, [THREAD_REG + T_PREEMPT] ;\
+label2:
+
+#define FP_ALLOWMIGRATE(label1, label2) \
+ ldn [THREAD_REG + T_LWP], %o0 ;\
+ brz,a,pn %o0, label1/**/f ;\
+ ldsb [THREAD_REG + T_PREEMPT], %o1 /* annulled: lwp-less only */ ;\
+ call thread_allowmigrate ;\
+ nop ;\
+ ba label2/**/f ;\
+ nop ;\
+label1: ;\
+ dec %o1 /* inline kpreempt_enable: --t_preempt */ ;\
+ brnz,pn %o1, label2/**/f ;\
+ stb %o1, [THREAD_REG + T_PREEMPT] ;\
+ ldn [THREAD_REG + T_CPU], %o0 ;\
+ ldub [%o0 + CPU_KPRUNRUN], %o0 /* preemption pending? */ ;\
+ brz,pt %o0, label2/**/f ;\
+ nop ;\
+ call kpreempt ;\
+ rdpr %pil, %o0 /* delay: kpreempt(curpil) */ ;\
+label2:
+
+/*
+ * Copy a block of storage, returning an error code if `from' or
+ * `to' takes a kernel pagefault which cannot be resolved.
+ * Returns errno value on pagefault error, 0 if all ok
+ */
+
+#if defined(lint)
+
+/* ARGSUSED */
+int
+kcopy(const void *from, void *to, size_t count)
+{ return(0); }
+
+#else /* lint */
+
+ .seg ".text"
+ .align 4
+
+ ENTRY(kcopy)
+
+ ! %o0 = from, %o1 = to, %o2 = count (see lint prototype above).
+ ! Copies at or below VIS_COPY_THRESHOLD, or at or below the
+ ! per-alignment hw_copy_limit_N tunable (0 disables HW copy),
+ ! take .kcopy_small; larger copies take the FP/VIS .kcopy_more path.
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .kcopy_small ! go to small copy cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .kcopy_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .kcopy_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .kcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .kcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .kcopy_more ! otherwise go to large copy
+ nop
+.kcopy_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .kcopy_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .kcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .kcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .kcopy_more ! otherwise go to large copy
+ nop
+.kcopy_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .kcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .kcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .kcopy_more ! otherwise go to large copy
+ nop
+.kcopy_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .kcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .kcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .kcopy_more ! otherwise go to large copy
+ nop
+
+.kcopy_small:
+ ! kcopy always installs its own lofault handler; TRAMP_FLAG is
+ ! not set, so a fault returns the errno rather than trampolining.
+ sethi %hi(.sm_copyerr), %o5 ! sm_copyerr is lofault value
+ or %o5, %lo(.sm_copyerr), %o5
+ ldn [THREAD_REG + T_LOFAULT], %o4 ! save existing handler
+ membar #Sync ! sync error barrier
+ ba,pt %ncc, .sm_do_copy ! common code
+ stn %o5, [THREAD_REG + T_LOFAULT] ! set t_lofault
+
+.kcopy_more:
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ sethi %hi(.copyerr), %l7 ! copyerr is lofault value
+ or %l7, %lo(.copyerr), %l7
+ ldn [THREAD_REG + T_LOFAULT], %l6 ! save existing handler
+ membar #Sync ! sync error barrier
+ ba,pt %ncc, .do_copy ! common code
+ stn %l7, [THREAD_REG + T_LOFAULT] ! set t_lofault
+
+
+/*
+ * We got here because of a fault during bcopy_more, called from kcopy or bcopy.
+ * Errno value is in %g1. bcopy_more uses fp quadrants 1 and 3.
+ */
+.copyerr:
+ ! Guard the fp-restore sequence itself with .copyerr2 so a fault
+ ! during restore panics instead of recursing.
+ set .copyerr2, %l0
+ membar #Sync ! sync error barrier
+ stn %l0, [THREAD_REG + T_LOFAULT] ! set t_lofault
+ btst FPUSED_FLAG, %l6
+ bz %ncc, 1f
+ and %l6, TRAMP_FLAG, %l0 ! copy trampoline flag to %l0
+
+ ldx [%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2 ! restore gsr
+ wr %o2, 0, %gsr
+
+ ld [%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
+ btst FPRS_FEF, %o3
+ bz,pt %icc, 4f
+ nop
+
+ BLD_FPQ1Q3_FROMSTACK(%o2)
+
+ ba,pt %ncc, 1f
+ wr %o3, 0, %fprs ! restore fprs
+
+4:
+ FZEROQ1Q3
+ wr %o3, 0, %fprs ! restore fprs
+
+ !
+ ! Need to cater for the different expectations of kcopy
+ ! and bcopy. kcopy will *always* set a t_lofault handler
+ ! If it fires, we're expected to just return the error code
+ ! and *not* to invoke any existing error handler. As far as
+ ! bcopy is concerned, we only set t_lofault if there was an
+ ! existing lofault handler. In that case we're expected to
+ ! invoke the previously existing handler after resetting the
+ ! t_lofault value.
+ !
+1:
+ andn %l6, MASK_FLAGS, %l6 ! turn trampoline flag off
+ membar #Sync ! sync error barrier
+ stn %l6, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ FP_ALLOWMIGRATE(5, 6)
+
+ btst TRAMP_FLAG, %l0
+ bnz,pn %ncc, 3f
+ nop
+ ret
+ restore %g1, 0, %o0
+
+3:
+ !
+ ! We're here via bcopy. There *must* have been an error handler
+ ! in place otherwise we would have died a nasty death already.
+ !
+ jmp %l6 ! goto real handler
+ restore %g0, 0, %o0 ! dispose of copy window
+
+/*
+ * We got here because of a fault in .copyerr. We can't safely restore fp
+ * state, so we panic.
+ */
+fp_panic_msg:
+ .asciz "Unable to restore fp state after copy operation"
+
+ .align 4
+.copyerr2:
+ set fp_panic_msg, %o0
+ call panic
+ nop
+
+/*
+ * We got here because of a fault during a small kcopy or bcopy.
+ * No floating point registers are used by the small copies.
+ * Errno value is in %g1.
+ */
+.sm_copyerr:
+1:
+ btst TRAMP_FLAG, %o4
+ membar #Sync
+ andn %o4, TRAMP_FLAG, %o4
+ bnz,pn %ncc, 3f
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g1, %o0
+3:
+ jmp %o4 ! goto real handler
+ mov %g0, %o0 !
+
+ SET_SIZE(kcopy)
+#endif /* lint */
+
+
+/*
+ * Copy a block of storage - must not overlap (from + len <= to).
+ * Registers: l6 - saved t_lofault
+ * (for short copies, o4 - saved t_lofault)
+ *
+ * Copy a page of memory.
+ * Assumes double word alignment and a count >= 256.
+ */
+#if defined(lint)
+
+/* ARGSUSED */
+void
+bcopy(const void *from, void *to, size_t count)
+{}
+
+#else /* lint */
+
+ ENTRY(bcopy)
+
+ ! %o0 = from, %o1 = to, %o2 = count (see lint prototype above).
+ ! Unlike kcopy, bcopy only installs an error handler when one was
+ ! already present (TRAMP_FLAG path - see .bcopy_small/.bcopy_more).
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .bcopy_small ! go to small copy cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .bcopy_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .bcopy_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .bcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .bcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .bcopy_more ! otherwise go to large copy
+ nop
+.bcopy_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .bcopy_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .bcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .bcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .bcopy_more ! otherwise go to large copy
+ nop
+.bcopy_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .bcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .bcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .bcopy_more ! otherwise go to large copy
+ nop
+.bcopy_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .bcopy_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .bcopy_small ! go to small copy
+ nop
+ ba,pt %ncc, .bcopy_more ! otherwise go to large copy
+ nop
+
+ .align 16
+.bcopy_small:
+ ldn [THREAD_REG + T_LOFAULT], %o4 ! save t_lofault
+ tst %o4
+ bz,pt %icc, .sm_do_copy
+ nop
+ sethi %hi(.sm_copyerr), %o5
+ or %o5, %lo(.sm_copyerr), %o5
+ membar #Sync ! sync error barrier
+ stn %o5, [THREAD_REG + T_LOFAULT] ! install new vector
+ or %o4, TRAMP_FLAG, %o4 ! error should trampoline
+.sm_do_copy:
+ cmp %o2, SHORTCOPY ! check for really short case
+ bleu,pt %ncc, .bc_sm_left !
+ cmp %o2, CHKSIZE ! check for medium length cases
+ bgu,pn %ncc, .bc_med !
+ or %o0, %o1, %o3 ! prepare alignment check
+ andcc %o3, 0x3, %g0 ! test for alignment
+ bz,pt %ncc, .bc_sm_word ! branch to word aligned case
+.bc_sm_movebytes:
+ sub %o2, 3, %o2 ! adjust count to allow cc zero test
+.bc_sm_notalign4:
+ ldub [%o0], %o3 ! read byte
+ stb %o3, [%o1] ! write byte
+ subcc %o2, 4, %o2 ! reduce count by 4
+ ldub [%o0 + 1], %o3 ! repeat for a total of 4 bytes
+ add %o0, 4, %o0 ! advance SRC by 4
+ stb %o3, [%o1 + 1]
+ ldub [%o0 - 2], %o3
+ add %o1, 4, %o1 ! advance DST by 4
+ stb %o3, [%o1 - 2]
+ ldub [%o0 - 1], %o3
+ bgt,pt %ncc, .bc_sm_notalign4 ! loop til 3 or fewer bytes remain
+ stb %o3, [%o1 - 1]
+ add %o2, 3, %o2 ! restore count
+.bc_sm_left:
+ tst %o2
+ bz,pt %ncc, .bc_sm_exit ! check for zero length
+ deccc %o2 ! reduce count for cc test
+ ldub [%o0], %o3 ! move one byte
+ bz,pt %ncc, .bc_sm_exit
+ stb %o3, [%o1]
+ ldub [%o0 + 1], %o3 ! move another byte
+ deccc %o2 ! check for more
+ bz,pt %ncc, .bc_sm_exit
+ stb %o3, [%o1 + 1]
+ ldub [%o0 + 2], %o3 ! move final byte
+ stb %o3, [%o1 + 2]
+ membar #Sync ! sync error barrier
+ andn %o4, TRAMP_FLAG, %o4
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+ .align 16
+ nop ! instruction alignment
+ ! see discussion at start of file
+.bc_sm_words:
+ lduw [%o0], %o3 ! read word
+.bc_sm_wordx:
+ subcc %o2, 8, %o2 ! update count
+ stw %o3, [%o1] ! write word
+ add %o0, 8, %o0 ! update SRC
+ lduw [%o0 - 4], %o3 ! read word
+ add %o1, 8, %o1 ! update DST
+ bgt,pt %ncc, .bc_sm_words ! loop til done
+ stw %o3, [%o1 - 4] ! write word
+ addcc %o2, 7, %o2 ! restore count
+ bz,pt %ncc, .bc_sm_exit
+ deccc %o2
+ bz,pt %ncc, .bc_sm_byte
+.bc_sm_half:
+ subcc %o2, 2, %o2 ! reduce count by 2
+ add %o0, 2, %o0 ! advance SRC by 2
+ lduh [%o0 - 2], %o3 ! read half word
+ add %o1, 2, %o1 ! advance DST by 2
+ bgt,pt %ncc, .bc_sm_half ! loop til done
+ sth %o3, [%o1 - 2] ! write half word
+ addcc %o2, 1, %o2 ! restore count
+ bz,pt %ncc, .bc_sm_exit
+ nop
+.bc_sm_byte:
+ ldub [%o0], %o3
+ stb %o3, [%o1]
+ membar #Sync ! sync error barrier
+ andn %o4, TRAMP_FLAG, %o4
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+
+.bc_sm_word:
+ subcc %o2, 4, %o2 ! update count
+ bgt,pt %ncc, .bc_sm_wordx
+ lduw [%o0], %o3 ! read word
+ addcc %o2, 3, %o2 ! restore count
+ bz,pt %ncc, .bc_sm_exit
+ stw %o3, [%o1] ! write word
+ deccc %o2 ! reduce count for cc test
+ ldub [%o0 + 4], %o3 ! load one byte
+ bz,pt %ncc, .bc_sm_exit
+ stb %o3, [%o1 + 4] ! store one byte
+ ldub [%o0 + 5], %o3 ! load second byte
+ deccc %o2
+ bz,pt %ncc, .bc_sm_exit
+ stb %o3, [%o1 + 5] ! store second byte
+ ldub [%o0 + 6], %o3 ! load third byte
+ stb %o3, [%o1 + 6] ! store third byte
+.bc_sm_exit:
+ membar #Sync ! sync error barrier
+ andn %o4, TRAMP_FLAG, %o4
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+
+ .align 16
+.bc_med:
+ xor %o0, %o1, %o3 ! setup alignment check
+ btst 1, %o3
+ bnz,pt %ncc, .bc_sm_movebytes ! unaligned
+ nop
+ btst 3, %o3
+ bnz,pt %ncc, .bc_med_half ! halfword aligned
+ nop
+ btst 7, %o3
+ bnz,pt %ncc, .bc_med_word ! word aligned
+ nop
+.bc_med_long:
+ btst 3, %o0 ! check for
+ bz,pt %ncc, .bc_med_long1 ! word alignment
+ nop
+.bc_med_long0:
+ ldub [%o0], %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ btst 3, %o0
+ bnz,pt %ncc, .bc_med_long0
+ dec %o2
+.bc_med_long1: ! word aligned
+ btst 7, %o0 ! check for long word
+ bz,pt %ncc, .bc_med_long2
+ nop
+ lduw [%o0], %o3 ! load word
+ add %o0, 4, %o0 ! advance SRC by 4
+ stw %o3, [%o1] ! store word
+ add %o1, 4, %o1 ! advance DST by 4
+ sub %o2, 4, %o2 ! reduce count by 4
+!
+! Now long word aligned and have at least 32 bytes to move
+!
+.bc_med_long2:
+ sub %o2, 31, %o2 ! adjust count to allow cc zero test
+.bc_med_lmove:
+ ldx [%o0], %o3 ! read long word
+ stx %o3, [%o1] ! write long word
+ subcc %o2, 32, %o2 ! reduce count by 32
+ ldx [%o0 + 8], %o3 ! repeat for a total of 4 long words
+ add %o0, 32, %o0 ! advance SRC by 32
+ stx %o3, [%o1 + 8]
+ ldx [%o0 - 16], %o3
+ add %o1, 32, %o1 ! advance DST by 32
+ stx %o3, [%o1 - 16]
+ ldx [%o0 - 8], %o3
+ bgt,pt %ncc, .bc_med_lmove ! loop til 31 or fewer bytes left
+ stx %o3, [%o1 - 8]
+ addcc %o2, 24, %o2 ! restore count to long word offset
+ ble,pt %ncc, .bc_med_lextra ! check for more long words to move
+ nop
+.bc_med_lword:
+ ldx [%o0], %o3 ! read long word
+ subcc %o2, 8, %o2 ! reduce count by 8
+ stx %o3, [%o1] ! write long word
+ add %o0, 8, %o0 ! advance SRC by 8
+ bgt,pt %ncc, .bc_med_lword ! loop til 7 or fewer bytes left
+ add %o1, 8, %o1 ! advance DST by 8
+.bc_med_lextra:
+ addcc %o2, 7, %o2 ! restore rest of count
+ bz,pt %ncc, .bc_sm_exit ! if zero, then done
+ deccc %o2
+ bz,pt %ncc, .bc_sm_byte
+ nop
+ ba,pt %ncc, .bc_sm_half
+ nop
+
+ .align 16
+.bc_med_word:
+ btst 3, %o0 ! check for
+ bz,pt %ncc, .bc_med_word1 ! word alignment
+ nop
+.bc_med_word0:
+ ldub [%o0], %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ btst 3, %o0
+ bnz,pt %ncc, .bc_med_word0
+ dec %o2
+!
+! Now word aligned and have at least 36 bytes to move
+!
+.bc_med_word1:
+ sub %o2, 15, %o2 ! adjust count to allow cc zero test
+.bc_med_wmove:
+ lduw [%o0], %o3 ! read word
+ stw %o3, [%o1] ! write word
+ subcc %o2, 16, %o2 ! reduce count by 16
+ lduw [%o0 + 4], %o3 ! repeat for a total of 4 words
+ add %o0, 16, %o0 ! advance SRC by 16
+ stw %o3, [%o1 + 4]
+ lduw [%o0 - 8], %o3
+ add %o1, 16, %o1 ! advance DST by 16
+ stw %o3, [%o1 - 8]
+ lduw [%o0 - 4], %o3
+ bgt,pt %ncc, .bc_med_wmove ! loop til 15 or fewer bytes left
+ stw %o3, [%o1 - 4]
+ addcc %o2, 12, %o2 ! restore count to word offset
+ ble,pt %ncc, .bc_med_wextra ! check for more words to move
+ nop
+.bc_med_word2:
+ lduw [%o0], %o3 ! read word
+ subcc %o2, 4, %o2 ! reduce count by 4
+ stw %o3, [%o1] ! write word
+ add %o0, 4, %o0 ! advance SRC by 4
+ bgt,pt %ncc, .bc_med_word2 ! loop til 3 or fewer bytes left
+ add %o1, 4, %o1 ! advance DST by 4
+.bc_med_wextra:
+ addcc %o2, 3, %o2 ! restore rest of count
+ bz,pt %ncc, .bc_sm_exit ! if zero, then done
+ deccc %o2
+ bz,pt %ncc, .bc_sm_byte
+ nop
+ ba,pt %ncc, .bc_sm_half
+ nop
+
+ .align 16
+.bc_med_half:
+ btst 1, %o0 ! check for
+ bz,pt %ncc, .bc_med_half1 ! half word alignment
+ nop
+ ldub [%o0], %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ dec %o2
+!
+! Now half word aligned and have at least 38 bytes to move
+!
+.bc_med_half1:
+ sub %o2, 7, %o2 ! adjust count to allow cc zero test
+.bc_med_hmove:
+ lduh [%o0], %o3 ! read half word
+ sth %o3, [%o1] ! write half word
+ subcc %o2, 8, %o2 ! reduce count by 8
+ lduh [%o0 + 2], %o3 ! repeat for a total of 4 halfwords
+ add %o0, 8, %o0 ! advance SRC by 8
+ sth %o3, [%o1 + 2]
+ lduh [%o0 - 4], %o3
+ add %o1, 8, %o1 ! advance DST by 8
+ sth %o3, [%o1 - 4]
+ lduh [%o0 - 2], %o3
+ bgt,pt %ncc, .bc_med_hmove ! loop til 7 or fewer bytes left
+ sth %o3, [%o1 - 2]
+ addcc %o2, 7, %o2 ! restore count
+ bz,pt %ncc, .bc_sm_exit
+ deccc %o2
+ bz,pt %ncc, .bc_sm_byte
+ nop
+ ba,pt %ncc, .bc_sm_half
+ nop
+
+ SET_SIZE(bcopy)
+
+/*
+ * The _more entry points are not intended to be used directly by
+ * any caller from outside this file. They are provided to allow
+ * profiling and dtrace of the portions of the copy code that uses
+ * the floating point registers.
+ * This entry is particularly important as DTRACE (at least as of
+ * 4/2004) does not support leaf functions.
+ */
+
+ ENTRY(bcopy_more)
+.bcopy_more:
+ prefetch [%o0], #n_reads
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ ldn [THREAD_REG + T_LOFAULT], %l6 ! save t_lofault
+ tst %l6
+ bz,pt %ncc, .do_copy
+ nop
+ sethi %hi(.copyerr), %o2
+ or %o2, %lo(.copyerr), %o2
+ membar #Sync ! sync error barrier
+ stn %o2, [THREAD_REG + T_LOFAULT] ! install new vector
+ !
+ ! We've already captured whether t_lofault was zero on entry.
+ ! We need to mark ourselves as being from bcopy since both
+ ! kcopy and bcopy use the same code path. If TRAMP_FLAG is set
+ ! and the saved lofault was zero, we won't reset lofault on
+ ! returning.
+ !
+ or %l6, TRAMP_FLAG, %l6
+
+/*
+ * Copies that reach here are larger than VIS_COPY_THRESHOLD bytes
+ * Also, use of FP registers has been tested to be enabled
+ */
+.do_copy:
+ FP_NOMIGRATE(6, 7)
+
+ rd %fprs, %o2 ! check for unused fp
+ st %o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET] ! save orig %fprs
+ btst FPRS_FEF, %o2
+ bz,a,pt %icc, .do_blockcopy
+ wr %g0, FPRS_FEF, %fprs ! annulled unless fp was unused
+
+ BST_FPQ1Q3_TOSTACK(%o2) ! fp in use: preserve quadrants 1 and 3
+
+.do_blockcopy:
+ rd %gsr, %o2
+ stx %o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET] ! save gsr
+ or %l6, FPUSED_FLAG, %l6 ! fp state now saved (big rule 2)
+
+#define REALSRC %i0
+#define DST %i1
+#define CNT %i2
+#define SRC %i3
+#define TMP %i5
+
+ andcc DST, VIS_BLOCKSIZE - 1, TMP
+ bz,pt %ncc, 2f
+ neg TMP
+ add TMP, VIS_BLOCKSIZE, TMP
+
+ ! TMP = bytes required to align DST on FP_BLOCK boundary
+ ! Using SRC as a tmp here
+ cmp TMP, 3
+ bleu,pt %ncc, 1f
+ sub CNT,TMP,CNT ! adjust main count
+ sub TMP, 3, TMP ! adjust for end of loop test
+.bc_blkalign:
+ ldub [REALSRC], SRC ! move 4 bytes per loop iteration
+ stb SRC, [DST]
+ subcc TMP, 4, TMP
+ ldub [REALSRC + 1], SRC
+ add REALSRC, 4, REALSRC
+ stb SRC, [DST + 1]
+ ldub [REALSRC - 2], SRC
+ add DST, 4, DST
+ stb SRC, [DST - 2]
+ ldub [REALSRC - 1], SRC
+ bgu,pt %ncc, .bc_blkalign
+ stb SRC, [DST - 1]
+
+ addcc TMP, 3, TMP ! restore count adjustment
+ bz,pt %ncc, 2f ! no bytes left?
+ nop
+1: ldub [REALSRC], SRC
+ inc REALSRC
+ inc DST
+ deccc TMP
+ bgu %ncc, 1b
+ stb SRC, [DST - 1]
+
+2:
+ membar #StoreLoad
+ andn REALSRC, 0x7, SRC
+
+ ! SRC - 8-byte aligned
+ ! DST - 64-byte aligned
+ ! The loads below run one VIS_BLOCKSIZE ahead of the block stores:
+ ! %f0-%f14 hold raw source doublewords, %f32-%f46 hold the
+ ! faligndata-aligned block being stored.
+ ldd [SRC], %f0
+ prefetch [SRC + (1 * VIS_BLOCKSIZE)], #n_reads
+ alignaddr REALSRC, %g0, %g0
+ ldd [SRC + 0x08], %f2
+ prefetch [SRC + (2 * VIS_BLOCKSIZE)], #n_reads
+ faligndata %f0, %f2, %f32
+ ldd [SRC + 0x10], %f4
+ prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
+ faligndata %f2, %f4, %f34
+ ldd [SRC + 0x18], %f6
+ prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
+ faligndata %f4, %f6, %f36
+ ldd [SRC + 0x20], %f8
+ prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
+ faligndata %f6, %f8, %f38
+ ldd [SRC + 0x28], %f10
+ prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
+ faligndata %f8, %f10, %f40
+ ldd [SRC + 0x30], %f12
+ prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
+ faligndata %f10, %f12, %f42
+ ldd [SRC + 0x38], %f14
+ ldd [SRC + VIS_BLOCKSIZE], %f0
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add SRC, VIS_BLOCKSIZE, SRC
+ prefetch [SRC + (9 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ ba,pt %ncc, 1f
+ prefetch [SRC + (10 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+ .align 32
+1:
+ ldd [SRC + 0x08], %f2
+ faligndata %f12, %f14, %f44
+ ldd [SRC + 0x10], %f4
+ faligndata %f14, %f0, %f46
+ stda %f32, [DST]ASI_BLK_P
+ ldd [SRC + 0x18], %f6
+ faligndata %f0, %f2, %f32
+ ldd [SRC + 0x20], %f8
+ faligndata %f2, %f4, %f34
+ ldd [SRC + 0x28], %f10
+ faligndata %f4, %f6, %f36
+ ldd [SRC + 0x30], %f12
+ faligndata %f6, %f8, %f38
+ ldd [SRC + 0x38], %f14
+ prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE)], #n_reads
+ faligndata %f8, %f10, %f40
+ ldd [SRC + VIS_BLOCKSIZE], %f0
+ prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
+ faligndata %f10, %f12, %f42
+ prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #n_reads
+ prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #one_read
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add DST, VIS_BLOCKSIZE, DST
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ cmp CNT, VIS_BLOCKSIZE + 8
+ bgu,pt %ncc, 1b
+ add SRC, VIS_BLOCKSIZE, SRC
+
+ ! only if REALSRC & 0x7 is 0
+ cmp CNT, VIS_BLOCKSIZE
+ bne %ncc, 3f
+ andcc REALSRC, 0x7, %g0
+ bz,pt %ncc, 2f
+ nop
+3:
+ ! misaligned tail: store the pending block, byte-copy the rest
+ faligndata %f12, %f14, %f44
+ faligndata %f14, %f0, %f46
+ stda %f32, [DST]ASI_BLK_P
+ add DST, VIS_BLOCKSIZE, DST
+ ba,pt %ncc, 3f
+ nop
+2:
+ ! aligned and exactly two blocks remain: finish with fsrc1 moves
+ ldd [SRC + 0x08], %f2
+ fsrc1 %f12, %f44
+ ldd [SRC + 0x10], %f4
+ fsrc1 %f14, %f46
+ stda %f32, [DST]ASI_BLK_P
+ ldd [SRC + 0x18], %f6
+ fsrc1 %f0, %f32
+ ldd [SRC + 0x20], %f8
+ fsrc1 %f2, %f34
+ ldd [SRC + 0x28], %f10
+ fsrc1 %f4, %f36
+ ldd [SRC + 0x30], %f12
+ fsrc1 %f6, %f38
+ ldd [SRC + 0x38], %f14
+ fsrc1 %f8, %f40
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add DST, VIS_BLOCKSIZE, DST
+ add SRC, VIS_BLOCKSIZE, SRC
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ fsrc1 %f10, %f42
+ fsrc1 %f12, %f44
+ fsrc1 %f14, %f46
+ stda %f32, [DST]ASI_BLK_P
+ add DST, VIS_BLOCKSIZE, DST
+ ba,a,pt %ncc, .bcb_exit
+ nop
+
+3: tst CNT
+ bz,a,pt %ncc, .bcb_exit
+ nop
+
+5: ldub [REALSRC], TMP
+ inc REALSRC
+ inc DST
+ deccc CNT
+ bgu %ncc, 5b
+ stb TMP, [DST - 1]
+.bcb_exit:
+ membar #Sync ! let block stores complete (see notes above)
+
+ ldx [%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2 ! restore gsr
+ wr %o2, 0, %gsr
+
+ ld [%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
+ btst FPRS_FEF, %o3
+ bz,pt %icc, 4f
+ nop
+
+ BLD_FPQ1Q3_FROMSTACK(%o2)
+
+ ba,pt %ncc, 2f
+ wr %o3, 0, %fprs ! restore fprs
+4:
+ FZEROQ1Q3
+ wr %o3, 0, %fprs ! restore fprs
+2:
+ membar #Sync ! sync error barrier
+ andn %l6, MASK_FLAGS, %l6
+ stn %l6, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ FP_ALLOWMIGRATE(5, 6)
+ ret
+ restore %g0, 0, %o0
+
+ SET_SIZE(bcopy_more)
+
+#endif /* lint */
+
+/*
+ * Block copy with possibly overlapped operands.
+ *
+ * ovbcopy(from, to, count): overlap-safe byte copy.  If the regions
+ * cannot overlap (count <= |from - to|) the routine tail-branches to
+ * bcopy for speed; otherwise it copies one byte at a time, choosing
+ * the direction (forward when from >= to, backward when from < to)
+ * so that each source byte is read before it can be overwritten.
+ */
+
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+ovbcopy(const void *from, void *to, size_t count)
+{}
+
+#else	/* lint */
+
+	ENTRY(ovbcopy)
+	tst	%o2			! check count
+	bgu,a	%ncc, 1f		! nothing to do or bad arguments
+	subcc	%o0, %o1, %o3		! difference of from and to address
+
+	retl				! return
+	nop
+1:
+	bneg,a	%ncc, 2f
+	neg	%o3			! if < 0, make it positive
+2:	cmp	%o2, %o3		! cmp size and abs(from - to)
+	bleu	%ncc, bcopy		! if size <= abs(diff): use bcopy,
+	.empty				!   no overlap
+	cmp	%o0, %o1		! compare from and to addresses
+	blu	%ncc, .ov_bkwd		! if from < to, copy backwards
+	nop
+	!
+	! Copy forwards.
+	!
+.ov_fwd:
+	ldub	[%o0], %o3		! read from address
+	inc	%o0			! inc from address
+	stb	%o3, [%o1]		! write to address
+	deccc	%o2			! dec count
+	bgu	%ncc, .ov_fwd		! loop till done
+	inc	%o1			! inc to address
+
+	retl				! return
+	nop
+	!
+	! Copy backwards.
+	!
+.ov_bkwd:
+	deccc	%o2			! dec count
+	ldub	[%o0 + %o2], %o3	! get byte at end of src
+	bgu	%ncc, .ov_bkwd		! loop till done
+	stb	%o3, [%o1 + %o2]	! delay slot, store at end of dst
+
+	retl				! return
+	nop
+
+	SET_SIZE(ovbcopy)
+
+#endif	/* lint */
+
+
+/*
+ * hwblkpagecopy()
+ *
+ * Copies exactly one page. This routine assumes the caller (ppcopy)
+ * has already disabled kernel preemption and has checked
+ * use_hw_bcopy. Preventing preemption also prevents cpu migration.
+ *
+ * The page is moved in 64-byte (VIS_BLOCKSIZE) chunks using a
+ * software-pipelined loop: while block N is flushed to the
+ * destination with a block-store (stda ...ASI_BLK_P), block N+1 is
+ * already being loaded into the alternate half of the FP register
+ * file.  Since a page copy is always 8-byte aligned, plain fmovd/fsrc1
+ * moves are used instead of faligndata.
+ *
+ * NOTE(review): SRC/DST/CNT/REALSRC are register aliases #defined
+ * earlier in this file (not visible in this chunk) -- confirm their
+ * mapping onto %i0/%i1 before relying on the in/out comments below.
+ */
+#ifdef lint
+/*ARGSUSED*/
+void
+hwblkpagecopy(const void *src, void *dst)
+{ }
+#else	/* lint */
+	ENTRY(hwblkpagecopy)
+	! get another window w/space for three aligned blocks of saved fpregs
+	prefetch [%o0], #n_reads
+	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+
+	! %i0 - source address (arg)
+	! %i1 - destination address (arg)
+	! %i2 - length of region (not arg)
+	! %l0 - saved fprs
+	! %l1 - pointer to saved fpregs
+
+	rd	%fprs, %l0		! check for unused fp
+	btst	FPRS_FEF, %l0
+	bz,a,pt	%icc, 1f
+	wr	%g0, FPRS_FEF, %fprs
+
+	! FP was live: preserve the caller's FP registers on the stack
+	BST_FPQ1Q3_TOSTACK(%l1)
+
+	! Pipeline prologue: load the first 64-byte block into %f32-%f46
+	! (via %f0-%f14) and start the load of the next block.
+1:	set	PAGESIZE, CNT
+	mov	REALSRC, SRC
+
+	ldd	[SRC], %f0
+	prefetch [SRC + (1 * VIS_BLOCKSIZE)], #n_reads
+	ldd	[SRC + 0x08], %f2
+	prefetch [SRC + (2 * VIS_BLOCKSIZE)], #n_reads
+	fmovd	%f0, %f32
+	ldd	[SRC + 0x10], %f4
+	prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f2, %f34
+	ldd	[SRC + 0x18], %f6
+	prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f4, %f36
+	ldd	[SRC + 0x20], %f8
+	prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f6, %f38
+	ldd	[SRC + 0x28], %f10
+	prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f8, %f40
+	ldd	[SRC + 0x30], %f12
+	prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f10, %f42
+	ldd	[SRC + 0x38], %f14
+	ldd	[SRC + VIS_BLOCKSIZE], %f0
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	SRC, VIS_BLOCKSIZE, SRC
+	prefetch [SRC + (9 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+	ba,pt	%ncc, 2f
+	prefetch [SRC + (10 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+	.align	32
+	! Steady state: store block N from %f32-%f46 while loading
+	! block N+1, until only the final block (plus the 8 bytes
+	! already loaded into %f0) remains.
+2:
+	ldd	[SRC + 0x08], %f2
+	fmovd	%f12, %f44
+	ldd	[SRC + 0x10], %f4
+	fmovd	%f14, %f46
+	stda	%f32, [DST]ASI_BLK_P
+	ldd	[SRC + 0x18], %f6
+	fmovd	%f0, %f32
+	ldd	[SRC + 0x20], %f8
+	fmovd	%f2, %f34
+	ldd	[SRC + 0x28], %f10
+	fmovd	%f4, %f36
+	ldd	[SRC + 0x30], %f12
+	fmovd	%f6, %f38
+	ldd	[SRC + 0x38], %f14
+	prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE)], #n_reads
+	fmovd	%f8, %f40
+	ldd	[SRC + VIS_BLOCKSIZE], %f0
+	prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
+	fmovd	%f10, %f42
+	prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #n_reads
+	prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #one_read
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	DST, VIS_BLOCKSIZE, DST
+	cmp	CNT, VIS_BLOCKSIZE + 8
+	bgu,pt	%ncc, 2b
+	add	SRC, VIS_BLOCKSIZE, SRC
+
+	! trailing block
+	ldd	[SRC + 0x08], %f2
+	fsrc1	%f12, %f44
+	ldd	[SRC + 0x10], %f4
+	fsrc1	%f14, %f46
+	stda	%f32, [DST]ASI_BLK_P
+	ldd	[SRC + 0x18], %f6
+	fsrc1	%f0, %f32
+	ldd	[SRC + 0x20], %f8
+	fsrc1	%f2, %f34
+	ldd	[SRC + 0x28], %f10
+	fsrc1	%f4, %f36
+	ldd	[SRC + 0x30], %f12
+	fsrc1	%f6, %f38
+	ldd	[SRC + 0x38], %f14
+	fsrc1	%f8, %f40
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	DST, VIS_BLOCKSIZE, DST
+	add	SRC, VIS_BLOCKSIZE, SRC
+	fsrc1	%f10, %f42
+	fsrc1	%f12, %f44
+	fsrc1	%f14, %f46
+	stda	%f32, [DST]ASI_BLK_P
+
+	membar	#Sync
+
+	! Epilogue: if FP was live on entry, reload the caller's FP
+	! registers from the stack; otherwise scrub the quads we used.
+	btst	FPRS_FEF, %l0
+	bz,pt	%icc, 2f
+	nop
+
+	BLD_FPQ1Q3_FROMSTACK(%l3)
+	ba	3f
+	nop
+
+2:	FZEROQ1Q3
+
+3:	wr	%l0, 0, %fprs		! restore fprs
+	ret
+	restore	%g0, 0, %o0
+
+	SET_SIZE(hwblkpagecopy)
+#endif	/* lint */
+
+
+/*
+ * Transfer data to and from user space -
+ * Note that these routines can cause faults
+ * It is assumed that the kernel has nothing at
+ * less than KERNELBASE in the virtual address space.
+ *
+ * Note that copyin(9F) and copyout(9F) are part of the
+ * DDI/DKI which specifies that they return '-1' on "errors."
+ *
+ * Sigh.
+ *
+ * So there's two extremely similar routines - xcopyin() and xcopyout()
+ * which return the errno that we've faithfully computed. This
+ * allows other callers (e.g. uiomove(9F)) to work correctly.
+ * Given that these are used pretty heavily, we expand the calling
+ * sequences inline for all flavours (rather than making wrappers).
+ *
+ * There are also stub routines for xcopyout_little and xcopyin_little,
+ * which currently are intended to handle requests of <= 16 bytes from
+ * do_unaligned. Future enhancement to make them handle 8k pages efficiently
+ * is left as an exercise...
+ */
+
+/*
+ * Copy user data to kernel space (copyOP/xcopyOP/copyOP_noerr)
+ *
+ * General theory of operation:
+ *
+ * The only difference between copy{in,out} and
+ * xcopy{in,out} is in the error handling routine they invoke
+ * when a memory access error occurs. xcopyOP returns the errno
+ * while copyOP returns -1 (see above). copy{in,out}_noerr set
+ * a special flag (by oring the TRAMP_FLAG into the fault handler address)
+ * if they are called with a fault handler already in place. That flag
+ * causes the default handlers to trampoline to the previous handler
+ * upon an error.
+ *
+ * None of the copyops routines grab a window until it's decided that
+ * we need to do a HW block copy operation. This saves a window
+ * spill/fill when we're called during socket ops. The typical IO
+ * path won't cause spill/fill traps.
+ *
+ * This code uses a set of 4 limits for the maximum size that will
+ * be copied given a particular input/output address alignment.
+ * If the value for a particular limit is zero, the copy will be performed
+ * by the plain copy loops rather than FPBLK.
+ *
+ * See the description of bcopy above for more details of the
+ * data copying algorithm and the default limits.
+ *
+ */
+
+/*
+ * Copy kernel data to user space (copyout/xcopyout/xcopyout_little).
+ */
+
+#if defined(lint)
+
+
+#else /* lint */
+/*
+ * We save the arguments in the following registers in case of a fault:
+ * kaddr - %l1
+ * uaddr - %l2
+ * count - %l3
+ */
+#define SAVE_SRC %l1
+#define SAVE_DST %l2
+#define SAVE_COUNT %l3
+
+#define SM_SAVE_SRC %g4
+#define SM_SAVE_DST %g5
+#define SM_SAVE_COUNT %o5
+#define ERRNO %l5
+
+
+#define REAL_LOFAULT %l4
+/*
+ * Generic copyio fault handler. This is the first line of defense when a
+ * fault occurs in (x)copyin/(x)copyout. In order for this to function
+ * properly, the value of the 'real' lofault handler should be in REAL_LOFAULT.
+ * This allows us to share common code for all the flavors of the copy
+ * operations, including the _noerr versions.
+ *
+ * Note that this function will restore the original input parameters before
+ * calling REAL_LOFAULT. So the real handler can vector to the appropriate
+ * member of the t_copyop structure, if needed.
+ *
+ * On entry %g1 holds the errno delivered by the trap code (saved into
+ * ERRNO = %l5 for the real handler).  If the faulting copy was using the
+ * FP block-copy path (FPUSED_FLAG set in %l6), the %gsr, %fprs and the
+ * FP registers saved by .do_copyout/.do_copyin are restored here before
+ * the flag is stripped and the previous t_lofault is reinstated.
+ */
+	ENTRY(copyio_fault)
+	membar	#Sync
+	mov	%g1,ERRNO			! save errno in ERRNO
+	btst	FPUSED_FLAG, %l6
+	bz	%ncc, 1f			! FP path not active: skip restore
+	nop
+
+	ldx	[%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2
+	wr	%o2, 0, %gsr		! restore gsr
+
+	ld	[%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
+	btst	FPRS_FEF, %o3
+	bz,pt	%icc, 4f
+	nop
+
+	! caller's FP regs were live: reload them from the stack save area
+	BLD_FPQ2Q4_FROMSTACK(%o2)
+
+	ba,pt	%ncc, 1f
+	wr	%o3, 0, %fprs		! restore fprs
+
+4:
+	! FP was unused on entry: scrub the quads this copy dirtied
+	FZEROQ2Q4
+	wr	%o3, 0, %fprs		! restore fprs
+
+1:
+	andn	%l6, FPUSED_FLAG, %l6
+	membar	#Sync
+	stn	%l6, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	FP_ALLOWMIGRATE(5, 6)
+
+	! hand the original (src, dst, count) back to the real handler
+	mov	SAVE_SRC, %i0
+	mov	SAVE_DST, %i1
+	jmp	REAL_LOFAULT
+	mov	SAVE_COUNT, %i2
+
+	SET_SIZE(copyio_fault)
+
+
+#endif
+
+#if defined(lint)
+
+/*ARGSUSED*/
+int
+copyout(const void *kaddr, void *uaddr, size_t count)
+{ return (0); }
+
+#else	/* lint */
+
+/*
+ * copyout(kaddr, uaddr, count) -- copy kernel data out to user space;
+ * returns 0 on success, -1 on fault (DDI/DKI convention).
+ *
+ * Dispatch: copies <= VIS_COPY_THRESHOLD always take the leaf
+ * .copyout_small path.  Larger copies consult the per-alignment
+ * tunables hw_copy_limit_{1,2,4,8} (selected by the relative
+ * alignment of src^dst): a zero limit disables the FP/VIS path
+ * entirely, otherwise lengths above the limit go to .copyout_more.
+ * User-side stores are performed with ASI_USER alternate-space
+ * stores so a bad user address traps to the lofault handler.
+ */
+	ENTRY(copyout)
+
+	cmp	%o2, VIS_COPY_THRESHOLD	! check for leaf rtn case
+	bleu,pt	%ncc, .copyout_small	! go to larger cases
+	xor	%o0, %o1, %o3		! are src, dst alignable?
+	btst	7, %o3			!
+	bz,pt	%ncc, .copyout_8	! check for longword alignment
+	nop
+	btst	1, %o3			!
+	bz,pt	%ncc, .copyout_2	! check for half-word
+	nop
+	sethi	%hi(hw_copy_limit_1), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_1)], %o3
+	tst	%o3
+	bz,pn	%icc, .copyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .copyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .copyout_more	! otherwise go to large copy
+	nop
+.copyout_2:
+	btst	3, %o3			!
+	bz,pt	%ncc, .copyout_4	! check for word alignment
+	nop
+	sethi	%hi(hw_copy_limit_2), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_2)], %o3
+	tst	%o3
+	bz,pn	%icc, .copyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .copyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .copyout_more	! otherwise go to large copy
+	nop
+.copyout_4:
+	! already checked longword, must be word aligned
+	sethi	%hi(hw_copy_limit_4), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_4)], %o3
+	tst	%o3
+	bz,pn	%icc, .copyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .copyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .copyout_more	! otherwise go to large copy
+	nop
+.copyout_8:
+	sethi	%hi(hw_copy_limit_8), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_8)], %o3
+	tst	%o3
+	bz,pn	%icc, .copyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .copyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .copyout_more	! otherwise go to large copy
+	nop
+
+	.align	16
+	nop				! instruction alignment
+					! see discussion at start of file
+	!
+	! Leaf-routine small copy: no register window, lofault set to
+	! .sm_copyout_err, arguments preserved in SM_SAVE_* globals.
+	!
+.copyout_small:
+	sethi	%hi(.sm_copyout_err), %o5	! .sm_copyout_err is lofault
+	or	%o5, %lo(.sm_copyout_err), %o5
+	ldn	[THREAD_REG + T_LOFAULT], %o4	! save existing handler
+	membar	#Sync				! sync error barrier
+	stn	%o5, [THREAD_REG + T_LOFAULT]	! set t_lofault
+.sm_do_copyout:
+	mov	%o0, SM_SAVE_SRC
+	mov	%o1, SM_SAVE_DST
+	cmp	%o2, SHORTCOPY		! check for really short case
+	bleu,pt	%ncc, .co_sm_left	!
+	mov	%o2, SM_SAVE_COUNT
+	cmp	%o2, CHKSIZE		! check for medium length cases
+	bgu,pn	%ncc, .co_med		!
+	or	%o0, %o1, %o3		! prepare alignment check
+	andcc	%o3, 0x3, %g0		! test for alignment
+	bz,pt	%ncc, .co_sm_word	! branch to word aligned case
+.co_sm_movebytes:
+	sub	%o2, 3, %o2		! adjust count to allow cc zero test
+.co_sm_notalign4:
+	ldub	[%o0], %o3		! read byte
+	subcc	%o2, 4, %o2		! reduce count by 4
+	stba	%o3, [%o1]ASI_USER	! write byte
+	inc	%o1			! advance DST by 1
+	ldub	[%o0 + 1], %o3		! repeat for a total of 4 bytes
+	add	%o0, 4, %o0		! advance SRC by 4
+	stba	%o3, [%o1]ASI_USER
+	inc	%o1			! advance DST by 1
+	ldub	[%o0 - 2], %o3
+	stba	%o3, [%o1]ASI_USER
+	inc	%o1			! advance DST by 1
+	ldub	[%o0 - 1], %o3
+	stba	%o3, [%o1]ASI_USER
+	bgt,pt	%ncc, .co_sm_notalign4	! loop til 3 or fewer bytes remain
+	inc	%o1			! advance DST by 1
+	add	%o2, 3, %o2		! restore count
+.co_sm_left:
+	tst	%o2
+	bz,pt	%ncc, .co_sm_exit	! check for zero length
+	nop
+	ldub	[%o0], %o3		! load one byte
+	deccc	%o2			! reduce count for cc test
+	bz,pt	%ncc, .co_sm_exit
+	stba	%o3,[%o1]ASI_USER	! store one byte
+	ldub	[%o0 + 1], %o3		! load second byte
+	deccc	%o2
+	inc	%o1
+	bz,pt	%ncc, .co_sm_exit
+	stba	%o3,[%o1]ASI_USER	! store second byte
+	ldub	[%o0 + 2], %o3		! load third byte
+	inc	%o1
+	stba	%o3,[%o1]ASI_USER	! store third byte
+	membar	#Sync				! sync error barrier
+	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	retl
+	mov	%g0, %o0		! return 0
+	.align	16
+.co_sm_words:
+	lduw	[%o0], %o3		! read word
+.co_sm_wordx:
+	subcc	%o2, 8, %o2		! update count
+	stwa	%o3, [%o1]ASI_USER	! write word
+	add	%o0, 8, %o0		! update SRC
+	lduw	[%o0 - 4], %o3		! read word
+	add	%o1, 4, %o1		! update DST
+	stwa	%o3, [%o1]ASI_USER	! write word
+	bgt,pt	%ncc, .co_sm_words	! loop til done
+	add	%o1, 4, %o1		! update DST
+	addcc	%o2, 7, %o2		! restore count
+	bz,pt	%ncc, .co_sm_exit
+	nop
+	deccc	%o2
+	bz,pt	%ncc, .co_sm_byte
+.co_sm_half:
+	subcc	%o2, 2, %o2		! reduce count by 2
+	lduh	[%o0], %o3		! read half word
+	add	%o0, 2, %o0		! advance SRC by 2
+	stha	%o3, [%o1]ASI_USER	! write half word
+	bgt,pt	%ncc, .co_sm_half	! loop til done
+	add	%o1, 2, %o1		! advance DST by 2
+	addcc	%o2, 1, %o2		! restore count
+	bz,pt	%ncc, .co_sm_exit
+	nop
+.co_sm_byte:
+	ldub	[%o0], %o3
+	stba	%o3, [%o1]ASI_USER
+	membar	#Sync				! sync error barrier
+	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	retl
+	mov	%g0, %o0		! return 0
+	.align	16
+.co_sm_word:
+	subcc	%o2, 4, %o2		! update count
+	bgt,pt	%ncc, .co_sm_wordx
+	lduw	[%o0], %o3		! read word
+	addcc	%o2, 3, %o2		! restore count
+	bz,pt	%ncc, .co_sm_exit
+	stwa	%o3, [%o1]ASI_USER	! write word
+	deccc	%o2			! reduce count for cc test
+	ldub	[%o0 + 4], %o3		! load one byte
+	add	%o1, 4, %o1
+	bz,pt	%ncc, .co_sm_exit
+	stba	%o3, [%o1]ASI_USER	! store one byte
+	ldub	[%o0 + 5], %o3		! load second byte
+	deccc	%o2
+	inc	%o1
+	bz,pt	%ncc, .co_sm_exit
+	stba	%o3, [%o1]ASI_USER	! store second byte
+	ldub	[%o0 + 6], %o3		! load third byte
+	inc	%o1
+	stba	%o3, [%o1]ASI_USER	! store third byte
+.co_sm_exit:
+	membar	#Sync				! sync error barrier
+	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	retl
+	mov	%g0, %o0		! return 0
+
+	.align 16
+	!
+	! Medium-size copy: pick the widest move (long/word/half/byte)
+	! that the mutual src^dst alignment allows.
+	!
+.co_med:
+	xor	%o0, %o1, %o3		! setup alignment check
+	btst	1, %o3
+	bnz,pt	%ncc, .co_sm_movebytes	! unaligned
+	nop
+	btst	3, %o3
+	bnz,pt	%ncc, .co_med_half	! halfword aligned
+	nop
+	btst	7, %o3
+	bnz,pt	%ncc, .co_med_word	! word aligned
+	nop
+.co_med_long:
+	btst	3, %o0			! check for
+	bz,pt	%ncc, .co_med_long1	! word alignment
+	nop
+.co_med_long0:
+	ldub	[%o0], %o3		! load one byte
+	inc	%o0
+	stba	%o3,[%o1]ASI_USER	! store byte
+	inc	%o1
+	btst	3, %o0
+	bnz,pt	%ncc, .co_med_long0
+	dec	%o2
+.co_med_long1:			! word aligned
+	btst	7, %o0			! check for long word
+	bz,pt	%ncc, .co_med_long2
+	nop
+	lduw	[%o0], %o3		! load word
+	add	%o0, 4, %o0		! advance SRC by 4
+	stwa	%o3, [%o1]ASI_USER	! store word
+	add	%o1, 4, %o1		! advance DST by 4
+	sub	%o2, 4, %o2		! reduce count by 4
+!
+!  Now long word aligned and have at least 32 bytes to move
+!
+.co_med_long2:
+	sub	%o2, 31, %o2		! adjust count to allow cc zero test
+	sub	%o1, 8, %o1		! adjust pointer to allow store in
+					! branch delay slot instead of add
+.co_med_lmove:
+	add	%o1, 8, %o1		! advance DST by 8
+	ldx	[%o0], %o3		! read long word
+	subcc	%o2, 32, %o2		! reduce count by 32
+	stxa	%o3, [%o1]ASI_USER	! write long word
+	add	%o1, 8, %o1		! advance DST by 8
+	ldx	[%o0 + 8], %o3		! repeat for a total for 4 long words
+	add	%o0, 32, %o0		! advance SRC by 32
+	stxa	%o3, [%o1]ASI_USER
+	ldx	[%o0 - 16], %o3
+	add	%o1, 8, %o1		! advance DST by 8
+	stxa	%o3, [%o1]ASI_USER
+	ldx	[%o0 - 8], %o3
+	add	%o1, 8, %o1		! advance DST by 8
+	bgt,pt	%ncc, .co_med_lmove	! loop til 31 or fewer bytes left
+	stxa	%o3, [%o1]ASI_USER
+	add	%o1, 8, %o1		! advance DST by 8
+	addcc	%o2, 24, %o2		! restore count to long word offset
+	ble,pt	%ncc, .co_med_lextra	! check for more long words to move
+	nop
+.co_med_lword:
+	ldx	[%o0], %o3		! read long word
+	subcc	%o2, 8, %o2		! reduce count by 8
+	stxa	%o3, [%o1]ASI_USER	! write long word
+	add	%o0, 8, %o0		! advance SRC by 8
+	bgt,pt	%ncc, .co_med_lword	! loop til 7 or fewer bytes left
+	add	%o1, 8, %o1		! advance DST by 8
+.co_med_lextra:
+	addcc	%o2, 7, %o2		! restore rest of count
+	bz,pt	%ncc, .co_sm_exit	! if zero, then done
+	deccc	%o2
+	bz,pt	%ncc, .co_sm_byte
+	nop
+	ba,pt	%ncc, .co_sm_half
+	nop
+
+	.align 16
+	nop				! instruction alignment
+					! see discussion at start of file
+.co_med_word:
+	btst	3, %o0			! check for
+	bz,pt	%ncc, .co_med_word1	! word alignment
+	nop
+.co_med_word0:
+	ldub	[%o0], %o3		! load one byte
+	inc	%o0
+	stba	%o3,[%o1]ASI_USER	! store byte
+	inc	%o1
+	btst	3, %o0
+	bnz,pt	%ncc, .co_med_word0
+	dec	%o2
+!
+!  Now word aligned and have at least 36 bytes to move
+!
+.co_med_word1:
+	sub	%o2, 15, %o2		! adjust count to allow cc zero test
+.co_med_wmove:
+	lduw	[%o0], %o3		! read word
+	subcc	%o2, 16, %o2		! reduce count by 16
+	stwa	%o3, [%o1]ASI_USER	! write word
+	add	%o1, 4, %o1		! advance DST by 4
+	lduw	[%o0 + 4], %o3		! repeat for a total for 4 words
+	add	%o0, 16, %o0		! advance SRC by 16
+	stwa	%o3, [%o1]ASI_USER
+	add	%o1, 4, %o1		! advance DST by 4
+	lduw	[%o0 - 8], %o3
+	stwa	%o3, [%o1]ASI_USER
+	add	%o1, 4, %o1		! advance DST by 4
+	lduw	[%o0 - 4], %o3
+	stwa	%o3, [%o1]ASI_USER
+	bgt,pt	%ncc, .co_med_wmove	! loop til 15 or fewer bytes left
+	add	%o1, 4, %o1		! advance DST by 4
+	addcc	%o2, 12, %o2		! restore count to word offset
+	ble,pt	%ncc, .co_med_wextra	! check for more words to move
+	nop
+.co_med_word2:
+	lduw	[%o0], %o3		! read word
+	subcc	%o2, 4, %o2		! reduce count by 4
+	stwa	%o3, [%o1]ASI_USER	! write word
+	add	%o0, 4, %o0		! advance SRC by 4
+	bgt,pt	%ncc, .co_med_word2	! loop til 3 or fewer bytes left
+	add	%o1, 4, %o1		! advance DST by 4
+.co_med_wextra:
+	addcc	%o2, 3, %o2		! restore rest of count
+	bz,pt	%ncc, .co_sm_exit	! if zero, then done
+	deccc	%o2
+	bz,pt	%ncc, .co_sm_byte
+	nop
+	ba,pt	%ncc, .co_sm_half
+	nop
+
+	.align 16
+	nop				! instruction alignment
+	nop				! see discussion at start of file
+	nop
+.co_med_half:
+	btst	1, %o0			! check for
+	bz,pt	%ncc, .co_med_half1	! half word alignment
+	nop
+	ldub	[%o0], %o3		! load one byte
+	inc	%o0
+	stba	%o3,[%o1]ASI_USER	! store byte
+	inc	%o1
+	dec	%o2
+!
+!  Now half word aligned and have at least 38 bytes to move
+!
+.co_med_half1:
+	sub	%o2, 7, %o2		! adjust count to allow cc zero test
+.co_med_hmove:
+	lduh	[%o0], %o3		! read half word
+	subcc	%o2, 8, %o2		! reduce count by 8
+	stha	%o3, [%o1]ASI_USER	! write half word
+	add	%o1, 2, %o1		! advance DST by 2
+	lduh	[%o0 + 2], %o3		! repeat for a total for 4 halfwords
+	add	%o0, 8, %o0		! advance SRC by 8
+	stha	%o3, [%o1]ASI_USER
+	add	%o1, 2, %o1		! advance DST by 2
+	lduh	[%o0 - 4], %o3
+	stha	%o3, [%o1]ASI_USER
+	add	%o1, 2, %o1		! advance DST by 2
+	lduh	[%o0 - 2], %o3
+	stha	%o3, [%o1]ASI_USER
+	bgt,pt	%ncc, .co_med_hmove	! loop til 7 or fewer bytes left
+	add	%o1, 2, %o1		! advance DST by 2
+	addcc	%o2, 7, %o2		! restore count
+	bz,pt	%ncc, .co_sm_exit
+	deccc	%o2
+	bz,pt	%ncc, .co_sm_byte
+	nop
+	ba,pt	%ncc, .co_sm_half
+	nop
+
+/*
+ * We got here because of a fault during short copyout.
+ * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
+ * If the thread has a t_copyops vector installed, re-invoke its
+ * copyout member with the original arguments instead.
+ */
+.sm_copyout_err:
+	membar	#Sync
+	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	mov	SM_SAVE_SRC, %o0
+	mov	SM_SAVE_DST, %o1
+	mov	SM_SAVE_COUNT, %o2
+	ldn	[THREAD_REG + T_COPYOPS], %o3	! check for copyop handler
+	tst	%o3
+	bz,pt	%ncc, 3f			! if not, return error
+	nop
+	ldn	[%o3 + CP_COPYOUT], %o5		! if handler, invoke it with
+	jmp	%o5				! original arguments
+	nop
+3:
+	retl
+	or	%g0, -1, %o0		! return error value
+
+	SET_SIZE(copyout)
+
<br/>
+/*
+ * The _more entry points are not intended to be used directly by
+ * any caller from outside this file. They are provided to allow
+ * profiling and dtrace of the portions of the copy code that uses
+ * the floating point registers.
+ * This entry is particularly important as DTRACE (at least as of
+ * 4/2004) does not support leaf functions.
+ *
+ * copyout_more is the large-copy path: it takes a register window,
+ * installs copyio_fault as the lofault handler, pins the thread to
+ * its CPU (FP_NOMIGRATE), saves any live FP state, then copies in
+ * 64-byte VIS blocks with faligndata and stda ...ASI_BLK_AIUS
+ * (user-space block stores), byte-copying the unaligned head/tail.
+ */
+
+	ENTRY(copyout_more)
+.copyout_more:
+	prefetch [%o0], #n_reads
+	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+	set	.copyout_err, REAL_LOFAULT
+
+/*
+ * Copy outs that reach here are larger than VIS_COPY_THRESHOLD bytes
+ */
+.do_copyout:
+	set	copyio_fault, %l7		! .copyio_fault is lofault val
+
+	ldn	[THREAD_REG + T_LOFAULT], %l6	! save existing handler
+	membar	#Sync				! sync error barrier
+	stn	%l7, [THREAD_REG + T_LOFAULT]	! set t_lofault
+
+	! stash args where copyio_fault can recover them on error
+	mov	%i0, SAVE_SRC
+	mov	%i1, SAVE_DST
+	mov	%i2, SAVE_COUNT
+
+	FP_NOMIGRATE(6, 7)
+
+	rd	%fprs, %o2		! check for unused fp
+	st	%o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET] ! save orig %fprs
+	btst	FPRS_FEF, %o2
+	bz,a,pt	%icc, .do_blockcopyout
+	wr	%g0, FPRS_FEF, %fprs
+
+	! FP live in caller: preserve its registers on our stack frame
+	BST_FPQ2Q4_TOSTACK(%o2)
+
+.do_blockcopyout:
+	rd	%gsr, %o2
+	stx	%o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET]	! save gsr
+	or	%l6, FPUSED_FLAG, %l6	! tell copyio_fault FP needs restore
+
+	! byte-copy until DST is 64-byte aligned for the block stores
+	andcc	DST, VIS_BLOCKSIZE - 1, TMP
+	mov	ASI_USER, %asi
+	bz,pt	%ncc, 2f
+	neg	TMP
+	add	TMP, VIS_BLOCKSIZE, TMP
+
+	! TMP = bytes required to align DST on FP_BLOCK boundary
+	! Using SRC as a tmp here
+	cmp	TMP, 3
+	bleu,pt	%ncc, 1f
+	sub	CNT,TMP,CNT		! adjust main count
+	sub	TMP, 3, TMP		! adjust for end of loop test
+.co_blkalign:
+	ldub	[REALSRC], SRC		! move 4 bytes per loop iteration
+	stba	SRC, [DST]%asi
+	subcc	TMP, 4, TMP
+	ldub	[REALSRC + 1], SRC
+	add	REALSRC, 4, REALSRC
+	stba	SRC, [DST + 1]%asi
+	ldub	[REALSRC - 2], SRC
+	add	DST, 4, DST
+	stba	SRC, [DST - 2]%asi
+	ldub	[REALSRC - 1], SRC
+	bgu,pt	%ncc, .co_blkalign
+	stba	SRC, [DST - 1]%asi
+
+	addcc	TMP, 3, TMP		! restore count adjustment
+	bz,pt	%ncc, 2f		! no bytes left?
+	nop
+1:	ldub	[REALSRC], SRC
+	inc	REALSRC
+	inc	DST
+	deccc	TMP
+	bgu	%ncc, 1b
+	stba	SRC, [DST - 1]%asi
+
+2:
+	membar	#StoreLoad
+	andn	REALSRC, 0x7, SRC
+
+	! SRC - 8-byte aligned
+	! DST - 64-byte aligned
+	! Pipeline prologue: load/align the first block into %f48-%f62.
+	ldd	[SRC], %f16
+	prefetch [SRC + (1 * VIS_BLOCKSIZE)], #n_reads
+	alignaddr REALSRC, %g0, %g0
+	ldd	[SRC + 0x08], %f18
+	prefetch [SRC + (2 * VIS_BLOCKSIZE)], #n_reads
+	faligndata %f16, %f18, %f48
+	ldd	[SRC + 0x10], %f20
+	prefetch [SRC + (3 * VIS_BLOCKSIZE)], #one_read
+	faligndata %f18, %f20, %f50
+	ldd	[SRC + 0x18], %f22
+	prefetch [SRC + (4 * VIS_BLOCKSIZE)], #one_read
+	faligndata %f20, %f22, %f52
+	ldd	[SRC + 0x20], %f24
+	prefetch [SRC + (5 * VIS_BLOCKSIZE)], #one_read
+	faligndata %f22, %f24, %f54
+	ldd	[SRC + 0x28], %f26
+	prefetch [SRC + (6 * VIS_BLOCKSIZE)], #one_read
+	faligndata %f24, %f26, %f56
+	ldd	[SRC + 0x30], %f28
+	prefetch [SRC + (7 * VIS_BLOCKSIZE)], #one_read
+	faligndata %f26, %f28, %f58
+	ldd	[SRC + 0x38], %f30
+	ldd	[SRC + VIS_BLOCKSIZE], %f16
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	SRC, VIS_BLOCKSIZE, SRC
+	prefetch [SRC + (9 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+	add	REALSRC, VIS_BLOCKSIZE, REALSRC
+	ba,pt	%ncc, 1f
+	prefetch [SRC + (10 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE], #one_read
+	.align	32
+	! Steady state: realign and store block N while loading block N+1.
+1:
+	ldd	[SRC + 0x08], %f18
+	faligndata %f28, %f30, %f60
+	ldd	[SRC + 0x10], %f20
+	faligndata %f30, %f16, %f62
+	stda	%f48, [DST]ASI_BLK_AIUS
+	ldd	[SRC + 0x18], %f22
+	faligndata %f16, %f18, %f48
+	ldd	[SRC + 0x20], %f24
+	faligndata %f18, %f20, %f50
+	ldd	[SRC + 0x28], %f26
+	faligndata %f20, %f22, %f52
+	ldd	[SRC + 0x30], %f28
+	faligndata %f22, %f24, %f54
+	ldd	[SRC + 0x38], %f30
+	prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE)], #n_reads
+	faligndata %f24, %f26, %f56
+	ldd	[SRC + VIS_BLOCKSIZE], %f16
+	prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE)], #one_read
+	faligndata %f26, %f28, %f58
+	prefetch [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #n_reads
+	prefetch [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE) + 0x20], #one_read
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	DST, VIS_BLOCKSIZE, DST
+	add	REALSRC, VIS_BLOCKSIZE, REALSRC
+	cmp	CNT, VIS_BLOCKSIZE + 8
+	bgu,pt	%ncc, 1b
+	add	SRC, VIS_BLOCKSIZE, SRC
+
+	! only if REALSRC & 0x7 is 0
+	cmp	CNT, VIS_BLOCKSIZE
+	bne	%ncc, 3f
+	andcc	REALSRC, 0x7, %g0
+	bz,pt	%ncc, 2f
+	nop
+3:
+	! misaligned/partial trailer: flush the final aligned block
+	faligndata %f28, %f30, %f60
+	faligndata %f30, %f16, %f62
+	stda	%f48, [DST]ASI_BLK_AIUS
+	add	DST, VIS_BLOCKSIZE, DST
+	ba,pt	%ncc, 3f
+	nop
+2:
+	! exactly one aligned block remains: copy it with fsrc1 moves
+	ldd	[SRC + 0x08], %f18
+	fsrc1	%f28, %f60
+	ldd	[SRC + 0x10], %f20
+	fsrc1	%f30, %f62
+	stda	%f48, [DST]ASI_BLK_AIUS
+	ldd	[SRC + 0x18], %f22
+	fsrc1	%f16, %f48
+	ldd	[SRC + 0x20], %f24
+	fsrc1	%f18, %f50
+	ldd	[SRC + 0x28], %f26
+	fsrc1	%f20, %f52
+	ldd	[SRC + 0x30], %f28
+	fsrc1	%f22, %f54
+	ldd	[SRC + 0x38], %f30
+	fsrc1	%f24, %f56
+	sub	CNT, VIS_BLOCKSIZE, CNT
+	add	DST, VIS_BLOCKSIZE, DST
+	add	SRC, VIS_BLOCKSIZE, SRC
+	add	REALSRC, VIS_BLOCKSIZE, REALSRC
+	fsrc1	%f26, %f58
+	fsrc1	%f28, %f60
+	fsrc1	%f30, %f62
+	stda	%f48, [DST]ASI_BLK_AIUS
+	add	DST, VIS_BLOCKSIZE, DST
+	ba,a,pt	%ncc, 4f
+	nop
+
+3:	tst	CNT
+	bz,a	%ncc, 4f
+	nop
+
+	! byte-copy any remaining tail to user space
+5:	ldub	[REALSRC], TMP
+	inc	REALSRC
+	inc	DST
+	deccc	CNT
+	bgu	%ncc, 5b
+	stba	TMP, [DST - 1]%asi
+4:
+
+.copyout_exit:
+	membar	#Sync
+
+	ldx	[%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2
+	wr	%o2, 0, %gsr		! restore gsr
+
+	ld	[%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
+	btst	FPRS_FEF, %o3
+	bz,pt	%icc, 4f
+	nop
+
+	! caller's FP regs were live: reload them from the stack
+	BLD_FPQ2Q4_FROMSTACK(%o2)
+
+	ba,pt	%ncc, 1f
+	wr	%o3, 0, %fprs		! restore fprs
+
+4:
+	! FP was unused on entry: scrub the quads we dirtied
+	FZEROQ2Q4
+	wr	%o3, 0, %fprs		! restore fprs
+
+1:
+	membar	#Sync
+	andn	%l6, FPUSED_FLAG, %l6
+	stn	%l6, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	FP_ALLOWMIGRATE(5, 6)
+	ret
+	restore	%g0, 0, %o0
+
+/*
+ * We got here because of a fault during copyout.
+ * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
+ */
+.copyout_err:
+	ldn	[THREAD_REG + T_COPYOPS], %o4	! check for copyop handler
+	tst	%o4
+	bz,pt	%ncc, 2f			! if not, return error
+	nop
+	ldn	[%o4 + CP_COPYOUT], %g2		! if handler, invoke it with
+	jmp	%g2				! original arguments
+	restore %g0, 0, %g0			! dispose of copy window
+2:
+	ret
+	restore %g0, -1, %o0			! return error value
+
+
+	SET_SIZE(copyout_more)
+
+#endif /* lint */
+
+
+#ifdef lint
+
+/*ARGSUSED*/
+int
+xcopyout(const void *kaddr, void *uaddr, size_t count)
+{ return (0); }
+
+#else	/* lint */
+
+/*
+ * xcopyout(kaddr, uaddr, count) -- identical copy machinery to
+ * copyout (it shares .sm_do_copyout and .do_copyout), differing only
+ * in the fault handlers: on error it returns the errno computed by
+ * the trap code (for callers like uiomove) rather than -1.
+ */
+	ENTRY(xcopyout)
+	cmp	%o2, VIS_COPY_THRESHOLD	! check for leaf rtn case
+	bleu,pt	%ncc, .xcopyout_small	! go to larger cases
+	xor	%o0, %o1, %o3		! are src, dst alignable?
+	btst	7, %o3			!
+	bz,pt	%ncc, .xcopyout_8	!
+	nop
+	btst	1, %o3			!
+	bz,pt	%ncc, .xcopyout_2	! check for half-word
+	nop
+	sethi	%hi(hw_copy_limit_1), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_1)], %o3
+	tst	%o3
+	bz,pn	%icc, .xcopyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .xcopyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .xcopyout_more	! otherwise go to large copy
+	nop
+.xcopyout_2:
+	btst	3, %o3			!
+	bz,pt	%ncc, .xcopyout_4	! check for word alignment
+	nop
+	sethi	%hi(hw_copy_limit_2), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_2)], %o3
+	tst	%o3
+	bz,pn	%icc, .xcopyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .xcopyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .xcopyout_more	! otherwise go to large copy
+	nop
+.xcopyout_4:
+	! already checked longword, must be word aligned
+	sethi	%hi(hw_copy_limit_4), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_4)], %o3
+	tst	%o3
+	bz,pn	%icc, .xcopyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .xcopyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .xcopyout_more	! otherwise go to large copy
+	nop
+.xcopyout_8:
+	sethi	%hi(hw_copy_limit_8), %o3	! Check copy limit
+	ld	[%o3 + %lo(hw_copy_limit_8)], %o3
+	tst	%o3
+	bz,pn	%icc, .xcopyout_small	! if zero, disable HW copy
+	cmp	%o2, %o3		! if length <= limit
+	bleu,pt	%ncc, .xcopyout_small	! go to small copy
+	nop
+	ba,pt	%ncc, .xcopyout_more	! otherwise go to large copy
+	nop
+
+.xcopyout_small:
+	! same leaf path as copyout, but faults vector to .sm_xcopyout_err
+	sethi	%hi(.sm_xcopyout_err), %o5	! .sm_xcopyout_err is lofault
+	or	%o5, %lo(.sm_xcopyout_err), %o5
+	ldn	[THREAD_REG + T_LOFAULT], %o4	! save existing handler
+	membar	#Sync				! sync error barrier
+	ba,pt	%ncc, .sm_do_copyout		! common code
+	stn	%o5, [THREAD_REG + T_LOFAULT]	! set t_lofault
+
+.xcopyout_more:
+	save	%sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+	sethi	%hi(.xcopyout_err), REAL_LOFAULT
+	ba,pt	%ncc, .do_copyout		! common code
+	or	REAL_LOFAULT, %lo(.xcopyout_err), REAL_LOFAULT
+
+/*
+ * We got here because of fault during xcopyout
+ * Errno value is in ERRNO
+ */
+.xcopyout_err:
+	ldn	[THREAD_REG + T_COPYOPS], %o4	! check for copyop handler
+	tst	%o4
+	bz,pt	%ncc, 2f			! if not, return error
+	nop
+	ldn	[%o4 + CP_XCOPYOUT], %g2	! if handler, invoke it with
+	jmp	%g2				! original arguments
+	restore %g0, 0, %g0			! dispose of copy window
+2:
+	ret
+	restore ERRNO, 0, %o0			! return errno value
+
+.sm_xcopyout_err:
+
+	membar	#Sync
+	stn	%o4, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	mov	SM_SAVE_SRC, %o0
+	mov	SM_SAVE_DST, %o1
+	mov	SM_SAVE_COUNT, %o2
+	ldn	[THREAD_REG + T_COPYOPS], %o3	! check for copyop handler
+	tst	%o3
+	bz,pt	%ncc, 3f			! if not, return error
+	nop
+	ldn	[%o3 + CP_XCOPYOUT], %o5	! if handler, invoke it with
+	jmp	%o5				! original arguments
+	nop
+3:
+	retl
+	or	%g1, 0, %o0		! return errno value
+
+	SET_SIZE(xcopyout)
+
+#endif /* lint */
+
+#ifdef lint
+
+/*ARGSUSED*/
+int
+xcopyout_little(const void *kaddr, void *uaddr, size_t count)
+{ return (0); }
+
+#else	/* lint */
+
+/*
+ * xcopyout_little: byte-reversing copy to user space for the
+ * little-endian path of do_unaligned (intended for <= 16 bytes;
+ * see the block comment earlier in this file).  The index %o3 runs
+ * from -count up to 0 while the effective source address is walked
+ * backwards, so dst[0] receives src[count-1] and so on; stores go
+ * through ASI_AIUSL (user secondary, little-endian).  Faults vector
+ * to .xcopyio_err (defined elsewhere in this file) via t_lofault.
+ */
+	ENTRY(xcopyout_little)
+	sethi	%hi(.xcopyio_err), %o5
+	or	%o5, %lo(.xcopyio_err), %o5
+	ldn	[THREAD_REG + T_LOFAULT], %o4
+	membar	#Sync			! sync error barrier
+	stn	%o5, [THREAD_REG + T_LOFAULT]
+	mov	%o4, %o5		! keep old handler for restore below
+
+	subcc	%g0, %o2, %o3		! %o3 = -count (loop index)
+	add	%o0, %o2, %o0
+	bz,pn	%ncc, 2f		! check for zero bytes
+	sub	%o2, 1, %o4
+	add	%o0, %o4, %o0		! start w/last byte
+	add	%o1, %o2, %o1
+	ldub	[%o0 + %o3], %o4
+
+1:	stba	%o4, [%o1 + %o3]ASI_AIUSL
+	inccc	%o3
+	sub	%o0, 2, %o0		! get next byte
+	bcc,a,pt %ncc, 1b
+	ldub	[%o0 + %o3], %o4
+
+2:
+	membar	#Sync				! sync error barrier
+	stn	%o5, [THREAD_REG + T_LOFAULT]	! restore old t_lofault
+	retl
+	mov	%g0, %o0		! return (0)
+
+	SET_SIZE(xcopyout_little)
+
+#endif /* lint */
+
+/*
+ * Copy user data to kernel space (copyin/xcopyin/xcopyin_little)
+ */
+
+#if defined(lint)
+
+/*ARGSUSED*/
+int
+copyin(const void *uaddr, void *kaddr, size_t count)
+{ return (0); }
+
+#else /* lint */
+
+ ENTRY(copyin)
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .copyin_small ! go to larger cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .copyin_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .copyin_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_more ! otherwise go to large copy
+ nop
+.copyin_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .copyin_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_more ! otherwise go to large copy
+ nop
+.copyin_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_more ! otherwise go to large copy
+ nop
+.copyin_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_more ! otherwise go to large copy
+ nop
+
+ .align 16
+ nop ! instruction alignment
+ ! see discussion at start of file
+.copyin_small:
+ sethi %hi(.sm_copyin_err), %o5 ! .sm_copyin_err is lofault
+ or %o5, %lo(.sm_copyin_err), %o5
+ ldn [THREAD_REG + T_LOFAULT], %o4 ! set/save t_lofault, no tramp
+ membar #Sync ! sync error barrier
+ stn %o5, [THREAD_REG + T_LOFAULT]
+.sm_do_copyin:
+ mov %o0, SM_SAVE_SRC
+ mov %o1, SM_SAVE_DST
+ cmp %o2, SHORTCOPY ! check for really short case
+ bleu,pt %ncc, .ci_sm_left !
+ mov %o2, SM_SAVE_COUNT
+ cmp %o2, CHKSIZE ! check for medium length cases
+ bgu,pn %ncc, .ci_med !
+ or %o0, %o1, %o3 ! prepare alignment check
+ andcc %o3, 0x3, %g0 ! test for alignment
+ bz,pt %ncc, .ci_sm_word ! branch to word aligned case
+.ci_sm_movebytes:
+ sub %o2, 3, %o2 ! adjust count to allow cc zero test
+.ci_sm_notalign4:
+ lduba [%o0]ASI_USER, %o3 ! read byte
+ subcc %o2, 4, %o2 ! reduce count by 4
+ stb %o3, [%o1] ! write byte
+ add %o0, 1, %o0 ! advance SRC by 1
+ lduba [%o0]ASI_USER, %o3 ! repeat for a total of 4 bytes
+ add %o0, 1, %o0 ! advance SRC by 1
+ stb %o3, [%o1 + 1]
+ add %o1, 4, %o1 ! advance DST by 4
+ lduba [%o0]ASI_USER, %o3
+ add %o0, 1, %o0 ! advance SRC by 1
+ stb %o3, [%o1 - 2]
+ lduba [%o0]ASI_USER, %o3
+ add %o0, 1, %o0 ! advance SRC by 1
+ bgt,pt %ncc, .ci_sm_notalign4 ! loop til 3 or fewer bytes remain
+ stb %o3, [%o1 - 1]
+ add %o2, 3, %o2 ! restore count
+.ci_sm_left:
+ tst %o2
+ bz,pt %ncc, .ci_sm_exit
+ nop
+ lduba [%o0]ASI_USER, %o3 ! load one byte
+ deccc %o2 ! reduce count for cc test
+ bz,pt %ncc, .ci_sm_exit
+ stb %o3,[%o1] ! store one byte
+ inc %o0
+ lduba [%o0]ASI_USER, %o3 ! load second byte
+ deccc %o2
+ bz,pt %ncc, .ci_sm_exit
+ stb %o3,[%o1 + 1] ! store second byte
+ inc %o0
+ lduba [%o0]ASI_USER, %o3 ! load third byte
+ stb %o3,[%o1 + 2] ! store third byte
+ membar #Sync ! sync error barrier
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+ .align 16
+.ci_sm_words:
+ lduwa [%o0]ASI_USER, %o3 ! read word
+.ci_sm_wordx:
+ subcc %o2, 8, %o2 ! update count
+ stw %o3, [%o1] ! write word
+ add %o0, 4, %o0 ! update SRC
+ add %o1, 8, %o1 ! update DST
+ lduwa [%o0]ASI_USER, %o3 ! read word
+ add %o0, 4, %o0 ! update SRC
+ bgt,pt %ncc, .ci_sm_words ! loop til done
+ stw %o3, [%o1 - 4] ! write word
+ addcc %o2, 7, %o2 ! restore count
+ bz,pt %ncc, .ci_sm_exit
+ nop
+ deccc %o2
+ bz,pt %ncc, .ci_sm_byte
+.ci_sm_half:
+ subcc %o2, 2, %o2 ! reduce count by 2
+ lduha [%o0]ASI_USER, %o3 ! read half word
+ add %o0, 2, %o0 ! advance SRC by 2
+ add %o1, 2, %o1 ! advance DST by 2
+ bgt,pt %ncc, .ci_sm_half ! loop til done
+ sth %o3, [%o1 - 2] ! write half word
+ addcc %o2, 1, %o2 ! restore count
+ bz,pt %ncc, .ci_sm_exit
+ nop
+.ci_sm_byte:
+ lduba [%o0]ASI_USER, %o3
+ stb %o3, [%o1]
+ membar #Sync ! sync error barrier
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+ .align 16
+.ci_sm_word:
+ subcc %o2, 4, %o2 ! update count
+ bgt,pt %ncc, .ci_sm_wordx
+ lduwa [%o0]ASI_USER, %o3 ! read word
+ addcc %o2, 3, %o2 ! restore count
+ bz,pt %ncc, .ci_sm_exit
+ stw %o3, [%o1] ! write word
+ deccc %o2 ! reduce count for cc test
+ add %o0, 4, %o0
+ lduba [%o0]ASI_USER, %o3 ! load one byte
+ bz,pt %ncc, .ci_sm_exit
+ stb %o3, [%o1 + 4] ! store one byte
+ inc %o0
+ lduba [%o0]ASI_USER, %o3 ! load second byte
+ deccc %o2
+ bz,pt %ncc, .ci_sm_exit
+ stb %o3, [%o1 + 5] ! store second byte
+ inc %o0
+ lduba [%o0]ASI_USER, %o3 ! load third byte
+ stb %o3, [%o1 + 6] ! store third byte
+.ci_sm_exit:
+ membar #Sync ! sync error barrier
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return 0
+
+ .align 16
+.ci_med:
+ xor %o0, %o1, %o3 ! setup alignment check
+ btst 1, %o3
+ bnz,pt %ncc, .ci_sm_movebytes ! unaligned
+ nop
+ btst 3, %o3
+ bnz,pt %ncc, .ci_med_half ! halfword aligned
+ nop
+ btst 7, %o3
+ bnz,pt %ncc, .ci_med_word ! word aligned
+ nop
+.ci_med_long:
+ btst 3, %o0 ! check for
+ bz,pt %ncc, .ci_med_long1 ! word alignment
+ nop
+.ci_med_long0:
+ lduba [%o0]ASI_USER, %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ btst 3, %o0
+ bnz,pt %ncc, .ci_med_long0
+ dec %o2
+.ci_med_long1: ! word aligned
+ btst 7, %o0 ! check for long word
+ bz,pt %ncc, .ci_med_long2
+ nop
+ lduwa [%o0]ASI_USER, %o3 ! load word
+ add %o0, 4, %o0 ! advance SRC by 4
+ stw %o3, [%o1] ! store word
+ add %o1, 4, %o1 ! advance DST by 4
+ sub %o2, 4, %o2 ! reduce count by 4
+!
+! Now long word aligned and have at least 32 bytes to move
+!
+.ci_med_long2:
+ sub %o2, 31, %o2 ! adjust count to allow cc zero test
+.ci_med_lmove:
+ ldxa [%o0]ASI_USER, %o3 ! read long word
+ subcc %o2, 32, %o2 ! reduce count by 32
+ stx %o3, [%o1] ! write long word
+ add %o0, 8, %o0 ! advance SRC by 8
+ ldxa [%o0]ASI_USER, %o3 ! repeat for a total for 4 long words
+ add %o0, 8, %o0 ! advance SRC by 8
+ stx %o3, [%o1 + 8]
+ add %o1, 32, %o1 ! advance DST by 32
+ ldxa [%o0]ASI_USER, %o3
+ add %o0, 8, %o0 ! advance SRC by 8
+ stx %o3, [%o1 - 16]
+ ldxa [%o0]ASI_USER, %o3
+ add %o0, 8, %o0 ! advance SRC by 8
+ bgt,pt %ncc, .ci_med_lmove ! loop til 31 or fewer bytes left
+ stx %o3, [%o1 - 8]
+ addcc %o2, 24, %o2 ! restore count to long word offset
+ ble,pt %ncc, .ci_med_lextra ! check for more long words to move
+ nop
+.ci_med_lword:
+ ldxa [%o0]ASI_USER, %o3 ! read long word
+ subcc %o2, 8, %o2 ! reduce count by 8
+ stx %o3, [%o1] ! write long word
+ add %o0, 8, %o0 ! advance SRC by 8
+ bgt,pt %ncc, .ci_med_lword ! loop til 7 or fewer bytes left
+ add %o1, 8, %o1 ! advance DST by 8
+.ci_med_lextra:
+ addcc %o2, 7, %o2 ! restore rest of count
+ bz,pt %ncc, .ci_sm_exit ! if zero, then done
+ deccc %o2
+ bz,pt %ncc, .ci_sm_byte
+ nop
+ ba,pt %ncc, .ci_sm_half
+ nop
+
+ .align 16
+ nop ! instruction alignment
+ ! see discussion at start of file
+.ci_med_word:
+ btst 3, %o0 ! check for
+ bz,pt %ncc, .ci_med_word1 ! word alignment
+ nop
+.ci_med_word0:
+ lduba [%o0]ASI_USER, %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ btst 3, %o0
+ bnz,pt %ncc, .ci_med_word0
+ dec %o2
+!
+! Now word aligned and have at least 36 bytes to move
+!
+.ci_med_word1:
+ sub %o2, 15, %o2 ! adjust count to allow cc zero test
+.ci_med_wmove:
+ lduwa [%o0]ASI_USER, %o3 ! read word
+ subcc %o2, 16, %o2 ! reduce count by 16
+ stw %o3, [%o1] ! write word
+ add %o0, 4, %o0 ! advance SRC by 4
+ lduwa [%o0]ASI_USER, %o3 ! repeat for a total for 4 words
+ add %o0, 4, %o0 ! advance SRC by 4
+ stw %o3, [%o1 + 4]
+ add %o1, 16, %o1 ! advance DST by 16
+ lduwa [%o0]ASI_USER, %o3
+ add %o0, 4, %o0 ! advance SRC by 4
+ stw %o3, [%o1 - 8]
+ lduwa [%o0]ASI_USER, %o3
+ add %o0, 4, %o0 ! advance SRC by 4
+ bgt,pt %ncc, .ci_med_wmove ! loop til 15 or fewer bytes left
+ stw %o3, [%o1 - 4]
+ addcc %o2, 12, %o2 ! restore count to word offset
+ ble,pt %ncc, .ci_med_wextra ! check for more words to move
+ nop
+.ci_med_word2:
+ lduwa [%o0]ASI_USER, %o3 ! read word
+ subcc %o2, 4, %o2 ! reduce count by 4
+ stw %o3, [%o1] ! write word
+ add %o0, 4, %o0 ! advance SRC by 4
+ bgt,pt %ncc, .ci_med_word2 ! loop til 3 or fewer bytes left
+ add %o1, 4, %o1 ! advance DST by 4
+.ci_med_wextra:
+ addcc %o2, 3, %o2 ! restore rest of count
+ bz,pt %ncc, .ci_sm_exit ! if zero, then done
+ deccc %o2
+ bz,pt %ncc, .ci_sm_byte
+ nop
+ ba,pt %ncc, .ci_sm_half
+ nop
+
+ .align 16
+ nop ! instruction alignment
+ ! see discussion at start of file
+.ci_med_half:
+ btst 1, %o0 ! check for
+ bz,pt %ncc, .ci_med_half1 ! half word alignment
+ nop
+ lduba [%o0]ASI_USER, %o3 ! load one byte
+ inc %o0
+ stb %o3,[%o1] ! store byte
+ inc %o1
+ dec %o2
+!
+! Now half word aligned and have at least 38 bytes to move
+!
+.ci_med_half1:
+ sub %o2, 7, %o2 ! adjust count to allow cc zero test
+.ci_med_hmove:
+ lduha [%o0]ASI_USER, %o3 ! read half word
+ subcc %o2, 8, %o2 ! reduce count by 8
+ sth %o3, [%o1] ! write half word
+ add %o0, 2, %o0 ! advance SRC by 2
+ lduha [%o0]ASI_USER, %o3 ! repeat for a total for 4 halfwords
+ add %o0, 2, %o0 ! advance SRC by 2
+ sth %o3, [%o1 + 2]
+ add %o1, 8, %o1 ! advance DST by 8
+ lduha [%o0]ASI_USER, %o3
+ add %o0, 2, %o0 ! advance SRC by 2
+ sth %o3, [%o1 - 4]
+ lduha [%o0]ASI_USER, %o3
+ add %o0, 2, %o0 ! advance SRC by 2
+ bgt,pt %ncc, .ci_med_hmove ! loop til 7 or fewer bytes left
+ sth %o3, [%o1 - 2]
+ addcc %o2, 7, %o2 ! restore count
+ bz,pt %ncc, .ci_sm_exit
+ deccc %o2
+ bz,pt %ncc, .ci_sm_byte
+ nop
+ ba,pt %ncc, .ci_sm_half
+ nop
+
+.sm_copyin_err:
+ membar #Sync
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ mov SM_SAVE_SRC, %o0
+ mov SM_SAVE_DST, %o1
+ mov SM_SAVE_COUNT, %o2
+ ldn [THREAD_REG + T_COPYOPS], %o3 ! check for copyop handler
+ tst %o3
+ bz,pt %ncc, 3f ! if not, return error
+ nop
+ ldn [%o3 + CP_COPYIN], %o5 ! if handler, invoke it with
+ jmp %o5 ! original arguments
+ nop
+3:
+ retl
+ or %g0, -1, %o0 ! return errno value
+
+ SET_SIZE(copyin)
+
+
+/*
+ * The _more entry points are not intended to be used directly by
+ * any caller from outside this file. They are provided to allow
+ * profiling and dtrace of the portions of the copy code that uses
+ * the floating point registers.
+ * This entry is particularly important as DTRACE (at least as of
+ * 4/2004) does not support leaf functions.
+ */
+
+ ENTRY(copyin_more)
+.copyin_more:
+ prefetch [%o0], #n_reads
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ set .copyin_err, REAL_LOFAULT
+
+/*
+ * Copy ins that reach here are larger than VIS_COPY_THRESHOLD bytes
+ */
+.do_copyin:
+ set copyio_fault, %l7 ! .copyio_fault is lofault val
+
+ ldn [THREAD_REG + T_LOFAULT], %l6 ! save existing handler
+ membar #Sync ! sync error barrier
+ stn %l7, [THREAD_REG + T_LOFAULT] ! set t_lofault
+
+ mov %i0, SAVE_SRC
+ mov %i1, SAVE_DST
+ mov %i2, SAVE_COUNT
+
+ FP_NOMIGRATE(6, 7)
+
+ rd %fprs, %o2 ! check for unused fp
+ st %o2, [%fp + STACK_BIAS - SAVED_FPRS_OFFSET] ! save orig %fprs
+ btst FPRS_FEF, %o2
+ bz,a,pt %icc, .do_blockcopyin
+ wr %g0, FPRS_FEF, %fprs
+
+ BST_FPQ2Q4_TOSTACK(%o2)
+
+.do_blockcopyin:
+ rd %gsr, %o2
+ stx %o2, [%fp + STACK_BIAS - SAVED_GSR_OFFSET] ! save gsr
+ or %l6, FPUSED_FLAG, %l6
+
+ andcc DST, VIS_BLOCKSIZE - 1, TMP
+ mov ASI_USER, %asi
+ bz,pt %ncc, 2f
+ neg TMP
+ add TMP, VIS_BLOCKSIZE, TMP
+
+ ! TMP = bytes required to align DST on FP_BLOCK boundary
+ ! Using SRC as a tmp here
+ cmp TMP, 3
+ bleu,pt %ncc, 1f
+ sub CNT,TMP,CNT ! adjust main count
+ sub TMP, 3, TMP ! adjust for end of loop test
+.ci_blkalign:
+ lduba [REALSRC]%asi, SRC ! move 4 bytes per loop iteration
+ stb SRC, [DST]
+ subcc TMP, 4, TMP
+ lduba [REALSRC + 1]%asi, SRC
+ add REALSRC, 4, REALSRC
+ stb SRC, [DST + 1]
+ lduba [REALSRC - 2]%asi, SRC
+ add DST, 4, DST
+ stb SRC, [DST - 2]
+ lduba [REALSRC - 1]%asi, SRC
+ bgu,pt %ncc, .ci_blkalign
+ stb SRC, [DST - 1]
+
+ addcc TMP, 3, TMP ! restore count adjustment
+ bz,pt %ncc, 2f ! no bytes left?
+ nop
+1: lduba [REALSRC]%asi, SRC
+ inc REALSRC
+ inc DST
+ deccc TMP
+ bgu %ncc, 1b
+ stb SRC, [DST - 1]
+
+2:
+ membar #StoreLoad
+ andn REALSRC, 0x7, SRC
+
+ ! SRC - 8-byte aligned
+ ! DST - 64-byte aligned
+ ldda [SRC]%asi, %f16
+ prefetcha [SRC + (1 * VIS_BLOCKSIZE)]%asi, #n_reads
+ alignaddr REALSRC, %g0, %g0
+ ldda [SRC + 0x08]%asi, %f18
+ prefetcha [SRC + (2 * VIS_BLOCKSIZE)]%asi, #n_reads
+ faligndata %f16, %f18, %f48
+ ldda [SRC + 0x10]%asi, %f20
+ prefetcha [SRC + (3 * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f18, %f20, %f50
+ ldda [SRC + 0x18]%asi, %f22
+ prefetcha [SRC + (4 * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f20, %f22, %f52
+ ldda [SRC + 0x20]%asi, %f24
+ prefetcha [SRC + (5 * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f22, %f24, %f54
+ ldda [SRC + 0x28]%asi, %f26
+ prefetcha [SRC + (6 * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f24, %f26, %f56
+ ldda [SRC + 0x30]%asi, %f28
+ prefetcha [SRC + (7 * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f26, %f28, %f58
+ ldda [SRC + 0x38]%asi, %f30
+ ldda [SRC + VIS_BLOCKSIZE]%asi, %f16
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add SRC, VIS_BLOCKSIZE, SRC
+ prefetcha [SRC + (9 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE]%asi, #one_read
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ ba,pt %ncc, 1f
+ prefetcha [SRC + (10 * VIS_BLOCKSIZE) - VIS_BLOCKSIZE]%asi, #one_read
+ .align 32
+1:
+ ldda [SRC + 0x08]%asi, %f18
+ faligndata %f28, %f30, %f60
+ ldda [SRC + 0x10]%asi, %f20
+ faligndata %f30, %f16, %f62
+ stda %f48, [DST]ASI_BLK_P
+ ldda [SRC + 0x18]%asi, %f22
+ faligndata %f16, %f18, %f48
+ ldda [SRC + 0x20]%asi, %f24
+ faligndata %f18, %f20, %f50
+ ldda [SRC + 0x28]%asi, %f26
+ faligndata %f20, %f22, %f52
+ ldda [SRC + 0x30]%asi, %f28
+ faligndata %f22, %f24, %f54
+ ldda [SRC + 0x38]%asi, %f30
+ prefetcha [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE)]%asi, #n_reads
+ faligndata %f24, %f26, %f56
+ ldda [SRC + VIS_BLOCKSIZE]%asi, %f16
+ prefetcha [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE)]%asi, #one_read
+ faligndata %f26, %f28, %f58
+ prefetcha [SRC + ((OLYMPUS_C_PREFETCH) * VIS_BLOCKSIZE) + 0x20]%asi, #n_reads
+ prefetcha [SRC + ((OLYMPUS_C_2ND_PREFETCH) * VIS_BLOCKSIZE) + 0x20]%asi, #one_read
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add DST, VIS_BLOCKSIZE, DST
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ cmp CNT, VIS_BLOCKSIZE + 8
+ bgu,pt %ncc, 1b
+ add SRC, VIS_BLOCKSIZE, SRC
+
+ ! only if REALSRC & 0x7 is 0
+ cmp CNT, VIS_BLOCKSIZE
+ bne %ncc, 3f
+ andcc REALSRC, 0x7, %g0
+ bz,pt %ncc, 2f
+ nop
+3:
+ faligndata %f28, %f30, %f60
+ faligndata %f30, %f16, %f62
+ stda %f48, [DST]ASI_BLK_P
+ add DST, VIS_BLOCKSIZE, DST
+ ba,pt %ncc, 3f
+ nop
+2:
+ ldda [SRC + 0x08]%asi, %f18
+ fsrc1 %f28, %f60
+ ldda [SRC + 0x10]%asi, %f20
+ fsrc1 %f30, %f62
+ stda %f48, [DST]ASI_BLK_P
+ ldda [SRC + 0x18]%asi, %f22
+ fsrc1 %f16, %f48
+ ldda [SRC + 0x20]%asi, %f24
+ fsrc1 %f18, %f50
+ ldda [SRC + 0x28]%asi, %f26
+ fsrc1 %f20, %f52
+ ldda [SRC + 0x30]%asi, %f28
+ fsrc1 %f22, %f54
+ ldda [SRC + 0x38]%asi, %f30
+ fsrc1 %f24, %f56
+ sub CNT, VIS_BLOCKSIZE, CNT
+ add DST, VIS_BLOCKSIZE, DST
+ add SRC, VIS_BLOCKSIZE, SRC
+ add REALSRC, VIS_BLOCKSIZE, REALSRC
+ fsrc1 %f26, %f58
+ fsrc1 %f28, %f60
+ fsrc1 %f30, %f62
+ stda %f48, [DST]ASI_BLK_P
+ add DST, VIS_BLOCKSIZE, DST
+ ba,a,pt %ncc, 4f
+ nop
+
+3: tst CNT
+ bz,a %ncc, 4f
+ nop
+
+5: lduba [REALSRC]ASI_USER, TMP
+ inc REALSRC
+ inc DST
+ deccc CNT
+ bgu %ncc, 5b
+ stb TMP, [DST - 1]
+4:
+
+.copyin_exit:
+ membar #Sync
+
+ ldx [%fp + STACK_BIAS - SAVED_GSR_OFFSET], %o2 ! restore gsr
+ wr %o2, 0, %gsr
+
+ ld [%fp + STACK_BIAS - SAVED_FPRS_OFFSET], %o3
+ btst FPRS_FEF, %o3
+ bz,pt %icc, 4f
+ nop
+
+ BLD_FPQ2Q4_FROMSTACK(%o2)
+
+ ba,pt %ncc, 1f
+ wr %o3, 0, %fprs ! restore fprs
+
+4:
+ FZEROQ2Q4
+ wr %o3, 0, %fprs ! restore fprs
+
+1:
+ membar #Sync ! sync error barrier
+ andn %l6, FPUSED_FLAG, %l6
+ stn %l6, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ FP_ALLOWMIGRATE(5, 6)
+ ret
+ restore %g0, 0, %o0
+/*
+ * We got here because of a fault during copyin
+ * Errno value is in ERRNO, but DDI/DKI says return -1 (sigh).
+ */
+.copyin_err:
+ ldn [THREAD_REG + T_COPYOPS], %o4 ! check for copyop handler
+ tst %o4
+ bz,pt %ncc, 2f ! if not, return error
+ nop
+ ldn [%o4 + CP_COPYIN], %g2 ! if handler, invoke it with
+ jmp %g2 ! original arguments
+ restore %g0, 0, %g0 ! dispose of copy window
+2:
+ ret
+ restore %g0, -1, %o0 ! return error value
+
+
+ SET_SIZE(copyin_more)
+
+#endif /* lint */
+
+#ifdef lint
+
+/*ARGSUSED*/
+int
+xcopyin(const void *uaddr, void *kaddr, size_t count)
+{ return (0); }
+
+#else /* lint */
+
+ ENTRY(xcopyin)
+
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .xcopyin_small ! go to larger cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .xcopyin_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .xcopyin_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .xcopyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .xcopyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .xcopyin_more ! otherwise go to large copy
+ nop
+.xcopyin_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .xcopyin_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .xcopyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .xcopyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .xcopyin_more ! otherwise go to large copy
+ nop
+.xcopyin_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .xcopyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .xcopyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .xcopyin_more ! otherwise go to large copy
+ nop
+.xcopyin_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .xcopyin_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .xcopyin_small ! go to small copy
+ nop
+ ba,pt %ncc, .xcopyin_more ! otherwise go to large copy
+ nop
+
+.xcopyin_small:
+ sethi %hi(.sm_xcopyin_err), %o5 ! .sm_xcopyin_err is lofault value
+ or %o5, %lo(.sm_xcopyin_err), %o5
+ ldn [THREAD_REG + T_LOFAULT], %o4 ! set/save t_lofaul
+ membar #Sync ! sync error barrier
+ ba,pt %ncc, .sm_do_copyin ! common code
+ stn %o5, [THREAD_REG + T_LOFAULT]
+
+.xcopyin_more:
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ sethi %hi(.xcopyin_err), REAL_LOFAULT ! .xcopyin_err is lofault value
+ ba,pt %ncc, .do_copyin
+ or REAL_LOFAULT, %lo(.xcopyin_err), REAL_LOFAULT
+
+/*
+ * We got here because of fault during xcopyin
+ * Errno value is in ERRNO
+ */
+.xcopyin_err:
+ ldn [THREAD_REG + T_COPYOPS], %o4 ! check for copyop handler
+ tst %o4
+ bz,pt %ncc, 2f ! if not, return error
+ nop
+ ldn [%o4 + CP_XCOPYIN], %g2 ! if handler, invoke it with
+ jmp %g2 ! original arguments
+ restore %g0, 0, %g0 ! dispose of copy window
+2:
+ ret
+ restore ERRNO, 0, %o0 ! return errno value
+
+.sm_xcopyin_err:
+
+ membar #Sync
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ mov SM_SAVE_SRC, %o0
+ mov SM_SAVE_DST, %o1
+ mov SM_SAVE_COUNT, %o2
+ ldn [THREAD_REG + T_COPYOPS], %o3 ! check for copyop handler
+ tst %o3
+ bz,pt %ncc, 3f ! if not, return error
+ nop
+ ldn [%o3 + CP_XCOPYIN], %o5 ! if handler, invoke it with
+ jmp %o5 ! original arguments
+ nop
+3:
+ retl
+ or %g1, 0, %o0 ! return errno value
+
+ SET_SIZE(xcopyin)
+
+#endif /* lint */
+
+#ifdef lint
+
+/*ARGSUSED*/
+int
+xcopyin_little(const void *uaddr, void *kaddr, size_t count)
+{ return (0); }
+
+#else /* lint */
+
+ ENTRY(xcopyin_little)
+ sethi %hi(.xcopyio_err), %o5
+ or %o5, %lo(.xcopyio_err), %o5
+ ldn [THREAD_REG + T_LOFAULT], %o4
+ membar #Sync ! sync error barrier
+ stn %o5, [THREAD_REG + T_LOFAULT]
+ mov %o4, %o5
+
+ subcc %g0, %o2, %o3
+ add %o0, %o2, %o0
+ bz,pn %ncc, 2f ! check for zero bytes
+ sub %o2, 1, %o4
+ add %o0, %o4, %o0 ! start w/last byte
+ add %o1, %o2, %o1
+ lduba [%o0 + %o3]ASI_AIUSL, %o4
+
+1: stb %o4, [%o1 + %o3]
+ inccc %o3
+ sub %o0, 2, %o0 ! get next byte
+ bcc,a,pt %ncc, 1b
+ lduba [%o0 + %o3]ASI_AIUSL, %o4
+
+2:
+ membar #Sync ! sync error barrier
+ stn %o5, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g0, %o0 ! return (0)
+
+.xcopyio_err:
+ membar #Sync ! sync error barrier
+ stn %o5, [THREAD_REG + T_LOFAULT] ! restore old t_lofault
+ retl
+ mov %g1, %o0
+
+ SET_SIZE(xcopyin_little)
+
+#endif /* lint */
+
+
+/*
+ * Copy a block of storage - must not overlap (from + len <= to).
+ * No fault handler installed (to be called under on_fault())
+ */
+#if defined(lint)
+
+/* ARGSUSED */
+void
+copyin_noerr(const void *ufrom, void *kto, size_t count)
+{}
+
+#else /* lint */
+ ENTRY(copyin_noerr)
+
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .copyin_ne_small ! go to larger cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .copyin_ne_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .copyin_ne_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_noerr_more ! otherwise go to large copy
+ nop
+.copyin_ne_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .copyin_ne_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_noerr_more ! otherwise go to large copy
+ nop
+.copyin_ne_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_noerr_more ! otherwise go to large copy
+ nop
+.copyin_ne_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .copyin_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyin_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyin_noerr_more ! otherwise go to large copy
+ nop
+
+.copyin_ne_small:
+ ldn [THREAD_REG + T_LOFAULT], %o4
+ tst %o4
+ bz,pn %ncc, .sm_do_copyin
+ nop
+ sethi %hi(.sm_copyio_noerr), %o5
+ or %o5, %lo(.sm_copyio_noerr), %o5
+ membar #Sync ! sync error barrier
+ ba,pt %ncc, .sm_do_copyin
+ stn %o5, [THREAD_REG + T_LOFAULT] ! set/save t_lofault
+
+.copyin_noerr_more:
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ sethi %hi(.copyio_noerr), REAL_LOFAULT
+ ba,pt %ncc, .do_copyin
+ or REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
+
+.copyio_noerr:
+ jmp %l6
+ restore %g0,0,%g0
+
+.sm_copyio_noerr:
+ membar #Sync
+ stn %o4, [THREAD_REG + T_LOFAULT] ! restore t_lofault
+ jmp %o4
+ nop
+
+ SET_SIZE(copyin_noerr)
+#endif /* lint */
+
+/*
+ * Copy a block of storage - must not overlap (from + len <= to).
+ * No fault handler installed (to be called under on_fault())
+ */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+copyout_noerr(const void *kfrom, void *uto, size_t count)
+{}
+
+#else /* lint */
+ ENTRY(copyout_noerr)
+
+ cmp %o2, VIS_COPY_THRESHOLD ! check for leaf rtn case
+ bleu,pt %ncc, .copyout_ne_small ! go to larger cases
+ xor %o0, %o1, %o3 ! are src, dst alignable?
+ btst 7, %o3 !
+ bz,pt %ncc, .copyout_ne_8 ! check for longword alignment
+ nop
+ btst 1, %o3 !
+ bz,pt %ncc, .copyout_ne_2 ! check for half-word
+ nop
+ sethi %hi(hw_copy_limit_1), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_1)], %o3
+ tst %o3
+ bz,pn %icc, .copyout_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyout_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyout_noerr_more ! otherwise go to large copy
+ nop
+.copyout_ne_2:
+ btst 3, %o3 !
+ bz,pt %ncc, .copyout_ne_4 ! check for word alignment
+ nop
+ sethi %hi(hw_copy_limit_2), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_2)], %o3
+ tst %o3
+ bz,pn %icc, .copyout_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyout_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyout_noerr_more ! otherwise go to large copy
+ nop
+.copyout_ne_4:
+ ! already checked longword, must be word aligned
+ sethi %hi(hw_copy_limit_4), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_4)], %o3
+ tst %o3
+ bz,pn %icc, .copyout_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyout_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyout_noerr_more ! otherwise go to large copy
+ nop
+.copyout_ne_8:
+ sethi %hi(hw_copy_limit_8), %o3 ! Check copy limit
+ ld [%o3 + %lo(hw_copy_limit_8)], %o3
+ tst %o3
+ bz,pn %icc, .copyout_ne_small ! if zero, disable HW copy
+ cmp %o2, %o3 ! if length <= limit
+ bleu,pt %ncc, .copyout_ne_small ! go to small copy
+ nop
+ ba,pt %ncc, .copyout_noerr_more ! otherwise go to large copy
+ nop
+
+.copyout_ne_small:
+ ldn [THREAD_REG + T_LOFAULT], %o4
+ tst %o4
+ bz,pn %ncc, .sm_do_copyout
+ nop
+ sethi %hi(.sm_copyio_noerr), %o5
+ or %o5, %lo(.sm_copyio_noerr), %o5
+ membar #Sync ! sync error barrier
+ ba,pt %ncc, .sm_do_copyout
+ stn %o5, [THREAD_REG + T_LOFAULT] ! set/save t_lofault
+
+.copyout_noerr_more:
+ save %sp, -SA(MINFRAME + HWCOPYFRAMESIZE), %sp
+ sethi %hi(.copyio_noerr), REAL_LOFAULT
+ ba,pt %ncc, .do_copyout
+ or REAL_LOFAULT, %lo(.copyio_noerr), REAL_LOFAULT
+
+ SET_SIZE(copyout_noerr)
+#endif /* lint */
+
+
+/*
+ * hwblkclr - clears block-aligned, block-multiple-sized regions that are
+ * longer than 256 bytes in length using spitfire's block stores. If
+ * the criteria for using this routine are not met then it calls bzero
+ * and returns 1. Otherwise 0 is returned indicating success.
+ * Caller is responsible for ensuring use_hw_bzero is true and that
+ * kpreempt_disable() has been called.
+ */
+#ifdef lint
+/*ARGSUSED*/
+int
+hwblkclr(void *addr, size_t len)
+{
+ return(0);
+}
+#else /* lint */
+ ! %i0 - start address
+ ! %i1 - length of region (multiple of 64)
+ ! %l0 - saved fprs
+ ! %l1 - pointer to saved %d0 block
+ ! %l2 - saved curthread->t_lwp
+
+ ENTRY(hwblkclr)
+ ! get another window w/space for one aligned block of saved fpregs
+ save %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
+
+ ! Must be block-aligned
+ andcc %i0, (VIS_BLOCKSIZE-1), %g0
+ bnz,pn %ncc, 1f
+ nop
+
+ ! ... and must be 256 bytes or more
+ cmp %i1, 256
+ blu,pn %ncc, 1f
+ nop
+
+ ! ... and length must be a multiple of VIS_BLOCKSIZE
+ andcc %i1, (VIS_BLOCKSIZE-1), %g0
+ bz,pn %ncc, 2f
+ nop
+
+1: ! punt, call bzero but notify the caller that bzero was used
+ mov %i0, %o0
+ call bzero
+ mov %i1, %o1
+ ret
+ restore %g0, 1, %o0 ! return (1) - did not use block operations
+
+2: rd %fprs, %l0 ! check for unused fp
+ btst FPRS_FEF, %l0
+ bz,pt %icc, 1f
+ nop
+
+ ! save in-use fpregs on stack
+ membar #Sync
+ add %fp, STACK_BIAS - 65, %l1
+ and %l1, -VIS_BLOCKSIZE, %l1
+ stda %d0, [%l1]ASI_BLK_P
+
+1: membar #StoreStore|#StoreLoad|#LoadStore
+ wr %g0, FPRS_FEF, %fprs
+ wr %g0, ASI_BLK_P, %asi
+
+ ! Clear block
+ fzero %d0
+ fzero %d2
+ fzero %d4
+ fzero %d6
+ fzero %d8
+ fzero %d10
+ fzero %d12
+ fzero %d14
+
+ mov 256, %i3
+ ba,pt %ncc, .pz_doblock
+ nop
+
+.pz_blkstart:
+ ! stda %d0, [%i0 + 192]%asi ! in dly slot of branch that got us here
+ stda %d0, [%i0 + 128]%asi
+ stda %d0, [%i0 + 64]%asi
+ stda %d0, [%i0]%asi
+.pz_zinst:
+ add %i0, %i3, %i0
+ sub %i1, %i3, %i1
+.pz_doblock:
+ cmp %i1, 256
+ bgeu,a %ncc, .pz_blkstart
+ stda %d0, [%i0 + 192]%asi
+
+ cmp %i1, 64
+ blu %ncc, .pz_finish
+
+ andn %i1, (64-1), %i3
+ srl %i3, 4, %i2 ! using blocks, 1 instr / 16 words
+ set .pz_zinst, %i4
+ sub %i4, %i2, %i4
+ jmp %i4
+ nop
+
+.pz_finish:
+ membar #Sync
+ btst FPRS_FEF, %l0
+ bz,a .pz_finished
+ wr %l0, 0, %fprs ! restore fprs
+
+ ! restore fpregs from stack
+ ldda [%l1]ASI_BLK_P, %d0
+ membar #Sync
+ wr %l0, 0, %fprs ! restore fprs
+
+.pz_finished:
+ ret
+ restore %g0, 0, %o0 ! return (bzero or not)
+
+ SET_SIZE(hwblkclr)
+#endif /* lint */
+
+#ifdef lint
+/*ARGSUSED*/
+void
+hw_pa_bcopy32(uint64_t src, uint64_t dst)
+{}
+#else /*!lint */
+ /*
+ * Copy 32 bytes of data from src (%o0) to dst (%o1)
+ * using physical addresses.
+ */
+ ENTRY_NP(hw_pa_bcopy32)
+ rdpr %pstate, %g1
+ andn %g1, PSTATE_IE, %g2
+ wrpr %g0, %g2, %pstate
+
+ rdpr %pstate, %g0
+ ldxa [%o0]ASI_MEM, %o2
+ add %o0, 8, %o0
+ ldxa [%o0]ASI_MEM, %o3
+ add %o0, 8, %o0
+ ldxa [%o0]ASI_MEM, %o4
+ add %o0, 8, %o0
+ ldxa [%o0]ASI_MEM, %o5
+ membar #Sync
+
+ stxa %o2, [%o1]ASI_MEM
+ add %o1, 8, %o1
+ stxa %o3, [%o1]ASI_MEM
+ add %o1, 8, %o1
+ stxa %o4, [%o1]ASI_MEM
+ add %o1, 8, %o1
+ stxa %o5, [%o1]ASI_MEM
+
+ retl
+ wrpr %g0, %g1, %pstate
+
+ SET_SIZE(hw_pa_bcopy32)
+
+#endif /* lint */
+
+#if defined(lint)
+
+int use_hw_bcopy = 1;
+int use_hw_bzero = 1;
+uint_t hw_copy_limit_1 = 0;
+uint_t hw_copy_limit_2 = 0;
+uint_t hw_copy_limit_4 = 0;
+uint_t hw_copy_limit_8 = 0;
+
+#else /* !lint */
+
+ DGDEF(use_hw_bcopy)
+ .word 1
+ DGDEF(use_hw_bzero)
+ .word 1
+ DGDEF(hw_copy_limit_1)
+ .word 0
+ DGDEF(hw_copy_limit_2)
+ .word 0
+ DGDEF(hw_copy_limit_4)
+ .word 0
+ DGDEF(hw_copy_limit_8)
+ .word 0
+
+ .align 64
+ .section ".text"
+#endif /* !lint */
diff --git a/usr/src/uts/sun4u/io/opl_cfg.c b/usr/src/uts/sun4u/io/opl_cfg.c
new file mode 100644
index 0000000000..51da7369c0
--- /dev/null
+++ b/usr/src/uts/sun4u/io/opl_cfg.c
@@ -0,0 +1,2559 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/debug.h>
+#include <sys/modctl.h>
+#include <sys/autoconf.h>
+#include <sys/hwconf.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ndi_impldefs.h>
+#include <sys/machsystm.h>
+#include <sys/fcode.h>
+#include <sys/promif.h>
+#include <sys/promimpl.h>
+#include <sys/opl_cfg.h>
+#include <sys/scfd/scfostoescf.h>
+
+/* Non-zero once opl_init_cfg() has located the boot-time nodes. */
+static unsigned int opl_cfg_inited;
+/* Per-LSB record of the device tree nodes of interest to IKP. */
+static opl_board_cfg_t opl_boards[HWD_SBS_PER_DOMAIN];
+
+/*
+ * Module control operations
+ */
+
+extern struct mod_ops mod_miscops;
+
+static struct modlmisc modlmisc = {
+ &mod_miscops, /* Type of module */
+ "OPL opl_cfg %I%"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
+
+static int opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
+
+static int opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);
+
+static int opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
+
+static int opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
+static int opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
+
+static int opl_map_phys(dev_info_t *, struct regspec *, caddr_t *,
+ ddi_device_acc_attr_t *, ddi_acc_handle_t *);
+static void opl_unmap_phys(ddi_acc_handle_t *);
+static int opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);
+
+extern int prom_get_fcode_size(char *);
+extern int prom_get_fcode(char *, char *);
+
+#define PROBE_STR_SIZE 64
+#define UNIT_ADDR_SIZE 64
+
+/*
+ * Dispatch table mapping fcode service names to their OPL-specific
+ * handlers. Consulted by opl_fc_do_op() after the generic fc_ops()
+ * have declined a request; terminated by the NULL sentinel entry.
+ */
+opl_fc_ops_t opl_fc_ops[] = {
+
+ { FC_MAP_IN, opl_map_in},
+ { FC_MAP_OUT, opl_map_out},
+ { "rx@", opl_register_fetch},
+ { FC_RL_FETCH, opl_register_fetch},
+ { FC_RW_FETCH, opl_register_fetch},
+ { FC_RB_FETCH, opl_register_fetch},
+ { "rx!", opl_register_store},
+ { FC_RL_STORE, opl_register_store},
+ { FC_RW_STORE, opl_register_store},
+ { FC_RB_STORE, opl_register_store},
+ { "claim-memory", opl_claim_memory},
+ { "release-memory", opl_release_memory},
+ { "vtop", opl_vtop},
+ { FC_CONFIG_CHILD, opl_config_child},
+ { FC_GET_FCODE_SIZE, opl_get_fcode_size},
+ { FC_GET_FCODE, opl_get_fcode},
+ { "get-hwd-va", opl_get_hwd_va},
+ { NULL, NULL}
+
+};
+
+extern caddr_t efcode_vaddr;
+extern int efcode_size;
+
+#ifdef DEBUG
+/*
+ * Bitmask flags selecting which sections of the hardware descriptor
+ * opl_dump_hwd() prints. These are tested with `hwddump_flags & FLAG',
+ * so each must be a distinct power of two: the previous values
+ * (1, 2, 3, 4) made HWDDUMP_CHUNKS alias HWDDUMP_OFFSETS and
+ * HWDDUMP_ALL_STATUS, causing SBP|CHUNKS to enable every section.
+ */
+#define HWDDUMP_OFFSETS 1
+#define HWDDUMP_ALL_STATUS 2
+#define HWDDUMP_CHUNKS 4
+#define HWDDUMP_SBP 8
+
+int hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
+#endif
+
+/*
+ * Module load entry point: create the "opl-fcodemem" resource map,
+ * seed it with the fcode memory carved out at boot, then install
+ * the module. Returns 0 on success, non-zero on failure.
+ */
+int
+_init()
+{
+ int err = 0;
+
+ /*
+ * Create a resource map for the contiguous memory allocated
+ * at start-of-day in startup.c
+ */
+ err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
+ if (err == NDI_FAILURE) {
+ cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
+ return (1);
+ }
+
+ /*
+ * Put the allocated memory into the pool.
+ */
+ (void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
+ (uint64_t)efcode_size, "opl-fcodemem", 0);
+
+ if ((err = mod_install(&modlinkage)) != 0) {
+ cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
+ /* Install failed: tear the resource map back down. */
+ (void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
+ }
+
+ return (err);
+}
+
+/*
+ * Module unload entry point: remove the module and, only if that
+ * succeeds, destroy the "opl-fcodemem" resource map.
+ */
+int
+_fini(void)
+{
+ int ret = mod_remove(&modlinkage);
+
+ if (ret == 0)
+ (void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
+
+ return (ret);
+}
+
+/*
+ * Module information entry point. Converted from the old K&R-style
+ * parameter declaration to an ANSI prototype for consistency with
+ * the rest of this file.
+ */
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+#ifdef DEBUG
+/*
+ * DEBUG-only helper: print selected sections of a board's hardware
+ * descriptor snapshot. Which sections appear is gated by the
+ * hwddump_flags bitmask.
+ */
+static void
+opl_dump_hwd(opl_probe_t *probe)
+{
+ hwd_header_t *hdrp;
+ hwd_sb_status_t *statp;
+ hwd_domain_info_t *dinfop;
+ hwd_sb_t *sbp;
+ hwd_cpu_chip_t *chips;
+ hwd_pci_ch_t *channels;
+ int board, i, status;
+
+ board = probe->pr_board;
+
+ hdrp = probe->pr_hdr;
+ statp = probe->pr_sb_status;
+ dinfop = probe->pr_dinfo;
+ sbp = probe->pr_sb;
+
+ /* Snapshot identity. */
+ printf("HWD: board %d\n", board);
+ printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
+ printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
+ hdrp->hdr_version.minor);
+
+ if (hwddump_flags & HWDDUMP_OFFSETS) {
+ printf("HWD:status offset = 0x%x\n",
+ hdrp->hdr_sb_status_offset);
+ printf("HWD:domain offset = 0x%x\n",
+ hdrp->hdr_domain_info_offset);
+ printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
+ }
+
+ if (hwddump_flags & HWDDUMP_SBP)
+ printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);
+
+ if (hwddump_flags & HWDDUMP_ALL_STATUS) {
+ int bd;
+ printf("HWD:board status =");
+ for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
+ printf("%x ", statp->sb_status[bd]);
+ printf("\n");
+ } else {
+ printf("HWD:board status = %d\n", statp->sb_status[board]);
+ }
+
+ printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
+ printf("HWD:platform = %s\n", dinfop->dinf_platform_token);
+
+ printf("HWD:chip status:\n");
+ chips = &sbp->sb_cmu.cmu_cpu_chips[0];
+ for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
+
+ status = chips[i].chip_status;
+ printf("chip[%d] = ", i);
+ if (HWD_STATUS_NONE(status))
+ printf("none");
+ else if (HWD_STATUS_FAILED(status))
+ printf("fail");
+ else if (HWD_STATUS_OK(status))
+ printf("ok");
+ printf("\n");
+ }
+
+ if (hwddump_flags & HWDDUMP_CHUNKS) {
+ int chunk;
+ hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
+ printf("HWD:chunks:\n");
+ for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
+ printf("\t%d 0x%lx 0x%lx\n", chunk,
+ mem->mem_chunks[chunk].chnk_start_address,
+ mem->mem_chunks[chunk].chnk_size);
+ }
+
+ printf("HWD:channel status:\n");
+ channels = &sbp->sb_pci_ch[0];
+ for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
+
+ status = channels[i].pci_status;
+ printf("channels[%d] = ", i);
+ if (HWD_STATUS_NONE(status))
+ printf("none");
+ else if (HWD_STATUS_FAILED(status))
+ printf("fail");
+ else if (HWD_STATUS_OK(status))
+ printf("ok");
+ printf("\n");
+ }
+ /* One extra entry past the loop: the CMU channel status. */
+ printf("channels[%d] = ", i);
+ status = sbp->sb_cmu.cmu_ch.chan_status;
+ if (HWD_STATUS_NONE(status))
+ printf("none");
+ else if (HWD_STATUS_FAILED(status))
+ printf("fail");
+ else if (HWD_STATUS_OK(status))
+ printf("ok");
+ printf("\n");
+}
+#endif /* DEBUG */
+
+#ifdef UCTEST
+ /*
+ * For SesamI debugging, just map the SRAM directly to a kernel
+ * VA and read it out from there
+ */
+
+#include <sys/vmem.h>
+#include <vm/seg_kmem.h>
+
+/*
+ * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
+ * at page boundaries. So, we use a base address of 0x4081F1322000LL.
+ * Note that this has to match the HWD base pa set in .sesami-common-defs.
+ *
+ * The size specified for the HWD in the SCF spec is 36K. But since
+ * we adjusted the base address by 4K, we need to use 40K for the
+ * mapping size to cover the HWD. And 40K is also a multiple of the
+ * base page size.
+ */
+#define OPL_HWD_BASE(lsb) \
+(0x4081F1322000LL | (((uint64_t)(lsb)) << 40))
+
+ /* Kernel VA of the mapped HWD SRAM window; set in opl_read_hwd(). */
+ void *opl_hwd_vaddr;
+#endif /* UCTEST */
+
+/*
+ * Get the hardware descriptor from SCF.
+ */
+
+/*ARGSUSED*/
+/*
+ * Fetch (and cache) the hardware descriptor for a board, then hand
+ * back pointers to its header and major sections. Any of hdrp, statp,
+ * dinfop, sbp may be NULL if the caller does not want that section.
+ * Returns 0 on success, non-zero on failure.
+ */
+int
+opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
+ hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
+{
+ static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
+ void *) = NULL;
+ void *hwdp;
+
+ uint32_t key = KEY_ESCF; /* required value */
+ uint8_t type = 0x40; /* SUB_OS_RECEIVE_HWD */
+ uint32_t transid = board;
+ uint32_t datasize = HWD_DATA_SIZE;
+
+ hwd_header_t *hd;
+ hwd_sb_status_t *st;
+ hwd_domain_info_t *di;
+ hwd_sb_t *sb;
+
+ int ret;
+
+ /* First request for this board? Fetch and cache the HWD. */
+ if (opl_boards[board].cfg_hwd == NULL) {
+#ifdef UCTEST
+ /*
+ * Just map the HWD in SRAM to a kernel VA
+ */
+
+ size_t size;
+ pfn_t pfn;
+
+ size = 0xA000;
+
+ opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
+ if (opl_hwd_vaddr == NULL) {
+ cmn_err(CE_NOTE, "No space for HWD");
+ return (-1);
+ }
+
+ pfn = btop(OPL_HWD_BASE(board));
+ hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
+ HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
+
+ /* HWD proper starts 4K into the page-aligned mapping. */
+ hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
+ opl_boards[board].cfg_hwd = hwdp;
+ ret = 0;
+#else
+
+ /* find the scf_service_getinfo() function */
+ if (getinfop == NULL)
+ getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
+ uint32_t *,
+ void *))modgetsymvalue("scf_service_getinfo", 0);
+
+ if (getinfop == NULL)
+ return (-1);
+
+ /* allocate memory to receive the data */
+ hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);
+
+ /* get the HWD */
+ ret = (*getinfop)(key, type, transid, &datasize, hwdp);
+ if (ret == 0)
+ opl_boards[board].cfg_hwd = hwdp;
+ else
+ kmem_free(hwdp, HWD_DATA_SIZE);
+#endif
+ } else {
+ /* Already cached from a previous call. */
+ hwdp = opl_boards[board].cfg_hwd;
+ ret = 0;
+ }
+
+ /* copy the data to the destination */
+ if (ret == 0) {
+ hd = (hwd_header_t *)hwdp;
+ st = (hwd_sb_status_t *)
+ ((char *)hwdp + hd->hdr_sb_status_offset);
+ di = (hwd_domain_info_t *)
+ ((char *)hwdp + hd->hdr_domain_info_offset);
+ sb = (hwd_sb_t *)
+ ((char *)hwdp + hd->hdr_sb_info_offset);
+ if (hdrp != NULL)
+ *hdrp = hd;
+ if (statp != NULL)
+ *statp = st;
+ if (dinfop != NULL)
+ *dinfop = di;
+ if (sbp != NULL)
+ *sbp = sb;
+ }
+
+ return (ret);
+}
+
+/*
+ * The opl_probe_t probe structure is used to pass all sorts of parameters
+ * to callback functions during probing. It also contains a snapshot of
+ * the hardware descriptor that is taken at the beginning of a probe.
+ */
+static int
+opl_probe_init(opl_probe_t *probe)
+{
+ /*
+ * Snapshot the hardware descriptor for this board straight
+ * into the probe structure's section pointers.
+ */
+ if (opl_read_hwd(probe->pr_board, &probe->pr_hdr,
+ &probe->pr_sb_status, &probe->pr_dinfo, &probe->pr_sb) != 0) {
+ cmn_err(CE_WARN, "IKP: failed to read HWD header");
+ return (-1);
+ }
+
+#ifdef DEBUG
+ opl_dump_hwd(probe);
+#endif
+ return (0);
+}
+
+/*
+ * This function is used to obtain pointers to relevant device nodes
+ * which are created by Solaris at boot time.
+ *
+ * This function walks the child nodes of a given node, extracts
+ * the "name" property, if it exists, and passes the node to a
+ * callback init function. The callback determines if this node is
+ * interesting or not. If it is, then a pointer to the node is
+ * stored away by the callback for use during unprobe.
+ *
+ * The DDI get property function allocates storage for the name
+ * property. That needs to be freed within this function.
+ */
+/*
+ * Walk the immediate children of `parent', passing each named child
+ * to the `init' callback. The parent is held busy for the duration
+ * of the walk. Returns 0 on success, -1 if any callback fails.
+ */
+static int
+opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
+{
+ dev_info_t *node;
+ char *name;
+ int circ, ret;
+ int len;
+
+ ASSERT(parent != NULL);
+
+ /*
+ * Hold parent node busy to walk its child list
+ */
+ ndi_devi_enter(parent, &circ);
+ node = ddi_get_child(parent);
+
+ while (node != NULL) {
+
+ /* OPL_GET_PROP allocates `name'; freed below. */
+ ret = OPL_GET_PROP(string, node, "name", &name, &len);
+ if (ret != DDI_PROP_SUCCESS) {
+ /*
+ * The property does not exist for this node.
+ */
+ node = ddi_get_next_sibling(node);
+ continue;
+ }
+
+ ret = init(node, name, len);
+ kmem_free(name, len);
+ if (ret != 0) {
+
+ /* Callback failed: drop the busy hold and bail. */
+ ndi_devi_exit(parent, circ);
+ return (-1);
+ }
+
+ node = ddi_get_next_sibling(node);
+ }
+
+ ndi_devi_exit(parent, circ);
+
+ return (0);
+}
+
+/*
+ * This init function finds all the interesting nodes under the
+ * root node and stores pointers to them. The following nodes
+ * are considered interesting by this implementation:
+ *
+ * "cmp"
+ * These are nodes that represent processor chips.
+ *
+ * "pci"
+ * These are nodes that represent PCI leaves.
+ *
+ * "pseudo-mc"
+ * These are nodes that contain memory information.
+ */
+/*
+ * opl_init_nodes() callback: record pointers to the boot-time
+ * "cmp", "pci" and "pseudo-mc" nodes in the opl_boards table,
+ * indexed by board/LSB. Returns 0 on success, -1 if a required
+ * property is missing.
+ */
+static int
+opl_init_root_nodes(dev_info_t *node, char *name, int len)
+{
+ int portid, board, chip, channel, leaf;
+ int ret;
+
+ if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {
+
+ ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
+ if (ret != DDI_PROP_SUCCESS)
+ return (-1);
+
+ ret = OPL_GET_PROP(int, node, "board#", &board, -1);
+ if (ret != DDI_PROP_SUCCESS)
+ return (-1);
+
+ chip = OPL_CPU_CHIP(portid);
+ opl_boards[board].cfg_cpu_chips[chip] = node;
+
+ } else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
+
+ ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
+ if (ret != DDI_PROP_SUCCESS)
+ return (-1);
+
+ /* Board and channel are both encoded in the portid. */
+ board = OPL_IO_PORTID_TO_LSB(portid);
+ channel = OPL_PORTID_TO_CHANNEL(portid);
+
+ if (channel == OPL_CMU_CHANNEL) {
+
+ opl_boards[board].cfg_cmuch_leaf = node;
+
+ } else {
+
+ leaf = OPL_PORTID_TO_LEAF(portid);
+ opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
+ }
+ } else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {
+
+ ret = OPL_GET_PROP(int, node, "board#", &board, -1);
+ if (ret != DDI_PROP_SUCCESS)
+ return (-1);
+
+ ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));
+
+ opl_boards[board].cfg_pseudo_mc = node;
+ }
+
+ return (0);
+}
+
+/*
+ * This function initializes the OPL IKP feature. Currently, all it does
+ * is find the interesting nodes that Solaris has created at boot time
+ * for boards present at boot time and store pointers to them. This
+ * is useful if those boards are unprobed by DR.
+ */
+/*
+ * One-time initialization of the OPL IKP feature: locate and record
+ * the interesting boot-time nodes. Idempotent; returns 0 on success
+ * and 1 if the device tree walk fails.
+ */
+int
+opl_init_cfg()
+{
+ if (opl_cfg_inited != 0)
+ return (0);
+
+ if (opl_init_nodes(ddi_root_node(), opl_init_root_nodes) != 0) {
+ cmn_err(CE_WARN, "IKP: init failed");
+ return (1);
+ }
+
+ opl_cfg_inited = 1;
+ return (0);
+}
+
+/*
+ * When DR is initialized, we walk the device tree and acquire a hold on
+ * all the nodes that are interesting to IKP. This is so that the corresponding
+ * branches cannot be deleted.
+ *
+ * The following function informs the walk about which nodes are interesting
+ * so that it can hold the corresponding branches.
+ */
+/*
+ * Return non-zero when `name' identifies one of the branch roots
+ * that IKP manages ("cmp", "pseudo-mc" or "pci").
+ */
+static int
+opl_hold_node(char *name)
+{
+ if (strcmp(name, OPL_CPU_CHIP_NODE) == 0)
+ return (1);
+
+ if (strcmp(name, OPL_PSEUDO_MC_NODE) == 0)
+ return (1);
+
+ return (strcmp(name, OPL_PCI_LEAF_NODE) == 0);
+}
+
+/*
+ * ddi_walk_devs() callback: for each managed branch root, either
+ * hold or release the branch depending on the flag passed via arg.
+ * Always prunes the walk below the current node.
+ */
+static int
+opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
+{
+ int hold = *(int *)arg;
+
+ if (!opl_hold_node(ddi_node_name(rdip))) {
+ /* Not a branch IKP manages. */
+ return (DDI_WALK_PRUNECHILD);
+ }
+
+ if (hold) {
+ ASSERT(!e_ddi_branch_held(rdip));
+ e_ddi_branch_hold(rdip);
+ } else {
+ ASSERT(e_ddi_branch_held(rdip));
+ e_ddi_branch_rele(rdip);
+ }
+
+ return (DDI_WALK_PRUNECHILD);
+}
+
+/*
+ * Hold every IKP-managed branch in the device tree so that the
+ * corresponding branches cannot be deleted while DR is active.
+ */
+void
+opl_hold_devtree()
+{
+ int circ;
+ int hold = 1;
+ dev_info_t *root = ddi_root_node();
+
+ ndi_devi_enter(root, &circ);
+ ddi_walk_devs(ddi_get_child(root), opl_hold_rele_devtree, &hold);
+ ndi_devi_exit(root, circ);
+}
+
+/*
+ * Counterpart of opl_hold_devtree(): release the hold on every
+ * IKP-managed branch in the device tree.
+ */
+void
+opl_release_devtree()
+{
+ int circ;
+ int hold = 0;
+ dev_info_t *root = ddi_root_node();
+
+ ndi_devi_enter(root, &circ);
+ ddi_walk_devs(ddi_get_child(root), opl_hold_rele_devtree, &hold);
+ ndi_devi_exit(root, circ);
+}
+
+/*
+ * This is a helper function that allows opl_create_node() to return a
+ * pointer to a newly created node to its caller.
+ */
+/*ARGSUSED*/
+static void
+opl_set_node(dev_info_t *node, void *arg, uint_t flags)
+{
+ /* Stash the newly created node in the caller's probe struct. */
+ ((opl_probe_t *)arg)->pr_node = node;
+}
+
+/*
+ * Function to create a node in the device tree under a specified parent.
+ *
+ * e_ddi_branch_create() allows the creation of a whole branch with a
+ * single call of the function. However, we only use it to create one node
+ * at a time in the case of non-I/O device nodes. In other words, we
+ * create branches by repeatedly using this function. This makes the
+ * code more readable.
+ *
+ * The branch descriptor passed to e_ddi_branch_create() takes two
+ * callbacks. The create() callback is used to set the properties of a
+ * newly created node. The other callback is used to return a pointer
+ * to the newly created node. The create() callback is passed by the
+ * caller of this function based on the kind of node he wishes to
+ * create.
+ *
+ * e_ddi_branch_create() returns with the newly created node held. We
+ * only need to hold the top nodes of the branches we create. We release
+ * the hold for the others. E.g., the "cmp" node needs to be held. Since
+ * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
+ * nodes below it.
+ */
+/*
+ * Create a single node under probe->pr_parent using the create
+ * callback in probe->pr_create. Returns the new node (also stored
+ * in probe->pr_node by opl_set_node) or NULL on failure. The node
+ * is returned held only when probe->pr_hold is set.
+ */
+static dev_info_t *
+opl_create_node(opl_probe_t *probe)
+{
+ devi_branch_t branch;
+
+ probe->pr_node = NULL;
+
+ branch.arg = probe;
+ branch.type = DEVI_BRANCH_SID;
+ branch.create.sid_branch_create = probe->pr_create;
+ branch.devi_branch_callback = opl_set_node;
+
+ if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
+ return (NULL);
+
+ /* opl_set_node must have recorded the new node. */
+ ASSERT(probe->pr_node != NULL);
+
+ /* Only branch roots stay held; interior nodes are released. */
+ if (probe->pr_hold == 0)
+ e_ddi_branch_rele(probe->pr_node);
+
+ return (probe->pr_node);
+}
+
+/*
+ * Function to tear down a whole branch rooted at the specified node.
+ *
+ * Although we create each node of a branch individually, we destroy
+ * a whole branch in one call. This is more efficient.
+ */
+/*
+ * Tear down the whole branch rooted at `node' in a single call.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+opl_destroy_node(dev_info_t *node)
+{
+ return ((e_ddi_branch_destroy(node, NULL, 0) == 0) ? 0 : -1);
+}
+
+/*
+ * Set the properties for a "cpu" node.
+ */
+/*ARGSUSED*/
+/*
+ * Branch-create callback: set the properties of a new "cpu" node
+ * from the HWD cpu record selected by probe->pr_{cpu_chip,core,cpu}.
+ */
+static int
+opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
+{
+ opl_probe_t *probe;
+ hwd_cpu_chip_t *chip;
+ hwd_core_t *core;
+ hwd_cpu_t *cpu;
+ int ret; /* presumably referenced by OPL_UPDATE_PROP -- confirm */
+
+ probe = arg;
+ chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
+ core = &chip->chip_cores[probe->pr_core];
+ cpu = &core->core_cpus[probe->pr_cpu];
+ OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
+ OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);
+
+ OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
+ OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);
+
+ OPL_UPDATE_PROP(string, node, "status", "okay");
+
+ return (DDI_WALK_TERMINATE);
+}
+
+/*
+ * Create "cpu" nodes as child nodes of a given "core" node.
+ */
+/*
+ * Create a "cpu" node under the current "core" node for every cpu
+ * whose HWD status is OK. Returns 0 on success, -1 on failure.
+ */
+static int
+opl_probe_cpus(opl_probe_t *probe)
+{
+ int i;
+ hwd_cpu_chip_t *chip;
+ hwd_core_t *core;
+ hwd_cpu_t *cpus;
+
+ chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
+ core = &chip->chip_cores[probe->pr_core];
+ cpus = &core->core_cpus[0];
+
+ for (i = 0; i < HWD_CPUS_PER_CORE; i++) {
+
+ /*
+ * Olympus-C has 2 cpus per core.
+ * Jupiter has 4 cpus per core.
+ * For the Olympus-C based platform, we expect the cpu_status
+ * of the non-existent cpus to be set to missing.
+ */
+ if (!HWD_STATUS_OK(cpus[i].cpu_status))
+ continue;
+
+ probe->pr_create = opl_create_cpu;
+ probe->pr_cpu = i;
+ if (opl_create_node(probe) == NULL) {
+
+ cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
+ probe->pr_board, probe->pr_cpu_chip,
+ probe->pr_core, probe->pr_cpu);
+ return (-1);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Set the properties for a "core" node.
+ */
+/*ARGSUSED*/
+/*
+ * Branch-create callback: populate a new "core" node with identity,
+ * cache-geometry and TLB properties taken from the HWD core record.
+ */
+static int
+opl_create_core(dev_info_t *node, void *arg, uint_t flags)
+{
+ opl_probe_t *probe;
+ hwd_cpu_chip_t *chip;
+ hwd_core_t *core;
+ int sharing[2];
+ int ret; /* presumably referenced by OPL_UPDATE_PROP -- confirm */
+
+ probe = arg;
+ chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
+ core = &chip->chip_cores[probe->pr_core];
+
+ OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
+ OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
+ OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);
+
+ OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
+ OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
+ OPL_UPDATE_PROP(int, node, "implementation#",
+ core->core_implementation);
+ OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);
+
+ OPL_UPDATE_PROP(int, node, "sparc-version", core->core_version);
+ OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);
+
+ /* L1 instruction cache and ITLB geometry. */
+ OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
+ OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
+ core->core_l1_icache_line_size);
+ OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
+ core->core_l1_icache_associativity);
+ OPL_UPDATE_PROP(int, node, "#itlb-entries",
+ core->core_num_itlb_entries);
+
+ /* L1 data cache and DTLB geometry. */
+ OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
+ OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
+ core->core_l1_dcache_line_size);
+ OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
+ core->core_l1_dcache_associativity);
+ OPL_UPDATE_PROP(int, node, "#dtlb-entries",
+ core->core_num_dtlb_entries);
+
+ /* L2 cache geometry and sharing information. */
+ OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
+ OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
+ core->core_l2_cache_line_size);
+ OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
+ core->core_l2_cache_associativity);
+ sharing[0] = 0;
+ sharing[1] = core->core_l2_cache_sharing;
+ OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);
+
+ OPL_UPDATE_PROP(string, node, "status", "okay");
+
+ return (DDI_WALK_TERMINATE);
+}
+
+/*
+ * Create "core" nodes as child nodes of a given "cmp" node.
+ *
+ * Create the branch below each "core" node".
+ */
+/*
+ * Create a "core" node (and the "cpu" branch below it) under the
+ * current "cmp" node for every core whose HWD status is OK.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+opl_probe_cores(opl_probe_t *probe)
+{
+ int i;
+ hwd_cpu_chip_t *chip;
+ hwd_core_t *cores;
+ dev_info_t *parent, *node;
+
+ chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
+ cores = &chip->chip_cores[0];
+ /* Remember the "cmp" parent: pr_parent is retargeted below. */
+ parent = probe->pr_parent;
+
+ for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {
+
+ if (!HWD_STATUS_OK(cores[i].core_status))
+ continue;
+
+ probe->pr_parent = parent;
+ probe->pr_create = opl_create_core;
+ probe->pr_core = i;
+ node = opl_create_node(probe);
+ if (node == NULL) {
+
+ cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
+ probe->pr_board, probe->pr_cpu_chip,
+ probe->pr_core);
+ return (-1);
+ }
+
+ /*
+ * Create "cpu" nodes below "core".
+ */
+ probe->pr_parent = node;
+ if (opl_probe_cpus(probe) != 0)
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Set the properties for a "cmp" node.
+ */
+/*ARGSUSED*/
+/*
+ * Branch-create callback: populate a new "cmp" (processor chip)
+ * node with portid, board number and a dummy address-space "reg".
+ */
+static int
+opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
+{
+ opl_probe_t *probe;
+ hwd_cpu_chip_t *chip;
+ opl_range_t range;
+ uint64_t dummy_addr;
+ int ret; /* presumably referenced by OPL_UPDATE_PROP -- confirm */
+
+ probe = arg;
+ chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
+
+ OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);
+
+ OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
+ OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);
+
+ /* "reg" carries the chip's address-space cookie, zero-sized. */
+ dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
+ range.rg_addr_hi = OPL_HI(dummy_addr);
+ range.rg_addr_lo = OPL_LO(dummy_addr);
+ range.rg_size_hi = 0;
+ range.rg_size_lo = 0;
+ OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
+
+ OPL_UPDATE_PROP(int, node, "#address-cells", 1);
+ OPL_UPDATE_PROP(int, node, "#size-cells", 0);
+
+ OPL_UPDATE_PROP(string, node, "status", "okay");
+
+ return (DDI_WALK_TERMINATE);
+}
+
+/*
+ * Create "cmp" nodes as child nodes of the root node.
+ *
+ * Create the branch below each "cmp" node.
+ */
+/*
+ * Create a held "cmp" node under the root (and the "core"/"cpu"
+ * branch below it) for every chip whose HWD status is OK, recording
+ * each new node in the board's cfg_cpu_chips table.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+opl_probe_cpu_chips(opl_probe_t *probe)
+{
+ int i;
+ dev_info_t **cfg_cpu_chips;
+ hwd_cpu_chip_t *chips;
+ dev_info_t *node;
+
+ cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
+ chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];
+
+ for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
+
+ ASSERT(cfg_cpu_chips[i] == NULL);
+
+ if (!HWD_STATUS_OK(chips[i].chip_status))
+ continue;
+
+ probe->pr_parent = ddi_root_node();
+ probe->pr_create = opl_create_cpu_chip;
+ probe->pr_cpu_chip = i;
+ probe->pr_hold = 1;
+ node = opl_create_node(probe);
+ if (node == NULL) {
+
+ cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
+ probe->pr_board, probe->pr_cpu_chip);
+ return (-1);
+ }
+
+ cfg_cpu_chips[i] = node;
+
+ /*
+ * Create "core" nodes below "cmp".
+ * We hold the "cmp" node. So, there is no need to hold
+ * the "core" and "cpu" nodes below it.
+ */
+ probe->pr_parent = node;
+ probe->pr_hold = 0;
+ if (opl_probe_cores(probe) != 0)
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Set the properties for a "pseudo-mc" node.
+ */
+/*ARGSUSED*/
+/*
+ * Branch-create callback: populate a new "pseudo-mc" node with the
+ * board's memory-controller properties (bank addresses, PA translation
+ * tables and per-CS status) taken from the HWD memory record.
+ */
+static int
+opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
+{
+ opl_probe_t *probe;
+ int board, portid;
+ hwd_bank_t *bank;
+ hwd_memory_t *mem;
+ opl_range_t range;
+ opl_mc_addr_t mc[HWD_BANKS_PER_CMU];
+ int status[2][7];
+ int i, j;
+ int ret; /* presumably referenced by OPL_UPDATE_PROP -- confirm */
+
+ probe = arg;
+ board = probe->pr_board;
+
+ OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
+ OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
+ OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");
+
+ portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
+ OPL_UPDATE_PROP(int, node, "portid", portid);
+
+ range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
+ range.rg_addr_lo = 0x200;
+ range.rg_size_hi = 0;
+ range.rg_size_lo = 0;
+ OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
+
+ OPL_UPDATE_PROP(int, node, "board#", board);
+ OPL_UPDATE_PROP(int, node, "physical-board#",
+ probe->pr_sb->sb_psb_number);
+
+ OPL_UPDATE_PROP(int, node, "#address-cells", 1);
+ OPL_UPDATE_PROP(int, node, "#size-cells", 2);
+
+ mem = &probe->pr_sb->sb_cmu.cmu_memory;
+
+ range.rg_addr_hi = OPL_HI(mem->mem_start_address);
+ range.rg_addr_lo = OPL_LO(mem->mem_start_address);
+ range.rg_size_hi = OPL_HI(mem->mem_size);
+ range.rg_size_lo = OPL_LO(mem->mem_size);
+ OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);
+
+ /* "mc-addr" lists only the banks whose status is OK. */
+ bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
+ for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {
+
+ if (!HWD_STATUS_OK(bank[i].bank_status))
+ continue;
+
+ mc[j].mc_bank = i;
+ mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
+ mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
+ j++;
+ }
+ ASSERT(j > 0);
+ OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);
+
+ OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
+ mem->mem_cs[0].cs_pa_mac_table, 64);
+ OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
+ mem->mem_cs[1].cs_pa_mac_table, 64);
+
+#define CS_PER_MEM 2
+
+ /* "cs-status": 7 ints per populated CS (id, failed-flag, sizes). */
+ for (i = 0, j = 0; i < CS_PER_MEM; i++) {
+ if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
+ HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
+ status[j][0] = i;
+ if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
+ status[j][1] = 0;
+ else
+ status[j][1] = 1;
+ status[j][2] =
+ OPL_HI(mem->mem_cs[i].cs_available_capacity);
+ status[j][3] =
+ OPL_LO(mem->mem_cs[i].cs_available_capacity);
+ status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
+ status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
+ status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
+ j++;
+ }
+ }
+ ASSERT(j > 0);
+ OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
+ j*7);
+
+ return (DDI_WALK_TERMINATE);
+}
+
+/*
+ * Create "pseudo-mc" nodes
+ */
+/*
+ * Create the board's held "pseudo-mc" node under the root and record
+ * it in the board configuration. Returns 0 on success, -1 on failure.
+ */
+static int
+opl_probe_memory(opl_probe_t *probe)
+{
+ opl_board_cfg_t *board_cfg = &opl_boards[probe->pr_board];
+ dev_info_t *node;
+
+ ASSERT(board_cfg->cfg_pseudo_mc == NULL);
+
+ probe->pr_parent = ddi_root_node();
+ probe->pr_create = opl_create_pseudo_mc;
+ probe->pr_hold = 1;
+
+ node = opl_create_node(probe);
+ if (node == NULL) {
+ cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed",
+ probe->pr_board);
+ return (-1);
+ }
+
+ board_cfg->cfg_pseudo_mc = node;
+ return (0);
+}
+
+/*
+ * Allocate the fcode ops handle.
+ *
+ * Wraps the generic fc_ops handle and records the child's nodeid in
+ * the phandle table. A private copy of unit_address is kept in a
+ * fixed UNIT_ADDR_SIZE buffer (freed in opl_fc_ops_free_handle()).
+ */
+/*ARGSUSED*/
+static
+fco_handle_t
+opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
+ void *fcode, size_t fcode_size, char *unit_address,
+ char *my_args)
+{
+ fco_handle_t rp;
+ phandle_t h;
+ char *buf;
+
+ rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
+ rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
+ unit_address, NULL);
+ rp->ap = parent;
+ rp->child = child;
+ rp->fcode = fcode;
+ rp->fcode_size = fcode_size;
+ rp->my_args = my_args;
+
+ if (unit_address) {
+ buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
+ /*
+ * Bound the copy: the destination is a fixed
+ * UNIT_ADDR_SIZE allocation but the caller-supplied
+ * string length is never checked, so a plain strcpy()
+ * could overflow it. strlcpy() truncates safely and
+ * always NUL-terminates.
+ */
+ (void) strlcpy(buf, unit_address, UNIT_ADDR_SIZE);
+ rp->unit_address = buf;
+ }
+
+ /*
+ * Add the child's nodeid to our table...
+ */
+ h = ddi_get_nodeid(rp->child);
+ fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
+
+ return (rp);
+}
+
+
+/*
+ * Free an fcode ops handle: release the chained generic handle, the
+ * unit address copy, and every resource still on the handle's list.
+ */
+static void
+opl_fc_ops_free_handle(fco_handle_t rp)
+{
+ struct fc_resource *resp, *nresp;
+
+ ASSERT(rp);
+
+ if (rp->next_handle)
+ fc_ops_free_handle(rp->next_handle);
+ if (rp->unit_address)
+ kmem_free(rp->unit_address, UNIT_ADDR_SIZE);
+
+ /*
+ * Release all the resources from the resource list
+ */
+ for (resp = rp->head; resp != NULL; resp = nresp) {
+ /* Save the link: resp is freed at the bottom of the loop. */
+ nresp = resp->next;
+ switch (resp->type) {
+
+ case RT_MAP:
+ /* Mappings are torn down via opl_map_out(). */
+ break;
+
+ case RT_DMA:
+ /*
+ * DMA has to be freed up at exit time.
+ */
+ cmn_err(CE_CONT,
+ "opl_fc_ops_free_handle: Unexpected DMA seen!");
+ break;
+
+ case RT_CONTIGIOUS:
+ FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
+ "Free claim-memory resource 0x%lx size 0x%x\n",
+ resp->fc_contig_virt, resp->fc_contig_len);
+
+ /* Return claimed memory to the fcode mem pool. */
+ (void) ndi_ra_free(ddi_root_node(),
+ (uint64_t)resp->fc_contig_virt,
+ resp->fc_contig_len, "opl-fcodemem",
+ NDI_RA_PASS);
+
+ break;
+
+ default:
+ cmn_err(CE_CONT, "opl_fc_ops_free: "
+ "unknown resource type %d", resp->type);
+ break;
+ }
+ fc_rem_resource(rp, resp);
+ kmem_free(resp, sizeof (struct fc_resource));
+ }
+
+ kmem_free(rp, sizeof (struct fc_resource_list));
+}
+
+/*
+ * Dispatch an fcode client-interface request: try the generic fc_ops
+ * first, then the OPL-specific handlers in the opl_fc_ops table.
+ * Returns the handler's result, or -1 if no handler claims the
+ * service.
+ */
+int
+opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+ opl_fc_ops_t *op;
+ char *service = fc_cell2ptr(cp->svc_name);
+
+ ASSERT(rp);
+
+ FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);
+
+ /*
+ * First try the generic fc_ops.
+ */
+ if (fc_ops(ap, rp->next_handle, cp) == 0)
+ return (0);
+
+ /*
+ * Now try the Jupiter-specific ops.
+ */
+ for (op = opl_fc_ops; op->fc_service != NULL; ++op)
+ if (strcmp(op->fc_service, service) == 0)
+ return (op->fc_op(ap, rp, cp));
+
+ FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);
+
+ return (-1);
+}
+
+/*
+ * map-in (phys.lo phys.hi size -- virt)
+ */
+/*
+ * Fcode "map-in" service (phys.lo phys.hi size -- virt): map a
+ * physical register range for the child and log the mapping as an
+ * RT_MAP resource on the handle so map-out (or handle teardown)
+ * can find it later.
+ */
+static int
+opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+ size_t len;
+ int error;
+ caddr_t virt;
+ struct fc_resource *resp;
+ struct regspec rspec;
+ ddi_device_acc_attr_t acc;
+ ddi_acc_handle_t h;
+
+ if (fc_cell2int(cp->nargs) != 3)
+ return (fc_syntax_error(cp, "nargs must be 3"));
+
+ if (fc_cell2int(cp->nresults) < 1)
+ return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+ /* Arguments arrive in reverse stack order: size, hi, lo. */
+ rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
+ rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
+ rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));
+
+ acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
+ acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+ FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
+ "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
+ rspec.regspec_addr, rspec.regspec_size);
+
+ error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);
+
+ if (error) {
+ FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
+ "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
+ rspec.regspec_addr, rspec.regspec_size);
+
+ return (fc_priv_error(cp, "opl map-in failed"));
+ }
+
+ FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);
+
+ cp->nresults = fc_int2cell(1);
+ fc_result(cp, 0) = fc_ptr2cell(virt);
+
+ /*
+ * Log this resource ...
+ */
+ resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
+ resp->type = RT_MAP;
+ resp->fc_map_virt = virt;
+ resp->fc_map_len = len;
+ resp->fc_map_handle = h;
+ fc_add_resource(rp, resp);
+
+ return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * map-out (virt size -- )
+ *
+ * Tear down a mapping previously created by map-in.  The (virt, len)
+ * pair must exactly match an RT_MAP entry on the handle's resource
+ * list; otherwise the request is rejected.  On success the mapping is
+ * unmapped and the resource record is removed and freed.
+ */
+static int
+opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	caddr_t virt;
+	size_t len;
+	struct fc_resource *resp;
+
+	if (fc_cell2int(cp->nargs) != 2)
+		return (fc_syntax_error(cp, "nargs must be 2"));
+
+	/* Stack order: arg 0 = size, arg 1 = virt */
+	virt = fc_cell2ptr(fc_arg(cp, 1));
+
+	len = fc_cell2size(fc_arg(cp, 0));
+
+	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
+	    virt, len);
+
+	/*
+	 * Find if this request matches a mapping resource we set up.
+	 */
+	fc_lock_resource_list(rp);
+	for (resp = rp->head; resp != NULL; resp = resp->next) {
+		if (resp->type != RT_MAP)
+			continue;
+		if (resp->fc_map_virt != virt)
+			continue;
+		if (resp->fc_map_len == len)
+			break;
+	}
+	fc_unlock_resource_list(rp);
+
+	if (resp == NULL)
+		return (fc_priv_error(cp, "request doesn't match a "
+		    "known mapping"));
+
+	opl_unmap_phys(&resp->fc_map_handle);
+
+	/*
+	 * remove the resource from the list and release it.
+	 */
+	fc_rem_resource(rp, resp);
+	kmem_free(resp, sizeof (struct fc_resource));
+
+	cp->nresults = fc_int2cell(0);
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_register_fetch: implement the fcode register-read services
+ * "rx@", "rl@", "rw@" and "rb@" (64/32/16/8-bit fetch).  The target
+ * virtual address must be aligned to the access width and fall wholly
+ * within a mapping (RT_MAP) or contiguous-memory (RT_CONTIGIOUS)
+ * resource previously logged on this handle.  RT_MAP addresses are
+ * read with the fault-tolerant ddi_peek*() routines; RT_CONTIGIOUS
+ * memory is dereferenced directly.  The fetched value is pushed as
+ * the single result cell.
+ */
+static int
+opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	size_t len;
+	caddr_t virt;
+	int error = 0;
+	uint64_t v;
+	uint64_t x;
+	uint32_t l;
+	uint16_t w;
+	uint8_t b;
+	char *service = fc_cell2ptr(cp->svc_name);
+	struct fc_resource *resp;
+
+	if (fc_cell2int(cp->nargs) != 1)
+		return (fc_syntax_error(cp, "nargs must be 1"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	virt = fc_cell2ptr(fc_arg(cp, 0));
+
+	/*
+	 * Determine the access width .. we can switch on the 2nd
+	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
+	 *
+	 * NOTE(review): there is no default case, so if the service
+	 * name is ever something unexpected, `len' is used
+	 * uninitialized below — presumably the ops table guarantees
+	 * only these four names reach here; confirm.
+	 */
+	switch (*(service + 1)) {
+	case 'x': len = sizeof (x); break;
+	case 'l': len = sizeof (l); break;
+	case 'w': len = sizeof (w); break;
+	case 'b': len = sizeof (b); break;
+	}
+
+	/*
+	 * Check the alignment ...
+	 */
+	if (((intptr_t)virt & (len - 1)) != 0)
+		return (fc_priv_error(cp, "unaligned access"));
+
+	/*
+	 * Find if this virt is 'within' a request we know about
+	 */
+	fc_lock_resource_list(rp);
+	for (resp = rp->head; resp != NULL; resp = resp->next) {
+		if (resp->type == RT_MAP) {
+			if ((virt >= (caddr_t)resp->fc_map_virt) &&
+			    ((virt + len) <=
+			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
+				break;
+		} else if (resp->type == RT_CONTIGIOUS) {
+			if ((virt >= (caddr_t)resp->fc_contig_virt) && ((virt + len)
+			    <= ((caddr_t)resp->fc_contig_virt +
+			    resp->fc_contig_len)))
+				break;
+		}
+	}
+	fc_unlock_resource_list(rp);
+
+	if (resp == NULL) {
+		return (fc_priv_error(cp, "request not within "
+		    "known mappings"));
+	}
+
+	/*
+	 * Perform the access at the requested width; `v' carries the
+	 * value only for the debug message below.
+	 */
+	switch (len) {
+	case sizeof (x):
+		if (resp->type == RT_MAP)
+			error = ddi_peek64(rp->child,
+			    (int64_t *)virt, (int64_t *)&x);
+		else /* RT_CONTIGIOUS */
+			x = *(int64_t *)virt;
+		v = x;
+		break;
+	case sizeof (l):
+		if (resp->type == RT_MAP)
+			error = ddi_peek32(rp->child,
+			    (int32_t *)virt, (int32_t *)&l);
+		else /* RT_CONTIGIOUS */
+			l = *(int32_t *)virt;
+		v = l;
+		break;
+	case sizeof (w):
+		if (resp->type == RT_MAP)
+			error = ddi_peek16(rp->child,
+			    (int16_t *)virt, (int16_t *)&w);
+		else /* RT_CONTIGIOUS */
+			w = *(int16_t *)virt;
+		v = w;
+		break;
+	case sizeof (b):
+		if (resp->type == RT_MAP)
+			error = ddi_peek8(rp->child,
+			    (int8_t *)virt, (int8_t *)&b);
+		else /* RT_CONTIGIOUS */
+			b = *(int8_t *)virt;
+		v = b;
+		break;
+	}
+
+	if (error == DDI_FAILURE) {
+		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
+		    "accessing virt %p len %d\n", virt, len);
+		return (fc_priv_error(cp, "access error"));
+	}
+
+	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
+	    service, virt, v);
+
+	cp->nresults = fc_int2cell(1);
+	switch (len) {
+	case sizeof (x): fc_result(cp, 0) = x; break;
+	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
+	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
+	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
+	}
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_register_store: implement the fcode register-write services
+ * "rx!", "rl!", "rw!" and "rb!" (64/32/16/8-bit store).  As with
+ * opl_register_fetch, the target address must be width-aligned and
+ * contained in a logged RT_MAP or RT_CONTIGIOUS resource.  RT_MAP
+ * addresses are written with ddi_poke*(); RT_CONTIGIOUS memory is
+ * stored to directly.  No result cells are pushed.
+ */
+static int
+opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	size_t len;
+	caddr_t virt;
+	uint64_t v;
+	uint64_t x;
+	uint32_t l;
+	uint16_t w;
+	uint8_t b;
+	char *service = fc_cell2ptr(cp->svc_name);
+	struct fc_resource *resp;
+	int error = 0;
+
+	if (fc_cell2int(cp->nargs) != 2)
+		return (fc_syntax_error(cp, "nargs must be 2"));
+
+	virt = fc_cell2ptr(fc_arg(cp, 0));
+
+	/*
+	 * Determine the access width .. we can switch on the 2nd
+	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
+	 *
+	 * NOTE(review): no default case — `len' and `v' are used
+	 * uninitialized if an unexpected service name reaches here;
+	 * presumably the ops table prevents that.  Confirm.
+	 */
+	switch (*(service + 1)) {
+	case 'x':
+		len = sizeof (x);
+		x = fc_arg(cp, 1);
+		v = x;
+		break;
+	case 'l':
+		len = sizeof (l);
+		l = fc_cell2uint32_t(fc_arg(cp, 1));
+		v = l;
+		break;
+	case 'w':
+		len = sizeof (w);
+		w = fc_cell2uint16_t(fc_arg(cp, 1));
+		v = w;
+		break;
+	case 'b':
+		len = sizeof (b);
+		b = fc_cell2uint8_t(fc_arg(cp, 1));
+		v = b;
+		break;
+	}
+
+	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
+	    service, virt, v);
+
+	/*
+	 * Check the alignment ...
+	 */
+	if (((intptr_t)virt & (len - 1)) != 0)
+		return (fc_priv_error(cp, "unaligned access"));
+
+	/*
+	 * Find if this virt is 'within' a request we know about
+	 */
+	fc_lock_resource_list(rp);
+	for (resp = rp->head; resp != NULL; resp = resp->next) {
+		if (resp->type == RT_MAP) {
+			if ((virt >= (caddr_t)resp->fc_map_virt) &&
+			    ((virt + len) <=
+			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
+				break;
+		} else if (resp->type == RT_CONTIGIOUS) {
+			if ((virt >= (caddr_t)resp->fc_contig_virt) && ((virt + len)
+			    <= ((caddr_t)resp->fc_contig_virt +
+			    resp->fc_contig_len)))
+				break;
+		}
+	}
+	fc_unlock_resource_list(rp);
+
+	/*
+	 * NOTE(review): adjacent string literals below concatenate to
+	 * "request not withinknown mappings" — missing space.
+	 */
+	if (resp == NULL)
+		return (fc_priv_error(cp, "request not within"
+		    "known mappings"));
+
+	switch (len) {
+	case sizeof (x):
+		if (resp->type == RT_MAP)
+			error = ddi_poke64(rp->child, (int64_t *)virt, x);
+		else if (resp->type == RT_CONTIGIOUS)
+			*(uint64_t *)virt = x;
+		break;
+	case sizeof (l):
+		if (resp->type == RT_MAP)
+			error = ddi_poke32(rp->child, (int32_t *)virt, l);
+		else if (resp->type == RT_CONTIGIOUS)
+			*(uint32_t *)virt = l;
+		break;
+	case sizeof (w):
+		if (resp->type == RT_MAP)
+			error = ddi_poke16(rp->child, (int16_t *)virt, w);
+		else if (resp->type == RT_CONTIGIOUS)
+			*(uint16_t *)virt = w;
+		break;
+	case sizeof (b):
+		if (resp->type == RT_MAP)
+			error = ddi_poke8(rp->child, (int8_t *)virt, b);
+		else if (resp->type == RT_CONTIGIOUS)
+			*(uint8_t *)virt = b;
+		break;
+	}
+
+	if (error == DDI_FAILURE) {
+		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
+		    "accessing virt %p len %d\n", virt, len);
+		return (fc_priv_error(cp, "access error"));
+	}
+
+	cp->nresults = fc_int2cell(0);
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_claim_memory
+ *
+ * claim-memory (align size vhint -- vaddr)
+ *
+ * Allocate `size' bytes of physically contiguous memory for the fcode
+ * program from the "opl-fcodemem" NDI resource-allocator pool, bounded
+ * to the low 4GB and aligned per `align'.  The virtual-address hint is
+ * accepted but ignored (warned about if non-zero).  The allocation is
+ * logged as an RT_CONTIGIOUS resource so release-memory and handle
+ * teardown can free it.  Pushes the allocated address as the result.
+ */
+static int
+opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	int align, size, vhint;
+	uint64_t answer, alen;
+	ndi_ra_request_t request;
+	struct fc_resource *resp;
+
+	if (fc_cell2int(cp->nargs) != 3)
+		return (fc_syntax_error(cp, "nargs must be 3"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	/* Stack order: arg 0 = align, arg 1 = size, arg 2 = vhint */
+	vhint = fc_cell2int(fc_arg(cp, 2));
+	size = fc_cell2int(fc_arg(cp, 1));
+	align = fc_cell2int(fc_arg(cp, 0));
+
+	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
+	    "vhint=0x%x\n", align, size, vhint);
+
+	if (size == 0) {
+		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
+		    "contiguous memory of size zero\n");
+		return (fc_priv_error(cp, "allocation error"));
+	}
+
+	if (vhint) {
+		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
+		    "vhint=0x%x - Ignoring Argument\n", vhint);
+	}
+
+	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
+	request.ra_flags = NDI_RA_ALLOC_BOUNDED;
+	request.ra_boundbase = 0;
+	request.ra_boundlen = 0xffffffff;
+	/* NOTE(review): align == 0 yields mask 0xffffffff... — confirm callers pass align >= 1 */
+	request.ra_len = size;
+	request.ra_align_mask = align - 1;
+
+	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
+	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
+		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
+		    "contiguous memory\n");
+		return (fc_priv_error(cp, "allocation error"));
+	}
+
+	/* NOTE(review): alen is uint64_t but printed with %x */
+	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
+	    "size=0x%x\n", answer, alen);
+
+	cp->nresults = fc_int2cell(1);
+	fc_result(cp, 0) = answer;
+
+	/*
+	 * Log this resource ...
+	 */
+	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
+	resp->type = RT_CONTIGIOUS;
+	resp->fc_contig_virt = (void *)answer;
+	resp->fc_contig_len = size;
+	fc_add_resource(rp, resp);
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_release_memory
+ *
+ * release-memory (size vaddr -- )
+ *
+ * Return memory obtained via claim-memory to the "opl-fcodemem" pool.
+ * The (vaddr, size) pair must exactly match a logged RT_CONTIGIOUS
+ * resource, which is then removed from the handle's list and freed.
+ * NOTE(review): vaddr/size are narrowed to int32_t — assumes fcode
+ * allocations live below 4GB (consistent with claim-memory's bound).
+ */
+static int
+opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	int32_t vaddr, size;
+	struct fc_resource *resp;
+
+	if (fc_cell2int(cp->nargs) != 2)
+		return (fc_syntax_error(cp, "nargs must be 2"));
+
+	if (fc_cell2int(cp->nresults) != 0)
+		return (fc_syntax_error(cp, "nresults must be 0"));
+
+	/* Stack order: arg 0 = size, arg 1 = vaddr */
+	vaddr = fc_cell2int(fc_arg(cp, 1));
+	size = fc_cell2int(fc_arg(cp, 0));
+
+	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
+	    vaddr, size);
+
+	/*
+	 * Find if this request matches a mapping resource we set up.
+	 */
+	fc_lock_resource_list(rp);
+	for (resp = rp->head; resp != NULL; resp = resp->next) {
+		if (resp->type != RT_CONTIGIOUS)
+			continue;
+		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
+			continue;
+		if (resp->fc_contig_len == size)
+			break;
+	}
+	fc_unlock_resource_list(rp);
+
+	if (resp == NULL)
+		return (fc_priv_error(cp, "request doesn't match a "
+		    "known mapping"));
+
+	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
+	    "opl-fcodemem", NDI_RA_PASS);
+
+	/*
+	 * remove the resource from the list and release it.
+	 */
+	fc_rem_resource(rp, resp);
+	kmem_free(resp, sizeof (struct fc_resource));
+
+	cp->nresults = fc_int2cell(0);
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_vtop
+ *
+ * vtop (vaddr -- paddr.lo paddr.hi)
+ *
+ * Translate a virtual address previously handed out by claim-memory
+ * into its physical address.  Only the start address of a logged
+ * RT_CONTIGIOUS resource is accepted.  Pushes paddr.lo and a zero
+ * paddr.hi (allocations are bounded below 4GB).
+ */
+static int
+opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	int vaddr;
+	uint64_t paddr;
+	struct fc_resource *resp;
+
+	if (fc_cell2int(cp->nargs) != 1)
+		return (fc_syntax_error(cp, "nargs must be 1"));
+
+	/*
+	 * NOTE(review): the test rejects nresults >= 3 but the message
+	 * says "less than 2", and two results are pushed below — the
+	 * condition and the message are inconsistent; confirm intent.
+	 */
+	if (fc_cell2int(cp->nresults) >= 3)
+		return (fc_syntax_error(cp, "nresults must be less than 2"));
+
+	vaddr = fc_cell2int(fc_arg(cp, 0));
+
+	/*
+	 * Find if this request matches a mapping resource we set up.
+	 */
+	fc_lock_resource_list(rp);
+	for (resp = rp->head; resp != NULL; resp = resp->next) {
+		if (resp->type != RT_CONTIGIOUS)
+			continue;
+		if (resp->fc_contig_virt == (void *)(uintptr_t)vaddr)
+			break;
+	}
+	fc_unlock_resource_list(rp);
+
+	if (resp == NULL)
+		return (fc_priv_error(cp, "request doesn't match a "
+		    "known mapping"));
+
+	paddr = va_to_pa((void *)(uintptr_t)vaddr);
+
+	/* NOTE(review): paddr is uint64_t but printed with %x */
+	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
+	    vaddr, paddr);
+
+	cp->nresults = fc_int2cell(2);
+
+	fc_result(cp, 0) = paddr;
+	fc_result(cp, 1) = 0;
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_config_child: fcode "config-child" service.  Returns the
+ * phandle of the child node being configured (rp->child) as the
+ * single result cell; takes no arguments.
+ */
+static int
+opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	fc_phandle_t h;
+
+	if (fc_cell2int(cp->nargs) != 0)
+		return (fc_syntax_error(cp, "nargs must be 0"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
+
+	cp->nresults = fc_int2cell(1);
+	fc_result(cp, 0) = fc_phandle2cell(h);
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_get_fcode: copy a named fcode drop-in image out to a user
+ * buffer.  Args: (dropin-name-addr fcode-buf-addr fcode-len).  The
+ * drop-in name is copied in from user space, the image is fetched via
+ * prom_get_fcode() into a kernel staging buffer, then copied out to
+ * the caller's buffer.  Pushes a status cell: non-zero on success,
+ * 0 on any failure (copyin fault, prom lookup failure, copyout fault).
+ * NOTE(review): this treats a non-zero prom_get_fcode() return as
+ * success — confirm that return convention.
+ */
+static int
+opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	caddr_t dropin_name_virt, fcode_virt;
+	char *dropin_name, *fcode;
+	int fcode_len, status;
+
+	if (fc_cell2int(cp->nargs) != 3)
+		return (fc_syntax_error(cp, "nargs must be 3"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));
+
+	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
+
+	fcode_len = fc_cell2int(fc_arg(cp, 2));
+
+	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
+
+	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);
+
+	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
+	    FC_SVC_NAME_LEN - 1, NULL)) {
+		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
+		    "fault copying in drop in name %p\n", dropin_name_virt);
+		status = 0;
+	} else {
+		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);
+
+		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
+
+		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {
+
+			if (copyout((void *)fcode, (void *)fcode_virt,
+			    fcode_len)) {
+				cmn_err(CE_WARN, " opl_get_fcode: Unable "
+				    "to copy out fcode image");
+				status = 0;
+			}
+		}
+
+		kmem_free(fcode, fcode_len);
+	}
+
+	kmem_free(dropin_name, FC_SVC_NAME_LEN);
+
+	cp->nresults = fc_int2cell(1);
+	fc_result(cp, 0) = status;
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_get_fcode_size: return the size of a named fcode drop-in image.
+ * Args: (dropin-name-addr).  The name is copied in from user space
+ * and looked up with prom_get_fcode_size().  Pushes the length as the
+ * single result cell; 0 if the name could not be copied in.
+ */
+static int
+opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	caddr_t virt;
+	char *dropin_name;
+	int len;
+
+	if (fc_cell2int(cp->nargs) != 1)
+		return (fc_syntax_error(cp, "nargs must be 1"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	virt = fc_cell2ptr(fc_arg(cp, 0));
+
+	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
+
+	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");
+
+	if (copyinstr(fc_cell2ptr(virt), dropin_name,
+	    FC_SVC_NAME_LEN - 1, NULL)) {
+		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
+		    "fault copying in drop in name %p\n", virt);
+		len = 0;
+	} else {
+		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);
+
+		len = prom_get_fcode_size(dropin_name);
+	}
+
+	kmem_free(dropin_name, FC_SVC_NAME_LEN);
+
+	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);
+
+	cp->nresults = fc_int2cell(1);
+	fc_result(cp, 0) = len;
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * opl_map_phys: map a physical register spec into kernel virtual
+ * space.  Allocates a DDI access handle, caches a private copy of the
+ * regspec in ah_bus_private (freed by opl_unmap_phys), and performs a
+ * DDI_MO_MAP_LOCKED ddi_map() against `dip'.  On success *addrp and
+ * *handlep are valid; on failure the handle is freed and *handlep is
+ * NULL.  Returns the ddi_map() result (DDI_SUCCESS or error).
+ */
+static int
+opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
+    caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
+    ddi_acc_handle_t *handlep)
+{
+	ddi_map_req_t mapreq;
+	ddi_acc_hdl_t *acc_handlep;
+	int result;
+	struct regspec *rspecp;
+
+	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
+	acc_handlep = impl_acc_hdl_get(*handlep);
+	acc_handlep->ah_vers = VERS_ACCHDL;
+	acc_handlep->ah_dip = dip;
+	acc_handlep->ah_rnumber = 0;
+	acc_handlep->ah_offset = 0;
+	acc_handlep->ah_len = 0;
+	acc_handlep->ah_acc = *accattrp;
+	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
+	*rspecp = *phys_spec;
+	/*
+	 * cache a copy of the reg spec
+	 */
+	acc_handlep->ah_bus_private = rspecp;
+
+	mapreq.map_op = DDI_MO_MAP_LOCKED;
+	mapreq.map_type = DDI_MT_REGSPEC;
+	mapreq.map_obj.rp = (struct regspec *)phys_spec;
+	mapreq.map_prot = PROT_READ | PROT_WRITE;
+	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
+	mapreq.map_handlep = acc_handlep;
+	mapreq.map_vers = DDI_MAP_VERSION;
+
+	result = ddi_map(dip, &mapreq, 0, 0, addrp);
+
+	if (result != DDI_SUCCESS) {
+		/*
+		 * NOTE(review): the cached rspecp appears to be freed
+		 * by impl_acc_hdl_free() via the handle — confirm it
+		 * is not leaked on this failure path.
+		 */
+		impl_acc_hdl_free(*handlep);
+		*handlep = (ddi_acc_handle_t)NULL;
+	} else {
+		acc_handlep->ah_addr = *addrp;
+	}
+
+	return (result);
+}
+
+/*
+ * opl_unmap_phys: undo opl_map_phys.  Retrieves the cached regspec
+ * from the access handle, issues a DDI_MO_UNMAP ddi_map() call, then
+ * frees both the handle and the cached regspec and NULLs *handlep.
+ */
+static void
+opl_unmap_phys(ddi_acc_handle_t *handlep)
+{
+	ddi_map_req_t mapreq;
+	ddi_acc_hdl_t *acc_handlep;
+	struct regspec *rspecp;
+
+	acc_handlep = impl_acc_hdl_get(*handlep);
+	ASSERT(acc_handlep);
+	rspecp = acc_handlep->ah_bus_private;
+
+	mapreq.map_op = DDI_MO_UNMAP;
+	mapreq.map_type = DDI_MT_REGSPEC;
+	mapreq.map_obj.rp = (struct regspec *)rspecp;
+	mapreq.map_prot = PROT_READ | PROT_WRITE;
+	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
+	mapreq.map_handlep = acc_handlep;
+	mapreq.map_vers = DDI_MAP_VERSION;
+
+	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
+	    acc_handlep->ah_len, &acc_handlep->ah_addr);
+
+	impl_acc_hdl_free(*handlep);
+	/*
+	 * Free the cached copy
+	 */
+	kmem_free(rspecp, sizeof (struct regspec));
+	*handlep = (ddi_acc_handle_t)NULL;
+}
+
+/*
+ * opl_get_hwd_va: copy the hardware descriptor (HWD) entry for a
+ * given I/O port out to a user buffer.  Args: (portid hwd-buf-addr).
+ * The portid is decoded into LSB/channel/leaf, the board's cached HWD
+ * is located, and either the CMU-channel descriptor or the PCI-leaf
+ * descriptor is copied out.  Pushes a status cell: 1 on success,
+ * 0 if the copyout faulted.
+ */
+static int
+opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
+{
+	uint32_t portid;
+	void *hwd_virt;
+	hwd_header_t *hwd_h = NULL;
+	hwd_sb_t *hwd_sb = NULL;
+	int lsb, ch, leaf;
+	int status = 1;
+
+	/* Check the argument */
+	if (fc_cell2int(cp->nargs) != 2)
+		return (fc_syntax_error(cp, "nargs must be 2"));
+
+	if (fc_cell2int(cp->nresults) < 1)
+		return (fc_syntax_error(cp, "nresults must be >= 1"));
+
+	/* Get the parameters */
+	portid = fc_cell2uint32_t(fc_arg(cp, 0));
+	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));
+
+	/* Get the ID numbers */
+	lsb = OPL_IO_PORTID_TO_LSB(portid);
+	ch = OPL_PORTID_TO_CHANNEL(portid);
+	leaf = OPL_PORTID_TO_LEAF(portid);
+	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);
+
+	/* Set the pointer of hwd. */
+	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
+		return (fc_priv_error(cp, "null hwd header"));
+	}
+	/* Set the pointer of hwd sb. */
+	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
+	    == NULL) {
+		return (fc_priv_error(cp, "null hwd sb"));
+	}
+
+	if (ch == OPL_CMU_CHANNEL) {
+		/* Copyout CMU-CH HW Descriptor */
+		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
+		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
+			cmn_err(CE_WARN, "opl_get_hwd_va: "
+			    "Unable to copy out cmuch descriptor for %x",
+			    portid);
+			status = 0;
+		}
+	} else {
+		/* Copyout PCI-CH HW Descriptor */
+		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
+		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
+			cmn_err(CE_WARN, "opl_get_hwd_va: "
+			    "Unable to copy out pcich descriptor for %x",
+			    portid);
+			status = 0;
+		}
+	}
+
+	cp->nresults = fc_int2cell(1);
+	fc_result(cp, 0) = status;
+
+	return (fc_success_op(ap, rp, cp));
+}
+
+/*
+ * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
+ *
+ * Branch-create callback: sets "name" and "status" on the new node.
+ * NOTE(review): `ret' is unused here directly — presumably the
+ * OPL_UPDATE_PROP macro expands to code that assigns/tests it.
+ */
+/*ARGSUSED*/
+static int
+opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
+{
+	int ret;
+
+	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);
+
+	OPL_UPDATE_PROP(string, node, "status", "okay");
+
+	return (DDI_WALK_TERMINATE);
+}
+
+/*
+ * opl_get_probe_string: build the probe string (the leaf's portid in
+ * hex) passed to the user-level fcode interpreter.  Returns a
+ * PROBE_STR_SIZE buffer allocated with kmem_zalloc; the caller owns
+ * it and must free it with kmem_free(.., PROBE_STR_SIZE).
+ */
+static char *
+opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
+{
+	char *probe_string;
+	int portid;
+
+	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);
+
+	/* CMU channel has a single portid; PCI channels are per-leaf */
+	if (channel == OPL_CMU_CHANNEL)
+		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
+	else
+		portid = probe->
+		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
+
+	(void) sprintf(probe_string, "%x", portid);
+
+	return (probe_string);
+}
+
+/*
+ * opl_probe_leaf: probe one I/O leaf (an Oberon PCI leaf or the CMU
+ * channel leaf) under the root node.
+ *
+ * Sequence: create the leaf devinfo node (bound state), demote it to
+ * DS_LINKED with binding inhibited (DEVI_NO_BIND), run the fcode
+ * interpreter to populate compatible properties, then re-enable and
+ * perform driver binding.  The fcode handle and probe string are
+ * stashed in the board config on success; on failure the node is
+ * destroyed.  Returns 0 on success; -1 on node-creation failure or on
+ * fcode failure for the CMU channel (PCI-leaf fcode failures are
+ * tolerated and still return 0).
+ */
+static int
+opl_probe_leaf(opl_probe_t *probe)
+{
+	int channel, leaf, portid, error, circ;
+	int board;
+	fco_handle_t fco_handle, *cfg_handle;
+	dev_info_t *parent, *leaf_node;
+	char unit_address[UNIT_ADDR_SIZE];
+	char *probe_string;
+	opl_board_cfg_t *board_cfg;
+
+	board = probe->pr_board;
+	channel = probe->pr_channel;
+	leaf = probe->pr_leaf;
+	parent = ddi_root_node();
+	board_cfg = &opl_boards[board];
+
+	ASSERT(OPL_VALID_CHANNEL(channel));
+	ASSERT(OPL_VALID_LEAF(leaf));
+
+	/* Pick the portid and the per-board slot for the fcode handle */
+	if (channel == OPL_CMU_CHANNEL) {
+		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
+		cfg_handle = &board_cfg->cfg_cmuch_handle;
+	} else {
+		portid = probe->
+		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
+		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
+	}
+
+	/*
+	 * Prevent any changes to leaf_node until we have bound
+	 * it to the correct driver.
+	 */
+	ndi_devi_enter(parent, &circ);
+
+	/*
+	 * Ideally, fcode would be run from the "sid_branch_create"
+	 * callback (that is the primary purpose of that callback).
+	 * However, the fcode interpreter was written with the
+	 * assumption that the "new_child" was linked into the
+	 * device tree. The callback is invoked with the devinfo node
+	 * in the DS_PROTO state. More investigation is needed before
+	 * we can invoke the interpreter from the callback. For now,
+	 * we create the "new_child" in the BOUND state, invoke the
+	 * fcode interpreter and then rebind the dip to use any
+	 * compatible properties created by fcode.
+	 */
+
+	probe->pr_parent = parent;
+	probe->pr_create = opl_create_leaf;
+	probe->pr_hold = 1;
+
+	leaf_node = opl_create_node(probe);
+	if (leaf_node == NULL) {
+
+		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
+		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
+		ndi_devi_exit(parent, circ);
+		return (-1);
+	}
+
+	/*
+	 * The platform DR interfaces created the dip in
+	 * bound state. Bring devinfo node down to linked
+	 * state and hold it there until compatible
+	 * properties are created.
+	 */
+	e_ddi_branch_rele(leaf_node);
+	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
+	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
+	e_ddi_branch_hold(leaf_node);
+
+	mutex_enter(&DEVI(leaf_node)->devi_lock);
+	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
+	mutex_exit(&DEVI(leaf_node)->devi_lock);
+
+	/*
+	 * Drop the busy-hold on parent before calling
+	 * fcode_interpreter to prevent potential deadlocks
+	 */
+	ndi_devi_exit(parent, circ);
+
+	(void) sprintf(unit_address, "%x", portid);
+
+	/*
+	 * Get the probe string
+	 */
+	probe_string = opl_get_probe_string(probe, channel, leaf);
+
+	/*
+	 * The fcode pointer specified here is NULL and the fcode
+	 * size specified here is 0. This causes the user-level
+	 * fcode interpreter to issue a request to the fcode
+	 * driver to get the Oberon/cmu-ch fcode.
+	 */
+	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
+	    NULL, 0, unit_address, probe_string);
+
+	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);
+
+	if (error != 0) {
+		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
+		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
+
+		opl_fc_ops_free_handle(fco_handle);
+
+		if (probe_string != NULL)
+			kmem_free(probe_string, PROBE_STR_SIZE);
+
+		(void) opl_destroy_node(leaf_node);
+	} else {
+		/* Record handle and probe string for later unprobe */
+		*cfg_handle = fco_handle;
+
+		if (channel == OPL_CMU_CHANNEL)
+			board_cfg->cfg_cmuch_probe_str = probe_string;
+		else
+			board_cfg->cfg_pcich_probe_str[channel][leaf]
+			    = probe_string;
+
+		/*
+		 * Compatible properties (if any) have been created,
+		 * so bind driver.
+		 */
+		ndi_devi_enter(parent, &circ);
+		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);
+
+		mutex_enter(&DEVI(leaf_node)->devi_lock);
+		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
+		mutex_exit(&DEVI(leaf_node)->devi_lock);
+
+		ndi_devi_exit(parent, circ);
+
+		if (ndi_devi_bind_driver(leaf_node, 0) !=
+		    DDI_SUCCESS) {
+			cmn_err(CE_WARN,
+			    "IKP: Unable to bind PCI leaf (%d-%d-%d)",
+			    probe->pr_board, probe->pr_channel,
+			    probe->pr_leaf);
+		}
+	}
+
+	/* Only a CMU-channel fcode failure is fatal to the probe */
+	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * opl_init_leaves: walk the root node's children and record, in the
+ * board config, the devinfo node for each successfully probed leaf
+ * (matched by the OPL_PCI_LEAF_NODE name, "portid" and "board#"
+ * properties) belonging to `myboard'.  Only leaves whose fcode handle
+ * was stored by opl_probe_leaf are recorded.
+ */
+static void
+opl_init_leaves(int myboard)
+{
+	dev_info_t *parent, *node;
+	char *name;
+	int circ, ret;
+	int len, portid, board, channel, leaf;
+	opl_board_cfg_t *cfg;
+
+	parent = ddi_root_node();
+
+	/*
+	 * Hold parent node busy to walk its child list
+	 */
+	ndi_devi_enter(parent, &circ);
+
+	for (node = ddi_get_child(parent);
+	    (node != NULL);
+	    node = ddi_get_next_sibling(node)) {
+
+		ret = OPL_GET_PROP(string, node, "name", &name, &len);
+		if (ret != DDI_PROP_SUCCESS) {
+			/*
+			 * The property does not exist for this node.
+			 */
+			continue;
+		}
+
+		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
+
+			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
+			if (ret == DDI_PROP_SUCCESS) {
+
+				ret = OPL_GET_PROP(int, node, "board#",
+				    &board, -1);
+				if ((ret != DDI_PROP_SUCCESS) ||
+				    (board != myboard))
+					continue;
+
+				cfg = &opl_boards[board];
+				channel = OPL_PORTID_TO_CHANNEL(portid);
+				if (channel == OPL_CMU_CHANNEL) {
+
+					if (cfg->cfg_cmuch_handle != NULL)
+						cfg->cfg_cmuch_leaf = node;
+
+				} else {
+
+					leaf = OPL_PORTID_TO_LEAF(portid);
+					if (cfg->cfg_pcich_handle
+					    [channel][leaf] != NULL)
+						cfg->cfg_pcich_leaf
+						    [channel][leaf] = node;
+				}
+			}
+		}
+
+		kmem_free(name, len);
+		/*
+		 * NOTE(review): `ret' here reflects the last property
+		 * lookup — a leaf node missing "portid" aborts the
+		 * whole walk rather than just this node.  Confirm
+		 * that is intended.
+		 */
+		if (ret != DDI_PROP_SUCCESS)
+			break;
+	}
+
+	ndi_devi_exit(parent, circ);
+}
+
+/*
+ * Create "pci" node and hierarchy for the Oberon channels and the
+ * CMU channel.
+ *
+ * The CMU channel is probed first and its failure is fatal (returns
+ * -1).  Each OK-status PCI channel/leaf is then probed; individual
+ * PCI-leaf failures are ignored so one bad leaf does not abort the
+ * board probe.  Finally opl_init_leaves() records the created nodes
+ * in the board config.
+ */
+/*ARGSUSED*/
+static int
+opl_probe_io(opl_probe_t *probe)
+{
+
+	int i, j;
+	hwd_pci_ch_t *channels;
+
+	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {
+
+		probe->pr_channel = HWD_CMU_CHANNEL;
+		probe->pr_channel_status =
+		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
+		probe->pr_leaf = 0;
+		probe->pr_leaf_status = probe->pr_channel_status;
+
+		if (opl_probe_leaf(probe) != 0)
+			return (-1);
+	}
+
+	channels = &probe->pr_sb->sb_pci_ch[0];
+
+	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
+
+		if (!HWD_STATUS_OK(channels[i].pci_status))
+			continue;
+
+		probe->pr_channel = i;
+		probe->pr_channel_status = channels[i].pci_status;
+
+		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
+
+			probe->pr_leaf = j;
+			probe->pr_leaf_status =
+			    channels[i].pci_leaf[j].leaf_status;
+
+			if (!HWD_STATUS_OK(probe->pr_leaf_status))
+				continue;
+
+			/* best effort: leaf failures are non-fatal */
+			(void) opl_probe_leaf(probe);
+		}
+	}
+	opl_init_leaves(probe->pr_board);
+	return (0);
+}
+
+/*
+ * Perform the probe in the following order:
+ *
+ *	processors
+ *	memory
+ *	IO
+ *
+ * Each probe function returns 0 on sucess and a non-zero value on failure.
+ * What is a failure is determined by the implementor of the probe function.
+ * For example, while probing CPUs, any error encountered during probe
+ * is considered a failure and causes the whole probe operation to fail.
+ * However, for I/O, an error encountered while probing one device
+ * should not prevent other devices from being probed. It should not cause
+ * the whole probe operation to fail.
+ */
+int
+opl_probe_sb(int board)
+{
+	opl_probe_t *probe;
+	int ret;
+
+	/* Reject out-of-range board numbers */
+	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
+		return (-1);
+
+	ASSERT(opl_cfg_inited != 0);
+
+	/*
+	 * If the previous probe failed and left a partially configured
+	 * board, we need to unprobe the board and start with a clean slate.
+	 */
+	if ((opl_boards[board].cfg_hwd != NULL) &&
+	    (opl_unprobe_sb(board) != 0))
+		return (-1);
+
+	ret = 0;
+
+	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
+	probe->pr_board = board;
+
+	if ((opl_probe_init(probe) != 0) ||
+
+	    (opl_probe_cpu_chips(probe) != 0) ||
+
+	    (opl_probe_memory(probe) != 0) ||
+
+	    (opl_probe_io(probe) != 0)) {
+
+		/*
+		 * Probe failed. Perform cleanup.
+		 */
+		(void) opl_unprobe_sb(board);
+		ret = -1;
+	}
+
+	kmem_free(probe, sizeof (opl_probe_t));
+
+	return (ret);
+}
+
+/*
+ * This unprobing also includes CMU-CH.
+ *
+ * Releases, for every PCI channel/leaf of the board and then for the
+ * CMU channel: the fcode handle, the probe string, and the devinfo
+ * node itself.  Returns -1 as soon as any node destroy fails, leaving
+ * remaining resources in place; 0 on full success.
+ */
+/*ARGSUSED*/
+static int
+opl_unprobe_io(int board)
+{
+	int i, j, ret;
+	opl_board_cfg_t *board_cfg;
+	dev_info_t **node;
+	fco_handle_t *hand;
+	char **probe_str;
+
+	board_cfg = &opl_boards[board];
+
+	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
+
+		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
+
+			node = &board_cfg->cfg_pcich_leaf[i][j];
+			hand = &board_cfg->cfg_pcich_handle[i][j];
+			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];
+
+			if (*node == NULL)
+				continue;
+
+			if (*hand != NULL) {
+				opl_fc_ops_free_handle(*hand);
+				*hand = NULL;
+			}
+
+			if (*probe_str != NULL) {
+				kmem_free(*probe_str, PROBE_STR_SIZE);
+				*probe_str = NULL;
+			}
+
+			ret = opl_destroy_node(*node);
+			if (ret != 0) {
+
+				cmn_err(CE_WARN,
+				    "IKP: destroy pci (%d-%d-%d) failed",
+				    board, i, j);
+				return (-1);
+			}
+
+			*node = NULL;
+
+		}
+	}
+
+	/* Now the CMU channel leaf, if it was recorded */
+	node = &board_cfg->cfg_cmuch_leaf;
+	hand = &board_cfg->cfg_cmuch_handle;
+	probe_str = &board_cfg->cfg_cmuch_probe_str;
+
+	if (*node == NULL)
+		return (0);
+
+	if (*hand != NULL) {
+		opl_fc_ops_free_handle(*hand);
+		*hand = NULL;
+	}
+
+	if (*probe_str != NULL) {
+		kmem_free(*probe_str, PROBE_STR_SIZE);
+		*probe_str = NULL;
+	}
+
+	if (opl_destroy_node(*node) != 0) {
+
+		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed",
+		    board, OPL_CMU_CHANNEL, 0);
+		return (-1);
+	}
+
+	*node = NULL;
+
+	return (0);
+}
+
+/*
+ * Destroy the "pseudo-mc" node for a board.
+ *
+ * No-op (success) if the node was never created.  Returns 0 on
+ * success, -1 if the node could not be destroyed.
+ */
+static int
+opl_unprobe_memory(int board)
+{
+	opl_board_cfg_t *board_cfg;
+
+	board_cfg = &opl_boards[board];
+
+	if (board_cfg->cfg_pseudo_mc == NULL)
+		return (0);
+
+	if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) {
+
+		cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board);
+		return (-1);
+	}
+
+	board_cfg->cfg_pseudo_mc = NULL;
+
+	return (0);
+}
+
+/*
+ * Destroy the "cmp" nodes for a board. This also destroys the "core"
+ * and "cpu" nodes below the "cmp" nodes.
+ *
+ * Returns 0 on success, -1 on the first chip-node destroy failure
+ * (remaining chips are left untouched).
+ */
+static int
+opl_unprobe_processors(int board)
+{
+	int i;
+	dev_info_t **cfg_cpu_chips;
+
+	cfg_cpu_chips = opl_boards[board].cfg_cpu_chips;
+
+	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
+
+		if (cfg_cpu_chips[i] == NULL)
+			continue;
+
+		if (opl_destroy_node(cfg_cpu_chips[i]) != 0) {
+
+			cmn_err(CE_WARN,
+			    "IKP: destroy chip (%d-%d) failed", board, i);
+			return (-1);
+		}
+
+		cfg_cpu_chips[i] = NULL;
+	}
+
+	return (0);
+}
+
+/*
+ * Perform the unprobe in the following order:
+ *
+ *	IO
+ *	memory
+ *	processors
+ *
+ * After all nodes are torn down, the cached hardware descriptor (HWD)
+ * for the board is released.  Under UCTEST the HWD was mapped into
+ * heap_arena (with a one-page offset) instead of kmem-allocated, so
+ * it is unloaded and vmem-freed there.  Returns 0 on success, -1 on
+ * a bad board number or any unprobe failure.
+ */
+int
+opl_unprobe_sb(int board)
+{
+	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
+		return (-1);
+
+	ASSERT(opl_cfg_inited != 0);
+
+	if ((opl_unprobe_io(board) != 0) ||
+
+	    (opl_unprobe_memory(board) != 0) ||
+
+	    (opl_unprobe_processors(board) != 0))
+
+		return (-1);
+
+	if (opl_boards[board].cfg_hwd != NULL) {
+#ifdef UCTEST
+		size_t size = 0xA000;
+#endif
+		/* Release the memory for the HWD */
+		void *hwdp = opl_boards[board].cfg_hwd;
+		opl_boards[board].cfg_hwd = NULL;
+#ifdef UCTEST
+		hwdp = (void *)((char *)hwdp - 0x1000);
+		hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK);
+		vmem_free(heap_arena, hwdp, size);
+#else
+		kmem_free(hwdp, HWD_DATA_SIZE);
+#endif
+	}
+	return (0);
+}
+
+/*
+ * For MAC patrol support, we need to update the PA-related properties
+ * when there is a copy-rename event. This should be called after the
+ * physical copy and rename has been done by DR, and before the MAC
+ * patrol is restarted.
+ *
+ * Swaps the "sb-mem-ranges" property (4 int cells) between the
+ * pseudo-mc nodes of boards `from' and `to'.  Returns 0 on success,
+ * -1 if either property cannot be read as a 4-element array.
+ * NOTE(review): `ret' is unused directly — presumably consumed by the
+ * OPL_* property macros.  If the second OPL_GET_PROP_ARRAY fails,
+ * `rangef' from the first call does not appear to be freed — confirm.
+ */
+int
+oplcfg_pa_swap(int from, int to)
+{
+	dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc;
+	dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc;
+	opl_range_t *rangef, *ranget;
+	int elems;
+	int ret;
+
+	if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef,
+	    elems) != DDI_SUCCESS) || (elems != 4)) {
+		/* XXX -- bad news */
+		return (-1);
+	}
+	if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget,
+	    elems) != DDI_SUCCESS) || (elems != 4)) {
+		/* XXX -- bad news */
+		return (-1);
+	}
+	/* Cross-assign: each node gets the other's ranges */
+	OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget,
+	    4);
+	OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef,
+	    4);
+
+	OPL_FREE_PROP(ranget);
+	OPL_FREE_PROP(rangef);
+
+	return (0);
+}
diff --git a/usr/src/uts/sun4u/io/px/oberon_regs.h b/usr/src/uts/sun4u/io/px/oberon_regs.h
new file mode 100644
index 0000000000..3fd69f96a7
--- /dev/null
+++ b/usr/src/uts/sun4u/io/px/oberon_regs.h
@@ -0,0 +1,183 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OBERON_REGS_H
+#define _SYS_OBERON_REGS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define UBC_ERROR_LOG_ENABLE 0x471000
+#define UBC_ERROR_STATUS_CLEAR 0x471018
+#define UBC_INTERRUPT_ENABLE 0x471008
+#define UBC_INTERRUPT_STATUS 0x471010
+#define UBC_INTERRUPT_STATUS_DMARDUEA_P 0
+#define UBC_INTERRUPT_STATUS_DMAWTUEA_P 1
+#define UBC_INTERRUPT_STATUS_MEMRDAXA_P 2
+#define UBC_INTERRUPT_STATUS_MEMWTAXA_P 3
+#define UBC_INTERRUPT_STATUS_DMARDUEB_P 8
+#define UBC_INTERRUPT_STATUS_DMAWTUEB_P 9
+#define UBC_INTERRUPT_STATUS_MEMRDAXB_P 10
+#define UBC_INTERRUPT_STATUS_MEMWTAXB_P 11
+#define UBC_INTERRUPT_STATUS_PIOWTUE_P 16
+#define UBC_INTERRUPT_STATUS_PIOWBEUE_P 17
+#define UBC_INTERRUPT_STATUS_PIORBEUE_P 18
+#define UBC_INTERRUPT_STATUS_DMARDUEA_S 32
+#define UBC_INTERRUPT_STATUS_DMAWTUEA_S 33
+#define UBC_INTERRUPT_STATUS_MEMRDAXA_S 34
+#define UBC_INTERRUPT_STATUS_MEMWTAXA_S 35
+#define UBC_INTERRUPT_STATUS_DMARDUEB_S 40
+#define UBC_INTERRUPT_STATUS_DMAWTUEB_S 41
+#define UBC_INTERRUPT_STATUS_MEMRDAXB_S 42
+#define UBC_INTERRUPT_STATUS_MEMWTAXB_S 43
+#define UBC_INTERRUPT_STATUS_PIOWTUE_S 48
+#define UBC_INTERRUPT_STATUS_PIOWBEUE_S 49
+#define UBC_INTERRUPT_STATUS_PIORBEUE_S 50
+#define UBC_ERROR_STATUS_SET 0x471020
+#define UBC_PERFORMANCE_COUNTER_SELECT 0x472000
+#define UBC_PERFORMANCE_COUNTER_ZERO 0x472008
+#define UBC_PERFORMANCE_COUNTER_ONE 0x472010
+#define UBC_PERFORMANCE_COUNTER_SEL_MASKS 0x3f3f
+#define UBC_MEMORY_UE_LOG 0x471028
+#define UBC_MEMORY_UE_LOG_EID 60
+#define UBC_MEMORY_UE_LOG_EID_MASK 0x3
+#define UBC_MEMORY_UE_LOG_MARKED 48
+#define UBC_MEMORY_UE_LOG_MARKED_MASK 0x3fff
+#define UBC_MARKED_MAX_CPUID_MASK 0x1ff
+/*
+ * Class qualifiers on errors for which EID is valid.
+ */
+#define UBC_EID_MEM 0
+#define UBC_EID_CHANNEL 1
+#define UBC_EID_CPU 2
+#define UBC_EID_PATH 3
+
+#define OBERON_UBC_ID_MAX 64
+#define OBERON_UBC_ID_IOC 0
+#define OBERON_UBC_ID_LSB 2
+
+#define OBERON_PORT_ID_IOC 1
+#define OBERON_PORT_ID_IOC_MASK 0x03
+#define OBERON_PORT_ID_LSB 4
+#define OBERON_PORT_ID_LSB_MASK 0x0F
+
+#define INTERRUPT_MAPPING_ENTRIES_T_DESTID 21
+#define INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK 0x3ff
+
+#define OBERON_TLU_CONTROL_DRN_TR_DIS 35
+#define OBERON_TLU_CONTROL_CPLEP_DEN 34
+#define OBERON_TLU_CONTROL_ECRCCHK_DIS 33
+#define OBERON_TLU_CONTROL_ECRCGEN_DIS 32
+
+#define TLU_SLOT_CAPABILITIES_HP 6
+#define TLU_SLOT_CAPABILITIES_HPSUP 5
+#define TLU_SLOT_CAPABILITIES_PWINDP 4
+#define TLU_SLOT_CAPABILITIES_ATINDP 3
+#define TLU_SLOT_CAPABILITIES_MRLSP 2
+#define TLU_SLOT_CAPABILITIES_PWCNTLP 1
+#define TLU_SLOT_CAPABILITIES_ATBTNP 0
+
+#define DLU_INTERRUPT_MASK 0xe2048
+#define DLU_INTERRUPT_MASK_MSK_INTERRUPT_EN 31
+#define DLU_INTERRUPT_MASK_MSK_LINK_LAYER 5
+#define DLU_INTERRUPT_MASK_MSK_PHY_ERROR 4
+#define DLU_LINK_LAYER_CONFIG 0xe2200
+#define DLU_LINK_LAYER_CONFIG_VC0_EN 8
+#define DLU_LINK_LAYER_CONFIG_TLP_XMIT_FC_EN 3
+#define DLU_LINK_LAYER_CONFIG_FREQ_ACK_ENABLE 2
+#define DLU_LINK_LAYER_CONFIG_RETRY_DISABLE 1
+#define DLU_LINK_LAYER_INTERRUPT_AND_STATUS 0xe2210
+#define DLU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_LINK_ERR_ACT 31
+#define DLU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_PARABUS_PE 23
+#define DLU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_UNSPRTD_DLLP 22
+#define DLU_LINK_LAYER_INTERRUPT_AND_STATUS_INT_SRC_ERR_TLP 17
+#define DLU_LINK_LAYER_INTERRUPT_MASK 0xe2220
+#define DLU_LINK_LAYER_INTERRUPT_MASK_MSK_LINK_ERR_ACT 31
+#define DLU_LINK_LAYER_INTERRUPT_MASK_MSK_PARABUS_PE 23
+#define DLU_LINK_LAYER_INTERRUPT_MASK_MSK_UNSPRTD_DLLP 22
+#define DLU_LINK_LAYER_INTERRUPT_MASK_MSK_SRC_ERR_TLP 17
+#define DLU_FLOW_CONTROL_UPDATE_CONTROL 0xe2240
+#define DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_C_EN 2
+#define DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN 1
+#define DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN 0
+#define DLU_TXLINK_REPLAY_TIMER_THRESHOLD 0xe2410
+#define DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR 0
+#define DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR_MASK 0xfffff
+#define DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT 0xc9
+#define DLU_PORT_CONTROL 0xe2b00
+#define DLU_PORT_CONTROL_CK_EN 0
+#define DLU_PORT_STATUS 0xe2b08
+
+#define MMU_INTERRUPT_STATUS_TTC_DUE_P 8
+#define MMU_INTERRUPT_STATUS_TTC_DUE_S 40
+#define ILU_INTERRUPT_STATUS_IHB_UE_P 4
+#define ILU_INTERRUPT_STATUS_IHB_UE_S 36
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ECRC_P 19
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ECRC_S 51
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_POIS_P 12
+#define TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_POIS_S 44
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EIUE_P 0
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EIUE_S 32
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERBUE_P 1
+#define TLU_OTHER_EVENT_STATUS_CLEAR_ERBUE_S 33
+#define TLU_OTHER_EVENT_STATUS_CLEAR_TLUEITMO_P 7
+#define TLU_OTHER_EVENT_STATUS_CLEAR_TLUEITMO_S 39
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EHBUE_P 12
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EHBUE_S 44
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EDBUE_P 12
+#define TLU_OTHER_EVENT_STATUS_CLEAR_EDBUE_S 44
+
+#define TLU_CONTROL_DRN_TR_DIS 35
+
+#define TLU_SLOT_CONTROL 0x90038
+#define TLU_SLOT_CONTROL_PWFDEN 1
+#define TLU_SLOT_STATUS 0x90040
+#define TLU_SLOT_STATUS_PSD 6
+#define TLU_SLOT_STATUS_MRLS 5
+#define TLU_SLOT_STATUS_CMDCPLT 4
+#define TLU_SLOT_STATUS_PSDC 3
+#define TLU_SLOT_STATUS_MRLC 2
+#define TLU_SLOT_STATUS_PWFD 1
+#define TLU_SLOT_STATUS_ABTN 0
+
+#define FLP_PORT_CONTROL 0xe5200
+#define FLP_PORT_CONTROL_PORT_DIS 0
+
+#define HOTPLUG_CONTROL 0x88000
+#define HOTPLUG_CONTROL_SLOTPON 3
+#define HOTPLUG_CONTROL_PWREN 2
+#define HOTPLUG_CONTROL_CLKEN 1
+#define HOTPLUG_CONTROL_N_PERST 0
+
+#define PX_PCIEHP_PIL (LOCK_LEVEL - 1)
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OBERON_REGS_H */
diff --git a/usr/src/uts/sun4u/io/px/px_err.c b/usr/src/uts/sun4u/io/px/px_err.c
index 59a3ca3311..2ab0a6eb16 100644
--- a/usr/src/uts/sun4u/io/px/px_err.c
+++ b/usr/src/uts/sun4u/io/px/px_err.c
@@ -40,10 +40,83 @@
#include <px_regs.h>
#include <px_csr.h>
#include <sys/membar.h>
+#include <sys/machcpuvar.h>
+#include <sys/platform_module.h>
#include "pcie_pwr.h"
#include "px_lib4u.h"
#include "px_err.h"
#include "px_err_impl.h"
+#include "oberon_regs.h"
+
+uint64_t px_tlu_ue_intr_mask = PX_ERR_EN_ALL;
+uint64_t px_tlu_ue_log_mask = PX_ERR_EN_ALL;
+uint64_t px_tlu_ue_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_tlu_ce_intr_mask = PX_ERR_MASK_NONE;
+uint64_t px_tlu_ce_log_mask = PX_ERR_MASK_NONE;
+uint64_t px_tlu_ce_count_mask = PX_ERR_MASK_NONE;
+
+/*
+ * Do not enable Link Interrupts
+ */
+uint64_t px_tlu_oe_intr_mask = PX_ERR_EN_ALL & ~0x80000000800;
+uint64_t px_tlu_oe_log_mask = PX_ERR_EN_ALL & ~0x80000000800;
+uint64_t px_tlu_oe_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_mmu_intr_mask = PX_ERR_EN_ALL;
+uint64_t px_mmu_log_mask = PX_ERR_EN_ALL;
+uint64_t px_mmu_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_imu_intr_mask = PX_ERR_EN_ALL;
+uint64_t px_imu_log_mask = PX_ERR_EN_ALL;
+uint64_t px_imu_count_mask = PX_ERR_EN_ALL;
+
+/*
+ * (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_S) |
+ * (1ull << ILU_INTERRUPT_ENABLE_IHB_PE_P);
+ */
+uint64_t px_ilu_intr_mask = (((uint64_t)0x10 << 32) | 0x10);
+uint64_t px_ilu_log_mask = (((uint64_t)0x10 << 32) | 0x10);
+uint64_t px_ilu_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_ubc_intr_mask = PX_ERR_EN_ALL;
+uint64_t px_ubc_log_mask = PX_ERR_EN_ALL;
+uint64_t px_ubc_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_jbc_intr_mask = PX_ERR_EN_ALL;
+uint64_t px_jbc_log_mask = PX_ERR_EN_ALL;
+uint64_t px_jbc_count_mask = PX_ERR_EN_ALL;
+
+/*
+ * LPU Intr Registers use the reverse encoding of the registers above:
+ * 1 = disable
+ * 0 = enable
+ *
+ * The Log and Count masks, however, keep the normal encoding.
+ */
+uint64_t px_lpul_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpul_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpul_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_lpup_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpup_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpup_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_lpur_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpur_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpur_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_lpux_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpux_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpux_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_lpus_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpus_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpus_count_mask = PX_ERR_EN_ALL;
+
+uint64_t px_lpug_intr_mask = LPU_INTR_DISABLE;
+uint64_t px_lpug_log_mask = PX_ERR_EN_ALL;
+uint64_t px_lpug_count_mask = PX_ERR_EN_ALL;
/*
* JBC error bit table
@@ -59,7 +132,7 @@
PX_ERR_BIT_HANDLE(hdl), \
PX_ERPT_SEND(erpt), \
PX_ERR_JBC_CLASS(bit)
-px_err_bit_desc_t px_err_cb_tbl[] = {
+px_err_bit_desc_t px_err_jbc_tbl[] = {
/* JBC FATAL - see io erpt doc, section 1.1 */
{ JBC_BIT_DESC(MB_PEA, fatal_hw, jbc_fatal) },
{ JBC_BIT_DESC(CPE, fatal_hw, jbc_fatal) },
@@ -105,8 +178,49 @@ px_err_bit_desc_t px_err_cb_tbl[] = {
{ JBC_BIT_DESC(EBUS_TO, jbc_csr, jbc_csr) }
};
-#define px_err_cb_keys \
- (sizeof (px_err_cb_tbl)) / (sizeof (px_err_bit_desc_t))
+#define px_err_jbc_keys \
+ (sizeof (px_err_jbc_tbl)) / (sizeof (px_err_bit_desc_t))
+
+/*
+ * UBC error bit table
+ */
+#define UBC_BIT_DESC(bit, hdl, erpt) \
+ UBC_INTERRUPT_STATUS_ ## bit ## _P, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_UBC_CLASS(bit) }, \
+ { UBC_INTERRUPT_STATUS_ ## bit ## _S, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_UBC_CLASS(bit)
+px_err_bit_desc_t px_err_ubc_tbl[] = {
+ /* UBC FATAL */
+ { UBC_BIT_DESC(DMARDUEA, non_fatal, ubc_fatal) },
+ { UBC_BIT_DESC(DMAWTUEA, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(MEMRDAXA, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(MEMWTAXA, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(DMARDUEB, non_fatal, ubc_fatal) },
+ { UBC_BIT_DESC(DMAWTUEB, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(MEMRDAXB, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(MEMWTAXB, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(PIOWTUE, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(PIOWBEUE, fatal_sw, ubc_fatal) },
+ { UBC_BIT_DESC(PIORBEUE, fatal_sw, ubc_fatal) }
+};
+
+#define px_err_ubc_keys \
+ (sizeof (px_err_ubc_tbl)) / (sizeof (px_err_bit_desc_t))
+
+
+char *ubc_class_eid_qualifier[] = {
+ "-mem",
+ "-channel",
+ "-cpu",
+ "-path"
+};
+
/*
* DMC error bit tables
@@ -173,6 +287,7 @@ px_err_bit_desc_t px_err_mmu_tbl[] = {
};
#define px_err_mmu_keys (sizeof (px_err_mmu_tbl)) / (sizeof (px_err_bit_desc_t))
+
/*
* PEC error bit tables
*/
@@ -210,12 +325,24 @@ px_err_bit_desc_t px_err_ilu_tbl[] = {
PX_ERR_BIT_HANDLE(hdl), \
PX_ERPT_SEND(erpt), \
PX_ERR_PEC_CLASS(bit)
+#define TLU_UC_OB_BIT_DESC(bit, hdl, erpt) \
+ TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ ## bit ## _P, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_PEC_OB_CLASS(bit) }, \
+ { TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR_ ## bit ## _S, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_PEC_CLASS(bit)
px_err_bit_desc_t px_err_tlu_ue_tbl[] = {
/* PCI-E Receive Uncorrectable Errors - see io erpt doc, section 3.2 */
{ TLU_UC_BIT_DESC(UR, pciex_ue, pciex_rx_ue) },
{ TLU_UC_BIT_DESC(UC, pciex_ue, pciex_rx_ue) },
/* PCI-E Transmit Uncorrectable Errors - see io erpt doc, section 3.3 */
+ { TLU_UC_OB_BIT_DESC(ECRC, pciex_ue, pciex_rx_ue) },
{ TLU_UC_BIT_DESC(CTO, pciex_ue, pciex_tx_ue) },
{ TLU_UC_BIT_DESC(ROF, pciex_ue, pciex_tx_ue) },
@@ -234,6 +361,7 @@ px_err_bit_desc_t px_err_tlu_ue_tbl[] = {
#define px_err_tlu_ue_keys \
(sizeof (px_err_tlu_ue_tbl)) / (sizeof (px_err_bit_desc_t))
+
/*
* PEC CE errors implementation is incomplete pending PCIE generic
* fabric rules.
@@ -261,6 +389,7 @@ px_err_bit_desc_t px_err_tlu_ce_tbl[] = {
#define px_err_tlu_ce_keys \
(sizeof (px_err_tlu_ce_tbl)) / (sizeof (px_err_bit_desc_t))
+
/* pec oe errors */
#define TLU_OE_BIT_DESC(bit, hdl, erpt) \
TLU_OTHER_EVENT_STATUS_CLEAR_ ## bit ## _P, \
@@ -273,6 +402,17 @@ px_err_bit_desc_t px_err_tlu_ce_tbl[] = {
PX_ERR_BIT_HANDLE(hdl), \
PX_ERPT_SEND(erpt), \
PX_ERR_PEC_CLASS(bit)
+#define TLU_OE_OB_BIT_DESC(bit, hdl, erpt) \
+ TLU_OTHER_EVENT_STATUS_CLEAR_ ## bit ## _P, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_PEC_OB_CLASS(bit) }, \
+ { TLU_OTHER_EVENT_STATUS_CLEAR_ ## bit ## _S, \
+ 0, \
+ PX_ERR_BIT_HANDLE(hdl), \
+ PX_ERPT_SEND(erpt), \
+ PX_ERR_PEC_OB_CLASS(bit)
px_err_bit_desc_t px_err_tlu_oe_tbl[] = {
/*
* TLU Other Event Status (receive only) - see io erpt doc, section 3.7
@@ -288,6 +428,7 @@ px_err_bit_desc_t px_err_tlu_oe_tbl[] = {
{ TLU_OE_BIT_DESC(IIP, fatal_gos, pciex_oe) },
{ TLU_OE_BIT_DESC(EDP, fatal_gos, pciex_oe) },
{ TLU_OE_BIT_DESC(EHP, fatal_gos, pciex_oe) },
+ { TLU_OE_OB_BIT_DESC(TLUEITMO, fatal_gos, pciex_oe) },
{ TLU_OE_BIT_DESC(LIN, non_fatal, pciex_oe) },
{ TLU_OE_BIT_DESC(LRS, non_fatal, pciex_oe) },
{ TLU_OE_BIT_DESC(LDN, tlu_ldn, pciex_oe) },
@@ -303,6 +444,7 @@ px_err_bit_desc_t px_err_tlu_oe_tbl[] = {
#define px_err_tlu_oe_keys \
(sizeof (px_err_tlu_oe_tbl)) / (sizeof (px_err_bit_desc_t))
+
/*
* All the following tables below are for LPU Interrupts. These interrupts
* are *NOT* error interrupts, but event status interrupts.
@@ -415,6 +557,15 @@ px_err_bit_desc_t px_err_lpug_tbl[] = {
px_err_ ## pre ## _keys, \
0
+#define MnT6_ob(pre) \
+ B_FALSE, \
+ &px_ ## pre ## _intr_mask, \
+ &px_ ## pre ## _log_mask, \
+ &px_ ## pre ## _count_mask, \
+ px_err_ ## pre ## _ob_tbl, \
+ px_err_ ## pre ## _ob_keys, \
+ 0
+
/* LPU Registers Addresses */
#define LR4(pre) \
NULL, \
@@ -436,7 +587,7 @@ px_err_bit_desc_t px_err_lpug_tbl[] = {
TLU_ ## pre ## _INTERRUPT_STATUS, \
TLU_ ## pre ## _STATUS_CLEAR
-/* Registers Addresses for JBC, MMU, IMU and ILU */
+/* Registers Addresses for JBC, UBC, MMU, IMU and ILU */
#define R4(pre) \
pre ## _ERROR_LOG_ENABLE, \
pre ## _INTERRUPT_ENABLE, \
@@ -449,7 +600,8 @@ px_err_bit_desc_t px_err_lpug_tbl[] = {
* It is located in px_err.h
*/
px_err_reg_desc_t px_err_reg_tbl[] = {
- { MnT6(cb), R4(JBC), "JBC Error"},
+ { MnT6(jbc), R4(JBC), "JBC Error"},
+ { MnT6(ubc), R4(UBC), "UBC Error"},
{ MnT6(mmu), R4(MMU), "MMU Error"},
{ MnT6(imu), R4(IMU), "IMU Error"},
{ MnT6(tlu_ue), TR4(UNCORRECTABLE_ERROR), "TLU UE"},
@@ -461,7 +613,7 @@ px_err_reg_desc_t px_err_reg_tbl[] = {
{ MnT6(lpur), LR4(RECEIVE_PHY), "LPU RX Phy Layer"},
{ MnT6(lpux), LR4(TRANSMIT_PHY), "LPU TX Phy Layer"},
{ MnT6(lpus), LR4(LTSSM), "LPU LTSSM"},
- { MnT6(lpug), LR4(GIGABLAZE_GLUE), "LPU GigaBlaze Glue"}
+ { MnT6(lpug), LR4(GIGABLAZE_GLUE), "LPU GigaBlaze Glue"},
};
#define PX_ERR_REG_KEYS (sizeof (px_err_reg_tbl)) / (sizeof (px_err_reg_tbl[0]))
@@ -469,7 +621,7 @@ typedef struct px_err_ss {
uint64_t err_status[PX_ERR_REG_KEYS];
} px_err_ss_t;
-static void px_err_snapshot(px_t *px_p, px_err_ss_t *ss, boolean_t chkjbc);
+static void px_err_snapshot(px_t *px_p, px_err_ss_t *ss, boolean_t chk_cb);
static int px_err_erpt_and_clr(px_t *px_p, ddi_fm_error_t *derr,
px_err_ss_t *ss);
static int px_err_check_severity(px_t *px_p, ddi_fm_error_t *derr,
@@ -477,11 +629,11 @@ static int px_err_check_severity(px_t *px_p, ddi_fm_error_t *derr,
/*
* px_err_cb_intr:
- * Interrupt handler for the JBC block.
+ * Interrupt handler for the JBC/UBC block.
* o lock
* o create derr
- * o px_err_handle(leaf1, with jbc)
- * o px_err_handle(leaf2, without jbc)
+ * o px_err_handle(leaf1, with cb)
+ * o px_err_handle(leaf2, without cb)
* o dispatch (leaf1)
* o dispatch (leaf2)
* o unlock
@@ -541,7 +693,7 @@ px_err_cb_intr(caddr_t arg)
* Interrupt handler for the DMC/PEC block.
* o lock
* o create derr
- * o px_err_handle(leaf, with jbc)
+ * o px_err_handle(leaf, with cb)
* o dispatch (leaf)
* o unlock
* o handle error: fatal? fm_panic() : return INTR_CLAIMED)
@@ -600,7 +752,8 @@ px_err_reg_enable(px_t *px_p, px_err_id_t id)
caddr_t csr_base;
pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
- if (id == PX_ERR_JBC)
+ /* Get the correct CSR BASE */
+ if (PX_ERR_XBC(id))
csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
else
csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
@@ -642,7 +795,8 @@ px_err_reg_disable(px_t *px_p, px_err_id_t id)
caddr_t csr_base;
pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
- if (id == PX_ERR_JBC)
+ /* Get the correct CSR BASE */
+ if (PX_ERR_XBC(id))
csr_base = (caddr_t)(uintptr_t)pxu_p->px_address[PX_REG_XBC];
else
csr_base = (caddr_t)(uintptr_t)pxu_p->px_address[PX_REG_CSR];
@@ -651,6 +805,7 @@ px_err_reg_disable(px_t *px_p, px_err_id_t id)
switch (id) {
case PX_ERR_JBC:
+ case PX_ERR_UBC:
case PX_ERR_MMU:
case PX_ERR_IMU:
case PX_ERR_TLU_UE:
@@ -687,13 +842,13 @@ px_err_reg_disable(px_t *px_p, px_err_id_t id)
* @param px_p leaf in which to check access
* @param derr fm err data structure to be updated
* @param caller PX_TRAP_CALL | PX_INTR_CALL
- * @param chkjbc whether to handle jbc registers
+ * @param chk_cb whether to handle cb registers
* @return err PX_OK | PX_NONFATAL |
* PX_FATAL_GOS | PX_FATAL_HW | PX_STUCK_FATAL
*/
int
px_err_handle(px_t *px_p, ddi_fm_error_t *derr, int caller,
- boolean_t chkjbc)
+ boolean_t chk_cb)
{
px_err_ss_t ss;
int err = PX_OK;
@@ -701,7 +856,7 @@ px_err_handle(px_t *px_p, ddi_fm_error_t *derr, int caller,
ASSERT(MUTEX_HELD(&px_p->px_fm_mutex));
/* snap shot the current fire registers */
- px_err_snapshot(px_p, &ss, chkjbc);
+ px_err_snapshot(px_p, &ss, chk_cb);
/* check for safe access */
px_err_safeacc_check(px_p, derr);
@@ -731,14 +886,14 @@ px_err_handle(px_t *px_p, ddi_fm_error_t *derr, int caller,
/*
* px_err_snapshot:
* Take a current snap shot of all the fire error registers. This includes
- * JBC, DMC, and PEC, unless chkjbc == false;
+ * JBC/UBC, DMC, and PEC, unless chk_cb == false;
*
* @param px_p leaf in which to take the snap shot.
* @param ss pre-allocated memory to store the snap shot.
- * @param chkjbc boolean on whether to store jbc register.
+ * @param chk_cb boolean on whether to store jbc/ubc register.
*/
static void
-px_err_snapshot(px_t *px_p, px_err_ss_t *ss, boolean_t chkjbc)
+px_err_snapshot(px_t *px_p, px_err_ss_t *ss, boolean_t chk_cb)
{
pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
caddr_t xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
@@ -746,21 +901,40 @@ px_err_snapshot(px_t *px_p, px_err_ss_t *ss, boolean_t chkjbc)
px_err_reg_desc_t *reg_desc;
int reg_id;
- /* snapshot JBC interrupt status */
- reg_id = PX_ERR_JBC;
- if (chkjbc == B_TRUE) {
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ reg_id = PX_ERR_UBC;
+ break;
+ case PX_CHIP_FIRE:
+ reg_id = PX_ERR_JBC;
+ break;
+ default:
+ DBG(DBG_ERR_INTR, NULL, "px_err_snapshot - "
+ "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
+ reg_id = 0;
+ break;
+ }
+
+ /* snapshot CB interrupt status */
+ if (chk_cb == B_TRUE) {
reg_desc = &px_err_reg_tbl[reg_id];
- ss->err_status[reg_id] = CSR_XR(xbc_csr_base,
- reg_desc->status_addr);
+ /* Only look at enabled groups. */
+ if (reg_desc->enabled == B_TRUE) {
+ ss->err_status[reg_id] = CSR_XR(xbc_csr_base,
+ reg_desc->status_addr);
+ }
} else {
ss->err_status[reg_id] = 0;
}
/* snapshot DMC/PEC interrupt status */
- for (reg_id = 1; reg_id < PX_ERR_REG_KEYS; reg_id += 1) {
+ for (reg_id = 2; reg_id < PX_ERR_REG_KEYS; reg_id += 1) {
reg_desc = &px_err_reg_tbl[reg_id];
- ss->err_status[reg_id] = CSR_XR(pec_csr_base,
- reg_desc->status_addr);
+ /* Only look at enabled groups. */
+ if (reg_desc->enabled == B_TRUE) {
+ ss->err_status[reg_id] = CSR_XR(pec_csr_base,
+ reg_desc->status_addr);
+ }
}
}
@@ -803,12 +977,15 @@ px_err_erpt_and_clr(px_t *px_p, ddi_fm_error_t *derr, px_err_ss_t *ss)
/* Get the correct register description table */
err_reg_tbl = &px_err_reg_tbl[reg_id];
+ /* Only look at enabled groups. */
+ if (err_reg_tbl->enabled != B_TRUE)
+ continue;
+
/* Get the correct CSR BASE */
- if (reg_id == PX_ERR_JBC) {
+ if (PX_ERR_XBC(reg_id))
csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
- } else {
+ else
csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
- }
/* Get pointers to masks and register addresses */
log_mask = err_reg_tbl->log_mask_p;
@@ -859,9 +1036,7 @@ px_err_erpt_and_clr(px_t *px_p, ddi_fm_error_t *derr, px_err_ss_t *ss)
err_bit_desc->class_name);
}
}
-
}
-
/* Print register status */
if (ss_reg & *log_mask)
DBG(DBG_ERR_INTR, rpdip, "<%x>=%16llx %s\n",
@@ -1002,6 +1177,105 @@ PX_ERPT_SEND_DEC(do_not)
return (PX_OK);
}
+/* UBC FATAL - see io erpt doc, section 1.1 */
+/* ARGSUSED */
+PX_ERPT_SEND_DEC(ubc_fatal)
+{
+ char buf[FM_MAX_CLASS];
+ uint64_t memory_ue_log, marked;
+ char unum[FM_MAX_CLASS];
+ int unum_length;
+ uint64_t device_id = 0;
+ uint8_t cpu_version = 0;
+ nvlist_t *resource = NULL;
+
+ unum[0] = '\0';
+ (void) snprintf(buf, FM_MAX_CLASS, "%s", class_name);
+
+ memory_ue_log = CSR_XR(csr_base, UBC_MEMORY_UE_LOG);
+ marked = (memory_ue_log >> UBC_MEMORY_UE_LOG_MARKED) &
+ UBC_MEMORY_UE_LOG_MARKED_MASK;
+
+ if ((strstr(class_name, "ubc.piowtue") != NULL) ||
+ (strstr(class_name, "ubc.piowbeue") != NULL) ||
+ (strstr(class_name, "ubc.piorbeue") != NULL) ||
+ (strstr(class_name, "ubc.dmarduea") != NULL) ||
+ (strstr(class_name, "ubc.dmardueb") != NULL)) {
+ int eid = (memory_ue_log >> UBC_MEMORY_UE_LOG_EID) &
+ UBC_MEMORY_UE_LOG_EID_MASK;
+ (void) strncat(buf, ubc_class_eid_qualifier[eid],
+ FM_MAX_CLASS);
+
+ if (eid == UBC_EID_MEM) {
+ uint64_t phys_addr = memory_ue_log &
+ MMU_OBERON_PADDR_MASK;
+ uint64_t offset = (uint64_t)-1;
+
+ resource = fm_nvlist_create(NULL);
+ if (&plat_get_mem_unum) {
+ if ((plat_get_mem_unum(0,
+ phys_addr, 0, B_TRUE, 0, unum,
+ FM_MAX_CLASS, &unum_length)) != 0)
+ unum[0] = '\0';
+ }
+ fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
+ NULL, unum, NULL, offset);
+
+ } else if (eid == UBC_EID_CPU) {
+ int cpuid = (marked & UBC_MARKED_MAX_CPUID_MASK);
+ char sbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
+
+ resource = fm_nvlist_create(NULL);
+ cpu_version = cpunodes[cpuid].version;
+ device_id = cpunodes[cpuid].device_id;
+ (void) snprintf(sbuf, sizeof (sbuf), "%lX",
+ device_id);
+ (void) fm_fmri_cpu_set(resource,
+ FM_CPU_SCHEME_VERSION, NULL, cpuid,
+ &cpu_version, sbuf);
+ }
+ }
+
+ if (resource) {
+ ddi_fm_ereport_post(rpdip, buf, derr->fme_ena,
+ DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
+ FIRE_PRIMARY, DATA_TYPE_BOOLEAN_VALUE, B_TRUE,
+ OBERON_UBC_ELE, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_ERROR_LOG_ENABLE),
+ OBERON_UBC_IE, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_INTERRUPT_ENABLE),
+ OBERON_UBC_IS, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_INTERRUPT_STATUS),
+ OBERON_UBC_ESS, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_ERROR_STATUS_SET),
+ OBERON_UBC_MUE, DATA_TYPE_UINT64, memory_ue_log,
+ OBERON_UBC_UNUM, DATA_TYPE_STRING, unum,
+ OBERON_UBC_DID, DATA_TYPE_UINT64, device_id,
+ OBERON_UBC_CPUV, DATA_TYPE_UINT32, cpu_version,
+ OBERON_UBC_RESOURCE, DATA_TYPE_NVLIST, resource,
+ NULL);
+ fm_nvlist_destroy(resource, FM_NVA_FREE);
+ } else {
+ ddi_fm_ereport_post(rpdip, buf, derr->fme_ena,
+ DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
+ FIRE_PRIMARY, DATA_TYPE_BOOLEAN_VALUE, B_TRUE,
+ OBERON_UBC_ELE, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_ERROR_LOG_ENABLE),
+ OBERON_UBC_IE, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_INTERRUPT_ENABLE),
+ OBERON_UBC_IS, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_INTERRUPT_STATUS),
+ OBERON_UBC_ESS, DATA_TYPE_UINT64,
+ CSR_XR(csr_base, UBC_ERROR_STATUS_SET),
+ OBERON_UBC_MUE, DATA_TYPE_UINT64, memory_ue_log,
+ OBERON_UBC_UNUM, DATA_TYPE_STRING, unum,
+ OBERON_UBC_DID, DATA_TYPE_UINT64, device_id,
+ OBERON_UBC_CPUV, DATA_TYPE_UINT32, cpu_version,
+ NULL);
+ }
+
+ return (PX_OK);
+}
/* JBC FATAL - see io erpt doc, section 1.1 */
PX_ERPT_SEND_DEC(jbc_fatal)
@@ -1352,6 +1626,7 @@ px_err_imu_rbne_handle(dev_info_t *rpdip, caddr_t csr_base,
* errors for a period of time within which the occuring of the
* disabled errors become rbne, that is non fatal.
*/
+
if (!(imu_log_enable & imu_intr_enable & mask))
err = PX_FATAL_SW;
@@ -1456,6 +1731,7 @@ PX_ERPT_SEND_DEC(mmu_tfar_tfsr)
boolean_t pri = PX_ERR_IS_PRI(bit);
(void) snprintf(buf, FM_MAX_CLASS, "%s", class_name);
+
ddi_fm_ereport_post(rpdip, buf, derr->fme_ena,
DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
FIRE_PRIMARY, DATA_TYPE_BOOLEAN_VALUE, pri,
diff --git a/usr/src/uts/sun4u/io/px/px_err.h b/usr/src/uts/sun4u/io/px/px_err.h
index d050649556..e6625a249b 100644
--- a/usr/src/uts/sun4u/io/px/px_err.h
+++ b/usr/src/uts/sun4u/io/px/px_err.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -35,6 +34,7 @@ extern "C" {
typedef enum {
PX_ERR_JBC,
+ PX_ERR_UBC,
PX_ERR_MMU,
PX_ERR_IMU,
PX_ERR_TLU_UE,
@@ -52,6 +52,36 @@ typedef enum {
void px_err_reg_enable(px_t *px_p, px_err_id_t id);
void px_err_reg_disable(px_t *px_p, px_err_id_t id);
+#define PX_ERR_EN_ALL -1ull
+#define PX_ERR_MASK_NONE 0ull
+
+#define LPU_INTR_ENABLE 0ull
+#define LPU_INTR_DISABLE -1ull
+
+extern uint64_t px_tlu_ue_intr_mask;
+extern uint64_t px_tlu_ue_log_mask;
+extern uint64_t px_tlu_ue_count_mask;
+
+extern uint64_t px_tlu_ce_intr_mask;
+extern uint64_t px_tlu_ce_log_mask;
+extern uint64_t px_tlu_ce_count_mask;
+
+extern uint64_t px_tlu_oe_intr_mask;
+extern uint64_t px_tlu_oe_log_mask;
+extern uint64_t px_tlu_oe_count_mask;
+
+extern uint64_t px_mmu_intr_mask;
+extern uint64_t px_mmu_log_mask;
+extern uint64_t px_mmu_count_mask;
+
+extern uint64_t px_imu_intr_mask;
+extern uint64_t px_imu_log_mask;
+extern uint64_t px_imu_count_mask;
+
+extern uint64_t px_ilu_intr_mask;
+extern uint64_t px_ilu_log_mask;
+extern uint64_t px_ilu_count_mask;
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/sun4u/io/px/px_err_impl.h b/usr/src/uts/sun4u/io/px/px_err_impl.h
index 8dda2220ea..223bfd6e46 100644
--- a/usr/src/uts/sun4u/io/px/px_err_impl.h
+++ b/usr/src/uts/sun4u/io/px/px_err_impl.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -85,6 +84,11 @@ typedef struct px_err_reg_desc {
} px_err_reg_desc_t;
/*
+ * Macro to test for the JBC or UBC error id.
+ */
+#define PX_ERR_XBC(id) (((id) == PX_ERR_JBC)||((id) == PX_ERR_UBC))
+
+/*
* Macro to create the error handling forward declaration
*
* The error handlers examines error, determine the nature of the error
@@ -141,10 +145,11 @@ PX_ERPT_SEND_DEC(do_not);
/*
- * JBC error handling and ereport forward declarations
+ * JBC/UBC error handling and ereport forward declarations
*/
#define PX_ERR_JBC_CLASS(n) PCIEX_FIRE "." FIRE_JBC_ ## n
+#define PX_ERR_UBC_CLASS(n) PCIEX_OBERON "." FIRE_UBC_ ## n
/*
* Fire JBC error Handling Forward Declarations
@@ -176,6 +181,9 @@ PX_ERPT_SEND_DEC(jbc_odcd);
PX_ERPT_SEND_DEC(jbc_idc);
PX_ERPT_SEND_DEC(jbc_csr);
+/* Oberon UBC error ereport Forward Declarations */
+PX_ERPT_SEND_DEC(ubc_fatal);
+
/*
* DMC error handling and ereport forward declarations
@@ -218,6 +226,7 @@ PX_ERPT_SEND_DEC(mmu);
*/
#define PX_ERR_PEC_CLASS(n) PCIEX_FIRE "." FIRE_PEC_ ## n
+#define PX_ERR_PEC_OB_CLASS(n) PCIEX_OBERON "." FIRE_PEC_ ## n
int px_err_tlu_lup_handle(dev_info_t *rpdip, caddr_t csr_base,
ddi_fm_error_t *derr, px_err_reg_desc_t *err_reg_descr,
diff --git a/usr/src/uts/sun4u/io/px/px_hlib.c b/usr/src/uts/sun4u/io/px/px_hlib.c
index 90368735d7..1cd79e686c 100644
--- a/usr/src/uts/sun4u/io/px/px_hlib.c
+++ b/usr/src/uts/sun4u/io/px/px_hlib.c
@@ -32,9 +32,11 @@
#include <sys/machsystm.h> /* lddphys() */
#include <sys/iommutsb.h>
#include <sys/pci.h>
+#include <sys/hotplug/pci/pciehpc.h>
#include <pcie_pwr.h>
#include <px_obj.h>
#include "px_regs.h"
+#include "oberon_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"
@@ -45,45 +47,57 @@
/*
* Registers in the PEC Module.
* LPU_RESET should be set to 0ull during resume
+ *
+ * Each entry is a (reg, chip) pair: PX_CHIP_UNIDENTIFIED applies to all
+ * chips, PX_CHIP_FIRE to Fire only, and PX_CHIP_OBERON to Oberon only.
+ */
-static uint64_t pec_config_state_regs[] = {
- PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
- ILU_ERROR_LOG_ENABLE,
- ILU_INTERRUPT_ENABLE,
- TLU_CONTROL,
- TLU_OTHER_EVENT_LOG_ENABLE,
- TLU_OTHER_EVENT_INTERRUPT_ENABLE,
- TLU_DEVICE_CONTROL,
- TLU_LINK_CONTROL,
- TLU_UNCORRECTABLE_ERROR_LOG_ENABLE,
- TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE,
- TLU_CORRECTABLE_ERROR_LOG_ENABLE,
- TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE,
- LPU_LINK_LAYER_INTERRUPT_MASK,
- LPU_PHY_INTERRUPT_MASK,
- LPU_RECEIVE_PHY_INTERRUPT_MASK,
- LPU_TRANSMIT_PHY_INTERRUPT_MASK,
- LPU_GIGABLAZE_GLUE_INTERRUPT_MASK,
- LPU_LTSSM_INTERRUPT_MASK,
- LPU_RESET,
- LPU_DEBUG_CONFIG,
- LPU_INTERRUPT_MASK,
- LPU_LINK_LAYER_CONFIG,
- LPU_FLOW_CONTROL_UPDATE_CONTROL,
- LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD,
- LPU_TXLINK_REPLAY_TIMER_THRESHOLD,
- LPU_REPLAY_BUFFER_MAX_ADDRESS,
- LPU_TXLINK_RETRY_FIFO_POINTER,
- LPU_LTSSM_CONFIG2,
- LPU_LTSSM_CONFIG3,
- LPU_LTSSM_CONFIG4,
- LPU_LTSSM_CONFIG5,
- DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
- DMC_DEBUG_SELECT_FOR_PORT_A,
- DMC_DEBUG_SELECT_FOR_PORT_B
+static struct px_pec_regs {
+ uint64_t reg;
+ uint64_t chip;
+} pec_config_state_regs[] = {
+ {PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
+ {TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
+ {TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
+ {TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
+ {DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
+ {DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
+ {LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_RESET, PX_CHIP_FIRE},
+ {LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
+ {LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
+ {LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
+ {LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
+ {LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
+ {LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
+ {LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
+ {LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
+ {LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
+ {LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
+ {LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
+ {LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
+ {DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
+ {DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
+ {DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};
-#define PEC_SIZE (sizeof (pec_config_state_regs))
-#define PEC_KEYS (PEC_SIZE / sizeof (uint64_t))
+
+#define PEC_KEYS \
+ ((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))
+
+#define PEC_SIZE (PEC_KEYS * sizeof (uint64_t))
/*
* Registers for the MMU module.
@@ -110,18 +124,29 @@ static uint64_t ib_config_state_regs[] = {
#define IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))
/*
- * Registers for the CB module.
+ * Registers for the JBC module.
* JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
*/
-static uint64_t cb_config_state_regs[] = {
+static uint64_t jbc_config_state_regs[] = {
JBUS_PARITY_CONTROL,
JBC_FATAL_RESET_ENABLE,
JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
JBC_ERROR_LOG_ENABLE,
JBC_INTERRUPT_ENABLE
};
-#define CB_SIZE (sizeof (cb_config_state_regs))
-#define CB_KEYS (CB_SIZE / sizeof (uint64_t))
+#define JBC_SIZE (sizeof (jbc_config_state_regs))
+#define JBC_KEYS (JBC_SIZE / sizeof (uint64_t))
+
+/*
+ * Registers for the UBC module.
+ * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
+ */
+static uint64_t ubc_config_state_regs[] = {
+ UBC_ERROR_LOG_ENABLE,
+ UBC_INTERRUPT_ENABLE
+};
+#define UBC_SIZE (sizeof (ubc_config_state_regs))
+#define UBC_KEYS (UBC_SIZE / sizeof (uint64_t))
static uint64_t msiq_config_other_regs[] = {
ERR_COR_MAPPING,
@@ -140,20 +165,43 @@ static uint64_t msiq_config_other_regs[] = {
static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
+static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
+static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
/*
- * Initialize the module, but do not enable interrupts.
+ * Initialize the bus, but do not enable interrupts.
*/
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ ubc_init(xbc_csr_base, pxu_p);
+ break;
+ case PX_CHIP_FIRE:
+ jbc_init(xbc_csr_base, pxu_p);
+ break;
+ default:
+ DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ break;
+ }
+}
+
+/*
+ * Initialize the JBC module, but do not enable interrupts.
+ */
+/* ARGSUSED */
+static void
+jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
+{
uint64_t val;
/* Check if we need to enable inverted parity */
val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
- DBG(DBG_CB, NULL, "hvio_cb_init, JBUS_PARITY_CONTROL: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));
val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
@@ -165,7 +213,7 @@ hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
(1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
(1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
- DBG(DBG_CB, NULL, "hvio_cb_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));
/*
@@ -173,26 +221,63 @@ hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
*/
CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
DBG(DBG_CB, NULL,
- "hvio_cb_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
+ "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
/*
- * CSR_V CB's interrupt regs (log, enable, status, clear)
+ * CSR_V JBC's interrupt regs (log, enable, status, clear)
*/
- DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));
- DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));
- DBG(DBG_CB, NULL, "hvio_cb_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));
- DBG(DBG_CB, NULL, "hvio_cb_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
+ DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}
/*
+ * Initialize the UBC module, but do not enable interrupts.
+ */
+/* ARGSUSED */
+static void
+ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
+{
+ /*
+ * Enable Uranus bus error log bits.
+ */
+ CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
+ DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));
+
+ /*
+ * Clear Uranus bus errors.
+ */
+ CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
+ DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
+
+ /*
+ * CSR_V UBC's interrupt regs (log, enable, status, clear)
+ */
+ DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));
+
+ DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));
+
+ DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));
+
+ DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
+ CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
+}
+
+/*
* Initialize the module, but do not enable interrupts.
*/
/* ARGSUSED */
@@ -269,6 +354,20 @@ tlu_init(caddr_t csr_base, pxu_t *pxu_p)
(1ull << TLU_CONTROL_NPWR_EN) | TLU_CONTROL_CONFIG_DEFAULT;
/*
+ * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
+ * behind non-posted PIO writes. This blocking could cause a master or
+ * slave timeout on the host bus if multiple serialized PIOs were to
+ * suffer Completion Timeouts because the CTO delays for each PIO ahead
+ * of the read would accumulate. Since the Olympus processor can have
+ * only 1 PIO outstanding, there is no possibility of PIO accesses from
+ * a given CPU to a given device being re-ordered by the PCIe fabric;
+ * therefore turning off serialization should be safe from a PCIe
+ * ordering perspective.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
+ val &= ~(1ull << TLU_CONTROL_NPWR_EN);
+
+ /*
* Set Detect.Quiet. This will disable automatic link
* re-training, if the link goes down e.g. power management
* turns off power to the downstream device. This will enable
@@ -1433,6 +1532,37 @@ lpu_init(caddr_t csr_base, pxu_t *pxu_p)
/* ARGSUSED */
static void
+dlu_init(caddr_t csr_base, pxu_t *pxu_p)
+{
+	uint64_t val;
+
+	CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
+	DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
+	    CSR_XR(csr_base, DLU_INTERRUPT_MASK));
+
+	val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
+	CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
+	DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
+	    CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));
+
+	val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
+	    (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
+
+	CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
+	DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
+	    "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));
+
+	val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
+	    DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);
+
+	CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
+
+	DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
+	    "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
+}
+
+/* ARGSUSED */
+static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
uint64_t val;
@@ -1478,7 +1608,20 @@ hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
ilu_init(csr_base, pxu_p);
tlu_init(csr_base, pxu_p);
- lpu_init(csr_base, pxu_p);
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ dlu_init(csr_base, pxu_p);
+ break;
+ case PX_CHIP_FIRE:
+ lpu_init(csr_base, pxu_p);
+ break;
+ default:
+ DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ break;
+ }
+
dmc_init(csr_base, pxu_p);
/*
@@ -1500,27 +1643,90 @@ hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
}
/*
+ * Convert a TTE to physical address
+ */
+static r_addr_t
+mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
+{
+ uint64_t pa_mask;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ pa_mask = MMU_OBERON_PADDR_MASK;
+ break;
+ case PX_CHIP_FIRE:
+ pa_mask = MMU_FIRE_PADDR_MASK;
+ break;
+ default:
+ DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ pa_mask = 0;
+ break;
+ }
+ return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
+}
+
+/*
+ * Return MMU bypass noncache bit for chip
+ */
+static r_addr_t
+mmu_bypass_noncache(pxu_t *pxu_p)
+{
+ r_addr_t bypass_noncache_bit;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
+ break;
+ case PX_CHIP_FIRE:
+ bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
+ break;
+ default:
+ DBG(DBG_MMU, NULL,
+ "mmu_bypass_nocache - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ bypass_noncache_bit = 0;
+ break;
+ }
+ return (bypass_noncache_bit);
+}
+
+/*
+ * Calculate number of TSB entries for the chip.
+ */
+/* ARGSUSED */
+static uint_t
+mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
+{
+ uint64_t tsb_ctrl;
+ uint_t obp_tsb_entries, obp_tsb_size;
+
+ tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
+
+ obp_tsb_size = tsb_ctrl & 0xF;
+
+ obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
+
+ return (obp_tsb_entries);
+}
+
+/*
* Initialize the module, but do not enable interrupts.
*/
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
- uint64_t val, i, tsb_ctrl, obp_tsb_pa, *base_tte_addr;
- uint_t obp_tsb_entries, obp_tsb_size;
+ uint64_t val, i, obp_tsb_pa, *base_tte_addr;
+ uint_t obp_tsb_entries;
bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);
/*
* Preserve OBP's TSB
*/
- val = CSR_XR(csr_base, MMU_TSB_CONTROL);
+ obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;
- tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
-
- obp_tsb_pa = tsb_ctrl & 0x7FFFFFFE000;
- obp_tsb_size = tsb_ctrl & 0xF;
-
- obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
+ obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);
base_tte_addr = pxu_p->tsb_vaddr +
((pxu_p->tsb_size >> 3) - obp_tsb_entries);
@@ -1609,17 +1815,54 @@ hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
if (io_attr & PCI_MAP_ATTR_WRITE)
attr |= MMU_TTE_W;
+ if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
+ (io_attr & PCI_MAP_ATTR_RO))
+ attr |= MMU_TTE_RO;
+
+ if (attr & MMU_TTE_RO) {
+ DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
+ "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
+ }
+
if (flags & MMU_MAP_PFN) {
ddi_dma_impl_t *mp = (ddi_dma_impl_t *)addr;
for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
+
+ /*
+ * Oberon will need to flush the corresponding TTEs in
+ * Cache. We only need to flush every cache line.
+ * Extra PIO's are expensive.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
+ if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
+ CSR_XS(dev_hdl,
+ MMU_TTE_CACHE_FLUSH_ADDRESS,
+ (pxu_p->tsb_paddr+
+ (tsb_index*MMU_TTE_SIZE)));
+ }
+ }
}
} else {
caddr_t a = (caddr_t)addr;
for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
+
+ /*
+ * Oberon will need to flush the corresponding TTEs in
+ * Cache. We only need to flush every cache line.
+ * Extra PIO's are expensive.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
+ if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
+ CSR_XS(dev_hdl,
+ MMU_TTE_CACHE_FLUSH_ADDRESS,
+ (pxu_p->tsb_paddr+
+ (tsb_index*MMU_TTE_SIZE)));
+ }
+ }
}
}
@@ -1634,9 +1877,24 @@ hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
tsbindex_t tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
int i;
- for (i = 0; i < pages; i++, tsb_index++)
+ for (i = 0; i < pages; i++, tsb_index++) {
pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
+ /*
+ * Oberon will need to flush the corresponding TTEs in
+ * Cache. We only need to flush every cache line.
+ * Extra PIO's are expensive.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
+ if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
+ CSR_XS(dev_hdl,
+ MMU_TTE_CACHE_FLUSH_ADDRESS,
+ (pxu_p->tsb_paddr+
+ (tsb_index*MMU_TTE_SIZE)));
+ }
+ }
+ }
+
return (H_EOK);
}
@@ -1652,7 +1910,7 @@ hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
if (*tte_addr & MMU_TTE_V) {
- *r_addr_p = MMU_TTETOPA(*tte_addr);
+ *r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
*attr_p = (*tte_addr & MMU_TTE_W) ?
PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
} else {
@@ -1666,13 +1924,59 @@ hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
/* ARGSUSED */
uint64_t
-hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra, io_attributes_t attr,
- io_addr_t *io_addr_p)
+hvio_get_bypass_base(pxu_t *pxu_p)
+{
+ uint64_t base;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ base = MMU_OBERON_BYPASS_BASE;
+ break;
+ case PX_CHIP_FIRE:
+ base = MMU_FIRE_BYPASS_BASE;
+ break;
+ default:
+ DBG(DBG_MMU, NULL,
+ "hvio_get_bypass_base - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ base = 0;
+ break;
+ }
+ return (base);
+}
+
+/* ARGSUSED */
+uint64_t
+hvio_get_bypass_end(pxu_t *pxu_p)
+{
+ uint64_t end;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ end = MMU_OBERON_BYPASS_END;
+ break;
+ case PX_CHIP_FIRE:
+ end = MMU_FIRE_BYPASS_END;
+ break;
+ default:
+ DBG(DBG_MMU, NULL,
+ "hvio_get_bypass_end - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ end = 0;
+ break;
+ }
+ return (end);
+}
+
+/* ARGSUSED */
+uint64_t
+hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
+ io_attributes_t attr, io_addr_t *io_addr_p)
{
uint64_t pfn = MMU_BTOP(ra);
- *io_addr_p = MMU_BYPASS_BASE | ra |
- (pf_is_memory(pfn) ? 0 : MMU_BYPASS_NONCACHE);
+ *io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
+ (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
return (H_EOK);
}
@@ -1812,10 +2116,23 @@ hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
* has not been set via intr_settarget.
*/
uint64_t
-hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
+hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
+ cpuid_t *cpuid)
{
- *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
- SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
+ break;
+ case PX_CHIP_FIRE:
+ *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
+ SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
+ break;
+ default:
+ DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
+ "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
+ return (EINVAL);
+ }
return (H_EOK);
}
@@ -1825,7 +2142,8 @@ hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t *cpuid)
* sysino to the target cpu value defined by the argument cpuid.
*/
uint64_t
-hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
+hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
+ cpuid_t cpuid)
{
uint64_t val, intr_controller;
@@ -1838,10 +2156,27 @@ hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino, cpuid_t cpuid)
*/
intr_controller = 0x1ull << (cpuid % 4);
- val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
- INTERRUPT_MAPPING_ENTRIES_T_JPID) |
- ((intr_controller & INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
- << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ val = (((cpuid &
+ INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
+ INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
+ ((intr_controller &
+ INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
+ << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
+ break;
+ case PX_CHIP_FIRE:
+ val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
+ INTERRUPT_MAPPING_ENTRIES_T_JPID) |
+ ((intr_controller &
+ INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
+ << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
+ break;
+ default:
+ DBG(DBG_CB, NULL, "hvio_intr_settarget - "
+ "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
+ return (EINVAL);
+ }
/* For EQ interrupts, set DATA MONDO bit */
if ((ino >= PX_DEFAULT_MSIQ_1ST_DEVINO) &&
@@ -2309,8 +2644,12 @@ hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
/* Save the PEC configuration states */
pxu_p->pec_config_state = config_state;
for (i = 0; i < PEC_KEYS; i++) {
- pxu_p->pec_config_state[i] =
- CSR_XR((caddr_t)dev_hdl, pec_config_state_regs[i]);
+ if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
+ (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
+ pxu_p->pec_config_state[i] =
+ CSR_XR((caddr_t)dev_hdl,
+ pec_config_state_regs[i].reg);
+ }
}
/* Save the MMU configuration states */
@@ -2380,8 +2719,11 @@ hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
for (i = 0; i < PEC_KEYS; i++) {
- CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i],
- pxu_p->pec_config_state[i]);
+ if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
+ (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
+ CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
+ pxu_p->pec_config_state[i]);
+ }
}
/* Enable PCI-E interrupt */
@@ -2403,10 +2745,27 @@ hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
uint64_t
hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
- uint64_t *config_state;
- int i;
+ uint64_t *config_state, *cb_regs;
+ int i, cb_size, cb_keys;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ cb_size = UBC_SIZE;
+ cb_keys = UBC_KEYS;
+ cb_regs = ubc_config_state_regs;
+ break;
+ case PX_CHIP_FIRE:
+ cb_size = JBC_SIZE;
+ cb_keys = JBC_KEYS;
+ cb_regs = jbc_config_state_regs;
+ break;
+ default:
+ DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ break;
+ }
- config_state = kmem_zalloc(CB_SIZE, KM_NOSLEEP);
+ config_state = kmem_zalloc(cb_size, KM_NOSLEEP);
if (config_state == NULL) {
return (H_EIO);
@@ -2414,9 +2773,9 @@ hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
/* Save the configuration states */
pxu_p->xcb_config_state = config_state;
- for (i = 0; i < CB_KEYS; i++) {
+ for (i = 0; i < cb_keys; i++) {
pxu_p->xcb_config_state[i] =
- CSR_XR((caddr_t)dev_hdl, cb_config_state_regs[i]);
+ CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
}
return (H_EOK);
@@ -2426,20 +2785,42 @@ void
hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
devino_t devino, pxu_t *pxu_p)
{
- sysino_t sysino;
- int i;
-
- /*
- * No reason to have any reset bits high until an error is
- * detected on the link.
- */
- CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
+ sysino_t sysino;
+ uint64_t *cb_regs;
+ int i, cb_size, cb_keys;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ cb_size = UBC_SIZE;
+ cb_keys = UBC_KEYS;
+ cb_regs = ubc_config_state_regs;
+ /*
+ * No reason to have any reset bits high until an error is
+ * detected on the link.
+ */
+ CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
+ break;
+ case PX_CHIP_FIRE:
+ cb_size = JBC_SIZE;
+ cb_keys = JBC_KEYS;
+ cb_regs = jbc_config_state_regs;
+ /*
+ * No reason to have any reset bits high until an error is
+ * detected on the link.
+ */
+ CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
+ break;
+ default:
+ DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
+ PX_CHIP_TYPE(pxu_p));
+ break;
+ }
ASSERT(pxu_p->xcb_config_state);
/* Restore the configuration states */
- for (i = 0; i < CB_KEYS; i++) {
- CSR_XS((caddr_t)xbus_dev_hdl, cb_config_state_regs[i],
+ for (i = 0; i < cb_keys; i++) {
+ CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
pxu_p->xcb_config_state[i]);
}
@@ -2448,7 +2829,7 @@ hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
(void) hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE);
- kmem_free(pxu_p->xcb_config_state, CB_SIZE);
+ kmem_free(pxu_p->xcb_config_state, cb_size);
pxu_p->xcb_config_state = NULL;
}
@@ -2603,3 +2984,334 @@ px_enable_detect_quiet(caddr_t csr_base)
tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
}
+
+static uint_t
+oberon_hp_pwron(caddr_t csr_base)
+{
+ volatile uint64_t reg;
+
+#ifdef DEBUG
+ cmn_err(CE_CONT, "oberon_hp_pwron the slot\n");
+#endif
+
+ /* Check Leaf Reset status */
+ reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
+ if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
+#ifdef DEBUG
+ cmn_err(CE_WARN, "oberon_hp_pwron fails: leaf not reset\n");
+#endif
+ goto fail;
+ }
+
+ /* Check Slot status */
+ reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
+ if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
+ (reg & (1ull << TLU_SLOT_STATUS_MRLS)) ||
+ (reg & (1ull << TLU_SLOT_STATUS_PWFD))) {
+#ifdef DEBUG
+ cmn_err(CE_WARN, "oberon_hp_pwron fails: slot status %lx\n",
+ reg);
+#endif
+ goto fail;
+ }
+
+ /* Blink power LED, this is done from pciehpc already */
+
+ /* Turn on slot power */
+ CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
+
+ /* wait to check power state */
+ delay(drv_usectohz(25000));
+
+ return (DDI_SUCCESS);
+
+fail:
+ return (DDI_FAILURE);
+}
+
+static uint_t
+oberon_hp_pwroff(caddr_t csr_base)
+{
+	volatile uint64_t reg;
+
+#ifdef DEBUG
+	cmn_err(CE_CONT, "oberon_hp_pwroff the slot\n");
+#endif
+
+	/* Blink power LED, this is done from pciehpc already */
+
+	/* Clear Slot Event */
+	CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
+
+	/* DRN_TR_DIS on */
+	CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
+
+	/* Disable port */
+	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
+
+	/* check time or link status */
+	delay(drv_usectohz(10000));
+
+	/* DRN_TR_DIS off/ CK_EN off */
+	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR);
+	if ((reg & (1ull << TLU_OTHER_EVENT_STATUS_CLEAR_LDN_P)) ||
+	    (reg & (1ull << TLU_OTHER_EVENT_STATUS_CLEAR_LDN_S))) {
+		CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
+	}
+	CSR_BC(csr_base, DLU_PORT_CONTROL, CK_EN);
+
+	/* PCIE reset */
+	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
+
+	/* PCIE clock stop */
+	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
+
+	/* Turn off slot power */
+	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
+	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
+	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);
+
+	/* Set SPARE3 (bit 7) of ILU Error Log Enable: leaf-reset-done flag */
+	CSR_BS(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
+
+	/* Power LED off */
+	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
+	reg = pcie_slotctl_pwr_indicator_set(reg,
+	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
+	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
+
+	/* Indicator LED blink */
+	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
+	reg = pcie_slotctl_attn_indicator_set(reg,
+	    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
+	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
+
+	/* Notify to SCF */
+	CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
+
+	/*
+	 * Check Leaf Reset
+	 * XXX - We don't need to wait for this any more.
+	 */
+
+	/* Indicator LED off */
+	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
+	reg = pcie_slotctl_attn_indicator_set(reg,
+	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
+	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
+
+	return (DDI_SUCCESS);
+}
+
+static uint_t oberon_hp_pwrledon(caddr_t csr_base)
+{
+	volatile uint64_t reg;
+
+#ifdef DEBUG
+	cmn_err(CE_CONT, "oberon_hp_pwrledon the slot\n");
+#endif
+
+	CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);
+
+	/* Turn on slot clock */
+	CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);
+
+	/* Release PCI-E Reset */
+	delay(drv_usectohz(100000));
+	CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);
+
+	/*
+	 * Open events' mask
+	 * This should be done from pciehpc already
+	 */
+
+	/*
+	 * Initialize Leaf
+	 * SPLS = 00b, SPLV = 11001b, i.e. 25W
+	 */
+	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
+	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
+	    TLU_SLOT_CAPABILITIES_SPLS);
+	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
+	    TLU_SLOT_CAPABILITIES_SPLV);
+	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
+	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);
+
+	/* Enable PCIE port */
+	CSR_BS(csr_base, DLU_PORT_CONTROL, CK_EN);
+	CSR_BC(csr_base, FLP_PORT_CONTROL, PORT_DIS);
+
+	/* wait for the LUP_P/LUP_S */
+	delay(drv_usectohz(10000));
+	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR);
+	if (!(reg & (1ull << TLU_OTHER_EVENT_STATUS_CLEAR_LUP_P)) &&
+	    !(reg & (1ull << TLU_OTHER_EVENT_STATUS_CLEAR_LUP_S)))
+		goto fail;
+
+	/* Turn on Power LED */
+	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
+	reg = pcie_slotctl_pwr_indicator_set(reg,
+	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
+	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
+
+	/* Notify to SCF */
+	CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
+	return (DDI_SUCCESS);
+
+fail:
+	/* Link up is failed */
+	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
+	CSR_BC(csr_base, DLU_PORT_CONTROL, CK_EN);
+	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
+	delay(drv_usectohz(150));
+
+	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
+	delay(drv_usectohz(100));
+
+	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
+
+	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
+	reg = pcie_slotctl_pwr_indicator_set(reg,
+	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
+	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
+
+	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
+
+	return (DDI_FAILURE);
+}
+
+static uint_t
+oberon_hpreg_get(void *cookie, off_t off)
+{
+ caddr_t csr_base = *(caddr_t *)cookie;
+ volatile uint64_t val = -1ull;
+
+ switch (off) {
+ case PCIE_SLOTCAP:
+ val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
+ break;
+ case PCIE_SLOTCTL:
+ val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+
+ /* Get the power state */
+ val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
+ (1ull << HOTPLUG_CONTROL_PWREN)) ?
+ 0 : PCIE_SLOTCTL_PWR_CONTROL;
+ break;
+ case PCIE_SLOTSTS:
+ val = CSR_XR(csr_base, TLU_SLOT_STATUS);
+ break;
+ default:
+#ifdef DEBUG
+ cmn_err(CE_WARN, "oberon_hpreg_get(): "
+ "unsupported offset 0x%lx\n", off);
+#endif
+ break;
+ }
+
+ return ((uint_t)val);
+}
+
+static uint_t
+oberon_hpreg_put(void *cookie, off_t off, uint_t val)
+{
+ caddr_t csr_base = *(caddr_t *)cookie;
+ volatile uint64_t pwr_state_on, pwr_fault, reg;
+ uint16_t pwr_led_state, pwr_led_ctrl;
+ uint_t pwr_off, ret = DDI_SUCCESS;
+
+#ifdef DEBUG
+ cmn_err(CE_CONT, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
+ off, oberon_hpreg_get(cookie, off), val);
+#endif
+
+ switch (off) {
+ case PCIE_SLOTCTL:
+ /*
+ * Depending on the current state, insertion or removal
+ * will go through their respective sequences.
+ */
+ reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
+ pwr_led_state = pcie_slotctl_pwr_indicator_get(reg);
+ pwr_led_ctrl = pcie_slotctl_pwr_indicator_get(val);
+
+ pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
+ pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
+
+ if (!pwr_off && !pwr_state_on)
+ ret = oberon_hp_pwron(csr_base);
+ else if (pwr_off && pwr_state_on) {
+ pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
+ (1ull << TLU_SLOT_STATUS_PWFD);
+
+ if (pwr_fault)
+ CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
+ else
+ ret = oberon_hp_pwroff(csr_base);
+ } else if (pwr_state_on &&
+ (pwr_led_state == PCIE_SLOTCTL_INDICATOR_STATE_BLINK) &&
+ (pwr_led_ctrl == PCIE_SLOTCTL_INDICATOR_STATE_ON))
+ ret = oberon_hp_pwrledon(csr_base);
+ else
+ CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
+ break;
+ case PCIE_SLOTSTS:
+ CSR_XS(csr_base, TLU_SLOT_STATUS, val);
+ break;
+ default:
+#ifdef DEBUG
+ cmn_err(CE_WARN, "oberon_hpreg_put(): "
+ "unsupported offset 0x%lx\n", off);
+#endif
+ ret = DDI_FAILURE;
+ break;
+ }
+
+ return (ret);
+}
+
+int
+hvio_hotplug_init(dev_info_t *dip, void *arg)
+{
+	pciehpc_regops_t *regops = (pciehpc_regops_t *)arg;
+	px_t *px_p = DIP_TO_STATE(dip);
+	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
+		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
+		    TLU_SLOT_CAPABILITIES, HP)) {
+#ifdef DEBUG
+			cmn_err(CE_WARN, "%s%d: hotplug capable not set\n",
+			    ddi_driver_name(dip), ddi_get_instance(dip));
+#endif
+			return (DDI_FAILURE);
+		}
+
+		regops->get = oberon_hpreg_get;
+		regops->put = oberon_hpreg_put;
+
+		/* cookie is the csr_base */
+		regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
+
+		return (DDI_SUCCESS);
+	}
+
+	return (DDI_ENOTSUP);
+}
+
+int
+hvio_hotplug_uninit(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
+ return (DDI_SUCCESS);
+
+ return (DDI_FAILURE);
+}
diff --git a/usr/src/uts/sun4u/io/px/px_lib4u.c b/usr/src/uts/sun4u/io/px/px_lib4u.c
index 02ba672adc..91d06068f6 100644
--- a/usr/src/uts/sun4u/io/px/px_lib4u.c
+++ b/usr/src/uts/sun4u/io/px/px_lib4u.c
@@ -44,11 +44,13 @@
#include <sys/hotplug/pci/pciehpc.h>
#include <px_obj.h>
#include <pcie_pwr.h>
+#include "px_tools_var.h"
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"
+#include "oberon_regs.h"
#pragma weak jbus_stst_order
@@ -56,6 +58,7 @@ extern void jbus_stst_order();
ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;
+uint64_t *px_oberon_ubc_scratch_regs;
static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
@@ -168,8 +171,11 @@ px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
case FIRE_VER_20:
DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
break;
+ case OBERON_VER_10:
+ DBG(DBG_ATTACH, dip, "Oberon Hardware Version 1.0\n");
+ break;
default:
- cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
+ cmn_err(CE_WARN, "%s%d: PX Hardware Version Unknown\n",
ddi_driver_name(dip), ddi_get_instance(dip));
return (DDI_FAILURE);
}
@@ -197,6 +203,8 @@ px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);
+ pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
+
/*
* Create "virtual-dma" property to support child devices
* needing to know DVMA range.
@@ -222,19 +230,39 @@ px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
/*
* Initialize all the interrupt handlers
*/
- px_err_reg_enable(px_p, PX_ERR_JBC);
- px_err_reg_enable(px_p, PX_ERR_MMU);
- px_err_reg_enable(px_p, PX_ERR_IMU);
- px_err_reg_enable(px_p, PX_ERR_TLU_UE);
- px_err_reg_enable(px_p, PX_ERR_TLU_CE);
- px_err_reg_enable(px_p, PX_ERR_TLU_OE);
- px_err_reg_enable(px_p, PX_ERR_ILU);
- px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
- px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
- px_err_reg_enable(px_p, PX_ERR_LPU_RX);
- px_err_reg_enable(px_p, PX_ERR_LPU_TX);
- px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
- px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ px_err_reg_enable(px_p, PX_ERR_UBC);
+ px_err_reg_enable(px_p, PX_ERR_MMU);
+ px_err_reg_enable(px_p, PX_ERR_IMU);
+ px_err_reg_enable(px_p, PX_ERR_TLU_UE);
+ px_err_reg_enable(px_p, PX_ERR_TLU_CE);
+ px_err_reg_enable(px_p, PX_ERR_TLU_OE);
+ px_err_reg_enable(px_p, PX_ERR_ILU);
+
+ px_fabric_die_rc_ue |= PCIE_AER_UCE_UC;
+ break;
+
+ case PX_CHIP_FIRE:
+ px_err_reg_enable(px_p, PX_ERR_JBC);
+ px_err_reg_enable(px_p, PX_ERR_MMU);
+ px_err_reg_enable(px_p, PX_ERR_IMU);
+ px_err_reg_enable(px_p, PX_ERR_TLU_UE);
+ px_err_reg_enable(px_p, PX_ERR_TLU_CE);
+ px_err_reg_enable(px_p, PX_ERR_TLU_OE);
+ px_err_reg_enable(px_p, PX_ERR_ILU);
+ px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
+ px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
+ px_err_reg_enable(px_p, PX_ERR_LPU_RX);
+ px_err_reg_enable(px_p, PX_ERR_LPU_TX);
+ px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
+ px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
+ break;
+ default:
+ cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
+ ddi_driver_name(dip), ddi_get_instance(dip));
+ return (DDI_FAILURE);
+ }
/* Initilize device handle */
*dev_hdl = (devhandle_t)csr_base;
@@ -255,19 +283,36 @@ px_lib_dev_fini(dev_info_t *dip)
/*
* Deinitialize all the interrupt handlers
*/
- px_err_reg_disable(px_p, PX_ERR_JBC);
- px_err_reg_disable(px_p, PX_ERR_MMU);
- px_err_reg_disable(px_p, PX_ERR_IMU);
- px_err_reg_disable(px_p, PX_ERR_TLU_UE);
- px_err_reg_disable(px_p, PX_ERR_TLU_CE);
- px_err_reg_disable(px_p, PX_ERR_TLU_OE);
- px_err_reg_disable(px_p, PX_ERR_ILU);
- px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
- px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
- px_err_reg_disable(px_p, PX_ERR_LPU_RX);
- px_err_reg_disable(px_p, PX_ERR_LPU_TX);
- px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
- px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ px_err_reg_disable(px_p, PX_ERR_UBC);
+ px_err_reg_disable(px_p, PX_ERR_MMU);
+ px_err_reg_disable(px_p, PX_ERR_IMU);
+ px_err_reg_disable(px_p, PX_ERR_TLU_UE);
+ px_err_reg_disable(px_p, PX_ERR_TLU_CE);
+ px_err_reg_disable(px_p, PX_ERR_TLU_OE);
+ px_err_reg_disable(px_p, PX_ERR_ILU);
+ break;
+ case PX_CHIP_FIRE:
+ px_err_reg_disable(px_p, PX_ERR_JBC);
+ px_err_reg_disable(px_p, PX_ERR_MMU);
+ px_err_reg_disable(px_p, PX_ERR_IMU);
+ px_err_reg_disable(px_p, PX_ERR_TLU_UE);
+ px_err_reg_disable(px_p, PX_ERR_TLU_CE);
+ px_err_reg_disable(px_p, PX_ERR_TLU_OE);
+ px_err_reg_disable(px_p, PX_ERR_ILU);
+ px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
+ px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
+ px_err_reg_disable(px_p, PX_ERR_LPU_RX);
+ px_err_reg_disable(px_p, PX_ERR_LPU_TX);
+ px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
+ px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);
+ break;
+ default:
+ cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
+ ddi_driver_name(dip), ddi_get_instance(dip));
+ return (DDI_FAILURE);
+ }
iommu_tsb_free(pxu_p->tsb_cookie);
@@ -393,12 +438,14 @@ px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
uint64_t ret;
DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n",
dip, sysino);
- if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
+ if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p,
sysino, cpuid)) != H_EOK) {
DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n",
ret);
@@ -414,12 +461,14 @@ px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
uint64_t ret;
DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
"cpuid 0x%x\n", dip, sysino, cpuid);
- if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
+ if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p,
sysino, cpuid)) != H_EOK) {
DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n",
ret);
@@ -531,10 +580,14 @@ px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
*/
/*ARGSUSED*/
int
-px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attr_p, uint64_t *lo_p, uint64_t *hi_p)
+px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
+ uint64_t *lo_p, uint64_t *hi_p)
{
- *lo_p = MMU_BYPASS_BASE;
- *hi_p = MMU_BYPASS_END;
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+ *lo_p = hvio_get_bypass_base(pxu_p);
+ *hi_p = hvio_get_bypass_end(pxu_p);
return (DDI_SUCCESS);
}
@@ -546,12 +599,14 @@ px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr,
io_addr_t *io_addr_p)
{
uint64_t ret;
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
"attr 0x%x\n", dip, ra, attr);
- if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra, attr,
- io_addr_p)) != H_EOK) {
+ if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra,
+ attr, io_addr_p)) != H_EOK) {
DBG(DBG_LIB_DMA, dip,
"hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
return (DDI_FAILURE);
@@ -572,12 +627,20 @@ px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
off_t off, size_t len, uint_t cache_flags)
{
ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
"handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
dip, rdip, handle, off, len, cache_flags);
/*
+ * No flush needed for Oberon
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
+ return (DDI_SUCCESS);
+
+ /*
* jbus_stst_order is found only in certain cpu modules.
* Just return success if not present.
*/
@@ -1242,6 +1305,122 @@ px_lib_resume(dev_info_t *dip)
hvio_resume(dev_hdl, pec_ino, pxu_p);
}
+/*
+ * Generate a unique Oberon UBC ID based on the Logical System Board and
+ * the IO Channel from the portid property field.
+ */
+static uint64_t
+oberon_get_ubc_id(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t ubc_id;
+
+ /*
+ * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
+ * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
+ */
+ ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
+ OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
+ OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
+ << OBERON_UBC_ID_LSB));
+
+ return (ubc_id);
+}
+
+/*
+ * Oberon does not have a UBC scratch register, so alloc an array of scratch
+ * registers when needed and use a unique UBC ID as an index. This code
+ * can be simplified if we use a pre-allocated array. They are currently
+ * being dynamically allocated because it's only needed by the Oberon.
+ */
+static void
+oberon_set_cb(dev_info_t *dip, uint64_t val)
+{
+ uint64_t ubc_id;
+
+ if (px_oberon_ubc_scratch_regs == NULL)
+ px_oberon_ubc_scratch_regs =
+ (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
+ OBERON_UBC_ID_MAX, KM_SLEEP);
+
+ ubc_id = oberon_get_ubc_id(dip);
+
+ px_oberon_ubc_scratch_regs[ubc_id] = val;
+
+ /*
+ * Check if any scratch registers are still in use. If all scratch
+ * registers are currently set to zero, then deallocate the scratch
+ * register array.
+ */
+ for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
+ if (px_oberon_ubc_scratch_regs[ubc_id] != NULL)
+ return;
+ }
+
+ /*
+ * All scratch registers are set to zero so deallocate the scratch
+ * register array and set the pointer to NULL.
+ */
+ kmem_free(px_oberon_ubc_scratch_regs,
+ (sizeof (uint64_t)*OBERON_UBC_ID_MAX));
+
+ px_oberon_ubc_scratch_regs = NULL;
+}
+
+/*
+ * Oberon does not have a UBC scratch register, so use an allocated array of
+ * scratch registers and use the unique UBC ID as an index into that array.
+ */
+static uint64_t
+oberon_get_cb(dev_info_t *dip)
+{
+ uint64_t ubc_id;
+
+ if (px_oberon_ubc_scratch_regs == NULL)
+ return (0);
+
+ ubc_id = oberon_get_ubc_id(dip);
+
+ return (px_oberon_ubc_scratch_regs[ubc_id]);
+}
+
+/*
+ * Misc Functions:
+ * Currently unsupported by hypervisor
+ */
+static uint64_t
+px_get_cb(dev_info_t *dip)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+ /*
+ * Oberon does not currently have Scratchpad registers.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
+ return (oberon_get_cb(dip));
+
+ return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
+}
+
+static void
+px_set_cb(dev_info_t *dip, uint64_t val)
+{
+ px_t *px_p = DIP_TO_STATE(dip);
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+
+ /*
+ * Oberon does not currently have Scratchpad registers.
+ */
+ if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
+ oberon_set_cb(dip, val);
+ return;
+ }
+
+ CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
+}
+
/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
@@ -1806,6 +1985,15 @@ px_identity_chip(px_t *px_p)
return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
}
+ /* Check for Oberon driver binding name */
+ if (strcmp(name, "pciex108e,80f8") == 0) {
+ DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
+ "name %s module-revision %d\n", ddi_driver_name(dip),
+ ddi_get_instance(dip), name, revision);
+
+ return (PX_CHIP_ID(PX_CHIP_OBERON, revision, 0x00));
+ }
+
DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
ddi_driver_name(dip), ddi_get_instance(dip), name, revision);
@@ -1847,8 +2035,7 @@ px_cb_add_intr(px_fault_t *fault_p)
{
px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip);
pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
- px_cb_t *cb_p = (px_cb_t *)CSR_XR((caddr_t)
- pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1);
+ px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
px_cb_list_t *pxl, *pxl_new;
cpuid_t cpuid;
@@ -1858,8 +2045,7 @@ px_cb_add_intr(px_fault_t *fault_p)
mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
cb_p->px_cb_func = px_cb_intr;
pxu_p->px_cb_p = cb_p;
- CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1,
- (uint64_t)cb_p);
+ px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
} else
pxu_p->px_cb_p = cb_p;
@@ -1967,7 +2153,7 @@ px_cb_rem_intr(px_fault_t *fault_p)
mutex_exit(&cb_p->cb_mutex);
mutex_destroy(&cb_p->cb_mutex);
- CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, 0ull);
+ px_set_cb(fault_p->px_fh_dip, 0ull);
kmem_free(cb_p, sizeof (px_cb_t));
}
@@ -2039,8 +2225,7 @@ px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
uint32_t val;
/* Get Fire's Physical Base Address */
- range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
- rp[bank].parent_low;
+ range_prop = px_get_range_prop(px_p, rp, bank);
/* Get config space first. */
base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
@@ -2058,8 +2243,7 @@ px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
/* Get Fire's Physical Base Address */
- range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
- rp[bank].parent_low;
+ range_prop = px_get_range_prop(px_p, rp, bank);
/* Get config space first. */
base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
@@ -2170,6 +2354,31 @@ px_cpr_callb(void *arg, int code)
}
/*
+ * fetch chip's range property's value
+ */
+uint64_t
+px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
+{
+ pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
+ uint64_t mask, range_prop;
+
+ switch (PX_CHIP_TYPE(pxu_p)) {
+ case PX_CHIP_OBERON:
+ mask = OBERON_RANGE_PROP_MASK;
+ break;
+ case PX_CHIP_FIRE:
+ mask = FIRE_RANGE_PROP_MASK;
+ break;
+ default:
+ mask = FIRE_RANGE_PROP_MASK;
+ }
+ range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
+ rp[bank].parent_low;
+
+ return (range_prop);
+}
+
+/*
* add cpr callback
*/
void
@@ -2189,14 +2398,69 @@ px_cpr_rem_callb(px_t *px_p)
}
/*ARGSUSED*/
+static uint_t
+px_hp_intr(caddr_t arg1, caddr_t arg2)
+{
+ px_t *px_p = (px_t *)arg1;
+ int rval;
+
+ rval = pciehpc_intr(px_p->px_dip);
+
+#ifdef DEBUG
+ if (rval == DDI_INTR_UNCLAIMED)
+ cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
+ ddi_driver_name(px_p->px_dip),
+ ddi_get_instance(px_p->px_dip));
+#endif
+
+ return (rval);
+}
+
int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
- return (DDI_ENOTSUP);
+ px_t *px_p = DIP_TO_STATE(dip);
+ uint64_t ret;
+
+ if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
+ sysino_t sysino;
+
+ if (px_lib_intr_devino_to_sysino(px_p->px_dip,
+ px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
+ DDI_SUCCESS) {
+#ifdef DEBUG
+ cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
+ ddi_driver_name(px_p->px_dip),
+ ddi_get_instance(px_p->px_dip));
+#endif
+ return (DDI_FAILURE);
+ }
+
+ VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
+ (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
+ }
+
+ return (ret);
}
-/*ARGSUSED*/
void
px_lib_hotplug_uninit(dev_info_t *dip)
{
+ if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
+ px_t *px_p = DIP_TO_STATE(dip);
+ sysino_t sysino;
+
+ if (px_lib_intr_devino_to_sysino(px_p->px_dip,
+ px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
+ DDI_SUCCESS) {
+#ifdef DEBUG
+ cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
+ ddi_driver_name(px_p->px_dip),
+ ddi_get_instance(px_p->px_dip));
+#endif
+ return;
+ }
+
+ rem_ivintr(sysino, NULL);
+ }
}
diff --git a/usr/src/uts/sun4u/io/px/px_lib4u.h b/usr/src/uts/sun4u/io/px/px_lib4u.h
index e7c27cfb5b..5012e4a4f1 100644
--- a/usr/src/uts/sun4u/io/px/px_lib4u.h
+++ b/usr/src/uts/sun4u/io/px/px_lib4u.h
@@ -86,6 +86,8 @@ typedef struct pxu {
uint16_t tsb_cookie;
uint32_t tsb_size;
uint64_t *tsb_vaddr;
+ uint64_t tsb_paddr; /* Only used for Oberon */
+
void *msiq_mapped_p;
px_cb_t *px_cb_p;
@@ -136,7 +138,8 @@ typedef struct eq_rec {
#define MMU_INVALID_TTE 0ull
#define MMU_TTE_VALID(tte) (((tte) & MMU_TTE_V) == MMU_TTE_V)
-#define MMU_TTETOPA(x) ((x & 0x7ffffffffff) >> MMU_PAGE_SHIFT)
+#define MMU_OBERON_PADDR_MASK 0x7fffffffffff
+#define MMU_FIRE_PADDR_MASK 0x7ffffffffff
/*
* control register decoding
@@ -146,16 +149,26 @@ typedef struct eq_rec {
#define MMU_TSBSIZE_TO_TSBENTRIES(s) ((1 << (s)) << (13 - 3))
/*
- * For mmu bypass addresses, bit 43 specifies cacheability.
+ * For Fire mmu bypass addresses, bit 43 specifies cacheability.
*/
-#define MMU_BYPASS_NONCACHE (1ull << 43)
+#define MMU_FIRE_BYPASS_NONCACHE (1ull << 43)
+
+/*
+ * For Oberon mmu bypass addresses, bit 47 specifies cacheability.
+ */
+#define MMU_OBERON_BYPASS_NONCACHE (1ull << 47)
/*
* The following macros define the address ranges supported for DVMA
- * and mmu bypass transfers.
+ * and mmu bypass transfers. For Oberon, bit 63 is used for ordering.
*/
-#define MMU_BYPASS_BASE 0xFFFC000000000000ull
-#define MMU_BYPASS_END 0xFFFC01FFFFFFFFFFull
+#define MMU_FIRE_BYPASS_BASE 0xFFFC000000000000ull
+#define MMU_FIRE_BYPASS_END 0xFFFC01FFFFFFFFFFull
+
+#define MMU_OBERON_BYPASS_BASE 0x7FFC000000000000ull
+#define MMU_OBERON_BYPASS_END 0x7FFC01FFFFFFFFFFull
+
+#define MMU_TSB_PA_MASK 0x7FFFFFFFE000
/*
* The following macros are for loading and unloading io tte
@@ -164,6 +177,7 @@ typedef struct eq_rec {
#define MMU_TTE_SIZE 8
#define MMU_TTE_V (1ull << 63)
#define MMU_TTE_W (1ull << 1)
+#define MMU_TTE_RO (1ull << 62) /* Oberon Relaxed Ordering */
#define INO_BITS 6 /* INO#s are 6 bits long */
#define IGN_BITS 5 /* IGN#s are 5 bits long */
@@ -248,7 +262,8 @@ typedef struct eq_rec {
*/
typedef enum {
PX_CHIP_UNIDENTIFIED = 0,
- PX_CHIP_FIRE = 1
+ PX_CHIP_FIRE = 1,
+ PX_CHIP_OBERON = 2
} px_chip_id_t;
/*
@@ -256,16 +271,18 @@ typedef enum {
* 0x00 <chip_type> <version#> <module-revision#>
*/
#define PX_CHIP_ID(t, v, m) (((t) << 16) | ((v) << 8) | (m))
-#define PX_ID_CHIP_TYPE(id) ((id) >> 16)
-#define PX_CHIP_TYPE(pxu_p) PX_ID_CHIP_TYPE(PX_CHIP_ID((pxu_p)->chip_id))
-#define PX_CHIP_REV(pxu_p) PX_CHIP_ID(((pxu_p)->chip_id) & 0xFF)
-#define PX_CHIP_VER(pxu_p) PX_CHIP_ID((((pxu_p)->chip_id) >> 8) & 0xFF)
+#define PX_CHIP_TYPE(pxu_p) (((pxu_p)->chip_id) >> 16)
+#define PX_CHIP_REV(pxu_p) (((pxu_p)->chip_id) & 0xFF)
+#define PX_CHIP_VER(pxu_p) ((((pxu_p)->chip_id) >> 8) & 0xFF)
/*
* Fire hardware specific version definitions.
*/
#define FIRE_VER_10 PX_CHIP_ID(PX_CHIP_FIRE, 0x01, 0x00)
#define FIRE_VER_20 PX_CHIP_ID(PX_CHIP_FIRE, 0x03, 0x00)
+#define OBERON_VER_10 PX_CHIP_ID(PX_CHIP_OBERON, 0x00, 0x00)
+#define FIRE_RANGE_PROP_MASK 0x7ff
+#define OBERON_RANGE_PROP_MASK 0x7fff
extern void hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
extern void hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p);
@@ -282,10 +299,10 @@ extern uint64_t hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
intr_state_t *intr_state);
extern uint64_t hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
intr_state_t intr_state);
-extern uint64_t hvio_intr_gettarget(devhandle_t dev_hdl, sysino_t sysino,
- cpuid_t *cpuid);
-extern uint64_t hvio_intr_settarget(devhandle_t dev_hdl, sysino_t sysino,
- cpuid_t cpuid);
+extern uint64_t hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p,
+ sysino_t sysino, cpuid_t *cpuid);
+extern uint64_t hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p,
+ sysino_t sysino, cpuid_t cpuid);
extern uint64_t hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
pages_t pages, io_attributes_t attr, void *addr, size_t pfn_index,
@@ -294,8 +311,12 @@ extern uint64_t hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p,
tsbid_t tsbid, pages_t pages);
extern uint64_t hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p,
tsbid_t tsbid, io_attributes_t *attr_p, r_addr_t *r_addr_p);
-extern uint64_t hvio_iommu_getbypass(devhandle_t dev_hdl, r_addr_t ra,
- io_attributes_t attr, io_addr_t *io_addr_p);
+extern uint64_t hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p,
+ r_addr_t ra, io_attributes_t attr, io_addr_t *io_addr_p);
+extern uint64_t hvio_get_bypass_base(pxu_t *pxu_p);
+extern uint64_t hvio_get_bypass_end(pxu_t *pxu_p);
+extern uint64_t px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank);
+
/*
* MSIQ Functions:
@@ -362,6 +383,12 @@ extern void px_enable_detect_quiet(caddr_t csr_base);
extern void px_lib_clr_errs(px_t *px_p);
+/*
+ * Hotplug functions:
+ */
+extern int hvio_hotplug_init(dev_info_t *dip, void *arg);
+extern int hvio_hotplug_uninit(dev_info_t *dip);
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/sun4u/io/todopl.c b/usr/src/uts/sun4u/io/todopl.c
new file mode 100644
index 0000000000..5d510d3c42
--- /dev/null
+++ b/usr/src/uts/sun4u/io/todopl.c
@@ -0,0 +1,321 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * tod driver module for OPL (implements a soft tod)
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/sysmacros.h>
+#include <sys/systm.h>
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/autoconf.h>
+#include <sys/debug.h>
+#include <sys/clock.h>
+#include <sys/cmn_err.h>
+#include <sys/prom_plat.h>
+#include <sys/cpuvar.h>
+#include <sys/opl_module.h>
+
+/*
+ * Debug stuff
+ */
+#ifdef DEBUG
+int todopl_debug = 0;
+#define TODOPL_DEBUG(args) if (todopl_debug) cmn_err args
+#else
+#define TODOPL_DEBUG(args)
+#endif
+
+#define abs(x) ((x) < 0 ? -(x) : (x))
+
+#define TODOPL_SET_THRESHOLD 30
+
+static timestruc_t todopl_get(void);
+static void todopl_set(timestruc_t);
+static uint_t todopl_set_watchdog_timer(uint_t);
+static uint_t todopl_clear_watchdog_timer(void);
+static void todopl_set_power_alarm(timestruc_t);
+static void todopl_clear_power_alarm(void);
+static uint64_t todopl_get_cpufrequency(void);
+
+/*
+ * Module linkage information for the kernel.
+ */
+static struct modlmisc modlmisc = {
+ &mod_miscops, "Soft tod module for OPL 1.11"
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modlmisc, NULL
+};
+
+/*
+ * The TOD OPL logic description.
+ *
+ * The todopl driver uses promif functions prom_opl_get_tod() and
+ * prom_opl_set_diff(). These functions call FJSV,get-tod and
+ * FJSV,set-domain-time OBP client services.
+ *
+ * At the system boot or reboot:
+ *
+ * FJSV,get-tod
+ * OS ---------> OBP SCF I/F
+ * -----------> XSCF
+ * <-----------
+ * <-------- time, diff
+ * time+diff, stick
+ *
+ * Note that on first powerup domain boot, diff is zero.
+ *
+ * When system updates the time via date(1m):
+ *
+ * FJSV,set-domain-time
+ * OS ---------> OBP SRAM
+ * diff_delta diff += diff_delta -------------> XSCF
+ *
+ * diff_delta = new time - current domain time (hrestime)
+ *
+ *
+ * In theory, FJSV,get-tod and FJSV,set-domain-time should never fail.
+ * But, if call to FJSV,get-tod fails on boot, the domain will be unable
+ * to calculate "diff" properly and synchronization between Domain and
+ * SP will be broken. In this particular case, we notify users that
+ * "there is no time synchronization" and the logic will attempt to
+ * resync with the SP whenever the OS tries to do a TOD update.
+ * (e.g. via date(1m) or NTP).
+ */
+
+static int enable_time_sync = 1;
+
+int
+_init(void)
+{
+ int64_t stick;
+ time_t obp_time = 0;
+ int64_t obp_stick;
+
+ if (strcmp(tod_module_name, "todopl") == 0) {
+ /*
+ * Get TOD time from OBP and adjust it.
+ */
+ prom_opl_get_tod(&obp_time, &obp_stick);
+
+ TODOPL_DEBUG((CE_NOTE, "todopl: OBP time 0x%lx stick 0x%lx\n",
+ obp_time, obp_stick));
+
+ if (obp_time != 0) {
+ /*
+ * adjust OBP time by stick counts
+ */
+ stick_timestamp(&stick);
+ obp_time += ((stick - obp_stick) / system_clock_freq);
+
+ TODOPL_DEBUG((CE_NOTE,
+ "todopl: cpu stick 0x%lx sys_time 0x%lx\n",
+ stick, obp_time));
+ } else {
+ /*
+ * A date of zero causes the root filesystem driver
+ * to try to set the date from the last shutdown.
+ */
+ enable_time_sync = 0;
+ cmn_err(CE_WARN, "Initial date is invalid.");
+ cmn_err(CE_CONT, "Attempting to set the date and time "
+ "based on the last shutdown.\n");
+ cmn_err(CE_CONT, "The time could not be synchronized "
+ "between Domain and Service Processor.\n");
+ cmn_err(CE_CONT, "Please inspect the date and time and "
+ "correct if necessary.\n");
+ }
+
+ hrestime.tv_sec = obp_time;
+
+ /*
+ * Check that the date has not overflowed a 32-bit integer.
+ */
+ if (TIMESPEC_OVERFLOW(&hrestime)) {
+ cmn_err(CE_WARN, "Date overflow detected.");
+ cmn_err(CE_CONT, "Attempting to set the date and time "
+ "based on the last shutdown.\n");
+ cmn_err(CE_CONT, "Please inspect the date and time and "
+ "correct if necessary.\n");
+
+ hrestime.tv_sec = (time_t)0;
+ }
+
+ tod_ops.tod_get = todopl_get;
+ tod_ops.tod_set = todopl_set;
+ tod_ops.tod_set_watchdog_timer = todopl_set_watchdog_timer;
+ tod_ops.tod_clear_watchdog_timer = todopl_clear_watchdog_timer;
+ tod_ops.tod_set_power_alarm = todopl_set_power_alarm;
+ tod_ops.tod_clear_power_alarm = todopl_clear_power_alarm;
+ tod_ops.tod_get_cpufrequency = todopl_get_cpufrequency;
+
+ /*
+ * Flag warning if user tried to use hardware watchdog
+ */
+ if (watchdog_enable) {
+ cmn_err(CE_WARN, "Hardware watchdog unavailable");
+ }
+ }
+
+ return (mod_install(&modlinkage));
+}
+
+int
+_fini(void)
+{
+ if (strcmp(tod_module_name, "todopl") == 0)
+ return (EBUSY);
+ else
+ return (mod_remove(&modlinkage));
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+
+/*
+ * OPL tod_get is simplified to return hrestime
+ * Must be called with tod_lock held.
+ */
+static timestruc_t
+todopl_get(void)
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+ return (hrestime);
+}
+
+/*
+ * Must be called with tod_lock held.
+ *
+ * When running NTP, tod_set is called at least once per second in order
+ * to update the hardware clock. To minimize pressure on SP, we want only
+ * to record significant time changes on the SP (when date(1M) is run).
+ * We have 30 seconds threshold requirement before recording the time change.
+ */
+/* ARGSUSED */
+static void
+todopl_set(timestruc_t ts)
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+
+ if (abs(ts.tv_sec - hrestime.tv_sec) > TODOPL_SET_THRESHOLD) {
+ /*
+ * Send time difference to SP
+ */
+ if (enable_time_sync)
+ prom_opl_set_diff(ts.tv_sec - hrestime.tv_sec);
+ else {
+ /*
+ * We did not get a successful initial time
+ * update/sync from the SP via OBP during boot.
+ * Try again here.
+ */
+ time_t obp_time = 0;
+ int64_t obp_stick;
+ int64_t stick;
+
+ prom_opl_get_tod(&obp_time, &obp_stick);
+
+ if (obp_time != 0) {
+ /*
+ * adjust OBP time by stick counts
+ */
+ stick_timestamp(&stick);
+ obp_time += ((stick - obp_stick) /
+ system_clock_freq);
+
+ /*
+ * Sync up by computing the diff using the
+ * newly acquired SP/OBP reference time
+ */
+ prom_opl_set_diff(ts.tv_sec - obp_time);
+
+ enable_time_sync = 1;
+ }
+ }
+ TODOPL_DEBUG((CE_NOTE, "todopl_set: new domain time 0x%lx\n",
+ ts.tv_sec));
+ }
+}
+
+/*
+ * No watchdog function.
+ */
+/* ARGSUSED */
+static uint_t
+todopl_set_watchdog_timer(uint_t timeoutval)
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+ return (0);
+}
+
+/*
+ * No watchdog function
+ */
+static uint_t
+todopl_clear_watchdog_timer(void)
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+ return (0);
+}
+
+/*
+ * Null function.
+ */
+/* ARGSUSED */
+static void
+todopl_set_power_alarm(timestruc_t ts)
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+}
+
+/*
+ * Null function
+ */
+static void
+todopl_clear_power_alarm()
+{
+ ASSERT(MUTEX_HELD(&tod_lock));
+}
+
+/*
+ * Get clock freq from the cpunode. This function is only called
+ * when use_stick = 0, otherwise, system_clock_freq gets used instead.
+ */
+uint64_t
+todopl_get_cpufrequency(void)
+{
+ return (cpunodes[CPU->cpu_id].clock_freq);
+}
diff --git a/usr/src/uts/sun4u/ml/mach_locore.s b/usr/src/uts/sun4u/ml/mach_locore.s
index 56a6254626..b430fef0cc 100644
--- a/usr/src/uts/sun4u/ml/mach_locore.s
+++ b/usr/src/uts/sun4u/ml/mach_locore.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -1191,7 +1190,13 @@ ptl1_panic(u_int reason)
! Flushing D$ erases old data in D$, so that it will not be loaded.
! Since we can afford only 2 registers (%g2 and %g3) for this job, we
! flush entire D$.
+ ! For FJ OPL processors (IMPL values < SPITFIRE_IMPL), DC flushing
+ ! is not needed.
!
+ GET_CPU_IMPL(%g2)
+ cmp %g2, SPITFIRE_IMPL
+ blt,pn %icc, 1f ! Skip flushing for OPL processors
+ nop
sethi %hi(dcache_size), %g2
ld [%g2 + %lo(dcache_size)], %g2
sethi %hi(dcache_linesize), %g3
@@ -1201,6 +1206,7 @@ ptl1_panic(u_int reason)
membar #Sync
brnz,pt %g2, 0b
sub %g2, %g3, %g2
+1:
!
! increment the entry counter.
! save CPU state if this is the first entry.
diff --git a/usr/src/uts/sun4u/ml/mach_offsets.in b/usr/src/uts/sun4u/ml/mach_offsets.in
index d70e3c3457..560127697e 100644
--- a/usr/src/uts/sun4u/ml/mach_offsets.in
+++ b/usr/src/uts/sun4u/ml/mach_offsets.in
@@ -2,9 +2,8 @@
\ CDDL HEADER START
\
\ The contents of this file are subject to the terms of the
-\ Common Development and Distribution License, Version 1.0 only
-\ (the "License"). You may not use this file except in compliance
-\ with the License.
+\ Common Development and Distribution License (the "License").
+\ You may not use this file except in compliance with the License.
\
\ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
\ or http://www.opensolaris.org/os/licensing.
@@ -19,7 +18,7 @@
\
\ CDDL HEADER END
\
-\ Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+\ Copyright 2006 Sun Microsystems, Inc. All rights reserved.
\ Use is subject to license terms.
\
\ offsets.in: input file to produce assym.h using the stabs program
@@ -200,6 +199,7 @@ page PAGE_SIZE
tsb_info TSBINFO_SIZE
tsb_tte TSBINFO_TTE
tsb_va TSBINFO_VADDR
+ tsb_pa TSBINFO_PADDR
tsb_szc TSBINFO_SZCODE
tsb_next TSBINFO_NEXTPTR
diff --git a/usr/src/uts/sun4u/ml/trap_table.s b/usr/src/uts/sun4u/ml/trap_table.s
index 81c0089739..981749e8ee 100644
--- a/usr/src/uts/sun4u/ml/trap_table.s
+++ b/usr/src/uts/sun4u/ml/trap_table.s
@@ -31,6 +31,7 @@
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/sun4asi.h>
+#include <sys/spitregs.h>
#include <sys/cheetahregs.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
@@ -855,7 +856,9 @@
* to stop RED state entry if the store queue has many
* pending bad stores (PRM, Chapter 11).
*/
-#define ASYNC_TRAP(ttype, ttlabel)\
+#define ASYNC_TRAP(ttype, ttlabel, table_name)\
+ .global table_name ;\
+table_name: ;\
membar #Sync ;\
TT_TRACE(ttlabel) ;\
ba async_err ;\
@@ -1063,6 +1066,41 @@ tt1_dtlbmiss:
* could live in this file but currently it seems better to allow
* it to fall thru to sfmmu_tsb_miss.
*/
+#ifdef UTSB_PHYS
+#define DTLB_MISS(table_name) ;\
+ .global table_name/**/_dtlbmiss ;\
+table_name/**/_dtlbmiss: ;\
+ HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\
+ mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\
+ ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\
+ ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\
+ sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\
+ srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\
+ cmp %g3, INVALID_CONTEXT ;\
+ ble,pn %xcc, sfmmu_kdtlb_miss ;\
+ srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
+ mov SCRATCHPAD_UTSBREG, %g3 ;\
+ ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\
+ brgez,pn %g3, sfmmu_udtlb_slowpath /* branch if 2 TSBs */ ;\
+ nop ;\
+ ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, %g5 data */;\
+ cmp %g4, %g7 ;\
+ bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\
+ mov %g0, %g3 /* clear 4M tsbe ptr */ ;\
+ TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\
+ stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\
+ retry /* in %g5 */ ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ .align 128
+#else /* UTSB_PHYS */
#define DTLB_MISS(table_name) ;\
.global table_name/**/_dtlbmiss ;\
table_name/**/_dtlbmiss: ;\
@@ -1096,6 +1134,7 @@ table_name/**/_dtlbmiss: ;\
unimp 0 ;\
unimp 0 ;\
.align 128
+#endif /* UTSB_PHYS */
#if defined(cscope)
/*
@@ -1115,6 +1154,41 @@ tt1_itlbmiss:
* by sfmmu_patch_ktsb at runtime.
* MUST be EXACTLY 32 instructions or we'll break.
*/
+#ifdef UTSB_PHYS
+#define ITLB_MISS(table_name) \
+ .global table_name/**/_itlbmiss ;\
+table_name/**/_itlbmiss: ;\
+ HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\
+ mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\
+ ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\
+ ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\
+ sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\
+ srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\
+ cmp %g3, INVALID_CONTEXT ;\
+ ble,pn %xcc, sfmmu_kitlb_miss ;\
+ srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
+ mov SCRATCHPAD_UTSBREG, %g3 ;\
+ ldxa [%g3]ASI_SCRATCHPAD, %g3 /* g3 = 2nd tsb reg */ ;\
+ brgez,pn %g3, sfmmu_uitlb_slowpath /* branch if 2 TSBs */ ;\
+ nop ;\
+ ldda [%g1]ASI_QUAD_LDD_PHYS, %g4 /* g4 = tag, g5 = data */ ;\
+ cmp %g4, %g7 ;\
+ bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\
+ mov %g0, %g3 /* no 4M TSB */ ;\
+ andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\
+ bz,pn %icc, exec_fault ;\
+ nop ;\
+ TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\
+ stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\
+ retry ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ unimp 0 ;\
+ .align 128
+#else /* UTSB_PHYS */
#define ITLB_MISS(table_name) \
.global table_name/**/_itlbmiss ;\
table_name/**/_itlbmiss: ;\
@@ -1148,6 +1222,7 @@ table_name/**/_itlbmiss: ;\
unimp 0 ;\
unimp 0 ;\
.align 128
+#endif /* UTSB_PHYS */
/*
@@ -1278,7 +1353,7 @@ trap_table0:
NOT; NOT; /* 006 - 007 reserved */
IMMU_EXCEPTION; /* 008 instruction access exception */
NOT; /* 009 instruction access MMU miss */
- ASYNC_TRAP(T_INSTR_ERROR, trace_gen);
+ ASYNC_TRAP(T_INSTR_ERROR, trace_gen, tt0_iae);
/* 00A instruction access error */
NOT; NOT4; /* 00B - 00F reserved */
ILLTRAP_INSTR; /* 010 illegal instruction */
@@ -1296,7 +1371,7 @@ trap_table0:
NOT; NOT; NOT4; /* 02A - 02F reserved */
DMMU_EXCEPTION; /* 030 data access exception */
NOT; /* 031 data access MMU miss */
- ASYNC_TRAP(T_DATA_ERROR, trace_gen);
+ ASYNC_TRAP(T_DATA_ERROR, trace_gen, tt0_dae);
/* 032 data access error */
NOT; /* 033 data access protection */
DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */
@@ -1306,7 +1381,7 @@ trap_table0:
NOT; /* 038 LDQF mem address not aligned */
NOT; /* 039 STQF mem address not aligned */
NOT; NOT; NOT4; /* 03A - 03F reserved */
- NOT; /* 040 async data error */
+ LABELED_BAD(tt0_asdat); /* 040 async data error */
LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */
LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */
LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */
@@ -1432,7 +1507,7 @@ trap_table0:
NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */
trap_table1:
NOT4; NOT4; NOT; NOT; /* 000 - 009 unused */
- ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen);
+ ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen, tt1_iae);
/* 00A instruction access error */
NOT; NOT4; /* 00B - 00F unused */
NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */
@@ -1441,12 +1516,14 @@ trap_table1:
NOT4; NOT4; /* 028 - 02F unused */
DMMU_EXCEPTION_TL1; /* 030 data access exception */
NOT; /* 031 unused */
- ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen);
+ ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen, tt1_dae);
/* 032 data access error */
NOT; /* 033 unused */
MISALIGN_ADDR_TL1; /* 034 mem address not aligned */
NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */
- NOT4; NOT4; NOT4; NOT4; /* 040 - 04F unused */
+ LABELED_BAD(tt1_asdat); /* 040 async data error */
+ NOT; NOT; NOT; /* 041 - 043 unused */
+ NOT4; NOT4; NOT4; /* 044 - 04F unused */
NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */
NOT; /* 060 unused */
GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */
@@ -2608,13 +2685,15 @@ mmu_trap_tl1_4:
add %g6, CPU_TL1_HDLR, %g6 ! %g6 = &cpu_m.tl1_hdlr (VA)
GET_CPU_IMPL(%g5)
cmp %g5, CHEETAH_IMPL
- bl,pn %icc, 3f
- sethi %hi(dcache_line_mask), %g5
+ bl,pt %icc, 3f
+ cmp %g5, SPITFIRE_IMPL
stxa %g0, [%g7]ASI_DC_INVAL
membar #Sync
ba,pt %xcc, 2f
- nop
+ nop
3:
+ bl,pt %icc, 2f
+ sethi %hi(dcache_line_mask), %g5
ld [%g5 + %lo(dcache_line_mask)], %g5
and %g6, %g5, %g5
stxa %g0, [%g5]ASI_DC_TAG
@@ -2932,4 +3011,3 @@ fast_trap_dummy_call:
nop
#endif /* lint */
-
diff --git a/usr/src/uts/sun4u/ngdr/io/dr.c b/usr/src/uts/sun4u/ngdr/io/dr.c
index f9d0bd668f..2d84b9e4ef 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -115,7 +114,9 @@ static dr_devname_t dr_devattr[] = {
{ DRMACH_DEVTYPE_MEM, SBD_COMP_MEM },
{ DRMACH_DEVTYPE_CPU, SBD_COMP_CPU },
{ DRMACH_DEVTYPE_PCI, SBD_COMP_IO },
+#if defined(DRMACH_DEVTYPE_SBUS)
{ DRMACH_DEVTYPE_SBUS, SBD_COMP_IO },
+#endif
#if defined(DRMACH_DEVTYPE_WCI)
{ DRMACH_DEVTYPE_WCI, SBD_COMP_IO },
#endif
@@ -448,7 +449,7 @@ _init(void)
* soft state structure each time a node is attached.
*/
err = ddi_soft_state_init((void **)&dr_g.softsp,
- sizeof (dr_softstate_t), 1);
+ sizeof (dr_softstate_t), 1);
if (err)
return (err);
@@ -546,12 +547,9 @@ dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
}
/*
- * Enable/disable Starcat DR features.
+ * Enable/disable DR features.
*/
-#ifndef _STARFIRE
int dr_enable = 1;
-int slot1_dr_enable = 1;
-#endif /* _STARFIRE */
/*ARGSUSED3*/
static int
@@ -576,7 +574,6 @@ dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
return (ENXIO);
}
-#ifndef _STARFIRE
if (!dr_enable) {
switch (cmd) {
case SBD_CMD_STATUS:
@@ -587,25 +584,11 @@ dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
return (ENOTSUP);
}
}
-#endif /* _STARFIRE */
bd = DR_MINOR2BNUM(getminor(dev));
if (bd >= MAX_BOARDS)
return (ENXIO);
-#ifndef _STARFIRE
- if (!slot1_dr_enable && (bd & 0x1)) {
- switch (cmd) {
- case SBD_CMD_STATUS:
- case SBD_CMD_GETNCM:
- case SBD_CMD_PASSTHRU:
- break;
- default:
- return (ENOTSUP);
- }
- }
-#endif /* _STARFIRE */
-
/* get and initialize storage for new handle */
hp = GETSTRUCT(dr_handle_t, 1);
hp->h_bd = &softsp->boards[bd];
@@ -695,7 +678,7 @@ dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
/* Board changed state. Log a sysevent. */
if (rv == 0)
(void) drmach_log_sysevent(hp->h_bd->b_num, "",
- SE_SLEEP, 1);
+ SE_SLEEP, 1);
/* Fall through */
default:
@@ -917,7 +900,7 @@ dr_copyin_iocmd(dr_handle_t *hp)
bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
- sizeof (sbd_cmd32_t), hp->h_mode)) {
+ sizeof (sbd_cmd32_t), hp->h_mode)) {
cmn_err(CE_WARN,
"%s: (32bit) failed to copyin "
"sbdcmd-struct", f);
@@ -944,7 +927,7 @@ dr_copyin_iocmd(dr_handle_t *hp)
} else
#endif /* _MULTI_DATAMODEL */
if (ddi_copyin((void *)hp->h_iap, (void *)scp,
- sizeof (sbd_cmd_t), hp->h_mode) != 0) {
+ sizeof (sbd_cmd_t), hp->h_mode) != 0) {
cmn_err(CE_WARN,
"%s: failed to copyin sbdcmd-struct", f);
return (EFAULT);
@@ -994,7 +977,7 @@ dr_copyout_iocmd(dr_handle_t *hp)
}
if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
- sizeof (sbd_cmd32_t), hp->h_mode)) {
+ sizeof (sbd_cmd32_t), hp->h_mode)) {
cmn_err(CE_WARN,
"%s: (32bit) failed to copyout "
"sbdcmd-struct", f);
@@ -1003,7 +986,7 @@ dr_copyout_iocmd(dr_handle_t *hp)
} else
#endif /* _MULTI_DATAMODEL */
if (ddi_copyout((void *)scp, (void *)hp->h_iap,
- sizeof (sbd_cmd_t), hp->h_mode) != 0) {
+ sizeof (sbd_cmd_t), hp->h_mode) != 0) {
cmn_err(CE_WARN,
"%s: failed to copyout sbdcmd-struct", f);
return (EFAULT);
@@ -1036,7 +1019,7 @@ dr_copyout_errs(dr_handle_t *hp)
MAXPATHLEN);
if (ddi_copyout((void *)serr32p,
(void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
- sizeof (sbd_error32_t), hp->h_mode)) {
+ sizeof (sbd_error32_t), hp->h_mode)) {
cmn_err(CE_WARN,
"%s: (32bit) failed to copyout", f);
return (EFAULT);
@@ -1046,7 +1029,7 @@ dr_copyout_errs(dr_handle_t *hp)
#endif /* _MULTI_DATAMODEL */
if (ddi_copyout((void *)hp->h_err,
(void *)&hp->h_iap->i_err,
- sizeof (sbd_error_t), hp->h_mode)) {
+ sizeof (sbd_error_t), hp->h_mode)) {
cmn_err(CE_WARN,
"%s: failed to copyout", f);
return (EFAULT);
@@ -1371,7 +1354,7 @@ dr_connect(dr_handle_t *hp)
/*
* Board already has devices present.
*/
- PR_ALL("%s: devices already present (0x%x)\n",
+ PR_ALL("%s: devices already present (0x%lx)\n",
f, DR_DEVS_PRESENT(bp));
return;
}
@@ -1408,7 +1391,7 @@ dr_disconnect(dr_handle_t *hp)
* unattached can be disconnected.
*/
devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
- DR_DEVS_UNATTACHED(bp);
+ DR_DEVS_UNATTACHED(bp);
if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
@@ -2017,8 +2000,8 @@ dr_detach_update_state(dr_handle_t *hp,
hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
} else if ((bp->b_state != DR_STATE_PARTIAL) &&
- (DR_DEVS_ATTACHED(bp) !=
- DR_DEVS_PRESENT(bp))) {
+ (DR_DEVS_ATTACHED(bp) !=
+ DR_DEVS_PRESENT(bp))) {
/*
* Some devices remain attached.
*/
@@ -2248,7 +2231,7 @@ dr_dev_status(dr_handle_t *hp)
if (ncm > 1)
sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
pnstat = (pbsz - sizeof (sbd_stat32_t))/
- sizeof (sbd_dev_stat32_t);
+ sizeof (sbd_dev_stat32_t);
}
sz += sz32;
@@ -2489,7 +2472,7 @@ dr_get_ncm(dr_handle_t *hp)
devset = DR_DEVS_PRESENT(hp->h_bd);
if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
- DEVSET_ANYUNIT);
+ DEVSET_ANYUNIT);
/*
* Handle CPUs first to deal with possible CMP
@@ -2597,7 +2580,7 @@ dr_dev2devset(sbd_comp_id_t *cid)
devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
- PR_ALL("%s: COMP_NONE devset = 0x%x\n", f, devset);
+ PR_ALL("%s: COMP_NONE devset = 0x%lx\n", f, devset);
break;
case SBD_COMP_CPU:
@@ -2618,7 +2601,7 @@ dr_dev2devset(sbd_comp_id_t *cid)
devset = DEVSET(SBD_COMP_CMP, unit);
}
- PR_ALL("%s: CPU devset = 0x%x\n", f, devset);
+ PR_ALL("%s: CPU devset = 0x%lx\n", f, devset);
break;
case SBD_COMP_MEM:
@@ -2635,7 +2618,7 @@ dr_dev2devset(sbd_comp_id_t *cid)
} else
devset = DEVSET(cid->c_type, unit);
- PR_ALL("%s: MEM devset = 0x%x\n", f, devset);
+ PR_ALL("%s: MEM devset = 0x%lx\n", f, devset);
break;
case SBD_COMP_IO:
@@ -2647,7 +2630,7 @@ dr_dev2devset(sbd_comp_id_t *cid)
} else
devset = DEVSET(cid->c_type, unit);
- PR_ALL("%s: IO devset = 0x%x\n", f, devset);
+ PR_ALL("%s: IO devset = 0x%lx\n", f, devset);
break;
default:
@@ -2879,9 +2862,9 @@ dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
/* render dynamic attachment point path of this unit */
(void) snprintf(dp->du_common.sbdev_path,
- sizeof (dp->du_common.sbdev_path),
- (nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
- bp->b_path, name, DR_UNUM2SBD_UNUM(unum));
+ sizeof (dp->du_common.sbdev_path),
+ (nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
+ bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
dp->du_common.sbdev_id = id;
DR_DEV_SET_PRESENT(&dp->du_common);
@@ -3011,7 +2994,7 @@ dr_check_unit_attached(dr_common_unit_t *cp)
memlist_read_lock();
for (ml = phys_install; ml; ml = ml->next)
if ((endpa <= ml->address) ||
- (basepa >= (ml->address + ml->size)))
+ (basepa >= (ml->address + ml->size)))
continue;
else
break;
@@ -3208,7 +3191,7 @@ dr_board_discovery(dr_board_t *bp)
* information necessary to re-configure the device
* back online, e.g. memlist.
*/
- PR_ALL("%s: some devices LOST (0x%x)...\n", f, devs_lost);
+ PR_ALL("%s: some devices LOST (0x%lx)...\n", f, devs_lost);
for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
@@ -3256,13 +3239,13 @@ dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
bp->b_dip = dip;
bp->b_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
- MAX_CPU_UNITS_PER_BOARD);
+ MAX_CPU_UNITS_PER_BOARD);
bp->b_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
- MAX_MEM_UNITS_PER_BOARD);
+ MAX_MEM_UNITS_PER_BOARD);
bp->b_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
- MAX_IO_UNITS_PER_BOARD);
+ MAX_IO_UNITS_PER_BOARD);
/*
* Initialize the devlists
@@ -3328,19 +3311,19 @@ dr_board_destroy(dr_board_t *bp)
* Free up MEM unit structs.
*/
FREESTRUCT(bp->b_dev[NIX(SBD_COMP_MEM)],
- dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
+ dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
bp->b_dev[NIX(SBD_COMP_MEM)] = NULL;
/*
* Free up CPU unit structs.
*/
FREESTRUCT(bp->b_dev[NIX(SBD_COMP_CPU)],
- dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
+ dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
bp->b_dev[NIX(SBD_COMP_CPU)] = NULL;
/*
* Free up IO unit structs.
*/
FREESTRUCT(bp->b_dev[NIX(SBD_COMP_IO)],
- dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
+ dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
bp->b_dev[NIX(SBD_COMP_IO)] = NULL;
mutex_destroy(&bp->b_lock);
diff --git a/usr/src/uts/sun4u/ngdr/io/dr_cpu.c b/usr/src/uts/sun4u/ngdr/io/dr_cpu.c
index d9f1f17247..0c6b037777 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr_cpu.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr_cpu.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -64,7 +63,7 @@
#include <sys/mmu.h>
#include <sys/x_call.h>
#include <sys/cpu_module.h>
-#include <sys/cheetahregs.h>
+#include <sys/cpu_impl.h>
#include <sys/autoconf.h>
#include <sys/cmn_err.h>
@@ -159,8 +158,13 @@ dr_cpu_set_prop(dr_cpu_unit_t *cp)
}
/* read in the CPU speed */
- clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "clock-frequency", 0);
+
+ /*
+ * If the property is not found in the CPU node, it has to be
+ * kept in the core or cmp node so we just keep looking.
+ */
+ clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY,
+ dip, 0, "clock-frequency", 0);
ASSERT(clock_freq != 0);
@@ -168,6 +172,7 @@ dr_cpu_set_prop(dr_cpu_unit_t *cp)
* The ecache property string is not the same
* for all CPU implementations.
*/
+
switch (cp->sbc_cpu_impl) {
case BLACKBIRD_IMPL:
case CHEETAH_IMPL:
@@ -175,6 +180,7 @@ dr_cpu_set_prop(dr_cpu_unit_t *cp)
cache_str = "ecache-size";
break;
case JAGUAR_IMPL:
+ case OLYMPUS_C_IMPL:
cache_str = "l2-cache-size";
break;
case PANTHER_IMPL:
@@ -189,8 +195,14 @@ dr_cpu_set_prop(dr_cpu_unit_t *cp)
if (cache_str != NULL) {
/* read in the ecache size */
- ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, cache_str, 0);
+ /*
+ * If the property is not found in the CPU node,
+ * it has to be kept in the core or cmp node so
+ * we just keep looking.
+ */
+
+ ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY,
+ dip, 0, cache_str, 0);
}
ASSERT(ecache_size != 0);
@@ -280,7 +292,8 @@ dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
* one message should be printed, no matter how
* many cores are actually present.
*/
- curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
+ curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
+ SBD_COMP_CPU);
if (curr_cpu >= next_cpu) {
cmn_err(CE_CONT, "OS configure %s",
up->sbc_cm.sbdev_path);
@@ -331,13 +344,12 @@ dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
if (err) {
DRERR_SET_C(&cp->sbdev_error, &err);
- err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
+ err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
if (err)
sbd_err_clear(&err);
} else if ((rv = cpu_configure(cpuid)) != 0) {
dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
- err = drmach_unconfigure(cp->sbdev_id,
- DRMACH_DEVI_REMOVE);
+ err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
if (err)
sbd_err_clear(&err);
}
@@ -421,7 +433,7 @@ dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
/* allocate status struct storage. */
ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
- MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);
+ MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);
cix = dr_cpu_status(hp, devset, ds);
@@ -473,7 +485,7 @@ dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
if (disp_bound_threads(cp, 0)) {
cmn_err(CE_WARN, "%s: thread(s) "
"bound to cpu %d",
- f, cp->cpu_id);
+ f, cp->cpu_id);
}
rv = -1;
break;
@@ -553,7 +565,8 @@ dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
* one message should be printed, no matter how
* many cores are actually present.
*/
- curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum);
+ curr_cpu = DR_UNUM2SBD_UNUM(up->sbc_cm.sbdev_unum,
+ SBD_COMP_CPU);
if (curr_cpu >= next_cpu) {
cmn_err(CE_CONT, "OS unconfigure %s\n",
up->sbc_cm.sbdev_path);
@@ -581,7 +594,7 @@ dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
if (disp_bound_threads(cp, 0)) {
cmn_err(CE_WARN, "%s: thread(s) "
"bound to cpu %d",
- f, cp->cpu_id);
+ f, cp->cpu_id);
}
goto err;
}
@@ -618,7 +631,7 @@ dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
} else {
- err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
+ err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
if (err) {
DRERR_SET_C(&cp->sbdev_error, &err);
}
@@ -696,7 +709,7 @@ dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
* on the data for the first core.
*/
psp->ps_type = SBD_COMP_CMP;
- psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit);
+ psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
strncpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
psp->ps_cond = csp->cs_cond;
psp->ps_busy = csp->cs_busy;
@@ -721,7 +734,8 @@ dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
* The following properties should be the same
* for all the cores of the CMP.
*/
- ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit));
+ ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(
+ csp[core].cs_unit, SBD_COMP_CMP));
ASSERT(psp->ps_speed == csp[core].cs_speed);
psp->ps_cpuid[core] = csp[core].cs_cpuid;
@@ -761,6 +775,7 @@ dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
int ncpu;
dr_board_t *bp;
sbd_cpu_stat_t cstat[MAX_CORES_PER_CMP];
+ int impl;
bp = hp->h_bd;
ncpu = 0;
@@ -810,6 +825,11 @@ dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
}
dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
+ /*
+ * We should set impl here because the last core
+ * found might be EMPTY or not present.
+ */
+ impl = cp->sbc_cpu_impl;
}
if (ncores == 0) {
@@ -825,9 +845,10 @@ dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
* found, assuming that all cores will have the
* same implementation.
*/
- if (CPU_IMPL_IS_CMP(cp->sbc_cpu_impl)) {
+
+ if (CPU_IMPL_IS_CMP(impl)) {
psp = (sbd_cmp_stat_t *)dsp;
- dr_fill_cmp_stat(cstat, ncores, cp->sbc_cpu_impl, psp);
+ dr_fill_cmp_stat(cstat, ncores, impl, psp);
} else {
ASSERT(ncores == 1);
bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
@@ -888,8 +909,8 @@ dr_cancel_cpu(dr_cpu_unit_t *up)
if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
if (cpu_intr_disable(cp) != 0) {
cmn_err(CE_WARN, "%s: failed to "
- "disable interrupts on cpu %d",
- f, up->sbc_cpu_id);
+ "disable interrupts on cpu %d",
+ f, up->sbc_cpu_id);
}
}
}
diff --git a/usr/src/uts/sun4u/ngdr/io/dr_io.c b/usr/src/uts/sun4u/ngdr/io/dr_io.c
index fa88a8e690..ac36e971d0 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr_io.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr_io.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -117,8 +116,7 @@ dr_detach_io(dr_handle_t *hp, dr_common_unit_t *cp)
err = drmach_unconfigure(cp->sbdev_id, 0);
if (!err)
- err = drmach_unconfigure(cp->sbdev_id,
- DRMACH_DEVI_REMOVE);
+ err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
if (!err)
err = drmach_io_post_release(cp->sbdev_id);
diff --git a/usr/src/uts/sun4u/ngdr/io/dr_mem.c b/usr/src/uts/sun4u/ngdr/io/dr_mem.c
index f9e5c9e0ea..7686bbab19 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr_mem.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr_mem.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -72,13 +71,8 @@ static int dr_select_mem_target(dr_handle_t *hp,
dr_mem_unit_t *mp, struct memlist *ml);
static void dr_init_mem_unit_data(dr_mem_unit_t *mp);
-static struct memlist *memlist_dup(struct memlist *);
static int memlist_canfit(struct memlist *s_mlist,
struct memlist *t_mlist);
-static struct memlist *memlist_del_span(struct memlist *mlist,
- uint64_t base, uint64_t len);
-static struct memlist *memlist_cat_span(struct memlist *mlist,
- uint64_t base, uint64_t len);
/*
* dr_mem_unit_t.sbm_flags
@@ -355,7 +349,8 @@ dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
/* back out if configure failed */
if (mp->sbm_cm.sbdev_error != NULL) {
dr_lock_status(hp->h_bd);
- err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
+ err = drmach_unconfigure(cp->sbdev_id,
+ DEVI_BRANCH_DESTROY);
if (err)
sbd_err_clear(&err);
dr_unlock_status(hp->h_bd);
@@ -628,7 +623,7 @@ dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
dr_lock_status(hp->h_bd);
err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id,
- DRMACH_DEVI_REMOVE);
+ DEVI_BRANCH_DESTROY);
dr_unlock_status(hp->h_bd);
if (err)
sbd_err_clear(&err);
@@ -663,7 +658,8 @@ dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp)
again:
for (ml = mlist; ml; ml = ml->next) {
if ((ml->address & sm) != sa) {
- mlist = memlist_del_span(mlist, ml->address, ml->size);
+ mlist = memlist_del_span(mlist,
+ ml->address, ml->size);
goto again;
}
}
@@ -2669,28 +2665,6 @@ dr_select_mem_target(dr_handle_t *hp,
/*
* Memlist support.
*/
-static struct memlist *
-memlist_dup(struct memlist *mlist)
-{
- struct memlist *hl = NULL, *tl, **mlp;
-
- if (mlist == NULL)
- return (NULL);
-
- mlp = &hl;
- tl = *mlp;
- for (; mlist; mlist = mlist->next) {
- *mlp = GETSTRUCT(struct memlist, 1);
- (*mlp)->address = mlist->address;
- (*mlp)->size = mlist->size;
- (*mlp)->prev = tl;
- tl = *mlp;
- mlp = &((*mlp)->next);
- }
- *mlp = NULL;
-
- return (hl);
-}
/*
* Determine whether the source memlist (s_mlist) will
@@ -2754,117 +2728,3 @@ memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist)
return (rv);
}
-
-static struct memlist *
-memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
-{
- uint64_t end;
- struct memlist *ml, *tl, *nlp;
-
- if (mlist == NULL)
- return (NULL);
-
- end = base + len;
- if ((end <= mlist->address) || (base == end))
- return (mlist);
-
- for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
- uint64_t mend;
-
- nlp = ml->next;
-
- if (end <= ml->address)
- break;
-
- mend = ml->address + ml->size;
- if (base < mend) {
- if (base <= ml->address) {
- ml->address = end;
- if (end >= mend)
- ml->size = 0ull;
- else
- ml->size = mend - ml->address;
- } else {
- ml->size = base - ml->address;
- if (end < mend) {
- struct memlist *nl;
- /*
- * splitting an memlist entry.
- */
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = end;
- nl->size = mend - nl->address;
- if ((nl->next = nlp) != NULL)
- nlp->prev = nl;
- nl->prev = ml;
- ml->next = nl;
- nlp = nl;
- }
- }
- if (ml->size == 0ull) {
- if (ml == mlist) {
- if ((mlist = nlp) != NULL)
- nlp->prev = NULL;
- FREESTRUCT(ml, struct memlist, 1);
- if (mlist == NULL)
- break;
- ml = nlp;
- } else {
- if ((tl->next = nlp) != NULL)
- nlp->prev = tl;
- FREESTRUCT(ml, struct memlist, 1);
- ml = tl;
- }
- }
- }
- }
-
- return (mlist);
-}
-
-/*
- * add span without merging
- */
-static struct memlist *
-memlist_cat_span(struct memlist *mlist, uint64_t base, uint64_t len)
-{
- struct memlist *ml, *tl, *nl;
-
- if (len == 0ull)
- return (NULL);
-
- if (mlist == NULL) {
- mlist = GETSTRUCT(struct memlist, 1);
- mlist->address = base;
- mlist->size = len;
- mlist->next = mlist->prev = NULL;
-
- return (mlist);
- }
-
- for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
- if (base < ml->address) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = ml;
- if ((nl->prev = ml->prev) != NULL)
- nl->prev->next = nl;
- ml->prev = nl;
- if (mlist == ml)
- mlist = nl;
- break;
- }
- }
-
- if (ml == NULL) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = NULL;
- nl->prev = tl;
- tl->next = nl;
- }
-
- return (mlist);
-}
diff --git a/usr/src/uts/sun4u/ngdr/io/dr_util.c b/usr/src/uts/sun4u/ngdr/io/dr_util.c
index 7a17df1190..eaf8c95178 100644
--- a/usr/src/uts/sun4u/ngdr/io/dr_util.c
+++ b/usr/src/uts/sun4u/ngdr/io/dr_util.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -174,7 +173,7 @@ drerr_set_c(int log, sbd_error_t **ep, int e_code, char *fmt, ...)
* Memlist support.
*/
void
-memlist_delete(struct memlist *mlist)
+dr_memlist_delete(struct memlist *mlist)
{
register struct memlist *ml;
@@ -185,7 +184,7 @@ memlist_delete(struct memlist *mlist)
}
int
-memlist_intersect(struct memlist *al, struct memlist *bl)
+dr_memlist_intersect(struct memlist *al, struct memlist *bl)
{
uint64_t astart, aend, bstart, bend;
@@ -220,7 +219,7 @@ memlist_intersect(struct memlist *al, struct memlist *bl)
}
void
-memlist_coalesce(struct memlist *mlist)
+dr_memlist_coalesce(struct memlist *mlist)
{
uint64_t end, nend;
@@ -260,3 +259,199 @@ memlist_dump(struct memlist *mlist)
printf("memlist> 0x%lx, 0x%lx\n", ml->address, ml->size);
}
#endif
+
+struct memlist *
+dr_memlist_dup(struct memlist *mlist)
+{
+ struct memlist *hl = NULL, *tl, **mlp;
+
+ if (mlist == NULL)
+ return (NULL);
+
+ mlp = &hl;
+ tl = *mlp;
+ for (; mlist; mlist = mlist->next) {
+ *mlp = GETSTRUCT(struct memlist, 1);
+ (*mlp)->address = mlist->address;
+ (*mlp)->size = mlist->size;
+ (*mlp)->prev = tl;
+ tl = *mlp;
+ mlp = &((*mlp)->next);
+ }
+ *mlp = NULL;
+
+ return (hl);
+}
+
+struct memlist *
+dr_memlist_add_span(struct memlist *mlist, uint64_t base, uint64_t len)
+{
+ struct memlist *ml, *tl, *nl;
+
+ if (len == 0ull)
+ return (NULL);
+
+ if (mlist == NULL) {
+ mlist = GETSTRUCT(struct memlist, 1);
+ mlist->address = base;
+ mlist->size = len;
+ mlist->next = mlist->prev = NULL;
+
+ return (mlist);
+ }
+
+ for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
+ if (base < ml->address) {
+ if ((base + len) < ml->address) {
+ nl = GETSTRUCT(struct memlist, 1);
+ nl->address = base;
+ nl->size = len;
+ nl->next = ml;
+ if ((nl->prev = ml->prev) != NULL)
+ nl->prev->next = nl;
+ ml->prev = nl;
+ if (mlist == ml)
+ mlist = nl;
+ } else {
+ ml->size = MAX((base + len),
+ (ml->address + ml->size)) -
+ base;
+ ml->address = base;
+ }
+ break;
+
+ } else if (base <= (ml->address + ml->size)) {
+ ml->size = MAX((base + len),
+ (ml->address + ml->size)) -
+ MIN(ml->address, base);
+ ml->address = MIN(ml->address, base);
+ break;
+ }
+ }
+ if (ml == NULL) {
+ nl = GETSTRUCT(struct memlist, 1);
+ nl->address = base;
+ nl->size = len;
+ nl->next = NULL;
+ nl->prev = tl;
+ tl->next = nl;
+ }
+
+ dr_memlist_coalesce(mlist);
+
+ return (mlist);
+}
+
+struct memlist *
+dr_memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
+{
+ uint64_t end;
+ struct memlist *ml, *tl, *nlp;
+
+ if (mlist == NULL)
+ return (NULL);
+
+ end = base + len;
+ if ((end <= mlist->address) || (base == end))
+ return (mlist);
+
+ for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
+ uint64_t mend;
+
+ nlp = ml->next;
+
+ if (end <= ml->address)
+ break;
+
+ mend = ml->address + ml->size;
+ if (base < mend) {
+ if (base <= ml->address) {
+ ml->address = end;
+ if (end >= mend)
+ ml->size = 0ull;
+ else
+ ml->size = mend - ml->address;
+ } else {
+ ml->size = base - ml->address;
+ if (end < mend) {
+ struct memlist *nl;
+ /*
+ * splitting an memlist entry.
+ */
+ nl = GETSTRUCT(struct memlist, 1);
+ nl->address = end;
+ nl->size = mend - nl->address;
+ if ((nl->next = nlp) != NULL)
+ nlp->prev = nl;
+ nl->prev = ml;
+ ml->next = nl;
+ nlp = nl;
+ }
+ }
+ if (ml->size == 0ull) {
+ if (ml == mlist) {
+ if ((mlist = nlp) != NULL)
+ nlp->prev = NULL;
+ FREESTRUCT(ml, struct memlist, 1);
+ if (mlist == NULL)
+ break;
+ ml = nlp;
+ } else {
+ if ((tl->next = nlp) != NULL)
+ nlp->prev = tl;
+ FREESTRUCT(ml, struct memlist, 1);
+ ml = tl;
+ }
+ }
+ }
+ }
+
+ return (mlist);
+}
+
+/*
+ * add span without merging
+ */
+struct memlist *
+dr_memlist_cat_span(struct memlist *mlist, uint64_t base, uint64_t len)
+{
+ struct memlist *ml, *tl, *nl;
+
+ if (len == 0ull)
+ return (NULL);
+
+ if (mlist == NULL) {
+ mlist = GETSTRUCT(struct memlist, 1);
+ mlist->address = base;
+ mlist->size = len;
+ mlist->next = mlist->prev = NULL;
+
+ return (mlist);
+ }
+
+ for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
+ if (base < ml->address) {
+ nl = GETSTRUCT(struct memlist, 1);
+ nl->address = base;
+ nl->size = len;
+ nl->next = ml;
+ if ((nl->prev = ml->prev) != NULL)
+ nl->prev->next = nl;
+ ml->prev = nl;
+ if (mlist == ml)
+ mlist = nl;
+ break;
+ }
+ }
+
+ if (ml == NULL) {
+ nl = GETSTRUCT(struct memlist, 1);
+ nl->address = base;
+ nl->size = len;
+ nl->next = NULL;
+ nl->prev = tl;
+ tl->next = nl;
+ }
+
+ return (mlist);
+}
diff --git a/usr/src/uts/sun4u/ngdr/sys/dr.h b/usr/src/uts/sun4u/ngdr/sys/dr.h
index f4942b6b55..c34c744120 100644
--- a/usr/src/uts/sun4u/ngdr/sys/dr.h
+++ b/usr/src/uts/sun4u/ngdr/sys/dr.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -48,12 +47,24 @@ extern "C" {
#define DR_MAXNUM_NT 3
/* used to map sbd_comp_type_t to array index */
-#define NIX(t) \
- (((t) == SBD_COMP_CPU) ? 0 : \
- ((t) == SBD_COMP_MEM) ? 1 : \
- ((t) == SBD_COMP_IO) ? 2 : \
+#define NIX(t) \
+ (((t) == SBD_COMP_CPU) ? 0 : \
+ ((t) == SBD_COMP_MEM) ? 1 : \
+ ((t) == SBD_COMP_IO) ? 2 : \
((t) == SBD_COMP_CMP) ? 0 : DR_MAXNUM_NT)
+#define BIX(t) \
+ (((t) == SBD_COMP_CPU) ? 0 : \
+ ((t) == SBD_COMP_MEM) ? 32 : \
+ ((t) == SBD_COMP_IO) ? 40 : \
+ ((t) == SBD_COMP_CMP) ? 0 : 0)
+
+#define NMASK(t) \
+ (((t) == SBD_COMP_CPU) ? ((dr_devset_t)0xffffffff) : \
+ ((t) == SBD_COMP_MEM) ? ((dr_devset_t)0x1) : \
+ ((t) == SBD_COMP_IO) ? ((dr_devset_t)0x1ff) : \
+ ((t) == SBD_COMP_CMP) ? ((dr_devset_t)0xffffffff) : 0)
+
/*
* helper macros for constructing and reporting internal error messages.
* NOTE: each module which uses one or more this these macros is expected
@@ -84,22 +95,23 @@ extern "C" {
/*
* Format of dr_devset_t bit masks:
*
- * 32 16 8 0
- * |....|....|....|IIII|....|...M|CCCC|CCCC|
+ * 64 48 40 32 24 16 8 0
+ * |....|...I|IIII|IIII|....|...M|CCCC|CCCC|CCCC|CCCC|CCCC|CCCC|CCCC|CCCC|
*
* 1 = indicates respective component present/attached.
* I = I/O, M = Memory, C = CPU.
*/
-#define DEVSET_ANYUNIT (-1)
-#define _NT2DEVPOS(t, u) ((NIX(t) << 3) + (u))
-#define _DEVSET_MASK 0x000f01ff
-#define _CMP_DEVSET_MASK 0x11
+#define _NT2DEVPOS(t, u) (BIX(t) + (u))
+#define _DEVSET_MASK ((dr_devset_t)0x1ff01ffffffff)
+#define _CMP_DEVSET_MASK ((dr_devset_t)0x11111111)
+#define DEVSET_ONEUNIT ((dr_devset_t)1)
+#define DEVSET_ANYUNIT (dr_devset_t)(-1)
#define DEVSET(t, u) \
(((u) == DEVSET_ANYUNIT) ? \
- (dr_devset_t)((0xff << _NT2DEVPOS((t), 0)) & _DEVSET_MASK) : \
+ ((NMASK(t) << _NT2DEVPOS((t), 0)) & _DEVSET_MASK) : \
((t) == SBD_COMP_CMP) ? \
- (dr_devset_t)(_CMP_DEVSET_MASK << _NT2DEVPOS((t), (u))) : \
- (dr_devset_t)(1 << _NT2DEVPOS((t), (u))))
+ (_CMP_DEVSET_MASK << _NT2DEVPOS((t), (u))) : \
+ (DEVSET_ONEUNIT << _NT2DEVPOS((t), (u))))
#define DEVSET_IN_SET(ds, t, u) (((ds) & DEVSET((t), (u))) != 0)
#define DEVSET_ADD(ds, t, u) ((ds) |= DEVSET((t), (u)))
@@ -175,7 +187,17 @@ extern "C" {
* CMP Specific Helpers
*/
#define DR_CMP_CORE_UNUM(cmp, core) (cmp + (core * 4))
-#define DR_UNUM2SBD_UNUM(unum) (unum & 0x3)
+/*
+ * DR_UNUM2SBD_UNUM should be set to (unum & (max #of CMP on board - 1))
+ * for all the platforms. So far, all sun4u platforms supported have
+ * the same limit so 0x3 works. One day we might have to make this
+ * a platform specific macro.
+ */
+
+#define DR_UNUM2SBD_UNUM(n, d) ((d == SBD_COMP_IO) ? (n & 0xf) : \
+ (d == SBD_COMP_CPU) ? (n & 0x3) : \
+ (d == SBD_COMP_CMP) ? (n & 0x3) : \
+ (n))
/*
* Some stuff to assist in debug.
@@ -213,7 +235,7 @@ extern uint_t dr_debug;
#define DR_BSLOCK 0x01 /* for blocking status (protected by b_slock) */
typedef const char *fn_t;
-typedef uint32_t dr_devset_t; /* TODO: fix limitation */
+typedef uint64_t dr_devset_t; /* TODO: fix limitation */
/*
* Unsafe devices based on dr.conf prop "unsupported-io-drivers"
diff --git a/usr/src/uts/sun4u/ngdr/sys/dr_util.h b/usr/src/uts/sun4u/ngdr/sys/dr_util.h
index c23dd00a2d..69b0bbd84c 100644
--- a/usr/src/uts/sun4u/ngdr/sys/dr_util.h
+++ b/usr/src/uts/sun4u/ngdr/sys/dr_util.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,8 +19,8 @@
* CDDL HEADER END
*/
/*
- * Copyright (c) 2000 by Sun Microsystems, Inc.
- * All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
*/
#ifndef _SYS_DR_UTIL_H_
@@ -69,11 +68,32 @@ extern sbd_error_t *drerr_new_v(int e_code, char *fmt, va_list args);
extern void drerr_set_c(int log, sbd_error_t **ep,
int e_code, char *fmt, ...);
-extern void memlist_delete(struct memlist *mlist);
+extern void dr_memlist_delete(struct memlist *mlist);
extern void memlist_dump(struct memlist *mlist);
-extern int memlist_intersect(struct memlist *al,
+extern int dr_memlist_intersect(struct memlist *al,
struct memlist *bl);
-extern void memlist_coalesce(struct memlist *mlist);
+extern void dr_memlist_coalesce(struct memlist *mlist);
+extern struct memlist *dr_memlist_dup(struct memlist *mlist);
+extern struct memlist *dr_memlist_add_span(struct memlist *mlist,
+ uint64_t base, uint64_t len);
+extern struct memlist *dr_memlist_del_span(struct memlist *mlist,
+ uint64_t base, uint64_t len);
+extern struct memlist *dr_memlist_cat_span(struct memlist *mlist,
+ uint64_t base, uint64_t len);
+
+/*
+ * These are all utilities internal for DR. There are
+ * similar functions in common/os which have similar names.
+ * We rename them to make sure there is no name space
+ * conflict.
+ */
+#define memlist_delete dr_memlist_delete
+#define memlist_intersect dr_memlist_intersect
+#define memlist_coalesce dr_memlist_coalesce
+#define memlist_dup dr_memlist_dup
+#define memlist_add_span dr_memlist_add_span
+#define memlist_del_span dr_memlist_del_span
+#define memlist_cat_span dr_memlist_cat_span
#ifdef __cplusplus
}
diff --git a/usr/src/uts/sun4u/opl/Makefile b/usr/src/uts/sun4u/opl/Makefile
new file mode 100644
index 0000000000..dc18c8ec61
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/Makefile
@@ -0,0 +1,147 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the sun4u opl platform
+# module.
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/Makefile
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+def := TARGET= def
+all := TARGET= all
+install := TARGET= install
+install_h := TARGET= install_h
+clean := TARGET= clean
+clobber := TARGET= clobber
+lint := TARGET= lint
+lintlib := TARGET= lintlib
+modlist := TARGET= modlist
+modlist := NO_STATE= -K $$MODSTATE$$$$
+modlintlib := TARGET= modlintlib
+clean.lint := TARGET= clean.lint
+check := TARGET= check
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def all clean clobber clean.lint: genassym unix .WAIT \
+ $(OPL_CPU_KMODS) $(OPL_KMODS)
+
+modlist: unix $(OPL_CPU_KMODS) $(OPL_KMODS)
+
+lintlib: unix
+
+modlintlib: $(OPL_KMODS)
+
+IMPLEMENTED_PLATFORM = SUNW,SPARC-Enterprise
+LINKED_PLATFORMS = FJSV,SPARC-Enterprise
+# temporary link to the development platform name - remove before ON putback!
+LINKED_PLATFORMS += SUNW,OPL-Enterprise
+
+# EXPORT DELETE START
+#
+# aes256 is delivered in the SUNWcryr package which is removed from
+# the EXPORT_SRC build.
+#
+OPL_CRYPTO_LINKS += aes256
+# EXPORT DELETE END
+
+install: $(ROOT_OPL_DIR) $(USR_OPL_DIR) \
+ $(USR_OPL_INC_DIR) \
+ $(USR_OPL_SBIN_DIR) \
+ $(USR_OPL_SBIN_EEPROM) \
+ $(USR_OPL_SBIN_PRTDIAG) \
+ $(USR_OPL_SBIN_TRAPSTAT) \
+ $(USR_OPL_SBIN_FRUADM) \
+ $(USR_OPL_LIB_DIR) \
+ $(LINKED_PLATFORMS:%=$(USR_PLAT_DIR)/%) \
+ $(LINKED_PLATFORMS:%=$(ROOT_PLAT_DIR)/%) \
+ $(OPL_CRYPTO_LINKS) \
+ genassym unix .WAIT $(OPL_CPU_KMODS) $(OPL_KMODS) \
+ $(OPLMSU_OPTION)
+
+genassym unix $(OPL_KMODS) $(OPL_CPU_KMODS): FRC
+ @cd $@; pwd; $(MAKE) $(TARGET)
+
+$(OPL_CRYPTO_LINKS): $(ROOT_OPL_CRYPTO_DIR_64)
+ -$(RM) $(ROOT_OPL_CRYPTO_DIR_64)/$@
+ $(SYMLINK) $(ROOT_US3_CRYPTO_LINK)/$@ $(ROOT_OPL_CRYPTO_DIR_64)/$@
+
+$(OPLMSU_OPTION): FRC
+ @cd $@; pwd; $(MAKE) $(TARGET)
+
+install_h check: FRC
+ @cd sys; pwd; $(MAKE) $(TARGET)
+
+lint: modlintlib
+
+
+#
+# The 'lint.platmod' target lints the opl platform module against
+# the sun4u kernel. This ends up doing all the kernel cross-checks,
+# so it takes a couple of minutes.
+# Due to the low ROI, it's not run by default, but it's a good
+# idea to run this if you change os/opl.c.
+#
+
+LINT_LIBS = $(LINT_LIB) \
+ -L$(OPL_LINT_LIB_DIR) \
+ -L$(LINT_LIB_DIR) $(LINT_KMODS:%=-l%) \
+ -L$(SPARC_LIB_DIR) $(SPARC_LINTS:%=-l%)
+
+lint.platmod: modlintlib
+ @-$(ECHO) "\nOpl Platform-dependent module: global crosschecks:"
+ @-$(LINT) $(LINTFLAGS) $(LINT_LIBS) 2>&1 | $(LGREP.2)
+
+# EXPORT DELETE START
+
+EXPORT_SRC:
+ $(RM) Makefile+
+ $(SED) -e "/^# EXPORT DELETE START/,/^# EXPORT DELETE END/d" \
+ < Makefile > Makefile+
+ $(MV) Makefile+ Makefile
+ $(CHMOD) 444 Makefile
+
+# EXPORT DELETE END
+
+#
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/Makefile.files b/usr/src/uts/sun4u/opl/Makefile.files
new file mode 100644
index 0000000000..397ce7d2e5
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/Makefile.files
@@ -0,0 +1,56 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This Makefile defines all file modules for the directory
+# uts/sun4u/opl and its children. These are the source files which
+# are opl "implementation architecture" dependent.
+#
+# uts/sun4u/opl/Makefile.files
+#
+
+OPL_OBJS = opl.o
+DM2S_OBJS = dm2s.o
+OPLKM_OBJS = oplkmdrv.o
+PCICMU_OBJS = pcicmu.o pcmu_cb.o pcmu_counters.o pcmu_ecc.o \
+ pcmu_ib.o pcmu_intr.o pcmu_pbm.o pcmu_util.o
+OPL_PANEL_OBJS = oplpanel.o
+SCFD_OBJS = scfconf.o scfdscp.o scfiomp.o scfostoescf.o \
+ scfsnap.o scfhandler.o scfkstat.o scfparam.o \
+ scftimer.o scfdebug.o scfinit.o scfops.o \
+ scfreg.o scftrace.o scf_os_interface.o
+DRMACH_OBJS = drmach.o drmach_asm.o dr_util.o drmach_err.o
+DRMACH_DEPS += drmach_asm.o
+OPLMSU_OBJS = oplmsu.o oplmsu_cmn_func.o oplmsu_ioctl_lrp.o oplmsu_ioctl_uwp.o
+MC_OPL_OBJS = mc-opl.o
+
+#
+# Miscellaneous
+#
+INC_PATH += -I$(UTSBASE)/sun4u/opl
+
+ASSYM_DEPS += drmach_asm.o
+
+.KEEP_STATE:
diff --git a/usr/src/uts/sun4u/opl/Makefile.opl b/usr/src/uts/sun4u/opl/Makefile.opl
new file mode 100644
index 0000000000..053452a150
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/Makefile.opl
@@ -0,0 +1,150 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# Global definitions for sun4u opl implementation specific modules.
+#
+# uts/sun4u/opl/Makefile.opl
+#
+
+#
+# Define directories.
+#
+#
+#
+ROOT_OPL_DIR = $(ROOT_PLAT_DIR)/SUNW,SPARC-Enterprise
+ROOT_OPL_MOD_DIR = $(ROOT_OPL_DIR)/kernel
+ROOT_OPL_KERN_DIR_32 = $(ROOT_OPL_MOD_DIR)
+ROOT_OPL_KERN_DIR_64 = $(ROOT_OPL_MOD_DIR)/$(SUBDIR64)
+ROOT_OPL_MISC_DIR_32 = $(ROOT_OPL_MOD_DIR)/misc
+ROOT_OPL_MISC_DIR_64 = $(ROOT_OPL_MOD_DIR)/misc/$(SUBDIR64)
+ROOT_OPL_DRV_DIR_32 = $(ROOT_OPL_MOD_DIR)/drv
+ROOT_OPL_DRV_DIR_64 = $(ROOT_OPL_MOD_DIR)/drv/$(SUBDIR64)
+ROOT_OPL_CPU_DIR_32 = $(ROOT_OPL_MOD_DIR)/cpu
+ROOT_OPL_CPU_DIR_64 = $(ROOT_OPL_MOD_DIR)/cpu/$(SUBDIR64)
+ROOT_OPL_CRYPTO_DIR_32 = $(ROOT_OPL_MOD_DIR)/crypto
+ROOT_OPL_CRYPTO_DIR_64 = $(ROOT_OPL_MOD_DIR)/crypto/$(SUBDIR64)
+
+ROOT_OPL_KERN_DIR = $(ROOT_OPL_KERN_DIR_$(CLASS))
+ROOT_OPL_MISC_DIR = $(ROOT_OPL_MISC_DIR_$(CLASS))
+ROOT_OPL_DRV_DIR = $(ROOT_OPL_DRV_DIR_$(CLASS))
+ROOT_OPL_CPU_DIR = $(ROOT_OPL_CPU_DIR_$(CLASS))
+ROOT_OPL_CRYPTO_DIR = $(ROOT_OPL_CRYPTO_DIR_$(CLASS))
+
+ROOT_PLAT_MOD_DIRS += $(ROOT_OPL_MOD_DIR)
+ROOT_PLAT_MISC_DIRS_32 += $(ROOT_OPL_MISC_DIR_32)
+
+USR_OPL_DIR = $(USR_PLAT_DIR)/SUNW,SPARC-Enterprise
+USR_OPL_LIB_DIR = $(USR_OPL_DIR)/lib
+USR_OPL_SBIN_DIR = $(USR_OPL_DIR)/sbin
+USR_OPL_SBIN_EEPROM = $(USR_OPL_SBIN_DIR)/eeprom
+USR_OPL_SBIN_PRTDIAG = $(USR_OPL_SBIN_DIR)/prtdiag
+USR_OPL_SBIN_TRAPSTAT = $(USR_OPL_SBIN_DIR)/trapstat
+USR_OPL_SBIN_FRUADM = $(USR_OPL_SBIN_DIR)/fruadm
+USR_OPL_INC_DIR = $(USR_OPL_DIR)/include
+USR_OPL_ISYS_DIR = $(USR_OPL_INC_DIR)/sys
+
+OPL_LINT_LIB_DIR = $(UTSBASE)/$(PLATFORM)/opl/lint-libs/$(OBJS_DIR)
+OPLMSU_OPTION = options
+
+#
+# Define modules.
+#
+OPL_KMODS = platmod
+OPL_KMODS += dm2s
+OPL_KMODS += oplkmdrv
+OPL_KMODS += pcicmu
+OPL_KMODS += oplpanel
+OPL_KMODS += scfd
+OPL_KMODS += dr .WAIT drmach
+OPL_KMODS += oplmsu
+OPL_KMODS += mc-opl
+
+#
+# CPU modules.
+#
+OPL_CPU_KMODS += olympus_c
+
+# Links to OPL crypto modules
+#
+OPL_CRYPTO_LINKS = aes
+
+#
+# Include the makefiles which define build rule templates, the
+# collection of files per module, and a few specific flags. Note
+# that order is significant, just as with an include path. The
+# first build rule template which matches the files name will be
+# used. By including these in order from most machine dependent
+# to most machine independent, we allow a machine dependent file
+# to be used in preference over a machine independent version
+# (Such as a machine specific optimization, which preserves the
+# interfaces.)
+#
+include $(UTSBASE)/sun4u/ngdr/Makefile.files
+include $(UTSBASE)/sun4u/opl/Makefile.files
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/Makefile.sun4u
+
+#
+# Everybody needs to know how to build modstubs.o and to locate unix.o
+#
+UNIX_DIR = $(UTSBASE)/$(PLATFORM)/opl/unix
+MODSTUBS_DIR = $(UNIX_DIR)
+DSF_DIR = $(UTSBASE)/$(PLATFORM)/opl/genassym
+LINTS_DIR = $(OBJS_DIR)
+LINT_LIB_DIR = $(UTSBASE)/$(PLATFORM)/opl/lint-libs/$(OBJS_DIR)
+
+UNIX_O = $(UNIX_DIR)/$(OBJS_DIR)/unix.o
+
+LINT_LIB = $(LINT_LIB_DIR)/llib-lunix.ln
+
+#
+# Define the actual specific platforms
+#
+MACHINE_DEFS = -D$(PLATFORM) -D_MACHDEP -DSFMMU -DMP
+MACHINE_DEFS += -D_CPU_SIGNATURE
+
+#
+# Maximum CPUID = 01111 11 01 1 = 0x1FB (507)
+# Maximum CHIPID = 1 01111 11 00 0 = 0x5F8 (1528)
+#
+MACHINE_DEFS += -DNCPU=508
+MACHINE_DEFS += -DMAX_CPU_CHIPID=1529
+MACHINE_DEFS += -DMAX_UPA=256
+MACHINE_DEFS += -DIGN_SIZE=8
+MACHINE_DEFS += -DMAX_MEM_NODES=16
+#
+# UTSB_PHYS will enable user TSB physical access for TL>0
+#
+MACHINE_DEFS += -DUTSB_PHYS
+MACHINE_DEFS += -D_OPL
+MACHINE_DEFS += -DOLYMPUS_SHARED_FTLB
+MACHINE_DEFS += -D_CMP_NO_ERROR_STEERING -D_HW_MEMSCRUB_SUPPORT
+MACHINE_DEFS += -DDO_CORELEVEL_LOADBAL
+
+.KEEP_STATE:
diff --git a/usr/src/uts/sun4u/opl/Makefile.rules b/usr/src/uts/sun4u/opl/Makefile.rules
new file mode 100644
index 0000000000..a19bf3a80e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/Makefile.rules
@@ -0,0 +1,147 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This Makefile defines the build rules for the directory
+# uts/sun4u/opl and its children.
+#
+# uts/sun4u/opl/Makefile.rules
+#
+
+#
+# Section 1a: C object build rules
+#
+
+#
+# inline support for DR.
+#
+
+
+$(OBJS_DIR)/dr_mem.o: $(UTSBASE)/sun4u/opl/io/dr_mem.c
+ $(COMPILE.c) -o $@ $(UTSBASE)/sun4u/opl/io/dr_mem.c
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/ngdr/io/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+DR_IO= $(UTSBASE)/sun4u/ngdr/io
+SBD_IOCTL= $(UTSBASE)/sun4u/sys/sbd_ioctl.h
+SBDGENERR= $(DR_IO)/sbdgenerr
+
+CLEANFILES += $(SBDGENERR)
+CLEANFILES += $(DR_IO)/dr_err.c
+
+$(DR_IO)/dr_err.c: $(SBDGENERR) $(SBD_IOCTL)
+ $(RM) $@
+ $(SBDGENERR) ESBD < $(SBD_IOCTL) > $(DR_IO)/dr_err.c
+
+$(SBDGENERR): $(DR_IO)/sbdgenerr.pl
+ $(RM) $@
+ $(CAT) $(DR_IO)/sbdgenerr.pl > $@
+ $(CHMOD) +x $@
+
+IL_CPP=$(CPP) -P -DINLINE -D_ASM $(AS_INC_PATH) \
+ $(CPP_DEFS) $(ALWAYS_DEFS) $(ALL_DEFS) $(CONFIG_DEFS)
+
+DRMACH_IL= $(OBJS_DIR)/drmach.il
+$(OBJS_DIR)/drmach.o := CC_XARCH_32 = -xarch=v8plusa
+$(OBJS_DIR)/drmach.o: $(UTSBASE)/sun4u/opl/io/drmach.c $(DRMACH_IL)
+ $(COMPILE.c) $(DRMACH_IL) -o $@ $(UTSBASE)/sun4u/opl/io/drmach.c
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/io/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/io/pcicmu/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/ml/%.s
+ $(COMPILE.s) -o $@ $<
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/os/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/io/oplpanel/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/io/scfd/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+$(OBJS_DIR)/%.o: $(UTSBASE)/sun4u/opl/io/oplmsu/%.c
+ $(COMPILE.c) -o $@ $<
+ $(CTFCONVERT_O)
+
+OPL_IO= $(UTSBASE)/sun4u/opl/io
+
+CLEANFILES += $(OPL_IO)/drmach_err.c
+
+$(OPL_IO)/drmach_err.c: $(SBDGENERR) $(SBD_IOCTL)
+ $(RM) $@
+ $(SBDGENERR) EOPL < $(SBD_IOCTL) > $@
+
+CLEANFILES += $(DRMACH_IL)
+
+$(DRMACH_IL): $(UTSBASE)/sun4u/opl/ml/drmach.il.cpp
+ $(IL_CPP) $(UTSBASE)/sun4u/opl/ml/drmach.il.cpp > $@
+
+
+#
+# Section 1b: Lint object build rules
+#
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/ngdr/io/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/dr_mem.ln: $(UTSBASE)/sun4u/opl/io/dr_mem.c
+ @($(LHEAD) $(LINT.c) $(UTSBASE)/sun4u/opl/io/dr_mem.c $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/io/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/io/pcicmu/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/ml/%.s
+ @($(LHEAD) $(LINT.s) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/os/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/io/oplpanel/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/io/scfd/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+$(LINTS_DIR)/%.ln: $(UTSBASE)/sun4u/opl/io/oplmsu/%.c
+ @($(LHEAD) $(LINT.c) $< $(LTAIL))
+
+.KEEP_STATE:
diff --git a/usr/src/uts/sun4u/opl/Makefile.targ b/usr/src/uts/sun4u/opl/Makefile.targ
new file mode 100644
index 0000000000..c24a433d05
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/Makefile.targ
@@ -0,0 +1,116 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# Common targets for sun4u opl implementation specific modules.
+#
+# uts/sun4u/opl/Makefile.targ
+#
+
+.KEEP_STATE:
+
+#
+# Rules for implementation subdirectories.
+#
+$(ROOT_OPL_DIR): $(ROOT_PLAT_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_MOD_DIR): $(ROOT_OPL_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_DRV_DIR_32): $(ROOT_OPL_MOD_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_DRV_DIR_64): $(ROOT_OPL_DRV_DIR_32)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_CPU_DIR_32): $(ROOT_OPL_MOD_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_CPU_DIR_64): $(ROOT_OPL_CPU_DIR_32)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_MISC_DIR_32): $(ROOT_OPL_MOD_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_MISC_DIR_64): $(ROOT_OPL_MISC_DIR_32)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_CRYPTO_DIR_32): $(ROOT_OPL_MOD_DIR)
+ -$(INS.dir.root.sys)
+
+$(ROOT_OPL_CRYPTO_DIR_64): $(ROOT_OPL_CRYPTO_DIR_32)
+ -$(INS.dir.root.sys)
+
+$(USR_OPL_DIR): $(USR_PLAT_DIR)
+ -$(INS.dir.root.sys)
+
+$(USR_OPL_INC_DIR): $(USR_OPL_DIR) $(USR_PSM_INCL_DIR)
+ $(INS.slink4)
+
+$(USR_OPL_SBIN_DIR): $(USR_OPL_DIR) $(USR_PSM_SBIN_DIR)
+ $(INS.dir.root.bin)
+
+$(USR_OPL_SBIN_EEPROM): $(USR_OPL_SBIN_DIR)
+ $(RM) -r $@; $(SYMLINK) ../../$(PLATFORM)/sbin/eeprom $@ $(CHOWNLINK) $(CHGRPLINK)
+
+$(USR_OPL_SBIN_PRTDIAG): $(USR_OPL_SBIN_DIR)
+ $(RM) -r $@; $(SYMLINK) ../../$(PLATFORM)/sbin/prtdiag $@ $(CHOWNLINK) $(CHGRPLINK)
+
+$(USR_OPL_SBIN_TRAPSTAT): $(USR_OPL_SBIN_DIR)
+ $(RM) -r $@; $(SYMLINK) ../../$(PLATFORM)/sbin/trapstat $@ $(CHOWNLINK) $(CHGRPLINK)
+
+$(USR_OPL_SBIN_FRUADM): $(USR_OPL_SBIN_DIR)
+ $(RM) -r $@; $(SYMLINK) ../../$(PLATFORM)/sbin/fruadm $@ $(CHOWNLINK) $(CHGRPLINK)
+
+$(LINKED_PLATFORMS:%=$(ROOT_PLAT_DIR)/%): $(ROOT_PLAT_DIR)
+ $(INS.slink3)
+
+$(LINKED_PLATFORMS:%=$(USR_PLAT_DIR)/%): $(USR_PLAT_DIR)
+ $(INS.slink3)
+
+$(USR_OPL_LIB_DIR): $(USR_OPL_DIR) $(USR_PSM_LIB_DIR)
+ -$(INS.dir.root.bin)
+
+$(USR_OPL_ISYS_DIR): $(USR_OPL_INC_DIR)
+ $(INS.dir.root.bin)
+
+$(ROOT_OPL_KERN_DIR)/%: $(OBJS_DIR)/% $(ROOT_OPL_KERN_DIR) FRC
+ $(INS.file)
+
+$(ROOT_OPL_DRV_DIR)/%: $(OBJS_DIR)/% $(ROOT_OPL_DRV_DIR) FRC
+ $(INS.file)
+
+$(ROOT_OPL_CPU_DIR)/%: $(OBJS_DIR)/% $(ROOT_OPL_CPU_DIR) FRC
+ $(INS.file)
+
+$(ROOT_OPL_MISC_DIR)/%: $(OBJS_DIR)/% $(ROOT_OPL_MISC_DIR) FRC
+ $(INS.file)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.rules
+include $(UTSBASE)/sun4u/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/dm2s/Makefile b/usr/src/uts/sun4u/opl/dm2s/Makefile
new file mode 100644
index 0000000000..f97e0e7e65
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/dm2s/Makefile
@@ -0,0 +1,105 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the dm2s driver
+# kernel module.
+#
+# sun4u implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+
+MODULE = dm2s
+OBJECTS = $(DM2S_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DM2S_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# Overrides
+#
+ALL_BUILDS = $(ALL_BUILDSONLY64)
+DEF_BUILDS = $(DEF_BUILDSONLY64)
+CLEANLINTFILES += $(LINT32_FILES)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS += -dalign
+
+#
+# Dependency
+LDFLAGS += -dy -Ndrv/scfd
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/dr/Makefile b/usr/src/uts/sun4u/opl/dr/Makefile
new file mode 100644
index 0000000000..31d5a8388a
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/dr/Makefile
@@ -0,0 +1,92 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the dr driver module.
+#
+# sun4u opl implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = dr
+OBJECTS = $(DR_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DR_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# module dependencies
+#
+LDFLAGS += -dy -Nmisc/drmach
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/drmach/Makefile b/usr/src/uts/sun4u/opl/drmach/Makefile
new file mode 100644
index 0000000000..80ae24dbc1
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/drmach/Makefile
@@ -0,0 +1,116 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the drmach loadable module.
+#
+# sun4u opl implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = drmach
+OBJECTS = $(DRMACH_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(DRMACH_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_MISC_DIR)/$(MODULE)
+
+DRMACH_OFFSETS = $(UTSBASE)/sun4u/opl/ml/drmach_offsets.in
+DRMACH_OFFSETS_H = $(OBJS_DIR)/drmach_offsets.h
+DRMACH_OFFSETS_C = $(OBJS_DIR)/drmach_offsets.c
+DRMACH_OFFSETS_S = $(OBJS_DIR)/drmach_offsets.s
+DRMACH_OFFSETS_TMP = $(OBJS_DIR)/drmach_offsets.tmp
+DRMACH_OFFSETS_OUT = $(OBJS_DIR)/drmach_offsets.out
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+CLEANFILES += $(DRMACH_OFFSETS_H) $(DRMACH_OFFSETS_OUT)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# module dependencies
+#
+LDFLAGS += -dy -Nmisc/opl_cfg
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+AS_INC_PATH += -I$(OBJS_DIR)
+
+$(DRMACH_DEPS:%=$(OBJS_DIR)/%): $(DRMACH_OFFSETS_H)
+
+$(DRMACH_OFFSETS_H): $(GENCONST) $(DRMACH_OFFSETS_OUT)
+ @cp $(DRMACH_OFFSETS_OUT) $(DRMACH_OFFSETS_H)
+ $(GENCONST) >> $(DRMACH_OFFSETS_H)
+
+$(DRMACH_OFFSETS_OUT): $(DRMACH_OFFSETS)
+ @grep "^#" $(DRMACH_OFFSETS) > $(DRMACH_OFFSETS_C)
+ $(CC) $(CFLAGS) $(CPPFLAGS) -g -S -o $(DRMACH_OFFSETS_S) $(DRMACH_OFFSETS_C)
+ @grep -v "^#" $(DRMACH_OFFSETS) > $(DRMACH_OFFSETS_TMP)
+ $(STABS) -t genassym -m $(MODEL) $(DRMACH_OFFSETS_TMP) \
+ < $(DRMACH_OFFSETS_S) > $(DRMACH_OFFSETS_OUT)
+ @rm $(DRMACH_OFFSETS_C) $(DRMACH_OFFSETS_S) $(DRMACH_OFFSETS_TMP)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/genassym/Makefile b/usr/src/uts/sun4u/opl/genassym/Makefile
new file mode 100644
index 0000000000..9b0ca0c1f7
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/genassym/Makefile
@@ -0,0 +1,97 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of assym.h through genconst/stabs.
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/genassym/Makefile
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+ASSYM_H = $(DSF_DIR)/$(OBJS_DIR)/assym.h
+GENCONST = $(DSF_DIR)/$(OBJS_DIR)/genconst
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(ASSYM_H)
+
+#
+# This is DSF_DIR. Use a short path.
+#
+DSF_DIR = .
+
+#
+# Overrides
+#
+CLEANFILES = $(GENCONST) Nothing_to_remove
+CLOBBERFILES = $(ASSYM_H) $(CLEANFILES) Nothing_to_remove
+ALL_BUILDS = $(ALL_BUILDSONLY64)
+DEF_BUILDS = $(DEF_BUILDSONLY64)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+clean.lint:
+
+install: def
+
+#
+# create assym.h
+#
+$(GENCONST): $(GENCONST_SRC)
+ $(NATIVECC) -w $(ALWAYS_DEFS) $(GENCONST_DEFS) $(NATIVE_INC_PATH) \
+ -o $(GENCONST) $(GENCONST_SRC)
+
+$(ASSYM_H): $(GENCONST) $(OFFSETS) $(PLATFORM_OFFSETS)
+ $(OFFSETS_CREATE) <$(OFFSETS) >$@
+ $(OFFSETS_CREATE) <$(PLATFORM_OFFSETS) >>$@
+ $(GENCONST) >>$@
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/io/dm2s.c b/usr/src/uts/sun4u/opl/io/dm2s.c
new file mode 100644
index 0000000000..3bd528c2af
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/dm2s.c
@@ -0,0 +1,1291 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * DM2S - Domain side Mailbox to synchronous serial device driver.
+ *
+ * Description:
+ * -----------
+ * It is a streams driver which simulates a sync serial device on
+ * top of a mailbox type of communication. That is, it sends/receives
+ * frames as mailbox messages. The mailbox communication is provided
+ * by another driver, which exports the mailbox interfaces.
+ *
+ * Synchronization:
+ * ---------------
+ * This driver uses streams perimeters to simplify the synchronization.
+ * An inner perimeter D_MTPERMOD which protects the entire module,
+ * that is only one thread exists inside the perimeter, is used. As
+ * this driver supports only one instance and is not a high-performance
+ * driver, D_MTPERMOD is highly suitable.
+ *
+ * All transmission and reception of frames is done inside the service
+ * procedures so that all streams related operations are protected
+ * by the perimeters.
+ *
+ * The mailbox event handler is the only asynchronous callback which
+ * needs to be protected outside of the streams perimeters. This is
+ * done using the module private lock('ms_lock');
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/stream.h>
+#include <sys/cred.h>
+#include <sys/systm.h>
+#include <sys/sunddi.h>
+#include <sys/ddi.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/mkdev.h>
+#include <sys/errno.h>
+#include <sys/debug.h>
+#include <sys/kbio.h>
+#include <sys/kmem.h>
+#include <sys/consdev.h>
+#include <sys/file.h>
+#include <sys/stropts.h>
+#include <sys/strsun.h>
+#include <sys/dlpi.h>
+#include <sys/stat.h>
+#include <sys/ser_sync.h>
+#include <sys/sysmacros.h>
+#include <sys/note.h>
+#include <sys/sdt.h>
+
+#include <sys/scfd/scfdscpif.h>
+#include <sys/dm2s.h>
+
+
+#define DM2S_MODNAME "dm2s" /* Module name */
+#define DM2S_TARGET_ID 0 /* Target ID of the peer */
+#define DM2S_ID_NUM 0x4D53 /* 'M''S' */
+#define DM2S_DEF_MTU 1504 /* Def. MTU size + PPP bytes */
+#define DM2S_MAXPSZ DM2S_DEF_MTU /* Set it to the default MTU */
+#define DM2S_LOWAT (4 * 1024) /* Low water mark */
+#define DM2S_HIWAT (12 * 1024) /* High water mark */
+#define DM2S_SM_TOUT 5000 /* Small timeout (5msec) */
+#define DM2S_LG_TOUT 50000 /* Large timeout (50msec) */
+#define DM2S_MB_TOUT 10000000 /* Mailbox timeout (10sec) */
+
+/*
+ * Global variables
+ */
+void *dm2s_softstate = NULL; /* Softstate pointer */
+
+
+/*
+ * Prototypes for the module related functions.
+ */
+int dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+int dm2s_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+int dm2s_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
+ void *arg, void **result);
+
+/*
+ * Prototypes for the streams related functions.
+ */
+int dm2s_open(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr);
+int dm2s_close(queue_t *rq, int flag, cred_t *cred);
+int dm2s_wput(queue_t *wq, mblk_t *mp);
+int dm2s_rsrv(queue_t *rq);
+int dm2s_wsrv(queue_t *wq);
+
+/*
+ * Prototypes for the internal functions.
+ */
+void dm2s_start(queue_t *wq, dm2s_t *dm2sp);
+void dm2s_event_handler(scf_event_t event, void *arg);
+int dm2s_transmit(queue_t *wq, mblk_t *mp, target_id_t target, mkey_t key);
+void dm2s_receive(dm2s_t *dm2sp);
+void dm2s_wq_timeout(void *arg);
+void dm2s_rq_timeout(void *arg);
+void dm2s_bufcall_rcv(void *arg);
+static clock_t dm2s_timeout_val(int error);
+static void dm2s_cleanup(dm2s_t *dm2sp);
+static int dm2s_mbox_init(dm2s_t *dm2sp);
+static void dm2s_mbox_fini(dm2s_t *dm2sp);
+static int dm2s_prep_scatgath(mblk_t *mp, uint32_t *numsg,
+ mscat_gath_t *sgp, int maxsg);
+
+#ifdef DEBUG
+uint32_t dm2s_debug = DBG_WARN;
+#endif /* DEBUG */
+
+
+/*
+ * Streams and module related structures.
+ */
+struct module_info dm2s_module_info = {
+ DM2S_ID_NUM, /* module ID number */
+ DM2S_MODNAME, /* module name. */
+ 0, /* Minimum packet size (none) */
+ DM2S_MAXPSZ, /* Maximum packet size (none) */
+ DM2S_HIWAT, /* queue high water mark */
+ DM2S_LOWAT /* queue low water mark */
+};
+
+struct qinit dm2s_rinit = {
+ putq, /* qi_putp */
+ dm2s_rsrv, /* qi_srvp */
+ dm2s_open, /* qi_qopen */
+	dm2s_close,		/* qi_qclose */
+ NULL, /* qi_qadmin */
+ &dm2s_module_info, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+struct qinit dm2s_winit = {
+ dm2s_wput, /* qi_putp */
+ dm2s_wsrv, /* qi_srvp */
+ NULL, /* qi_qopen */
+	NULL,			/* qi_qclose */
+ NULL, /* qi_qadmin */
+ &dm2s_module_info, /* qi_minfo */
+ NULL /* qi_mstat */
+};
+
+
+struct streamtab dm2s_streamtab = {
+ &dm2s_rinit,
+ &dm2s_winit,
+ NULL,
+ NULL
+};
+
+DDI_DEFINE_STREAM_OPS(dm2s_ops, nulldev, nulldev, dm2s_attach, \
+ dm2s_detach, nodev, dm2s_info, D_NEW | D_MP | D_MTPERMOD, \
+ &dm2s_streamtab);
+
+
+struct modldrv modldrv = {
+ &mod_driverops,
+ "OPL Mbox to Serial Driver %I%",
+ &dm2s_ops
+};
+
+struct modlinkage modlinkage = {
+ MODREV_1,
+ &modldrv,
+ NULL
+};
+
+
+/*
+ * _init - Module's init routine.
+ */
+int
+_init(void)
+{
+ int ret;
+
+ if (ddi_soft_state_init(&dm2s_softstate, sizeof (dm2s_t), 1) != 0) {
+ cmn_err(CE_WARN, "softstate initialization failed\n");
+ return (DDI_FAILURE);
+ }
+ if ((ret = mod_install(&modlinkage)) != 0) {
+ cmn_err(CE_WARN, "mod_install failed, error = %d", ret);
+ ddi_soft_state_fini(&dm2s_softstate);
+ }
+ return (ret);
+}
+
+/*
+ * _fini - Module's fini routine.
+ */
+int
+_fini(void)
+{
+ int ret;
+
+ if ((ret = mod_remove(&modlinkage)) != 0) {
+ return (ret);
+ }
+ ddi_soft_state_fini(&dm2s_softstate);
+ return (ret);
+}
+
+/*
+ * _info - Module's info routine.
+ */
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * dm2s_attach - Module's attach routine.
+ */
+int
+dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ int instance;
+ dm2s_t *dm2sp;
+ char name[20];
+
+
+ instance = ddi_get_instance(dip);
+
+ /* Only one instance is supported. */
+ if (instance != 0) {
+ cmn_err(CE_WARN, "only one instance is supported");
+ return (DDI_FAILURE);
+ }
+
+ if (cmd != DDI_ATTACH) {
+ return (DDI_FAILURE);
+ }
+ if (ddi_soft_state_zalloc(dm2s_softstate, instance) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "softstate allocation failure");
+ return (DDI_FAILURE);
+ }
+ dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance);
+ if (dm2sp == NULL) {
+ ddi_soft_state_free(dm2s_softstate, instance);
+ cmn_err(CE_WARN, "softstate allocation failure.");
+ return (DDI_FAILURE);
+ }
+ dm2sp->ms_dip = dip;
+ dm2sp->ms_major = ddi_name_to_major(ddi_get_name(dip));
+ dm2sp->ms_ppa = instance;
+
+ /*
+ * Get an interrupt block cookie corresponding to the
+ * interrupt priority of the event handler.
+ * Assert that the event priority is not re-defined to
+ * some higher priority.
+ */
+ /* LINTED */
+ ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW);
+ if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
+ &dm2sp->ms_ibcookie) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed.");
+ goto error;
+ }
+ mutex_init(&dm2sp->ms_lock, NULL, MUTEX_DRIVER,
+ (void *)dm2sp->ms_ibcookie);
+
+ dm2sp->ms_clean |= DM2S_CLEAN_LOCK;
+ cv_init(&dm2sp->ms_wait, NULL, CV_DRIVER, NULL);
+ dm2sp->ms_clean |= DM2S_CLEAN_CV;
+
+ (void) sprintf(name, "%s%d", DM2S_MODNAME, instance);
+ if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
+ DDI_PSEUDO, NULL) == DDI_FAILURE) {
+ ddi_remove_minor_node(dip, NULL);
+ cmn_err(CE_WARN, "Device node creation failed.");
+ goto error;
+ }
+
+ dm2sp->ms_clean |= DM2S_CLEAN_NODE;
+ ddi_set_driver_private(dip, (caddr_t)dm2sp);
+ ddi_report_dev(dip);
+ return (DDI_SUCCESS);
+error:
+ dm2s_cleanup(dm2sp);
+ return (DDI_FAILURE);
+}
+
+/*
+ * dm2s_info - Module's info routine.
+ */
+/*ARGSUSED*/
+int
+dm2s_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+ dm2s_t *dm2sp;
+ minor_t minor;
+ int ret = DDI_FAILURE;
+
+ switch (infocmd) {
+ case DDI_INFO_DEVT2DEVINFO:
+ minor = getminor((dev_t)arg);
+ dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, minor);
+ if (dm2sp == NULL) {
+ *result = NULL;
+ } else {
+ *result = dm2sp->ms_dip;
+ ret = DDI_SUCCESS;
+ }
+ break;
+
+ case DDI_INFO_DEVT2INSTANCE:
+ minor = getminor((dev_t)arg);
+ *result = (void *)(uintptr_t)minor;
+ ret = DDI_SUCCESS;
+ break;
+
+ default:
+ break;
+ }
+ return (ret);
+}
+
+/*
+ * dm2s_detach - Module's detach routine.
+ */
+int
+dm2s_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance;
+ dm2s_t *dm2sp;
+
+ if (cmd != DDI_DETACH) {
+ return (DDI_FAILURE);
+ }
+
+ instance = ddi_get_instance(dip);
+ dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance);
+ if (dm2sp == NULL) {
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&dm2sp->ms_lock);
+
+ /* Check if the mailbox is still in use. */
+ if (dm2sp->ms_state & DM2S_MB_INITED) {
+ mutex_exit(&dm2sp->ms_lock);
+ cmn_err(CE_WARN, "Mailbox in use: Detach failed");
+ return (DDI_FAILURE);
+ }
+ mutex_exit(&dm2sp->ms_lock);
+ dm2s_cleanup(dm2sp);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * dm2s_open - Device open routine.
+ *
+ * Only one open supported. Clone open is not supported.
+ */
+/* ARGSUSED */
+int
+dm2s_open(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
+{
+ dm2s_t *dm2sp;
+ int instance = getminor(*dev);
+ int ret = 0;
+
+ DPRINTF(DBG_DRV, ("dm2s_open: called\n"));
+ if (sflag == CLONEOPEN) {
+ /* Clone open not supported */
+ DPRINTF(DBG_WARN, ("dm2s_open: clone open not supported\n"));
+ return (ENOTSUP);
+ }
+
+ if (rq->q_ptr != NULL) {
+ DPRINTF(DBG_WARN, ("dm2s_open: already opened\n"));
+ return (EBUSY);
+ }
+
+ if ((dm2sp = ddi_get_soft_state(dm2s_softstate, instance)) == NULL) {
+ DPRINTF(DBG_WARN, ("dm2s_open: instance not found\n"));
+ return (ENODEV);
+ }
+
+ mutex_enter(&dm2sp->ms_lock);
+ if (dm2sp->ms_state & DM2S_OPENED) {
+ /* Only one open supported */
+ mutex_exit(&dm2sp->ms_lock);
+ DPRINTF(DBG_WARN, ("dm2s_open: already opened\n"));
+ return (EBUSY);
+ }
+
+ dm2sp->ms_state |= DM2S_OPENED;
+ /* Initialize the mailbox. */
+ if ((ret = dm2s_mbox_init(dm2sp)) != 0) {
+ dm2sp->ms_state = 0;
+ mutex_exit(&dm2sp->ms_lock);
+ return (ret);
+ }
+ rq->q_ptr = WR(rq)->q_ptr = (void *)dm2sp;
+ dm2sp->ms_rq = rq;
+ dm2sp->ms_wq = WR(rq);
+ mutex_exit(&dm2sp->ms_lock);
+
+ if (ret == 0) {
+ qprocson(rq); /* now schedule our queue */
+ }
+ DPRINTF(DBG_DRV, ("dm2s_open: ret=%d\n", ret));
+ return (ret);
+}
+
+/*
+ * dm2s_close - Device close routine.
+ */
+/* ARGSUSED */
+int
+dm2s_close(queue_t *rq, int flag, cred_t *cred)
+{
+ dm2s_t *dm2sp = (dm2s_t *)rq->q_ptr;
+
+ DPRINTF(DBG_DRV, ("dm2s_close: called\n"));
+ if (dm2sp == NULL) {
+ /* Already closed once */
+ return (ENODEV);
+ }
+
+ /* Close the lower layer first */
+ mutex_enter(&dm2sp->ms_lock);
+ (void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key, MB_FLUSH_ALL);
+ dm2s_mbox_fini(dm2sp);
+ mutex_exit(&dm2sp->ms_lock);
+
+ /*
+ * Now we can assume that no asynchronous callbacks exist.
+ * Poison the stream head so that we can't be pushed again.
+ */
+ (void) putnextctl(rq, M_HANGUP);
+ qprocsoff(rq);
+ if (dm2sp->ms_rbufcid != 0) {
+ qunbufcall(rq, dm2sp->ms_rbufcid);
+ dm2sp->ms_rbufcid = 0;
+ }
+ if (dm2sp->ms_rq_timeoutid != 0) {
+ DTRACE_PROBE1(dm2s_rqtimeout__cancel, dm2s_t, dm2sp);
+ (void) quntimeout(dm2sp->ms_rq, dm2sp->ms_rq_timeoutid);
+ dm2sp->ms_rq_timeoutid = 0;
+ }
+ if (dm2sp->ms_wq_timeoutid != 0) {
+ DTRACE_PROBE1(dm2s_wqtimeout__cancel, dm2s_t, dm2sp);
+ (void) quntimeout(dm2sp->ms_wq, dm2sp->ms_wq_timeoutid);
+ dm2sp->ms_wq_timeoutid = 0;
+ }
+ /*
+ * Now we can really mark it closed.
+ */
+ mutex_enter(&dm2sp->ms_lock);
+ dm2sp->ms_rq = dm2sp->ms_wq = NULL;
+ dm2sp->ms_state &= ~DM2S_OPENED;
+ mutex_exit(&dm2sp->ms_lock);
+
+ rq->q_ptr = WR(rq)->q_ptr = NULL;
+ (void) qassociate(rq, -1);
+ DPRINTF(DBG_DRV, ("dm2s_close: successfully closed\n"));
+ return (0);
+}
+
+/*
+ * dm2s_rsrv - Streams read side service procedure.
+ *
+ * All messages are received in the service procedure
+ * only. This is done to simplify the streams synchronization.
+ */
+int
+dm2s_rsrv(queue_t *rq)
+{
+ mblk_t *mp;
+ dm2s_t *dm2sp = (dm2s_t *)rq->q_ptr;
+
+ DPRINTF(DBG_DRV, ("dm2s_rsrv: called\n"));
+ ASSERT(dm2sp != NULL);
+ mutex_enter(&dm2sp->ms_lock);
+
+ /* Receive if there are any messages waiting in the mailbox. */
+ dm2s_receive(dm2sp);
+ mutex_exit(&dm2sp->ms_lock);
+
+ /* Send the received messages up the stream. */
+ while ((mp = getq(rq)) != NULL) {
+ if (canputnext(rq)) {
+ putnext(rq, mp);
+ } else {
+ putbq(rq, mp);
+ break;
+ }
+ }
+ DPRINTF(DBG_DRV, ("dm2s_rsrv: return\n"));
+ return (0);
+}
+
+/*
+ * dm2s_wsrv - Streams write side service procedure.
+ *
+ * All messages are transmitted in the service procedure
+ * only. This is done to simplify the streams synchronization.
+ */
+int
+dm2s_wsrv(queue_t *wq)
+{
+ dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr;
+
+ DPRINTF(DBG_DRV, ("dm2s_wsrv: called\n"));
+ ASSERT(dm2sp != NULL);
+ /* Lets cancel any timeouts waiting to be scheduled. */
+ if (dm2sp->ms_wq_timeoutid != 0) {
+ DTRACE_PROBE1(dm2s_wqtimeout__cancel, dm2s_t, dm2sp);
+ (void) quntimeout(dm2sp->ms_wq, dm2sp->ms_wq_timeoutid);
+ dm2sp->ms_wq_timeoutid = 0;
+ }
+ mutex_enter(&dm2sp->ms_lock);
+ dm2s_start(wq, dm2sp);
+ mutex_exit(&dm2sp->ms_lock);
+ DPRINTF(DBG_DRV, ("dm2s_wsrv: return\n"));
+ return (0);
+}
+
+/*
+ * dm2s_wput - Streams write side put routine.
+ *
+ * All M_DATA messages are queued so that they are transmitted in
+ * the service procedure. This is done to simplify the streams
+ * synchronization. Other messages are handled appropriately.
+ */
+int
+dm2s_wput(queue_t *wq, mblk_t *mp)
+{
+ dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr;
+
+ DPRINTF(DBG_DRV, ("dm2s_wput: called\n"));
+ if (dm2sp == NULL) {
+ return (ENODEV); /* Can't happen. */
+ }
+
+ switch (mp->b_datap->db_type) {
+ case (M_DATA):
+ DPRINTF(DBG_DRV, ("dm2s_wput: M_DATA message\n"));
+ while (mp->b_wptr == mp->b_rptr) {
+ mblk_t *mp1;
+
+ mp1 = unlinkb(mp);
+ freemsg(mp);
+ mp = mp1;
+ if (mp == NULL) {
+ return (0);
+ }
+ }
+
+ /*
+ * Simply queue the message and handle it in the service
+ * procedure.
+ */
+ (void) putq(wq, mp);
+ qenable(wq);
+ return (0);
+
+ case (M_PROTO):
+ DPRINTF(DBG_DRV, ("dm2s_wput: M_PROTO message\n"));
+ /* We don't expect this */
+ mp->b_datap->db_type = M_ERROR;
+ mp->b_rptr = mp->b_wptr = mp->b_datap->db_base;
+ *mp->b_wptr++ = EPROTO;
+ qreply(wq, mp);
+ return (EINVAL);
+
+ case (M_IOCTL):
+ DPRINTF(DBG_DRV, ("dm2s_wput: M_IOCTL message\n"));
+ if (MBLKL(mp) < sizeof (struct iocblk)) {
+ freemsg(mp);
+ return (0);
+ }
+ /*
+ * No ioctls required to be supported by this driver, so
+ * return EINVAL for all ioctls.
+ */
+ miocnak(wq, mp, 0, EINVAL);
+ break;
+
+ case (M_CTL):
+ DPRINTF(DBG_DRV, ("dm2s_wput: M_CTL message\n"));
+ /*
+ * No M_CTL messages need to supported by this driver,
+ * so simply ignore them.
+ */
+ freemsg(mp);
+ break;
+
+ case (M_FLUSH):
+ DPRINTF(DBG_DRV, (
+ "dm2s_wput: M_FLUSH message 0x%X\n", *mp->b_rptr));
+ if (*mp->b_rptr & FLUSHW) { /* Flush write-side */
+ (void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
+ MB_FLUSH_SEND);
+ flushq(wq, FLUSHDATA);
+ *mp->b_rptr &= ~FLUSHW;
+ }
+ if (*mp->b_rptr & FLUSHR) {
+ (void) scf_mb_flush(dm2sp->ms_target, dm2sp->ms_key,
+ MB_FLUSH_RECEIVE);
+ flushq(RD(wq), FLUSHDATA);
+ qreply(wq, mp);
+ } else {
+ freemsg(mp);
+ }
+ break;
+
+ default:
+ DPRINTF(DBG_DRV, ("dm2s_wput: UNKNOWN message\n"));
+ freemsg(mp);
+
+ }
+ return (0);
+}
+
+/*
+ * dm2s_cleanup - Cleanup routine.
+ */
+static void
+dm2s_cleanup(dm2s_t *dm2sp)
+{
+ char name[20];
+
+ DPRINTF(DBG_DRV, ("dm2s_cleanup: called\n"));
+ ASSERT(dm2sp != NULL);
+ if (dm2sp->ms_clean & DM2S_CLEAN_NODE) {
+ (void) sprintf(name, "%s%d", DM2S_MODNAME, dm2sp->ms_ppa);
+ ddi_remove_minor_node(dm2sp->ms_dip, name);
+ }
+ if (dm2sp->ms_clean & DM2S_CLEAN_LOCK)
+ mutex_destroy(&dm2sp->ms_lock);
+ if (dm2sp->ms_clean & DM2S_CLEAN_CV)
+ cv_destroy(&dm2sp->ms_wait);
+ ddi_set_driver_private(dm2sp->ms_dip, NULL);
+ ddi_soft_state_free(dm2s_softstate, dm2sp->ms_ppa);
+}
+
+/*
+ * dm2s_mbox_init - Mailbox specific initialization.
+ */
+static int
+dm2s_mbox_init(dm2s_t *dm2sp)
+{
+ int ret;
+ clock_t tout;
+
+ ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
+ dm2sp->ms_target = DM2S_TARGET_ID;
+ dm2sp->ms_key = DSCP_KEY;
+ dm2sp->ms_state &= ~DM2S_MB_INITED;
+
+ /* Iterate until mailbox gets connected */
+ while (!(dm2sp->ms_state & DM2S_MB_CONN)) {
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_init: calling mb_init\n"));
+ ret = scf_mb_init(dm2sp->ms_target, dm2sp->ms_key,
+ dm2s_event_handler, (void *)dm2sp);
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));
+
+ if (ret == 0) {
+ dm2sp->ms_state |= DM2S_MB_INITED;
+
+ /* Block until the mailbox is ready to communicate. */
+ while (!(dm2sp->ms_state &
+ (DM2S_MB_CONN | DM2S_MB_DISC))) {
+
+ if (cv_wait_sig(&dm2sp->ms_wait,
+ &dm2sp->ms_lock) <= 0) {
+ /* interrupted */
+ ret = EINTR;
+ break;
+ }
+ }
+ }
+
+ if (ret != 0) {
+
+ DPRINTF(DBG_MBOX,
+ ("dm2s_mbox_init: failed/interrupted\n"));
+ DTRACE_PROBE1(dm2s_mbox_fail, int, ret);
+ dm2sp->ms_state &= ~DM2S_MB_INITED;
+ (void) scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
+
+ /* if interrupted, return immediately. */
+ if (ret == EINTR)
+ return (ret);
+
+ }
+
+ if ((ret != 0) || (dm2sp->ms_state & DM2S_MB_DISC)) {
+
+ DPRINTF(DBG_WARN,
+ ("dm2s_mbox_init: mbox DISC_ERROR\n"));
+ DTRACE_PROBE1(dm2s_mbox_fail, int, DM2S_MB_DISC);
+ dm2sp->ms_state &= ~DM2S_MB_INITED;
+ (void) scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
+
+ /*
+ * If there was failure, then wait for
+ * DM2S_MB_TOUT secs and retry again.
+ */
+
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_init: waiting...\n"));
+ tout = ddi_get_lbolt() + drv_usectohz(DM2S_MB_TOUT);
+ ret = cv_timedwait_sig(&dm2sp->ms_wait,
+ &dm2sp->ms_lock, tout);
+ if (ret == 0) {
+ /* if interrupted, return immediately. */
+ DPRINTF(DBG_MBOX,
+ ("dm2s_mbox_init: interrupted\n"));
+ return (EINTR);
+ }
+ }
+ }
+
+ /*
+ * Obtain the max size of a single message.
+ * NOTE: There is no mechanism to update the
+	 * upper layers dynamically, so we expect this
+	 * size to be at least the default MTU size.
+ */
+ ret = scf_mb_ctrl(dm2sp->ms_target, dm2sp->ms_key,
+ SCF_MBOP_MAXMSGSIZE, &dm2sp->ms_mtu);
+
+ if ((ret == 0) && (dm2sp->ms_mtu < DM2S_DEF_MTU)) {
+ cmn_err(CE_WARN, "Max message size expected >= %d "
+ "but found %d\n", DM2S_DEF_MTU, dm2sp->ms_mtu);
+ ret = EIO;
+ }
+
+ if (ret != 0) {
+ dm2sp->ms_state &= ~DM2S_MB_INITED;
+ (void) scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
+ }
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_init: mb_init ret=%d\n", ret));
+ return (ret);
+}
+
+/*
+ * dm2s_mbox_fini - Mailbox de-initialization.
+ */
+static void
+dm2s_mbox_fini(dm2s_t *dm2sp)
+{
+ int ret;
+
+ ASSERT(dm2sp != NULL);
+ if (dm2sp->ms_state & DM2S_MB_INITED) {
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_fini: calling mb_fini\n"));
+ ret = scf_mb_fini(dm2sp->ms_target, dm2sp->ms_key);
+ if (ret != 0) {
+ cmn_err(CE_WARN,
+ "Failed to close the Mailbox error =%d", ret);
+ }
+ DPRINTF(DBG_MBOX, ("dm2s_mbox_fini: mb_fini ret=%d\n", ret));
+ dm2sp->ms_state &= ~(DM2S_MB_INITED |DM2S_MB_CONN |
+ DM2S_MB_DISC);
+ }
+}
+
+/*
+ * dm2s_event_handler - Mailbox event handler.
+ */
+void
+dm2s_event_handler(scf_event_t event, void *arg)
+{
+ dm2s_t *dm2sp = (dm2s_t *)arg;
+ queue_t *rq;
+
+ ASSERT(dm2sp != NULL);
+ mutex_enter(&dm2sp->ms_lock);
+ if (!(dm2sp->ms_state & DM2S_MB_INITED)) {
+ /*
+		 * Ignore all events if the state flag indicates that the
+		 * mailbox is not initialized; this may happen during close.
+ */
+ mutex_exit(&dm2sp->ms_lock);
+ DPRINTF(DBG_MBOX,
+ ("Event(0x%X) received - Mailbox not inited\n", event));
+ return;
+ }
+ switch (event) {
+ case SCF_MB_CONN_OK:
+ /*
+ * Now the mailbox is ready to use, lets wake up
+ * any one waiting for this event.
+ */
+ dm2sp->ms_state |= DM2S_MB_CONN;
+ cv_broadcast(&dm2sp->ms_wait);
+ DPRINTF(DBG_MBOX, ("Event received = CONN_OK\n"));
+ break;
+
+ case SCF_MB_MSG_DATA:
+ if (!DM2S_MBOX_READY(dm2sp)) {
+ DPRINTF(DBG_MBOX,
+ ("Event(MSG_DATA) received - Mailbox not READY\n"));
+ break;
+ }
+ /*
+ * A message is available in the mailbox.
+ * Lets enable the read service procedure
+ * to receive this message.
+ */
+ if (dm2sp->ms_rq != NULL) {
+ qenable(dm2sp->ms_rq);
+ }
+ DPRINTF(DBG_MBOX, ("Event received = MSG_DATA\n"));
+ break;
+
+ case SCF_MB_SPACE:
+ if (!DM2S_MBOX_READY(dm2sp)) {
+ DPRINTF(DBG_MBOX,
+ ("Event(MB_SPACE) received - Mailbox not READY\n"));
+ break;
+ }
+
+ /*
+ * Now the mailbox is ready to transmit, lets
+ * schedule the write service procedure.
+ */
+ if (dm2sp->ms_wq != NULL) {
+ qenable(dm2sp->ms_wq);
+ }
+ DPRINTF(DBG_MBOX, ("Event received = MB_SPACE\n"));
+ break;
+ case SCF_MB_DISC_ERROR:
+ dm2sp->ms_state |= DM2S_MB_DISC;
+ if (dm2sp->ms_state & DM2S_MB_CONN) {
+ /*
+ * If it was previously connected,
+ * then send a hangup message.
+ */
+ rq = dm2sp->ms_rq;
+ if (rq != NULL) {
+ mutex_exit(&dm2sp->ms_lock);
+ /*
+ * Send a hangup message to indicate
+ * disconnect event.
+ */
+ (void) putctl(rq, M_HANGUP);
+ DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
+ mutex_enter(&dm2sp->ms_lock);
+ }
+ } else {
+ /*
+ * Signal if the open is waiting for a
+ * connection.
+ */
+ cv_broadcast(&dm2sp->ms_wait);
+ }
+ DPRINTF(DBG_MBOX, ("Event received = DISC_ERROR\n"));
+ break;
+ default:
+ cmn_err(CE_WARN, "Unexpected event received\n");
+ break;
+ }
+ mutex_exit(&dm2sp->ms_lock);
+}
+
+/*
+ * dm2s_start - Start transmission function.
+ *
+ * Send all queued messages. If the mailbox is busy, then
+ * start a timeout as a polling mechanism. The timeout is useful
+ * to not rely entirely on the SCF_MB_SPACE event.
+ */
+void
+dm2s_start(queue_t *wq, dm2s_t *dm2sp)
+{
+ mblk_t *mp;
+ int ret;
+
+ DPRINTF(DBG_DRV, ("dm2s_start: called\n"));
+ ASSERT(dm2sp != NULL);
+ ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
+
+ while ((mp = getq(wq)) != NULL) {
+ switch (mp->b_datap->db_type) {
+
+ case M_DATA:
+ ret = dm2s_transmit(wq, mp, dm2sp->ms_target,
+ dm2sp->ms_key);
+ if (ret == EBUSY || ret == ENOSPC || ret == EAGAIN) {
+ DPRINTF(DBG_MBOX,
+ ("dm2s_start: recoverable err=%d\n", ret));
+ /*
+ * Start a timeout to retry again.
+ */
+ if (dm2sp->ms_wq_timeoutid == 0) {
+ DTRACE_PROBE1(dm2s_wqtimeout__start,
+ dm2s_t, dm2sp);
+ dm2sp->ms_wq_timeoutid = qtimeout(wq,
+ dm2s_wq_timeout, (void *)dm2sp,
+ dm2s_timeout_val(ret));
+ }
+ return;
+ } else if (ret != 0) {
+ mutex_exit(&dm2sp->ms_lock);
+ /*
+ * An error occurred with the transmission,
+ * flush pending messages and initiate a
+ * hangup.
+ */
+ flushq(wq, FLUSHDATA);
+ (void) putnextctl(RD(wq), M_HANGUP);
+ DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
+ DPRINTF(DBG_WARN,
+ ("dm2s_start: hangup transmit err=%d\n",
+ ret));
+ mutex_enter(&dm2sp->ms_lock);
+ }
+ break;
+ default:
+ /*
+ * At this point, we don't expect any other messages.
+ */
+ freemsg(mp);
+ break;
+ }
+ }
+}
+
+/*
+ * dm2s_receive - Read all messages from the mailbox.
+ *
+ * This function is called from the read service procedure, to
+ * receive the messages awaiting in the mailbox.
+ */
+void
+dm2s_receive(dm2s_t *dm2sp)
+{
+ queue_t *rq = dm2sp->ms_rq;
+ mblk_t *mp;
+ int ret;
+ uint32_t len;
+
+ DPRINTF(DBG_DRV, ("dm2s_receive: called\n"));
+ ASSERT(dm2sp != NULL);
+ ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
+ if (rq == NULL) {
+ return;
+ }
+ /*
+ * As the number of messages in the mailbox are pretty limited,
+ * it is safe to process all messages in one loop.
+ */
+ while (DM2S_MBOX_READY(dm2sp) && ((ret = scf_mb_canget(dm2sp->ms_target,
+ dm2sp->ms_key, &len)) == 0)) {
+ DPRINTF(DBG_MBOX, ("dm2s_receive: mb_canget len=%d\n", len));
+ if (len == 0) {
+ break;
+ }
+ mp = allocb(len, BPRI_MED);
+ if (mp == NULL) {
+ DPRINTF(DBG_WARN, ("dm2s_receive: allocb failed\n"));
+ /*
+ * Start a bufcall so that we can retry again
+ * when memory becomes available.
+ */
+ dm2sp->ms_rbufcid = qbufcall(rq, len, BPRI_MED,
+ dm2s_bufcall_rcv, dm2sp);
+ if (dm2sp->ms_rbufcid == 0) {
+ DPRINTF(DBG_WARN,
+ ("dm2s_receive: qbufcall failed\n"));
+ /*
+ * if bufcall fails, start a timeout to
+ * initiate a re-try after some time.
+ */
+ DTRACE_PROBE1(dm2s_rqtimeout__start,
+ dm2s_t, dm2sp);
+ dm2sp->ms_rq_timeoutid = qtimeout(rq,
+ dm2s_rq_timeout, (void *)dm2sp,
+ drv_usectohz(DM2S_SM_TOUT));
+ }
+ break;
+ }
+
+ /*
+ * Only a single scatter/gather element is enough here.
+ */
+ dm2sp->ms_sg_rcv.msc_dptr = (caddr_t)mp->b_wptr;
+ dm2sp->ms_sg_rcv.msc_len = len;
+ DPRINTF(DBG_MBOX, ("dm2s_receive: calling getmsg\n"));
+ ret = scf_mb_getmsg(dm2sp->ms_target, dm2sp->ms_key, len, 1,
+ &dm2sp->ms_sg_rcv, 0);
+ DPRINTF(DBG_MBOX, ("dm2s_receive: getmsg ret=%d\n", ret));
+ if (ret != 0) {
+ freemsg(mp);
+ break;
+ }
+ DMPBYTES("dm2s: Getmsg: ", len, 1, &dm2sp->ms_sg_rcv);
+ mp->b_wptr += len;
+ /*
+ * Queue the messages in the rq, so that the service
+ * procedure handles sending the messages up the stream.
+ */
+ putq(rq, mp);
+ }
+
+ if ((!DM2S_MBOX_READY(dm2sp)) || (ret != ENOMSG && ret != EMSGSIZE)) {
+ /*
+ * Some thing went wrong, flush pending messages
+ * and initiate a hangup.
+ * Note: flushing the wq initiates a faster close.
+ */
+ mutex_exit(&dm2sp->ms_lock);
+ flushq(WR(rq), FLUSHDATA);
+ (void) putnextctl(rq, M_HANGUP);
+ DTRACE_PROBE1(dm2s_hangup, dm2s_t, dm2sp);
+ mutex_enter(&dm2sp->ms_lock);
+ DPRINTF(DBG_WARN, ("dm2s_receive: encountered unknown "
+ "condition - hangup ret=%d\n", ret));
+ }
+}
+
+/*
+ * dm2s_transmit - Transmit a message.
+ */
+int
+dm2s_transmit(queue_t *wq, mblk_t *mp, target_id_t target, mkey_t key)
+{
+ dm2s_t *dm2sp = (dm2s_t *)wq->q_ptr;
+ int ret;
+ uint32_t len;
+ uint32_t numsg;
+
+ DPRINTF(DBG_DRV, ("dm2s_transmit: called\n"));
+ ASSERT(dm2sp != NULL);
+ ASSERT(MUTEX_HELD(&dm2sp->ms_lock));
+ /*
+ * Free the message if the mailbox is not in the connected state.
+ */
+ if (!DM2S_MBOX_READY(dm2sp)) {
+ DPRINTF(DBG_MBOX, ("dm2s_transmit: mailbox not ready yet\n"));
+ freemsg(mp);
+ return (EIO);
+ }
+
+ len = msgdsize(mp);
+ if (len > dm2sp->ms_mtu) {
+ /*
+ * Size is too big to send, free the message.
+ */
+ DPRINTF(DBG_MBOX, ("dm2s_transmit: message too large\n"));
+ DTRACE_PROBE2(dm2s_msg_too_big, dm2s_t, dm2sp, uint32_t, len);
+ freemsg(mp);
+ return (0);
+ }
+
+ if ((ret = dm2s_prep_scatgath(mp, &numsg, dm2sp->ms_sg_tx,
+ DM2S_MAX_SG)) != 0) {
+ DPRINTF(DBG_MBOX, ("dm2s_transmit: prep_scatgath failed\n"));
+ putbq(wq, mp);
+ return (EAGAIN);
+ }
+ DPRINTF(DBG_MBOX, ("dm2s_transmit: calling mb_putmsg numsg=%d len=%d\n",
+ numsg, len));
+ ret = scf_mb_putmsg(target, key, len, numsg, dm2sp->ms_sg_tx, 0);
+ if (ret == EBUSY || ret == ENOSPC) {
+ DPRINTF(DBG_MBOX,
+ ("dm2s_transmit: mailbox busy ret=%d\n", ret));
+ if (++dm2sp->ms_retries >= DM2S_MAX_RETRIES) {
+ /*
+ * If maximum retries are reached, then free the
+ * message.
+ */
+ DPRINTF(DBG_MBOX,
+ ("dm2s_transmit: freeing msg after max retries\n"));
+ DTRACE_PROBE2(dm2s_retry_fail, dm2s_t, dm2sp, int, ret);
+ freemsg(mp);
+ dm2sp->ms_retries = 0;
+ return (0);
+ }
+ DTRACE_PROBE2(dm2s_mb_busy, dm2s_t, dm2sp, int, ret);
+ /*
+ * Queue it back, so that we can retry again.
+ */
+ putbq(wq, mp);
+ return (ret);
+ }
+ DMPBYTES("dm2s: Putmsg: ", len, numsg, dm2sp->ms_sg_tx);
+ dm2sp->ms_retries = 0;
+ freemsg(mp);
+ DPRINTF(DBG_DRV, ("dm2s_transmit: ret=%d\n", ret));
+ return (ret);
+}
+
+/*
+ * dm2s_bufcall_rcv - Bufcall callback routine.
+ *
+ * It simply enables read side queue so that the service procedure
+ * can retry receive operation.
+ */
+void
+dm2s_bufcall_rcv(void *arg)
+{
+ dm2s_t *dm2sp = (dm2s_t *)arg;
+
+ DPRINTF(DBG_DRV, ("dm2s_bufcall_rcv: called\n"));
+ mutex_enter(&dm2sp->ms_lock);
+ dm2sp->ms_rbufcid = 0;
+ if (dm2sp->ms_rq != NULL) {
+ qenable(dm2sp->ms_rq);
+ }
+ mutex_exit(&dm2sp->ms_lock);
+}
+
+/*
+ * dm2s_rq_timeout - Timeout callback for the read side.
+ *
+ * It simply enables read side queue so that the service procedure
+ * can retry the receive operation.
+ */
+void
+dm2s_rq_timeout(void *arg)
+{
+ dm2s_t *dm2sp = (dm2s_t *)arg;
+
+ DPRINTF(DBG_DRV, ("dm2s_rq_timeout: called\n"));
+ mutex_enter(&dm2sp->ms_lock);
+ dm2sp->ms_rq_timeoutid = 0;
+ if (dm2sp->ms_rq != NULL) {
+ qenable(dm2sp->ms_rq);
+ }
+ mutex_exit(&dm2sp->ms_lock);
+}
+
+/*
+ * dm2s_wq_timeout - Timeout callback for the write side.
+ *
+ * It simply enables write side queue so that the service procedure
+ * can retry the transmission operation.
+ */
+void
+dm2s_wq_timeout(void *arg)
+{
+ dm2s_t *dm2sp = (dm2s_t *)arg;
+
+ DPRINTF(DBG_DRV, ("dm2s_wq_timeout: called\n"));
+ mutex_enter(&dm2sp->ms_lock);
+ dm2sp->ms_wq_timeoutid = 0;
+ if (dm2sp->ms_wq != NULL) {
+ qenable(dm2sp->ms_wq);
+ }
+ mutex_exit(&dm2sp->ms_lock);
+}
+
+/*
+ * dm2s_prep_scatgath - Prepare scatter/gather elements for transmission
+ * of a streams message.
+ */
+static int
+dm2s_prep_scatgath(mblk_t *mp, uint32_t *numsg, mscat_gath_t *sgp, int maxsg)
+{
+ uint32_t num = 0;
+ mblk_t *tmp = mp;
+
+ while ((tmp != NULL) && (num < maxsg)) {
+ sgp[num].msc_dptr = (caddr_t)tmp->b_rptr;
+ sgp[num].msc_len = MBLKL(tmp);
+ tmp = tmp->b_cont;
+ num++;
+ }
+
+ if (tmp != NULL) {
+ /*
+ * Number of scatter/gather elements available are not
+ * enough, so lets pullup the msg.
+ */
+ if (pullupmsg(mp, -1) != 1) {
+ return (EAGAIN);
+ }
+ sgp[0].msc_dptr = (caddr_t)mp->b_rptr;
+ sgp[0].msc_len = MBLKL(mp);
+ num = 1;
+ }
+ *numsg = num;
+ return (0);
+}
+
+/*
+ * dm2s_timeout_val -- Return appropriate timeout value.
+ *
+ * A small timeout value is returned for EBUSY and EAGAIN cases. This is
+ * because the condition is expected to be recovered sooner.
+ *
+ * A larger timeout value is returned for ENOSPC case, as the condition
+ * depends on the peer to release buffer space.
+ * NOTE: there will also be an event(SCF_MB_SPACE) but a timeout is
+ * used for reliability purposes.
+ */
+static clock_t
+dm2s_timeout_val(int error)
+{
+ clock_t tval;
+
+ ASSERT(error == EBUSY || error == ENOSPC || error == EAGAIN);
+
+ if (error == EBUSY || error == EAGAIN) {
+ tval = DM2S_SM_TOUT;
+ } else {
+ tval = DM2S_LG_TOUT;
+ }
+ return (drv_usectohz(tval));
+}
+
+#ifdef DEBUG
+
+static void
+dm2s_dump_bytes(char *str, uint32_t total_len,
+ uint32_t num_sg, mscat_gath_t *sgp)
+{
+ int i, j;
+ int nsg;
+ int len, tlen = 0;
+ mscat_gath_t *tp;
+ uint8_t *datap;
+#define BYTES_PER_LINE 20
+ char bytestr[BYTES_PER_LINE * 3 + 1];
+ uint32_t digest = 0;
+
+ if (!(dm2s_debug & DBG_MESG))
+ return;
+ ASSERT(num_sg != 0);
+
+ for (nsg = 0; (nsg < num_sg) && (tlen < total_len); nsg++) {
+ tp = &sgp[nsg];
+ datap = (uint8_t *)tp->msc_dptr;
+ len = tp->msc_len;
+ for (i = 0; i < len; i++) {
+ digest += datap[i];
+ }
+ tlen += len;
+ }
+ sprintf(bytestr, "%s Packet: Size=%d Digest=%d\n",
+ str, total_len, digest);
+ DTRACE_PROBE1(dm2s_dump_digest, unsigned char *, bytestr);
+
+ for (nsg = 0; (nsg < num_sg) && (tlen < total_len); nsg++) {
+ tp = &sgp[nsg];
+ datap = (uint8_t *)tp->msc_dptr;
+ len = tp->msc_len;
+ for (i = 0; i < len; ) {
+ for (j = 0; (j < BYTES_PER_LINE) &&
+ (i < len); j++, i++) {
+ sprintf(&bytestr[j * 3], "%02X ", datap[i]);
+ digest += datap[i];
+ }
+ if (j != 0) {
+ DTRACE_PROBE1(dm2s_dump, unsigned char *,
+ bytestr);
+ }
+ }
+ tlen += i;
+ }
+}
+
+#endif /* DEBUG */
diff --git a/usr/src/uts/sun4u/opl/io/dm2s.conf b/usr/src/uts/sun4u/opl/io/dm2s.conf
new file mode 100644
index 0000000000..d2256f5d38
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/dm2s.conf
@@ -0,0 +1,28 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+name="dm2s" parent="pseudo" instance=0;
diff --git a/usr/src/uts/sun4u/opl/io/dr.conf b/usr/src/uts/sun4u/opl/io/dr.conf
new file mode 100644
index 0000000000..16cac68654
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/dr.conf
@@ -0,0 +1,27 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+
+name="dr" parent="pseudo" instance=0;
diff --git a/usr/src/uts/sun4u/opl/io/dr_mem.c b/usr/src/uts/sun4u/opl/io/dr_mem.c
new file mode 100644
index 0000000000..0d4a415d7b
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/dr_mem.c
@@ -0,0 +1,2760 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * DR memory support routines.
+ */
+
+#include <sys/note.h>
+#include <sys/debug.h>
+#include <sys/types.h>
+#include <sys/errno.h>
+#include <sys/param.h>
+#include <sys/dditypes.h>
+#include <sys/kmem.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ndi_impldefs.h>
+#include <sys/sysmacros.h>
+#include <sys/machsystm.h>
+#include <sys/spitregs.h>
+#include <sys/cpuvar.h>
+#include <sys/promif.h>
+#include <vm/seg_kmem.h>
+#include <sys/lgrp.h>
+#include <sys/platform_module.h>
+
+#include <vm/page.h>
+
+#include <sys/dr.h>
+#include <sys/dr_util.h>
+#include <sys/drmach.h>
+
+extern struct memlist *phys_install;
+extern vnode_t retired_pages;
+
+/* TODO: push this reference below drmach line */
+extern int kcage_on;
+
+/* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
+static char *dr_ie_fmt = "%M% %d";
+
+typedef enum {
+ DR_TP_INVALID = -1,
+ DR_TP_SAME,
+ DR_TP_LARGE,
+ DR_TP_NONRELOC,
+ DR_TP_FLOATING
+} dr_target_pref_t;
+
+static int dr_post_detach_mem_unit(dr_mem_unit_t *mp);
+static int dr_reserve_mem_spans(memhandle_t *mhp,
+ struct memlist *mlist);
+static int dr_select_mem_target(dr_handle_t *hp,
+ dr_mem_unit_t *mp, struct memlist *ml);
+static void dr_init_mem_unit_data(dr_mem_unit_t *mp);
+static struct memlist *dr_memlist_del_retired_pages(struct memlist *ml);
+static dr_target_pref_t dr_get_target_preference(dr_handle_t *hp,
+ dr_mem_unit_t *t_mp, dr_mem_unit_t *s_mp,
+ struct memlist *s_ml, struct memlist *x_ml,
+ struct memlist *b_ml);
+
+static int memlist_canfit(struct memlist *s_mlist,
+ struct memlist *t_mlist);
+static int dr_del_mlist_query(struct memlist *mlist,
+ memquery_t *mp);
+static struct memlist *dr_get_copy_mlist(struct memlist *s_ml,
+ struct memlist *t_ml, dr_mem_unit_t *s_mp,
+ dr_mem_unit_t *t_mp);
+static struct memlist *dr_get_nonreloc_mlist(struct memlist *s_ml,
+ dr_mem_unit_t *s_mp);
+static int dr_memlist_canfit(struct memlist *s_mlist,
+ struct memlist *t_mlist, dr_mem_unit_t *s_mp,
+ dr_mem_unit_t *t_mp);
+
+extern void page_unretire_pages(void);
+
+/*
+ * dr_mem_unit_t.sbm_flags
+ */
+#define DR_MFLAG_RESERVED 0x01 /* mem unit reserved for delete */
+#define DR_MFLAG_SOURCE 0x02 /* source brd of copy/rename op */
+#define DR_MFLAG_TARGET 0x04 /* target brd of copy/rename op */
+#define DR_MFLAG_RELOWNER 0x20 /* memory release (delete) owner */
+#define DR_MFLAG_RELDONE 0x40 /* memory release (delete) done */
+
+/* helper macros */
+#define _ptob64(p) ((uint64_t)(p) << PAGESHIFT)
+#define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
+
+/*
+ * dr_get_memlist - build a memlist describing the physical memory that
+ * belongs to memunit 'mp'.  The caller owns (and must free) the
+ * returned list; NULL means failure, with the error recorded in
+ * mp->sbm_cm.sbdev_error when the platform layer reported one.
+ */
+static struct memlist *
+dr_get_memlist(dr_mem_unit_t *mp)
+{
+ struct memlist *mlist = NULL;
+ sbd_error_t *err;
+ static fn_t f = "dr_get_memlist";
+
+ PR_MEM("%s for %s...\n", f, mp->sbm_cm.sbdev_path);
+
+ /*
+ * Return cached memlist, if present.
+ * This memlist will be present following an
+ * unconfigure (a.k.a: detach) of this memunit.
+ * It should only be used in the case where a configure
+ * is bringing this memunit back in without going
+ * through the disconnect and connect states.
+ */
+ if (mp->sbm_mlist) {
+ PR_MEM("%s: found cached memlist\n", f);
+
+ mlist = memlist_dup(mp->sbm_mlist);
+ } else {
+ uint64_t basepa = _ptob64(mp->sbm_basepfn);
+
+ /* attempt to construct a memlist using phys_install */
+
+ /* round down to slice base address */
+ basepa &= ~(mp->sbm_slice_size - 1);
+
+ /* get a copy of phys_install to edit */
+ memlist_read_lock();
+ mlist = memlist_dup(phys_install);
+ memlist_read_unlock();
+
+ /* trim lower irrelevant span */
+ if (mlist)
+ mlist = memlist_del_span(mlist, 0ull, basepa);
+
+ /* trim upper irrelevant span */
+ if (mlist) {
+ uint64_t endpa;
+
+ basepa += mp->sbm_slice_size;
+ endpa = _ptob64(physmax + 1);
+ if (endpa > basepa)
+ mlist = memlist_del_span(
+ mlist, basepa,
+ endpa - basepa);
+ }
+
+ if (mlist) {
+ /* successfully built a memlist */
+ PR_MEM("%s: derived memlist from phys_install\n", f);
+ }
+
+ /* if no mlist yet, try platform layer */
+ if (!mlist) {
+ err = drmach_mem_get_memlist(
+ mp->sbm_cm.sbdev_id, &mlist);
+ if (err) {
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+ mlist = NULL; /* paranoia */
+ }
+ }
+ }
+
+ PR_MEM("%s: memlist for %s\n", f, mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(mlist);
+
+ return (mlist);
+}
+
+/*
+ * Synchronization state shared between dr_release_mem() and the
+ * kphysm delete-done callback (dr_mem_del_done).
+ */
+typedef struct {
+ kcondvar_t cond; /* signalled when the delete completes */
+ kmutex_t lock; /* protects 'error' and 'done' */
+ int error; /* result reported by the delete callback */
+ int done; /* nonzero once the delete has finished */
+} dr_release_mem_sync_t;
+
+/*
+ * Memory has been logically removed by the time this routine is called.
+ */
+/*
+ * dr_mem_del_done - kphysm_del_start() completion callback.  Records
+ * the delete result and wakes the waiter in dr_release_mem().
+ */
+static void
+dr_mem_del_done(void *arg, int error)
+{
+ dr_release_mem_sync_t *ds = arg;
+
+ mutex_enter(&ds->lock);
+ ds->error = error;
+ ds->done = 1;
+ /* Wake the single waiter blocked in dr_release_mem(). */
+ cv_signal(&ds->cond);
+ mutex_exit(&ds->lock);
+}
+
+/*
+ * When we reach here the memory being drained should have
+ * already been reserved in dr_pre_release_mem().
+ * Our only task here is to kick off the "drain" and wait
+ * for it to finish.
+ */
+void
+dr_release_mem(dr_common_unit_t *cp)
+{
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)cp;
+ int err;
+ dr_release_mem_sync_t rms;
+ static fn_t f = "dr_release_mem";
+
+ /* check that this memory unit has been reserved */
+ if (!(mp->sbm_flags & DR_MFLAG_RELOWNER)) {
+ DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
+ return;
+ }
+
+ bzero((void *) &rms, sizeof (rms));
+
+ mutex_init(&rms.lock, NULL, MUTEX_DRIVER, NULL);
+ cv_init(&rms.cond, NULL, CV_DRIVER, NULL);
+
+ mutex_enter(&rms.lock);
+ /* Start the drain; dr_mem_del_done() fires when it completes. */
+ err = kphysm_del_start(mp->sbm_memhandle,
+ dr_mem_del_done, (void *) &rms);
+ if (err == KPHYSM_OK) {
+ /* wait for completion or interrupt */
+ while (!rms.done) {
+ /* cv_wait_sig() returns 0 when interrupted */
+ if (cv_wait_sig(&rms.cond, &rms.lock) == 0) {
+ /* then there is a pending UNIX signal */
+ (void) kphysm_del_cancel(mp->sbm_memhandle);
+
+ /* wait for completion */
+ while (!rms.done)
+ cv_wait(&rms.cond, &rms.lock);
+ }
+ }
+ /* get the result of the memory delete operation */
+ err = rms.error;
+ }
+ mutex_exit(&rms.lock);
+
+ cv_destroy(&rms.cond);
+ mutex_destroy(&rms.lock);
+
+ if (err != KPHYSM_OK) {
+ int e_code;
+
+ /* Translate kphysm error codes into ESBD codes. */
+ switch (err) {
+ case KPHYSM_ENOWORK:
+ e_code = ESBD_NOERROR;
+ break;
+
+ case KPHYSM_EHANDLE:
+ case KPHYSM_ESEQUENCE:
+ e_code = ESBD_INTERNAL;
+ break;
+
+ case KPHYSM_ENOTVIABLE:
+ e_code = ESBD_MEM_NOTVIABLE;
+ break;
+
+ case KPHYSM_EREFUSED:
+ e_code = ESBD_MEM_REFUSED;
+ break;
+
+ case KPHYSM_ENONRELOC:
+ e_code = ESBD_MEM_NONRELOC;
+ break;
+
+ case KPHYSM_ECANCELLED:
+ e_code = ESBD_MEM_CANCELLED;
+ break;
+
+ case KPHYSM_ERESOURCE:
+ e_code = ESBD_MEMFAIL;
+ break;
+
+ default:
+ cmn_err(CE_WARN,
+ "%s: unexpected kphysm error code %d,"
+ " id 0x%p",
+ f, err, mp->sbm_cm.sbdev_id);
+
+ e_code = ESBD_IO;
+ break;
+ }
+
+ if (e_code != ESBD_NOERROR) {
+ dr_dev_err(CE_IGNORE, &mp->sbm_cm, e_code);
+ }
+ }
+}
+
+/*
+ * dr_attach_mem - configure a memory unit into the running OS:
+ * program the hardware via drmach_configure(), then add each span of
+ * the unit's memlist with kphysm_add_memory_dynamic() and register it
+ * with the platform layer.  On any failure the configure is backed out.
+ */
+void
+dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
+{
+ _NOTE(ARGUNUSED(hp))
+
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)cp;
+ struct memlist *ml, *mc;
+ sbd_error_t *err;
+ static fn_t f = "dr_attach_mem";
+
+ PR_MEM("%s...\n", f);
+
+ dr_lock_status(hp->h_bd);
+ err = drmach_configure(cp->sbdev_id, 0);
+ dr_unlock_status(hp->h_bd);
+ if (err) {
+ DRERR_SET_C(&cp->sbdev_error, &err);
+ return;
+ }
+
+ ml = dr_get_memlist(mp);
+ for (mc = ml; mc; mc = mc->next) {
+ int rv;
+ sbd_error_t *err;
+
+ rv = kphysm_add_memory_dynamic(
+ (pfn_t)(mc->address >> PAGESHIFT),
+ (pgcnt_t)(mc->size >> PAGESHIFT));
+ if (rv != KPHYSM_OK) {
+ /*
+ * translate kphysm error and
+ * store in devlist error
+ */
+ switch (rv) {
+ case KPHYSM_ERESOURCE:
+ rv = ESBD_NOMEM;
+ break;
+
+ case KPHYSM_EFAULT:
+ rv = ESBD_FAULT;
+ break;
+
+ default:
+ rv = ESBD_INTERNAL;
+ break;
+ }
+
+ if (rv == ESBD_INTERNAL) {
+ DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
+ } else
+ dr_dev_err(CE_WARN, &mp->sbm_cm, rv);
+ break;
+ }
+
+ /* Tell the platform layer about the newly added span. */
+ err = drmach_mem_add_span(
+ mp->sbm_cm.sbdev_id, mc->address, mc->size);
+ if (err) {
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+ break;
+ }
+ }
+
+ memlist_delete(ml);
+
+ /* back out if configure failed */
+ if (mp->sbm_cm.sbdev_error != NULL) {
+ dr_lock_status(hp->h_bd);
+ err = drmach_unconfigure(cp->sbdev_id, 0);
+ if (err)
+ sbd_err_clear(&err);
+ dr_unlock_status(hp->h_bd);
+ }
+}
+
+/*
+ * dr_memlist_del_retired_pages - remove pages that hang off the
+ * retired-pages vnode from 'mlist' so their (unused) contents are not
+ * copied during copy-rename.  Returns the possibly-trimmed memlist.
+ */
+static struct memlist *
+dr_memlist_del_retired_pages(struct memlist *mlist)
+{
+ page_t *pp;
+ pfn_t pfn;
+ kmutex_t *vphm;
+ vnode_t *vp = &retired_pages;
+ static fn_t f = "dr_memlist_del_retired_pages";
+
+ vphm = page_vnode_mutex(vp);
+ mutex_enter(vphm);
+
+ PR_MEM("%s\n", f);
+
+ /* No retired pages at all: nothing to trim. */
+ if ((pp = vp->v_pages) == NULL) {
+ mutex_exit(vphm);
+ return (mlist);
+ }
+
+ do {
+ ASSERT(pp != NULL);
+ /*
+ * page_downgrade happens after page_hashin, so we
+ * can't assert PAGE_SE. Just assert locked to catch
+ * changes to the retired vnode locking scheme.
+ */
+ ASSERT(PAGE_LOCKED(pp));
+ ASSERT(pp->p_vnode == &retired_pages);
+
+ /* Pages we cannot lock are skipped (left in the list). */
+ if (!page_trylock(pp, SE_SHARED))
+ continue;
+
+ pfn = page_pptonum(pp);
+
+ ASSERT((pp->p_offset >> PAGESHIFT) == pfn);
+ /*
+ * Page retirement currently breaks large pages into PAGESIZE
+ * pages. If this changes, need to remove the assert and deal
+ * with different page sizes.
+ */
+ ASSERT(pp->p_szc == 0);
+
+ if (address_in_memlist(mlist, ptob(pfn), PAGESIZE)) {
+ mlist = memlist_del_span(mlist, ptob(pfn), PAGESIZE);
+ PR_MEM("deleted retired page 0x%lx (pfn 0x%lx) "
+ "from memlist\n", ptob(pfn), pfn);
+ }
+
+ page_unlock(pp);
+ } while ((pp = pp->p_vpnext) != vp->v_pages);
+
+ mutex_exit(vphm);
+
+ return (mlist);
+}
+
+#ifdef DEBUG
+/* Number of pages to artificially retire for testing; 0 disables. */
+int dbg_retirecnt = 10;
+
+/*
+ * dbg_page_retire - DEBUG-only fault injection: retire up to
+ * dbg_retirecnt free pages within the spans of r_ml, spacing the
+ * retirements out (dbg_skip/dbg_seq) to exercise the retired-page
+ * handling in the copy-rename path.
+ */
+static void
+dbg_page_retire(struct memlist *r_ml)
+{
+ struct memlist *t_ml;
+ page_t *pp, *epp;
+ pfn_t pfn, epfn;
+ struct memseg *seg;
+
+ int dbg_retired = 0;
+ int dbg_skip = 10;
+ int dbg_seq = 1;
+
+ if (r_ml == NULL)
+ return;
+
+ for (t_ml = r_ml; (t_ml != NULL); t_ml = t_ml->next) {
+ pfn = _b64top(t_ml->address);
+ epfn = _b64top(t_ml->address + t_ml->size);
+
+ /* Walk every memseg overlapping this span. */
+ for (seg = memsegs; seg != NULL; seg = seg->next) {
+ int retire = 0;
+ int skip = 0;
+ if (pfn >= seg->pages_end || epfn < seg->pages_base)
+ continue;
+
+ pp = seg->pages;
+ if (pfn > seg->pages_base)
+ pp += pfn - seg->pages_base;
+
+ epp = seg->epages;
+ if (epfn < seg->pages_end)
+ epp -= seg->pages_end - epfn;
+
+ ASSERT(pp < epp);
+/* Old page_settoxic()-based variant, retained for reference. */
+#if 0
+ while (pp < epp) {
+ if (PP_ISFREE(pp) && !page_isfaulty(pp)) {
+ if (retire++ < dbg_seq) {
+ page_settoxic(pp,
+ PAGE_IS_FAULTY);
+ page_retire(pp,
+ PAGE_IS_FAILING);
+ if (++dbg_retired >=
+ dbg_retirecnt)
+ return;
+ } else if (skip++ >= dbg_skip) {
+ skip = 0;
+ retire = 0;
+ dbg_seq++;
+ }
+ }
+ pp++;
+ }
+#endif /* 0 */
+ while (pp < epp) {
+ if (PP_ISFREE(pp)) {
+ if (retire++ < dbg_seq) {
+ page_retire(t_ml->address,
+ PR_OK);
+ if (++dbg_retired >=
+ dbg_retirecnt)
+ return;
+ } else if (skip++ >= dbg_skip) {
+ skip = 0;
+ retire = 0;
+ dbg_seq++;
+ }
+ }
+ pp++;
+ }
+ }
+ }
+}
+#endif
+
+/*
+ * dr_move_memory - perform the copy-rename from source memunit s_mp to
+ * target memunit t_mp: build the copy list, quiesce the OS, run the
+ * platform copy-rename program, then resume.  Returns 0 on success,
+ * -1 on failure (error recorded in s_mp->sbm_cm.sbdev_error).
+ */
+static int
+dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
+{
+ int rv = -1;
+ time_t copytime;
+ drmachid_t cr_id;
+ dr_sr_handle_t *srhp = NULL;
+ dr_board_t *t_bp, *s_bp;
+ struct memlist *c_ml, *d_ml;
+ sbd_error_t *err;
+ static fn_t f = "dr_move_memory";
+
+ PR_MEM("%s: (INLINE) moving memory from %s to %s\n",
+ f,
+ s_mp->sbm_cm.sbdev_path,
+ t_mp->sbm_cm.sbdev_path);
+
+ ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE);
+ ASSERT(s_mp->sbm_peer == t_mp);
+ ASSERT(s_mp->sbm_mlist);
+
+ ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
+ ASSERT(t_mp->sbm_peer == s_mp);
+
+#ifdef DEBUG
+ if (dbg_retirecnt)
+ dbg_page_retire(s_mp->sbm_mlist);
+#endif
+
+ /*
+ * create a memlist of spans to copy by removing
+ * the spans that have been deleted, if any, from
+ * the full source board memlist. s_mp->sbm_del_mlist
+ * will be NULL if there were no spans deleted from
+ * the source board.
+ */
+ c_ml = memlist_dup(s_mp->sbm_mlist);
+ d_ml = s_mp->sbm_del_mlist;
+ while (d_ml != NULL) {
+ c_ml = memlist_del_span(c_ml, d_ml->address, d_ml->size);
+ d_ml = d_ml->next;
+ }
+
+ /*
+ * Remove retired pages from the copy list. The page content
+ * need not be copied since the pages are no longer in use.
+ */
+ PR_MEM("%s: copy list before removing retired pages (if any):\n", f);
+ PR_MEMLIST_DUMP(c_ml);
+
+ c_ml = dr_memlist_del_retired_pages(c_ml);
+
+ PR_MEM("%s: copy list after removing retired pages:\n", f);
+ PR_MEMLIST_DUMP(c_ml);
+
+ /*
+ * With parallel copy, it shouldn't make a difference which
+ * CPU is the actual master during copy-rename since all
+ * CPUs participate in the parallel copy anyway.
+ */
+ affinity_set(CPU_CURRENT);
+
+ /*
+ * NOTE(review): c_ml is only freed here on init failure;
+ * presumably ownership passes to the copy-rename engine on
+ * success (freed by drmach_copy_rename_fini) -- confirm.
+ */
+ err = drmach_copy_rename_init(
+ t_mp->sbm_cm.sbdev_id, s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
+ if (err) {
+ DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
+ affinity_clear();
+ memlist_delete(c_ml);
+ return (-1);
+ }
+
+ srhp = dr_get_sr_handle(hp);
+ ASSERT(srhp);
+
+ /* Timestamp (in ticks) for the elapsed-time report below. */
+ copytime = lbolt;
+
+ /* Quiesce the OS. */
+ if (dr_suspend(srhp)) {
+ cmn_err(CE_WARN, "%s: failed to quiesce OS"
+ " for copy-rename", f);
+
+ err = drmach_copy_rename_fini(cr_id);
+ if (err) {
+ /*
+ * no error is expected since the program has
+ * not yet run.
+ */
+
+ /* catch this in debug kernels */
+ ASSERT(0);
+
+ sbd_err_clear(&err);
+ }
+
+ /* suspend error reached via hp */
+ s_mp->sbm_cm.sbdev_error = hp->h_err;
+ hp->h_err = NULL;
+ goto done;
+ }
+
+ drmach_copy_rename(cr_id);
+
+ /* Resume the OS. */
+ dr_resume(srhp);
+
+ copytime = lbolt - copytime;
+
+ if (err = drmach_copy_rename_fini(cr_id))
+ goto done;
+
+ /*
+ * Rename memory for lgroup.
+ * Source and target board numbers are packaged in arg.
+ */
+ s_bp = s_mp->sbm_cm.sbdev_bp;
+ t_bp = t_mp->sbm_cm.sbdev_bp;
+
+ lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
+ (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16)));
+
+
+ PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n",
+ f, copytime, copytime / hz);
+
+ rv = 0;
+done:
+ if (srhp)
+ dr_release_sr_handle(srhp);
+ if (err)
+ DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
+ affinity_clear();
+
+ return (rv);
+}
+
+/*
+ * If detaching node contains memory that is "non-permanent"
+ * then the memory adr's are simply cleared. If the memory
+ * is non-relocatable, then do a copy-rename.
+ */
+void
+dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
+{
+ int rv = 0;
+ dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp;
+ dr_mem_unit_t *t_mp;
+ dr_state_t state;
+ static fn_t f = "dr_detach_mem";
+
+ PR_MEM("%s...\n", f);
+
+ /* lookup target mem unit and target board structure, if any */
+ if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
+ t_mp = s_mp->sbm_peer;
+ ASSERT(t_mp != NULL);
+ ASSERT(t_mp->sbm_peer == s_mp);
+ } else {
+ t_mp = NULL;
+ }
+
+ /* verify mem unit's state is UNREFERENCED */
+ state = s_mp->sbm_cm.sbdev_state;
+ if (state != DR_STATE_UNREFERENCED) {
+ dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE);
+ return;
+ }
+
+ /* verify target mem unit's state is UNREFERENCED, if any */
+ if (t_mp != NULL) {
+ state = t_mp->sbm_cm.sbdev_state;
+ if (state != DR_STATE_UNREFERENCED) {
+ dr_dev_err(CE_IGNORE, &t_mp->sbm_cm, ESBD_STATE);
+ return;
+ }
+ }
+
+ /*
+ * If there is no target board (no copy/rename was needed), then
+ * we're done!
+ */
+ if (t_mp == NULL) {
+ sbd_error_t *err;
+ /*
+ * Reprogram interconnect hardware and disable
+ * memory controllers for memory node that's going away.
+ */
+
+ err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id);
+ if (err) {
+ DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
+ rv = -1;
+ }
+ } else {
+ /* Non-relocatable memory present: do the copy-rename. */
+ rv = dr_move_memory(hp, s_mp, t_mp);
+ PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n",
+ f,
+ rv ? "FAILED" : "COMPLETED",
+ s_mp->sbm_cm.sbdev_bp->b_num,
+ t_mp->sbm_cm.sbdev_bp->b_num);
+
+ /* On failure, undo the source unit's release state. */
+ if (rv != 0)
+ (void) dr_cancel_mem(s_mp);
+ }
+
+ if (rv == 0) {
+ sbd_error_t *err;
+
+ dr_lock_status(hp->h_bd);
+ err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id, 0);
+ dr_unlock_status(hp->h_bd);
+ if (err)
+ sbd_err_clear(&err);
+ }
+}
+
+/*
+ * This routine acts as a wrapper for kphysm_del_span_query in order to
+ * support potential memory holes in a board's physical address space.
+ * It calls kphysm_del_span_query for each node in a memlist and accumulates
+ * the results in *mp.
+ */
+static int
+dr_del_mlist_query(struct memlist *mlist, memquery_t *mp)
+{
+ struct memlist *ml;
+ int rv = 0;
+
+
+ if (mlist == NULL)
+ cmn_err(CE_WARN, "dr_del_mlist_query: mlist=NULL\n");
+
+ /* Start from an empty accumulated result. */
+ mp->phys_pages = 0;
+ mp->managed = 0;
+ mp->nonrelocatable = 0;
+ /* Sentinel: no non-relocatable page seen yet. */
+ mp->first_nonrelocatable = (pfn_t)-1; /* XXX */
+ mp->last_nonrelocatable = 0;
+
+ for (ml = mlist; ml; ml = ml->next) {
+ memquery_t mq;
+
+ rv = kphysm_del_span_query(
+ _b64top(ml->address), _b64top(ml->size), &mq);
+ if (rv)
+ break;
+
+ mp->phys_pages += mq.phys_pages;
+ mp->managed += mq.managed;
+ mp->nonrelocatable += mq.nonrelocatable;
+
+ /* Track the overall first/last non-relocatable pfns. */
+ if (mq.nonrelocatable != 0) {
+ if (mq.first_nonrelocatable < mp->first_nonrelocatable)
+ mp->first_nonrelocatable =
+ mq.first_nonrelocatable;
+ if (mq.last_nonrelocatable > mp->last_nonrelocatable)
+ mp->last_nonrelocatable =
+ mq.last_nonrelocatable;
+ }
+ }
+
+ if (mp->nonrelocatable == 0)
+ mp->first_nonrelocatable = 0; /* XXX */
+
+ return (rv);
+}
+
+/*
+ * NOTE: This routine is only partially smart about multiple
+ * mem-units. Need to make mem-status structure smart
+ * about them also.
+ */
+/*
+ * dr_mem_status - fill in sbd_dev_stat_t entries for each present mem
+ * unit on the board selected by 'devset'.  Returns the number of
+ * entries written.
+ */
+int
+dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
+{
+ int m, mix;
+ memdelstat_t mdst;
+ memquery_t mq;
+ dr_board_t *bp;
+ dr_mem_unit_t *mp;
+ sbd_mem_stat_t *msp;
+ static fn_t f = "dr_mem_status";
+
+ bp = hp->h_bd;
+ devset &= DR_DEVS_PRESENT(bp);
+
+ for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
+ int rv;
+ sbd_error_t *err;
+ drmach_status_t pstat;
+ dr_mem_unit_t *p_mp;
+
+ if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
+ continue;
+
+ mp = dr_get_mem_unit(bp, m);
+
+ if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) {
+ /* present, but not fully initialized */
+ continue;
+ }
+
+ if (mp->sbm_cm.sbdev_id == (drmachid_t)0)
+ continue;
+
+ /* fetch platform status */
+ err = drmach_status(mp->sbm_cm.sbdev_id, &pstat);
+ if (err) {
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+ continue;
+ }
+
+ msp = &dsp->d_mem;
+ bzero((caddr_t)msp, sizeof (*msp));
+
+ /*
+ * NOTE(review): strncpy() does not guarantee NUL
+ * termination if pstat.type fills c_name exactly --
+ * confirm consumers tolerate this.
+ */
+ strncpy(msp->ms_cm.c_id.c_name, pstat.type,
+ sizeof (msp->ms_cm.c_id.c_name));
+ msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
+ msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT;
+ msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
+ msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy;
+ msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
+ msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate;
+
+ msp->ms_totpages = mp->sbm_npages;
+ msp->ms_basepfn = mp->sbm_basepfn;
+ msp->ms_pageslost = mp->sbm_pageslost;
+ msp->ms_cage_enabled = kcage_on;
+
+ /* Peer unit exists only during a copy-rename reservation. */
+ if (mp->sbm_flags & DR_MFLAG_RESERVED)
+ p_mp = mp->sbm_peer;
+ else
+ p_mp = NULL;
+
+ if (p_mp == NULL) {
+ msp->ms_peer_is_target = 0;
+ msp->ms_peer_ap_id[0] = '\0';
+ } else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) {
+ char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+ char *minor;
+
+ /*
+ * b_dip doesn't have to be held for ddi_pathname()
+ * because the board struct (dr_board_t) will be
+ * destroyed before b_dip detaches.
+ */
+ (void) ddi_pathname(bp->b_dip, path);
+ minor = strchr(p_mp->sbm_cm.sbdev_path, ':');
+
+ snprintf(msp->ms_peer_ap_id,
+ sizeof (msp->ms_peer_ap_id), "%s%s",
+ path, (minor == NULL) ? "" : minor);
+
+ kmem_free(path, MAXPATHLEN);
+
+ if (p_mp->sbm_flags & DR_MFLAG_TARGET)
+ msp->ms_peer_is_target = 1;
+ }
+
+ if (mp->sbm_flags & DR_MFLAG_RELOWNER)
+ rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
+ else
+ rv = KPHYSM_EHANDLE; /* force 'if' to fail */
+
+ if (rv == KPHYSM_OK) {
+ /*
+ * Any pages above managed is "free",
+ * i.e. it's collected.
+ */
+ msp->ms_detpages += (uint_t)(mdst.collected +
+ mdst.phys_pages - mdst.managed);
+ } else {
+ /*
+ * If we're UNREFERENCED or UNCONFIGURED,
+ * then the number of detached pages is
+ * however many pages are on the board.
+ * I.e. detached = not in use by OS.
+ */
+ switch (msp->ms_cm.c_ostate) {
+ /*
+ * changed to use cfgadm states
+ *
+ * was:
+ * case DR_STATE_UNREFERENCED:
+ * case DR_STATE_UNCONFIGURED:
+ */
+ case SBD_STAT_UNCONFIGURED:
+ msp->ms_detpages = msp->ms_totpages;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /*
+ * kphysm_del_span_query can report non-reloc pages = total
+ * pages for memory that is not yet configured
+ */
+ if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) {
+ struct memlist *ml;
+
+ ml = dr_get_memlist(mp);
+ rv = ml ? dr_del_mlist_query(ml, &mq) : -1;
+ memlist_delete(ml);
+
+ if (rv == KPHYSM_OK) {
+ msp->ms_managed_pages = mq.managed;
+ msp->ms_noreloc_pages = mq.nonrelocatable;
+ msp->ms_noreloc_first =
+ mq.first_nonrelocatable;
+ msp->ms_noreloc_last =
+ mq.last_nonrelocatable;
+ msp->ms_cm.c_sflags = 0;
+ if (mq.nonrelocatable) {
+ SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
+ msp->ms_cm.c_sflags);
+ }
+ } else {
+ PR_MEM("%s: kphysm_del_span_query() = %d\n",
+ f, rv);
+ }
+ }
+
+ /*
+ * Check source unit state during copy-rename
+ */
+ if ((mp->sbm_flags & DR_MFLAG_SOURCE) &&
+ (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED ||
+ mp->sbm_cm.sbdev_state == DR_STATE_RELEASE))
+ msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED;
+
+ mix++;
+ dsp++;
+ }
+
+ return (mix);
+}
+
+/*
+ * dr_pre_attach_mem - pre-configure pass: enable the memory hardware
+ * for each unit in devlist.  Returns 0 on success, -1 on first error.
+ */
+int
+dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
+{
+ _NOTE(ARGUNUSED(hp))
+
+ int err_flag = 0;
+ int d;
+ sbd_error_t *err;
+ static fn_t f = "dr_pre_attach_mem";
+
+ PR_MEM("%s...\n", f);
+
+ for (d = 0; d < devnum; d++) {
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
+ dr_state_t state;
+
+ cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path);
+
+ state = mp->sbm_cm.sbdev_state;
+ switch (state) {
+ case DR_STATE_UNCONFIGURED:
+ PR_MEM("%s: recovering from UNCONFIG for %s\n",
+ f,
+ mp->sbm_cm.sbdev_path);
+
+ /* use memlist cached by dr_post_detach_mem_unit */
+ ASSERT(mp->sbm_mlist != NULL);
+ PR_MEM("%s: re-configuring cached memlist for %s:\n",
+ f, mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(mp->sbm_mlist);
+
+ /* kphysm del handle should be have been freed */
+ ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);
+
+ /*FALLTHROUGH*/
+
+ case DR_STATE_CONNECTED:
+ PR_MEM("%s: reprogramming mem hardware on %s\n",
+ f, mp->sbm_cm.sbdev_bp->b_path);
+
+ PR_MEM("%s: enabling %s\n",
+ f, mp->sbm_cm.sbdev_path);
+
+ err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
+ if (err) {
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+ err_flag = 1;
+ }
+ break;
+
+ default:
+ dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
+ err_flag = 1;
+ break;
+ }
+
+ /* exit for loop if error encountered */
+ if (err_flag)
+ break;
+ }
+
+ return (err_flag ? -1 : 0);
+}
+
+/*
+ * dr_post_attach_mem - post-configure pass: verify each unit's memory
+ * is now in phys_install, register its spans with the platform layer,
+ * and discard any memlist cached by a prior unconfigure.
+ * Always returns 0; per-unit errors are recorded in sbdev_error.
+ */
+int
+dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
+{
+ _NOTE(ARGUNUSED(hp))
+
+ int d;
+ static fn_t f = "dr_post_attach_mem";
+
+ PR_MEM("%s...\n", f);
+
+ for (d = 0; d < devnum; d++) {
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
+ struct memlist *mlist, *ml;
+
+ mlist = dr_get_memlist(mp);
+ if (mlist == NULL) {
+ dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_MEMFAIL);
+ continue;
+ }
+
+ /*
+ * Verify the memory really did successfully attach
+ * by checking for its existence in phys_install.
+ */
+ memlist_read_lock();
+ if (memlist_intersect(phys_install, mlist) == 0) {
+ memlist_read_unlock();
+
+ DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
+
+ PR_MEM("%s: %s memlist not in phys_install",
+ f, mp->sbm_cm.sbdev_path);
+
+ memlist_delete(mlist);
+ continue;
+ }
+ memlist_read_unlock();
+
+ for (ml = mlist; ml != NULL; ml = ml->next) {
+ sbd_error_t *err;
+
+ err = drmach_mem_add_span(
+ mp->sbm_cm.sbdev_id,
+ ml->address,
+ ml->size);
+ if (err)
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+ }
+
+ memlist_delete(mlist);
+
+ /*
+ * Destroy cached memlist, if any.
+ * There will be a cached memlist in sbm_mlist if
+ * this board is being configured directly after
+ * an unconfigure.
+ * To support this transition, dr_post_detach_mem
+ * left a copy of the last known memlist in sbm_mlist.
+ * This memlist could differ from any derived from
+ * hardware if while this memunit was last configured
+ * the system detected and deleted bad pages from
+ * phys_install. The location of those bad pages
+ * will be reflected in the cached memlist.
+ */
+ if (mp->sbm_mlist) {
+ memlist_delete(mp->sbm_mlist);
+ mp->sbm_mlist = NULL;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * dr_pre_detach_mem - pre-unconfigure pass: only announces the
+ * impending unconfigure of each unit on the console.  Always returns 0.
+ */
+int
+dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
+{
+ _NOTE(ARGUNUSED(hp))
+
+ int d;
+
+ for (d = 0; d < devnum; d++) {
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
+
+ cmn_err(CE_CONT, "OS unconfigure %s", mp->sbm_cm.sbdev_path);
+ }
+
+ return (0);
+}
+
+/*
+ * dr_post_detach_mem - post-unconfigure pass: finish per-unit detach
+ * via dr_post_detach_mem_unit().  Returns -1 if any unit failed.
+ */
+int
+dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
+{
+ _NOTE(ARGUNUSED(hp))
+
+ int d, rv;
+ static fn_t f = "dr_post_detach_mem";
+
+ PR_MEM("%s...\n", f);
+
+ rv = 0;
+ for (d = 0; d < devnum; d++) {
+ dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
+
+ ASSERT(mp->sbm_cm.sbdev_bp == hp->h_bd);
+
+ /* Continue through remaining units even after a failure. */
+ if (dr_post_detach_mem_unit(mp))
+ rv = -1;
+ }
+
+ return (rv);
+}
+
+/*
+ * dr_add_memory_spans - add every span of 'ml' back into the OS via
+ * kphysm_add_memory_dynamic() and register it with the platform layer.
+ */
+static void
+dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
+{
+ static fn_t f = "dr_add_memory_spans";
+
+ PR_MEM("%s...", f);
+ PR_MEMLIST_DUMP(ml);
+
+#ifdef DEBUG
+ /* Sanity check: these spans should not already be installed. */
+ memlist_read_lock();
+ if (memlist_intersect(phys_install, ml)) {
+ PR_MEM("%s:WARNING: memlist intersects with phys_install\n", f);
+ }
+ memlist_read_unlock();
+#endif
+
+ for (; ml; ml = ml->next) {
+ pfn_t base;
+ pgcnt_t npgs;
+ int rv;
+ sbd_error_t *err;
+
+ base = _b64top(ml->address);
+ npgs = _b64top(ml->size);
+
+ rv = kphysm_add_memory_dynamic(base, npgs);
+
+ /*
+ * NOTE(review): the span is registered with the platform
+ * layer even when kphysm_add_memory_dynamic() failed --
+ * confirm this ordering is intentional.
+ */
+ err = drmach_mem_add_span(
+ mp->sbm_cm.sbdev_id,
+ ml->address,
+ ml->size);
+
+ if (err)
+ DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
+
+ if (rv != KPHYSM_OK) {
+ cmn_err(CE_WARN, "%s:"
+ " unexpected kphysm_add_memory_dynamic"
+ " return value %d;"
+ " basepfn=0x%lx, npages=%ld\n",
+ f, rv, base, npgs);
+
+ continue;
+ }
+ }
+}
+
+static int
+dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
+{
+ uint64_t sz = s_mp->sbm_slice_size;
+ uint64_t sm = sz - 1;
+ /* old and new below refer to PAs before and after copy-rename */
+ uint64_t s_old_basepa, s_new_basepa;
+ uint64_t t_old_basepa, t_new_basepa;
+ dr_mem_unit_t *t_mp, *x_mp;
+ drmach_mem_info_t minfo;
+ struct memlist *ml;
+ struct memlist *t_excess_mlist;
+ int rv;
+ int s_excess_mem_deleted = 0;
+ sbd_error_t *err;
+ static fn_t f = "dr_post_detach_mem_unit";
+
+ PR_MEM("%s...\n", f);
+
+ /* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
+ PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
+ f, s_mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
+
+ /* sanity check */
+ ASSERT(s_mp->sbm_del_mlist == NULL ||
+ (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);
+
+ if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
+ t_mp = s_mp->sbm_peer;
+ ASSERT(t_mp != NULL);
+ ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
+ ASSERT(t_mp->sbm_peer == s_mp);
+
+ ASSERT(t_mp->sbm_flags & DR_MFLAG_RELDONE);
+ ASSERT(t_mp->sbm_del_mlist);
+
+ PR_MEM("%s: target %s: deleted memlist:\n",
+ f, t_mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(t_mp->sbm_del_mlist);
+ } else {
+ /* this is no target unit */
+ t_mp = NULL;
+ }
+
+ /*
+ * Verify the memory really did successfully detach
+ * by checking for its non-existence in phys_install.
+ */
+ rv = 0;
+ memlist_read_lock();
+ if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
+ x_mp = s_mp;
+ rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
+ }
+ if (rv == 0 && t_mp && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
+ x_mp = t_mp;
+ rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
+ }
+ memlist_read_unlock();
+
+ if (rv) {
+ /* error: memlist still in phys_install */
+ DR_DEV_INTERNAL_ERROR(&x_mp->sbm_cm);
+ }
+
+ /*
+ * clean mem unit state and bail out if an error has been recorded.
+ */
+ rv = 0;
+ if (s_mp->sbm_cm.sbdev_error) {
+ PR_MEM("%s: %s flags=%x", f,
+ s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
+ DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
+ DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
+ dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
+ rv = -1;
+ }
+ if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) {
+ PR_MEM("%s: %s flags=%x", f,
+ s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
+ DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
+ DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
+ dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
+ rv = -1;
+ }
+ if (rv)
+ goto cleanup;
+
+ s_old_basepa = _ptob64(s_mp->sbm_basepfn);
+ err = drmach_mem_get_info(s_mp->sbm_cm.sbdev_id, &minfo);
+ ASSERT(err == NULL);
+ s_new_basepa = minfo.mi_basepa;
+
+ PR_MEM("%s:s_old_basepa: 0x%lx\n", f, s_old_basepa);
+ PR_MEM("%s:s_new_basepa: 0x%lx\n", f, s_new_basepa);
+
+ if (t_mp != NULL) {
+ struct memlist *s_copy_mlist;
+
+ t_old_basepa = _ptob64(t_mp->sbm_basepfn);
+ err = drmach_mem_get_info(t_mp->sbm_cm.sbdev_id, &minfo);
+ ASSERT(err == NULL);
+ t_new_basepa = minfo.mi_basepa;
+
+ PR_MEM("%s:t_old_basepa: 0x%lx\n", f, t_old_basepa);
+ PR_MEM("%s:t_new_basepa: 0x%lx\n", f, t_new_basepa);
+
+ /*
+ * Construct copy list with original source addresses.
+ * Used to add back excess target mem.
+ */
+ s_copy_mlist = memlist_dup(s_mp->sbm_mlist);
+ for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
+ s_copy_mlist = memlist_del_span(s_copy_mlist,
+ ml->address, ml->size);
+ }
+
+ PR_MEM("%s: source copy list:\n:", f);
+ PR_MEMLIST_DUMP(s_copy_mlist);
+
+ /*
+ * We had to swap mem-units, so update
+ * memlists accordingly with new base
+ * addresses.
+ */
+ for (ml = t_mp->sbm_mlist; ml; ml = ml->next) {
+ ml->address -= t_old_basepa;
+ ml->address += t_new_basepa;
+ }
+
+ /*
+ * There is no need to explicitly rename the target delete
+ * memlist, because sbm_del_mlist and sbm_mlist always
+ * point to the same memlist for a copy/rename operation.
+ */
+ ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
+
+ PR_MEM("%s: renamed target memlist and delete memlist:\n", f);
+ PR_MEMLIST_DUMP(t_mp->sbm_mlist);
+
+ for (ml = s_mp->sbm_mlist; ml; ml = ml->next) {
+ ml->address -= s_old_basepa;
+ ml->address += s_new_basepa;
+ }
+
+ PR_MEM("%s: renamed source memlist:\n", f);
+ PR_MEMLIST_DUMP(s_mp->sbm_mlist);
+
+ /*
+ * Keep track of dynamically added segments
+ * since they cannot be split if we need to delete
+ * excess source memory later for this board.
+ */
+ if (t_mp->sbm_dyn_segs)
+ memlist_delete(t_mp->sbm_dyn_segs);
+ t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs;
+ s_mp->sbm_dyn_segs = NULL;
+
+ /*
+ * Add back excess target memory.
+ * Subtract out the portion of the target memory
+ * node that was taken over by the source memory
+ * node.
+ */
+ t_excess_mlist = memlist_dup(t_mp->sbm_mlist);
+ for (ml = s_copy_mlist; ml; ml = ml->next) {
+ t_excess_mlist =
+ memlist_del_span(t_excess_mlist,
+ ml->address, ml->size);
+ }
+
+ /*
+ * Update dynamically added segs
+ */
+ for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
+ t_mp->sbm_dyn_segs =
+ memlist_del_span(t_mp->sbm_dyn_segs,
+ ml->address, ml->size);
+ }
+ for (ml = t_excess_mlist; ml; ml = ml->next) {
+ t_mp->sbm_dyn_segs =
+ memlist_cat_span(t_mp->sbm_dyn_segs,
+ ml->address, ml->size);
+ }
+ PR_MEM("%s: %s: updated dynamic seg list:\n",
+ f, t_mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs);
+
+ if (t_excess_mlist != NULL) {
+ /*
+ * After the small <-> big copy-rename,
+ * the original address space for the
+ * source board may have excess to be
+ * deleted. This is a case different
+ * from the big->small excess source
+ * memory case listed below.
+ * Remove s_mp->sbm_del_mlist from
+ * the kernel cage glist.
+ */
+ for (ml = s_mp->sbm_del_mlist; ml;
+ ml = ml->next) {
+ PR_MEM("%s: delete small<->big copy-"
+ "rename source excess memory", f);
+ PR_MEMLIST_DUMP(ml);
+
+ err = drmach_mem_del_span(
+ s_mp->sbm_cm.sbdev_id,
+ ml->address, ml->size);
+ if (err)
+ DRERR_SET_C(&s_mp->
+ sbm_cm.sbdev_error, &err);
+ ASSERT(err == NULL);
+ }
+
+ /*
+ * mark sbm_del_mlist as been deleted so that
+ * we won't end up to delete it twice later
+ * from the span list
+ */
+ s_excess_mem_deleted = 1;
+
+ PR_MEM("%s: adding back remaining portion"
+ " of %s, memlist:\n",
+ f, t_mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(t_excess_mlist);
+
+ dr_add_memory_spans(s_mp, t_excess_mlist);
+ memlist_delete(t_excess_mlist);
+ }
+ memlist_delete(s_copy_mlist);
+
+#ifdef DEBUG
+ /*
+ * s_mp->sbm_del_mlist may still needed
+ */
+ PR_MEM("%s: source delete memeory flag %d",
+ f, s_excess_mem_deleted);
+ PR_MEM("%s: source delete memlist", f);
+ PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);
+#endif
+
+ }
+
+ if (t_mp != NULL) {
+ /* delete target's entire address space */
+ err = drmach_mem_del_span(
+ t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz);
+ if (err)
+ DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err);
+ ASSERT(err == NULL);
+
+ /*
+ * After the copy/rename, the original address space
+ * for the source board (which is now located on the
+ * target board) may now have some excess to be deleted.
+ * Those excess memory on the source board are kept in
+ * source board's sbm_del_mlist
+ */
+ for (ml = s_mp->sbm_del_mlist; !s_excess_mem_deleted && ml;
+ ml = ml->next) {
+ PR_MEM("%s: delete source excess memory", f);
+ PR_MEMLIST_DUMP(ml);
+
+ err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
+ ml->address, ml->size);
+ if (err)
+ DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
+ ASSERT(err == NULL);
+ }
+
+ } else {
+ /* delete board's entire address space */
+ err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id,
+ s_old_basepa & ~ sm, sz);
+ if (err)
+ DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
+ ASSERT(err == NULL);
+ }
+
+cleanup:
+ /* clean up target mem unit */
+ if (t_mp != NULL) {
+ memlist_delete(t_mp->sbm_del_mlist);
+ /* no need to delete sbm_mlist, it shares sbm_del_mlist */
+
+ t_mp->sbm_del_mlist = NULL;
+ t_mp->sbm_mlist = NULL;
+ t_mp->sbm_peer = NULL;
+ t_mp->sbm_flags = 0;
+ t_mp->sbm_cm.sbdev_busy = 0;
+ dr_init_mem_unit_data(t_mp);
+
+ }
+ if (t_mp != NULL && t_mp->sbm_cm.sbdev_error == NULL) {
+ /*
+ * now that copy/rename has completed, undo this
+ * work that was done in dr_release_mem_done.
+ */
+ DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
+ DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
+ dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
+ }
+
+ /*
+ * clean up (source) board's mem unit structure.
+ * NOTE: sbm_mlist is retained if no error has been record (in other
+ * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is
+ * referred to elsewhere as the cached memlist. The cached memlist
+ * is used to re-attach (configure back in) this memunit from the
+ * unconfigured state. The memlist is retained because it may
+ * represent bad pages that were detected while the memory was
+ * configured into the OS. The OS deletes bad pages from phys_install.
+ * Those deletes, if any, will be represented in the cached mlist.
+ */
+ if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist)
+ memlist_delete(s_mp->sbm_del_mlist);
+
+ if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) {
+ memlist_delete(s_mp->sbm_mlist);
+ s_mp->sbm_mlist = NULL;
+ }
+
+ if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) {
+ memlist_delete(s_mp->sbm_dyn_segs);
+ s_mp->sbm_dyn_segs = NULL;
+ }
+
+ s_mp->sbm_del_mlist = NULL;
+ s_mp->sbm_peer = NULL;
+ s_mp->sbm_flags = 0;
+ s_mp->sbm_cm.sbdev_busy = 0;
+ dr_init_mem_unit_data(s_mp);
+
+ PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path);
+ PR_MEMLIST_DUMP(s_mp->sbm_mlist);
+
+ return (0);
+}
+
+/*
+ * Successful return from this function will have the memory
+ * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated
+ * and waiting. This routine's job is to select the memory that
+ * actually has to be released (detached) which may not necessarily
+ * be the same memory node that came in in devlist[],
+ * i.e. a copy-rename is needed.
+ *
+ * Returns 0 when every unit in devlist[] was processed cleanly,
+ * -1 if any unit recorded an error.  Per-unit failures either
+ * continue to the next unit or break out of the loop entirely,
+ * depending on whether the remaining units are still worth
+ * examining.
+ */
+int
+dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
+{
+	int		d;
+	int		err_flag = 0;
+	static fn_t	f = "dr_pre_release_mem";
+
+	PR_MEM("%s...\n", f);
+
+	for (d = 0; d < devnum; d++) {
+		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
+		int		rv;
+		memquery_t	mq;
+		struct memlist	*ml;
+
+		/* unit already has an error recorded; skip it */
+		if (mp->sbm_cm.sbdev_error) {
+			err_flag = 1;
+			continue;
+		} else if (!kcage_on) {
+			/* memory delete requires the kernel cage */
+			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_KCAGE_OFF);
+			err_flag = 1;
+			continue;
+		}
+
+		if (mp->sbm_flags & DR_MFLAG_RESERVED) {
+			/*
+			 * Board is currently involved in a delete
+			 * memory operation. Can't detach this guy until
+			 * that operation completes.
+			 */
+			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_INVAL);
+			err_flag = 1;
+			break;
+		}
+
+		/* flags should be clean at this time */
+		ASSERT(mp->sbm_flags == 0);
+
+		ASSERT(mp->sbm_mlist == NULL);
+		ASSERT(mp->sbm_del_mlist == NULL);
+		/* defensive cleanup for non-DEBUG kernels */
+		if (mp->sbm_mlist != NULL) {
+			memlist_delete(mp->sbm_mlist);
+			mp->sbm_mlist = NULL;
+		}
+
+		ml = dr_get_memlist(mp);
+		if (ml == NULL) {
+			err_flag = 1;
+			PR_MEM("%s: no memlist found for %s\n",
+			    f, mp->sbm_cm.sbdev_path);
+			continue;
+		}
+
+		/*
+		 * Check whether the detaching memory requires a
+		 * copy-rename.
+		 */
+		ASSERT(mp->sbm_npages != 0);
+		rv = dr_del_mlist_query(ml, &mq);
+		if (rv != KPHYSM_OK) {
+			memlist_delete(ml);
+			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
+			err_flag = 1;
+			break;
+		}
+
+		if (mq.nonrelocatable != 0) {
+			if (!(dr_cmd_flags(hp) &
+			    (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) {
+				memlist_delete(ml);
+				/* caller wasn't prompted for a suspend */
+				dr_dev_err(CE_WARN, &mp->sbm_cm,
+				    ESBD_QUIESCE_REQD);
+				err_flag = 1;
+				break;
+			}
+		}
+
+		/* allocate a kphysm handle */
+		rv = kphysm_del_gethandle(&mp->sbm_memhandle);
+		if (rv != KPHYSM_OK) {
+			memlist_delete(ml);
+
+			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
+			err_flag = 1;
+			break;
+		}
+		mp->sbm_flags |= DR_MFLAG_RELOWNER;
+
+		if ((mq.nonrelocatable != 0) ||
+		    dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) {
+			/*
+			 * Either the detaching memory node contains
+			 * non-reloc memory or we failed to reserve the
+			 * detaching memory node (which did _not_ have
+			 * any non-reloc memory, i.e. some non-reloc mem
+			 * got onboard).
+			 */
+
+			if (dr_select_mem_target(hp, mp, ml)) {
+				/* NOTE(review): shadows the outer rv */
+				int rv;
+
+				/*
+				 * We had no luck locating a target
+				 * memory node to be the recipient of
+				 * the non-reloc memory on the node
+				 * we're trying to detach.
+				 * Clean up be disposing the mem handle
+				 * and the mem list.
+				 */
+				rv = kphysm_del_release(mp->sbm_memhandle);
+				if (rv != KPHYSM_OK) {
+					/*
+					 * can do nothing but complain
+					 * and hope helpful for debug
+					 */
+					cmn_err(CE_WARN, "%s: unexpected"
+					    " kphysm_del_release return"
+					    " value %d",
+					    f, rv);
+				}
+				mp->sbm_flags &= ~DR_MFLAG_RELOWNER;
+
+				memlist_delete(ml);
+
+				/* make sure sbm_flags is clean */
+				ASSERT(mp->sbm_flags == 0);
+
+				dr_dev_err(CE_WARN,
+				    &mp->sbm_cm, ESBD_NO_TARGET);
+
+				err_flag = 1;
+				break;
+			}
+
+			/*
+			 * ml is not memlist_delete'd here because
+			 * it has been assigned to mp->sbm_mlist
+			 * by dr_select_mem_target.
+			 */
+		} else {
+			/* no target needed to detach this board */
+			mp->sbm_flags |= DR_MFLAG_RESERVED;
+			mp->sbm_peer = NULL;
+			mp->sbm_del_mlist = ml;
+			mp->sbm_mlist = ml;
+			mp->sbm_cm.sbdev_busy = 1;
+		}
+#ifdef DEBUG
+		ASSERT(mp->sbm_mlist != NULL);
+
+		if (mp->sbm_flags & DR_MFLAG_SOURCE) {
+			PR_MEM("%s: release of %s requires copy/rename;"
+			    " selected target board %s\n",
+			    f,
+			    mp->sbm_cm.sbdev_path,
+			    mp->sbm_peer->sbm_cm.sbdev_path);
+		} else {
+			PR_MEM("%s: copy/rename not required to release %s\n",
+			    f, mp->sbm_cm.sbdev_path);
+		}
+
+		ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER);
+		ASSERT(mp->sbm_flags & DR_MFLAG_RESERVED);
+#endif
+	}
+
+	return (err_flag ? -1 : 0);
+}
+
+/*
+ * Final stage of a memory release: free the kphysm delete handle,
+ * then either unwind the unit state (if an error was recorded
+ * during the release) or mark the source -- and its copy-rename
+ * target, if one exists -- as RELEASEd and release-DONE.
+ */
+void
+dr_release_mem_done(dr_common_unit_t *cp)
+{
+	dr_mem_unit_t	*s_mp = (dr_mem_unit_t *)cp;
+	dr_mem_unit_t	*t_mp, *mp;
+	int		rv;
+	static fn_t	f = "dr_release_mem_done";
+
+	/*
+	 * This unit will be flagged with DR_MFLAG_SOURCE, if it
+	 * has a target unit.
+	 */
+	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
+		t_mp = s_mp->sbm_peer;
+		ASSERT(t_mp != NULL);
+		ASSERT(t_mp->sbm_peer == s_mp);
+		ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
+		ASSERT(t_mp->sbm_flags & DR_MFLAG_RESERVED);
+	} else {
+		/* this is no target unit */
+		t_mp = NULL;
+	}
+
+	/* free delete handle */
+	ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER);
+	ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED);
+	rv = kphysm_del_release(s_mp->sbm_memhandle);
+	if (rv != KPHYSM_OK) {
+		/*
+		 * can do nothing but complain
+		 * and hope helpful for debug
+		 */
+		cmn_err(CE_WARN, "%s: unexpected kphysm_del_release"
+		    " return value %d", f, rv);
+	}
+	s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER;
+
+	/*
+	 * If an error was encountered during release, clean up
+	 * the source (and target, if present) unit data.
+	 */
+/* XXX Can we know that sbdev_error was encountered during release? */
+	if (s_mp->sbm_cm.sbdev_error != NULL) {
+		PR_MEM("%s: %s: error %d noted\n",
+		    f,
+		    s_mp->sbm_cm.sbdev_path,
+		    s_mp->sbm_cm.sbdev_error->e_code);
+
+		if (t_mp != NULL) {
+			/* target's sbm_mlist aliases sbm_del_mlist */
+			ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
+			t_mp->sbm_del_mlist = NULL;
+
+			if (t_mp->sbm_mlist != NULL) {
+				memlist_delete(t_mp->sbm_mlist);
+				t_mp->sbm_mlist = NULL;
+			}
+
+			t_mp->sbm_peer = NULL;
+			t_mp->sbm_flags = 0;
+			t_mp->sbm_cm.sbdev_busy = 0;
+		}
+
+		if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
+			memlist_delete(s_mp->sbm_del_mlist);
+		s_mp->sbm_del_mlist = NULL;
+
+		if (s_mp->sbm_mlist != NULL) {
+			memlist_delete(s_mp->sbm_mlist);
+			s_mp->sbm_mlist = NULL;
+		}
+
+		s_mp->sbm_peer = NULL;
+		s_mp->sbm_flags = 0;
+		s_mp->sbm_cm.sbdev_busy = 0;
+
+		/* bail out */
+		return;
+	}
+
+	DR_DEV_SET_RELEASED(&s_mp->sbm_cm);
+	dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE);
+
+	if (t_mp != NULL) {
+		/*
+		 * the kphysm delete operation that drained the source
+		 * board also drained this target board. Since the source
+		 * board drain is now known to have succeeded, we know this
+		 * target board is drained too.
+		 *
+		 * because DR_DEV_SET_RELEASED and dr_device_transition
+		 * is done here, the dr_release_dev_done should not
+		 * fail.
+		 */
+		DR_DEV_SET_RELEASED(&t_mp->sbm_cm);
+		dr_device_transition(&t_mp->sbm_cm, DR_STATE_RELEASE);
+
+		/*
+		 * NOTE: do not transition target's board state,
+		 * even if the mem-unit was the last configure
+		 * unit of the board. When copy/rename completes
+		 * this mem-unit will transitioned back to
+		 * the configured state. In the meantime, the
+		 * board's must remain as is.
+		 */
+	}
+
+	/* if board(s) had deleted memory, verify it is gone */
+	rv = 0;
+	memlist_read_lock();
+	if (s_mp->sbm_del_mlist != NULL) {
+		mp = s_mp;
+		rv = memlist_intersect(phys_install, mp->sbm_del_mlist);
+	}
+	if (rv == 0 && t_mp && t_mp->sbm_del_mlist != NULL) {
+		mp = t_mp;
+		rv = memlist_intersect(phys_install, mp->sbm_del_mlist);
+	}
+	memlist_read_unlock();
+	if (rv) {
+		cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): "
+		    "deleted memory still found in phys_install",
+		    f,
+		    (mp == t_mp ? "target " : ""),
+		    mp->sbm_cm.sbdev_bp->b_num,
+		    mp->sbm_cm.sbdev_unum);
+
+		DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm);
+		return;
+	}
+
+	s_mp->sbm_flags |= DR_MFLAG_RELDONE;
+	if (t_mp != NULL)
+		t_mp->sbm_flags |= DR_MFLAG_RELDONE;
+
+	/* this should not fail */
+	if (dr_release_dev_done(&s_mp->sbm_cm) != 0) {
+		/* catch this in debug kernels */
+		ASSERT(0);
+		return;
+	}
+
+	PR_MEM("%s: marking %s release DONE\n",
+	    f, s_mp->sbm_cm.sbdev_path);
+
+	s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
+
+	if (t_mp != NULL) {
+		/* should not fail */
+		rv = dr_release_dev_done(&t_mp->sbm_cm);
+		if (rv != 0) {
+			/* catch this in debug kernels */
+			ASSERT(0);
+			return;
+		}
+
+		PR_MEM("%s: marking %s release DONE\n",
+		    f, t_mp->sbm_cm.sbdev_path);
+
+		t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
+	}
+}
+
+/*ARGSUSED*/
+/*
+ * Disconnect a mem-unit: drop its cached memlists and withdraw the
+ * board's memory registration from the lgroup framework.
+ * Always returns 0.
+ */
+int
+dr_disconnect_mem(dr_mem_unit_t *mp)
+{
+	static fn_t	f = "dr_disconnect_mem";
+	update_membounds_t mb;
+
+#ifdef DEBUG
+	int state = mp->sbm_cm.sbdev_state;
+	ASSERT(state == DR_STATE_CONNECTED ||
+	    state == DR_STATE_UNCONFIGURED);
+#endif
+
+	PR_MEM("%s...\n", f);
+
+	/* free the delete memlist, unless it aliases sbm_mlist */
+	if (mp->sbm_del_mlist != NULL && mp->sbm_del_mlist != mp->sbm_mlist) {
+		memlist_delete(mp->sbm_del_mlist);
+	}
+	mp->sbm_del_mlist = NULL;
+
+	/* free the cached memlist as well */
+	if (mp->sbm_mlist != NULL) {
+		memlist_delete(mp->sbm_mlist);
+		mp->sbm_mlist = NULL;
+	}
+
+	/*
+	 * Remove memory from lgroup
+	 * For now, only board info is required.
+	 */
+	mb.u_board = mp->sbm_cm.sbdev_bp->b_num;
+	mb.u_base = (uint64_t)-1;
+	mb.u_len = (uint64_t)-1;
+
+	lgrp_plat_config(LGRP_CONFIG_MEM_DEL, (uintptr_t)&mb);
+
+	return (0);
+}
+
+/*
+ * Cancel a pending memory release on the given (source) mem-unit,
+ * undoing any memory deletes and restoring the unit -- and its
+ * copy-rename target peer, if one was selected -- to the
+ * CONFIGURED state.  Returns 0 on success, -1 when called on a
+ * target unit or from an unexpected state.
+ */
+int
+dr_cancel_mem(dr_mem_unit_t *s_mp)
+{
+	dr_mem_unit_t	*t_mp;
+	dr_state_t	state;
+	static fn_t	f = "dr_cancel_mem";
+
+	state = s_mp->sbm_cm.sbdev_state;
+
+	if (s_mp->sbm_flags & DR_MFLAG_TARGET) {
+		/* must cancel source board, not target board */
+		/* TODO: set error */
+		return (-1);
+	} else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
+		t_mp = s_mp->sbm_peer;
+		ASSERT(t_mp != NULL);
+		ASSERT(t_mp->sbm_peer == s_mp);
+
+		/* must always match the source board's state */
+		/* TODO: is this assertion correct? */
+		ASSERT(t_mp->sbm_cm.sbdev_state == state);
+	} else {
+		/* this is no target unit */
+		t_mp = NULL;
+	}
+
+	switch (state) {
+	case DR_STATE_UNREFERENCED:	/* state set by dr_release_dev_done */
+		/* first undo the deletes, then fall into common cleanup */
+		ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);
+
+		if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) {
+			PR_MEM("%s: undoing target %s memory delete\n",
+			    f, t_mp->sbm_cm.sbdev_path);
+			dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist);
+
+			DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
+		}
+
+		if (s_mp->sbm_del_mlist != NULL) {
+			PR_MEM("%s: undoing %s memory delete\n",
+			    f, s_mp->sbm_cm.sbdev_path);
+
+			dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist);
+		}
+
+		/*FALLTHROUGH*/
+
+/* TODO: should no longer be possible to see the release state here */
+	case DR_STATE_RELEASE:	/* state set by dr_release_mem_done */
+
+		ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);
+
+		if (t_mp != NULL) {
+			/* target's sbm_mlist aliases sbm_del_mlist */
+			ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
+			t_mp->sbm_del_mlist = NULL;
+
+			if (t_mp->sbm_mlist != NULL) {
+				memlist_delete(t_mp->sbm_mlist);
+				t_mp->sbm_mlist = NULL;
+			}
+
+			t_mp->sbm_peer = NULL;
+			t_mp->sbm_flags = 0;
+			t_mp->sbm_cm.sbdev_busy = 0;
+			dr_init_mem_unit_data(t_mp);
+
+			DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
+
+			dr_device_transition(
+			    &t_mp->sbm_cm, DR_STATE_CONFIGURED);
+		}
+
+		if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
+			memlist_delete(s_mp->sbm_del_mlist);
+		s_mp->sbm_del_mlist = NULL;
+
+		if (s_mp->sbm_mlist != NULL) {
+			memlist_delete(s_mp->sbm_mlist);
+			s_mp->sbm_mlist = NULL;
+		}
+
+		s_mp->sbm_peer = NULL;
+		s_mp->sbm_flags = 0;
+		s_mp->sbm_cm.sbdev_busy = 0;
+		dr_init_mem_unit_data(s_mp);
+
+		return (0);
+
+	default:
+		PR_MEM("%s: WARNING unexpected state (%d) for %s\n",
+		    f, (int)state, s_mp->sbm_cm.sbdev_path);
+
+		return (-1);
+	}
+	/*NOTREACHED*/
+}
+
+/*
+ * Derive the initial DR state of a mem-unit from its current
+ * attached/present status and transition it there.  Unit data is
+ * (re)initialized first for any unit that is physically present.
+ */
+void
+dr_init_mem_unit(dr_mem_unit_t *mp)
+{
+	dr_state_t	new_state = DR_STATE_EMPTY;
+	int		is_present = DR_DEV_IS_PRESENT(&mp->sbm_cm);
+
+	if (DR_DEV_IS_ATTACHED(&mp->sbm_cm)) {
+		new_state = DR_STATE_CONFIGURED;
+		mp->sbm_cm.sbdev_cond = SBD_COND_OK;
+	} else if (is_present) {
+		new_state = DR_STATE_CONNECTED;
+		mp->sbm_cm.sbdev_cond = SBD_COND_OK;
+	} else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) {
+		new_state = DR_STATE_OCCUPIED;
+	}
+
+	if (is_present)
+		dr_init_mem_unit_data(mp);
+
+	/* delay transition until fully initialized */
+	dr_device_transition(&mp->sbm_cm, new_state);
+}
+
+/*
+ * Refresh the cached properties of a mem-unit (base pfn, page
+ * count, alignment mask, slice size) from drmach and register the
+ * unit's memory range with the lgroup framework.  On drmach
+ * failure the error is recorded on the unit and nothing else is
+ * touched.
+ */
+static void
+dr_init_mem_unit_data(dr_mem_unit_t *mp)
+{
+	drmachid_t		id = mp->sbm_cm.sbdev_id;
+	drmach_mem_info_t	mi;
+	sbd_error_t		*serr;
+	update_membounds_t	bounds;
+	static fn_t		f = "dr_init_mem_unit_data";
+
+	PR_MEM("%s...\n", f);
+
+	/* a unit being (re)initialized must not be in a copy-rename */
+	ASSERT(mp->sbm_peer == NULL);
+	ASSERT(mp->sbm_flags == 0);
+
+	serr = drmach_mem_get_info(id, &mi);
+	if (serr != NULL) {
+		DRERR_SET_C(&mp->sbm_cm.sbdev_error, &serr);
+		return;
+	}
+
+	mp->sbm_basepfn = _b64top(mi.mi_basepa);
+	mp->sbm_npages = _b64top(mi.mi_size);
+	mp->sbm_alignment_mask = _b64top(mi.mi_alignment_mask);
+	mp->sbm_slice_size = mi.mi_slice_size;
+
+	/*
+	 * Add memory to lgroup
+	 */
+	bounds.u_board = mp->sbm_cm.sbdev_bp->b_num;
+	bounds.u_base = (uint64_t)mp->sbm_basepfn << MMU_PAGESHIFT;
+	bounds.u_len = (uint64_t)mp->sbm_npages << MMU_PAGESHIFT;
+
+	lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&bounds);
+
+	PR_MEM("%s: %s (basepfn = 0x%lx, npgs = %ld)\n",
+	    f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages);
+}
+
+/*
+ * Schedule every span of the supplied memlist for deletion under
+ * the given kphysm handle.  Returns 0 when all spans are accepted,
+ * -1 as soon as kphysm_del_span() rejects one.
+ */
+static int
+dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml)
+{
+	struct memlist	*span;
+	static fn_t	f = "dr_reserve_mem_spans";
+
+	PR_MEM("%s...\n", f);
+
+	/*
+	 * Walk the supplied memlist scheduling each span for removal
+	 * with kphysm_del_span. It is possible that a span may intersect
+	 * an area occupied by the cage.
+	 */
+	for (span = ml; span != NULL; span = span->next) {
+		pfn_t	basepfn = _b64top(span->address);
+		pgcnt_t	npages = _b64top(span->size);
+		int	rv;
+
+		rv = kphysm_del_span(*mhp, basepfn, npages);
+		if (rv != KPHYSM_OK) {
+			cmn_err(CE_WARN, "%s memory reserve failed."
+			    " unexpected kphysm_del_span return value %d;"
+			    " basepfn=0x%lx npages=%ld",
+			    f, rv, basepfn, npages);
+
+			return (-1);
+		}
+	}
+
+	return (0);
+}
+
+/* number of target-preference ranks tracked by dr_select_mem_target */
+#define	DR_SMT_NPREF_SETS	6
+/*
+ * Fix: the macro body must be parenthesized.  The unparenthesized
+ * form made `idx / DR_SMT_NUNITS_PER_SET' expand to
+ * `(idx / MAX_BOARDS) * MAX_MEM_UNITS_PER_BOARD', which computes
+ * the wrong preference rank whenever MAX_MEM_UNITS_PER_BOARD != 1.
+ */
+#define	DR_SMT_NUNITS_PER_SET	(MAX_BOARDS * MAX_MEM_UNITS_PER_BOARD)
+
+/* debug counters */
+int dr_smt_realigned;
+int dr_smt_preference[DR_SMT_NPREF_SETS];
+
+#ifdef DEBUG
+uint_t dr_ignore_board; /* if bit[bnum-1] set, board won't be candidate */
+#endif
+
+/*
+ * Find and reserve a copy/rename target board suitable for the
+ * given source board.
+ * All boards in the system are examined and categorized in relation to
+ * their memory size versus the source board's memory size. Order of
+ * preference is:
+ *	1st copy all source, source/target same size
+ *	2nd copy all source, larger target
+ *	3rd copy nonrelocatable source span
+ *
+ * Returns 0 when a target was reserved (source and target units are
+ * cross-linked via sbm_peer and flagged SOURCE/TARGET), -1 otherwise.
+ * All reservations are made against the SOURCE unit's memhandle.
+ */
+static int
+dr_select_mem_target(dr_handle_t *hp,
+    dr_mem_unit_t *s_mp, struct memlist *s_ml)
+{
+	dr_target_pref_t preference; /* lower value is higher preference */
+	int		idx;
+	dr_mem_unit_t	**sets;
+
+	int		t_bd;
+	int		t_unit;
+	int		rv;
+	dr_board_t	*s_bp, *t_bp;
+	dr_mem_unit_t	*t_mp, *c_mp;
+	struct memlist	*d_ml, *t_ml, *ml, *b_ml, *x_ml = NULL;
+	memquery_t	s_mq = {0};
+	static fn_t	f = "dr_select_mem_target";
+
+	PR_MEM("%s...\n", f);
+
+	ASSERT(s_ml != NULL);
+
+	/* candidate table: one slot per (preference, board, unit) */
+	sets = GETSTRUCT(dr_mem_unit_t *, DR_SMT_NUNITS_PER_SET *
+	    DR_SMT_NPREF_SETS);
+
+	s_bp = hp->h_bd;
+	/* calculate the offset into the slice of the last source board pfn */
+	ASSERT(s_mp->sbm_npages != 0);
+
+	/*
+	 * Find non-relocatable span on source board.
+	 */
+	rv = kphysm_del_span_query(s_mp->sbm_basepfn, s_mp->sbm_npages, &s_mq);
+	if (rv != KPHYSM_OK) {
+		PR_MEM("%s: %s: unexpected kphysm_del_span_query"
+		    " return value %d; basepfn 0x%lx, npages %ld\n",
+		    f, s_mp->sbm_cm.sbdev_path, rv, s_mp->sbm_basepfn,
+		    s_mp->sbm_npages);
+		return (-1);
+	}
+
+	ASSERT(s_mq.phys_pages != 0);
+	ASSERT(s_mq.nonrelocatable != 0);
+
+	PR_MEM("%s: %s: nonrelocatable span (0x%lx..0x%lx)\n", f,
+	    s_mp->sbm_cm.sbdev_path, s_mq.first_nonrelocatable,
+	    s_mq.last_nonrelocatable);
+
+	/* break down s_ml if it contains dynamic segments */
+	b_ml = memlist_dup(s_ml);
+
+	/*
+	 * del+cat of the same span is a no-op for coverage but splits
+	 * b_ml entries on dynamic-segment boundaries.
+	 */
+	for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->next) {
+		b_ml = memlist_del_span(b_ml, ml->address, ml->size);
+		b_ml = memlist_cat_span(b_ml, ml->address, ml->size);
+	}
+
+
+	/*
+	 * Make one pass through all memory units on all boards
+	 * and categorize them with respect to the source board.
+	 */
+	for (t_bd = 0; t_bd < MAX_BOARDS; t_bd++) {
+		/*
+		 * The board structs are a contiguous array
+		 * so we take advantage of that to find the
+		 * correct board struct pointer for a given
+		 * board number.
+		 */
+		t_bp = dr_lookup_board(t_bd);
+
+		/* source board can not be its own target */
+		if (s_bp->b_num == t_bp->b_num)
+			continue;
+
+		for (t_unit = 0; t_unit < MAX_MEM_UNITS_PER_BOARD; t_unit++) {
+
+			t_mp = dr_get_mem_unit(t_bp, t_unit);
+
+			/* this memory node must be attached */
+			if (!DR_DEV_IS_ATTACHED(&t_mp->sbm_cm))
+				continue;
+
+			/* source unit can not be its own target */
+			if (s_mp == t_mp) {
+				/* catch this is debug kernels */
+				ASSERT(0);
+				continue;
+			}
+
+			/*
+			 * this memory node must not already be reserved
+			 * by some other memory delete operation.
+			 */
+			if (t_mp->sbm_flags & DR_MFLAG_RESERVED)
+				continue;
+
+			/* get target board memlist */
+			t_ml = dr_get_memlist(t_mp);
+			if (t_ml == NULL) {
+				cmn_err(CE_WARN, "%s: no memlist for"
+				    " mem-unit %d, board %d", f,
+				    t_mp->sbm_cm.sbdev_bp->b_num,
+				    t_mp->sbm_cm.sbdev_unum);
+				continue;
+			}
+
+			preference = dr_get_target_preference(hp, t_mp, s_mp,
+			    t_ml, s_ml, b_ml);
+
+			/*
+			 * NOTE(review): t_ml appears to leak here and on
+			 * the preference==DR_TP_INVALID path; it is
+			 * re-fetched in the selection loop below.
+			 */
+			if (preference == DR_TP_INVALID)
+				continue;
+
+			dr_smt_preference[preference]++;
+
+			/* calculate index to start of preference set */
+			idx  = DR_SMT_NUNITS_PER_SET * preference;
+			/* calculate offset to respective element */
+			idx += t_bd * MAX_MEM_UNITS_PER_BOARD + t_unit;
+
+			ASSERT(idx < DR_SMT_NUNITS_PER_SET * DR_SMT_NPREF_SETS);
+			sets[idx] = t_mp;
+		}
+	}
+
+	if (b_ml != NULL)
+		memlist_delete(b_ml);
+
+	/*
+	 * NOTE: this would be a good place to sort each candidate
+	 * set in to some desired order, e.g. memory size in ascending
+	 * order.  Without an additional sorting step here, the order
+	 * within a set is ascending board number order.
+	 */
+
+	c_mp = NULL;
+	x_ml = NULL;
+	t_ml = NULL;	/* candidate target memlist */
+	for (idx = 0; idx < DR_SMT_NUNITS_PER_SET * DR_SMT_NPREF_SETS; idx++) {
+		memquery_t mq;
+
+		/* recover this slot's preference rank from its index */
+		preference = (dr_target_pref_t)(idx / DR_SMT_NUNITS_PER_SET);
+
+		ASSERT(preference != DR_TP_INVALID);
+
+		/* cleanup t_ml after previous pass */
+		if (t_ml != NULL) {
+			memlist_delete(t_ml);
+			t_ml = NULL;
+		}
+
+		/* get candidate target board mem unit */
+		t_mp = sets[idx];
+		if (t_mp == NULL)
+			continue;
+
+		/* get target board memlist */
+		t_ml = dr_get_memlist(t_mp);
+		if (t_ml == NULL) {
+			cmn_err(CE_WARN, "%s: no memlist for"
+			    " mem-unit %d, board %d",
+			    f,
+			    t_mp->sbm_cm.sbdev_bp->b_num,
+			    t_mp->sbm_cm.sbdev_unum);
+
+			continue;
+		}
+
+		PR_MEM("%s: checking for no-reloc in %s, "
+		    " basepfn=0x%lx, npages=%ld\n",
+		    f,
+		    t_mp->sbm_cm.sbdev_path,
+		    t_mp->sbm_basepfn,
+		    t_mp->sbm_npages);
+
+		rv = dr_del_mlist_query(t_ml, &mq);
+		if (rv != KPHYSM_OK) {
+			PR_MEM("%s: kphysm_del_span_query:"
+			    " unexpected return value %d\n", f, rv);
+
+			continue;
+		}
+
+		/* target must itself be fully relocatable */
+		if (mq.nonrelocatable != 0) {
+			PR_MEM("%s: candidate %s has"
+			    " nonrelocatable span [0x%lx..0x%lx]\n",
+			    f,
+			    t_mp->sbm_cm.sbdev_path,
+			    mq.first_nonrelocatable,
+			    mq.last_nonrelocatable);
+
+			continue;
+		}
+
+#ifdef DEBUG
+		/*
+		 * This is a debug tool for excluding certain boards
+		 * from being selected as a target board candidate.
+		 * dr_ignore_board is only tested by this driver.
+		 * It must be set with adb, obp, /etc/system or your
+		 * favorite debugger.
+		 */
+		if (dr_ignore_board &
+		    (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) {
+			PR_MEM("%s: dr_ignore_board flag set,"
+			    " ignoring %s as candidate\n",
+			    f, t_mp->sbm_cm.sbdev_path);
+			continue;
+		}
+#endif
+
+		/*
+		 * Reserve excess source board memory, if any.
+		 *
+		 * Only the nonrelocatable source span will be copied
+		 * so schedule the rest of the source mem to be deleted.
+		 */
+		switch (preference) {
+		case DR_TP_NONRELOC:
+			/*
+			 * Get source copy memlist and use it to construct
+			 * delete memlist.
+			 */
+			d_ml = memlist_dup(s_ml);
+			x_ml = dr_get_copy_mlist(s_ml, t_ml, s_mp, t_mp);
+
+			/* XXX */
+			ASSERT(d_ml != NULL);
+			ASSERT(x_ml != NULL);
+
+			for (ml = x_ml; ml != NULL; ml = ml->next) {
+				d_ml = memlist_del_span(d_ml, ml->address,
+				    ml->size);
+			}
+
+			PR_MEM("%s: %s: reserving src brd memlist:\n", f,
+			    s_mp->sbm_cm.sbdev_path);
+			PR_MEMLIST_DUMP(d_ml);
+
+			/* reserve excess spans */
+			if (dr_reserve_mem_spans(&s_mp->sbm_memhandle,
+			    d_ml) != 0) {
+				/* likely more non-reloc pages appeared */
+				/* TODO: restart from top? */
+				/*
+				 * NOTE(review): d_ml (and the previous
+				 * x_ml) appear to leak on this continue.
+				 */
+				continue;
+			}
+			break;
+		default:
+			d_ml = NULL;
+			break;
+		}
+
+		s_mp->sbm_flags |= DR_MFLAG_RESERVED;
+
+		/*
+		 * reserve all memory on target board.
+		 * NOTE: source board's memhandle is used.
+		 *
+		 * If this succeeds (eq 0), then target selection is
+		 * complete and all unwanted memory spans, both source and
+		 * target, have been reserved.  Loop is terminated.
+		 */
+		if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) {
+			PR_MEM("%s: %s: target board memory reserved\n",
+			    f, t_mp->sbm_cm.sbdev_path);
+
+			/* a candidate target board is now reserved */
+			t_mp->sbm_flags |= DR_MFLAG_RESERVED;
+			c_mp = t_mp;
+
+			/* *** EXITING LOOP *** */
+			break;
+		}
+
+		/* did not successfully reserve the target board. */
+		PR_MEM("%s: could not reserve target %s\n",
+		    f, t_mp->sbm_cm.sbdev_path);
+
+		/*
+		 * NOTE: an undo of the dr_reserve_mem_span work
+		 * will happen automatically when the memhandle
+		 * (s_mp->sbm_memhandle) is kphysm_del_release'd.
+		 */
+
+		s_mp->sbm_flags &= ~DR_MFLAG_RESERVED;
+	}
+
+	/* clean up after memlist editing logic */
+	if (x_ml != NULL)
+		memlist_delete(x_ml);
+
+	FREESTRUCT(sets, dr_mem_unit_t *, DR_SMT_NUNITS_PER_SET *
+	    DR_SMT_NPREF_SETS);
+
+	/*
+	 * c_mp will be NULL when the entire sets[] array
+	 * has been searched without reserving a target board.
+	 */
+	if (c_mp == NULL) {
+		PR_MEM("%s: %s: target selection failed.\n",
+		    f, s_mp->sbm_cm.sbdev_path);
+
+		if (t_ml != NULL)
+			memlist_delete(t_ml);
+
+		return (-1);
+	}
+
+	PR_MEM("%s: found target %s for source %s\n",
+	    f,
+	    c_mp->sbm_cm.sbdev_path,
+	    s_mp->sbm_cm.sbdev_path);
+
+	s_mp->sbm_peer = c_mp;
+	s_mp->sbm_flags |= DR_MFLAG_SOURCE;
+	s_mp->sbm_del_mlist = d_ml;	/* spans to be deleted, if any */
+	s_mp->sbm_mlist = s_ml;
+	s_mp->sbm_cm.sbdev_busy = 1;
+
+	c_mp->sbm_peer = s_mp;
+	c_mp->sbm_flags |= DR_MFLAG_TARGET;
+	c_mp->sbm_del_mlist = t_ml;	/* spans to be deleted */
+	c_mp->sbm_mlist = t_ml;
+	c_mp->sbm_cm.sbdev_busy = 1;
+
+	return (0);
+}
+
+/*
+ * Returns target preference rank:
+ *	-1 not a valid copy-rename target board
+ *	 0 copy all source, source/target same size
+ *	 1 copy all source, larger target
+ *	 2 copy nonrelocatable source span
+ *
+ * A floating board's rank is demoted by DR_TP_FLOATING when the
+ * FORCE flag is present, and disallowed outright otherwise.
+ */
+static dr_target_pref_t
+dr_get_target_preference(dr_handle_t *hp,
+    dr_mem_unit_t *t_mp, dr_mem_unit_t *s_mp,
+    struct memlist *t_ml, struct memlist *s_ml,
+    struct memlist *b_ml)
+{
+	dr_target_pref_t preference;
+	struct memlist *s_nonreloc_ml = NULL;
+	drmachid_t t_id;
+	static fn_t f = "dr_get_target_preference";
+
+	t_id = t_mp->sbm_cm.sbdev_bp->b_id;
+
+	/*
+	 * Can the entire source board be copied?
+	 */
+	if (dr_memlist_canfit(s_ml, t_ml, s_mp, t_mp)) {
+		if (s_mp->sbm_npages == t_mp->sbm_npages)
+			preference = DR_TP_SAME;	/* same size */
+		else
+			preference = DR_TP_LARGE;	/* larger target */
+	} else {
+		/*
+		 * Entire source won't fit so try non-relocatable memory only
+		 * (target aligned).
+		 */
+		s_nonreloc_ml = dr_get_nonreloc_mlist(b_ml, s_mp);
+		if (s_nonreloc_ml == NULL) {
+			/*
+			 * Fix: the board is simply not a candidate.
+			 * Previously the code fell through and passed
+			 * the NULL memlist to dr_memlist_canfit(),
+			 * which could overwrite this DR_TP_INVALID
+			 * verdict.
+			 */
+			PR_MEM("%s: dr_get_nonreloc_mlist failed\n", f);
+			preference = DR_TP_INVALID;
+		} else if (dr_memlist_canfit(s_nonreloc_ml, t_ml,
+		    s_mp, t_mp)) {
+			preference = DR_TP_NONRELOC;
+		} else {
+			preference = DR_TP_INVALID;
+		}
+	}
+
+	if (s_nonreloc_ml != NULL)
+		memlist_delete(s_nonreloc_ml);
+
+	/*
+	 * Force floating board preference lower than all other boards
+	 * if the force flag is present; otherwise disallow the board.
+	 */
+	if ((preference != DR_TP_INVALID) && drmach_board_is_floating(t_id)) {
+		if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
+			preference += DR_TP_FLOATING;
+		else
+			preference = DR_TP_INVALID;
+	}
+
+	PR_MEM("%s: %s preference=%d\n", f, t_mp->sbm_cm.sbdev_path,
+	    preference);
+
+	return (preference);
+}
+
+/*
+ * Create a memlist representing the source memory that will be copied to
+ * the target board.  The memory to be copied is the maximum amount that
+ * will fit on the target board.
+ *
+ * The target memlist is rebased into the source board's slice, then
+ * subtracted from the source memlist to obtain the source delete list
+ * (widened to whole dynamic segments); the copy list is the source
+ * memlist minus that delete list.  Caller owns the returned memlist.
+ */
+static struct memlist *
+dr_get_copy_mlist(struct memlist *s_mlist, struct memlist *t_mlist,
+    dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
+{
+	struct memlist	*t_ml, *s_copy_ml, *s_del_ml, *ml, *x_ml;
+	uint64_t	s_slice_mask, s_slice_base;
+	uint64_t	t_slice_mask, t_slice_base;
+	static fn_t	f = "dr_get_copy_mlist";
+
+	ASSERT(s_mlist != NULL);
+	ASSERT(t_mlist != NULL);
+	/* slice arithmetic below relies on equal slice sizes */
+	ASSERT(t_mp->sbm_slice_size == s_mp->sbm_slice_size);
+
+	s_slice_mask = s_mp->sbm_slice_size - 1;
+	s_slice_base = s_mlist->address & ~s_slice_mask;
+
+	t_slice_mask = t_mp->sbm_slice_size - 1;
+	t_slice_base = t_mlist->address & ~t_slice_mask;
+
+	t_ml = memlist_dup(t_mlist);
+	s_del_ml = memlist_dup(s_mlist);
+	s_copy_ml = memlist_dup(s_mlist);
+
+	/* XXX */
+	ASSERT(t_ml != NULL);
+	ASSERT(s_del_ml != NULL);
+	ASSERT(s_copy_ml != NULL);
+
+	/*
+	 * To construct the source copy memlist:
+	 *
+	 * The target memlist is converted to the post-rename
+	 * source addresses.  This is the physical address range
+	 * the target will have after the copy-rename.  Overlaying
+	 * and deleting this from the current source memlist will
+	 * give the source delete memlist.  The copy memlist is
+	 * the reciprocal of the source delete memlist.
+	 */
+	for (ml = t_ml; ml != NULL; ml = ml->next) {
+		/*
+		 * Normalize relative to target slice base PA
+		 * in order to preseve slice offsets.
+		 */
+		ml->address -= t_slice_base;
+		/*
+		 * Convert to source slice PA address.
+		 */
+		ml->address += s_slice_base;
+	}
+
+	for (ml = t_ml; ml != NULL; ml = ml->next) {
+		s_del_ml = memlist_del_span(s_del_ml, ml->address, ml->size);
+	}
+
+	/*
+	 * Expand the delete mlist to fully include any dynamic segments
+	 * it intersects with.
+	 */
+	for (x_ml = NULL, ml = s_del_ml; ml != NULL; ml = ml->next) {
+		uint64_t del_base = ml->address;
+		uint64_t del_end = ml->address + ml->size;
+		struct memlist *dyn;
+
+		for (dyn = s_mp->sbm_dyn_segs; dyn != NULL; dyn = dyn->next) {
+			uint64_t dyn_base = dyn->address;
+			uint64_t dyn_end = dyn->address + dyn->size;
+
+			/* grow span start to enclosing dynamic segment */
+			if (del_base > dyn_base && del_base < dyn_end)
+				del_base = dyn_base;
+
+			/* grow span end likewise */
+			if (del_end > dyn_base && del_end < dyn_end)
+				del_end = dyn_end;
+		}
+
+		x_ml = memlist_cat_span(x_ml, del_base, del_end - del_base);
+	}
+
+	/* replace the delete list with its expanded form */
+	memlist_delete(s_del_ml);
+	s_del_ml = x_ml;
+
+	for (ml = s_del_ml; ml != NULL; ml = ml->next) {
+		s_copy_ml = memlist_del_span(s_copy_ml, ml->address, ml->size);
+	}
+
+	PR_MEM("%s: source delete mlist\n", f);
+	PR_MEMLIST_DUMP(s_del_ml);
+
+	PR_MEM("%s: source copy mlist\n", f);
+	PR_MEMLIST_DUMP(s_copy_ml);
+
+	memlist_delete(t_ml);
+	memlist_delete(s_del_ml);
+
+	return (s_copy_ml);
+}
+
+/*
+ * Scan the non-relocatable spans on the source memory
+ * and construct a minimum mlist that includes all non-reloc
+ * memory subject to target alignment, and dynamic segment
+ * constraints where only whole dynamic segments may be deleted.
+ *
+ * Returns a newly allocated memlist (caller frees), or NULL if a
+ * kphysm_del_span_query() fails or no non-relocatable pages exist.
+ */
+static struct memlist *
+dr_get_nonreloc_mlist(struct memlist *s_ml, dr_mem_unit_t *s_mp)
+{
+	struct memlist	*x_ml = NULL;
+	struct memlist	*ml;
+	static fn_t	f = "dr_get_nonreloc_mlist";
+
+	PR_MEM("%s: checking for split of dyn seg list:\n", f);
+	PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs);
+
+	for (ml = s_ml; ml; ml = ml->next) {
+		int rv;
+		uint64_t nr_base, nr_end;
+		memquery_t mq;
+		struct memlist *dyn;
+
+		/* ask VM which pages in this span cannot be relocated */
+		rv = kphysm_del_span_query(
+			_b64top(ml->address), _b64top(ml->size), &mq);
+		if (rv) {
+			/* memlist_delete(NULL) is tolerated on first span */
+			memlist_delete(x_ml);
+			return (NULL);
+		}
+
+		if (mq.nonrelocatable == 0)
+			continue;
+
+		PR_MEM("%s: non-reloc span: 0x%lx, 0x%lx (%lx, %lx)\n", f,
+			_ptob64(mq.first_nonrelocatable),
+			_ptob64(mq.last_nonrelocatable),
+			mq.first_nonrelocatable,
+			mq.last_nonrelocatable);
+
+		/*
+		 * Align the span at both ends to allow for possible
+		 * cage expansion.
+		 */
+		nr_base = _ptob64(mq.first_nonrelocatable);
+		nr_end = _ptob64(mq.last_nonrelocatable + 1);
+
+		PR_MEM("%s: adjusted non-reloc span: 0x%lx, 0x%lx\n",
+			f, nr_base, nr_end);
+
+		/*
+		 * Expand the non-reloc span to fully include any
+		 * dynamic segments it intersects with.
+		 */
+		for (dyn = s_mp->sbm_dyn_segs; dyn != NULL; dyn = dyn->next) {
+			uint64_t dyn_base = dyn->address;
+			uint64_t dyn_end = dyn->address + dyn->size;
+
+			if (nr_base > dyn_base && nr_base < dyn_end)
+				nr_base = dyn_base;
+
+			if (nr_end > dyn_base && nr_end < dyn_end)
+				nr_end = dyn_end;
+		}
+
+		x_ml = memlist_cat_span(x_ml, nr_base, nr_end - nr_base);
+	}
+
+	if (x_ml == NULL) {
+		PR_MEM("%s: source didn't have any non-reloc pages!\n", f);
+		return (NULL);
+	}
+
+	PR_MEM("%s: %s: edited source memlist:\n", f, s_mp->sbm_cm.sbdev_path);
+	PR_MEMLIST_DUMP(x_ml);
+
+	return (x_ml);
+}
+
+/*
+ * Check if source memlist can fit in target memlist while maintaining
+ * relative offsets within board.
+ *
+ * Both lists are duplicated and normalized to slice-relative offsets
+ * before the actual fit test in memlist_canfit().  Returns 1 if the
+ * source fits, 0 otherwise (including on memlist_dup() failure).
+ */
+static int
+dr_memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist,
+	dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
+{
+	int		canfit = 0;
+	struct memlist	*s_ml, *t_ml, *ml;
+	uint64_t	s_slice_mask, t_slice_mask;
+	/* tag must match the function name for meaningful diagnostics */
+	static fn_t	f = "dr_memlist_canfit";
+
+	s_ml = memlist_dup(s_mlist);
+	t_ml = memlist_dup(t_mlist);
+
+	if (s_ml == NULL || t_ml == NULL) {
+		cmn_err(CE_WARN, "%s: memlist_dup failed\n", f);
+		goto done;
+	}
+
+	s_slice_mask = s_mp->sbm_slice_size - 1;
+	t_slice_mask = t_mp->sbm_slice_size - 1;
+
+	/*
+	 * Normalize to slice relative offsets.
+	 */
+	for (ml = s_ml; ml; ml = ml->next)
+		ml->address &= s_slice_mask;
+
+	for (ml = t_ml; ml; ml = ml->next)
+		ml->address &= t_slice_mask;
+
+	canfit = memlist_canfit(s_ml, t_ml);
+done:
+	/* memlist_delete() tolerates a NULL list */
+	memlist_delete(s_ml);
+	memlist_delete(t_ml);
+
+	return (canfit);
+}
+
+/*
+ * Memlist support.
+ */
+
+/*
+ * Determine whether the source memlist (s_mlist) will
+ * fit into the target memlist (t_mlist) in terms of
+ * size and holes. Assumes the caller has normalized the
+ * memlist physical addresses for comparison.
+ *
+ * Returns 1 when every source span lies entirely inside some target
+ * span, 0 otherwise (including NULL arguments).
+ */
+static int
+memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist)
+{
+	int		rv = 0;
+	struct memlist	*s_ml, *t_ml;
+
+	if ((s_mlist == NULL) || (t_mlist == NULL))
+		return (0);
+
+	/*
+	 * Walk both lists in tandem (presumably address-sorted —
+	 * NOTE(review): confirm against memlist construction): for each
+	 * target span, consume as many source spans as fit completely
+	 * within it; a source span that does not fit breaks the inner
+	 * loop and is retried against the next target span.
+	 */
+	s_ml = s_mlist;
+	for (t_ml = t_mlist; t_ml && s_ml; t_ml = t_ml->next) {
+		uint64_t s_start, s_end;
+		uint64_t t_start, t_end;
+
+		t_start = t_ml->address;
+		t_end = t_start + t_ml->size;
+
+		for (; s_ml; s_ml = s_ml->next) {
+			s_start = s_ml->address;
+			s_end = s_start + s_ml->size;
+
+			if ((s_start < t_start) || (s_end > t_end))
+				break;
+		}
+	}
+
+	/*
+	 * If we ran out of source memlist chunks that mean
+	 * we found a home for all of them.
+	 */
+	if (s_ml == NULL)
+		rv = 1;
+
+	return (rv);
+}
diff --git a/usr/src/uts/sun4u/opl/io/drmach.c b/usr/src/uts/sun4u/opl/io/drmach.c
new file mode 100644
index 0000000000..3b6f520447
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/drmach.c
@@ -0,0 +1,3929 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/debug.h>
+#include <sys/types.h>
+#include <sys/varargs.h>
+#include <sys/errno.h>
+#include <sys/cred.h>
+#include <sys/dditypes.h>
+#include <sys/devops.h>
+#include <sys/modctl.h>
+#include <sys/poll.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ndi_impldefs.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/vmem.h>
+#include <sys/opl_olympus_regs.h>
+#include <sys/cpuvar.h>
+#include <sys/cpupart.h>
+#include <sys/mem_config.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/systm.h>
+#include <sys/machsystm.h>
+#include <sys/autoconf.h>
+#include <sys/cmn_err.h>
+#include <sys/sysmacros.h>
+#include <sys/x_call.h>
+#include <sys/promif.h>
+#include <sys/prom_plat.h>
+#include <sys/membar.h>
+#include <vm/seg_kmem.h>
+#include <sys/mem_cage.h>
+#include <sys/stack.h>
+#include <sys/archsystm.h>
+#include <vm/hat_sfmmu.h>
+#include <sys/pte.h>
+#include <sys/mmu.h>
+#include <sys/cpu_module.h>
+#include <sys/obpdefs.h>
+#include <sys/note.h>
+#include <sys/ontrap.h>
+#include <sys/cpu_sgnblk_defs.h>
+#include <sys/opl.h>
+
+
+#include <sys/promimpl.h>
+#include <sys/prom_plat.h>
+#include <sys/kobj.h>
+
+#include <sys/sysevent.h>
+#include <sys/sysevent/dr.h>
+#include <sys/sysevent/eventdefs.h>
+
+#include <sys/drmach.h>
+#include <sys/dr_util.h>
+
+#include <sys/fcode.h>
+#include <sys/opl_cfg.h>
+
+extern void bcopy32_il(uint64_t, uint64_t);
+extern void flush_cache_il(void);
+extern void drmach_sleep_il(void);
+
+/* Arguments passed to each callback during a node walk. */
+typedef struct {
+	struct drmach_node	*node;	/* node being visited */
+	void			*data;	/* caller-private walk argument */
+} drmach_node_walk_args_t;
+
+/*
+ * Device-tree node abstraction: all node accesses go through these
+ * function pointers so the rest of drmach is independent of how the
+ * node was probed (see the drmach_node_* routines below).
+ */
+typedef struct drmach_node {
+	void		*here;		/* backing dev_info_t pointer */
+
+	pnode_t		(*get_dnode)(struct drmach_node *node);
+	int		(*walk)(struct drmach_node *node, void *data,
+				int (*cb)(drmach_node_walk_args_t *args));
+	dev_info_t	*(*n_getdip)(struct drmach_node *node);
+	int		(*n_getproplen)(struct drmach_node *node, char *name,
+				int *len);
+	int		(*n_getprop)(struct drmach_node *node, char *name,
+				void *buf, int len);
+	int		(*get_parent)(struct drmach_node *node,
+				struct drmach_node *pnode);
+} drmach_node_t;
+
+/* Bounds-checked dynamic array of drmachid_t (see drmach_array_*). */
+typedef struct {
+	int		min_index;	/* lowest valid index */
+	int		max_index;	/* highest valid index */
+	int		arr_sz;		/* allocated size of arr, in bytes */
+	drmachid_t	*arr;
+} drmach_array_t;
+
+/*
+ * Header common to every drmach object.  The isa field holds the
+ * address of the object's constructor and is used by the
+ * DRMACH_IS_*_ID() type predicates.
+ */
+typedef struct {
+	void		*isa;
+
+	void		(*dispose)(drmachid_t);
+	sbd_error_t	*(*release)(drmachid_t);
+	sbd_error_t	*(*status)(drmachid_t, drmach_status_t *);
+
+	char		name[MAXNAMELEN];
+} drmach_common_t;
+
+/* Per-core strand bitmasks (one bit per strand on the core). */
+typedef struct {
+	uint32_t	core_present;
+	uint32_t	core_hotadded;
+	uint32_t	core_started;
+} drmach_cmp_t;
+
+/* A system board and the devices discovered on it. */
+typedef struct {
+	drmach_common_t	cm;
+	int		bnum;		/* board number (LSB id) */
+	int		assigned;
+	int		powered;
+	int		connected;
+	int		cond;
+	drmach_node_t	*tree;
+	drmach_array_t	*devices;
+	int		boot_board;	/* if board exists on bootup */
+	drmach_cmp_t	cores[OPL_MAX_COREID_PER_BOARD];
+} drmach_board_t;
+
+/* Base object embedded in each cpu/mem/io device object below. */
+typedef struct {
+	drmach_common_t	cm;
+	drmach_board_t	*bp;		/* owning board */
+	int		unum;		/* unit number on the board */
+	int		portid;
+	int		busy;
+	int		powered;
+	const char	*type;		/* DRMACH_DEVTYPE_* string */
+	drmach_node_t	*node;
+} drmach_device_t;
+
+/* CPU (strand) device object. */
+typedef struct drmach_cpu {
+	drmach_device_t	dev;
+	processorid_t	cpuid;
+	int		sb;		/* system board number */
+	int		chipid;
+	int		coreid;
+	int		strandid;
+	int		status;
+#define	OPL_CPU_HOTADDED	1
+} drmach_cpu_t;
+
+/* Memory device object: one per board's pseudo-mc node. */
+typedef struct drmach_mem {
+	drmach_device_t	dev;
+	uint64_t	slice_base;	/* base PA of the board's slice */
+	uint64_t	slice_size;
+	uint64_t	base_pa;	/* lowest installed memory base */
+	uint64_t	nbytes;		/* size of installed memory */
+	struct memlist	*memlist;	/* installed spans on this board */
+} drmach_mem_t;
+
+/* I/O (pci) device object. */
+typedef struct drmach_io {
+	drmach_device_t	dev;
+	int		channel;
+	int		leaf;
+} drmach_io_t;
+
+/* Domain-wide DR attributes read from firmware properties. */
+typedef struct drmach_domain_info {
+	uint32_t	floating;	/* bitmask of floating boards */
+	int		allow_dr;	/* non-zero if DR is permitted */
+} drmach_domain_info_t;
+
+drmach_domain_info_t drmach_domain;
+
+/* Arguments for device configure/unconfigure operations. */
+typedef struct {
+	int		flags;
+	drmach_device_t	*dp;
+	sbd_error_t	*err;
+	dev_info_t	*dip;
+} drmach_config_args_t;
+
+/* Per-board callback state used while enumerating board devices. */
+typedef struct {
+	drmach_board_t	*obj;
+	int		ndevs;		/* devices found so far */
+	void		*a;		/* caller-private argument */
+	sbd_error_t	*(*found)(void *a, const char *, int, drmachid_t);
+	sbd_error_t	*err;		/* first error encountered */
+} drmach_board_cb_data_t;
+
+static drmach_array_t	*drmach_boards;		/* all boards, indexed by bnum */
+
+/* constructors for the per-device-type drmach objects */
+static sbd_error_t	*drmach_device_new(drmach_node_t *,
+				drmach_board_t *, int, drmachid_t *);
+static sbd_error_t	*drmach_cpu_new(drmach_device_t *, drmachid_t *);
+static sbd_error_t	*drmach_mem_new(drmach_device_t *, drmachid_t *);
+static sbd_error_t	*drmach_io_new(drmach_device_t *, drmachid_t *);
+
+static dev_info_t	*drmach_node_ddi_get_dip(drmach_node_t *np);
+static int		drmach_node_ddi_get_prop(drmach_node_t *np,
+				char *name, void *buf, int len);
+static int		drmach_node_ddi_get_proplen(drmach_node_t *np,
+				char *name, int *len);
+
+static int		drmach_get_portid(drmach_node_t *);
+static sbd_error_t	*drmach_i_status(drmachid_t, drmach_status_t *);
+static int		opl_check_dr_status();
+static void		drmach_io_dispose(drmachid_t);
+static sbd_error_t	*drmach_io_release(drmachid_t);
+static sbd_error_t	*drmach_io_status(drmachid_t, drmach_status_t *);
+static int		drmach_init(void);
+static void		drmach_fini(void);
+static void		drmach_swap_pa(drmach_mem_t *, drmach_mem_t *);
+static drmach_board_t	*drmach_get_board_by_bnum(int);
+
+/* options for the second argument in drmach_add_remove_cpu() */
+#define	HOTADD_CPU	1
+#define	HOTREMOVE_CPU	2
+
+/* board-relative core number for a cpuid (strands fold onto their core) */
+#define	ON_BOARD_CORE_NUM(x)	(((uint_t)(x) / OPL_MAX_STRANDID_PER_CORE) & \
+	(OPL_MAX_COREID_PER_BOARD - 1))
+
+extern struct cpu	*SIGBCPU;
+
+static int		drmach_name2type_idx(char *);
+static drmach_board_t	*drmach_board_new(int, int);
+
+#ifdef DEBUG
+
+#define	DRMACH_PR		if (drmach_debug) printf
+int drmach_debug = 1;		/* set to non-zero to enable debug messages */
+#else
+
+#define	DRMACH_PR		_NOTE(CONSTANTCONDITION) if (0) printf
+#endif /* DEBUG */
+
+
+#define	DRMACH_OBJ(id)		((drmach_common_t *)id)
+
+/*
+ * Type predicates: each object's isa field is set to the address of its
+ * constructor, so pointer identity encodes the object's type.
+ */
+#define	DRMACH_IS_BOARD_ID(id)	\
+	((id != 0) &&		\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new))
+
+#define	DRMACH_IS_CPU_ID(id)	\
+	((id != 0) &&		\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new))
+
+#define	DRMACH_IS_MEM_ID(id)	\
+	((id != 0) &&		\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_mem_new))
+
+#define	DRMACH_IS_IO_ID(id)	\
+	((id != 0) &&		\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
+
+#define	DRMACH_IS_DEVICE_ID(id)					\
+	((id != 0) &&						\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
+	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
+	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
+
+#define	DRMACH_IS_ID(id)					\
+	((id != 0) &&						\
+	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new ||	\
+	    DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
+	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
+	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))
+
+#define	DRMACH_INTERNAL_ERROR() \
+	drerr_new(1, EOPL_INTERNAL, drmach_ie_fmt, __LINE__)
+
+static char		*drmach_ie_fmt = "drmach.c %d";
+
+/* map of device-tree node names to drmach device types/constructors */
+static struct {
+	const char	*name;
+	const char	*type;
+	sbd_error_t	*(*new)(drmach_device_t *, drmachid_t *);
+} drmach_name2type[] = {
+	{ "cpu",	DRMACH_DEVTYPE_CPU,	drmach_cpu_new },
+	{ "pseudo-mc",	DRMACH_DEVTYPE_MEM,	drmach_mem_new },
+	{ "pci",	DRMACH_DEVTYPE_PCI,	drmach_io_new },
+};
+
+/* utility */
+#define	MBYTE	(1048576ull)
+
+/*
+ * drmach autoconfiguration data structures and interfaces
+ */
+
+extern struct mod_ops mod_miscops;
+
+/* This is a misc module; it has no dev_ops of its own. */
+static struct modlmisc modlmisc = {
+	&mod_miscops,
+	"OPL DR 1.1"
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1,
+	(void *)&modlmisc,
+	NULL
+};
+
+/* Protects drmach_boards against concurrent lookup and teardown. */
+static krwlock_t drmach_boards_rwlock;
+
+/* function-name tag used in diagnostic messages */
+typedef const char	*fn_t;
+
+/*
+ * Module entry point: bring up the drmach layer, then register the
+ * module.  Undo the drmach setup if registration fails.
+ */
+int
+_init(void)
+{
+	int rv;
+
+	rv = drmach_init();
+	if (rv != 0)
+		return (rv);
+
+	rv = mod_install(&modlinkage);
+	if (rv != 0)
+		drmach_fini();
+
+	return (rv);
+}
+
+/*
+ * Module exit point: tear down the drmach layer only after the module
+ * has been successfully unregistered.
+ */
+int
+_fini(void)
+{
+	int rv;
+
+	rv = mod_remove(&modlinkage);
+	if (rv == 0)
+		drmach_fini();
+
+	return (rv);
+}
+
+/* Report module information to modinfo(1M). */
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * The following routines are used to set up the memory
+ * properties in the board structure.
+ */
+
+/* Walk state for locating a board's memory-controller dip. */
+struct drmach_mc_lookup {
+	int	bnum;
+	drmach_board_t	*bp;
+	dev_info_t	*dip;	/* rv - set if found */
+};
+
+/* page-frame-number <-> 64-bit byte address conversions */
+#define	_ptob64(p) ((uint64_t)(p) << PAGESHIFT)
+#define	_b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
+
+/*
+ * Populate mp's slice and installed-memory fields from the board's
+ * "sb-mem-ranges" property and, depending on whether the board was
+ * present at boot, either the hardware descriptor (HWD) or the live
+ * phys_install list.  Returns 0 on success, -1 on failure.
+ */
+static int
+drmach_setup_mc_info(dev_info_t *dip, drmach_mem_t *mp)
+{
+	uint64_t	memory_ranges[128];
+	int		len;
+	struct memlist	*ml;
+	int		rv;
+	hwd_sb_t	*hwd;
+	hwd_memory_t	*pm;
+
+	/* slice base/size come from the first pair of sb-mem-ranges */
+	len = sizeof (memory_ranges);
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+		DDI_PROP_DONTPASS, "sb-mem-ranges",
+	    (caddr_t)&memory_ranges[0], &len) != DDI_PROP_SUCCESS) {
+		mp->slice_base = 0;
+		mp->slice_size = 0;
+		return (-1);
+	}
+	mp->slice_base = memory_ranges[0];
+	mp->slice_size = memory_ranges[1];
+
+	if (!mp->dev.bp->boot_board) {
+		int i;
+
+		/* hot-added board: read memory chunks from the HWD */
+		rv = opl_read_hwd(mp->dev.bp->bnum, NULL,  NULL, NULL, &hwd);
+
+		if (rv != 0) {
+			return (-1);
+		}
+
+		ml = NULL;
+		pm = &hwd->sb_cmu.cmu_memory;
+		for (i = 0; i < HWD_MAX_MEM_CHUNKS; i++) {
+			if (pm->mem_chunks[i].chnk_size > 0) {
+				ml = memlist_add_span(ml,
+					pm->mem_chunks[i].chnk_start_address,
+					pm->mem_chunks[i].chnk_size);
+			}
+		}
+	} else {
+		/*
+		 * we intersect phys_install to get base_pa.
+		 * This only works at bootup time.
+		 */
+
+		memlist_read_lock();
+		ml = memlist_dup(phys_install);
+		memlist_read_unlock();
+
+		/* trim everything below the slice base ... */
+		ml = memlist_del_span(ml, 0ull, mp->slice_base);
+		if (ml) {
+			uint64_t basepa, endpa;
+			endpa = _ptob64(physmax + 1);
+
+			/* ... and everything above the slice end */
+			basepa = mp->slice_base + mp->slice_size;
+
+			ml = memlist_del_span(ml, basepa, endpa - basepa);
+		}
+	}
+
+	if (ml) {
+		uint64_t nbytes = 0;
+		struct memlist *p;
+		for (p = ml; p; p = p->next) {
+			nbytes += p->size;
+		}
+		/* base_pa is the lowest span's address (list head) */
+		if ((mp->nbytes = nbytes) > 0)
+			mp->base_pa = ml->address;
+		else
+			mp->base_pa = 0;
+		mp->memlist = ml;
+	} else {
+		mp->base_pa = 0;
+		mp->nbytes = 0;
+	}
+	return (0);
+}
+
+
+/* Walk state for hot-adding/removing the cpus of one core on a board. */
+struct drmach_hotcpu {
+	drmach_board_t *bp;
+	int	bnum;		/* target board number */
+	int	core_id;	/* target core on that board */
+	int	rv;		/* out: 0 on success, -1 on failure */
+	int	option;		/* HOTADD_CPU or HOTREMOVE_CPU */
+};
+
+/*
+ * ddi_walk_devs() callback: descend through cmp -> core -> cpu nodes
+ * of the target board and hot-add or hot-remove each cpu strand of the
+ * requested core via the PROM, updating the board's core_hotadded
+ * strand mask.  Other boards/cores are pruned from the walk.
+ */
+static int
+drmach_cpu_cb(dev_info_t *dip, void *arg)
+{
+	struct drmach_hotcpu *p = (struct drmach_hotcpu *)arg;
+	char name[OBP_MAXDRVNAME];
+	int len = OBP_MAXDRVNAME;
+	int bnum, core_id, strand_id;
+	drmach_board_t *bp;
+
+	if (dip == ddi_root_node()) {
+		return (DDI_WALK_CONTINUE);
+	}
+
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, "name",
+	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
+		return (DDI_WALK_PRUNECHILD);
+	}
+
+	/* only cmp has board number */
+	bnum = -1;
+	len = sizeof (bnum);
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
+	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
+		bnum = -1;
+	}
+
+	if (strcmp(name, "cmp") == 0) {
+		/* descend only into the board we were asked about */
+		if (bnum != p->bnum)
+			return (DDI_WALK_PRUNECHILD);
+		return (DDI_WALK_CONTINUE);
+	}
+	/* we have already pruned all unwanted cores and cpu's above */
+	if (strcmp(name, "core") == 0) {
+		return (DDI_WALK_CONTINUE);
+	}
+	if (strcmp(name, "cpu") == 0) {
+		processorid_t cpuid;
+		len = sizeof (cpuid);
+		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+		    DDI_PROP_DONTPASS, "cpuid",
+		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
+			p->rv = -1;
+			return (DDI_WALK_TERMINATE);
+		}
+
+		core_id = p->core_id;
+
+		bnum = LSB_ID(cpuid);
+
+		/* skip strands belonging to other cores */
+		if (ON_BOARD_CORE_NUM(cpuid) != core_id)
+			return (DDI_WALK_CONTINUE);
+
+		bp = p->bp;
+		ASSERT(bnum == bp->bnum);
+
+		if (p->option == HOTADD_CPU) {
+			if (prom_hotaddcpu(cpuid) != 0) {
+				p->rv = -1;
+				return (DDI_WALK_TERMINATE);
+			}
+			strand_id = STRAND_ID(cpuid);
+			bp->cores[core_id].core_hotadded |= (1 << strand_id);
+		} else if (p->option == HOTREMOVE_CPU) {
+			if (prom_hotremovecpu(cpuid) != 0) {
+				p->rv = -1;
+				return (DDI_WALK_TERMINATE);
+			}
+			strand_id = STRAND_ID(cpuid);
+			bp->cores[core_id].core_hotadded &= ~(1 << strand_id);
+		}
+		return (DDI_WALK_CONTINUE);
+	}
+
+	return (DDI_WALK_PRUNECHILD);
+}
+
+
+/*
+ * Hot-add or hot-remove (per 'option': HOTADD_CPU/HOTREMOVE_CPU) every
+ * cpu strand of the given core on the given board by walking the
+ * device tree with drmach_cpu_cb().  Returns 0 on success, -1 on
+ * failure.
+ */
+static int
+drmach_add_remove_cpu(int bnum, int core_id, int option)
+{
+	struct drmach_hotcpu arg;
+	drmach_board_t *bp;
+
+	bp = drmach_get_board_by_bnum(bnum);
+	ASSERT(bp);
+
+	arg.bp = bp;
+	arg.bnum = bnum;
+	arg.core_id = core_id;
+	arg.rv = 0;
+	arg.option = option;
+	ddi_walk_devs(ddi_root_node(), drmach_cpu_cb, (void *)&arg);
+	return (arg.rv);
+}
+
+/* Walk state for discovering which strands exist on a board. */
+struct drmach_setup_core_arg {
+	drmach_board_t	*bp;
+};
+
+/*
+ * ddi_walk_devs() callback: for each cpu node on the board, set the
+ * strand's bit in the owning core's core_present mask.  Nodes for
+ * other boards are pruned.
+ */
+static int
+drmach_setup_core_cb(dev_info_t *dip, void *arg)
+{
+	struct drmach_setup_core_arg *p = (struct drmach_setup_core_arg *)arg;
+	char name[OBP_MAXDRVNAME];
+	int len = OBP_MAXDRVNAME;
+	int bnum;
+	int core_id, strand_id;
+
+	if (dip == ddi_root_node()) {
+		return (DDI_WALK_CONTINUE);
+	}
+
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, "name",
+	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
+		return (DDI_WALK_PRUNECHILD);
+	}
+
+	/* only cmp has board number */
+	bnum = -1;
+	len = sizeof (bnum);
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, OBP_BOARDNUM,
+	    (caddr_t)&bnum, &len) != DDI_PROP_SUCCESS) {
+		bnum = -1;
+	}
+
+	if (strcmp(name, "cmp") == 0) {
+		if (bnum != p->bp->bnum)
+			return (DDI_WALK_PRUNECHILD);
+		return (DDI_WALK_CONTINUE);
+	}
+	/* we have already pruned all unwanted cores and cpu's above */
+	if (strcmp(name, "core") == 0) {
+		return (DDI_WALK_CONTINUE);
+	}
+	if (strcmp(name, "cpu") == 0) {
+		processorid_t cpuid;
+		len = sizeof (cpuid);
+		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+		    DDI_PROP_DONTPASS, "cpuid",
+		    (caddr_t)&cpuid, &len) != DDI_PROP_SUCCESS) {
+			return (DDI_WALK_TERMINATE);
+		}
+		bnum = LSB_ID(cpuid);
+		ASSERT(bnum == p->bp->bnum);
+		core_id = ON_BOARD_CORE_NUM(cpuid);
+		strand_id = STRAND_ID(cpuid);
+		p->bp->cores[core_id].core_present |= (1 << strand_id);
+		return (DDI_WALK_CONTINUE);
+	}
+
+	return (DDI_WALK_PRUNECHILD);
+}
+
+
+/*
+ * Initialize the board's per-core strand masks: clear them, discover
+ * the strands present via a device-tree walk, and, for a board present
+ * at boot, mark every present strand as already hot-added and started.
+ */
+static void
+drmach_setup_core_info(drmach_board_t *obj)
+{
+	struct drmach_setup_core_arg arg;
+	int c;
+
+	/* start with a clean slate for every possible core */
+	for (c = 0; c < OPL_MAX_COREID_PER_BOARD; c++) {
+		obj->cores[c].core_present = 0;
+		obj->cores[c].core_hotadded = 0;
+		obj->cores[c].core_started = 0;
+	}
+
+	arg.bp = obj;
+	ddi_walk_devs(ddi_root_node(), drmach_setup_core_cb, (void *)&arg);
+
+	if (obj->boot_board) {
+		for (c = 0; c < OPL_MAX_COREID_PER_BOARD; c++) {
+			obj->cores[c].core_hotadded =
+			    obj->cores[c].core_started =
+			    obj->cores[c].core_present;
+		}
+	}
+}
+
+/*
+ * drmach_node_* routines serve the purpose of separating the
+ * rest of the code from the device tree and OBP. This is necessary
+ * because of In-Kernel-Probing. Devices probed after stod, are probed
+ * by the in-kernel-prober, not OBP. These devices, therefore, do not
+ * have dnode ids.
+ */
+
+/* Wrapper state threading a drmach callback through ddi_walk_devs(). */
+typedef struct {
+	drmach_node_walk_args_t	*nwargs;	/* args handed to cb */
+	int 			(*cb)(drmach_node_walk_args_t *args);
+	int			err;		/* first non-zero cb result */
+} drmach_node_ddi_walk_args_t;
+
+/*
+ * ddi_walk_devs() trampoline: point the drmach node at the current dip,
+ * invoke the drmach callback, then clear the reference.  A non-zero
+ * callback result terminates the walk.
+ */
+static int
+drmach_node_ddi_walk_cb(dev_info_t *dip, void *arg)
+{
+	drmach_node_ddi_walk_args_t	*nargs;
+
+	nargs = (drmach_node_ddi_walk_args_t *)arg;
+
+	/*
+	 * dip doesn't have to be held here as we are called
+	 * from ddi_walk_devs() which holds the dip.
+	 */
+	nargs->nwargs->node->here = (void *)dip;
+
+	nargs->err = nargs->cb(nargs->nwargs);
+
+
+	/*
+	 * Set "here" to NULL so that unheld dip is not accessible
+	 * outside ddi_walk_devs()
+	 */
+	nargs->nwargs->node->here = NULL;
+
+	if (nargs->err)
+		return (DDI_WALK_TERMINATE);
+	else
+		return (DDI_WALK_CONTINUE);
+}
+
+/*
+ * Walk the whole device tree from the root, invoking cb on each node
+ * via drmach_node_ddi_walk_cb().  Returns the first non-zero callback
+ * result, or 0 if the walk completed.
+ */
+static int
+drmach_node_ddi_walk(drmach_node_t *np, void *data,
+		int (*cb)(drmach_node_walk_args_t *args))
+{
+	drmach_node_walk_args_t	args;
+	drmach_node_ddi_walk_args_t nargs;
+
+
+	/* initialized args structure for callback */
+	args.node = np;
+	args.data = data;
+
+	nargs.nwargs = &args;
+	nargs.cb = cb;
+	nargs.err = 0;
+
+	/*
+	 * Root node doesn't have to be held in any way.
+	 */
+	ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb,
+		(void *)&nargs);
+
+	return (nargs.err);
+}
+
+/*
+ * Fill pp with a copy of np that points at np's parent dip.
+ * Returns 0 on success, -1 if np has no dip or no parent.
+ */
+static int
+drmach_node_ddi_get_parent(drmach_node_t *np, drmach_node_t *pp)
+{
+	dev_info_t	*ndip;
+	static char	*fn = "drmach_node_ddi_get_parent";
+
+	ndip = np->n_getdip(np);
+	if (ndip == NULL) {
+		cmn_err(CE_WARN, "%s: NULL dip", fn);
+		return (-1);
+	}
+
+	/* copy all ops from np, then retarget at the parent */
+	bcopy(np, pp, sizeof (drmach_node_t));
+
+	pp->here = (void *)ddi_get_parent(ndip);
+	if (pp->here == NULL) {
+		cmn_err(CE_WARN, "%s: NULL parent dip", fn);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*ARGSUSED*/
+/* DDI-probed nodes have no PROM dnode; always reports none. */
+static pnode_t
+drmach_node_ddi_get_dnode(drmach_node_t *np)
+{
+	return ((pnode_t)NULL);
+}
+
+/*
+ * Allocate a drmach node wired to the DDI-based accessors.
+ * KM_SLEEP allocation: never returns NULL.
+ */
+static drmach_node_t *
+drmach_node_new(void)
+{
+	drmach_node_t *np;
+
+	np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
+
+	np->get_dnode = drmach_node_ddi_get_dnode;
+	np->walk = drmach_node_ddi_walk;
+	np->n_getdip = drmach_node_ddi_get_dip;
+	np->n_getproplen = drmach_node_ddi_get_proplen;
+	np->n_getprop = drmach_node_ddi_get_prop;
+	np->get_parent = drmach_node_ddi_get_parent;
+
+	return (np);
+}
+
+/* Free a node allocated by drmach_node_new()/drmach_node_dup(). */
+static void
+drmach_node_dispose(drmach_node_t *np)
+{
+	kmem_free(np, sizeof (*np));
+}
+
+/* Return the dev_info_t the node currently refers to (may be NULL). */
+static dev_info_t *
+drmach_node_ddi_get_dip(drmach_node_t *np)
+{
+	return ((dev_info_t *)np->here);
+}
+
+/* Dispatch a tree walk through the node's walk op. */
+static int
+drmach_node_walk(drmach_node_t *np, void *param,
+		int (*cb)(drmach_node_walk_args_t *args))
+{
+	return (np->walk(np, param, cb));
+}
+
+/*
+ * Read property 'name' of the node's dip into buf (at most len bytes).
+ * Returns 0 on success, -1 if the node has no dip or the property
+ * lookup fails.
+ */
+static int
+drmach_node_ddi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
+{
+	int		rv = 0;
+	dev_info_t	*ndip;
+	static char	*fn = "drmach_node_ddi_get_prop";
+
+
+	ndip = np->n_getdip(np);
+	if (ndip == NULL) {
+		cmn_err(CE_WARN, "%s: NULL dip", fn);
+		rv = -1;
+	} else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ndip,
+	    DDI_PROP_DONTPASS, name,
+	    (caddr_t)buf, &len) != DDI_PROP_SUCCESS) {
+		rv = -1;
+	}
+
+	return (rv);
+}
+
+/*
+ * Store the length of property 'name' in *len.
+ * Returns 0 on success, -1 if the node has no dip or lookup fails.
+ */
+static int
+drmach_node_ddi_get_proplen(drmach_node_t *np, char *name, int *len)
+{
+	int		rv = 0;
+	dev_info_t	*ndip;
+
+	ndip = np->n_getdip(np);
+	if (ndip == NULL) {
+		rv = -1;
+	} else if (ddi_getproplen(DDI_DEV_T_ANY, ndip, DDI_PROP_DONTPASS,
+		name, len) != DDI_PROP_SUCCESS) {
+		rv = -1;
+	}
+
+	return (rv);
+}
+
+/*
+ * Duplicate a drmach node: allocate a fresh node and mirror every
+ * field (dip reference and all accessor ops) of the source.
+ */
+static drmachid_t
+drmach_node_dup(drmach_node_t *np)
+{
+	drmach_node_t *copy;
+
+	copy = drmach_node_new();
+	bcopy(np, copy, sizeof (*copy));
+
+	return (copy);
+}
+
+/*
+ * drmach_array provides convenient array construction, access,
+ * bounds checking and array destruction logic.
+ */
+
+/*
+ * Allocate a bounds-checked array covering [min_index, max_index].
+ * Returns NULL (0) if the requested range is empty or inverted.
+ */
+static drmach_array_t *
+drmach_array_new(int min_index, int max_index)
+{
+	drmach_array_t *arr;
+
+	arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
+
+	arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
+	if (arr->arr_sz > 0) {
+		arr->min_index = min_index;
+		arr->max_index = max_index;
+
+		arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
+		return (arr);
+	} else {
+		kmem_free(arr, sizeof (*arr));
+		return (0);
+	}
+}
+
+/*
+ * Store val at index idx.  Returns 0 on success, -1 if idx is outside
+ * the array's [min_index, max_index] range.
+ */
+static int
+drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val)
+{
+	if (idx < arr->min_index || idx > arr->max_index)
+		return (-1);
+
+	arr->arr[idx - arr->min_index] = val;
+	return (0);
+}
+
+/*
+ * Fetch the value at index idx into *val.  Returns 0 on success, -1 if
+ * idx is outside the array's [min_index, max_index] range.
+ */
+static int
+drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val)
+{
+	if (idx < arr->min_index || idx > arr->max_index)
+		return (-1);
+
+	*val = arr->arr[idx - arr->min_index];
+	return (0);
+}
+
+/*
+ * Position *idx at the first occupied slot and return its value in
+ * *val.  Returns 0 on success, -1 if the array holds no non-NULL
+ * entries.
+ */
+static int
+drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val)
+{
+	int rv;
+
+	*idx = arr->min_index;
+	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
+		*idx += 1;
+
+	return (rv);
+}
+
+/*
+ * Advance *idx to the next occupied slot after its current position
+ * and return its value in *val.  Returns 0 on success, -1 when no
+ * further non-NULL entries exist.
+ */
+static int
+drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val)
+{
+	int rv;
+
+	*idx += 1;
+	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
+		*idx += 1;
+
+	return (rv);
+}
+
+/*
+ * Hand every occupied slot to the disposer, then free the backing
+ * storage and the array itself.
+ */
+static void
+drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
+{
+	drmachid_t	val;
+	int		idx;
+	int		rv;
+
+	for (rv = drmach_array_first(arr, &idx, &val); rv == 0;
+	    rv = drmach_array_next(arr, &idx, &val))
+		(*disposer)(val);
+
+	kmem_free(arr->arr, arr->arr_sz);
+	kmem_free(arr, sizeof (*arr));
+}
+
+/*
+ * Look up the board object for board number bnum.  Returns NULL when
+ * bnum is out of range or the slot is unoccupied.
+ */
+static drmach_board_t *
+drmach_get_board_by_bnum(int bnum)
+{
+	drmachid_t id;
+
+	if (drmach_array_get(drmach_boards, bnum, &id) != 0)
+		return (NULL);
+
+	return ((drmach_board_t *)id);
+}
+
+/* Dispatch a PROM dnode lookup through the node's get_dnode op. */
+static pnode_t
+drmach_node_get_dnode(drmach_node_t *np)
+{
+	return (np->get_dnode(np));
+}
+
+/*ARGSUSED*/
+/*
+ * Attach the device branch rooted at the given device id.  CPUs are a
+ * no-op (configured elsewhere).  Returns NULL on success, or an
+ * sbd_error_t naming the device path that failed to attach.
+ */
+sbd_error_t *
+drmach_configure(drmachid_t id, int flags)
+{
+	drmach_device_t		*dp;
+	sbd_error_t		*err = NULL;
+	dev_info_t		*rdip;
+	dev_info_t		*fdip = NULL;
+
+	if (DRMACH_IS_CPU_ID(id)) {
+		return (NULL);
+	}
+	if (!DRMACH_IS_DEVICE_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	dp = id;
+	rdip = dp->node->n_getdip(dp->node);
+
+	ASSERT(rdip);
+
+	/* branch must already be held by drmach_init()/IKP setup */
+	ASSERT(e_ddi_branch_held(rdip));
+
+	if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
+		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+		dev_info_t *dip = (fdip != NULL) ? fdip : rdip;
+
+		(void) ddi_pathname(dip, path);
+		err = drerr_new(1, EOPL_DRVFAIL, path);
+
+		kmem_free(path, MAXPATHLEN);
+
+		/* If non-NULL, fdip is returned held and must be released */
+		if (fdip != NULL)
+			ddi_release_devi(fdip);
+	}
+
+	return (err);
+}
+
+
+/*
+ * Construct the appropriate drmach device object (cpu/mem/io) for the
+ * given device-tree node, based on its "name" property.  On success
+ * *idp receives the new object (or NULL when the node is of no
+ * interest to drmach) and NULL is returned; otherwise an sbd_error_t
+ * is returned.
+ */
+static sbd_error_t *
+drmach_device_new(drmach_node_t *node,
+	drmach_board_t *bp, int portid, drmachid_t *idp)
+{
+	int		 i;
+	int		 rv;
+	drmach_device_t	 proto;
+	sbd_error_t	*err;
+	char		 name[OBP_MAXDRVNAME];
+
+	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
+	if (rv) {
+		/* every node is expected to have a name */
+		err = drerr_new(1, EOPL_GETPROP,
+			"device node %s: property %s",
+			ddi_node_name(node->n_getdip(node)), "name");
+		return (err);
+	}
+
+	/*
+	 * A node whose name is not listed in the name2type[] table is
+	 * of no interest to drmach: report neither a device object nor
+	 * an error for this case.
+	 */
+	i = drmach_name2type_idx(name);
+
+
+	if (i < 0) {
+		*idp = (drmachid_t)0;
+		return (NULL);
+	}
+
+	/* device specific new function will set unum */
+
+	bzero(&proto, sizeof (proto));
+	proto.type = drmach_name2type[i].type;
+	proto.bp = bp;
+	proto.node = node;
+	proto.portid = portid;
+
+	return (drmach_name2type[i].new(&proto, idp));
+}
+
+/* Destroy a device object via its type-specific dispose op. */
+static void
+drmach_device_dispose(drmachid_t id)
+{
+	drmach_device_t *self = id;
+
+	self->cm.dispose(id);
+}
+
+
+/*
+ * Construct a board object for board number bnum and register it in
+ * drmach_boards.  boot_board is non-zero for boards present at boot,
+ * which start out assigned/powered/connected.  KM_SLEEP allocation:
+ * never returns NULL.
+ */
+static drmach_board_t *
+drmach_board_new(int bnum, int boot_board)
+{
+	/* block-scope declarations: definitions appear later in the file */
+	static sbd_error_t *drmach_board_release(drmachid_t);
+	static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *);
+
+	drmach_board_t	*bp;
+
+	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
+
+	/* isa identifies this object as a board (see DRMACH_IS_BOARD_ID) */
+	bp->cm.isa = (void *)drmach_board_new;
+	bp->cm.release = drmach_board_release;
+	bp->cm.status = drmach_board_status;
+
+	(void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
+
+	bp->bnum = bnum;
+	bp->devices = NULL;
+	bp->connected = boot_board;
+	bp->tree = drmach_node_new();
+	bp->assigned = boot_board;
+	bp->powered = boot_board;
+	bp->boot_board = boot_board;
+
+	/*
+	 * If this is not bootup initialization, we have to wait till
+	 * IKP sets up the device nodes in drmach_board_connect().
+	 */
+	if (boot_board)
+		drmach_setup_core_info(bp);
+
+	/* bnum was validated by the caller; set cannot fail here */
+	drmach_array_set(drmach_boards, bnum, bp);
+	return (bp);
+}
+
+/* Tear down a board object, its node tree and its device array. */
+static void
+drmach_board_dispose(drmachid_t id)
+{
+	drmach_board_t *bp;
+
+	ASSERT(DRMACH_IS_BOARD_ID(id));
+	bp = id;
+
+	if (bp->tree)
+		drmach_node_dispose(bp->tree);
+
+	if (bp->devices)
+		drmach_array_dispose(bp->devices, drmach_device_dispose);
+
+	kmem_free(bp, sizeof (*bp));
+}
+
+/*
+ * Fill *stat with the board's status, folding in the busy/configured
+ * state of each device on the board.  Returns NULL on success or the
+ * first device-status error encountered.
+ */
+static sbd_error_t *
+drmach_board_status(drmachid_t id, drmach_status_t *stat)
+{
+	sbd_error_t	*err = NULL;
+	drmach_board_t	*bp;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	bp = id;
+
+	stat->assigned = bp->assigned;
+	stat->powered = bp->powered;
+	stat->busy = 0;			/* assume not busy */
+	stat->configured = 0;		/* assume not configured */
+	stat->empty = 0;
+	stat->cond = bp->cond = SBD_COND_OK;
+	/* NOTE(review): strncpy does not NUL-terminate on truncation */
+	strncpy(stat->type, "System Brd", sizeof (stat->type));
+	stat->info[0] = '\0';
+
+	if (bp->devices) {
+		int		 rv;
+		int		 d_idx;
+		drmachid_t	 d_id;
+
+		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
+		while (rv == 0) {
+			drmach_status_t	d_stat;
+
+			err = drmach_i_status(d_id, &d_stat);
+			if (err)
+				break;
+
+			/* board is busy/configured if any device is */
+			stat->busy |= d_stat.busy;
+			stat->configured |= d_stat.configured;
+
+			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
+		}
+	}
+
+	return (err);
+}
+
+/*
+ * Report whether the board identified by id is a floating board, i.e.
+ * whether its bit is set in the domain's floating-board mask.
+ * Non-board ids report 0.
+ */
+int
+drmach_board_is_floating(drmachid_t id)
+{
+	drmach_board_t *bp;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (0);
+
+	bp = (drmach_board_t *)id;
+
+	return ((drmach_domain.floating & (1 << bp->bnum)) != 0);
+}
+
+/*
+ * Bring up the drmach layer: create the board array, read the domain's
+ * "floating-boards" property, check whether DR is allowed, create a
+ * board object for every board found in the device tree, hold the
+ * device tree and initialize IKP.  Returns 0 on success, ENXIO on
+ * failure (all partial state is torn down).
+ */
+static int
+drmach_init(void)
+{
+	dev_info_t	*rdip;
+	int		i, rv, len;
+	int		*floating;
+
+	rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);
+
+	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
+
+	rdip = ddi_root_node();
+
+	/* a missing floating-boards property just leaves the mask at 0 */
+	if (ddi_getproplen(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
+		"floating-boards", &len) != DDI_PROP_SUCCESS) {
+		cmn_err(CE_WARN, "Cannot get floating-boards proplen\n");
+	} else {
+		floating = (int *)kmem_alloc(len, KM_SLEEP);
+		rv = ddi_prop_op(DDI_DEV_T_ANY, rdip,
+			PROP_LEN_AND_VAL_BUF, DDI_PROP_DONTPASS,
+			"floating-boards", (caddr_t)floating, &len);
+		if (rv != DDI_PROP_SUCCESS) {
+			cmn_err(CE_WARN, "Cannot get floating-boards prop\n");
+		} else {
+			drmach_domain.floating = 0;
+			for (i = 0; i < len / sizeof (int); i++) {
+				drmach_domain.floating |= (1 << floating[i]);
+			}
+		}
+		kmem_free(floating, len);
+	}
+	drmach_domain.allow_dr = opl_check_dr_status();
+
+	/* create a board object for every board-numbered child of root */
+	rdip = ddi_get_child(ddi_root_node());
+	do {
+		int		 bnum;
+		drmachid_t	 id;
+
+		bnum = -1;
+		bnum = ddi_getprop(DDI_DEV_T_ANY, rdip,
+			DDI_PROP_DONTPASS, OBP_BOARDNUM, -1);
+		if (bnum == -1)
+			continue;
+
+		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
+			cmn_err(CE_WARN, "Device node 0x%p has"
+				" invalid property value, %s=%d",
+				rdip, OBP_BOARDNUM, bnum);
+			goto error;
+		} else if (id == NULL) {
+			(void) drmach_board_new(bnum, 1);
+		}
+	} while ((rdip = ddi_get_next_sibling(rdip)) != NULL);
+
+	opl_hold_devtree();
+
+	/*
+	 * Initialize the IKP feature.
+	 *
+	 * This can be done only after DR has acquired a hold on all the
+	 * device nodes that are interesting to IKP.
+	 */
+	if (opl_init_cfg() != 0) {
+		cmn_err(CE_WARN, "DR - IKP initialization failed");
+
+		opl_release_devtree();
+
+		goto error;
+	}
+
+	return (0);
+error:
+	drmach_array_dispose(drmach_boards, drmach_board_dispose);
+	rw_destroy(&drmach_boards_rwlock);
+	return (ENXIO);
+}
+
+/*
+ * Tear down the state built by drmach_init(): dispose the board
+ * registry under the writer lock, release the devtree holds, and
+ * destroy the lock itself.
+ */
+static void
+drmach_fini(void)
+{
+	rw_enter(&drmach_boards_rwlock, RW_WRITER);
+	drmach_array_dispose(drmach_boards, drmach_board_dispose);
+	drmach_boards = NULL;
+	rw_exit(&drmach_boards_rwlock);
+
+	/*
+	 * Walk immediate children of the root devinfo node
+	 * releasing holds acquired on branches in drmach_init()
+	 */
+
+	opl_release_devtree();
+
+	rw_destroy(&drmach_boards_rwlock);
+}
+
+/*
+ * Each system board contains 2 Oberon PCI bridge and
+ * 1 CMUCH.
+ * Each oberon has 2 channels.
+ * Each channel has 2 pci-ex leaf.
+ * Each CMUCH has 1 pci bus.
+ *
+ *
+ * Device Path:
+ * /pci@<portid>,reg
+ *
+ * where
+ * portid[10] = 0
+ * portid[9:0] = LLEAF_ID[9:0] of the Oberon Channel
+ *
+ * LLEAF_ID[9:8] = 0
+ * LLEAF_ID[8:4] = LSB_ID[4:0]
+ * LLEAF_ID[3:1] = IO Channel#[2:0] (0,1,2,3 for Oberon)
+ * channel 4 is pcicmu
+ * LLEAF_ID[0] = PCI Leaf Number (0 for leaf-A, 1 for leaf-B)
+ *
+ * Properties:
+ * name = pci
+ * device_type = "pciex"
+ * board# = LSBID
+ * reg = int32 * 2, Oberon CSR space of the leaf and the UBC space
+ * portid = Jupiter Bus Device ID ((LSB_ID << 3)|pciport#)
+ */
+
+/*
+ * Construct a drmach_io_t for an IO leaf node.  Per the layout
+ * comment above, portid bit 0 selects the pci leaf (A/B) and
+ * bits [3:1] the IO channel (channel 4 is the pcicmu).
+ * Never fails; *idp receives the new object.
+ */
+static sbd_error_t *
+drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
+{
+	drmach_io_t	*ip;
+
+	int		portid;
+
+	portid = proto->portid;
+	ASSERT(portid != -1);
+	proto->unum = portid & (MAX_IO_UNITS_PER_BOARD - 1);
+
+	ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
+	bcopy(proto, &ip->dev, sizeof (ip->dev));
+	ip->dev.node = drmach_node_dup(proto->node);
+	/* isa doubles as the type tag checked by DRMACH_IS_IO_ID */
+	ip->dev.cm.isa = (void *)drmach_io_new;
+	ip->dev.cm.dispose = drmach_io_dispose;
+	ip->dev.cm.release = drmach_io_release;
+	ip->dev.cm.status = drmach_io_status;
+	ip->channel = (portid >> 1) & 0x7;
+	ip->leaf = (portid & 0x1);
+
+	snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
+	    ip->dev.type, ip->dev.unum);
+
+	*idp = (drmachid_t)ip;
+	return (NULL);
+}
+
+
+/*
+ * Destroy an IO device object: drop the duplicated node handle,
+ * then free the object itself.
+ */
+static void
+drmach_io_dispose(drmachid_t id)
+{
+	drmach_io_t *ip;
+
+	ASSERT(DRMACH_IS_IO_ID(id));
+
+	ip = (drmach_io_t *)id;
+
+	if (ip->dev.node != NULL)
+		drmach_node_dispose(ip->dev.node);
+
+	kmem_free(ip, sizeof (*ip));
+}
+
+/*
+ * Platform gate run before every sbd command.  STATUS/GETNCM always
+ * pass; everything else requires the "opldr" platform option string
+ * and, for board targets, a legal connect-state transition while DR
+ * is enabled for the domain.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
+{
+	drmach_board_t	*bp = (drmach_board_t *)id;
+	sbd_error_t	*err = NULL;
+
+	/* allow status and ncm operations to always succeed */
+	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
+		return (NULL);
+	}
+
+	/* check all other commands for the required option string */
+
+	if ((opts->size > 0) && (opts->copts != NULL)) {
+
+		DRMACH_PR("platform options: %s\n", opts->copts);
+
+		if (strstr(opts->copts, "opldr") == NULL) {
+			err = drerr_new(1, EOPL_SUPPORT, NULL);
+		}
+	} else {
+		err = drerr_new(1, EOPL_SUPPORT, NULL);
+	}
+
+	/* board targets also get connect-state validation */
+	if (!err && id && DRMACH_IS_BOARD_ID(id)) {
+		switch (cmd) {
+		case SBD_CMD_TEST:
+		case SBD_CMD_STATUS:
+		case SBD_CMD_GETNCM:
+			break;
+		case SBD_CMD_CONNECT:
+			/* cannot connect twice; DR must be enabled */
+			if (bp->connected)
+				err = drerr_new(0, ESBD_STATE, NULL);
+			else if (!drmach_domain.allow_dr)
+				err = drerr_new(1, EOPL_SUPPORT,
+				    NULL);
+			break;
+		case SBD_CMD_DISCONNECT:
+			if (!bp->connected)
+				err = drerr_new(0, ESBD_STATE, NULL);
+			else if (!drmach_domain.allow_dr)
+				err = drerr_new(1, EOPL_SUPPORT,
+				    NULL);
+			break;
+		default:
+			if (!drmach_domain.allow_dr)
+				err = drerr_new(1, EOPL_SUPPORT,
+				    NULL);
+			break;
+
+		}
+	}
+
+	return (err);
+}
+
+/*
+ * Platform hook run after every sbd command.  OPL needs no
+ * post-processing, so this always succeeds.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts)
+{
+	return (NULL);
+}
+
+/*
+ * Mark board bnum assigned to this domain, creating the board
+ * object on first use.  *id receives the board id on success.
+ */
+sbd_error_t *
+drmach_board_assign(int bnum, drmachid_t *id)
+{
+	sbd_error_t	*err = NULL;
+
+	rw_enter(&drmach_boards_rwlock, RW_WRITER);
+
+	if (drmach_array_get(drmach_boards, bnum, id) == -1) {
+		err = drerr_new(1, EOPL_BNUM, "%d", bnum);
+	} else {
+		drmach_board_t	*bp;
+
+		/*
+		 * NOTE(review): when the board already exists the lock is
+		 * downgraded to a read lock, yet bp->assigned is still
+		 * written below -- confirm this single-word store is safe
+		 * under the reader lock.
+		 */
+		if (*id)
+			rw_downgrade(&drmach_boards_rwlock);
+
+		bp = *id;
+		if (!(*id))
+			bp = *id =
+			    (drmachid_t)drmach_board_new(bnum, 0);
+		bp->assigned = 1;
+	}
+
+	rw_exit(&drmach_boards_rwlock);
+
+	return (err);
+}
+
+/*
+ * Connect a board: probe it through the OPL service interface,
+ * notify OBP of the attach, record the board's core population,
+ * and mark it connected.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
+{
+	drmach_board_t	*obj = (drmach_board_t *)id;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	if (opl_probe_sb(obj->bnum) != 0)
+		return (DRMACH_INTERNAL_ERROR());
+
+	(void) prom_attach_notice(obj->bnum);
+
+	drmach_setup_core_info(obj);
+
+	obj->connected = 1;
+
+	return (NULL);
+}
+
+/* Per-CPU handshake flags used by drmach_flush_all()'s cross calls. */
+static int drmach_cache_flush_flag[NCPU];
+
+/*
+ * Cross-call target: flush this CPU's E$ and clear the caller's
+ * handshake flag (id is the target cpu number).
+ */
+/*ARGSUSED*/
+static void
+drmach_flush_cache(uint64_t id, uint64_t dummy)
+{
+	extern void cpu_flush_ecache(void);
+
+	cpu_flush_ecache();
+	drmach_cache_flush_flag[id] = 0;
+}
+
+/*
+ * Flush the E$ of every ready CPU.  Each target clears its slot in
+ * drmach_cache_flush_flag[] when finished; we busy-wait (with DELAY)
+ * so each cross call completes before the next is issued.
+ */
+static void
+drmach_flush_all()
+{
+	cpuset_t	xc_cpuset;
+	int		i;
+
+	xc_cpuset = cpu_ready_set;
+	for (i = 0; i < NCPU; i++) {
+		if (CPU_IN_SET(xc_cpuset, i)) {
+			drmach_cache_flush_flag[i] = 1;
+			xc_one(i, drmach_flush_cache, i, 0);
+			while (drmach_cache_flush_flag[i]) {
+				DELAY(1000);
+			}
+		}
+	}
+}
+
+/*
+ * Hot-remove every hot-added CMP on the board.  Returns 0 on
+ * success, -1 if any core is still started or a removal fails.
+ */
+static int
+drmach_disconnect_cpus(drmach_board_t *bp)
+{
+	int i, bnum;
+
+	bnum = bp->bnum;
+
+	for (i = 0; i < OPL_MAX_COREID_PER_BOARD; i++) {
+		if (bp->cores[i].core_present) {
+			/* a running core must be stopped first */
+			if (bp->cores[i].core_started)
+				return (-1);
+			if (bp->cores[i].core_hotadded) {
+				if (drmach_add_remove_cpu(bnum, i, HOTREMOVE_CPU)) {
+					cmn_err(CE_WARN,
+					    "Failed to remove CMP %d on board %d\n",
+					    i, bnum);
+					return (-1);
+				}
+			}
+		}
+	}
+	return (0);
+}
+
+/*
+ * Disconnect a board: remove its hot-added CMPs, unprobe it through
+ * the OPL service interface, and notify OBP of the detach.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
+{
+	drmach_board_t *obj;
+	int rv = 0;
+	sbd_error_t	*err = NULL;
+
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+
+
+	obj = (drmach_board_t *)id;
+
+	if (drmach_disconnect_cpus(obj)) {
+		err = drerr_new(0, EOPL_DEPROBE, obj->cm.name);
+		return (err);
+	}
+
+	rv = opl_unprobe_sb(obj->bnum);
+
+	if (rv == 0) {
+		prom_detach_notice(obj->bnum);
+		obj->connected = 0;
+
+	} else
+		err = drerr_new(0, EOPL_DEPROBE, obj->cm.name);
+
+	return (err);
+}
+
+/*
+ * Return a node's portid, or -1 when it has none.  cpu nodes have
+ * no portid property, so their cpuid stands in for it.
+ */
+static int
+drmach_get_portid(drmach_node_t *np)
+{
+	int	id;
+	char	dtype[OBP_MAXPROPNAME];
+
+	/* the common case: an explicit portid property */
+	if (np->n_getprop(np, "portid", &id, sizeof (id)) == 0)
+		return (id);
+
+	/* only cpu nodes get the cpuid fallback */
+	if (np->n_getprop(np, "device_type", &dtype, sizeof (dtype)) != 0)
+		return (-1);
+
+	if (strcmp(dtype, OPL_CPU_NODE) != 0)
+		return (-1);
+
+	if (np->n_getprop(np, "cpuid", &id, sizeof (id)) == 0)
+		return (id);
+
+	return (-1);
+}
+
+/*
+ * This is a helper function to determine if a given
+ * node should be considered for a dr operation according
+ * to predefined dr type nodes and the node's name.
+ * Formal Parameter : The name of a device node.
+ * Return Value: -1, name does not map to a valid dr type.
+ *		 A value greater or equal to 0, name is a valid dr type.
+ */
+static int
+drmach_name2type_idx(char *name)
+{
+	const int	ntypes =
+	    sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
+	int		i;
+
+	if (name == NULL)
+		return (-1);
+
+	/* linear scan of the (small) supported-type table */
+	for (i = 0; i < ntypes; i++) {
+		if (strcmp(drmach_name2type[i].name, name) == 0)
+			return (i);
+	}
+
+	/* the node is of no interest to dr */
+	return (-1);
+}
+
+/*
+ * there is some complication on OPL:
+ * - pseudo-mc nodes do not have portid property
+ * - portid[9:5] of cmp node is LSB #, portid[7:3] of pci is LSB#
+ * - cmp has board#
+ * - core and cpu nodes do not have portid and board# properties
+ *   starcat uses portid to derive the board# but that does not work
+ *   for us.  starfire reads board# property to filter the devices.
+ *   That does not work either.  So for these specific device,
+ *   we use specific hard coded methods to get the board# -
+ *   cpu: LSB# = CPUID[9:5]
+ */
+
+/*
+ * Node-walk callback: create a drmach device object for each node
+ * on the target board and report it through data->found.  Returns 0
+ * to continue the walk (node skipped or recorded), -1 on error with
+ * data->err set.
+ */
+static int
+drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
+{
+	drmach_node_t			*node = args->node;
+	drmach_board_cb_data_t		*data = args->data;
+	drmach_board_t			*obj = data->obj;
+
+	int	rv, portid;
+	int	bnum;
+	drmachid_t	id;
+	drmach_device_t	*device;
+	char name[OBP_MAXDRVNAME];
+
+	portid = drmach_get_portid(node);
+	/*
+	 * core, cpu and pseudo-mc do not have portid
+	 * we use cpuid as the portid of the cpu node
+	 * for pseudo-mc, we do not use portid info.
+	 */
+
+	rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME);
+	if (rv)
+		return (0);
+
+
+	rv = node->n_getprop(node, OBP_BOARDNUM, &bnum, sizeof (bnum));
+
+	if (rv) {
+		/*
+		 * cpu does not have board# property.  We use
+		 * CPUID[9:5]
+		 */
+		if (strcmp("cpu", name) == 0) {
+			bnum = (portid >> 5) & 0x1f;
+		} else
+			return (0);
+	}
+
+
+	/* skip nodes belonging to other boards */
+	if (bnum != obj->bnum)
+		return (0);
+
+	if (drmach_name2type_idx(name) < 0) {
+		return (0);
+	}
+
+	/*
+	 * Create a device data structure from this node data.
+	 * The call may yield nothing if the node is not of interest
+	 * to drmach.
+	 */
+	data->err = drmach_device_new(node, obj, portid, &id);
+	if (data->err)
+		return (-1);
+	else if (!id) {
+		/*
+		 * drmach_device_new examined the node we passed in
+		 * and determined that it was one not of interest to
+		 * drmach.  So, it is skipped.
+		 */
+		return (0);
+	}
+
+	rv = drmach_array_set(obj->devices, data->ndevs++, id);
+	if (rv) {
+		data->err = DRMACH_INTERNAL_ERROR();
+		return (-1);
+	}
+	device = id;
+
+	/* hand the new device to the caller's callback */
+	data->err = (*data->found)(data->a, device->type, device->unum, id);
+	return (data->err == NULL ? 0 : -1);
+}
+
+/*
+ * Enumerate the devices on a board: walk its PROM subtree with
+ * drmach_board_find_devices_cb, recording each device in the board's
+ * device array and reporting it through the found callback.  On
+ * failure the partially built device array is disposed.
+ */
+sbd_error_t *
+drmach_board_find_devices(drmachid_t id, void *a,
+	sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
+{
+	drmach_board_t		*bp = (drmach_board_t *)id;
+	sbd_error_t		*err;
+	int			 max_devices;
+	int			 rv;
+	drmach_board_cb_data_t	data;
+
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	max_devices  = MAX_CPU_UNITS_PER_BOARD;
+	max_devices += MAX_MEM_UNITS_PER_BOARD;
+	max_devices += MAX_IO_UNITS_PER_BOARD;
+
+	bp->devices = drmach_array_new(0, max_devices);
+
+	if (bp->tree == NULL)
+		bp->tree = drmach_node_new();
+
+	data.obj = bp;
+	data.ndevs = 0;
+	data.found = found;
+	data.a = a;
+	data.err = NULL;
+
+	rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
+	if (rv == 0)
+		err = NULL;
+	else {
+		drmach_array_dispose(bp->devices, drmach_device_dispose);
+		bp->devices = NULL;
+
+		if (data.err)
+			err = data.err;
+		else
+			err = DRMACH_INTERNAL_ERROR();
+	}
+
+	return (err);
+}
+
+/*
+ * Look up the board object for bnum under the reader lock.
+ * Returns 0 with *id set on success; -1 with *id zeroed otherwise.
+ */
+int
+drmach_board_lookup(int bnum, drmachid_t *id)
+{
+	int found;
+
+	rw_enter(&drmach_boards_rwlock, RW_READER);
+	found = (drmach_array_get(drmach_boards, bnum, id) == 0);
+	if (!found)
+		*id = 0;
+	rw_exit(&drmach_boards_rwlock);
+
+	return (found ? 0 : -1);
+}
+
+/*
+ * Format the canonical name ("SB<n>") for board bnum into buf.
+ * snprintf bounds the write at buflen; never fails.
+ */
+sbd_error_t *
+drmach_board_name(int bnum, char *buf, int buflen)
+{
+	(void) snprintf(buf, buflen, "SB%d", bnum);
+	return (NULL);
+}
+
+/*
+ * Record a board as powered off.  Refused while any of the board's
+ * components is configured or busy.
+ */
+sbd_error_t *
+drmach_board_poweroff(drmachid_t id)
+{
+	drmach_board_t	*bp;
+	sbd_error_t	*err;
+	drmach_status_t	 stat;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	bp = (drmach_board_t *)id;
+
+	err = drmach_board_status(id, &stat);
+	if (err != NULL)
+		return (err);
+
+	if (stat.configured || stat.busy)
+		return (drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name));
+
+	bp->powered = 0;
+	return (NULL);
+}
+
+/*
+ * Record a board as powered on.  Actual power control happens
+ * elsewhere on OPL; this only updates the bookkeeping bit.
+ */
+sbd_error_t *
+drmach_board_poweron(drmachid_t id)
+{
+	drmach_board_t *bp;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	bp = (drmach_board_t *)id;
+	bp->powered = 1;
+
+	return (NULL);
+}
+
+/* No board-level release work is needed on OPL; validate the id only. */
+static sbd_error_t *
+drmach_board_release(drmachid_t id)
+{
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	return (NULL);
+}
+
+/* Board self-test is not implemented on OPL; always succeeds. */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
+{
+	return (NULL);
+}
+
+/*
+ * Unassign a board from the domain: refuse while it is configured
+ * or busy, otherwise remove it from the registry and dispose it.
+ */
+sbd_error_t *
+drmach_board_unassign(drmachid_t id)
+{
+	drmach_board_t	*bp;
+	sbd_error_t	*err;
+	drmach_status_t	 stat;
+
+
+	if (!DRMACH_IS_BOARD_ID(id)) {
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	}
+	bp = id;
+
+	rw_enter(&drmach_boards_rwlock, RW_WRITER);
+
+	err = drmach_board_status(id, &stat);
+	if (err) {
+		rw_exit(&drmach_boards_rwlock);
+		return (err);
+	}
+	if (stat.configured || stat.busy) {
+		err = drerr_new(0, EOPL_CONFIGBUSY, bp->cm.name);
+	} else {
+		/* clear the registry slot before destroying the object */
+		if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
+			err = DRMACH_INTERNAL_ERROR();
+		else
+			drmach_board_dispose(bp);
+	}
+	rw_exit(&drmach_boards_rwlock);
+	return (err);
+}
+
+/*
+ * We have to do more on OPL - e.g. set up sram tte, read cpuid, strand id,
+ * implementation #, etc
+ */
+
+/*
+ * Construct a drmach_cpu_t for a cpu node.  proto->portid carries the
+ * CPUID; unum and the sb/chip/core/strand fields are decoded from it
+ * per the bit layout documented inline below.  Never fails.
+ */
+static sbd_error_t *
+drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
+{
+	static void		drmach_cpu_dispose(drmachid_t);
+	static sbd_error_t	*drmach_cpu_release(drmachid_t);
+	static sbd_error_t	*drmach_cpu_status(drmachid_t, drmach_status_t *);
+
+	int		 portid;
+	drmach_cpu_t	*cp = NULL;
+
+	/* portid is CPUID of the node */
+	portid = proto->portid;
+	ASSERT(portid != -1);
+
+	/* unum = (CMP/CHIP ID) + (ON_BOARD_CORE_NUM * MAX_CMPID_PER_BOARD) */
+	proto->unum = ((portid/OPL_MAX_CPUID_PER_CMP) &
+	    (OPL_MAX_CMPID_PER_BOARD - 1)) +
+	    ((portid & (OPL_MAX_CPUID_PER_CMP - 1)) *
+	    (OPL_MAX_CMPID_PER_BOARD));
+
+	cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
+	bcopy(proto, &cp->dev, sizeof (cp->dev));
+	cp->dev.node = drmach_node_dup(proto->node);
+	/* isa doubles as the type tag checked by DRMACH_IS_CPU_ID */
+	cp->dev.cm.isa = (void *)drmach_cpu_new;
+	cp->dev.cm.dispose = drmach_cpu_dispose;
+	cp->dev.cm.release = drmach_cpu_release;
+	cp->dev.cm.status = drmach_cpu_status;
+
+	snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
+	    cp->dev.type, cp->dev.unum);
+
+/*
+ *	CPU ID representation
+ *	CPUID[9:5] = SB#
+ *	CPUID[4:3] = Chip#
+ *	CPUID[2:1] = Core# (Only 2 core for OPL)
+ *	CPUID[0:0] = Strand#
+ */
+
+/*
+ *	reg property of the strand contains strand ID
+ *	reg property of the parent node contains core ID
+ *	We should use them.
+ */
+	cp->cpuid = portid;
+	cp->sb = (portid >> 5) & 0x1f;
+	cp->chipid = (portid >> 3) & 0x3;
+	cp->coreid = (portid >> 1) & 0x3;
+	cp->strandid = portid & 0x1;
+
+	*idp = (drmachid_t)cp;
+	return (NULL);
+}
+
+
+/*
+ * Destroy a cpu device object: drop the duplicated node handle,
+ * then free the object itself.
+ */
+static void
+drmach_cpu_dispose(drmachid_t id)
+{
+	drmach_cpu_t *cp;
+
+	ASSERT(DRMACH_IS_CPU_ID(id));
+
+	cp = (drmach_cpu_t *)id;
+
+	if (cp->dev.node != NULL)
+		drmach_node_dispose(cp->dev.node);
+
+	kmem_free(cp, sizeof (*cp));
+}
+
+/*
+ * Cold-start a powered-off cpu via the PROM.  Caller holds cpu_lock.
+ * Always returns 0 (restart_other_cpu's result is not checked here).
+ */
+static int
+drmach_cpu_start(struct cpu *cp)
+{
+	int		 cpuid = cp->cpu_id;
+	extern int	 restart_other_cpu(int);
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+	ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0);
+
+	cp->cpu_flags &= ~CPU_POWEROFF;
+
+	/*
+	 * NOTE: restart_other_cpu pauses cpus during the
+	 *	 slave cpu start.  This helps to quiesce the
+	 *	 bus traffic a bit which makes the tick sync
+	 *	 routine in the prom more robust.
+	 */
+	DRMACH_PR("COLD START for cpu (%d)\n", cpuid);
+
+	restart_other_cpu(cpuid);
+
+	return (0);
+}
+
+/* No cpu-level release work is needed on OPL; validate the id only. */
+static sbd_error_t *
+drmach_cpu_release(drmachid_t id)
+{
+	return (DRMACH_IS_CPU_ID(id) ? NULL :
+	    drerr_new(0, EOPL_INAPPROP, NULL));
+}
+
+/*
+ * Fill stat for a cpu device.  "configured" means the cpu is known
+ * to the kernel (cpu_get succeeds under cpu_lock).
+ */
+static sbd_error_t *
+drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
+{
+	drmach_cpu_t *cp;
+	drmach_device_t *dp;
+
+	ASSERT(DRMACH_IS_CPU_ID(id));
+	cp = (drmach_cpu_t *)id;
+	dp = &cp->dev;
+
+	stat->assigned = dp->bp->assigned;
+	stat->powered = dp->bp->powered;
+	mutex_enter(&cpu_lock);
+	stat->configured = (cpu_get(cp->cpuid) != NULL);
+	mutex_exit(&cpu_lock);
+	stat->busy = dp->busy;
+	/* NOTE(review): strncpy does not NUL-terminate on truncation */
+	strncpy(stat->type, dp->type, sizeof (stat->type));
+	stat->info[0] = '\0';
+
+	return (NULL);
+}
+
+/* No cpu-level disconnect work is needed on OPL; validate the id only. */
+sbd_error_t *
+drmach_cpu_disconnect(drmachid_t id)
+{
+	return (DRMACH_IS_CPU_ID(id) ? NULL :
+	    drerr_new(0, EOPL_INAPPROP, NULL));
+}
+
+/*
+ * Return the processor id of a cpu device.  On OPL the cpuid was
+ * captured at object creation, so it is read straight from the object.
+ */
+sbd_error_t *
+drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
+{
+	if (!DRMACH_IS_CPU_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	*cpuid = ((drmach_cpu_t *)id)->cpuid;
+	return (NULL);
+}
+
+/*
+ * Return the implementation# of a cpu.  The property lives on the
+ * cpu's parent node, which must be a core node.
+ */
+sbd_error_t *
+drmach_cpu_get_impl(drmachid_t id, int *ip)
+{
+	drmach_device_t *cpu;
+	drmach_node_t	*np;
+	drmach_node_t	pp;
+	int		impl;
+	char		type[OBP_MAXPROPNAME];
+
+	if (!DRMACH_IS_CPU_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	cpu = id;
+	np = cpu->node;
+
+	if (np->get_parent(np, &pp) != 0) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	/* the parent should be core */
+
+	if (pp.n_getprop(&pp, "device_type", &type, sizeof (type)) != 0) {
+		return (drerr_new(0, EOPL_GETPROP, NULL));
+	}
+
+	if (strcmp(type, OPL_CORE_NODE) == 0) {
+		if (pp.n_getprop(&pp, "implementation#",
+		    &impl, sizeof (impl)) != 0) {
+			return (drerr_new(0, EOPL_GETPROP, NULL));
+		}
+	} else {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	*ip = impl;
+
+	return (NULL);
+}
+
+/*
+ * Return the devinfo node backing a drmach device (may be NULL if
+ * the node has no dip).
+ */
+sbd_error_t *
+drmach_get_dip(drmachid_t id, dev_info_t **dip)
+{
+	drmach_device_t *dev;
+
+	if (!DRMACH_IS_DEVICE_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	dev = (drmach_device_t *)id;
+	*dip = dev->node->n_getdip(dev->node);
+
+	return (NULL);
+}
+
+/*
+ * Report whether an IO device is attached: *yes is set when the
+ * node state is at least DS_ATTACHED or the device state is UP.
+ * A device without a dip is reported as not attached.
+ */
+sbd_error_t *
+drmach_io_is_attached(drmachid_t id, int *yes)
+{
+	drmach_device_t *dp;
+	dev_info_t	*dip;
+	int		state;
+
+	if (!DRMACH_IS_IO_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	dp = id;
+
+	dip = dp->node->n_getdip(dp->node);
+	if (dip == NULL) {
+		*yes = 0;
+		return (NULL);
+	}
+
+	state = ddi_get_devstate(dip);
+	*yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
+	    (state == DDI_DEVSTATE_UP));
+
+	return (NULL);
+}
+
+/* Argument block for the devtree walk driven by drmach_console_ops(). */
+struct drmach_io_cb {
+	char	*name;	/* name of the node */
+	int	(*func)(dev_info_t *);
+	int	rv;	/* result of the callback on the matched node */
+};
+
+#define	DRMACH_IO_POST_ATTACH	0
+#define	DRMACH_IO_PRE_RELEASE	1
+
+/*
+ * ddi_walk_devs callback: invoke arg->func on the first node whose
+ * "name" property matches arg->name, then terminate the walk.
+ * Subtrees of nodes without a name property are pruned.
+ */
+static int
+drmach_io_cb_check(dev_info_t *dip, void *arg)
+{
+	struct drmach_io_cb *p = (struct drmach_io_cb *)arg;
+	char name[OBP_MAXDRVNAME];
+	int len = OBP_MAXDRVNAME;
+
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, "name",
+	    (caddr_t)name, &len) != DDI_PROP_SUCCESS) {
+		return (DDI_WALK_PRUNECHILD);
+	}
+
+	if (strcmp(name, p->name) == 0) {
+		p->rv = (*p->func)(dip);
+		return (DDI_WALK_TERMINATE);
+	}
+
+	return (DDI_WALK_CONTINUE);
+}
+
+
+/*
+ * Run the oplmsu console driver's attach/detach hook against the
+ * "serial" node under an IO device.  Only the pcicmu channel (4)
+ * hosts the console; all other channels return 0 immediately.  The
+ * oplmsu entry points are resolved dynamically so the walk is a
+ * no-op when that module is not loaded.
+ */
+static int
+drmach_console_ops(drmachid_t *id, int state)
+{
+	drmach_io_t *obj = (drmach_io_t *)id;
+	struct drmach_io_cb arg;
+	int (*msudetp)(dev_info_t *);
+	int (*msuattp)(dev_info_t *);
+	dev_info_t *dip, *pdip;
+	int circ;
+
+	/* 4 is pcicmu channel */
+	if (obj->channel != 4)
+		return (0);
+
+	arg.name = "serial";
+	arg.func = NULL;
+	if (state == DRMACH_IO_PRE_RELEASE) {
+		msudetp = (int (*)(dev_info_t *))
+		    modgetsymvalue("oplmsu_dr_detach", 0);
+		if (msudetp != NULL)
+			arg.func = msudetp;
+	} else if (state == DRMACH_IO_POST_ATTACH) {
+		msuattp = (int (*)(dev_info_t *))
+		    modgetsymvalue("oplmsu_dr_attach", 0);
+		if (msuattp != NULL)
+			arg.func = msuattp;
+	}
+	else
+		return (0);
+
+	if (arg.func == NULL) {
+		return (0);
+	}
+
+	arg.rv = 0;
+
+	/* hold and single-thread the parent across the subtree walk */
+	dip = obj->dev.node->n_getdip(obj->dev.node);
+	if (pdip = ddi_get_parent(dip)) {
+		ndi_hold_devi(pdip);
+		ndi_devi_enter(pdip, &circ);
+	} else {
+		/* this cannot happen unless something bad happens */
+		return (-1);
+	}
+
+	ddi_walk_devs(dip, drmach_io_cb_check, (void *)&arg);
+
+	/* pdip is known non-NULL here; this test is belt-and-braces */
+	if (pdip) {
+		ndi_devi_exit(pdip, circ);
+		ndi_rele_devi(pdip);
+	}
+
+	return (arg.rv);
+}
+
+/*
+ * Pre-release hook for IO devices: give the console (oplmsu) a
+ * chance to detach its path.  A callback failure is logged but does
+ * not fail the release.
+ */
+sbd_error_t *
+drmach_io_pre_release(drmachid_t id)
+{
+	if (!DRMACH_IS_IO_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	if (drmach_console_ops(id, DRMACH_IO_PRE_RELEASE) != 0)
+		cmn_err(CE_WARN, "IO callback failed in pre-release\n");
+
+	return (NULL);
+}
+
+/* No IO-level release work is needed on OPL; validate the id only. */
+static sbd_error_t *
+drmach_io_release(drmachid_t id)
+{
+	if (!DRMACH_IS_IO_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	return (NULL);
+}
+
+/* Undo of a release is likewise a no-op on OPL. */
+sbd_error_t *
+drmach_io_unrelease(drmachid_t id)
+{
+	if (!DRMACH_IS_IO_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	return (NULL);
+}
+
+/* Post-release hook: nothing to do on OPL. */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_io_post_release(drmachid_t id)
+{
+	return (NULL);
+}
+
+/*
+ * Post-attach hook for IO devices: give the console (oplmsu) a
+ * chance to reattach its path.  A callback failure is logged but
+ * does not fail the attach.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_io_post_attach(drmachid_t id)
+{
+	int rv;
+
+	if (!DRMACH_IS_IO_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	rv = drmach_console_ops(id, DRMACH_IO_POST_ATTACH);
+
+	if (rv != 0)
+		cmn_err(CE_WARN, "IO callback failed in post-attach\n");
+
+	/* fix: success value for an sbd_error_t * is NULL, not 0 */
+	return (NULL);
+}
+
+/*
+ * Fill stat for an IO device; "configured" reflects the attach
+ * state reported by drmach_io_is_attached().
+ */
+static sbd_error_t *
+drmach_io_status(drmachid_t id, drmach_status_t *stat)
+{
+	drmach_device_t *dp;
+	sbd_error_t	*err;
+	int		 configured;
+
+	ASSERT(DRMACH_IS_IO_ID(id));
+	dp = id;
+
+	err = drmach_io_is_attached(id, &configured);
+	if (err)
+		return (err);
+
+	stat->assigned = dp->bp->assigned;
+	stat->powered = dp->bp->powered;
+	stat->configured = (configured != 0);
+	stat->busy = dp->busy;
+	/* NOTE(review): strncpy does not NUL-terminate on truncation */
+	strncpy(stat->type, dp->type, sizeof (stat->type));
+	stat->info[0] = '\0';
+
+	return (NULL);
+}
+
+/*
+ * Construct a drmach_mem_t for a memory (pseudo-mc) node and read
+ * its MC configuration.  Memory units are always unum 0 on OPL.
+ */
+static sbd_error_t *
+drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
+{
+	static void		drmach_mem_dispose(drmachid_t);
+	static sbd_error_t	*drmach_mem_release(drmachid_t);
+	static sbd_error_t	*drmach_mem_status(drmachid_t, drmach_status_t *);
+	dev_info_t		*dip;
+
+	drmach_mem_t	*mp;
+
+	mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
+	proto->unum = 0;
+
+	bcopy(proto, &mp->dev, sizeof (mp->dev));
+	mp->dev.node = drmach_node_dup(proto->node);
+	/* isa doubles as the type tag checked by DRMACH_IS_MEM_ID */
+	mp->dev.cm.isa = (void *)drmach_mem_new;
+	mp->dev.cm.dispose = drmach_mem_dispose;
+	mp->dev.cm.release = drmach_mem_release;
+	mp->dev.cm.status = drmach_mem_status;
+
+	snprintf(mp->dev.cm.name,
+	    sizeof (mp->dev.cm.name), "%s", mp->dev.type);
+
+	dip = mp->dev.node->n_getdip(mp->dev.node);
+	/*
+	 * NOTE(review): on failure mp (and its dup'd node) appear to be
+	 * leaked -- confirm whether the error path should dispose them.
+	 */
+	if (drmach_setup_mc_info(dip, mp) != 0) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	*idp = (drmachid_t)mp;
+	return (NULL);
+}
+
+/*
+ * Destroy a memory device object's resources: the duplicated node
+ * handle and the cached memlist.
+ */
+static void
+drmach_mem_dispose(drmachid_t id)
+{
+	drmach_mem_t *mp;
+
+	ASSERT(DRMACH_IS_MEM_ID(id));
+
+
+	mp = id;
+
+	if (mp->dev.node)
+		drmach_node_dispose(mp->dev.node);
+
+	if (mp->memlist) {
+		memlist_delete(mp->memlist);
+		mp->memlist = NULL;
+	}
+	/*
+	 * NOTE(review): unlike cpu/io dispose, mp itself is not
+	 * kmem_free'd here -- confirm whether this is intentional.
+	 */
+}
+
+/*
+ * Add the span [basepa, basepa + size) to the kernel cage.  size
+ * must be non-zero (asserted).  Returns NULL on success, an internal
+ * error otherwise (ENOMEM and other failures are also logged).
+ */
+sbd_error_t *
+drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
+{
+	pfn_t	basepfn = (pfn_t)(basepa >> PAGESHIFT);
+	pgcnt_t	npages = (pgcnt_t)(size >> PAGESHIFT);
+	int	rv;
+
+	ASSERT(size != 0);
+
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	kcage_range_lock();
+	rv = kcage_range_add(basepfn, npages, 1);
+	kcage_range_unlock();
+	if (rv == ENOMEM) {
+		/*
+		 * size is non-zero here (see ASSERT above), so the old
+		 * "size == 0 ? 0 :" guard was dead; cast keeps %ld honest.
+		 */
+		cmn_err(CE_WARN, "%ld megabytes not available to kernel cage",
+		    (long)(size / MBYTE));
+	} else if (rv != 0) {
+		/* catch this in debug kernels */
+		ASSERT(0);
+
+		cmn_err(CE_WARN, "unexpected kcage_range_add"
+		    " return value %d", rv);
+	}
+
+	return (rv == 0 ? NULL : DRMACH_INTERNAL_ERROR());
+}
+
+/*
+ * Remove the span [basepa, basepa + size) from the kernel cage
+ * after a memory delete.  A zero-size span is a no-op.
+ */
+sbd_error_t *
+drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size)
+{
+	pfn_t	first_pfn = (pfn_t)(basepa >> PAGESHIFT);
+	pgcnt_t	page_cnt = (pgcnt_t)(size >> PAGESHIFT);
+	int	ret;
+
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	if (size == 0)
+		return (NULL);
+
+	kcage_range_lock();
+	ret = kcage_range_delete_post_mem_del(first_pfn, page_cnt);
+	kcage_range_unlock();
+
+	if (ret != 0) {
+		cmn_err(CE_WARN,
+		    "unexpected kcage_range_delete_post_mem_del"
+		    " return value %d", ret);
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	return (NULL);
+}
+
+/*
+ * Disable a memory unit.  On OPL this just flushes every CPU's E$
+ * so no dirty lines reference the outgoing memory.
+ */
+sbd_error_t *
+drmach_mem_disable(drmachid_t id)
+{
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	else {
+		drmach_flush_all();
+		return (NULL);
+	}
+}
+
+/* Enabling a memory unit requires no work on OPL. */
+sbd_error_t *
+drmach_mem_enable(drmachid_t id)
+{
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	else
+		return (NULL);
+}
+
+/*
+ * Report a memory unit's geometry.  dr uses this only to round
+ * memory spans for copy-rename; the OPL unit of memory isolation
+ * is 64 MB, hence the fixed alignment mask.
+ */
+sbd_error_t *
+drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
+{
+	drmach_mem_t *mem_obj;
+
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	mem_obj = (drmach_mem_t *)id;
+
+	mem->mi_basepa = mem_obj->base_pa;
+	mem->mi_size = mem_obj->nbytes;
+	mem->mi_slice_size = mem_obj->slice_size;
+	mem->mi_alignment_mask = (64 * 1024 * 1024 - 1);
+
+	return (NULL);
+}
+
+/* Return the base physical address of a memory unit. */
+sbd_error_t *
+drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *pa)
+{
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	*pa = ((drmach_mem_t *)id)->base_pa;
+	return (NULL);
+}
+
+/*
+ * Return a copy of a memory unit's memlist in *ml.  The caller owns
+ * the copy (memlist_delete).  DEBUG kernels additionally verify the
+ * derived list does not intersect phys_install.
+ */
+sbd_error_t *
+drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
+{
+	drmach_mem_t	*mem;
+	struct memlist	*mlist;
+
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	mem = (drmach_mem_t *)id;
+	mlist = memlist_dup(mem->memlist);
+
+#ifdef DEBUG
+	{
+		/* rv scoped here: unused in non-DEBUG builds otherwise */
+		int rv;
+
+		/*
+		 * Make sure the incoming memlist doesn't already
+		 * intersect with what's present in the system (phys_install).
+		 */
+		memlist_read_lock();
+		rv = memlist_intersect(phys_install, mlist);
+		memlist_read_unlock();
+		if (rv) {
+			DRMACH_PR("Derived memlist intersects"
+			    " with phys_install\n");
+			memlist_dump(mlist);
+
+			DRMACH_PR("phys_install memlist:\n");
+			memlist_dump(phys_install);
+
+			memlist_delete(mlist);
+			return (DRMACH_INTERNAL_ERROR());
+		}
+
+		DRMACH_PR("Derived memlist:");
+		memlist_dump(mlist);
+	}
+#endif
+
+	*ml = mlist;
+
+	return (NULL);
+}
+
+/* Return the slice size of a memory unit. */
+sbd_error_t *
+drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes)
+{
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	*bytes = ((drmach_mem_t *)id)->slice_size;
+	return (NULL);
+}
+
+
+/* Memory copy work has no preferred CPU on OPL; use the current one. */
+/* ARGSUSED */
+processorid_t
+drmach_mem_cpu_affinity(drmachid_t id)
+{
+	return (CPU_CURRENT);
+}
+
+/* No memory-level release work is needed on OPL; validate the id only. */
+static sbd_error_t *
+drmach_mem_release(drmachid_t id)
+{
+	if (!DRMACH_IS_MEM_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	return (NULL);
+}
+
+/*
+ * Fill stat for a memory unit.  "configured" means phys_install has
+ * at least one span inside the unit's slice.
+ */
+static sbd_error_t *
+drmach_mem_status(drmachid_t id, drmach_status_t *stat)
+{
+	drmach_mem_t *dp;
+	uint64_t	 pa, slice_size;
+	struct memlist	*ml;
+
+	ASSERT(DRMACH_IS_MEM_ID(id));
+	dp = id;
+
+	/* get starting physical address of target memory */
+	pa = dp->base_pa;
+
+	/* round down to slice boundary */
+	slice_size = dp->slice_size;
+	pa &= ~(slice_size - 1);
+
+	/* stop at first span that is in slice */
+	memlist_read_lock();
+	for (ml = phys_install; ml; ml = ml->next)
+		if (ml->address >= pa && ml->address < pa + slice_size)
+			break;
+	memlist_read_unlock();
+
+	stat->assigned = dp->dev.bp->assigned;
+	stat->powered = dp->dev.bp->powered;
+	stat->configured = (ml != NULL);
+	stat->busy = dp->dev.busy;
+	/* NOTE(review): strncpy does not NUL-terminate on truncation */
+	strncpy(stat->type, dp->dev.type, sizeof (stat->type));
+	stat->info[0] = '\0';
+
+	return (NULL);
+}
+
+
+/*
+ * Deprobe a board: discard its cached PROM tree and device array
+ * and clear the boot-board flag.  The actual PROM detach happened
+ * earlier (see drmach_board_disconnect).
+ */
+sbd_error_t *
+drmach_board_deprobe(drmachid_t id)
+{
+	drmach_board_t	*bp;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	bp = id;
+
+	cmn_err(CE_CONT, "DR: PROM detach board %d\n", bp->bnum);
+
+	if (bp->tree) {
+		drmach_node_dispose(bp->tree);
+		bp->tree = NULL;
+	}
+	if (bp->devices) {
+		drmach_array_dispose(bp->devices, drmach_device_dispose);
+		bp->devices = NULL;
+	}
+
+	bp->boot_board = 0;
+
+	return (NULL);
+}
+
+/*
+ * Passthru command: in-kernel probe of a system board via the OPL
+ * service interface.
+ */
+/*ARGSUSED*/
+static sbd_error_t *
+drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts)
+{
+	drmach_board_t	*bp = (drmach_board_t *)id;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	DRMACH_PR("calling opl_probe_board for bnum=%d\n", bp->bnum);
+	if (opl_probe_sb(bp->bnum) != 0)
+		return (drerr_new(0, EOPL_PROBE, bp->cm.name));
+
+	return (NULL);
+}
+
+/*
+ * Passthru command: in-kernel unprobe of a system board via the OPL
+ * service interface.
+ */
+/*ARGSUSED*/
+static sbd_error_t *
+drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts)
+{
+	drmach_board_t	*bp;
+
+	if (!DRMACH_IS_BOARD_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	bp = (drmach_board_t *)id;
+
+	cmn_err(CE_CONT, "DR: in-kernel unprobe board %d\n", bp->bnum);
+
+	if (opl_unprobe_sb(bp->bnum) != 0)
+		return (drerr_new(0, EOPL_DEPROBE, bp->cm.name));
+
+	return (NULL);
+}
+
+
+/*
+ * Passthru command: touch every installed physical page by copying
+ * 32 bytes at a time from each span to a scratch buffer (used to
+ * scrub/verify memory after DR operations).
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts)
+{
+	struct memlist	*ml;
+	uint64_t	src_pa;
+	uint64_t	dst_pa;
+	uint64_t	dst;
+
+	dst_pa = va_to_pa(&dst);
+
+	memlist_read_lock();
+	for (ml = phys_install; ml; ml = ml->next) {
+		uint64_t	nbytes;
+
+		src_pa = ml->address;
+		nbytes = ml->size;
+
+		while (nbytes != 0ull) {
+
+			/* copy 32 bytes at src_pa to dst_pa */
+			bcopy32_il(src_pa, dst_pa);
+
+			/* increment by 32 bytes */
+			src_pa += (4 * sizeof (uint64_t));
+
+			/* decrement by 32 bytes */
+			nbytes -= (4 * sizeof (uint64_t));
+		}
+	}
+	memlist_read_unlock();
+
+	return (NULL);
+}
+
+/* Dispatch table mapping passthru command names to their handlers. */
+static struct {
+	const char	*name;
+	sbd_error_t	*(*handler)(drmachid_t id, drmach_opts_t *opts);
+} drmach_pt_arr[] = {
+	{ "readmem",		drmach_pt_readmem		},
+	{ "ikprobe",		drmach_pt_ikprobe		},
+	{ "ikdeprobe",		drmach_pt_ikdeprobe		},
+
+	/* the following line must always be last */
+	{ NULL,			NULL				}
+};
+
+/*
+ * Dispatch a platform passthru command: match the option string's
+ * prefix against drmach_pt_arr and run the handler, or report an
+ * unknown command.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_passthru(drmachid_t id, drmach_opts_t *opts)
+{
+	int i;
+
+	for (i = 0; drmach_pt_arr[i].name != NULL; i++) {
+		if (strncmp(drmach_pt_arr[i].name, opts->copts,
+		    strlen(drmach_pt_arr[i].name)) == 0)
+			break;
+	}
+
+	if (drmach_pt_arr[i].name == NULL)
+		return (drerr_new(0, EOPL_UNKPTCMD, opts->copts));
+
+	return ((*drmach_pt_arr[i].handler)(id, opts));
+}
+
+/* Dispatch a device release through the object's vtable. */
+sbd_error_t *
+drmach_release(drmachid_t id)
+{
+	if (!DRMACH_IS_DEVICE_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	return (((drmach_common_t *)id)->release(id));
+}
+
+/*
+ * Public status entry point: dispatch through the object's vtable
+ * under the board registry reader lock.
+ */
+sbd_error_t *
+drmach_status(drmachid_t id, drmach_status_t *stat)
+{
+	drmach_common_t *cp;
+	sbd_error_t	*err;
+
+	rw_enter(&drmach_boards_rwlock, RW_READER);
+
+	if (!DRMACH_IS_ID(id)) {
+		rw_exit(&drmach_boards_rwlock);
+		return (drerr_new(0, EOPL_NOTID, NULL));
+	}
+	cp = (drmach_common_t *)id;
+	err = cp->status(id, stat);
+
+	rw_exit(&drmach_boards_rwlock);
+
+	return (err);
+}
+
+/*
+ * Internal status dispatch: identical to drmach_status() but takes
+ * no lock -- the caller already holds drmach_boards_rwlock.
+ */
+static sbd_error_t *
+drmach_i_status(drmachid_t id, drmach_status_t *stat)
+{
+	if (!DRMACH_IS_ID(id))
+		return (drerr_new(0, EOPL_NOTID, NULL));
+
+	return (((drmach_common_t *)id)->status(id, stat));
+}
+
+/*
+ * Unconfigure a device's devinfo branch.  CPUs are handled elsewhere
+ * and are a no-op here; nodes without a name property are skipped.
+ * On failure the path of the blocking node is reported in the error.
+ */
+/*ARGSUSED*/
+sbd_error_t *
+drmach_unconfigure(drmachid_t id, int flags)
+{
+	drmach_device_t *dp;
+	dev_info_t	*rdip, *fdip = NULL;
+	char	name[OBP_MAXDRVNAME];
+	int rv;
+
+	if (DRMACH_IS_CPU_ID(id))
+		return (NULL);
+
+	if (!DRMACH_IS_DEVICE_ID(id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	dp = id;
+
+	rdip = dp->node->n_getdip(dp->node);
+
+	ASSERT(rdip);
+
+	rv = dp->node->n_getprop(dp->node, "name", name, OBP_MAXDRVNAME);
+
+	if (rv)
+		return (NULL);
+
+	/*
+	 * Note: FORCE flag is no longer necessary under devfs
+	 */
+
+	ASSERT(e_ddi_branch_held(rdip));
+	if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
+		sbd_error_t	*err;
+		char		*path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+
+		/*
+		 * If non-NULL, fdip is returned held and must be released.
+		 */
+		if (fdip != NULL) {
+			(void) ddi_pathname(fdip, path);
+			ndi_rele_devi(fdip);
+		} else {
+			(void) ddi_pathname(rdip, path);
+		}
+
+		err = drerr_new(1, EOPL_DRVFAIL, path);
+
+		kmem_free(path, MAXPATHLEN);
+
+		return (err);
+	}
+
+	return (NULL);
+}
+
+
+/*
+ * Power on and start the given CPU.  If the CPU's CMP (core) has
+ * not yet been hot-added to the LSB, add it first.  If the start
+ * then fails and no strand of the core had been started, the
+ * hot-add is undone here (or, failing that, later by
+ * drmach_board_disconnect).  Returns 0 on success, EIO if the
+ * hot-add fails, EBUSY if the CPU could not be started.
+ * Caller must hold cpu_lock.
+ */
+int
+drmach_cpu_poweron(struct cpu *cp)
+{
+	int bnum, cpuid, onb_core_num, strand_id;
+	drmach_board_t *bp;
+
+	DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id);
+
+	cpuid = cp->cpu_id;
+	bnum = LSB_ID(cpuid);
+	onb_core_num = ON_BOARD_CORE_NUM(cpuid);
+	strand_id = STRAND_ID(cpuid);
+	bp = drmach_get_board_by_bnum(bnum);
+
+	ASSERT(bp);
+	if (bp->cores[onb_core_num].core_hotadded == 0) {
+		if (drmach_add_remove_cpu(bnum, onb_core_num,
+			HOTADD_CPU) != 0) {
+			cmn_err(CE_WARN, "Failed to add CMP %d on board %d\n",
+				onb_core_num, bnum);
+			return (EIO);
+		}
+	}
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+
+	if (drmach_cpu_start(cp) != 0) {
+		if (bp->cores[onb_core_num].core_started == 0) {
+			/*
+			 * we must undo the hotadd or no one will do that
+			 * If this fails, we will do this again in
+			 * drmach_board_disconnect.
+			 */
+			if (drmach_add_remove_cpu(bnum, onb_core_num,
+				HOTREMOVE_CPU) != 0) {
+				cmn_err(CE_WARN, "Failed to remove CMP %d "
+					"on board %d\n",
+					onb_core_num, bnum);
+			}
+		}
+		return (EBUSY);
+	} else {
+		/* record this strand as started on its core */
+		bp->cores[onb_core_num].core_started |= (1 << strand_id);
+		return (0);
+	}
+}
+
+/*
+ * Stop and power off the given CPU.  All other CPUs are paused
+ * while the target is quiesced and stopped via the PROM; once the
+ * last strand of a core has been stopped, the core itself is
+ * hot-removed from the LSB.  Returns 0 on success, EIO if the
+ * core removal fails, or the PROM stop failure code.
+ * Caller must hold cpu_lock.
+ */
+int
+drmach_cpu_poweroff(struct cpu *cp)
+{
+	int rv = 0;
+	processorid_t cpuid = cp->cpu_id;
+
+	DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id);
+
+	ASSERT(MUTEX_HELD(&cpu_lock));
+
+	/*
+	 * Capture all CPUs (except for detaching proc) to prevent
+	 * crosscalls to the detaching proc until it has cleared its
+	 * bit in cpu_ready_set.
+	 *
+	 * The CPU's remain paused and the prom_mutex is known to be free.
+	 * This prevents the x-trap victim from blocking when doing prom
+	 * IEEE-1275 calls at a high PIL level.
+	 */
+
+	promsafe_pause_cpus();
+
+	/*
+	 * Quiesce interrupts on the target CPU. We do this by setting
+	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
+	 * prevent it from receiving cross calls and cross traps.
+	 * This prevents the processor from receiving any new soft interrupts.
+	 */
+	mp_cpu_quiesce(cp);
+
+	rv = prom_stopcpu_bycpuid(cpuid);
+	if (rv == 0)
+		cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
+
+	start_cpus();
+
+	if (rv == 0) {
+		int bnum, onb_core_num, strand_id;
+		drmach_board_t *bp;
+
+		CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);
+
+		bnum = LSB_ID(cpuid);
+		onb_core_num = ON_BOARD_CORE_NUM(cpuid);
+		strand_id = STRAND_ID(cpuid);
+		bp = drmach_get_board_by_bnum(bnum);
+		ASSERT(bp);
+
+		/* clear this strand; remove the core once all are stopped */
+		bp->cores[onb_core_num].core_started &= ~(1 << strand_id);
+		if (bp->cores[onb_core_num].core_started == 0) {
+			if (drmach_add_remove_cpu(bnum, onb_core_num,
+				HOTREMOVE_CPU) != 0) {
+				cmn_err(CE_WARN,
+					"Failed to remove CMP %d LSB %d\n",
+					onb_core_num, bnum);
+				return (EIO);
+			}
+		}
+	}
+
+	return (rv);
+}
+
+/*
+ * Suspend/resume verification hook; nothing to verify on this
+ * platform, so always approve.
+ */
+/*ARGSUSED*/
+int
+drmach_verify_sr(dev_info_t *dip, int sflag)
+{
+	return (0);
+}
+
+/* Hook invoked after the last device is suspended; no-op on OPL. */
+void
+drmach_suspend_last(void)
+{
+}
+
+/* Hook invoked before the first device is resumed; no-op on OPL. */
+void
+drmach_resume_first(void)
+{
+}
+
+/*
+ * Log a DR sysevent for the given board, attaching the attachment
+ * point id and the supplied hint as attributes.
+ * flag selects SE_SLEEP/SE_NOSLEEP allocation behavior; the event
+ * itself is always logged without waiting for delivery.
+ * Return value: 0 success, non-zero failure.
+ */
+int
+drmach_log_sysevent(int board, char *hint, int flag, int verbose)
+{
+	sysevent_t	*ev;
+	sysevent_id_t	eid;
+	int		rv, km_flag;
+	sysevent_value_t	evnt_val;
+	sysevent_attr_list_t	*evnt_attr_list = NULL;
+	char		attach_pnt[MAXNAMELEN];
+
+	km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
+	attach_pnt[0] = '\0';
+	if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) {
+		rv = -1;
+		goto logexit;
+	}
+	if (verbose)
+		DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
+			attach_pnt, hint, flag, verbose);
+
+	if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
+		SUNW_KERN_PUB"dr", km_flag)) == NULL) {
+		rv = -2;
+		goto logexit;
+	}
+	evnt_val.value_type = SE_DATA_TYPE_STRING;
+	evnt_val.value.sv_string = attach_pnt;
+	/*
+	 * If this first sysevent_add_attr() fails, the attribute list
+	 * was never created, so there is nothing to free on this path.
+	 */
+	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID,
+		&evnt_val, km_flag)) != 0)
+		goto logexit;
+
+	evnt_val.value_type = SE_DATA_TYPE_STRING;
+	evnt_val.value.sv_string = hint;
+	if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT,
+		&evnt_val, km_flag)) != 0) {
+		/* list exists from the first add and must be freed here */
+		sysevent_free_attr(evnt_attr_list);
+		goto logexit;
+	}
+
+	/* after attach, the event owns the attribute list */
+	(void) sysevent_attach_attributes(ev, evnt_attr_list);
+
+	/*
+	 * Log the event but do not sleep waiting for its
+	 * delivery. This provides insulation from syseventd.
+	 */
+	rv = log_sysevent(ev, SE_NOSLEEP, &eid);
+
+logexit:
+	if (ev)
+		sysevent_free(ev);
+	if ((rv != 0) && verbose)
+		cmn_err(CE_WARN,
+			"drmach_log_sysevent failed (rv %d) for %s %s\n",
+			rv, attach_pnt, hint);
+
+	return (rv);
+}
+
+#define OPL_DR_STATUS_PROP "dr-status"
+
+/*
+ * Probe the PROM root node for the "dr-status" property.
+ * Returns 0 when the property exists and is readable, 1 otherwise.
+ * The property is absent when DR is activated, in which case no
+ * warning is warranted.
+ */
+static int
+opl_check_dr_status()
+{
+	pnode_t	node;
+	int	len;
+	int	rv;
+	char	*buf;
+
+	node = prom_rootnode();
+	if (node == OBP_BADNODE)
+		return (1);
+
+	len = prom_getproplen(node, OPL_DR_STATUS_PROP);
+	if (len == -1) {
+		/*
+		 * dr-status doesn't exist when DR is activated and
+		 * any warning messages aren't needed.
+		 */
+		return (1);
+	}
+
+	buf = kmem_zalloc(len + 1, KM_SLEEP);
+	rv = prom_getprop(node, OPL_DR_STATUS_PROP, buf);
+	kmem_free(buf, len + 1);
+
+	return ((rv == -1) ? 1 : 0);
+}
+
+/*
+ * Look up the SCF driver's available-command register VA (exported
+ * as the symbol "scf_avail_cmd_reg_vaddr") and return its physical
+ * address through *addr.  Fails if the SCF driver is not loaded.
+ */
+static sbd_error_t *
+drmach_get_scf_addr(uint64_t *addr)
+{
+	caddr_t	*vap;
+
+	vap = (caddr_t *)modgetsymvalue("scf_avail_cmd_reg_vaddr", 0);
+	if (vap == NULL)
+		return (DRMACH_INTERNAL_ERROR());
+
+	*addr = (uint64_t)va_to_pa(*vap);
+	return (NULL);
+}
+
+/* we are allocating memlist from TLB locked pages to avoid tlbmisses */
+
+/*
+ * Add the span [base, base+len) to the sorted list mlist, taking
+ * nodes from the program's pre-allocated, TLB-locked free list
+ * (p->free_mlist) instead of the kernel heap.  Overlapping or
+ * adjoining entries are coalesced.  Returns the (possibly new)
+ * head of the list, or NULL when a node was needed but the free
+ * list is exhausted.
+ */
+static struct memlist *
+drmach_memlist_add_span(drmach_copy_rename_program_t *p,
+	struct memlist *mlist, uint64_t base, uint64_t len)
+{
+	struct memlist *ml, *tl, *nl;
+
+	/*
+	 * A zero-length span adds nothing; return the existing list
+	 * unchanged.  (Returning NULL here would silently discard the
+	 * caller's list, since callers assign the return value back.)
+	 */
+	if (len == 0ull)
+		return (mlist);
+
+	if (mlist == NULL) {
+		/* first span: pop a node from the free list */
+		mlist = p->free_mlist;
+		if (mlist == NULL)
+			return (NULL);
+		p->free_mlist = mlist->next;
+		mlist->address = base;
+		mlist->size = len;
+		mlist->next = mlist->prev = NULL;
+
+		return (mlist);
+	}
+
+	for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
+		if (base < ml->address) {
+			if ((base + len) < ml->address) {
+				/* disjoint: insert a new node before ml */
+				nl = p->free_mlist;
+				if (nl == NULL)
+					return (NULL);
+				p->free_mlist = nl->next;
+				nl->address = base;
+				nl->size = len;
+				nl->next = ml;
+				if ((nl->prev = ml->prev) != NULL)
+					nl->prev->next = nl;
+				ml->prev = nl;
+				if (mlist == ml)
+					mlist = nl;
+			} else {
+				/* overlaps the front of ml: extend it down */
+				ml->size = MAX((base + len),
+					(ml->address + ml->size)) -
+					base;
+				ml->address = base;
+			}
+			break;
+
+		} else if (base <= (ml->address + ml->size)) {
+			/* overlaps or abuts the end of ml: merge */
+			ml->size = MAX((base + len),
+				(ml->address + ml->size)) -
+				MIN(ml->address, base);
+			ml->address = MIN(ml->address, base);
+			break;
+		}
+	}
+	if (ml == NULL) {
+		/* span lies beyond the tail: append a new node */
+		nl = p->free_mlist;
+		if (nl == NULL)
+			return (NULL);
+		p->free_mlist = nl->next;
+		nl->address = base;
+		nl->size = len;
+		nl->next = NULL;
+		nl->prev = tl;
+		tl->next = nl;
+	}
+
+	return (mlist);
+}
+
+/*
+ * The routine performs the necessary memory COPY and MC adr SWITCH.
+ * Both operations MUST be at the same "level" so that the stack is
+ * maintained correctly between the copy and switch. The switch
+ * portion implements a caching mechanism to guarantee the code text
+ * is cached prior to execution. This is to guard against possible
+ * memory access while the MC adr's are being modified.
+ *
+ * IMPORTANT: The _drmach_copy_rename_end() function must immediately
+ * follow drmach_copy_rename_prog__relocatable() so that the correct
+ * "length" of the drmach_copy_rename_prog__relocatable can be
+ * calculated. This routine MUST be a LEAF function, i.e. it can
+ * make NO function calls, primarily for two reasons:
+ *
+ * 1. We must keep the stack consistent across the "switch".
+ * 2. Function calls are compiled to relative offsets, and
+ *    when we execute this function we'll be executing it from
+ *    a copied version in a different area of memory, thus
+ *    the relative offsets will be bogus.
+ *
+ * Moreover, it must have the "__relocatable" suffix to inform DTrace
+ * providers (and anything else, for that matter) that this
+ * function's text is manually relocated elsewhere before it is
+ * executed. That is, it cannot be safely instrumented with any
+ * methodology that is PC-relative.
+ */
+
+/*
+ * We multiply this to system_clock_frequency so we
+ * are setting a delay of fmem_timeout second for
+ * the rename command. The spec says 15 second is
+ * enough but the Fujitsu HW team suggested 17 sec.
+ */
+static int	fmem_timeout = 17;
+/* assumed minimum copy throughput, used to size the copy timeout */
+static int	min_copy_size_per_sec = 20 * 1024 * 1024;
+/* tunable: force a single-CPU (master-only) copy when non-zero */
+int drmach_disable_mcopy = 0;
+
+/*
+ * Busy-wait for approximately `ms' milliseconds using the %stick
+ * counter at frequency `freq'.  Deliberately call-free so it can
+ * be used from the relocated, leaf-only copy-rename code.
+ */
+#define	DR_DELAY_IL(ms, freq)					\
+	{							\
+		uint64_t start;					\
+		uint64_t nstick;				\
+		volatile uint64_t now;				\
+		nstick = ((uint64_t)ms * freq)/1000;		\
+		start = drmach_get_stick_il();			\
+		now = start;					\
+		while ((now - start) <= nstick) {		\
+			drmach_sleep_il();			\
+			now = drmach_get_stick_il();		\
+		}						\
+	}
+
+/*
+ * Body of the copy-rename operation, executed by every CPU from a
+ * relocated copy of this text (see the block comment above).  The
+ * master CPU (prog->data->cpuid) synchronizes the slaves, all CPUs
+ * in cpu_copy_set copy their assigned memlist spans, and then the
+ * master runs the FMEM switch script while slaves spin in theirs.
+ * MUST remain a leaf: no function calls other than the *_il
+ * leaf-assembly helpers, so the relocated copy stays valid.
+ */
+static int
+drmach_copy_rename_prog__relocatable(drmach_copy_rename_program_t *prog,
+	int cpuid)
+{
+	struct memlist *ml;
+	register int rtn;
+	int i;
+	register uint64_t curr, limit;
+	extern uint64_t drmach_get_stick_il();
+	extern void membar_sync_il();
+	extern void flush_instr_mem_il(void*);
+	uint64_t copy_start;
+
+	/* announce this CPU is ready to copy */
+	prog->critical->stat[cpuid] = FMEM_LOOP_COPY_READY;
+	membar_sync_il();
+
+	if (prog->data->cpuid == cpuid) {
+		/* master: wait (bounded by `delay' sticks) for all slaves */
+		limit = drmach_get_stick_il();
+		limit += prog->critical->delay;
+
+		for (i = 0; i < NCPU; i++) {
+			if (CPU_IN_SET(prog->data->cpu_slave_set, i)) {
+				/* wait for all CPU's to be ready */
+				for (;;) {
+					if (prog->critical->stat[i] ==
+						FMEM_LOOP_COPY_READY) {
+						break;
+					}
+				}
+				curr = drmach_get_stick_il();
+				if (curr > limit) {
+					prog->data->fmem_status.error =
+						FMEM_XC_TIMEOUT;
+					return (FMEM_XC_TIMEOUT);
+				}
+			}
+		}
+		/* release the slaves into the copy phase */
+		prog->data->fmem_status.stat = FMEM_LOOP_COPY_READY;
+		membar_sync_il();
+		copy_start = drmach_get_stick_il();
+	} else {
+		/* slave: spin until the master opens the copy phase */
+		for (;;) {
+			if (prog->data->fmem_status.stat ==
+				FMEM_LOOP_COPY_READY) {
+				break;
+			}
+			if (prog->data->fmem_status.error) {
+				prog->data->error[cpuid] = FMEM_TERMINATE;
+				return (FMEM_TERMINATE);
+			}
+		}
+	}
+
+	/*
+	 * DO COPY.
+	 */
+	if (CPU_IN_SET(prog->data->cpu_copy_set, cpuid)) {
+		for (ml = prog->data->cpu_ml[cpuid]; ml; ml = ml->next) {
+			uint64_t	s_pa, t_pa;
+			uint64_t	nbytes;
+
+			/* memlist addresses are relative to the copy bases */
+			s_pa = prog->data->s_copybasepa + ml->address;
+			t_pa = prog->data->t_copybasepa + ml->address;
+			nbytes = ml->size;
+
+			while (nbytes != 0ull) {
+				/* If the master has detected error, we just bail out */
+				if (prog->data->fmem_status.error) {
+					prog->data->error[cpuid] = FMEM_TERMINATE;
+					return (FMEM_TERMINATE);
+				}
+				/*
+				 * This copy does NOT use an ASI
+				 * that avoids the Ecache, therefore
+				 * the dst_pa addresses may remain
+				 * in our Ecache after the dst_pa
+				 * has been removed from the system.
+				 * A subsequent write-back to memory
+				 * will cause an ARB-stop because the
+				 * physical address no longer exists
+				 * in the system. Therefore we must
+				 * flush out local Ecache after we
+				 * finish the copy.
+				 */
+
+				/* copy 32 bytes at src_pa to dst_pa */
+				bcopy32_il(s_pa, t_pa);
+
+				/* increment the counter to signal that we are alive */
+				prog->stat->nbytes[cpuid] += 32;
+
+				/* increment by 32 bytes */
+				s_pa += (4 * sizeof (uint64_t));
+				t_pa += (4 * sizeof (uint64_t));
+
+				/* decrement by 32 bytes */
+				nbytes -= (4 * sizeof (uint64_t));
+			}
+		}
+		prog->critical->stat[cpuid] = FMEM_LOOP_COPY_DONE;
+		membar_sync_il();
+	}
+
+	/*
+	 * Since bcopy32_il() does NOT use an ASI to bypass
+	 * the Ecache, we need to flush our Ecache after
+	 * the copy is complete.
+	 */
+	flush_cache_il();
+
+	/*
+	 * drmach_fmem_exec_script()
+	 */
+	if (prog->data->cpuid == cpuid) {
+		uint64_t	last, now;
+
+		/* master: wait for each copier, detecting hangs via nbytes */
+		limit = copy_start + prog->data->copy_delay;
+		for (i = 0; i < NCPU; i++) {
+			if (CPU_IN_SET(prog->data->cpu_slave_set, i)) {
+				for (;;) {
+					/* we get FMEM_LOOP_FMEM_READY in normal case */
+					if (prog->critical->stat[i] ==
+						FMEM_LOOP_FMEM_READY) {
+						break;
+					}
+					/* got error traps */
+					if (prog->critical->stat[i] ==
+						FMEM_COPY_ERROR) {
+						prog->data->fmem_status.error =
+							FMEM_COPY_ERROR;
+						return (FMEM_COPY_ERROR);
+					}
+					/* if we have not reached limit, wait more */
+					curr = drmach_get_stick_il();
+					if (curr <= limit)
+						continue;
+
+					prog->data->slowest_cpuid = i;
+					prog->data->copy_wait_time =
+						curr - copy_start;
+
+					/* now check if slave is alive */
+					last = prog->stat->nbytes[i];
+
+					DR_DELAY_IL(1, prog->data->stick_freq);
+
+					now = prog->stat->nbytes[i];
+					if (now <= last) {
+						/* no progress, perhaps just finished */
+						DR_DELAY_IL(1, prog->data->stick_freq);
+						if (prog->critical->stat[i] ==
+							FMEM_LOOP_FMEM_READY)
+							break;
+						/* copy error */
+						if (prog->critical->stat[i] ==
+							FMEM_COPY_ERROR) {
+							prog->data->fmem_status.error =
+								FMEM_COPY_ERROR;
+							return (FMEM_COPY_ERROR);
+						}
+						prog->data->fmem_status.error =
+						    FMEM_COPY_TIMEOUT;
+						return (FMEM_COPY_TIMEOUT);
+					}
+				}
+			}
+		}
+		prog->critical->stat[cpuid] = FMEM_LOOP_FMEM_READY;
+		prog->data->fmem_status.stat  = FMEM_LOOP_FMEM_READY;
+
+		membar_sync_il();
+		flush_instr_mem_il((void*) (prog->critical));
+		/*
+		 * drmach_fmem_exec_script()
+		 */
+		rtn = prog->critical->fmem((void *)prog->critical, PAGESIZE);
+		return (rtn);
+	} else {
+		flush_instr_mem_il((void*) (prog->critical));
+		/*
+		 * drmach_fmem_loop_script()
+		 */
+		rtn = prog->critical->loop((void *)(prog->critical),
+			PAGESIZE, (void *)&(prog->critical->stat[cpuid]));
+
+		prog->data->error[cpuid] = rtn;
+		/* slave thread does not care the rv */
+		return (0);
+	}
+}
+
+/*
+ * Empty sentinel used only for its address, to compute the size of
+ * the relocatable routine above; never called.
+ */
+static void
+drmach_copy_rename_end(void)
+{
+	/*
+	 * IMPORTANT: This function's location MUST be located immediately
+	 *            following drmach_copy_rename_prog__relocatable to
+	 *            accurately estimate its size. Note that this assumes
+	 *            the compiler keeps these functions in the order in
+	 *            which they appear :-o
+	 */
+}
+
+
+/*
+ * Carve the program's one-page memlist buffer into a free list of
+ * pointer-aligned struct memlist nodes, linked through ->next.
+ * drmach_memlist_add_span() draws from this list so that list
+ * manipulation never touches unlocked (TLB-missable) memory.
+ */
+static void
+drmach_setup_memlist(drmach_copy_rename_program_t *p)
+{
+	caddr_t	cur;
+	int	left, step;
+
+	left = PAGESIZE;
+	step = roundup(sizeof (struct memlist), sizeof (void *));
+	p->free_mlist = NULL;
+	cur = p->memlist_buffer;
+	while (left >= sizeof (struct memlist)) {
+		struct memlist *node;
+
+		node = (struct memlist *)cur;
+		node->next = p->free_mlist;
+		p->free_mlist = node;
+		cur += step;
+		left -= step;
+	}
+}
+
+/*
+ * Prepare a copy-rename "program" for migrating memory from source
+ * board s_id to target board t_id.  Allocates and lays out the
+ * locked-page program buffer (data, critical, memlist, stat pages),
+ * relocates the copy/FMEM/loop scripts into the critical page,
+ * commits the operation with SCF, soft-suspends memory patrol, and
+ * distributes the copy memlist c_ml across the active CPUs.  On
+ * success *pgm_id receives the program handle for
+ * drmach_copy_rename()/drmach_copy_rename_fini().
+ */
+sbd_error_t *
+drmach_copy_rename_init(drmachid_t t_id, drmachid_t s_id,
+	struct memlist *c_ml, drmachid_t *pgm_id)
+{
+	drmach_mem_t	*s_mem;
+	drmach_mem_t	*t_mem;
+	struct memlist	*x_ml;
+	uint64_t	s_copybasepa, t_copybasepa;
+	uint_t		len;
+	caddr_t		bp, wp;
+	int			s_bd, t_bd, cpuid, active_cpus, i;
+	uint64_t	c_addr;
+	size_t		c_size, copy_sz, sz;
+	static sbd_error_t	*drmach_get_scf_addr(uint64_t *);
+	extern void drmach_fmem_loop_script();
+	extern void drmach_fmem_loop_script_rtn();
+	extern int drmach_fmem_exec_script();
+	extern void drmach_fmem_exec_script_end();
+	sbd_error_t	*err;
+	drmach_copy_rename_program_t *prog;
+	void (*mc_suspend)(void);
+	void (*mc_resume)(void);
+	int	(*scf_fmem_start)(int, int);
+	int	(*scf_fmem_end)(void);
+	int	(*scf_fmem_cancel)(void);
+
+	if (!DRMACH_IS_MEM_ID(s_id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+	if (!DRMACH_IS_MEM_ID(t_id))
+		return (drerr_new(0, EOPL_INAPPROP, NULL));
+
+	for (i = 0; i < NCPU; i++) {
+		int lsb_id, onb_core_num, strand_id;
+		drmach_board_t *bp;
+
+		/*
+		 * this kind of CPU will spin in cache
+		 */
+		if (CPU_IN_SET(cpu_ready_set, i))
+			continue;
+
+		/*
+		 * Now check for any inactive CPU's that
+		 * have been hotadded. This can only occur in
+		 * error condition in drmach_cpu_poweron().
+		 */
+		lsb_id = LSB_ID(i);
+		onb_core_num = ON_BOARD_CORE_NUM(i);
+		strand_id = STRAND_ID(i);
+		bp = drmach_get_board_by_bnum(lsb_id);
+		if (bp == NULL)
+			continue;
+		if (bp->cores[onb_core_num].core_hotadded &
+			(1 << strand_id)) {
+			if (!(bp->cores[onb_core_num].core_started &
+				(1 << strand_id))) {
+				return (DRMACH_INTERNAL_ERROR());
+			}
+		}
+	}
+
+	/* resolve the MC patrol and SCF entry points at run time */
+	mc_suspend = (void (*)(void))
+	    modgetsymvalue("opl_mc_suspend", 0);
+	mc_resume = (void (*)(void))
+	    modgetsymvalue("opl_mc_resume", 0);
+
+	if (mc_suspend == NULL || mc_resume == NULL) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	scf_fmem_start = (int (*)(int, int))
+	    modgetsymvalue("scf_fmem_start", 0);
+	if (scf_fmem_start == NULL) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+	scf_fmem_end = (int (*)(void))
+	    modgetsymvalue("scf_fmem_end", 0);
+	if (scf_fmem_end == NULL) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+	scf_fmem_cancel = (int (*)(void))
+	    modgetsymvalue("scf_fmem_cancel", 0);
+	if (scf_fmem_cancel == NULL) {
+		return (DRMACH_INTERNAL_ERROR());
+	}
+	s_mem = s_id;
+	t_mem = t_id;
+
+	s_bd = s_mem->dev.bp->bnum;
+	t_bd = t_mem->dev.bp->bnum;
+
+	/* calculate source and target base pa */
+
+	s_copybasepa = s_mem->slice_base;
+	t_copybasepa = t_mem->slice_base;
+
+	/* adjust copy memlist addresses to be relative to copy base pa */
+	x_ml = c_ml;
+	while (x_ml != NULL) {
+		x_ml->address -= s_copybasepa;
+		x_ml = x_ml->next;
+	}
+
+	/*
+	 * bp will be page aligned, since we're calling
+	 * kmem_zalloc() with an exact multiple of PAGESIZE.
+	 */
+	wp = bp = kmem_zalloc(DRMACH_FMEM_LOCKED_PAGES * PAGESIZE,
+		KM_SLEEP);
+
+	prog = (drmach_copy_rename_program_t *)(wp +
+		DRMACH_FMEM_DATA_PAGE * PAGESIZE);
+	prog->data = (drmach_copy_rename_data_t *)roundup(((uint64_t)prog +
+		sizeof (drmach_copy_rename_program_t)), sizeof (void *));
+
+	ASSERT(((uint64_t)prog->data + sizeof (drmach_copy_rename_data_t))
+		<= ((uint64_t)prog + PAGESIZE));
+
+	prog->critical = (drmach_copy_rename_critical_t *)
+		(wp + DRMACH_FMEM_CRITICAL_PAGE * PAGESIZE);
+
+	prog->memlist_buffer = (caddr_t)(wp +
+		DRMACH_FMEM_MLIST_PAGE * PAGESIZE);
+
+	prog->stat = (drmach_cr_stat_t *)(wp +
+		DRMACH_FMEM_STAT_PAGE * PAGESIZE);
+
+	/* LINTED */
+	ASSERT(sizeof (drmach_cr_stat_t)
+		<= ((DRMACH_FMEM_LOCKED_PAGES - DRMACH_FMEM_STAT_PAGE)
+		* PAGESIZE));
+
+	prog->critical->scf_reg_base = (uint64_t)-1;
+	err = drmach_get_scf_addr(&(prog->critical->scf_reg_base));
+	if (err) {
+		/* wp still points at the allocation base here */
+		kmem_free(wp, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
+		return (err);
+	}
+
+	/* SCF command descriptor: source/target boards plus checksum byte */
+	prog->critical->scf_td[0] = (s_bd & 0xff);
+	prog->critical->scf_td[1] = (t_bd & 0xff);
+	for (i = 2; i < 15; i++) {
+		prog->critical->scf_td[i]	= 0;
+	}
+	prog->critical->scf_td[15] = ((0xaa + s_bd + t_bd) & 0xff);
+
+	/*
+	 * First pass: measure the three relocated scripts (copy loop,
+	 * FMEM exec, slave loop), each padded with 1K of nop space, to
+	 * verify they fit in the critical page before copying anything.
+	 */
+	bp = (caddr_t)prog->critical;
+	len = sizeof (drmach_copy_rename_critical_t);
+	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
+
+	len = (uint_t)((ulong_t)drmach_copy_rename_end -
+		(ulong_t)drmach_copy_rename_prog__relocatable);
+
+	/*
+	 * We always leave 1K nop's to prevent the processor from
+	 * speculative execution that causes memory access
+	 */
+	wp = wp + len + 1024;
+
+	len = (uint_t)((ulong_t)drmach_fmem_exec_script_end -
+		(ulong_t)drmach_fmem_exec_script);
+	/* this is the entry point of the loop script */
+	wp = wp + len + 1024;
+
+	len = (uint_t)((ulong_t)drmach_fmem_exec_script -
+		(ulong_t)drmach_fmem_loop_script);
+	wp = wp + len + 1024;
+
+	/* now we make sure there is 1K extra */
+
+	if ((wp - bp) > PAGESIZE) {
+		/*
+		 * NOTE(review): this frees `prog', which equals the
+		 * allocation base only if DRMACH_FMEM_DATA_PAGE == 0 —
+		 * confirm against the header (cf. kmem_free(wp) above).
+		 */
+		kmem_free(prog, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
+		return (DRMACH_INTERNAL_ERROR());
+	}
+
+	/* second pass: actually copy the scripts into the critical page */
+	bp = (caddr_t)prog->critical;
+	len = sizeof (drmach_copy_rename_critical_t);
+	wp = (caddr_t)roundup((uint64_t)bp + len, sizeof (void *));
+
+	prog->critical->run = (int (*)())(wp);
+	len = (uint_t)((ulong_t)drmach_copy_rename_end -
+		(ulong_t)drmach_copy_rename_prog__relocatable);
+
+	bcopy((caddr_t)drmach_copy_rename_prog__relocatable, wp, len);
+
+	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
+
+	prog->critical->fmem = (int (*)())(wp);
+	len = (int)((ulong_t)drmach_fmem_exec_script_end -
+		(ulong_t)drmach_fmem_exec_script);
+	bcopy((caddr_t)drmach_fmem_exec_script, wp, len);
+
+	len = (int)((ulong_t)drmach_fmem_exec_script_end -
+		(ulong_t)drmach_fmem_exec_script);
+	wp = (caddr_t)roundup((uint64_t)wp + len, 1024);
+
+	prog->critical->loop = (int (*)())(wp);
+	len = (int)((ulong_t)drmach_fmem_exec_script -
+		(ulong_t)drmach_fmem_loop_script);
+	bcopy((caddr_t)drmach_fmem_loop_script, (void *)wp, len);
+	len = (int)((ulong_t)drmach_fmem_loop_script_rtn-
+		(ulong_t)drmach_fmem_loop_script);
+	prog->critical->loop_rtn = (void (*)()) (wp+len);
+
+	/* now we are committed, call SCF, soft suspend mac patrol */
+	if ((*scf_fmem_start)(s_bd, t_bd)) {
+		kmem_free(prog, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
+		return (DRMACH_INTERNAL_ERROR());
+	}
+	prog->data->scf_fmem_end = scf_fmem_end;
+	prog->data->scf_fmem_cancel = scf_fmem_cancel;
+	prog->data->fmem_status.op |= OPL_FMEM_SCF_START;
+	/* soft suspend mac patrol */
+	(*mc_suspend)();
+	prog->data->fmem_status.op |= OPL_FMEM_MC_SUSPEND;
+	prog->data->mc_resume = mc_resume;
+
+	/* save the slave-loop return instruction so it can be restored */
+	prog->critical->inst_loop_ret  =
+		*(uint64_t *)(prog->critical->loop_rtn);
+
+	/*
+	 * 0x30800000 is op code "ba,a +0"
+	 */
+
+	*(uint_t *)(prog->critical->loop_rtn) = (uint_t)(0x30800000);
+
+	/*
+	 * set the value of SCF FMEM TIMEOUT
+	 */
+	prog->critical->delay = fmem_timeout * system_clock_freq;
+
+	prog->data->s_mem = (drmachid_t)s_mem;
+	prog->data->t_mem = (drmachid_t)t_mem;
+
+	cpuid = CPU->cpu_id;
+	prog->data->cpuid = cpuid;
+	prog->data->cpu_ready_set = cpu_ready_set;
+	prog->data->cpu_slave_set = cpu_ready_set;
+	prog->data->slowest_cpuid = (processorid_t)-1;
+	prog->data->copy_wait_time = 0;
+	CPUSET_DEL(prog->data->cpu_slave_set, cpuid);
+
+	for (i = 0; i < NCPU; i++) {
+		prog->data->cpu_ml[i] = NULL;
+	}
+
+	active_cpus = 0;
+	if (drmach_disable_mcopy) {
+		active_cpus = 1;
+		CPUSET_ADD(prog->data->cpu_copy_set, cpuid);
+	} else {
+		for (i = 0; i < NCPU; i++) {
+			if (CPU_IN_SET(cpu_ready_set, i) &&
+				CPU_ACTIVE(cpu[i])) {
+				CPUSET_ADD(prog->data->cpu_copy_set, i);
+				active_cpus++;
+			}
+		}
+	}
+
+	drmach_setup_memlist(prog);
+
+	x_ml = c_ml;
+	sz = 0;
+	while (x_ml != NULL) {
+		sz += x_ml->size;
+		x_ml = x_ml->next;
+	}
+
+	/* per-CPU share, rounded up to 4M so spans stay page-aligned */
+	copy_sz = sz/active_cpus;
+	copy_sz = roundup(copy_sz, MMU_PAGESIZE4M);
+
+	while (sz > copy_sz*active_cpus) {
+		copy_sz += MMU_PAGESIZE4M;
+	}
+
+	prog->data->stick_freq = system_clock_freq;
+	prog->data->copy_delay = ((copy_sz / min_copy_size_per_sec) + 2) *
+		system_clock_freq;
+
+	x_ml = c_ml;
+	c_addr = x_ml->address;
+	c_size = x_ml->size;
+
+	/* deal out copy_sz bytes of c_ml to each copying CPU */
+	for (i = 0; i < NCPU; i++) {
+		prog->stat->nbytes[i] = 0;
+		if (!CPU_IN_SET(prog->data->cpu_copy_set, i)) {
+			continue;
+		}
+		sz = copy_sz;
+
+		while (sz) {
+			if (c_size > sz) {
+				prog->data->cpu_ml[i] =
+					drmach_memlist_add_span(prog,
+					prog->data->cpu_ml[i],
+					c_addr, sz);
+				c_addr += sz;
+				c_size -= sz;
+				break;
+			} else {
+				sz -= c_size;
+				prog->data->cpu_ml[i] = drmach_memlist_add_span(
+					prog, prog->data->cpu_ml[i],
+					c_addr, c_size);
+				x_ml = x_ml->next;
+				if (x_ml != NULL) {
+					c_addr = x_ml->address;
+					c_size = x_ml->size;
+				} else {
+					goto end;
+				}
+			}
+		}
+	}
+end:
+	prog->data->s_copybasepa = s_copybasepa;
+	prog->data->t_copybasepa = t_copybasepa;
+	prog->data->c_ml = c_ml;
+	*pgm_id = prog;
+
+	return (NULL);
+}
+
+/*
+ * Complete (or cancel) a copy-rename operation.  If the FMEM
+ * request reached the SCF, confirm it and swap the source/target
+ * physical address ranges; otherwise cancel the request.  In
+ * either case, resume memory patrol and free the program buffer.
+ * Panics on inconsistent FMEM state, since memory layout would
+ * then be unknown.
+ */
+sbd_error_t *
+drmach_copy_rename_fini(drmachid_t id)
+{
+	drmach_copy_rename_program_t	*prog = id;
+	sbd_error_t			*err = NULL;
+	int				rv;
+
+	/*
+	 * Note that we have to delay calling SCF to find out the
+	 * status of the FMEM operation here because SCF cannot
+	 * respond while it is suspended.
+	 * This create a small window when we are sure about the
+	 * base address of the system board.
+	 * If there is any call to mc-opl to get memory unum,
+	 * mc-opl will return UNKNOWN as the unum.
+	 */
+
+	if (prog->data->c_ml != NULL)
+		memlist_delete(prog->data->c_ml);
+
+	/* both SCF_START and MC_SUSPEND must have been recorded by init */
+	if ((prog->data->fmem_status.op &
+		(OPL_FMEM_SCF_START| OPL_FMEM_MC_SUSPEND)) !=
+		(OPL_FMEM_SCF_START | OPL_FMEM_MC_SUSPEND)) {
+		cmn_err(CE_PANIC, "drmach_copy_rename_fini: "
+			"invalid op code %x\n",
+				prog->data->fmem_status.op);
+	}
+
+	/* possible ops are SCF_START, MC_SUSPEND */
+	if (prog->critical->fmem_issued) {
+		if (prog->data->fmem_status.error != FMEM_NO_ERROR)
+			cmn_err(CE_PANIC, "scf fmem request failed");
+		rv = (*prog->data->scf_fmem_end)();
+		if (rv) {
+			cmn_err(CE_PANIC, "scf_fmem_end() failed");
+		}
+		/*
+		 * If we get here, rename is successful.
+		 * Do all the copy rename post processing.
+		 */
+		drmach_swap_pa((drmach_mem_t *)prog->data->s_mem,
+			(drmach_mem_t *)prog->data->t_mem);
+	} else {
+		if (prog->data->fmem_status.error != 0) {
+			cmn_err(CE_WARN, "Kernel Migration fails. 0x%x",
+				prog->data->fmem_status.error);
+			err = DRMACH_INTERNAL_ERROR();
+		}
+		rv = (*prog->data->scf_fmem_cancel)();
+		if (rv) {
+			cmn_err(CE_WARN, "scf_fmem_cancel() failed");
+			if (!err)
+				err = DRMACH_INTERNAL_ERROR();
+		}
+	}
+	/* soft resume mac patrol */
+	(*prog->data->mc_resume)();
+
+	/*
+	 * NOTE(review): frees from `prog', which matches the allocation
+	 * base only if DRMACH_FMEM_DATA_PAGE == 0 — confirm in header.
+	 */
+	kmem_free(prog, DRMACH_FMEM_LOCKED_PAGES * PAGESIZE);
+	return (err);
+}
+
+/*
+ * Pin the program's pages into the D-TLB and I-TLB with locked
+ * 8K TTEs so the relocated copy-rename code can run without
+ * taking TLB misses.  The existing translation is flushed first
+ * so the locked entry is the one in use.
+ */
+static void
+drmach_lock_critical(caddr_t va)
+{
+	tte_t tte;
+	int i;
+
+	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
+		vtag_flushpage(va, KCONTEXT);
+		sfmmu_memtte(&tte, va_to_pfn(va),
+			PROC_DATA|HAT_NOSYNC, TTE8K);
+		tte.tte_intlo |= TTE_LCK_INT;
+		sfmmu_dtlb_ld(va, KCONTEXT, &tte);
+		sfmmu_itlb_ld(va, KCONTEXT, &tte);
+		va += PAGESIZE;
+	}
+}
+
+/*
+ * Undo drmach_lock_critical(): flush the locked TLB entries for
+ * the program's pages; subsequent accesses fault back in through
+ * the normal kernel HAT.
+ */
+static void
+drmach_unlock_critical(caddr_t va)
+{
+	int i;
+
+	for (i = 0; i < DRMACH_FMEM_LOCKED_PAGES; i++) {
+		vtag_flushpage(va, KCONTEXT);
+		va += PAGESIZE;
+	}
+}
+
+/*
+ * Slave-CPU entry point, reached via cross-trap from
+ * drmach_copy_rename().  Locks the program pages, jumps into the
+ * relocated copy-rename code, and reports completion by setting
+ * FMEM_LOOP_EXIT for this CPU.  ECC errors during the run are
+ * caught with on_trap() and reported as FMEM_COPY_ERROR.
+ */
+/*ARGSUSED*/
+static void
+drmach_copy_rename_slave(struct regs *rp, drmachid_t id)
+{
+	drmach_copy_rename_program_t	*prog = id;
+	register int			cpuid;
+	extern void			drmach_flush();
+	extern void			membar_sync_il();
+	extern void			drmach_flush_icache();
+	on_trap_data_t			otd;
+
+	kpreempt_disable();
+	cpuid = CPU->cpu_id;
+
+	if (on_trap(&otd, OT_DATA_EC)) {
+		no_trap();
+		/*
+		 * NOTE(review): prog is stored to after the critical
+		 * pages are unlocked; this relies on the normal kernel
+		 * mapping for the buffer being reestablished — confirm.
+		 */
+		drmach_unlock_critical((caddr_t)prog);
+		kpreempt_enable();
+		prog->data->error[cpuid] = FMEM_COPY_ERROR;
+		prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;
+		return;
+	}
+
+
+	(void) drmach_lock_critical((caddr_t)prog);
+
+	flush_windows();
+
+	/*
+	 * jmp drmach_copy_rename_prog().
+	 */
+
+	drmach_flush(prog->critical, PAGESIZE);
+	(void) prog->critical->run(prog, cpuid);
+	drmach_flush_icache();
+
+	no_trap();
+	drmach_unlock_critical((caddr_t)prog);
+
+	kpreempt_enable();
+
+	prog->critical->stat[cpuid] = FMEM_LOOP_EXIT;
+	membar_sync_il();
+}
+
+/*
+ * After a successful rename, exchange the physical address views
+ * of the source and target memory devices: swap their slice bases
+ * and rebase base_pa and every memlist entry accordingly, then ask
+ * IKP to swap the boards' sb-mem-ranges so the MAC patrol driver
+ * re-reads the new layout when it resumes.
+ */
+static void
+drmach_swap_pa(drmach_mem_t *s_mem, drmach_mem_t *t_mem)
+{
+	drmach_board_t	*sb, *tb;
+	uint64_t	old_s, old_t;
+	struct memlist	*ml;
+
+	sb = s_mem->dev.bp;
+	tb = t_mem->dev.bp;
+	if ((sb == NULL) || (tb == NULL)) {
+		cmn_err(CE_PANIC, "Cannot locate source or target board\n");
+		return;
+	}
+
+	old_s = s_mem->slice_base;
+	old_t = t_mem->slice_base;
+
+	/* rebase the source device onto the target slice */
+	s_mem->slice_base = old_t;
+	s_mem->base_pa = (s_mem->base_pa - old_s) + old_t;
+	for (ml = s_mem->memlist; ml != NULL; ml = ml->next)
+		ml->address = ml->address - old_s + old_t;
+
+	/* and the target device onto the source slice */
+	t_mem->slice_base = old_s;
+	t_mem->base_pa = (t_mem->base_pa - old_t) + old_s;
+	for (ml = t_mem->memlist; ml != NULL; ml = ml->next)
+		ml->address = ml->address - old_t + old_s;
+
+	/*
+	 * IKP has to update the sb-mem-ranges for mac patrol driver
+	 * when it resumes, it will re-read the sb-mem-range property
+	 * to get the new base address
+	 */
+	if (oplcfg_pa_swap(sb->bnum, tb->bnum) != 0)
+		cmn_err(CE_PANIC, "Could not update device nodes\n");
+}
+
+/*
+ * Execute a prepared copy-rename program on the current (master)
+ * CPU: cross-trap all ready CPUs into drmach_copy_rename_slave(),
+ * run the relocated program here, then release the slaves by
+ * restoring the patched loop-return instruction and wait for each
+ * to report FMEM_LOOP_EXIT.  Panics on hardware error traps or a
+ * hung slave; any other failure is recorded in fmem_status.error
+ * for drmach_copy_rename_fini() to act on.
+ */
+void
+drmach_copy_rename(drmachid_t id)
+{
+	drmach_copy_rename_program_t	*prog = id;
+	cpuset_t	cpuset;
+	int		cpuid;
+	uint64_t	inst;
+	register int	rtn;
+	extern int	in_sync;
+	int		old_in_sync;
+	extern void	drmach_sys_trap();
+	extern void	drmach_flush();
+	extern void	drmach_flush_icache();
+	extern uint64_t	patch_inst(uint64_t *, uint64_t);
+	on_trap_data_t	otd;
+
+
+	if (prog->critical->scf_reg_base == (uint64_t)-1) {
+		prog->data->fmem_status.error = FMEM_SCF_ERR;
+		return;
+	}
+
+	kpreempt_disable();
+	cpuset = prog->data->cpu_ready_set;
+
+	for (cpuid = 0; cpuid < NCPU; cpuid++) {
+		if (CPU_IN_SET(cpuset, cpuid)) {
+			prog->critical->stat[cpuid] = FMEM_LOOP_START;
+			prog->data->error[cpuid] = FMEM_NO_ERROR;
+		}
+	}
+
+	old_in_sync = in_sync;
+	in_sync = 1;
+	cpuid = CPU->cpu_id;
+
+	CPUSET_DEL(cpuset, cpuid);
+
+	/* pre-lock the program pages on the slaves via cross-call */
+	xc_some(cpuset, (xcfunc_t *)drmach_lock_critical,
+		(uint64_t)prog, (uint64_t)0);
+
+	xt_some(cpuset, (xcfunc_t *)drmach_sys_trap,
+		(uint64_t)drmach_copy_rename_slave, (uint64_t)prog);
+	xt_sync(cpuset);
+
+	(void) drmach_lock_critical((caddr_t)prog);
+
+	if (on_trap(&otd, OT_DATA_EC)) {
+		rtn = FMEM_COPY_ERROR;
+		goto done;
+	}
+
+	flush_windows();
+
+	/*
+	 * jmp drmach_copy_rename_prog().
+	 */
+	drmach_flush(prog->critical, PAGESIZE);
+	rtn = prog->critical->run(prog, cpuid);
+	drmach_flush_icache();
+
+
+done:
+	no_trap();
+	if (rtn == FMEM_HW_ERROR) {
+		kpreempt_enable();
+		prom_panic("URGENT_ERROR_TRAP is "
+			"detected during FMEM.\n");
+	}
+
+	/*
+	 * In normal case, all slave CPU's are still spinning in
+	 * the assembly code.  The master has to patch the instruction
+	 * to get them out.
+	 * In error case, e.g. COPY_ERROR, some slave CPU's might
+	 * have aborted and already returned and set LOOP_EXIT status.
+	 * Some CPU might still be copying.
+	 * In any case, some delay is necessary to give them
+	 * enough time to set the LOOP_EXIT status.
+	 */
+
+	for (;;) {
+		/* restore the original loop-return instruction */
+		inst = patch_inst((uint64_t *)prog->critical->loop_rtn,
+			prog->critical->inst_loop_ret);
+		if (prog->critical->inst_loop_ret == inst) {
+			break;
+		}
+	}
+
+	for (cpuid = 0; cpuid < NCPU; cpuid++) {
+		uint64_t	last, now;
+		if (!CPU_IN_SET(cpuset, cpuid)) {
+			continue;
+		}
+		last = prog->stat->nbytes[cpuid];
+		/*
+		 * Wait for all CPU to exit.
+		 * However we do not want an infinite loop
+		 * so we detect hangup situation here.
+		 * If the slave CPU is still copying data,
+		 * we will continue to wait.
+		 * In error cases, the master has already set
+		 * fmem_status.error to abort the copying.
+		 * 1 m.s delay for them to abort copying and
+		 * return to drmach_copy_rename_slave to set
+		 * FMEM_LOOP_EXIT status should be enough.
+		 */
+		for (;;) {
+			if (prog->critical->stat[cpuid] == FMEM_LOOP_EXIT)
+				break;
+			drmach_sleep_il();
+			drv_usecwait(1000);
+			now = prog->stat->nbytes[cpuid];
+			if (now <= last) {
+				drv_usecwait(1000);
+				if (prog->critical->stat[cpuid] == FMEM_LOOP_EXIT)
+					break;
+				cmn_err(CE_PANIC,
+					"CPU %d hang during Copy Rename", cpuid);
+			}
+			last = now;
+		}
+		if (prog->data->error[cpuid] == FMEM_HW_ERROR) {
+			prom_panic("URGENT_ERROR_TRAP is "
+				"detected during FMEM.\n");
+		}
+	}
+	drmach_unlock_critical((caddr_t)prog);
+
+	in_sync = old_in_sync;
+
+	kpreempt_enable();
+	/* preserve the first error seen; rtn only fills an empty slot */
+	if (prog->data->fmem_status.error == 0)
+		prog->data->fmem_status.error = rtn;
+
+	if (prog->data->copy_wait_time > 0) {
+		DRMACH_PR("Unexpected long wait time %ld seconds "
+			"during copy rename on CPU %d\n",
+			prog->data->copy_wait_time/prog->data->stick_freq,
+			prog->data->slowest_cpuid);
+	}
+}
diff --git a/usr/src/uts/sun4u/opl/io/mc-opl.c b/usr/src/uts/sun4u/opl/io/mc-opl.c
new file mode 100644
index 0000000000..fc19d282a4
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/mc-opl.c
@@ -0,0 +1,2442 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/conf.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/async.h>
+#include <sys/machsystm.h>
+#include <sys/ksynch.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/ddifm.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/kmem.h>
+#include <sys/fm/io/opl_mc_fm.h>
+#include <sys/memlist.h>
+#include <sys/param.h>
+#include <sys/ontrap.h>
+#include <vm/page.h>
+#include <sys/mc-opl.h>
+
+/*
+ * Function prototypes
+ */
+static int mc_open(dev_t *, int, int, cred_t *);
+static int mc_close(dev_t, int, int, cred_t *);
+static int mc_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
+static int mc_attach(dev_info_t *, ddi_attach_cmd_t);
+static int mc_detach(dev_info_t *, ddi_detach_cmd_t);
+
+static int mc_board_add(mc_opl_t *mcp);
+static int mc_board_del(mc_opl_t *mcp);
+static int mc_suspend(mc_opl_t *mcp, uint32_t flag);
+static int mc_resume(mc_opl_t *mcp, uint32_t flag);
+
+static void insert_mcp(mc_opl_t *mcp);
+static void delete_mcp(mc_opl_t *mcp);
+
+static int pa_to_maddr(mc_opl_t *mcp, uint64_t pa, mc_addr_t *maddr);
+
+static int mc_valid_pa(mc_opl_t *mcp, uint64_t pa);
+
+int mc_get_mem_unum(int, uint64_t, char *, int, int *);
+extern int plat_max_boards(void);
+
+static void mc_get_mlist(mc_opl_t *);
+
+#pragma weak opl_get_physical_board
+extern int opl_get_physical_board(int);
+static int mc_opl_get_physical_board(int);
+
+/*
+ * Configuration data structures
+ */
+static struct cb_ops mc_cb_ops = {
+	/*
+	 * Character-device entry points.  Only open, close and ioctl are
+	 * implemented; the remaining operations are nulldev/nodev stubs.
+	 */
+	mc_open,			/* open */
+	mc_close,			/* close */
+	nulldev,			/* strategy */
+	nulldev,			/* print */
+	nodev,				/* dump */
+	nulldev,			/* read */
+	nulldev,			/* write */
+	mc_ioctl,			/* ioctl */
+	nodev,				/* devmap */
+	nodev,				/* mmap */
+	nodev,				/* segmap */
+	nochpoll,			/* poll */
+	ddi_prop_op,			/* cb_prop_op */
+	0,				/* streamtab */
+	D_MP | D_NEW | D_HOTPLUG,	/* Driver compatibility flag */
+	CB_REV,				/* rev */
+	nodev,				/* cb_aread */
+	nodev				/* cb_awrite */
+};
+
+static struct dev_ops mc_ops = {
+	/*
+	 * Device operations table; attach/detach drive the driver
+	 * lifecycle and mc_cb_ops handles character-device access.
+	 */
+	DEVO_REV,			/* rev */
+	0,				/* refcnt */
+	ddi_getinfo_1to1,		/* getinfo */
+	nulldev,			/* identify */
+	nulldev,			/* probe */
+	mc_attach,			/* attach */
+	mc_detach,			/* detach */
+	nulldev,			/* reset */
+	&mc_cb_ops,			/* cb_ops */
+	(struct bus_ops *)0,		/* bus_ops */
+	nulldev				/* power */
+};
+
+/*
+ * Driver globals
+ */
+int mc_patrol_interval_sec = 10;
+
+int inject_op_delay = 5;
+
+mc_inst_list_t *mc_instances;
+static kmutex_t mcmutex;
+
+void *mc_statep;
+
+#ifdef DEBUG
+int oplmc_debug = 1;
+#endif
+
+static int mc_debug_show_all;
+
+extern struct mod_ops mod_driverops;
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* module type, this one is a driver */
+ "OPL Memory-controller 1.1", /* module name */
+ &mc_ops, /* driver ops */
+};
+
+static struct modlinkage modlinkage = {
+ MODREV_1, /* rev */
+ (void *)&modldrv,
+ NULL
+};
+
+#pragma weak opl_get_mem_unum
+extern int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
+
+/*
+ * pseudo-mc node portid format
+ *
+ * [10] = 0
+ * [9] = 1
+ * [8] = LSB_ID[4] = 0
+ * [7:4] = LSB_ID[3:0]
+ * [3:0] = 0
+ *
+ */
+
+/*
+ * These are the module initialization routines.
+ */
+/*
+ * Module entry: initialize the per-instance soft state and the global
+ * mutex, publish the memory-unum lookup hook, then register the driver.
+ * Everything is undone, in reverse order, if mod_install() fails.
+ */
+int
+_init(void)
+{
+	int rv;
+
+	rv = ddi_soft_state_init(&mc_statep, sizeof (mc_opl_t), 1);
+	if (rv != 0)
+		return (rv);
+
+	mutex_init(&mcmutex, NULL, MUTEX_DRIVER, NULL);
+	if (&opl_get_mem_unum != NULL)
+		opl_get_mem_unum = mc_get_mem_unum;
+
+	if ((rv = mod_install(&modlinkage)) != 0) {
+		/* roll back in reverse order of setup */
+		if (&opl_get_mem_unum != NULL)
+			opl_get_mem_unum = NULL;
+		mutex_destroy(&mcmutex);
+		ddi_soft_state_fini(&mc_statep);
+	}
+
+	return (rv);
+}
+
+/*
+ * Module exit: unregister the driver, then tear down the globals
+ * created by _init().  Fails (and leaves state intact) when the
+ * driver is still busy.
+ */
+int
+_fini(void)
+{
+	int rv = mod_remove(&modlinkage);
+
+	if (rv != 0)
+		return (rv);
+
+	mutex_destroy(&mcmutex);
+	if (&opl_get_mem_unum != NULL)
+		opl_get_mem_unum = NULL;
+	ddi_soft_state_fini(&mc_statep);
+
+	return (0);
+}
+
+int
+_info(struct modinfo *modinfop)
+{
+	/* report the module name and revision via the modlinkage */
+	return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * Attach entry point.  DDI_ATTACH allocates and initializes the
+ * per-instance soft state and registers the board with the driver;
+ * DDI_RESUME restarts a previously suspended instance.
+ */
+static int
+mc_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
+{
+	mc_opl_t *mcp;
+	int instance;
+
+	/* get the instance of this devi */
+	instance = ddi_get_instance(devi);
+
+	switch (cmd) {
+	case DDI_ATTACH:
+		break;
+	case DDI_RESUME:
+		mcp = ddi_get_soft_state(mc_statep, instance);
+		return (mc_resume(mcp, MC_DRIVER_SUSPENDED));
+	default:
+		return (DDI_FAILURE);
+	}
+
+
+	if (ddi_soft_state_zalloc(mc_statep, instance) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+
+	if ((mcp = ddi_get_soft_state(mc_statep, instance)) == NULL) {
+		goto bad;
+	}
+
+	/* set information in mc state */
+	mcp->mc_dip = devi;
+
+	if (mc_board_add(mcp))
+		goto bad;
+
+	insert_mcp(mcp);
+	ddi_report_dev(devi);
+
+	return (DDI_SUCCESS);
+
+bad:
+	/* release the soft state allocated above */
+	ddi_soft_state_free(mc_statep, instance);
+	return (DDI_FAILURE);
+}
+
+/*
+ * Detach entry point.  DDI_SUSPEND quiesces the instance for a
+ * suspend/resume cycle; DDI_DETACH unregisters the board (under
+ * mcmutex) and frees the per-instance soft state.
+ */
+/* ARGSUSED */
+static int
+mc_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
+{
+	int instance;
+	mc_opl_t *mcp;
+
+	/* get the instance of this devi */
+	instance = ddi_get_instance(devi);
+	if ((mcp = ddi_get_soft_state(mc_statep, instance)) == NULL) {
+		return (DDI_FAILURE);
+	}
+
+	switch (cmd) {
+	case DDI_SUSPEND:
+		return (mc_suspend(mcp, MC_DRIVER_SUSPENDED));
+	case DDI_DETACH:
+		break;
+	default:
+		return (DDI_FAILURE);
+	}
+
+	/* board removal and list manipulation are serialized by mcmutex */
+	mutex_enter(&mcmutex);
+	if (mc_board_del(mcp) != DDI_SUCCESS) {
+		mutex_exit(&mcmutex);
+		return (DDI_FAILURE);
+	}
+
+	delete_mcp(mcp);
+	mutex_exit(&mcmutex);
+
+	/* free up the soft state */
+	ddi_soft_state_free(mc_statep, instance);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * open(9E) stub: no per-open state, always succeeds.
+ */
+/* ARGSUSED */
+static int
+mc_open(dev_t *devp, int flag, int otyp, cred_t *credp)
+{
+	return (0);
+}
+
+/*
+ * close(9E) stub: nothing to release, always succeeds.
+ */
+/* ARGSUSED */
+static int
+mc_close(dev_t devp, int flag, int otyp, cred_t *credp)
+{
+	return (0);
+}
+
+/*
+ * ioctl(9E) stub: no commands are supported; always ENXIO.
+ */
+/* ARGSUSED */
+static int
+mc_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
+	int *rvalp)
+{
+	return (ENXIO);
+}
+
+/*
+ * PA validity check:
+ * This function returns 1 if the PA is valid, otherwise
+ * return 0.
+ */
+
+/* ARGSUSED */
+/*
+ * Decide whether a physical address belongs to this board's memory:
+ * it must fall inside [mc_start_address, mc_start_address + mc_size)
+ * and be covered by the board's memory list (fetched lazily).
+ * Returns 1 when valid, 0 otherwise.
+ */
+static int
+pa_is_valid(mc_opl_t *mcp, uint64_t addr)
+{
+	uint64_t base = mcp->mc_start_address;
+
+	/* reject anything outside the board's address window */
+	if (addr < base || addr >= base + mcp->mc_size)
+		return (0);
+
+	/* the memory list is looked up on first use */
+	if (mcp->mlist == NULL)
+		mc_get_mlist(mcp);
+
+	if (mcp->mlist == NULL)
+		return (0);
+
+	return (address_in_memlist(mcp->mlist, addr, 0) ? 1 : 0);
+}
+
+/*
+ * mac-pa translation routines.
+ *
+ * Input: mc driver state, (LSB#, Bank#, DIMM address)
+ * Output: physical address
+ *
+ * Valid - return value: 0
+ * Invalid - return value: -1
+ */
+static int
+mcaddr_to_pa(mc_opl_t *mcp, mc_addr_t *maddr, uint64_t *pa)
+{
+	int i;
+	uint64_t pa_offset = 0;
+	/* chip-select comes from bit CS_SHIFT of the DIMM address */
+	int cs = (maddr->ma_dimm_addr >> CS_SHIFT) & 1;
+	int bank = maddr->ma_bank;
+	mc_addr_t maddr1;
+	int bank0, bank1;
+
+	MC_LOG("mcaddr /LSB%d/B%d/%x\n", maddr->ma_bd, bank,
+	    maddr->ma_dimm_addr);
+
+	/* loc validity check */
+	ASSERT(maddr->ma_bd >= 0 && OPL_BOARD_MAX > maddr->ma_bd);
+	ASSERT(bank >= 0 && OPL_BANK_MAX > bank);
+
+	/*
+	 * Do translation: build each PA offset bit from the per-CS
+	 * translation table.  Entries below MC_ADDRESS_BITS select a
+	 * DIMM-address bit; the MP_BANK_* pseudo-entries select a bit
+	 * of the bank number; MP_NONE contributes 0.
+	 */
+	for (i = 0; i < PA_BITS_FOR_MAC; i++) {
+		int pa_bit = 0;
+		int mc_bit = mcp->mc_trans_table[cs][i];
+		if (mc_bit < MC_ADDRESS_BITS) {
+			pa_bit = (maddr->ma_dimm_addr >> mc_bit) & 1;
+		} else if (mc_bit == MP_NONE) {
+			pa_bit = 0;
+		} else if (mc_bit == MP_BANK_0) {
+			pa_bit = bank & 1;
+		} else if (mc_bit == MP_BANK_1) {
+			pa_bit = (bank >> 1) & 1;
+		} else if (mc_bit == MP_BANK_2) {
+			pa_bit = (bank >> 2) & 1;
+		}
+		pa_offset |= ((uint64_t)pa_bit) << i;
+	}
+	*pa = mcp->mc_start_address + pa_offset;
+	MC_LOG("pa = %lx\n", *pa);
+
+	/* verify by translating the PA back to a MAC address */
+	if (pa_to_maddr(mcp, *pa, &maddr1) == -1) {
+		return (-1);
+	}
+
+
+	/* in mirror mode the low bank bit distinguishes the two halves */
+	if (IS_MIRROR(mcp, maddr->ma_bank)) {
+		bank0 = maddr->ma_bank & ~(1);
+		bank1 = maddr1.ma_bank & ~(1);
+	} else {
+		bank0 = maddr->ma_bank;
+		bank1 = maddr1.ma_bank;
+	}
+	/*
+	 * there is no need to check ma_bd because it is generated from
+	 * mcp. They are the same.
+	 */
+	if ((bank0 == bank1) &&
+	    (maddr->ma_dimm_addr == maddr1.ma_dimm_addr)) {
+		return (0);
+	} else {
+		cmn_err(CE_WARN, "Translation error source /LSB%d/B%d/%x, "
+		    "PA %lx, target /LSB%d/B%d/%x\n",
+		    maddr->ma_bd, bank, maddr->ma_dimm_addr,
+		    *pa, maddr1.ma_bd, maddr1.ma_bank,
+		    maddr1.ma_dimm_addr);
+		return (-1);
+	}
+}
+
+/*
+ * PA to CS (used by pa_to_maddr).
+ */
+/*
+ * Derive the chip-select (CS) bit from a PA offset (helper for
+ * pa_to_maddr).  The PA bit that maps to MAC address bit CS_SHIFT
+ * carries the CS value; both translation tables place it on the
+ * same PA bit, so table 0 suffices.
+ */
+static int
+pa_to_cs(mc_opl_t *mcp, uint64_t pa_offset)
+{
+	int bit;
+
+	for (bit = 0; bit < PA_BITS_FOR_MAC; bit++) {
+		if (mcp->mc_trans_table[0][bit] == CS_SHIFT)
+			return ((pa_offset >> bit) & 1);
+	}
+	return (0);
+}
+
+/*
+ * PA to DIMM (used by pa_to_maddr).
+ */
+/*
+ * Translate a PA offset into a DIMM address (helper for pa_to_maddr).
+ * Each PA bit whose translation-table entry names a real MAC address
+ * bit contributes its value at the mapped position.
+ */
+/* ARGSUSED */
+static uint32_t
+pa_to_dimm(mc_opl_t *mcp, uint64_t pa_offset)
+{
+	int cs = pa_to_cs(mcp, pa_offset);
+	uint32_t daddr = 0;
+	int bit;
+
+	for (bit = 0; bit < PA_BITS_FOR_MAC; bit++) {
+		int pav = (pa_offset >> bit) & 1;
+		int map = mcp->mc_trans_table[cs][bit];
+
+		if (map < MC_ADDRESS_BITS)
+			daddr |= pav << map;
+	}
+	return (daddr);
+}
+
+/*
+ * PA to Bank (used by pa_to_maddr).
+ */
+/*
+ * Translate a PA offset into a bank number (helper for pa_to_maddr).
+ * The bank number is assembled from the PA bits that map to the
+ * MP_BANK_* pseudo-entries, on top of the per-CS supplement bit.
+ */
+static int
+pa_to_bank(mc_opl_t *mcp, uint64_t pa_offset)
+{
+	int cs = pa_to_cs(mcp, pa_offset);
+	int bank = mcp->mc_trans_table[cs][INDEX_OF_BANK_SUPPLEMENT_BIT];
+	int bit;
+
+	for (bit = 0; bit < PA_BITS_FOR_MAC; bit++) {
+		int pav = (pa_offset >> bit) & 1;
+
+		switch (mcp->mc_trans_table[cs][bit]) {
+		case MP_BANK_0:
+			bank |= pav;
+			break;
+		case MP_BANK_1:
+			bank |= pav << 1;
+			break;
+		case MP_BANK_2:
+			bank |= pav << 2;
+			break;
+		}
+	}
+
+	return (bank);
+}
+
+/*
+ * PA to MAC address translation
+ *
+ * Input: MAC driver state, physical address
+ * Output: LSB#, Bank id, mac address
+ *
+ * Valid - return value: 0
+ * Invalid - return value: -1
+ */
+
+/*
+ * PA to MAC address translation.
+ *
+ * Input: MAC driver state, physical address
+ * Output: LSB#, bank id, DIMM address (via *maddr)
+ *
+ * Returns 0 on success, -1 when the PA is not valid for this board.
+ */
+int
+pa_to_maddr(mc_opl_t *mcp, uint64_t pa, mc_addr_t *maddr)
+{
+	uint64_t off;
+
+	/* PA validity check */
+	if (!pa_is_valid(mcp, pa))
+		return (-1);
+
+	/* translate relative to the board's base address */
+	off = pa - mcp->mc_start_address;
+
+	maddr->ma_bd = mcp->mc_board_num;
+	maddr->ma_bank = pa_to_bank(mcp, off);
+	maddr->ma_dimm_addr = pa_to_dimm(mcp, off);
+	MC_LOG("pa %lx -> mcaddr /LSB%d/B%d/%x\n",
+	    off, maddr->ma_bd, maddr->ma_bank, maddr->ma_dimm_addr);
+	return (0);
+}
+
+/*
+ * Build and post an FMA ereport for one or two memory faults.
+ *
+ * In panic context the ereport is reserved from the pre-allocated
+ * error queue and committed synchronously; otherwise nvlists are
+ * allocated, posted, and freed normally.  The payload carries the
+ * per-fault bank/status/address/log arrays plus a mem-scheme
+ * resource whose unum encodes the CMU board, bank and DIMM slot.
+ *
+ * Fixes vs. the original:
+ * - In the unum loop, "bank" was read from flt_stat BEFORE flt_stat
+ *   was advanced to mflt_stat[i], so the second fault used the first
+ *   fault's bank.
+ * - The slot digit used "(bank & 0x1) * 2 + dslot[i] & 1"; '&' binds
+ *   more loosely than '+', so it computed ((bank&1)*2 + dslot) & 1
+ *   instead of the documented (Bank#&1)*2 + {0,1}.
+ */
+static void
+mc_ereport_post(mc_aflt_t *mc_aflt)
+{
+	char buf[FM_MAX_CLASS];
+	char device_path[MAXPATHLEN];
+	nv_alloc_t *nva = NULL;
+	nvlist_t *ereport, *detector, *resource;
+	errorq_elem_t *eqep;
+	int nflts;
+	mc_flt_stat_t *flt_stat;
+	int i, n, blen;
+	char *p;
+	uint32_t values[2], synd[2], dslot[2];
+
+	if (panicstr) {
+		/* at panic time, draw from the pre-allocated error queue */
+		eqep = errorq_reserve(ereport_errorq);
+		if (eqep == NULL)
+			return;
+		ereport = errorq_elem_nvl(ereport_errorq, eqep);
+		nva = errorq_elem_nva(ereport_errorq, eqep);
+	} else {
+		ereport = fm_nvlist_create(nva);
+	}
+
+	/*
+	 * Create the scheme "dev" FMRI.
+	 */
+	detector = fm_nvlist_create(nva);
+	resource = fm_nvlist_create(nva);
+
+	nflts = mc_aflt->mflt_nflts;
+
+	ASSERT(nflts >= 1 && nflts <= 2);
+
+	flt_stat = mc_aflt->mflt_stat[0];
+	(void) ddi_pathname(mc_aflt->mflt_mcp->mc_dip, device_path);
+	(void) fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
+	    device_path, NULL);
+
+	/*
+	 * Encode all the common data into the ereport.
+	 */
+	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
+	    MC_OPL_ERROR_CLASS,
+	    mc_aflt->mflt_is_ptrl ? MC_OPL_PTRL_SUBCLASS :
+	    MC_OPL_MI_SUBCLASS,
+	    mc_aflt->mflt_erpt_class);
+
+	MC_LOG("mc_ereport_post: ereport %s\n", buf);
+
+
+	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
+	    fm_ena_generate(mc_aflt->mflt_id, FM_ENA_FMT1),
+	    detector, NULL);
+
+	/*
+	 * Set payload.
+	 */
+	fm_payload_set(ereport, MC_OPL_BOARD, DATA_TYPE_UINT32,
+	    flt_stat->mf_flt_maddr.ma_bd, NULL);
+
+	fm_payload_set(ereport, MC_OPL_PA, DATA_TYPE_UINT64,
+	    flt_stat->mf_flt_paddr, NULL);
+
+	if (flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
+		fm_payload_set(ereport, MC_OPL_FLT_TYPE,
+		    DATA_TYPE_UINT8, ECC_STICKY, NULL);
+	}
+
+	for (i = 0; i < nflts; i++)
+		values[i] = mc_aflt->mflt_stat[i]->mf_flt_maddr.ma_bank;
+
+	fm_payload_set(ereport, MC_OPL_BANK, DATA_TYPE_UINT32_ARRAY,
+	    nflts, values, NULL);
+
+	for (i = 0; i < nflts; i++)
+		values[i] = mc_aflt->mflt_stat[i]->mf_cntl;
+
+	fm_payload_set(ereport, MC_OPL_STATUS, DATA_TYPE_UINT32_ARRAY,
+	    nflts, values, NULL);
+
+	for (i = 0; i < nflts; i++)
+		values[i] = mc_aflt->mflt_stat[i]->mf_err_add;
+
+	fm_payload_set(ereport, MC_OPL_ERR_ADD, DATA_TYPE_UINT32_ARRAY,
+	    nflts, values, NULL);
+
+	for (i = 0; i < nflts; i++)
+		values[i] = mc_aflt->mflt_stat[i]->mf_err_log;
+
+	fm_payload_set(ereport, MC_OPL_ERR_LOG, DATA_TYPE_UINT32_ARRAY,
+	    nflts, values, NULL);
+
+	for (i = 0; i < nflts; i++) {
+		flt_stat = mc_aflt->mflt_stat[i];
+		if (flt_stat->mf_errlog_valid) {
+			synd[i] = flt_stat->mf_synd;
+			dslot[i] = flt_stat->mf_dimm_slot;
+			values[i] = flt_stat->mf_dram_place;
+		} else {
+			synd[i] = 0;
+			dslot[i] = 0;
+			values[i] = 0;
+		}
+	}
+
+	fm_payload_set(ereport, MC_OPL_ERR_SYND,
+	    DATA_TYPE_UINT32_ARRAY, nflts, synd, NULL);
+
+	fm_payload_set(ereport, MC_OPL_ERR_DIMMSLOT,
+	    DATA_TYPE_UINT32_ARRAY, nflts, dslot, NULL);
+
+	fm_payload_set(ereport, MC_OPL_ERR_DRAM,
+	    DATA_TYPE_UINT32_ARRAY, nflts, values, NULL);
+
+	blen = MAXPATHLEN;
+	device_path[0] = 0;
+	p = &device_path[0];
+
+	for (i = 0; i < nflts; i++) {
+		int bank;
+		int psb = -1;
+
+		/* select this fault's stat before reading its bank */
+		flt_stat = mc_aflt->mflt_stat[i];
+		bank = flt_stat->mf_flt_maddr.ma_bank;
+		psb = mc_opl_get_physical_board(
+		    flt_stat->mf_flt_maddr.ma_bd);
+
+		if (psb != -1) {
+			snprintf(p, blen, "/CMU%d/B%d", psb, bank);
+		} else {
+			snprintf(p, blen, "/CMU/B%d", bank);
+		}
+
+		if (flt_stat->mf_errlog_valid) {
+			/* (dslot[i] & 1) must be parenthesized: '&' < '+' */
+			snprintf(p + strlen(p), blen, "/MEM%d%d%c",
+			    bank/2, (bank & 0x1) * 2 + (dslot[i] & 1),
+			    (dslot[i] & 0x2) ? 'B' : 'A');
+		}
+
+		n = strlen(&device_path[0]);
+		blen = MAXPATHLEN - n;
+		p = &device_path[n];
+		if (i < (nflts - 1)) {
+			snprintf(p, blen, " ");
+			n += 1; blen -= 1; p += 1;
+		}
+	}
+
+	/*
+	 * UNUM format /LSB#/B#/MEMxyZ
+	 * where x is the MAC# = Bank#/2
+	 * y is slot info = (Bank# & 0x1)*2 + {0, 1} 0 for DIMM-L, 1 for DIMM-H
+	 * DIMM-L is 0 in bit 13, DIMM-H is 1 in bit 13.
+	 * Z is A(CS0) or B(CS1) given by bit 14
+	 */
+	(void) fm_fmri_mem_set(resource, FM_MEM_SCHEME_VERSION,
+	    NULL, device_path, NULL, 0);
+
+	fm_payload_set(ereport, MC_OPL_RESOURCE, DATA_TYPE_NVLIST,
+	    resource, NULL);
+
+	if (panicstr) {
+		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
+	} else {
+		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
+		fm_nvlist_destroy(ereport, FM_NVA_FREE);
+		fm_nvlist_destroy(detector, FM_NVA_FREE);
+		fm_nvlist_destroy(resource, FM_NVA_FREE);
+	}
+}
+
+/*
+ * Common fault-drain path: translate the MAC address to a PA, retire
+ * the backing page when appropriate, and post an ereport.  Reached
+ * for CMPE/MUE/SUE in mirror mode and UE/permanent-CE in normal mode.
+ */
+static void
+mc_err_drain(mc_aflt_t *mc_aflt)
+{
+	int rv;
+	page_t *pp;
+	uint64_t errors;
+	uint64_t pa = (uint64_t)(-1);
+
+	MC_LOG("mc_err_drain: %s\n",
+	    mc_aflt->mflt_erpt_class);
+	/*
+	 * we come here only when we have:
+	 * In mirror mode: CMPE, MUE, SUE
+	 * In normal mode: UE, Permanent CE
+	 */
+	rv = mcaddr_to_pa(mc_aflt->mflt_mcp,
+	    &(mc_aflt->mflt_stat[0]->mf_flt_maddr), &pa);
+	/* the ereport carries (uint64_t)-1 when translation fails */
+	if (rv == 0)
+		mc_aflt->mflt_stat[0]->mf_flt_paddr = pa;
+	else
+		mc_aflt->mflt_stat[0]->mf_flt_paddr = (uint64_t)-1;
+	if (rv == 0) {
+		MC_LOG("mc_err_drain:pa = %lx\n", pa);
+		pp = page_numtopp_nolock(pa >> PAGESHIFT);
+
+		if (pp) {
+			/*
+			 * Don't keep retiring and make ereports
+			 * on bad pages in PTRL case
+			 */
+			MC_LOG("mc_err_drain:pp = %p\n", pp);
+			if (mc_aflt->mflt_is_ptrl) {
+				errors = 0;
+				if (page_retire_check(pa, &errors) == 0) {
+					MC_LOG("Page retired\n");
+					return;
+				}
+				if (errors & mc_aflt->mflt_pr) {
+					MC_LOG("errors %lx, mflt_pr %x\n",
+					    errors, mc_aflt->mflt_pr);
+					return;
+				}
+			}
+			MC_LOG("offline page %p error %x\n", pp,
+			    mc_aflt->mflt_pr);
+			(void) page_retire(pa, mc_aflt->mflt_pr);
+		}
+	}
+	mc_ereport_post(mc_aflt);
+}
+
+#define DIMM_SIZE 0x80000000
+
+#define INC_DIMM_ADDR(p, n) \
+ (p)->ma_dimm_addr += n; \
+ (p)->ma_dimm_addr &= (DIMM_SIZE - 1)
+
+/*
+ * The restart address is actually defined in unit of PA[37:6]
+ * the mac patrol will convert that to dimm offset. If the
+ * address is not in the bank, it will continue to search for
+ * the next PA that is within the bank.
+ *
+ * Also the mac patrol scans the dimms based on PA, not
+ * dimm offset.
+ */
+
+static int
+restart_patrol(mc_opl_t *mcp, int bank, mc_addr_info_t *maddr_info)
+{
+	page_t *pp;
+	uint32_t reg;
+	uint64_t pa;
+	int rv;
+	int loop_count = 0;
+
+	reg = ldphysio(MAC_PTRL_CNTL(mcp, bank));
+
+	/* already running, so we just return */
+	if (reg & MAC_CNTL_PTRL_START)
+		return (0);
+
+	/* no last-scanned address available: let the MAC auto-restart */
+	if (maddr_info == NULL || (maddr_info->mi_valid == 0)) {
+		MAC_PTRL_START(mcp, bank);
+		return (0);
+	}
+
+
+	rv = mcaddr_to_pa(mcp, &maddr_info->mi_maddr, &pa);
+	if (rv != 0) {
+		MC_LOG("cannot convert mcaddr to pa. use auto restart\n");
+		MAC_PTRL_START(mcp, bank);
+		return (0);
+	}
+
+	/*
+	 * pa is the last address scanned by the mac patrol
+	 * we calculate the next restart address as follows:
+	 * first we always advance it by 64 byte. Then begin the loop.
+	 * loop {
+	 *	if it is not in phys_install, we advance to next 64 MB boundary
+	 *	if it is not backed by a page structure, done
+	 *	if the page is bad, advance to the next page boundary.
+	 *	else done
+	 *	if the new address exceeds the board, wrap around.
+	 * } <stop if we come back to the same page>
+	 */
+
+	if (pa < mcp->mc_start_address || pa >= (mcp->mc_start_address
+	    + mcp->mc_size)) {
+		/* pa is not on this board, just retry */
+		cmn_err(CE_WARN, "restart_patrol: invalid address %lx "
+		    "on board %d\n", pa, mcp->mc_board_num);
+		MAC_PTRL_START(mcp, bank);
+		return (0);
+	}
+
+	MC_LOG("restart_patrol: pa = %lx\n", pa);
+	if (maddr_info->mi_advance) {
+		uint64_t new_pa;
+
+		/* mirrored banks scan in pairs, so step twice as far */
+		if (IS_MIRROR(mcp, bank))
+			new_pa = pa + 64 * 2;
+		else
+			new_pa = pa + 64;
+
+		if (!mc_valid_pa(mcp, new_pa)) {
+			/* Isolation unit size is 64 MB */
+#define	MC_ISOLATION_BSIZE	(64 * 1024 * 1024)
+			MC_LOG("Invalid PA\n");
+			pa = roundup(new_pa + 1, MC_ISOLATION_BSIZE);
+		} else {
+			pp = page_numtopp_nolock(new_pa >> PAGESHIFT);
+			if (pp != NULL) {
+				uint64_t errors = 0;
+				/* healthy page: restart right there */
+				if (page_retire_check(new_pa, &errors) &&
+				    (errors == 0)) {
+					MC_LOG("Page has no error\n");
+					MAC_PTRL_START(mcp, bank);
+					return (0);
+				}
+				/*
+				 * skip bad pages
+				 * and let the following loop to take care
+				 */
+				pa = roundup(new_pa + 1, PAGESIZE);
+				MC_LOG("Skipping bad page to %lx\n", pa);
+			} else {
+				MC_LOG("Page has no page structure\n");
+				MAC_PTRL_START(mcp, bank);
+				return (0);
+			}
+		}
+	}
+
+	/*
+	 * if we wrap around twice, we just give up and let
+	 * mac patrol decide.
+	 */
+	MC_LOG("pa is now %lx\n", pa);
+	while (loop_count <= 1) {
+		if (!mc_valid_pa(mcp, pa)) {
+			MC_LOG("pa is not valid. round up to 64 MB\n");
+			pa = roundup(pa + 1, 64 * 1024 * 1024);
+		} else {
+			pp = page_numtopp_nolock(pa >> PAGESHIFT);
+			if (pp != NULL) {
+				uint64_t errors = 0;
+				if (page_retire_check(pa, &errors) &&
+				    (errors == 0)) {
+					MC_LOG("Page has no error\n");
+					break;
+				}
+				/* skip bad pages */
+				pa = roundup(pa + 1, PAGESIZE);
+				MC_LOG("Skipping bad page to %lx\n", pa);
+			} else {
+				MC_LOG("Page has no page structure\n");
+				break;
+			}
+		}
+		if (pa >= (mcp->mc_start_address + mcp->mc_size)) {
+			MC_LOG("Wrap around\n");
+			pa = mcp->mc_start_address;
+			loop_count++;
+		}
+	}
+
+	/* restart MAC patrol: PA[37:6] */
+	MC_LOG("restart at pa = %lx\n", pa);
+	ST_MAC_REG(MAC_RESTART_ADD(mcp, bank), MAC_RESTART_PA(pa));
+	MAC_PTRL_START_ADD(mcp, bank);
+
+	return (0);
+}
+
+/*
+ * Rewriting is used for two purposes.
+ * - to correct the error in memory.
+ * - to determine whether the error is permanent or intermittent.
+ * It's done by writing the address in MAC_BANKm_REWRITE_ADD
+ * and issuing REW_REQ command in MAC_BANKm_PTRL_CNRL. After that,
+ * REW_END (and REW_CE/REW_UE if some error detected) is set when
+ * rewrite operation is done. See 4.7.3 and 4.7.11 in Columbus2 PRM.
+ *
+ * Note that rewrite operation doesn't change RAW_UE to Marked UE.
+ * Therefore, we use it only CE case.
+ */
+/*
+ * Issue a rewrite request for the given DIMM address and wait for it
+ * to complete.  Returns the final MAC_PTRL_CNTL value (MAC_CNTL_REW_END
+ * plus any REW_CE/REW_UE/REW_CMPE error bits).  On timeout the patrol
+ * circuit is reset and MAC_CNTL_REW_END alone is returned.
+ *
+ * Fix vs. the original: the first wait tested "count >= MAX_MC_LOOP_COUNT"
+ * after a "count++ < MAX" loop, so a successful break on the final
+ * allowed iteration was misreported as a timeout.  We now test the
+ * PTRL_STATUS bit itself.
+ */
+static uint32_t
+do_rewrite(mc_opl_t *mcp, int bank, uint32_t dimm_addr)
+{
+	uint32_t cntl;
+	int count;
+
+	/* first wait to make sure PTRL_STATUS is 0 */
+	for (count = 0; count < MAX_MC_LOOP_COUNT; count++) {
+		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
+		if (!(cntl & MAC_CNTL_PTRL_STATUS))
+			break;
+		delay(drv_usectohz(10 * 1000)); /* 10 m.s. */
+	}
+	if (cntl & MAC_CNTL_PTRL_STATUS)
+		goto bad;
+
+	ST_MAC_REG(MAC_REWRITE_ADD(mcp, bank), dimm_addr);
+	MAC_REW_REQ(mcp, bank);
+
+	count = 0;
+	do {
+		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
+		if (count++ >= MAX_MC_LOOP_COUNT) {
+			goto bad;
+		} else
+			delay(drv_usectohz(10 * 1000)); /* 10 m.s. */
+		/*
+		 * If there are other MEMORY or PCI activities, this
+		 * will be BUSY, else it should be set immediately
+		 */
+	} while (!(cntl & MAC_CNTL_REW_END));
+
+	MAC_CLEAR_ERRS(mcp, bank, MAC_CNTL_REW_ERRS);
+	return (cntl);
+bad:
+	/* This is bad. Just reset the circuit */
+	cmn_err(CE_WARN, "mc-opl rewrite timeout on /LSB%d/B%d\n",
+	    mcp->mc_board_num, bank);
+	cntl = MAC_CNTL_REW_END;
+	MAC_CMD(mcp, bank, MAC_CNTL_PTRL_RESET);
+	MAC_CLEAR_ERRS(mcp, bank, MAC_CNTL_REW_ERRS);
+	return (cntl);
+}
+
+/*
+ * Flush queued permanent-CE records to the SCF (service processor).
+ * For each queued entry: wait for the SCF to consume the previous
+ * static-error entry (VLD bit clear), then write the log and address
+ * registers.  If the SCF stays busy, the entry is retried on later
+ * calls, up to MAX_SCF_RETRY times, before being dropped.
+ */
+void
+mc_process_scf_log(mc_opl_t *mcp)
+{
+	int count = 0;
+	scf_log_t *p;
+	int bank;
+
+	while ((p = mcp->mc_scf_log) != NULL) {
+		bank = p->sl_bank;
+		/*
+		 * NOTE(review): "count" is not reset for each queue
+		 * entry, so later entries get a smaller polling budget
+		 * — confirm this is intended.
+		 */
+		while ((LD_MAC_REG(MAC_STATIC_ERR_ADD(mcp, p->sl_bank))
+		    & MAC_STATIC_ERR_VLD)) {
+			if (count++ >= (MAX_MC_LOOP_COUNT)) {
+				break;
+			}
+			delay(drv_usectohz(10 * 1000)); /* 10 m.s. */
+		}
+
+		if (count < MAX_MC_LOOP_COUNT) {
+			/* SCF is ready: hand over log, then address+VLD */
+			ST_MAC_REG(MAC_STATIC_ERR_LOG(mcp, p->sl_bank),
+			    p->sl_err_log);
+
+			ST_MAC_REG(MAC_STATIC_ERR_ADD(mcp, p->sl_bank),
+			    p->sl_err_add|MAC_STATIC_ERR_VLD);
+			mcp->mc_scf_retry[bank] = 0;
+		} else {
+			/* if we try too many times, just drop the req */
+			if (mcp->mc_scf_retry[bank]++ <= MAX_SCF_RETRY) {
+				return;
+			} else {
+				cmn_err(CE_WARN, "SCF is not responding. "
+				    "Dropping the SCF LOG\n");
+			}
+		}
+		/* dequeue and free the delivered (or dropped) entry */
+		mcp->mc_scf_log = p->sl_next;
+		mcp->mc_scf_total--;
+		ASSERT(mcp->mc_scf_total >= 0);
+		kmem_free(p, sizeof (scf_log_t));
+	}
+}
+
+/*
+ * Queue one permanent-CE record for later delivery to the SCF by
+ * mc_process_scf_log().  The queue is capped at MAX_SCF_LOGS; when
+ * full, the new record is dropped with a warning.
+ * (Fixed the misspelled "excceded" in the warning message.)
+ */
+void
+mc_queue_scf_log(mc_opl_t *mcp, mc_flt_stat_t *flt_stat, int bank)
+{
+	scf_log_t *p;
+
+	if (mcp->mc_scf_total >= MAX_SCF_LOGS) {
+		cmn_err(CE_WARN,
+		    "Max# SCF logs exceeded on /LSB%d/B%d\n",
+		    mcp->mc_board_num, bank);
+		return;
+	}
+	p = kmem_zalloc(sizeof (scf_log_t), KM_SLEEP);
+	p->sl_next = 0;
+	p->sl_err_add = flt_stat->mf_err_add;
+	p->sl_err_log = flt_stat->mf_err_log;
+	p->sl_bank = bank;
+
+	if (mcp->mc_scf_log == NULL) {
+		/*
+		 * we rely on mc_scf_log to detect NULL queue.
+		 * mc_scf_log_tail is irrelevant in such case.
+		 */
+		mcp->mc_scf_log_tail = mcp->mc_scf_log = p;
+	} else {
+		mcp->mc_scf_log_tail->sl_next = p;
+		mcp->mc_scf_log_tail = p;
+	}
+	mcp->mc_scf_total++;
+}
+
+/*
+ * This routine determines what kind of CE happens, intermittent
+ * or permanent as follows. (See 4.7.3 in Columbus2 PRM.)
+ * - Do rewrite by issuing REW_REQ command to MAC_PTRL_CNTL register.
+ * - If CE is still detected on the same address even after doing
+ * rewrite operation twice, it is determined as permanent error.
+ * - If error is not detected anymore, it is determined as intermittent
+ * error.
+ * - If UE is detected due to rewrite operation, it should be treated
+ * as UE.
+ */
+
+/*
+ * Scrub a CE by issuing two rewrite requests and classify the fault:
+ * still-CE after both rewrites -> permanent CE (queued for the SCF),
+ * no CE on the second rewrite -> intermittent CE, and a UE or CMPE
+ * reported by either rewrite escalates the fault type immediately.
+ */
+/* ARGSUSED */
+static void
+mc_scrub_ce(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat, int ptrl_error)
+{
+	uint32_t cntl;
+	int i;
+
+	flt_stat->mf_type = FLT_TYPE_PERMANENT_CE;
+	/*
+	 * rewrite request 1st time reads and correct error data
+	 * and write to DIMM. 2nd rewrite request must be issued
+	 * after REW_CE/UE/END is 0. When the 2nd request is completed,
+	 * if REW_CE = 1, then it is permanent CE.
+	 */
+	for (i = 0; i < 2; i++) {
+		cntl = do_rewrite(mcp, bank, flt_stat->mf_err_add);
+		/*
+		 * If the error becomes UE or CMPE
+		 * we return to the caller immediately.
+		 */
+		if (cntl & MAC_CNTL_REW_UE) {
+			if (ptrl_error)
+				flt_stat->mf_cntl |= MAC_CNTL_PTRL_UE;
+			else
+				flt_stat->mf_cntl |= MAC_CNTL_MI_UE;
+			flt_stat->mf_type = FLT_TYPE_UE;
+			return;
+		}
+		if (cntl & MAC_CNTL_REW_CMPE) {
+			if (ptrl_error)
+				flt_stat->mf_cntl |= MAC_CNTL_PTRL_CMPE;
+			else
+				flt_stat->mf_cntl |= MAC_CNTL_MI_CMPE;
+			flt_stat->mf_type = FLT_TYPE_CMPE;
+			return;
+		}
+	}
+	/* no CE reported on the second rewrite: intermittent */
+	if (!(cntl & MAC_CNTL_REW_CE)) {
+		flt_stat->mf_type = FLT_TYPE_INTERMITTENT_CE;
+	}
+
+	if (flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
+		/* report PERMANENT_CE to SP via SCF */
+		if (!(flt_stat->mf_err_log & MAC_ERR_LOG_INVALID)) {
+			mc_queue_scf_log(mcp, flt_stat, bank);
+		}
+	}
+}
+
+#define IS_CMPE(cntl, f) ((cntl) & ((f) ? MAC_CNTL_PTRL_CMPE :\
+ MAC_CNTL_MI_CMPE))
+#define IS_UE(cntl, f) ((cntl) & ((f) ? MAC_CNTL_PTRL_UE : MAC_CNTL_MI_UE))
+#define IS_CE(cntl, f) ((cntl) & ((f) ? MAC_CNTL_PTRL_CE : MAC_CNTL_MI_CE))
+#define IS_OK(cntl, f) (!((cntl) & ((f) ? MAC_CNTL_PTRL_ERRS : \
+ MAC_CNTL_MI_ERRS)))
+
+
+/*
+ * Return nonzero when the control word reports a CE and no other
+ * error bit, within the selected (patrol vs. MI) bit group.
+ */
+static int
+IS_CE_ONLY(uint32_t cntl, int ptrl_error)
+{
+	uint32_t mask = ptrl_error ? MAC_CNTL_PTRL_ERRS : MAC_CNTL_MI_ERRS;
+	uint32_t ce = ptrl_error ? MAC_CNTL_PTRL_CE : MAC_CNTL_MI_CE;
+
+	return ((cntl & mask) == ce);
+}
+
+/*
+ * Write the bank's PTRL_CNTL register, OR-ing in the bank's sticky
+ * control bits kept in the soft state.
+ */
+void
+mc_write_cntl(mc_opl_t *mcp, int bank, uint32_t value)
+{
+	value |= mcp->mc_bank[bank].mcb_ptrl_cntl;
+	ST_MAC_REG(MAC_PTRL_CNTL(mcp, bank), value);
+}
+
+/*
+ * Ask the MAC patrol on the given bank to stop, then poll (up to
+ * MAX_MC_LOOP_COUNT + 1 times, 10 ms apart) for PTRL_STATUS to clear.
+ * Returns 0 once the patrol has stopped, -1 on timeout.
+ */
+static int
+mc_stop(mc_opl_t *mcp, int bank)
+{
+	uint32_t reg = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
+	int count;
+
+	if (reg & MAC_CNTL_PTRL_START)
+		MAC_PTRL_STOP(mcp, bank);
+
+	for (count = 0; count <= MAX_MC_LOOP_COUNT; count++) {
+		reg = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
+		if ((reg & MAC_CNTL_PTRL_STATUS) == 0)
+			return (0);
+		delay(drv_usectohz(10 * 1000)); /* 10 m.s. */
+	}
+	return (-1);
+}
+
+/*
+ * Snapshot the patrol error registers (control error bits, error
+ * address, error log) for a bank into flt_stat, and record the MAC
+ * address (board/bank/dimm) the error was seen at.
+ */
+static void
+mc_read_ptrl_reg(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat)
+{
+	flt_stat->mf_cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) &
+	    MAC_CNTL_PTRL_ERRS;
+	flt_stat->mf_err_add = LD_MAC_REG(MAC_PTRL_ERR_ADD(mcp, bank));
+	flt_stat->mf_err_log = LD_MAC_REG(MAC_PTRL_ERR_LOG(mcp, bank));
+	flt_stat->mf_flt_maddr.ma_bd = mcp->mc_board_num;
+	flt_stat->mf_flt_maddr.ma_bank = bank;
+	flt_stat->mf_flt_maddr.ma_dimm_addr = flt_stat->mf_err_add;
+}
+
+/*
+ * Snapshot the MI (mirror) error registers for a bank into flt_stat.
+ * Because the hardware may update the registers between reads, the
+ * status/address/log set is re-read until two consecutive status
+ * reads agree.
+ */
+static void
+mc_read_mi_reg(mc_opl_t *mcp, int bank, mc_flt_stat_t *flt_stat)
+{
+	uint32_t status, old_status;
+
+	status = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) &
+	    MAC_CNTL_MI_ERRS;
+	old_status = 0;
+
+	/* we keep reading until the status is stable */
+	while (old_status != status) {
+		old_status = status;
+		flt_stat->mf_err_add =
+		    LD_MAC_REG(MAC_MI_ERR_ADD(mcp, bank));
+		flt_stat->mf_err_log =
+		    LD_MAC_REG(MAC_MI_ERR_LOG(mcp, bank));
+		status = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank)) &
+		    MAC_CNTL_MI_ERRS;
+		if (status == old_status) {
+			break;
+		}
+	}
+
+	flt_stat->mf_cntl = status;
+	flt_stat->mf_flt_maddr.ma_bd = mcp->mc_board_num;
+	flt_stat->mf_flt_maddr.ma_bank = bank;
+	flt_stat->mf_flt_maddr.ma_dimm_addr = flt_stat->mf_err_add;
+}
+
+
+/*
+ * Error philosophy for mirror mode:
+ *
+ * PTRL (The error address for both banks are same, since ptrl stops if it
+ * detects error.)
+ * - Compare error	Report CMPE.
+ *
+ * - UE-UE Report MUE. No rewrite.
+ *
+ * - UE-* UE-(CE/OK). Rewrite to scrub UE. Report SUE.
+ *
+ * - CE-* CE-(CE/OK). Scrub to determine if CE is permanent.
+ * If CE is permanent, inform SCF. Once for each
+ * Dimm. If CE becomes UE or CMPE, go back to above.
+ *
+ *
+ * MI (The error addresses for each bank are the same or different.)
+ * - Compare error	If addresses are the same. Just CMPE.
+ *			If addresses are different (this could happen
+ *			as a result of scrubbing). Report each separately.
+ * Only report error info on each side.
+ *
+ * - UE-UE Addresses are the same. Report MUE.
+ * Addresses are different. Report SUE on each bank.
+ * Rewrite to clear UE.
+ *
+ * - UE-* UE-(CE/OK)
+ * Rewrite to clear UE. Report SUE for the bank.
+ *
+ * - CE-* CE-(CE/OK). Scrub to determine if CE is permanent.
+ * If CE becomes UE or CMPE, go back to above.
+ *
+ */
+
+/*
+ * Mirror-mode error dispatcher (see the "Error philosophy" comment
+ * above).  Scrubs CEs first (which may escalate them), then reports
+ * CMPE/MUE/SUE depending on whether the two banks' error addresses
+ * match.  Returns 1 when any error was handled, 0 otherwise.
+ */
+static int
+mc_process_error_mir(mc_opl_t *mcp, mc_aflt_t *mc_aflt, mc_flt_stat_t *flt_stat)
+{
+	int ptrl_error = mc_aflt->mflt_is_ptrl;
+	int i;
+	int rv = 0;
+
+	MC_LOG("process mirror errors cntl[0] = %x, cntl[1] = %x\n",
+	    flt_stat[0].mf_cntl, flt_stat[1].mf_cntl);
+
+	/* nothing to do when neither bank reports an error */
+	if (ptrl_error) {
+		if (((flt_stat[0].mf_cntl | flt_stat[1].mf_cntl)
+		    & MAC_CNTL_PTRL_ERRS) == 0)
+			return (0);
+	} else {
+		if (((flt_stat[0].mf_cntl | flt_stat[1].mf_cntl)
+		    & MAC_CNTL_MI_ERRS) == 0)
+			return (0);
+	}
+
+	/*
+	 * First we take care of the case of CE
+	 * because they can become UE or CMPE
+	 */
+	for (i = 0; i < 2; i++) {
+		if (IS_CE_ONLY(flt_stat[i].mf_cntl, ptrl_error)) {
+			MC_LOG("CE detected on bank %d\n",
+			    flt_stat[i].mf_flt_maddr.ma_bank);
+			mc_scrub_ce(mcp, flt_stat[i].mf_flt_maddr.ma_bank,
+			    &flt_stat[i], ptrl_error);
+			rv = 1;
+		}
+	}
+
+	/* The above scrubbing can turn CE into UE or CMPE */
+
+	/*
+	 * Now we distinguish two cases: same address or not
+	 * the same address. It might seem more intuitive to
+	 * distinguish PTRL v.s. MI error but it is more
+	 * complicated that way.
+	 */
+
+	if (flt_stat[0].mf_err_add == flt_stat[1].mf_err_add) {
+
+		if (IS_CMPE(flt_stat[0].mf_cntl, ptrl_error) ||
+		    IS_CMPE(flt_stat[1].mf_cntl, ptrl_error)) {
+			/* same address, compare error: one CMPE report */
+			flt_stat[0].mf_type = FLT_TYPE_CMPE;
+			flt_stat[1].mf_type = FLT_TYPE_CMPE;
+			mc_aflt->mflt_erpt_class = MC_OPL_CMPE;
+			MC_LOG("cmpe error detected\n");
+			mc_aflt->mflt_nflts = 2;
+			mc_aflt->mflt_stat[0] = &flt_stat[0];
+			mc_aflt->mflt_stat[1] = &flt_stat[1];
+			mc_aflt->mflt_pr = PR_UE;
+			mc_err_drain(mc_aflt);
+			return (1);
+		}
+
+		if (IS_UE(flt_stat[0].mf_cntl, ptrl_error) &&
+		    IS_UE(flt_stat[1].mf_cntl, ptrl_error)) {
+			/* Both side are UE's */
+
+			MAC_SET_ERRLOG_INFO(&flt_stat[0]);
+			MAC_SET_ERRLOG_INFO(&flt_stat[1]);
+			MC_LOG("MUE detected\n");
+			flt_stat[0].mf_type = flt_stat[1].mf_type =
+			    FLT_TYPE_MUE;
+			mc_aflt->mflt_erpt_class = MC_OPL_MUE;
+			mc_aflt->mflt_nflts = 2;
+			mc_aflt->mflt_stat[0] = &flt_stat[0];
+			mc_aflt->mflt_stat[1] = &flt_stat[1];
+			mc_aflt->mflt_pr = PR_UE;
+			mc_err_drain(mc_aflt);
+			return (1);
+		}
+
+		/* Now the only case is UE/CE, UE/OK, or don't care */
+		for (i = 0; i < 2; i++) {
+			if (IS_UE(flt_stat[i].mf_cntl, ptrl_error)) {
+				/* If we have CE, we would have done REW */
+				if (IS_OK(flt_stat[i^1].mf_cntl, ptrl_error)) {
+					(void) do_rewrite(mcp,
+					    flt_stat[i].mf_flt_maddr.ma_bank,
+					    flt_stat[i].mf_flt_maddr.ma_dimm_addr);
+				}
+				flt_stat[i].mf_type = FLT_TYPE_UE;
+				MAC_SET_ERRLOG_INFO(&flt_stat[i]);
+				mc_aflt->mflt_erpt_class = MC_OPL_SUE;
+				mc_aflt->mflt_stat[0] = &flt_stat[i];
+				mc_aflt->mflt_nflts = 1;
+				mc_aflt->mflt_pr = PR_MCE;
+				mc_err_drain(mc_aflt);
+				/* Once we hit a UE/CE or UE/OK case, done */
+				return (1);
+			}
+		}
+
+	} else {
+		/*
+		 * addresses are different. That means errors
+		 * on the 2 banks are not related at all.
+		 */
+		for (i = 0; i < 2; i++) {
+			if (IS_CMPE(flt_stat[i].mf_cntl, ptrl_error)) {
+				flt_stat[i].mf_type = FLT_TYPE_CMPE;
+				mc_aflt->mflt_erpt_class = MC_OPL_CMPE;
+				MC_LOG("cmpe error detected\n");
+				mc_aflt->mflt_nflts = 1;
+				mc_aflt->mflt_stat[0] = &flt_stat[i];
+				mc_aflt->mflt_pr = PR_UE;
+				mc_err_drain(mc_aflt);
+				/* no more report on this bank */
+				flt_stat[i].mf_cntl = 0;
+				rv = 1;
+			}
+		}
+
+		/* UEs on different addresses are reported one per bank */
+		for (i = 0; i < 2; i++) {
+			if (IS_UE(flt_stat[i].mf_cntl, ptrl_error)) {
+				(void) do_rewrite(mcp,
+				    flt_stat[i].mf_flt_maddr.ma_bank,
+				    flt_stat[i].mf_flt_maddr.ma_dimm_addr);
+				flt_stat[i].mf_type = FLT_TYPE_UE;
+				MAC_SET_ERRLOG_INFO(&flt_stat[i]);
+				mc_aflt->mflt_erpt_class = MC_OPL_SUE;
+				mc_aflt->mflt_stat[0] = &flt_stat[i];
+				mc_aflt->mflt_nflts = 1;
+				mc_aflt->mflt_pr = PR_MCE;
+				mc_err_drain(mc_aflt);
+				rv = 1;
+			}
+		}
+	}
+	return (rv);
+}
+
+/*
+ * Error handler for mirror-mode banks.  Stops the patrol on the given
+ * bank, snapshots the patrol (PTRL) and MI error registers of BOTH
+ * banks of the mirror pair, clears the hardware error state, and then
+ * runs mc_process_error_mir() once for the patrol errors and once for
+ * the MI errors.  On return, maddr holds the faulting patrol address
+ * (mi_valid set when the patrol pass found a reportable error) so the
+ * caller can restart the patrol from it.
+ */
+static void
+mc_error_handler_mir(mc_opl_t *mcp, int bank, mc_addr_info_t *maddr)
+{
+	mc_aflt_t mc_aflt;
+	mc_flt_stat_t flt_stat[2], mi_flt_stat[2];
+	int other_bank;
+
+	if (mc_stop(mcp, bank)) {
+		cmn_err(CE_WARN, "Cannot stop Memory Patrol at /LSB%d/B%d\n",
+		    mcp->mc_board_num, bank);
+		return;
+	}
+	bzero(&mc_aflt, sizeof (mc_aflt_t));
+	bzero(&flt_stat, 2 * sizeof (mc_flt_stat_t));
+	bzero(&mi_flt_stat, 2 * sizeof (mc_flt_stat_t));
+
+	mc_aflt.mflt_mcp = mcp;
+	mc_aflt.mflt_id = gethrtime();
+
+	/* Now read all the registers into flt_stat */
+
+	MC_LOG("Reading registers of bank %d\n", bank);
+	/* patrol registers */
+	mc_read_ptrl_reg(mcp, bank, &flt_stat[0]);
+
+	ASSERT(maddr);
+	maddr->mi_maddr = flt_stat[0].mf_flt_maddr;
+
+	MC_LOG("ptrl registers cntl %x add %x log %x\n",
+	    flt_stat[0].mf_cntl,
+	    flt_stat[0].mf_err_add,
+	    flt_stat[0].mf_err_log);
+
+	/* MI registers */
+	mc_read_mi_reg(mcp, bank, &mi_flt_stat[0]);
+
+	MC_LOG("MI registers cntl %x add %x log %x\n",
+	    mi_flt_stat[0].mf_cntl,
+	    mi_flt_stat[0].mf_err_add,
+	    mi_flt_stat[0].mf_err_log);
+
+	/* the mirror partner is always the adjacent bank number */
+	other_bank = bank^1;
+
+	MC_LOG("Reading registers of bank %d\n", other_bank);
+
+	ASSERT(mcp->mc_bank[other_bank].mcb_status & BANK_INSTALLED);
+
+	mc_read_ptrl_reg(mcp, other_bank, &flt_stat[1]);
+	MC_LOG("ptrl registers cntl %x add %x log %x\n",
+	    flt_stat[1].mf_cntl,
+	    flt_stat[1].mf_err_add,
+	    flt_stat[1].mf_err_log);
+
+	/* MI registers */
+	mc_read_mi_reg(mcp, other_bank, &mi_flt_stat[1]);
+	MC_LOG("MI registers cntl %x add %x log %x\n",
+	    mi_flt_stat[1].mf_cntl,
+	    mi_flt_stat[1].mf_err_add,
+	    mi_flt_stat[1].mf_err_log);
+
+	/* clear errors once we read all the registers */
+	MAC_CLEAR_ERRS(mcp, other_bank,
+	    (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
+
+	MAC_CLEAR_ERRS(mcp, bank, (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
+
+	/* Process PTRL errors first */
+
+	/* if not error mode, cntl1 is 0 */
+	if ((flt_stat[0].mf_err_add & MAC_ERR_ADD_INVALID) ||
+	    (flt_stat[0].mf_err_log & MAC_ERR_LOG_INVALID))
+		flt_stat[0].mf_cntl = 0;
+
+	if ((flt_stat[1].mf_err_add & MAC_ERR_ADD_INVALID) ||
+	    (flt_stat[1].mf_err_log & MAC_ERR_LOG_INVALID))
+		flt_stat[1].mf_cntl = 0;
+
+	mc_aflt.mflt_is_ptrl = 1;
+	maddr->mi_valid = mc_process_error_mir(mcp, &mc_aflt, &flt_stat[0]);
+
+	/* then the MI errors with the same machinery */
+	mc_aflt.mflt_is_ptrl = 0;
+	mc_process_error_mir(mcp, &mc_aflt, &mi_flt_stat[0]);
+}
+
+/*
+ * Process a single (non-mirror) patrol or MI error on the given bank.
+ * Classifies the fault from the control-register bits, scrubs CEs to
+ * decide whether they are intermittent or permanent (the type may be
+ * promoted to UE by the scrub), and drains an ereport when a
+ * reportable class was determined.
+ * Returns 1 when the registers described a UE or CE, 0 otherwise.
+ */
+static int
+mc_process_error(mc_opl_t *mcp, int bank, mc_aflt_t *mc_aflt,
+	mc_flt_stat_t *flt_stat)
+{
+	int ptrl_error = mc_aflt->mflt_is_ptrl;
+	int rv = 0;
+
+	mc_aflt->mflt_erpt_class = NULL;
+	if (IS_UE(flt_stat->mf_cntl, ptrl_error)) {
+		MC_LOG("UE detected\n");
+		flt_stat->mf_type = FLT_TYPE_UE;
+		mc_aflt->mflt_erpt_class = MC_OPL_UE;
+		mc_aflt->mflt_pr = PR_UE;
+		MAC_SET_ERRLOG_INFO(flt_stat);
+		rv = 1;
+	} else if (IS_CE(flt_stat->mf_cntl, ptrl_error)) {
+		MC_LOG("CE detected\n");
+		MAC_SET_ERRLOG_INFO(flt_stat);
+
+		/* Error type can change after scrubbing */
+		mc_scrub_ce(mcp, bank, flt_stat, ptrl_error);
+
+		if (flt_stat->mf_type == FLT_TYPE_PERMANENT_CE) {
+			mc_aflt->mflt_erpt_class = MC_OPL_CE;
+			mc_aflt->mflt_pr = PR_MCE;
+		} else if (flt_stat->mf_type == FLT_TYPE_UE) {
+			mc_aflt->mflt_erpt_class = MC_OPL_UE;
+			mc_aflt->mflt_pr = PR_UE;
+		}
+		rv = 1;
+	}
+	/* mflt_erpt_class may still be NULL here; never hand NULL to %s */
+	MC_LOG("mc_process_error: fault type %x erpt %s\n",
+	    flt_stat->mf_type,
+	    mc_aflt->mflt_erpt_class ? mc_aflt->mflt_erpt_class : "(none)");
+	if (mc_aflt->mflt_erpt_class) {
+		mc_aflt->mflt_stat[0] = flt_stat;
+		mc_aflt->mflt_nflts = 1;
+		mc_err_drain(mc_aflt);
+	}
+	return (rv);
+}
+
+/*
+ * Error handler for non-mirrored banks.  Stops the patrol, snapshots
+ * the patrol (PTRL) and MI error registers, clears the hardware error
+ * state, then feeds each register set through mc_process_error().
+ * On return, maddr holds the faulting patrol address (mi_valid set
+ * when the patrol registers held a valid error) so the caller can
+ * restart the patrol from it.
+ */
+static void
+mc_error_handler(mc_opl_t *mcp, int bank, mc_addr_info_t *maddr)
+{
+	mc_aflt_t mc_aflt;
+	mc_flt_stat_t flt_stat, mi_flt_stat;
+
+	if (mc_stop(mcp, bank)) {
+		cmn_err(CE_WARN, "Cannot stop Memory Patrol at /LSB%d/B%d\n",
+		    mcp->mc_board_num, bank);
+		return;
+	}
+
+	bzero(&mc_aflt, sizeof (mc_aflt_t));
+	bzero(&flt_stat, sizeof (mc_flt_stat_t));
+	bzero(&mi_flt_stat, sizeof (mc_flt_stat_t));
+
+	mc_aflt.mflt_mcp = mcp;
+	mc_aflt.mflt_id = gethrtime();
+
+	/* patrol registers */
+	mc_read_ptrl_reg(mcp, bank, &flt_stat);
+
+	ASSERT(maddr);
+	maddr->mi_maddr = flt_stat.mf_flt_maddr;
+
+	MC_LOG("ptrl registers cntl %x add %x log %x\n",
+	    flt_stat.mf_cntl,
+	    flt_stat.mf_err_add,
+	    flt_stat.mf_err_log);
+
+	/* MI registers */
+	mc_read_mi_reg(mcp, bank, &mi_flt_stat);
+
+	MC_LOG("MI registers cntl %x add %x log %x\n",
+	    mi_flt_stat.mf_cntl,
+	    mi_flt_stat.mf_err_add,
+	    mi_flt_stat.mf_err_log);
+
+	/* clear errors once we read all the registers */
+	MAC_CLEAR_ERRS(mcp, bank, (MAC_CNTL_PTRL_ERRS|MAC_CNTL_MI_ERRS));
+
+	/* only process registers whose address/log contents are valid */
+	mc_aflt.mflt_is_ptrl = 1;
+	if ((flt_stat.mf_cntl & MAC_CNTL_PTRL_ERRS) &&
+	    ((flt_stat.mf_err_add & MAC_ERR_ADD_INVALID) == 0) &&
+	    ((flt_stat.mf_err_log & MAC_ERR_LOG_INVALID) == 0)) {
+		maddr->mi_valid = mc_process_error(mcp, bank,
+		    &mc_aflt, &flt_stat);
+	}
+	mc_aflt.mflt_is_ptrl = 0;
+	if ((mi_flt_stat.mf_cntl & MAC_CNTL_MI_ERRS) &&
+	    ((mi_flt_stat.mf_err_add & MAC_ERR_ADD_INVALID) == 0) &&
+	    ((mi_flt_stat.mf_err_log & MAC_ERR_LOG_INVALID) == 0)) {
+		mc_process_error(mcp, bank, &mc_aflt, &mi_flt_stat);
+	}
+}
+
+/*
+ * memory patrol error handling algorithm:
+ * timeout() is used to do periodic polling
+ * This is the flow chart.
+ * timeout ->
+ * mc_check_errors()
+ * if memory bank is installed, read the status register
+ * if any error bit is set,
+ * -> mc_error_handler()
+ * -> mc_stop()
+ *		-> read all error registers
+ * -> mc_process_error()
+ * determine error type
+ * rewrite to clear error or scrub to determine CE type
+ * inform SCF on permanent CE
+ * -> mc_err_drain
+ * page offline processing
+ * -> mc_ereport_post()
+ */
+
+/*
+ * One polling pass over all installed banks on this board:
+ * count a patrol period when a bank wrapped its address space,
+ * dispatch the (mirror or plain) error handler when the status
+ * register shows a patrol or MI error, and restart the patrol on
+ * every bank.  mc_last_error accumulates consecutive passes that saw
+ * errors (used by the caller to shorten the polling interval) and is
+ * reset by a clean pass.
+ */
+static void
+mc_check_errors_func(mc_opl_t *mcp)
+{
+	mc_addr_info_t maddr_info;
+	int i, error_count = 0;
+	uint32_t stat, cntl;
+
+	/*
+	 * scan errors.
+	 */
+	for (i = 0; i < BANKNUM_PER_SB; i++) {
+		if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
+			stat = ldphysio(MAC_PTRL_STAT(mcp, i));
+			cntl = ldphysio(MAC_PTRL_CNTL(mcp, i));
+			/* patrol reached the top address: one full sweep */
+			if (cntl & MAC_CNTL_PTRL_ADD_MAX) {
+				mcp->mc_period++;
+				MC_LOG("mc period %ld on "
+				    "/LSB%d/B%d\n", mcp->mc_period,
+				    mcp->mc_board_num, i);
+				MAC_CLEAR_MAX(mcp, i);
+			}
+			if (mc_debug_show_all) {
+				MC_LOG("/LSB%d/B%d stat %x cntl %x\n",
+				    mcp->mc_board_num, i,
+				    stat, cntl);
+			}
+			if (stat & (MAC_STAT_PTRL_ERRS|MAC_STAT_MI_ERRS)) {
+				maddr_info.mi_valid = 0;
+				maddr_info.mi_advance = 1;
+				if (IS_MIRROR(mcp, i))
+					mc_error_handler_mir(mcp, i,
+					    &maddr_info);
+				else
+					mc_error_handler(mcp, i, &maddr_info);
+
+				error_count++;
+				restart_patrol(mcp, i, &maddr_info);
+			} else {
+				restart_patrol(mcp, i, NULL);
+			}
+		}
+	}
+	mc_process_scf_log(mcp);
+	if (error_count > 0)
+		mcp->mc_last_error += error_count;
+	else
+		mcp->mc_last_error = 0;
+}
+
+/*
+ * timeout(9F) callback wrapper around mc_check_errors_func().
+ * Runs a polling pass unless polling was stopped or soft-suspended,
+ * then re-arms the timeout.  The interval is halved once per
+ * consecutive error pass (mc_last_error), with a floor of 1 tick,
+ * so error bursts are polled more aggressively.
+ */
+static void
+mc_check_errors(void *arg)
+{
+	mc_opl_t *mcp = (mc_opl_t *)arg;
+	clock_t interval;
+
+	/*
+	 * scan errors.
+	 */
+	mutex_enter(&mcp->mc_lock);
+	mcp->mc_tid = 0;
+	if ((mcp->mc_status & MC_POLL_RUNNING) &&
+	    !(mcp->mc_status & MC_SOFT_SUSPENDED)) {
+		mc_check_errors_func(mcp);
+
+		if (mcp->mc_last_error > 0) {
+			interval = (mcp->mc_interval_hz) >> mcp->mc_last_error;
+			if (interval < 1)
+				interval = 1;
+		} else
+			interval = mcp->mc_interval_hz;
+
+		mcp->mc_tid = timeout(mc_check_errors, mcp,
+		    interval);
+	}
+	mutex_exit(&mcp->mc_lock);
+}
+
+/*
+ * Seed a patrol start address: the beginning (DIMM offset 0) of the
+ * given bank on this board.
+ */
+static void
+get_ptrl_start_address(mc_opl_t *mcp, int bank, mc_addr_t *maddr)
+{
+	maddr->ma_dimm_addr = 0;
+	maddr->ma_bank = bank;
+	maddr->ma_bd = mcp->mc_board_num;
+}
+
+/* one entry of the "sb-mem-ranges" property: a PA range on this board */
+typedef struct mc_mem_range {
+	uint64_t	addr;
+	uint64_t	size;
+} mc_mem_range_t;
+
+/*
+ * Read this board's PA range from the "sb-mem-ranges" property and
+ * cache its base and size in mcp.  Only the first range entry is
+ * consulted.  Returns DDI_FAILURE when the property cannot be read.
+ */
+static int
+get_base_address(mc_opl_t *mcp)
+{
+	mc_mem_range_t *mem_range;
+	int len;
+
+	if (ddi_getlongprop(DDI_DEV_T_ANY, mcp->mc_dip, DDI_PROP_DONTPASS,
+	    "sb-mem-ranges", (caddr_t)&mem_range, &len) != DDI_SUCCESS) {
+		return (DDI_FAILURE);
+	}
+
+	mcp->mc_start_address = mem_range->addr;
+	mcp->mc_size = mem_range->size;
+
+	/* ddi_getlongprop() allocated the buffer; release it */
+	kmem_free(mem_range, len);
+	return (DDI_SUCCESS);
+}
+
+/* decoded "mc-addr" property entry: MAC register base PA for one bank */
+struct mc_addr_spec {
+	uint32_t bank;
+	uint32_t phys_hi;
+	uint32_t phys_lo;
+};
+
+/* assemble the 64-bit register PA from the hi/lo cells of entry i */
+#define	REGS_PA(m, i) ((((uint64_t)m[i].phys_hi)<<32) | m[i].phys_lo)
+
+/* property names of the per-chip-select DIMM<->PA translation tables */
+static char *mc_tbl_name[] = {
+	"cs0-mc-pa-trans-table",
+	"cs1-mc-pa-trans-table"
+};
+
+/*
+ * Return non-zero when the physical address falls inside one of the
+ * memory segments owned by this memory controller.
+ */
+static int
+mc_valid_pa(mc_opl_t *mcp, uint64_t pa)
+{
+	struct memlist *seg;
+
+	/* build the segment list lazily on first use */
+	if (mcp->mlist == NULL)
+		mc_get_mlist(mcp);
+
+	seg = mcp->mlist;
+	while (seg != NULL) {
+		if ((pa >= seg->address) && (pa < seg->address + seg->size))
+			return (1);
+		seg = seg->next;
+	}
+	return (0);
+}
+
+/*
+ * Free every node of a memlist built by mc_memlist_dup() /
+ * mc_memlist_del_span().
+ */
+static void
+mc_memlist_delete(struct memlist *mlist)
+{
+	while (mlist != NULL) {
+		struct memlist *next = mlist->next;
+
+		kmem_free(mlist, sizeof (struct memlist));
+		mlist = next;
+	}
+}
+
+/*
+ * Duplicate a memlist into freshly allocated (KM_SLEEP) nodes,
+ * preserving order and doubly linking the copy.
+ * Returns NULL when the source list is NULL.
+ */
+static struct memlist *
+mc_memlist_dup(struct memlist *mlist)
+{
+	struct memlist *head = NULL, *tail = NULL;
+
+	for (; mlist != NULL; mlist = mlist->next) {
+		struct memlist *node;
+
+		node = kmem_alloc(sizeof (struct memlist), KM_SLEEP);
+		node->address = mlist->address;
+		node->size = mlist->size;
+		node->next = NULL;
+		node->prev = tail;		/* NULL for the head node */
+		if (tail == NULL)
+			head = node;
+		else
+			tail->next = node;
+		tail = node;
+	}
+
+	return (head);
+}
+
+
+/*
+ * Remove the span [base, base + len) from a memlist.  Entries fully
+ * inside the span are freed, partially covered entries are trimmed,
+ * and an entry straddling the span is split in two.  Returns the
+ * (possibly new) head of the list; NULL when everything was removed.
+ */
+static struct memlist *
+mc_memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
+{
+	uint64_t end;
+	struct memlist *ml, *tl, *nlp;
+
+	if (mlist == NULL)
+		return (NULL);
+
+	end = base + len;
+	/* span empty or entirely below the first entry: nothing to do */
+	if ((end <= mlist->address) || (base == end))
+		return (mlist);
+
+	/* tl trails ml by one node so removals can be stitched up */
+	for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
+		uint64_t mend;
+
+		nlp = ml->next;
+
+		/* list is sorted; past the span means we are done */
+		if (end <= ml->address)
+			break;
+
+		mend = ml->address + ml->size;
+		if (base < mend) {
+			if (base <= ml->address) {
+				/* span covers the front of this entry */
+				ml->address = end;
+				if (end >= mend)
+					/* fully covered: mark for removal */
+					ml->size = 0ull;
+				else
+					ml->size = mend - ml->address;
+			} else {
+				/* span starts inside the entry: trim tail */
+				ml->size = base - ml->address;
+				if (end < mend) {
+					struct memlist *nl;
+					/*
+					 * splitting a memlist entry.
+					 */
+					nl = kmem_alloc(sizeof (struct memlist),
+					    KM_SLEEP);
+					nl->address = end;
+					nl->size = mend - nl->address;
+					if ((nl->next = nlp) != NULL)
+						nlp->prev = nl;
+					nl->prev = ml;
+					ml->next = nl;
+					nlp = nl;
+				}
+			}
+			if (ml->size == 0ull) {
+				/* unlink and free an emptied entry */
+				if (ml == mlist) {
+					if ((mlist = nlp) != NULL)
+						nlp->prev = NULL;
+					kmem_free(ml, sizeof (struct memlist));
+					if (mlist == NULL)
+						break;
+					ml = nlp;
+				} else {
+					if ((tl->next = nlp) != NULL)
+						nlp->prev = tl;
+					kmem_free(ml, sizeof (struct memlist));
+					ml = tl;
+				}
+			}
+		}
+	}
+
+	return (mlist);
+}
+
+/*
+ * Build mcp->mlist: a copy of the installed-memory list (phys_install)
+ * clipped to this board's [mc_start_address, mc_start_address +
+ * mc_size) range.  Leaves mcp->mlist untouched when the duplicate or
+ * clipping yields nothing.
+ */
+static void
+mc_get_mlist(mc_opl_t *mcp)
+{
+	struct memlist *mlist;
+
+	memlist_read_lock();
+	mlist = mc_memlist_dup(phys_install);
+	memlist_read_unlock();
+
+	/* drop everything below this board's base address */
+	if (mlist) {
+		mlist = mc_memlist_del_span(mlist, 0ull, mcp->mc_start_address);
+	}
+
+	/* and everything above the board's range, up to physmax */
+	if (mlist) {
+		uint64_t startpa, endpa;
+
+		startpa = mcp->mc_start_address + mcp->mc_size;
+		endpa = ptob(physmax + 1);
+		if (endpa > startpa) {
+			mlist = mc_memlist_del_span(mlist,
+			    startpa, endpa - startpa);
+		}
+	}
+
+	if (mlist) {
+		mcp->mlist = mlist;
+	}
+}
+
+/*
+ * Attach-time setup of a memory-controller board: read the device
+ * properties, initialize per-bank state from the "mc-addr" property,
+ * start the memory patrol on each installed bank (mirror pairs start
+ * only one side), and arm the periodic error-polling timeout.
+ * Returns DDI_SUCCESS, or DDI_FAILURE when a required property is
+ * missing.
+ */
+int
+mc_board_add(mc_opl_t *mcp)
+{
+	struct mc_addr_spec *macaddr;
+	int len, i, bk, cc;
+	mc_addr_info_t maddr;
+	uint32_t mirr;
+
+	mutex_init(&mcp->mc_lock, NULL, MUTEX_DRIVER, NULL);
+
+	/*
+	 * Get configurations from "pseudo-mc" node which includes:
+	 * board# : LSB number
+	 * mac-addr : physical base address of MAC registers
+	 * csX-mac-pa-trans-table: translation table from DIMM address
+	 * to physical address or vice versa.
+	 */
+	mcp->mc_board_num = (int)ddi_getprop(DDI_DEV_T_ANY, mcp->mc_dip,
+	    DDI_PROP_DONTPASS, "board#", -1);
+
+	/*
+	 * Get start address in this CAB. It can be gotten from
+	 * "sb-mem-ranges" property.
+	 */
+
+	if (get_base_address(mcp) == DDI_FAILURE) {
+		mutex_destroy(&mcp->mc_lock);
+		return (DDI_FAILURE);
+	}
+	/* get mac-pa trans tables; a missing table is zeroed, not fatal */
+	for (i = 0; i < MC_TT_CS; i++) {
+		len = MC_TT_ENTRIES;
+		cc = ddi_getlongprop_buf(DDI_DEV_T_ANY, mcp->mc_dip,
+		    DDI_PROP_DONTPASS, mc_tbl_name[i],
+		    (caddr_t)mcp->mc_trans_table[i], &len);
+
+		if (cc != DDI_SUCCESS) {
+			bzero(mcp->mc_trans_table[i], MC_TT_ENTRIES);
+		}
+	}
+	mcp->mlist = NULL;
+
+	mc_get_mlist(mcp);
+
+	/* initialize bank informations */
+	cc = ddi_getlongprop(DDI_DEV_T_ANY, mcp->mc_dip, DDI_PROP_DONTPASS,
+	    "mc-addr", (caddr_t)&macaddr, &len);
+	if (cc != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "Cannot get mc-addr. err=%d\n", cc);
+		mutex_destroy(&mcp->mc_lock);
+		return (DDI_FAILURE);
+	}
+
+	for (i = 0; i < len / sizeof (struct mc_addr_spec); i++) {
+		struct mc_bank *bankp;
+		uint32_t reg;
+
+		/*
+		 * setup bank
+		 */
+		bk = macaddr[i].bank;
+		bankp = &(mcp->mc_bank[bk]);
+		bankp->mcb_status = BANK_INSTALLED;
+		bankp->mcb_reg_base = REGS_PA(macaddr, i);
+
+		/* remember the control bits that must survive restarts */
+		reg = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bk));
+		bankp->mcb_ptrl_cntl = (reg & MAC_CNTL_PTRL_PRESERVE_BITS);
+
+		/*
+		 * check if mirror mode
+		 */
+		mirr = LD_MAC_REG(MAC_MIRR(mcp, bk));
+
+		if (mirr & MAC_MIRR_MIRROR_MODE) {
+			MC_LOG("Mirror -> /LSB%d/B%d\n",
+			    mcp->mc_board_num, bk);
+			bankp->mcb_status |= BANK_MIRROR_MODE;
+			/*
+			 * The following bit is only used for
+			 * error injection. We should clear it
+			 */
+			if (mirr & MAC_MIRR_BANK_EXCLUSIVE)
+				ST_MAC_REG(MAC_MIRR(mcp, bk),
+				    0);
+		}
+
+		/*
+		 * restart if not mirror mode or the other bank
+		 * of the mirror is not running
+		 */
+		if (!(mirr & MAC_MIRR_MIRROR_MODE) ||
+		    !(mcp->mc_bank[bk^1].mcb_status &
+		    BANK_PTRL_RUNNING)) {
+			MC_LOG("Starting up /LSB%d/B%d\n",
+			    mcp->mc_board_num, bk);
+			get_ptrl_start_address(mcp, bk, &maddr.mi_maddr);
+			maddr.mi_valid = 1;
+			maddr.mi_advance = 0;
+			restart_patrol(mcp, bk, &maddr);
+		} else {
+			MC_LOG("Not starting up /LSB%d/B%d\n",
+			    mcp->mc_board_num, bk);
+		}
+		bankp->mcb_status |= BANK_PTRL_RUNNING;
+	}
+	kmem_free(macaddr, len);
+
+	/*
+	 * set interval in HZ.
+	 */
+	for (i = 0; i < BANKNUM_PER_SB; i++) {
+		mcp->mc_scf_retry[i] = 0;
+	}
+	mcp->mc_last_error = 0;
+	mcp->mc_period = 0;
+
+	mcp->mc_interval_hz = drv_usectohz(mc_patrol_interval_sec * 1000000);
+	/* restart memory patrol checking */
+	mcp->mc_status |= MC_POLL_RUNNING;
+	mcp->mc_tid = timeout(mc_check_errors, mcp, mcp->mc_interval_hz);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Detach-time teardown: stop the patrol on every installed bank,
+ * cancel the polling timeout, discard any queued SCF logs, free the
+ * board's memlist and destroy the lock.
+ * Returns -1 when a bank's patrol cannot be stopped (board left
+ * intact), otherwise DDI_SUCCESS.
+ */
+int
+mc_board_del(mc_opl_t *mcp)
+{
+	int i;
+	scf_log_t *p;
+	timeout_id_t tid = 0;
+
+	/*
+	 * cleanup mac state
+	 */
+	mutex_enter(&mcp->mc_lock);
+	for (i = 0; i < BANKNUM_PER_SB; i++) {
+		if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
+			if (mc_stop(mcp, i)) {
+				mutex_exit(&mcp->mc_lock);
+				return (-1);
+			}
+			mcp->mc_bank[i].mcb_status &= ~BANK_INSTALLED;
+		}
+	}
+
+	/* stop memory patrol checking */
+	if (mcp->mc_status & MC_POLL_RUNNING) {
+		mcp->mc_status &= ~MC_POLL_RUNNING;
+		tid = mcp->mc_tid;
+		mcp->mc_tid = 0;
+	}
+
+	/* just throw away all the scf logs */
+	while ((p = mcp->mc_scf_log) != NULL) {
+		mcp->mc_scf_log = p->sl_next;
+		mcp->mc_scf_total--;
+		kmem_free(p, sizeof (scf_log_t));
+	}
+
+	if (mcp->mlist)
+		mc_memlist_delete(mcp->mlist);
+
+	/* untimeout() must not be called with mc_lock held */
+	mutex_exit(&mcp->mc_lock);
+	if (tid)
+		(void) untimeout(tid);
+
+	mutex_destroy(&mcp->mc_lock);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Suspend the patrol: stop it on all installed banks, cancel the
+ * polling timeout and record the given suspend reason flag
+ * (MC_SOFT_SUSPENDED or MC_DRIVER_SUSPENDED) in mc_status.
+ * Returns -1 when a bank's patrol cannot be stopped, else DDI_SUCCESS.
+ */
+int
+mc_suspend(mc_opl_t *mcp, uint32_t flag)
+{
+	timeout_id_t tid = 0;
+	int i;
+	/* stop memory patrol checking */
+	mutex_enter(&mcp->mc_lock);
+	if (mcp->mc_status & MC_POLL_RUNNING) {
+		for (i = 0; i < BANKNUM_PER_SB; i++) {
+			if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
+				if (mc_stop(mcp, i)) {
+					mutex_exit(&mcp->mc_lock);
+					return (-1);
+				}
+			}
+		}
+		mcp->mc_status &= ~MC_POLL_RUNNING;
+		tid = mcp->mc_tid;
+	}
+	mcp->mc_status |= flag;
+	mcp->mc_tid = 0;
+	/* untimeout() must not be called with mc_lock held */
+	mutex_exit(&mcp->mc_lock);
+	if (tid)
+		(void) untimeout(tid);
+
+	return (DDI_SUCCESS);
+}
+
+/* caller must clear the SUSPEND bits or this will do nothing */
+
+/*
+ * Resume the patrol after a suspend: clear the given suspend flag,
+ * re-read the board's base address (it may have moved across a
+ * suspend) and rebuild the memlist if so, then restart the patrol and
+ * re-arm polling -- but only once no suspend flag remains set.
+ * Returns DDI_FAILURE when the base address cannot be re-read.
+ */
+int
+mc_resume(mc_opl_t *mcp, uint32_t flag)
+{
+	int i;
+	uint64_t basepa;
+
+	mutex_enter(&mcp->mc_lock);
+	basepa = mcp->mc_start_address;
+	if (get_base_address(mcp) == DDI_FAILURE) {
+		mutex_exit(&mcp->mc_lock);
+		return (DDI_FAILURE);
+	}
+
+	/* board may have been relocated: rebuild the memory list */
+	if (basepa != mcp->mc_start_address) {
+		if (mcp->mlist)
+			mc_memlist_delete(mcp->mlist);
+		mcp->mlist = NULL;
+		mc_get_mlist(mcp);
+	}
+
+	mcp->mc_status &= ~flag;
+	/* keep the global instance list's cached range in sync */
+	mcp->mc_list->mc_start_address = mcp->mc_start_address;
+
+	/* still suspended for another reason: do not restart yet */
+	if (mcp->mc_status & (MC_SOFT_SUSPENDED | MC_DRIVER_SUSPENDED)) {
+		mutex_exit(&mcp->mc_lock);
+		return (DDI_SUCCESS);
+	}
+
+	if (!(mcp->mc_status & MC_POLL_RUNNING)) {
+		/* restart memory patrol checking */
+		mcp->mc_status |= MC_POLL_RUNNING;
+		for (i = 0; i < BANKNUM_PER_SB; i++) {
+			if (mcp->mc_bank[i].mcb_status & BANK_INSTALLED) {
+				restart_patrol(mcp, i, NULL);
+			}
+		}
+		/* check error asap */
+		mcp->mc_tid = timeout(mc_check_errors, mcp, 1);
+	}
+	mutex_exit(&mcp->mc_lock);
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Map a physical address to the controller instance covering it.
+ * Instances whose patrol is stopped or soft-suspended are skipped,
+ * since their state cannot be trusted.  Caller must hold mcmutex.
+ * Returns NULL when no running instance covers the address.
+ */
+static mc_opl_t *
+mc_pa_to_mcp(uint64_t pa)
+{
+	mc_inst_list_t *ip;
+
+	ASSERT(MUTEX_HELD(&mcmutex));
+	ip = mc_instances;
+	while (ip != NULL) {
+		/* if mac patrol is suspended, we cannot rely on it */
+		if (!(ip->mc_opl->mc_status & MC_POLL_RUNNING) ||
+		    (ip->mc_opl->mc_status & MC_SOFT_SUSPENDED)) {
+			ip = ip->next;
+			continue;
+		}
+		if ((pa >= ip->mc_start_address) &&
+		    (pa < ip->mc_start_address + ip->mc_size))
+			return (ip->mc_opl);
+		ip = ip->next;
+	}
+	return (NULL);
+}
+
+/*
+ * Get Physical Board number from Logical one.
+ */
+static int
+mc_opl_get_physical_board(int sb)
+{
+	/*
+	 * NOTE(review): the address-of test presumably checks whether the
+	 * (weakly bound) opl_get_physical_board symbol is resolved at run
+	 * time -- confirm against the platform module that provides it.
+	 */
+	if (&opl_get_physical_board) {
+		return (opl_get_physical_board(sb));
+	}
+
+	cmn_err(CE_NOTE, "!opl_get_physical_board() not loaded\n");
+	return (-1);
+}
+
+/*
+ * Translate a faulting PA into a unum string "/CMU<sb>/B<bank>" in
+ * buf (or "UNKNOWN" when no running controller covers the PA).
+ * *lenp, when non-NULL, receives the string length.
+ * Returns 0 on success, ENOSPC when buf is too small, ENXIO when the
+ * physical board number cannot be determined.
+ */
+/* ARGSUSED */
+int
+mc_get_mem_unum(int synd_code, uint64_t flt_addr, char *buf, int buflen,
+	int *lenp)
+{
+	mc_opl_t *mcp;
+	int bank;
+	int sb;
+
+	mutex_enter(&mcmutex);
+
+	if (((mcp = mc_pa_to_mcp(flt_addr)) == NULL) ||
+	    (!pa_is_valid(mcp, flt_addr))) {
+		mutex_exit(&mcmutex);
+		if (snprintf(buf, buflen, "UNKNOWN") >= buflen) {
+			return (ENOSPC);
+		} else {
+			if (lenp)
+				*lenp = strlen(buf);
+		}
+		return (0);
+	}
+
+	bank = pa_to_bank(mcp, flt_addr - mcp->mc_start_address);
+	sb = mc_opl_get_physical_board(mcp->mc_board_num);
+
+	if (sb == -1) {
+		mutex_exit(&mcmutex);
+		return (ENXIO);
+	}
+
+	if (snprintf(buf, buflen, "/CMU%d/B%d", sb, bank) >= buflen) {
+		mutex_exit(&mcmutex);
+		return (ENOSPC);
+	} else {
+		if (lenp)
+			*lenp = strlen(buf);
+	}
+	mutex_exit(&mcmutex);
+	return (0);
+}
+
+/*
+ * Soft-suspend the memory patrol on every registered controller
+ * instance (platform suspend entry point).
+ */
+int
+opl_mc_suspend()
+{
+	mc_inst_list_t *ip;
+
+	mutex_enter(&mcmutex);
+	for (ip = mc_instances; ip != NULL; ip = ip->next)
+		(void) mc_suspend(ip->mc_opl, MC_SOFT_SUSPENDED);
+	mutex_exit(&mcmutex);
+
+	return (0);
+}
+
+/*
+ * Resume the memory patrol on every registered controller instance
+ * (platform resume entry point).
+ */
+int
+opl_mc_resume()
+{
+	mc_inst_list_t *ip;
+
+	mutex_enter(&mcmutex);
+	for (ip = mc_instances; ip != NULL; ip = ip->next)
+		(void) mc_resume(ip->mc_opl, MC_SOFT_SUSPENDED);
+	mutex_exit(&mcmutex);
+
+	return (0);
+}
+
+/*
+ * Allocate an instance-list node for mcp, record its board number and
+ * address range, and push it onto the head of the global mc_instances
+ * list under mcmutex.
+ */
+static void
+insert_mcp(mc_opl_t *mcp)
+{
+	mc_inst_list_t *node;
+
+	node = kmem_zalloc(sizeof (mc_inst_list_t), KM_SLEEP);
+	node->mc_opl = mcp;
+	node->mc_board_num = mcp->mc_board_num;
+	node->mc_start_address = mcp->mc_start_address;
+	node->mc_size = mcp->mc_size;
+	mcp->mc_list = node;
+
+	mutex_enter(&mcmutex);
+	node->next = mc_instances;
+	mc_instances = node;
+	mutex_exit(&mcmutex);
+}
+
+/*
+ * Unlink mcp's instance-list node from mc_instances and free it.
+ * A node not on the list is silently ignored.
+ * NOTE(review): insert_mcp() takes mcmutex itself but this routine
+ * does not -- presumably the caller holds mcmutex here; verify at the
+ * call sites.
+ */
+static void
+delete_mcp(mc_opl_t *mcp)
+{
+	mc_inst_list_t *prev, *current;
+	mc_inst_list_t *p;
+
+	p = mcp->mc_list;
+
+	/* head of the list: just advance the head pointer */
+	if (mc_instances == p) {
+		mc_instances = p->next;
+		kmem_free(p, sizeof (mc_inst_list_t));
+		return;
+	}
+	prev = mc_instances;
+	for (current = mc_instances; current != NULL; current = current->next) {
+		if (current == p) {
+			prev->next = p->next;
+			kmem_free(p, sizeof (mc_inst_list_t));
+			return;
+		}
+		prev = current;
+	}
+}
+
+/* Error injection interface */
+
+/*
+ * Inject a memory error of the given type at physical address pa by
+ * programming the MAC error-generation (EG) registers, then trigger
+ * it with a store (and optionally a load) to the PA.  flags select
+ * target side, trigger, trap handling, and whether to restart/poll
+ * the patrol afterwards.  Mirror-only error types require the bank to
+ * be in mirror mode.  Returns 0 on success or an errno value.
+ */
+/* ARGSUSED */
+int
+mc_inject_error(int error_type, uint64_t pa, uint32_t flags)
+{
+	mc_opl_t *mcp;
+	int bank;
+	uint32_t dimm_addr;
+	uint32_t cntl;
+	mc_addr_info_t maddr;
+	uint32_t data, stat;
+	int both_sides = 0;
+	uint64_t pa0;
+	on_trap_data_t otd;
+	extern void cpu_flush_ecache(void);
+
+	MC_LOG("HW mc_inject_error(%x, %lx, %x)\n", error_type, pa, flags);
+
+	mutex_enter(&mcmutex);
+
+	if ((mcp = mc_pa_to_mcp(pa)) == NULL) {
+		mutex_exit(&mcmutex);
+		MC_LOG("mc_inject_error: invalid pa\n");
+		return (ENOTSUP);
+	}
+
+	/* take the per-instance lock before dropping the global one */
+	mutex_enter(&mcp->mc_lock);
+	mutex_exit(&mcmutex);
+
+	if (mcp->mc_status & (MC_SOFT_SUSPENDED | MC_DRIVER_SUSPENDED)) {
+		mutex_exit(&mcp->mc_lock);
+		MC_LOG("mc-opl has been suspended. No error injection.\n");
+		return (EBUSY);
+	}
+
+	/* convert pa to offset within the board */
+	MC_LOG("pa %lx, offset %lx\n", pa, pa - mcp->mc_start_address);
+
+	if (!pa_is_valid(mcp, pa)) {
+		mutex_exit(&mcp->mc_lock);
+		return (EINVAL);
+	}
+
+	pa0 = pa - mcp->mc_start_address;
+
+	bank = pa_to_bank(mcp, pa0);
+
+	/* optionally target the mirror partner instead */
+	if (flags & MC_INJECT_FLAG_OTHER)
+		bank = bank ^ 1;
+
+	if (MC_INJECT_MIRROR(error_type) && !IS_MIRROR(mcp, bank)) {
+		mutex_exit(&mcp->mc_lock);
+		MC_LOG("Not mirror mode\n");
+		return (EINVAL);
+	}
+
+	dimm_addr = pa_to_dimm(mcp, pa0);
+
+	MC_LOG("injecting error to /LSB%d/B%d/D%x\n",
+	    mcp->mc_board_num, bank, dimm_addr);
+
+
+	/* mirror error types must be armed on both banks of the pair */
+	switch (error_type) {
+	case MC_INJECT_INTERMITTENT_MCE:
+	case MC_INJECT_PERMANENT_MCE:
+	case MC_INJECT_MUE:
+		both_sides = 1;
+	}
+
+	if (flags & MC_INJECT_FLAG_RESET)
+		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), 0);
+
+	ST_MAC_REG(MAC_EG_ADD(mcp, bank), dimm_addr & MAC_EG_ADD_MASK);
+
+	if (both_sides) {
+		ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), 0);
+		ST_MAC_REG(MAC_EG_ADD(mcp, bank^1),
+		    dimm_addr & MAC_EG_ADD_MASK);
+	}
+
+	/* choose the EG control bits for the requested error type */
+	switch (error_type) {
+	case MC_INJECT_UE:
+	case MC_INJECT_SUE:
+	case MC_INJECT_MUE:
+		if (flags & MC_INJECT_FLAG_PATH) {
+			cntl = MAC_EG_ADD_FIX
+			    |MAC_EG_FORCE_READ00|MAC_EG_FORCE_READ16
+			    |MAC_EG_DERR_ONCE;
+		} else {
+			cntl = MAC_EG_ADD_FIX|MAC_EG_FORCE_DERR00
+			    |MAC_EG_FORCE_DERR16|MAC_EG_DERR_ONCE;
+		}
+		flags |= MC_INJECT_FLAG_ST;
+		break;
+	case MC_INJECT_INTERMITTENT_CE:
+	case MC_INJECT_INTERMITTENT_MCE:
+		if (flags & MC_INJECT_FLAG_PATH) {
+			cntl = MAC_EG_ADD_FIX
+			    |MAC_EG_FORCE_READ00
+			    |MAC_EG_DERR_ONCE;
+		} else {
+			cntl = MAC_EG_ADD_FIX
+			    |MAC_EG_FORCE_DERR16
+			    |MAC_EG_DERR_ONCE;
+		}
+		flags |= MC_INJECT_FLAG_ST;
+		break;
+	case MC_INJECT_PERMANENT_CE:
+	case MC_INJECT_PERMANENT_MCE:
+		if (flags & MC_INJECT_FLAG_PATH) {
+			cntl = MAC_EG_ADD_FIX
+			    |MAC_EG_FORCE_READ00
+			    |MAC_EG_DERR_ALWAYS;
+		} else {
+			cntl = MAC_EG_ADD_FIX
+			    |MAC_EG_FORCE_DERR16
+			    |MAC_EG_DERR_ALWAYS;
+		}
+		flags |= MC_INJECT_FLAG_ST;
+		break;
+	case MC_INJECT_CMPE:
+		/*
+		 * Compare error: write one pattern normally, then the
+		 * inverted pattern to one side only (bank exclusive),
+		 * so the mirror sides disagree.
+		 */
+		data = 0xabcdefab;
+		stphys(pa, data);
+		cpu_flush_ecache();
+		MC_LOG("CMPE: writing data %x to %lx\n", data, pa);
+		ST_MAC_REG(MAC_MIRR(mcp, bank), MAC_MIRR_BANK_EXCLUSIVE);
+		stphys(pa, data ^ 0xffffffff);
+		cpu_flush_ecache();
+		ST_MAC_REG(MAC_MIRR(mcp, bank), 0);
+		MC_LOG("CMPE: write new data %x to %lx\n",
+		    data ^ 0xffffffff, pa);
+		cntl = 0;
+		break;
+	case MC_INJECT_NOP:
+		cntl = 0;
+		break;
+	default:
+		MC_LOG("mc_inject_error: invalid option\n");
+		cntl = 0;
+	}
+
+	if (cntl) {
+		/* two-step arming sequence: setup bits first, then all */
+		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), cntl & MAC_EG_SETUP_MASK);
+		ST_MAC_REG(MAC_EG_CNTL(mcp, bank), cntl);
+
+		if (both_sides) {
+			ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl &
+			    MAC_EG_SETUP_MASK);
+			ST_MAC_REG(MAC_EG_CNTL(mcp, bank^1), cntl);
+		}
+	}
+
+	/*
+	 * For all injection cases except compare error, we
+	 * must write to the PA to trigger the error.
+	 */
+
+	if (flags & MC_INJECT_FLAG_ST) {
+		data = 0xf0e0d0c0;
+		MC_LOG("Writing %x to %lx\n", data, pa);
+		stphys(pa, data);
+		cpu_flush_ecache();
+	}
+
+	delay(inject_op_delay * drv_usectohz(1000 * 1000));
+
+
+	if (flags & MC_INJECT_FLAG_LD) {
+		if (flags & MC_INJECT_FLAG_NO_TRAP) {
+			/* read back under on_trap protection */
+			if (on_trap(&otd, OT_DATA_EC)) {
+				no_trap();
+				MC_LOG("Trap occurred\n");
+			} else {
+				MC_LOG("On-trap Reading from %lx\n", pa);
+				data = ldphys(pa);
+				no_trap();
+				MC_LOG("data = %x\n", data);
+			}
+		} else {
+			MC_LOG("Reading from %lx\n", pa);
+			data = ldphys(pa);
+			MC_LOG("data = %x\n", data);
+		}
+	}
+
+	if (flags & MC_INJECT_FLAG_RESTART) {
+		delay(inject_op_delay * drv_usectohz(1000 * 1000));
+
+		MC_LOG("Restart patrol\n");
+		if (mc_stop(mcp, bank)) {
+			cmn_err(CE_WARN, "Cannot stop Memory Patrol at "
+			    "/LSB%d/B%d\n", mcp->mc_board_num, bank);
+			mutex_exit(&mcp->mc_lock);
+			return (EIO);
+		}
+		maddr.mi_maddr.ma_bd = mcp->mc_board_num;
+		maddr.mi_maddr.ma_bank = bank;
+		maddr.mi_maddr.ma_dimm_addr = dimm_addr;
+		maddr.mi_valid = 1;
+		maddr.mi_advance = 0;
+		restart_patrol(mcp, bank, &maddr);
+	}
+
+	if (flags & MC_INJECT_FLAG_POLL) {
+		delay(inject_op_delay * drv_usectohz(1000 * 1000));
+
+		MC_LOG("Poll patrol error\n");
+		stat = LD_MAC_REG(MAC_PTRL_STAT(mcp, bank));
+		cntl = LD_MAC_REG(MAC_PTRL_CNTL(mcp, bank));
+		if (stat & (MAC_STAT_PTRL_ERRS|MAC_STAT_MI_ERRS)) {
+			maddr.mi_valid = 0;
+			maddr.mi_advance = 1;
+			if (IS_MIRROR(mcp, bank))
+				mc_error_handler_mir(mcp, bank,
+				    &maddr);
+			else
+				mc_error_handler(mcp, bank, &maddr);
+
+			restart_patrol(mcp, bank, &maddr);
+		} else
+			restart_patrol(mcp, bank, NULL);
+	}
+
+	mutex_exit(&mcp->mc_lock);
+	return (0);
+}
+
+/* Debug wrapper around stphysio(): log the 32-bit store, then do it. */
+void
+mc_stphysio(uint64_t pa, uint32_t data)
+{
+	MC_LOG("0x%x -> pa(%lx)\n", data, pa);
+	stphysio(pa, data);
+}
+
+/* Debug wrapper around ldphysio(): do the 32-bit load, then log it. */
+uint32_t
+mc_ldphysio(uint64_t pa)
+{
+	uint32_t value = ldphysio(pa);
+
+	MC_LOG("pa(%lx) = 0x%x\n", pa, value);
+	return (value);
+}
diff --git a/usr/src/uts/sun4u/opl/io/mc-opl.conf b/usr/src/uts/sun4u/opl/io/mc-opl.conf
new file mode 100644
index 0000000000..4b769e4928
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/mc-opl.conf
@@ -0,0 +1,26 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+ddi-forceattach=1;
diff --git a/usr/src/uts/sun4u/opl/io/oplkmdrv.c b/usr/src/uts/sun4u/opl/io/oplkmdrv.c
new file mode 100644
index 0000000000..cf7d238c04
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplkmdrv.c
@@ -0,0 +1,1107 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * OPL IPSec Key Management Driver.
+ *
+ * This driver runs on a OPL Domain. It processes requests received
+ * from the OPL Service Processor (SP) via mailbox message. It passes
+ * these requests to the sckmd daemon by means of an /ioctl interface.
+ *
+ * Requests received from the SP consist of IPsec security associations
+ * (SAs) needed to secure the communication between SC and Domain daemons
+ * communicating using DSCP.
+ */
+
+#include <sys/types.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/conf.h>
+#include <sys/ddi.h>
+#include <sys/cmn_err.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ndi_impldefs.h>
+#include <sys/modctl.h>
+#include <sys/disp.h>
+#include <sys/note.h>
+#include <sys/byteorder.h>
+#include <sys/sdt.h>
+
+#include <sys/scfd/scfdscpif.h>
+#include <sys/oplkm_msg.h>
+#include <sys/sckm_io.h>
+#include <sys/oplkm.h>
+
+#define OKM_NODENAME "oplkmdrv" /* Node name */
+#define OKM_TARGET_ID 0 /* Target ID */
+#define OKM_SM_TOUT 5000 /* small timeout (5msec) */
+#define OKM_LG_TOUT 50000 /* large timeout (50msec) */
+#define OKM_MB_TOUT 10000000 /* Mailbox timeout (10sec) */
+
+okms_t okms_global; /* Global instance structure */
+
+#ifdef DEBUG
+uint32_t okm_debug = DBG_WARN;
+#endif
+
+/*
+ * Prototypes for the module related functions.
+ */
+int okm_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
+int okm_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
+int okm_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result);
+int okm_open(dev_t *devp, int flag, int otyp, struct cred *cred);
+int okm_close(dev_t dev, int flag, int otyp, struct cred *cred);
+int okm_ioctl(dev_t dev, int cmd, intptr_t data, int flag,
+ cred_t *cred, int *rvalp);
+
+/*
+ * Prototypes for the internal functions.
+ */
+int okm_get_req(okms_t *okmsp, sckm_ioctl_getreq_t *ireqp,
+ intptr_t data, int flag);
+int okm_process_req(okms_t *okmsp, okm_req_hdr_t *reqp, uint32_t len,
+ sckm_ioctl_getreq_t *ireqp, intptr_t data, int flag);
+int okm_process_status(okms_t *okmsp, sckm_ioctl_status_t *ireply);
+void okm_event_handler(scf_event_t event, void *arg);
+int okm_send_reply(okms_t *okmsp, uint32_t transid, uint32_t status,
+ uint32_t sadb_err, uint32_t sadb_ver);
+int block_until_ready(okms_t *okmsp);
+static int okm_copyin_ioctl_getreq(intptr_t userarg,
+ sckm_ioctl_getreq_t *driverarg, int flag);
+static int okm_copyout_ioctl_getreq(sckm_ioctl_getreq_t *driverarg,
+ intptr_t userarg, int flag);
+static void okm_cleanup(okms_t *okmsp);
+static int okm_mbox_init(okms_t *okmsp);
+static void okm_mbox_fini(okms_t *okmsp);
+static clock_t okm_timeout_val(int error);
+
+
+/*
+ * Character device entry points.  Only open, close and ioctl are
+ * implemented; all request/reply traffic with the sckmd daemon flows
+ * through the ioctl interface.
+ */
+struct cb_ops okm_cb_ops = {
+	okm_open,		/* open */
+	okm_close,		/* close */
+	nodev,			/* strategy */
+	nodev,			/* print */
+	nodev,			/* dump */
+	nodev,			/* read */
+	nodev,			/* write */
+	okm_ioctl,		/* ioctl */
+	nodev,			/* devmap */
+	nodev,			/* mmap */
+	nodev,			/* segmap */
+	nochpoll,		/* poll */
+	ddi_prop_op,		/* prop_op */
+	0,			/* streamtab */
+	D_NEW | D_MP		/* Driver compatibility flag */
+};
+
+/*
+ * Device operations vector; this is a pseudo device, so there are no
+ * bus operations and no power management.
+ */
+struct dev_ops okm_ops = {
+	DEVO_REV,		/* devo_rev, */
+	0,			/* refcnt */
+	okm_info,		/* get_dev_info */
+	nulldev,		/* identify */
+	nulldev,		/* probe */
+	okm_attach,		/* attach */
+	okm_detach,		/* detach */
+	nodev,			/* reset */
+	&okm_cb_ops,		/* driver operations */
+	(struct bus_ops *)0	/* no bus operations */
+};
+
+/* Loadable-module linkage: this module is a device driver. */
+struct modldrv modldrv = {
+	&mod_driverops,
+	"OPL Key Management Driver v%I%",
+	&okm_ops,
+};
+
+/* Module linkage: a single driver linkage structure, NULL-terminated. */
+struct modlinkage modlinkage = {
+	MODREV_1,
+	&modldrv,
+	NULL
+};
+
+
+/*
+ * _init - Module's init routine.
+ *
+ * Installs the module into the system; a failure is reported via
+ * cmn_err() and returned to the framework.
+ */
+int
+_init(void)
+{
+	int err;
+
+	err = mod_install(&modlinkage);
+	if (err != 0)
+		cmn_err(CE_WARN, "mod_install failed, error = %d", err);
+	return (err);
+}
+
+/*
+ * _fini - Module's fini routine.
+ *
+ * Returns the result of mod_remove() directly: the original code
+ * returned `ret' on both the success and failure paths, so the
+ * intermediate test was redundant.
+ */
+int
+_fini(void)
+{
+	return (mod_remove(&modlinkage));
+}
+
+/*
+ * _info - Module's info routine.
+ *
+ * Reports module information to the framework via mod_info().
+ */
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * okm_attach - Module's attach routine.
+ *
+ * Description:	Initializes the modules state structure and create
+ *		the minor device node.  Only instance 0 and DDI_ATTACH
+ *		are supported; state lives in the global okms_global.
+ */
+int
+okm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+	int		instance;
+	okms_t		*okmsp = &okms_global;
+
+	instance = ddi_get_instance(dip);
+
+	/* Only one instance is supported. */
+	if (instance != 0) {
+		return (DDI_FAILURE);
+	}
+
+	if (cmd != DDI_ATTACH) {
+		return (DDI_FAILURE);
+	}
+
+	okmsp->km_dip = dip;
+	okmsp->km_major = ddi_name_to_major(ddi_get_name(dip));
+	okmsp->km_inst = instance;
+
+	/*
+	 * Get an interrupt block cookie corresponding to the
+	 * interrupt priority of the event handler.
+	 * Assert that the event priority is not redefined to
+	 * some other priority.
+	 */
+	/* LINTED */
+	ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW);
+	if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
+	    &okmsp->km_ibcookie) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed.");
+		return (DDI_FAILURE);
+	}
+	/*
+	 * km_lock is initialized with the iblock cookie so it can be
+	 * acquired from the mailbox event handler's soft interrupt
+	 * context; km_clean tracks what okm_cleanup() must undo.
+	 */
+	mutex_init(&okmsp->km_lock, NULL, MUTEX_DRIVER,
+	    (void *)okmsp->km_ibcookie);
+	okmsp->km_clean |= OKM_CLEAN_LOCK;
+	cv_init(&okmsp->km_wait, NULL, CV_DRIVER, NULL);
+	okmsp->km_clean |= OKM_CLEAN_CV;
+
+	/*
+	 * set clean_node ahead as remove_node has to be called even
+	 * if create node fails.
+	 */
+	okmsp->km_clean |= OKM_CLEAN_NODE;
+	if (ddi_create_minor_node(dip, OKM_NODENAME, S_IFCHR,
+	    instance, NULL, NULL) == DDI_FAILURE) {
+		cmn_err(CE_WARN, "Device node creation failed");
+		okm_cleanup(okmsp);
+		return (DDI_FAILURE);
+	}
+
+	ddi_set_driver_private(dip, (caddr_t)okmsp);
+	ddi_report_dev(dip);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * okm_detach - Module's detach routine.
+ *
+ * Description:	Cleans up the module's state structures and any other
+ *		relevant data.  Detach is refused while the mailbox is
+ *		still initialized (i.e. the device is open/in use).
+ */
+int
+okm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	okms_t	*okmsp;
+
+	if (cmd != DDI_DETACH) {
+		return (DDI_FAILURE);
+	}
+
+	if ((okmsp = ddi_get_driver_private(dip)) == NULL) {
+		return (DDI_FAILURE);
+	}
+
+	mutex_enter(&okmsp->km_lock);
+	/*
+	 * Check if the mailbox is still in use.
+	 */
+	if (okmsp->km_state & OKM_MB_INITED) {
+		mutex_exit(&okmsp->km_lock);
+		cmn_err(CE_WARN, "Detach failure: Mailbox in use");
+		return (DDI_FAILURE);
+	}
+	mutex_exit(&okmsp->km_lock);
+	okm_cleanup(okmsp);
+	ddi_set_driver_private(dip, NULL);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * okm_info - Module's info routine.
+ *
+ * Translates a dev_t to the corresponding devinfo node or instance
+ * number for the DDI framework.
+ *
+ * NOTE(review): getinfo(9E) does not guarantee a usable `dip' for
+ * DDI_INFO_DEVT2DEVINFO; consider using the global state instead --
+ * confirm against the framework contract.
+ */
+/* ARGSUSED */
+int
+okm_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+	okms_t	*okmsp;
+	minor_t	minor;
+	int	ret = DDI_FAILURE;
+
+	switch (infocmd) {
+	case DDI_INFO_DEVT2DEVINFO:
+		minor = getminor((dev_t)arg);
+		okmsp = ddi_get_driver_private(dip);
+		if (okmsp == NULL) {
+			*result = NULL;
+		} else {
+			*result = okmsp->km_dip;
+			ret = DDI_SUCCESS;
+		}
+		break;
+
+	case DDI_INFO_DEVT2INSTANCE:
+		minor = getminor((dev_t)arg);
+		*result = (void *)(uintptr_t)minor;
+		ret = DDI_SUCCESS;
+		/* Fix: previously fell through to default (missing break) */
+		break;
+
+	default:
+		break;
+	}
+	return (ret);
+}
+
+/*
+ * okm_open - Device open routine.
+ *
+ * Description:	Initializes the mailbox and waits until the mailbox
+ *		gets connected. Only one open at a time is supported.
+ *
+ * Returns:	0 on success, EBUSY if already open, or the error from
+ *		block_until_ready() (e.g. EINTR) on failure.
+ */
+/*ARGSUSED*/
+int
+okm_open(dev_t *devp, int flag, int otyp, struct cred *cred)
+{
+	okms_t	*okmsp = &okms_global;
+	int ret = 0;
+
+	DPRINTF(DBG_DRV, ("okm_open: called\n"));
+	mutex_enter(&okmsp->km_lock);
+	if (okmsp->km_state & OKM_OPENED) {
+		/* Only one open supported */
+		mutex_exit(&okmsp->km_lock);
+		DPRINTF(DBG_WARN, ("okm_open: already opened\n"));
+		return (EBUSY);
+	}
+	okmsp->km_state |= OKM_OPENED;
+	/* OKM_OPENED is rolled back if the mailbox cannot be readied. */
+	ret = block_until_ready(okmsp);
+	if (ret != 0) {
+		okmsp->km_state &= ~OKM_OPENED;
+	}
+	mutex_exit(&okmsp->km_lock);
+	DPRINTF(DBG_DRV, ("okm_open: ret=%d\n", ret));
+	return (ret);
+}
+
+/*
+ * block_until_ready - Function to wait until the mailbox is ready to use.
+ *
+ * Description:	It initializes the mailbox and waits for the mailbox
+ *		state to transition to connected.  A previously
+ *		disconnected mailbox is torn down first so it can be
+ *		re-initialized from scratch.
+ *
+ * Caller must hold km_lock.
+ */
+int
+block_until_ready(okms_t *okmsp)
+{
+	int ret = 0;
+
+	DPRINTF(DBG_DRV, ("block_until_ready: called\n"));
+	ASSERT(MUTEX_HELD(&okmsp->km_lock));
+
+	if (okmsp->km_state & OKM_MB_DISC) {
+		DPRINTF(DBG_DRV, ("block_until_ready: closing the mailbox\n"));
+		okm_mbox_fini(okmsp);
+	}
+	if (okmsp->km_state & OKM_MB_CONN) {
+		DPRINTF(DBG_DRV, ("block_until_ready: mailbox connected\n"));
+		return (0);
+	}
+	/*
+	 * Initialize mailbox.
+	 */
+	if ((ret = okm_mbox_init(okmsp)) != 0) {
+		DPRINTF(DBG_MBOX,
+		    ("block_until_ready: mailbox init failed ret=%d\n", ret));
+		return (ret);
+	}
+	DPRINTF(DBG_DRV, ("block_until_ready: ret=%d", ret));
+	return (ret);
+}
+
+/*
+ * okm_close - Device close routine.
+ *
+ * Description:	Closes the mailbox and clears all state flags
+ *		(including OKM_OPENED) so the device can be re-opened.
+ */
+/*ARGSUSED*/
+int
+okm_close(dev_t dev, int flag, int otyp, struct cred *cred)
+{
+	okms_t	*okmsp = &okms_global;
+
+	DPRINTF(DBG_DRV, ("okm_close: called\n"));
+	/* Close the lower layer first */
+	mutex_enter(&okmsp->km_lock);
+	okm_mbox_fini(okmsp);
+	okmsp->km_state = 0;
+	mutex_exit(&okmsp->km_lock);
+	return (0);
+}
+
+
+/*
+ * okm_ioctl - Device ioctl routine.
+ *
+ * Description:	Processes ioctls from the daemon.
+ *
+ *	SCKM_IOCTL_GETREQ - block until a request arrives from the SP
+ *		and hand it to the daemon.
+ *	SCKM_IOCTL_STATUS - accept the daemon's status for a previously
+ *		delivered request and relay it to the SP.
+ */
+/*ARGSUSED*/
+int
+okm_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cred, int *rvalp)
+{
+	okms_t	*okmsp = &okms_global;
+	sckm_ioctl_getreq_t ireq;
+	sckm_ioctl_status_t istatus;
+	int ret = 0;
+
+	switch (cmd) {
+	case SCKM_IOCTL_GETREQ:
+
+		DPRINTF(DBG_DRV, ("okm_ioctl: GETREQ\n"));
+		/* GETREQ is model-aware; copy-in via the helper. */
+		if (okm_copyin_ioctl_getreq(data, &ireq, flag)) {
+			return (EFAULT);
+		}
+
+		ret = okm_get_req(okmsp, &ireq, data, flag);
+		DPRINTF(DBG_DRV, ("okm_ioctl: GETREQ ret=%d\n", ret));
+		break;
+
+	case SCKM_IOCTL_STATUS:
+
+		DPRINTF(DBG_DRV, ("okm_ioctl: STATUS\n"));
+		if (ddi_copyin((caddr_t)data, &istatus,
+		    sizeof (sckm_ioctl_status_t), flag)) {
+			return (EFAULT);
+		}
+		ret = okm_process_status(okmsp, &istatus);
+		DPRINTF(DBG_DRV, ("okm_ioctl: STATUS ret=%d\n", ret));
+		break;
+
+	default:
+		DPRINTF(DBG_DRV, ("okm_ioctl: UNKNOWN ioctl\n"));
+		ret = EINVAL;
+	}
+	return (ret);
+}
+
+/*
+ * okm_get_req - Get a request from the mailbox.
+ *
+ * Description:	It blocks until a message is received, then processes
+ *		the message and returns it to the requestor.
+ *
+ *		If a previously received request was cached (because the
+ *		daemon has not yet acknowledged it), that request is
+ *		re-delivered instead of reading a new one.
+ */
+int
+okm_get_req(okms_t *okmsp, sckm_ioctl_getreq_t *ireqp, intptr_t data, int flag)
+{
+	okm_req_hdr_t *reqp;
+	caddr_t msgbuf;
+	uint32_t len;
+	int ret;
+
+	DPRINTF(DBG_DRV, ("okm_getreq: called\n"));
+	mutex_enter(&okmsp->km_lock);
+	if ((ret = block_until_ready(okmsp)) != 0) {
+		mutex_exit(&okmsp->km_lock);
+		DPRINTF(DBG_WARN, ("okm_getreq: failed ret=%d\n", ret));
+		return (ret);
+	}
+
+	if (okmsp->km_reqp != NULL) {
+		/* Re-deliver the cached (unacknowledged) request. */
+		DPRINTF(DBG_DRV, ("okm_getreq: req cached\n"));
+		reqp = okmsp->km_reqp;
+		len = okmsp->km_reqlen;
+		okmsp->km_reqp = NULL;
+		okmsp->km_reqlen = 0;
+	} else {
+retry:
+		/*
+		 * Wait (interruptibly) until scf_mb_canget() reports a
+		 * pending message; ENOMSG simply means "nothing yet".
+		 */
+		while (OKM_MBOX_READY(okmsp) &&
+		    ((ret = scf_mb_canget(okmsp->km_target,
+		    okmsp->km_key, &len)) != 0)) {
+			if (ret != ENOMSG) {
+				DPRINTF(DBG_WARN, ("okm_getreq: Unknown "
+				    "mbox failure=%d\n", ret));
+				mutex_exit(&okmsp->km_lock);
+				return (EIO);
+			}
+			DPRINTF(DBG_MBOX, ("okm_getreq: waiting for mesg\n"));
+			if (cv_wait_sig(&okmsp->km_wait,
+			    &okmsp->km_lock) <= 0) {
+				mutex_exit(&okmsp->km_lock);
+				DPRINTF(DBG_DRV, ("okm_getreq:interrupted\n"));
+				return (EINTR);
+			}
+		}
+		if (!OKM_MBOX_READY(okmsp)) {
+			mutex_exit(&okmsp->km_lock);
+			DPRINTF(DBG_WARN, ("okm_getreq: mailbox not ready\n"));
+			return (EIO);
+		}
+		ASSERT(len != 0);
+		msgbuf = kmem_alloc(len, KM_SLEEP);
+		okmsp->km_sg_rcv.msc_dptr = msgbuf;
+		okmsp->km_sg_rcv.msc_len = len;
+
+		DPRINTF(DBG_MBOX, ("okm_getreq: getmsg\n"));
+		ret = scf_mb_getmsg(okmsp->km_target, okmsp->km_key, len, 1,
+		    &okmsp->km_sg_rcv, 0);
+		if (ret == ENOMSG || ret == EMSGSIZE) {
+			/* Message vanished or size changed; start over. */
+			kmem_free(msgbuf, len);
+			DPRINTF(DBG_MBOX, ("okm_getreq: nomsg ret=%d\n", ret));
+			goto retry;
+		} else if (ret != 0) {
+			kmem_free(msgbuf, len);
+			mutex_exit(&okmsp->km_lock);
+			DPRINTF(DBG_WARN,
+			    ("okm_getreq: Unknown mbox failure=%d\n", ret));
+			return (EIO);
+		}
+
+		/* check message length */
+		if (len < sizeof (okm_req_hdr_t)) {
+			/* protocol error, drop message */
+			kmem_free(msgbuf, len);
+			mutex_exit(&okmsp->km_lock);
+			DPRINTF(DBG_WARN, ("okm_getreq: Bad message\n"));
+			return (EBADMSG);
+		}
+
+		/* Header fields arrive in network byte order. */
+		reqp = (okm_req_hdr_t *)msgbuf;
+		reqp->krq_version = ntohl(reqp->krq_version);
+		reqp->krq_transid = ntohl(reqp->krq_transid);
+		reqp->krq_cmd = ntohl(reqp->krq_cmd);
+		reqp->krq_reserved = ntohl(reqp->krq_reserved);
+
+		/* check version of the message received */
+		if (reqp->krq_version != OKM_PROTOCOL_VERSION) {
+			okm_send_reply(okmsp, reqp->krq_transid,
+			    OKM_ERR_VERSION, 0, 0);
+			kmem_free(msgbuf, len);
+			mutex_exit(&okmsp->km_lock);
+			DPRINTF(DBG_WARN, ("okm_getreq: Unknown version=%d\n",
+			    reqp->krq_version));
+			return (EBADMSG);
+		}
+	}
+
+	/* process message */
+	ret = okm_process_req(okmsp, reqp, len, ireqp, data, flag);
+	if (okmsp->km_reqp == NULL) {
+		/*
+		 * The message is not saved, so free the buffer.
+		 */
+		kmem_free(reqp, len);
+	}
+	mutex_exit(&okmsp->km_lock);
+	DPRINTF(DBG_DRV, ("okm_getreq: ret=%d\n", ret));
+	return (ret);
+}
+
+
+/*
+ * okm_process_req - Process the request.
+ *
+ * Description:	Validate the request and then give the request to the
+ *		daemon.  On success the request is cached in km_reqp so
+ *		the later SCKM_IOCTL_STATUS can be matched against it.
+ *
+ * Caller must hold km_lock.
+ */
+int
+okm_process_req(okms_t *okmsp, okm_req_hdr_t *reqp, uint32_t len,
+    sckm_ioctl_getreq_t *ireqp, intptr_t data, int flag)
+{
+	void *req_datap = (void *)(((char *)reqp) + sizeof (okm_req_hdr_t));
+	int sadb_msglen = len - sizeof (okm_req_hdr_t);
+
+	DPRINTF(DBG_DRV, ("okm_process_req: called\n"));
+	DUMP_REQ(reqp, len);
+
+	switch (reqp->krq_cmd) {
+	case OKM_MSG_SADB:
+		/* sanity check request */
+		if (sadb_msglen <= 0) {
+			okm_send_reply(okmsp, reqp->krq_transid,
+			    OKM_ERR_SADB_MSG, 0, 0);
+			DPRINTF(DBG_WARN, ("okm_process_req: bad message\n"));
+			return (EBADMSG);
+		}
+
+		/*
+		 * Save the message, prior to giving it to the daemon.
+		 */
+		okmsp->km_reqp = reqp;
+		okmsp->km_reqlen = len;
+
+		/* Daemon's receive buffer must fit the whole request. */
+		if (ireqp->buf_len < len) {
+			DPRINTF(DBG_WARN,
+			    ("okm_process_req: not enough space\n"));
+			return (ENOSPC);
+		}
+
+		ireqp->transid = reqp->krq_transid;
+		ireqp->type = SCKM_IOCTL_REQ_SADB;
+		if (ddi_copyout(req_datap, ireqp->buf, sadb_msglen, flag)) {
+			DPRINTF(DBG_WARN,
+			    ("okm_process_req: copyout failed\n"));
+			return (EFAULT);
+		}
+		ireqp->buf_len = sadb_msglen;
+		if (okm_copyout_ioctl_getreq(ireqp, data, flag)) {
+			DPRINTF(DBG_WARN,
+			    ("okm_process_req: copyout failed\n"));
+			return (EFAULT);
+		}
+		break;
+
+	default:
+		cmn_err(CE_WARN, "Unknown cmd 0x%x received", reqp->krq_cmd);
+		/*
+		 * Received an unknown command, send corresponding
+		 * error message.
+		 */
+		okm_send_reply(okmsp, reqp->krq_transid, OKM_ERR_BAD_CMD, 0, 0);
+		return (EBADMSG);
+	}
+	DPRINTF(DBG_DRV, ("okm_process_req: ret=0\n"));
+	return (0);
+}
+
+/*
+ * okm_process_status - Process the status from the daemon.
+ *
+ * Description:	Processes the status received from the daemon and sends
+ *		corresponding message to the SP.  On successful reply
+ *		delivery the cached request is released.
+ */
+int
+okm_process_status(okms_t *okmsp, sckm_ioctl_status_t *ireply)
+{
+	uint32_t status;
+	uint32_t sadb_msg_errno = 0;
+	uint32_t sadb_msg_version = 0;
+	okm_req_hdr_t *reqp;
+	int ret;
+
+	DPRINTF(DBG_DRV, ("okm_process_status: called\n"));
+	mutex_enter(&okmsp->km_lock);
+	if ((ret = block_until_ready(okmsp)) != 0) {
+		mutex_exit(&okmsp->km_lock);
+		DPRINTF(DBG_WARN,
+		    ("okm_process_status: Unknown failure=%d\n", ret));
+		return (ret);
+	}
+
+	/*
+	 * Snapshot the cached request while holding km_lock; reading
+	 * km_reqp before mutex_enter() (as the original code did) raced
+	 * with okm_get_req() and okm_cleanup().
+	 */
+	reqp = okmsp->km_reqp;
+
+	/* fail if no status is expected, or if it does not match */
+	if (reqp == NULL || (reqp->krq_transid != ireply->transid)) {
+		mutex_exit(&okmsp->km_lock);
+		DPRINTF(DBG_WARN,
+		    ("okm_process_status: req/transid mismatch\n"));
+		return (EINVAL);
+	}
+
+	/* Map the daemon's status codes onto the mailbox protocol. */
+	switch (ireply->status) {
+	case SCKM_IOCTL_STAT_SUCCESS:
+		DPRINTF(DBG_DRV, ("okm_process_status: SUCCESS\n"));
+		status = OKM_SUCCESS;
+		break;
+	case SCKM_IOCTL_STAT_ERR_PFKEY:
+		DPRINTF(DBG_DRV, ("okm_process_status: PFKEY ERROR\n"));
+		status = OKM_ERR_SADB_PFKEY;
+		sadb_msg_errno = ireply->sadb_msg_errno;
+		break;
+	case SCKM_IOCTL_STAT_ERR_REQ:
+		DPRINTF(DBG_DRV, ("okm_process_status: REQ ERROR\n"));
+		status = OKM_ERR_DAEMON;
+		break;
+	case SCKM_IOCTL_STAT_ERR_VERSION:
+		DPRINTF(DBG_DRV, ("okm_process_status: SADB VERSION ERROR\n"));
+		status = OKM_ERR_SADB_VERSION;
+		sadb_msg_version = ireply->sadb_msg_version;
+		break;
+	case SCKM_IOCTL_STAT_ERR_TIMEOUT:
+		DPRINTF(DBG_DRV, ("okm_process_status: TIMEOUT ERR\n"));
+		status = OKM_ERR_SADB_TIMEOUT;
+		break;
+	case SCKM_IOCTL_STAT_ERR_OTHER:
+		DPRINTF(DBG_DRV, ("okm_process_status: OTHER ERR\n"));
+		status = OKM_ERR_DAEMON;
+		break;
+	case SCKM_IOCTL_STAT_ERR_SADB_TYPE:
+		DPRINTF(DBG_DRV, ("okm_process_status: SADB TYPE ERR\n"));
+		status = OKM_ERR_SADB_BAD_TYPE;
+		break;
+	default:
+		cmn_err(CE_WARN, "SCKM daemon returned invalid status %d\n",
+		    ireply->status);
+		status = OKM_ERR_DAEMON;
+	}
+	ret = okm_send_reply(okmsp, ireply->transid, status,
+	    sadb_msg_errno, sadb_msg_version);
+	/*
+	 * Clean up the cached request now.
+	 */
+	if (ret == 0) {
+		kmem_free(okmsp->km_reqp, okmsp->km_reqlen);
+		okmsp->km_reqp = NULL;
+		okmsp->km_reqlen = 0;
+	}
+	mutex_exit(&okmsp->km_lock);
+	DPRINTF(DBG_DRV, ("okm_process_status: ret=%d\n", ret));
+	return (ret);
+}
+
+/*
+ * okm_copyin_ioctl_getreq - copy-in the ioctl request from the daemon.
+ *
+ * Handles both ILP32 and LP64 callers under _MULTI_DATAMODEL.
+ * Returns 0 on success, EFAULT on copy-in failure.
+ */
+static int
+okm_copyin_ioctl_getreq(intptr_t userarg, sckm_ioctl_getreq_t *driverarg,
+    int flag)
+{
+#ifdef _MULTI_DATAMODEL
+	switch (ddi_model_convert_from(flag & FMODELS)) {
+	case DDI_MODEL_ILP32: {
+		sckm_ioctl_getreq32_t driverarg32;
+		if (ddi_copyin((caddr_t)userarg, &driverarg32,
+		    sizeof (sckm_ioctl_getreq32_t), flag)) {
+			return (EFAULT);
+		}
+		driverarg->transid = driverarg32.transid;
+		driverarg->type = driverarg32.type;
+		driverarg->buf = (caddr_t)(uintptr_t)driverarg32.buf;
+		driverarg->buf_len = driverarg32.buf_len;
+		break;
+	}
+	case DDI_MODEL_NONE: {
+		/*
+		 * BUG FIX: copy into the caller-supplied struct, not
+		 * into the pointer variable itself.  The original
+		 * `&driverarg' wrote sizeof (sckm_ioctl_getreq_t)
+		 * bytes over a pointer-sized stack slot.
+		 */
+		if (ddi_copyin((caddr_t)userarg, driverarg,
+		    sizeof (sckm_ioctl_getreq_t), flag)) {
+			return (EFAULT);
+		}
+		break;
+	}
+	}
+#else /* ! _MULTI_DATAMODEL */
+	/* Same fix as above: driverarg, not &driverarg. */
+	if (ddi_copyin((caddr_t)userarg, driverarg,
+	    sizeof (sckm_ioctl_getreq_t), flag)) {
+		return (EFAULT);
+	}
+#endif /* _MULTI_DATAMODEL */
+	return (0);
+}
+
+
+/*
+ * okm_copyout_ioctl_getreq - copy-out the request to the daemon.
+ *
+ * Mirror of okm_copyin_ioctl_getreq(): converts the native struct to
+ * the caller's data model before copying it out.  Returns 0 on
+ * success, EFAULT on copy-out failure.
+ */
+static int
+okm_copyout_ioctl_getreq(sckm_ioctl_getreq_t *driverarg, intptr_t userarg,
+    int flag)
+{
+#ifdef _MULTI_DATAMODEL
+	switch (ddi_model_convert_from(flag & FMODELS)) {
+	case DDI_MODEL_ILP32: {
+		sckm_ioctl_getreq32_t driverarg32;
+		driverarg32.transid = driverarg->transid;
+		driverarg32.type = driverarg->type;
+		driverarg32.buf = (caddr32_t)(uintptr_t)driverarg->buf;
+		driverarg32.buf_len = driverarg->buf_len;
+		if (ddi_copyout(&driverarg32, (caddr_t)userarg,
+		    sizeof (sckm_ioctl_getreq32_t), flag)) {
+			return (EFAULT);
+		}
+		break;
+	}
+	case DDI_MODEL_NONE:
+		if (ddi_copyout(driverarg, (caddr_t)userarg,
+		    sizeof (sckm_ioctl_getreq_t), flag)) {
+			return (EFAULT);
+		}
+		break;
+	}
+#else /* ! _MULTI_DATAMODEL */
+	if (ddi_copyout(driverarg, (caddr_t)userarg,
+	    sizeof (sckm_ioctl_getreq_t), flag)) {
+		return (EFAULT);
+	}
+#endif /* _MULTI_DATAMODEL */
+	return (0);
+}
+
+/*
+ * okm_cleanup - Cleanup routine.
+ *
+ * Undoes whatever okm_attach() set up, guided by the km_clean flag
+ * bits, and frees any cached request.  Called on attach failure and
+ * from okm_detach().
+ */
+static void
+okm_cleanup(okms_t *okmsp)
+{
+
+	ASSERT(okmsp != NULL);
+	if (okmsp->km_clean & OKM_CLEAN_NODE) {
+		ddi_remove_minor_node(okmsp->km_dip, NULL);
+	}
+	if (okmsp->km_clean & OKM_CLEAN_LOCK)
+		mutex_destroy(&okmsp->km_lock);
+	if (okmsp->km_clean & OKM_CLEAN_CV)
+		cv_destroy(&okmsp->km_wait);
+	if (okmsp->km_reqp != NULL) {
+		kmem_free(okmsp->km_reqp, okmsp->km_reqlen);
+		okmsp->km_reqp = NULL;
+		okmsp->km_reqlen = 0;
+	}
+	ddi_set_driver_private(okmsp->km_dip, NULL);
+}
+
+/*
+ * okm_mbox_init - Mailbox specific initialization.
+ *
+ * Repeatedly initializes the SCF mailbox and waits for it to reach
+ * the connected state, retrying after OKM_MB_TOUT on disconnect or
+ * failure.  Returns 0 once connected and the max message size has
+ * been validated; EINTR if a signal interrupts the wait.
+ *
+ * Caller must hold km_lock; the cv_wait calls drop it while sleeping.
+ */
+static int
+okm_mbox_init(okms_t *okmsp)
+{
+	int ret;
+	clock_t tout;
+
+	ASSERT(MUTEX_HELD(&okmsp->km_lock));
+	okmsp->km_target = OKM_TARGET_ID;
+	okmsp->km_key = DKMD_KEY;
+	okmsp->km_state &= ~OKM_MB_INITED;
+
+	/* Iterate until mailbox gets connected */
+	while (!(okmsp->km_state & OKM_MB_CONN)) {
+		DPRINTF(DBG_MBOX, ("okm_mbox_init: calling mb_init\n"));
+		ret = scf_mb_init(okmsp->km_target, okmsp->km_key,
+		    okm_event_handler, (void *)okmsp);
+		DPRINTF(DBG_MBOX, ("okm_mbox_init: mb_init ret=%d\n", ret));
+
+		if (ret == 0) {
+			okmsp->km_state |= OKM_MB_INITED;
+
+			/* Block until the mailbox is ready to communicate. */
+			while (!(okmsp->km_state &
+			    (OKM_MB_CONN | OKM_MB_DISC))) {
+
+				if (cv_wait_sig(&okmsp->km_wait,
+				    &okmsp->km_lock) <= 0) {
+					/* interrupted */
+					ret = EINTR;
+					break;
+				}
+			}
+		}
+
+		if (ret != 0) {
+
+			DPRINTF(DBG_MBOX,
+			    ("okm_mbox_init: failed/interrupted\n"));
+			DTRACE_PROBE1(okm_mbox_fail, int, ret);
+			okmsp->km_state &= ~OKM_MB_INITED;
+			(void) scf_mb_fini(okmsp->km_target, okmsp->km_key);
+
+			/* if interrupted, return immediately. */
+			if (ret == EINTR)
+				return (ret);
+
+		}
+
+		if ((ret != 0) || (okmsp->km_state & OKM_MB_DISC)) {
+
+			DPRINTF(DBG_WARN,
+			    ("okm_mbox_init: mbox DISC_ERROR\n"));
+			DTRACE_PROBE1(okm_mbox_fail, int, OKM_MB_DISC);
+			okmsp->km_state &= ~OKM_MB_INITED;
+			(void) scf_mb_fini(okmsp->km_target, okmsp->km_key);
+
+			/*
+			 * If there was failure, then wait for
+			 * OKM_MB_TOUT secs and retry again.
+			 */
+
+			DPRINTF(DBG_MBOX, ("okm_mbox_init: waiting...\n"));
+			tout = ddi_get_lbolt() + drv_usectohz(OKM_MB_TOUT);
+			ret = cv_timedwait_sig(&okmsp->km_wait,
+			    &okmsp->km_lock, tout);
+			if (ret == 0) {
+				/* if interrupted, return immediately. */
+				DPRINTF(DBG_MBOX,
+				    ("okm_mbox_init: interrupted\n"));
+				return (EINTR);
+			}
+		}
+	}
+
+	ret = scf_mb_ctrl(okmsp->km_target, okmsp->km_key,
+	    SCF_MBOP_MAXMSGSIZE, &okmsp->km_maxsz);
+
+	/*
+	 * The max msg size should be at least the size of reply
+	 * we need to send.
+	 */
+	if ((ret == 0) && (okmsp->km_maxsz < sizeof (okm_rep_hdr_t))) {
+		cmn_err(CE_WARN, "Max message size expected >= %ld "
+		    "but found %d\n", sizeof (okm_rep_hdr_t), okmsp->km_maxsz);
+		ret = EIO;
+	}
+	if (ret != 0) {
+		okmsp->km_state &= ~OKM_MB_INITED;
+		(void) scf_mb_fini(okmsp->km_target, okmsp->km_key);
+	}
+	DPRINTF(DBG_MBOX, ("okm_mbox_init: mb_init ret=%d\n", ret));
+	return (ret);
+}
+
+/*
+ * okm_mbox_fini - Mailbox de-initialization.
+ *
+ * Closes the SCF mailbox (if it was initialized) and clears the
+ * mailbox state bits.  Caller must hold km_lock.
+ */
+static void
+okm_mbox_fini(okms_t *okmsp)
+{
+	int ret = 0;
+
+	ASSERT(MUTEX_HELD(&okmsp->km_lock));
+	if (okmsp->km_state & OKM_MB_INITED) {
+		DPRINTF(DBG_MBOX, ("okm_mbox_fini: calling mb_fini\n"));
+		ret = scf_mb_fini(okmsp->km_target, okmsp->km_key);
+		DPRINTF(DBG_MBOX, ("okm_mbox_fini: mb_fini ret=%d\n", ret));
+		if (ret != 0) {
+			cmn_err(CE_WARN,
+			    "Failed to close the Mailbox error=%d", ret);
+		}
+		okmsp->km_state &= ~(OKM_MB_INITED | OKM_MB_CONN | OKM_MB_DISC);
+	}
+}
+
+/*
+ * okm_event_handler - Mailbox event handler.
+ *
+ * Description:	Implements a state machine to handle all the mailbox
+ *		events. For each event, it sets the appropriate state
+ *		flag and wakes up the threads waiting for that event.
+ *
+ *		Runs at soft-interrupt level (SCF_EVENT_PRI); km_lock
+ *		was initialized with the matching iblock cookie so it
+ *		is safe to take here.
+ */
+void
+okm_event_handler(scf_event_t event, void *arg)
+{
+	okms_t *okmsp = (okms_t *)arg;
+
+	DPRINTF(DBG_MBOX, ("okm_event_handler: called\n"));
+	ASSERT(okmsp != NULL);
+	mutex_enter(&okmsp->km_lock);
+	if (!(okmsp->km_state & OKM_MB_INITED)) {
+		/*
+		 * Ignore all events if the state flag indicates that the
+		 * mailbox not initialized, this may happen during the close.
+		 */
+		mutex_exit(&okmsp->km_lock);
+		DPRINTF(DBG_MBOX,
+		    ("okm_event_handler: event=0x%X - mailbox not inited \n",
+		    event));
+		return;
+	}
+	switch (event) {
+	case SCF_MB_CONN_OK:
+		DPRINTF(DBG_MBOX, ("okm_event_handler: Event CONN_OK\n"));
+		/*
+		 * Now the mailbox is ready to use, lets wake up
+		 * any one waiting for this event.
+		 */
+		okmsp->km_state |= OKM_MB_CONN;
+		cv_broadcast(&okmsp->km_wait);
+		break;
+
+	case SCF_MB_MSG_DATA:
+		DPRINTF(DBG_MBOX, ("okm_event_handler: Event MSG_DATA\n"));
+		/*
+		 * A message is available in the mailbox,
+		 * wakeup if any one is ready to read the message.
+		 */
+		if (OKM_MBOX_READY(okmsp)) {
+			cv_broadcast(&okmsp->km_wait);
+		}
+		break;
+
+	case SCF_MB_SPACE:
+		DPRINTF(DBG_MBOX, ("okm_event_handler: Event MB_SPACE\n"));
+		/*
+		 * Now the mailbox is ready to transmit, lets
+		 * wakeup if any one is waiting to write.
+		 */
+		if (OKM_MBOX_READY(okmsp)) {
+			cv_broadcast(&okmsp->km_wait);
+		}
+		break;
+	case SCF_MB_DISC_ERROR:
+		DPRINTF(DBG_MBOX, ("okm_event_handler: Event DISC_ERROR\n"));
+		/* Mark disconnected; block_until_ready() will re-init. */
+		okmsp->km_state &= ~OKM_MB_CONN;
+		okmsp->km_state |= OKM_MB_DISC;
+		cv_broadcast(&okmsp->km_wait);
+		break;
+	default:
+		cmn_err(CE_WARN, "Unexpected event received\n");
+	}
+	mutex_exit(&okmsp->km_lock);
+}
+
+/*
+ * okm_send_reply - Send a mailbox reply message.
+ *
+ * Builds a network-byte-order reply header and transmits it via
+ * scf_mb_putmsg(), polling (with a timeout from okm_timeout_val())
+ * while the mailbox reports EBUSY/ENOSPC.  Caller must hold km_lock.
+ *
+ * Returns 0 on success, EINTR if interrupted, or a putmsg errno.
+ */
+int
+okm_send_reply(okms_t *okmsp, uint32_t transid,
+    uint32_t status, uint32_t sadb_err, uint32_t sadb_ver)
+{
+	okm_rep_hdr_t reply;
+	int ret = EIO;
+
+	DPRINTF(DBG_DRV, ("okm_send_reply: called\n"));
+	ASSERT(MUTEX_HELD(&okmsp->km_lock));
+	reply.krp_version = htonl(OKM_PROTOCOL_VERSION);
+	reply.krp_transid = htonl(transid);
+	reply.krp_status = htonl(status);
+	reply.krp_sadb_errno = htonl(sadb_err);
+	reply.krp_sadb_version = htonl(sadb_ver);
+	okmsp->km_sg_tx.msc_dptr = (caddr_t)&reply;
+	okmsp->km_sg_tx.msc_len = sizeof (reply);
+	DUMP_REPLY(&reply);
+
+	while (OKM_MBOX_READY(okmsp)) {
+		DPRINTF(DBG_MBOX, ("okm_send_reply: sending reply\n"));
+		ret = scf_mb_putmsg(okmsp->km_target, okmsp->km_key,
+		    sizeof (reply), 1, &okmsp->km_sg_tx, 0);
+		DPRINTF(DBG_MBOX, ("okm_send_reply: putmsg ret=%d\n", ret));
+		if (ret == EBUSY || ret == ENOSPC) {
+			/* mailbox is busy, poll/retry */
+			if (cv_timedwait_sig(&okmsp->km_wait,
+			    &okmsp->km_lock, okm_timeout_val(ret)) == 0) {
+				/* interrupted */
+				ret = EINTR;
+				DPRINTF(DBG_DRV,
+				    ("okm_send_reply: interrupted\n"));
+				break;
+			}
+		} else {
+			break;
+		}
+	}
+	DPRINTF(DBG_DRV, ("okm_send_reply: ret=%d\n", ret));
+	return (ret);
+}
+
+/*
+ * okm_timeout_val -- Return appropriate timeout value.
+ *
+ * A small timeout value is returned for EBUSY as the mailbox busy
+ * condition may go away sooner and we are expected to poll.
+ *
+ * A larger timeout value is returned for ENOSPC case, as the condition
+ * depends on the peer to release buffer space.
+ * NOTE: there will also be an event(SCF_MB_SPACE) but a timeout is
+ * used for reliability purposes.
+ */
+static clock_t
+okm_timeout_val(int error)
+{
+	ASSERT(error == EBUSY || error == ENOSPC);
+
+	return (drv_usectohz((error == EBUSY) ?
+	    OKM_SM_TOUT : OKM_LG_TOUT));
+}
+
+#ifdef DEBUG
+/*
+ * okm_print_req - Debug dump of a request header plus a hex dump of
+ * its SADB payload, BYTES_PER_LINE bytes per line.  Active only when
+ * okm_debug has DBG_MESG set.
+ */
+static void
+okm_print_req(okm_req_hdr_t *reqp, uint32_t len)
+{
+	uint8_t *datap = (uint8_t *)(((char *)reqp) + sizeof (okm_req_hdr_t));
+	int msglen = len - sizeof (okm_req_hdr_t);
+	int i, j;
+#define	BYTES_PER_LINE	20
+	char bytestr[BYTES_PER_LINE * 3 + 1];
+
+	if (!(okm_debug & DBG_MESG))
+		return;
+	printf("OKM: Request ver=%d transid=%d cmd=%s\n",
+	    reqp->krq_version, reqp->krq_transid,
+	    ((reqp->krq_cmd == OKM_MSG_SADB) ? "MSG_SADB" : "UNKNOWN"));
+	for (i = 0; i < msglen; ) {
+		/*
+		 * Each sprintf writes 3 chars + NUL; j < BYTES_PER_LINE
+		 * bounds the final write within bytestr.
+		 */
+		for (j = 0; (j < BYTES_PER_LINE) && (i < msglen); j++, i++) {
+			sprintf(&bytestr[j * 3], "%02X ", datap[i]);
+		}
+		if (j != 0) {
+			printf("\t%s\n", bytestr);
+		}
+	}
+}
+
+/*
+ * okm_print_rep - Debug dump of a reply header.  Active only when
+ * okm_debug has DBG_MESG set.
+ */
+static void
+okm_print_rep(okm_rep_hdr_t *repp)
+{
+	if (!(okm_debug & DBG_MESG))
+		return;
+	printf("OKM: Reply Ver=%d Transid=%d Status=%d ",
+	    repp->krp_version, repp->krp_transid, repp->krp_status);
+	printf("Sadb_errno=%d Sadb_ver=%d\n", repp->krp_sadb_errno,
+	    repp->krp_sadb_version);
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/io/oplkmdrv.conf b/usr/src/uts/sun4u/opl/io/oplkmdrv.conf
new file mode 100644
index 0000000000..20e8f7773d
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplkmdrv.conf
@@ -0,0 +1,28 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+name="oplkmdrv" parent="pseudo" instance=0;
diff --git a/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu.c b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu.c
new file mode 100644
index 0000000000..b947a22da9
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu.c
@@ -0,0 +1,2471 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/ksynch.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <sys/termio.h>
+#include <sys/ddi.h>
+#include <sys/file.h>
+#include <sys/disp.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/sunndi.h>
+#include <sys/kbio.h>
+#include <sys/prom_plat.h>
+#include <sys/oplmsu/oplmsu.h>
+#include <sys/oplmsu/oplmsu_proto.h>
+
+extern int ddi_create_internal_pathname(dev_info_t *, char *, int, minor_t);
+
+#define MOD_ID 0xe145
+#define MOD_NAME "oplmsu"
+
+#define META_NAME "oplmsu"
+#define USER_NAME "a"
+
+/*
+ * STREAMS and loadable-module registration tables for the oplmsu driver.
+ * qinit field order: qi_putp, qi_srvp, qi_qopen, qi_qclose, qi_qadmin,
+ * qi_minfo, qi_mstat.
+ */
+struct module_info oplmsu_mod_info = {
+ MOD_ID, /* mi_idnum: module ID (0xe145) */
+ MOD_NAME, /* mi_idname: "oplmsu" */
+ 0, /* mi_minpsz: minimum packet size */
+ 16384, /* mi_maxpsz: maximum packet size */
+ 14336, /* mi_hiwat: high water mark */
+ 2048 /* mi_lowat: low water mark */
+};
+
+/* Upper read side: service routine only; messages arrive via lrput */
+struct qinit oplmsu_urinit = {
+ NULL,
+ oplmsu_ursrv,
+ oplmsu_open,
+ oplmsu_close,
+ NULL,
+ &oplmsu_mod_info,
+ NULL
+};
+
+/* Upper write side: put and service routines */
+struct qinit oplmsu_uwinit = {
+ oplmsu_uwput,
+ oplmsu_uwsrv,
+ oplmsu_open,
+ oplmsu_close,
+ NULL,
+ &oplmsu_mod_info,
+ NULL
+};
+
+/* Lower read side: put and service routines */
+struct qinit oplmsu_lrinit = {
+ oplmsu_lrput,
+ oplmsu_lrsrv,
+ oplmsu_open,
+ oplmsu_close,
+ NULL,
+ &oplmsu_mod_info,
+ NULL
+};
+
+/* Lower write side: service routine only */
+struct qinit oplmsu_lwinit = {
+ NULL,
+ oplmsu_lwsrv,
+ oplmsu_open,
+ oplmsu_close,
+ NULL,
+ &oplmsu_mod_info,
+ NULL
+};
+
+struct streamtab oplmsu_info = {
+ &oplmsu_urinit,
+ &oplmsu_uwinit,
+ &oplmsu_lrinit,
+ &oplmsu_lwinit
+};
+
+static struct cb_ops cb_oplmsu_ops = {
+ nulldev, /* cb_open */
+ nulldev, /* cb_close */
+ nodev, /* cb_strategy */
+ nodev, /* cb_print */
+ nodev, /* cb_dump */
+ nodev, /* cb_read */
+ nodev, /* cb_write */
+ nodev, /* cb_ioctl */
+ nodev, /* cb_devmap */
+ nodev, /* cb_mmap */
+ nodev, /* cb_segmap */
+ nochpoll, /* cb_chpoll */
+ ddi_prop_op, /* cb_prop_op */
+ (&oplmsu_info), /* cb_stream */
+ (int)(D_NEW|D_MP|D_HOTPLUG) /* cb_flag */
+};
+
+static struct dev_ops oplmsu_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* devo_refcnt */
+ (oplmsu_getinfo), /* devo_getinfo */
+ (nulldev), /* devo_identify */
+ (nulldev), /* devo_probe */
+ (oplmsu_attach), /* devo_attach */
+ (oplmsu_detach), /* devo_detach */
+ (nodev), /* devo_reset */
+ &(cb_oplmsu_ops), /* devo_cb_ops */
+ (struct bus_ops *)NULL, /* devo_bus_ops */
+ NULL /* devo_power */
+};
+
+struct modldrv modldrv = {
+ &mod_driverops,
+ "OPL serial mux driver %I%",
+ &oplmsu_ops
+};
+
+struct modlinkage modlinkage = {
+ MODREV_1,
+ (void *)&modldrv,
+ NULL
+};
+
+uinst_t oplmsu_uinst_local; /* upper_instance_table structure */
+uinst_t *oplmsu_uinst = &oplmsu_uinst_local;
+int oplmsu_queue_flag; /* Enable/disable queueing flag */
+int oplmsu_check_su; /* Check super-user flag */
+
+#ifdef DEBUG
+int oplmsu_debug_mode = 0; /* Enable/disable debug mode */
+int oplmsu_trace_on; /* Enable/disable trace */
+uint_t oplmsu_ltrc_size; /* Trace buffer size */
+msu_trc_t *oplmsu_ltrc_top; /* Top of trace data area */
+msu_trc_t *oplmsu_ltrc_tail; /* Tail of trace data area */
+msu_trc_t *oplmsu_ltrc_cur; /* Current pointer of trace data area */
+ulong_t oplmsu_ltrc_ccnt; /* Current counter */
+kmutex_t oplmsu_ltrc_lock; /* Lock table for trace mode */
+#endif
+
+/* States of oplmsu_conf_st (multiplexed-stream configuration progress) */
+#define MSU_CONFIGURED 2
+#define MSU_CONFIGURING 1
+#define MSU_UNCONFIGURED 0
+
+/* Serializes start/stop of the background configuration thread */
+static kmutex_t oplmsu_bthrd_excl;
+static kthread_id_t oplmsu_bthrd_id = NULL; /* background thread, if any */
+static int oplmsu_conf_st = MSU_UNCONFIGURED; /* guarded by oplmsu_bthrd_excl */
+static kcondvar_t oplmsu_conf_cv; /* signalled when conf_st changes */
+
+
+/*
+ * Locking hierarchy of the oplmsu driver.  This driver has 5 locks in
+ * uinst_t.
+ *
+ * Each mutex guards as follows.
+ *
+ * uinst_t->lock: This mutex is a read/write mutex.
+ * read lock : acquired if the members of uinst_t are only referred to.
+ * write lock: acquired if a member of uinst_t is changed.
+ *
+ * uinst_t->u_lock: This mutex is a normal mutex.
+ * This mutex is acquired when reading/changing the members of all upath_t.
+ *
+ * uinst_t->l_lock: This mutex is a normal mutex.
+ * This mutex is acquired when reading/changing the members of all lpath_t.
+ *
+ * uinst_t->c_lock: This mutex is a normal mutex.
+ * This mutex is acquired when reading/changing the members of the ctrl_t.
+ *
+ * oplmsu_bthrd_excl: This mutex is a normal mutex.
+ * This mutex is used only to start/stop the configuring thread of the
+ * multiplexed STREAMS.
+ * This mutex is acquired exclusively of the above-mentioned 4 mutexes.
+ *
+ * To guard against deadlock by cross locking, the base locking hierarchy
+ * is as follows:
+ *
+ * uinst->lock ==> uinst->u_lock ==> uinst->l_lock ==> uinst->c_lock
+ *
+ */
+
+
+/*
+ * _init(9E): register the driver with the system.  All uinst_t locks and
+ * the background-thread synchronization objects are created before
+ * mod_install() so open() can never observe them uninitialized; they are
+ * torn down again in reverse order if installation fails.
+ */
+int
+_init(void)
+{
+ int rval;
+
+ /* Initialize R/W lock for uinst_t */
+ rw_init(&oplmsu_uinst->lock, "uinst rwlock", RW_DRIVER, NULL);
+
+ /* Initialize mutex for upath_t */
+ mutex_init(&oplmsu_uinst->u_lock, "upath lock", MUTEX_DRIVER, NULL);
+
+ /* Initialize mutex for lpath_t */
+ mutex_init(&oplmsu_uinst->l_lock, "lpath lock", MUTEX_DRIVER, NULL);
+
+ /* Initialize mutex for ctrl_t */
+ mutex_init(&oplmsu_uinst->c_lock, "ctrl lock", MUTEX_DRIVER, NULL);
+
+ /* Initialize mutex for protecting background thread */
+ mutex_init(&oplmsu_bthrd_excl, NULL, MUTEX_DRIVER, NULL);
+
+ /* Initialize condition variable */
+ cv_init(&oplmsu_conf_cv, NULL, CV_DRIVER, NULL);
+
+ rval = mod_install(&modlinkage);
+ if (rval != DDI_SUCCESS) {
+ cv_destroy(&oplmsu_conf_cv);
+ mutex_destroy(&oplmsu_bthrd_excl);
+ mutex_destroy(&oplmsu_uinst->c_lock);
+ mutex_destroy(&oplmsu_uinst->l_lock);
+ mutex_destroy(&oplmsu_uinst->u_lock);
+ rw_destroy(&oplmsu_uinst->lock);
+ }
+ return (rval);
+}
+
+/*
+ * _fini(9E): unregister the driver.  Synchronization objects are destroyed
+ * only after mod_remove() succeeds, i.e. when no stream can still be using
+ * them.
+ */
+int
+_fini(void)
+{
+ int rval;
+
+ rval = mod_remove(&modlinkage);
+ if (rval == DDI_SUCCESS) {
+ cv_destroy(&oplmsu_conf_cv);
+ mutex_destroy(&oplmsu_bthrd_excl);
+ mutex_destroy(&oplmsu_uinst->c_lock);
+ mutex_destroy(&oplmsu_uinst->l_lock);
+ mutex_destroy(&oplmsu_uinst->u_lock);
+ rw_destroy(&oplmsu_uinst->lock);
+ }
+ return (rval);
+}
+
+/* _info(9E): report module information via the modlinkage. */
+int
+_info(struct modinfo *modinfop)
+{
+ return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * getinfo(9E) entry point.
+ * DDI_INFO_DEVT2DEVINFO: return the cached dip (fails until attach ran).
+ * DDI_INFO_DEVT2INSTANCE: strip the meta/user node-type bits from the
+ * minor number to recover the instance.
+ */
+/* ARGSUSED */
+int
+oplmsu_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
+{
+ dev_t dev = (dev_t)arg;
+ minor_t inst;
+ int rval = DDI_SUCCESS;
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2DEVINFO :
+ if (oplmsu_uinst->msu_dip == NULL) {
+ rval = DDI_FAILURE;
+ } else {
+ *resultp = oplmsu_uinst->msu_dip;
+ }
+ break;
+
+ case DDI_INFO_DEVT2INSTANCE :
+ inst = getminor(dev) & ~(META_NODE_MASK|USER_NODE_MASK);
+ *resultp = (void *)(uintptr_t)inst;
+ break;
+
+ default :
+ rval = DDI_FAILURE;
+ break;
+ }
+ return (rval);
+}
+
+/*
+ * attach(9E) entry point.  Only instance 0 is supported.  Creates the
+ * user-access minor node ("a") and the internal meta-control node, then
+ * initializes uinst_t and (under DEBUG) the optional trace buffer.
+ *
+ * Fixes vs. original:
+ * - abt_ch_seq was declared [3] with no NUL terminator yet passed to
+ *   strcpy(); the abort-sequence matcher in oplmsu_lrsrv() scans abts
+ *   for '\0', so the array must be NUL-terminated (was an out-of-bounds
+ *   read).
+ * - "Invaild" typo corrected in the diagnostic message.
+ */
+int
+oplmsu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ minor_t meta_minor, user_minor;
+ int rval = 0;
+ int instance;
+#define CNTRL(c) ((c) & 037)
+ /* NUL-terminated: strcpy() below and the abort matcher require it */
+ char abt_ch_seq[4] = { '\r', '~', CNTRL('b'), '\0' };
+
+ if (cmd == DDI_RESUME) {
+ return (DDI_SUCCESS);
+ }
+
+ if (cmd != DDI_ATTACH) {
+ return (DDI_FAILURE);
+ }
+
+ instance = ddi_get_instance(dip);
+ if (instance != 0) {
+ cmn_err(CE_WARN, "oplmsu: attach: "
+ "Invalid instance => %d", instance);
+ return (DDI_FAILURE);
+ }
+
+ /* Create minor number for meta control node */
+ meta_minor = instance | META_NODE_MASK;
+ /* Create minor number for user access node */
+ user_minor = instance | USER_NODE_MASK;
+
+ /* Create minor node for user access */
+ rval = ddi_create_minor_node(dip, USER_NAME, S_IFCHR, user_minor,
+ DDI_NT_SERIAL, 0);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "oplmsu: attach: "
+ "ddi_create_minor_node failed. errno = %d", rval);
+ ddi_remove_minor_node(dip, NULL);
+ return (rval);
+ }
+
+ /* Create minor node for meta control */
+ rval = ddi_create_internal_pathname(dip, META_NAME, S_IFCHR,
+ meta_minor);
+ if (rval != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "oplmsu: attach: "
+ "ddi_create_internal_pathname failed. errno = %d", rval);
+ ddi_remove_minor_node(dip, NULL);
+ return (rval);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+ /* Get each properties */
+ oplmsu_check_su = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS|DDI_PROP_NOTPROM), "check-superuser", 1);
+
+ /*
+ * Initialize members of uinst_t
+ */
+
+ oplmsu_uinst->inst_status = INST_STAT_UNCONFIGURED;
+ oplmsu_uinst->path_num = UNDEFINED;
+ oplmsu_uinst->msu_dip = dip;
+ (void) strcpy(oplmsu_uinst->abts, abt_ch_seq);
+
+#ifdef DEBUG
+ oplmsu_trace_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS|DDI_PROP_NOTPROM), "trace-mode", 1);
+ oplmsu_ltrc_size = (uint_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS|DDI_PROP_NOTPROM), "trace-bufsize", 128);
+
+ if (oplmsu_trace_on == MSU_TRACE_ON) {
+ /* Initialize mutex for msu_trc_t */
+ mutex_init(&oplmsu_ltrc_lock, "trc lock", MUTEX_DRIVER, NULL);
+
+ mutex_enter(&oplmsu_ltrc_lock);
+ oplmsu_ltrc_top = (msu_trc_t *)kmem_zalloc(
+ (sizeof (msu_trc_t) * oplmsu_ltrc_size), KM_SLEEP);
+ /* cur is pre-decremented so the first trace slot is top */
+ oplmsu_ltrc_cur = (msu_trc_t *)(oplmsu_ltrc_top - 1);
+ oplmsu_ltrc_tail =
+ (msu_trc_t *)(oplmsu_ltrc_top + (oplmsu_ltrc_size - 1));
+ mutex_exit(&oplmsu_ltrc_lock);
+ }
+#endif
+ rw_exit(&oplmsu_uinst->lock);
+ ddi_report_dev(dip);
+ return (rval);
+}
+
+/*
+ * detach(9E) entry point.  Tears down all upath_t/lpath_t state and the
+ * DEBUG trace buffer.  The lpath list is unhooked from uinst_t under
+ * l_lock, then walked lock-free: pending bufcalls/timeouts are cancelled
+ * and each lpath_t freed.  Minor nodes are removed last.
+ */
+int
+oplmsu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ lpath_t *lpath, *next_lpath;
+
+ if (cmd == DDI_SUSPEND) {
+ return (DDI_SUCCESS);
+ }
+
+ if (cmd != DDI_DETACH) {
+ return (DDI_FAILURE);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+ /* Delete all upath_t */
+ oplmsu_delete_upath_info();
+
+ /* Delete all lpath_t */
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = oplmsu_uinst->first_lpath;
+ oplmsu_uinst->first_lpath = NULL;
+ oplmsu_uinst->last_lpath = NULL;
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+#ifdef DEBUG
+ if (oplmsu_trace_on == MSU_TRACE_ON) {
+ mutex_enter(&oplmsu_ltrc_lock);
+ if (oplmsu_ltrc_top != NULL) {
+ kmem_free(oplmsu_ltrc_top,
+ (sizeof (msu_trc_t) * oplmsu_ltrc_size));
+ }
+ oplmsu_ltrc_top = NULL;
+ oplmsu_ltrc_cur = NULL;
+ oplmsu_ltrc_tail = NULL;
+ mutex_exit(&oplmsu_ltrc_lock);
+
+ mutex_destroy(&oplmsu_ltrc_lock);
+ }
+#endif
+ rw_exit(&oplmsu_uinst->lock);
+
+ /* Private list now; no lock needed while releasing each lpath */
+ while (lpath) {
+ if (lpath->rbuf_id) {
+ unbufcall(lpath->rbuf_id);
+ }
+
+ if (lpath->rtout_id) {
+ untimeout(lpath->rtout_id);
+ }
+
+ if (lpath->rbuftbl) {
+ kmem_free(lpath->rbuftbl, sizeof (struct buf_tbl));
+ }
+
+ cv_destroy(&lpath->sw_cv);
+ next_lpath = lpath->l_next;
+ kmem_free(lpath, sizeof (lpath_t));
+ lpath = next_lpath;
+ }
+ ddi_remove_minor_node(dip, NULL);
+ return (DDI_SUCCESS);
+}
+
+/*
+ * STREAMS open(9E) for both the meta-control and user-access nodes.
+ * For the first user-access open, kicks off (or waits for) the background
+ * thread that configures the multiplexed lower streams; the wait is
+ * interruptible (returns EINTR on signal).  Allocates and links a ctrl_t
+ * to the queue pair, then enables put/service routines.
+ */
+/* ARGSUSED */
+int
+oplmsu_open(queue_t *urq, dev_t *dev, int oflag, int sflag, cred_t *cred_p)
+{
+ ctrl_t *ctrl;
+ minor_t mindev = 0;
+ minor_t qmindev = 0;
+ major_t majdev;
+ ulong_t node_flag;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: open: "
+ "devt = 0x%lx, sflag = 0x%x", *dev, sflag));
+
+ if (sflag == CLONEOPEN) {
+ return (EINVAL);
+ }
+
+ /* Get minor device number */
+ qmindev = (minor_t)getminor(*dev);
+ /* Get node type */
+ node_flag = MSU_NODE_TYPE(qmindev);
+ if ((node_flag != MSU_NODE_USER) && (node_flag != MSU_NODE_META)) {
+ return (EINVAL);
+ }
+
+ mutex_enter(&oplmsu_bthrd_excl);
+ if ((node_flag == MSU_NODE_USER) &&
+ (oplmsu_conf_st != MSU_CONFIGURED)) { /* User access & First open */
+ int cv_rval;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: open: "
+ "oplmsu_conf_st = %x", oplmsu_conf_st));
+
+ if (oplmsu_conf_st == MSU_UNCONFIGURED) {
+ oplmsu_conf_st = MSU_CONFIGURING;
+
+ /* Start up background thread */
+ oplmsu_bthrd_id = thread_create(NULL, 2 * DEFAULTSTKSZ,
+ oplmsu_setup, (void *)oplmsu_uinst, 0, &p0, TS_RUN,
+ minclsyspri);
+ }
+
+ /*
+ * Wait with cv_wait_sig() until background thread is
+ * completed.
+ */
+ while (oplmsu_conf_st == MSU_CONFIGURING) {
+ cv_rval =
+ cv_wait_sig(&oplmsu_conf_cv, &oplmsu_bthrd_excl);
+ if (cv_rval == 0) {
+ /* Interrupted by a signal */
+ mutex_exit(&oplmsu_bthrd_excl);
+ return (EINTR);
+ }
+ }
+ }
+ mutex_exit(&oplmsu_bthrd_excl);
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+ /*
+ * If the node being opened is the meta-control node or the
+ * user-access node and q_ptr (the queue's private data) is
+ * already non-NULL, return SUCCESS immediately.
+ * This guards against a second, duplicate open of the queue.
+ */
+
+ if ((urq != NULL) && (urq->q_ptr != NULL)) {
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ }
+
+ /*
+ * If the node being opened is the User-Access-Node and the
+ * instance status of oplmsu is not ONLINE, the open fails
+ * with return value 'EIO'.
+ */
+
+ if ((node_flag == MSU_NODE_USER) &&
+ (oplmsu_uinst->inst_status != INST_STAT_ONLINE)) {
+ rw_exit(&oplmsu_uinst->lock);
+ return (EIO);
+ }
+
+ mindev |= qmindev; /* Create minor device number */
+ majdev = getmajor(*dev); /* Get major device number */
+ *dev = makedevice(majdev, mindev); /* Make device number */
+
+ /* Allocate kernel memory for ctrl_t */
+ ctrl = (ctrl_t *)kmem_zalloc(sizeof (ctrl_t), KM_SLEEP);
+
+ /*
+ * Initialize members of ctrl_t
+ */
+ ctrl->minor = (minor_t)mindev;
+ ctrl->queue = urq;
+ ctrl->sleep_flag = CV_WAKEUP;
+ ctrl->node_type = node_flag;
+ ctrl->wbuftbl =
+ (struct buf_tbl *)kmem_zalloc(sizeof (struct buf_tbl), KM_SLEEP);
+ cv_init(&ctrl->cvp, "oplmsu ctrl_tbl condvar", CV_DRIVER, NULL);
+
+ mutex_enter(&oplmsu_uinst->c_lock);
+
+ if (node_flag == MSU_NODE_USER) { /* User access node */
+
+ oplmsu_uinst->user_ctrl = ctrl;
+ oplmsu_queue_flag = 0;
+
+ } else { /* Meta control node */
+
+ oplmsu_uinst->meta_ctrl = ctrl;
+ }
+
+ RD(urq)->q_ptr = ctrl;
+ WR(urq)->q_ptr = ctrl;
+
+ mutex_exit(&oplmsu_uinst->c_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ OPLMSU_TRACE(urq, (mblk_t *)node_flag, MSU_TRC_OPN);
+
+ qprocson(urq); /* Enable put and service routine */
+ return (SUCCESS);
+}
+
+/*
+ * STREAMS close(9E).  If a lower path still has an outstanding request
+ * (hndl_mp) addressed to this upper queue, waits on ctrl->cvp until that
+ * request completes before flushing and tearing down the ctrl_t.
+ * Pending write-side bufcall/timeout IDs are captured under the lock and
+ * cancelled after it is dropped.
+ */
+/* ARGSUSED */
+int
+oplmsu_close(queue_t *urq, int flag, cred_t *cred_p)
+{
+ ctrl_t *ctrl;
+ minor_t qmindev = 0;
+ lpath_t *lpath;
+ ulong_t node_flag;
+ bufcall_id_t wbuf_id;
+ timeout_id_t wtout_id;
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ mutex_enter(&oplmsu_uinst->c_lock);
+ if ((ctrl = urq->q_ptr) == NULL) {
+ mutex_exit(&oplmsu_uinst->c_lock);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ DBG_PRINT((CE_NOTE, "oplmsu: close: "
+ "close has already been completed"));
+ return (FAILURE);
+ }
+ qmindev = ctrl->minor;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: close: ctrl->minor = 0x%x", qmindev));
+
+ node_flag = MSU_NODE_TYPE(qmindev);
+ if (node_flag > MSU_NODE_META) {
+ mutex_exit(&oplmsu_uinst->c_lock);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (EINVAL);
+ }
+
+ /*
+ * Check whether a queue waiting for a response from a lower stream
+ * exists. If such a queue exists, set sleep_flag to CV_SLEEP.
+ */
+
+ for (lpath = oplmsu_uinst->first_lpath; lpath; ) {
+ if (((RD(urq) == lpath->hndl_uqueue) ||
+ (WR(urq) == lpath->hndl_uqueue)) &&
+ (lpath->hndl_mp != NULL)) {
+ ctrl->sleep_flag = CV_SLEEP;
+ break;
+ }
+
+ lpath = lpath->l_next;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ /* If a pending lower request was found, wait until it completes. */
+ if (lpath) {
+ while (ctrl->sleep_flag != CV_WAKEUP) {
+ /* c_lock (still held) protects sleep_flag */
+ cv_wait(&ctrl->cvp, &oplmsu_uinst->c_lock);
+ }
+ }
+
+ flushq(RD(urq), FLUSHALL);
+ flushq(WR(urq), FLUSHALL);
+ mutex_exit(&oplmsu_uinst->c_lock);
+ qprocsoff(urq); /* Disable queuing of queue */
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ switch (node_flag) {
+ case MSU_NODE_USER : /* User access node */
+ oplmsu_uinst->user_ctrl = NULL;
+ oplmsu_queue_flag = 0;
+ break;
+
+ case MSU_NODE_META : /* Meta control node */
+ oplmsu_uinst->meta_ctrl = NULL;
+ break;
+
+ default :
+ cmn_err(CE_WARN, "oplmsu: close: node_flag = 0x%lx", node_flag);
+ }
+
+ ctrl->minor = NULL;
+ ctrl->queue = NULL;
+ wbuf_id = ctrl->wbuf_id;
+ wtout_id = ctrl->wtout_id;
+ ctrl->wbuf_id = 0;
+ ctrl->wtout_id = 0;
+
+ cv_destroy(&ctrl->cvp);
+ kmem_free(ctrl->wbuftbl, sizeof (struct buf_tbl));
+ ctrl->wbuftbl = NULL;
+
+ RD(urq)->q_ptr = NULL;
+ WR(urq)->q_ptr = NULL;
+ rw_exit(&oplmsu_uinst->lock);
+
+ /* Cancel callbacks outside the lock to avoid deadlock with them */
+ if (wbuf_id != 0) {
+ unbufcall(wbuf_id);
+ }
+
+ if (wtout_id != 0) {
+ untimeout(wtout_id);
+ }
+
+ /* Free kernel memory for ctrl_t */
+ kmem_free(ctrl, sizeof (ctrl_t));
+
+ OPLMSU_TRACE(urq, (mblk_t *)node_flag, MSU_TRC_CLS);
+ return (SUCCESS);
+}
+
+/*
+ * Upper write put procedure.
+ * M_FLUSH is handled inline; other high-priority (>= QPCTL) messages are
+ * linked onto the ctrl_t's private priority list and the write service
+ * routine is scheduled; normal messages are simply queued for uwsrv.
+ */
+int
+oplmsu_uwput(queue_t *uwq, mblk_t *mp)
+{
+
+ if (mp == NULL) {
+ return (SUCCESS);
+ }
+
+ if ((uwq == NULL) || (uwq->q_ptr == NULL)) {
+ freemsg(mp);
+ return (SUCCESS);
+ }
+
+ OPLMSU_TRACE(uwq, mp, MSU_TRC_UI);
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ if (mp->b_datap->db_type == M_FLUSH) {
+ oplmsu_wcmn_flush_hndl(uwq, mp, RW_READER);
+ } else if (mp->b_datap->db_type >= QPCTL) {
+ ctrl_t *ctrl;
+
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = (ctrl_t *)uwq->q_ptr;
+
+ /* Link high priority message to local queue */
+ oplmsu_link_high_primsg(&ctrl->first_upri_hi,
+ &ctrl->last_upri_hi, mp);
+
+ mutex_exit(&oplmsu_uinst->c_lock);
+ oplmsu_wcmn_high_qenable(WR(uwq), RW_READER);
+ } else {
+ putq(WR(uwq), mp);
+ }
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Upper write service procedure.
+ * First drains the private high-priority list (flush and pass-through),
+ * then drains the normal queue: I_PLINK/I_PUNLINK and a set of termios
+ * ioctls get dedicated handlers, everything else is passed through to
+ * the active lower stream.  A FAILURE from a handler stops the drain
+ * (message flow will be re-enabled later).
+ */
+int
+oplmsu_uwsrv(queue_t *uwq)
+{
+ struct iocblk *iocp = NULL;
+ mblk_t *mp = NULL;
+ int rval;
+
+ if ((uwq == NULL) || (uwq->q_ptr == NULL)) {
+ return (FAILURE);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+
+ /* Handle high priority message */
+ while (mp = oplmsu_wcmn_high_getq(uwq)) {
+ if (mp->b_datap->db_type == M_FLUSH) {
+ oplmsu_wcmn_flush_hndl(uwq, mp, RW_READER);
+ continue;
+ }
+
+ if (oplmsu_wcmn_through_hndl(uwq, mp, MSU_HIGH, RW_READER) ==
+ FAILURE) {
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ }
+ }
+ rw_exit(&oplmsu_uinst->lock);
+
+ /* Handle normal priority message */
+ while (mp = getq(uwq)) {
+ rval = SUCCESS;
+ switch (mp->b_datap->db_type) {
+ case M_IOCTL :
+ iocp = (struct iocblk *)mp->b_rptr;
+ switch (iocp->ioc_cmd) {
+ case I_PLINK :
+ if (oplmsu_cmn_pullup_msg(uwq, mp) != FAILURE) {
+ rval = oplmsu_uwioctl_iplink(uwq, mp);
+ }
+ break;
+
+ case I_PUNLINK :
+ if (oplmsu_cmn_pullup_msg(uwq, mp) != FAILURE) {
+ rval = oplmsu_uwioctl_ipunlink(uwq, mp);
+ }
+ break;
+
+ case TCSETS : /* FALLTHRU */
+ case TCSETSW : /* FALLTHRU */
+ case TCSETSF : /* FALLTHRU */
+ case TIOCMSET : /* FALLTHRU */
+ case TIOCSPPS : /* FALLTHRU */
+ case TIOCSWINSZ : /* FALLTHRU */
+ case TIOCSSOFTCAR :
+ rval = oplmsu_uwioctl_termios(uwq, mp);
+ break;
+
+ default :
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ rval = oplmsu_wcmn_through_hndl(uwq, mp,
+ MSU_NORM, RW_READER);
+ rw_exit(&oplmsu_uinst->lock);
+ break;
+ }
+ break;
+
+ default :
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ rval = oplmsu_wcmn_through_hndl(uwq, mp, MSU_NORM,
+ RW_READER);
+ rw_exit(&oplmsu_uinst->lock);
+ break;
+ }
+
+ if (rval == FAILURE) {
+ break;
+ }
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Lower write service procedure.
+ * Drains queued messages to the lower driver; high-priority messages are
+ * always forwarded, normal ones only while canputnext() holds (otherwise
+ * the message is put back and draining stops).  The uinst lock is dropped
+ * around each putnext() to avoid holding it across the lower stream.
+ * Finally, if an upper write queue was blocked on this path, re-enable it.
+ */
+int
+oplmsu_lwsrv(queue_t *lwq)
+{
+ mblk_t *mp;
+ queue_t *dst_queue;
+ lpath_t *lpath;
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ while (mp = getq(lwq)) {
+ if (mp->b_datap->db_type >= QPCTL) {
+ rw_exit(&oplmsu_uinst->lock);
+ OPLMSU_TRACE(WR(lwq), mp, MSU_TRC_LO);
+ putnext(WR(lwq), mp);
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ continue;
+ }
+
+ dst_queue = WR(lwq);
+ if (canputnext(dst_queue)) {
+ rw_exit(&oplmsu_uinst->lock);
+ OPLMSU_TRACE(dst_queue, mp, MSU_TRC_LO);
+ putnext(dst_queue, mp);
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ } else {
+ putbq(WR(lwq), mp);
+ break;
+ }
+ }
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = (lpath_t *)lwq->q_ptr;
+ if (lpath->uwq_flag != 0) {
+ /* Wake the upper write queue that was flow-blocked on us */
+ qenable(WR(lpath->uwq_queue));
+ lpath->uwq_flag = 0;
+ lpath->uwq_queue = NULL;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Lower read put procedure.
+ * Mirrors uwput for the read side: M_FLUSH handled inline, other
+ * high-priority messages linked onto the lpath_t's private list with
+ * the read service scheduled, normal messages queued for lrsrv.
+ */
+int
+oplmsu_lrput(queue_t *lrq, mblk_t *mp)
+{
+
+ if (mp == NULL) {
+ return (SUCCESS);
+ }
+
+ if ((lrq == NULL) || (lrq->q_ptr == NULL)) {
+ freemsg(mp);
+ return (SUCCESS);
+ }
+
+ OPLMSU_TRACE(lrq, mp, MSU_TRC_LI);
+
+ if (mp->b_datap->db_type == M_FLUSH) {
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ oplmsu_rcmn_flush_hndl(lrq, mp);
+ rw_exit(&oplmsu_uinst->lock);
+ } else if (mp->b_datap->db_type >= QPCTL) {
+ lpath_t *lpath;
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = lrq->q_ptr;
+
+ /* Link high priority message to local queue */
+ oplmsu_link_high_primsg(&lpath->first_lpri_hi,
+ &lpath->last_lpri_hi, mp);
+
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_rcmn_high_qenable(lrq);
+ } else {
+ putq(lrq, mp);
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Lower read service procedure.
+ * M_DATA from the active path is scanned for the console abort sequence
+ * (when abort_enable == KIOCABORTALTERNATE); a completed sequence enters
+ * the debugger and drops the message.  XON / XOFF+XON data triggers path
+ * switching via oplmsu_lrdata_xoffxon().  An empty M_BREAK on the active
+ * path is the classic BREAK abort.  Everything else passes through to
+ * the upper stream.
+ */
+int
+oplmsu_lrsrv(queue_t *lrq)
+{
+ mblk_t *mp;
+ boolean_t aborted;
+ int rval;
+
+ if ((lrq == NULL) || (lrq->q_ptr == NULL)) {
+ return (FAILURE);
+ }
+
+ /* Handle normal priority message */
+ while (mp = getq(lrq)) {
+ if (mp->b_datap->db_type >= QPCTL) {
+ /* High-priority messages should have gone via lrput */
+ cmn_err(CE_WARN, "oplmsu: lr-srv: "
+ "Invalid db_type => %x", mp->b_datap->db_type);
+ }
+
+ switch (mp->b_datap->db_type) {
+ case M_DATA :
+ aborted = B_FALSE;
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ if ((abort_enable == KIOCABORTALTERNATE) &&
+ (RD(oplmsu_uinst->lower_queue) == lrq)) {
+ uchar_t *rx_char = mp->b_rptr;
+ lpath_t *lpath;
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = lrq->q_ptr;
+ /*
+ * abt_char tracks how far into the abort
+ * sequence (uinst->abts, NUL-terminated)
+ * this path has matched so far.
+ */
+ while (rx_char != mp->b_wptr) {
+ if (*rx_char == *lpath->abt_char) {
+ lpath->abt_char++;
+ if (*lpath->abt_char == '\0') {
+ abort_sequence_enter((char *)NULL);
+ lpath->abt_char
+ = oplmsu_uinst->abts;
+ aborted = B_TRUE;
+ break;
+ }
+ } else {
+ /* Mismatch: restart the match */
+ lpath->abt_char = (*rx_char ==
+ *oplmsu_uinst->abts) ?
+ oplmsu_uinst->abts + 1 :
+ oplmsu_uinst->abts;
+ }
+ rx_char++;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (aborted) {
+ freemsg(mp);
+ continue;
+ }
+
+ /*
+ * When 1st byte of the received M_DATA is XON or,
+ * 1st byte is XOFF and 2nd byte is XON.
+ */
+
+ if ((*(mp->b_rptr) == MSU_XON) ||
+ (((mp->b_wptr - mp->b_rptr) == 2) &&
+ ((*(mp->b_rptr) == MSU_XOFF) &&
+ (*(mp->b_rptr + 1) == MSU_XON)))) {
+ /* Path switching by XOFF/XON */
+ if (oplmsu_lrdata_xoffxon(lrq, mp) == FAILURE) {
+ return (SUCCESS);
+ }
+ } else {
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ rval =
+ oplmsu_rcmn_through_hndl(lrq, mp, MSU_NORM);
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (rval == FAILURE) {
+ return (SUCCESS);
+ }
+ }
+ break;
+
+ case M_BREAK :
+ /* Zero-length M_BREAK on the active path: BREAK abort */
+ if ((mp->b_wptr - mp->b_rptr) == 0 && msgdsize(mp)
+ == 0) {
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ if ((abort_enable != KIOCABORTALTERNATE) &&
+ (RD(oplmsu_uinst->lower_queue) == lrq)) {
+ abort_sequence_enter((char *)NULL);
+ }
+ rw_exit(&oplmsu_uinst->lock);
+ freemsg(mp);
+ break;
+ }
+ /* FALLTHRU */
+
+ default :
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ (void) oplmsu_rcmn_through_hndl(lrq, mp, MSU_NORM);
+ rw_exit(&oplmsu_uinst->lock);
+ break;
+ }
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Upper read service procedure.
+ * High-priority messages are forwarded immediately; after one is sent,
+ * every lower read queue is re-enabled, and if it was an ioctl response
+ * (M_IOCACK/M_IOCNAK) any write queue waiting on that response is woken.
+ * Normal messages are forwarded subject to canputnext() flow control.
+ * Finally wakes a lower read queue that was blocked on this upper queue.
+ */
+int
+oplmsu_ursrv(queue_t *urq)
+{
+ mblk_t *mp;
+ queue_t *dst_queue;
+ lpath_t *lpath;
+ ctrl_t *ctrl;
+ int res_chk = 0;
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ while (mp = getq(urq)) {
+ if (mp->b_datap->db_type >= QPCTL) {
+ if ((mp->b_datap->db_type == M_IOCACK) ||
+ (mp->b_datap->db_type == M_IOCNAK)) {
+ /* Remember: an ioctl response went upstream */
+ res_chk = 1;
+ }
+ rw_exit(&oplmsu_uinst->lock);
+ OPLMSU_TRACE(RD(urq), mp, MSU_TRC_UO);
+ putnext(RD(urq), mp);
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ qenable(RD(lpath->lower_queue));
+ lpath = lpath->l_next;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ if (res_chk == 1) {
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = (ctrl_t *)urq->q_ptr;
+ if (ctrl != NULL) {
+ if (ctrl->wait_queue != NULL) {
+ qenable(WR(ctrl->wait_queue));
+ ctrl->wait_queue = NULL;
+ }
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+ res_chk = 0;
+ }
+ continue;
+ }
+
+ dst_queue = RD(urq);
+ if (canputnext(dst_queue)) {
+ rw_exit(&oplmsu_uinst->lock);
+ OPLMSU_TRACE(dst_queue, mp, MSU_TRC_UO);
+ putnext(dst_queue, mp);
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ } else {
+ putbq(urq, mp);
+ break;
+ }
+ }
+
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = urq->q_ptr;
+ if (ctrl->lrq_flag != 0) {
+ /* Wake the lower read queue that was flow-blocked on us */
+ qenable(ctrl->lrq_queue);
+ ctrl->lrq_flag = 0;
+ ctrl->lrq_queue = NULL;
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Open the oplmsu meta-control node via LDI on behalf of DR code.
+ * On success *lip and *lhp hold the ident and handle (caller must
+ * ldi_close() and ldi_ident_release()); on failure the ident is
+ * released here.  Returns 0 or an errno.
+ */
+int
+oplmsu_open_msu(dev_info_t *dip, ldi_ident_t *lip, ldi_handle_t *lhp)
+{
+ dev_t devt;
+ int rval;
+
+ /* Allocate LDI identifier */
+ rval = ldi_ident_from_dip(dip, lip);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: open-msu: "
+ "ldi_ident_from_dip failed. errno = %d", rval);
+ return (rval);
+ }
+
+ /* Open oplmsu(meta ctrl node) */
+ devt = makedevice(ddi_driver_major(dip), META_NODE_MASK);
+ rval =
+ ldi_open_by_dev(&devt, OTYP_CHR, (FREAD|FWRITE), kcred, lhp, *lip);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: open-msu: "
+ "ldi_open_by_dev failed. errno = %d", rval);
+ ldi_ident_release(*lip);
+ }
+ return (rval);
+}
+
+/*
+ * Open the serial device under 'dip', strip any autopushed modules off
+ * its stream (repeated I_POP), and persistently link it (I_PLINK) under
+ * the oplmsu meta stream 'msu_lh'.  The resulting link-id is returned
+ * through *id.  The serial device is closed again before returning
+ * (the persistent link keeps the lower stream alive).  Returns 0 or errno.
+ */
+int
+oplmsu_plink_serial(dev_info_t *dip, ldi_handle_t msu_lh, int *id)
+{
+ ldi_ident_t li = NULL;
+ ldi_handle_t lh = NULL;
+ int param;
+ int rval;
+ char pathname[MSU_PATHNAME_SIZE];
+ char wrkbuf[MSU_PATHNAME_SIZE];
+
+ /* Create physical path-name for serial */
+ ddi_pathname(dip, wrkbuf);
+ *(wrkbuf + strlen(wrkbuf)) = '\0';
+ /* minor letter 'a' + instance, e.g. ":a", ":b", ... */
+ sprintf(pathname, "/devices%s:%c", wrkbuf, 'a'+ ddi_get_instance(dip));
+
+ /* Allocate LDI identifier */
+ rval = ldi_ident_from_dip(dip, &li);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: plink-serial: "
+ "%s ldi_ident_from_dip failed. errno = %d", pathname, rval);
+ return (rval);
+ }
+
+ /* Open serial */
+ rval = ldi_open_by_name(pathname, (FREAD|FWRITE|FEXCL), kcred, &lh, li);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: plink-serial: "
+ "%s open failed. errno = %d", pathname, rval);
+ ldi_ident_release(li);
+ return (rval);
+ }
+
+ /* Try to remove the top module from the stream */
+ param = 0;
+ while ((ldi_ioctl(lh, I_POP, (intptr_t)0, FKIOCTL, kcred, &param))
+ == 0) {
+ continue;
+ }
+
+ /* Issue ioctl(I_PLINK) */
+ param = 0;
+ rval = ldi_ioctl(msu_lh, I_PLINK, (intptr_t)lh, FKIOCTL, kcred, &param);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: plink-serial: "
+ "%s ioctl(I_PLINK) failed. errno = %d", pathname, rval);
+ }
+
+ (void) ldi_close(lh, (FREAD|FWRITE|FEXCL), kcred);
+ ldi_ident_release(li);
+
+ *id = param; /* Save link-id */
+ return (rval);
+}
+
+/*
+ * Bind a freshly linked lower path (identified by its I_PLINK link-id)
+ * to a serial instance number: finds the lpath_t still marked UNDEFINED
+ * with matching link_id and records the instance as its path number.
+ * Returns SUCCESS, or EINVAL when no matching lpath exists.
+ */
+int
+oplmsu_set_lpathnum(int lnk_id, int instance)
+{
+ lpath_t *lpath;
+ int rval = SUCCESS;
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ if ((lpath->path_no == UNDEFINED) &&
+ (lpath->link_id == lnk_id)) {
+ lpath->path_no = instance; /* Set instance number */
+ lpath->src_upath = NULL;
+ lpath->status = MSU_SETID_NU;
+ break;
+ }
+ lpath = lpath->l_next;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (lpath == NULL) {
+ rval = EINVAL;
+ }
+ return (rval);
+}
+
+/*
+ * DR (dynamic reconfiguration) attach of one serial path.
+ * Sequence: verify the instance is not already configured; open the
+ * oplmsu meta node; I_PLINK the serial stream under it; bind the link-id
+ * to the instance; register (config_add) and activate (config_start)
+ * the path.  Every failure after the I_PLINK unwinds with I_PUNLINK
+ * before closing the meta node.  Returns 0 or errno.
+ */
+int
+oplmsu_dr_attach(dev_info_t *dip)
+{
+ ldi_ident_t msu_li = NULL;
+ ldi_handle_t msu_lh = NULL;
+ upath_t *upath;
+ int len;
+ int instance;
+ int lnk_id = 0;
+ int param = 0;
+ int rval;
+
+ /* Get instance for serial */
+ instance = ddi_get_instance(dip);
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ /* Get current number of paths */
+ oplmsu_uinst->path_num = oplmsu_get_pathnum();
+
+ /* Check specified upath_t */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if (instance == upath->path_no) {
+ break;
+ }
+ upath = upath->u_next;
+ }
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (upath != NULL) {
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "Instance %d already exist", instance);
+ return (EINVAL);
+ }
+
+ /* Open oplmsu */
+ rval = oplmsu_open_msu(oplmsu_uinst->msu_dip, &msu_li, &msu_lh);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "msu open failed. errno = %d", rval);
+ return (rval);
+ }
+
+ /* Connect two streams */
+ rval = oplmsu_plink_serial(dip, msu_lh, &lnk_id);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "i_plink failed. errno = %d", rval);
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ return (rval);
+ }
+
+ rval = oplmsu_set_lpathnum(lnk_id, instance);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "Link id %d is not found", lnk_id);
+ /* Issue ioctl(I_PUNLINK) */
+ (void) ldi_ioctl(msu_lh, I_PUNLINK, (intptr_t)lnk_id, FKIOCTL,
+ kcred, &param);
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ return (rval);
+ }
+
+ /* Add the path */
+ rval = oplmsu_config_add(dip);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "Failed to add the path. errno = %d", rval);
+ /* Issue ioctl(I_PUNLINK) */
+ (void) ldi_ioctl(msu_lh, I_PUNLINK, (intptr_t)lnk_id, FKIOCTL,
+ kcred, &param);
+
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ return (rval);
+ }
+
+ /* Start to use the path */
+ rval = oplmsu_config_start(instance);
+ if (rval != 0) {
+ struct msu_path *mpath;
+ struct msu_dev *mdev;
+
+ cmn_err(CE_WARN, "oplmsu: attach(dr): "
+ "Failed to start the path. errno = %d", rval);
+
+ /* msu_dev record is carried immediately after msu_path */
+ len = sizeof (struct msu_path) + sizeof (struct msu_dev);
+ mpath = (struct msu_path *)kmem_zalloc((size_t)len, KM_SLEEP);
+ mpath->num = 1;
+ mdev = (struct msu_dev *)(mpath + 1);
+ mdev->dip = dip;
+
+ /* Delete the path */
+ if ((oplmsu_config_del(mpath)) == 0) {
+ /* Issue ioctl(I_PUNLINK) */
+ (void) ldi_ioctl(msu_lh, I_PUNLINK, (intptr_t)lnk_id,
+ FKIOCTL, kcred, &param);
+ }
+ kmem_free(mpath, (size_t)len);
+ }
+
+ /* Close oplmsu */
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ return (rval);
+}
+
+/*
+ * DR detach of one serial path — the inverse of oplmsu_dr_attach().
+ * Refuses to remove the last remaining path or an unknown instance.
+ * Sequence: stop the path if active/standby (config_stop), disconnect
+ * it (config_disc), look up its link-id, I_PUNLINK it from the meta
+ * stream, and finally delete the path record (config_del).
+ * Returns 0 or errno.
+ */
+int
+oplmsu_dr_detach(dev_info_t *dip)
+{
+ ldi_ident_t msu_li = NULL;
+ ldi_handle_t msu_lh = NULL;
+ struct msu_path *mpath;
+ struct msu_dev *mdev;
+ upath_t *upath;
+ lpath_t *lpath;
+ int len;
+ int instance;
+ int count = 0;
+ int param = 0;
+ int status;
+ int rval;
+
+ /* Get instance for serial */
+ instance = ddi_get_instance(dip);
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ /* Get current number of paths */
+ oplmsu_uinst->path_num = oplmsu_get_pathnum();
+
+ rval = FAILURE;
+
+ /* Check specified upath_t; count paths as we go */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if (instance == upath->path_no) {
+ /* Save status of specified path */
+ status = upath->status;
+ rval = SUCCESS;
+ }
+ upath = upath->u_next;
+ count += 1;
+ }
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (rval == FAILURE) {
+ if (count <= 1) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "Instance %d is last path", instance);
+ } else {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "Instance %d doesn't find", instance);
+ }
+ return (EINVAL);
+ }
+
+ /* Check status of specified path */
+ if ((status == MSU_PSTAT_ACTIVE) || (status == MSU_PSTAT_STANDBY)) {
+ /* Stop to use the path */
+ rval = oplmsu_config_stop(instance);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "Failed to stop the path. errno = %d", rval);
+ return (rval);
+ }
+ }
+
+ /* Prepare to unlink the path */
+ rval = oplmsu_config_disc(instance);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "Failed to disconnect the path. errno = %d", rval);
+ return (rval);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ if (lpath->path_no == instance) { /* Get link ID */
+ break;
+ }
+ lpath = lpath->l_next;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ if (lpath == NULL) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): Can not find link ID");
+ return (EINVAL);
+ }
+
+ /* Open oplmsu */
+ rval = oplmsu_open_msu(oplmsu_uinst->msu_dip, &msu_li, &msu_lh);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "msu open failed. errno = %d", rval);
+ return (rval);
+ }
+
+ /* Issue ioctl(I_PUNLINK) */
+ rval = ldi_ioctl(msu_lh, I_PUNLINK, (intptr_t)lpath->link_id, FKIOCTL,
+ kcred, &param);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "ioctl(I_PUNLINK) failed. errno = %d", rval);
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ return (rval);
+ }
+
+ /* Close oplmsu(meta node) */
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+
+ /* msu_dev record is carried immediately after msu_path */
+ len = sizeof (struct msu_path) + sizeof (struct msu_dev);
+ mpath = (struct msu_path *)kmem_zalloc((size_t)len, KM_SLEEP);
+ mpath->num = 1;
+ mdev = (struct msu_dev *)(mpath + 1);
+ mdev->dip = dip;
+
+ /* Delete the path */
+ rval = oplmsu_config_del(mpath);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: detach(dr): "
+ "Failed to delete the path. errno = %d", rval);
+ }
+
+ kmem_free(mpath, (size_t)len);
+ return (rval);
+}
+
+/*
+ * The ebus and the serial device paths under a given CMU_CH chip
+ * are expected to always be at the same addresses, so it is safe
+ * to hard-code the pathnames as below.
+ */
+#define EBUS_PATH "ebus@1"
+#define SERIAL_PATH "serial@14,400000"
+#define EBUS_SERIAL_PATH ("/" EBUS_PATH "/" SERIAL_PATH)
+
+/*
+ * Given the CMU_CH dip, find the serial device dip.
+ *
+ * Descends two fixed levels: cmuch_dip -> EBUS_PATH -> SERIAL_PATH.
+ * Returns the serial dip, or NULL when either child is absent.
+ */
+dev_info_t *
+oplmsu_find_ser_dip(dev_info_t *cmuch_dip)
+{
+ dev_info_t *ebus = NULL;
+ dev_info_t *serial = NULL;
+ int cmuch_circ, ebus_circ;
+
+ /* Hold the CMU_CH node while its ebus child is looked up */
+ ndi_devi_enter(cmuch_dip, &cmuch_circ);
+
+ ebus = ndi_devi_findchild(cmuch_dip, EBUS_PATH);
+ DBG_PRINT((CE_NOTE, "oplmsu: find-serial-dip: "
+ "ebus_dip = %p", ebus));
+
+ if (ebus != NULL) {
+ /* Hold the ebus node and look up the serial child */
+ ndi_devi_enter(ebus, &ebus_circ);
+ serial = ndi_devi_findchild(ebus, SERIAL_PATH);
+ DBG_PRINT((CE_NOTE, "oplmsu: find-serial-dip: "
+ "ser_dip = %p", serial));
+ ndi_devi_exit(ebus, ebus_circ);
+ }
+
+ ndi_devi_exit(cmuch_dip, cmuch_circ);
+ return (serial);
+}
+
+/*
+ * Find all console related serial devices.
+ *
+ * Walks the immediate children of the device tree root and, for every
+ * node whose binding name identifies a CMU-CH (MSU_CMUCH_FF/DC, plus
+ * MSU_CMUCH_DBG in DEBUG kernels), forces load/attach of the serial
+ * driver stack via ddi_pathname_to_dev_t() and prepends a ser_devl_t
+ * entry for the serial dip onto '*ser_dl'.
+ *
+ * Returns the number of devices found; '*ser_dl' heads the resulting
+ * singly linked list (entries kmem_zalloc'ed here, freed by caller).
+ */
+int
+oplmsu_find_serial(ser_devl_t **ser_dl)
+{
+ dev_info_t *root_dip;
+ dev_info_t *cmuch_dip;
+ dev_info_t *dip;
+ ser_devl_t *wrk_ser_dl;
+ int circ;
+ int count = 0;
+ char pathname[MSU_PATHNAME_SIZE];
+ dev_t devt;
+ char *namep;
+
+ root_dip = ddi_root_node();
+ ndi_devi_enter(root_dip, &circ);
+ cmuch_dip = ddi_get_child(root_dip);
+
+ while (cmuch_dip != NULL) {
+ namep = ddi_binding_name(cmuch_dip); /* Get binding name */
+ if (namep == NULL) {
+ cmuch_dip = ddi_get_next_sibling(cmuch_dip);
+ continue;
+ }
+
+ DBG_PRINT((CE_NOTE, "oplmsu: find-serial: name => %s", namep));
+
+ /* Skip nodes that are not a CMU-CH of a supported model */
+ if ((strcmp(namep, MSU_CMUCH_FF) != 0) &&
+ (strcmp(namep, MSU_CMUCH_DC) != 0)) {
+#ifdef DEBUG
+ if (strcmp(namep, MSU_CMUCH_DBG) != 0) {
+ cmuch_dip = ddi_get_next_sibling(cmuch_dip);
+ continue;
+ }
+#else
+ cmuch_dip = ddi_get_next_sibling(cmuch_dip);
+ continue;
+#endif
+ }
+
+ /* Build <cmu-ch path>/ebus@1/serial@14,400000 */
+ (void) ddi_pathname(cmuch_dip, pathname);
+ DBG_PRINT((CE_NOTE,
+ "oplmsu: find-serial: cmu-ch path => %s", pathname));
+ (void) strcat(pathname, EBUS_SERIAL_PATH);
+
+ /*
+ * Call ddi_pathname_to_dev_t to forceload and attach
+ * the required drivers.
+ */
+ devt = ddi_pathname_to_dev_t(pathname);
+ DBG_PRINT((CE_NOTE, "oplmsu: find-serial: serial device "
+ "dev_t = %lx", devt));
+ if ((devt != NODEV) &&
+ ((dip = oplmsu_find_ser_dip(cmuch_dip)) != NULL)) {
+ /* Prepend a list entry for this serial dip */
+ wrk_ser_dl = (ser_devl_t *)
+ kmem_zalloc(sizeof (ser_devl_t), KM_SLEEP);
+ wrk_ser_dl->dip = dip;
+ count += 1;
+
+ if (*ser_dl != NULL) {
+ wrk_ser_dl->next = *ser_dl;
+ }
+ *ser_dl = wrk_ser_dl;
+ }
+ cmuch_dip = ddi_get_next_sibling(cmuch_dip);
+ }
+ ndi_devi_exit(root_dip, circ);
+ return (count);
+}
+
+/*
+ * Configure STREAM.
+ *
+ * Discovers every console serial device, plumbs each one under the
+ * oplmsu meta node with I_PLINK (via oplmsu_plink_serial()), records
+ * the link IDs, registers the connected paths with oplmsu_config_new()
+ * and brings all paths into service with
+ * oplmsu_config_start(MSU_PATH_ALL). Links made so far are undone
+ * with oplmsu_unlinks() when configuration fails.
+ */
+void
+oplmsu_conf_stream(uinst_t *msu_uinst)
+{
+ ldi_ident_t msu_li = NULL;
+ ldi_handle_t msu_lh = NULL;
+ struct msu_path *mpath;
+ struct msu_dev *mdev;
+ ser_devl_t *ser_dl = NULL, *next_ser_dl;
+ int *plink_id;
+ int size;
+ int i;
+ int param;
+ int connected = 0;
+ int devcnt = 0;
+ int rval;
+
+ DBG_PRINT((CE_NOTE,
+ "oplmsu: conf-stream: stream configuration start!"));
+
+ /* Find serial devices */
+ devcnt = oplmsu_find_serial(&ser_dl);
+ if ((devcnt == 0) || (ser_dl == NULL)) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "Discovered serial device = %d", devcnt);
+ return;
+ }
+
+ /* Open oplmsu */
+ rval = oplmsu_open_msu(msu_uinst->msu_dip, &msu_li, &msu_lh);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "msu open failed. errno = %d", rval);
+ return;
+ }
+
+ /* msu_path header followed by one msu_dev per discovered device */
+ size = (sizeof (struct msu_path) + (sizeof (struct msu_dev) * devcnt));
+ mpath = (struct msu_path *)kmem_zalloc((size_t)size, KM_SLEEP);
+ plink_id = (int *)kmem_zalloc((sizeof (int) * devcnt), KM_SLEEP);
+
+ mdev = (struct msu_dev *)(mpath + 1);
+ for (i = 0; i < devcnt; i++) {
+ /* Connect two streams */
+ rval = oplmsu_plink_serial(ser_dl->dip, msu_lh, &plink_id[i]);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "i_plink failed. errno = %d", rval);
+ next_ser_dl = ser_dl->next;
+ kmem_free(ser_dl, sizeof (ser_devl_t));
+ ser_dl = next_ser_dl;
+ continue;
+ }
+
+ /*
+ * NOTE(review): 'param' is passed to ldi_ioctl() below
+ * without being initialized; it only receives the ioctl
+ * return value, so this looks benign -- confirm.
+ */
+ rval = oplmsu_set_lpathnum(plink_id[i],
+ ddi_get_instance(ser_dl->dip));
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "Link id %d is not found", plink_id[i]);
+ /* Issue ioctl(I_PUNLINK) */
+ (void) ldi_ioctl(msu_lh, I_PUNLINK,
+ (intptr_t)plink_id[i], FKIOCTL, kcred, &param);
+ next_ser_dl = ser_dl->next;
+ kmem_free(ser_dl, sizeof (ser_devl_t));
+ ser_dl = next_ser_dl;
+ continue;
+ }
+
+ /* Record the connected device and free its list entry */
+ mdev->dip = ser_dl->dip;
+ next_ser_dl = ser_dl->next;
+ kmem_free(ser_dl, sizeof (ser_devl_t));
+ ser_dl = next_ser_dl;
+
+ mdev++;
+ connected++;
+ }
+
+ if (connected == 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "Connected paths = %d", connected);
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ kmem_free(plink_id, (sizeof (int) * devcnt));
+ kmem_free(mpath, size);
+ return;
+ }
+
+ /* Setup all structure */
+ mpath->num = connected;
+ rval = oplmsu_config_new(mpath);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "Failed to create all paths. errno = %d", rval);
+ oplmsu_unlinks(msu_lh, plink_id, devcnt);
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ kmem_free(plink_id, (sizeof (int) * devcnt));
+ kmem_free(mpath, size);
+ return;
+ }
+
+ /* Start to use all paths */
+ rval = oplmsu_config_start(MSU_PATH_ALL);
+ if (rval != 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-stream: "
+ "Failed to start all paths. errno = %d", rval);
+
+ /* Delete the path */
+ rval = oplmsu_config_del(mpath);
+ if (rval == 0) {
+ oplmsu_unlinks(msu_lh, plink_id, devcnt);
+ }
+ }
+
+ (void) ldi_close(msu_lh, (FREAD|FWRITE), kcred);
+ ldi_ident_release(msu_li);
+ kmem_free(plink_id, (sizeof (int) * devcnt));
+ kmem_free(mpath, size);
+
+ DBG_PRINT((CE_NOTE, "oplmsu: conf-stream: stream configuration end!"));
+}
+
+/*
+ * Undo plumbing: issue ioctl(I_PUNLINK) on the oplmsu meta node for
+ * every non-zero link ID recorded in plink_id[0 .. devcnt-1].
+ */
+void
+oplmsu_unlinks(ldi_handle_t msu_lh, int *plink_id, int devcnt)
+{
+ int param = 0;
+ int i;
+
+ for (i = 0; i < devcnt; i++) {
+ int link = plink_id[i];
+
+ /* Zero means this slot was never successfully linked */
+ if (link == 0) {
+ continue;
+ }
+
+ /* Issue ioctl(I_PUNLINK) */
+ (void) ldi_ioctl(msu_lh, I_PUNLINK, (intptr_t)link,
+ FKIOCTL, kcred, &param);
+ }
+}
+
+/*
+ * oplmsu_setup - background thread entry point that performs the
+ * initial stream configuration.
+ *
+ * Runs oplmsu_conf_stream() when the state is MSU_CONFIGURING, then
+ * marks configuration done, wakes any waiters on oplmsu_conf_cv,
+ * clears the recorded thread ID and exits the thread.
+ */
+void
+oplmsu_setup(uinst_t *msu_uinst)
+{
+
+ DBG_PRINT((CE_NOTE, "oplmsu: setup: Background thread start!"));
+
+ mutex_enter(&oplmsu_bthrd_excl);
+ if (oplmsu_conf_st == MSU_CONFIGURING) {
+ /*
+ * Drop the mutex while configuring: oplmsu_conf_stream()
+ * takes other oplmsu locks and may block.
+ */
+ mutex_exit(&oplmsu_bthrd_excl);
+ oplmsu_conf_stream(msu_uinst); /* Configure stream */
+ mutex_enter(&oplmsu_bthrd_excl);
+ oplmsu_conf_st = MSU_CONFIGURED;
+ cv_broadcast(&oplmsu_conf_cv); /* Wake up from cv_wait_sig() */
+ }
+
+ /*
+ * This thread is exiting; clear the recorded thread ID
+ * unconditionally (the former NULL-check before the NULL
+ * assignment was redundant).
+ */
+ oplmsu_bthrd_id = NULL;
+ mutex_exit(&oplmsu_bthrd_excl);
+
+ DBG_PRINT((CE_NOTE, "oplmsu: setup: Background thread end!"));
+
+ thread_exit();
+}
+
+/*
+ * oplmsu_create_upath - allocate and initialize an upper path table
+ * (upath_t) for serial device 'dip' and link it into the uinst list.
+ *
+ * The LSB number comes from the MSU_BOARD_PROP property of the
+ * grandparent (CMU-CH) node; the path number is the driver instance
+ * of 'dip'. A matching lpath_t must already exist.
+ *
+ * Caller must hold oplmsu_uinst->lock as writer and u_lock
+ * (oplmsu_link_upath() asserts both).
+ *
+ * Returns SUCCESS, FAILURE (missing board property) or ENODEV
+ * (no matching lpath_t).
+ */
+int
+oplmsu_create_upath(dev_info_t *dip)
+{
+ upath_t *upath;
+ lpath_t *lpath;
+ dev_info_t *cmuch_dip;
+ int instance;
+ int lsb;
+
+ /* FAILURE doubles as the lookup default for a missing property */
+ cmuch_dip = ddi_get_parent(ddi_get_parent(dip));
+ lsb = ddi_prop_get_int(DDI_DEV_T_ANY, cmuch_dip, 0, MSU_BOARD_PROP,
+ FAILURE);
+ if (lsb == FAILURE) {
+ return (lsb);
+ }
+
+ instance = ddi_get_instance(dip);
+
+ /* Find the lower path with the same path number */
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ if (lpath->path_no == instance) {
+ break;
+ }
+ lpath = lpath->l_next;
+ }
+
+ if (lpath == NULL) {
+ mutex_exit(&oplmsu_uinst->l_lock);
+ return (ENODEV);
+ }
+
+ upath = (upath_t *)kmem_zalloc(sizeof (upath_t), KM_SLEEP);
+
+ /*
+ * Initialize members of upath_t
+ */
+
+ upath->path_no = instance;
+ upath->lpath = lpath;
+ upath->ser_devcb.dip = dip;
+ upath->ser_devcb.lsb = lsb;
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_STOP, MSU_PSTAT_EMPTY,
+ MSU_STOP);
+
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ oplmsu_link_upath(upath);
+ return (SUCCESS);
+}
+
+/*
+ * Setup new upper instance structure.
+ *
+ * Creates one upath_t per msu_dev entry appended after 'mpath'
+ * (mpath->num entries). Fails when a lower path is busy, when upper
+ * paths already exist, or when config_new has already run
+ * (path_num != UNDEFINED).
+ *
+ * Returns SUCCESS or an errno value.
+ */
+int
+oplmsu_config_new(struct msu_path *mpath)
+{
+ struct msu_dev *mdev;
+ int i;
+ int rval = SUCCESS;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: conf-new: config_new() called"));
+ ASSERT(mpath);
+
+ if (mpath->num == 0) {
+ cmn_err(CE_WARN, "oplmsu: conf-new: "
+ "Number of paths = %d", mpath->num);
+ return (EINVAL);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ rval = oplmsu_check_lpath_usable();
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ if (rval == BUSY) { /* Check whether Lower path is usable */
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-new: "
+ "Other processing is using this device");
+ return (EBUSY);
+ }
+
+ /*
+ * Because the OPLMSU instance already exists when the upper path
+ * table exists, the configure_new processing cannot be done.
+ */
+
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ if ((oplmsu_uinst->first_upath != NULL) ||
+ (oplmsu_uinst->last_upath != NULL)) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-new: upath_t already exist");
+ return (EINVAL);
+ }
+
+ /*
+ * Because the config_new processing has already been done
+ * if oplmsu_uinst->path_num isn't -1, this processing cannot be
+ * continued.
+ */
+
+ if (oplmsu_uinst->path_num != UNDEFINED) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-new: "
+ "conf-new processing has already been completed");
+ return (EINVAL);
+ }
+
+ /*
+ * Only the number of specified paths makes the upper path
+ * information tables.
+ */
+
+ mdev = (struct msu_dev *)(mpath + 1);
+ for (i = 0; i < mpath->num; i++) {
+ /*
+ * Associate upper path information table with lower path
+ * information table.
+ *
+ * If the upper path information table and the lower path
+ * information table cannot be associated, the link list of
+ * the upper path information table is released.
+ */
+ rval = oplmsu_create_upath(mdev->dip);
+ if (rval != SUCCESS) {
+ oplmsu_delete_upath_info();
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-new: "
+ "Failed to create upath %d", rval);
+ return (rval);
+ }
+
+ mdev++;
+ }
+
+ /*
+ * Setup members of uinst_t
+ */
+
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ oplmsu_uinst->path_num = mpath->num;
+ oplmsu_uinst->lower_queue = NULL;
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Add path information.
+ *
+ * Creates a new upath_t for serial device 'dip'. Requires that
+ * config_new has already completed (path_num != UNDEFINED) and that
+ * no upath_t exists yet for this driver instance.
+ *
+ * Returns SUCCESS or an errno value.
+ */
+int
+oplmsu_config_add(dev_info_t *dip)
+{
+ upath_t *upath;
+ int instance;
+ int rval = SUCCESS;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: conf-add: config_add() called"));
+ ASSERT(dip);
+
+ instance = ddi_get_instance(dip);
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+ if (oplmsu_uinst->path_num == UNDEFINED) {
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-add: "
+ "conf-new processing has not been completed yet");
+ return (EINVAL);
+ }
+
+ mutex_enter(&oplmsu_uinst->u_lock);
+ upath = oplmsu_search_upath_info(instance);
+ if (upath != NULL) {
+ /*
+ * A upath_t for this instance already exists, so the
+ * path cannot be added again. (The original message
+ * claimed the opposite -- "doesn't find" -- fixed.)
+ */
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-add: "
+ "upath_t already exists");
+ return (EINVAL);
+ }
+
+ rval = oplmsu_create_upath(dip);
+ if (rval != SUCCESS) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-add: "
+ "Failed to create upath %d", rval);
+ return (rval);
+ }
+
+ /* Refresh the cached instance status and path count */
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ oplmsu_uinst->path_num = oplmsu_get_pathnum();
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Delete each path information.
+ *
+ * For every msu_dev entry appended after 'mpath', verify the path is
+ * in a deletable state ('Offline:stop' or failed, and not in the
+ * middle of a transition), then unlink and free its upath_t.
+ * Processing continues past per-entry errors; the error code of the
+ * last failing entry is returned (SUCCESS if all were deleted).
+ */
+int
+oplmsu_config_del(struct msu_path *mpath)
+{
+ struct msu_dev *mdev;
+ upath_t *upath;
+ lpath_t *lpath;
+ int rval = SUCCESS;
+ int use_flag;
+ int i;
+
+ DBG_PRINT((CE_NOTE, "oplmsu: conf-del: config_del() called"));
+ ASSERT(mpath);
+
+ mdev = (struct msu_dev *)(mpath + 1);
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+ for (i = 0; i < mpath->num; i++) {
+ upath = oplmsu_search_upath_info(ddi_get_instance(mdev->dip));
+ if (upath == NULL) {
+ cmn_err(CE_WARN, "oplmsu: conf-del: "
+ "Proper upath_t doesn't find");
+ rval = ENODEV;
+ mdev++;
+ continue;
+ }
+
+ lpath = upath->lpath;
+ if (lpath == NULL) {
+ /*
+ * No lower path: the upath may only be deleted if it
+ * is fully disconnected and not awaiting any ACK.
+ */
+ if ((upath->traditional_status == MSU_WSTP_ACK) ||
+ (upath->traditional_status == MSU_WSTR_ACK) ||
+ (upath->traditional_status == MSU_WPTH_CHG) ||
+ (upath->traditional_status == MSU_WTCS_ACK) ||
+ (upath->traditional_status == MSU_WTMS_ACK) ||
+ (upath->traditional_status == MSU_WPPS_ACK) ||
+ (upath->traditional_status == MSU_WWSZ_ACK) ||
+ (upath->traditional_status == MSU_WCAR_ACK)) {
+ cmn_err(CE_WARN, "oplmsu: conf-del: "
+ "Other processing is using this device");
+ rval = EBUSY;
+ mdev++;
+ continue;
+ }
+
+ if ((upath->status != MSU_PSTAT_DISCON) ||
+ (upath->traditional_status != MSU_DISCON)) {
+ cmn_err(CE_WARN, "oplmsu: conf-del: "
+ "Status of path is improper");
+ rval = EINVAL;
+ mdev++;
+ continue;
+ }
+ } else {
+ /* Lower path exists: claim it for this ioctl */
+ mutex_enter(&oplmsu_uinst->l_lock);
+ use_flag = oplmsu_set_ioctl_path(lpath, NULL, NULL);
+ if (use_flag == BUSY) {
+ mutex_exit(&oplmsu_uinst->l_lock);
+ cmn_err(CE_WARN, "oplmsu: conf-del: "
+ "Other processing is using lower path");
+ rval = EBUSY;
+ mdev++;
+ continue;
+ }
+
+ if (((upath->status != MSU_PSTAT_STOP) ||
+ (upath->traditional_status != MSU_STOP)) &&
+ ((upath->status != MSU_PSTAT_FAIL) ||
+ (upath->traditional_status != MSU_FAIL))) {
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ cmn_err(CE_WARN, "oplmsu: conf-del: "
+ "Status of path isn't 'Offline:stop/fail'");
+ rval = EINVAL;
+ mdev++;
+ continue;
+ }
+ lpath->src_upath = NULL;
+ lpath->status = MSU_SETID_NU;
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+ oplmsu_unlink_upath(upath); /* Unlink upath_t */
+ kmem_free(upath, sizeof (upath_t));
+ mdev++;
+ }
+
+ /* Refresh the cached instance status and path count */
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ oplmsu_uinst->path_num = oplmsu_get_pathnum();
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (rval);
+}
+
+/*
+ * Stop to use the path.
+ *
+ * Transitions path 'pathnum' to 'Offline:stop'. A standby path is
+ * stopped directly. Stopping the active path requires switching the
+ * console to a standby path first: an M_FLUSH (and, when termios
+ * state must be replayed, a termios message) is queued down the
+ * alternate path, the PROM console is switched, and XON/XOFF messages
+ * notify the new active and standby paths.
+ *
+ * MSU_PATH_ALL is rejected: all paths cannot be stopped at once.
+ *
+ * Returns SUCCESS, FAILURE, or an errno value.
+ */
+int
+oplmsu_config_stop(int pathnum)
+{
+ upath_t *upath, *altn_upath;
+ lpath_t *lpath, *altn_lpath;
+ queue_t *stp_queue = NULL;
+ queue_t *dst_queue = NULL;
+ mblk_t *nmp = NULL, *fmp = NULL;
+ ctrl_t *ctrl;
+ int term_ioctl, term_stat;
+ int use_flag;
+
+ DBG_PRINT((CE_NOTE,
+ "oplmsu: conf-stop: config_stop(%d) called", pathnum));
+
+ if (pathnum == MSU_PATH_ALL) {
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "All path can't be transferred to the status of "
+ "'Offline:stop'");
+ return (EINVAL);
+ }
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ upath = oplmsu_search_upath_info(pathnum); /* Search upath_t */
+ if (upath == NULL) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "Proper upath_t doesn't find");
+ return (ENODEV);
+ }
+
+ lpath = upath->lpath;
+ if (lpath == NULL) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "Proper lpath_t doesn't exist");
+ return (ENODEV);
+ }
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+
+ /* Check status of lpath_t */
+ use_flag = oplmsu_set_ioctl_path(lpath, NULL, NULL);
+ if (use_flag == BUSY) {
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "Other processing is using lower path");
+ return (EBUSY);
+ }
+
+ if (upath->status == MSU_PSTAT_FAIL) {
+ /* A failed path cannot be stopped */
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (EIO);
+ } else if ((upath->status == MSU_PSTAT_STOP) &&
+ (upath->traditional_status == MSU_STOP)) {
+ /* Already stopped: nothing to do */
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ } else if ((upath->status == MSU_PSTAT_STANDBY) &&
+ (upath->traditional_status == MSU_STANDBY)) {
+ /* Standby path: stop it directly */
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_STOP,
+ upath->status, MSU_STOP);
+ oplmsu_clear_ioctl_path(lpath);
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ } else if ((upath->status == MSU_PSTAT_ACTIVE) &&
+ (upath->traditional_status == MSU_ACTIVE)) {
+ /* Active path: must switch the console to a standby path */
+ altn_upath = oplmsu_search_standby();
+ if (altn_upath == NULL) { /* Alternate path doesn't exist */
+ DBG_PRINT((CE_NOTE, "oplmsu: conf-stop: "
+ "Alternate upper path doesn't find"));
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (EINVAL);
+ }
+
+ /* Pre-allocate the M_FLUSH message */
+ if ((fmp = allocb(sizeof (char), BPRI_LO)) == NULL) {
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (ENOSR);
+ }
+
+ /* Build the termios replay message (nmp may stay NULL) */
+ if (oplmsu_stop_prechg(&nmp, &term_ioctl, &term_stat) !=
+ SUCCESS) {
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ freeb(fmp);
+ return (ENOSR);
+ }
+
+ altn_lpath = altn_upath->lpath;
+ use_flag = oplmsu_set_ioctl_path(altn_lpath, NULL, NULL);
+ if (use_flag == BUSY) {
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "Other processing is using alternate lower path");
+ freeb(fmp);
+ freemsg(nmp);
+ return (EBUSY);
+ }
+
+ dst_queue = WR(altn_lpath->lower_queue);
+
+ /* termios is not held. Change alternate path to MSU_ACTIVE */
+ if (nmp == NULL) {
+ altn_upath->traditional_status = term_stat;
+ altn_lpath->src_upath = upath;
+ altn_lpath->status = MSU_EXT_VOID;
+
+ oplmsu_uinst->lower_queue = NULL;
+
+ /* Suspend the user access node while switching */
+ ctrl = oplmsu_uinst->user_ctrl;
+ if (ctrl != NULL) {
+ mutex_enter(&oplmsu_uinst->c_lock);
+ stp_queue = WR(ctrl->queue);
+ mutex_exit(&oplmsu_uinst->c_lock);
+ noenable(stp_queue);
+ oplmsu_queue_flag = 1;
+ }
+
+ /* Make M_FLUSH and send to alternate path */
+ oplmsu_cmn_set_mflush(fmp);
+ putq(dst_queue, fmp);
+
+ /* Change status of alternate path */
+ oplmsu_cmn_set_upath_sts(altn_upath, MSU_PSTAT_ACTIVE,
+ altn_upath->status, MSU_ACTIVE);
+
+ oplmsu_clear_ioctl_path(altn_lpath);
+ altn_lpath->uinst = oplmsu_uinst;
+ altn_lpath->src_upath = NULL;
+ altn_lpath->status = MSU_EXT_NOTUSED;
+
+ /* Notify of the active path changing */
+ prom_opl_switch_console(altn_upath->ser_devcb.lsb);
+
+ /* Send XON to notify active path */
+ (void) oplmsu_cmn_put_xoffxon(dst_queue, MSU_XON_4);
+
+ /* Send XOFF to notify all standby paths */
+ oplmsu_cmn_putxoff_standby();
+
+ oplmsu_uinst->lower_queue = RD(dst_queue);
+ ctrl = oplmsu_uinst->user_ctrl;
+
+ /* Switch active path of oplmsu */
+ if (ctrl != NULL) {
+ queue_t *altn_queue;
+
+ mutex_enter(&oplmsu_uinst->c_lock);
+ altn_queue = WR(ctrl->queue);
+ mutex_exit(&oplmsu_uinst->c_lock);
+
+ /* Restart queuing of user access node */
+ enableok(altn_queue);
+
+ /*
+ * qenable needs u_lock/l_lock dropped;
+ * reacquire them afterwards in the same order.
+ */
+ oplmsu_queue_flag = 0;
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ oplmsu_wcmn_high_qenable(altn_queue, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+ mutex_enter(&oplmsu_uinst->l_lock);
+ }
+
+ /* Stop previous active path */
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_STOP,
+ upath->status, MSU_STOP);
+
+ lpath->uinst = NULL;
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+ oplmsu_clear_ioctl_path(lpath);
+
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ }
+
+ /* Send termios information to alternate path */
+ if (canput(dst_queue)) {
+ altn_upath->traditional_status = term_stat;
+ altn_lpath->src_upath = upath;
+ altn_lpath->status = MSU_EXT_VOID;
+
+ upath->traditional_status = MSU_WSTP_ACK;
+ lpath->uinst = NULL;
+
+ oplmsu_uinst->lower_queue = NULL;
+
+ ctrl = oplmsu_uinst->user_ctrl;
+ if (ctrl != NULL) {
+ mutex_enter(&oplmsu_uinst->c_lock);
+ stp_queue = WR(ctrl->queue);
+ mutex_exit(&oplmsu_uinst->c_lock);
+ noenable(stp_queue);
+ oplmsu_queue_flag = 1;
+ }
+
+ /* Locks are dropped before queueing the messages */
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_cmn_set_mflush(fmp);
+ putq(dst_queue, fmp);
+ putq(dst_queue, nmp);
+
+ /* Wait until the switch completes (sw_flag cleared) */
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath->sw_flag = 1;
+ while (lpath->sw_flag != 0) {
+ /* Wait for the completion of path switching */
+ cv_wait(&lpath->sw_cv, &oplmsu_uinst->l_lock);
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ return (SUCCESS);
+ } else {
+ oplmsu_clear_ioctl_path(altn_lpath);
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ freeb(fmp);
+ freemsg(nmp);
+ return (FAILURE);
+ }
+ /* NOTREACHED */
+ } else {
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+
+ cmn_err(CE_WARN, "oplmsu: conf-stop: "
+ "Status of path is improper");
+ return (EINVAL);
+ }
+ /* NOTREACHED */
+}
+
+/*
+ * Start to use path.
+ *
+ * For a single path number: place that path into 'Online:standby'.
+ * For MSU_PATH_ALL: bring every path online, making the path whose
+ * LSB matches the MSU_TTY_PORT_PROP property the active console path
+ * and the rest standby. If no path became active, fall back to the
+ * first available standby path.
+ *
+ * Returns SUCCESS or an errno value.
+ */
+int
+oplmsu_config_start(int pathnum)
+{
+ upath_t *upath = NULL;
+ lpath_t *lpath = NULL;
+ queue_t *dst_queue, *main_rq = NULL;
+ int msu_tty_port;
+
+ DBG_PRINT((CE_NOTE,
+ "oplmsu: conf-start: config_start(%d) called", pathnum));
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ if (oplmsu_get_inst_status() == INST_STAT_BUSY) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (EBUSY);
+ }
+
+ if (pathnum == MSU_PATH_ALL) {
+ (void) oplmsu_search_min_stop_path();
+ }
+
+ for (upath = oplmsu_uinst->first_upath; upath; ) {
+ if ((pathnum != MSU_PATH_ALL) && (upath->path_no != pathnum)) {
+ upath = upath->u_next;
+ continue;
+ }
+
+ /* Single-path case: set it to standby and return */
+ if (upath->path_no == pathnum) {
+ lpath = upath->lpath;
+ if (lpath == NULL) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-start: "
+ "Proper lpath_t doesn't exist");
+ return (EINVAL);
+ }
+
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_STANDBY,
+ upath->status, MSU_STANDBY);
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ }
+
+ /*
+ * with PATH_ALL
+ */
+ lpath = upath->lpath;
+ if (lpath == NULL) {
+ upath = upath->u_next;
+
+ DBG_PRINT((CE_WARN, "oplmsu: conf-start: "
+ "Proper lpath_t doesn't exist"));
+ continue;
+ }
+
+ msu_tty_port = ddi_prop_get_int(DDI_DEV_T_ANY,
+ oplmsu_uinst->msu_dip, 0, MSU_TTY_PORT_PROP, -1);
+
+ if (upath->ser_devcb.lsb == msu_tty_port) {
+ /* This LSB owns the console: make it active */
+ /* Notify of the active path changing */
+ prom_opl_switch_console(upath->ser_devcb.lsb);
+
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_ACTIVE,
+ upath->status, MSU_ACTIVE);
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ main_rq = RD(lpath->lower_queue);
+ dst_queue = WR(lpath->lower_queue);
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+ lpath->uinst = oplmsu_uinst;
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ /* Send XON to notify active path */
+ (void) oplmsu_cmn_put_xoffxon(dst_queue, MSU_XON_4);
+ } else {
+ oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_STANDBY,
+ upath->status, MSU_STANDBY);
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_NOTUSED;
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+ upath = upath->u_next;
+ }
+
+ /* No path matched the tty port: promote a standby path */
+ if (main_rq == NULL) {
+ upath_t *altn_upath;
+ lpath_t *altn_lpath;
+
+ altn_upath = oplmsu_search_standby();
+ if (altn_upath) {
+ oplmsu_cmn_set_upath_sts(altn_upath, MSU_PSTAT_ACTIVE,
+ altn_upath->status, MSU_ACTIVE);
+
+ /* Notify of the active path changing */
+ prom_opl_switch_console(altn_upath->ser_devcb.lsb);
+
+ altn_lpath = altn_upath->lpath;
+ if (altn_lpath) {
+ mutex_enter(&oplmsu_uinst->l_lock);
+ main_rq = RD(altn_lpath->lower_queue);
+ dst_queue = WR(altn_lpath->lower_queue);
+ altn_lpath->src_upath = NULL;
+ altn_lpath->status = MSU_EXT_NOTUSED;
+ altn_lpath->uinst = oplmsu_uinst;
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ /* Send XON to notify active path */
+ (void) oplmsu_cmn_put_xoffxon(dst_queue,
+ MSU_XON_4);
+ } else {
+ cmn_err(CE_WARN, "oplmsu: conf-start: "
+ "Proper alternate lpath_t doesn't exist");
+ }
+ } else {
+ cmn_err(CE_WARN, "oplmsu: conf-start: "
+ "Proper alternate upath_t doesn't exist");
+ }
+ }
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+
+ /* Send XOFF to notify all standby paths */
+ oplmsu_cmn_putxoff_standby();
+
+ /* Change active path of oplmsu */
+ oplmsu_uinst->lower_queue = main_rq;
+ oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
+
+/*
+ * Prepare of unlink path.
+ *
+ * Marks path 'pathnum' as disconnecting (MSU_SETID) so it can later
+ * be unplumbed. The path must currently be 'Offline:stop' or failed
+ * (already-disconnected succeeds immediately), and its lower path
+ * must not be in use by other processing.
+ *
+ * Returns SUCCESS or an errno value.
+ */
+int
+oplmsu_config_disc(int pathnum)
+{
+ upath_t *upath;
+ lpath_t *lpath;
+ int use_flag;
+
+ DBG_PRINT((CE_NOTE,
+ "oplmsu: conf-disc: config_disc(%d) called", pathnum));
+
+ rw_enter(&oplmsu_uinst->lock, RW_READER);
+ mutex_enter(&oplmsu_uinst->u_lock);
+
+ upath = oplmsu_search_upath_info(pathnum);
+ if (upath == NULL) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-disc: "
+ "Proper upath_t doesn't find");
+ return (EINVAL);
+ }
+
+ if ((upath->status == MSU_PSTAT_DISCON) ||
+ (upath->traditional_status == MSU_DISCON)) {
+ /* Already disconnected: nothing to do */
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+ } else if (((upath->status != MSU_PSTAT_STOP) ||
+ (upath->traditional_status != MSU_STOP)) &&
+ ((upath->status != MSU_PSTAT_FAIL) ||
+ (upath->traditional_status != MSU_FAIL))) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-disc: "
+ "Status of path is improper");
+ return (EINVAL);
+ }
+
+ lpath = upath->lpath;
+ if (lpath == NULL) {
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-disc: "
+ "Proper lpath_t doesn't exist");
+ return (ENODEV);
+ }
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+
+ /* Check lower path status */
+ use_flag = oplmsu_set_ioctl_path(lpath, NULL, NULL);
+ if (use_flag == BUSY) {
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ cmn_err(CE_WARN, "oplmsu: conf-disc: "
+ "Other processing is using lower path");
+ return (EBUSY);
+ }
+
+ /* Mark the path as disconnecting */
+ upath->status = MSU_PSTAT_STOP;
+ upath->traditional_status = MSU_SETID;
+
+ oplmsu_clear_ioctl_path(lpath);
+ mutex_exit(&oplmsu_uinst->l_lock);
+ mutex_exit(&oplmsu_uinst->u_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return (SUCCESS);
+}
diff --git a/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_cmn_func.c b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_cmn_func.c
new file mode 100644
index 0000000000..05e7b69bd9
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_cmn_func.c
@@ -0,0 +1,1845 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/ksynch.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <sys/termio.h>
+#include <sys/ddi.h>
+#include <sys/file.h>
+#include <sys/disp.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/sunndi.h>
+#include <sys/strsun.h>
+#include <sys/oplmsu/oplmsu.h>
+#include <sys/oplmsu/oplmsu_proto.h>
+
+/*
+ * Link upper_path_table structure
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+void
+oplmsu_link_upath(upath_t *add_upath)
+{
+
+ ASSERT(add_upath != NULL);
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /* Append add_upath to the tail of the doubly-linked upath list */
+ if (oplmsu_uinst->first_upath == NULL) {
+ /* List is empty: new node becomes the head */
+ oplmsu_uinst->first_upath = add_upath;
+ add_upath->u_prev = NULL;
+ } else {
+ upath_t *last_upath;
+
+ last_upath = oplmsu_uinst->last_upath;
+ last_upath->u_next = add_upath;
+ add_upath->u_prev = last_upath;
+ }
+
+ /* New node is always the tail */
+ oplmsu_uinst->last_upath = add_upath;
+ add_upath->u_next = NULL;
+}
+
+/*
+ * Unlink upper_path_table structure
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_unlink_upath(upath_t *del_upath)
+{
+ upath_t **first, **last;
+
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+
+ first = &oplmsu_uinst->first_upath;
+ last = &oplmsu_uinst->last_upath;
+
+ if ((*first != del_upath) && (*last != del_upath)) {
+ /* Interior node: splice its neighbours together */
+ del_upath->u_prev->u_next = del_upath->u_next;
+ del_upath->u_next->u_prev = del_upath->u_prev;
+ } else {
+ /* Head and/or tail node: advance the corresponding end */
+ if (*first == del_upath) {
+ *first = (*first)->u_next;
+ if (*first) {
+ (*first)->u_prev = NULL;
+ }
+ }
+
+ if (*last == del_upath) {
+ *last = (*last)->u_prev;
+ if (*last) {
+ (*last)->u_next = NULL;
+ }
+ }
+ }
+
+ /* Fully detach the removed node (caller owns the memory) */
+ del_upath->u_next = NULL;
+ del_upath->u_prev = NULL;
+}
+
+/*
+ * Link lower_path_table structure
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : M
+ * -. uinst_t->c_lock : A
+ */
+void
+oplmsu_link_lpath(lpath_t *add_lpath)
+{
+
+ ASSERT(add_lpath != NULL);
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+
+ /* Append add_lpath to the tail of the doubly-linked lpath list */
+ if (oplmsu_uinst->first_lpath == NULL) {
+ /* List is empty: new node becomes the head */
+ oplmsu_uinst->first_lpath = add_lpath;
+ add_lpath->l_prev = NULL;
+ } else {
+ lpath_t *last_lpath;
+
+ last_lpath = oplmsu_uinst->last_lpath;
+ last_lpath->l_next = add_lpath;
+ add_lpath->l_prev = last_lpath;
+ }
+
+ /* New node is always the tail */
+ oplmsu_uinst->last_lpath = add_lpath;
+ add_lpath->l_next = NULL;
+}
+
+/*
+ * Unlink lower_path_table structure
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_unlink_lpath(lpath_t *del_lpath)
+{
+ lpath_t **first, **last;
+
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+
+ first = &oplmsu_uinst->first_lpath;
+ last = &oplmsu_uinst->last_lpath;
+
+ if ((*first != del_lpath) && (*last != del_lpath)) {
+ /* Interior node: splice its neighbours together */
+ del_lpath->l_prev->l_next = del_lpath->l_next;
+ del_lpath->l_next->l_prev = del_lpath->l_prev;
+ } else {
+ /* Head and/or tail node: advance the corresponding end */
+ if (*first == del_lpath) {
+ *first = (*first)->l_next;
+ if (*first) {
+ (*first)->l_prev = NULL;
+ }
+ }
+
+ if (*last == del_lpath) {
+ *last = (*last)->l_prev;
+ if (*last) {
+ (*last)->l_next = NULL;
+ }
+ }
+ }
+
+ /* Fully detach the removed node (caller owns the memory) */
+ del_lpath->l_next = NULL;
+ del_lpath->l_prev = NULL;
+}
+
+/*
+ * Link msgb structure of high priority
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A [It depends on caller]
+ * -. uinst_t->c_lock : A [It depends on caller]
+ */
+void
+oplmsu_link_high_primsg(mblk_t **first, mblk_t **last, mblk_t *add_msg)
+{
+
+ ASSERT(add_msg != NULL);
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Append add_msg to the tail of the caller-supplied b_next/b_prev
+ * chain of queued high-priority messages.
+ */
+ if (*first == NULL) {
+ *first = add_msg;
+ add_msg->b_prev = NULL;
+ } else {
+ (*last)->b_next = add_msg;
+ add_msg->b_prev = *last;
+ }
+
+ *last = add_msg;
+ add_msg->b_next = NULL;
+}
+
+/*
+ * Check whether lower path is usable by lower path info table address
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : M
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_check_lpath_usable(void)
+{
+ lpath_t *lpath;
+ int rval = SUCCESS;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));
+
+ /*
+ * Return BUSY if any lower path still has a pending ioctl
+ * handle (queue or mblk) attached; SUCCESS otherwise.
+ */
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ if ((lpath->hndl_uqueue != NULL) || (lpath->hndl_mp != NULL)) {
+ rval = BUSY;
+ break;
+ }
+ lpath = lpath->l_next;
+ }
+ return (rval);
+}
+
+/*
+ * Search upath_t by path number
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : P
+ */
+upath_t *
+oplmsu_search_upath_info(int path_no)
+{
+ upath_t *upath;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /* Linear search by path number; returns NULL when not found */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if (upath->path_no == path_no) {
+ break;
+ }
+ upath = upath->u_next;
+ }
+ return (upath);
+}
+
+/*
+ * Send M_IOCACK(or M_IOCNAK) message to stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_iocack(queue_t *q, mblk_t *mp, int errno)
+{
+ struct iocblk *iocp = NULL;
+
+ ASSERT(mp != NULL);
+
+ /* Fill in the ioctl completion status and reply upstream */
+ iocp = (struct iocblk *)mp->b_rptr;
+ iocp->ioc_error = errno;
+
+ if (errno) { /* Error */
+ mp->b_datap->db_type = M_IOCNAK;
+ iocp->ioc_rval = FAILURE;
+
+ OPLMSU_TRACE(q, mp, MSU_TRC_UO);
+ qreply(q, mp);
+ } else { /* Good */
+ mp->b_datap->db_type = M_IOCACK;
+ iocp->ioc_rval = SUCCESS;
+
+ OPLMSU_TRACE(q, mp, MSU_TRC_UO);
+ qreply(q, mp);
+ }
+}
+
+/*
+ * Delete all upath_t
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+void
+oplmsu_delete_upath_info(void)
+{
+ upath_t *upath, *next_upath;
+
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /* Detach the whole list first, then free every node */
+ upath = oplmsu_uinst->first_upath;
+ oplmsu_uinst->first_upath = NULL;
+ oplmsu_uinst->last_upath = NULL;
+
+ while (upath) {
+ next_upath = upath->u_next;
+ kmem_free(upath, sizeof (upath_t));
+ upath = next_upath;
+ }
+}
+
+/*
+ * Set queue and ioctl to lpath_t
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : M
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_set_ioctl_path(lpath_t *lpath, queue_t *hndl_queue, mblk_t *mp)
+{
+ int rval = SUCCESS;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));
+
+ /*
+ * Claim the lower path for an ioctl: only possible when no
+ * handle is attached yet, no switch is in progress, and the
+ * path status permits it. Returns BUSY otherwise.
+ */
+ if ((lpath->hndl_uqueue == NULL) && (lpath->hndl_mp == NULL) &&
+ (lpath->sw_flag == 0)) {
+ if ((lpath->status == MSU_EXT_NOTUSED) ||
+ (lpath->status == MSU_EXT_ACTIVE_CANDIDATE) ||
+ (lpath->status == MSU_SETID_NU)) {
+ if (hndl_queue == NULL) {
+ /* No upper queue: record NULL handle */
+ lpath->hndl_uqueue = hndl_queue;
+ } else {
+ /* Always record the write-side queue */
+ lpath->hndl_uqueue = WR(hndl_queue);
+ }
+ lpath->hndl_mp = mp;
+ } else {
+ rval = BUSY;
+ }
+ } else {
+ rval = BUSY;
+ }
+ return (rval);
+}
+
+/*
+ * Clear queue and ioctl to lpath_t
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : M
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_clear_ioctl_path(lpath_t *lpath)
+{
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));
+
+ /* Release the ioctl claim taken by oplmsu_set_ioctl_path() */
+ lpath->hndl_uqueue = NULL;
+ lpath->hndl_mp = NULL;
+}
+
+/*
+ * Get instance status from the statuses of upath_t entries
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_get_inst_status(void)
+{
+ upath_t *upath;
+ int sts, pre_sts = INST_STAT_UNCONFIGURED;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /*
+ * Map each path's (status, traditional_status) pair onto an
+ * instance status, and keep the numerically smallest value seen.
+ * NOTE(review): this assumes the INST_STAT_* constants are
+ * ordered so that the smallest value is the most significant
+ * status to report -- confirm against the INST_STAT_* defines.
+ */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if (((upath->status == MSU_PSTAT_ACTIVE) &&
+ (upath->traditional_status == MSU_ACTIVE)) ||
+ ((upath->status == MSU_PSTAT_STANDBY) &&
+ (upath->traditional_status == MSU_STANDBY))) {
+ sts = INST_STAT_ONLINE;
+ } else if (((upath->status == MSU_PSTAT_STOP) &&
+ (upath->traditional_status == MSU_STOP)) ||
+ ((upath->status == MSU_PSTAT_FAIL) &&
+ (upath->traditional_status == MSU_FAIL))) {
+ sts = INST_STAT_OFFLINE;
+ } else if (((upath->status == MSU_PSTAT_DISCON) &&
+ (upath->traditional_status == MSU_DISCON)) ||
+ ((upath->status == MSU_PSTAT_EMPTY) &&
+ (upath->traditional_status == MSU_EMPTY))) {
+ sts = INST_STAT_UNCONFIGURED;
+ } else {
+ /* Mixed/transitional state */
+ sts = INST_STAT_BUSY;
+ }
+
+ if (pre_sts > sts) {
+ pre_sts = sts;
+ }
+ upath = upath->u_next;
+ }
+ return (pre_sts);
+}
+
+/*
+ * Search path of "online:standby" status
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : P
+ */
+upath_t *
+oplmsu_search_standby(void)
+{
+ upath_t *upath, *altn_upath = NULL;
+ int max_pathnum = UNDEFINED;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /*
+ * Select the standby path with the SMALLEST path number that
+ * still has a lower path attached. (Despite its name,
+ * max_pathnum tracks the current minimum.) Returns NULL when
+ * no such path exists.
+ */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if ((upath->status == MSU_PSTAT_STANDBY) &&
+ (upath->traditional_status == MSU_STANDBY) &&
+ (upath->lpath != NULL)) {
+ if ((max_pathnum == UNDEFINED) ||
+ (max_pathnum > upath->path_no)) {
+ max_pathnum = upath->path_no;
+ altn_upath = upath;
+ }
+ }
+ upath = upath->u_next;
+ }
+ return (altn_upath);
+}
+
+/*
+ * Search path of "offline:stop" status, and minimum path number
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_search_min_stop_path(void)
+{
+ upath_t *upath, *min_upath = NULL;
+ lpath_t *lpath;
+ int min_no = UNDEFINED;
+ int active_flag = 0;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /*
+ * Scan all upper paths. If any path is already "online:active",
+ * no candidate is needed. Otherwise remember the "offline:stop"
+ * path with the smallest path number whose lower path is still
+ * in the MSU_EXT_NOTUSED state.
+ */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ if ((upath->status == MSU_PSTAT_ACTIVE) &&
+ (upath->traditional_status == MSU_ACTIVE)) {
+ active_flag = 1;
+ break;
+ } else if ((upath->status == MSU_PSTAT_STOP) &&
+ (upath->traditional_status == MSU_STOP)) {
+ if (upath->lpath != NULL) {
+ if ((min_no == UNDEFINED) ||
+ (upath->path_no < min_no)) {
+ lpath = upath->lpath;
+ mutex_enter(&oplmsu_uinst->l_lock);
+ if (lpath->status == MSU_EXT_NOTUSED) {
+ min_upath = upath;
+ min_no = upath->path_no;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+ }
+ }
+ upath = upath->u_next;
+ }
+
+ /*
+ * Promote the chosen path to active candidate only when no
+ * active path exists AND an eligible path was actually found.
+ * The min_upath != NULL check prevents dereferencing an
+ * uninitialized pointer when the scan found no candidate.
+ */
+ if ((active_flag == 0) && (min_upath != NULL)) {
+ lpath = min_upath->lpath;
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath->src_upath = NULL;
+ lpath->status = MSU_EXT_ACTIVE_CANDIDATE;
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+}
+
+/*
+ * Get the total number of serial paths
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+int
+oplmsu_get_pathnum(void)
+{
+ upath_t *upath;
+ int total_num = 0;
+
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /* Count the nodes currently on the upath list */
+ if (oplmsu_uinst->first_upath != NULL) {
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ total_num++;
+ upath = upath->u_next;
+ }
+ }
+ return (total_num);
+}
+
+/*
+ * Put XOFF/ XON message on write queue
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+int
+oplmsu_cmn_put_xoffxon(queue_t *queue, int data)
+{
+ mblk_t *mp;
+ int rval = SUCCESS;
+
+ /* Send M_START */
+ if ((mp = allocb(0, BPRI_LO)) != NULL) {
+ mp->b_datap->db_type = M_START;
+ putq(queue, mp);
+
+ /* Send M_DATA(XOFF, XON) */
+ if ((mp = allocb(sizeof (int), BPRI_LO)) != NULL) {
+ /* Payload is the raw XOFF/XON code word */
+ *(uint_t *)mp->b_rptr = data;
+ mp->b_wptr = mp->b_rptr + sizeof (int);
+ putq(queue, mp);
+ } else {
+ /* M_START already queued; data block failed */
+ rval = FAILURE;
+ }
+ } else {
+ rval = FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * Put XOFF message on write queue for all standby paths
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : M
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_putxoff_standby(void)
+{
+ upath_t *upath;
+ lpath_t *lpath;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));
+
+ /* Queue an XOFF on every standby path that has a lower queue */
+ upath = oplmsu_uinst->first_upath;
+ while (upath) {
+ lpath = upath->lpath;
+ if ((upath->status != MSU_PSTAT_STANDBY) ||
+ (lpath == NULL)) {
+ upath = upath->u_next;
+ continue;
+ }
+
+ /* Best effort: allocation failure inside is ignored */
+ (void) oplmsu_cmn_put_xoffxon(
+ WR(lpath->lower_queue), MSU_XOFF_4);
+ upath = upath->u_next;
+ }
+}
+
+/*
+ * Set M_FLUSH message
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : A [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+void
+oplmsu_cmn_set_mflush(mblk_t *mp)
+{
+
+ /* Turn mp into a one-byte M_FLUSH(FLUSHW) message */
+ mp->b_datap->db_type = M_FLUSH;
+ *mp->b_rptr = FLUSHW;
+ mp->b_wptr = mp->b_rptr + sizeof (char);
+}
+
+/*
+ * Set status information of upath_t
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : M
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+void
+oplmsu_cmn_set_upath_sts(upath_t *upath, int sts, int prev_sts,
+ ulong_t trad_sts)
+{
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));
+
+ /* Record new, previous, and traditional status in one step */
+ upath->status = sts;
+ upath->prev_status = prev_sts;
+ upath->traditional_status = trad_sts;
+}
+
+/*
+ * Allocate a message block
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_cmn_allocmb(queue_t *q, mblk_t *mp, mblk_t **nmp, size_t size,
+ int rw_flag)
+{
+ int rval = SUCCESS;
+
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Allocate a message block of 'size' bytes into *nmp. On
+ * failure, park mp and schedule a bufcall/timeout retry, then
+ * report FAILURE so the caller stops processing.
+ */
+ if ((*nmp = (mblk_t *)allocb(size, BPRI_LO)) == NULL) {
+ oplmsu_cmn_bufcall(q, mp, size, rw_flag);
+ rval = FAILURE;
+ } else {
+ (*nmp)->b_wptr = (*nmp)->b_rptr + size;
+ }
+ return (rval);
+}
+
+/*
+ * Copy a message
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_cmn_copymb(queue_t *q, mblk_t *mp, mblk_t **nmp, mblk_t *cmp,
+ int rw_flag)
+{
+ int rval = SUCCESS;
+
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Copy cmp into *nmp. On allocation failure, park mp and
+ * schedule a bufcall/timeout retry sized for the whole message.
+ */
+ if ((*nmp = copymsg(cmp)) == NULL) {
+ oplmsu_cmn_bufcall(q, mp, msgsize(cmp), rw_flag);
+ rval = FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * bufcall request
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_bufcall(queue_t *q, mblk_t *mp, size_t size, int rw_flag)
+{
+
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ if (rw_flag == MSU_WRITE_SIDE) {
+ ctrl_t *ctrl;
+
+ /* Park the message back on the write queue for retry */
+ putbq(q, mp);
+
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = (ctrl_t *)q->q_ptr;
+ if (ctrl->wbuf_id != 0) {
+ /* A bufcall retry is already pending */
+ mutex_exit(&oplmsu_uinst->c_lock);
+ return;
+ }
+
+ ctrl->wbuftbl->q = q;
+ ctrl->wbuftbl->rw_flag = rw_flag;
+ ctrl->wbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
+ (void *)ctrl->wbuftbl);
+
+ if (ctrl->wbuf_id == 0) {
+ /* bufcall failed: fall back to a timeout retry */
+ if (ctrl->wtout_id != 0) {
+ mutex_exit(&oplmsu_uinst->c_lock);
+ return;
+ }
+
+ ctrl->wtout_id = timeout(oplmsu_cmn_bufcb,
+ (void *)ctrl->wbuftbl, drv_usectohz(MSU_TM_500MS));
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+ } else if (rw_flag == MSU_READ_SIDE) {
+ lpath_t *lpath;
+ mblk_t *wrk_msg;
+
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = (lpath_t *)q->q_ptr;
+ if (mp->b_datap->db_type >= QPCTL) {
+ /*
+ * High-priority message: push onto the head of the
+ * private first/last_lpri_hi chain instead of the
+ * STREAMS queue.
+ */
+ if (lpath->first_lpri_hi == NULL) {
+ lpath->last_lpri_hi = mp;
+ mp->b_next = NULL;
+ } else {
+ wrk_msg = lpath->first_lpri_hi;
+ wrk_msg->b_prev = mp;
+ mp->b_next = wrk_msg;
+ }
+ mp->b_prev = NULL;
+ lpath->first_lpri_hi = mp;
+ } else {
+ putbq(q, mp);
+ }
+
+ if (lpath->rbuf_id != 0) {
+ /* A bufcall retry is already pending */
+ mutex_exit(&oplmsu_uinst->l_lock);
+ return;
+ }
+
+ lpath->rbuftbl->q = q;
+ lpath->rbuftbl->rw_flag = rw_flag;
+ lpath->rbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
+ (void *)lpath->rbuftbl);
+
+ if (lpath->rbuf_id == 0) {
+ /* bufcall failed: fall back to a timeout retry */
+ if (lpath->rtout_id != 0) {
+ mutex_exit(&oplmsu_uinst->l_lock);
+ return;
+ }
+
+ lpath->rtout_id = timeout(oplmsu_cmn_bufcb,
+ (void *)lpath->rbuftbl, drv_usectohz(MSU_TM_500MS));
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+ }
+}
+
+/*
+ * Previous sequence for active path change
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_cmn_prechg(queue_t *q, mblk_t *mp, int rw_flag, mblk_t **term_mp,
+ int *term_ioctl, int *term_stat)
+{
+
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Build the first termio replay message for a path change from
+ * the saved ioctl images, in fixed priority order: TCSETS,
+ * TIOCMSET, TIOCSPPS, TIOCSWINSZ, TIOCSSOFTCAR. Sets *term_stat
+ * to the matching wait state; when nothing is saved, go straight
+ * to MSU_WPTH_CHG with no message.
+ * NOTE(review): these branches compare against -1 while the
+ * sibling oplmsu_cmn_prechg_termio() compares against FAILURE --
+ * presumably FAILURE == -1; confirm in oplmsu.h.
+ */
+ if (oplmsu_uinst->tcsets_p != NULL) {
+ struct iocblk *iocp;
+
+ if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tcsets_p,
+ rw_flag) == -1) {
+ return (FAILURE);
+ }
+
+ iocp = (struct iocblk *)(*term_mp)->b_rptr;
+ *term_ioctl = iocp->ioc_cmd;
+ *term_stat = MSU_WTCS_ACK;
+ } else if (oplmsu_uinst->tiocmset_p != NULL) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocmset_p,
+ rw_flag) == -1) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCMSET;
+ *term_stat = MSU_WTMS_ACK;
+ } else if (oplmsu_uinst->tiocspps_p != NULL) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocspps_p,
+ rw_flag) == -1) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSPPS;
+ *term_stat = MSU_WPPS_ACK;
+ } else if (oplmsu_uinst->tiocswinsz_p != NULL) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp,
+ oplmsu_uinst->tiocswinsz_p, rw_flag) == -1) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSWINSZ;
+ *term_stat = MSU_WWSZ_ACK;
+ } else if (oplmsu_uinst->tiocssoftcar_p != NULL) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp,
+ oplmsu_uinst->tiocssoftcar_p, rw_flag) == -1) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSSOFTCAR;
+ *term_stat = MSU_WCAR_ACK;
+ } else {
+ /* No saved termio state: path change can start at once */
+ *term_stat = MSU_WPTH_CHG;
+ *term_mp = NULL;
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Pick up termios to re-set
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : A
+ */
+int
+oplmsu_stop_prechg(mblk_t **term_mp, int *term_ioctl, int *term_stat)
+{
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Same replay selection as oplmsu_cmn_prechg(), but copies with
+ * plain copymsg() and never schedules a bufcall retry: an
+ * allocation failure simply returns FAILURE.
+ */
+ if (oplmsu_uinst->tcsets_p != NULL) {
+ struct iocblk *iocp;
+
+ if ((*term_mp = copymsg(oplmsu_uinst->tcsets_p)) == NULL) {
+ return (FAILURE);
+ }
+
+ iocp = (struct iocblk *)(*term_mp)->b_rptr;
+ *term_ioctl = iocp->ioc_cmd;
+ *term_stat = MSU_WTCS_ACK;
+ } else if (oplmsu_uinst->tiocmset_p != NULL) {
+ if ((*term_mp = copymsg(oplmsu_uinst->tiocmset_p)) == NULL) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCMSET;
+ *term_stat = MSU_WTMS_ACK;
+ } else if (oplmsu_uinst->tiocspps_p != NULL) {
+ if ((*term_mp = copymsg(oplmsu_uinst->tiocspps_p)) == NULL) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSPPS;
+ *term_stat = MSU_WPPS_ACK;
+ } else if (oplmsu_uinst->tiocswinsz_p != NULL) {
+ if ((*term_mp = copymsg(oplmsu_uinst->tiocswinsz_p)) == NULL) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSWINSZ;
+ *term_stat = MSU_WWSZ_ACK;
+ } else if (oplmsu_uinst->tiocssoftcar_p != NULL) {
+ if ((*term_mp = copymsg(oplmsu_uinst->tiocssoftcar_p))
+ == NULL) {
+ return (FAILURE);
+ }
+
+ *term_ioctl = TIOCSSOFTCAR;
+ *term_stat = MSU_WCAR_ACK;
+ } else {
+ /* No saved termio state: path change can start at once */
+ *term_stat = MSU_WPTH_CHG;
+ *term_mp = NULL;
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Previous sequence for active path change termio
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_cmn_prechg_termio(queue_t *q, mblk_t *mp, int rw_flag, int prev_flag,
+ mblk_t **term_mp, int *term_stat)
+{
+
+ ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Continue the termio replay sequence: prev_flag identifies the
+ * stage just acknowledged, and the next saved ioctl image (if
+ * any) in TCSETS -> MSET -> PPS -> WINSZ -> SOFTCAR order is
+ * copied into *term_mp. After SOFTCAR, move to MSU_WPTH_CHG.
+ */
+ if ((prev_flag == MSU_TIOS_TCSETS) &&
+ (oplmsu_uinst->tiocmset_p != NULL)) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocmset_p,
+ rw_flag) == FAILURE) {
+ return (FAILURE);
+ }
+
+ *term_stat = MSU_WTMS_ACK;
+ } else if ((prev_flag <= MSU_TIOS_MSET) &&
+ (oplmsu_uinst->tiocspps_p != NULL)) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocspps_p,
+ rw_flag) == FAILURE) {
+ return (FAILURE);
+ }
+
+ *term_stat = MSU_WPPS_ACK;
+ } else if ((prev_flag <= MSU_TIOS_PPS) &&
+ (oplmsu_uinst->tiocswinsz_p != NULL)) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp,
+ oplmsu_uinst->tiocswinsz_p, rw_flag) == FAILURE) {
+ return (FAILURE);
+ }
+
+ *term_stat = MSU_WWSZ_ACK;
+ } else if ((prev_flag <= MSU_TIOS_WINSZP) &&
+ (oplmsu_uinst->tiocssoftcar_p != NULL)) {
+ if (oplmsu_cmn_copymb(q, mp, term_mp,
+ oplmsu_uinst->tiocssoftcar_p, rw_flag) == FAILURE) {
+ return (FAILURE);
+ }
+
+ *term_stat = MSU_WCAR_ACK;
+ } else if (prev_flag <= MSU_TIOS_SOFTCAR) {
+ /* All saved termios replayed: proceed to path change */
+ *term_mp = NULL;
+ *term_stat = MSU_WPTH_CHG;
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Pull up messages
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_cmn_pullup_msg(queue_t *q, mblk_t *mp)
+{
+ mblk_t *nmp = NULL;
+
+ /*
+ * If the ioctl payload (mp->b_cont) spans more than one mblk,
+ * coalesce it into a single block. On allocation failure the
+ * ioctl is NAKed with ENOSR and FAILURE is returned.
+ */
+ if ((mp != NULL) && (mp->b_cont != NULL) &&
+ (mp->b_cont->b_cont != NULL)) {
+ if ((nmp = msgpullup(mp->b_cont, -1)) == NULL) {
+ oplmsu_iocack(q, mp, ENOSR);
+ return (FAILURE);
+ } else {
+ freemsg(mp->b_cont);
+ mp->b_cont = nmp;
+ }
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Wake up flow control
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_wakeup(queue_t *q)
+{
+ ctrl_t *ctrl;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /* Wake a thread blocked on this control node's condvar */
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = (ctrl_t *)q->q_ptr;
+ if (ctrl->sleep_flag == CV_SLEEP) {
+ ctrl->sleep_flag = CV_WAKEUP;
+ cv_signal(&ctrl->cvp);
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+}
+
+/*
+ * bufcall() and timeout() callback entry for read/write stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_bufcb(void *arg)
+{
+ struct buf_tbl *buftbl = arg;
+ lpath_t *lpath;
+ ctrl_t *ctrl;
+ queue_t *q;
+ int lq_flag = 0;
+
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->l_lock);
+
+ /* First, see if this callback belongs to a read-side lpath */
+ lpath = oplmsu_uinst->first_lpath;
+ while (lpath) {
+ if ((buftbl == lpath->rbuftbl) &&
+ (buftbl->rw_flag == MSU_READ_SIDE)) {
+ if ((lpath->rbuf_id == 0) && (lpath->rtout_id == 0)) {
+ /* Retry already cancelled elsewhere */
+ mutex_exit(&oplmsu_uinst->l_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ } else {
+ q = lpath->rbuftbl->q;
+ lpath->rbuftbl->q = NULL;
+ lpath->rbuftbl->rw_flag = UNDEFINED;
+
+ /* Clear whichever retry id fired */
+ if (lpath->rbuf_id) {
+ lpath->rbuf_id = 0;
+ } else {
+ lpath->rtout_id = 0;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ /* Suppress global queueing while re-enabling */
+ if (oplmsu_queue_flag == 1) {
+ lq_flag = 1;
+ oplmsu_queue_flag = 0;
+ }
+
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_rcmn_high_qenable(q);
+
+ if (lq_flag == 1) {
+ /* Restore the saved queueing flag */
+ rw_enter(&oplmsu_uinst->lock,
+ RW_WRITER);
+ oplmsu_queue_flag = 1;
+ rw_exit(&oplmsu_uinst->lock);
+ }
+ }
+ return;
+ }
+ lpath = lpath->l_next;
+ }
+ mutex_exit(&oplmsu_uinst->l_lock);
+
+ /* Otherwise, it may be the user or meta control write side */
+ mutex_enter(&oplmsu_uinst->c_lock);
+ if ((ctrl = oplmsu_uinst->user_ctrl) != NULL) {
+ if ((buftbl == ctrl->wbuftbl) &&
+ (buftbl->rw_flag == MSU_WRITE_SIDE)) {
+ oplmsu_wbufcb_posthndl(ctrl);
+ mutex_exit(&oplmsu_uinst->c_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return;
+ }
+ }
+
+ if ((ctrl = oplmsu_uinst->meta_ctrl) != NULL) {
+ if ((buftbl == ctrl->wbuftbl) &&
+ (buftbl->rw_flag == MSU_WRITE_SIDE)) {
+ oplmsu_wbufcb_posthndl(ctrl);
+ mutex_exit(&oplmsu_uinst->c_lock);
+ rw_exit(&oplmsu_uinst->lock);
+ return;
+ }
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+ rw_exit(&oplmsu_uinst->lock);
+}
+
+/*
+ * bufcall() or timeout() callback post handling for write stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : M
+ */
+void
+oplmsu_wbufcb_posthndl(ctrl_t *ctrl)
+{
+ queue_t *q;
+ int lq_flag = 0;
+
+ ASSERT(RW_WRITE_HELD(&oplmsu_uinst->lock));
+ ASSERT(MUTEX_HELD(&oplmsu_uinst->c_lock));
+
+ if ((ctrl->wbuf_id == 0) && (ctrl->wtout_id == 0)) {
+ /* Retry already cancelled elsewhere: nothing to do */
+ return;
+ }
+
+ q = ctrl->wbuftbl->q;
+ ctrl->wbuftbl->q = NULL;
+ ctrl->wbuftbl->rw_flag = UNDEFINED;
+ /* Clear whichever retry id (bufcall or timeout) fired */
+ if (ctrl->wbuf_id) {
+ ctrl->wbuf_id = 0;
+ } else {
+ ctrl->wtout_id = 0;
+ }
+
+ /* Suppress global queueing while re-enabling the stream */
+ if (oplmsu_queue_flag == 1) {
+ lq_flag = 1;
+ oplmsu_queue_flag = 0;
+ }
+
+ /* c_lock must be dropped across the qenable call */
+ mutex_exit(&oplmsu_uinst->c_lock);
+ oplmsu_wcmn_high_qenable(q, RW_WRITER);
+ mutex_enter(&oplmsu_uinst->c_lock);
+
+ if (lq_flag == 1) {
+ oplmsu_queue_flag = 1;
+ }
+}
+
+/*
+ * COMMON FUNCTIONS FOR WRITE STREAM
+ */
+
+/*
+ * Check control node and driver privilege
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_wcmn_chknode(queue_t *q, int node, mblk_t *mp)
+{
+ struct iocblk *iocp;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /* Verify the stream's control node matches the expected type */
+ mutex_enter(&oplmsu_uinst->c_lock);
+ if (((ctrl_t *)q->q_ptr)->node_type != node) {
+ mutex_exit(&oplmsu_uinst->c_lock);
+ /*
+ * NOTE(review): this warning prints the EXPECTED node
+ * type ('node'), not the stream's actual node_type --
+ * confirm whether the actual type was intended.
+ */
+ cmn_err(CE_WARN, "oplmsu: chk-node: ctrl node type = %d", node);
+ return (EINVAL);
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+
+ /* Check super-user by oplmsu.conf */
+ if (oplmsu_check_su != 0) {
+ iocp = (struct iocblk *)mp->b_rptr;
+ if (drv_priv(iocp->ioc_cr) != 0) {
+ cmn_err(CE_WARN, "oplmsu: chk-node: Permission denied");
+ return (EPERM);
+ }
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Flush handle for write side stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_wcmn_flush_hndl(queue_t *q, mblk_t *mp, krw_t rw)
+{
+ queue_t *dst_queue = NULL;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ if (*mp->b_rptr & FLUSHW) { /* Write side */
+ flushq(q, FLUSHDATA);
+ }
+
+ dst_queue = oplmsu_uinst->lower_queue;
+ if (dst_queue == NULL) {
+ /* No active lower path: handle the flush locally */
+ if (*mp->b_rptr & FLUSHR) {
+ flushq(RD(q), FLUSHDATA);
+ *mp->b_rptr &= ~FLUSHW;
+
+ /* Drop the lock across qreply, then retake as 'rw' */
+ rw_exit(&oplmsu_uinst->lock);
+ OPLMSU_TRACE(q, mp, MSU_TRC_UO);
+ qreply(q, mp);
+ rw_enter(&oplmsu_uinst->lock, rw);
+ } else {
+ freemsg(mp);
+ }
+ } else {
+ /* Forward the M_FLUSH down the active lower path */
+ putq(WR(dst_queue), mp);
+ }
+}
+
+/*
+ * Through message handle for write side stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_wcmn_through_hndl(queue_t *q, mblk_t *mp, int pri_flag, krw_t rw)
+{
+ queue_t *usr_queue = NULL, *dst_queue = NULL;
+ ctrl_t *ctrl;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /* Resolve the user control stream; without one, drop/NAK */
+ mutex_enter(&oplmsu_uinst->c_lock);
+ if ((ctrl = oplmsu_uinst->user_ctrl) != NULL) {
+ usr_queue = ctrl->queue;
+ mutex_exit(&oplmsu_uinst->c_lock);
+ } else {
+ mutex_exit(&oplmsu_uinst->c_lock);
+ if (mp->b_datap->db_type == M_IOCTL) {
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_iocack(q, mp, ENODEV);
+ rw_enter(&oplmsu_uinst->lock, rw);
+ } else {
+ freemsg(mp);
+ }
+ return (SUCCESS);
+ }
+
+ /* Resolve the active lower path; without one, drop/NAK */
+ if (oplmsu_uinst->lower_queue != NULL) {
+ dst_queue = WR(oplmsu_uinst->lower_queue);
+ } else {
+ cmn_err(CE_WARN, "!oplmsu: through-lwq: "
+ "Active path doesn't exist");
+
+ if (mp->b_datap->db_type == M_IOCTL) {
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_iocack(q, mp, ENODEV);
+ rw_enter(&oplmsu_uinst->lock, rw);
+ } else {
+ freemsg(mp);
+ }
+ return (SUCCESS);
+ }
+
+ /* Only messages from the user control stream may pass through */
+ if ((usr_queue == WR(q)) || (usr_queue == RD(q))) {
+ if (pri_flag == MSU_HIGH) {
+ /* High priority: bypass flow control */
+ putq(dst_queue, mp);
+ } else {
+ if (canput(dst_queue)) {
+ putq(dst_queue, mp);
+ } else {
+ /* Flow controlled: park and report FAILURE */
+ oplmsu_wcmn_norm_putbq(WR(q), mp, dst_queue);
+ return (FAILURE);
+ }
+ }
+ } else {
+ cmn_err(CE_WARN, "oplmsu: through-lwq: "
+ "Inappropriate message for this node");
+
+ if (mp->b_datap->db_type == M_IOCTL) {
+ rw_exit(&oplmsu_uinst->lock);
+ oplmsu_iocack(q, mp, ENODEV);
+ rw_enter(&oplmsu_uinst->lock, rw);
+ } else {
+ freemsg(mp);
+ }
+ }
+ return (SUCCESS);
+}
+
+/*
+ * Get high priority message from buffer for upper write stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : A
+ * -. uinst_t->c_lock : P
+ */
+mblk_t *
+oplmsu_wcmn_high_getq(queue_t *uwq)
+{
+ mblk_t *mp;
+ ctrl_t *ctrl;
+
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /* Dequeue the head of the private high-priority chain (or NULL) */
+ mutex_enter(&oplmsu_uinst->c_lock);
+ ctrl = (ctrl_t *)uwq->q_ptr;
+ mp = ctrl->first_upri_hi;
+ if (mp != NULL) {
+ if (mp->b_next == NULL) {
+ /* Last element: chain becomes empty */
+ ctrl->first_upri_hi = NULL;
+ ctrl->last_upri_hi = NULL;
+ } else {
+ ctrl->first_upri_hi = mp->b_next;
+ mp->b_next->b_prev = NULL;
+ mp->b_next = NULL;
+ }
+ mp->b_prev = NULL;
+ }
+ mutex_exit(&oplmsu_uinst->c_lock);
+ return (mp);
+}
+
+/*
+ * putbq() function for normal priority message of write stream
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_wcmn_norm_putbq(queue_t *uwq, mblk_t *mp, queue_t *dq)
+{
+ lpath_t *lpath;
+
+ ASSERT(mp != NULL);
+ ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+ /*
+ * Remember which upper write queue is blocked on the lower
+ * path dq, so it can be re-enabled later, then park mp.
+ */
+ mutex_enter(&oplmsu_uinst->l_lock);
+ lpath = (lpath_t *)dq->q_ptr;
+ lpath->uwq_flag = 1;
+ lpath->uwq_queue = uwq;
+ mutex_exit(&oplmsu_uinst->l_lock);
+ putbq(uwq, mp);
+}
+
+/*
+ * Restart queuing for high priority message of write stream when flow control
+ * failed
+ *
+ * Drains the deferred high-priority message list of the upper write
+ * stream: M_FLUSH messages go to the flush handler, everything else
+ * is forwarded via oplmsu_wcmn_through_hndl().  When the list drains
+ * completely the upper write queue is re-enabled.  No-op while a path
+ * switch is in progress (oplmsu_queue_flag != 0).
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER or RW_WRITER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_wcmn_high_qenable(queue_t *q, krw_t rw)
+{
+	mblk_t *mp;
+
+	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
+
+	if (oplmsu_queue_flag == 1) {
+		return;
+	}
+
+	/* Handle high priority message */
+	while (mp = oplmsu_wcmn_high_getq(WR(q))) {
+		/*
+		 * db_type is a message type code, not a bit mask.  The
+		 * previous '&' test also matched every other high-priority
+		 * type sharing bits with M_FLUSH (e.g. M_PCPROTO, M_PCSIG),
+		 * misrouting them to the flush handler.  Compare with '=='
+		 * as the read side does (case M_FLUSH).
+		 */
+		if (mp->b_datap->db_type == M_FLUSH) {
+			oplmsu_wcmn_flush_hndl(q, mp, rw);
+			continue;
+		}
+
+		if (oplmsu_wcmn_through_hndl(q, mp, MSU_HIGH, rw) == FAILURE) {
+			return;
+		}
+	}
+	qenable(WR(q));	/* enable upper write queue */
+}
+
+/*
+ * COMMON FUNCTIONS FOR READ STREAM
+ */
+
+/*
+ * Flush handle for read side stream
+ *
+ * Processes an M_FLUSH received on a lower read queue q: flushes data
+ * from q for FLUSHR, then forwards the message to the user node's
+ * read queue if one exists.  When no destination queue is available,
+ * the write side is flushed here and the message is turned around
+ * with qreply() (standard STREAMS M_FLUSH echo) or freed.
+ *
+ * Requires lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_rcmn_flush_hndl(queue_t *q, mblk_t *mp)
+{
+	queue_t *dst_queue = NULL;
+	ctrl_t *ctrl;
+
+	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+	if (*mp->b_rptr & FLUSHR) {
+		/* Remove only data messages from read queue */
+		flushq(q, FLUSHDATA);
+	}
+
+	mutex_enter(&oplmsu_uinst->c_lock);
+	if ((ctrl = oplmsu_uinst->user_ctrl) != NULL) {
+		dst_queue = RD(ctrl->queue);
+		mutex_exit(&oplmsu_uinst->c_lock);
+
+		if (dst_queue != NULL) {
+			putq(dst_queue, mp);
+		} else {
+			/*
+			 * NOTE(review): this arm duplicates the
+			 * no-user-node path below; it is reachable only
+			 * if ctrl->queue is NULL — confirm whether that
+			 * can happen while user_ctrl is set.
+			 */
+			if (*mp->b_rptr & FLUSHW) {
+				flushq(WR(q), FLUSHDATA);
+				*mp->b_rptr &= ~FLUSHR;
+
+				/* Drop the rwlock across qreply() */
+				rw_exit(&oplmsu_uinst->lock);
+				OPLMSU_TRACE(q, mp, MSU_TRC_LO);
+				qreply(q, mp);
+				rw_enter(&oplmsu_uinst->lock, RW_READER);
+			} else {
+				freemsg(mp);
+			}
+		}
+	} else {
+		mutex_exit(&oplmsu_uinst->c_lock);
+		if (*mp->b_rptr & FLUSHW) {
+			flushq(WR(q), FLUSHDATA);
+			*mp->b_rptr &= ~FLUSHR;
+
+			rw_exit(&oplmsu_uinst->lock);
+			OPLMSU_TRACE(q, mp, MSU_TRC_LO);
+			qreply(q, mp);
+			rw_enter(&oplmsu_uinst->lock, RW_READER);
+		} else {
+			freemsg(mp);
+		}
+	}
+}
+
+/*
+ * Through message handle for read side stream
+ *
+ * Forwards a message arriving on lower read queue q up to the user
+ * node's read queue.  The message is dropped (freed) unless a user
+ * node exists and either this path is active or the message is an
+ * ioctl reply (M_IOCACK/M_IOCNAK).  Normal-priority messages respect
+ * flow control: on congestion the message is put back on q and the
+ * user node's ctrl_t is marked (lrq_flag/lrq_queue) so q can be
+ * re-enabled later; FAILURE is returned in that case, SUCCESS
+ * otherwise.
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : M [RW_READER]
+ * -. uinst_t->u_lock : A
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+int
+oplmsu_rcmn_through_hndl(queue_t *q, mblk_t *mp, int pri_flag)
+{
+	lpath_t *lpath;
+	ctrl_t *ctrl;
+	queue_t *dst_queue = NULL;
+	int act_flag;
+
+	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));
+
+	mutex_enter(&oplmsu_uinst->l_lock);
+	lpath = (lpath_t *)q->q_ptr;
+	if (lpath->uinst != NULL) {
+		/* A non-NULL uinst marks this lower path as the active one */
+		act_flag = ACTIVE_RES;
+	} else {
+		act_flag = NOT_ACTIVE_RES;
+	}
+	mutex_exit(&oplmsu_uinst->l_lock);
+
+	mutex_enter(&oplmsu_uinst->c_lock);
+	if (((ctrl = oplmsu_uinst->user_ctrl) != NULL) &&
+	    (((mp->b_datap->db_type == M_IOCACK) ||
+	    (mp->b_datap->db_type == M_IOCNAK)) || (act_flag == ACTIVE_RES))) {
+		dst_queue = RD(ctrl->queue);
+	} else {
+		/* No user node, or non-ioctl message on an inactive path */
+		mutex_exit(&oplmsu_uinst->c_lock);
+		freemsg(mp);
+		return (SUCCESS);
+	}
+
+	if (pri_flag == MSU_HIGH) {
+		putq(dst_queue, mp);
+	} else {
+		if (canput(dst_queue)) {
+			putq(dst_queue, mp);
+		} else {
+			/*
+			 * Place a normal priority message at the head of
+			 * read queue
+			 */
+
+			ctrl = (ctrl_t *)dst_queue->q_ptr;
+			ctrl->lrq_flag = 1;
+			ctrl->lrq_queue = q;
+			mutex_exit(&oplmsu_uinst->c_lock);
+			putbq(q, mp);
+			return (FAILURE);
+		}
+	}
+	mutex_exit(&oplmsu_uinst->c_lock);
+	return (SUCCESS);
+}
+
+/*
+ * Restart queuing for high priority message of read stream
+ * when flow control failed
+ *
+ * Drains the lower path's deferred high-priority list
+ * (lpath_t->first_lpri_hi, a b_next/b_prev chain maintained under
+ * l_lock) and dispatches each message: termios ioctl replies to
+ * oplmsu_lrioctl_termios(), M_ERROR to oplmsu_lrmsg_error(), M_FLUSH
+ * to the flush handler, everything else straight through to the user
+ * node.  Re-enables the lower read queue when the list drains.
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_rcmn_high_qenable(queue_t *q)
+{
+	mblk_t *mp;
+	struct iocblk *iocp = NULL;
+	lpath_t *lpath;
+	int rval;
+
+	rw_enter(&oplmsu_uinst->lock, RW_READER);
+
+	for (;;) {	/* Handle high priority message */
+		mutex_enter(&oplmsu_uinst->l_lock);
+		lpath = (lpath_t *)q->q_ptr;
+		if ((mp = lpath->first_lpri_hi) == NULL) {
+			mutex_exit(&oplmsu_uinst->l_lock);
+			break;
+		}
+
+		/* Unlink mp from the head of the deferred list */
+		if (mp->b_next == NULL) {
+			lpath->first_lpri_hi = NULL;
+			lpath->last_lpri_hi = NULL;
+		} else {
+			lpath->first_lpri_hi = mp->b_next;
+			mp->b_next->b_prev = NULL;
+			mp->b_next = NULL;
+		}
+		mp->b_prev = NULL;
+		mutex_exit(&oplmsu_uinst->l_lock);
+
+		rval = SUCCESS;
+		switch (mp->b_datap->db_type) {
+		case M_IOCACK :	/* FALLTHRU */
+		case M_IOCNAK :
+			iocp = (struct iocblk *)mp->b_rptr;
+			switch (iocp->ioc_cmd) {
+			case TCSETS :		/* FALLTHRU */
+			case TCSETSW :		/* FALLTHRU */
+			case TCSETSF :		/* FALLTHRU */
+			case TIOCMSET :		/* FALLTHRU */
+			case TIOCSPPS :		/* FALLTHRU */
+			case TIOCSWINSZ :	/* FALLTHRU */
+			case TIOCSSOFTCAR :
+				/*
+				 * NOTE(review): the lock was entered above
+				 * as RW_READER but is re-acquired here (and
+				 * below) as RW_WRITER — confirm this
+				 * asymmetry is intentional.
+				 */
+				rw_exit(&oplmsu_uinst->lock);
+				rval = oplmsu_lrioctl_termios(q, mp);
+				rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+				break;
+
+			default :
+				rval = oplmsu_rcmn_through_hndl(
+				    q, mp, MSU_HIGH);
+				if (rval == FAILURE) {
+					rw_exit(&oplmsu_uinst->lock);
+					return;
+				}
+			}
+			break;
+
+		case M_ERROR :
+			rw_exit(&oplmsu_uinst->lock);
+			rval = oplmsu_lrmsg_error(q, mp);
+			rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+			break;
+
+		case M_FLUSH :
+			oplmsu_rcmn_flush_hndl(q, mp);
+			break;
+
+		default :
+			rval = oplmsu_rcmn_through_hndl(q, mp, MSU_HIGH);
+			if (rval == FAILURE) {
+				rw_exit(&oplmsu_uinst->lock);
+				return;
+			}
+		}
+
+		/* A FAILURE from the termios/error handlers stops draining */
+		if (rval == FAILURE) {
+			break;
+		}
+	}
+
+	rw_exit(&oplmsu_uinst->lock);
+	qenable(q);	/* Enable lower read queue */
+}
+
+#ifdef DEBUG
+/*
+ * Online trace
+ *
+ * Records one entry (queue, mblk, two-character operation tag, path
+ * number, message type/command and first data word) into the circular
+ * trace buffer oplmsu_ltrc_top..oplmsu_ltrc_tail under
+ * oplmsu_ltrc_lock.  For MSU_TRC_OPN/MSU_TRC_CLS the mp argument is
+ * not a real mblk but one of the MSU_NODE_* cookies, so it must never
+ * be dereferenced on that path.
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_trace(queue_t *q, mblk_t *mp, int op)
+{
+	struct iocblk *iocp;
+
+	if ((op < MSU_TRC_UI) || (op > MSU_TRC_CLS)) {
+		return;
+	}
+
+	mutex_enter(&oplmsu_ltrc_lock);
+
+	if (oplmsu_debug_mode & MSU_DPRINT_ON) {
+		oplmsu_cmn_msglog(mp, op);
+	}
+
+	/* Trace current counter */
+	drv_getparm(LBOLT, (void *)&oplmsu_ltrc_ccnt);
+
+	/* Advance the cursor, wrapping at the end of the ring */
+	if (oplmsu_ltrc_cur == oplmsu_ltrc_tail) {
+		oplmsu_ltrc_cur = oplmsu_ltrc_top;
+	} else {
+		oplmsu_ltrc_cur++;
+	}
+	oplmsu_ltrc_cur->q = q;
+	oplmsu_ltrc_cur->mp = mp;
+
+	switch (op) {
+	case MSU_TRC_UI :
+		oplmsu_ltrc_cur->op[0] = 'u';
+		oplmsu_ltrc_cur->op[1] = 'i';
+		break;
+
+	case MSU_TRC_UO :
+		oplmsu_ltrc_cur->op[0] = 'u';
+		oplmsu_ltrc_cur->op[1] = 'o';
+		break;
+
+	case MSU_TRC_LI :
+		oplmsu_ltrc_cur->op[0] = 'l';
+		oplmsu_ltrc_cur->op[1] = 'i';
+		break;
+
+	case MSU_TRC_LO :
+		oplmsu_ltrc_cur->op[0] = 'l';
+		oplmsu_ltrc_cur->op[1] = 'o';
+		break;
+
+	case MSU_TRC_OPN :
+		oplmsu_ltrc_cur->op[0] = 'o';
+		oplmsu_ltrc_cur->op[1] = 'p';
+		break;
+
+	case MSU_TRC_CLS :
+		oplmsu_ltrc_cur->op[0] = 'c';
+		oplmsu_ltrc_cur->op[1] = 'l';
+		break;
+	}
+
+	if ((op == MSU_TRC_LI) || (op == MSU_TRC_LO)) {
+		mutex_enter(&oplmsu_uinst->l_lock);
+		oplmsu_ltrc_cur->pathno = ((lpath_t *)q->q_ptr)->path_no;
+		mutex_exit(&oplmsu_uinst->l_lock);
+	} else {
+		oplmsu_ltrc_cur->pathno = 0;
+	}
+
+	if ((op == MSU_TRC_OPN) || (op == MSU_TRC_CLS)) {
+		oplmsu_ltrc_cur->msg_type = 0;
+		oplmsu_ltrc_cur->msg_cmd = 0;
+		oplmsu_ltrc_cur->data = 0;
+
+		/* mp is a node cookie here, not a message pointer */
+		switch ((ulong_t)mp) {
+		case MSU_NODE_USER :
+			oplmsu_ltrc_cur->data = MSU_TRC_USER;
+			break;
+
+		case MSU_NODE_META :
+			oplmsu_ltrc_cur->data = MSU_TRC_META;
+			break;
+		}
+		oplmsu_ltrc_cur->mp = NULL;
+	} else {
+		oplmsu_ltrc_cur->msg_type = mp->b_datap->db_type;
+
+		if ((mp->b_datap->db_type == M_IOCTL) ||
+		    (mp->b_datap->db_type == M_IOCACK) ||
+		    (mp->b_datap->db_type == M_IOCNAK)) {
+			/*
+			 * Only interpret b_rptr as a struct iocblk for
+			 * ioctl-family messages.  The previous code read
+			 * iocp->ioc_cmd unconditionally before this check,
+			 * over-reading short non-ioctl messages; the value
+			 * was discarded for those types anyway.
+			 */
+			iocp = (struct iocblk *)mp->b_rptr;
+			oplmsu_ltrc_cur->msg_cmd = iocp->ioc_cmd;
+
+			if (mp->b_cont != NULL) {
+				oplmsu_ltrc_cur->data =
+				    (ulong_t)mp->b_cont->b_rptr;
+			} else {
+				oplmsu_ltrc_cur->data = 0;
+			}
+		} else {
+			oplmsu_ltrc_cur->msg_cmd = 0;
+
+			if (mp->b_rptr == NULL) {
+				oplmsu_ltrc_cur->data = 0;
+			} else {
+				oplmsu_ltrc_cur->data = *(ulong_t *)mp->b_rptr;
+			}
+		}
+	}
+	mutex_exit(&oplmsu_ltrc_lock);
+}
+
+/*
+ * Display message log to console
+ *
+ * Hex-dumps every mblk in the chain mp to the console, 32 bytes per
+ * output line, grouped in 8-byte clusters separated by a space.
+ * direction selects the banner (0: upper in, 1: upper out,
+ * 2: lower in, 3: lower out); any other value prints nothing.
+ *
+ * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
+ * -. uinst_t->lock : P
+ * -. uinst_t->u_lock : P
+ * -. uinst_t->l_lock : P
+ * -. uinst_t->c_lock : P
+ */
+void
+oplmsu_cmn_msglog(mblk_t *mp, int direction)
+{
+	uchar_t *cur = NULL;
+	mblk_t *tmp_mp = NULL;
+	ulong_t len;
+	ulong_t line;
+	ulong_t col;
+	ulong_t row;
+	ulong_t count;
+	/* 32 bytes * 2 hex digits + 3 group separators + NUL = 68 <= 70 */
+	char buffer[70];
+	char *bufp;
+
+	if (mp == NULL) {
+		return;
+	}
+
+	switch (direction) {
+	case 0:
+		cmn_err(CE_NOTE, "!---------- Upper in --------");
+		break;
+
+	case 1:
+		cmn_err(CE_NOTE, "!---------- Upper out -------");
+		break;
+
+	case 2:
+		cmn_err(CE_NOTE, "!---------- Lower in --------");
+		break;
+
+	case 3:
+		cmn_err(CE_NOTE, "!---------- Lower out -------");
+		break;
+
+	default:
+		return;
+	}
+
+	for (tmp_mp = mp; tmp_mp; tmp_mp = tmp_mp->b_cont) {
+		cmn_err(CE_NOTE, "!db_type = 0x%02x", tmp_mp->b_datap->db_type);
+
+		len = tmp_mp->b_wptr - tmp_mp->b_rptr;
+		/* Number of 32-byte output lines, rounded up */
+		line = (len + 31) / 32;
+		cur = (uchar_t *)tmp_mp->b_rptr;
+		count = 0;
+
+		for (col = 0; col < line; col++) {
+			bufp = buffer;
+
+			for (row = 0; row < 32; row++) {
+				/* Space between each 8-byte group */
+				if (row != 0 && (row % 8) == 0) {
+					*bufp = ' ';
+					bufp++;
+				}
+				sprintf(bufp, "%02x", *cur);
+				bufp += 2;
+				cur++;
+				count++;
+
+				if (count >= len) {
+					break;
+				}
+			}
+			*bufp = '\0';
+			cmn_err(CE_NOTE, "!%s", buffer);
+
+			if (count >= len) {
+				break;
+			}
+		}
+	}
+}
+
+/*
+ * Print the /devices path of the newly-activated console path (DEBUG).
+ *
+ * ddi_pathname(9F) requires a destination buffer of at least
+ * MAXPATHLEN bytes; the previous 128-byte buffers could overflow for
+ * deep device trees, and the unbounded sprintf() could overrun
+ * pathname as well.  MAXPATHLEN comes from <sys/param.h>, which is
+ * pulled in via the DDI headers — confirm for this build environment.
+ */
+void
+oplmsu_cmn_prt_pathname(dev_info_t *dip)
+{
+	char pathname[MAXPATHLEN];
+	char wrkbuf[MAXPATHLEN];
+
+	(void) ddi_pathname(dip, wrkbuf);	/* NUL-terminates wrkbuf */
+	(void) snprintf(pathname, sizeof (pathname), "/devices%s:%c",
+	    wrkbuf, 'a' + ddi_get_instance(dip));
+
+	DBG_PRINT((CE_NOTE, "oplmsu: debug-info: "
+	    "Active path change to path => %s", pathname));
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_lrp.c b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_lrp.c
new file mode 100644
index 0000000000..28df173a08
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_lrp.c
@@ -0,0 +1,780 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/ksynch.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <sys/termio.h>
+#include <sys/ddi.h>
+#include <sys/file.h>
+#include <sys/disp.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/sunndi.h>
+#include <sys/prom_plat.h>
+#include <sys/oplmsu/oplmsu.h>
+#include <sys/oplmsu/oplmsu_proto.h>
+
+/*
+ * LOWER READ SERVICE PROCEDURE
+ */
+
+/*
+ * termios ioctl response received
+ *
+ * Lower-read-side handler for M_IOCACK/M_IOCNAK replies to the termios
+ * ioctls (TCSETS/TCSETSW/TCSETSF/TIOCMSET/TIOCSPPS/TIOCSWINSZ/
+ * TIOCSSOFTCAR) that this driver itself sent down a lower path.
+ * Depending on what started the sequence — a path switch (xoff/xon
+ * M_DATA or MSU_CMD_STOP) or a user termios ioctl being replicated to
+ * a path — it either sends the next termios ioctl in the sequence,
+ * completes the active-path change, fails over to a standby path, or
+ * acknowledges the user's ioctl.  Returns SUCCESS when mp was
+ * consumed, FAILURE when the caller must retry (allocation failure in
+ * the cmn helpers; mp is presumed preserved in that case — confirm).
+ */
+int
+oplmsu_lrioctl_termios(queue_t *lrq, mblk_t *mp)
+{
+	upath_t *upath, *altn_upath = NULL, *stp_upath = NULL;
+	lpath_t *lpath, *altn_lpath = NULL, *stp_lpath = NULL;
+	struct iocblk *iocp, *temp_iocp = NULL;
+	mblk_t *hndl_mp, *nmp = NULL, *fmp = NULL;
+	queue_t *dst_queue;
+	int term_ioctl, term_stat, sts;
+	int ack_flag, termio_flag, chkflag;
+	ulong_t trad_sts;
+
+	rw_enter(&oplmsu_uinst->lock, RW_READER);
+	iocp = (struct iocblk *)mp->b_rptr;
+
+	mutex_enter(&oplmsu_uinst->u_lock);
+	mutex_enter(&oplmsu_uinst->l_lock);
+	lpath = (lpath_t *)lrq->q_ptr;
+	hndl_mp = lpath->hndl_mp;
+
+	upath = oplmsu_search_upath_info(lpath->path_no);
+	trad_sts = upath->traditional_status;
+	mutex_exit(&oplmsu_uinst->l_lock);
+	mutex_exit(&oplmsu_uinst->u_lock);
+
+	/* The reply must match the termios command the path is waiting on */
+	if (((iocp->ioc_cmd == TCSETS) && (trad_sts == MSU_WTCS_ACK)) ||
+	    ((iocp->ioc_cmd == TCSETSW) && (trad_sts == MSU_WTCS_ACK)) ||
+	    ((iocp->ioc_cmd == TCSETSF) && (trad_sts == MSU_WTCS_ACK)) ||
+	    ((iocp->ioc_cmd == TIOCMSET) && (trad_sts == MSU_WTMS_ACK)) ||
+	    ((iocp->ioc_cmd == TIOCSPPS) && (trad_sts == MSU_WPPS_ACK)) ||
+	    ((iocp->ioc_cmd == TIOCSWINSZ) && (trad_sts == MSU_WWSZ_ACK)) ||
+	    ((iocp->ioc_cmd == TIOCSSOFTCAR) && (trad_sts == MSU_WCAR_ACK))) {
+		if (mp->b_datap->db_type == M_IOCACK) {
+			ack_flag = ACK_RES;
+		} else {
+			ack_flag = NAK_RES;
+		}
+	} else {
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+		cmn_err(CE_WARN, "oplmsu: lr-termios: "
+		    "Status of path is improper");
+		return (SUCCESS);
+	}
+
+	/* Map the path state to the next step of the termios sequence */
+	switch (trad_sts) {
+	case MSU_WTCS_ACK :
+		termio_flag = MSU_TIOS_TCSETS;
+		break;
+
+	case MSU_WTMS_ACK :
+		termio_flag = MSU_TIOS_MSET;
+		break;
+
+	case MSU_WPPS_ACK :
+		termio_flag = MSU_TIOS_PPS;
+		break;
+
+	case MSU_WWSZ_ACK :
+		termio_flag = MSU_TIOS_WINSZP;
+		break;
+
+	case MSU_WCAR_ACK :
+		termio_flag = MSU_TIOS_SOFTCAR;
+		break;
+
+	default :
+		termio_flag = MSU_TIOS_END;
+		break;
+	}
+
+	/* Classify the request that originally started this sequence */
+	if (hndl_mp == NULL) {
+		switch (trad_sts) {
+		case MSU_WTCS_ACK :	/* FALLTHRU */
+		case MSU_WTMS_ACK :	/* FALLTHRU */
+		case MSU_WPPS_ACK :	/* FALLTHRU */
+		case MSU_WWSZ_ACK :	/* FALLTHRU */
+		case MSU_WCAR_ACK :
+			chkflag = MSU_CMD_STOP;
+			break;
+
+		default :
+			chkflag = FAILURE;
+			break;
+		}
+	} else {
+		/* xoff/xon received */
+		if (hndl_mp->b_datap->db_type == M_DATA) {
+			chkflag = MSU_CMD_ACTIVE;
+		} else {	/* Normal termios */
+			temp_iocp = (struct iocblk *)hndl_mp->b_rptr;
+			chkflag = temp_iocp->ioc_cmd;
+		}
+	}
+
+	if ((chkflag == MSU_CMD_ACTIVE) || (chkflag == MSU_CMD_STOP)) {
+		if (ack_flag == ACK_RES) {	/* M_IOCACK received */
+			ctrl_t *ctrl;
+
+			if (oplmsu_cmn_prechg_termio(lrq, mp, MSU_READ_SIDE,
+			    termio_flag, &nmp, &term_stat) == FAILURE) {
+				rw_exit(&oplmsu_uinst->lock);
+				return (FAILURE);
+			}
+
+			OPLMSU_RWLOCK_UPGRADE();
+			mutex_enter(&oplmsu_uinst->u_lock);
+			if (term_stat != MSU_WPTH_CHG) {
+				/* More termios ioctls remain in the sequence */
+				upath->traditional_status = term_stat;
+				mutex_exit(&oplmsu_uinst->u_lock);
+				rw_exit(&oplmsu_uinst->lock);
+				freemsg(mp);
+
+				OPLMSU_TRACE(RD(lrq), nmp, MSU_TRC_LO);
+
+				/* Continue sending termios ioctls */
+				qreply(RD(lrq), nmp);
+				return (SUCCESS);
+			}
+			freemsg(mp);
+
+			/* Change status of new active path */
+			oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_ACTIVE,
+			    upath->status, MSU_ACTIVE);
+
+			mutex_enter(&oplmsu_uinst->l_lock);
+			lpath->uinst = oplmsu_uinst;
+			dst_queue = lpath->hndl_uqueue;
+
+			ctrl = oplmsu_uinst->user_ctrl;
+			if ((chkflag == MSU_CMD_ACTIVE) && (hndl_mp != NULL)) {
+				/* Put a message(M_DATA) on a queue */
+				if (ctrl != NULL) {
+					mutex_enter(&oplmsu_uinst->c_lock);
+					putq(RD(ctrl->queue), hndl_mp);
+					mutex_exit(&oplmsu_uinst->c_lock);
+				}
+			}
+
+			oplmsu_clear_ioctl_path(lpath);
+			stp_upath = lpath->src_upath;
+			lpath->src_upath = NULL;
+			lpath->status = MSU_EXT_NOTUSED;
+
+			/* Notify of the active path changing */
+			prom_opl_switch_console(upath->ser_devcb.lsb);
+
+			/* Send XON to notify active path */
+			(void) oplmsu_cmn_put_xoffxon(WR(lrq), MSU_XON_4);
+
+			stp_lpath = stp_upath->lpath;
+			stp_lpath->uinst = NULL;
+			oplmsu_clear_ioctl_path(stp_lpath);
+			stp_lpath->src_upath = NULL;
+			stp_lpath->status = MSU_EXT_NOTUSED;
+
+			/* Change status of stopped or old-active path */
+			if (chkflag == MSU_CMD_STOP) {
+				sts = MSU_PSTAT_STOP;
+				trad_sts = MSU_STOP;
+			} else {	/* == MSU_CMD_ACTIVE */
+				sts = MSU_PSTAT_STANDBY;
+				trad_sts = MSU_STANDBY;
+			}
+			oplmsu_cmn_set_upath_sts(stp_upath, sts,
+			    stp_upath->status, trad_sts);
+
+			/* Send XOFF to notify all standby paths */
+			oplmsu_cmn_putxoff_standby();
+			oplmsu_uinst->lower_queue = lrq;
+			oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+
+			/* Change active path of user node */
+			if (ctrl != NULL) {
+				queue_t *temp_queue;
+
+				mutex_enter(&oplmsu_uinst->c_lock);
+				temp_queue = WR(ctrl->queue);
+				mutex_exit(&oplmsu_uinst->c_lock);
+
+				/* Reschedule a queue for service */
+				enableok(temp_queue);
+
+				oplmsu_queue_flag = 0;
+				oplmsu_wcmn_high_qenable(temp_queue, RW_WRITER);
+			}
+			rw_exit(&oplmsu_uinst->lock);
+
+			if (nmp != NULL) {
+				freemsg(nmp);
+			}
+
+			/* Wake up oplmsu_config_stop */
+			mutex_enter(&oplmsu_uinst->l_lock);
+			if (stp_lpath->sw_flag) {
+				stp_lpath->sw_flag = 0;
+				cv_signal(&stp_lpath->sw_cv);
+			}
+			mutex_exit(&oplmsu_uinst->l_lock);
+			return (SUCCESS);
+		} else {	/* M_IOCNAK received */
+			mutex_enter(&oplmsu_uinst->u_lock);
+			mutex_enter(&oplmsu_uinst->l_lock);
+			if ((chkflag == MSU_CMD_ACTIVE) &&
+			    (lpath->hndl_uqueue == NULL)) {
+				/* Path switch failed; return path to standby */
+				oplmsu_clear_ioctl_path(lpath);
+				stp_upath = lpath->src_upath;
+				lpath->src_upath = NULL;
+				lpath->status = MSU_EXT_NOTUSED;
+				mutex_exit(&oplmsu_uinst->l_lock);
+
+				oplmsu_cmn_set_upath_sts(upath,
+				    MSU_PSTAT_STANDBY, upath->status,
+				    MSU_STANDBY);
+				mutex_exit(&oplmsu_uinst->u_lock);
+
+				if (hndl_mp != NULL) {
+					freemsg(hndl_mp);
+				}
+
+				OPLMSU_RWLOCK_UPGRADE();
+				mutex_enter(&oplmsu_uinst->u_lock);
+				oplmsu_uinst->inst_status =
+				    oplmsu_get_inst_status();
+				mutex_exit(&oplmsu_uinst->u_lock);
+				rw_exit(&oplmsu_uinst->lock);
+				return (SUCCESS);
+			} else if ((chkflag == MSU_CMD_STOP) &&
+			    (lpath->src_upath != NULL) &&
+			    (lpath->src_upath->lpath->sw_flag)) {
+				/* MSU_CMD_STOP for active path */
+
+				dst_queue = RD(lpath->hndl_uqueue);
+				stp_upath = lpath->src_upath;
+
+				/* Search alternate path from standby paths */
+				altn_upath = oplmsu_search_standby();
+				if (altn_upath == NULL) {
+					altn_upath = upath;
+				}
+
+				mutex_exit(&oplmsu_uinst->l_lock);
+				if (oplmsu_cmn_allocmb(lrq, mp, &fmp,
+				    sizeof (char), MSU_READ_SIDE) == FAILURE) {
+					mutex_exit(&oplmsu_uinst->u_lock);
+					rw_exit(&oplmsu_uinst->lock);
+					return (FAILURE);
+				}
+
+				if (oplmsu_cmn_prechg(lrq, mp, MSU_READ_SIDE,
+				    &nmp, &term_ioctl, &term_stat) == FAILURE) {
+					mutex_exit(&oplmsu_uinst->u_lock);
+					rw_exit(&oplmsu_uinst->lock);
+					freeb(fmp);
+					return (FAILURE);
+				}
+
+				altn_upath->traditional_status = term_stat;
+				altn_lpath = altn_upath->lpath;
+
+				mutex_enter(&oplmsu_uinst->l_lock);
+				altn_lpath->hndl_mp = hndl_mp;
+				altn_lpath->hndl_uqueue = dst_queue;
+				altn_lpath->src_upath = stp_upath;
+				altn_lpath->status = MSU_EXT_VOID;
+				dst_queue = RD(altn_lpath->lower_queue);
+
+				oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_FAIL,
+				    upath->status, MSU_FAIL);
+
+				oplmsu_clear_ioctl_path(lpath);
+				lpath->src_upath = NULL;
+				lpath->status = MSU_EXT_NOTUSED;
+				mutex_exit(&oplmsu_uinst->l_lock);
+				mutex_exit(&oplmsu_uinst->u_lock);
+
+				OPLMSU_RWLOCK_UPGRADE();
+				mutex_enter(&oplmsu_uinst->u_lock);
+				oplmsu_uinst->inst_status =
+				    oplmsu_get_inst_status();
+				mutex_exit(&oplmsu_uinst->u_lock);
+				rw_exit(&oplmsu_uinst->lock);
+				freemsg(mp);
+				oplmsu_cmn_set_mflush(fmp);
+
+				OPLMSU_TRACE(dst_queue, fmp, MSU_TRC_LO);
+				qreply(dst_queue, fmp);
+
+				OPLMSU_TRACE(dst_queue, nmp, MSU_TRC_LO);
+				qreply(dst_queue, nmp);
+				return (SUCCESS);
+			}
+		}
+	} else if ((chkflag == TCSETS) || (chkflag == TCSETSW) ||
+	    (chkflag == TCSETSF) || (chkflag == TIOCMSET) ||
+	    (chkflag == TIOCSPPS) || (chkflag == TIOCSWINSZ) ||
+	    (chkflag == TIOCSSOFTCAR)) {
+		/* A user termios ioctl replicated to this path completed */
+		mutex_enter(&oplmsu_uinst->u_lock);
+		mutex_enter(&oplmsu_uinst->l_lock);
+
+		if ((ack_flag == ACK_RES) &&
+		    (lpath->hndl_uqueue != NULL)) {	/* M_IOCACK received */
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			if (oplmsu_cmn_copymb(lrq, mp, &nmp, hndl_mp,
+			    MSU_READ_SIDE) == FAILURE) {
+				rw_exit(&oplmsu_uinst->lock);
+				return (FAILURE);
+			}
+
+			/* Cache the last accepted settings for path switches */
+			OPLMSU_RWLOCK_UPGRADE();
+			switch (chkflag) {
+			case TCSETS :		/* FALLTHRU */
+			case TCSETSW :		/* FALLTHRU */
+			case TCSETSF :
+				if (oplmsu_uinst->tcsets_p != NULL) {
+					freemsg(oplmsu_uinst->tcsets_p);
+				}
+				oplmsu_uinst->tcsets_p = nmp;
+				break;
+
+			case TIOCMSET :
+				if (oplmsu_uinst->tiocmset_p != NULL) {
+					freemsg(oplmsu_uinst->tiocmset_p);
+				}
+				oplmsu_uinst->tiocmset_p = nmp;
+				break;
+
+			case TIOCSPPS :
+				if (oplmsu_uinst->tiocspps_p != NULL) {
+					freemsg(oplmsu_uinst->tiocspps_p);
+				}
+				oplmsu_uinst->tiocspps_p = nmp;
+				break;
+
+			case TIOCSWINSZ :
+				if (oplmsu_uinst->tiocswinsz_p != NULL) {
+					freemsg(oplmsu_uinst->tiocswinsz_p);
+				}
+				oplmsu_uinst->tiocswinsz_p = nmp;
+				break;
+
+			case TIOCSSOFTCAR :
+				if (oplmsu_uinst->tiocssoftcar_p != NULL) {
+					freemsg(oplmsu_uinst->tiocssoftcar_p);
+				}
+				oplmsu_uinst->tiocssoftcar_p = nmp;
+				break;
+			}
+
+			mutex_enter(&oplmsu_uinst->u_lock);
+			mutex_enter(&oplmsu_uinst->l_lock);
+			upath->traditional_status = lpath->status;
+			nmp = lpath->hndl_mp;
+			nmp->b_datap->db_type = M_IOCACK;
+			dst_queue = RD(lpath->hndl_uqueue);
+			bcopy(mp->b_rptr, nmp->b_rptr, sizeof (struct iocblk));
+
+			oplmsu_clear_ioctl_path(lpath);
+			lpath->src_upath = NULL;
+			lpath->status = MSU_EXT_NOTUSED;
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			freemsg(mp);
+			putq(dst_queue, nmp);
+
+			/* Check sleep flag and wake up thread */
+			oplmsu_cmn_wakeup(dst_queue);
+			rw_exit(&oplmsu_uinst->lock);
+			return (SUCCESS);
+		} else if ((ack_flag == NAK_RES) &&
+		    (lpath->hndl_uqueue != NULL)) {	/* M_IOCNAK received */
+			upath->traditional_status = lpath->status;
+
+			nmp = lpath->hndl_mp;
+			nmp->b_datap->db_type = M_IOCNAK;
+			dst_queue = RD(lpath->hndl_uqueue);
+
+			oplmsu_clear_ioctl_path(lpath);
+			lpath->src_upath = NULL;
+			lpath->status = MSU_EXT_NOTUSED;
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			freemsg(mp);
+			putq(dst_queue, nmp);
+
+			/* Check sleep flag and wake up thread */
+			oplmsu_cmn_wakeup(dst_queue);
+			rw_exit(&oplmsu_uinst->lock);
+			return (SUCCESS);
+		}
+	}
+
+	/* Fallback: resynchronize traditional_status with upath->status */
+	mutex_enter(&oplmsu_uinst->u_lock);
+	switch (upath->status) {
+	case MSU_PSTAT_FAIL :
+		upath->traditional_status = MSU_FAIL;
+		break;
+
+	case MSU_PSTAT_STOP :
+		upath->traditional_status = MSU_STOP;
+		break;
+
+	case MSU_PSTAT_STANDBY :
+		upath->traditional_status = MSU_STANDBY;
+		break;
+
+	case MSU_PSTAT_ACTIVE :
+		upath->traditional_status = MSU_ACTIVE;
+		break;
+	}
+
+	mutex_enter(&oplmsu_uinst->l_lock);
+	oplmsu_clear_ioctl_path(lpath);
+	mutex_exit(&oplmsu_uinst->l_lock);
+	mutex_exit(&oplmsu_uinst->u_lock);
+	rw_exit(&oplmsu_uinst->lock);
+	freemsg(mp);
+	return (SUCCESS);
+}
+
+/*
+ * M_ERROR or M_HANGUP response received
+ *
+ * Handles a fatal indication from a lower path.  Paths still in a
+ * transitional or already-failed state just drop the message; idle
+ * paths are marked MSU_FAIL.  If the failing path is the ACTIVE one,
+ * a standby path is selected and a flush plus the termios replication
+ * sequence is sent down it to make it the new active path; with no
+ * standby available the instance is left with no lower queue.
+ * Returns FAILURE only when the fail-over could not allocate its
+ * messages (caller retries), SUCCESS otherwise.
+ */
+int
+oplmsu_lrmsg_error(queue_t *lrq, mblk_t *mp)
+{
+	upath_t *upath, *altn_upath = NULL;
+	lpath_t *lpath, *altn_lpath = NULL;
+	mblk_t *nmp = NULL, *fmp = NULL;
+	queue_t *dst_queue = NULL;
+	ctrl_t *ctrl;
+	int term_stat, term_ioctl;
+
+	rw_enter(&oplmsu_uinst->lock, RW_READER);
+	mutex_enter(&oplmsu_uinst->c_lock);
+	ctrl = oplmsu_uinst->user_ctrl;
+	if (ctrl != NULL) {
+		dst_queue = RD(ctrl->queue);
+	}
+	mutex_exit(&oplmsu_uinst->c_lock);
+
+	mutex_enter(&oplmsu_uinst->u_lock);
+	mutex_enter(&oplmsu_uinst->l_lock);
+	lpath = (lpath_t *)lrq->q_ptr;
+	upath = oplmsu_search_upath_info(lpath->path_no);
+
+	/* Transitional states: ignore the error indication */
+	if ((lpath->status == MSU_LINK_NU) ||
+	    (lpath->status == MSU_SETID_NU) ||
+	    (upath->traditional_status == MSU_WSTR_ACK) ||
+	    (upath->traditional_status == MSU_WTCS_ACK) ||
+	    (upath->traditional_status == MSU_WTMS_ACK) ||
+	    (upath->traditional_status == MSU_WPPS_ACK) ||
+	    (upath->traditional_status == MSU_WWSZ_ACK) ||
+	    (upath->traditional_status == MSU_WCAR_ACK) ||
+	    (upath->traditional_status == MSU_WSTP_ACK) ||
+	    (upath->traditional_status == MSU_WPTH_CHG)) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+	} else if ((upath->traditional_status == MSU_MAKE_INST) ||
+	    (upath->traditional_status == MSU_STOP) ||
+	    (upath->traditional_status == MSU_STANDBY) ||
+	    (upath->traditional_status == MSU_SETID) ||
+	    (upath->traditional_status == MSU_LINK)) {
+		/* Idle path: mark it failed and drop the message */
+		oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_FAIL, upath->status,
+		    MSU_FAIL);
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+	} else if (upath->traditional_status == MSU_FAIL) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+	} else if (upath->traditional_status == MSU_ACTIVE) {
+		/* Active path failed: try to fail over to a standby path */
+		altn_upath = oplmsu_search_standby();
+		if (altn_upath == NULL) {
+			/* No standby left: console has no lower queue now */
+			oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_FAIL,
+			    upath->status, MSU_FAIL);
+
+			oplmsu_clear_ioctl_path(lpath);
+			lpath->src_upath = NULL;
+			lpath->status = MSU_EXT_NOTUSED;
+			lpath->uinst = NULL;
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+
+			OPLMSU_RWLOCK_UPGRADE();
+			oplmsu_uinst->lower_queue = NULL;
+			rw_exit(&oplmsu_uinst->lock);
+			freemsg(mp);
+			return (SUCCESS);
+		}
+
+		mutex_exit(&oplmsu_uinst->l_lock);
+		if (oplmsu_cmn_allocmb(lrq, mp, &fmp, sizeof (char),
+		    MSU_READ_SIDE) == FAILURE) {
+			mutex_exit(&oplmsu_uinst->u_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			return (FAILURE);
+		}
+
+		if (oplmsu_cmn_prechg(lrq, mp, MSU_READ_SIDE, &nmp, &term_ioctl,
+		    &term_stat) == FAILURE) {
+			mutex_exit(&oplmsu_uinst->u_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			freeb(fmp);
+			return (FAILURE);
+		}
+
+		oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_FAIL,
+		    upath->status, MSU_FAIL);
+
+		mutex_enter(&oplmsu_uinst->l_lock);
+		lpath->uinst = NULL;
+
+		/* Stage the standby path for the termios change sequence */
+		altn_upath->traditional_status = term_stat;
+		altn_lpath = altn_upath->lpath;
+
+		altn_lpath->hndl_mp = NULL;
+		altn_lpath->hndl_uqueue = NULL;
+		altn_lpath->src_upath = upath;
+		altn_lpath->status = MSU_EXT_VOID;
+		dst_queue = RD(altn_lpath->lower_queue);
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+
+		OPLMSU_RWLOCK_UPGRADE();
+		oplmsu_uinst->lower_queue = NULL;
+		oplmsu_cmn_set_mflush(fmp);
+
+		if (ctrl != NULL) {
+			/* Hold user writes until the path switch completes */
+			mutex_enter(&oplmsu_uinst->c_lock);
+			noenable(WR(ctrl->queue));
+			mutex_exit(&oplmsu_uinst->c_lock);
+
+			oplmsu_queue_flag = 1;
+		}
+
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+
+		OPLMSU_TRACE(dst_queue, fmp, MSU_TRC_LO);
+		qreply(dst_queue, fmp);
+		OPLMSU_TRACE(dst_queue, nmp, MSU_TRC_LO);
+		qreply(dst_queue, nmp);
+	}
+	return (SUCCESS);
+}
+
+/*
+ * M_DATA[xoff/xon] was received from serial port
+ *
+ * An xoff/xon byte arriving on a lower read queue requests a path
+ * switch to that path.  Data from the current active path is passed
+ * through; data from a path that is not STANDBY is dropped.  For a
+ * standby path, either the cached termios settings are replayed down
+ * it first (waiting for their ACKs), or — when none are cached — the
+ * path is made active immediately.  Returns FAILURE when the switch
+ * must be retried (message put back on lrq), SUCCESS otherwise.
+ */
+int
+oplmsu_lrdata_xoffxon(queue_t *lrq, mblk_t *mp)
+{
+	upath_t *upath, *stp_upath = NULL;
+	lpath_t *lpath, *stp_lpath = NULL;
+	mblk_t *nmp = NULL, *fmp = NULL;
+	ctrl_t *ctrl;
+	int term_stat, term_ioctl;
+
+	rw_enter(&oplmsu_uinst->lock, RW_READER);
+	mutex_enter(&oplmsu_uinst->u_lock);
+	mutex_enter(&oplmsu_uinst->l_lock);
+
+	if (oplmsu_uinst->lower_queue != NULL) {
+		/* Get lower path of active status */
+		stp_lpath = (lpath_t *)oplmsu_uinst->lower_queue->q_ptr;
+		if (stp_lpath != NULL) {
+			stp_upath =
+			    oplmsu_search_upath_info(stp_lpath->path_no);
+		}
+	}
+
+	lpath = (lpath_t *)lrq->q_ptr;
+	upath = oplmsu_search_upath_info(lpath->path_no);
+
+	if ((stp_upath != NULL) && (stp_upath != upath)) {
+		/* Another switch is still settling; retry this one later */
+		if ((stp_upath->status != MSU_PSTAT_ACTIVE) ||
+		    (stp_upath->traditional_status != MSU_ACTIVE)) {
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			putbq(lrq, mp);
+			return (FAILURE);
+		}
+	}
+
+	if ((upath->status == MSU_PSTAT_ACTIVE) &&
+	    ((upath->traditional_status == MSU_ACTIVE) ||
+	    (upath->traditional_status == MSU_WTCS_ACK) ||
+	    (upath->traditional_status == MSU_WTMS_ACK) ||
+	    (upath->traditional_status == MSU_WPPS_ACK) ||
+	    (upath->traditional_status == MSU_WWSZ_ACK) ||
+	    (upath->traditional_status == MSU_WCAR_ACK))) {
+		/* Already the active path: pass the data through */
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		oplmsu_rcmn_through_hndl(lrq, mp, MSU_NORM);
+		rw_exit(&oplmsu_uinst->lock);
+		return (SUCCESS);
+	} else if ((upath->status != MSU_PSTAT_STANDBY) ||
+	    (upath->traditional_status != MSU_STANDBY)) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		freemsg(mp);
+		cmn_err(CE_WARN, "oplmsu: lr-xoffxon: "
+		    "Can't change to specified path");
+		return (SUCCESS);
+	}
+	mutex_exit(&oplmsu_uinst->l_lock);
+
+	if (oplmsu_cmn_allocmb(lrq, mp, &fmp, sizeof (char), MSU_READ_SIDE) ==
+	    FAILURE) {
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		return (FAILURE);
+	}
+
+	if (oplmsu_cmn_prechg(lrq, mp, MSU_READ_SIDE, &nmp, &term_ioctl,
+	    &term_stat) == FAILURE) {
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		freeb(fmp);
+		return (FAILURE);
+	}
+
+	oplmsu_cmn_set_mflush(fmp);
+	upath->traditional_status = term_stat;
+
+	mutex_enter(&oplmsu_uinst->l_lock);
+	lpath->hndl_mp = mp;
+	lpath->hndl_uqueue = NULL;
+	lpath->src_upath = stp_upath;
+	lpath->status = MSU_EXT_VOID;
+
+	mutex_enter(&oplmsu_uinst->c_lock);
+	ctrl = oplmsu_uinst->user_ctrl;
+	if (term_stat != MSU_WPTH_CHG) {
+		/*
+		 * Send termios to new active path and wait response
+		 */
+		if (ctrl != NULL) {
+			noenable(WR(ctrl->queue));
+		}
+		mutex_exit(&oplmsu_uinst->c_lock);
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+
+		OPLMSU_TRACE(RD(lrq), fmp, MSU_TRC_LO);
+		qreply(RD(lrq), fmp);
+		OPLMSU_TRACE(RD(lrq), nmp, MSU_TRC_LO);
+		qreply(RD(lrq), nmp);
+	} else {
+		/*
+		 * No termios messages are received. Change active path.
+		 */
+
+		oplmsu_cmn_set_upath_sts(upath, MSU_PSTAT_ACTIVE, upath->status,
+		    MSU_ACTIVE);
+
+		lpath->uinst = oplmsu_uinst;
+		lpath->src_upath = NULL;
+		lpath->status = MSU_EXT_NOTUSED;
+
+		/* Notify of the active path changing */
+		prom_opl_switch_console(upath->ser_devcb.lsb);
+
+		putq(WR(lrq), fmp);
+
+		/* Send XON to notify active path */
+		(void) oplmsu_cmn_put_xoffxon(WR(lrq), MSU_XON_4);
+
+		if (lpath->hndl_mp != NULL) {
+			/* Put a message(M_DATA) on a queue */
+			if (ctrl != NULL) {
+				putq(RD(ctrl->queue), lpath->hndl_mp);
+			}
+		}
+
+		oplmsu_clear_ioctl_path(lpath);
+
+		/*
+		 * NOTE(review): noenable() here looks inconsistent with the
+		 * enableok() on the same queue a few lines below — confirm
+		 * whether this temporary hold-off is intentional.
+		 */
+		if (ctrl != NULL) {
+			noenable(WR(ctrl->queue));
+		}
+
+		if ((stp_upath != NULL) && (stp_lpath != NULL)) {
+			/* Change the status of stop path */
+			oplmsu_cmn_set_upath_sts(stp_upath, MSU_PSTAT_STANDBY,
+			    stp_upath->status, MSU_STANDBY);
+
+			oplmsu_clear_ioctl_path(stp_lpath);
+			stp_lpath->uinst = NULL;
+			stp_lpath->src_upath = NULL;
+			stp_lpath->status = MSU_EXT_NOTUSED;
+		}
+#ifdef DEBUG
+		oplmsu_cmn_prt_pathname(upath->ser_devcb.dip);
+#endif
+		/* Send XOFF to notify all standby paths */
+		oplmsu_cmn_putxoff_standby();
+		mutex_exit(&oplmsu_uinst->c_lock);
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+
+		OPLMSU_RWLOCK_UPGRADE();
+		mutex_enter(&oplmsu_uinst->u_lock);
+		oplmsu_uinst->lower_queue = lrq;
+		oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+		mutex_exit(&oplmsu_uinst->u_lock);
+
+		if (ctrl != NULL) {
+			queue_t *temp_queue;
+
+			mutex_enter(&oplmsu_uinst->c_lock);
+			temp_queue = WR(ctrl->queue);
+			mutex_exit(&oplmsu_uinst->c_lock);
+
+			/* Reschedule a queue for service */
+			enableok(temp_queue);
+
+			oplmsu_queue_flag = 0;
+			oplmsu_wcmn_high_qenable(temp_queue, RW_WRITER);
+		}
+		rw_exit(&oplmsu_uinst->lock);
+	}
+	return (SUCCESS);
+}
diff --git a/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_uwp.c b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_uwp.c
new file mode 100644
index 0000000000..b64a1e8121
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplmsu/oplmsu_ioctl_uwp.c
@@ -0,0 +1,471 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/errno.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/ksynch.h>
+#include <sys/stream.h>
+#include <sys/stropts.h>
+#include <sys/termio.h>
+#include <sys/ddi.h>
+#include <sys/file.h>
+#include <sys/disp.h>
+#include <sys/sunddi.h>
+#include <sys/sunldi.h>
+#include <sys/sunndi.h>
+#include <sys/oplmsu/oplmsu.h>
+#include <sys/oplmsu/oplmsu_proto.h>
+
+/*
+ * UPPER WRITE SERVICE PROCEDURE
+ */
+
+/*
+ * I_PLINK ioctl command received.
+ *
+ * Link a serial driver stream under the multiplexer.  Allocates and
+ * initializes a lower path table (lpath_t) for the stream described by
+ * the struct linkblk carried in mp->b_cont, cross-links it with the new
+ * lower queue pair, and acks the ioctl.
+ *
+ *   uwq - upper write queue; must belong to the meta control node
+ *   mp  - M_IOCTL message (struct linkblk in b_cont)
+ *
+ * Returns SUCCESS, or an errno value on failure (the ioctl is also
+ * acked/nak'ed with that value via oplmsu_iocack).
+ */
+int
+oplmsu_uwioctl_iplink(queue_t *uwq, mblk_t *mp)
+{
+	struct linkblk *lbp;
+	lpath_t *lpath;
+	int ncode;
+
+	if (mp == NULL) {
+		return (EINVAL);
+	}
+
+	/*
+	 * NOTE(review): mp->b_cont is dereferenced here without a NULL
+	 * check; presumably the stream head always supplies b_cont for
+	 * I_PLINK -- confirm.
+	 */
+	if ((mp->b_cont->b_wptr - mp->b_cont->b_rptr) <
+	    sizeof (struct linkblk)) {
+		cmn_err(CE_WARN, "oplmsu: uw-iplink: Invalid data length");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	lbp = (struct linkblk *)mp->b_cont->b_rptr;
+	rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+	/*
+	 * Check whether this is called by super-user privilege.
+	 * uwq => Queue of meta control node
+	 */
+
+	ncode = oplmsu_wcmn_chknode(uwq, MSU_NODE_META, mp);
+	if (ncode != SUCCESS) {
+		rw_exit(&oplmsu_uinst->lock);
+		oplmsu_iocack(uwq, mp, ncode);
+		return (ncode);
+	}
+
+	/* Allocate kernel memory for lpath_t */
+	lpath = (lpath_t *)kmem_zalloc(sizeof (lpath_t), KM_NOSLEEP);
+	if (lpath == NULL) {
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-iplink: "
+		    "Failed to allocate kernel memory");
+		oplmsu_iocack(uwq, mp, ENOMEM);
+		return (ENOMEM);
+	}
+
+	/*
+	 * Initialize members of lpath_t
+	 */
+
+	lpath->rbuftbl =
+	    (struct buf_tbl *)kmem_zalloc(sizeof (struct buf_tbl), KM_NOSLEEP);
+	if (lpath->rbuftbl == NULL) {
+		rw_exit(&oplmsu_uinst->lock);
+		kmem_free(lpath, sizeof (lpath_t));
+		cmn_err(CE_WARN, "oplmsu: uw-iplink: "
+		    "Failed to allocate kernel memory");
+		oplmsu_iocack(uwq, mp, ENOMEM);
+		return (ENOMEM);
+	}
+
+	cv_init(&lpath->sw_cv, "oplmsu lpath condvar", CV_DRIVER, NULL);
+	lpath->src_upath = NULL;
+	lpath->status = MSU_EXT_NOTUSED;
+	lpath->lower_queue = lbp->l_qbot;	/* Set lower queue pointer */
+	lpath->link_id = lbp->l_index;		/* Set Link-ID */
+	lpath->path_no = UNDEFINED;		/* Set initial path number */
+	lpath->abt_char = oplmsu_uinst->abts;	/* Set abort character seq */
+
+	/* Point both halves of the lower queue pair at the new lpath_t */
+	WR(lpath->lower_queue)->q_ptr = lpath;
+	RD(lpath->lower_queue)->q_ptr = lpath;
+
+	oplmsu_link_lpath(lpath);	/* Link lpath_t */
+	rw_exit(&oplmsu_uinst->lock);
+	oplmsu_iocack(uwq, mp, 0);
+	return (SUCCESS);
+}
+
+/*
+ * I_PUNLINK ioctl command received.
+ *
+ * Unlink a lower stream from the multiplexer: locate the lpath_t that
+ * owns the queue pair named by the linkblk, validate the upper path
+ * state, free any queued high-priority messages and the receive buffer
+ * table, then tear down the lpath_t and ack the ioctl.
+ *
+ *   uwq - upper write queue; must belong to the meta control node
+ *   mp  - M_IOCTL message (struct linkblk in b_cont)
+ *
+ * Returns SUCCESS, or an errno value on failure (the ioctl is also
+ * nak'ed with that value).
+ */
+int
+oplmsu_uwioctl_ipunlink(queue_t *uwq, mblk_t *mp)
+{
+	struct linkblk *lbp;
+	upath_t *upath;
+	lpath_t *lpath;
+	mblk_t *hmp = NULL, *next_hmp = NULL;
+	bufcall_id_t rbuf_id;
+	timeout_id_t rtout_id;
+	int ncode;
+	int use_flag;
+
+	if (mp == NULL) {
+		return (EINVAL);
+	}
+
+	/*
+	 * NOTE(review): a short linkblk is nak'ed with ENOSR here, while
+	 * the I_PLINK handler uses EINVAL for the same condition --
+	 * confirm the asymmetry is intentional.
+	 */
+	if ((mp->b_cont->b_wptr - mp->b_cont->b_rptr) <
+	    sizeof (struct linkblk)) {
+		cmn_err(CE_WARN, "oplmsu: uw-ipunlink: Invalid data length");
+		oplmsu_iocack(uwq, mp, ENOSR);
+		return (ENOSR);
+	}
+
+	lbp = (struct linkblk *)mp->b_cont->b_rptr;
+	rw_enter(&oplmsu_uinst->lock, RW_WRITER);
+
+	/*
+	 * Check whether this is called by super-user privilege.
+	 * uwq => Queue of meta control node
+	 */
+
+	ncode = oplmsu_wcmn_chknode(uwq, MSU_NODE_META, mp);
+	if (ncode != SUCCESS) {
+		rw_exit(&oplmsu_uinst->lock);
+		oplmsu_iocack(uwq, mp, ncode);
+		return (ncode);
+	}
+
+	mutex_enter(&oplmsu_uinst->u_lock);
+	mutex_enter(&oplmsu_uinst->l_lock);
+
+	/*
+	 * Search for a corresponding lower path information table to
+	 * lbp->l_qbot from the lower queue address.
+	 */
+
+	lpath = oplmsu_uinst->first_lpath;
+	while (lpath) {
+		if ((lpath->lower_queue == RD(lbp->l_qbot)) ||
+		    (lpath->lower_queue == WR(lbp->l_qbot))) {
+			break;
+		}
+		lpath = lpath->l_next;
+	}
+
+	if (lpath == NULL) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-ipunlink: "
+		    "Proper lpath_t doesn't find");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	/* lpath_t come into the busy status */
+	use_flag = oplmsu_set_ioctl_path(lpath, uwq, NULL);
+	if (use_flag == BUSY) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-ipunlink: "
+		    "Other processing is using lower path");
+		oplmsu_iocack(uwq, mp, EBUSY);
+		return (EBUSY);
+	}
+
+	/* upath_t is retrieved by using the path number */
+	upath = oplmsu_search_upath_info(lpath->path_no);
+	if (upath != NULL) {	/* When the upath_t exists */
+		switch (upath->status) {
+		case MSU_PSTAT_STOP :	/* FALLTHRU */
+		case MSU_PSTAT_FAIL :
+			/*
+			 * When traditional_status is MSU_SETID, the path
+			 * status is changed into the state of disconnect.
+			 */
+
+			if (upath->traditional_status == MSU_SETID) {
+				oplmsu_cmn_set_upath_sts(upath,
+				    MSU_PSTAT_DISCON, upath->status,
+				    MSU_DISCON);
+				upath->lpath = NULL;
+				break;
+			}
+
+			/*
+			 * When traditional_status isn't MSU_SETID,
+			 * the error is reported.  (Deliberate fall
+			 * through to the default case.)
+			 */
+
+		default :
+			/*
+			 * When upath->status isn't MSU_PSTAT_STOP or
+			 * MSU_PSTAT_FAIL, the error is reported.
+			 */
+
+			oplmsu_clear_ioctl_path(lpath);
+			mutex_exit(&oplmsu_uinst->l_lock);
+			cmn_err(CE_WARN, "oplmsu: uw-ipunlink: "
+			    "trad_status = %lx", upath->traditional_status);
+			cmn_err(CE_WARN, "oplmsu: uw-ipunlink: "
+			    "status = %d", upath->status);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			oplmsu_iocack(uwq, mp, EINVAL);
+			return (EINVAL);
+		}
+	} else {
+		/*
+		 * This pattern is no upper info table before config_add or
+		 * after config_del.
+		 */
+
+		/*
+		 * When the upper path table doesn't exist, path is deleted
+		 * with config_del/config_add ioctl processed.
+		 */
+
+		if ((lpath->status != MSU_LINK_NU) &&
+		    (lpath->status != MSU_SETID_NU)) {
+			oplmsu_clear_ioctl_path(lpath);
+			mutex_exit(&oplmsu_uinst->l_lock);
+			mutex_exit(&oplmsu_uinst->u_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			oplmsu_iocack(uwq, mp, EINVAL);
+			return (EINVAL);
+		}
+	}
+
+	oplmsu_uinst->inst_status = oplmsu_get_inst_status();
+	oplmsu_clear_ioctl_path(lpath);
+
+	/* Free high priority message */
+	if (lpath->first_lpri_hi != NULL) {
+		cmn_err(CE_WARN, "oplmsu: uw-ipunlink: "
+		    "Free high-priority message by unlinking lower path");
+
+		for (hmp = lpath->first_lpri_hi; hmp; ) {
+			next_hmp = hmp->b_next;
+			freemsg(hmp);
+			hmp = next_hmp;
+		}
+		lpath->first_lpri_hi = NULL;
+		lpath->last_lpri_hi = NULL;
+	}
+
+	/* Snapshot pending callback ids before the lpath_t is freed */
+	rbuf_id = lpath->rbuf_id;
+	rtout_id = lpath->rtout_id;
+	lpath->rbuf_id = 0;
+	lpath->rtout_id = 0;
+
+	kmem_free(lpath->rbuftbl, sizeof (struct buf_tbl));
+	lpath->rbuftbl = NULL;
+	cv_destroy(&lpath->sw_cv);
+	oplmsu_unlink_lpath(lpath);
+	kmem_free(lpath, sizeof (lpath_t));
+
+	mutex_exit(&oplmsu_uinst->l_lock);
+	mutex_exit(&oplmsu_uinst->u_lock);
+	rw_exit(&oplmsu_uinst->lock);
+
+	/*
+	 * Cancel the pending bufcall/timeout only after all locks have
+	 * been dropped; unbufcall()/untimeout() can wait for the callback
+	 * to complete, and the callback presumably takes these locks --
+	 * TODO confirm.
+	 */
+	if (rbuf_id != 0) {
+		unbufcall(rbuf_id);
+	}
+
+	if (rtout_id != 0) {
+		untimeout(rtout_id);
+	}
+	oplmsu_iocack(uwq, mp, 0);
+	return (SUCCESS);
+}
+
+/*
+ * termio ioctl command received.
+ *
+ * Forward a terminal "set" ioctl (TCSETS/TCSETSW/TCSETSF, TIOCMSET,
+ * TIOCSPPS, TIOCSWINSZ, TIOCSSOFTCAR) from a user control node down the
+ * currently active lower path.  A copy of the message is queued on the
+ * active lower write queue and the expected ack state is recorded in
+ * the upper path table so the read side can match the driver's reply.
+ *
+ * NOTE(review): the failure paths mix return conventions -- some return
+ * FAILURE after acking EINVAL/ENODEV (b_cont NULL, no active path),
+ * others return the errno itself.  Confirm callers treat both alike.
+ */
+int
+oplmsu_uwioctl_termios(queue_t *uwq, mblk_t *mp)
+{
+	struct iocblk *iocp = NULL;
+	queue_t *dst_queue;
+	upath_t *upath = NULL;
+	lpath_t *lpath = NULL;
+	mblk_t *nmp = NULL;
+	ctrl_t *ctrl;
+	int term_stat;
+	int use_flag;
+
+	if (mp == NULL) {
+		return (EINVAL);
+	}
+
+	if (mp->b_cont == NULL) {
+		cmn_err(CE_WARN, "oplmsu: uw-termios: "
+		    "b_cont data block is NULL");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (FAILURE);
+	}
+
+	if (mp->b_cont->b_rptr == NULL) {
+		cmn_err(CE_WARN, "oplmsu: uw-termios: "
+		    "b_rptr data pointer is NULL");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	/* The iocblk lives in the first message block, not in b_cont */
+	iocp = (struct iocblk *)mp->b_rptr;
+	rw_enter(&oplmsu_uinst->lock, RW_READER);
+
+	/*
+	 * Check control node type
+	 * uwq : Queue of user control node
+	 */
+
+	mutex_enter(&oplmsu_uinst->c_lock);
+	ctrl = (ctrl_t *)uwq->q_ptr;
+	if (ctrl != NULL) {
+		if (ctrl->node_type != MSU_NODE_USER) {
+			mutex_exit(&oplmsu_uinst->c_lock);
+			rw_exit(&oplmsu_uinst->lock);
+			cmn_err(CE_WARN, "oplmsu: uw-termios: "
+			    "ctrl node type = %d", ctrl->node_type);
+			oplmsu_iocack(uwq, mp, EINVAL);
+			return (EINVAL);
+		}
+	}
+	mutex_exit(&oplmsu_uinst->c_lock);
+
+	/* Map the ioctl command to the ack state to wait for */
+	switch (iocp->ioc_cmd) {
+	case TCSETS :		/* FALLTHRU */
+	case TCSETSW :		/* FALLTHRU */
+	case TCSETSF :
+		term_stat = MSU_WTCS_ACK;
+		break;
+
+	case TIOCMSET :
+		term_stat = MSU_WTMS_ACK;
+		break;
+
+	case TIOCSPPS :
+		term_stat = MSU_WPPS_ACK;
+		break;
+
+	case TIOCSWINSZ :
+		term_stat = MSU_WWSZ_ACK;
+		break;
+
+	case TIOCSSOFTCAR :
+		term_stat = MSU_WCAR_ACK;
+		break;
+
+	default :
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-termios: ioctl mismatch");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	if (oplmsu_uinst->lower_queue == NULL) {
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "!oplmsu: uw-termios: "
+		    "Active path doesn't exist");
+		oplmsu_iocack(uwq, mp, ENODEV);
+		return (FAILURE);
+	}
+
+	lpath = oplmsu_uinst->lower_queue->q_ptr;
+	if (lpath == NULL) {
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-termios: "
+		    "Proper lpath_t doesn't exist");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	/* Duplicate the message so the original can be re-queued on busy */
+	if (oplmsu_cmn_copymb(uwq, mp, &nmp, mp, MSU_WRITE_SIDE) == FAILURE) {
+		rw_exit(&oplmsu_uinst->lock);
+		return (FAILURE);
+	}
+
+	mutex_enter(&oplmsu_uinst->u_lock);
+	mutex_enter(&oplmsu_uinst->l_lock);
+
+	upath = oplmsu_search_upath_info(lpath->path_no);
+	if (upath == NULL) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+		cmn_err(CE_WARN, "oplmsu: uw-termios: "
+		    "Proper upath_t doesn't find");
+		oplmsu_iocack(uwq, mp, EINVAL);
+		return (EINVAL);
+	}
+
+	/* Set ioctl command to lower path info table */
+	use_flag = oplmsu_set_ioctl_path(lpath, uwq, mp);
+	if (use_flag == BUSY) {
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		freemsg(nmp);
+
+		if (ctrl != NULL) {
+			/*
+			 * Remember the waiting queue so it is re-enabled
+			 * when the lower path becomes free, and put the
+			 * original message back for retry.
+			 */
+			mutex_enter(&oplmsu_uinst->c_lock);
+			ctrl->wait_queue = uwq;
+			mutex_exit(&oplmsu_uinst->c_lock);
+			rw_exit(&oplmsu_uinst->lock);
+
+			putbq(uwq, mp);
+			return (SUCCESS);
+		} else {
+			rw_exit(&oplmsu_uinst->lock);
+			oplmsu_iocack(uwq, mp, EBUSY);
+			return (EBUSY);
+		}
+	}
+
+	/* Set destination queue (active path) */
+	dst_queue = WR(oplmsu_uinst->lower_queue);
+	if (canput(dst_queue)) {
+		/* Record the previous state so it can be restored on ack */
+		lpath->src_upath = NULL;
+		lpath->status = upath->traditional_status;
+		upath->traditional_status = term_stat;
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+		rw_exit(&oplmsu_uinst->lock);
+
+		putq(dst_queue, nmp);
+		return (SUCCESS);
+	} else {
+		/* Lower queue is full: undo and put the original back */
+		oplmsu_clear_ioctl_path(lpath);
+		mutex_exit(&oplmsu_uinst->l_lock);
+		mutex_exit(&oplmsu_uinst->u_lock);
+
+		freemsg(nmp);
+		oplmsu_wcmn_norm_putbq(WR(uwq), mp, dst_queue);
+		rw_exit(&oplmsu_uinst->lock);
+		return (FAILURE);
+	}
+}
diff --git a/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.c b/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.c
new file mode 100644
index 0000000000..0de3baa0b2
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.c
@@ -0,0 +1,414 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/errno.h>
+#include <sys/cmn_err.h>
+#include <sys/param.h>
+#include <sys/modctl.h>
+#include <sys/conf.h>
+#include <sys/open.h>
+#include <sys/stat.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/file.h>
+#include <sys/intr.h>
+#include <sys/machsystm.h>
+
+#define PNLIE_MASK 0x010 /* interrupt enable/disable */
+#define PNLINT_MASK 0x001 /* interrupted flag */
+
+#ifdef DEBUG
+int panel_debug = 0;
+static void panel_ddi_put8(ddi_acc_handle_t, uint8_t *, uint8_t);
+#define DCMN_ERR(x) if (panel_debug) cmn_err x
+
+#else
+
+#define DCMN_ERR(x)
+#define panel_ddi_put8(x, y, z) ddi_put8(x, y, z)
+
+#endif
+
+static int panel_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+static int panel_attach(dev_info_t *, ddi_attach_cmd_t);
+static int panel_detach(dev_info_t *, ddi_detach_cmd_t);
+static uint_t panel_intr(caddr_t);
+static int panel_open(dev_t *, int, int, cred_t *);
+static int panel_close(dev_t, int, int, cred_t *);
+
+static char *panel_name = "oplpanel";
+int panel_enable = 1; /* enable or disable */
+
+extern uint32_t cpc_level15_inum; /* in cpc_subr.c */
+
+/* Per-instance soft state for the panel device */
+struct panel_state {
+	dev_info_t *dip;			/* our dev_info node */
+	ddi_iblock_cookie_t iblock_cookie;	/* for ddi_add_intr() */
+	ddi_acc_handle_t panel_regs_handle;	/* register access handle */
+	uint8_t *panelregs;		/* mapping address */
+	uint8_t panelregs_state;	/* keeping regs. */
+};
+
+/*
+ * Character/block entry points.  All device entry points are nodev:
+ * the node exists only to field the panel-switch interrupt and is not
+ * meant to be opened.  NOTE(review): panel_open/panel_close are
+ * declared above but not wired in here -- presumably intentional;
+ * confirm, otherwise those prototypes are dead.
+ */
+struct cb_ops panel_cb_ops = {
+	nodev,			/* open */
+	nodev,			/* close */
+	nodev,			/* strategy */
+	nodev,			/* print */
+	nodev,			/* dump */
+	nodev,			/* read */
+	nodev,			/* write */
+	nodev,			/* ioctl */
+	nodev,			/* devmap */
+	nodev,			/* mmap */
+	nodev,			/* segmap */
+	nochpoll,		/* poll */
+	nodev,			/* prop_op */
+	NULL,			/* streamtab */
+	D_NEW | D_MP | D_HOTPLUG,	/* flag */
+	CB_REV,			/* cb_rev */
+	nodev,			/* async I/O read entry point */
+	nodev			/* async I/O write entry point */
+};
+
+/* Device operations: only getinfo/attach/detach are implemented */
+static struct dev_ops panel_dev_ops = {
+	DEVO_REV,		/* driver build version */
+	0,			/* device reference count */
+	panel_getinfo,		/* getinfo */
+	nulldev,		/* identify */
+	nulldev,		/* probe */
+	panel_attach,		/* attach */
+	panel_detach,		/* detach */
+	nulldev,		/* reset */
+	&panel_cb_ops,		/* cb_ops */
+	NULL,			/* bus_ops */
+	nulldev			/* power */
+};
+
+/* module configuration stuff */
+static void *panelstates;	/* soft-state anchor, one entry per instance */
+extern struct mod_ops mod_driverops;
+
+static struct modldrv modldrv = {
+	&mod_driverops,
+	"OPL panel driver %I%",		/* %I% expanded by SCCS keywords */
+	&panel_dev_ops
+};
+
+static struct modlinkage modlinkage = {
+	MODREV_1,
+	&modldrv,
+	0				/* NULL-terminates the linkage list */
+};
+
+
+/*
+ * Module entry: initialize the soft-state list and install the driver.
+ * The soft state is torn down again if mod_install() fails.
+ */
+int
+_init(void)
+{
+	int status;
+
+	DCMN_ERR((CE_CONT, "%s: _init\n", panel_name));
+
+	status = ddi_soft_state_init(&panelstates,
+	    sizeof (struct panel_state), 0);
+	if (status != 0) {
+		cmn_err(CE_WARN, "%s: ddi_soft_state_init failed.",
+		    panel_name);
+		return (status);
+	}
+
+	status = mod_install(&modlinkage);
+	if (status != 0) {
+		ddi_soft_state_fini(&panelstates);
+	}
+
+	return (status);
+}
+
+/*
+ * Module unload entry.  Always refuses with EBUSY: the emergency panel
+ * switch must keep working, so the module is deliberately not
+ * unloadable once installed.
+ */
+int
+_fini(void)
+{
+	/*
+	 * Can't unload to make sure the panel switch always works.
+	 */
+	return (EBUSY);
+}
+
+/* Module info entry: report via the standard modlinkage */
+int
+_info(struct modinfo *modinfop)
+{
+	return (mod_info(&modlinkage, modinfop));
+}
+
+/*
+ * Attach entry point.
+ *
+ * DDI_ATTACH: allocate soft state, map the panel register space, hook
+ * the interrupt handler, and enable the panel interrupt (PNLIE).
+ * DDI_RESUME: re-write the saved register state to re-enable the
+ * interrupt after suspend.
+ *
+ * On failure the goto labels unwind in reverse acquisition order.
+ */
+static int
+panel_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+
+	int instance;
+	struct panel_state *statep = NULL;
+
+	ddi_device_acc_attr_t access_attr = {
+		DDI_DEVICE_ATTR_V0,
+		DDI_STRUCTURE_BE_ACC,
+		DDI_STRICTORDER_ACC
+	};
+
+	instance = ddi_get_instance(dip);
+
+	DCMN_ERR((CE_CONT, "%s%d: attach\n", panel_name, instance));
+
+	switch (cmd) {
+	case DDI_ATTACH:
+		DCMN_ERR((CE_CONT, "%s%d: DDI_ATTACH\n",
+		    panel_name, instance));
+		break;
+
+	case DDI_RESUME:
+		DCMN_ERR((CE_CONT, "%s%d: DDI_RESUME\n",
+		    panel_name, instance));
+
+		if ((statep = (struct panel_state *)
+		    ddi_get_soft_state(panelstates, instance)) == NULL) {
+			cmn_err(CE_WARN, "%s%d: ddi_get_soft_state failed.",
+			    panel_name, instance);
+			return (DDI_FAILURE);
+		}
+
+		/* enable the interrupt just in case */
+		panel_ddi_put8(statep->panel_regs_handle, statep->panelregs,
+		    statep->panelregs_state);
+		return (DDI_SUCCESS);
+
+	default:
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Attach routine
+	 */
+
+	/* alloc and get soft state */
+	if (ddi_soft_state_zalloc(panelstates, instance) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: ddi_soft_state_zalloc failed.",
+		    panel_name, instance);
+		goto attach_failed2;
+	}
+	if ((statep = (struct panel_state *)
+	    ddi_get_soft_state(panelstates, instance)) == NULL) {
+		cmn_err(CE_WARN, "%s%d: ddi_get_soft_state failed.",
+		    panel_name, instance);
+		goto attach_failed1;
+	}
+
+	/* set the dip in the soft state */
+	statep->dip = dip;
+
+	/* mapping register */
+	if (ddi_regs_map_setup(dip, 0, (caddr_t *)&statep->panelregs,
+	    0, 0, /* the entire space is mapped */
+	    &access_attr, &statep->panel_regs_handle) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: ddi_regs_map_setup failed.",
+		    panel_name, instance);
+		goto attach_failed1;
+	}
+
+	/*
+	 * setup the interrupt handler
+	 * NOTE(review): the ddi_get_iblock_cookie() return value is
+	 * ignored -- presumably it cannot fail for reg set 0 here, but
+	 * confirm; a failed cookie would make ddi_add_intr misbehave.
+	 */
+	ddi_get_iblock_cookie(dip, 0, &statep->iblock_cookie);
+	if (ddi_add_intr(dip, 0, &statep->iblock_cookie, 0, &panel_intr,
+	    (caddr_t)statep) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: cannot add interrupt handler.",
+		    panel_name, instance);
+		goto attach_failed0;
+	}
+
+	/* ATTACH SUCCESS */
+
+	/* announce the device */
+	ddi_report_dev(dip);
+
+	/* turn on interrupt */
+	statep->panelregs_state = 0 | PNLIE_MASK;
+	panel_ddi_put8(statep->panel_regs_handle, statep->panelregs,
+	    statep->panelregs_state);
+
+	return (DDI_SUCCESS);
+
+attach_failed0:
+	ddi_regs_map_free(&statep->panel_regs_handle);
+attach_failed1:
+	ddi_soft_state_free(panelstates, instance);
+attach_failed2:
+	DCMN_ERR((CE_NOTE, "%s%d: attach failed", panel_name, instance));
+	return (DDI_FAILURE);
+}
+
+/*
+ * Detach entry point.
+ *
+ * DDI_DETACH: mask the panel interrupt (clear PNLIE), remove the
+ * handler, and release the register mapping and soft state.
+ * DDI_SUSPEND: nothing to save; the register state is kept in the
+ * soft state and restored on DDI_RESUME.
+ */
+static int
+panel_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+	int instance;
+	struct panel_state *statep;
+
+	instance = ddi_get_instance(dip);
+
+	DCMN_ERR((CE_CONT, "%s%d: detach\n", panel_name, instance));
+
+	if ((statep = (struct panel_state *)
+	    ddi_get_soft_state(panelstates, instance)) == NULL) {
+		cmn_err(CE_WARN, "%s%d: ddi_get_soft_state failed.",
+		    panel_name, instance);
+		return (DDI_FAILURE);
+	}
+
+	switch (cmd) {
+	case DDI_DETACH:
+		DCMN_ERR((CE_CONT, "%s%d: DDI_DETACH\n",
+		    panel_name, instance));
+
+		/* turn off interrupt */
+		statep->panelregs_state &= ~PNLIE_MASK;
+		panel_ddi_put8(statep->panel_regs_handle, statep->panelregs,
+		    statep->panelregs_state);
+
+		/* free all resources for the dip */
+		ddi_remove_intr(dip, 0, statep->iblock_cookie);
+
+		/* need not free iblock_cookie */
+		ddi_regs_map_free(&statep->panel_regs_handle);
+		ddi_soft_state_free(panelstates, instance);
+
+		return (DDI_SUCCESS);
+
+	case DDI_SUSPEND:
+		DCMN_ERR((CE_CONT, "%s%d: DDI_SUSPEND\n",
+		    panel_name, instance));
+		return (DDI_SUCCESS);
+
+	default:
+		return (DDI_FAILURE);
+
+	}
+	/* Not reached */
+}
+
+/*
+ * getinfo entry point: translate a dev_t into the owning dev_info
+ * pointer or instance number.  The minor number is used directly as
+ * the soft-state instance index.
+ */
+/*ARGSUSED*/
+static int
+panel_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
+{
+	struct panel_state *statep;
+	int instance;
+	dev_t dev = (dev_t)arg;
+
+	instance = getminor(dev);
+
+	DCMN_ERR((CE_CONT, "%s%d: getinfo\n", panel_name, instance));
+
+	switch (cmd) {
+	case DDI_INFO_DEVT2DEVINFO:
+		if ((statep = (struct panel_state *)
+		    ddi_get_soft_state(panelstates, instance)) == NULL) {
+			cmn_err(CE_WARN, "%s%d: ddi_get_soft_state failed.",
+			    panel_name, instance);
+			*resultp = NULL;
+			return (DDI_FAILURE);
+		}
+		*resultp = statep->dip;
+		break;
+	case DDI_INFO_DEVT2INSTANCE:
+		*resultp = (void *)(uintptr_t)instance;
+		break;
+	default:
+		return (DDI_FAILURE);
+	}
+
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Panel interrupt handler.
+ *
+ * Verifies the interrupt is ours via the PNLINT status bit, clears it
+ * by writing PNLINT back (preserving the saved enable state), and then
+ * panics the system: the panel switch is an operator's emergency panic
+ * request.  panel_enable guards against a double panic.
+ */
+static uint_t
+panel_intr(caddr_t arg)
+{
+	int instance;
+	struct panel_state *statep = (struct panel_state *)arg;
+
+	instance = ddi_get_instance(statep->dip);
+
+	DCMN_ERR((CE_CONT, "%s%d: intr\n", panel_name, instance));
+
+	/* to confirm the validity of the interrupt */
+	if (!(ddi_get8(statep->panel_regs_handle, statep->panelregs) &
+	    PNLINT_MASK)) {
+		cmn_err(CE_WARN, "%s%d: spurious interrupt detected.",
+		    panel_name, instance);
+		return (DDI_INTR_UNCLAIMED);
+	}
+
+	/* clear the PNLINT bit (write-one-to-clear style update) */
+	panel_ddi_put8(statep->panel_regs_handle, statep->panelregs,
+	    statep->panelregs_state | PNLINT_MASK);
+
+	if (panel_enable) {
+		/* avoid double panic */
+		panel_enable = 0;
+
+		/*
+		 * Re-enqueue the cpc interrupt handler for PIL15 here since we
+		 * are not unwinding back to the interrupt handler subsystem.
+		 * This is to allow potential cpc overflow interrupts to
+		 * function while we go thru the panic flow. Note that this
+		 * logic could be implemented in panic_enter_hw(), we do
+		 * it here for now as it is less risky. This particular
+		 * condition is only specific to OPL hardware and we want
+		 * to minimize exposure of this new logic to other existing
+		 * platforms.
+		 */
+		intr_enqueue_req(PIL_15, cpc_level15_inum);
+
+		cmn_err(CE_PANIC,
+		    "System Panel Driver: Emergency panic request "
+		    "detected!");
+		/* Not reached */
+	}
+
+	return (DDI_INTR_CLAIMED);
+}
+
+#ifdef DEBUG
+/*
+ * DEBUG-only wrapper around ddi_put8() that logs the register value
+ * before and after the write when panel_debug is set.  In non-DEBUG
+ * builds panel_ddi_put8 is a macro that expands directly to ddi_put8.
+ */
+static void
+panel_ddi_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
+{
+	if (panel_debug) {
+		cmn_err(CE_CONT, "%s: old value = 0x%x\n",
+		    panel_name, ddi_get8(handle, dev_addr));
+		cmn_err(CE_CONT, "%s: writing value = 0x%x\n",
+		    panel_name, value);
+		ddi_put8(handle, dev_addr, value);
+		cmn_err(CE_CONT, "%s: new value = 0x%x\n",
+		    panel_name, ddi_get8(handle, dev_addr));
+	} else {
+		ddi_put8(handle, dev_addr, value);
+	}
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.conf b/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.conf
new file mode 100644
index 0000000000..4a639e42f2
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/oplpanel/oplpanel.conf
@@ -0,0 +1,29 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# All Rights Reserved, Copyright (c) FUJITSU LIMITED 2005
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+#
+# force attach panel driver
+#
+ddi-forceattach=1;
+interrupt-priorities=0xf;
diff --git a/usr/src/uts/sun4u/opl/io/options.conf b/usr/src/uts/sun4u/opl/io/options.conf
new file mode 100644
index 0000000000..ecf2bf358e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/options.conf
@@ -0,0 +1,42 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# The property "ttymodes" defines the default termios modes
+# (upon driver open) for the console ttys on OPL platform.
+#
+# ttymodes=<string>
+#
+# The format of <string> is the same format as the output of
+# the "-g" option to /usr/bin/stty. Some standard termios modes
+# have been included for reference:
+#
+# For SVID compliance, use these modes,
+# "522:1805:4b7:2b:7f:1c:23:40:4:0:0:0:11:13:1a:19:12:f:17:16"
+#
+#
+
+name="options" class="root"
+ttymodes="2502:1805:800000bd:8a3b:3:1c:7f:15:4:0:0:0:11:13:1a:19:12:f:17:16";
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcicmu.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcicmu.c
new file mode 100644
index 0000000000..a3dcec894a
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcicmu.c
@@ -0,0 +1,2202 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * OPL CMU-CH PCI nexus driver.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/systm.h>
+#include <sys/intreg.h>
+#include <sys/intr.h>
+#include <sys/machsystm.h>
+#include <sys/conf.h>
+#include <sys/stat.h>
+#include <sys/kmem.h>
+#include <sys/async.h>
+#include <sys/ivintr.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ndifm.h>
+#include <sys/ontrap.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ddi_subrdefs.h>
+#include <sys/epm.h>
+#include <sys/spl.h>
+#include <sys/fm/util.h>
+#include <sys/fm/util.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/io/pci.h>
+#include <sys/fm/io/sun4upci.h>
+#include <sys/pcicmu/pcicmu.h>
+
+#include <sys/cmn_err.h>
+#include <sys/time.h>
+#include <sys/pci.h>
+#include <sys/modctl.h>
+#include <sys/open.h>
+#include <sys/errno.h>
+#include <sys/file.h>
+
+
+uint32_t pcmu_spurintr_duration = 60000000; /* One minute */
+
+/*
+ * The variable controls the default setting of the command register
+ * for pci devices. See pcmu_init_child() for details.
+ *
+ * This flags also controls the setting of bits in the bridge control
+ * register pci to pci bridges. See pcmu_init_child() for details.
+ */
+ushort_t pcmu_command_default = PCI_COMM_SERR_ENABLE |
+ PCI_COMM_WAIT_CYC_ENAB |
+ PCI_COMM_PARITY_DETECT |
+ PCI_COMM_ME |
+ PCI_COMM_MAE |
+ PCI_COMM_IO;
+/*
+ * The following driver parameters are defined as variables to allow
+ * patching for debugging and tuning. Flags that can be set on a per
+ * PBM basis are bit fields where the PBM device instance number maps
+ * to the bit position.
+ */
+#ifdef DEBUG
+/* Bit mask of PCMU_DBG_* categories; 0 silences all debug output. */
+uint64_t pcmu_debug_flags = 0;
+#endif
+/* Non-zero enables registration of the ECC error interrupt. */
+uint_t ecc_error_intr_enable = 1;
+
+uint_t pcmu_ecc_afsr_retries = 100; /* XXX - what's a good value? */
+
+uint_t pcmu_intr_retry_intv = 5; /* for interrupt retry reg */
+/* Non-zero: panic rather than continue on fatal PBM/ECC errors. */
+uint_t pcmu_panic_on_fatal_errors = 1; /* should be 1 at beta */
+
+hrtime_t pcmu_intrpend_timeout = 5ll * NANOSEC; /* 5 seconds in nanoseconds */
+
+/* Physical address used to trigger an error bus cycle; 0 = unused. */
+uint64_t pcmu_errtrig_pa = 0x0;
+
+
+/*
+ * The following value is the number of consecutive unclaimed interrupts that
+ * will be tolerated for a particular ino_p before the interrupt is deemed to
+ * be jabbering and is blocked.
+ */
+uint_t pcmu_unclaimed_intr_max = 20;
+
+/*
+ * function prototypes for dev ops routines:
+ */
+static int pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+static int pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+static int pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
+ void *arg, void **result);
+static int pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp);
+static int pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp);
+static int pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *credp, int *rvalp);
+static int pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
+ int flags, char *name, caddr_t valuep, int *lengthp);
+static int pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args);
+static int pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args,
+ void *result);
+
+static int map_pcmu_registers(pcmu_t *, dev_info_t *);
+static void unmap_pcmu_registers(pcmu_t *);
+static void pcmu_pbm_clear_error(pcmu_pbm_t *);
+
+static int pcmu_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
+ void *, void *);
+static int pcmu_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
+ off_t, off_t, caddr_t *);
+static int pcmu_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
+ ddi_intr_handle_impl_t *, void *);
+
+static uint32_t pcmu_identity_init(pcmu_t *pcmu_p);
+static int pcmu_intr_setup(pcmu_t *pcmu_p);
+static void pcmu_pbm_errstate_get(pcmu_t *pcmu_p,
+ pcmu_pbm_errstate_t *pbm_err_p);
+static int pcmu_obj_setup(pcmu_t *pcmu_p);
+static void pcmu_obj_destroy(pcmu_t *pcmu_p);
+static void pcmu_obj_resume(pcmu_t *pcmu_p);
+static void pcmu_obj_suspend(pcmu_t *pcmu_p);
+
+static void u2u_ittrans_init(pcmu_t *, u2u_ittrans_data_t **);
+static void u2u_ittrans_resume(u2u_ittrans_data_t **);
+static void u2u_ittrans_uninit(u2u_ittrans_data_t *);
+
+static pcmu_ksinfo_t *pcmu_name_kstat;
+
+/*
+ * bus ops and dev ops structures:
+ *
+ * Unsupported bus_ops slots (intrspec and DMA entry points — this
+ * nexus does not provide DMA services) are left as 0.  Slot labels
+ * below follow the positional layout of struct bus_ops.
+ */
+static struct bus_ops pcmu_bus_ops = {
+ BUSO_REV,
+ pcmu_map,
+ 0, /* (*bus_get_intrspec)(); */
+ 0, /* (*bus_add_intrspec)(); */
+ 0, /* (*bus_remove_intrspec)(); */
+ i_ddi_map_fault,
+ 0, /* (*bus_dma_map)(); */
+ 0, /* (*bus_dma_allochdl)(); */
+ 0, /* (*bus_dma_freehdl)(); */
+ 0, /* (*bus_dma_bindhdl)(); */
+ 0, /* (*bus_dma_unbindhdl)(); */
+ 0, /* (*bus_dma_flush)(); */
+ 0, /* (*bus_dma_win)(); */
+ 0, /* (*bus_dma_ctl)(); */
+ pcmu_ctlops,
+ ddi_bus_prop_op,
+ ndi_busop_get_eventcookie, /* (*bus_get_eventcookie)(); */
+ ndi_busop_add_eventcall, /* (*bus_add_eventcall)(); */
+ ndi_busop_remove_eventcall, /* (*bus_remove_eventcall)(); */
+ ndi_post_event, /* (*bus_post_event)(); */
+ NULL, /* (*bus_intr_ctl)(); */
+ NULL, /* (*bus_config)(); */
+ NULL, /* (*bus_unconfig)(); */
+ NULL, /* (*bus_fm_init)(); */
+ NULL, /* (*bus_fm_fini)(); */
+ NULL, /* (*bus_fm_access_enter)(); */
+ NULL, /* (*bus_fm_access_fini)(); */
+ NULL, /* (*bus_power)(); */
+ pcmu_intr_ops /* (*bus_intr_op)(); */
+};
+
+/*
+ * Character-device entry points, used only for the "devctl" minor
+ * node created at attach time (open/close/ioctl for nexus control).
+ */
+struct cb_ops pcmu_cb_ops = {
+ pcmu_open, /* open */
+ pcmu_close, /* close */
+ nodev, /* strategy */
+ nodev, /* print */
+ nodev, /* dump */
+ nodev, /* read */
+ nodev, /* write */
+ pcmu_ioctl, /* ioctl */
+ nodev, /* devmap */
+ nodev, /* mmap */
+ nodev, /* segmap */
+ nochpoll, /* poll */
+ pcmu_prop_op, /* cb_prop_op */
+ NULL, /* streamtab */
+ D_NEW | D_MP | D_HOTPLUG, /* Driver compatibility flag */
+ CB_REV, /* rev */
+ nodev, /* int (*cb_aread)() */
+ nodev /* int (*cb_awrite)() */
+};
+
+/* Device operations; slot labels follow struct dev_ops layout. */
+static struct dev_ops pcmu_ops = {
+ DEVO_REV, /* devo_rev */
+ 0, /* refcnt */
+ pcmu_info, /* getinfo */
+ nulldev, /* identify */
+ 0, /* probe */
+ pcmu_attach, /* attach */
+ pcmu_detach, /* detach */
+ nodev, /* reset */
+ &pcmu_cb_ops, /* character device ops */
+ &pcmu_bus_ops, /* bus ops for child devices */
+ 0 /* power */
+};
+
+/*
+ * module definitions:
+ */
+extern struct mod_ops mod_driverops;
+
+static struct modldrv modldrv = {
+ &mod_driverops, /* Type of module - driver */
+ "OPL CMU-CH PCI Nexus driver %I%", /* Name of module. */
+ &pcmu_ops, /* driver ops */
+};
+
+/* Single driver linkage, consumed by _init()/_fini()/_info(). */
+static struct modlinkage modlinkage = {
+ MODREV_1, (void *)&modldrv, NULL
+};
+
+/*
+ * driver global data:
+ */
+void *per_pcmu_state; /* per-pbm soft state pointer */
+kmutex_t pcmu_global_mutex; /* attach/detach common struct lock */
+errorq_t *pcmu_ecc_queue = NULL; /* per-system ecc handling queue */
+
+extern void pcmu_child_cfg_save(dev_info_t *dip);
+extern void pcmu_child_cfg_restore(dev_info_t *dip);
+
+/*
+ * _init: module installation entry point.
+ *
+ * Sets up the per-instance soft-state anchor, the global mutex and the
+ * performance kstats, then installs the module.  On mod_install()
+ * failure everything is torn down again; pcmu_kstat_fini() was
+ * previously missing here, leaking the kstats created by
+ * pcmu_kstat_init() (see the symmetric teardown in _fini()).
+ *
+ * Returns 0 on success, otherwise the errno-style failure code.
+ */
+int
+_init(void)
+{
+	int e;
+
+	/*
+	 * Initialize per-pci bus soft state pointer.
+	 */
+	e = ddi_soft_state_init(&per_pcmu_state, sizeof (pcmu_t), 1);
+	if (e != 0)
+		return (e);
+
+	/*
+	 * Initialize global mutexes.
+	 */
+	mutex_init(&pcmu_global_mutex, NULL, MUTEX_DRIVER, NULL);
+
+	/*
+	 * Create the performance kstats.
+	 */
+	pcmu_kstat_init();
+
+	/*
+	 * Install the module.
+	 */
+	e = mod_install(&modlinkage);
+	if (e != 0) {
+		/* Undo everything done above, including the kstats. */
+		pcmu_kstat_fini();
+		ddi_soft_state_fini(&per_pcmu_state);
+		mutex_destroy(&pcmu_global_mutex);
+	}
+	return (e);
+}
+
+/*
+ * _fini: module removal entry point.
+ *
+ * mod_remove() fails while the driver is busy, in which case all
+ * global state must be left intact; only after a successful removal
+ * is the global state (error queue, kstats, soft state, mutex) freed.
+ */
+int
+_fini(void)
+{
+ int e;
+
+ /*
+ * Remove the module.
+ */
+ e = mod_remove(&modlinkage);
+ if (e != 0) {
+ return (e);
+ }
+
+ /*
+ * Destroy pcmu_ecc_queue, and set it to NULL.
+ */
+ if (pcmu_ecc_queue) {
+ errorq_destroy(pcmu_ecc_queue);
+ pcmu_ecc_queue = NULL;
+ }
+
+ /*
+ * Destroy the performance kstats.
+ */
+ pcmu_kstat_fini();
+
+ /*
+ * Free the per-pci and per-CMU-CH soft state info and destroy
+ * mutex for per-CMU-CH soft state.
+ */
+ ddi_soft_state_fini(&per_pcmu_state);
+ mutex_destroy(&pcmu_global_mutex);
+ return (e);
+}
+
+/* _info: report module information via the standard modctl helper. */
+int
+_info(struct modinfo *modinfop)
+{
+	int rc = mod_info(&modlinkage, modinfop);
+
+	return (rc);
+}
+
+/*
+ * getinfo(9E) entry point: translate a dev_t into its instance number
+ * or its devinfo node.  The instance is encoded in the upper bits of
+ * the minor number (see the devctl minor created in pcmu_attach()).
+ */
+/*ARGSUSED*/
+static int
+pcmu_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
+{
+	int inst = getminor((dev_t)arg) >> 8;
+	pcmu_t *softp = get_pcmu_soft_state(inst);
+
+	if (infocmd == DDI_INFO_DEVT2INSTANCE) {
+		*result = (void *)(uintptr_t)inst;
+		return (DDI_SUCCESS);
+	}
+
+	if (infocmd == DDI_INFO_DEVT2DEVINFO) {
+		if (softp == NULL)
+			return (DDI_FAILURE);
+		*result = (void *)softp->pcmu_dip;
+		return (DDI_SUCCESS);
+	}
+
+	return (DDI_FAILURE);
+}
+
+
+/* device driver entry points */
+/*
+ * attach entry point:
+ *
+ * DDI_ATTACH: allocate the per-instance soft state, read the node
+ * properties, map the CMU-CH registers, build the driver objects and
+ * interrupts, and create the "devctl" minor node.  Failures unwind
+ * through the fall-through error labels at the bottom of the case.
+ *
+ * DDI_RESUME: re-configure and resume a previously suspended instance.
+ */
+static int
+pcmu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+ pcmu_t *pcmu_p;
+ int instance = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_ATTACH\n");
+
+ /*
+ * Allocate and get the per-pci soft state structure.
+ */
+ if (alloc_pcmu_soft_state(instance) != DDI_SUCCESS) {
+ cmn_err(CE_WARN, "%s%d: can't allocate pci state",
+ ddi_driver_name(dip), instance);
+ goto err_bad_pcmu_softstate;
+ }
+ pcmu_p = get_pcmu_soft_state(instance);
+ pcmu_p->pcmu_dip = dip;
+ mutex_init(&pcmu_p->pcmu_mutex, NULL, MUTEX_DRIVER, NULL);
+ pcmu_p->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
+ pcmu_p->pcmu_open_count = 0;
+
+ /*
+ * Get key properties of the pci bridge node.
+ */
+ if (get_pcmu_properties(pcmu_p, dip) == DDI_FAILURE) {
+ goto err_bad_pcmu_prop;
+ }
+
+ /*
+ * Map in the registers.
+ */
+ if (map_pcmu_registers(pcmu_p, dip) == DDI_FAILURE) {
+ goto err_bad_reg_prop;
+ }
+ if (pcmu_obj_setup(pcmu_p) != DDI_SUCCESS) {
+ goto err_bad_objs;
+ }
+
+ /* Minor number encodes the instance in its upper bits. */
+ if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
+ (uint_t)instance<<8 | 0xff,
+ DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
+ goto err_bad_devctl_node;
+ }
+
+ /*
+ * Due to unresolved hardware issues, disable PCIPM until
+ * the problem is fully understood.
+ *
+ * pcmu_pwr_setup(pcmu_p, dip);
+ */
+
+ ddi_report_dev(dip);
+
+ pcmu_p->pcmu_state = PCMU_ATTACHED;
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip, "attach success\n");
+ break;
+
+ /*
+ * Error exits: each label undoes only the steps that had
+ * succeeded before the failure, in reverse order.
+ */
+err_bad_objs:
+ ddi_remove_minor_node(dip, "devctl");
+err_bad_devctl_node:
+ unmap_pcmu_registers(pcmu_p);
+err_bad_reg_prop:
+ free_pcmu_properties(pcmu_p);
+err_bad_pcmu_prop:
+ mutex_destroy(&pcmu_p->pcmu_mutex);
+ free_pcmu_soft_state(instance);
+err_bad_pcmu_softstate:
+ return (DDI_FAILURE);
+
+ case DDI_RESUME:
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip, "DDI_RESUME\n");
+
+ /*
+ * Make sure the CMU-CH control registers
+ * are configured properly.
+ */
+ pcmu_p = get_pcmu_soft_state(instance);
+ mutex_enter(&pcmu_p->pcmu_mutex);
+
+ /*
+ * Make sure this instance has been suspended.
+ */
+ if (pcmu_p->pcmu_state != PCMU_SUSPENDED) {
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip,
+ "instance NOT suspended\n");
+ mutex_exit(&pcmu_p->pcmu_mutex);
+ return (DDI_FAILURE);
+ }
+ pcmu_obj_resume(pcmu_p);
+ pcmu_p->pcmu_state = PCMU_ATTACHED;
+
+ /* Restore the child config headers saved at DDI_SUSPEND. */
+ pcmu_child_cfg_restore(dip);
+
+ mutex_exit(&pcmu_p->pcmu_mutex);
+ break;
+
+ default:
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip, "unsupported attach op\n");
+ return (DDI_FAILURE);
+ }
+
+ return (DDI_SUCCESS);
+}
+
+/*
+ * detach entry point:
+ *
+ * DDI_DETACH tears down all driver objects and frees the soft state;
+ * DDI_SUSPEND saves child config space and quiesces the instance.
+ */
+static int
+pcmu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+ int instance = ddi_get_instance(dip);
+ pcmu_t *pcmu_p = get_pcmu_soft_state(instance);
+ int len;
+
+ /*
+ * Make sure we are currently attached.
+ * NOTE(review): this state read happens before pcmu_mutex is
+ * taken -- presumably safe because the DDI serializes
+ * attach/detach per instance; confirm against the framework.
+ */
+ if (pcmu_p->pcmu_state != PCMU_ATTACHED) {
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip,
+ "failed - instance not attached\n");
+ return (DDI_FAILURE);
+ }
+
+ mutex_enter(&pcmu_p->pcmu_mutex);
+
+ switch (cmd) {
+ case DDI_DETACH:
+ PCMU_DBG0(PCMU_DBG_DETACH, dip, "DDI_DETACH\n");
+ pcmu_obj_destroy(pcmu_p);
+
+ /*
+ * Free the pci soft state structure and the rest of the
+ * resources it's using.
+ */
+ free_pcmu_properties(pcmu_p);
+ unmap_pcmu_registers(pcmu_p);
+ mutex_exit(&pcmu_p->pcmu_mutex);
+ mutex_destroy(&pcmu_p->pcmu_mutex);
+ free_pcmu_soft_state(instance);
+
+ /* Free the interrupt-priorities prop if we created it. */
+ if (ddi_getproplen(DDI_DEV_T_ANY, dip,
+ DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
+ "interrupt-priorities", &len) == DDI_PROP_SUCCESS) {
+ (void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
+ "interrupt-priorities");
+ }
+ return (DDI_SUCCESS);
+
+ case DDI_SUSPEND:
+ /* Save child config headers, then quiesce the objects. */
+ pcmu_child_cfg_save(dip);
+ pcmu_obj_suspend(pcmu_p);
+ pcmu_p->pcmu_state = PCMU_SUSPENDED;
+
+ mutex_exit(&pcmu_p->pcmu_mutex);
+ return (DDI_SUCCESS);
+
+ default:
+ PCMU_DBG0(PCMU_DBG_DETACH, dip, "unsupported detach op\n");
+ mutex_exit(&pcmu_p->pcmu_mutex);
+ return (DDI_FAILURE);
+ }
+}
+
+
+/*LINTLIBRARY*/
+
+/*
+ * open(9E) for the devctl minor node.  An exclusive open (FEXCL)
+ * requires the device to be fully closed; a shared open is refused
+ * while an exclusive open is in effect.
+ */
+/* ARGSUSED3 */
+static int
+pcmu_open(dev_t *devp, int flags, int otyp, cred_t *credp)
+{
+	pcmu_t *softp;
+	int excl = (flags & FEXCL) != 0;
+
+	/* Only character opens are supported. */
+	if (otyp != OTYP_CHR)
+		return (EINVAL);
+
+	softp = DEV_TO_SOFTSTATE(*devp);
+	if (softp == NULL)
+		return (ENXIO);
+
+	PCMU_DBG2(PCMU_DBG_OPEN, softp->pcmu_dip,
+	    "devp=%x: flags=%x\n", devp, flags);
+
+	mutex_enter(&softp->pcmu_mutex);
+	if (excl && softp->pcmu_soft_state != PCMU_SOFT_STATE_CLOSED) {
+		mutex_exit(&softp->pcmu_mutex);
+		PCMU_DBG0(PCMU_DBG_OPEN, softp->pcmu_dip, "busy\n");
+		return (EBUSY);
+	}
+	if (!excl && softp->pcmu_soft_state == PCMU_SOFT_STATE_OPEN_EXCL) {
+		mutex_exit(&softp->pcmu_mutex);
+		PCMU_DBG0(PCMU_DBG_OPEN, softp->pcmu_dip, "busy\n");
+		return (EBUSY);
+	}
+	softp->pcmu_soft_state = excl ?
+	    PCMU_SOFT_STATE_OPEN_EXCL : PCMU_SOFT_STATE_OPEN;
+	softp->pcmu_open_count++;
+	mutex_exit(&softp->pcmu_mutex);
+	return (0);
+}
+
+
+/*
+ * close(9E) for the devctl minor node: any close returns the device
+ * to the fully-closed state and resets the open count.
+ */
+/* ARGSUSED */
+static int
+pcmu_close(dev_t dev, int flags, int otyp, cred_t *credp)
+{
+	pcmu_t *softp;
+
+	if (otyp != OTYP_CHR)
+		return (EINVAL);
+
+	softp = DEV_TO_SOFTSTATE(dev);
+	if (softp == NULL)
+		return (ENXIO);
+
+	PCMU_DBG2(PCMU_DBG_CLOSE, softp->pcmu_dip,
+	    "dev=%x: flags=%x\n", dev, flags);
+
+	mutex_enter(&softp->pcmu_mutex);
+	softp->pcmu_soft_state = PCMU_SOFT_STATE_CLOSED;
+	softp->pcmu_open_count = 0;
+	mutex_exit(&softp->pcmu_mutex);
+	return (0);
+}
+
+/*
+ * ioctl(9E) for the devctl minor node.
+ *
+ * The common DEVCTL_* state ioctls are delegated to ndi_devctl_ioctl();
+ * quiesce/unquiesce are handled here, and reset operations are not
+ * supported by this nexus.
+ */
+/* ARGSUSED */
+static int
+pcmu_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *credp, int *rvalp)
+{
+ pcmu_t *pcmu_p;
+ dev_info_t *dip;
+ struct devctl_iocdata *dcp;
+ uint_t bus_state;
+ int rv = 0;
+
+ pcmu_p = DEV_TO_SOFTSTATE(dev);
+ if (pcmu_p == NULL) {
+ return (ENXIO);
+ }
+
+ dip = pcmu_p->pcmu_dip;
+ PCMU_DBG2(PCMU_DBG_IOCTL, dip, "dev=%x: cmd=%x\n", dev, cmd);
+
+ /*
+ * We can use the generic implementation for these ioctls
+ */
+ switch (cmd) {
+ case DEVCTL_DEVICE_GETSTATE:
+ case DEVCTL_DEVICE_ONLINE:
+ case DEVCTL_DEVICE_OFFLINE:
+ case DEVCTL_BUS_GETSTATE:
+ return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
+ }
+
+ /*
+ * read devctl ioctl data; the handle must be freed via
+ * ndi_dc_freehdl() on every path below.
+ */
+ if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
+ return (EFAULT);
+
+ switch (cmd) {
+
+ case DEVCTL_DEVICE_RESET:
+ PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_DEVICE_RESET\n");
+ rv = ENOTSUP;
+ break;
+
+
+ case DEVCTL_BUS_QUIESCE:
+ PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_QUIESCE\n");
+ /* Already quiesced: succeed without changing state. */
+ if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
+ if (bus_state == BUS_QUIESCED) {
+ break;
+ }
+ }
+ (void) ndi_set_bus_state(dip, BUS_QUIESCED);
+ break;
+
+ case DEVCTL_BUS_UNQUIESCE:
+ PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_UNQUIESCE\n");
+ /* Already active: succeed without changing state. */
+ if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) {
+ if (bus_state == BUS_ACTIVE) {
+ break;
+ }
+ }
+ (void) ndi_set_bus_state(dip, BUS_ACTIVE);
+ break;
+
+ case DEVCTL_BUS_RESET:
+ PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESET\n");
+ rv = ENOTSUP;
+ break;
+
+ case DEVCTL_BUS_RESETALL:
+ PCMU_DBG0(PCMU_DBG_IOCTL, dip, "DEVCTL_BUS_RESETALL\n");
+ rv = ENOTSUP;
+ break;
+
+ default:
+ rv = ENOTTY;
+ }
+
+ ndi_dc_freehdl(dcp);
+ return (rv);
+}
+
+/* Property requests go straight to the framework default handler. */
+static int
+pcmu_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
+    int flags, char *name, caddr_t valuep, int *lengthp)
+{
+	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
+}
+/* bus driver entry points */
+
+/*
+ * bus map entry point:
+ *
+ * if map request is for an rnumber
+ * get the corresponding regspec from device node
+ * build a new regspec in our parent's format
+ * build a new map_req with the new regspec
+ * call up the tree to complete the mapping
+ */
+static int
+pcmu_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
+ off_t off, off_t len, caddr_t *addrp)
+{
+ pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+ struct regspec p_regspec;
+ ddi_map_req_t p_mapreq;
+ int reglen, rval, r_no;
+ pci_regspec_t reloc_reg, *rp = &reloc_reg;
+
+ PCMU_DBG2(PCMU_DBG_MAP, dip, "rdip=%s%d:",
+ ddi_driver_name(rdip), ddi_get_instance(rdip));
+
+ if (mp->map_flags & DDI_MF_USER_MAPPING) {
+ return (DDI_ME_UNIMPLEMENTED);
+ }
+
+ switch (mp->map_type) {
+ case DDI_MT_REGSPEC:
+ /* Work on a local copy so the caller's regspec is untouched. */
+ reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */
+ break;
+
+ case DDI_MT_RNUMBER:
+ r_no = mp->map_obj.rnumber;
+ PCMU_DBG1(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, " r#=%x", r_no);
+
+ /* "reg" is kmem_alloc'ed by the framework; freed below. */
+ if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
+ "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS) {
+ return (DDI_ME_RNUMBER_RANGE);
+ }
+
+ if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
+ kmem_free(rp, reglen);
+ return (DDI_ME_RNUMBER_RANGE);
+ }
+ /*
+ * Advance rp to the requested entry; "done" below must
+ * free from the array base, hence (rp - r_no) there.
+ */
+ rp += r_no;
+ break;
+
+ default:
+ return (DDI_ME_INVAL);
+ }
+ PCMU_DBG0(PCMU_DBG_MAP | PCMU_DBG_CONT, dip, "\n");
+
+ /* use "assigned-addresses" to relocate regspec within pci space */
+ if (rval = pcmu_reloc_reg(dip, rdip, pcmu_p, rp)) {
+ goto done;
+ }
+
+ /* adjust regspec according to mapping request */
+ if (len) {
+ rp->pci_size_low = (uint_t)len;
+ }
+ rp->pci_phys_low += off;
+
+ /* use "ranges" to translate relocated pci regspec into parent space */
+ if (rval = pcmu_xlate_reg(pcmu_p, rp, &p_regspec)) {
+ goto done;
+ }
+
+ p_mapreq = *mp; /* dup the whole structure */
+ p_mapreq.map_type = DDI_MT_REGSPEC;
+ p_mapreq.map_obj.rp = &p_regspec;
+ rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);
+
+done:
+ if (mp->map_type == DDI_MT_RNUMBER) {
+ kmem_free(rp - r_no, reglen);
+ }
+ return (rval);
+}
+
+#ifdef DEBUG
+int pcmu_peekfault_cnt = 0;
+int pcmu_pokefault_cnt = 0;
+#endif /* DEBUG */
+
+/*
+ * Perform a protected poke (cautious write) to a device address.
+ *
+ * The write runs under on_trap(OT_DATA_ACCESS) with the trampoline
+ * temporarily redirected to poke_fault, so a bus error becomes a
+ * DDI_FAILURE return instead of a panic.  pcbm_pokeflt_mutex
+ * serializes pokes so the error-handling code can attribute a fault
+ * to this poke via pcbm_ontrap_data.
+ */
+static int
+pcmu_do_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
+{
+ pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+ int err = DDI_SUCCESS;
+ on_trap_data_t otd;
+
+ mutex_enter(&pcbm_p->pcbm_pokeflt_mutex);
+ pcbm_p->pcbm_ontrap_data = &otd;
+
+ /* Set up protected environment. */
+ if (!on_trap(&otd, OT_DATA_ACCESS)) {
+ uintptr_t tramp = otd.ot_trampoline;
+
+ otd.ot_trampoline = (uintptr_t)&poke_fault;
+ err = do_poke(in_args->size, (void *)in_args->dev_addr,
+ (void *)in_args->host_addr);
+ otd.ot_trampoline = tramp;
+ } else {
+ err = DDI_FAILURE;
+ }
+
+ /*
+ * Read the async fault register for the PBM to see if it sees
+ * a master-abort.
+ */
+ pcmu_pbm_clear_error(pcbm_p);
+
+ if (otd.ot_trap & OT_DATA_ACCESS) {
+ err = DDI_FAILURE;
+ }
+
+ /* Take down protected environment. */
+ no_trap();
+
+ pcbm_p->pcbm_ontrap_data = NULL;
+ mutex_exit(&pcbm_p->pcbm_pokeflt_mutex);
+
+#ifdef DEBUG
+ if (err == DDI_FAILURE)
+ pcmu_pokefault_cnt++;
+#endif
+ return (err);
+}
+
+
+/* DDI_CTLOPS_POKE handler: forward to the protected poke routine. */
+static int
+pcmu_ctlops_poke(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
+{
+	int rc = pcmu_do_poke(pcmu_p, in_args);
+
+	return (rc);
+}
+
+/*
+ * Perform a protected peek (cautious read) from a device address.
+ * The read runs under on_trap(OT_DATA_ACCESS) with the trampoline
+ * redirected to peek_fault, so a bus error is reported as
+ * DDI_FAILURE instead of panicking the system.
+ */
+/* ARGSUSED */
+static int
+pcmu_do_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args)
+{
+	on_trap_data_t otd;
+	int err = DDI_SUCCESS;
+
+	if (on_trap(&otd, OT_DATA_ACCESS)) {
+		err = DDI_FAILURE;
+	} else {
+		uintptr_t saved = otd.ot_trampoline;
+
+		otd.ot_trampoline = (uintptr_t)&peek_fault;
+		err = do_peek(in_args->size, (void *)in_args->dev_addr,
+		    (void *)in_args->host_addr);
+		otd.ot_trampoline = saved;
+	}
+
+	no_trap();
+
+#ifdef DEBUG
+	if (err == DDI_FAILURE)
+		pcmu_peekfault_cnt++;
+#endif
+	return (err);
+}
+
+
+/*
+ * DDI_CTLOPS_PEEK handler.
+ *
+ * The peeked value is stored through in_args->host_addr by the peek
+ * routine itself.  The former "result = (void *)in_args->host_addr;"
+ * statement only reassigned a by-value parameter and therefore had no
+ * effect; it has been removed.
+ */
+/* ARGSUSED */
+static int
+pcmu_ctlops_peek(pcmu_t *pcmu_p, peekpoke_ctlops_t *in_args, void *result)
+{
+	return (pcmu_do_peek(pcmu_p, in_args));
+}
+
+/*
+ * control ops entry point:
+ *
+ * Requests handled completely:
+ * DDI_CTLOPS_INITCHILD see pcmu_init_child() for details
+ * DDI_CTLOPS_UNINITCHILD
+ * DDI_CTLOPS_REPORTDEV see report_dev() for details
+ * DDI_CTLOPS_XLATE_INTRS nothing to do
+ * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1
+ * DDI_CTLOPS_REGSIZE
+ * DDI_CTLOPS_NREGS
+ * DDI_CTLOPS_NINTRS
+ * DDI_CTLOPS_DVMAPAGESIZE
+ * DDI_CTLOPS_POKE
+ * DDI_CTLOPS_PEEK
+ * DDI_CTLOPS_QUIESCE
+ * DDI_CTLOPS_UNQUIESCE
+ *
+ * All others passed to parent.
+ */
+static int
+pcmu_ctlops(dev_info_t *dip, dev_info_t *rdip,
+ ddi_ctl_enum_t op, void *arg, void *result)
+{
+ pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+
+ switch (op) {
+ case DDI_CTLOPS_INITCHILD:
+ return (pcmu_init_child(pcmu_p, (dev_info_t *)arg));
+
+ case DDI_CTLOPS_UNINITCHILD:
+ return (pcmu_uninit_child(pcmu_p, (dev_info_t *)arg));
+
+ case DDI_CTLOPS_REPORTDEV:
+ return (pcmu_report_dev(rdip));
+
+ case DDI_CTLOPS_IOMIN:
+ /*
+ * If we are using the streaming cache, align at
+ * least on a cache line boundary. Otherwise use
+ * whatever alignment is passed in.
+ */
+ return (DDI_SUCCESS);
+
+ case DDI_CTLOPS_REGSIZE:
+ *((off_t *)result) = pcmu_get_reg_set_size(rdip, *((int *)arg));
+ return (DDI_SUCCESS);
+
+ case DDI_CTLOPS_NREGS:
+ *((uint_t *)result) = pcmu_get_nreg_set(rdip);
+ return (DDI_SUCCESS);
+
+ case DDI_CTLOPS_DVMAPAGESIZE:
+ /* No DVMA support on this nexus. */
+ *((ulong_t *)result) = 0;
+ return (DDI_SUCCESS);
+
+ case DDI_CTLOPS_POKE:
+ return (pcmu_ctlops_poke(pcmu_p, (peekpoke_ctlops_t *)arg));
+
+ case DDI_CTLOPS_PEEK:
+ return (pcmu_ctlops_peek(pcmu_p, (peekpoke_ctlops_t *)arg,
+ result));
+
+ case DDI_CTLOPS_AFFINITY:
+ break;
+
+ case DDI_CTLOPS_QUIESCE:
+ return (DDI_FAILURE);
+
+ case DDI_CTLOPS_UNQUIESCE:
+ return (DDI_FAILURE);
+
+ default:
+ break;
+ }
+
+ /*
+ * Now pass the request up to our parent.
+ */
+ PCMU_DBG2(PCMU_DBG_CTLOPS, dip,
+ "passing request to parent: rdip=%s%d\n",
+ ddi_driver_name(rdip), ddi_get_instance(rdip));
+ return (ddi_ctlops(dip, rdip, op, arg, result));
+}
+
+
+/*
+ * bus_intr_op entry point: dispatch interrupt-framework operations
+ * for child devices.  Only fixed (INTx-style) interrupts are
+ * supported; enable/disable are routed through the CMU-CH interrupt
+ * block state machine.
+ */
+/* ARGSUSED */
+static int
+pcmu_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
+ ddi_intr_handle_impl_t *hdlp, void *result)
+{
+ pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+ int ret = DDI_SUCCESS;
+
+ switch (intr_op) {
+ case DDI_INTROP_GETCAP:
+ /* GetCap will always fail for all non PCI devices */
+ (void) pci_intx_get_cap(rdip, (int *)result);
+ break;
+ case DDI_INTROP_SETCAP:
+ ret = DDI_ENOTSUP;
+ break;
+ case DDI_INTROP_ALLOC:
+ *(int *)result = hdlp->ih_scratch1;
+ break;
+ case DDI_INTROP_FREE:
+ break;
+ case DDI_INTROP_GETPRI:
+ *(int *)result = hdlp->ih_pri ? hdlp->ih_pri : 0;
+ break;
+ case DDI_INTROP_SETPRI:
+ break;
+ case DDI_INTROP_ADDISR:
+ ret = pcmu_add_intr(dip, rdip, hdlp);
+ break;
+ case DDI_INTROP_REMISR:
+ ret = pcmu_remove_intr(dip, rdip, hdlp);
+ break;
+ case DDI_INTROP_ENABLE:
+ ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
+ PCMU_INTR_STATE_ENABLE);
+ break;
+ case DDI_INTROP_DISABLE:
+ ret = pcmu_ib_update_intr_state(pcmu_p, rdip, hdlp,
+ PCMU_INTR_STATE_DISABLE);
+ break;
+ case DDI_INTROP_SETMASK:
+ ret = pci_intx_set_mask(rdip);
+ break;
+ case DDI_INTROP_CLRMASK:
+ ret = pci_intx_clr_mask(rdip);
+ break;
+ case DDI_INTROP_GETPENDING:
+ ret = pci_intx_get_pending(rdip, (int *)result);
+ break;
+ case DDI_INTROP_NINTRS:
+ case DDI_INTROP_NAVAIL:
+ *(int *)result = i_ddi_get_nintrs(rdip);
+ break;
+ case DDI_INTROP_SUPPORTED_TYPES:
+ /* PCI nexus driver supports only fixed interrupts */
+ *(int *)result = i_ddi_get_nintrs(rdip) ?
+ DDI_INTR_TYPE_FIXED : 0;
+ break;
+ default:
+ ret = DDI_ENOTSUP;
+ break;
+ }
+
+ return (ret);
+}
+
+/*
+ * CMU-CH specifics implementation:
+ * interrupt mapping register
+ * PBM configuration
+ * ECC and PBM error handling
+ */
+
+/* called by pcmu_attach() DDI_ATTACH to initialize pci objects */
+/*
+ * Creates the interrupt-block, control-block, ECC, PBM and error
+ * objects under pcmu_global_mutex, then registers the interrupts.
+ * Returns DDI_SUCCESS, or the pcmu_intr_setup() failure code (the
+ * create routines themselves do not fail).
+ */
+static int
+pcmu_obj_setup(pcmu_t *pcmu_p)
+{
+ int ret;
+
+ mutex_enter(&pcmu_global_mutex);
+ pcmu_p->pcmu_rev = ddi_prop_get_int(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
+ DDI_PROP_DONTPASS, "module-revision#", 0);
+
+ pcmu_ib_create(pcmu_p);
+ pcmu_cb_create(pcmu_p);
+ pcmu_ecc_create(pcmu_p);
+ pcmu_pbm_create(pcmu_p);
+ pcmu_err_create(pcmu_p);
+ if ((ret = pcmu_intr_setup(pcmu_p)) != DDI_SUCCESS)
+ goto done;
+
+ pcmu_kstat_create(pcmu_p);
+done:
+ mutex_exit(&pcmu_global_mutex);
+ if (ret != DDI_SUCCESS) {
+ cmn_err(CE_NOTE, "Interrupt register failure, returning 0x%x\n",
+ ret);
+ }
+ return (ret);
+}
+
+/* called by pcmu_detach() DDI_DETACH to destroy pci objects */
+/*
+ * Tears down the objects built in pcmu_obj_setup(), roughly in
+ * reverse creation order, under pcmu_global_mutex.
+ */
+static void
+pcmu_obj_destroy(pcmu_t *pcmu_p)
+{
+ mutex_enter(&pcmu_global_mutex);
+
+ pcmu_kstat_destroy(pcmu_p);
+ pcmu_pbm_destroy(pcmu_p);
+ pcmu_err_destroy(pcmu_p);
+ pcmu_ecc_destroy(pcmu_p);
+ pcmu_cb_destroy(pcmu_p);
+ pcmu_ib_destroy(pcmu_p);
+ pcmu_intr_teardown(pcmu_p);
+
+ mutex_exit(&pcmu_global_mutex);
+}
+
+/* called by pcmu_attach() DDI_RESUME to (re)initialize pci objects */
+/*
+ * Reprograms the hardware state (interrupt block, ECC, U2U interrupt
+ * translation, PBM) and resumes each object.  The statement order
+ * mirrors the hardware bring-up sequence; do not reorder.
+ */
+static void
+pcmu_obj_resume(pcmu_t *pcmu_p)
+{
+ mutex_enter(&pcmu_global_mutex);
+
+ pcmu_ib_configure(pcmu_p->pcmu_ib_p);
+ pcmu_ecc_configure(pcmu_p);
+ pcmu_ib_resume(pcmu_p->pcmu_ib_p);
+ u2u_ittrans_resume((u2u_ittrans_data_t **)
+ &(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie));
+
+ pcmu_pbm_configure(pcmu_p->pcmu_pcbm_p);
+
+ pcmu_cb_resume(pcmu_p->pcmu_cb_p);
+
+ pcmu_pbm_resume(pcmu_p->pcmu_pcbm_p);
+
+ mutex_exit(&pcmu_global_mutex);
+}
+
+/* called by pcmu_detach() DDI_SUSPEND to suspend pci objects */
+/*
+ * Suspends the PBM, interrupt block and control block (reverse of
+ * the resume order) under pcmu_global_mutex.
+ */
+static void
+pcmu_obj_suspend(pcmu_t *pcmu_p)
+{
+ mutex_enter(&pcmu_global_mutex);
+
+ pcmu_pbm_suspend(pcmu_p->pcmu_pcbm_p);
+ pcmu_ib_suspend(pcmu_p->pcmu_ib_p);
+ pcmu_cb_suspend(pcmu_p->pcmu_cb_p);
+
+ mutex_exit(&pcmu_global_mutex);
+}
+
+/*
+ * Read the "interrupts" property, convert each mondo to an ino, and
+ * register/enable the ECC and PBM interrupts.  The embedded
+ * assignments in the if-conditions below are intentional: the return
+ * code of each register routine is captured in 'i' and returned on
+ * failure after tearing down whatever was registered.
+ */
+static int
+pcmu_intr_setup(pcmu_t *pcmu_p)
+{
+ dev_info_t *dip = pcmu_p->pcmu_dip;
+ pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+ pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
+ int i, no_of_intrs;
+
+ /*
+ * Get the interrupts property.
+ */
+ if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
+ "interrupts", (caddr_t)&pcmu_p->pcmu_inos,
+ &pcmu_p->pcmu_inos_len) != DDI_SUCCESS) {
+ cmn_err(CE_PANIC, "%s%d: no interrupts property\n",
+ ddi_driver_name(dip), ddi_get_instance(dip));
+ }
+
+ /*
+ * figure out number of interrupts in the "interrupts" property
+ * and convert them all into ino.
+ */
+ i = ddi_getprop(DDI_DEV_T_ANY, dip, 0, "#interrupt-cells", 1);
+ i = CELLS_1275_TO_BYTES(i);
+ no_of_intrs = pcmu_p->pcmu_inos_len / i;
+ for (i = 0; i < no_of_intrs; i++) {
+ pcmu_p->pcmu_inos[i] =
+ PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[i]);
+ }
+
+ pcb_p->pcb_no_of_inos = no_of_intrs;
+ if (i = pcmu_ecc_register_intr(pcmu_p)) {
+ goto teardown;
+ }
+
+ intr_dist_add(pcmu_cb_intr_dist, pcb_p);
+ pcmu_ecc_enable_intr(pcmu_p);
+
+ if (i = pcmu_pbm_register_intr(pcbm_p)) {
+ intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
+ goto teardown;
+ }
+ intr_dist_add(pcmu_pbm_intr_dist, pcbm_p);
+ pcmu_ib_intr_enable(pcmu_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);
+
+ intr_dist_add_weighted(pcmu_ib_intr_dist_all, pcmu_p->pcmu_ib_p);
+ return (DDI_SUCCESS);
+teardown:
+ pcmu_intr_teardown(pcmu_p);
+ return (i);
+}
+
+/*
+ * pcmu_fix_ranges - fixes the config space entry of the "ranges"
+ * property on CMU-CH platforms: for each config-space range, fold
+ * the child-high bits into the parent-low address.
+ */
+void
+pcmu_fix_ranges(pcmu_ranges_t *rng_p, int rng_entries)
+{
+	pcmu_ranges_t *r;
+	pcmu_ranges_t *end = rng_p + rng_entries;
+
+	for (r = rng_p; r < end; r++) {
+		if ((r->child_high & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG)
+			r->parent_low |= r->child_high;
+	}
+}
+
+/*
+ * map_pcmu_registers
+ *
+ * This function is called from the attach routine to map the registers
+ * accessed by this driver: reg entry 0 (control), reg entry 2 (main
+ * register block) and reg entry 1 (config header).
+ *
+ * used by: pcmu_attach()
+ *
+ * return value: DDI_FAILURE on failure, with all partially-created
+ * mappings released.  (The original code leaked pcmu_ac[2] when
+ * mapping reg entry 1 failed; that mapping is now freed as well.)
+ */
+static int
+map_pcmu_registers(pcmu_t *pcmu_p, dev_info_t *dip)
+{
+	ddi_device_acc_attr_t attr;
+
+	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+
+	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
+	if (ddi_regs_map_setup(dip, 0, &pcmu_p->pcmu_address[0], 0, 0,
+	    &attr, &pcmu_p->pcmu_ac[0]) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: unable to map reg entry 0\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * We still use pcmu_address[2]
+	 */
+	if (ddi_regs_map_setup(dip, 2, &pcmu_p->pcmu_address[2], 0, 0,
+	    &attr, &pcmu_p->pcmu_ac[2]) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: unable to map reg entry 2\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * The second register set contains the bridge's configuration
+	 * header. This header is at the very beginning of the bridge's
+	 * configuration space. This space has little-endian byte order.
+	 */
+	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
+	if (ddi_regs_map_setup(dip, 1, &pcmu_p->pcmu_address[1], 0,
+	    PCI_CONF_HDR_SIZE, &attr, &pcmu_p->pcmu_ac[1]) != DDI_SUCCESS) {
+
+		cmn_err(CE_WARN, "%s%d: unable to map reg entry 1\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		/* Release both earlier mappings (ac[2] was leaked before). */
+		ddi_regs_map_free(&pcmu_p->pcmu_ac[0]);
+		ddi_regs_map_free(&pcmu_p->pcmu_ac[2]);
+		return (DDI_FAILURE);
+	}
+	PCMU_DBG2(PCMU_DBG_ATTACH, dip, "address (%p,%p)\n",
+	    pcmu_p->pcmu_address[0], pcmu_p->pcmu_address[1]);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * unmap_pcmu_registers:
+ *
+ * Tear down the three register mappings created by
+ * map_pcmu_registers().
+ *
+ * used by: pcmu_detach()
+ *
+ * return value: none
+ */
+static void
+unmap_pcmu_registers(pcmu_t *pcmu_p)
+{
+	int i;
+
+	for (i = 0; i < 3; i++)
+		ddi_regs_map_free(&pcmu_p->pcmu_ac[i]);
+}
+
+/*
+ * These convenience wrappers rely on map_pcmu_registers() having set
+ * up pcmu_address[0-2] beforehand.
+ */
+static uintptr_t
+get_reg_base(pcmu_t *pcmu_p)
+{
+	/* The main register block is the 3rd "reg" entry. */
+	uintptr_t base = (uintptr_t)pcmu_p->pcmu_address[2];
+
+	return (base);
+}
+
+/* The CMU-CH config reg base is always the 2nd reg entry */
+static uintptr_t
+get_config_reg_base(pcmu_t *pcmu_p)
+{
+	uintptr_t base = (uintptr_t)pcmu_p->pcmu_address[1];
+
+	return (base);
+}
+
+/*
+ * Build an interrupt map register value from a mondo number and the
+ * target CPU id.  cpu_id is widened to 64 bits before shifting so
+ * that target-id bits destined at or above bit 31 are not lost to
+ * 32-bit shift truncation (the shift previously happened in
+ * uint32_t arithmetic).
+ */
+uint64_t
+ib_get_map_reg(pcmu_ib_mondo_t mondo, uint32_t cpu_id)
+{
+	return ((mondo) | ((uint64_t)cpu_id << PCMU_INTR_MAP_REG_TID_SHIFT) |
+	    PCMU_INTR_MAP_REG_VALID);
+}
+
+/* Extract the target CPU id field from an interrupt map register. */
+uint32_t
+ib_map_reg_get_cpu(volatile uint64_t reg)
+{
+	uint64_t tid = reg & PCMU_INTR_MAP_REG_TID;
+
+	return (tid >> PCMU_INTR_MAP_REG_TID_SHIFT);
+}
+
+/* Address of the OBIO interrupt map register for the given ino. */
+uint64_t *
+ib_intr_map_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
+{
+	uintptr_t offset;
+
+	/* Only OBIO inos (bit 5 set) are valid here. */
+	ASSERT(ino & 0x20);
+	offset = ((uint_t)ino & 0x1f) << 3;
+	return ((uint64_t *)(pib_p->pib_obio_intr_map_regs + offset));
+}
+
+/* Address of the OBIO clear-interrupt register for the given ino. */
+uint64_t *
+ib_clear_intr_reg_addr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
+{
+	uintptr_t offset;
+
+	/* Only OBIO inos (bit 5 set) are valid here. */
+	ASSERT(ino & 0x20);
+	offset = ((uint_t)ino & 0x1f) << 3;
+	return ((uint64_t *)(pib_p->pib_obio_clear_intr_regs + offset));
+}
+
+/*
+ * Initialize the interrupt-block descriptor: derive the interrupt
+ * group number from the CMU-CH id and record the OBIO map/clear
+ * register base addresses relative to the main register block.
+ * Returns the main register base address.
+ */
+uintptr_t
+pcmu_ib_setup(pcmu_ib_t *pib_p)
+{
+ pcmu_t *pcmu_p = pib_p->pib_pcmu_p;
+ uintptr_t a = get_reg_base(pcmu_p);
+
+ pib_p->pib_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
+ pib_p->pib_max_ino = PCMU_MAX_INO;
+ pib_p->pib_obio_intr_map_regs = a + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
+ pib_p->pib_obio_clear_intr_regs =
+ a + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
+ return (a);
+}
+
+/*
+ * Return the cpuid to be used for an ino.
+ *
+ * On multi-function pci devices, functions have separate devinfo nodes and
+ * interrupts.
+ *
+ * This function determines if there is already an established slot-oriented
+ * interrupt-to-cpu binding; if there is, it returns that cpu.  Otherwise a
+ * new cpu is selected by intr_dist_cpuid().
+ *
+ * The devinfo node we are trying to associate a cpu with is
+ * ino_p->pino_ih_head->ih_dip.
+ */
+uint32_t
+pcmu_intr_dist_cpuid(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p)
+{
+ dev_info_t *rdip = ino_p->pino_ih_head->ih_dip;
+ dev_info_t *prdip = ddi_get_parent(rdip);
+ pcmu_ib_ino_info_t *sino_p;
+ dev_info_t *sdip;
+ dev_info_t *psdip;
+ char *buf1 = NULL, *buf2 = NULL;
+ char *s1, *s2, *s3;
+ int l2;
+ int cpu_id;
+
+ /* must be CMU-CH driver parent (not ebus) */
+ if (strcmp(ddi_driver_name(prdip), "pcicmu") != 0)
+ goto newcpu;
+
+ /*
+ * From PCI 1275 binding: 2.2.1.3 Unit Address representation:
+ * Since the "unit-number" is the address that appears in on Open
+ * Firmware 'device path', it follows that only the DD and DD,FF
+ * forms of the text representation can appear in a 'device path'.
+ *
+ * The rdip unit address is of the form "DD[,FF]". Define two
+ * unit address strings that represent same-slot use: "DD" and "DD,".
+ * The first compare uses strcmp, the second uses strncmp.
+ */
+ s1 = ddi_get_name_addr(rdip);
+ if (s1 == NULL) {
+ goto newcpu;
+ }
+
+ /* Working copies: buf1 becomes "DD" (exact), buf2 becomes "DD," (prefix) */
+ buf1 = kmem_alloc(MAXNAMELEN, KM_SLEEP); /* strcmp */
+ buf2 = kmem_alloc(MAXNAMELEN, KM_SLEEP); /* strncmp */
+ s1 = strcpy(buf1, s1);
+ s2 = strcpy(buf2, s1);
+
+ s1 = strrchr(s1, ',');
+ if (s1) {
+ *s1 = '\0'; /* have "DD,FF" */
+ s1 = buf1; /* search via strcmp "DD" */
+
+ s2 = strrchr(s2, ',');
+ *(s2 + 1) = '\0';
+ s2 = buf2;
+ l2 = strlen(s2); /* search via strncmp "DD," */
+ } else {
+ /* no comma: s1 stays NULL, only the "DD," prefix compare is used */
+ (void) strcat(s2, ","); /* have "DD" */
+ l2 = strlen(s2); /* search via strncmp "DD," */
+ }
+
+ /*
+ * Search the established ino list for devinfo nodes bound
+ * to an ino that matches one of the slot use strings.
+ */
+ ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
+ for (sino_p = pib_p->pib_ino_lst; sino_p; sino_p = sino_p->pino_next) {
+ /* skip self and non-established */
+ if ((sino_p == ino_p) || (sino_p->pino_established == 0))
+ continue;
+
+ /* skip non-siblings */
+ sdip = sino_p->pino_ih_head->ih_dip;
+ psdip = ddi_get_parent(sdip);
+ if (psdip != prdip)
+ continue;
+
+ /* must be CMU-CH driver parent (not ebus) */
+ if (strcmp(ddi_driver_name(psdip), "pcicmu") != 0)
+ continue;
+
+ /* NOTE(review): s3 is not checked for NULL before strcmp/strncmp */
+ s3 = ddi_get_name_addr(sdip);
+ if ((s1 && (strcmp(s1, s3) == 0)) ||
+ (strncmp(s2, s3, l2) == 0)) {
+ extern int intr_dist_debug;
+
+ if (intr_dist_debug) {
+ cmn_err(CE_CONT, "intr_dist: "
+ "pcicmu`pcmu_intr_dist_cpuid "
+ "%s#%d %s: cpu %d established "
+ "by %s#%d %s\n", ddi_driver_name(rdip),
+ ddi_get_instance(rdip),
+ ddi_deviname(rdip, buf1),
+ sino_p->pino_cpuid,
+ ddi_driver_name(sdip),
+ ddi_get_instance(sdip),
+ ddi_deviname(sdip, buf2));
+ }
+ break;
+ }
+ }
+
+ /* If a slot use match is found then use established cpu */
+ if (sino_p) {
+ cpu_id = sino_p->pino_cpuid; /* target established cpu */
+ goto out;
+ }
+
+newcpu: cpu_id = intr_dist_cpuid(); /* target new cpu */
+
+out: if (buf1)
+ kmem_free(buf1, MAXNAMELEN);
+ if (buf2)
+ kmem_free(buf2, MAXNAMELEN);
+ return (cpu_id);
+}
+
+/* Release the U2U interrupt-target translation state for this CMU-CH. */
+void
+pcmu_cb_teardown(pcmu_t *pcmu_p)
+{
+ u2u_ittrans_data_t *cookie;
+
+ cookie = (u2u_ittrans_data_t *)pcmu_p->pcmu_cb_p->pcb_ittrans_cookie;
+ u2u_ittrans_uninit(cookie);
+}
+
+/*
+ * Register the ECC interrupt handler for ino index inum.
+ * Registration failure is fatal (VERIFY).
+ */
+int
+pcmu_ecc_add_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
+{
+ uint32_t mondo;
+
+ /* Mondo = CB interrupt group number | ECC ino. */
+ mondo = pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS;
+ mondo |= pcmu_p->pcmu_inos[inum];
+
+ VERIFY(add_ivintr(mondo, pcmu_pil[inum], pcmu_ecc_intr,
+     (caddr_t)eii_p, NULL) == 0);
+ return (PCMU_ATTACH_RETCODE(PCMU_ECC_OBJ,
+     PCMU_OBJ_INTR_ADD, DDI_SUCCESS));
+}
+
+/* Unregister the ECC interrupt handler installed by pcmu_ecc_add_intr(). */
+/* ARGSUSED */
+void
+pcmu_ecc_rem_intr(pcmu_t *pcmu_p, int inum, pcmu_ecc_intr_info_t *eii_p)
+{
+ uint32_t mondo;
+
+ mondo = pcmu_p->pcmu_cb_p->pcb_ign << PCMU_INO_BITS;
+ mondo |= pcmu_p->pcmu_inos[inum];
+ rem_ivintr(mondo, NULL);
+}
+
+/*
+ * Clear any latched PBM error state: reset the asynchronous fault
+ * status register and the error bits of the PCI configuration status
+ * register, then publish the latency timer as a property.
+ */
+void
+pcmu_pbm_configure(pcmu_pbm_t *pcbm_p)
+{
+ pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+ dev_info_t *dip = pcmu_p->pcmu_dip;
+
+/* NOTE(review): PCI_STAT_S_PERROR appears twice in csr_err below — confirm
+ * whether PCI_STAT_S_SYSERR was intended for one of the occurrences. */
+#define pbm_err ((PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_PE_SHIFT) | \
+ (PCMU_PCI_AFSR_E_MASK << PCMU_PCI_AFSR_SE_SHIFT))
+#define csr_err (PCI_STAT_PERROR | PCI_STAT_S_PERROR | \
+ PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | \
+ PCI_STAT_S_TARG_AB | PCI_STAT_S_PERROR)
+
+ /*
+ * Clear any PBM errors.
+ */
+ *pcbm_p->pcbm_async_flt_status_reg = pbm_err;
+
+ /*
+ * Clear error bits in configuration status register.
+ */
+ PCMU_DBG1(PCMU_DBG_ATTACH, dip,
+ "pcmu_pbm_configure: conf status reg=%x\n", csr_err);
+
+ pcbm_p->pcbm_config_header->ch_status_reg = csr_err;
+
+ PCMU_DBG1(PCMU_DBG_ATTACH, dip,
+ "pcmu_pbm_configure: conf status reg==%x\n",
+ pcbm_p->pcbm_config_header->ch_status_reg);
+
+ (void) ndi_prop_update_int(DDI_DEV_T_ANY, dip, "latency-timer",
+ (int)pcbm_p->pcbm_config_header->ch_latency_timer_reg);
+#undef pbm_err
+#undef csr_err
+}
+
+/*
+ * Turn off PBM error reporting: clear the error-interrupt enable in the
+ * PBM control register and disable the PBM error ino (without waiting
+ * for a pending interrupt to drain).  Always returns BF_NONE.
+ */
+uint_t
+pcmu_pbm_disable_errors(pcmu_pbm_t *pcbm_p)
+{
+ pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+ pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+
+ /*
+ * Disable error and streaming byte hole interrupts via the
+ * PBM control register.
+ */
+ *pcbm_p->pcbm_ctrl_reg &= ~PCMU_PCI_CTRL_ERR_INT_EN;
+
+ /*
+ * Disable error interrupts via the interrupt mapping register.
+ */
+ pcmu_ib_intr_disable(pib_p,
+ pcmu_p->pcmu_inos[CBNINTR_PBM], PCMU_IB_INTR_NOWAIT);
+ return (BF_NONE);
+}
+
+/*
+ * Program the control block: derive the physical addresses of the
+ * interrupt mapping/clear/state register banks, clear any pending UPA
+ * address parity error, and initialize the U2U translation state.
+ */
+void
+pcmu_cb_setup(pcmu_t *pcmu_p)
+{
+ uint64_t csr, csr_pa, pa;
+ pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
+
+ pcb_p->pcb_ign = PCMU_ID_TO_IGN(pcmu_p->pcmu_id);
+ /*
+ * NOTE(review): base PA is derived by shifting the pfn of
+ * pcmu_address[0]; assumes the register block PA occupies the
+ * upper 32 bits — confirm against the CMU-CH address map.
+ */
+ pa = (uint64_t)hat_getpfnum(kas.a_hat, pcmu_p->pcmu_address[0]);
+ pcb_p->pcb_base_pa = pa = pa >> (32 - MMU_PAGESHIFT) << 32;
+ pcb_p->pcb_map_pa = pa + PCMU_IB_OBIO_INTR_MAP_REG_OFFSET;
+ pcb_p->pcb_clr_pa = pa + PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET;
+ pcb_p->pcb_obsta_pa = pa + PCMU_IB_OBIO_INTR_STATE_DIAG_REG;
+
+ csr_pa = pa + PCMU_CB_CONTROL_STATUS_REG_OFFSET;
+ csr = lddphysio(csr_pa);
+
+ /*
+ * Clear any pending address parity errors.
+ */
+ if (csr & PCMU_CB_CONTROL_STATUS_APERR) {
+ csr |= PCMU_CB_CONTROL_STATUS_APERR;
+ cmn_err(CE_WARN, "clearing UPA address parity error\n");
+ }
+ csr |= PCMU_CB_CONTROL_STATUS_APCKEN;
+ csr &= ~PCMU_CB_CONTROL_STATUS_IAP;
+ stdphysio(csr_pa, csr);
+
+ u2u_ittrans_init(pcmu_p,
+ (u2u_ittrans_data_t **)&pcb_p->pcb_ittrans_cookie);
+}
+
+/*
+ * Describe the UE AFSR layout for the ECC handler: no pending-error
+ * mask, doubleword offset field location, and 8-byte (2^3) granularity.
+ */
+void
+pcmu_ecc_setup(pcmu_ecc_t *pecc_p)
+{
+ pecc_p->pecc_ue.pecc_errpndg_mask = 0;
+ pecc_p->pecc_ue.pecc_offset_mask = PCMU_ECC_UE_AFSR_DW_OFFSET;
+ pecc_p->pecc_ue.pecc_offset_shift = PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
+ pecc_p->pecc_ue.pecc_size_log2 = 3;
+}
+
+/* PBM control block lives in the 1st "reg" entry (pcmu_address[0]). */
+static uintptr_t
+get_pbm_reg_base(pcmu_t *pcmu_p)
+{
+ uintptr_t pbm_base = (uintptr_t)pcmu_p->pcmu_address[0];
+
+ return (pbm_base);
+}
+
+/*
+ * Compute the virtual addresses of the PBM's configuration header and
+ * control/diagnostic/fault registers from the mapped register bases.
+ */
+void
+pcmu_pbm_setup(pcmu_pbm_t *pcbm_p)
+{
+ pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+
+ /*
+ * Get the base virtual address for the PBM control block.
+ */
+ uintptr_t a = get_pbm_reg_base(pcmu_p);
+
+ /*
+ * Get the virtual address of the PCI configuration header.
+ * This should be mapped little-endian.
+ */
+ pcbm_p->pcbm_config_header =
+ (config_header_t *)get_config_reg_base(pcmu_p);
+
+ /*
+ * Get the virtual addresses for control, error and diag
+ * registers.
+ */
+ pcbm_p->pcbm_ctrl_reg = (uint64_t *)(a + PCMU_PCI_CTRL_REG_OFFSET);
+ pcbm_p->pcbm_diag_reg = (uint64_t *)(a + PCMU_PCI_DIAG_REG_OFFSET);
+ pcbm_p->pcbm_async_flt_status_reg =
+ (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_STATUS_REG_OFFSET);
+ pcbm_p->pcbm_async_flt_addr_reg =
+ (uint64_t *)(a + PCMU_PCI_ASYNC_FLT_ADDR_REG_OFFSET);
+}
+
+/* No PBM-specific teardown is required; placeholder for symmetry. */
+/*ARGSUSED*/
+void
+pcmu_pbm_teardown(pcmu_pbm_t *pcbm_p)
+{
+}
+
+/* Number of UPA interrupt proxies from the OBP property; default 1. */
+int
+pcmu_get_numproxy(dev_info_t *dip)
+{
+ int nproxy;
+
+ nproxy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+     "#upa-interrupt-proxies", 1);
+ return (nproxy);
+}
+
+/* Read the node's "portid" property; -1 when the property is absent. */
+int
+pcmu_get_portid(dev_info_t *dip)
+{
+ int portid;
+
+ portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+     "portid", -1);
+ return (portid);
+}
+
+/*
+ * CMU-CH Performance Events.
+ * Each entry pairs an event name with its PCR select mask; the table is
+ * consumed by pcmu_create_name_kstat() below.
+ */
+static pcmu_kev_mask_t
+pcicmu_pcmu_events[] = {
+ {"pio_cycles_b", 0xf}, {"interrupts", 0x11},
+ {"upa_inter_nack", 0x12}, {"pio_reads", 0x13},
+ {"pio_writes", 0x14},
+ {"clear_pic", 0x1f}
+};
+
+/*
+ * Create the picN kstat's.  Allocation is KM_NOSLEEP; on failure we
+ * only warn, leaving pcmu_name_kstat NULL (pcmu_kstat_fini() copes).
+ */
+void
+pcmu_kstat_init()
+{
+ pcmu_name_kstat = (pcmu_ksinfo_t *)kmem_alloc(sizeof (pcmu_ksinfo_t),
+     KM_NOSLEEP);
+
+ if (pcmu_name_kstat == NULL) {
+ cmn_err(CE_WARN, "pcicmu : no space for kstat\n");
+ return;
+ }
+
+ pcmu_name_kstat->pic_no_evs =
+     sizeof (pcicmu_pcmu_events) / sizeof (pcmu_kev_mask_t);
+ pcmu_name_kstat->pic_shift[0] = PCMU_SHIFT_PIC0;
+ pcmu_name_kstat->pic_shift[1] = PCMU_SHIFT_PIC1;
+ pcmu_create_name_kstat("pcmup", pcmu_name_kstat, pcicmu_pcmu_events);
+}
+
+/*
+ * Called from _fini(): tear down the name kstats and release the
+ * bookkeeping structure, if pcmu_kstat_init() managed to create it.
+ */
+void
+pcmu_kstat_fini()
+{
+ if (pcmu_name_kstat == NULL)
+ return;
+
+ pcmu_delete_name_kstat(pcmu_name_kstat);
+ kmem_free(pcmu_name_kstat, sizeof (pcmu_ksinfo_t));
+ pcmu_name_kstat = NULL;
+}
+
+/*
+ * Create the performance 'counters' kstat, addressing the PCR/PIC
+ * registers by physical address.
+ */
+void
+pcmu_add_upstream_kstat(pcmu_t *pcmu_p)
+{
+ pcmu_cntr_pa_t *pa_p = &pcmu_p->pcmu_uks_pa;
+ uint64_t base_pa = va_to_pa((void *)get_reg_base(pcmu_p));
+
+ pa_p->pcr_pa = base_pa + PCMU_PERF_PCR_OFFSET;
+ pa_p->pic_pa = base_pa + PCMU_PERF_PIC_OFFSET;
+ pcmu_p->pcmu_uksp = pcmu_create_cntr_kstat(pcmu_p, "pcmup",
+     NUM_OF_PICS, pcmu_cntr_kstat_pa_update, pa_p);
+}
+
+/*
+ * u2u_ittrans_init() is called from pcmu_cb_setup(), once per CMU.
+ * The second argument, "ittrans_cookie", is the address of the
+ * pcb_ittrans_cookie member of pcb_p; the allocated interrupt
+ * translation block is returned through it.
+ */
+static void
+u2u_ittrans_init(pcmu_t *pcmu_p, u2u_ittrans_data_t **ittrans_cookie)
+{
+
+ u2u_ittrans_data_t *u2u_trans_p;
+ ddi_device_acc_attr_t attr;
+ int ret;
+ int board;
+
+ /*
+ * Allocate the data structure to support U2U's
+ * interrupt target translations.
+ */
+ u2u_trans_p = (u2u_ittrans_data_t *)
+ kmem_zalloc(sizeof (u2u_ittrans_data_t), KM_SLEEP);
+
+ /*
+ * Get other properties, "board#"
+ */
+ board = ddi_getprop(DDI_DEV_T_ANY, pcmu_p->pcmu_dip,
+ DDI_PROP_DONTPASS, "board#", -1);
+
+ u2u_trans_p->u2u_board = board;
+
+ if (board == -1) {
+ /* this cannot happen on production systems */
+ cmn_err(CE_PANIC, "u2u:Invalid property;board = %d", board);
+ }
+
+ /*
+ * Initialize interrupt target translations mutex.
+ */
+ mutex_init(&(u2u_trans_p->u2u_ittrans_lock), "u2u_ittrans_lock",
+ MUTEX_DEFAULT, NULL);
+
+ /*
+ * Get U2U's registers space by ddi_regs_map_setup(9F)
+ */
+ attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
+ attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
+ attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
+
+ ret = ddi_regs_map_setup(pcmu_p->pcmu_dip,
+ REGS_INDEX_OF_U2U, (caddr_t *)(&(u2u_trans_p->u2u_regs_base)),
+ 0, 0, &attr, &(u2u_trans_p->u2u_acc));
+
+ /*
+ * check result of ddi_regs_map_setup().
+ */
+ if (ret != DDI_SUCCESS) {
+ cmn_err(CE_PANIC, "u2u%d: registers map setup failed", board);
+ }
+
+ /*
+ * Read Port-id(1 byte) in u2u
+ * NOTE(review): the access below is a 32-bit load; confirm the
+ * port-id really occupies (or is padded to) 4 bytes at this offset.
+ */
+ u2u_trans_p->u2u_port_id = *(volatile int32_t *)
+ (u2u_trans_p->u2u_regs_base + U2U_PID_REGISTER_OFFSET);
+
+ if (pcmu_p->pcmu_id != u2u_trans_p->u2u_port_id) {
+ cmn_err(CE_PANIC, "u2u%d: Invalid Port-ID", board);
+ }
+
+ *ittrans_cookie = u2u_trans_p;
+}
+
+/*
+ * u2u_ittrans_resume() is called from pcmu_obj_resume() at DDI_RESUME time.
+ */
+static void
+u2u_ittrans_resume(u2u_ittrans_data_t **ittrans_cookie)
+{
+
+ u2u_ittrans_data_t *u2u_trans_p;
+ u2u_ittrans_id_t *ittrans_id_p;
+ uintptr_t data_reg_addr;
+ int ix;
+
+ u2u_trans_p = *ittrans_cookie;
+
+ /*
+ * Set U2U Data Register
+ * Re-program each in-use data register with the CPU id it held
+ * before the suspend; entries whose u2u_ino_map_reg is NULL were
+ * never assigned and are skipped.
+ */
+ for (ix = 0; ix < U2U_DATA_NUM; ix++) {
+ ittrans_id_p = &(u2u_trans_p->u2u_ittrans_id[ix]);
+ data_reg_addr = u2u_trans_p->u2u_regs_base +
+ U2U_DATA_REGISTER_OFFSET + (ix * sizeof (uint64_t));
+ if (ittrans_id_p->u2u_ino_map_reg == NULL) {
+ /* This index was not set */
+ continue;
+ }
+ /* 32-bit store: only the low word of the data register is written */
+ *(volatile uint32_t *) (data_reg_addr) =
+ (uint32_t)ittrans_id_p->u2u_tgt_cpu_id;
+
+ }
+}
+
+/*
+ * u2u_ittrans_uninit() is called from ib_destroy() at detach time,
+ * or when an error occurs during attach.
+ */
+static void
+u2u_ittrans_uninit(u2u_ittrans_data_t *ittrans_cookie)
+{
+
+ if (ittrans_cookie == NULL) {
+ return; /* not supported */
+ }
+
+ if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
+ return; /* illegal case */
+ }
+
+ /* Unmap the U2U registers, then release lock and cookie storage. */
+ ddi_regs_map_free(&(ittrans_cookie->u2u_acc));
+ mutex_destroy(&(ittrans_cookie->u2u_ittrans_lock));
+ kmem_free((void *)ittrans_cookie, sizeof (u2u_ittrans_data_t));
+}
+
+/*
+ * u2u_translate_tgtid(pcmu_p, cpu_id, pino_map_reg):
+ * searches the U2U data registers for the index already bound to
+ * pino_map_reg (or the first free one), stores cpu_id in that U2U
+ * Data Register, and returns the index.  Returns cpu_id unchanged
+ * when no translation facility exists, or -1 for an invalid cookie.
+ */
+int
+u2u_translate_tgtid(pcmu_t *pcmu_p, uint_t cpu_id,
+ volatile uint64_t *pino_map_reg)
+{
+
+ int index = -1;
+ int ix;
+ int err_level; /* severity level for cmn_err */
+ u2u_ittrans_id_t *ittrans_id_p;
+ uintptr_t data_reg_addr;
+ u2u_ittrans_data_t *ittrans_cookie;
+
+ ittrans_cookie =
+ (u2u_ittrans_data_t *)(pcmu_p->pcmu_cb_p->pcb_ittrans_cookie);
+
+ if (ittrans_cookie == NULL) {
+ return (cpu_id);
+ }
+
+ if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
+ return (-1); /* illegal case */
+ }
+
+ mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
+
+ /*
+ * Decide index No. of U2U Data registers in either
+ * already used by same pino_map_reg, or empty.
+ */
+ for (ix = 0; ix < U2U_DATA_NUM; ix++) {
+ ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
+ if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
+ /* already used this pino_map_reg */
+ index = ix;
+ break;
+ }
+ if (index == -1 &&
+ ittrans_id_p->u2u_ino_map_reg == NULL) {
+ index = ix;
+ }
+ }
+
+ if (index == -1) {
+ /* Don't panic if we're already panicking. */
+ if (panicstr) {
+ err_level = CE_WARN;
+ } else {
+ err_level = CE_PANIC;
+ }
+ cmn_err(err_level, "u2u%d:No more U2U-Data regs!!",
+ ittrans_cookie->u2u_board);
+ /*
+ * Only reached when panicstr is set (CE_PANIC does not
+ * return); drop the lock before bailing out so it is not
+ * leaked.
+ */
+ mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
+ return (cpu_id);
+ }
+
+ /*
+ * For U2U
+ * set cpu_id into u2u_data_reg by index.
+ * ((uint64_t)(u2u_regs_base
+ * + U2U_DATA_REGISTER_OFFSET))[index] = cpu_id;
+ */
+
+ data_reg_addr = ittrans_cookie->u2u_regs_base
+ + U2U_DATA_REGISTER_OFFSET
+ + (index * sizeof (uint64_t));
+
+ /*
+ * Set cpu_id into U2U Data register[index]
+ */
+ *(volatile uint32_t *) (data_reg_addr) = (uint32_t)cpu_id;
+
+ /*
+ * Record the binding in software state, except while panicking
+ * or rebooting.
+ */
+ if (!panicstr) {
+ ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[index]);
+ ittrans_id_p->u2u_tgt_cpu_id = cpu_id;
+ ittrans_id_p->u2u_ino_map_reg = pino_map_reg;
+ }
+
+ mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
+
+ return (index);
+}
+
+/*
+ * u2u_ittrans_cleanup() is called from common_pcmu_ib_intr_disable()
+ * after intr_rem_cpu(mondo) has been called.  It frees the U2U data
+ * register entry bound to pino_map_reg, if any.
+ */
+void
+u2u_ittrans_cleanup(u2u_ittrans_data_t *ittrans_cookie,
+ volatile uint64_t *pino_map_reg)
+{
+
+ int ix;
+ u2u_ittrans_id_t *ittrans_id_p;
+
+ if (ittrans_cookie == NULL) {
+ return;
+ }
+
+ if (ittrans_cookie == (u2u_ittrans_data_t *)(-1)) {
+ return; /* illegal case */
+ }
+
+ mutex_enter(&(ittrans_cookie->u2u_ittrans_lock));
+
+ /* At most one entry can match; stop at the first hit. */
+ for (ix = 0; ix < U2U_DATA_NUM; ix++) {
+ ittrans_id_p = &(ittrans_cookie->u2u_ittrans_id[ix]);
+ if (ittrans_id_p->u2u_ino_map_reg == pino_map_reg) {
+ ittrans_id_p->u2u_ino_map_reg = NULL;
+ break;
+ }
+ }
+
+ mutex_exit(&(ittrans_cookie->u2u_ittrans_lock));
+}
+
+/*
+ * pcmu_ecc_classify, called by ecc_handler to classify ecc errors
+ * and determine if we should panic or not.
+ */
+void
+pcmu_ecc_classify(uint64_t err, pcmu_ecc_errstate_t *ecc_err_p)
+{
+ struct async_flt *ecc = &ecc_err_p->ecc_aflt;
+ /* LINTED */
+ pcmu_t *pcmu_p = ecc_err_p->ecc_ii_p.pecc_p->pecc_pcmu_p;
+
+ ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
+
+ ecc_err_p->ecc_bridge_type = PCI_OPLCMU; /* RAGS */
+ /*
+ * Get the parent bus id that caused the error.
+ */
+ ecc_err_p->ecc_dev_id = (ecc_err_p->ecc_afsr & PCMU_ECC_UE_AFSR_ID)
+ >> PCMU_ECC_UE_AFSR_ID_SHIFT;
+ /*
+ * Determine the doubleword offset of the error.
+ */
+ ecc_err_p->ecc_dw_offset = (ecc_err_p->ecc_afsr &
+ PCMU_ECC_UE_AFSR_DW_OFFSET) >> PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT;
+ /*
+ * Determine the primary error type.  Only PIO UEs are recognized
+ * here; any other value of err leaves the errstate unclassified.
+ */
+ switch (err) {
+ case PCMU_ECC_UE_AFSR_E_PIO:
+ if (ecc_err_p->pecc_pri) {
+ ecc->flt_erpt_class = PCI_ECC_PIO_UE;
+ } else {
+ ecc->flt_erpt_class = PCI_ECC_SEC_PIO_UE;
+ }
+ /* For CMU-CH, a UE is always fatal. */
+ ecc->flt_panic = 1;
+ break;
+
+ default:
+ return;
+ }
+}
+
+/*
+ * pcmu_pbm_classify, called by pcmu_pbm_afsr_report to classify piow afsr.
+ * Returns the number of errors recognized (0 or 1: only Master Abort is
+ * classified here).
+ */
+int
+pcmu_pbm_classify(pcmu_pbm_errstate_t *pbm_err_p)
+{
+ uint32_t e;
+ int nerr = 0;
+ char **tmp_class;
+
+ /* Select the primary or secondary error bits and log target. */
+ if (pbm_err_p->pcbm_pri) {
+ tmp_class = &pbm_err_p->pcbm_pci.pcmu_err_class;
+ e = PBM_AFSR_TO_PRIERR(pbm_err_p->pbm_afsr);
+ pbm_err_p->pbm_log = FM_LOG_PCI;
+ } else {
+ tmp_class = &pbm_err_p->pbm_err_class;
+ e = PBM_AFSR_TO_SECERR(pbm_err_p->pbm_afsr);
+ pbm_err_p->pbm_log = FM_LOG_PBM;
+ }
+
+ if (e & PCMU_PCI_AFSR_E_MA) {
+ *tmp_class = pbm_err_p->pcbm_pri ? PCI_MA : PCI_SEC_MA;
+ nerr++;
+ }
+ return (nerr);
+}
+
+/*
+ * Function used to clear PBM/PCI/IOMMU error state after error handling
+ * is complete. Only clearing error bits which have been logged. Called by
+ * pcmu_pbm_err_handler and pcmu_bus_exit.
+ */
+static void
+pcmu_clear_error(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
+{
+ pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+
+ ASSERT(MUTEX_HELD(&pcbm_p->pcbm_pcmu_p->pcmu_err_mutex));
+
+ /* Write back the captured values; W1C semantics clear logged bits. */
+ *pcbm_p->pcbm_ctrl_reg = pbm_err_p->pbm_ctl_stat;
+ *pcbm_p->pcbm_async_flt_status_reg = pbm_err_p->pbm_afsr;
+ pcbm_p->pcbm_config_header->ch_status_reg =
+ pbm_err_p->pcbm_pci.pcmu_cfg_stat;
+}
+
+/*
+ * PBM error handler: snapshot the error state, classify peek/poke
+ * accesses as nonfatal where safe, report AFSR and config-space errors,
+ * clear the logged bits, and return the worst severity seen.
+ * Must be called with pcmu_err_mutex held.
+ */
+/*ARGSUSED*/
+int
+pcmu_pbm_err_handler(dev_info_t *dip, ddi_fm_error_t *derr,
+ const void *impl_data, int caller)
+{
+ int fatal = 0;
+ int nonfatal = 0;
+ int unknown = 0;
+ uint32_t prierr, secerr;
+ pcmu_pbm_errstate_t pbm_err;
+ pcmu_t *pcmu_p = (pcmu_t *)impl_data;
+ int ret = 0;
+
+ ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
+ pcmu_pbm_errstate_get(pcmu_p, &pbm_err);
+
+ derr->fme_ena = derr->fme_ena ? derr->fme_ena :
+ fm_ena_generate(0, FM_ENA_FMT1);
+
+ prierr = PBM_AFSR_TO_PRIERR(pbm_err.pbm_afsr);
+ secerr = PBM_AFSR_TO_SECERR(pbm_err.pbm_afsr);
+
+ if (derr->fme_flag == DDI_FM_ERR_PEEK) {
+ /*
+ * For ddi_peek treat all events as nonfatal. We only
+ * really call this function so that pcmu_clear_error()
+ * and ndi_fm_handler_dispatch() will get called.
+ */
+ nonfatal++;
+ goto done;
+ } else if (derr->fme_flag == DDI_FM_ERR_POKE) {
+ /*
+ * For ddi_poke we can treat as nonfatal if the
+ * following conditions are met :
+ * 1. Make sure only primary error is MA/TA
+ * 2. Make sure no secondary error
+ * 3. check pci config header stat reg to see MA/TA is
+ * logged. We cannot verify only MA/TA is recorded
+ * since it gets much more complicated when a
+ * PCI-to-PCI bridge is present.
+ */
+ if ((prierr == PCMU_PCI_AFSR_E_MA) && !secerr &&
+ (pbm_err.pcbm_pci.pcmu_cfg_stat & PCI_STAT_R_MAST_AB)) {
+ nonfatal++;
+ goto done;
+ }
+ }
+
+ if (prierr || secerr) {
+ ret = pcmu_pbm_afsr_report(dip, derr->fme_ena, &pbm_err);
+ if (ret == DDI_FM_FATAL) {
+ fatal++;
+ } else {
+ nonfatal++;
+ }
+ }
+
+ ret = pcmu_cfg_report(dip, derr, &pbm_err.pcbm_pci, caller, prierr);
+ if (ret == DDI_FM_FATAL) {
+ fatal++;
+ } else if (ret == DDI_FM_NONFATAL) {
+ nonfatal++;
+ }
+
+done:
+ /*
+ * NOTE(review): on the fall-through path the pcmu_cfg_report()
+ * result is counted both above and here.  The duplicate count
+ * does not change the final classification (fatal > 0 either
+ * way), but the double accounting looks unintentional.
+ */
+ if (ret == DDI_FM_FATAL) {
+ fatal++;
+ } else if (ret == DDI_FM_NONFATAL) {
+ nonfatal++;
+ } else if (ret == DDI_FM_UNKNOWN) {
+ unknown++;
+ }
+
+ /* Cleanup and reset error bits */
+ pcmu_clear_error(pcmu_p, &pbm_err);
+
+ return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
+ (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
+}
+
+/*
+ * Return nonzero when the PBM has a logged error: any of the serious
+ * PCI configuration status bits, or a primary error in the AFSR.
+ * Must be called with pcmu_err_mutex held.
+ */
+int
+pcmu_check_error(pcmu_t *pcmu_p)
+{
+ pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+ uint16_t cfg_stat;
+ uint64_t afsr;
+ const uint16_t cfg_err_bits = PCI_STAT_S_PERROR | PCI_STAT_S_TARG_AB |
+     PCI_STAT_R_TARG_AB | PCI_STAT_R_MAST_AB |
+     PCI_STAT_S_SYSERR | PCI_STAT_PERROR;
+
+ ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
+
+ /* Snapshot config status first, then the AFSR, as before. */
+ cfg_stat = pcbm_p->pcbm_config_header->ch_status_reg;
+ afsr = *pcbm_p->pcbm_async_flt_status_reg;
+
+ if ((cfg_stat & cfg_err_bits) != 0 ||
+     PBM_AFSR_TO_PRIERR(afsr) != 0) {
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Function used to gather PBM/PCI error state for the
+ * pcmu_pbm_err_handler. This function must be called while pcmu_err_mutex
+ * is held.
+ */
+static void
+pcmu_pbm_errstate_get(pcmu_t *pcmu_p, pcmu_pbm_errstate_t *pbm_err_p)
+{
+ pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+
+ ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
+ bzero(pbm_err_p, sizeof (pcmu_pbm_errstate_t));
+
+ /*
+ * Capture all pbm error state for later logging
+ */
+ pbm_err_p->pbm_bridge_type = PCI_OPLCMU; /* RAGS */
+ pbm_err_p->pcbm_pci.pcmu_cfg_stat =
+ pcbm_p->pcbm_config_header->ch_status_reg;
+ pbm_err_p->pbm_ctl_stat = *pcbm_p->pcbm_ctrl_reg;
+ pbm_err_p->pcbm_pci.pcmu_cfg_comm =
+ pcbm_p->pcbm_config_header->ch_command_reg;
+ pbm_err_p->pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
+ /* AFAR is read twice; both fields record the fault address. */
+ pbm_err_p->pbm_afar = *pcbm_p->pcbm_async_flt_addr_reg;
+ pbm_err_p->pcbm_pci.pcmu_pa = *pcbm_p->pcbm_async_flt_addr_reg;
+}
+
+static void
+pcmu_pbm_clear_error(pcmu_pbm_t *pcbm_p)
+{
+ uint64_t pbm_afsr;
+
+ /*
+ * for poke() support - called from POKE_FLUSH. Spin waiting
+ * for MA, TA or SERR to be cleared by a pcmu_pbm_error_intr().
+ * We have to wait for SERR too in case the device is beyond
+ * a pci-pci bridge.
+ */
+ pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
+ /* Unbounded spin: relies on the error interrupt clearing the bits. */
+ while (((pbm_afsr >> PCMU_PCI_AFSR_PE_SHIFT) &
+ (PCMU_PCI_AFSR_E_MA | PCMU_PCI_AFSR_E_TA))) {
+ pbm_afsr = *pcbm_p->pcbm_async_flt_status_reg;
+ }
+}
+
+/*
+ * Set up error handling for this instance: the (module-global) ECC
+ * error queue and the per-instance error mutex.
+ */
+void
+pcmu_err_create(pcmu_t *pcmu_p)
+{
+ /*
+ * PCI detected ECC errorq, to schedule async handling
+ * of ECC errors and logging.
+ * The errorq is created here but destroyed when _fini is called
+ * for the pci module.
+ */
+ if (pcmu_ecc_queue == NULL) {
+ pcmu_ecc_queue = errorq_create("pcmu_ecc_queue",
+ (errorq_func_t)pcmu_ecc_err_drain,
+ (void *)NULL,
+ ECC_MAX_ERRS, sizeof (pcmu_ecc_errstate_t),
+ PIL_2, ERRORQ_VITAL);
+ if (pcmu_ecc_queue == NULL)
+ panic("failed to create required system error queue");
+ }
+
+ /*
+ * Initialize error handling mutex.
+ */
+ mutex_init(&pcmu_p->pcmu_err_mutex, NULL, MUTEX_DRIVER,
+ (void *)pcmu_p->pcmu_fm_ibc);
+}
+
+/*
+ * Per-instance error teardown.  The shared pcmu_ecc_queue is left
+ * alone here; it is destroyed at module _fini time.
+ */
+void
+pcmu_err_destroy(pcmu_t *pcmu_p)
+{
+ mutex_destroy(&pcmu_p->pcmu_err_mutex);
+}
+
+/*
+ * Function used to post PCI block module specific ereports.
+ * NOTE(review): despite the name, this routine only logs via cmn_err();
+ * no FMA ereport is actually posted here — confirm that is intended.
+ */
+void
+pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena,
+ pcmu_pbm_errstate_t *pbm_err)
+{
+ char *aux_msg;
+ uint32_t prierr, secerr;
+ pcmu_t *pcmu_p;
+ int instance = ddi_get_instance(dip);
+
+ ena = ena ? ena : fm_ena_generate(0, FM_ENA_FMT1);
+
+ pcmu_p = get_pcmu_soft_state(instance);
+ prierr = PBM_AFSR_TO_PRIERR(pbm_err->pbm_afsr);
+ secerr = PBM_AFSR_TO_SECERR(pbm_err->pbm_afsr);
+ /* CMU-CH only classifies Master Aborts; word the message accordingly. */
+ if (prierr)
+ aux_msg = "PCI primary error: Master Abort";
+ else if (secerr)
+ aux_msg = "PCI secondary error: Master Abort";
+ else
+ aux_msg = "";
+ cmn_err(CE_WARN, "%s %s: %s %s=0x%lx, %s=0x%lx, %s=0x%lx %s=0x%x",
+ (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str,
+ (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str,
+ aux_msg,
+ PCI_PBM_AFAR, pbm_err->pbm_afar,
+ PCI_PBM_AFSR, pbm_err->pbm_afsr,
+ PCI_PBM_CSR, pbm_err->pbm_ctl_stat,
+ "portid", pcmu_p->pcmu_id);
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_cb.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_cb.c
new file mode 100644
index 0000000000..7cb0950247
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_cb.c
@@ -0,0 +1,288 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH Control Block object
+ */
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/systm.h>
+#include <sys/async.h>
+#include <sys/sunddi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pcicmu/pcicmu.h>
+#include <sys/machsystm.h>
+
+/*LINTLIBRARY*/
+
+extern uint64_t xc_tick_jump_limit;
+
+/*
+ * Allocate the control block, link it to the instance, and program
+ * the CB hardware via pcmu_cb_setup().
+ */
+void
+pcmu_cb_create(pcmu_t *pcmu_p)
+{
+ pcmu_cb_t *pcb_p;
+
+ pcb_p = kmem_zalloc(sizeof (pcmu_cb_t), KM_SLEEP);
+ mutex_init(&pcb_p->pcb_intr_lock, NULL, MUTEX_DRIVER, NULL);
+ pcmu_p->pcmu_cb_p = pcb_p;
+ pcb_p->pcb_pcmu_p = pcmu_p;
+ pcmu_cb_setup(pcmu_p);
+}
+
+/*
+ * Undo pcmu_cb_create(): withdraw from interrupt distribution first so
+ * pcmu_cb_intr_dist() can no longer run against the block being freed.
+ */
+void
+pcmu_cb_destroy(pcmu_t *pcmu_p)
+{
+ pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
+
+ intr_dist_rem(pcmu_cb_intr_dist, pcb_p);
+ pcmu_cb_teardown(pcmu_p);
+ pcmu_p->pcmu_cb_p = NULL;
+ mutex_destroy(&pcb_p->pcb_intr_lock);
+ kmem_free(pcb_p, sizeof (pcmu_cb_t));
+}
+
+/* Physical address of the 8-byte mapping register for this ino. */
+uint64_t
+pcmu_cb_ino_to_map_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
+{
+ uint64_t off = (uint64_t)(ino & 0x1f) * sizeof (uint64_t);
+
+ return (pcb_p->pcb_map_pa + off);
+}
+
+/* Physical address of the 8-byte clear-interrupt register for this ino. */
+uint64_t
+pcmu_cb_ino_to_clr_pa(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino)
+{
+ uint64_t off = (uint64_t)(ino & 0x1f) * sizeof (uint64_t);
+
+ return (pcb_p->pcb_clr_pa + off);
+}
+
+/*
+ * Write value into the clear-interrupt register for ino, then read it
+ * back to force the store to complete before returning.
+ */
+static void
+pcmu_cb_set_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, uint64_t value)
+{
+ uint64_t pa = pcmu_cb_ino_to_clr_pa(pcb_p, ino);
+
+ PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
+ "pci-%x pcmu_cb_set_nintr_reg: ino=%x PA=%016llx\n",
+ pcb_p->pcb_pcmu_p->pcmu_id, ino, pa);
+
+ stdphysio(pa, value);
+ (void) lddphysio(pa); /* flush the previous write */
+}
+
+/*
+ * enable an internal interrupt source:
+ * if an interrupt is shared by both sides, record it in pcb_inos[] and
+ * cb will own its distribution.
+ */
+void
+pcmu_cb_enable_nintr(pcmu_t *pcmu_p, pcmu_cb_nintr_index_t idx)
+{
+ pcmu_cb_t *pcb_p = pcmu_p->pcmu_cb_p;
+ pcmu_ib_ino_t ino = PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[idx]);
+ pcmu_ib_mondo_t mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
+ uint32_t cpu_id;
+ uint64_t reg, pa;
+ pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
+ volatile uint64_t *imr_p = ib_intr_map_reg_addr(pib_p, ino);
+
+ ASSERT(idx < CBNINTR_MAX);
+ pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
+
+ mutex_enter(&pcb_p->pcb_intr_lock);
+ cpu_id = intr_dist_cpuid();
+
+ /* Route the target CPU through the U2U translation registers. */
+ cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);
+
+ /* Program the mapping register before marking the interrupt idle. */
+ reg = ib_get_map_reg(mondo, cpu_id);
+ stdphysio(pa, reg);
+
+ ASSERT(pcb_p->pcb_inos[idx] == 0);
+ pcb_p->pcb_inos[idx] = ino;
+
+ pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
+ mutex_exit(&pcb_p->pcb_intr_lock);
+
+ PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
+ "pci-%x pcmu_cb_enable_nintr: ino=%x cpu_id=%x\n",
+ pcmu_p->pcmu_id, ino, cpu_id);
+ PCMU_DBG2(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
+ "\tPA=%016llx data=%016llx\n", pa, reg);
+}
+
+/*
+ * Invalidate the mapping register for ino and, when wait is set, spin
+ * until any in-flight interrupt has been processed (bounded by
+ * pcmu_intrpend_timeout, with compensation for debugger-induced time
+ * jumps).  Caller must hold pcb_intr_lock.
+ */
+static void
+pcmu_cb_disable_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, int wait)
+{
+ uint64_t tmp, map_reg_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
+ ASSERT(MUTEX_HELD(&pcb_p->pcb_intr_lock));
+
+ /* mark interrupt invalid in mapping register */
+ tmp = lddphysio(map_reg_pa) & ~PCMU_INTR_MAP_REG_VALID;
+ stdphysio(map_reg_pa, tmp);
+ (void) lddphysio(map_reg_pa); /* flush previous write */
+
+ if (wait) {
+ hrtime_t start_time;
+ hrtime_t prev, curr, interval, jump;
+ hrtime_t intr_timeout;
+ uint64_t state_reg_pa = pcb_p->pcb_obsta_pa;
+ /* 2 state bits per ino in the diag register */
+ uint_t shift = (ino & 0x1f) << 1;
+
+ /* busy wait if there is interrupt being processed */
+ /* unless panic or timeout for interrupt pending is reached */
+
+ intr_timeout = pcmu_intrpend_timeout;
+ jump = TICK_TO_NSEC(xc_tick_jump_limit);
+ start_time = curr = gethrtime();
+ while ((((lddphysio(state_reg_pa) >> shift) &
+ PCMU_CLEAR_INTR_REG_MASK) ==
+ PCMU_CLEAR_INTR_REG_PENDING) && !panicstr) {
+ /*
+ * If we have a really large jump in hrtime, it is most
+ * probably because we entered the debugger (or OBP,
+ * in general). So, we adjust the timeout accordingly
+ * to prevent declaring an interrupt timeout. The
+ * master-interrupt mechanism in OBP should deliver
+ * the interrupts properly.
+ */
+ prev = curr;
+ curr = gethrtime();
+ interval = curr - prev;
+ if (interval > jump)
+ intr_timeout += interval;
+ if (curr - start_time > intr_timeout) {
+ cmn_err(CE_WARN, "pcmu@%x "
+ "pcmu_cb_disable_nintr_reg(%lx,%x) timeout",
+ pcb_p->pcb_pcmu_p->pcmu_id, map_reg_pa,
+ PCMU_CB_INO_TO_MONDO(pcb_p, ino));
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * Disable an internal interrupt: invalidate its mapping register
+ * (optionally waiting for a pending interrupt to drain), leave the
+ * interrupt state PENDING, forget the ino, and release its U2U data
+ * register binding.
+ */
+void
+pcmu_cb_disable_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx, int wait)
+{
+ pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
+ volatile uint64_t *imr_p;
+ pcmu_ib_ino_t ino = pcb_p->pcb_inos[idx];
+ ASSERT(idx < CBNINTR_MAX);
+ ASSERT(ino);
+
+ imr_p = ib_intr_map_reg_addr(pib_p, ino);
+ mutex_enter(&pcb_p->pcb_intr_lock);
+ pcmu_cb_disable_nintr_reg(pcb_p, ino, wait);
+ pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_PENDING);
+ pcb_p->pcb_inos[idx] = 0;
+ mutex_exit(&pcb_p->pcb_intr_lock);
+ /* U2U cleanup is done outside the lock, after the ino is forgotten. */
+ u2u_ittrans_cleanup((u2u_ittrans_data_t *)(pcb_p->pcb_ittrans_cookie),
+ imr_p);
+}
+
+/* Return the interrupt for slot idx to IDLE so it can fire again. */
+void
+pcmu_cb_clear_nintr(pcmu_cb_t *pcb_p, pcmu_cb_nintr_index_t idx)
+{
+ pcmu_ib_ino_t ino;
+
+ ASSERT(idx < CBNINTR_MAX);
+ ino = pcb_p->pcb_inos[idx];
+ ASSERT(ino);
+ pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
+}
+
+/*
+ * Interrupt redistribution callback: retarget every enabled shared
+ * interrupt recorded in pcb_inos[] to a freshly chosen CPU.
+ */
+void
+pcmu_cb_intr_dist(void *arg)
+{
+ int i;
+ pcmu_cb_t *pcb_p = (pcmu_cb_t *)arg;
+
+ mutex_enter(&pcb_p->pcb_intr_lock);
+ for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
+ uint64_t mr_pa;
+ volatile uint64_t imr;
+ pcmu_ib_mondo_t mondo;
+ uint32_t cpu_id;
+ pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
+ volatile uint64_t *imr_p;
+
+ pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
+ if (!ino) /* skip non-shared interrupts */
+ continue;
+
+ mr_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
+ imr = lddphysio(mr_pa);
+ if (!PCMU_IB_INO_INTR_ISON(imr))
+ continue;
+
+ mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
+ cpu_id = intr_dist_cpuid();
+ imr_p = ib_intr_map_reg_addr(pib_p, ino);
+
+ /* Rebind the U2U data register to the new target CPU. */
+ cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);
+
+ /* Quiesce the ino, then rewrite its mapping register. */
+ pcmu_cb_disable_nintr_reg(pcb_p, ino, PCMU_IB_INTR_WAIT);
+ stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
+ (void) lddphysio(mr_pa); /* flush previous write */
+ }
+ mutex_exit(&pcb_p->pcb_intr_lock);
+}
+
+/*
+ * DDI_SUSPEND support: snapshot the internal interrupts' mapping
+ * registers into pcb_imr_save for pcmu_cb_resume() to restore.
+ */
+void
+pcmu_cb_suspend(pcmu_cb_t *pcb_p)
+{
+ int i, inos = pcb_p->pcb_no_of_inos;
+ ASSERT(!pcb_p->pcb_imr_save);
+ pcb_p->pcb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);
+
+ /*
+ * save the internal interrupts' mapping registers content
+ *
+ * The PBM IMR really doesn't need to be saved, as it is
+ * different per side and is handled by pcmu_pbm_suspend/resume.
+ * But it complicates the logic.
+ */
+ for (i = 0; i < inos; i++) {
+ uint64_t pa;
+ pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
+ if (!ino)
+ continue;
+ pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
+ pcb_p->pcb_imr_save[i] = lddphysio(pa);
+ }
+}
+
+/*
+ * DDI_RESUME support: set each saved ino back to IDLE and restore its
+ * mapping register from the snapshot taken by pcmu_cb_suspend(), then
+ * free the snapshot.
+ */
+void
+pcmu_cb_resume(pcmu_cb_t *pcb_p)
+{
+ int i;
+ for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
+ uint64_t pa;
+ pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];
+ if (!ino)
+ continue;
+ pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
+ pcmu_cb_set_nintr_reg(pcb_p, ino, PCMU_CLEAR_INTR_REG_IDLE);
+ stdphysio(pa, pcb_p->pcb_imr_save[i]); /* restore IMR */
+ }
+ kmem_free(pcb_p->pcb_imr_save,
+ pcb_p->pcb_no_of_inos * sizeof (uint64_t));
+ pcb_p->pcb_imr_save = NULL;
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_counters.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_counters.c
new file mode 100644
index 0000000000..aee1a922f8
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_counters.c
@@ -0,0 +1,253 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/async.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pcicmu/pcicmu.h>
+#include <sys/machsystm.h>
+#include <sys/kstat.h>
+
+/*LINTLIBRARY*/
+
+static kstat_t *pcmu_create_picN_kstat(char *, int, int, int,
+ pcmu_kev_mask_t *);
+
+void
+pcmu_kstat_create(pcmu_t *pcmu_p)
+{
+ pcmu_add_upstream_kstat(pcmu_p);
+}
+
+void
+pcmu_kstat_destroy(pcmu_t *pcmu_p)
+{
+ pcmu_rem_upstream_kstat(pcmu_p);
+}
+
+void
+pcmu_create_name_kstat(char *name, pcmu_ksinfo_t *pp, pcmu_kev_mask_t *ev)
+{
+ int i;
+
+ for (i = 0; i < NUM_OF_PICS; i++) {
+ pp->pic_name_ksp[i] = pcmu_create_picN_kstat(name,
+ i, pp->pic_shift[i], pp->pic_no_evs, ev);
+
+ if (pp->pic_name_ksp[i] == NULL) {
+ cmn_err(CE_WARN, "pci: unable to create name kstat");
+ }
+ }
+}
+
+void
+pcmu_delete_name_kstat(pcmu_ksinfo_t *pp)
+{
+ int i;
+
+ if (pp == NULL) {
+ return;
+ }
+ for (i = 0; i < NUM_OF_PICS; i++) {
+ if (pp->pic_name_ksp[i] != NULL)
+ kstat_delete(pp->pic_name_ksp[i]);
+ }
+}
+
+/*
+ * Create the picN kstat. Returns a pointer to the
+ * kstat which the driver must store to allow it
+ * to be deleted when necessary.
+ */
+static kstat_t *
+pcmu_create_picN_kstat(char *mod_name, int pic, int pic_shift,
+ int num_ev, pcmu_kev_mask_t *ev_array)
+{
+ struct kstat_named *pic_named_data;
+ int inst = 0;
+ int event;
+ char pic_name[30];
+ kstat_t *picN_ksp = NULL;
+
+ (void) sprintf(pic_name, "pic%d", pic);
+ if ((picN_ksp = kstat_create(mod_name, inst, pic_name,
+ "bus", KSTAT_TYPE_NAMED, num_ev, NULL)) == NULL) {
+ cmn_err(CE_WARN, "%s %s : kstat create failed",
+ mod_name, pic_name);
+
+ /*
+ * It is up to the calling function to delete any kstats
+ * that may have been created already. We just
+		 * return NULL to indicate an error has occurred.
+ */
+ return (NULL);
+ }
+
+ pic_named_data = (struct kstat_named *)picN_ksp->ks_data;
+
+ /*
+ * Write event names and their associated pcr masks. The
+	 * last entry in the array (clear_pic) is added separately
+ * below as the pic value must be inverted.
+ */
+ for (event = 0; event < num_ev - 1; event++) {
+ pic_named_data[event].value.ui64 =
+ (ev_array[event].pcr_mask << pic_shift);
+
+ kstat_named_init(&pic_named_data[event],
+ ev_array[event].event_name, KSTAT_DATA_UINT64);
+ }
+
+ /*
+ * add the clear_pic entry.
+ */
+ pic_named_data[event].value.ui64 =
+ (uint64_t)~(ev_array[event].pcr_mask << pic_shift);
+
+ kstat_named_init(&pic_named_data[event],
+ ev_array[event].event_name, KSTAT_DATA_UINT64);
+
+ kstat_install(picN_ksp);
+ return (picN_ksp);
+}
+
+/*
+ * Create the "counters" kstat.
+ */
+kstat_t *pcmu_create_cntr_kstat(pcmu_t *pcmu_p, char *name,
+ int num_pics, int (*update)(kstat_t *, int),
+ void *cntr_addr_p)
+{
+ struct kstat_named *counters_named_data;
+ struct kstat *counters_ksp;
+ dev_info_t *dip = pcmu_p->pcmu_dip;
+ char *drv_name = (char *)ddi_driver_name(dip);
+ int drv_instance = ddi_get_instance(dip);
+ char pic_str[10];
+ int i;
+
+ /*
+ * Size of kstat is num_pics + 1 as it
+ * also contains the %pcr
+ */
+ if ((counters_ksp = kstat_create(name, drv_instance,
+ "counters", "bus", KSTAT_TYPE_NAMED, num_pics + 1,
+ KSTAT_FLAG_WRITABLE)) == NULL) {
+ cmn_err(CE_WARN, "%s%d counters kstat_create failed",
+ drv_name, drv_instance);
+ return (NULL);
+ }
+
+ counters_named_data = (struct kstat_named *)(counters_ksp->ks_data);
+
+ /* initialize the named kstats */
+ kstat_named_init(&counters_named_data[0], "pcr", KSTAT_DATA_UINT64);
+
+ for (i = 0; i < num_pics; i++) {
+ (void) sprintf(pic_str, "pic%d", i);
+ kstat_named_init(&counters_named_data[i+1],
+ pic_str, KSTAT_DATA_UINT64);
+ }
+
+ /*
+	 * Store the register offsets in the kstat's
+ * private field so that they are available
+ * to the update function.
+ */
+ counters_ksp->ks_private = (void *)cntr_addr_p;
+ counters_ksp->ks_update = update;
+ kstat_install(counters_ksp);
+ return (counters_ksp);
+}
+
+/*
+ * kstat update function. Handles reads/writes
+ * from/to kstat.
+ */
+int
+pcmu_cntr_kstat_update(kstat_t *ksp, int rw)
+{
+ struct kstat_named *data_p;
+ pcmu_cntr_addr_t *cntr_addr_p = ksp->ks_private;
+ uint64_t pic;
+
+ data_p = (struct kstat_named *)ksp->ks_data;
+ if (rw == KSTAT_WRITE) {
+ *cntr_addr_p->pcr_addr = data_p[0].value.ui64;
+ return (0);
+ } else {
+ pic = *cntr_addr_p->pic_addr;
+ data_p[0].value.ui64 = *cntr_addr_p->pcr_addr;
+
+ /* pic0 : lo 32 bits */
+ data_p[1].value.ui64 = (pic <<32) >> 32;
+ /* pic1 : hi 32 bits */
+ data_p[2].value.ui64 = pic >> 32;
+ }
+ return (0);
+}
+
+/*
+ * kstat update function using physical addresses.
+ */
+int
+pcmu_cntr_kstat_pa_update(kstat_t *ksp, int rw)
+{
+ struct kstat_named *data_p;
+ pcmu_cntr_pa_t *cntr_pa_p = (pcmu_cntr_pa_t *)ksp->ks_private;
+ uint64_t pic;
+
+ data_p = (struct kstat_named *)ksp->ks_data;
+
+ if (rw == KSTAT_WRITE) {
+ stdphysio(cntr_pa_p->pcr_pa, data_p[0].value.ui64);
+ return (0);
+ } else {
+ pic = lddphysio(cntr_pa_p->pic_pa);
+ data_p[0].value.ui64 = lddphysio(cntr_pa_p->pcr_pa);
+
+ /* pic0 : lo 32 bits */
+ data_p[1].value.ui64 = (pic << 32) >> 32;
+ /* pic1 : hi 32 bits */
+ data_p[2].value.ui64 = pic >> 32;
+ }
+ return (0);
+}
+
+
+/*
+ * Matched with pcmu_add_upstream_kstat()
+ */
+void
+pcmu_rem_upstream_kstat(pcmu_t *pcmu_p)
+{
+ if (pcmu_p->pcmu_uksp != NULL)
+ kstat_delete(pcmu_p->pcmu_uksp);
+ pcmu_p->pcmu_uksp = NULL;
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ecc.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ecc.c
new file mode 100644
index 0000000000..b54cf7bd78
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ecc.c
@@ -0,0 +1,469 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH ECC support
+ */
+
+#include <sys/types.h>
+#include <sys/systm.h>
+#include <sys/kmem.h>
+#include <sys/sunddi.h>
+#include <sys/intr.h>
+#include <sys/async.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/machsystm.h>
+#include <sys/sysmacros.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/fm/io/pci.h>
+#include <sys/fm/io/sun4upci.h>
+#include <sys/fm/io/ddi.h>
+#include <sys/pcicmu/pcicmu.h>
+
+/*LINTLIBRARY*/
+
+static void pcmu_ecc_disable(pcmu_ecc_t *, int);
+static uint64_t pcmu_ecc_read_afsr(pcmu_ecc_intr_info_t *);
+static void pcmu_ecc_ereport_post(dev_info_t *dip,
+ pcmu_ecc_errstate_t *ecc_err);
+
+clock_t pcmu_pecc_panic_delay = 200;
+
+void
+pcmu_ecc_create(pcmu_t *pcmu_p)
+{
+ uint64_t pcb_base_pa = pcmu_p->pcmu_cb_p->pcb_base_pa;
+ pcmu_ecc_t *pecc_p;
+ /* LINTED variable */
+ dev_info_t *dip = pcmu_p->pcmu_dip;
+
+ pecc_p = (pcmu_ecc_t *)kmem_zalloc(sizeof (pcmu_ecc_t), KM_SLEEP);
+ pecc_p->pecc_pcmu_p = pcmu_p;
+ pcmu_p->pcmu_pecc_p = pecc_p;
+
+ pecc_p->pecc_ue.pecc_p = pecc_p;
+ pecc_p->pecc_ue.pecc_type = CBNINTR_UE;
+
+ pcmu_ecc_setup(pecc_p);
+
+ /*
+ * Determine the virtual addresses of the streaming cache
+ * control/status and flush registers.
+ */
+ pecc_p->pecc_csr_pa = pcb_base_pa + PCMU_ECC_CSR_OFFSET;
+ pecc_p->pecc_ue.pecc_afsr_pa = pcb_base_pa + PCMU_UE_AFSR_OFFSET;
+ pecc_p->pecc_ue.pecc_afar_pa = pcb_base_pa + PCMU_UE_AFAR_OFFSET;
+
+ PCMU_DBG1(PCMU_DBG_ATTACH, dip, "pcmu_ecc_create: csr=%x\n",
+ pecc_p->pecc_csr_pa);
+ PCMU_DBG2(PCMU_DBG_ATTACH, dip,
+ "pcmu_ecc_create: ue_afsr=%x, ue_afar=%x\n",
+ pecc_p->pecc_ue.pecc_afsr_pa, pecc_p->pecc_ue.pecc_afar_pa);
+
+ pcmu_ecc_configure(pcmu_p);
+
+ /*
+ * Register routines to be called from system error handling code.
+ */
+ bus_func_register(BF_TYPE_ERRDIS,
+ (busfunc_t)pcmu_ecc_disable_nowait, pecc_p);
+}
+
+int
+pcmu_ecc_register_intr(pcmu_t *pcmu_p)
+{
+ pcmu_ecc_t *pecc_p = pcmu_p->pcmu_pecc_p;
+ int ret;
+
+ /*
+ * Install the UE error interrupt handlers.
+ */
+ ret = pcmu_ecc_add_intr(pcmu_p, CBNINTR_UE, &pecc_p->pecc_ue);
+ return (ret);
+}
+
+void
+pcmu_ecc_destroy(pcmu_t *pcmu_p)
+{
+ pcmu_ecc_t *pecc_p = pcmu_p->pcmu_pecc_p;
+
+ PCMU_DBG0(PCMU_DBG_DETACH, pcmu_p->pcmu_dip, "pcmu_ecc_destroy:\n");
+
+ /*
+ * Disable UE ECC error interrupts.
+ */
+ pcmu_ecc_disable_wait(pecc_p);
+
+ /*
+ * Remove the ECC interrupt handlers.
+ */
+ pcmu_ecc_rem_intr(pcmu_p, CBNINTR_UE, &pecc_p->pecc_ue);
+
+ /*
+ * Unregister our error handling functions.
+ */
+ bus_func_unregister(BF_TYPE_ERRDIS,
+ (busfunc_t)pcmu_ecc_disable_nowait, pecc_p);
+ /*
+ * If a timer has been set, unset it.
+ */
+ (void) untimeout(pecc_p->pecc_tout_id);
+ kmem_free(pecc_p, sizeof (pcmu_ecc_t));
+ pcmu_p->pcmu_pecc_p = NULL;
+}
+
+void
+pcmu_ecc_configure(pcmu_t *pcmu_p)
+{
+ pcmu_ecc_t *pecc_p = pcmu_p->pcmu_pecc_p;
+ uint64_t l;
+ /* LINTED variable */
+ dev_info_t *dip = pcmu_p->pcmu_dip;
+
+ /*
+ * Clear any pending ECC errors.
+ */
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip,
+ "pcmu_ecc_configure: clearing UE errors\n");
+ l = (PCMU_ECC_UE_AFSR_E_MASK << PCMU_ECC_UE_AFSR_PE_SHIFT) |
+ (PCMU_ECC_UE_AFSR_E_MASK << PCMU_ECC_UE_AFSR_SE_SHIFT);
+ stdphysio(pecc_p->pecc_ue.pecc_afsr_pa, l);
+
+ /*
+ * Enable ECC error detections via the control register.
+ */
+ PCMU_DBG0(PCMU_DBG_ATTACH, dip,
+ "pcmu_ecc_configure: enabling UE detection\n");
+ l = PCMU_ECC_CTRL_ECC_EN;
+ if (ecc_error_intr_enable)
+ l |= PCMU_ECC_CTRL_UE_INTEN;
+ stdphysio(pecc_p->pecc_csr_pa, l);
+}
+
+void
+pcmu_ecc_enable_intr(pcmu_t *pcmu_p)
+{
+ pcmu_cb_enable_nintr(pcmu_p, CBNINTR_UE);
+}
+
+void
+pcmu_ecc_disable_wait(pcmu_ecc_t *pecc_p)
+{
+ pcmu_ecc_disable(pecc_p, PCMU_IB_INTR_WAIT);
+}
+
+uint_t
+pcmu_ecc_disable_nowait(pcmu_ecc_t *pecc_p)
+{
+ pcmu_ecc_disable(pecc_p, PCMU_IB_INTR_NOWAIT);
+ return (BF_NONE);
+}
+
+static void
+pcmu_ecc_disable(pcmu_ecc_t *pecc_p, int wait)
+{
+ pcmu_cb_t *pcb_p = pecc_p->pecc_pcmu_p->pcmu_cb_p;
+ uint64_t csr_pa = pecc_p->pecc_csr_pa;
+ uint64_t csr = lddphysio(csr_pa);
+
+ csr &= ~(PCMU_ECC_CTRL_UE_INTEN);
+ stdphysio(csr_pa, csr);
+ pcmu_cb_disable_nintr(pcb_p, CBNINTR_UE, wait);
+}
+
+/*
+ * I/O ECC error handling:
+ *
+ * Below are the generic functions that handle detected ECC errors.
+ *
+ * The registered interrupt handler is pcmu_ecc_intr(), it's function
+ * is to receive the error, capture some state, and pass that on to
+ * the pcmu_ecc_err_handler() for reporting purposes.
+ *
+ * pcmu_ecc_err_handler() gathers more state(via pcmu_ecc_errstate_get)
+ * and attempts to handle and report the error. pcmu_ecc_err_handler()
+ * must determine if we need to panic due to this error (via
+ * pcmu_ecc_classify, which also decodes the ECC afsr), and if any
+ * side effects exist that may have caused or are due to this error.
+ * PBM errors related to the ECC error may exist, to report
+ * them we call pcmu_pbm_err_handler().
+ *
+ * To report the error we must also get the syndrome and unum, which can not
+ * be done in high level interrupted context. Therefore we have an error
+ * queue(pcmu_ecc_queue) which we dispatch errors to, to report the errors
+ * (pcmu_ecc_err_drain()).
+ *
+ * pcmu_ecc_err_drain() will be called when either the softint is triggered
+ * or the system is panicing. Either way it will gather more information
+ * about the error from the CPU(via ecc_cpu_call(), ecc.c), attempt to
+ * retire the faulty page(if error is a UE), and report the detected error.
+ *
+ */
+
+/*
+ * Function used to get ECC AFSR register
+ */
+static uint64_t
+pcmu_ecc_read_afsr(pcmu_ecc_intr_info_t *ecc_ii_p)
+{
+ ASSERT(ecc_ii_p->pecc_type == CBNINTR_UE);
+ return (lddphysio(ecc_ii_p->pecc_afsr_pa));
+}
+
+/*
+ * IO detected ECC error interrupt handler, calls pcmu_ecc_err_handler to post
+ * error reports and handle the interrupt. Re-entry into pcmu_ecc_err_handler
+ * is protected by the per-chip mutex pcmu_err_mutex.
+ */
+uint_t
+pcmu_ecc_intr(caddr_t a)
+{
+ pcmu_ecc_intr_info_t *ecc_ii_p = (pcmu_ecc_intr_info_t *)a;
+ pcmu_ecc_t *pecc_p = ecc_ii_p->pecc_p;
+ pcmu_t *pcmu_p = pecc_p->pecc_pcmu_p;
+ pcmu_ecc_errstate_t ecc_err;
+ int ret = DDI_FM_OK;
+
+ bzero(&ecc_err, sizeof (pcmu_ecc_errstate_t));
+ ecc_err.ecc_ena = fm_ena_generate(0, FM_ENA_FMT1); /* RAGS */
+ ecc_err.ecc_ii_p = *ecc_ii_p;
+ ecc_err.pecc_p = pecc_p;
+ ecc_err.ecc_caller = PCI_ECC_CALL;
+
+ mutex_enter(&pcmu_p->pcmu_err_mutex);
+ ret = pcmu_ecc_err_handler(&ecc_err);
+ mutex_exit(&pcmu_p->pcmu_err_mutex);
+ if (ret == DDI_FM_FATAL) {
+ /*
+ * Need delay here to allow CPUs to handle related traps,
+ * such as FRUs for USIIIi systems.
+ */
+ DELAY(pcmu_pecc_panic_delay);
+ cmn_err(CE_PANIC, "Fatal PCI UE Error");
+ }
+
+ return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * Function used to gather IO ECC error state.
+ */
+static void
+pcmu_ecc_errstate_get(pcmu_ecc_errstate_t *ecc_err_p)
+{
+ pcmu_ecc_t *pecc_p;
+ uint_t bus_id;
+
+ ASSERT(ecc_err_p);
+
+ pecc_p = ecc_err_p->ecc_ii_p.pecc_p;
+ bus_id = pecc_p->pecc_pcmu_p->pcmu_id;
+
+ ASSERT(MUTEX_HELD(&pecc_p->pecc_pcmu_p->pcmu_err_mutex));
+ /*
+ * Read the fault registers.
+ */
+ ecc_err_p->ecc_afsr = pcmu_ecc_read_afsr(&ecc_err_p->ecc_ii_p);
+ ecc_err_p->ecc_afar = lddphysio(ecc_err_p->ecc_ii_p.pecc_afar_pa);
+
+ ecc_err_p->ecc_offset = ((ecc_err_p->ecc_afsr &
+ ecc_err_p->ecc_ii_p.pecc_offset_mask) >>
+ ecc_err_p->ecc_ii_p.pecc_offset_shift) <<
+ ecc_err_p->ecc_ii_p.pecc_size_log2;
+
+ ecc_err_p->ecc_aflt.flt_id = gethrtime();
+ ecc_err_p->ecc_aflt.flt_stat = ecc_err_p->ecc_afsr;
+ ecc_err_p->ecc_aflt.flt_addr = P2ALIGN(ecc_err_p->ecc_afar, 64) +
+ ecc_err_p->ecc_offset;
+ ecc_err_p->ecc_aflt.flt_bus_id = bus_id;
+ ecc_err_p->ecc_aflt.flt_inst = 0;
+ ecc_err_p->ecc_aflt.flt_status = ECC_IOBUS;
+ ecc_err_p->ecc_aflt.flt_in_memory = 0;
+ ecc_err_p->ecc_aflt.flt_class = BUS_FAULT;
+}
+
+/*
+ * pcmu_ecc_check: Called by pcmu_ecc_err_handler() this function is responsible
+ * for calling pcmu_pbm_err_handler() and calling their children error
+ * handlers(via ndi_fm_handler_dispatch()).
+ */
+static int
+pcmu_ecc_check(pcmu_ecc_t *pecc_p, uint64_t fme_ena)
+{
+ ddi_fm_error_t derr;
+ int ret;
+ pcmu_t *pcmu_p;
+
+
+ ASSERT(MUTEX_HELD(&pecc_p->pecc_pcmu_p->pcmu_err_mutex));
+
+ bzero(&derr, sizeof (ddi_fm_error_t));
+ derr.fme_version = DDI_FME_VERSION;
+ derr.fme_ena = fme_ena;
+ ret = DDI_FM_NONFATAL;
+
+ /*
+ * Need to report any PBM errors which may have caused or
+ * resulted from this error.
+ */
+ pcmu_p = pecc_p->pecc_pcmu_p;
+ if (pcmu_pbm_err_handler(pcmu_p->pcmu_dip, &derr, (void *)pcmu_p,
+ PCI_ECC_CALL) == DDI_FM_FATAL)
+ ret = DDI_FM_FATAL;
+
+ if (ret == DDI_FM_FATAL)
+ return (DDI_FM_FATAL);
+ else
+ return (DDI_FM_NONFATAL);
+}
+
+/*
+ * Function used to handle and log IO detected ECC errors, can be called by
+ * pcmu_ecc_intr and pcmu_err_callback(trap callback). Protected by
+ * pcmu_err_mutex.
+ */
+int
+pcmu_ecc_err_handler(pcmu_ecc_errstate_t *ecc_err_p)
+{
+ /* LINTED variable */
+ uint64_t pri_err, sec_err;
+ pcmu_ecc_intr_info_t *ecc_ii_p = &ecc_err_p->ecc_ii_p;
+ pcmu_ecc_t *pecc_p = ecc_ii_p->pecc_p;
+ /* LINTED variable */
+ pcmu_t *pcmu_p;
+ pcmu_cb_t *pcb_p;
+ int fatal = 0;
+ int nonfatal = 0;
+
+ ASSERT(MUTEX_HELD(&pecc_p->pecc_pcmu_p->pcmu_err_mutex));
+
+ pcmu_p = pecc_p->pecc_pcmu_p;
+ pcb_p = pecc_p->pecc_pcmu_p->pcmu_cb_p;
+
+ pcmu_ecc_errstate_get(ecc_err_p);
+ pri_err = (ecc_err_p->ecc_afsr >> PCMU_ECC_UE_AFSR_PE_SHIFT) &
+ PCMU_ECC_UE_AFSR_E_MASK;
+
+ sec_err = (ecc_err_p->ecc_afsr >> PCMU_ECC_UE_AFSR_SE_SHIFT) &
+ PCMU_ECC_UE_AFSR_E_MASK;
+
+ switch (ecc_ii_p->pecc_type) {
+ case CBNINTR_UE:
+ if (pri_err) {
+ ecc_err_p->ecc_aflt.flt_synd = 0;
+ ecc_err_p->pecc_pri = 1;
+ pcmu_ecc_classify(pri_err, ecc_err_p);
+ errorq_dispatch(pcmu_ecc_queue, (void *)ecc_err_p,
+ sizeof (pcmu_ecc_errstate_t),
+ ecc_err_p->ecc_aflt.flt_panic);
+ }
+ if (sec_err) {
+ pcmu_ecc_errstate_t ecc_sec_err;
+
+ ecc_sec_err = *ecc_err_p;
+ ecc_sec_err.pecc_pri = 0;
+ pcmu_ecc_classify(sec_err, &ecc_sec_err);
+ pcmu_ecc_ereport_post(pcmu_p->pcmu_dip,
+ &ecc_sec_err);
+ }
+ /*
+ * Check for PCI bus errors that may have resulted from or
+ * caused this UE.
+ */
+ if (ecc_err_p->ecc_caller == PCI_ECC_CALL &&
+ pcmu_ecc_check(pecc_p, ecc_err_p->ecc_ena) == DDI_FM_FATAL)
+ ecc_err_p->ecc_aflt.flt_panic = 1;
+
+ if (ecc_err_p->ecc_aflt.flt_panic) {
+ /*
+ * Disable all further errors since this will be
+ * treated as a fatal error.
+ */
+ (void) pcmu_ecc_disable_nowait(pecc_p);
+ fatal++;
+ }
+ break;
+
+ default:
+ return (DDI_FM_OK);
+ }
+ /* Clear the errors */
+ stdphysio(ecc_ii_p->pecc_afsr_pa, ecc_err_p->ecc_afsr);
+ /*
+ * Clear the interrupt if called by pcmu_ecc_intr and UE error
+ * or if called by pcmu_ecc_intr and CE error and delayed CE
+ * interrupt handling is turned off.
+ */
+ if (ecc_err_p->ecc_caller == PCI_ECC_CALL &&
+ ecc_ii_p->pecc_type == CBNINTR_UE && !fatal)
+ pcmu_cb_clear_nintr(pcb_p, ecc_ii_p->pecc_type);
+ if (!fatal && !nonfatal)
+ return (DDI_FM_OK);
+ else if (fatal)
+ return (DDI_FM_FATAL);
+ return (DDI_FM_NONFATAL);
+}
+
+/*
+ * Function used to drain pcmu_ecc_queue, either during panic or after softint
+ * is generated, to log IO detected ECC errors.
+ */
+void
+pcmu_ecc_err_drain(void *not_used, pcmu_ecc_errstate_t *ecc_err)
+{
+ struct async_flt *ecc = &ecc_err->ecc_aflt;
+ pcmu_t *pcmu_p = ecc_err->pecc_p->pecc_pcmu_p;
+
+ ecc_cpu_call(ecc, ecc_err->ecc_unum, ECC_IO_UE);
+ ecc_err->ecc_err_type = "U";
+ pcmu_ecc_ereport_post(pcmu_p->pcmu_dip, ecc_err);
+}
+
+/*
+ * Function used to post IO detected ECC ereports.
+ */
+static void
+pcmu_ecc_ereport_post(dev_info_t *dip, pcmu_ecc_errstate_t *ecc_err)
+{
+ char *aux_msg;
+ pcmu_t *pcmu_p;
+ int instance = ddi_get_instance(dip);
+
+ pcmu_p = get_pcmu_soft_state(instance);
+ if (ecc_err->pecc_pri) {
+ aux_msg = "PIO primary uncorrectable error";
+ } else {
+ aux_msg = "PIO secondary uncorrectable error";
+ }
+ cmn_err(CE_WARN, "%s %s: %s %s=0x%lx, %s=0x%lx, %s=0x%x",
+ (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str,
+ (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str,
+ aux_msg, PCI_ECC_AFSR, ecc_err->ecc_afsr,
+ PCI_ECC_AFAR, ecc_err->ecc_aflt.flt_addr,
+ "portid", ecc_err->ecc_aflt.flt_bus_id);
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ib.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ib.c
new file mode 100644
index 0000000000..1befbb3ade
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_ib.c
@@ -0,0 +1,747 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH Interrupt Block
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/async.h>
+#include <sys/systm.h>
+#include <sys/spl.h>
+#include <sys/sunddi.h>
+#include <sys/machsystm.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pcicmu/pcicmu.h>
+
+/*LINTLIBRARY*/
+static uint_t pcmu_ib_intr_reset(void *arg);
+
+extern uint64_t xc_tick_jump_limit;
+
+void
+pcmu_ib_create(pcmu_t *pcmu_p)
+{
+ pcmu_ib_t *pib_p;
+ uintptr_t a;
+ int i;
+
+ /*
+ * Allocate interrupt block state structure and link it to
+ * the pci state structure.
+ */
+ pib_p = kmem_zalloc(sizeof (pcmu_ib_t), KM_SLEEP);
+ pcmu_p->pcmu_ib_p = pib_p;
+ pib_p->pib_pcmu_p = pcmu_p;
+
+ a = pcmu_ib_setup(pib_p);
+
+ /*
+ * Determine virtual addresses of interrupt mapping, clear and diag
+ * registers that have common offsets.
+ */
+ pib_p->pib_intr_retry_timer_reg =
+ (uint64_t *)(a + PCMU_IB_INTR_RETRY_TIMER_OFFSET);
+ pib_p->pib_obio_intr_state_diag_reg =
+ (uint64_t *)(a + PCMU_IB_OBIO_INTR_STATE_DIAG_REG);
+
+ PCMU_DBG2(PCMU_DBG_ATTACH, pcmu_p->pcmu_dip,
+ "pcmu_ib_create: obio_imr=%x, obio_cir=%x\n",
+ pib_p->pib_obio_intr_map_regs, pib_p->pib_obio_clear_intr_regs);
+ PCMU_DBG2(PCMU_DBG_ATTACH, pcmu_p->pcmu_dip,
+ "pcmu_ib_create: retry_timer=%x, obio_diag=%x\n",
+ pib_p->pib_intr_retry_timer_reg,
+ pib_p->pib_obio_intr_state_diag_reg);
+
+ pib_p->pib_ino_lst = (pcmu_ib_ino_info_t *)NULL;
+ mutex_init(&pib_p->pib_intr_lock, NULL, MUTEX_DRIVER, NULL);
+ mutex_init(&pib_p->pib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);
+
+ PCMU_DBG1(PCMU_DBG_ATTACH, pcmu_p->pcmu_dip,
+ "pcmu_ib_create: numproxy=%x\n", pcmu_p->pcmu_numproxy);
+ for (i = 1; i <= pcmu_p->pcmu_numproxy; i++) {
+ set_intr_mapping_reg(pcmu_p->pcmu_id,
+ (uint64_t *)pib_p->pib_upa_imr[i - 1], i);
+ }
+
+ pcmu_ib_configure(pib_p);
+ bus_func_register(BF_TYPE_RESINTR, pcmu_ib_intr_reset, pib_p);
+}
+
+void
+pcmu_ib_destroy(pcmu_t *pcmu_p)
+{
+ pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+
+ PCMU_DBG0(PCMU_DBG_IB, pcmu_p->pcmu_dip, "pcmu_ib_destroy\n");
+ bus_func_unregister(BF_TYPE_RESINTR, pcmu_ib_intr_reset, pib_p);
+
+ intr_dist_rem_weighted(pcmu_ib_intr_dist_all, pib_p);
+ mutex_destroy(&pib_p->pib_ino_lst_mutex);
+ mutex_destroy(&pib_p->pib_intr_lock);
+
+ pcmu_ib_free_ino_all(pib_p);
+
+ kmem_free(pib_p, sizeof (pcmu_ib_t));
+ pcmu_p->pcmu_ib_p = NULL;
+}
+
+void
+pcmu_ib_configure(pcmu_ib_t *pib_p)
+{
+ *pib_p->pib_intr_retry_timer_reg = pcmu_intr_retry_intv;
+}
+
+/*
+ * can only be used for CMU-CH internal interrupts ue, pbm
+ */
+void
+pcmu_ib_intr_enable(pcmu_t *pcmu_p, pcmu_ib_ino_t ino)
+{
+ pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+ pcmu_ib_mondo_t mondo = PCMU_IB_INO_TO_MONDO(pib_p, ino);
+ volatile uint64_t *imr_p = ib_intr_map_reg_addr(pib_p, ino);
+ uint_t cpu_id;
+
+ /*
+ * Determine the cpu for the interrupt.
+ */
+ mutex_enter(&pib_p->pib_intr_lock);
+ cpu_id = intr_dist_cpuid();
+ cpu_id = u2u_translate_tgtid(pcmu_p, cpu_id, imr_p);
+ PCMU_DBG2(PCMU_DBG_IB, pcmu_p->pcmu_dip,
+ "pcmu_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);
+
+ *imr_p = ib_get_map_reg(mondo, cpu_id);
+ PCMU_IB_INO_INTR_CLEAR(ib_clear_intr_reg_addr(pib_p, ino));
+ mutex_exit(&pib_p->pib_intr_lock);
+}
+
+/*
+ * Disable the interrupt via its interrupt mapping register.
+ * Can only be used for internal interrupts: ue, pbm.
+ * If called under interrupt context, wait should be set to 0
+ */
+void
+pcmu_ib_intr_disable(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino, int wait)
+{
+ volatile uint64_t *imr_p = ib_intr_map_reg_addr(pib_p, ino);
+ volatile uint64_t *state_reg_p = PCMU_IB_INO_INTR_STATE_REG(pib_p, ino);
+ hrtime_t start_time;
+ hrtime_t prev, curr, interval, jump;
+ hrtime_t intr_timeout;
+
+ /* disable the interrupt */
+ mutex_enter(&pib_p->pib_intr_lock);
+ PCMU_IB_INO_INTR_OFF(imr_p);
+ *imr_p; /* flush previous write */
+ mutex_exit(&pib_p->pib_intr_lock);
+
+ if (!wait)
+ goto wait_done;
+
+ intr_timeout = pcmu_intrpend_timeout;
+ jump = TICK_TO_NSEC(xc_tick_jump_limit);
+ start_time = curr = gethrtime();
+ /* busy wait if there is interrupt being processed */
+ while (PCMU_IB_INO_INTR_PENDING(state_reg_p, ino) && !panicstr) {
+ /*
+ * If we have a really large jump in hrtime, it is most
+ * probably because we entered the debugger (or OBP,
+ * in general). So, we adjust the timeout accordingly
+ * to prevent declaring an interrupt timeout. The
+ * master-interrupt mechanism in OBP should deliver
+ * the interrupts properly.
+ */
+ prev = curr;
+ curr = gethrtime();
+ interval = curr - prev;
+ if (interval > jump)
+ intr_timeout += interval;
+ if (curr - start_time > intr_timeout) {
+ pcmu_pbm_t *pcbm_p = pib_p->pib_pcmu_p->pcmu_pcbm_p;
+ cmn_err(CE_WARN,
+ "%s:%s: pcmu_ib_intr_disable timeout %x",
+ pcbm_p->pcbm_nameinst_str,
+ pcbm_p->pcbm_nameaddr_str, ino);
+ break;
+ }
+ }
+wait_done:
+ PCMU_IB_INO_INTR_PEND(ib_clear_intr_reg_addr(pib_p, ino));
+ u2u_ittrans_cleanup((u2u_ittrans_data_t *)
+ (PCMU_IB2CB(pib_p)->pcb_ittrans_cookie), imr_p);
+}
+
+/* can only be used for CMU-CH internal interrupts ue, pbm */
+void
+pcmu_ib_nintr_clear(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino)
+{
+ uint64_t *clr_reg = ib_clear_intr_reg_addr(pib_p, ino);
+ PCMU_IB_INO_INTR_CLEAR(clr_reg);
+}
+
+/*
+ * distribute PBM and UPA interrupts. ino is set to 0 by caller if we
+ * are dealing with UPA interrupts (without inos).
+ */
+void
+pcmu_ib_intr_dist_nintr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino,
+ volatile uint64_t *imr_p)
+{
+ volatile uint64_t imr = *imr_p;
+ uint32_t cpu_id;
+
+ if (!PCMU_IB_INO_INTR_ISON(imr))
+ return;
+
+ cpu_id = intr_dist_cpuid();
+
+ if (ino) {
+ cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);
+ }
+
+ if (ib_map_reg_get_cpu(*imr_p) == cpu_id) {
+ return;
+ }
+ *imr_p = ib_get_map_reg(PCMU_IB_IMR2MONDO(imr), cpu_id);
+ imr = *imr_p; /* flush previous write */
+}
+
+static void
+pcmu_ib_intr_dist(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p)
+{
+ uint32_t cpu_id = ino_p->pino_cpuid;
+ pcmu_ib_ino_t ino = ino_p->pino_ino;
+ volatile uint64_t imr, *imr_p, *state_reg;
+ hrtime_t start_time;
+ hrtime_t prev, curr, interval, jump;
+ hrtime_t intr_timeout;
+
+ ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
+ imr_p = ib_intr_map_reg_addr(pib_p, ino);
+ state_reg = PCMU_IB_INO_INTR_STATE_REG(pib_p, ino);
+
+ /* disable interrupt, this could disrupt devices sharing our slot */
+ PCMU_IB_INO_INTR_OFF(imr_p);
+ imr = *imr_p; /* flush previous write */
+
+ /* busy wait if there is interrupt being processed */
+ intr_timeout = pcmu_intrpend_timeout;
+ jump = TICK_TO_NSEC(xc_tick_jump_limit);
+ start_time = curr = gethrtime();
+ while (PCMU_IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
+ /*
+ * If we have a really large jump in hrtime, it is most
+ * probably because we entered the debugger (or OBP,
+ * in general). So, we adjust the timeout accordingly
+ * to prevent declaring an interrupt timeout. The
+ * master-interrupt mechanism in OBP should deliver
+ * the interrupts properly.
+ */
+ prev = curr;
+ curr = gethrtime();
+ interval = curr - prev;
+ if (interval > jump)
+ intr_timeout += interval;
+ if (curr - start_time > intr_timeout) {
+ pcmu_pbm_t *pcbm_p = pib_p->pib_pcmu_p->pcmu_pcbm_p;
+ cmn_err(CE_WARN,
+ "%s:%s: pcmu_ib_intr_dist(%p,%x) timeout",
+ pcbm_p->pcbm_nameinst_str,
+ pcbm_p->pcbm_nameaddr_str,
+ imr_p, PCMU_IB_INO_TO_MONDO(pib_p, ino));
+ break;
+ }
+ }
+ cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);
+ *imr_p = ib_get_map_reg(PCMU_IB_IMR2MONDO(imr), cpu_id);
+ imr = *imr_p; /* flush previous write */
+}
+
+/*
+ * Redistribute interrupts of the specified weight. The first call has a weight
+ * of weight_max, which can be used to trigger initialization for
+ * redistribution. The inos with weight [weight_max, inf.) should be processed
+ * on the "weight == weight_max" call. This first call is followed by calls
+ * of decreasing weights, inos of that weight should be processed. The final
+ * call specifies a weight of zero, this can be used to trigger processing of
+ * stragglers.
+ */
+void
+pcmu_ib_intr_dist_all(void *arg, int32_t weight_max, int32_t weight)
+{
+ pcmu_ib_t *pib_p = (pcmu_ib_t *)arg;
+ pcmu_ib_ino_info_t *ino_p;
+ ih_t *ih_lst;
+ int32_t dweight;
+ int i;
+
+ mutex_enter(&pib_p->pib_ino_lst_mutex);
+
+ /* Perform special processing for first call of a redistribution. */
+ if (weight == weight_max) {
+ for (ino_p = pib_p->pib_ino_lst; ino_p;
+ ino_p = ino_p->pino_next) {
+
+ /*
+ * Clear pino_established of each ino on first call.
+ * The pino_established field may be used by a pci
+ * nexus driver's pcmu_intr_dist_cpuid implementation
+			 * when detecting established pci slot-cpu bindings
+			 * for multi-function pci cards.
+ */
+ ino_p->pino_established = 0;
+
+ /*
+ * recompute the pino_intr_weight based on the device
+ * weight of all devinfo nodes sharing the ino (this
+ * will allow us to pick up new weights established by
+ * i_ddi_set_intr_weight()).
+ */
+ ino_p->pino_intr_weight = 0;
+ for (i = 0, ih_lst = ino_p->pino_ih_head;
+ i < ino_p->pino_ih_size;
+ i++, ih_lst = ih_lst->ih_next) {
+ dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
+ if (dweight > 0)
+ ino_p->pino_intr_weight += dweight;
+ }
+ }
+ }
+
+ for (ino_p = pib_p->pib_ino_lst; ino_p; ino_p = ino_p->pino_next) {
+ /*
+ * Get the weight of the ino and determine if we are going to
+		 * process this call. We wait until a pcmu_ib_intr_dist_all call of
+ * the proper weight occurs to support redistribution of all
+ * heavy weighted interrupts first (across all nexus driver
+ * instances). This is done to ensure optimal
+ * INTR_WEIGHTED_DIST behavior.
+ */
+ if ((weight == ino_p->pino_intr_weight) ||
+ ((weight >= weight_max) &&
+ (ino_p->pino_intr_weight >= weight_max))) {
+ /* select cpuid to target and mark ino established */
+ ino_p->pino_cpuid = pcmu_intr_dist_cpuid(pib_p, ino_p);
+ ino_p->pino_established = 1;
+
+ /* Add device weight of ino devinfos to targeted cpu. */
+ for (i = 0, ih_lst = ino_p->pino_ih_head;
+ i < ino_p->pino_ih_size;
+ i++, ih_lst = ih_lst->ih_next) {
+ dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
+ intr_dist_cpuid_add_device_weight(
+ ino_p->pino_cpuid, ih_lst->ih_dip, dweight);
+ }
+
+ /* program the hardware */
+ pcmu_ib_intr_dist(pib_p, ino_p);
+ }
+ }
+ mutex_exit(&pib_p->pib_ino_lst_mutex);
+}
+
+/*
+ * Reset interrupts to IDLE. This function is called during
+ * panic handling after redistributing interrupts; it's needed to
+ * support dumping to network devices after 'sync' from OBP.
+ *
+ * N.B. This routine runs in a context where all other threads
+ * are permanently suspended.
+ *
+ * NOTE(review): returns BF_NONE, so this is presumably registered
+ * as a bus_func callback — confirm against the ib setup code.
+ */
+static uint_t
+pcmu_ib_intr_reset(void *arg)
+{
+	pcmu_ib_t *pib_p = (pcmu_ib_t *)arg;
+	pcmu_ib_ino_t ino;
+	uint64_t *clr_reg;
+
+	/*
+	 * Note that we only actually care about interrupts that are
+	 * potentially from network devices.
+	 */
+	for (ino = 0; ino <= pib_p->pib_max_ino; ino++) {
+		clr_reg = ib_clear_intr_reg_addr(pib_p, ino);
+		PCMU_IB_INO_INTR_CLEAR(clr_reg);
+	}
+	return (BF_NONE);
+}
+
+void
+pcmu_ib_suspend(pcmu_ib_t *pib_p)
+{
+	pcmu_ib_ino_info_t *ino_p;
+
+	/* snapshot each ino's interrupt mapping register for resume */
+	mutex_enter(&pib_p->pib_ino_lst_mutex);
+	ino_p = pib_p->pib_ino_lst;
+	while (ino_p != NULL) {
+		ino_p->pino_map_reg_save = *ino_p->pino_map_reg;
+		ino_p = ino_p->pino_next;
+	}
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+}
+
+void
+pcmu_ib_resume(pcmu_ib_t *pib_p)
+{
+	pcmu_ib_ino_info_t *ino_p;
+
+	/* idle each ino, then restore its saved mapping register (IMR) */
+	mutex_enter(&pib_p->pib_ino_lst_mutex);
+	ino_p = pib_p->pib_ino_lst;
+	while (ino_p != NULL) {
+		PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+		*ino_p->pino_map_reg = ino_p->pino_map_reg_save;
+		ino_p = ino_p->pino_next;
+	}
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+}
+
+/*
+ * Search pib_p->pib_ino_lst for the ino_info structure whose ino
+ * number matches ino_num; returns NULL if no such entry exists.
+ * Caller must hold pib_ino_lst_mutex.
+ */
+pcmu_ib_ino_info_t *
+pcmu_ib_locate_ino(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino_num)
+{
+	pcmu_ib_ino_info_t *ino_p;
+
+	ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
+
+	for (ino_p = pib_p->pib_ino_lst; ino_p != NULL;
+	    ino_p = ino_p->pino_next) {
+		if (ino_p->pino_ino == ino_num)
+			break;
+	}
+	return (ino_p);
+}
+
+/*
+ * Map an ino to its slot number: OBIO (on-board) inos have no slot
+ * and yield 0xff; otherwise bits <4:2> of the ino select the slot.
+ */
+#define	PCMU_IB_INO_TO_SLOT(ino) \
+	(PCMU_IB_IS_OBIO_INO(ino) ? 0xff : ((ino) & 0x1f) >> 2)
+
+/*
+ * Allocate and initialize an ino_info structure for ino_num with ih_p
+ * as its sole handler (a singleton circular list), and push it onto
+ * the front of pib_p->pib_ino_lst. The interrupt is deliberately not
+ * disabled here (see comment below).
+ */
+pcmu_ib_ino_info_t *
+pcmu_ib_new_ino(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino_num, ih_t *ih_p)
+{
+	pcmu_ib_ino_info_t *ino_p = kmem_alloc(sizeof (pcmu_ib_ino_info_t),
+	    KM_SLEEP);
+	ino_p->pino_ino = ino_num;
+	ino_p->pino_slot_no = PCMU_IB_INO_TO_SLOT(ino_num);
+	ino_p->pino_ib_p = pib_p;
+	/* cache this ino's clear/mapping register addresses */
+	ino_p->pino_clr_reg = ib_clear_intr_reg_addr(pib_p, ino_num);
+	ino_p->pino_map_reg = ib_intr_map_reg_addr(pib_p, ino_num);
+	ino_p->pino_unclaimed = 0;
+
+	/*
+	 * cannot disable interrupt since we might share slot
+	 * PCMU_IB_INO_INTR_OFF(ino_p->pino_map_reg);
+	 */
+
+	/* ih_p becomes a one-element circular handler list */
+	ih_p->ih_next = ih_p;
+	ino_p->pino_ih_head = ih_p;
+	ino_p->pino_ih_tail = ih_p;
+	ino_p->pino_ih_start = ih_p;
+	ino_p->pino_ih_size = 1;
+
+	/* push onto the head of the ib's ino list */
+	ino_p->pino_next = pib_p->pib_ino_lst;
+	pib_p->pib_ino_lst = ino_p;
+	return (ino_p);
+}
+
+/*
+ * Unlink ino_p from pib_p->pib_ino_lst; ino_p must be on the list
+ * (it is retrieved by a previous call to pcmu_ib_locate_ino()).
+ * The structure itself is not freed here.
+ */
+void
+pcmu_ib_delete_ino(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p)
+{
+	pcmu_ib_ino_info_t **pp;
+
+	ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
+
+	/* walk link pointers until we find the one referencing ino_p */
+	for (pp = &pib_p->pib_ino_lst; *pp != ino_p; pp = &(*pp)->pino_next)
+		;
+	*pp = ino_p->pino_next;
+}
+
+/* free every ino structure on the list; used when detaching */
+void
+pcmu_ib_free_ino_all(pcmu_ib_t *pib_p)
+{
+	pcmu_ib_ino_info_t *ino_p = pib_p->pib_ino_lst;
+
+	while (ino_p != NULL) {
+		pcmu_ib_ino_info_t *dead = ino_p;
+
+		ino_p = ino_p->pino_next;
+		kmem_free(dead, sizeof (pcmu_ib_ino_info_t));
+	}
+}
+
+/*
+ * Append ih_p to ino_p's circular handler list.
+ *
+ * The interrupt is first disabled and any in-flight instance drained
+ * (bounded by pcmu_intrpend_timeout, with hrtime-jump compensation),
+ * the list is then modified, a jabber-blocked ino is unblocked, and
+ * finally the interrupt is re-enabled.
+ * Caller must hold pib_ino_lst_mutex.
+ */
+void
+pcmu_ib_ino_add_intr(pcmu_t *pcmu_p, pcmu_ib_ino_info_t *ino_p, ih_t *ih_p)
+{
+	pcmu_ib_ino_t ino = ino_p->pino_ino;
+	pcmu_ib_t *pib_p = ino_p->pino_ib_p;
+	volatile uint64_t *state_reg = PCMU_IB_INO_INTR_STATE_REG(pib_p, ino);
+	hrtime_t start_time;
+	hrtime_t prev, curr, interval, jump;
+	hrtime_t intr_timeout;
+
+	ASSERT(pib_p == pcmu_p->pcmu_ib_p);
+	ASSERT(MUTEX_HELD(&pib_p->pib_ino_lst_mutex));
+
+	/* disable interrupt, this could disrupt devices sharing our slot */
+	PCMU_IB_INO_INTR_OFF(ino_p->pino_map_reg);
+	*ino_p->pino_map_reg;	/* read back; presumably flushes the disable */
+
+	/* do NOT modify the link list until after the busy wait */
+
+	/*
+	 * busy wait if there is interrupt being processed.
+	 * either the pending state will be cleared by the interrupt wrapper
+	 * or the interrupt will be marked as blocked indicating that it was
+	 * jabbering.
+	 */
+	intr_timeout = pcmu_intrpend_timeout;
+	jump = TICK_TO_NSEC(xc_tick_jump_limit);
+	start_time = curr = gethrtime();
+	while ((ino_p->pino_unclaimed <= pcmu_unclaimed_intr_max) &&
+	    PCMU_IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
+		/*
+		 * If we have a really large jump in hrtime, it is most
+		 * probably because we entered the debugger (or OBP,
+		 * in general). So, we adjust the timeout accordingly
+		 * to prevent declaring an interrupt timeout. The
+		 * master-interrupt mechanism in OBP should deliver
+		 * the interrupts properly.
+		 */
+		prev = curr;
+		curr = gethrtime();
+		interval = curr - prev;
+		if (interval > jump)
+			intr_timeout += interval;
+		if (curr - start_time > intr_timeout) {
+			pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+			cmn_err(CE_WARN,
+			    "%s:%s: pcmu_ib_ino_add_intr %x timeout",
+			    pcbm_p->pcbm_nameinst_str,
+			    pcbm_p->pcbm_nameaddr_str, ino);
+			break;
+		}
+	}
+
+	/* link up pcmu_ispec_t portion of the ppd */
+	ih_p->ih_next = ino_p->pino_ih_head;	/* keep the list circular */
+	ino_p->pino_ih_tail->ih_next = ih_p;
+	ino_p->pino_ih_tail = ih_p;
+
+	ino_p->pino_ih_start = ino_p->pino_ih_head;
+	ino_p->pino_ih_size++;
+
+	/*
+	 * if the interrupt was previously blocked (left in pending state)
+	 * because of jabber we need to clear the pending state in case the
+	 * jabber has gone away.
+	 */
+	if (ino_p->pino_unclaimed > pcmu_unclaimed_intr_max) {
+		cmn_err(CE_WARN,
+		    "%s%d: pcmu_ib_ino_add_intr: ino 0x%x has been unblocked",
+		    ddi_driver_name(pcmu_p->pcmu_dip),
+		    ddi_get_instance(pcmu_p->pcmu_dip),
+		    ino_p->pino_ino);
+		ino_p->pino_unclaimed = 0;
+		PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+	}
+
+	/* re-enable interrupt */
+	PCMU_IB_INO_INTR_ON(ino_p->pino_map_reg);
+	*ino_p->pino_map_reg;	/* read back; presumably flushes the enable */
+}
+
+/*
+ * removes pcmu_ispec_t from the ino's link list.
+ * uses hardware mutex to lock out interrupt threads.
+ * Side effects: the interrupt belonging to that ino is turned off on
+ * return. If we are sharing a PCI slot with other inos, the caller
+ * needs to turn it back on.
+ */
+void
+pcmu_ib_ino_rem_intr(pcmu_t *pcmu_p, pcmu_ib_ino_info_t *ino_p, ih_t *ih_p)
+{
+	int i;
+	pcmu_ib_ino_t ino = ino_p->pino_ino;
+	ih_t *ih_lst = ino_p->pino_ih_head;
+	volatile uint64_t *state_reg =
+	    PCMU_IB_INO_INTR_STATE_REG(ino_p->pino_ib_p, ino);
+	hrtime_t start_time;
+	hrtime_t prev, curr, interval, jump;
+	hrtime_t intr_timeout;
+
+	ASSERT(MUTEX_HELD(&ino_p->pino_ib_p->pib_ino_lst_mutex));
+	/* disable interrupt, this could disrupt devices sharing our slot */
+	PCMU_IB_INO_INTR_OFF(ino_p->pino_map_reg);
+	*ino_p->pino_map_reg;	/* read back; presumably flushes the disable */
+
+	/* do NOT modify the link list until after the busy wait */
+
+	/*
+	 * busy wait if there is interrupt being processed.
+	 * either the pending state will be cleared by the interrupt wrapper
+	 * or the interrupt will be marked as blocked indicating that it was
+	 * jabbering.
+	 */
+	intr_timeout = pcmu_intrpend_timeout;
+	jump = TICK_TO_NSEC(xc_tick_jump_limit);
+	start_time = curr = gethrtime();
+	while ((ino_p->pino_unclaimed <= pcmu_unclaimed_intr_max) &&
+	    PCMU_IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
+		/*
+		 * If we have a really large jump in hrtime, it is most
+		 * probably because we entered the debugger (or OBP,
+		 * in general). So, we adjust the timeout accordingly
+		 * to prevent declaring an interrupt timeout. The
+		 * master-interrupt mechanism in OBP should deliver
+		 * the interrupts properly.
+		 */
+		prev = curr;
+		curr = gethrtime();
+		interval = curr - prev;
+		if (interval > jump)
+			intr_timeout += interval;
+		if (curr - start_time > intr_timeout) {
+			pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+			cmn_err(CE_WARN,
+			    "%s:%s: pcmu_ib_ino_rem_intr %x timeout",
+			    pcbm_p->pcbm_nameinst_str,
+			    pcbm_p->pcbm_nameaddr_str, ino);
+			break;
+		}
+	}
+
+	/* sole handler: it must be ih_p itself; skip the list search */
+	if (ino_p->pino_ih_size == 1) {
+		if (ih_lst != ih_p)
+			goto not_found;
+		/* no need to set head/tail as ino_p will be freed */
+		goto reset;
+	}
+
+	/*
+	 * if the interrupt was previously blocked (left in pending state)
+	 * because of jabber we need to clear the pending state in case the
+	 * jabber has gone away.
+	 */
+	if (ino_p->pino_unclaimed > pcmu_unclaimed_intr_max) {
+		cmn_err(CE_WARN,
+		    "%s%d: pcmu_ib_ino_rem_intr: ino 0x%x has been unblocked",
+		    ddi_driver_name(pcmu_p->pcmu_dip),
+		    ddi_get_instance(pcmu_p->pcmu_dip),
+		    ino_p->pino_ino);
+		ino_p->pino_unclaimed = 0;
+		PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+	}
+
+	/* search the link list for ih_p */
+	for (i = 0; (i < ino_p->pino_ih_size) && (ih_lst->ih_next != ih_p);
+	    i++, ih_lst = ih_lst->ih_next);
+	if (ih_lst->ih_next != ih_p) {
+		goto not_found;
+	}
+
+	/* remove ih_p from the link list and maintain the head/tail */
+	ih_lst->ih_next = ih_p->ih_next;
+	if (ino_p->pino_ih_head == ih_p) {
+		ino_p->pino_ih_head = ih_p->ih_next;
+	}
+	if (ino_p->pino_ih_tail == ih_p) {
+		ino_p->pino_ih_tail = ih_lst;
+	}
+	ino_p->pino_ih_start = ino_p->pino_ih_head;
+reset:
+	/* release the handler's config-space mapping, if it holds one */
+	if (ih_p->ih_config_handle) {
+		pci_config_teardown(&ih_p->ih_config_handle);
+	}
+	kmem_free(ih_p, sizeof (ih_t));
+	ino_p->pino_ih_size--;
+
+	return;
+not_found:
+	/* ih_p is not registered on this ino; nothing to remove */
+	PCMU_DBG2(PCMU_DBG_R_INTX, ino_p->pino_ib_p->pib_pcmu_p->pcmu_dip,
+	    "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
+}
+
+/*
+ * Walk the ino's circular handler list looking for the entry that was
+ * registered for (rdip, inum); returns NULL when none is present.
+ */
+ih_t *
+pcmu_ib_ino_locate_intr(pcmu_ib_ino_info_t *ino_p,
+    dev_info_t *rdip, uint32_t inum)
+{
+	ih_t *ih_p = ino_p->pino_ih_head;
+	int remaining;
+
+	for (remaining = ino_p->pino_ih_size; remaining > 0; remaining--) {
+		if (ih_p->ih_dip == rdip && ih_p->ih_inum == inum)
+			return (ih_p);
+		ih_p = ih_p->ih_next;
+	}
+	return (NULL);
+}
+
+/*
+ * Allocate and fill in one interrupt-handler element (ih_t). The
+ * handler starts out in the DISABLE state with no config handle; the
+ * caller is responsible for linking it onto an ino.
+ */
+ih_t *
+pcmu_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
+    uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
+    caddr_t int_handler_arg1,
+    caddr_t int_handler_arg2)
+{
+	ih_t *new_ih_p = kmem_alloc(sizeof (ih_t), KM_SLEEP);
+
+	new_ih_p->ih_dip = rdip;
+	new_ih_p->ih_inum = inum;
+	new_ih_p->ih_handler = int_handler;
+	new_ih_p->ih_handler_arg1 = int_handler_arg1;
+	new_ih_p->ih_handler_arg2 = int_handler_arg2;
+	new_ih_p->ih_config_handle = NULL;
+	new_ih_p->ih_intr_state = PCMU_INTR_STATE_DISABLE;
+	return (new_ih_p);
+}
+
+/*
+ * Record a new enable/disable soft state for the (rdip, inum) handler
+ * registered on the ino encoded in hdlp->ih_vector.
+ *
+ * Returns DDI_SUCCESS when the handler is located and updated,
+ * DDI_FAILURE otherwise (vector does not translate, ino not found,
+ * or no matching handler).
+ */
+int
+pcmu_ib_update_intr_state(pcmu_t *pcmu_p, dev_info_t *rdip,
+    ddi_intr_handle_impl_t *hdlp, uint_t new_intr_state)
+{
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	pcmu_ib_ino_info_t *ino_p;
+	pcmu_ib_mondo_t mondo;
+	ih_t *ih_p;
+	int ret = DDI_FAILURE;
+
+	mutex_enter(&pib_p->pib_ino_lst_mutex);
+
+	/* a zero mondo means the vector did not translate */
+	if ((mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p,
+	    PCMU_IB_MONDO_TO_INO((int32_t)hdlp->ih_vector))) == 0) {
+		mutex_exit(&pib_p->pib_ino_lst_mutex);
+		return (ret);
+	}
+
+	if (ino_p = pcmu_ib_locate_ino(pib_p, PCMU_IB_MONDO_TO_INO(mondo))) {
+		if (ih_p = pcmu_ib_ino_locate_intr(ino_p,
+		    rdip, hdlp->ih_inum)) {
+			ih_p->ih_intr_state = new_intr_state;
+			ret = DDI_SUCCESS;
+		}
+	}
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+	return (ret);
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_intr.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_intr.c
new file mode 100644
index 0000000000..7f523f6280
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_intr.c
@@ -0,0 +1,340 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH nexus interrupt handling:
+ * PCI device interrupt handler wrapper
+ * pil lookup routine
+ * PCI device interrupt related initchild code
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/async.h>
+#include <sys/spl.h>
+#include <sys/sunddi.h>
+#include <sys/machsystm.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pcicmu/pcicmu.h>
+#include <sys/sdt.h>
+
+uint_t pcmu_intr_wrapper(caddr_t arg);
+
+/*
+ * interrupt jabber:
+ *
+ * When an interrupt line is jabbering, every time the state machine for the
+ * associated ino is idled, a new mondo will be sent and the ino will go into
+ * the pending state again. The mondo will cause a new call to
+ * pcmu_intr_wrapper() which normally idles the ino's state machine which would
+ * precipitate another trip round the loop.
+ * The loop can be broken by preventing the ino's state machine from being
+ * idled when an interrupt line is jabbering. See the comment at the
+ * beginning of pcmu_intr_wrapper() explaining how the 'interrupt jabber
+ * protection' code does this.
+ */
+
+
+/*
+ * If the unclaimed interrupt count has reached the limit set by
+ * pcmu_unclaimed_intr_max within the time limit, then all interrupts
+ * on this ino are blocked by not idling the interrupt state machine.
+ * Always returns DDI_INTR_CLAIMED.
+ */
+static int
+pcmu_spurintr(pcmu_ib_ino_info_t *ino_p)
+{
+	int i;
+	ih_t *ih_p = ino_p->pino_ih_start;
+	pcmu_t *pcmu_p = ino_p->pino_ib_p->pib_pcmu_p;
+	char *err_fmt_str;
+
+	/* already blocked: claim it without idling the state machine */
+	if (ino_p->pino_unclaimed > pcmu_unclaimed_intr_max) {
+		return (DDI_INTR_CLAIMED);
+	}
+	/* first unclaimed interrupt of a possible burst: note the time */
+	if (!ino_p->pino_unclaimed) {
+		ino_p->pino_spurintr_begin = ddi_get_lbolt();
+	}
+	ino_p->pino_unclaimed++;
+	if (ino_p->pino_unclaimed <= pcmu_unclaimed_intr_max) {
+		goto clear;
+	}
+	/* limit exceeded, but only block if it happened within the window */
+	if (drv_hztousec(ddi_get_lbolt() - ino_p->pino_spurintr_begin)
+	    > pcmu_spurintr_duration) {
+		ino_p->pino_unclaimed = 0;
+		goto clear;
+	}
+	/* jabbering: leave the ino pending so no further mondo is sent */
+	err_fmt_str = "%s%d: ino 0x%x blocked";
+	goto warn;
+clear:
+	/* clear the pending state */
+	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
+warn:
+	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pcmu_p->pcmu_dip),
+	    ino_p->pino_ino);
+	/* list every handler registered on this ino for diagnosis */
+	for (i = 0; i < ino_p->pino_ih_size; i++, ih_p = ih_p->ih_next) {
+		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
+		    ih_p->ih_inum);
+	}
+	cmn_err(CE_CONT, "!\n");
+	return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * pcmu_intr_wrapper
+ *
+ * This routine is used as wrapper around interrupt handlers installed by child
+ * device drivers. This routine invokes the driver interrupt handlers and
+ * examines the return codes.
+ * There is a count of unclaimed interrupts kept on a per-ino basis. If at
+ * least one handler claims the interrupt then the counter is reset to zero
+ * and the interrupt state machine is idled. If no handler claims the
+ * interrupt then the counter is incremented by one and the state machine is
+ * idled.
+ * If the count ever reaches the limit value set by pcmu_unclaimed_intr_max
+ * then the interrupt state machine is not idled thus preventing any further
+ * interrupts on that ino. The state machine will only be idled again if a
+ * handler is subsequently added or removed.
+ *
+ * return value: always DDI_INTR_CLAIMED; unclaimed interrupts are
+ * accounted for (and possibly blocked) by pcmu_spurintr().
+ */
+uint_t
+pcmu_intr_wrapper(caddr_t arg)
+{
+	pcmu_ib_ino_info_t *ino_p = (pcmu_ib_ino_info_t *)arg;
+	uint_t result = 0, r;
+	ih_t *ih_p = ino_p->pino_ih_start;
+	int i;
+#ifdef DEBUG
+	pcmu_t *pcmu_p = ino_p->pino_ib_p->pib_pcmu_p;
+#endif
+
+
+	/* call every enabled handler; a nonzero sum means someone claimed */
+	for (i = 0; i < ino_p->pino_ih_size; i++, ih_p = ih_p->ih_next) {
+		dev_info_t *dip = ih_p->ih_dip;
+		uint_t (*handler)() = ih_p->ih_handler;
+		caddr_t arg1 = ih_p->ih_handler_arg1;
+		caddr_t arg2 = ih_p->ih_handler_arg2;
+
+		if (ih_p->ih_intr_state == PCMU_INTR_STATE_DISABLE) {
+			PCMU_DBG3(PCMU_DBG_INTR, pcmu_p->pcmu_dip,
+			    "pcmu_intr_wrapper: %s%d interrupt %d is "
+			    "disabled\n", ddi_driver_name(dip),
+			    ddi_get_instance(dip), ino_p->pino_ino);
+			continue;
+		}
+
+		DTRACE_PROBE4(pcmu__interrupt__start, dev_info_t, dip,
+		    void *, handler, caddr_t, arg1, caddr_t, arg2);
+
+		r = (*handler)(arg1, arg2);
+		DTRACE_PROBE4(pcmu__interrupt__complete, dev_info_t, dip,
+		    void *, handler, caddr_t, arg1, int, r);
+
+		result += r;
+	}
+
+	if (!result) {
+		return (pcmu_spurintr(ino_p));
+	}
+	ino_p->pino_unclaimed = 0;
+	/* clear the pending state */
+	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+	return (DDI_INTR_CLAIMED);
+}
+
+/*
+ * bus_ops interrupt-add entry point: attach rdip's handler to the ino
+ * encoded in hdlp->ih_vector. If the ino already has handlers, the
+ * new one joins the shared list; otherwise a new ino structure is
+ * created, pcmu_intr_wrapper() is registered as the system vector via
+ * i_ddi_add_ivintr(), and a target CPU is selected and programmed
+ * into the mapping register.
+ *
+ * Returns DDI_SUCCESS, DDI_FAILURE, or DDI_INTR_NOTFOUND for an
+ * out-of-range ino.
+ */
+int
+pcmu_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
+{
+	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	ih_t *ih_p;
+	pcmu_ib_ino_t ino;
+	pcmu_ib_ino_info_t *ino_p;	/* pulse interrupts have no ino */
+	pcmu_ib_mondo_t mondo;
+	uint32_t cpu_id;
+	int ret;
+
+	ino = PCMU_IB_MONDO_TO_INO(hdlp->ih_vector);
+
+	PCMU_DBG3(PCMU_DBG_A_INTX, dip, "pcmu_add_intr: rdip=%s%d ino=%x\n",
+	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);
+
+	if (ino > pib_p->pib_max_ino) {
+		PCMU_DBG1(PCMU_DBG_A_INTX, dip, "ino %x is invalid\n", ino);
+		return (DDI_INTR_NOTFOUND);
+	}
+
+	/* a zero mondo means the ino does not translate to a vector */
+	if ((mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p, ino)) == 0)
+		goto fail1;
+
+	ino = PCMU_IB_MONDO_TO_INO(mondo);
+
+	mutex_enter(&pib_p->pib_ino_lst_mutex);
+	ih_p = pcmu_ib_alloc_ih(rdip, hdlp->ih_inum,
+	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
+
+	if (ino_p = pcmu_ib_locate_ino(pib_p, ino)) {	/* sharing ino */
+		uint32_t intr_index = hdlp->ih_inum;
+		if (pcmu_ib_ino_locate_intr(ino_p, rdip, intr_index)) {
+			PCMU_DBG1(PCMU_DBG_A_INTX, dip,
+			    "dup intr #%d\n", intr_index);
+			goto fail3;
+		}
+
+		/*
+		 * add default weight(0) to the cpu that we are
+		 * already targeting
+		 */
+		cpu_id = ino_p->pino_cpuid;
+		intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);
+		pcmu_ib_ino_add_intr(pcmu_p, ino_p, ih_p);
+		goto ino_done;
+	}
+
+	/* first handler on this ino */
+	ino_p = pcmu_ib_new_ino(pib_p, ino, ih_p);
+	hdlp->ih_vector = mondo;
+
+	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "pcmu_add_intr: pil=0x%x mondo=0x%x\n",
+	    hdlp->ih_pri, hdlp->ih_vector);
+
+	/* register our wrapper so all shared handlers get invoked */
+	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
+	    (ddi_intr_handler_t *)pcmu_intr_wrapper, (caddr_t)ino_p, NULL);
+
+	ret = i_ddi_add_ivintr(hdlp);
+
+	/*
+	 * Restore original interrupt handler
+	 * and arguments in interrupt handle.
+	 */
+	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
+	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);
+
+	if (ret != DDI_SUCCESS) {
+		goto fail4;
+	}
+	/* Save the pil for this ino */
+	ino_p->pino_pil = hdlp->ih_pri;
+
+	/* clear and enable interrupt */
+	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);
+
+	/* select cpu for sharing and removal */
+	cpu_id = pcmu_intr_dist_cpuid(pib_p, ino_p);
+	ino_p->pino_cpuid = cpu_id;
+	ino_p->pino_established = 1;
+	intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);
+
+	/* NOTE(review): u2u translation may substitute the target id */
+	cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p,
+	    cpu_id, ino_p->pino_map_reg);
+	*ino_p->pino_map_reg = ib_get_map_reg(mondo, cpu_id);
+	*ino_p->pino_map_reg;	/* read back; presumably flushes the write */
+ino_done:
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+done:
+	/* (nothing jumps to "done:"; both paths fall through here) */
+	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
+	    hdlp->ih_vector, hdlp->ih_pri);
+	return (DDI_SUCCESS);
+fail4:
+	pcmu_ib_delete_ino(pib_p, ino_p);
+fail3:
+	if (ih_p->ih_config_handle)
+		pci_config_teardown(&ih_p->ih_config_handle);
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+	kmem_free(ih_p, sizeof (ih_t));
+fail1:
+	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
+	    hdlp->ih_vector, hdlp->ih_pri);
+	return (DDI_FAILURE);
+}
+
+/*
+ * bus_ops interrupt-remove entry point: detach rdip's handler from
+ * the ino encoded in hdlp->ih_vector. When the last handler goes
+ * away, the ivintr registration and the ino structure itself are torn
+ * down; otherwise the still-shared mapping register is re-enabled.
+ */
+int
+pcmu_remove_intr(dev_info_t *dip, dev_info_t *rdip,
+    ddi_intr_handle_impl_t *hdlp)
+{
+	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	pcmu_ib_ino_t ino;
+	pcmu_ib_mondo_t mondo;
+	pcmu_ib_ino_info_t *ino_p;	/* non-pulse only */
+	ih_t *ih_p;			/* non-pulse only */
+
+	ino = PCMU_IB_MONDO_TO_INO(hdlp->ih_vector);
+
+	PCMU_DBG3(PCMU_DBG_R_INTX, dip, "pcmu_rem_intr: rdip=%s%d ino=%x\n",
+	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);
+
+	/* Translate the interrupt property */
+	mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p, ino);
+	if (mondo == 0) {
+		PCMU_DBG1(PCMU_DBG_R_INTX, dip,
+		    "can't get mondo for ino %x\n", ino);
+		return (DDI_FAILURE);
+	}
+	ino = PCMU_IB_MONDO_TO_INO(mondo);
+
+	mutex_enter(&pib_p->pib_ino_lst_mutex);
+	ino_p = pcmu_ib_locate_ino(pib_p, ino);
+	if (!ino_p) {
+		/* no such ino: nothing registered, nothing to remove */
+		mutex_exit(&pib_p->pib_ino_lst_mutex);
+		return (DDI_SUCCESS);
+	}
+
+	ih_p = pcmu_ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum);
+	pcmu_ib_ino_rem_intr(pcmu_p, ino_p, ih_p);
+	intr_dist_cpuid_rem_device_weight(ino_p->pino_cpuid, rdip);
+	/* last handler gone: unlink the ino and drop its system vector */
+	if (ino_p->pino_ih_size == 0) {
+		PCMU_IB_INO_INTR_PEND(ib_clear_intr_reg_addr(pib_p, ino));
+		hdlp->ih_vector = mondo;
+		i_ddi_rem_ivintr(hdlp);
+		pcmu_ib_delete_ino(pib_p, ino_p);
+	}
+
+	/* re-enable interrupt only if mapping register still shared */
+	if (ino_p->pino_ih_size) {
+		PCMU_IB_INO_INTR_ON(ino_p->pino_map_reg);
+		*ino_p->pino_map_reg;
+	}
+	mutex_exit(&pib_p->pib_ino_lst_mutex);
+	/* ino_p was already unlinked under the mutex, so freeing is safe */
+	if (ino_p->pino_ih_size == 0) {
+		kmem_free(ino_p, sizeof (pcmu_ib_ino_info_t));
+	}
+	PCMU_DBG1(PCMU_DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * Release the pcmu_inos array that pcmu_intr_setup allocated; the
+ * interrupts themselves are dismantled by the individual block
+ * destroy routines: cb_destroy, pcmu_pbm_destroy, and ib_destroy.
+ */
+void
+pcmu_intr_teardown(pcmu_t *pcmu_p)
+{
+	kmem_free(pcmu_p->pcmu_inos, pcmu_p->pcmu_inos_len);
+	pcmu_p->pcmu_inos_len = 0;
+	pcmu_p->pcmu_inos = NULL;
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_pbm.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_pbm.c
new file mode 100644
index 0000000000..26891fa6fd
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_pbm.c
@@ -0,0 +1,290 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH PBM implementation:
+ * initialization
+ * Bus error interrupt handler
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/spl.h>
+#include <sys/sysmacros.h>
+#include <sys/sunddi.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/util.h>
+#include <sys/machsystm.h>
+#include <sys/async.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/ontrap.h>
+#include <sys/pcicmu/pcicmu.h>
+#include <sys/membar.h>
+#include <sys/ivintr.h>
+
+/*LINTLIBRARY*/
+
+static uint_t pcmu_pbm_error_intr(caddr_t a);
+
+/* The nexus interrupt priority values */
+int pcmu_pil[] = {14, 14, 14, 14, 14, 14};
+
+/*
+ * Allocate and initialize the PBM state for this CMU-CH node: name
+ * strings, register setup (pcmu_pbm_setup), the panic-time error
+ * disable hook, the "interrupt-priorities" property (created only if
+ * absent), and the final hardware configuration (pcmu_pbm_configure).
+ */
+void
+pcmu_pbm_create(pcmu_t *pcmu_p)
+{
+	pcmu_pbm_t *pcbm_p;
+	int len;
+	dev_info_t *dip = pcmu_p->pcmu_dip;
+
+	/*
+	 * Allocate a state structure for the PBM and cross-link it
+	 * to its per pci node state structure.
+	 */
+	pcbm_p = (pcmu_pbm_t *)kmem_zalloc(sizeof (pcmu_pbm_t), KM_SLEEP);
+	pcmu_p->pcmu_pcbm_p = pcbm_p;
+	pcbm_p->pcbm_pcmu_p = pcmu_p;
+
+	/* the name@addr string is carved out of the same buffer, after
+	 * the NUL that terminates the name+instance string */
+	len = snprintf(pcbm_p->pcbm_nameinst_str,
+	    sizeof (pcbm_p->pcbm_nameinst_str), "%s%d", NAMEINST(dip));
+	pcbm_p->pcbm_nameaddr_str = pcbm_p->pcbm_nameinst_str + ++len;
+	(void) snprintf(pcbm_p->pcbm_nameaddr_str,
+	    sizeof (pcbm_p->pcbm_nameinst_str) - len, "%s@%s", NAMEADDR(dip));
+
+	pcmu_pbm_setup(pcbm_p);
+
+	PCMU_DBG4(PCMU_DBG_ATTACH, dip,
+	    "pcmu_pbm_create: ctrl=%x, afsr=%x, afar=%x, diag=%x\n",
+	    pcbm_p->pcbm_ctrl_reg, pcbm_p->pcbm_async_flt_status_reg,
+	    pcbm_p->pcbm_async_flt_addr_reg, pcbm_p->pcbm_diag_reg);
+	PCMU_DBG1(PCMU_DBG_ATTACH, dip, "pcmu_pbm_create: conf=%x\n",
+	    pcbm_p->pcbm_config_header);
+
+	/*
+	 * Register a function to disable pbm error interrupts during a panic.
+	 */
+	bus_func_register(BF_TYPE_ERRDIS,
+	    (busfunc_t)pcmu_pbm_disable_errors, pcbm_p);
+
+	/*
+	 * create the interrupt-priorities property if it doesn't
+	 * already exist to provide a hint as to the PIL level for
+	 * our interrupt.
+	 */
+	if (ddi_getproplen(DDI_DEV_T_ANY, dip,
+	    DDI_PROP_DONTPASS, "interrupt-priorities",
+	    &len) != DDI_PROP_SUCCESS) {
+		/* Create the interrupt-priorities property. */
+		(void) ddi_prop_create(DDI_DEV_T_NONE, dip,
+		    DDI_PROP_CANSLEEP, "interrupt-priorities",
+		    (caddr_t)pcmu_pil, sizeof (pcmu_pil));
+	}
+	pcmu_pbm_configure(pcbm_p);
+}
+
+/*
+ * Install the PBM error interrupt handler for this node and create
+ * the pokefault mutex one PIL below that interrupt. Returns the
+ * standard attach return code for the PBM object.
+ */
+int
+pcmu_pbm_register_intr(pcmu_pbm_t *pcbm_p)
+{
+	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+	uint32_t mondo;
+	/* r is only ever DDI_SUCCESS: VERIFY aborts on add_ivintr failure */
+	int r = DDI_SUCCESS;
+
+	pcmu_ib_nintr_clear(pcmu_p->pcmu_ib_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);
+
+	/*
+	 * Install the PCI error interrupt handler.
+	 */
+	mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p,
+	    pcmu_p->pcmu_inos[CBNINTR_PBM]);
+
+	VERIFY(add_ivintr(mondo, pcmu_pil[CBNINTR_PBM], pcmu_pbm_error_intr,
+	    (caddr_t)pcmu_p, NULL) == 0);
+
+	/* the "cookie" here is just the PIL encoded as a pointer */
+	pcbm_p->pcbm_iblock_cookie = (void *)(uintptr_t)pcmu_pil[CBNINTR_PBM];
+
+	/*
+	 * Create the pokefault mutex at the PIL below the error interrupt.
+	 */
+
+	mutex_init(&pcbm_p->pcbm_pokeflt_mutex, NULL, MUTEX_DRIVER,
+	    (void *)(uintptr_t)ipltospl(spltoipl(
+	    (int)(uintptr_t)pcbm_p->pcbm_iblock_cookie) - 1));
+
+	return (PCMU_ATTACH_RETCODE(PCMU_PBM_OBJ, PCMU_OBJ_INTR_ADD, r));
+}
+
+/*
+ * Undo pcmu_pbm_create()/pcmu_pbm_register_intr(): remove the error
+ * interrupt and its distribution callback, unregister the panic-time
+ * error-disable hook, tear down the PBM registers, and free the
+ * state structure.
+ */
+void
+pcmu_pbm_destroy(pcmu_t *pcmu_p)
+{
+	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	uint32_t mondo;
+
+	PCMU_DBG0(PCMU_DBG_DETACH, pcmu_p->pcmu_dip, "pcmu_pbm_destroy:\n");
+
+	mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p,
+	    pcmu_p->pcmu_inos[CBNINTR_PBM]);
+
+	/*
+	 * Free the pokefault mutex.
+	 */
+	mutex_destroy(&pcbm_p->pcbm_pokeflt_mutex);
+
+	/*
+	 * Remove the error interrupt.
+	 */
+	intr_dist_rem(pcmu_pbm_intr_dist, pcbm_p);
+	pcmu_ib_intr_disable(pib_p,
+	    pcmu_p->pcmu_inos[CBNINTR_PBM], PCMU_IB_INTR_WAIT);
+	rem_ivintr(mondo, NULL);
+
+	/*
+	 * Remove the error disable function.
+	 */
+	bus_func_unregister(BF_TYPE_ERRDIS,
+	    (busfunc_t)pcmu_pbm_disable_errors, pcbm_p);
+
+	pcmu_pbm_teardown(pcbm_p);
+
+	/*
+	 * Free the pbm state structure.
+	 */
+	kmem_free(pcbm_p, sizeof (pcmu_pbm_t));
+	pcmu_p->pcmu_pcbm_p = NULL;
+}
+
+/*
+ * PBM error interrupt handler. Distinguishes errors expected under
+ * ddi_poke protection (on_trap data flags OT_DATA_ACCESS) from
+ * unexpected ones, dispatches both to pcmu_pbm_err_handler(), and
+ * panics on a fatal result when pcmu_panic_on_fatal_errors is set.
+ * Always claims the interrupt.
+ */
+static uint_t
+pcmu_pbm_error_intr(caddr_t a)
+{
+	pcmu_t *pcmu_p = (pcmu_t *)a;
+	pcmu_pbm_t *pcbm_p = pcmu_p->pcmu_pcbm_p;
+	ddi_fm_error_t derr;
+	int err = DDI_FM_OK;
+	on_trap_data_t *otp = pcbm_p->pcbm_ontrap_data;
+
+	bzero(&derr, sizeof (ddi_fm_error_t));
+	derr.fme_version = DDI_FME_VERSION;
+	mutex_enter(&pcmu_p->pcmu_err_mutex);
+	if ((otp != NULL) && (otp->ot_prot & OT_DATA_ACCESS)) {
+		/*
+		 * ddi_poke protection, check nexus and children for
+		 * expected errors.
+		 */
+		otp->ot_trap |= OT_DATA_ACCESS;
+		membar_sync();
+		derr.fme_flag = DDI_FM_ERR_POKE;
+		err = pcmu_pbm_err_handler(pcmu_p->pcmu_dip, &derr,
+		    (void *)pcmu_p, PCI_INTR_CALL);
+	} else if (pcmu_check_error(pcmu_p) != 0) {
+		/*
+		 * unprotected error, check for all errors.
+		 */
+		if (pcmu_errtrig_pa) {
+			(void) ldphysio(pcmu_errtrig_pa);
+		}
+		derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
+		err = pcmu_pbm_err_handler(pcmu_p->pcmu_dip, &derr,
+		    (void *)pcmu_p, PCI_INTR_CALL);
+	}
+
+	if (err == DDI_FM_FATAL) {
+		if (pcmu_panic_on_fatal_errors) {
+			/* drop the mutex first; CE_PANIC does not return */
+			mutex_exit(&pcmu_p->pcmu_err_mutex);
+			cmn_err(CE_PANIC, "%s-%d: Fatal PCI bus error(s)\n",
+			    ddi_driver_name(pcmu_p->pcmu_dip),
+			    ddi_get_instance(pcmu_p->pcmu_dip));
+		}
+	}
+
+	mutex_exit(&pcmu_p->pcmu_err_mutex);
+	pcmu_ib_nintr_clear(pcmu_p->pcmu_ib_p, pcmu_p->pcmu_inos[CBNINTR_PBM]);
+	return (DDI_INTR_CLAIMED);
+}
+
+void
+pcmu_pbm_suspend(pcmu_pbm_t *pcbm_p)
+{
+	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+	pcmu_ib_ino_t pbm_ino = pcmu_p->pcmu_inos[CBNINTR_PBM];
+
+	/* save the PBM error interrupt's mapping register for resume */
+	pcbm_p->pcbm_imr_save =
+	    *ib_intr_map_reg_addr(pcmu_p->pcmu_ib_p, pbm_ino);
+}
+
+void
+pcmu_pbm_resume(pcmu_pbm_t *pcbm_p)
+{
+	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	pcmu_ib_ino_t pbm_ino = pcmu_p->pcmu_inos[CBNINTR_PBM];
+
+	/* idle the ino, then restore the saved mapping register */
+	pcmu_ib_nintr_clear(pib_p, pbm_ino);
+	*ib_intr_map_reg_addr(pib_p, pbm_ino) = pcbm_p->pcbm_imr_save;
+}
+
+/*
+ * Interrupt-distribution callback for the PBM error interrupt:
+ * retargets the nexus interrupt under pib_intr_lock.
+ */
+void
+pcmu_pbm_intr_dist(void *arg)
+{
+	pcmu_pbm_t *pcbm_p = (pcmu_pbm_t *)arg;
+	pcmu_t *pcmu_p = pcbm_p->pcbm_pcmu_p;
+	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
+	pcmu_ib_ino_t ino =
+	    PCMU_IB_MONDO_TO_INO(pcmu_p->pcmu_inos[CBNINTR_PBM]);
+	mutex_enter(&pib_p->pib_intr_lock);
+	pcmu_ib_intr_dist_nintr(pib_p, ino, ib_intr_map_reg_addr(pib_p, ino));
+	mutex_exit(&pib_p->pib_intr_lock);
+}
+
+/*
+ * Function used to log PBM AFSR register bits and to lookup and fault
+ * handle associated with PBM AFAR register. Called by
+ * pcmu_pbm_err_handler with pcmu_err_mutex held.
+ *
+ * Returns DDI_FM_FATAL when a secondary error was classified,
+ * DDI_FM_NONFATAL otherwise.
+ */
+int
+pcmu_pbm_afsr_report(dev_info_t *dip, uint64_t fme_ena,
+    pcmu_pbm_errstate_t *pbm_err_p)
+{
+	int fatal = 0;
+	/* LINTED variable */
+	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
+
+	ASSERT(MUTEX_HELD(&pcmu_p->pcmu_err_mutex));
+
+	/* primary error: classify only; the result is intentionally ignored */
+	pbm_err_p->pcbm_pri = PBM_PRIMARY;
+	(void) pcmu_pbm_classify(pbm_err_p);
+
+	/*
+	 * We are currently not dealing with the multiple error
+	 * case, for any secondary errors we will panic.
+	 */
+	pbm_err_p->pcbm_pri = PBM_SECONDARY;
+	if (pcmu_pbm_classify(pbm_err_p)) {
+		fatal++;
+		pcmu_pbm_ereport_post(dip, fme_ena, pbm_err_p);
+	}
+
+	if (fatal) {
+		return (DDI_FM_FATAL);
+	}
+	return (DDI_FM_NONFATAL);
+}
diff --git a/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_util.c b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_util.c
new file mode 100644
index 0000000000..6cd0383a23
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/pcicmu/pcmu_util.c
@@ -0,0 +1,707 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * CMU-CH nexus utility routines:
+ * property and config routines for attach()
+ * reg/intr/range/assigned-address property routines for bus_map()
+ * init_child()
+ * fault handling
+ * debug functions
+ */
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/async.h>
+#include <sys/sysmacros.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/fm/protocol.h>
+#include <sys/fm/io/pci.h>
+#include <sys/fm/util.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pcicmu/pcicmu.h>
+#include <sys/promif.h>
+
+/*LINTLIBRARY*/
+
+/*
+ * get_pcmu_properties
+ *
+ * This function is called from the attach routine to get the key
+ * properties of the pci nodes: portid, bus-range, ranges and the
+ * number of interrupt proxies.
+ *
+ * used by: pcmu_attach()
+ *
+ * return value: DDI_FAILURE on failure, DDI_SUCCESS otherwise
+ */
+int
+get_pcmu_properties(pcmu_t *pcmu_p, dev_info_t *dip)
+{
+	int i;
+
+	/*
+	 * Get the device's port id.
+	 */
+	if ((pcmu_p->pcmu_id = (uint32_t)pcmu_get_portid(dip)) == -1u) {
+		cmn_err(CE_WARN, "%s%d: no portid property\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Get the bus-ranges property.
+	 */
+	i = sizeof (pcmu_p->pcmu_bus_range);
+	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
+	    "bus-range", (caddr_t)&pcmu_p->pcmu_bus_range, &i) != DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: no bus-range property\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		return (DDI_FAILURE);
+	}
+	PCMU_DBG2(PCMU_DBG_ATTACH, dip,
+	    "get_pcmu_properties: bus-range (%x,%x)\n",
+	    pcmu_p->pcmu_bus_range.lo, pcmu_p->pcmu_bus_range.hi);
+
+	/*
+	 * Get the ranges property.  The buffer is kmem-allocated by
+	 * ddi_getlongprop() and released in free_pcmu_properties().
+	 */
+	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
+	    (caddr_t)&pcmu_p->pcmu_ranges, &pcmu_p->pcmu_ranges_length) !=
+	    DDI_SUCCESS) {
+		cmn_err(CE_WARN, "%s%d: no ranges property\n",
+		    ddi_driver_name(dip), ddi_get_instance(dip));
+		return (DDI_FAILURE);
+	}
+	pcmu_fix_ranges(pcmu_p->pcmu_ranges,
+	    pcmu_p->pcmu_ranges_length / sizeof (pcmu_ranges_t));
+
+	/*
+	 * Determine the number of UPA slot interrupts.
+	 */
+	pcmu_p->pcmu_numproxy = pcmu_get_numproxy(pcmu_p->pcmu_dip);
+	PCMU_DBG1(PCMU_DBG_ATTACH, dip, "get_pcmu_properties: numproxy=%d\n",
+	    pcmu_p->pcmu_numproxy);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * free_pcmu_properties:
+ *
+ * This routine frees the memory used to cache the
+ * "ranges" properties of the pci bus device node.
+ *
+ * used by: pcmu_detach()
+ *
+ * return value: none
+ */
+void
+free_pcmu_properties(pcmu_t *pcmu_p)
+{
+	/* pcmu_ranges was kmem-allocated by ddi_getlongprop() in attach */
+	kmem_free(pcmu_p->pcmu_ranges, pcmu_p->pcmu_ranges_length);
+}
+
+/*
+ * pcmu_reloc_reg
+ *
+ * If the "reg" entry (*pcmu_rp) is relocatable, lookup "assigned-addresses"
+ * property to fetch corresponding relocated address.
+ *
+ * used by: pcmu_map()
+ *
+ * return value:
+ *
+ * DDI_SUCCESS - on success
+ * DDI_ME_INVAL - regspec is invalid
+ */
+int
+pcmu_reloc_reg(dev_info_t *dip, dev_info_t *rdip, pcmu_t *pcmu_p,
+    pci_regspec_t *rp)
+{
+	int assign_len, assign_entries, i;
+	pci_regspec_t *assign_p;
+	register uint32_t phys_hi = rp->pci_phys_hi;
+	register uint32_t mask = PCI_REG_ADDR_M | PCI_CONF_ADDR_MASK;
+	register uint32_t phys_addr = phys_hi & mask;
+
+	PCMU_DBG5(PCMU_DBG_MAP | PCMU_DBG_CONT, dip,
+	    "\tpcmu_reloc_reg fr: %x.%x.%x %x.%x\n",
+	    rp->pci_phys_hi, rp->pci_phys_mid, rp->pci_phys_low,
+	    rp->pci_size_hi, rp->pci_size_low);
+
+	/* already relocated, or config space: nothing to look up */
+	if ((phys_hi & PCI_RELOCAT_B) || !(phys_hi & PCI_ADDR_MASK)) {
+		return (DDI_SUCCESS);
+	}
+
+	/* phys_mid must be 0 regardless space type. XXX-64 bit mem space */
+	if (rp->pci_phys_mid != 0 || rp->pci_size_hi != 0) {
+		PCMU_DBG0(PCMU_DBG_MAP | PCMU_DBG_CONT, pcmu_p->pcmu_dip,
+		    "phys_mid or size_hi not 0\n");
+		return (DDI_ME_INVAL);
+	}
+
+	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
+	    "assigned-addresses", (caddr_t)&assign_p, &assign_len)) {
+		return (DDI_ME_INVAL);
+	}
+
+	/* find the assigned-addresses entry matching this "reg" entry */
+	assign_entries = assign_len / sizeof (pci_regspec_t);
+	for (i = 0; i < assign_entries; i++, assign_p++) {
+		if ((assign_p->pci_phys_hi & mask) == phys_addr) {
+			rp->pci_phys_low += assign_p->pci_phys_low;
+			break;
+		}
+	}
+	/* assign_p was advanced i entries; free from the original base */
+	kmem_free(assign_p - i, assign_len);
+	PCMU_DBG5(PCMU_DBG_MAP | PCMU_DBG_CONT, dip,
+	    "\tpcmu_reloc_reg to: %x.%x.%x %x.%x\n",
+	    rp->pci_phys_hi, rp->pci_phys_mid, rp->pci_phys_low,
+	    rp->pci_size_hi, rp->pci_size_low);
+	return (i < assign_entries ? DDI_SUCCESS : DDI_ME_INVAL);
+}
+
+/*
+ * use "ranges" to translate relocated pci regspec into parent space
+ *
+ * Returns DDI_SUCCESS, DDI_ME_INVAL (bad config-space offset) or
+ * DDI_ME_REGSPEC_RANGE (no covering "ranges" entry found).
+ */
+int
+pcmu_xlate_reg(pcmu_t *pcmu_p, pci_regspec_t *pcmu_rp, struct regspec *new_rp)
+{
+	int n;
+	pcmu_ranges_t *rng_p = pcmu_p->pcmu_ranges;
+	int rng_n = pcmu_p->pcmu_ranges_length / sizeof (pcmu_ranges_t);
+
+	uint32_t space_type = PCI_REG_ADDR_G(pcmu_rp->pci_phys_hi);
+	uint32_t reg_end, reg_begin = pcmu_rp->pci_phys_low;
+	uint32_t sz = pcmu_rp->pci_size_low;
+
+	uint32_t rng_begin, rng_end;
+
+	/* config space: clamp size to the header and fold in phys_hi */
+	if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
+		if (reg_begin > PCI_CONF_HDR_SIZE) {
+			return (DDI_ME_INVAL);
+		}
+		sz = sz ? MIN(sz, PCI_CONF_HDR_SIZE) : PCI_CONF_HDR_SIZE;
+		reg_begin += pcmu_rp->pci_phys_hi;
+	}
+	reg_end = reg_begin + sz - 1;
+
+	/* find a "ranges" entry of the same space type covering the span */
+	for (n = 0; n < rng_n; n++, rng_p++) {
+		if (space_type != PCI_REG_ADDR_G(rng_p->child_high)) {
+			continue; /* not the same space type */
+		}
+
+		rng_begin = rng_p->child_low;
+		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
+			rng_begin += rng_p->child_high;
+		}
+		rng_end = rng_begin + rng_p->size_low - 1;
+		if (reg_begin >= rng_begin && reg_end <= rng_end) {
+			break;
+		}
+	}
+	if (n >= rng_n) {
+		return (DDI_ME_REGSPEC_RANGE);
+	}
+
+	/* rebase into the parent's address space */
+	new_rp->regspec_addr = reg_begin - rng_begin + rng_p->parent_low;
+	new_rp->regspec_bustype = rng_p->parent_high;
+	new_rp->regspec_size = sz;
+	PCMU_DBG4(PCMU_DBG_MAP | PCMU_DBG_CONT, pcmu_p->pcmu_dip,
+	    "\tpcmu_xlate_reg: entry %d new_rp %x.%x %x\n",
+	    n, new_rp->regspec_bustype, new_rp->regspec_addr, sz);
+	return (DDI_SUCCESS);
+}
+
+
+/*
+ * pcmu_report_dev
+ *
+ * This function is called from our control ops routine on a
+ * DDI_CTLOPS_REPORTDEV request.
+ *
+ * The display format is
+ *
+ * <name><inst> at <pname><pinst> device <dev> function <func>
+ *
+ * where
+ *
+ * <name> this device's name property
+ * <inst> this device's instance number
+ * <name> parent device's name property
+ * <inst> parent device's instance number
+ * <dev> this device's device number
+ * <func> this device's function number
+ */
+int
+pcmu_report_dev(dev_info_t *dip)
+{
+	if (dip == (dev_info_t *)0) {
+		return (DDI_FAILURE);
+	}
+	/* "?" prefix: printed to the console only during verbose boot */
+	cmn_err(CE_CONT, "?PCI-device: %s@%s, %s%d\n", ddi_node_name(dip),
+	    ddi_get_name_addr(dip), ddi_driver_name(dip),
+	    ddi_get_instance(dip));
+	return (DDI_SUCCESS);
+}
+
+/*
+ * name_child
+ *
+ * This function is called from pcmu_init_child to name a node.  It is
+ * also passed as a callback for node merging functions.  The address
+ * portion of the name is derived from the "unit-address" property when
+ * present, otherwise from the device/function fields of "reg".
+ *
+ * return value: DDI_SUCCESS, DDI_FAILURE
+ */
+static int
+name_child(dev_info_t *child, char *name, int namelen)
+{
+	pci_regspec_t *pcmu_rp;
+	int reglen;
+	uint_t func;
+	char **unit_addr;
+	uint_t n;
+
+	/*
+	 * Set the address portion of the node name based on
+	 * unit-address property, if it exists.
+	 * The interpretation of the unit-address is DD[,F]
+	 * where DD is the device id and F is the function.
+	 */
+	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
+	    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) ==
+	    DDI_PROP_SUCCESS) {
+		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
+			cmn_err(CE_WARN, "unit-address property in %s.conf"
+			    " not well-formed", ddi_driver_name(child));
+			ddi_prop_free(unit_addr);
+			return (DDI_FAILURE);
+		}
+		(void) snprintf(name, namelen, "%s", *unit_addr);
+		ddi_prop_free(unit_addr);
+		return (DDI_SUCCESS);
+	}
+
+	/*
+	 * The unit-address property does not exist.  Set the address
+	 * portion of the node name based on the function and device number.
+	 */
+	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
+	    "reg", (int **)&pcmu_rp, (uint_t *)&reglen) == DDI_SUCCESS) {
+		if (((reglen * sizeof (int)) % sizeof (pci_regspec_t)) != 0) {
+			cmn_err(CE_WARN, "reg property not well-formed");
+			/* free the prop buffer on this error path too */
+			ddi_prop_free(pcmu_rp);
+			return (DDI_FAILURE);
+		}
+
+		/* name is DD[,F]; the function number is omitted when 0 */
+		func = PCI_REG_FUNC_G(pcmu_rp[0].pci_phys_hi);
+		if (func != 0) {
+			(void) snprintf(name, namelen, "%x,%x",
+			    PCI_REG_DEV_G(pcmu_rp[0].pci_phys_hi), func);
+		} else {
+			(void) snprintf(name, namelen, "%x",
+			    PCI_REG_DEV_G(pcmu_rp[0].pci_phys_hi));
+		}
+		ddi_prop_free(pcmu_rp);
+		return (DDI_SUCCESS);
+	}
+	cmn_err(CE_WARN, "cannot name pci child '%s'", ddi_node_name(child));
+	return (DDI_FAILURE);
+}
+
+/*
+ * pcmu_uninit_child
+ *
+ * Undo pcmu_init_child() for DDI_CTLOPS_UNINITCHILD: clear the node's
+ * address, remove its minor nodes and drop driver-added properties.
+ */
+int
+pcmu_uninit_child(pcmu_t *pcmu_p, dev_info_t *child)
+{
+	PCMU_DBG2(PCMU_DBG_CTLOPS, pcmu_p->pcmu_dip,
+	    "DDI_CTLOPS_UNINITCHILD: arg=%s%d\n",
+	    ddi_driver_name(child), ddi_get_instance(child));
+
+	ddi_set_name_addr(child, NULL);
+	ddi_remove_minor_node(child, NULL);
+	impl_rem_dev_props(child);
+
+	PCMU_DBG0(PCMU_DBG_PWR, ddi_get_parent(child), "\n\n");
+	return (DDI_SUCCESS);
+}
+
+/*
+ * pcmu_init_child
+ *
+ * This function is called from our control ops routine on a
+ * DDI_CTLOPS_INITCHILD request. It builds and sets the device's
+ * parent private data area.
+ *
+ * used by: pcmu_ctlops()
+ *
+ * return value: DDI_SUCCESS or DDI_FAILURE
+ */
+int
+pcmu_init_child(pcmu_t *pcmu_p, dev_info_t *child)
+{
+	char name[10];	/* holds the "DD[,F]" unit address */
+	ddi_acc_handle_t config_handle;
+	uint8_t bcr;
+	uint8_t header_type;
+
+	if (name_child(child, name, 10) != DDI_SUCCESS)
+		return (DDI_FAILURE);
+	ddi_set_name_addr(child, name);
+
+	PCMU_DBG2(PCMU_DBG_PWR, ddi_get_parent(child),
+	    "INITCHILD: config regs setup for %s@%s\n",
+	    ddi_node_name(child), ddi_get_name_addr(child));
+
+	/*
+	 * Map the child configuration space to for initialization.
+	 * We assume the obp will do the following in the devices
+	 * config space:
+	 *
+	 * Set the latency-timer register to values appropriate
+	 * for the devices on the bus (based on other devices
+	 * MIN_GNT and MAX_LAT registers.
+	 *
+	 * Set the fast back-to-back enable bit in the command
+	 * register if it's supported and all devices on the bus
+	 * have the capability.
+	 *
+	 */
+	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
+		ddi_set_name_addr(child, NULL);
+		return (DDI_FAILURE);
+	}
+
+	/*
+	 * Determine the configuration header type.
+	 */
+	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);
+	PCMU_DBG2(PCMU_DBG_INIT_CLD, pcmu_p->pcmu_dip, "%s: header_type=%x\n",
+	    ddi_driver_name(child), header_type);
+
+	/*
+	 * If the device has a bus control register (header type 1,
+	 * i.e. a PCI-PCI bridge) then program it based on the
+	 * settings in the command register.
+	 */
+	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
+		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
+		if (pcmu_command_default & PCI_COMM_PARITY_DETECT)
+			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
+		if (pcmu_command_default & PCI_COMM_SERR_ENABLE)
+			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
+		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
+		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
+	}
+
+	pci_config_teardown(&config_handle);
+	return (DDI_SUCCESS);
+}
+
+/*
+ * pcmu_get_reg_set_size
+ *
+ * Given a dev info pointer to a pci child and a register number, this
+ * routine returns the size element of that reg set property.
+ *
+ * used by: pcmu_ctlops() - DDI_CTLOPS_REGSIZE
+ *
+ * return value: size of reg set on success, zero on error
+ */
+off_t
+pcmu_get_reg_set_size(dev_info_t *child, int rnumber)
+{
+	pci_regspec_t *pcmu_rp;
+	off_t size;
+	int i;
+
+	if (rnumber < 0) {
+		return (0);
+	}
+
+	/*
+	 * Get the reg property for the device.
+	 */
+	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
+	    (caddr_t)&pcmu_rp, &i) != DDI_SUCCESS) {
+		return (0);
+	}
+
+	/* reg set index out of range for this device */
+	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
+		kmem_free(pcmu_rp, i);
+		return (0);
+	}
+
+	/* compose the 64-bit size from the hi/lo halves of the regspec */
+	size = pcmu_rp[rnumber].pci_size_low |
+	    ((uint64_t)pcmu_rp[rnumber].pci_size_hi << 32);
+	kmem_free(pcmu_rp, i);
+	return (size);
+}
+
+
+/*
+ * pcmu_get_nreg_set
+ *
+ * Given a dev info pointer to a pci child, this routine returns the
+ * number of sets in its "reg" property.
+ *
+ * used by: pcmu_ctlops() - DDI_CTLOPS_NREGS
+ *
+ * return value: # of reg sets on success, zero on error
+ */
+uint_t
+pcmu_get_nreg_set(dev_info_t *child)
+{
+	pci_regspec_t *pcmu_rp;
+	int i, n;
+
+	/*
+	 * Get the reg property for the device.
+	 */
+	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
+	    (caddr_t)&pcmu_rp, &i) != DDI_SUCCESS) {
+		return (0);
+	}
+	/* i is the property length in bytes */
+	n = i / (int)sizeof (pci_regspec_t);
+	kmem_free(pcmu_rp, i);
+	return (n);
+}
+
+/*
+ * pcmu_cfg_report
+ *
+ * Walk pci_err_tbl[] and log every PCI config-space status error bit
+ * recorded in pcmu_err_p->pcmu_cfg_stat.  Received master/target aborts
+ * are fatal unless already handled via the PIO path (prierr) or when an
+ * address is available from the trap handler; other bits are nonfatal.
+ *
+ * Returns DDI_FM_FATAL, DDI_FM_NONFATAL or DDI_FM_OK.
+ */
+int
+pcmu_cfg_report(dev_info_t *dip, ddi_fm_error_t *derr,
+    pcmu_errstate_t *pcmu_err_p, int caller, uint32_t prierr)
+{
+	int fatal = 0;
+	int nonfatal = 0;
+	int i;
+	pcmu_t *pcmu_p;
+	int instance = ddi_get_instance(dip);
+
+	ASSERT(dip);
+
+	pcmu_p = get_pcmu_soft_state(instance);
+
+	/* generate an ENA if the caller did not supply one */
+	derr->fme_ena = derr->fme_ena ? derr->fme_ena :
+	    fm_ena_generate(0, FM_ENA_FMT1);
+
+	for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
+		if (pcmu_err_p->pcmu_cfg_stat & pci_err_tbl[i].reg_bit) {
+			char buf[FM_MAX_CLASS];
+			char *aux_msg = NULL;
+
+			switch (pci_err_tbl[i].reg_bit) {
+			case PCI_STAT_R_MAST_AB:
+				aux_msg = "Received Master Abort";
+				/* LINTED fallthrough on case statement */
+			case PCI_STAT_R_TARG_AB:
+				/*
+				 * aux_msg is still NULL only when we got
+				 * here directly, i.e. not through the
+				 * master-abort fallthrough above.
+				 */
+				if (aux_msg == NULL)
+					aux_msg = "Received Target Abort";
+				if (prierr) {
+					/*
+					 * piow case are already handled in
+					 * pcmu_pbm_afsr_report()
+					 */
+					break;
+				}
+				if (caller != PCI_TRAP_CALL) {
+					/*
+					 * if we haven't come from trap handler
+					 * we won't have an address
+					 */
+					fatal++;
+				}
+				break;
+			default:
+				/*
+				 * dpe on dma write or ta on dma; give
+				 * snprintf a non-NULL message string.
+				 */
+				aux_msg = "DMA error";
+				nonfatal++;
+				break;
+			}
+			(void) snprintf(buf, FM_MAX_CLASS, "%s %s: %s %s",
+			    (pcmu_p->pcmu_pcbm_p)->pcbm_nameinst_str,
+			    (pcmu_p->pcmu_pcbm_p)->pcbm_nameaddr_str,
+			    "PCI config space:", aux_msg);
+			cmn_err(CE_WARN, "%s %s=0x%p", buf,
+			    "pbm-csr", (pcmu_p->pcmu_pcbm_p)->pcbm_ctrl_reg);
+		}
+	}
+
+	if (fatal)
+		return (DDI_FM_FATAL);
+	else if (nonfatal)
+		return (DDI_FM_NONFATAL);
+
+	return (DDI_FM_OK);
+}
+
+/*
+ * pcmu_child_cfg_save
+ *
+ * On DDI_SUSPEND, save the PCI config headers of all initialized
+ * children that have not already saved their own registers, and mark
+ * each with "nexus-saved-config-regs" so the nexus restores it on resume.
+ */
+void
+pcmu_child_cfg_save(dev_info_t *dip)
+{
+	dev_info_t *cdip;
+	int ret = DDI_SUCCESS;
+
+	/*
+	 * Save the state of the configuration headers of child
+	 * nodes.
+	 */
+
+	for (cdip = ddi_get_child(dip); cdip != NULL;
+	    cdip = ddi_get_next_sibling(cdip)) {
+
+		/*
+		 * Not interested in children who are not already
+		 * init'ed. They will be set up in pcmu_init_child().
+		 */
+		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
+			PCMU_DBG2(PCMU_DBG_DETACH, dip, "DDI_SUSPEND: skipping "
+			    "%s%d not in CF1\n", ddi_driver_name(cdip),
+			    ddi_get_instance(cdip));
+
+			continue;
+		}
+
+		/*
+		 * Only save config registers if not already saved by child.
+		 */
+		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
+		    SAVED_CONFIG_REGS) == 1) {
+
+			continue;
+		}
+
+		/*
+		 * The nexus needs to save config registers. Create a property
+		 * so it knows to restore on resume.
+		 */
+		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip,
+		    "nexus-saved-config-regs");
+
+		/* a failure to mark the node is logged but not fatal */
+		if (ret != DDI_PROP_SUCCESS) {
+			cmn_err(CE_WARN, "%s%d can't update prop %s",
+			    ddi_driver_name(cdip), ddi_get_instance(cdip),
+			    "nexus-saved-config-regs");
+		}
+
+		(void) pci_save_config_regs(cdip);
+	}
+}
+
+/*
+ * pcmu_child_cfg_restore
+ *
+ * On DDI_RESUME, restore config registers of children marked with the
+ * "nexus-saved-config-regs" property by pcmu_child_cfg_save(), then
+ * remove that marker property.
+ */
+void
+pcmu_child_cfg_restore(dev_info_t *dip)
+{
+	dev_info_t *cdip;
+
+	/*
+	 * Restore config registers for children that did not save
+	 * their own registers. Children pwr states are UNKNOWN after
+	 * a resume since it is possible for the PM framework to call
+	 * resume without an actual power cycle. (ie if suspend fails).
+	 */
+	for (cdip = ddi_get_child(dip); cdip != NULL;
+	    cdip = ddi_get_next_sibling(cdip)) {
+
+		/*
+		 * Not interested in children who are not already
+		 * init'ed. They will be set up by pcmu_init_child().
+		 */
+		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
+			PCMU_DBG2(PCMU_DBG_DETACH, dip,
+			    "DDI_RESUME: skipping %s%d not in CF1\n",
+			    ddi_driver_name(cdip), ddi_get_instance(cdip));
+			continue;
+		}
+
+		/*
+		 * Only restore config registers if saved by nexus.
+		 */
+		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
+		    "nexus-saved-config-regs") == 1) {
+			(void) pci_restore_config_regs(cdip);
+
+			PCMU_DBG2(PCMU_DBG_PWR, dip,
+			    "DDI_RESUME: nexus restoring %s%d config regs\n",
+			    ddi_driver_name(cdip), ddi_get_instance(cdip));
+
+			/* drop the marker so a later suspend re-saves */
+			if (ndi_prop_remove(DDI_DEV_T_NONE, cdip,
+			    "nexus-saved-config-regs") != DDI_PROP_SUCCESS) {
+				cmn_err(CE_WARN, "%s%d can't remove prop %s",
+				    ddi_driver_name(cdip),
+				    ddi_get_instance(cdip),
+				    "nexus-saved-config-regs");
+			}
+		}
+	}
+}
+
+#ifdef DEBUG
+extern uint64_t pcmu_debug_flags;
+
+/* map of debug-flag bits to the subsystem names printed by pcmu_debug() */
+pcmu_dflag_to_str_t pcmu_dflag_strings [] = {
+	{PCMU_DBG_ATTACH,	"pcmu_attach"},
+	{PCMU_DBG_DETACH,	"pcmu_detach"},
+	{PCMU_DBG_MAP,		"pcmu_map"},
+	{PCMU_DBG_A_INTX,	"pcmu_add_intx"},
+	{PCMU_DBG_R_INTX,	"pcmu_rem_intx"},
+	{PCMU_DBG_INIT_CLD,	"pcmu_init_child"},
+	{PCMU_DBG_CTLOPS,	"pcmu_ctlops"},
+	{PCMU_DBG_INTR,		"pcmu_intr_wrapper"},
+	{PCMU_DBG_ERR_INTR,	"pcmu_pbm_error_intr"},
+	{PCMU_DBG_BUS_FAULT,	"pcmu_fault"},
+	{PCMU_DBG_IB,		"pcmu_ib"},
+	{PCMU_DBG_CB,		"pcmu_cb"},
+	{PCMU_DBG_PBM,		"pcmu_pbm"},
+	{PCMU_DBG_OPEN,		"pcmu_open"},
+	{PCMU_DBG_CLOSE,	"pcmu_close"},
+	{PCMU_DBG_IOCTL,	"pcmu_ioctl"},
+	{PCMU_DBG_PWR,		"pcmu_pwr"}
+};
+
+/*
+ * pcmu_debug
+ *
+ * Conditional debug output: when all bits of `flag' are enabled in
+ * pcmu_debug_flags, print the formatted message via prom_printf().
+ * A "driver(instance): subsystem: " prefix is printed unless the
+ * PCMU_DBG_CONT continuation bit was passed in `flag'.
+ */
+void
+pcmu_debug(uint64_t flag, dev_info_t *dip, char *fmt,
+    uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5)
+{
+	char *s = "pcmu unknown";
+	uint_t cont = 0;
+	int i;
+	int no_rec = (sizeof (pcmu_dflag_strings) /
+	    sizeof (pcmu_dflag_to_str_t));
+
+	if (flag & PCMU_DBG_CONT) {
+		flag &= ~PCMU_DBG_CONT;
+		cont = 1;
+	}
+	if ((pcmu_debug_flags & flag) == flag) {
+		/* look up the subsystem name for this flag */
+		for (i = 0; i < no_rec; i++) {
+			if (pcmu_dflag_strings[i].flag == flag) {
+				s = pcmu_dflag_strings[i].string;
+				break;
+			}
+		}
+		if (s && cont == 0) {
+			prom_printf("%s(%d): %s: ", ddi_driver_name(dip),
+			    ddi_get_instance(dip), s);
+		}
+		prom_printf(fmt, a1, a2, a3, a4, a5);
+	}
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scf_os_interface.c b/usr/src/uts/sun4u/opl/io/scfd/scf_os_interface.c
new file mode 100644
index 0000000000..f99b860329
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scf_os_interface.c
@@ -0,0 +1,143 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/sunddi.h>
+#include <sys/ddi.h>
+#include <sys/kobj.h>
+
+#include <sys/scfd/scfsys.h>
+#include <sys/scfd/scfostoescf.h>
+
+#define XSCF_DATA_LEN 16
+#define SCF_RETRY_COUNT 10
+
+
+/*
+ * Send `length' bytes of `datap' to the XSCF service for sub-command
+ * `type'.  Retries up to SCF_RETRY_COUNT times, sleeping 5 seconds
+ * between attempts, while scf_service_putinfo() returns EBUSY.
+ * Returns the last scf_service_putinfo() result (0 on success).
+ */
+static int
+scf_os_putinfo(uint8_t type, char *datap, uint32_t length)
+{
+	int rv, count;
+
+	rv = 0;
+
+	count = SCF_RETRY_COUNT;
+	while (count-- > 0) {
+		rv = scf_service_putinfo(KEY_ESCF, type, 0, length,
+		    (void *)datap);
+		if (rv == EBUSY) {
+			/* 5 sec delay */
+			delay(5 * drv_usectohz(1000000));
+			continue;
+		}
+		break;
+	};
+
+	return (rv);
+}
+
+/*
+ * Fetch data from the XSCF service for sub-command `type' into `datap';
+ * *lengthp is the buffer size in and the returned length out.  Retries
+ * up to SCF_RETRY_COUNT times, sleeping 5 seconds between attempts,
+ * while scf_service_getinfo() returns EBUSY.  Returns the last
+ * scf_service_getinfo() result (0 on success).
+ */
+static int
+scf_os_getinfo(uint8_t type, char *datap, uint32_t *lengthp)
+{
+	int rv, count;
+
+	rv = 0;
+	count = SCF_RETRY_COUNT;
+	while (count-- > 0) {
+		rv = scf_service_getinfo(KEY_ESCF, type, 0, lengthp,
+		    (void *)datap);
+		if (rv == EBUSY) {
+			/* 5 sec delay */
+			delay(5 * drv_usectohz(1000000));
+			continue;
+		}
+		break;
+	};
+
+	return (rv);
+}
+
+/*
+ * scf_fmem_start()
+ *
+ * Description: Before starting rename memory,
+ * sending the message
+ * from OS to XSCF.
+ *
+ */
+int
+scf_fmem_start(int s_bd, int t_bd)
+{
+	char data[XSCF_DATA_LEN];
+
+	bzero(data, XSCF_DATA_LEN);
+	data[0] = (char)s_bd;	/* source board number */
+	data[1] = (char)t_bd;	/* target board number */
+
+	return (scf_os_putinfo(SUB_OS_SEND_PRE_FMEMA,
+	    data, XSCF_DATA_LEN));
+}
+
+/*
+ * scf_fmem_end()
+ *
+ * Description: After doing rename memory, sending the message
+ * from OS to XSCF.
+ *
+ */
+int
+scf_fmem_end()
+{
+	char data[XSCF_DATA_LEN];
+	int rv;
+	uint32_t len;	/* in: buffer size, out: bytes returned by XSCF */
+
+	bzero(data, XSCF_DATA_LEN);
+	len = XSCF_DATA_LEN;
+	rv = scf_os_getinfo(SUB_OS_SEND_COMPLETE_FMEMA,
+	    data, &len);
+
+	if (rv == 0) {
+		/* 0 is OK and everything less than 0 is BAD but TBD */
+		if (len > 0)
+			rv = (int)data[0];	/* XSCF status byte */
+		else
+			rv = -1;	/* transfer OK but no status returned */
+	}
+	return (rv);
+}
+
+/*
+ * scf_fmem_cancel()
+ *
+ * Description: If the status failed after doing rename memory
+ * and check the result, sending the message from OS to XSCF.
+ *
+ */
+int
+scf_fmem_cancel()
+{
+	/* no payload: send the cancel with a null buffer and zero length */
+	return (scf_os_putinfo(SUB_OS_SEND_CANCEL_FMEMA, 0, 0));
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfconf.c b/usr/src/uts/sun4u/opl/io/scfd/scfconf.c
new file mode 100644
index 0000000000..c6e9e11646
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfconf.c
@@ -0,0 +1,1124 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/stat.h>
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+#include <sys/scfd/scfdscpif.h>
+
+/*
+ * Minor nodes created for the scf pseudo device; SCF_DBG_IOMP_INSTANCE
+ * expands to additional debug-build entries.  The list is terminated
+ * by the NULL-name sentinel.
+ */
+static struct driver_minor_data {
+	char *name;	/* minor node name */
+	int type;	/* node type (S_IFCHR) */
+	int minor_num;	/* minor number */
+} scf_minor[] = {
+	{ "pwrctl", S_IFCHR, SCF_USER_INSTANCE },
+	{ "rasctl", S_IFCHR, SCF_USER_INSTANCE },
+	{ "rcictl", S_IFCHR, SCF_USER_INSTANCE },
+	SCF_DBG_IOMP_INSTANCE
+	{ NULL, 0}
+};
+
+
+/*
+ * Function list
+ */
+void scf_resource_free_dev(scf_state_t *statep);
+void scf_reload_conf(scf_state_t *statep);
+
+/*
+ * External function
+ */
+extern void scf_dscp_init(void);
+extern void scf_dscp_fini(void);
+
+/*
+ * External value
+ */
+extern int scf_devbusy_wait_time;
+extern int scf_cmdend_wait_time;
+extern int scf_online_wait_time;
+extern int scf_rxbuff_wait_time;
+extern int scf_dscp_ack_wait_time;
+extern int scf_dscp_end_wait_time;
+extern int scf_dscp_txbusy_time;
+extern int scf_dscp_callback_time;
+extern int scf_shutdown_wait_time;
+extern int scf_poff_wait_time;
+extern int scf_halt_wait_time;
+
+
+/*
+ * scf_attach()
+ *
+ * Description: Driver attach() entry processing.
+ *
+ */
+int
+scf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
+{
+#define SCF_FUNC_NAME "scf_attach() "
+ scf_state_t *statep;
+ int instance;
+ struct driver_minor_data *dmdp;
+ int ret = DDI_FAILURE;
+ char wk_pathname[MAXPATHLEN];
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG2(SCF_DBGFLAG_DDI,
+ SCF_FUNC_NAME ": start instance = %d name = %s",
+ ddi_get_instance(dip), ddi_get_name(dip));
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_IN, __LINE__, &cmd,
+ sizeof (ddi_attach_cmd_t));
+
+ if (strcmp(ddi_get_name(dip), SCF_DRIVER_NAME) == 0) {
+ /* pseudo device */
+ if (cmd == DDI_ATTACH) {
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "pseudo attach proc");
+ mutex_enter(&scf_comtbl.attach_mutex);
+
+ /* get instance number */
+ instance = ddi_get_instance(dip);
+
+ /* allocate softstate */
+ if (ddi_soft_state_zalloc(scfstate, instance) !=
+ DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_soft_state_zalloc failed.\n",
+ scf_driver_name);
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ goto END_attach;
+ }
+
+ /* get softstate */
+ if ((statep = ddi_get_soft_state(scfstate, instance)) ==
+ NULL) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_get_soft_state failed.\n",
+ scf_driver_name);
+ ddi_soft_state_free(scfstate, instance);
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ goto END_attach;
+ }
+
+ /* retain dip in soft state */
+ statep->dip = dip;
+
+ /* create minor node */
+ for (dmdp = scf_minor; dmdp->name != NULL; dmdp++) {
+ if (ddi_create_minor_node(dip, dmdp->name,
+ dmdp->type, dmdp->minor_num,
+ DDI_PSEUDO, 0) == DDI_FAILURE) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR,
+ __LINE__, "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_create_minor_node "
+ "failed.\n",
+ scf_driver_name);
+
+ /* remove minor node */
+ if (scf_comtbl.resource_flag &
+ DID_MNODE) {
+ ddi_remove_minor_node(dip,
+ NULL);
+ scf_comtbl.resource_flag &=
+ (~DID_MNODE);
+ }
+
+ /* soft state free */
+ ddi_soft_state_free(scfstate, instance);
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ goto END_attach;
+ }
+ scf_comtbl.resource_flag |= DID_MNODE;
+ SCFDBGMSG(SCF_DBGFLAG_DDI,
+ "ddi_create_minor_node() is success");
+ }
+
+ scf_comtbl.scf_pseudo_p = statep;
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ }
+ ret = DDI_SUCCESS;
+ goto END_attach;
+ }
+
+ /* get SCF Driver mutex */
+ mutex_enter(&scf_comtbl.attach_mutex);
+
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+
+ if (ddi_get_iblock_cookie(dip, 0, &scf_comtbl.iblock_cookie) !=
+ DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_get_iblock_cookie failed.\n",
+ scf_driver_name);
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ goto END_attach;
+ }
+
+ mutex_init(&scf_comtbl.all_mutex, NULL, MUTEX_DRIVER,
+ scf_comtbl.iblock_cookie);
+ scf_comtbl.resource_flag |= DID_MUTEX_ALL;
+ }
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_SI)) {
+
+ if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
+ &scf_comtbl.soft_iblock_cookie) !=
+ DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_get_soft_iblock_cookie failed.\n",
+ scf_driver_name);
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ goto END_attach;
+ }
+
+ mutex_init(&scf_comtbl.si_mutex, NULL, MUTEX_DRIVER,
+ scf_comtbl.soft_iblock_cookie);
+ scf_comtbl.resource_flag |= DID_MUTEX_SI;
+ }
+ /* add software interrupt handler */
+ if (!(scf_comtbl.resource_flag & DID_SOFTINTR)) {
+ if (ddi_add_softintr(dip, SCF_EVENT_PRI,
+ &scf_comtbl.scf_softintr_id, NULL, NULL,
+ &scf_softintr, NULL) != DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH | TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: ddi_add_softintr failed.",
+ scf_driver_name);
+ goto ATTACH_failed;
+ }
+ scf_comtbl.resource_flag |= DID_SOFTINTR;
+ }
+ /* kstat resource initialize */
+ if (!(scf_comtbl.resource_flag & DID_KSTAT)) {
+ scf_kstat_init();
+ scf_comtbl.resource_flag |= DID_KSTAT;
+ }
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* get instance number */
+ instance = ddi_get_instance(dip);
+
+ switch (cmd) {
+ case DDI_ATTACH:
+ /* DDI_ATTACH */
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "attach proc");
+ /* allocate softstate */
+ if (ddi_soft_state_zalloc(scfstate, instance) != DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_soft_state_zalloc failed.\n",
+ scf_driver_name);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_attach;
+ }
+
+ /* get softstate */
+ if ((statep = ddi_get_soft_state(scfstate, instance)) == NULL) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: ddi_get_soft_state failed.\n",
+ scf_driver_name);
+ ddi_soft_state_free(scfstate, instance);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_attach;
+ }
+
+ /* pathname get (use cmn_err) */
+ if (ddi_pathname(dip, &wk_pathname[0]) != 0) {
+ sprintf(&statep->pathname[0], "%s(%s%d)",
+ &wk_pathname[0], ddi_get_name(dip), instance);
+ } else {
+ sprintf(&statep->pathname[0], "(%s%d)",
+ ddi_get_name(dip), instance);
+ }
+
+ /* retain dip in soft state */
+ statep->dip = dip;
+
+ /* create minor node */
+ sprintf(wk_pathname, "%s%d", ddi_get_name(dip), instance);
+ if (ddi_create_minor_node(dip, wk_pathname, S_IFCHR, instance,
+ DDI_PSEUDO, 0) == DDI_FAILURE) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: "
+ "ddi_create_minor_node failed.\n",
+ scf_driver_name);
+ goto ATTACH_failed;
+ }
+ statep->resource_flag |= S_DID_MNODE;
+
+ statep->instance = instance;
+
+ /* get configuration file */
+ scf_reload_conf(statep);
+
+ /* map SCF registers */
+ if (scf_map_regs(dip, statep) != 0) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ goto ATTACH_failed;
+ }
+
+ /* add interrupt handler */
+ if (ddi_add_intr(dip, 0, NULL, 0, &scf_intr, (caddr_t)statep) !=
+ DDI_SUCCESS) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: ddi_add_intr failed.\n",
+ scf_driver_name);
+ goto ATTACH_failed;
+ }
+ statep->resource_flag |= S_DID_INTR;
+
+ SCF_DBG_IOMP_ADD(statep);
+
+	/* DSCP interface initialize */
+ if (!(scf_comtbl.resource_flag & DID_DSCPINIT)) {
+ scf_dscp_init();
+ scf_comtbl.resource_flag |= DID_DSCPINIT;
+ }
+
+ /* permit SCF intr */
+ scf_permit_intr(statep, 1);
+
+ /* first attach */
+ if ((scf_comtbl.scf_path_p == NULL) &&
+ (scf_comtbl.scf_exec_p == NULL)) {
+ /* no execute scf device */
+ if (scf_comtbl.watchdog_after_resume) {
+ scf_comtbl.alive_running = SCF_ALIVE_START;
+ scf_comtbl.watchdog_after_resume = 0;
+ }
+ scf_chg_scf(statep, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(statep);
+ } else {
+ /* exists execute scf device */
+ scf_chg_scf(statep, PATH_STAT_STANDBY);
+ }
+ scf_comtbl.attach_count++;
+
+ ddi_report_dev(dip);
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids,
+ SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ ret = DDI_SUCCESS;
+ goto END_attach;
+
+ case DDI_RESUME:
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "resume proc");
+ /* get softstate */
+ if ((statep = ddi_get_soft_state(scfstate, instance)) == NULL) {
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__,
+ "attach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_attach: ddi_get_soft_state failed.\n",
+ scf_driver_name);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_attach;
+ }
+
+ /* Release the transmit-stop state that was set at SUSPEND */
+ scf_comtbl.scf_suspend_sendstop = 0;
+ /* queue update */
+ scf_del_queue(statep);
+ if ((statep->old_path_status == PATH_STAT_ACTIVE) ||
+ (statep->old_path_status == PATH_STAT_STANDBY)) {
+ if ((scf_comtbl.scf_path_p == NULL) &&
+ (scf_comtbl.scf_exec_p == NULL)) {
+ scf_comtbl.suspend_flag = 0;
+ if (scf_comtbl.watchdog_after_resume) {
+ scf_comtbl.alive_running =
+ SCF_ALIVE_START;
+ scf_comtbl.watchdog_after_resume = 0;
+ }
+ /* permit SCF intr */
+ scf_permit_intr(statep, 1);
+ scf_chg_scf(statep, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_pchg_event_sub =
+ EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(statep);
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_REPORT_RUN_WAIT;
+ } else {
+ /* exists execute SCF device */
+ scf_chg_scf(statep, PATH_STAT_STANDBY);
+ /* permit SCF intr */
+ scf_permit_intr(statep, 1);
+ }
+ } else {
+ scf_chg_scf(statep, statep->old_path_status);
+ }
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids,
+ SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ ret = DDI_SUCCESS;
+ goto END_attach;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_ERR, __LINE__, "attach ", 8);
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_attach;
+ }
+
+/*
+ * ATTACH_failed
+ */
+ ATTACH_failed:
+
+ scf_resource_free_dev(statep);
+
+ if ((scf_comtbl.scf_exec_p == NULL) &&
+ (scf_comtbl.scf_path_p == NULL) &&
+ (scf_comtbl.scf_wait_p == NULL) &&
+ (scf_comtbl.scf_suspend_p == NULL) &&
+ (scf_comtbl.scf_stop_p == NULL) &&
+ (scf_comtbl.scf_disc_p == NULL) &&
+ (scf_comtbl.scf_err_p == NULL)) {
+ /* last SCF device */
+
+ /* DSCP interface area release */
+ if (scf_comtbl.resource_flag & DID_DSCPINIT) {
+ scf_dscp_fini();
+ scf_comtbl.resource_flag &= (~DID_DSCPINIT);
+ }
+
+ /* All timer stop */
+ scf_timer_all_stop();
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt =
+ scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ mutex_enter(&scf_comtbl.attach_mutex);
+
+ /* destroy kstat resources */
+ if (scf_comtbl.resource_flag & DID_KSTAT) {
+ scf_kstat_fini();
+ scf_comtbl.resource_flag &= (~DID_KSTAT);
+ }
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ } else {
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+
+ ddi_soft_state_free(scfstate, instance);
+
+/*
+ * END_attach
+ */
+ END_attach:
+
+ SC_DBG_DRV_TRACE(TC_ATTACH|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG1(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_detach()
+ *
+ * Description: Driver detach() entry processing.
+ *
+ */
+int
+scf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_detach() "
+ scf_state_t *statep;
+ int instance;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ int ret = DDI_FAILURE;
+ scf_state_t *next_path = 0;
+ int cv_ret;
+ clock_t wk_time;
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG2(SCF_DBGFLAG_DDI,
+ SCF_FUNC_NAME ": start instance = %d name = %s",
+ ddi_get_instance(dip), ddi_get_name(dip));
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_IN, __LINE__, &cmd,
+ sizeof (ddi_detach_cmd_t));
+
+ if (strcmp(ddi_get_name(dip), SCF_DRIVER_NAME) == 0) {
+ if (cmd == DDI_DETACH) {
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "pseudo detach proc");
+ mutex_enter(&scf_comtbl.attach_mutex);
+
+ /* get instance number */
+ instance = ddi_get_instance(dip);
+
+ /* remove minor node */
+ if (scf_comtbl.resource_flag & DID_MNODE) {
+ ddi_remove_minor_node(dip, NULL);
+ scf_comtbl.resource_flag &= (~DID_MNODE);
+ SCFDBGMSG(SCF_DBGFLAG_DDI,
+ "ddi_remove_minor_node() is success");
+ }
+
+ /* soft state free */
+ ddi_soft_state_free(scfstate, instance);
+
+ scf_comtbl.scf_pseudo_p = NULL;
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ }
+ ret = DDI_SUCCESS;
+ goto END_detach;
+ }
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ switch (cmd) {
+ case DDI_DETACH:
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "detach proc");
+ /* get instance number */
+ instance = ddi_get_instance(dip);
+
+ /* get softstate */
+ if ((statep = ddi_get_soft_state(scfstate, instance)) == NULL) {
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_ERR, __LINE__,
+ "detach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_detach: ddi_get_soft_state failed.\n",
+ scf_driver_name);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_detach;
+ }
+
+ if ((scf_comtbl.scf_exec_p == statep) ||
+ (scf_comtbl.scf_path_p == statep)) {
+ if ((next_path = scf_comtbl.scf_wait_p) == 0) {
+ if (scf_last_detach_mode == 0) {
+ /* Detach of the last active device is an error */
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_MSG,
+ __LINE__, "detach ", 8);
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_detach;
+ }
+ }
+ }
+
+ /* SCF command transmit sync stop */
+ (void) scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+
+ scf_del_queue(statep);
+ scf_comtbl.attach_count--;
+
+ /* forbid interrupt */
+ scf_forbid_intr(statep);
+
+ if (next_path) {
+ /* SCF path change */
+ scf_comtbl.scf_wait_p = next_path->next;
+ scf_chg_scf(next_path, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(next_path);
+ }
+ /* SCF command sync start */
+ (void) scf_make_send_cmd(&scf_cmd, SCF_USE_START);
+ SCF_DBG_IOMP_DEL(statep);
+
+ scf_resource_free_dev(statep);
+
+ /* free resources allocated in driver */
+ if ((scf_comtbl.scf_exec_p == NULL) &&
+ (scf_comtbl.scf_path_p == NULL) &&
+ (scf_comtbl.scf_wait_p == NULL) &&
+ (scf_comtbl.scf_suspend_p == NULL) &&
+ (scf_comtbl.scf_stop_p == NULL) &&
+ (scf_comtbl.scf_disc_p == NULL) &&
+ (scf_comtbl.scf_err_p == NULL)) {
+ /* last device */
+
+ /* DSCP interface area release */
+ if (scf_comtbl.resource_flag & DID_DSCPINIT) {
+ scf_dscp_fini();
+ scf_comtbl.resource_flag &= (~DID_DSCPINIT);
+ }
+
+ /* All timer stop */
+ scf_timer_all_stop();
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids,
+ SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids,
+ SCF_TIMERCD_MAX);
+ }
+
+ SCF_DBG_TEST_TIMER_STOP;
+
+ mutex_enter(&scf_comtbl.attach_mutex);
+
+ /* destroy kstat resources */
+ if (scf_comtbl.resource_flag & DID_KSTAT) {
+ scf_kstat_fini();
+ scf_comtbl.resource_flag &= (~DID_KSTAT);
+ }
+
+ mutex_exit(&scf_comtbl.attach_mutex);
+ } else {
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids,
+ SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids,
+ SCF_TIMERCD_MAX);
+ }
+ }
+
+ /* soft state free */
+ ddi_soft_state_free(scfstate, instance);
+
+ ret = DDI_SUCCESS;
+ goto END_detach;
+
+ case DDI_SUSPEND:
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "suspend proc");
+
+ /* get instance number */
+ instance = ddi_get_instance(dip);
+
+ /* get softstate */
+ if ((statep = ddi_get_soft_state(scfstate, instance)) == NULL) {
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_ERR, __LINE__,
+ "detach ", 8);
+ cmn_err(CE_WARN,
+ "%s: scf_detach: ddi_get_soft_state failed.\n",
+ scf_driver_name);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_detach;
+ }
+
+ if ((scf_comtbl.scf_exec_p == statep) ||
+ (scf_comtbl.scf_path_p == statep)) {
+ /* report "Shutdown start" to SCF */
+ scf_comtbl.suspend_flag = 1;
+
+ /*
+ * If the alive-check watchdog is running, stop it,
+ * but set a flag so it is restarted after resume
+ */
+ if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+ scf_comtbl.watchdog_after_resume = 1;
+ scf_comtbl.alive_running = SCF_ALIVE_STOP;
+ }
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALSP_WAIT;
+ scf_next_cmd_check(statep);
+ /* SUSPEND wait state. NOTE(review): drv_usectohz() is applied to (usec + ddi_get_lbolt()); for an absolute cv_timedwait_sig() deadline the lbolt ticks should be added after the conversion -- verify */
+ wk_time = drv_usectohz(SCF_MIL2MICRO(scf_timer_value_get
+ (SCF_TIMERCD_CMDEND)) + ddi_get_lbolt());
+ scf_comtbl.suspend_wait = 1;
+ while (scf_comtbl.suspend_wait != 0) {
+ cv_ret = cv_timedwait_sig
+ (&scf_comtbl.suspend_wait_cv,
+ &scf_comtbl.all_mutex, wk_time);
+ if (cv_ret == 0) {
+ scf_comtbl.suspend_wait = 0;
+ SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+ &scf_comtbl.suspend_wait_cv,
+ sizeof (kcondvar_t));
+ break;
+ } else if (cv_ret == (-1)) {
+ scf_comtbl.suspend_wait = 0;
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_ERR,
+ __LINE__, "detach ", 8);
+ break;
+ }
+ }
+ }
+
+ scf_del_queue(statep);
+ scf_chg_scf(statep, PATH_STAT_EMPTY);
+
+ /* forbid interrupt */
+ scf_forbid_intr(statep);
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt =
+ scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ ret = DDI_SUCCESS;
+ goto END_detach;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_ERR, __LINE__, "detach ", 8);
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ }
+
+/*
+ * END_detach
+ */
+ END_detach:
+
+ SC_DBG_DRV_TRACE(TC_DETACH|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG1(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_resource_free_dev()
+ *
+ * Description: Releases per-device resources: interrupt handler, minor node, and register mappings.
+ *
+ */
+void
+scf_resource_free_dev(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_resource_free_dev() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+ /* remove intr */
+ if (statep->resource_flag & S_DID_INTR) {
+ ddi_remove_intr(statep->dip, 0, scf_comtbl.iblock_cookie);
+ statep->resource_flag &= (~S_DID_INTR);
+ SCFDBGMSG(SCF_DBGFLAG_DDI, "ddi_remove_intr() is success");
+ }
+
+ /* remove minor node */
+ if (statep->resource_flag & S_DID_MNODE) {
+ ddi_remove_minor_node(statep->dip, NULL);
+ statep->resource_flag &= (~S_DID_MNODE);
+ SCFDBGMSG(SCF_DBGFLAG_DDI,
+ "ddi_remove_minor_node() is success");
+ }
+
+ /* unmap SCF registers */
+ scf_unmap_regs(statep);
+
+ SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_getinfo()
+ *
+ * Description: Driver getinfo() entry processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_getinfo() "
+ scf_state_t *statep;
+ int ret;
+ int instance;
+
+ SCFDBGMSG1(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": start instance = %d",
+ getminor((dev_t)arg));
+
+ instance = getminor((dev_t)arg);
+ if (SCF_CHECK_INSTANCE(instance)) {
+ instance = SCF_USER_INSTANCE;
+ }
+
+ switch (cmd) {
+ case DDI_INFO_DEVT2INSTANCE:
+ *resultp = (void *)(uintptr_t)instance;
+ ret = DDI_SUCCESS;
+ goto END_getinfo;
+ case DDI_INFO_DEVT2DEVINFO:
+ statep = (scf_state_t *)ddi_get_soft_state(scfstate, instance);
+ if (statep != NULL) {
+ *resultp = statep->dip;
+ ret = DDI_SUCCESS;
+ goto END_getinfo;
+ }
+ default:
+ SC_DBG_DRV_TRACE(TC_GETINFO|TC_ERR, __LINE__, "getinfo ", 8);
+ *resultp = NULL;
+ ret = DDI_FAILURE;
+ }
+
+/*
+ * END_getinfo
+ */
+ END_getinfo:
+
+ SC_DBG_DRV_TRACE(TC_GETINFO|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG1(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_reload_conf()
+ *
+ * Description: Reads the driver configuration file and applies its settings (runs once; guarded by reload_conf_flag).
+ *
+ */
+void
+scf_reload_conf(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_reload_conf() "
+ dev_info_t *dip;
+ int get_prm;
+ char *wkcharp = NULL;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+ if (scf_comtbl.reload_conf_flag == FLAG_OFF) {
+ dip = statep->dip;
+
+ /*
+ * get driver control mode value
+ */
+
+ /* SCFHALT after processing mode */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_halt_proc_mode", (-1));
+ if (get_prm != (-1)) {
+ scf_halt_proc_mode = (uint_t)get_prm;
+ }
+
+ /*
+ * get alive check function parameter value
+ */
+ /* Operation of alive check function */
+ if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf-alive-check-function", &wkcharp) ==
+ DDI_PROP_SUCCESS) {
+ if (strcmp(wkcharp, SCF_ALIVE_FUNC_ON) == 0) {
+ scf_comtbl.alive_running = SCF_ALIVE_START;
+ } else if (strcmp(wkcharp, SCF_ALIVE_FUNC_OFF) == 0) {
+ scf_comtbl.alive_running = SCF_ALIVE_STOP;
+ }
+ ddi_prop_free(wkcharp);
+ }
+
+ /* Interrupt interval time */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf-alive-interval-time", (-1));
+ if (get_prm != (-1)) {
+ SCF_MIN_TO_10SEC(get_prm);
+ scf_alive_interval_time = (uchar_t)get_prm;
+ }
+ /* Monitoring timeout */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf-alive-monitor-time", (-1));
+ if (get_prm != (-1)) {
+ SCF_MIN_TO_10SEC(get_prm);
+ scf_alive_monitor_time = (uchar_t)get_prm;
+ }
+ /* Panic timeout */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf-alive-panic-time", (-1));
+ if (get_prm != (-1)) {
+ SCF_MIN_TO_10SEC(get_prm);
+ scf_alive_panic_time = (ushort_t)get_prm;
+ }
+
+ if ((scf_alive_interval_time < INTERVAL_TIME_MIN) ||
+ (scf_alive_interval_time > INTERVAL_TIME_MAX) ||
+ (scf_alive_monitor_time < MONITOR_TIME_MIN) ||
+ (scf_alive_monitor_time > MONITOR_TIME_MAX) ||
+ ((scf_alive_panic_time != PANIC_TIME_NONE) &&
+ (scf_alive_panic_time < PANIC_TIME_MIN)) ||
+ (scf_alive_panic_time > PANIC_TIME_MAX)) {
+ scf_alive_interval_time = INTERVAL_TIME_DEF;
+ scf_alive_monitor_time = MONITOR_TIME_DEF;
+ scf_alive_panic_time = PANIC_TIME_DEF;
+ }
+ if (scf_alive_interval_time >= scf_alive_monitor_time) {
+ scf_alive_monitor_time =
+ scf_alive_interval_time + MONITOR_TIME_CORRECT;
+ }
+
+ /*
+ * get system interface control value
+ */
+
+ /* SCFIOCRDCTRL wait timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_rdctrl_sense_wait", (-1));
+ if ((get_prm >= SCF_SEC2MICRO(1)) &&
+ (get_prm <= SCF_SEC2MICRO(120))) {
+ scf_rdctrl_sense_wait = (uint_t)get_prm;
+ }
+
+ /* Buff full wait retry timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_buf_ful_rtime", (-1));
+ if (get_prm >= 0) {
+ scf_buf_ful_rtime = (uint_t)get_prm;
+ }
+
+ /* RCI busy wait retry timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_rci_busy_rtime", (-1));
+ if (get_prm >= 0) {
+ scf_rci_busy_rtime = (uint_t)get_prm;
+ }
+
+ /* Tx sum retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_tesum_rcnt", (-1));
+ if (get_prm >= 0) {
+ scf_tesum_rcnt = (uint_t)get_prm;
+ }
+
+ /* Rx sum retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_resum_rcnt", (-1));
+ if (get_prm >= 0) {
+ scf_resum_rcnt = (uint_t)get_prm;
+ }
+
+ /* Command to retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_cmd_to_rcnt", (-1));
+ if (get_prm >= 0) {
+ scf_cmd_to_rcnt = (uint_t)get_prm;
+ }
+
+ /* Command device busy retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_devbusy_wait_rcnt", (-1));
+ if (get_prm >= 0) {
+ scf_devbusy_wait_rcnt = (uint_t)get_prm;
+ }
+
+ /* SCF online retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_online_wait_rcnt", (-1));
+ if (get_prm >= 0) {
+ scf_online_wait_rcnt = (uint_t)get_prm;
+ }
+
+ /* SCF path change retry counter */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_path_change_max", (-1));
+ if (get_prm >= 0) {
+ scf_path_change_max = (uint_t)get_prm;
+ }
+
+ /*
+ * get timer control value
+ */
+
+ /* SCF command busy watch timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_devbusy_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_devbusy_wait_time = (uint_t)get_prm;
+ }
+
+ /* SCF command completion watch value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_cmdend_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_cmdend_wait_time = (uint_t)get_prm;
+ }
+
+ /* SCF online watch timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_online_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_online_wait_time = (uint_t)get_prm;
+ }
+
+ /* Next receive wait timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_rxbuff_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_rxbuff_wait_time = (uint_t)get_prm;
+ }
+
+ /* DSCP interface TxACK watch timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_dscp_ack_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_dscp_ack_wait_time = (uint_t)get_prm;
+ }
+
+ /* DSCP interface TxEND watch timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_dscp_end_wait_time", (-1));
+ if (get_prm >= 0) {
+ scf_dscp_end_wait_time = (uint_t)get_prm;
+ }
+
+ /* DSCP interface busy watch timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_dscp_txbusy_time", (-1));
+ if (get_prm >= 0) {
+ scf_dscp_txbusy_time = (uint_t)get_prm;
+ }
+
+ /* DSCP interface callback timer value */
+ get_prm = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
+ "scf_dscp_callback_time", (-1));
+ if (get_prm >= 0) {
+ scf_dscp_callback_time = (uint_t)get_prm;
+ }
+
+ /* Timer value set */
+ scf_timer_init();
+
+ scf_comtbl.reload_conf_flag = FLAG_ON;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfd.conf b/usr/src/uts/sun4u/opl/io/scfd/scfd.conf
new file mode 100644
index 0000000000..56f2c038f2
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfd.conf
@@ -0,0 +1,52 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+# Begin (do not edit)
+name="scfd" parent="pseudo" instance=200;
+ddi-forceattach=1;
+interrupt-priorities=0x9;
+# End (do not edit)
+
+# When scf-alive-check-function is set to "on", the Alive check function
+# by XSCF is started. If XSCF detects an abnormality of the domain, an OS
+# panic of the domain is executed. The default is "off".
+# "on" : Starts the Alive check function
+# "off" : Stops the Alive check function
+scf-alive-check-function="off";
+
+# You can set the Alive check interval time for the Alive check function.
+# Specify this parameter in minutes. The range is 1 - 10 minutes.
+# The default is 2 minutes.
+scf-alive-interval-time=2;
+
+# You can set the Alive check monitoring time for the Alive check function.
+# Specify this parameter in minutes. The range is 3 - 30 minutes.
+# The default is 6 minutes.
+scf-alive-monitor-time=6;
+
+# You can set the monitoring time for the OS panic monitoring of Alive
+# check function. Specify this parameter in minutes. The range is
+# 30 - 360 minutes. The default is 30 minutes.
+scf-alive-panic-time=30;
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfdebug.c b/usr/src/uts/sun4u/opl/io/scfd/scfdebug.c
new file mode 100644
index 0000000000..6eab9238bf
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfdebug.c
@@ -0,0 +1,2028 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+#include <sys/scfd/scfdscp.h>
+
+#ifdef DEBUG
+/*
+ * Debug control value and flag
+ */
+uint_t scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_OFF;
+uint_t scf_debug_test_rxbuff_nosum_check_flag = SCF_DBF_RXBUFF_NOSUM_CHECK_OFF;
+uint_t scf_debug_test_sys_event_flag = SCF_DBF_SYS_EVENT_OFF;
+uint_t scf_debug_test_sys_poff_flag = SCF_DBF_SYS_POFF_OFF;
+uint_t scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+uint_t scf_debug_test_cmdr_busy = SCF_DBC_CMDR_BUSY_CLEAR;
+uint_t scf_debug_test_cmdexr_busy = SCF_DBC_CMDEXR_BUSY_CLEAR;
+uint_t scf_debug_test_path_check = SCF_DBC_PATH_CHECK_CLEAR;
+uint_t scf_debug_test_path_check_rtn = SCF_DBC_PATH_CHECK_RTN_CLEAR;
+uint_t scf_debug_test_offline_check = SCF_DBC_OFFLINE_CHECK_CLEAR;
+uint_t scf_debug_test_offline_check_rtn = SCF_DBC_OFFLINE_CHECK_RTN_CLEAR;
+uint_t scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+uint_t scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_OFF;
+
+uint_t scf_no_make_sum_s = SCF_DBF_NO_MAKE_SUM_S_OFF;
+uint_t scf_no_make_sum_l = SCF_DBF_NO_MAKE_SUM_L_OFF;
+
+uint_t scf_debug_nofirm_sys = SCF_DBF_NOFIRM_SYS_OFF;
+uint_t scf_debug_scfint_time = SCF_DBT_SCFINT_TIME_100MS;
+uint_t scf_debug_nofirm_dscp = SCF_DBF_NOFIRM_DSCP_OFF;
+uint_t scf_debug_idbcint_time = SCF_DBT_IDBCINT_TIME_100MS;
+uint_t scf_debug_test_dscp_loopback = SCF_DBF_DSCP_LOOPBACK_OFF;
+uint_t scf_debug_nooffline_check = SCF_DBF_NOOFFLINE_CHECK_OFF;
+uint_t scf_debug_no_dscp_path = SCF_DBF_NO_DSCP_PATH_OFF;
+uint_t scf_debug_no_alive = SCF_DBF_NO_ALIVE_OFF;
+uint_t scf_debug_norxsum_check = SCF_DBF_NORXSUM_CHECK_OFF;
+uint_t scf_debug_no_int_reason = SCF_DBF_NO_INT_REASON_OFF;
+
+uint_t scf_debug_no_device = SCF_DBF_NO_DEVICE_OFF;
+
+scf_regs_t *scf_debug_scf_regs;
+scf_regs_c_t *scf_debug_scf_regs_c;
+scf_dscp_sram_t *scf_debug_scf_dscp_sram;
+scf_sys_sram_t *scf_debug_scf_sys_sram;
+scf_interface_t *scf_debug_scf_interface;
+scf_if_drvtrc_t *scf_debug_scf_reg_drvtrc;
+
+scf_regs_t scf_debug_scf_regs_tbl;
+scf_regs_c_t scf_debug_scf_regs_c_tbl;
+scf_dscp_sram_t scf_debug_scf_dscp_sram_tbl;
+scf_sys_sram_t scf_debug_scf_sys_sram_tbl;
+scf_interface_t scf_debug_scf_interface_tbl;
+struct {
+ uint8_t data[0x00001000];
+
+} scf_debug_scf_reg_drvtrc_tbl;
+
+struct {
+ uint16_t STATUS;
+ uint16_t INT_ST;
+ uint32_t STATUS_ExR;
+ uint32_t rxsize;
+ uint32_t RDATA0;
+ uint32_t RDATA1;
+ uint32_t RDATA2;
+ uint32_t RDATA3;
+ uint32_t POFF_FACTOR;
+ uint32_t EVENT[8 * 4];
+} scf_debug_test_sys_int_tbl;
+
+struct {
+ uint8_t DSR;
+ uint8_t rev01;
+ uint16_t TxDSR_C_FLAG;
+ uint16_t TxDSR_OFFSET;
+ uint32_t rxsize;
+ uint16_t RxDCR_C_FLAG;
+ uint16_t RxDCR_OFFSET;
+ uint32_t RxDCR_LENGTH;
+ uint32_t rsv14;
+ uint32_t rsv18;
+} scf_debug_test_dscp_int_tbl;
+
+uint32_t scf_debug_rdata[4] = {0, 0, 0, 0};
+
+timeout_id_t scf_debug_test_intr_id = 0;
+timeout_id_t scf_debug_test_alive_id = 0;
+uint_t scf_debug_test_alive_flag = FLAG_OFF;
+
+/*
+ * Function list
+ */
+int scf_debug_cmdthrough(intptr_t arg, int mode);
+int scf_debug_test(intptr_t arg, int mode);
+void scf_debug_test_intr_tout(void *arg);
+void scf_debug_test_intr(scf_state_t *statep);
+void scf_debug_test_intr_scfint(scf_state_t *statep);
+void scf_debug_test_intr_cmdend(scf_state_t *statep);
+void scf_debug_test_intr_poff(void);
+void scf_debug_test_dsens(struct scf_cmd *scfcmdp, scf_int_reason_t *int_rp,
+ int len);
+void scf_debug_test_intr_dscp_dsr(scf_state_t *statep);
+void scf_debug_test_intr_dscp_rxtx(scf_state_t *statep, uint8_t dsr);
+void scf_debug_test_alive_start(scf_state_t *statep);
+void scf_debug_test_alive_stop(scf_state_t *statep);
+void scf_debug_test_alive_intr_tout(void *arg);
+void scf_debug_test_send_cmd(struct scf_state *statep,
+ struct scf_cmd *scfcmdp);
+void scf_debug_test_txreq_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p);
+void scf_debug_test_event_handler(scf_event_t mevent, void *arg);
+void scf_debug_test_timer_stop(void);
+void scf_debug_test_map_regs(scf_state_t *statep);
+void scf_debug_test_unmap_regs(scf_state_t *statep);
+
+/*
+ * External function
+ */
+extern int scf_dscp_init(void);
+extern void scf_dscp_fini(void);
+extern void scf_dscp_start(uint32_t factor);
+extern void scf_dscp_stop(uint32_t factor);
+extern int scf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+extern int scf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+
+extern int scf_fmem_start(int s_bd, int t_bd);
+extern int scf_fmem_end(void);
+extern int scf_fmem_cancel(void);
+
+int
+scf_debug_cmdthrough(intptr_t arg, int mode)
+{
+#define SCF_FUNC_NAME "scf_debug_cmdthrough() "
+ int ret = 0;
+ scfcmdthrough_t *scfcmdthrough_p = NULL;
+ struct scf_cmd scf_cmd;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ scfcmdthrough_p =
+ kmem_zalloc((size_t)(sizeof (scfcmdthrough_t)),
+ KM_SLEEP);
+ if (ddi_copyin((void *)arg, (void *)scfcmdthrough_p,
+ sizeof (scfcmdthrough_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ ret = EFAULT;
+ goto END_cmdthrough;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ switch (scfcmdthrough_p->mode) {
+ case SCF_CMDTHROUGH_START:
+ scf_comtbl.debugxscf_flag = 1;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case SCF_CMDTHROUGH_STOP:
+ scf_comtbl.debugxscf_flag = 0;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case SCF_CMDTHROUGH_CMD:
+ if (!scf_comtbl.debugxscf_flag) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_cmdthrough;
+ }
+ scf_cmd.cmd = (unsigned char)scfcmdthrough_p->code;
+ scf_cmd.subcmd = (unsigned char)(scfcmdthrough_p->code >> 8);
+
+ switch (scfcmdthrough_p->cmdtype) {
+ case SCF_CMDTHROUGH_TYPE_NN:
+ case SCF_CMDTHROUGH_TYPE_NS:
+ case SCF_CMDTHROUGH_TYPE_NL:
+ scf_cmd.scount = 0;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_SN:
+ case SCF_CMDTHROUGH_TYPE_SS:
+ case SCF_CMDTHROUGH_TYPE_SL:
+ if (scfcmdthrough_p->sbufleng > SCF_S_CNT_16) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_cmdt", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_cmdthrough;
+ }
+ scf_cmd.scount = scfcmdthrough_p->sbufleng;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_LN:
+ case SCF_CMDTHROUGH_TYPE_LS:
+ if (scfcmdthrough_p->sbufleng > SCF_L_CNT_MAX) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_cmdt", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_cmdthrough;
+ }
+ scf_cmd.scount = scfcmdthrough_p->sbufleng;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_cmdthrough;
+ }
+
+ switch (scfcmdthrough_p->cmdtype) {
+ case SCF_CMDTHROUGH_TYPE_NN:
+ case SCF_CMDTHROUGH_TYPE_SN:
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_NS:
+ case SCF_CMDTHROUGH_TYPE_SS:
+ scf_cmd.flag = SCF_USE_SSBUF;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_NL:
+ case SCF_CMDTHROUGH_TYPE_SL:
+ scf_cmd.flag = SCF_USE_SLBUF;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_LN:
+ scf_cmd.flag = SCF_USE_L_BUF;
+ break;
+
+ case SCF_CMDTHROUGH_TYPE_LS:
+ scf_cmd.flag = SCF_USE_LSBUF;
+ break;
+ }
+ scf_cmd.sbuf = &scfcmdthrough_p->sbuf[0];
+ scf_cmd.scount = scfcmdthrough_p->sbufleng;
+ scf_cmd.rbuf = &scfcmdthrough_p->rbuf[0];
+ scf_cmd.rcount = SCF_L_CNT_MAX;
+ scf_cmd.rbufleng = 0;
+ scf_cmd.status = 0;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ scfcmdthrough_p->rbufleng = scf_cmd.rbufleng;
+ scfcmdthrough_p->status = scf_cmd.status;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret != 0) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ goto END_cmdthrough;
+ }
+ if (ddi_copyout((void *)scfcmdthrough_p, (void *)arg,
+ sizeof (scfcmdthrough_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ ret = EFAULT;
+ }
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_cmdt", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ }
+
+/*
+ * END_cmdthrough
+ */
+ END_cmdthrough:
+
+ if (scfcmdthrough_p) {
+ kmem_free((void *)scfcmdthrough_p,
+ (size_t)(sizeof (scfcmdthrough_t)));
+ }
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_debug_test()
+ *
+ * Debug ioctl backend. Copies a scf_scfioctest_t request in from
+ * userland, dispatches on (test_p->mode & TEST_MODE_MASK_LOW) to drive
+ * fault injection, simulated interrupts, DSCP mailbox calls, service
+ * (putinfo/getinfo) calls and firmware-memory operations, then copies
+ * the updated request back out.
+ *
+ * arg  - user address of a scf_scfioctest_t
+ * mode - ioctl data model flags, passed to ddi_copyin/ddi_copyout
+ * Returns 0 on success, or EFAULT/EINVAL/EIO/EBUSY.
+ */
+int
+scf_debug_test(intptr_t arg, int mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test() "
+ /*
+ * NOTE(review): statep is left uninitialized here and is only
+ * assigned when one of the exec/path/wait/err device lists below
+ * is non-empty. The later "statep != NULL" guards compare an
+ * indeterminate value when all four lists are empty; initializing
+ * to NULL looks intended — confirm with the driver maintainers.
+ */
+ scf_state_t *statep;
+ int func_ret = 0;
+ int ret = 0;
+
+ scf_scfioctest_t *test_p = NULL;
+ caddr_t data_addr = NULL;
+ caddr_t msc_dptr = NULL;
+ uint32_t msc_len;
+ uint8_t *wk_out_p;
+ int ii;
+ int jj;
+
+ target_id_t target_id;
+ mkey_t mkey;
+ uint_t func_arg;
+ uint32_t data_len;
+ uint32_t num_sg;
+ mscat_gath_t *sgp = NULL;
+ mflush_type_t flush_type;
+ uint32_t op;
+
+ uint32_t key;
+ uint8_t type;
+ uint32_t transid;
+ uint32_t length;
+ uint16_t offset_low;
+ uint16_t offset_hight;
+ int kmem_size = 0;
+ int kmem_size2 = 0;
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ /* Copy the whole test request in before taking any locks. */
+ test_p = kmem_zalloc((size_t)(sizeof (scf_scfioctest_t)), KM_SLEEP);
+ if (ddi_copyin((void *)arg, (void *)test_p,
+ sizeof (scf_scfioctest_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_test;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Pick the most active SCF device state (exec > path > wait > err). */
+ if (scf_comtbl.scf_exec_p != NULL) {
+ statep = scf_comtbl.scf_exec_p;
+ } else if (scf_comtbl.scf_path_p != NULL) {
+ statep = scf_comtbl.scf_path_p;
+ } else if (scf_comtbl.scf_wait_p != NULL) {
+ statep = scf_comtbl.scf_wait_p;
+ } else if (scf_comtbl.scf_err_p != NULL) {
+ statep = scf_comtbl.scf_err_p;
+ }
+
+ test_p->scf_debugxscf = scf_comtbl.debugxscf_flag;
+
+ switch (test_p->mode & TEST_MODE_MASK_LOW) {
+ case TEST_NONE:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_NONE");
+ break;
+
+ case TEST_CONF_RESET:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_CONF_RESET");
+
+ /* Not use info */
+ /* Reset every debug-injection flag back to its default. */
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_OFF;
+ scf_debug_test_rxbuff_nosum_check_flag =
+ SCF_DBF_RXBUFF_NOSUM_CHECK_OFF;
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+ scf_debug_test_cmdr_busy = SCF_DBC_CMDR_BUSY_CLEAR;
+ scf_debug_test_cmdexr_busy = SCF_DBC_CMDEXR_BUSY_CLEAR;
+ scf_debug_test_path_check = SCF_DBC_PATH_CHECK_CLEAR;
+ scf_debug_test_path_check_rtn = SCF_DBC_PATH_CHECK_RTN_CLEAR;
+ scf_debug_test_offline_check = SCF_DBC_OFFLINE_CHECK_CLEAR;
+ scf_debug_test_offline_check_rtn =
+ SCF_DBC_OFFLINE_CHECK_RTN_CLEAR;
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_OFF;
+ break;
+
+ case TEST_CONF_DEBUG_MSG:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_CONF_DEBUG_MSG");
+
+ /*
+ * IN:
+ * info[0] : trace massege flag
+ */
+ scf_trace_msg_flag = test_p->info[0];
+ break;
+
+ case TEST_CONF_CMD_BUSY:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_CONF_CMD_BUSY");
+
+ /*
+ * IN:
+ * info[0] : command busy count
+ * info[1] : command ex busy count
+ */
+ scf_debug_test_cmdr_busy = test_p->info[0];
+ scf_debug_test_cmdexr_busy = test_p->info[1];
+ break;
+
+ case TEST_CONF_SCF_PATH:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_CONF_SCF_PATH");
+
+ /*
+ * IN:
+ * info[0] : scf_path_check count
+ * info[1] : scf_path_check return
+ * info[2] : scf_offline_check count
+ * info[3] : scf_offline_check return
+ */
+ scf_debug_test_path_check = test_p->info[0];
+ scf_debug_test_path_check_rtn = test_p->info[1];
+ scf_debug_test_offline_check = test_p->info[2];
+ scf_debug_test_offline_check_rtn = test_p->info[3];
+ break;
+
+ case TEST_CONF_DSCP_LOOPBACK:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_CONF_DSCP_LOOPBACK");
+
+ /*
+ * IN:
+ * info[0] : loopback mode
+ */
+ scf_debug_test_dscp_loopback =
+ (test_p->info[0]) ?
+ SCF_DBF_DSCP_LOOPBACK_ON : SCF_DBF_DSCP_LOOPBACK_OFF;
+ break;
+
+ case TEST_INT_SYS:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_INT_SYS");
+
+ /*
+ * IN:
+ * info[0] : STR/ISR register
+ * info[1] : STExR register
+ * info[2] : receive data size
+ * info[3] : RxDR register 0
+ * info[4] : RxDR register 1
+ * info[5] : RxDR register 2
+ * info[6] : RxDR register 3
+ */
+ /*
+ * Stage the fake register values and arm a timeout that will
+ * deliver them through scf_debug_test_intr_tout()/scf_intr().
+ */
+ if (scf_debug_scfint_time != 0) {
+ if (scf_debug_test_sys_int_flag ==
+ SCF_DBF_SYS_INTR_OFF) {
+ if (statep != NULL) {
+ scf_debug_test_sys_int_flag =
+ SCF_DBF_SYS_INTR_ON;
+
+ scf_debug_test_sys_int_tbl.STATUS =
+ (uint16_t)
+ (test_p->info[0] >> 16);
+ scf_debug_test_sys_int_tbl.INT_ST |=
+ (uint16_t)test_p->info[0];
+ scf_debug_test_sys_int_tbl.STATUS_ExR =
+ test_p->info[1];
+ scf_debug_test_sys_int_tbl.rxsize =
+ test_p->info[2];
+ scf_debug_test_sys_int_tbl.RDATA0 =
+ test_p->info[3];
+ scf_debug_test_sys_int_tbl.RDATA1 =
+ test_p->info[4];
+ scf_debug_test_sys_int_tbl.RDATA2 =
+ test_p->info[5];
+ scf_debug_test_sys_int_tbl.RDATA3 =
+ test_p->info[6];
+
+ if (scf_debug_test_intr_id == 0) {
+ scf_debug_test_intr_id =
+ timeout((void (*)())scf_debug_test_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(scf_debug_scfint_time)));
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EIO;
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EBUSY;
+ }
+ }
+ break;
+
+ case TEST_INT_SYS_POFF:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_INT_SYS_POFF");
+
+ /*
+ * IN:
+ * info[0] : POFF factor
+ */
+ if (scf_debug_test_sys_poff_flag == SCF_DBF_SYS_POFF_OFF) {
+ if (statep != NULL) {
+ scf_debug_test_sys_poff_flag =
+ SCF_DBF_SYS_POFF_ON;
+
+ scf_debug_test_sys_int_tbl.POFF_FACTOR =
+ test_p->info[0];
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EIO;
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_test", 8);
+ ret = EBUSY;
+ }
+ break;
+
+ case TEST_INT_SYS_EVENT:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_INT_SYS_EVENT");
+
+ /*
+ * IN:
+ * info[0] - info[7] : Event 0
+ * info[8] - info[15] : Event 1
+ * info[16] - info[23] : Event 2
+ * info[24] - info[31] : Event 3
+ */
+ if (scf_debug_test_sys_event_flag == FLAG_OFF) {
+ if (statep != NULL) {
+ scf_debug_test_sys_event_flag =
+ SCF_DBF_SYS_EVENT_ON;
+
+ for (ii = 0; ii < TEST_INFO_MAX; ii++) {
+ scf_debug_test_sys_int_tbl.EVENT[ii] =
+ test_p->info[ii];
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EIO;
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_test", 8);
+ ret = EBUSY;
+ }
+ break;
+
+ case TEST_INT_DSCP:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_INT_DSCP");
+
+ /*
+ * IN:
+ * info[0] : DSR/ISR register
+ * info[1] : TxDSR_C_FLAG/TxDSR_OFFSET register
+ * info[2] : RxDCR_C_FLAG/RxDCR_OFFSET register
+ * info[3] : RxDCR_LENGTH register
+ */
+ if (scf_debug_idbcint_time != 0) {
+ if (scf_debug_test_dscp_int_flag ==
+ SCF_DBF_DSCP_INT_OFF) {
+ if (statep != NULL) {
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_ON;
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_ON;
+
+ scf_debug_test_dscp_int_tbl.DSR = (uint8_t)(test_p->info[0] >> 16);
+ scf_debug_test_sys_int_tbl.INT_ST |= (uint16_t)test_p->info[0];
+ scf_debug_test_dscp_int_tbl.TxDSR_C_FLAG =
+ (uint16_t)(test_p->info[1] >> 16);
+ scf_debug_test_dscp_int_tbl.TxDSR_OFFSET =
+ (uint16_t)test_p->info[1];
+ scf_debug_test_dscp_int_tbl.RxDCR_C_FLAG =
+ (uint16_t)(test_p->info[2] >> 16);
+
+ /*
+ * Clamp the Rx offset into the SRAM receive
+ * window [offset_low, offset_hight).
+ */
+ offset_low = SCF_TXBUFFSIZE * SCF_TX_SRAM_MAXCOUNT / DSC_OFFSET_CONVERT;
+ offset_hight = offset_low +
+ SCF_RXBUFFSIZE * SCF_RX_SRAM_MAXCOUNT / DSC_OFFSET_CONVERT;
+ if ((test_p->info[2] >= offset_low) &&
+ (test_p->info[2] < offset_hight)) {
+ scf_debug_test_dscp_int_tbl.RxDCR_OFFSET =
+ (uint16_t)test_p->info[2];
+ } else {
+ scf_debug_test_dscp_int_tbl.RxDCR_OFFSET = offset_low;
+ }
+ scf_debug_test_dscp_int_tbl.RxDCR_LENGTH = test_p->info[3];
+
+ if ((scf_debug_test_dscp_int_tbl.RxDCR_OFFSET >= offset_low) &&
+ (scf_debug_test_dscp_int_tbl.RxDCR_LENGTH != 0)) {
+ /* Data copy to SRAM */
+ ii = scf_debug_test_dscp_int_tbl.RxDCR_OFFSET *
+ DSC_OFFSET_CONVERT;
+ wk_out_p =
+ (uint8_t *)&statep->scf_dscp_sram->DATA[ii];
+ for (ii = 0; ii < scf_debug_test_dscp_int_tbl.RxDCR_LENGTH;
+ ii++, wk_out_p++) {
+ SCF_DDI_PUT8(statep, statep->scf_dscp_sram_handle,
+ wk_out_p, (uint8_t)ii);
+ }
+ }
+
+ if (scf_debug_test_intr_id == 0) {
+ scf_debug_test_intr_id =
+ timeout((void (*)())scf_debug_test_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(scf_debug_idbcint_time)));
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EIO;
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EBUSY;
+ }
+ }
+ break;
+
+ case TEST_SYS_CALL_INT:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_SYS_CALL_INT");
+
+ /* Not use info */
+ if (scf_debug_scfint_time != 0) {
+ if (statep != NULL) {
+ if (scf_debug_test_intr_id == 0) {
+ scf_debug_test_intr_id =
+ timeout((void (*)())scf_debug_test_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(scf_debug_scfint_time)));
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__,
+ "dbg_test", 8);
+ ret = EIO;
+ }
+ }
+ break;
+
+ case TEST_DSCP_CALL_RESET:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_RESET");
+
+ /* Not use info */
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+
+ case TEST_DSCP_CALL_INIT:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_INIT");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+
+ /*
+ * scf_mb_init(target_id_t target_id, mkey_t mkey,
+ * void (*event_handler) (scf_event_t mevent, void *arg),
+ * void *arg);
+ */
+
+ func_arg = 0x01020304;
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ /* External calls are made with the driver mutex dropped. */
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_mb_init(target_id, mkey, NULL,
+ (void *)&func_arg);
+ } else {
+ func_ret = scf_mb_init(target_id, mkey,
+ scf_debug_test_event_handler,
+ (void *)&func_arg);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+ case TEST_DSCP_CALL_FINI:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_FINI");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+
+ /* scf_mb_fini(target_id_t target_id, mkey_t mkey); */
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_mb_fini(target_id, mkey);
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+ case TEST_DSCP_CALL_PUTMSG:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_PUTMSG");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ * info[2] : data_len
+ * info[3] : num_sg
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+ data_len = (uint32_t)test_p->info[2];
+ num_sg = (uint32_t)test_p->info[3];
+
+ /*
+ * scf_mb_putmsg(target_id_t target_id, mkey_t mkey,
+ * uint32_t data_len, uint32_t num_sg, mscat_gath_t *sgp,
+ * clock_t timeout);
+ */
+
+
+ if (data_len != 0) {
+ kmem_size = data_len;
+ data_addr = (caddr_t)kmem_zalloc(kmem_size, KM_SLEEP);
+ }
+ if (num_sg != 0) {
+ kmem_size2 = sizeof (mscat_gath_t) * num_sg;
+ sgp = (mscat_gath_t *)kmem_zalloc(kmem_size2, KM_SLEEP);
+ }
+
+ /*
+ * Build the scatter/gather list: 16-byte segments filled
+ * with an incrementing byte pattern, last segment takes
+ * the remainder.
+ */
+ msc_dptr = data_addr;
+ msc_len = data_len;
+ for (ii = 0; ii < num_sg; ii++) {
+ if (msc_len != 0) {
+ sgp[ii].msc_dptr = msc_dptr;
+ if ((msc_len < 0x00000010) ||
+ (ii == (num_sg - 1))) {
+ sgp[ii].msc_len = msc_len;
+ } else {
+ sgp[ii].msc_len = 0x00000010;
+ }
+ msc_len -= sgp[ii].msc_len;
+ for (jj = 0; jj < sgp[ii].msc_len; jj++,
+ msc_dptr++) {
+ *msc_dptr = jj;
+ }
+ } else {
+ sgp[ii].msc_dptr = NULL;
+ sgp[ii].msc_len = 0;
+ }
+ }
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ /* data[0] selects the error-path variant to exercise. */
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_mb_putmsg(target_id, mkey, data_len,
+ num_sg, NULL, 0);
+ } else if (test_p->data[0] == 2) {
+ sgp->msc_len = 0x00000010;
+ sgp->msc_dptr = NULL;
+ func_ret = scf_mb_putmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ } else if (test_p->data[0] == 3) {
+ sgp->msc_len += 1;
+ func_ret = scf_mb_putmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ } else {
+ func_ret = scf_mb_putmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (data_len != 0)
+ bcopy((void *)data_addr, (void *)&test_p->rdata[0],
+ data_len);
+
+ if (data_addr != NULL) kmem_free(data_addr, kmem_size);
+ if (sgp != NULL) kmem_free(sgp, kmem_size2);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+ case TEST_DSCP_CALL_CANGET:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_CANGET");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ * OUT:
+ * info[2] : data_len
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+ data_len = 0xffffffff;
+
+ /*
+ * scf_mb_canget(target_id_t target_id, mkey_t mkey,
+ * uint32_t *data_lenp);
+ */
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_mb_canget(target_id, mkey, NULL);
+ } else {
+ func_ret = scf_mb_canget(target_id, mkey,
+ (uint32_t *)&data_len);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+
+ test_p->info[2] = (uint_t)data_len;
+
+ break;
+
+ case TEST_DSCP_CALL_GETMSG:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_GETMSG");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ * info[2] : data_len
+ * info[3] : num_sg
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+ data_len = (uint32_t)test_p->info[2];
+ num_sg = (uint32_t)test_p->info[3];
+
+ /*
+ * scf_mb_getmsg(target_id_t target_id, mkey_t mkey,
+ * uint32_t data_len, uint32_t num_sg, mscat_gath_t *sgp,
+ * clock_t timeout);
+ */
+
+ if (data_len != 0) {
+ kmem_size = data_len;
+ data_addr = (caddr_t)kmem_zalloc(kmem_size, KM_SLEEP);
+ }
+ if (num_sg != 0) {
+ kmem_size2 = sizeof (mscat_gath_t) * num_sg;
+ sgp = (mscat_gath_t *)kmem_zalloc(kmem_size2, KM_SLEEP);
+ }
+
+ /* Same scatter/gather construction as TEST_DSCP_CALL_PUTMSG. */
+ msc_dptr = data_addr;
+ msc_len = data_len;
+ for (ii = 0; ii < num_sg; ii++) {
+ if (msc_len != 0) {
+ sgp[ii].msc_dptr = msc_dptr;
+ if ((msc_len < 0x00000010) ||
+ (ii == (num_sg - 1))) {
+ sgp[ii].msc_len = msc_len;
+ } else {
+ sgp[ii].msc_len = 0x00000010;
+ }
+ msc_len -= sgp[ii].msc_len;
+ for (jj = 0; jj < sgp[ii].msc_len; jj++,
+ msc_dptr++) {
+ *msc_dptr = jj;
+ }
+ } else {
+ sgp[ii].msc_dptr = NULL;
+ sgp[ii].msc_len = 0;
+ }
+ }
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_mb_getmsg(target_id, mkey, data_len,
+ num_sg, NULL, 0);
+ } else if (test_p->data[0] == 2) {
+ sgp->msc_len = 0x00000010;
+ sgp->msc_dptr = NULL;
+ func_ret = scf_mb_getmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ } else if (test_p->data[0] == 3) {
+ sgp->msc_len += 1;
+ func_ret = scf_mb_getmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ } else {
+ func_ret = scf_mb_getmsg(target_id, mkey, data_len,
+ num_sg, sgp, 0);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (data_len != 0)
+ bcopy((void *)data_addr, (void *)&test_p->rdata[0],
+ data_len);
+
+ if (data_addr != NULL) kmem_free(data_addr, kmem_size);
+ if (sgp != NULL) kmem_free(sgp, kmem_size2);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+ case TEST_DSCP_CALL_FLUSH:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_FLUSH");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ * info[2] : flush_type
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+ flush_type = (mflush_type_t)test_p->info[2];
+
+ /*
+ * scf_mb_flush(target_id_t target_id, mkey_t mkey,
+ * mflush_type_t flush_type);
+ */
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_mb_flush(target_id, mkey, flush_type);
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+ break;
+
+ case TEST_DSCP_CALL_CTRL:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_CTRL");
+
+ /*
+ * IN:
+ * info[0] : target_id
+ * info[1] : mkey
+ * info[2] : op
+ * OUT:
+ * info[3] : arg
+ */
+ target_id = (target_id_t)test_p->info[0];
+ mkey = (mkey_t)test_p->info[1];
+ op = test_p->info[2];
+
+ /*
+ * scf_mb_ctrl(target_id_t target_id, mkey_t mkey,
+ * uint32_t op, void *arg);
+ */
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_mb_ctrl(target_id, mkey, op, NULL);
+ } else {
+ func_ret = scf_mb_ctrl(target_id, mkey, op,
+ (void *)&func_arg);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ scf_debug_test_dscp_call_flag = SCF_DBF_DSCP_CALL_OFF;
+
+ /*
+ * NOTE(review): when data[0] == 1 (NULL arg) func_arg has
+ * never been assigned in this case, so an indeterminate
+ * value is copied out here — confirm whether that is
+ * acceptable for a debug-only path.
+ */
+ test_p->info[3] = (uint_t)func_arg;
+
+ break;
+
+ case TEST_DSCP_CALL_OTHER:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_DSCP_CALL_OTHER");
+
+ /*
+ * IN:
+ * info[0] : function code
+ * info[1] : factor
+ * OUT:
+ * info[3] : return code
+ */
+ switch (test_p->info[0]) {
+ case 0x00000001:
+ test_p->info[3] = scf_dscp_init();
+ break;
+
+ case 0x00000002:
+ scf_dscp_fini();
+ break;
+
+ case 0x00000003:
+ scf_dscp_start(test_p->info[1]);
+ break;
+
+ case 0x00000004:
+ scf_dscp_stop(test_p->info[1]);
+ break;
+
+ case 0x00000101:
+ /* factor 0xffffffff means "all timers". */
+ if (test_p->info[1] == 0xffffffff) {
+ for (ii = 0; ii < SCF_TIMERCD_MAX; ii++) {
+ scf_timer_start(ii);
+ }
+ } else {
+ scf_timer_start(test_p->info[1]);
+ }
+ break;
+
+ case 0x00000102:
+ if (test_p->info[1] == 0xffffffff) {
+ scf_timer_all_stop();
+ } else {
+ scf_timer_stop(test_p->info[1]);
+ }
+ break;
+
+ case 0x00000103:
+ func_ret = scf_timer_check(test_p->info[1]);
+ break;
+
+ case 0x00000104:
+ func_ret = scf_timer_value_get(test_p->info[1]);
+ break;
+
+ case 0x00000200:
+ /* Emit one SRAM trace record of every known type. */
+ if (statep != NULL) {
+ SCF_SRAM_TRACE(statep, DTC_ONLINETO);
+ SCF_SRAM_TRACE(statep, DTC_ONLINE);
+ SCF_SRAM_TRACE(statep, DTC_OFFLINE);
+
+ SCF_SRAM_TRACE(statep, DTC_SENDDATA);
+
+ SCF_SRAM_TRACE(statep, DTC_RECVDATA);
+
+ SCF_SRAM_TRACE(statep, DTC_ERRRTN);
+ SCF_SRAM_TRACE(statep, DTC_RCI_BUF_FUL);
+ SCF_SRAM_TRACE(statep, DTC_RCI_BUSY);
+ SCF_SRAM_TRACE(statep, DTC_INTERFACE);
+ SCF_SRAM_TRACE(statep, DTC_E_NOT_SUPPORT);
+ SCF_SRAM_TRACE(statep, DTC_E_PARAM);
+ SCF_SRAM_TRACE(statep, DTC_E_SCFC_PATH);
+ SCF_SRAM_TRACE(statep, DTC_E_RCI_ACCESS);
+ SCF_SRAM_TRACE(statep, DTC_E_SEQUENCE);
+
+ SCF_SRAM_TRACE(statep, DTC_RSUMERR);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_TXREQ);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_RXACK);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_RXEND);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_RXREQ);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_TXACK);
+ SCF_SRAM_TRACE(statep, DTC_DSCP_ACKTO);
+ SCF_SRAM_TRACE(statep, DTC_DSCP_ENDTO);
+
+ SCF_SRAM_TRACE(statep, DTC_DSCP_TXEND);
+
+ SCF_SRAM_TRACE(statep, DTC_SENDDATA_SRAM);
+ SCF_SRAM_TRACE(statep, DTC_RECVDATA_SRAM);
+ SCF_SRAM_TRACE(statep, DTC_DSCP_SENDDATA);
+ SCF_SRAM_TRACE(statep, DTC_DSCP_RECVDATA);
+
+ SCF_SRAM_TRACE(statep, DTC_CMD);
+ SCF_SRAM_TRACE(statep, DTC_INT);
+ SCF_SRAM_TRACE(statep, DTC_CMDTO);
+ SCF_SRAM_TRACE(statep, DTC_CMDBUSYTO);
+ SCF_SRAM_TRACE(statep, 0x99);
+ }
+ break;
+
+ case 0x00010000:
+ /* Exercise a DDI_SUSPEND / DDI_RESUME round trip. */
+ if (statep != NULL) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_detach(statep->dip, DDI_SUSPEND);
+ func_ret =
+ scf_detach(scf_comtbl.scf_pseudo_p->dip,
+ DDI_SUSPEND);
+
+ drv_usecwait(5000000);
+
+ func_ret =
+ scf_attach(scf_comtbl.scf_pseudo_p->dip,
+ DDI_RESUME);
+ func_ret = scf_attach(statep->dip, DDI_RESUME);
+ mutex_enter(&scf_comtbl.all_mutex);
+ }
+ break;
+
+ case 0x00019990:
+ case 0x00019991:
+ /* Kick the soft interrupt with/without the DSCP flag. */
+ mutex_exit(&scf_comtbl.all_mutex);
+ mutex_enter(&scf_comtbl.si_mutex);
+ if (test_p->info[0] & 0x00000001) {
+ scf_comtbl.scf_softintr_dscp_kicked = FLAG_ON;
+ } else {
+ scf_comtbl.scf_softintr_dscp_kicked = FLAG_OFF;
+ }
+ mutex_exit(&scf_comtbl.si_mutex);
+ scf_softintr(NULL);
+ mutex_enter(&scf_comtbl.all_mutex);
+ break;
+
+ case 0x00019998:
+ mutex_exit(&scf_comtbl.all_mutex);
+ scf_panic_callb(1);
+ mutex_enter(&scf_comtbl.all_mutex);
+ break;
+
+ case 0x00019999:
+ mutex_exit(&scf_comtbl.all_mutex);
+ scf_shutdown_callb(1);
+ mutex_enter(&scf_comtbl.all_mutex);
+ break;
+
+ case 0x00020000:
+ func_ret = scf_offline_check(statep, FLAG_OFF);
+ func_ret = scf_offline_check(statep, FLAG_ON);
+ func_ret = scf_cmdbusy_check(statep);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case TEST_OSESCF_CALL_RESET:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_OSESCF_CALL_RESET");
+
+ /* Not use info */
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_OFF;
+ break;
+
+ case TEST_OSESCF_CALL_PUTINFO:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_OSESCF_CALL_PUTINFO");
+
+ /*
+ * IN:
+ * info[0] : key
+ * info[1] : type
+ * info[2] : transid
+ * info[3] : length
+ */
+ key = (uint32_t)test_p->info[0];
+ type = (uint8_t)test_p->info[1];
+ transid = (uint32_t)test_p->info[2];
+ length = (uint32_t)test_p->info[3];
+
+ /*
+ * scf_service_putinfo(uint32_t key, uint8_t type,
+ * uint32_t transid, uint32_t length, void *datap);
+ */
+
+ if (length != 0) {
+ kmem_size = length;
+ data_addr = (caddr_t)kmem_zalloc(kmem_size, KM_SLEEP);
+ }
+
+ /* Fill the payload with an incrementing byte pattern. */
+ msc_dptr = data_addr;
+ for (ii = 0; ii < length; ii++, msc_dptr++) {
+ *msc_dptr = ii;
+ }
+
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_service_putinfo(key, type, transid,
+ length, NULL);
+ } else {
+ func_ret = scf_service_putinfo(key, type, transid,
+ length, (void *)data_addr);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (length != 0)
+ bcopy((void *)data_addr, (void *)&test_p->rdata[0],
+ length);
+
+ if (data_addr != NULL) kmem_free(data_addr, kmem_size);
+
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_OFF;
+
+ break;
+
+ case TEST_OSESCF_CALL_GETINFO:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_OSESCF_CALL_GETINFO");
+
+ /*
+ * IN:
+ * info[0] : key
+ * info[1] : type
+ * info[2] : transid
+ * info[3] : length
+ * OUT:
+ * info[3] : length
+ */
+ key = (uint32_t)test_p->info[0];
+ type = (uint8_t)test_p->info[1];
+ transid = (uint32_t)test_p->info[2];
+ length = (uint32_t)test_p->info[3];
+
+ /*
+ * scf_service_getinfo(uint32_t key, uint8_t type,
+ * uint32_t transid, uint32_t *lengthp, void *datap);
+ */
+ if (length != 0) {
+ kmem_size = length;
+ data_addr = (caddr_t)kmem_zalloc(kmem_size, KM_SLEEP);
+ }
+
+ /* Pre-fill with 0x7f so overwritten bytes are visible. */
+ msc_dptr = data_addr;
+ for (ii = 0; ii < length; ii++, msc_dptr++) {
+ *msc_dptr = 0x7f;
+ }
+
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_ON;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (test_p->data[0] == 1) {
+ func_ret = scf_service_getinfo(key, type, transid,
+ (uint32_t *)&length, NULL);
+ } else if (test_p->data[0] == 2) {
+ func_ret = scf_service_getinfo(key, type, transid,
+ NULL, (void *)data_addr);
+ } else {
+ func_ret = scf_service_getinfo(key, type, transid,
+ (uint32_t *)&length, (void *)data_addr);
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Without firmware, synthesize the reply buffer locally. */
+ if (scf_debug_nofirm_sys == SCF_DBF_NOFIRM_SYS_ON) {
+ length = kmem_size;
+ msc_dptr = data_addr;
+ for (ii = 0; ii < length; ii++, msc_dptr++) {
+ *msc_dptr = ii;
+ }
+ }
+ if (length != 0)
+ bcopy((void *)data_addr, (void *)&test_p->rdata[0],
+ length);
+
+ if (data_addr != NULL) kmem_free(data_addr, kmem_size);
+
+ scf_debug_test_osescf_call_flag = SCF_DBF_OSESCF_CALL_OFF;
+
+ test_p->info[3] = (uint_t)length;
+ break;
+
+ case TEST_FMEM_START:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_FMEM_START");
+
+ /*
+ * IN:
+ * info[0] : lsb_1
+ * info[1] : lsb_2
+ */
+
+ /*
+ * scf_fmem_start(int s_bd, int t_bd);
+ */
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_fmem_start(test_p->info[0], test_p->info[1]);
+ mutex_enter(&scf_comtbl.all_mutex);
+ break;
+
+ case TEST_FMEM_END:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_FMEM_END");
+
+ /*
+ * scf_fmem_end(void);
+ */
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_fmem_end();
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ break;
+
+ case TEST_FMEM_CANCEL:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "TEST_FMEM_CANCEL");
+
+ /*
+ * scf_fmem_cancel(void);
+ */
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ func_ret = scf_fmem_cancel();
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ break;
+
+ default:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "Undefine mod");
+
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "dbg_test", 8);
+ ret = EINVAL;
+ break;
+ }
+
+ test_p->rtncode = func_ret;
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ /* Copy the (possibly updated) request back to userland. */
+ if (ret == 0) {
+ if (ddi_copyout((void *)test_p, (void *)arg,
+ sizeof (scf_scfioctest_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_test
+ */
+ END_test:
+
+ /* Common exit: release the request buffer on all paths. */
+ if (test_p) {
+ kmem_free((void *)test_p,
+ (size_t)(sizeof (scf_scfioctest_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_debug_test_intr_tout()
+ *
+ * timeout(9F) callback for the simulated SCF interrupt: consumes the
+ * pending timeout id and, if one was armed, delivers the interrupt via
+ * scf_intr() with the driver mutex dropped.
+ */
+void
+scf_debug_test_intr_tout(void *arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_tout() "
+ int deliver = 0;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ /* Claim the pending timeout id under the driver lock. */
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_debug_test_intr_id != 0) {
+ scf_debug_test_intr_id = 0;
+ deliver = 1;
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Fire the simulated interrupt only after the lock is released. */
+ if (deliver) {
+ scf_intr(arg);
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr()
+ *
+ * Latch the staged interrupt status word (INT_ST) from the simulation
+ * table into statep->reg_int_st, mirror it to the real INT_ST register
+ * when SCF_DBG_CHECK_NODEVICE holds, then clear the staging word and
+ * drop the per-source simulation flags whose status bits are absent.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_intr(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ statep->reg_int_st = scf_debug_test_sys_int_tbl.INT_ST;
+
+ if (SCF_DBG_CHECK_NODEVICE) {
+ SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST, statep->reg_int_st);
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, "set ISR = 0x%04x", statep->reg_int_st);
+
+ /* The staged value is one-shot; clear it once consumed. */
+ scf_debug_test_sys_int_tbl.INT_ST = 0;
+
+ /* No IDBCINT bit pending -> DSCP interrupt simulation is done. */
+ if ((statep->reg_int_st & INT_ST_IDBCINT) == 0) {
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+ }
+ /* No SCFINT bit pending -> system interrupt simulation is done. */
+ if ((statep->reg_int_st & INT_ST_SCFINT) == 0) {
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_OFF;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr_scfint()
+ *
+ * Latch the staged STATUS and STATUS_ExR register values into statep.
+ * When the staged value does not request a mode change, the current
+ * mode bits (SECURE/BOOT in STATUS, SCF status/number in STATUS_ExR)
+ * are carried over from the live register copy so they are preserved.
+ * Mirrors the values to the real registers when SCF_DBG_CHECK_NODEVICE
+ * holds. Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_intr_scfint(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_scfint() "
+ uint16_t wk_STATUS;
+ uint32_t wk_STATUS_ExR;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ /* No mode change requested: keep current SECURE/BOOT mode bits. */
+ if ((scf_debug_test_sys_int_tbl.STATUS & STATUS_MODE_CHANGED) == 0) {
+ wk_STATUS =
+ statep->reg_status &
+ (STATUS_SECURE_MODE | STATUS_BOOT_MODE);
+ scf_debug_test_sys_int_tbl.STATUS &=
+ ~(STATUS_SECURE_MODE | STATUS_BOOT_MODE);
+ scf_debug_test_sys_int_tbl.STATUS |= wk_STATUS;
+ }
+ statep->reg_status = scf_debug_test_sys_int_tbl.STATUS;
+
+ if (SCF_DBG_CHECK_NODEVICE) {
+ SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS, statep->reg_status);
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, "set STR = 0x%04x", statep->reg_status);
+
+ scf_debug_test_sys_int_tbl.STATUS = 0;
+
+ /* Same carry-over handling for the extended status register. */
+ if ((scf_debug_test_sys_int_tbl.STATUS_ExR &
+ STATUS_SCF_STATUS_CHANGE) == 0) {
+ wk_STATUS_ExR = statep->reg_status_exr &
+ (STATUS_SCF_STATUS | STATUS_SCF_NO);
+ scf_debug_test_sys_int_tbl.STATUS_ExR &=
+ ~(STATUS_SCF_STATUS | STATUS_SCF_NO);
+ scf_debug_test_sys_int_tbl.STATUS_ExR |= wk_STATUS_ExR;
+ }
+ statep->reg_status_exr = scf_debug_test_sys_int_tbl.STATUS_ExR;
+
+ if (SCF_DBG_CHECK_NODEVICE) {
+ SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR, statep->reg_status_exr);
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, "set STExR = 0x%08x",
+ statep->reg_status_exr);
+
+ scf_debug_test_sys_int_tbl.STATUS_ExR = 0;
+
+ /* Without CMD_COMPLETE the simulated system interrupt is finished. */
+ if ((statep->reg_status & STATUS_CMD_COMPLETE) == 0) {
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_OFF;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr_cmdend()
+ *
+ * Fabricate command-completion receive data. For short receives
+ * (rxsize < SCF_S_CNT_16) the four RxDR words come from either the
+ * scf_debug_rdata[] override or the staged RDATA0-3 values. For large
+ * receives a byte pattern is written into the system SRAM, a 32-bit
+ * checksum (sum4, seeded with SCF_MAGICNUMBER_L) is accumulated over
+ * it, and rdata[] describes size/checksum. Finally an 8-bit checksum
+ * (sum, seeded with SCF_MAGICNUMBER_S) over the first SCF_S_CNT_15
+ * bytes of rdata is stored in the last byte, unless suppressed by the
+ * scf_no_make_sum_* debug switches.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_intr_cmdend(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_cmdend() "
+ uint_t ii;
+ uint8_t *wk_charp;
+ uint8_t sum = SCF_MAGICNUMBER_S;
+ uint32_t sum4 = SCF_MAGICNUMBER_L;
+ uint32_t wk_data;
+ uint8_t *wk_out_p;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ if (scf_debug_test_sys_int_tbl.rxsize < SCF_S_CNT_16) {
+ /* Short receive: registers only, no SRAM involvement. */
+ if ((scf_debug_rdata[0] == 0) &&
+ (scf_debug_rdata[1] == 0) &&
+ (scf_debug_rdata[2] == 0) &&
+ (scf_debug_rdata[3] == 0)) {
+ statep->reg_rdata[0] =
+ scf_debug_test_sys_int_tbl.RDATA0;
+ statep->reg_rdata[1] =
+ scf_debug_test_sys_int_tbl.RDATA1;
+ statep->reg_rdata[2] =
+ scf_debug_test_sys_int_tbl.RDATA2;
+ statep->reg_rdata[3] =
+ scf_debug_test_sys_int_tbl.RDATA3;
+ } else {
+ /* Non-zero scf_debug_rdata[] overrides the table. */
+ statep->reg_rdata[0] = scf_debug_rdata[0];
+ statep->reg_rdata[1] = scf_debug_rdata[1];
+ statep->reg_rdata[2] = scf_debug_rdata[2];
+ statep->reg_rdata[3] = scf_debug_rdata[3];
+ }
+ } else {
+ /* Large receive: rdata[0] carries the SRAM data size. */
+ statep->reg_rdata[0] = scf_debug_test_sys_int_tbl.rxsize;
+ statep->reg_rdata[1] = 0;
+ if (scf_debug_test_sys_int_tbl.RDATA2 != 0) {
+ /* Caller supplied a checksum: check it normally. */
+ statep->reg_rdata[2] =
+ scf_debug_test_sys_int_tbl.RDATA2;
+ scf_debug_test_rxbuff_nosum_check_flag =
+ SCF_DBF_RXBUFF_NOSUM_CHECK_OFF;
+ } else {
+ statep->reg_rdata[2] = 0;
+ scf_debug_test_rxbuff_nosum_check_flag =
+ SCF_DBF_RXBUFF_NOSUM_CHECK_ON;
+ }
+ statep->reg_rdata[3] = scf_debug_test_sys_int_tbl.RDATA3;
+
+ if ((scf_comtbl.scf_exec_cmd_id == 0) &&
+ (scf_comtbl.scf_cmd_intr.cmd == CMD_INT_REASON)) {
+ /* Interrupt-reason receive: zero-fill the SRAM. */
+ wk_out_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+ for (ii = 0; ii < scf_debug_test_sys_int_tbl.rxsize;
+ ii++, wk_out_p++) {
+ SCF_DDI_PUT8(statep,
+ statep->scf_sys_sram_handle, wk_out_p,
+ 0x00);
+ }
+ } else {
+ /*
+ * Incrementing pattern; fold each completed
+ * big-endian 32-bit word into sum4.
+ */
+ wk_data = 0x00010203;
+ wk_out_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+ for (ii = 0; ii < scf_debug_test_sys_int_tbl.rxsize;
+ ii++, wk_out_p++) {
+ SCF_DDI_PUT8(statep,
+ statep->scf_sys_sram_handle, wk_out_p,
+ ii);
+ if ((ii % 4) == 0) {
+ wk_data = (ii & 0x000000ff) << 24;
+ } else if ((ii % 4) == 1) {
+ wk_data |= (ii & 0x000000ff) << 16;
+ } else if ((ii % 4) == 2) {
+ wk_data |= (ii & 0x000000ff) << 8;
+ } else {
+ wk_data |= (ii & 0x000000ff);
+ sum4 += wk_data;
+ }
+ }
+ }
+
+ if (scf_no_make_sum_l == SCF_DBF_NO_MAKE_SUM_L_OFF) {
+ statep->reg_rdata[2] = sum4;
+ }
+ }
+
+ /* 8-bit checksum over the first 15 rdata bytes, stored in the 16th. */
+ wk_charp = (uint8_t *)&statep->reg_rdata[0];
+ for (ii = 0; ii < SCF_S_CNT_15; ii++, wk_charp++) {
+ sum += (*wk_charp);
+ }
+
+ if (scf_no_make_sum_s == SCF_DBF_NO_MAKE_SUM_S_OFF) {
+ *wk_charp = sum;
+ }
+
+ SCFDBGMSG4(SCF_DBGFLAG_DBG, "set RxDR = 0x%08x 0x%08x 0x%08x 0x%08x",
+ statep->reg_rdata[0], statep->reg_rdata[1],
+ statep->reg_rdata[2], statep->reg_rdata[3]);
+
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_OFF;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr_poff()
+ *
+ * Hand the staged power-off factor to the driver (scf_poff_id) and
+ * clear the one-shot POFF simulation flag.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_intr_poff(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_poff() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ scf_comtbl.scf_poff_id = scf_debug_test_sys_int_tbl.POFF_FACTOR;
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, "set POFF factor = 0x%02x",
+ scf_comtbl.scf_poff_id);
+
+ scf_debug_test_sys_poff_flag = SCF_DBF_SYS_POFF_OFF;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+/*
+ * scf_debug_test_dsens()
+ *
+ * Copy the staged event bytes (EVENT[] in the simulation table) into
+ * the caller's interrupt-reason buffer. A len of 0 means "deliver a
+ * full-sized record" (SCF_INT_CNT_MAX) and updates scfcmdp->rbufleng
+ * accordingly. Clears the one-shot event simulation flag.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_dsens(struct scf_cmd *scfcmdp, scf_int_reason_t *int_rp, int len)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_dsens() "
+ int copy_len = len;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ /* Zero length: report and copy a maximum-sized event record. */
+ if (copy_len == 0) {
+ copy_len = SCF_INT_CNT_MAX;
+ scfcmdp->rbufleng = copy_len;
+ }
+
+ bcopy((void *)&scf_debug_test_sys_int_tbl.EVENT[0],
+ (void *)int_rp, copy_len);
+
+ scf_debug_test_sys_event_flag = SCF_DBF_SYS_EVENT_OFF;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr_dscp_dsr()
+ *
+ * Latch the staged DSCP status register (DSR) into statep->reg_dsr,
+ * mirror it to the real DSR when SCF_DBG_CHECK_NODEVICE holds, clear
+ * the staging byte, and drop the DSCP interrupt simulation flag when
+ * no RxREQ/TxACK/TxEND bit remains pending.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_intr_dscp_dsr(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_dscp_dsr() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ statep->reg_dsr = scf_debug_test_dscp_int_tbl.DSR;
+
+ if (SCF_DBG_CHECK_NODEVICE) {
+ SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->DSR, statep->reg_dsr);
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, "set DSR = 0x%02x", statep->reg_dsr);
+
+ /* The staged DSR is one-shot; clear it once consumed. */
+ scf_debug_test_dscp_int_tbl.DSR = 0;
+
+ if ((statep->reg_dsr & (DSR_RxREQ | DSR_TxACK | DSR_TxEND)) == 0) {
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_intr_dscp_rxtx()
+ *
+ * Fabricate DSCP Rx/Tx descriptor state for a simulated interrupt.
+ * DSR_RxREQ: build the receive descriptor (flag/length/offset) from
+ * the staged table, defaulting the offset into the SRAM Rx window.
+ * DSR_TxEND: build the transmit-done descriptor from the staged flag
+ * and offset, falling back to the last TxDCR offset.
+ * Caller must hold scf_comtbl.all_mutex.
+ *
+ * Fix: the "set TxDSR" debug trace printed reg_rxdcr_c_flag/offset
+ * (copy-paste from the Rx branch) instead of the TxDSR values it had
+ * just computed.
+ */
+void
+scf_debug_test_intr_dscp_rxtx(scf_state_t *statep, uint8_t dsr)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_intr_dscp_rxtx() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ if (dsr & DSR_RxREQ) {
+ statep->reg_rxdcr_c_flag =
+ (scf_debug_test_dscp_int_tbl.RxDCR_C_FLAG |
+ DSC_FLAG_DEFAULT);
+ statep->reg_rxdcr_c_length =
+ scf_debug_test_dscp_int_tbl.RxDCR_LENGTH;
+ if (scf_debug_test_dscp_int_tbl.RxDCR_LENGTH != 0) {
+ /* Use the staged offset, or default into Rx SRAM. */
+ if (scf_debug_test_dscp_int_tbl.RxDCR_OFFSET !=
+ DSC_OFFSET_NOTHING) {
+ statep->reg_rxdcr_c_offset =
+ scf_debug_test_dscp_int_tbl.RxDCR_OFFSET;
+ } else {
+ statep->reg_rxdcr_c_offset =
+ (SCF_TX_SRAM_MAXCOUNT * SCF_RXBUFFSIZE /
+ DSC_OFFSET_CONVERT);
+ }
+ } else {
+ statep->reg_rxdcr_c_offset = DSC_OFFSET_NOTHING;
+ }
+
+ SCFDBGMSG3(SCF_DBGFLAG_DBG,
+ "set RxDCR = 0x%04x 0x%04x 0x%08x",
+ statep->reg_rxdcr_c_flag,
+ statep->reg_rxdcr_c_offset,
+ statep->reg_rxdcr_c_length);
+
+ if ((dsr & DSR_TxEND) == 0) {
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+ }
+
+ } else if (dsr == DSR_TxEND) {
+ statep->reg_txdsr_c_flag =
+ (statep->reg_txdcr_c_flag & 0xff00) |
+ scf_debug_test_dscp_int_tbl.TxDSR_C_FLAG;
+ if (scf_debug_test_dscp_int_tbl.TxDSR_OFFSET == 0) {
+ statep->reg_txdsr_c_offset = statep->reg_txdcr_c_offset;
+ } else {
+ statep->reg_txdsr_c_offset =
+ scf_debug_test_dscp_int_tbl.TxDSR_OFFSET;
+ }
+
+ /* Trace the TxDSR values actually computed above. */
+ SCFDBGMSG2(SCF_DBGFLAG_DBG, "set TxDSR = 0x%04x 0x%04x",
+ statep->reg_txdsr_c_flag,
+ statep->reg_txdsr_c_offset);
+
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_OFF;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_alive_start()
+ *
+ * Debug-test hook: start the pseudo ALIVE interrupt timer.  Reads the
+ * interval field of the ATR register, converts it to milliseconds, and
+ * schedules scf_debug_test_alive_intr_tout() via timeout(9F).
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_alive_start(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_alive_start() "
+ uint8_t wk_int8;
+ uint_t alive_timer; /* interval in milliseconds; 0 = disabled */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ wk_int8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->ATR);
+
+ /* Map ATR interval encoding to a millisecond period */
+ switch (wk_int8 & ATR_INTERVAL) {
+ case ATR_INTERVAL_30S:
+ alive_timer = 30000;
+ break;
+
+ case ATR_INTERVAL_60S:
+ alive_timer = 60000;
+ break;
+
+ case ATR_INTERVAL_120S:
+ alive_timer = 120000;
+ break;
+
+ default:
+ alive_timer = 0;
+ break;
+ }
+ /* Arm the timer only if an interval is set and none is pending */
+ if ((alive_timer != 0) && (scf_debug_test_alive_id == 0)) {
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_ON;
+ scf_debug_test_sys_int_tbl.INT_ST |= INT_ST_ALIVEINT;
+ scf_debug_test_alive_id =
+ timeout((void (*)())scf_debug_test_alive_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(alive_timer)));
+ scf_debug_test_alive_flag = FLAG_ON;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/* ARGSUSED */
+/*
+ * scf_debug_test_alive_stop()
+ *
+ * Debug-test hook: stop the pseudo ALIVE cycle.  Only clears the flag;
+ * the pending timeout (if any) sees FLAG_OFF and does not re-arm.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_alive_stop(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_alive_stop() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ scf_debug_test_alive_flag = FLAG_OFF;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_alive_intr_tout()
+ *
+ * timeout(9F) callback for the pseudo ALIVE timer.  While the alive
+ * flag is on, re-arms the timer and delivers a simulated interrupt by
+ * calling scf_intr() -- with all_mutex dropped, since scf_intr() is
+ * the interrupt entry point and takes its own locks.
+ *
+ * arg - soft state pointer (scf_state_t *), passed through timeout().
+ */
+void
+scf_debug_test_alive_intr_tout(void *arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_alive_intr_tout() "
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Timer has fired; mark no timeout outstanding */
+ scf_debug_test_alive_id = 0;
+
+ if (scf_debug_test_alive_flag == FLAG_ON) {
+ /* Re-arm the periodic pseudo ALIVE timer */
+ scf_debug_test_alive_start(arg);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ scf_intr(arg);
+ } else {
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_send_cmd()
+ *
+ * Debug-test hook: simulate completion of an SCF command.  Fills the
+ * pseudo system-interrupt table with a CMD_COMPLETE status and canned
+ * receive data sized per the command's buffer flag, then schedules
+ * scf_debug_test_intr_tout() to deliver the pseudo SCF interrupt.
+ * No-op unless scf_debug_scfint_time is set.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_send_cmd(scf_state_t *statep, struct scf_cmd *scfcmdp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_send_cmd() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ if (scf_debug_scfint_time != 0) {
+ scf_debug_test_sys_int_flag = SCF_DBF_SYS_INTR_ON;
+ scf_debug_test_sys_int_tbl.INT_ST |= INT_ST_SCFINT;
+
+ scf_debug_test_sys_int_tbl.STATUS = STATUS_CMD_COMPLETE;
+ scf_debug_test_sys_int_tbl.STATUS_ExR = 0;
+
+ /* Fake receive data depends on which buffer the command used */
+ switch (scfcmdp->flag) {
+ case SCF_USE_SSBUF:
+ case SCF_USE_LSBUF:
+ /* Canned byte pattern 0x00..0x0f */
+ scf_debug_test_sys_int_tbl.rxsize = scfcmdp->rcount;
+ scf_debug_test_sys_int_tbl.RDATA0 = 0x00010203;
+ scf_debug_test_sys_int_tbl.RDATA1 = 0x04050607;
+ scf_debug_test_sys_int_tbl.RDATA2 = 0x08090a0b;
+ scf_debug_test_sys_int_tbl.RDATA3 = 0x0c0d0e0f;
+ break;
+
+ case SCF_USE_SLBUF:
+ /* RDATA0 carries the receive count for large buffers */
+ scf_debug_test_sys_int_tbl.rxsize = scfcmdp->rcount;
+ scf_debug_test_sys_int_tbl.RDATA0 = scfcmdp->rcount;
+ scf_debug_test_sys_int_tbl.RDATA1 = 0;
+ scf_debug_test_sys_int_tbl.RDATA2 = 0;
+ scf_debug_test_sys_int_tbl.RDATA3 = 0;
+ break;
+
+ default:
+ scf_debug_test_sys_int_tbl.rxsize = 0;
+ scf_debug_test_sys_int_tbl.RDATA0 = 0;
+ scf_debug_test_sys_int_tbl.RDATA1 = 0;
+ scf_debug_test_sys_int_tbl.RDATA2 = 0;
+ scf_debug_test_sys_int_tbl.RDATA3 = 0;
+ break;
+ }
+
+ /* Schedule pseudo interrupt delivery if not already pending */
+ if (scf_debug_test_intr_id == 0) {
+ scf_debug_test_intr_id =
+ timeout((void (*)())scf_debug_test_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(scf_debug_scfint_time)));
+ }
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_txreq_send()
+ *
+ * Debug-test hook: simulate the SCF side completing a DSCP Tx request.
+ * Builds the pseudo DSR/RxDCR/TxDSR state from the given Tx descriptor
+ * (optionally looping the data back as an Rx request), then schedules
+ * scf_debug_test_intr_tout() to deliver the pseudo IDBC interrupt.
+ * No-op unless scf_debug_idbcint_time is set.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_txreq_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_txreq_send() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ if (scf_debug_idbcint_time != 0) {
+ scf_debug_test_dscp_int_flag = SCF_DBF_DSCP_INT_ON;
+ scf_debug_test_sys_int_tbl.INT_ST |= INT_ST_IDBCINT;
+
+ if (dsc_p->dinfo.base.length != 0) {
+ if (scf_debug_test_dscp_loopback ==
+ SCF_DBF_DSCP_LOOPBACK_ON) {
+ /* Loopback: echo the Tx data as an Rx request */
+ scf_debug_test_dscp_int_tbl.DSR =
+ (DSR_RxREQ | DSR_TxACK | DSR_TxEND);
+
+ scf_debug_test_dscp_int_tbl.RxDCR_C_FLAG =
+ dsc_p->dinfo.base.c_flag;
+ scf_debug_test_dscp_int_tbl.RxDCR_OFFSET =
+ dsc_p->dinfo.base.offset;
+ scf_debug_test_dscp_int_tbl.RxDCR_LENGTH =
+ dsc_p->dinfo.base.length;
+ } else {
+ scf_debug_test_dscp_int_tbl.DSR =
+ (DSR_TxACK | DSR_TxEND);
+ }
+ } else {
+ /* Zero-length request: only signal Tx end */
+ scf_debug_test_dscp_int_tbl.DSR = DSR_TxEND;
+ }
+
+ /* TxDSR: keep high flag byte, report normal completion */
+ scf_debug_test_dscp_int_tbl.TxDSR_C_FLAG =
+ (dsc_p->dinfo.base.c_flag & 0xff00) | DSC_STATUS_NORMAL;
+ scf_debug_test_dscp_int_tbl.TxDSR_OFFSET =
+ dsc_p->dinfo.base.offset;
+
+ /* Schedule pseudo interrupt delivery if not already pending */
+ if (scf_debug_test_intr_id == 0) {
+ scf_debug_test_intr_id =
+ timeout((void (*)())scf_debug_test_intr_tout,
+ (void *)statep,
+ drv_usectohz(SCF_MIL2MICRO(scf_debug_idbcint_time)));
+ }
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/* ARGSUSED */
+/*
+ * scf_debug_test_event_handler()
+ *
+ * Debug-test mailbox event handler: logs the received event code.
+ * Suitable for registration via scf_mb_init(); has no side effects
+ * beyond SCFDBGMSG tracing.
+ *
+ * mevent - mailbox event code
+ * arg - registered callback argument (unused)
+ */
+void
+scf_debug_test_event_handler(scf_event_t mevent, void *arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_event_handler() "
+
+ SCFDBGMSG1(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start mevent = %d",
+ mevent);
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "=======================================");
+
+ switch (mevent) {
+ case SCF_MB_CONN_OK:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "SCF_MB_CONN_OK");
+ break;
+ case SCF_MB_MSG_DATA:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "SCF_MB_MSG_DATA");
+ break;
+ case SCF_MB_SPACE:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "SCF_MB_SPACE");
+ break;
+ case SCF_MB_DISC_ERROR:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "SCF_MB_DISC_ERROR");
+ break;
+ default:
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "Undefine event code");
+ break;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, "=======================================");
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_timer_stop()
+ *
+ * Debug-test hook: cancel any outstanding pseudo-interrupt and pseudo
+ * ALIVE timers.  Timer ids are collected and cleared under all_mutex,
+ * then untimeout(9F) is called with the mutex dropped (untimeout may
+ * block waiting for a running callback that needs the mutex).
+ */
+void
+scf_debug_test_timer_stop()
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_timer_stop() "
+ timeout_id_t save_tmid[4]; /* only 2 slots used today; room to grow */
+ int timer_cnt = 0;
+ int ii;
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (scf_debug_test_intr_id != 0) {
+ save_tmid[timer_cnt] = scf_debug_test_intr_id;
+ scf_debug_test_intr_id = 0;
+ timer_cnt++;
+ }
+ if (scf_debug_test_alive_id != 0) {
+ save_tmid[timer_cnt] = scf_debug_test_alive_id;
+ scf_debug_test_alive_id = 0;
+ timer_cnt++;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Cancel outside the mutex */
+ for (ii = 0; ii < timer_cnt; ii++) {
+ (void) untimeout(save_tmid[ii]);
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_map_regs()
+ *
+ * Debug-test hook: point the soft state's register/SRAM pointers at
+ * in-memory debug tables instead of mapped device registers, and mark
+ * all register resources as acquired.  Counterpart of
+ * scf_debug_test_unmap_regs().  Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_map_regs(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_map_regs() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ statep->scf_regs = &scf_debug_scf_regs_tbl;
+ statep->scf_regs_c = &scf_debug_scf_regs_c_tbl;
+ statep->scf_dscp_sram = &scf_debug_scf_dscp_sram_tbl;
+ statep->scf_sys_sram = &scf_debug_scf_sys_sram_tbl;
+ statep->scf_interface = &scf_debug_scf_interface_tbl;
+ statep->scf_reg_drvtrc = (void *)&scf_debug_scf_reg_drvtrc_tbl;
+ statep->scf_reg_drvtrc_len =
+ (off_t)sizeof (scf_debug_scf_reg_drvtrc_tbl);
+
+ /* Pretend all six register sets were successfully mapped */
+ statep->resource_flag |=
+ (S_DID_REG1 | S_DID_REG2 | S_DID_REG3 |
+ S_DID_REG4 | S_DID_REG5 | S_DID_REG6);
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_debug_test_unmap_regs()
+ *
+ * Debug-test hook: undo scf_debug_test_map_regs() -- NULL out the
+ * register/SRAM pointers and clear the register resource flags.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_debug_test_unmap_regs(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_debug_test_unmap_regs() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": start");
+
+ statep->scf_regs = NULL;
+ statep->scf_regs_c = NULL;
+ statep->scf_dscp_sram = NULL;
+ statep->scf_sys_sram = NULL;
+ statep->scf_interface = NULL;
+ statep->scf_reg_drvtrc = NULL;
+
+ statep->resource_flag &=
+ ~(S_DID_REG1 | S_DID_REG2 | S_DID_REG3 |
+ S_DID_REG4 | S_DID_REG5 | S_DID_REG6);
+
+ SCFDBGMSG(SCF_DBGFLAG_DBG, SCF_FUNC_NAME ": end");
+}
+#endif /* DEBUG */
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfdscp.c b/usr/src/uts/sun4u/opl/io/scfd/scfdscp.c
new file mode 100644
index 0000000000..bdee1377b4
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfdscp.c
@@ -0,0 +1,4450 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+#include <sys/scfd/scfdscp.h>
+
+/*
+ * DSCP control table
+ */
+scf_dscp_comtbl_t scf_dscp_comtbl; /* DSCP control table */
+
+mkey_t scf_dscp_mkey_search[] = {
+ DSCP_KEY, /* DSCP mailbox interface key */
+ DKMD_KEY /* DKMD mailbox interface key */
+ /* Add mailbox key */
+};
+
+/*
+ * SCF driver system control interface function
+ */
+void scf_dscp_init(void);
+void scf_dscp_fini(void);
+void scf_dscp_start(uint32_t factor);
+void scf_dscp_stop(uint32_t factor);
+void scf_dscp_intr(scf_state_t *statep);
+
+/*
+ * Timeout function : from SCF driver timer control function
+ */
+void scf_dscp_ack_tout(void);
+void scf_dscp_end_tout(void);
+void scf_dscp_busy_tout(void);
+void scf_dscp_callback_tout(void);
+void scf_dscp_callback(void);
+
+/*
+ * Interrupt function : from scf_dscp_intr()
+ */
+void scf_dscp_txack_recv(scf_state_t *statep);
+void scf_dscp_txend_recv(scf_state_t *statep);
+void scf_dscp_rxreq_recv(scf_state_t *statep);
+
+/*
+ * Main and Tx/Rx interface function
+ */
+void scf_dscp_txend_notice(scf_dscp_main_t *mainp);
+void scf_dscp_txrelbusy_notice(scf_dscp_main_t *mainp);
+void scf_dscp_rxreq_notice(scf_dscp_main_t *mainp);
+void scf_dscp_rxdata_notice(scf_dscp_main_t *mainp);
+
+/*
+ * Tx subroutine function
+ */
+void scf_dscp_send_matrix(void);
+void scf_dscp_txreq_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p);
+
+/*
+ * Rx subroutine function
+ */
+void scf_dscp_recv_matrix(void);
+void scf_dscp_rxack_send(scf_state_t *statep);
+void scf_dscp_rxend_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p);
+
+/*
+ * subroutine function
+ */
+void scf_dscp_dscbuff_free_all(void);
+void scf_dscp_txdscbuff_free(scf_dscp_main_t *mainp);
+void scf_dscp_rxdscbuff_free(scf_dscp_main_t *mainp);
+void scf_dscp_rdata_free(scf_dscp_main_t *mainp);
+void scf_dscp_event_queue(scf_dscp_main_t *mainp, scf_event_t mevent);
+void scf_dscp_event_queue_free(scf_dscp_main_t *mainp);
+scf_dscp_main_t *scf_dscp_mkey2mainp(mkey_t mkey);
+scf_dscp_main_t *scf_dscp_id2mainp(uint8_t id);
+uint16_t scf_dscp_sram_get(void);
+void scf_dscp_sram_free(uint16_t offset);
+
+
+/*
+ * DSCP Driver interface function
+ */
+
+/*
+ * scf_mb_init()
+ *
+ * Description: Initialize the mailbox and register a callback for receiving
+ * events related to the specified mailbox.
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - mailbox key
+ * event_handler- handler to be called for all events related
+ * to a mailbox. It should be called back with
+ * the event type and the registered argument.
+ *
+ * arg - A callback argument to be passed back to the
+ * event_handler.
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned, some of the notable error values
+ * are given below.
+ * EINVAL - Invalid values.
+ * EEXIST - Already OPEN.
+ * EIO - DSCP I/F path not available.
+ */
+int
+scf_mb_init(target_id_t target_id, mkey_t mkey,
+ void (*event_handler) (scf_event_t mevent, void *arg), void *arg)
+{
+#define SCF_FUNC_NAME "scf_mb_init() "
+ scf_dscp_main_t *mainp; /* Main table address */
+ scf_dscp_dsc_t *dsc_p; /* TxDSC address */
+ int path_ret; /* SCF path status return value */
+ int ret = 0; /* Return value */
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+ mkey);
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_IN, __LINE__, &mkey, sizeof (mkey));
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Check target_id : only the SP (id 0) is a valid peer on a Domain */
+ if (target_id != 0) {
+ /* Invalid "target_id" */
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_ERRCD, __LINE__, &target_id,
+ sizeof (target_id));
+ ret = EINVAL;
+ goto END_mb_init;
+ }
+
+ /* Get main table address from "mkey" */
+ mainp = scf_dscp_mkey2mainp(mkey);
+
+ /* Check mainp address */
+ if (mainp == NULL) {
+ /* Invalid "mkey" */
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_ERRCD, __LINE__, &mkey,
+ sizeof (mkey));
+ ret = EINVAL;
+ goto END_mb_init;
+ }
+
+ /* Check "event_handler" address */
+ if (event_handler == NULL) {
+ /* Invalid "event_handler" */
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_ERRCD, __LINE__,
+ &event_handler, sizeof (event_handler));
+ ret = EINVAL;
+ goto END_mb_init;
+ }
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(NULL);
+
+ /* Check SCF path status */
+ if (path_ret == SCF_PATH_HALT) {
+ /* SCF path status is halt */
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_ERRCD, __LINE__, &path_ret,
+ sizeof (path_ret));
+ ret = EIO;
+ goto END_mb_init;
+ }
+
+ /* Check main status : EEXIST if this mailbox is already open */
+ if (mainp->status != SCF_ST_IDLE) {
+ /* Main status != A0 */
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_ERRCD, __LINE__,
+ &mainp->status, sizeof (mainp->status));
+ ret = EEXIST;
+ goto END_mb_init;
+ }
+
+ /* Initialize flag */
+ mainp->conn_chk_flag = FLAG_OFF;
+ mainp->putmsg_busy_flag = FLAG_OFF;
+
+ /* Get TxDSC address (next free slot in the Tx descriptor ring) */
+ dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_put];
+
+ /* Make Tx descriptor : INIT_REQ */
+ dsc_p->dinfo.base.c_flag = DSC_FLAG_DEFAULT;
+ dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+ dsc_p->dinfo.base.length = 0;
+ dsc_p->dinfo.base.dscp_datap = NULL;
+ dsc_p->dinfo.bdcr.id = mainp->id & DSC_CNTL_MASK_ID;
+ dsc_p->dinfo.bdcr.code = DSC_CNTL_INIT_REQ;
+
+ /* Update Tx descriptor offset (ring wrap-around) */
+ if (scf_dscp_comtbl.tx_put == scf_dscp_comtbl.tx_last) {
+ scf_dscp_comtbl.tx_put = scf_dscp_comtbl.tx_first;
+ } else {
+ scf_dscp_comtbl.tx_put++;
+ }
+
+ /* Update Tx descriptor count */
+ scf_dscp_comtbl.tx_dsc_count++;
+
+ /* Change TxDSC status (SB2) */
+ SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+
+ /* Call send matrix to start transmitting the INIT_REQ */
+ scf_dscp_send_matrix();
+
+ /* Change main status (B0) */
+ SCF_SET_STATUS(mainp, SCF_ST_EST_TXEND_RECV_WAIT);
+
+ /* Save parameter */
+ mainp->event_handler = event_handler;
+ mainp->arg = arg;
+ mainp->target_id = target_id;
+ mainp->mkey = mkey;
+
+/*
+ * END_mb_init
+ */
+ END_mb_init:
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop : untimeout(9F) must run with the mutex dropped */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ SC_DBG_DRV_TRACE(TC_MB_INIT | TC_OUT, __LINE__, &ret, sizeof (ret));
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_mb_fini()
+ *
+ * Description: Cleanup the mailbox and unregister an event_handler,
+ * if it is registered.
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - mailbox key
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned, some of the notable error values
+ * are given below.
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ */
+int
+scf_mb_fini(target_id_t target_id, mkey_t mkey)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_fini() "
+ scf_dscp_main_t *mainp; /* Main table address */
+ scf_dscp_dsc_t *dsc_p; /* TxDSC address */
+ int path_ret; /* SCF path status return value */
+ int ret = 0; /* Return value */
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+ mkey);
+ SC_DBG_DRV_TRACE(TC_MB_FINI | TC_IN, __LINE__, &mkey, sizeof (mkey));
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Check target_id : only the SP (id 0) is a valid peer on a Domain */
+ if (target_id != 0) {
+ /* Invalid "target_id" */
+ SC_DBG_DRV_TRACE(TC_MB_FINI | TC_ERRCD, __LINE__, &target_id,
+ sizeof (target_id));
+ ret = EINVAL;
+ goto END_mb_fini;
+ }
+
+ /* Get main table address from "mkey" */
+ mainp = scf_dscp_mkey2mainp(mkey);
+
+ /* Check mainp address */
+ if (mainp == NULL) {
+ /* Invalid "mkey" */
+ SC_DBG_DRV_TRACE(TC_MB_FINI | TC_ERRCD, __LINE__, &mkey,
+ sizeof (mkey));
+ ret = EINVAL;
+ goto END_mb_fini;
+ }
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(NULL);
+
+ /* Check SCF path status */
+ if (path_ret == SCF_PATH_HALT) {
+ /*
+ * SCF path status is halt : no FINI_REQ can be sent, so
+ * tear down local state immediately and return success.
+ */
+ if (mainp->status != SCF_ST_IDLE) {
+ /* TxDSC buffer release */
+ scf_dscp_txdscbuff_free(mainp);
+
+ /* RxDSC buffer release */
+ scf_dscp_rxdscbuff_free(mainp);
+
+ /* All queueing event release */
+ scf_dscp_event_queue_free(mainp);
+
+ /* All receive buffer release */
+ scf_dscp_rdata_free(mainp);
+
+ /* event_handler and arg NULL */
+ mainp->event_handler = NULL;
+ mainp->arg = NULL;
+
+ /* Change main status (A0) */
+ SCF_SET_STATUS(mainp, SCF_ST_IDLE);
+ }
+ goto END_mb_fini;
+ }
+
+ /* Check main status */
+ switch (mainp->status) {
+ case SCF_ST_EST_TXEND_RECV_WAIT: /* Main status (B0) */
+ case SCF_ST_ESTABLISHED: /* Main status (C0) */
+ /* Get TxDSC address */
+ dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_put];
+
+ /* Make Tx descriptor : FINI_REQ */
+ dsc_p->dinfo.base.c_flag = DSC_FLAG_DEFAULT;
+ dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+ dsc_p->dinfo.base.length = 0;
+ dsc_p->dinfo.base.dscp_datap = NULL;
+ dsc_p->dinfo.bdcr.id = mainp->id & DSC_CNTL_MASK_ID;
+ dsc_p->dinfo.bdcr.code = DSC_CNTL_FINI_REQ;
+
+ /* Update Tx descriptor offset (ring wrap-around) */
+ if (scf_dscp_comtbl.tx_put == scf_dscp_comtbl.tx_last) {
+ scf_dscp_comtbl.tx_put = scf_dscp_comtbl.tx_first;
+ } else {
+ scf_dscp_comtbl.tx_put++;
+ }
+
+ /* Update Tx descriptor count */
+ scf_dscp_comtbl.tx_dsc_count++;
+
+ /* Change TxDSC status (SB2) */
+ SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+
+ /* Call send matrix */
+ scf_dscp_send_matrix();
+
+ /* Change main status (D0) */
+ SCF_SET_STATUS(mainp, SCF_ST_CLOSE_TXEND_RECV_WAIT);
+
+ /*
+ * TxEND(FINI) receive wait : block until the interrupt
+ * side clears fini_wait_flag and signals fini_cv.
+ */
+ SC_DBG_DRV_TRACE(TC_W_SIG, __LINE__, &mainp->fini_cv,
+ sizeof (kcondvar_t));
+ mainp->fini_wait_flag = FLAG_ON;
+ while (mainp->fini_wait_flag == FLAG_ON) {
+ cv_wait(&mainp->fini_cv, &scf_comtbl.all_mutex);
+ }
+
+ /* TxDSC buffer release */
+ scf_dscp_txdscbuff_free(mainp);
+
+ /* RxDSC buffer release */
+ scf_dscp_rxdscbuff_free(mainp);
+
+ /* All queueing event release */
+ scf_dscp_event_queue_free(mainp);
+
+ /* All receive buffer release */
+ scf_dscp_rdata_free(mainp);
+
+ /* event_handler and arg NULL */
+ mainp->event_handler = NULL;
+ mainp->arg = NULL;
+
+ /* Change main status (A0) */
+ SCF_SET_STATUS(mainp, SCF_ST_IDLE);
+ break;
+
+ case SCF_ST_EST_FINI_WAIT: /* Main status (C1) */
+ /* Peer already finished: release local state only */
+ /* TxDSC buffer release */
+ scf_dscp_txdscbuff_free(mainp);
+
+ /* RxDSC buffer release */
+ scf_dscp_rxdscbuff_free(mainp);
+
+ /* All queueing event release */
+ scf_dscp_event_queue_free(mainp);
+
+ /* All receive buffer release */
+ scf_dscp_rdata_free(mainp);
+
+ /* event_handler and arg NULL */
+ mainp->event_handler = NULL;
+ mainp->arg = NULL;
+
+ /* Change main status (A0) */
+ SCF_SET_STATUS(mainp, SCF_ST_IDLE);
+ break;
+
+ case SCF_ST_IDLE: /* Main status (A0) */
+ /* Main status == A0 is NOP */
+ break;
+
+ default:
+ /* Not open */
+ SC_DBG_DRV_TRACE(TC_MB_FINI | TC_ERRCD, __LINE__,
+ &mainp->status, TC_INFO_SIZE);
+ ret = EBADF;
+ break;
+ }
+
+/*
+ * END_mb_fini
+ */
+ END_mb_fini:
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop : untimeout(9F) must run with the mutex dropped */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ SC_DBG_DRV_TRACE(TC_MB_FINI | TC_OUT, __LINE__, &ret, sizeof (ret));
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_mb_putmsg()
+ *
+ * Description: Send a message via the mailbox identified by mkey. The message
+ * need to be sent either completely or none. That is, no partial
+ * messages should be sent.
+ *
+ * If a 0 timeout value is specified, then it should act as
+ * a non-blocking interface, that is, it should either send
+ * the message immediately or return appropriate error.
+ * If a timeout value is specified, then it can blocked
+ * until either the message is sent successfully or timedout.
+ *
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - Unique key corresponding to a mailbox.
+ * data_len - Total length of the data to be sent.
+ * num_sg - Number of scatter/gather elements in the argument sgp.
+ * sgp - Scatter/gather list pointer.
+ * timeout - timeout value in milliseconds. If 0 specified, no waiting
+ * is required.
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned, some of the notable error values
+ * are given below.
+ *
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ * EBUSY - Driver is BUSY.
+ * ENOSPC - Not enough space to send the message.
+ * EIO - DSCP I/F path not available.
+ */
+/* ARGSUSED */
+int
+scf_mb_putmsg(target_id_t target_id, mkey_t mkey, uint32_t data_len,
+ uint32_t num_sg, mscat_gath_t *sgp, clock_t timeout)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_putmsg() "
+ scf_dscp_main_t *mainp; /* Main table address */
+ scf_dscp_dsc_t *dsc_p; /* Current TxDSC address */
+ caddr_t wkaddr; /* Working value : buffer address */
+ uint32_t wkleng = 0; /* Working value : length */
+ uint32_t wkoffset; /* Working value : Tx SRAM offset */
+ int ii; /* Working value : counter */
+ int path_ret; /* SCF path status return value */
+ int ret = 0; /* Return value */
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+ mkey);
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_IN, __LINE__, &mkey, sizeof (mkey));
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Check target_id : only the SP (id 0) is a valid peer on a Domain */
+ if (target_id != 0) {
+ /* Invalid "target_id" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__, &target_id,
+ sizeof (target_id));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+
+ /* Get main table address from "mkey" */
+ mainp = scf_dscp_mkey2mainp(mkey);
+
+ /* Check mainp address */
+ if (mainp == NULL) {
+ /* Invalid "mkey" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__, &mkey,
+ sizeof (mkey));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(NULL);
+
+ /* Check SCF path status */
+ if (path_ret == SCF_PATH_HALT) {
+ /* SCF path status halt */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__, &path_ret,
+ sizeof (path_ret));
+ ret = EIO;
+ goto END_mb_putmsg;
+ }
+
+ /* Check main status */
+ switch (mainp->status) {
+ case SCF_ST_ESTABLISHED: /* Main status (C0) */
+ /* Check "data_len" is "maxdatalen" */
+ if (data_len > scf_dscp_comtbl.maxdatalen) {
+ /* Invalid "data_len" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &data_len, sizeof (data_len));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+
+ /* Check "data_len" is 0 : empty message is a no-op success */
+ if (data_len == 0) {
+ goto END_mb_putmsg;
+ }
+
+ /*
+ * Check "num_sg" is not 0, and "sgp" is not NULL
+ */
+ if ((num_sg == 0) || (sgp == NULL)) {
+ /* Invalid "num_sg" or "sgp" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &num_sg, sizeof (num_sg));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+
+ /*
+ * Get total data length : "num_sg"
+ * (an element with a non-zero length must carry a
+ * non-NULL data pointer)
+ */
+ for (ii = 0; ii < num_sg; ii++) {
+ if ((sgp[ii].msc_len == 0) ||
+ (sgp[ii].msc_dptr != NULL)) {
+ /*
+ * Add total data length
+ */
+ wkleng += sgp[ii].msc_len;
+ } else {
+ /* Invalid "sgp" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD,
+ __LINE__, &ii, sizeof (ii));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+ }
+
+ /*
+ * Check "data_len" and "wkleng"
+ */
+ if (data_len != wkleng) {
+ /* Invalid "data_len" */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &data_len, sizeof (data_len));
+ ret = EINVAL;
+ goto END_mb_putmsg;
+ }
+
+ /*
+ * Check Tx SRAM space
+ */
+ if (scf_dscp_comtbl.tx_dsc_count >=
+ scf_dscp_comtbl.txdsc_busycount) {
+ /* No space of Tx SRAM */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &scf_dscp_comtbl.tx_dsc_count,
+ sizeof (scf_dscp_comtbl.tx_dsc_count));
+
+ /* putmsg ENOSPC counter up */
+ mainp->memo_putmsg_enospc_cnt++;
+
+ /* Caller gets SCF_MB_SPACE event when space frees up */
+ mainp->putmsg_busy_flag = FLAG_ON;
+ ret = ENOSPC;
+ goto END_mb_putmsg;
+ }
+
+ /*
+ * Tx buffer allocation
+ * NOTE(review): KM_SLEEP allocation while holding
+ * all_mutex -- presumably acceptable for this driver's
+ * lock ordering; verify against memory-pressure paths.
+ */
+ wkaddr = (caddr_t)kmem_zalloc(wkleng, KM_SLEEP);
+
+ /* Get Tx SRAM offset */
+ wkoffset = scf_dscp_sram_get();
+ /* Check Tx SRAM offset */
+ if (wkoffset == TX_SRAM_GET_ERROR) {
+ /* Tx SRAM offset failure */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &wkoffset, sizeof (wkoffset));
+
+ /* Send data release */
+ kmem_free(wkaddr, wkleng);
+
+ /* putmsg busy counter up */
+ mainp->memo_putmsg_busy_cnt++;
+
+ ret = EBUSY;
+ goto END_mb_putmsg;
+ }
+
+ /* Get TxDSC address */
+ dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_put];
+
+ /* Make Tx descriptor : DATA_REQ */
+ dsc_p->dinfo.base.c_flag = DSC_FLAG_DEFAULT;
+ dsc_p->dinfo.base.offset = (uint16_t)wkoffset;
+ dsc_p->dinfo.base.length = wkleng;
+ dsc_p->dinfo.base.dscp_datap = wkaddr;
+ dsc_p->dinfo.bdcr.id = mainp->id & DSC_CNTL_MASK_ID;
+ dsc_p->dinfo.bdcr.code = DSC_CNTL_DATA_REQ;
+
+ /* Data copy to Tx buffer : gather the scatter list */
+ for (ii = 0; ii < num_sg; ii++) {
+ if (sgp[ii].msc_len != 0) {
+ bcopy(sgp[ii].msc_dptr, wkaddr,
+ sgp[ii].msc_len);
+ wkaddr += sgp[ii].msc_len;
+ }
+ }
+
+ /* Update Tx descriptor offset (ring wrap-around) */
+ if (scf_dscp_comtbl.tx_put == scf_dscp_comtbl.tx_last) {
+ scf_dscp_comtbl.tx_put = scf_dscp_comtbl.tx_first;
+ } else {
+ scf_dscp_comtbl.tx_put++;
+ }
+
+ /* Update Tx descriptor count */
+ scf_dscp_comtbl.tx_dsc_count++;
+
+ /* Change TxDSC status (SB0) */
+ SCF_SET_DSC_STATUS(dsc_p,
+ SCF_TX_ST_SRAM_TRANS_WAIT);
+
+ /* Call send matrix */
+ scf_dscp_send_matrix();
+
+ /* Tx DATA_REQ counter */
+ mainp->memo_tx_data_req_cnt++;
+ break;
+
+ case SCF_ST_EST_FINI_WAIT: /* Main status (C1) */
+ /* Main status == C1 is NOP */
+ break;
+
+ default:
+ /* Not open */
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_ERRCD, __LINE__,
+ &mainp->status, TC_INFO_SIZE);
+ ret = EBADF;
+ break;
+ }
+
+/*
+ * END_mb_putmsg
+ */
+ END_mb_putmsg:
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop : untimeout(9F) must run with the mutex dropped */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ SC_DBG_DRV_TRACE(TC_MB_PUTMSG | TC_OUT, __LINE__, &ret, sizeof (ret));
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_mb_canget()
+ *
+ * Description: Checks if a message received in the specified mailbox.
+ * If there is a message received, then the length of the
+ * message is passed via the argument data_lenp. Otherwise,
+ * return an appropriate error value.
+ *
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - Unique key corresponding to a mailbox.
+ * data_lenp - A pointer to uint32_t, in which the size of the message
+ * is returned.
+ *
+ * Return Values: returns 0 if a message is present, otherwise an appropriate
+ * errno value is returned.
+ *
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ * ENOMSG - No message available.
+ * EIO - DSCP I/F path not available.
+ */
+int
+scf_mb_canget(target_id_t target_id, mkey_t mkey, uint32_t *data_lenp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_canget() "
+ scf_dscp_main_t *mainp; /* Main table address */
+ scf_rdata_que_t *rdt_p; /* Current receive data queue address */
+ int path_ret; /* SCF path status return value */
+ int ret = 0; /* Return value */
+
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+ mkey);
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_IN, __LINE__, &mkey, sizeof (mkey));
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Check target_id : only the SP (id 0) is a valid peer on a Domain */
+ if (target_id != 0) {
+ /* Invalid "target_id" */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_ERRCD, __LINE__, &target_id,
+ sizeof (target_id));
+ ret = EINVAL;
+ goto END_mb_canget;
+ }
+
+ /* Get main table address from "mkey" */
+ mainp = scf_dscp_mkey2mainp(mkey);
+
+ /* Check mainp address */
+ if (mainp == NULL) {
+ /* Invalid "mkey" */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_ERRCD, __LINE__, &mkey,
+ sizeof (mkey));
+ ret = EINVAL;
+ goto END_mb_canget;
+ }
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(NULL);
+
+ /* Check SCF path status */
+ if (path_ret == SCF_PATH_HALT) {
+ /* SCF path status halt */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_ERRCD, __LINE__, &path_ret,
+ sizeof (path_ret));
+ ret = EIO;
+ goto END_mb_canget;
+ }
+
+ /* Check main status */
+ switch (mainp->status) {
+ case SCF_ST_ESTABLISHED: /* Main status (C0) */
+ case SCF_ST_EST_FINI_WAIT: /* Main status (C1) */
+ /* Check "data_lenp" address */
+ if (data_lenp == NULL) {
+ /* Invalid "data_lenp" */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_ERRCD, __LINE__,
+ &data_lenp, sizeof (data_lenp));
+
+ ret = EINVAL;
+ goto END_mb_canget;
+ }
+
+ /* Check receive data count */
+ if (mainp->rd_count != 0) {
+ /* Set length of the message at the queue head */
+ rdt_p = &mainp->rd_datap[mainp->rd_get];
+ *data_lenp = rdt_p->length;
+ } else {
+ /* Set receive data length is 0 : No messages */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET, __LINE__,
+ &mainp->rd_count,
+ sizeof (mainp->rd_count));
+ *data_lenp = 0;
+ ret = ENOMSG;
+ }
+ break;
+
+ default:
+ /* Not open */
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_ERRCD, __LINE__,
+ &mainp->status, TC_INFO_SIZE);
+ ret = EBADF;
+ break;
+ }
+
+/*
+ * END_mb_canget
+ */
+ END_mb_canget:
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ SC_DBG_DRV_TRACE(TC_MB_CANGET | TC_OUT, __LINE__, &ret, sizeof (ret));
+ SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_mb_getmsg()
+ *
+ * Description: Get a message from the specified mailbox. A message need to
+ * be received either completely or none, that is, no partial
+ * messages should be received.
+ *
+ * If a 0 timeout value is specified, then it should act as a
+ * non-blocking interface, that is, it should either return
+ * a message from the mailbox or return appropriate error.
+ * If a timeout value is specified, then the call can block
+ * until either the message is received successfully or the
+ * timeout expires.
+ *
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - Unique key corresponding to a mailbox.
+ * data_len - Total length of data buffers passed via scatter/gather list.
+ * num_sg - Number of scatter/gather elements in the argument sgp.
+ * sgp - Scatter/gather list pointer.
+ * timeout - timeout value in milliseconds. If 0 specified, no waiting
+ * is required.
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned, some of the notable error values
+ * are given below.
+ *
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ * EMSGSIZE - Specified receive data size unmatched.
+ * ENOMSG - No message available.
+ * EIO - DSCP I/F path not available.
+ */
+/* ARGSUSED */
+int
+scf_mb_getmsg(target_id_t target_id, mkey_t mkey, uint32_t data_len,
+    uint32_t num_sg, mscat_gath_t *sgp, clock_t timeout)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_getmsg() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	scf_rdata_que_t	*rdt_p;		/* Current receive data queue address */
+	caddr_t		wkaddr;		/* Working value : buffer address */
+	uint32_t	wkleng = 0;	/* Working value : length */
+	int		ii;		/* Working value : counter */
+	int		path_ret;	/* SCF path status return value */
+	int		ret = 0;	/* Return value */
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+	    mkey);
+	SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_IN, __LINE__, &mkey, sizeof (mkey));
+	/* Lock driver mutex */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check target_id : only 0 (the SP peer) is valid on a Domain */
+	if (target_id != 0) {
+		/* Invalid "target_id" */
+		SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__, &target_id,
+		    sizeof (target_id));
+		ret = EINVAL;
+		goto END_mb_getmsg;
+	}
+
+	/* Get main table address from "mkey" */
+	mainp = scf_dscp_mkey2mainp(mkey);
+
+	/* Check mainp address */
+	if (mainp == NULL) {
+		/* Invalid "mkey" */
+		SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__, &mkey,
+		    sizeof (mkey));
+		ret = EINVAL;
+		goto END_mb_getmsg;
+	}
+
+	/* Get SCF path status */
+	path_ret = scf_path_check(NULL);
+
+	/* Check SCF path status */
+	if (path_ret == SCF_PATH_HALT) {
+		/* SCF path status halt */
+		SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__, &path_ret,
+		    sizeof (path_ret));
+		ret = EIO;
+		goto END_mb_getmsg;
+	}
+
+	switch (mainp->status) {
+	case SCF_ST_ESTABLISHED:		/* Main status (C0) */
+	case SCF_ST_EST_FINI_WAIT:		/* Main status (C1) */
+		/* Check "data_len" against the per-message maximum */
+		if ((data_len == 0) ||
+		    (data_len > scf_dscp_comtbl.maxdatalen)) {
+			/* Unmatched "data_len" */
+			SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__,
+			    &data_len, sizeof (data_len));
+			ret = EMSGSIZE;
+			goto END_mb_getmsg;
+		}
+
+		/* Is num_sg and sgp valid? */
+		if ((num_sg == 0) || (sgp == NULL)) {
+			/* Invalid "num_sg" or "sgp" */
+			SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD,
+			    __LINE__, &num_sg, sizeof (num_sg));
+			ret = EINVAL;
+			goto END_mb_getmsg;
+		}
+		/* Is there receive data? */
+		if (mainp->rd_count == 0) {
+			/* No message */
+			SC_DBG_DRV_TRACE(TC_MB_GETMSG, __LINE__,
+			    &mainp->rd_count,
+			    sizeof (mainp->rd_count));
+			ret = ENOMSG;
+			goto END_mb_getmsg;
+		}
+
+		/*
+		 * Get total data length : "num_sg"
+		 * An element is valid if it is empty (msc_len == 0) or
+		 * has a buffer pointer; a non-zero length with a NULL
+		 * pointer is rejected.
+		 * NOTE(review): wkleng is uint32_t, so a pathological sg
+		 * list could wrap the sum — presumed bounded by data_len
+		 * validation above; confirm with callers.
+		 */
+		for (ii = 0; ii < num_sg; ii++) {
+			if ((sgp[ii].msc_len == 0) ||
+			    (sgp[ii].msc_dptr != NULL)) {
+				/*
+				 * Add total data length
+				 */
+				wkleng += sgp[ii].msc_len;
+			} else {
+				/* Invalid "sgp" */
+				SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD,
+				    __LINE__, &sgp, sizeof (sgp));
+				ret = EINVAL;
+				goto END_mb_getmsg;
+			}
+		}
+		/* Check "data_len" and "wkleng" */
+		if (data_len != wkleng) {
+			/* Unmatched "data_len" */
+			SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__,
+			    &data_len, sizeof (data_len));
+			ret = EMSGSIZE;
+			goto END_mb_getmsg;
+		}
+
+		/* Get receive data queue address (head of the queue) */
+		rdt_p = &mainp->rd_datap[mainp->rd_get];
+
+		/*
+		 * Check "data_len" and receive data length : the message
+		 * must be consumed completely or not at all
+		 */
+		if (data_len != rdt_p->length) {
+			/* Unmatched data_len */
+			SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD,
+			    __LINE__, &data_len, sizeof (data_len));
+			ret = EMSGSIZE;
+			goto END_mb_getmsg;
+		}
+
+		/* Data copy to "sgp" */
+		wkaddr = rdt_p->rdatap;
+		for (ii = 0; ii < num_sg; ii++) {
+			if (sgp[ii].msc_len != 0) {
+				bcopy(wkaddr, sgp[ii].msc_dptr,
+				    sgp[ii].msc_len);
+				wkaddr += sgp[ii].msc_len;
+			}
+		}
+		/* Receive data release */
+		kmem_free(rdt_p->rdatap, rdt_p->length);
+		rdt_p->rdatap = NULL;
+
+		/* Update receive data queue (advance get index, with wrap) */
+		if (mainp->rd_get == mainp->rd_last) {
+			mainp->rd_get = mainp->rd_first;
+		} else {
+			mainp->rd_get++;
+		}
+
+		/* Update receive data queue count */
+		mainp->rd_count--;
+		break;
+
+	default:
+		/* Not open */
+		SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_ERRCD, __LINE__,
+		    &mainp->status, TC_INFO_SIZE);
+		ret = EBADF;
+		break;
+	}
+
+/*
+ * END_mb_getmsg
+ */
+	END_mb_getmsg:
+
+	/* Unlock driver mutex */
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	SC_DBG_DRV_TRACE(TC_MB_GETMSG | TC_OUT, __LINE__, &ret, sizeof (ret));
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_mb_flush()
+ *
+ * Description: Flush messages from a specified mailbox.
+ *
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - Unique key corresponding to a mailbox.
+ * flush_type - Specifies what type of flush is desired.
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned.
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ */
+int
+scf_mb_flush(target_id_t target_id, mkey_t mkey, mflush_type_t flush_type)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_flush() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	int		ret = 0;	/* Return value */
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+	    mkey);
+	SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_IN, __LINE__, &mkey, sizeof (mkey));
+
+	/* Lock driver mutex */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check target_id : only 0 (the SP peer) is valid on a Domain */
+	if (target_id != 0) {
+		/* Invalid "target_id" */
+		SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_ERRCD, __LINE__, &target_id,
+		    sizeof (target_id));
+		ret = EINVAL;
+		goto END_mb_flush;
+	}
+
+	/* Get main table address from "mkey" */
+	mainp = scf_dscp_mkey2mainp(mkey);
+
+	/* Check mainp address */
+	if (mainp == NULL) {
+		/* Invalid "mkey" */
+		SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_ERRCD, __LINE__, &mkey,
+		    sizeof (mkey));
+		ret = EINVAL;
+		goto END_mb_flush;
+	}
+
+	switch (mainp->status) {
+	case SCF_ST_EST_TXEND_RECV_WAIT:	/* Main status (B0) */
+	case SCF_ST_ESTABLISHED:		/* Main status (C0) */
+	case SCF_ST_EST_FINI_WAIT:		/* Main status (C1) */
+		switch (flush_type) {
+		case MB_FLUSH_SEND:
+		case MB_FLUSH_RECEIVE:
+		case MB_FLUSH_ALL:
+			/* SEND and ALL flush the transmit side */
+			if (flush_type != MB_FLUSH_RECEIVE) {
+				/* TxDSC buffer release */
+				scf_dscp_txdscbuff_free(mainp);
+			}
+			/* RECEIVE and ALL flush the receive side */
+			if (flush_type != MB_FLUSH_SEND) {
+				/* RxDSC buffer release */
+				scf_dscp_rxdscbuff_free(mainp);
+
+				/* All queued event release */
+				scf_dscp_event_queue_free(mainp);
+
+				/* All receive buffer release */
+				scf_dscp_rdata_free(mainp);
+			}
+			break;
+
+		default:
+
+			/* Invalid "flush_type" */
+			SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_ERRCD, __LINE__,
+			    &flush_type, sizeof (flush_type));
+			ret = EINVAL;
+			break;
+		}
+		break;
+
+	default:
+		/* Not open */
+		SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_ERRCD, __LINE__,
+		    &mainp->status, TC_INFO_SIZE);
+		ret = EBADF;
+		break;
+	}
+
+/*
+ * END_mb_flush
+ */
+	END_mb_flush:
+
+	/* Unlock driver mutex */
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	SC_DBG_DRV_TRACE(TC_MB_FLUSH | TC_OUT, __LINE__, &ret, sizeof (ret));
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_mb_ctrl()
+ *
+ * Description: This interface provides a way to obtain any specific
+ * properties of a mailbox, such as maximum size of the
+ * message which could be transmitted/received etc.
+ *
+ * Arguments:
+ *
+ * target_id - The target_id of the peer. It must be 0 on a Domain.
+ * mkey - Unique key corresponding to a mailbox.
+ * op - an operation.
+ * arg - argument specific to the operation.
+ *
+ * Return Values: returns 0 on success, otherwise any meaningful errno
+ * values are returned.
+ *
+ * EINVAL - Invalid values.
+ * EBADF - Specified target_id is not OPEN.
+ * ENOTSUP - Not supported.
+ */
+int
+scf_mb_ctrl(target_id_t target_id, mkey_t mkey, uint32_t op, void *arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_mb_ctrl() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	uint32_t	*wkarg;		/* Working value : arg */
+	int		ret = 0;	/* Return value */
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start mkey = 0x%08x",
+	    mkey);
+	SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_IN, __LINE__, &mkey, sizeof (mkey));
+
+	/* Lock driver mutex */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check target_id : only 0 (the SP peer) is valid on a Domain */
+	if (target_id != 0) {
+		/* Invalid "target_id" */
+		SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_ERRCD, __LINE__, &target_id,
+		    sizeof (target_id));
+		ret = EINVAL;
+		goto END_mb_ctrl;
+	}
+
+	/* Get main table address from "mkey" */
+	mainp = scf_dscp_mkey2mainp(mkey);
+
+	/* Check mainp address */
+	if (mainp == NULL) {
+		/* Invalid "mkey" */
+		SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_ERRCD, __LINE__, &mkey,
+		    sizeof (mkey));
+		ret = EINVAL;
+		goto END_mb_ctrl;
+	}
+
+	switch (mainp->status) {
+	case SCF_ST_EST_TXEND_RECV_WAIT:	/* Main status (B0) */
+	case SCF_ST_ESTABLISHED:		/* Main status (C0) */
+	case SCF_ST_EST_FINI_WAIT:		/* Main status (C1) */
+		/* Check "arg" address */
+		if (arg == NULL) {
+			/* Invalid "arg" */
+			SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_ERRCD, __LINE__,
+			    &arg, sizeof (arg));
+			ret = EINVAL;
+			goto END_mb_ctrl;
+		}
+
+		/* Check "op" */
+		switch (op) {
+		case SCF_MBOP_MAXMSGSIZE:
+			/*
+			 * Notifies max send/receive
+			 * data size
+			 */
+			SC_DBG_DRV_TRACE(TC_MB_CTRL, __LINE__,
+			    &scf_dscp_comtbl.maxdatalen,
+			    sizeof (scf_dscp_comtbl.maxdatalen));
+
+			/* Set send/receive data size into caller's arg */
+			wkarg = (uint32_t *)arg;
+			*wkarg = scf_dscp_comtbl.maxdatalen;
+			break;
+
+		default:
+			/* Not supported operation */
+			SC_DBG_DRV_TRACE(TC_MB_CTRL, __LINE__, &op,
+			    sizeof (op));
+			ret = ENOTSUP;
+			break;
+		}
+		break;
+
+	default:
+		/* Not open */
+		SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_ERRCD,
+		    __LINE__, &mainp->status, TC_INFO_SIZE);
+		ret = EBADF;
+		break;
+	}
+
+/*
+ * END_mb_ctrl
+ */
+	END_mb_ctrl:
+
+	/* Unlock driver mutex */
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	SC_DBG_DRV_TRACE(TC_MB_CTRL | TC_OUT, __LINE__, &ret, sizeof (ret));
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * SCF driver system control interface functions
+ */
+
+/*
+ * scf_dscp_init()
+ *
+ * Description: DSCP control area initialization processing.
+ *
+ */
+void
+scf_dscp_init(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_init() "
+	scf_dscp_main_t	*mainp = NULL;	/* Main table address */
+	scf_dscp_dsc_t	*dsc_p;		/* TxDSC address */
+	scf_tx_sram_t	*sram_p;	/* Tx SRAM address */
+	int		ii;		/* Working value : counter */
+	int		jj;		/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/*
+	 * DSCP common table initialization
+	 */
+	/* Set size value */
+	scf_dscp_comtbl.maxdatalen = SCF_MB_MAXDATALEN;
+	scf_dscp_comtbl.total_buffsize = SCF_TOTAL_BUFFSIZE;
+	scf_dscp_comtbl.txbuffsize = SCF_TXBUFFSIZE;
+	scf_dscp_comtbl.rxbuffsize = SCF_RXBUFFSIZE;
+
+	/* Set max count */
+	scf_dscp_comtbl.txsram_maxcount = SCF_TX_SRAM_MAXCOUNT;
+	scf_dscp_comtbl.rxsram_maxcount = SCF_RX_SRAM_MAXCOUNT;
+	scf_dscp_comtbl.txdsc_maxcount = SCF_TXDSC_MAXCOUNT;
+	scf_dscp_comtbl.rxdsc_maxcount = SCF_RXDSC_MAXCOUNT;
+	scf_dscp_comtbl.txdsc_busycount = SCF_TXDSC_BUSYCOUNT;
+	scf_dscp_comtbl.rxdsc_busycount = SCF_RXDSC_BUSYCOUNT;
+
+	/* Set re-try max count */
+	scf_dscp_comtbl.tx_ackto_maxretry_cnt = SCF_TX_ACKTO_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_endto_maxretry_cnt = SCF_TX_ENDTO_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_busy_maxretry_cnt = SCF_TX_BUSY_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_interface_maxretry_cnt = SCF_TX_IF_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_nak_maxretry_cnt = SCF_TX_NAK_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_notsup_maxretry_cnt = SCF_TX_NOTSUP_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_prmerr_maxretry_cnt = SCF_TX_PRMERR_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_seqerr_maxretry_cnt = SCF_TX_SEQERR_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_other_maxretry_cnt = SCF_TX_OTHER_MAXRETRAYCOUNT;
+	scf_dscp_comtbl.tx_send_maxretry_cnt = SCF_TX_SEND_MAXRETRAYCOUNT;
+
+	/*
+	 * TxDSC/RxDSC table allocation
+	 * (the Tx table carries SCF_TXDSC_LOCALCOUNT extra entries used
+	 * for locally generated control data)
+	 */
+	scf_dscp_comtbl.tx_dscsize =
+	    sizeof (scf_dscp_dsc_t) * (scf_dscp_comtbl.txdsc_maxcount +
+	    SCF_TXDSC_LOCALCOUNT);
+	scf_dscp_comtbl.tx_dscp =
+	    (scf_dscp_dsc_t *)kmem_zalloc(scf_dscp_comtbl.tx_dscsize,
+	    KM_SLEEP);
+
+	scf_dscp_comtbl.rx_dscsize =
+	    sizeof (scf_dscp_dsc_t) * (scf_dscp_comtbl.rxdsc_maxcount);
+	scf_dscp_comtbl.rx_dscp =
+	    (scf_dscp_dsc_t *)kmem_zalloc(scf_dscp_comtbl.rx_dscsize,
+	    KM_SLEEP);
+
+	/* Tx SRAM table allocation */
+	scf_dscp_comtbl.tx_sramsize =
+	    sizeof (scf_tx_sram_t) * scf_dscp_comtbl.txsram_maxcount;
+	scf_dscp_comtbl.tx_sramp =
+	    (scf_tx_sram_t *)kmem_zalloc(scf_dscp_comtbl.tx_sramsize,
+	    KM_SLEEP);
+
+	/*
+	 * TxDSC table initialization
+	 */
+	/* Get TxDSC table address */
+	dsc_p = scf_dscp_comtbl.tx_dscp;
+	for (ii = 0; ii < scf_dscp_comtbl.txdsc_maxcount; ii++, dsc_p++) {
+		/* Init SRAM offset */
+		dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+	}
+
+	/* Set Tx offset */
+	scf_dscp_comtbl.tx_first = 0;
+	scf_dscp_comtbl.tx_last =
+	    (uint16_t)(scf_dscp_comtbl.txdsc_maxcount - 1);
+	scf_dscp_comtbl.tx_put = 0;
+	scf_dscp_comtbl.tx_get = 0;
+	scf_dscp_comtbl.tx_local = (uint16_t)scf_dscp_comtbl.txdsc_maxcount;
+	scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local].dinfo.base.offset =
+	    DSC_OFFSET_NOTHING;
+
+	/*
+	 * Tx SRAM offset initialization
+	 */
+	/* Get Tx SRAM table address */
+	sram_p = scf_dscp_comtbl.tx_sramp;
+	for (ii = 0; ii < scf_dscp_comtbl.txsram_maxcount; ii++, sram_p++) {
+		/* Init SRAM offset */
+		sram_p->offset =
+		    (uint16_t)(scf_dscp_comtbl.txbuffsize * ii /
+		    DSC_OFFSET_CONVERT);
+	}
+
+	/* Set Tx SRAM offset */
+	scf_dscp_comtbl.tx_sram_first = 0;
+	scf_dscp_comtbl.tx_sram_last = (scf_dscp_comtbl.txsram_maxcount - 1);
+	scf_dscp_comtbl.tx_sram_put = 0;
+
+	/*
+	 * RxDSC table initialization
+	 */
+	/* Set Rx offset */
+	scf_dscp_comtbl.rx_first = 0;
+	scf_dscp_comtbl.rx_last =
+	    (uint16_t)(scf_dscp_comtbl.rxdsc_maxcount - 1);
+	scf_dscp_comtbl.rx_put = 0;
+	scf_dscp_comtbl.rx_get = 0;
+
+	/*
+	 * Main table initialization
+	 */
+	/* Get Top main table address */
+	mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+
+	/* Initialize each main table entry */
+	for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+		/* Set table id */
+		mainp->id = ii & DSC_CNTL_MASK_ID;
+
+		/* Set event/receive queue max count */
+		mainp->ev_maxcount = SCF_MB_EVQUE_MAXCOUNT;
+		mainp->rd_maxcount = SCF_RDQUE_MAXCOUNT;
+		mainp->rd_busycount = SCF_RDQUE_BUSYCOUNT;
+
+		/* Set fini() condition variable */
+		cv_init(&mainp->fini_cv, NULL, CV_DRIVER, NULL);
+		mainp->cv_init_flag = FLAG_ON;
+
+		/* event/receive data queue table allocation */
+		mainp->ev_quesize =
+		    sizeof (scf_event_que_t) * mainp->ev_maxcount;
+		mainp->ev_quep =
+		    (scf_event_que_t *)kmem_zalloc(mainp->ev_quesize,
+		    KM_SLEEP);
+		/*
+		 * BUGFIX: rd_datap is indexed via rd_get/rd_last, which
+		 * are derived from rd_maxcount below, so it must be sized
+		 * by rd_maxcount (was ev_maxcount: under-allocation and
+		 * out-of-bounds access whenever SCF_RDQUE_MAXCOUNT >
+		 * SCF_MB_EVQUE_MAXCOUNT).
+		 */
+		mainp->rd_datasize =
+		    sizeof (scf_rdata_que_t) * mainp->rd_maxcount;
+		mainp->rd_datap =
+		    (scf_rdata_que_t *)kmem_zalloc(mainp->rd_datasize,
+		    KM_SLEEP);
+
+		/* Event queue initialization */
+		for (jj = 0; jj < mainp->ev_maxcount; jj++) {
+			mainp->ev_quep[jj].mevent = (scf_event_t)(-1);
+		}
+		mainp->ev_first = 0;
+		mainp->ev_last = (uint16_t)(mainp->ev_maxcount - 1);
+		mainp->ev_put = 0;
+		mainp->ev_get = 0;
+
+		/* Receive data queue initialization */
+		mainp->rd_first = 0;
+		mainp->rd_last = (uint16_t)(mainp->rd_maxcount - 1);
+		mainp->rd_put = 0;
+		mainp->rd_get = 0;
+	}
+
+	/* Initialize success flag ON */
+	scf_dscp_comtbl.dscp_init_flag = FLAG_ON;
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_fini()
+ *
+ * Description: DSCP control area release processing.
+ *
+ */
+void
+scf_dscp_fini(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_fini() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	int		ii;		/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Main table resources release
+	 */
+	/* Get Top main table address */
+	mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+
+	/* Release each main table entry */
+	for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+		/* All receive buffer release */
+		scf_dscp_rdata_free(mainp);
+
+		/* Check fini() condition variable */
+		if (mainp->cv_init_flag == FLAG_ON) {
+			/* Destroy fini() condition variable */
+			cv_destroy(&mainp->fini_cv);
+			mainp->cv_init_flag = FLAG_OFF;
+		}
+
+		/* Check event queue table allocation */
+		if (mainp->ev_quep != NULL) {
+			/* Event queue table release */
+			kmem_free(mainp->ev_quep, mainp->ev_quesize);
+			mainp->ev_quep = NULL;
+		}
+
+		/* Check receive data table queue allocation */
+		if (mainp->rd_datap != NULL) {
+			/* Receive data queue table release */
+			kmem_free(mainp->rd_datap, mainp->rd_datasize);
+			mainp->rd_datap = NULL;
+		}
+	}
+
+	/*
+	 * DSCP common table resources release
+	 */
+	/* All timer stop */
+	scf_timer_stop(SCF_TIMERCD_DSCP_ACK);
+	scf_timer_stop(SCF_TIMERCD_DSCP_END);
+	scf_timer_stop(SCF_TIMERCD_DSCP_CALLBACK);
+	scf_timer_stop(SCF_TIMERCD_DSCP_BUSY);
+
+	/* All DSC buffer release */
+	scf_dscp_dscbuff_free_all();
+
+	/* Check TxDSC table allocation */
+	if (scf_dscp_comtbl.tx_dscp != NULL) {
+		/* TxDSC table release */
+		kmem_free(scf_dscp_comtbl.tx_dscp,
+		    scf_dscp_comtbl.tx_dscsize);
+		scf_dscp_comtbl.tx_dscp = NULL;
+	}
+
+	/* Check RxDSC table allocation */
+	if (scf_dscp_comtbl.rx_dscp != NULL) {
+		/* RxDSC table release */
+		kmem_free(scf_dscp_comtbl.rx_dscp,
+		    scf_dscp_comtbl.rx_dscsize);
+		scf_dscp_comtbl.rx_dscp = NULL;
+	}
+
+	/* Check Tx SRAM table allocation */
+	if (scf_dscp_comtbl.tx_sramp != NULL) {
+		/* Tx SRAM table release */
+		kmem_free(scf_dscp_comtbl.tx_sramp,
+		    scf_dscp_comtbl.tx_sramsize);
+		scf_dscp_comtbl.tx_sramp = NULL;
+	}
+
+	/* Initialize success flag OFF */
+	scf_dscp_comtbl.dscp_init_flag = FLAG_OFF;
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_start()
+ *
+ * Description: DSCP interface start processing.
+ *
+ */
+void
+scf_dscp_start(uint32_t factor)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_start() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	scf_dscp_dsc_t	*dsc_p;		/* TxDSC address */
+	int		ii;		/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start factor = 0x%08x",
+	    factor);
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_ON) {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+
+		/* Change TxDSC status (SA0) : restart from idle */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_IDLE);
+
+		/* TxREQ send exec flag OFF */
+		scf_dscp_comtbl.tx_exec_flag = FLAG_OFF;
+	}
+
+	/*
+	 * Check pending send TxDSC : a send that was in flight when the
+	 * path went down is rewound so it will be re-issued
+	 */
+	if (scf_dscp_comtbl.tx_dsc_count != 0) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+
+		/* Check TxDSC status */
+		switch (dsc_p->status) {
+		case SCF_TX_ST_TXREQ_SEND_WAIT:	/* TxDSC status (SB2) */
+			/* Check send data length */
+			if (dsc_p->dinfo.base.length != 0) {
+				/* Try again SRAM transfer */
+
+				/* Change TxDSC status (SB0) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_SRAM_TRANS_WAIT);
+			}
+			break;
+
+		case SCF_TX_ST_TXACK_RECV_WAIT:	/* TxDSC status (SC0) */
+		case SCF_TX_ST_TXEND_RECV_WAIT:	/* TxDSC status (SC1) */
+			/* Try again TxREQ send */
+
+			/* Change TxDSC status (SB2) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+			break;
+
+		default:
+			/* TxDSC status != SB2 or SC0 or SC1 is NOP */
+			break;
+		}
+	}
+
+	/*
+	 * Check pending RxDSC : discard any receive descriptors left over
+	 * from before the restart
+	 */
+	while (scf_dscp_comtbl.rx_dsc_count != 0) {
+		/* Get RxDSC address */
+		dsc_p = &scf_dscp_comtbl.rx_dscp[scf_dscp_comtbl.rx_get];
+
+		/* Check receive data */
+		if (dsc_p->dinfo.base.dscp_datap != NULL) {
+			/* Receive data release */
+			kmem_free(dsc_p->dinfo.base.dscp_datap,
+			    dsc_p->dinfo.base.length);
+			dsc_p->dinfo.base.dscp_datap = NULL;
+		}
+
+		/* Change RxDSC status (RA0) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_IDLE);
+
+		/* Update Rx descriptor offset (advance get index, wrap) */
+		if (scf_dscp_comtbl.rx_get == scf_dscp_comtbl.rx_last) {
+			scf_dscp_comtbl.rx_get = scf_dscp_comtbl.rx_first;
+		} else {
+			scf_dscp_comtbl.rx_get++;
+		}
+
+		/* Update Rx descriptor count */
+		scf_dscp_comtbl.rx_dsc_count--;
+
+		/* RxREQ receive exec flag OFF */
+		scf_dscp_comtbl.rx_exec_flag = FLAG_OFF;
+	}
+
+	/* Check SCF path change */
+	if (factor == FACTOR_PATH_CHG) {
+		/* Tx re-try counter initialization */
+		scf_dscp_comtbl.tx_ackto_retry_cnt = 0;
+		scf_dscp_comtbl.tx_endto_retry_cnt = 0;
+
+		scf_dscp_comtbl.tx_busy_retry_cnt = 0;
+		scf_dscp_comtbl.tx_interface_retry_cnt = 0;
+		scf_dscp_comtbl.tx_nak_retry_cnt = 0;
+		scf_dscp_comtbl.tx_notsuop_retry_cnt = 0;
+		scf_dscp_comtbl.tx_prmerr_retry_cnt = 0;
+		scf_dscp_comtbl.tx_seqerr_retry_cnt = 0;
+		scf_dscp_comtbl.tx_other_retry_cnt = 0;
+		scf_dscp_comtbl.tx_send_retry_cnt = 0;
+
+		/*
+		 * SCF path change flag ON :
+		 * local control data send(DSCP_PATH)
+		 */
+		scf_dscp_comtbl.dscp_path_flag = FLAG_ON;
+	} else {
+		/* SCF online processing */
+
+		/* Get Top main table address */
+		mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+
+		/* Check each main table entry */
+		for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+			/* Check main status */
+			switch (mainp->status) {
+			case SCF_ST_EST_TXEND_RECV_WAIT: /* Main status (B0) */
+			case SCF_ST_ESTABLISHED:	/* Main status (C0) */
+				/*
+				 * Connect check flag ON :
+				 * local control data send(CONN_CHK)
+				 */
+				mainp->conn_chk_flag = FLAG_ON;
+				break;
+
+			default:
+				/* Connect check flag OFF */
+				mainp->conn_chk_flag = FLAG_OFF;
+				break;
+			}
+		}
+	}
+
+	/* Call send matrix */
+	scf_dscp_send_matrix();
+
+	/* Call receive matrix */
+	scf_dscp_recv_matrix();
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_stop()
+ *
+ * Description: DSCP interface stop processing.
+ *
+ */
+void
+scf_dscp_stop(uint32_t factor)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_stop() "
+	scf_dscp_main_t	*mainp;		/* Main table address */
+	int		ii;		/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start factor = 0x%08x",
+	    factor);
+
+	/* Check stop factor : notify connected users only on halt/stop */
+	if ((factor == FACTOR_PATH_HALT) || (factor == FACTOR_PATH_STOP)) {
+		/* memo counter up */
+		scf_dscp_comtbl.scf_stop_memo_cnt++;
+
+		/* Get Top main table address */
+		mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+
+		/* Check each main table entry */
+		for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+			/* Check main status */
+			switch (mainp->status) {
+			case SCF_ST_EST_TXEND_RECV_WAIT: /* Main status (B0) */
+			case SCF_ST_ESTABLISHED:	/* Main status (C0) */
+				/* SCF_MB_DISC_ERROR event queuing */
+				scf_dscp_event_queue(mainp, SCF_MB_DISC_ERROR);
+
+				/* Change main status (C1) */
+				SCF_SET_STATUS(mainp, SCF_ST_EST_FINI_WAIT);
+
+				break;
+
+			case SCF_ST_CLOSE_TXEND_RECV_WAIT:
+				/* Main status (D0) */
+				/* Signal to fini() wait */
+				mainp->fini_wait_flag = FLAG_OFF;
+				cv_signal(&mainp->fini_cv);
+				SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+				    &mainp->fini_cv, sizeof (kcondvar_t));
+				break;
+
+			default:
+				/* Main status != B0 or C0 or D0 is NOP */
+				break;
+			}
+		}
+	}
+
+	/* Tx timer stop */
+	scf_timer_stop(SCF_TIMERCD_DSCP_ACK);
+	scf_timer_stop(SCF_TIMERCD_DSCP_END);
+	scf_timer_stop(SCF_TIMERCD_DSCP_BUSY);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_intr()
+ *
+ * Description: The corresponding function is called according to the
+ * interruption factor from SCF.
+ *
+ */
+void
+scf_dscp_intr(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_intr() "
+	/* Working value : Interrupt check flag */
+	int interrupt = FLAG_OFF;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get DSR register */
+	statep->reg_dsr = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DSR);
+	SC_DBG_DRV_TRACE(TC_R_DSR, __LINE__, &statep->reg_dsr,
+	    sizeof (statep->reg_dsr));
+
+	/* DSR register interrupt clear (write back the bits just read) */
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle, &statep->scf_regs->DSR,
+	    statep->reg_dsr);
+	SC_DBG_DRV_TRACE(TC_W_DSR, __LINE__, &statep->reg_dsr,
+	    sizeof (statep->reg_dsr));
+
+	/* Register read sync (flush the write above) */
+	scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DSR);
+
+	SCF_DBG_TEST_INTR_DSCP_DSR(statep);
+
+	SCFDBGMSG1(SCF_DBGFLAG_REG, "DSR = 0x%02x", statep->reg_dsr);
+
+	if ((statep->reg_dsr & DSR_RxREQ) != 0) {	/* RxREQ interrupt */
+		SCFDBGMSG(SCF_DBGFLAG_DSCP, "RxREQ interrupt");
+
+		interrupt = FLAG_ON;
+		/* Get RxDCR register */
+		statep->reg_rxdcr_c_flag =
+		    SCF_DDI_GET16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->RxDCR_C_FLAG);
+		SC_DBG_DRV_TRACE(TC_R_RxDCR_C_FLAG, __LINE__,
+		    &statep->reg_rxdcr_c_flag,
+		    sizeof (statep->reg_rxdcr_c_flag));
+
+		statep->reg_rxdcr_c_offset =
+		    SCF_DDI_GET16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->RxDCR_OFFSET);
+		SC_DBG_DRV_TRACE(TC_R_RxDCR_OFFSET, __LINE__,
+		    &statep->reg_rxdcr_c_offset,
+		    sizeof (statep->reg_rxdcr_c_offset));
+
+		statep->reg_rxdcr_c_length =
+		    SCF_DDI_GET32(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->RxDCR_LENGTH);
+		SC_DBG_DRV_TRACE(TC_R_RxDCR_LENGTH, __LINE__,
+		    &statep->reg_rxdcr_c_length,
+		    sizeof (statep->reg_rxdcr_c_length));
+
+		/* SRAM trace */
+		SCF_SRAM_TRACE(statep, DTC_DSCP_RXREQ);
+
+		SCF_DBG_TEST_INTR_DSCP_RXTX(statep, statep->reg_dsr);
+
+		SC_DBG_DRV_TRACE(TC_RxREQ, __LINE__,
+		    &statep->reg_rxdcr_c_flag, 8);
+
+		SCFDBGMSG3(SCF_DBGFLAG_REG, "RxDCR = 0x%04x 0x%04x 0x%08x",
+		    statep->reg_rxdcr_c_flag, statep->reg_rxdcr_c_offset,
+		    statep->reg_rxdcr_c_length);
+
+		/* Call RxREQ interrupt processing */
+		scf_dscp_rxreq_recv(statep);
+	}
+
+	if ((statep->reg_dsr & DSR_TxACK) != 0) {	/* TxACK interrupt */
+		SCFDBGMSG(SCF_DBGFLAG_DSCP, "TxACK interrupt");
+
+		interrupt = FLAG_ON;
+
+		SC_DBG_DRV_TRACE(TC_TxACK, __LINE__, NULL, 0);
+
+		/* SRAM trace */
+		SCF_SRAM_TRACE(statep, DTC_DSCP_TXACK);
+
+		/* Call TxACK interrupt processing */
+		scf_dscp_txack_recv(statep);
+	}
+
+	if ((statep->reg_dsr & DSR_TxEND) != 0) {	/* TxEND interrupt */
+		SCFDBGMSG(SCF_DBGFLAG_DSCP, "TxEND interrupt");
+
+		interrupt = FLAG_ON;
+
+		/* Get TxDSR register */
+		statep->reg_txdsr_c_flag =
+		    SCF_DDI_GET16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->TxDSR_C_FLAG);
+		SC_DBG_DRV_TRACE(TC_R_TxDSR_C_FLAG, __LINE__,
+		    &statep->reg_txdsr_c_flag,
+		    sizeof (statep->reg_txdsr_c_flag));
+
+		statep->reg_txdsr_c_offset =
+		    SCF_DDI_GET16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->TxDSR_OFFSET);
+		SC_DBG_DRV_TRACE(TC_R_TxDSR_OFFSET, __LINE__,
+		    &statep->reg_txdsr_c_offset,
+		    sizeof (statep->reg_txdsr_c_offset));
+
+		/* SRAM trace */
+		SCF_SRAM_TRACE(statep, DTC_DSCP_TXEND);
+
+		/*
+		 * NOTE(review): RxREQ branch passes statep->reg_dsr here,
+		 * this branch passes the DSR_TxEND constant — presumably
+		 * equivalent for the debug-test macro, but confirm.
+		 */
+		SCF_DBG_TEST_INTR_DSCP_RXTX(statep, DSR_TxEND);
+
+		SC_DBG_DRV_TRACE(TC_TxEND, __LINE__,
+		    &statep->reg_txdsr_c_flag, 4);
+
+		SCFDBGMSG2(SCF_DBGFLAG_REG, "TxDSR = 0x%04x 0x%04x",
+		    statep->reg_txdsr_c_flag, statep->reg_txdsr_c_offset);
+
+		/* Call TxEND interrupt processing */
+		scf_dscp_txend_recv(statep);
+	}
+
+	/* Count interrupts whose DSR carried no recognized factor */
+	if (interrupt == FLAG_OFF) {
+		SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__, &statep->reg_dsr,
+		    sizeof (statep->reg_dsr));
+		statep->no_int_dsr_cnt++;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Timeout functions : called from the SCF driver timer control function
+ */
+
+/*
+ * scf_dscp_ack_tout()
+ *
+ * Description: TxACK reception surveillance timeout processing is performed.
+ * SCF path change factor.
+ *
+ */
+void
+scf_dscp_ack_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_ack_tout() "
+	scf_dscp_dsc_t	*dsc_p;		/* TxDSC address */
+	scf_state_t	*statep;	/* Soft state pointer */
+	int		path_ret;	/* SCF path status return value */
+	uchar_t		cmd;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check TxREQ send exec : nothing outstanding means stale timer */
+	if (scf_dscp_comtbl.tx_exec_flag == FLAG_OFF) {
+		goto END_dscp_ack_tout;
+	}
+
+	/* memo counter up */
+	scf_dscp_comtbl.tx_ackto_memo_cnt++;
+
+	/* TxREQ send exec flag OFF */
+	scf_dscp_comtbl.tx_exec_flag = FLAG_OFF;
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* Check TxDSC status */
+	if (dsc_p->status == SCF_TX_ST_TXACK_RECV_WAIT) {
+		/* TxDSC status (SC0) */
+		/* Check re-try counter : both limits must allow a re-try */
+		if ((scf_dscp_comtbl.tx_ackto_retry_cnt <
+		    scf_dscp_comtbl.tx_ackto_maxretry_cnt) &&
+		    (scf_dscp_comtbl.tx_send_retry_cnt <
+		    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+			/* re-try count up */
+			scf_dscp_comtbl.tx_ackto_retry_cnt++;
+			scf_dscp_comtbl.tx_send_retry_cnt++;
+
+			/* Change TxDSC status (SB2) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+
+			/* Call send matrix */
+			scf_dscp_send_matrix();
+		} else {
+			/* TxACK re-try timeout error */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &scf_dscp_comtbl.tx_ackto_retry_cnt,
+			    sizeof (scf_dscp_comtbl.tx_ackto_retry_cnt));
+
+			/*
+			 * Get SCF path status
+			 * (presumably fills statep when the path is
+			 * ONLINE — statep is only dereferenced in that
+			 * branch; confirm against scf_path_check())
+			 */
+			path_ret = scf_path_check(&statep);
+
+			/* Check SCF path status */
+			if (path_ret == SCF_PATH_ONLINE) {
+				/* DSCP command code is the high byte */
+				cmd = (uchar_t)(dsc_p->dinfo.base.c_flag >> 8);
+				cmn_err(CE_WARN,
+				    "%s,DSCP ack response timeout "
+				    "occurred. "
+				    "DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* SRAM trace */
+				SCF_SRAM_TRACE(statep, DTC_DSCP_ACKTO);
+
+				/* SCF path change */
+				statep->scf_herr |= HERR_DSCP_ACKTO;
+				scf_path_change(statep);
+			}
+		}
+	}
+
+/*
+ * END_dscp_ack_tout
+ */
+	END_dscp_ack_tout:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_end_tout()
+ *
+ * Description: TxEND reception surveillance timeout processing is performed.
+ * SCF path change factor.
+ *
+ */
+void
+scf_dscp_end_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_end_tout() "
+	scf_dscp_dsc_t	*dsc_p;		/* TxDSC address */
+	scf_state_t	*statep;	/* Soft state pointer */
+	int		path_ret;	/* SCF path status return value */
+	uchar_t		cmd;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check TxREQ send exec : nothing outstanding means stale timer */
+	if (scf_dscp_comtbl.tx_exec_flag == FLAG_OFF) {
+		goto END_dscp_end_tout;
+	}
+
+	/* memo counter up */
+	scf_dscp_comtbl.tx_endto_memo_cnt++;
+
+	/* TxREQ send exec flag OFF */
+	scf_dscp_comtbl.tx_exec_flag = FLAG_OFF;
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* Check TxDSC status */
+	if (dsc_p->status == SCF_TX_ST_TXEND_RECV_WAIT) {
+		/* TxDSC status (SC1) */
+		/* Check re-try counter : both limits must allow a re-try */
+		if ((scf_dscp_comtbl.tx_endto_retry_cnt <
+		    scf_dscp_comtbl.tx_endto_maxretry_cnt) &&
+		    (scf_dscp_comtbl.tx_send_retry_cnt <
+		    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+			/* re-try count up */
+			scf_dscp_comtbl.tx_endto_retry_cnt++;
+			scf_dscp_comtbl.tx_send_retry_cnt++;
+
+			/* Change TxDSC status (SB2) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+
+			/* Call send matrix */
+			scf_dscp_send_matrix();
+		} else {
+			/* TxEND re-try timeout error */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &scf_dscp_comtbl.tx_endto_retry_cnt,
+			    sizeof (scf_dscp_comtbl.tx_endto_retry_cnt));
+
+			/*
+			 * Get SCF path status
+			 * (presumably fills statep when the path is
+			 * ONLINE — statep is only dereferenced in that
+			 * branch; confirm against scf_path_check())
+			 */
+			path_ret = scf_path_check(&statep);
+
+			/* Check SCF path status */
+			if (path_ret == SCF_PATH_ONLINE) {
+				/* DSCP command code is the high byte */
+				cmd = (uchar_t)(dsc_p->dinfo.base.c_flag >> 8);
+				cmn_err(CE_WARN,
+				    "%s,DSCP end response timeout "
+				    "occurred. "
+				    "DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* SRAM trace */
+				SCF_SRAM_TRACE(statep, DTC_DSCP_ENDTO);
+
+				/* SCF path change */
+				statep->scf_herr |= HERR_DSCP_ENDTO;
+				scf_path_change(statep);
+			}
+		}
+	}
+
+/*
+ * END_dscp_end_tout
+ */
+	END_dscp_end_tout:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_busy_tout()
+ *
+ * Description: Handles a buffer-busy timeout by re-attempting TxREQ
+ * transmission.
+ *
+ */
+void
+scf_dscp_busy_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_busy_tout() "
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Check pending send TxDSC or local control TxDSC :
+	 * nothing to re-send if neither is outstanding
+	 */
+	if ((scf_dscp_comtbl.tx_dsc_count == 0) &&
+	    (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF)) {
+		goto END_dscp_busy_tout;
+	}
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* Check TxDSC status */
+	if (dsc_p->status == SCF_TX_ST_TXREQ_SEND_WAIT) {
+		/* TxDSC status (SB2) */
+		/* Call send matrix */
+		scf_dscp_send_matrix();
+	}
+
+/*
+ * END_dscp_busy_tout
+ */
+	END_dscp_busy_tout:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_callback_tout()
+ *
+ * Description: Handles a callback timeout by re-triggering the soft
+ * interrupt.
+ *
+ */
+void
+scf_dscp_callback_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_callback_tout() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Soft interrupt : call scf_dscp_callback()
+	 * If the softint mutex is busy the trigger is skipped here;
+	 * the timer restarted below retries later.
+	 */
+	if (mutex_tryenter(&scf_comtbl.si_mutex) != 0) {
+		scf_comtbl.scf_softintr_dscp_kicked = FLAG_ON;
+		ddi_trigger_softintr(scf_comtbl.scf_softintr_id);
+		mutex_exit(&scf_comtbl.si_mutex);
+	}
+
+	/* Callback timer start */
+	scf_timer_start(SCF_TIMERCD_DSCP_CALLBACK);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_callback()
+ *
+ * Description: Drains the per-interface event queues and invokes the
+ * registered callback entries.
+ *
+ */
+void
+scf_dscp_callback(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_callback() "
+	scf_dscp_main_t *mainp;	/* Main table address */
+	/* Working value : event_handler */
+	void (*wkevent_handler)(scf_event_t, void *);
+	scf_event_t wkmevent;	/* Working value : mevent */
+	void *wkarg;	/* Working value : arg */
+	/* Working value : next event processing check flag */
+	int event_flag = FLAG_OFF;
+	int ii;	/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check callback entry exec flag : re-entry guard */
+	if (scf_dscp_comtbl.callback_exec_flag == FLAG_ON) {
+		goto END_dscp_callback;
+	}
+
+	/* Set callback entry exec flag */
+	scf_dscp_comtbl.callback_exec_flag = FLAG_ON;
+
+/*
+ * CALLBACK_START
+ */
+	CALLBACK_START:
+
+	/* Get Top main table address */
+	mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+	/* Check all main table */
+	for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+		/* Check event count */
+		if (mainp->ev_count != 0) {
+			/* Next event processing flag ON */
+			event_flag = FLAG_ON;
+
+			/* Get event info */
+			wkmevent = mainp->ev_quep[mainp->ev_get].mevent;
+
+			/* Update event queue offset (circular) */
+			if (mainp->ev_get == mainp->ev_last) {
+				mainp->ev_get = mainp->ev_first;
+			} else {
+				mainp->ev_get++;
+			}
+
+			/* Update event queue count */
+			mainp->ev_count--;
+
+			/* Get callback entry and arg */
+			wkevent_handler = mainp->event_handler;
+			wkarg = mainp->arg;
+
+			/* Check event_handler address */
+			if (wkevent_handler != NULL) {
+				/* Check main status */
+				switch (mainp->status) {
+				case SCF_ST_ESTABLISHED:
+					/* Main status (C0) */
+				case SCF_ST_EST_FINI_WAIT:
+					/* Main status (C1) */
+
+					/*
+					 * Unlock driver mutex : the handler
+					 * must not run with all_mutex held
+					 */
+					mutex_exit(&scf_comtbl.all_mutex);
+
+					/* Call event handler */
+					wkevent_handler(wkmevent, wkarg);
+
+					SC_DBG_DRV_TRACE(TC_MB_CALLBACK,
+					    __LINE__, &wkmevent,
+					    sizeof (wkmevent));
+					SCFDBGMSG1(SCF_DBGFLAG_DSCP,
+					    "DSCP callback mevent = %d",
+					    wkmevent);
+
+					/* Lock driver mutex */
+					mutex_enter(&scf_comtbl.all_mutex);
+					break;
+
+				default:
+					/*
+					 * Main status != C0 or C1 is NOP
+					 */
+					break;
+				}
+			}
+		}
+	}
+
+	/* Check next event processing : rescan until all queues drained */
+	if (event_flag == FLAG_ON) {
+		event_flag = FLAG_OFF;
+		goto CALLBACK_START;
+	}
+
+	/* Clear callback entry exec flag */
+	scf_dscp_comtbl.callback_exec_flag = FLAG_OFF;
+
+/*
+ * END_dscp_callback
+ */
+	END_dscp_callback:
+
+	/* CALLBACK timer stop */
+	scf_timer_stop(SCF_TIMERCD_DSCP_CALLBACK);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+
+/*
+ * Interrupt function : from scf_dscp_intr()
+ */
+
+/*
+ * scf_dscp_txack_recv()
+ *
+ * Description: Processes a received TxACK: stops the ACK timer, arms the
+ * TxEND timer, and advances the TxDSC state.
+ *
+ */
+/* ARGSUSED */
+void
+scf_dscp_txack_recv(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txack_recv() "
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check TxREQ send exec : ignore a stray ACK */
+	if (scf_dscp_comtbl.tx_exec_flag == FLAG_OFF) {
+		goto END_dscp_txack_recv;
+	}
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* Check TxDSC status */
+	if (dsc_p->status == SCF_TX_ST_TXACK_RECV_WAIT) {
+		/* TxDSC status (SC0) */
+		/* Error counter initialization */
+		scf_dscp_comtbl.tx_ackto_retry_cnt = 0;
+
+		/* TxACK timer stop */
+		scf_timer_stop(SCF_TIMERCD_DSCP_ACK);
+
+		/* TxEND timer start : now waiting for TxEND */
+		scf_timer_start(SCF_TIMERCD_DSCP_END);
+
+		/* Change TxDSC status (SC1) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXEND_RECV_WAIT);
+	}
+
+/*
+ * END_dscp_txack_recv
+ */
+	END_dscp_txack_recv:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_txend_recv()
+ *
+ * Description: Processes a received TxEND and acts on its completion
+ * status: normal completion, retry of the TxREQ, notification to the main
+ * matrix, or an SCF path change.
+ *
+ */
+void
+scf_dscp_txend_recv(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txend_recv() "
+	scf_dscp_main_t *mainp;	/* Main table address */
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+	scf_dscreg_t wk_dsc;	/* Work TxDSC */
+	/* Working value : TxDSC release check flag */
+	int norel_txdsc = FLAG_OFF;
+	/* Working value : SCF path change flag */
+	int path_change = FLAG_OFF;
+	int ii;	/* Working value : counter */
+	uchar_t cmd;	/* DSCP command code (high byte of c_flag) */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check TxREQ send exec : ignore a stray TxEND */
+	if (scf_dscp_comtbl.tx_exec_flag == FLAG_OFF) {
+		goto END_dscp_txend_recv;
+	}
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* Save TxDSR status information in TxDSC */
+	wk_dsc.base.c_flag = statep->reg_txdsr_c_flag;
+	dsc_p->dinfo.bdsr.status = wk_dsc.bdsr.status;
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, "TxEND status = 0x%02x",
+	    dsc_p->dinfo.bdsr.status);
+
+	/* Check TxREQ offset and TxEND offset : must match our request */
+	if (dsc_p->dinfo.base.offset != statep->reg_txdsr_c_offset) {
+		goto END_dscp_txend_recv;
+	}
+
+	/* TxACK and TxEND timer stop */
+	scf_timer_stop(SCF_TIMERCD_DSCP_ACK);
+	scf_timer_stop(SCF_TIMERCD_DSCP_END);
+
+	/* Get main table address from "id" */
+	mainp = scf_dscp_id2mainp(dsc_p->dinfo.bdcr.id);
+
+	/*
+	 * Check mainp address or local control data(DSCP_PATH)
+	 */
+	if ((mainp == NULL) &&
+	    (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL)) {
+		goto END_dscp_txend_recv;
+	}
+
+	cmd = (uchar_t)(dsc_p->dinfo.base.c_flag >> 8);
+
+	/* Check TxDSC status */
+	switch (dsc_p->status) {
+	case SCF_TX_ST_TXACK_RECV_WAIT:
+		/* TxDSC status (SC0) */
+	case SCF_TX_ST_TXEND_RECV_WAIT:
+		/* TxDSC status (SC1) */
+		/* Check TxREQ end status */
+		switch (dsc_p->dinfo.bdsr.status) {
+		case DSC_STATUS_NORMAL:	/* Normal end */
+			/* Error counter initialization */
+			scf_dscp_comtbl.tx_ackto_retry_cnt = 0;
+			scf_dscp_comtbl.tx_endto_retry_cnt = 0;
+			scf_dscp_comtbl.tx_busy_retry_cnt = 0;
+			scf_dscp_comtbl.tx_interface_retry_cnt = 0;
+			scf_dscp_comtbl.tx_nak_retry_cnt = 0;
+			scf_dscp_comtbl.tx_notsuop_retry_cnt = 0;
+			scf_dscp_comtbl.tx_prmerr_retry_cnt = 0;
+			scf_dscp_comtbl.tx_seqerr_retry_cnt = 0;
+			scf_dscp_comtbl.tx_other_retry_cnt = 0;
+			scf_dscp_comtbl.tx_send_retry_cnt = 0;
+
+			/* Check local control data(DSCP_PATH) */
+			if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+				/* TxEND notice to main matrix */
+				scf_dscp_txend_notice(mainp);
+			}
+			break;
+
+		case DSC_STATUS_BUF_BUSY:	/* Buffer busy */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_busy_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_busy_retry_cnt <
+			    scf_dscp_comtbl.tx_busy_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_busy_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* TxREQ busy timer start : delayed re-send */
+				scf_timer_start(SCF_TIMERCD_DSCP_BUSY);
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Buffer busy end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Buffer busy occurred in XSCF. "
+				    "DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* DSCP path change send flag ON */
+					scf_dscp_comtbl.dscp_path_flag =
+					    FLAG_ON;
+				}
+			}
+			break;
+
+		case DSC_STATUS_INTERFACE:	/* Interface error */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_interface_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_interface_retry_cnt <
+			    scf_dscp_comtbl.tx_interface_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_interface_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Interface error end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Detected the interface error by "
+				    "XSCF. DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* Set hard error flag */
+				statep->scf_herr |= HERR_DSCP_INTERFACE;
+
+				/* SCF path change flag ON */
+				path_change = FLAG_ON;
+			}
+			break;
+
+		case DSC_STATUS_CONN_NAK:	/* Connection refusal */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_nak_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_nak_retry_cnt <
+			    scf_dscp_comtbl.tx_nak_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_nak_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Connection refusal end re-try error */
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* Set hard error flag */
+					statep->scf_herr |= HERR_DSCP_INTERFACE;
+
+					/* SCF path change flag ON */
+					path_change = FLAG_ON;
+				}
+			}
+			break;
+
+		case DSC_STATUS_E_NOT_SUPPORT:	/* Not support */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_notsuop_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_notsuop_retry_cnt <
+			    scf_dscp_comtbl.tx_notsup_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_notsuop_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Not support end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Detected the not support command "
+				    "by XSCF. DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* DSCP path change send flag ON */
+					scf_dscp_comtbl.dscp_path_flag =
+					    FLAG_ON;
+				}
+			}
+			break;
+
+		case DSC_STATUS_E_PARAM:	/* Parameter error */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_prmerr_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_prmerr_retry_cnt <
+			    scf_dscp_comtbl.tx_prmerr_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_prmerr_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Parameter error end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Detected the invalid parameter by "
+				    "XSCF. DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* DSCP path change send flag ON */
+					scf_dscp_comtbl.dscp_path_flag =
+					    FLAG_ON;
+				}
+			}
+			break;
+
+		case DSC_STATUS_E_SEQUENCE:	/* Sequence error */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_seqerr_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_seqerr_retry_cnt <
+			    scf_dscp_comtbl.tx_seqerr_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_seqerr_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Sequence error end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Detected the sequence error by "
+				    "XSCF. DSCP command = 0x%02x\n",
+				    &statep->pathname[0], cmd);
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* DSCP path change send flag ON */
+					scf_dscp_comtbl.dscp_path_flag =
+					    FLAG_ON;
+				}
+			}
+			break;
+
+		default:	/* Other status */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+
+			/* memo counter up */
+			scf_dscp_comtbl.tx_other_memo_cnt++;
+
+			/* Check re-try counter */
+			if ((scf_dscp_comtbl.tx_other_retry_cnt <
+			    scf_dscp_comtbl.tx_other_maxretry_cnt) &&
+			    (scf_dscp_comtbl.tx_send_retry_cnt <
+			    scf_dscp_comtbl.tx_send_maxretry_cnt)) {
+				/* re-try count up */
+				scf_dscp_comtbl.tx_other_retry_cnt++;
+				scf_dscp_comtbl.tx_send_retry_cnt++;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+
+				/* TxDSC not release */
+				norel_txdsc = FLAG_ON;
+			} else {
+				/* Other error end re-try error */
+				cmn_err(CE_WARN,
+				    "%s,Invalid status value was notified "
+				    "from XSCF. DSCP command = 0x%02x, "
+				    "Status value = 0x%02x\n",
+				    &statep->pathname[0], cmd,
+				    (uchar_t)
+				    dsc_p->dinfo.base.c_flag);
+
+				/* Check local control data(DSCP_PATH) */
+				if (dsc_p->dinfo.bdcr.id != DSC_CNTL_LOCAL) {
+					/* TxEND notice to main matrix */
+					scf_dscp_txend_notice(mainp);
+				} else {
+					/* DSCP path change send flag ON */
+					scf_dscp_comtbl.dscp_path_flag =
+					    FLAG_ON;
+				}
+			}
+			break;
+		}
+		break;
+
+	default:
+		/* TxDSC status != SC0 or SC1 is NOP */
+		break;
+	}
+
+	/* Check TxDSC not release : skip teardown when a retry is queued */
+	if (norel_txdsc == FLAG_OFF) {
+		/* Check send data */
+		if (dsc_p->dinfo.base.dscp_datap != NULL) {
+			/* Send data release */
+			kmem_free(dsc_p->dinfo.base.dscp_datap,
+			    dsc_p->dinfo.base.length);
+			dsc_p->dinfo.base.dscp_datap = NULL;
+		}
+
+		/* Check SRAM data */
+		if (dsc_p->dinfo.base.offset != DSC_OFFSET_NOTHING) {
+			/* Send SRAM data release */
+			scf_dscp_sram_free(dsc_p->dinfo.base.offset);
+			dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+		}
+
+		/* Change TxDSC status (SA0) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_IDLE);
+
+		/* Check use local control TxDSC flag */
+		if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+			/* Update Tx descriptor offset (circular) */
+			if (scf_dscp_comtbl.tx_get == scf_dscp_comtbl.tx_last) {
+				scf_dscp_comtbl.tx_get =
+				    scf_dscp_comtbl.tx_first;
+			} else {
+				scf_dscp_comtbl.tx_get++;
+			}
+
+			/* Update Tx descriptor count */
+			scf_dscp_comtbl.tx_dsc_count--;
+
+			/* Get Top main table address */
+			mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+			/* Check main table */
+			for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+				/* Check putmsg busy release */
+				if ((mainp->putmsg_busy_flag == FLAG_ON) &&
+				    (scf_dscp_comtbl.tx_dsc_count <
+				    scf_dscp_comtbl.txdsc_busycount)) {
+					/* putmsg busy flag OFF */
+					mainp->putmsg_busy_flag = FLAG_OFF;
+
+					/* TxREL_BUSY notice to main matrix */
+					scf_dscp_txrelbusy_notice(mainp);
+				}
+			}
+		} else {
+			/* Initialize use local control TxDSC flag */
+			scf_dscp_comtbl.tx_local_use_flag = FLAG_OFF;
+
+			/* DSCP path change send flag OFF */
+			scf_dscp_comtbl.dscp_path_flag = FLAG_OFF;
+		}
+	}
+	/* TxREQ send exec flag OFF */
+	scf_dscp_comtbl.tx_exec_flag = FLAG_OFF;
+
+	/* Check SCF path change flag */
+	if (path_change == FLAG_OFF) {
+		/* Call send matrix */
+		scf_dscp_send_matrix();
+	} else {
+		/* SCF path change */
+		scf_path_change(statep);
+	}
+
+/*
+ * END_dscp_txend_recv
+ */
+	END_dscp_txend_recv:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxreq_recv()
+ *
+ * Description: Processes a received RxREQ and notifies the main control
+ * matrix.
+ *
+ */
+void
+scf_dscp_rxreq_recv(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxreq_recv() "
+	scf_dscp_main_t *mainp;	/* Main table address */
+	scf_dscp_dsc_t *dsc_p;	/* RxDSC address */
+	uint16_t offset_low;	/* Working value : low bound of valid offset */
+	uint16_t offset_hight;	/* Working value : high bound of valid offset */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check pending RxDSC : only accept when no RxDSC is in flight */
+	if (scf_dscp_comtbl.rx_dsc_count == 0) {
+		/* Get RxDSC address */
+		dsc_p = &scf_dscp_comtbl.rx_dscp[scf_dscp_comtbl.rx_put];
+
+		/* Save RxDCR information in RxDSC */
+		dsc_p->dinfo.base.c_flag = statep->reg_rxdcr_c_flag;
+		dsc_p->dinfo.base.offset = statep->reg_rxdcr_c_offset;
+		dsc_p->dinfo.base.length = statep->reg_rxdcr_c_length;
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+		dsc_p->dinfo.base.dscp_datap = NULL;
+
+		/* Update Rx descriptor offset (circular) */
+		if (scf_dscp_comtbl.rx_put == scf_dscp_comtbl.rx_last) {
+			scf_dscp_comtbl.rx_put = scf_dscp_comtbl.rx_first;
+		} else {
+			scf_dscp_comtbl.rx_put++;
+		}
+
+		/* Update Rx descriptor count */
+		scf_dscp_comtbl.rx_dsc_count++;
+
+		/* RxREQ receive exec flag ON */
+		scf_dscp_comtbl.rx_exec_flag = FLAG_ON;
+
+		/* Get main table address from "id" */
+		mainp = scf_dscp_id2mainp(dsc_p->dinfo.bdcr.id);
+
+		/* Valid Rx SRAM window starts after the Tx SRAM area */
+		offset_low = (uint16_t)(scf_dscp_comtbl.txbuffsize *
+		    scf_dscp_comtbl.txsram_maxcount / DSC_OFFSET_CONVERT);
+
+		SCF_DBG_MAKE_LOOPBACK(offset_low);
+
+		offset_hight =
+		    (uint16_t)(offset_low + scf_dscp_comtbl.rxbuffsize *
+		    scf_dscp_comtbl.rxsram_maxcount / DSC_OFFSET_CONVERT);
+
+		/* Check mainp address and offset */
+		if ((mainp != NULL) &&
+		    (((dsc_p->dinfo.base.offset >= offset_low) &&
+		    (dsc_p->dinfo.base.offset < offset_hight)) ||
+		    (dsc_p->dinfo.base.offset == DSC_OFFSET_NOTHING))) {
+			/* RxREQ notice to main matrix */
+			scf_dscp_rxreq_notice(mainp);
+		} else {
+			/* Invalid "id" or "offset" */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+			SCFDBGMSG(SCF_DBGFLAG_DSCP, "Invalid id or offset");
+
+			/* Set end status : Parameter error */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_E_PARAM;
+
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+			/* Call receive matrix */
+			scf_dscp_recv_matrix();
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Main and Tx/Rx interface function
+ */
+
+/*
+ * scf_dscp_txend_notice()
+ *
+ * Description: Called by the Tx matrix on TxEND reception; dispatches on
+ * the request code of the completed TxDSC.
+ *
+ */
+void
+scf_dscp_txend_notice(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txend_notice() "
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check local control data flag */
+	if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_local];
+	}
+
+	/* TxREQ code check */
+	switch (dsc_p->dinfo.bdcr.code) {
+	case DSC_CNTL_INIT_REQ:	/* INIT_REQ */
+		/* Check main status */
+		if (mainp->status == SCF_ST_EST_TXEND_RECV_WAIT) {
+			/* Main status (B0) */
+			/* Check end status */
+			if (dsc_p->dinfo.bdsr.status == DSC_STATUS_NORMAL) {
+				/* SCF_MB_CONN_OK event queuing */
+				scf_dscp_event_queue(mainp, SCF_MB_CONN_OK);
+
+				/* Change main status (C0) */
+				SCF_SET_STATUS(mainp, SCF_ST_ESTABLISHED);
+			} else {
+				/* Not normal end status */
+
+				/* SCF_MB_DISC_ERROR event queuing */
+				scf_dscp_event_queue(mainp, SCF_MB_DISC_ERROR);
+
+				/* Change main status (C1) */
+				SCF_SET_STATUS(mainp, SCF_ST_EST_FINI_WAIT);
+			}
+		}
+		break;
+
+	case DSC_CNTL_FINI_REQ:	/* FINI_REQ */
+		/* Check main status */
+		if (mainp->status == SCF_ST_CLOSE_TXEND_RECV_WAIT) {
+			/* Main status (D0) */
+			/* Signal to fini() wait : wake the blocked closer */
+			mainp->fini_wait_flag = FLAG_OFF;
+			cv_signal(&mainp->fini_cv);
+			SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &mainp->fini_cv,
+			    sizeof (kcondvar_t));
+		}
+		break;
+
+	case DSC_CNTL_CONN_CHK:	/* CONN_CHK */
+		/* Check main status */
+		switch (mainp->status) {
+		case SCF_ST_EST_TXEND_RECV_WAIT:	/* Main status (B0) */
+		case SCF_ST_ESTABLISHED:	/* Main status (C0) */
+			/* CONN_CHK flag OFF */
+			mainp->conn_chk_flag = FLAG_OFF;
+			/* Check end status */
+			if (dsc_p->dinfo.bdsr.status != DSC_STATUS_NORMAL) {
+				/* SCF_MB_DISC_ERROR event queuing */
+				scf_dscp_event_queue(mainp, SCF_MB_DISC_ERROR);
+
+				/* Change main status (C1) */
+				SCF_SET_STATUS(mainp, SCF_ST_EST_FINI_WAIT);
+			}
+			break;
+
+		default:
+			/* Main status != B0 or C0 is NOP */
+			break;
+		}
+		break;
+
+	case DSC_CNTL_DATA_REQ:	/* DATA_REQ */
+		/* Tx DATA_REQ ok counter up */
+		mainp->memo_tx_data_req_ok_cnt++;
+		break;
+
+	default:
+		/* Undefine TxREQ code is NOP */
+		break;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_txrelbusy_notice()
+ *
+ * Description: Called by the Tx matrix when Tx busy is released; queues an
+ * SCF_MB_SPACE event for the interface.
+ *
+ */
+void
+scf_dscp_txrelbusy_notice(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txrelbusy_notice() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check main status : only notify an established connection */
+	if (mainp->status == SCF_ST_ESTABLISHED) {	/* Main status (C0) */
+		/* SCF_MB_SPACE event queuing */
+		scf_dscp_event_queue(mainp, SCF_MB_SPACE);
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxreq_notice()
+ *
+ * Description: Called by the Rx matrix on RxREQ reception; dispatches on
+ * the request code of the received RxDSC.
+ *
+ */
+void
+scf_dscp_rxreq_notice(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxreq_notice() "
+	scf_dscp_dsc_t *dsc_p;	/* RxDSC address */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get RxDSC address */
+	dsc_p = &scf_dscp_comtbl.rx_dscp[scf_dscp_comtbl.rx_get];
+
+	/* RxREQ code check */
+	switch (dsc_p->dinfo.bdcr.code) {
+	case DSC_CNTL_INIT_REQ:	/* INIT_REQ */
+		/* Set end status : Not support */
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_E_NOT_SUPPORT;
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+		/* Call receive matrix */
+		scf_dscp_recv_matrix();
+		break;
+
+	case DSC_CNTL_FINI_REQ:	/* FINI_REQ */
+		/* Check main status */
+		switch (mainp->status) {
+		case SCF_ST_EST_TXEND_RECV_WAIT:	/* Main status (B0) */
+		case SCF_ST_ESTABLISHED:	/* Main status (C0) */
+			/* SCF_MB_DISC_ERROR event queuing */
+			scf_dscp_event_queue(mainp, SCF_MB_DISC_ERROR);
+
+			/* Change main status (C1) */
+			SCF_SET_STATUS(mainp, SCF_ST_EST_FINI_WAIT);
+			break;
+
+		default:
+			/* Main status != B0 or C0 is NOP */
+			break;
+		}
+
+		/* Set end status : Normal end */
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+		/* Call receive matrix */
+		scf_dscp_recv_matrix();
+		break;
+
+	case DSC_CNTL_DATA_REQ:	/* DATA_REQ */
+		/* Rx DATA_REQ counter up */
+		mainp->memo_rx_data_req_cnt++;
+
+		/* Check receive data length */
+		if (dsc_p->dinfo.base.length <= scf_dscp_comtbl.maxdatalen) {
+			/* Check receive data queue space */
+			if (mainp->rd_count < mainp->rd_busycount) {
+				/* Set end status : Normal end */
+				dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+
+				/* Check main status */
+				if (mainp->status == SCF_ST_ESTABLISHED) {
+					/* Main status (C0) */
+					/* Change RxDSC status (RB0) */
+					SCF_SET_DSC_STATUS(dsc_p,
+					    SCF_RX_ST_RXACK_SEND_WAIT);
+				} else {
+					/* Change RxDSC status (RB3) */
+					SCF_SET_DSC_STATUS(dsc_p,
+					    SCF_RX_ST_RXEND_SEND_WAIT);
+				}
+			} else {
+				/* No space of receive data queue */
+
+				/* Set end status : Buffer busy */
+				dsc_p->dinfo.bdsr.status = DSC_STATUS_BUF_BUSY;
+
+				/* Change RxDSC status (RB3) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_RX_ST_RXEND_SEND_WAIT);
+			}
+		} else {
+			/* Invalid data length */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.length,
+			    sizeof (dsc_p->dinfo.base.length));
+
+			/* Set end status : Parameter error */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_E_PARAM;
+
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+		}
+
+		/* Call receive matrix */
+		scf_dscp_recv_matrix();
+		break;
+
+	case DSC_CNTL_CONN_CHK:	/* CONN_CHK */
+		/* Check main status */
+		if (mainp->status == SCF_ST_ESTABLISHED) {
+			/* Main status (C0) */
+			/* Set end status : Normal end */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+		} else {
+			/* Set end status : Connection refusal */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_CONN_NAK;
+		}
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+		/* Call receive matrix */
+		scf_dscp_recv_matrix();
+		break;
+
+	default:
+		/* Invalid RxREQ code */
+		SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__, &dsc_p->dinfo.base.c_flag,
+		    TC_INFO_SIZE);
+
+		/* Set end status : Parameter error */
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_E_PARAM;
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+		/* Call receive matrix */
+		scf_dscp_recv_matrix();
+		break;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxdata_notice()
+ *
+ * Description: Called by the Rx control matrix after received data has
+ * been read from SRAM; queues the data for the interface and posts a
+ * receive-data event.
+ *
+ */
+void
+scf_dscp_rxdata_notice(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxdata_notice() "
+	scf_dscp_dsc_t *dsc_p;	/* RxDSC address */
+	scf_rdata_que_t *rdt_p;	/* Receive data queue address */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get RxDSC address */
+	dsc_p = &scf_dscp_comtbl.rx_dscp[scf_dscp_comtbl.rx_get];
+
+	/* Check main status */
+	switch (mainp->status) {
+	case SCF_ST_ESTABLISHED:	/* Main status (C0) */
+	case SCF_ST_EST_FINI_WAIT:	/* Main status (C1) */
+		/* Check receive data queue space */
+		if (mainp->rd_count < mainp->rd_busycount) {
+			/* Receive data queuing : ownership moves to queue */
+			rdt_p = &mainp->rd_datap[mainp->rd_put];
+			rdt_p->rdatap = dsc_p->dinfo.base.dscp_datap;
+			dsc_p->dinfo.base.dscp_datap = NULL;
+			rdt_p->length = dsc_p->dinfo.base.length;
+
+			/* Update receive data queue offset (circular) */
+			if (mainp->rd_put == mainp->rd_last) {
+				mainp->rd_put = mainp->rd_first;
+			} else {
+				mainp->rd_put++;
+			}
+
+			/* Update receive data queue count */
+			mainp->rd_count++;
+
+			/* SCF_MB_MSG_DATA event queuing */
+			scf_dscp_event_queue(mainp, SCF_MB_MSG_DATA);
+
+			/* Rx DATA_REQ ok counter up */
+			mainp->memo_rx_data_req_ok_cnt++;
+		} else {
+			/* No space of receive data queue */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__, &mainp->rd_count,
+			    sizeof (mainp->rd_count));
+			SCFDBGMSG(SCF_DBGFLAG_DSCP,
+			    "No space of receive data queue");
+
+			/* Check receive data */
+			if (dsc_p->dinfo.base.dscp_datap != NULL) {
+				/* Receive data release */
+				kmem_free(dsc_p->dinfo.base.dscp_datap,
+				    dsc_p->dinfo.base.length);
+				dsc_p->dinfo.base.dscp_datap = NULL;
+			}
+
+			/* Set end status : Buffer busy */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_BUF_BUSY;
+
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+		}
+		break;
+
+	case SCF_ST_CLOSE_TXEND_RECV_WAIT:	/* Main status (D0) */
+		/* Check receive data : interface closing, drop the data */
+		if (dsc_p->dinfo.base.dscp_datap != NULL) {
+			/* Receive data release */
+			kmem_free(dsc_p->dinfo.base.dscp_datap,
+			    dsc_p->dinfo.base.length);
+			dsc_p->dinfo.base.dscp_datap = NULL;
+		}
+
+		/* Set end status : Normal end */
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+		break;
+
+	default:
+		SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__, &mainp->status,
+		    TC_INFO_SIZE);
+		SCFDBGMSG(SCF_DBGFLAG_DSCP, "Sequence error");
+
+		/* Check receive data */
+		if (dsc_p->dinfo.base.dscp_datap != NULL) {
+			/* Receive data release */
+			kmem_free(dsc_p->dinfo.base.dscp_datap,
+			    dsc_p->dinfo.base.length);
+			dsc_p->dinfo.base.dscp_datap = NULL;
+		}
+
+		/* Set end status : Sequence error */
+		dsc_p->dinfo.bdsr.status = DSC_STATUS_E_SEQUENCE;
+
+		/* Change RxDSC status (RB3) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+		break;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Tx subroutine function
+ */
+
+/*
+ * scf_dscp_send_matrix()
+ *
+ * Description: The Request to Send by a Tx descriptor state is performed.
+ *
+ * Drives the Tx descriptor (TxDSC) state machine: first builds a local
+ * control descriptor (DSCP_PATH or CONN_CHK) when the corresponding
+ * flags request one, then advances the current TxDSC through
+ * SA0 (idle) -> SB0 (SRAM transfer wait) -> SB2 (TxREQ send wait),
+ * looping back to SEND_MATRIX_START while further progress is possible.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+void
+scf_dscp_send_matrix(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_send_matrix() "
+	scf_dscp_main_t *mainp;	/* Main table address */
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+	scf_state_t *statep;	/* Soft state pointer */
+	uint8_t *wk_in_p;	/* Working value : input address */
+	uint8_t *wk_out_p;	/* Working value : output address */
+	/* Working value : next processing check flag */
+	int next_send_req = FLAG_OFF;
+	int path_ret;		/* SCF path status return value */
+	int timer_ret;		/* Timer check return value */
+	int ii;			/* Working value : counter */
+	uint16_t tx_local = scf_dscp_comtbl.tx_local;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	SCF_DBG_MAKE_NO_DSCP_PATH(scf_dscp_comtbl.dscp_path_flag);
+
+/*
+ * SEND_MATRIX_START
+ */
+	SEND_MATRIX_START:
+
+	/* Check use local control TxDSC send */
+	if ((scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) &&
+	    /* Check DSCP path change data send */
+	    (scf_dscp_comtbl.dscp_path_flag == FLAG_ON)) {
+		/* Set use local control TxDSC flag */
+		scf_dscp_comtbl.tx_local_use_flag = FLAG_ON;
+
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[tx_local];
+
+		/* Make Tx descriptor : DSCP_PATH */
+		dsc_p->dinfo.base.c_flag = DSC_FLAG_DEFAULT;
+		dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+		dsc_p->dinfo.base.length = 0;
+		dsc_p->dinfo.base.dscp_datap = NULL;
+		dsc_p->dinfo.bdcr.id = DSC_CNTL_LOCAL;
+		dsc_p->dinfo.bdcr.code = DSC_CNTL_DSCP_PATH;
+
+		/* Change TxDSC status (SB2) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+	} else if ((scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) &&
+	    (scf_dscp_comtbl.dscp_path_flag == FLAG_OFF)) {
+		/* Initialize use local control TxDSC flag */
+		scf_dscp_comtbl.tx_local_use_flag = FLAG_OFF;
+
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[scf_dscp_comtbl.tx_get];
+
+		/* Get top main table address */
+		mainp = &scf_dscp_comtbl.scf_dscp_main[0];
+		for (ii = 0; ii < MBIF_MAX; ii++, mainp++) {
+			/*
+			 * Check DSCP connect data send and not local
+			 * control TxDSC send
+			 */
+			if ((mainp->conn_chk_flag == FLAG_OFF) ||
+			    (scf_dscp_comtbl.tx_local_use_flag ==
+			    FLAG_ON)) {
+				break;
+			}
+			/* Check main status */
+			switch (mainp->status) {
+			case SCF_ST_EST_TXEND_RECV_WAIT:
+				/* Main status (B0) */
+			case SCF_ST_ESTABLISHED:
+				/* Main status (C0) */
+				/*
+				 * Set use local control TxDSC flag
+				 */
+				scf_dscp_comtbl.tx_local_use_flag = FLAG_ON;
+
+				/*
+				 * Get local data TxDSC address
+				 */
+				dsc_p = &scf_dscp_comtbl.tx_dscp[tx_local];
+
+				/*
+				 * Make Tx descriptor : CONN_CHK
+				 */
+				dsc_p->dinfo.base.c_flag = DSC_FLAG_DEFAULT;
+				dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+				dsc_p->dinfo.base.length = 0;
+				dsc_p->dinfo.base.dscp_datap = NULL;
+				dsc_p->dinfo.bdcr.id = mainp->id & 0x0f;
+				dsc_p->dinfo.bdcr.code = DSC_CNTL_CONN_CHK;
+
+				/* Change TxDSC status (SB2) */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXREQ_SEND_WAIT);
+				break;
+
+			default:
+				/*
+				 * Clear DSCP connect check flag
+				 */
+				mainp->conn_chk_flag = FLAG_OFF;
+				break;
+			}
+		}
+	} else {
+		/* Get local data TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[tx_local];
+	}
+
+	/* Check pending send TxDSC or local control TxDSC */
+	if ((scf_dscp_comtbl.tx_dsc_count == 0) &&
+	    (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF)) {
+		goto END_dscp_send_matrix;
+	}
+
+	/* Get SCF path status */
+	path_ret = scf_path_check(&statep);
+	/* Check TxDSC status */
+	switch (dsc_p->status) {
+	case SCF_TX_ST_IDLE: /* TxDSC status (SA0) */
+		/* TxDSC status == SA0 is next processing */
+		if (scf_dscp_comtbl.tx_local_use_flag == FLAG_OFF) {
+			/* Update Tx descriptor offset */
+			if (scf_dscp_comtbl.tx_get == scf_dscp_comtbl.tx_last) {
+				scf_dscp_comtbl.tx_get =
+				    scf_dscp_comtbl.tx_first;
+			} else {
+				scf_dscp_comtbl.tx_get++;
+			}
+
+			/* Update Tx descriptor count */
+			scf_dscp_comtbl.tx_dsc_count--;
+		} else {
+			/* Initialize use local control TxDSC flag */
+			scf_dscp_comtbl.tx_local_use_flag = FLAG_OFF;
+		}
+
+		/* Next processing flag ON */
+		next_send_req = FLAG_ON;
+		break;
+
+	case SCF_TX_ST_SRAM_TRANS_WAIT: /* TxDSC status (SB0) */
+		/* Check SCF path status */
+		if (path_ret != SCF_PATH_ONLINE) {
+			break;
+		}
+		/* Data copy to SRAM (byte-wise DDI puts to device SRAM) */
+		ii = dsc_p->dinfo.base.offset * DSC_OFFSET_CONVERT;
+		wk_in_p = (uint8_t *)dsc_p->dinfo.base.dscp_datap;
+		wk_out_p = (uint8_t *)&statep->scf_dscp_sram->DATA[ii];
+		for (ii = 0; ii < dsc_p->dinfo.base.length;
+		    ii++, wk_in_p++, wk_out_p++) {
+			SCF_DDI_PUT8(statep, statep->scf_dscp_sram_handle,
+			    wk_out_p, *wk_in_p);
+		}
+
+		/* Change TxDSC status (SB2) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_TXREQ_SEND_WAIT);
+
+		/* Next processing flag ON */
+		next_send_req = FLAG_ON;
+		break;
+
+	case SCF_TX_ST_TXREQ_SEND_WAIT: /* TxDSC status (SB2) */
+		/* Get timer status */
+		timer_ret = scf_timer_check(SCF_TIMERCD_DSCP_BUSY);
+		/* Check TxREQ busy timer exec */
+		if (timer_ret == SCF_TIMER_EXEC) {
+			break;
+		}
+		/* Check SCF path status */
+		if (path_ret != SCF_PATH_ONLINE) {
+			break;
+		}
+		/* Check TxREQ send exec */
+		if (scf_dscp_comtbl.tx_exec_flag == FLAG_OFF) {
+			/* TxREQ send */
+			scf_dscp_txreq_send(statep, dsc_p);
+
+			/* Check send data length */
+			if (dsc_p->dinfo.base.length != 0) {
+				/* TxACK timer start */
+				scf_timer_start(SCF_TIMERCD_DSCP_ACK);
+
+				/*
+				 * Change TxDSC status (SC0)
+				 */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXACK_RECV_WAIT);
+			} else {
+				/* TxEND timer start */
+				scf_timer_start(SCF_TIMERCD_DSCP_END);
+
+				/*
+				 * Change TxDSC status (SC1)
+				 */
+				SCF_SET_DSC_STATUS(dsc_p,
+				    SCF_TX_ST_TXEND_RECV_WAIT);
+			}
+		}
+		break;
+
+	default:
+		/* TxDSC status != SA0 or SB0 or SB2 is NOP */
+		break;
+	}
+
+	/* Check next send processing */
+	if (next_send_req == FLAG_ON) {
+		next_send_req = FLAG_OFF;
+		goto SEND_MATRIX_START;
+	}
+
+/*
+ * END_dscp_send_matrix
+ */
+	END_dscp_send_matrix:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_txreq_send()
+ *
+ * Description: TxREQ is transmitted by hard access.
+ *
+ * Programs the TxDCR flag/offset/length registers from the given TxDSC,
+ * raises the TxREQ interrupt bit in DCR, and performs a register
+ * read-back to flush the writes to the device.  Sets the global
+ * tx_exec_flag and records the (truncated) send data in the SRAM trace.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	statep - soft state pointer for the active SCF path
+ *	dsc_p  - TxDSC whose descriptor fields are written to hardware
+ */
+void
+scf_dscp_txreq_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txreq_send() "
+	uint8_t *wk_in_p;	/* Working value : input address */
+	uint8_t *wk_out_p;	/* Working value : output address */
+	uint32_t wkleng;	/* Working value : length */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Set control flag */
+	dsc_p->dinfo.bdsr.status = 0;
+	dsc_p->dinfo.base.c_flag |= DSC_FLAG_DEFAULT;
+
+	/* Write TxDCR register */
+	statep->reg_txdcr_c_flag = dsc_p->dinfo.base.c_flag;
+	SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TxDCR_C_FLAG, statep->reg_txdcr_c_flag);
+	SC_DBG_DRV_TRACE(TC_W_TxDCR_C_FLAG, __LINE__,
+	    &statep->reg_txdcr_c_flag, sizeof (statep->reg_txdcr_c_flag));
+
+	statep->reg_txdcr_c_offset = dsc_p->dinfo.base.offset;
+	SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TxDCR_OFFSET, statep->reg_txdcr_c_offset);
+	SC_DBG_DRV_TRACE(TC_W_TxDCR_OFFSET, __LINE__,
+	    &statep->reg_txdcr_c_offset,
+	    sizeof (statep->reg_txdcr_c_offset));
+
+	statep->reg_txdcr_c_length = dsc_p->dinfo.base.length;
+	SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TxDCR_LENGTH, statep->reg_txdcr_c_length);
+	SC_DBG_DRV_TRACE(TC_W_TxDCR_LENGTH, __LINE__,
+	    &statep->reg_txdcr_c_length,
+	    sizeof (statep->reg_txdcr_c_length));
+
+	/* Write DCR register : TxREQ interrupt */
+	statep->reg_dcr = DCR_TxREQ;
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR, statep->reg_dcr);
+	SC_DBG_DRV_TRACE(TC_W_DCR, __LINE__, &statep->reg_dcr,
+	    sizeof (statep->reg_dcr));
+
+	/* Register read sync (flush posted writes to the device) */
+	scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR);
+
+	SC_DBG_DRV_TRACE(TC_TxREQ, __LINE__, &statep->reg_txdcr_c_flag, 8);
+
+	SCFDBGMSG1(SCF_DBGFLAG_REG, "DCR = 0x%02x", statep->reg_dcr);
+	SCFDBGMSG3(SCF_DBGFLAG_REG, "TxDCR = 0x%04x 0x%04x 0x%08x",
+	    statep->reg_txdcr_c_flag, statep->reg_txdcr_c_offset,
+	    statep->reg_txdcr_c_length);
+
+	/* TxREQ send exec flag ON */
+	scf_dscp_comtbl.tx_exec_flag = FLAG_ON;
+
+	/* SRAM trace (send data is truncated to the INFO buffer size) */
+	SCF_SRAM_TRACE(statep, DTC_DSCP_TXREQ);
+	if (dsc_p->dinfo.base.length != 0) {
+		wk_in_p = (uint8_t *)dsc_p->dinfo.base.dscp_datap;
+		wk_out_p = (uint8_t *)&statep->memo_scf_drvtrc.INFO[0];
+		if (dsc_p->dinfo.base.length >
+		    sizeof (statep->memo_scf_drvtrc.INFO)) {
+			wkleng = sizeof (statep->memo_scf_drvtrc.INFO);
+		} else {
+			wkleng = dsc_p->dinfo.base.length;
+		}
+		bcopy(wk_in_p, wk_out_p, wkleng);
+		SCF_SRAM_TRACE(statep, DTC_DSCP_SENDDATA);
+	}
+
+	SCF_DBG_TEST_TXREQ_SEND(statep, dsc_p);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Rx subroutine function
+ */
+
+/*
+ * scf_dscp_recv_matrix()
+ *
+ * Description: TxREQ received performs the corresponding response request.
+ *
+ * Drives the Rx descriptor (RxDSC) state machine: allocates a receive
+ * buffer and sends RxACK (RB0), copies data from device SRAM and
+ * notifies the main matrix (RB1), then sends RxEND and retires the
+ * descriptor (RB3), looping back to RECV_MATRIX_START while further
+ * progress is possible.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+void
+scf_dscp_recv_matrix(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_recv_matrix() "
+	scf_dscp_main_t *mainp;	/* Main table address */
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+	scf_state_t *statep;	/* Soft state pointer */
+	caddr_t wkaddr;		/* Working value : buffer address */
+	uint8_t *wk_in_p;	/* Working value : input address */
+	uint8_t *wk_out_p;	/* Working value : output address */
+	uint32_t wkleng;	/* Working value : length */
+	uint32_t info_size;
+	/* Working value : next receive processing check flag */
+	int next_resp_req = FLAG_OFF;
+	int path_ret;		/* SCF path status return value */
+	int ii;			/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+/*
+ * RECV_MATRIX_START
+ */
+	RECV_MATRIX_START:
+
+	/* Check pending RxDSC */
+	if (scf_dscp_comtbl.rx_dsc_count == 0) {
+		goto END_dscp_recv_matrix;
+	}
+
+	/* Get RxDSC address */
+	dsc_p = &scf_dscp_comtbl.rx_dscp[scf_dscp_comtbl.rx_get];
+
+	/* Get SCF path status */
+	path_ret = scf_path_check(&statep);
+
+	/* Check RxDSC status */
+	switch (dsc_p->status) {
+	case SCF_RX_ST_RXACK_SEND_WAIT: /* RxDSC status (RB0) */
+		/* Check SCF path status */
+		if (path_ret != SCF_PATH_ONLINE) {
+			break;
+		}
+		/* Check receive data length */
+		if (dsc_p->dinfo.base.length != 0) {
+			/*
+			 * Rx buffer allocation
+			 *
+			 * NOTE(review): kmem_zalloc(KM_NOSLEEP) may return
+			 * NULL; the result is stored unchecked and later
+			 * dereferenced by the SRAM copy in state RB1 -
+			 * confirm whether a NULL check / busy status is
+			 * needed here.
+			 */
+			wkaddr = (caddr_t)kmem_zalloc(dsc_p->dinfo.base.length,
+			    KM_NOSLEEP);
+
+			/* Set Rx buffer address */
+			dsc_p->dinfo.base.dscp_datap = wkaddr;
+
+			/* RxACK send */
+			scf_dscp_rxack_send(statep);
+
+			/* Change RxDSC status (RB1) */
+			SCF_SET_DSC_STATUS(dsc_p,
+			    SCF_RX_ST_SRAM_TRANS_WAIT);
+		} else {
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+			break;
+		}
+		/* Next receive processing flag ON */
+		next_resp_req = FLAG_ON;
+
+		break;
+
+	case SCF_RX_ST_SRAM_TRANS_WAIT: /* RxDSC status (RB1) */
+		/* Check SCF path status */
+		if (path_ret != SCF_PATH_ONLINE) {
+			break;
+		}
+		/* Get main table address from "id" */
+		mainp = scf_dscp_id2mainp(dsc_p->dinfo.bdcr.id);
+
+		/* Check mainp address */
+		if (mainp != NULL) {
+			/* Data copy from SRAM (byte-wise DDI gets) */
+			ii = dsc_p->dinfo.base.offset * DSC_OFFSET_CONVERT;
+			wk_in_p = &statep->scf_dscp_sram->DATA[ii];
+			wk_out_p = (uint8_t *)dsc_p->dinfo.base.dscp_datap;
+			for (ii = 0; ii < dsc_p->dinfo.base.length; ii++,
+			    wk_in_p++, wk_out_p++) {
+				*wk_out_p = SCF_DDI_GET8(statep,
+				    statep->scf_dscp_sram_handle, wk_in_p);
+			}
+
+			/* Set end status : Normal end */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_NORMAL;
+
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+
+			/* SRAM trace */
+			info_size = sizeof (statep->memo_scf_drvtrc.INFO);
+			if (dsc_p->dinfo.base.length != 0) {
+				wk_in_p =
+				    (uint8_t *)dsc_p->dinfo.base.dscp_datap;
+				wk_out_p = &statep->memo_scf_drvtrc.INFO[0];
+				if (dsc_p->dinfo.base.length > info_size) {
+					wkleng = info_size;
+				} else {
+					wkleng = dsc_p->dinfo.base.length;
+				}
+				bcopy(wk_in_p, wk_out_p, wkleng);
+				SCF_SRAM_TRACE(statep, DTC_DSCP_RECVDATA);
+			}
+
+			/* Receive data notice to main matrix */
+			scf_dscp_rxdata_notice(mainp);
+		} else {
+			/* Invalid "id" */
+			SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__,
+			    &dsc_p->dinfo.base.c_flag, TC_INFO_SIZE);
+			SCFDBGMSG(SCF_DBGFLAG_DSCP, "Invalid id");
+
+			/* Set end status : Parameter error */
+			dsc_p->dinfo.bdsr.status = DSC_STATUS_E_PARAM;
+
+			/* Change RxDSC status (RB3) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_RXEND_SEND_WAIT);
+		}
+
+		/* Next receive processing flag ON */
+		next_resp_req = FLAG_ON;
+		break;
+
+	case SCF_RX_ST_RXEND_SEND_WAIT: /* RxDSC status (RB3) */
+		/* Is SCF path online? */
+		if (path_ret != SCF_PATH_ONLINE) {
+			break;
+		}
+		/* RxEND send */
+		scf_dscp_rxend_send(statep, dsc_p);
+
+		/* Change RxDSC status (RA0) */
+		SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_IDLE);
+
+		/* Update Rx descriptor offset (circular queue advance) */
+		if (scf_dscp_comtbl.rx_get == scf_dscp_comtbl.rx_last) {
+			scf_dscp_comtbl.rx_get = scf_dscp_comtbl.rx_first;
+		} else {
+			scf_dscp_comtbl.rx_get++;
+		}
+
+		/* Update Rx descriptor count */
+		scf_dscp_comtbl.rx_dsc_count--;
+
+		/* RxREQ receive exec flag OFF */
+		scf_dscp_comtbl.rx_exec_flag = FLAG_OFF;
+		break;
+
+	default:
+		/* RxDSC status == RA0 is NOP */
+		break;
+	}
+
+	/* Check next receive processing */
+	if (next_resp_req == FLAG_ON) {
+		next_resp_req = FLAG_OFF;
+		goto RECV_MATRIX_START;
+	}
+
+/*
+ * END_dscp_recv_matrix
+ */
+	END_dscp_recv_matrix:
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxack_send()
+ *
+ * Description: RxACK is transmitted by hard access.
+ *
+ * Writes the RxACK interrupt bit to the DCR register and performs a
+ * register read-back to flush the write to the device.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	statep - soft state pointer for the active SCF path
+ */
+void
+scf_dscp_rxack_send(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxack_send() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Write DCR register : RxACK interrupt */
+	statep->reg_dcr = DCR_RxACK;
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR, statep->reg_dcr);
+	SC_DBG_DRV_TRACE(TC_W_DCR, __LINE__, &statep->reg_dcr,
+	    sizeof (statep->reg_dcr));
+
+	/* Register read sync (flush posted write to the device) */
+	scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR);
+
+	SC_DBG_DRV_TRACE(TC_RxACK, __LINE__, NULL, 0);
+
+	SCFDBGMSG1(SCF_DBGFLAG_REG, "DCR = 0x%02x", statep->reg_dcr);
+
+	/* SRAM trace */
+	SCF_SRAM_TRACE(statep, DTC_DSCP_RXACK);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxend_send()
+ *
+ * Description: RxEND is transmitted by hard access.
+ *
+ * Programs the RxDSR flag/offset registers from the given RxDSC, raises
+ * the RxEND interrupt bit in DCR, and performs a register read-back to
+ * flush the writes to the device.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	statep - soft state pointer for the active SCF path
+ *	dsc_p  - RxDSC whose descriptor fields are written to hardware
+ */
+void
+scf_dscp_rxend_send(scf_state_t *statep, scf_dscp_dsc_t *dsc_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxend_send() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Write RxDSR register */
+	statep->reg_rxdsr_c_flag = dsc_p->dinfo.base.c_flag;
+	SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->RxDSR_C_FLAG, statep->reg_rxdsr_c_flag);
+	SC_DBG_DRV_TRACE(TC_W_RxDSR_C_FLAG, __LINE__, &statep->reg_rxdsr_c_flag,
+	    sizeof (statep->reg_rxdsr_c_flag));
+
+	statep->reg_rxdsr_c_offset = dsc_p->dinfo.base.offset;
+	SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->RxDSR_OFFSET, statep->reg_rxdsr_c_offset);
+	SC_DBG_DRV_TRACE(TC_W_RxDSR_OFFSET, __LINE__,
+	    &statep->reg_rxdsr_c_offset,
+	    sizeof (statep->reg_rxdsr_c_offset));
+
+	/* Write DCR register : RxEND interrupt */
+	statep->reg_dcr = DCR_RxEND;
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR, statep->reg_dcr);
+	SC_DBG_DRV_TRACE(TC_W_DCR, __LINE__, &statep->reg_dcr,
+	    sizeof (statep->reg_dcr));
+
+	/* Register read sync (flush posted writes to the device) */
+	scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->DCR);
+
+	SC_DBG_DRV_TRACE(TC_RxEND, __LINE__, &statep->reg_rxdsr_c_flag, 4);
+
+	SCFDBGMSG1(SCF_DBGFLAG_REG, "DCR = 0x%02x", statep->reg_dcr);
+	SCFDBGMSG2(SCF_DBGFLAG_REG, "RxDSR = 0x%04x 0x%04x",
+	    statep->reg_rxdsr_c_flag, statep->reg_rxdsr_c_offset);
+
+	/* SRAM trace */
+	SCF_SRAM_TRACE(statep, DTC_DSCP_RXEND);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * subroutine function
+ */
+
+/*
+ * scf_dscp_dscbuff_free_all()
+ *
+ * Description: All descriptor buffer release processing.
+ *
+ * Frees the data buffers (and, for Tx, the SRAM reservations) of every
+ * non-idle Tx and Rx descriptor, returns each descriptor to its idle
+ * state, and re-initializes the Tx/Rx queue counters, flags and retry
+ * counters.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+void
+scf_dscp_dscbuff_free_all(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_dscbuff_free_all() "
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+	int ii;			/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get TxDSC address */
+	dsc_p = scf_dscp_comtbl.tx_dscp;
+
+	if (dsc_p != NULL) {
+		/* Check TxDSC */
+		for (ii = 0; ii < scf_dscp_comtbl.txdsc_maxcount; ii++,
+		    dsc_p++) {
+			/* Check TxDSC status */
+			if (dsc_p->status == SCF_TX_ST_IDLE) {
+				continue;
+			}
+			/* TxDSC status not (SA0) */
+			/* Check send data */
+			if (dsc_p->dinfo.base.dscp_datap != NULL) {
+				/* Send data release */
+				kmem_free(dsc_p->dinfo.base.dscp_datap,
+				    dsc_p->dinfo.base.length);
+				dsc_p->dinfo.base.dscp_datap = NULL;
+			}
+
+			/* Check SRAM data */
+			if (dsc_p->dinfo.base.offset != DSC_OFFSET_NOTHING) {
+				/* Send SRAM data release */
+				scf_dscp_sram_free(dsc_p->dinfo.base.offset);
+				dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+			}
+
+			/* Change TxDSC status (SA0) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_IDLE);
+		}
+
+		/* Tx flag initialization */
+		scf_dscp_comtbl.tx_exec_flag = FLAG_OFF;
+		scf_dscp_comtbl.dscp_path_flag = FLAG_OFF;
+		scf_dscp_comtbl.tx_local_use_flag = FLAG_OFF;
+
+		/* TxDSC counter/offset initialization (queue emptied) */
+		scf_dscp_comtbl.tx_get = scf_dscp_comtbl.tx_put;
+		scf_dscp_comtbl.tx_dsc_count = 0;
+
+		/* Tx re-try counter initialization */
+		scf_dscp_comtbl.tx_ackto_retry_cnt = 0;
+		scf_dscp_comtbl.tx_endto_retry_cnt = 0;
+
+		scf_dscp_comtbl.tx_busy_retry_cnt = 0;
+		scf_dscp_comtbl.tx_interface_retry_cnt = 0;
+		scf_dscp_comtbl.tx_nak_retry_cnt = 0;
+		scf_dscp_comtbl.tx_notsuop_retry_cnt = 0;
+		scf_dscp_comtbl.tx_prmerr_retry_cnt = 0;
+		scf_dscp_comtbl.tx_seqerr_retry_cnt = 0;
+		scf_dscp_comtbl.tx_other_retry_cnt = 0;
+		scf_dscp_comtbl.tx_send_retry_cnt = 0;
+	}
+
+	/* Get RxDSC address */
+	dsc_p = scf_dscp_comtbl.rx_dscp;
+
+	if (dsc_p != NULL) {
+		/* Check RxDSC */
+		for (ii = 0; ii < scf_dscp_comtbl.rxdsc_maxcount; ii++,
+		    dsc_p++) {
+			/* Check RxDSC status */
+			if (dsc_p->status == SCF_RX_ST_IDLE) {
+				continue;
+			}
+			/* RxDSC status not (RA0) */
+			/* Check receive data */
+			if (dsc_p->dinfo.base.dscp_datap != NULL) {
+				/* Receive data release */
+				kmem_free(dsc_p->dinfo.base.dscp_datap,
+				    dsc_p->dinfo.base.length);
+				dsc_p->dinfo.base.dscp_datap = NULL;
+			}
+
+			/* Change RxDSC status (RA0) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_IDLE);
+		}
+
+		/* Rx flag initialization */
+		scf_dscp_comtbl.rx_exec_flag = FLAG_OFF;
+
+		/* RxDSC counter/offset initialization (queue emptied) */
+		scf_dscp_comtbl.rx_get = scf_dscp_comtbl.rx_put;
+		scf_dscp_comtbl.rx_dsc_count = 0;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_txdscbuff_free()
+ *
+ * Description: Tx descriptor buffer release processing.
+ *
+ * Walks the pending Tx descriptor queue and, for every TxDSC belonging
+ * to the given main table entry (matched by id) that is still waiting
+ * to be sent (SB0 or SB2), frees its data buffer and SRAM reservation
+ * and returns it to the idle state.  Descriptors already in flight are
+ * left untouched.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	mainp - main table entry whose Tx descriptors are to be released
+ */
+void
+scf_dscp_txdscbuff_free(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_txdscbuff_free() "
+	scf_dscp_dsc_t *dsc_p;	/* TxDSC address */
+	uint16_t wkget;		/* Working value : get offset */
+	int ii;			/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get TxDSC offset */
+	wkget = scf_dscp_comtbl.tx_get;
+
+	/* Check TxDSC */
+	for (ii = 0; ii < scf_dscp_comtbl.tx_dsc_count; ii++) {
+		/* Get TxDSC address */
+		dsc_p = &scf_dscp_comtbl.tx_dscp[wkget];
+
+		/* Update Tx descriptor offset (circular queue advance) */
+		if (wkget == scf_dscp_comtbl.tx_last) {
+			wkget = scf_dscp_comtbl.tx_first;
+		} else {
+			wkget++;
+		}
+
+		/* Check main use data */
+		if (mainp->id != dsc_p->dinfo.bdcr.id) {
+			continue;
+		}
+		/* Check TxDSC status */
+		switch (dsc_p->status) {
+		case SCF_TX_ST_SRAM_TRANS_WAIT:
+			/* TxDSC status (SB0) */
+		case SCF_TX_ST_TXREQ_SEND_WAIT:
+			/* TxDSC status (SB2) */
+			/* Check send data */
+			if (dsc_p->dinfo.base.dscp_datap != NULL) {
+				/* Send data release */
+				kmem_free(dsc_p->dinfo.base.dscp_datap,
+				    dsc_p->dinfo.base.length);
+				dsc_p->dinfo.base.dscp_datap = NULL;
+			}
+
+			/* Check SRAM data */
+			if (dsc_p->dinfo.base.offset != DSC_OFFSET_NOTHING) {
+				/* Send SRAM data release */
+				scf_dscp_sram_free(dsc_p->dinfo.base.offset);
+				dsc_p->dinfo.base.offset = DSC_OFFSET_NOTHING;
+			}
+
+			/* Change TxDSC status (SA0) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_TX_ST_IDLE);
+			break;
+
+		default:
+			/* TxDSC status != SB0 or SB2 is NOP */
+			break;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rxdscbuff_free()
+ *
+ * Description: Rx descriptor buffer release processing.
+ *
+ * Walks the pending Rx descriptor queue looking for a non-idle RxDSC
+ * belonging to the given main table entry (matched by id); frees its
+ * receive buffer, returns it to the idle state, and resets the Rx
+ * queue state.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ * NOTE(review): on the first match this resets rx_get/rx_dsc_count for
+ * the ENTIRE queue and stops scanning, unlike scf_dscp_txdscbuff_free()
+ * which continues and only retires matching descriptors - confirm this
+ * asymmetry is intended.
+ *
+ *	mainp - main table entry whose Rx descriptor is to be released
+ */
+void
+scf_dscp_rxdscbuff_free(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rxdscbuff_free() "
+	scf_dscp_dsc_t *dsc_p;	/* RxDSC address */
+	uint16_t wkget;		/* Working value : get offset */
+	int ii;			/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Get RxDSC offset */
+	wkget = scf_dscp_comtbl.rx_get;
+
+	/* Check RxDSC */
+	for (ii = 0; ii < scf_dscp_comtbl.rx_dsc_count; ii++) {
+		/* Get RxDSC address */
+		dsc_p = &scf_dscp_comtbl.rx_dscp[wkget];
+
+		/* Update Rx descriptor offset (circular queue advance) */
+		if (wkget == scf_dscp_comtbl.rx_last) {
+			wkget = scf_dscp_comtbl.rx_first;
+		} else {
+			wkget++;
+		}
+
+		/* Check main use data */
+		if (mainp->id != dsc_p->dinfo.bdcr.id) {
+			continue;
+		}
+		/* Check RxDSC status */
+		if (dsc_p->status != SCF_RX_ST_IDLE) {
+			/* RxDSC status not (RA0) */
+			/* Check receive data */
+			if (dsc_p->dinfo.base.dscp_datap != NULL) {
+				/* Receive data release */
+				kmem_free(dsc_p->dinfo.base.dscp_datap,
+				    dsc_p->dinfo.base.length);
+				dsc_p->dinfo.base.dscp_datap = NULL;
+			}
+
+			/* Change RxDSC status (RA0) */
+			SCF_SET_DSC_STATUS(dsc_p, SCF_RX_ST_IDLE);
+
+			/* Rx flag initialization */
+			scf_dscp_comtbl.rx_exec_flag = FLAG_OFF;
+
+			/* RxDSC counter/offset initialization */
+			scf_dscp_comtbl.rx_get = scf_dscp_comtbl.rx_put;
+			scf_dscp_comtbl.rx_dsc_count = 0;
+			break;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_rdata_free()
+ *
+ * Description: All receive data buffer release processing.
+ *
+ * Drains the given main table entry's receive data queue, freeing each
+ * queued buffer and advancing the circular get offset until the queue
+ * count reaches zero.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	mainp - main table entry whose receive data queue is drained
+ */
+void
+scf_dscp_rdata_free(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_rdata_free() "
+	/* Current receive data queue address */
+	scf_rdata_que_t *rdt_p;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	while (mainp->rd_count != 0) {
+		/* Get receive data queue address */
+		rdt_p = &mainp->rd_datap[mainp->rd_get];
+
+		/* Check receive data buffer */
+		if (rdt_p->rdatap != NULL) {
+			/* Receive data release */
+			kmem_free(rdt_p->rdatap, rdt_p->length);
+			rdt_p->rdatap = NULL;
+		}
+
+		/* Update receive data queue (circular advance) */
+		if (mainp->rd_get == mainp->rd_last) {
+			mainp->rd_get = mainp->rd_first;
+		} else {
+			mainp->rd_get++;
+		}
+
+		/* Update receive data queue count */
+		mainp->rd_count--;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_event_queue()
+ *
+ * Description: Event queueing processing.
+ *
+ * Appends the given event to the main table entry's event queue and
+ * schedules delivery: kicks the DSCP soft interrupt (if the softint
+ * mutex can be acquired without blocking) and starts the callback
+ * timer as a fallback.  For SCF_MB_DISC_ERROR the entry's pending
+ * Tx/Rx descriptors, queued events and receive buffers are released
+ * first.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ * NOTE(review): there is no full-queue check before enqueueing -
+ * presumably ev_quep is sized for the maximum number of outstanding
+ * events; confirm against the queue allocation.
+ *
+ *	mainp  - main table entry receiving the event
+ *	mevent - event code to queue
+ */
+void
+scf_dscp_event_queue(scf_dscp_main_t *mainp, scf_event_t mevent)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_event_queue() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check DISC ERROR event */
+	if (mevent == SCF_MB_DISC_ERROR) {
+		/* TxDSC buffer release */
+		scf_dscp_txdscbuff_free(mainp);
+
+		/* RxDSC buffer release */
+		scf_dscp_rxdscbuff_free(mainp);
+
+		/* All queued events release */
+		scf_dscp_event_queue_free(mainp);
+
+		/* All receive buffer release */
+		scf_dscp_rdata_free(mainp);
+	}
+
+	/* Event queuing */
+	mainp->ev_quep[mainp->ev_put].mevent = mevent;
+
+	/* Update event queue offset (circular advance) */
+	if (mainp->ev_put == mainp->ev_last) {
+		mainp->ev_put = mainp->ev_first;
+	} else {
+		mainp->ev_put++;
+	}
+
+	/* Update event queue count */
+	mainp->ev_count++;
+
+	/* Soft interrupt : call scf_dscp_callback() */
+	if (mutex_tryenter(&scf_comtbl.si_mutex) != 0) {
+		scf_comtbl.scf_softintr_dscp_kicked = FLAG_ON;
+		ddi_trigger_softintr(scf_comtbl.scf_softintr_id);
+		mutex_exit(&scf_comtbl.si_mutex);
+	}
+
+	/* Callback timer start */
+	scf_timer_start(SCF_TIMERCD_DSCP_CALLBACK);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_event_queue_free()
+ *
+ * Description: Event queue release processing.
+ *
+ * Discards all queued events for the given main table entry by
+ * snapping the get offset to the put offset and zeroing the count;
+ * no per-event storage needs freeing.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	mainp - main table entry whose event queue is emptied
+ */
+void
+scf_dscp_event_queue_free(scf_dscp_main_t *mainp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_event_queue_free() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* All queued events release */
+	mainp->ev_get = mainp->ev_put;
+	mainp->ev_count = 0;
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_dscp_mkey2mainp()
+ *
+ * Description: Get MAIN control table address processing by mkey.
+ *
+ * Looks up "mkey" in the scf_dscp_mkey_search[] table and returns the
+ * main table entry at the matching index.
+ *
+ *	mkey - mailbox key to look up
+ *
+ * Returns the main table address, or NULL if mkey is not registered.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+scf_dscp_main_t *
+scf_dscp_mkey2mainp(mkey_t mkey)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_mkey2mainp() "
+	/* Return value : Main table address */
+	scf_dscp_main_t *mainp = NULL;
+	int ii;			/* Working value : counter */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	for (ii = 0; ii < MBIF_MAX; ii++) {
+		/* Check "mkey" at search table */
+		if (mkey == scf_dscp_mkey_search[ii]) {
+			/* Set mainp address */
+			mainp = &scf_dscp_comtbl.scf_dscp_main[ii];
+			break;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+	return (mainp);
+}
+
+
+/*
+ * scf_dscp_id2mainp()
+ *
+ * Description: Get MAIN control table address processing by id.
+ *
+ * Uses "id" directly as an index into the main table after a bounds
+ * check against MBIF_MAX.
+ *
+ *	id - descriptor id (main table index)
+ *
+ * Returns the main table address, or NULL if id is out of range.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+scf_dscp_main_t *
+scf_dscp_id2mainp(uint8_t id)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_id2mainp() "
+	/* Return value : Main table address */
+	scf_dscp_main_t *mainp = NULL;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check "id" */
+	if (id < MBIF_MAX) {
+		/* Set mainp address */
+		mainp = &scf_dscp_comtbl.scf_dscp_main[id];
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+	return (mainp);
+}
+
+
+/*
+ * scf_dscp_sram_get()
+ *
+ * Description: Tx SRAM alloc processing.
+ *
+ * Scans the Tx SRAM table round-robin (starting at tx_sram_put) for a
+ * free entry, marks it in use, and returns its device SRAM offset.
+ *
+ * Returns the allocated Tx SRAM offset, or TX_SRAM_GET_ERROR when all
+ * entries are in use.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ */
+uint16_t
+scf_dscp_sram_get(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_sram_get() "
+	scf_tx_sram_t *sram_p;	/* Tx SRAM table address */
+	int ii;			/* Working value : counter */
+	/* Return value : Tx SRAM offset */
+	uint16_t offset = TX_SRAM_GET_ERROR;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	/* Check Tx SRAM space */
+	if (scf_dscp_comtbl.tx_sram_count >=
+	    scf_dscp_comtbl.txsram_maxcount) {
+		goto END_dscp_sram_get;
+	}
+
+	/* Check all Tx SRAM table */
+	for (ii = 0; ii < scf_dscp_comtbl.txsram_maxcount; ii++) {
+		/* Get Tx SRAM table address */
+		sram_p = &scf_dscp_comtbl.tx_sramp[scf_dscp_comtbl.tx_sram_put];
+
+		/* Update Tx SRAM offset (circular advance) */
+		if (scf_dscp_comtbl.tx_sram_put ==
+		    scf_dscp_comtbl.tx_sram_last) {
+			scf_dscp_comtbl.tx_sram_put =
+			    scf_dscp_comtbl.tx_sram_first;
+		} else {
+			scf_dscp_comtbl.tx_sram_put++;
+		}
+
+		/* Check Tx SRAM use */
+		if (sram_p->use_flag == FLAG_OFF) {
+			/* Tx SRAM use flag ON */
+			sram_p->use_flag = FLAG_ON;
+
+			/* Get Tx SRAM offset */
+			offset = sram_p->offset;
+
+			/* Update Tx SRAM count */
+			scf_dscp_comtbl.tx_sram_count++;
+			break;
+		}
+	}
+
+/*
+ * END_dscp_sram_get
+ */
+	END_dscp_sram_get:
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end offset = 0x%04x",
+	    offset);
+	return (offset);
+}
+
+
+/*
+ * scf_dscp_sram_free()
+ *
+ * Description: Tx SRAM release processing
+ *
+ * Converts "offset" back to a Tx SRAM table index (each entry covers
+ * txbuffsize / DSC_OFFSET_CONVERT units of offset), validates that the
+ * indexed entry's offset matches, and if so clears its use flag and
+ * decrements the in-use count.  An unrecognized offset is silently
+ * ignored.
+ * Caller must hold scf_comtbl.all_mutex (asserted below).
+ *
+ *	offset - Tx SRAM offset previously returned by scf_dscp_sram_get()
+ */
+void
+scf_dscp_sram_free(uint16_t offset)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_dscp_sram_free() "
+	scf_tx_sram_t *sram_p;	/* Tx SRAM table address */
+	uint16_t wkget;		/* Working value : get offset */
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start offset = 0x%04x",
+	    offset);
+
+	/* "offset" to Tx SRAM get offset */
+	wkget = (uint16_t)(offset
+	    / (scf_dscp_comtbl.txbuffsize / DSC_OFFSET_CONVERT));
+
+	/* Check Tx SRAM get offset */
+	if (wkget < scf_dscp_comtbl.txsram_maxcount) {
+		/* Get Tx SRAM table address */
+		sram_p = &scf_dscp_comtbl.tx_sramp[wkget];
+
+		/* Check "offset" */
+		if (offset == sram_p->offset) {
+			/* Tx SRAM use flag OFF */
+			sram_p->use_flag = FLAG_OFF;
+
+			/* Update Tx SRAM count */
+			scf_dscp_comtbl.tx_sram_count--;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfhandler.c b/usr/src/uts/sun4u/opl/io/scfd/scfhandler.c
new file mode 100644
index 0000000000..e926481551
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfhandler.c
@@ -0,0 +1,3458 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+/*
+ * Function list
+ */
+uint_t scf_intr(caddr_t arg);
+int scf_intr_cmdcmp(scf_state_t *statep);
+void scf_intr_cmdcmp_driver(scf_state_t *statep, struct scf_cmd *scfcmdp);
+int scf_intr_dsens(struct scf_cmd *scfcmdp, scf_int_reason_t *int_rp,
+ int len);
+void scf_status_change(scf_state_t *statep);
+void scf_next_cmd_check(scf_state_t *statep);
+void scf_next_rxdata_get(void);
+void scf_online_wait_tout(void);
+void scf_cmdbusy_tout(void);
+void scf_cmdend_tout(void);
+void scf_report_send_wait_tout(void);
+void scf_alivecheck_intr(scf_state_t *statep);
+void scf_path_change(scf_state_t *statep);
+void scf_halt(uint_t mode);
+void scf_panic_callb(int code);
+void scf_shutdown_callb(int code);
+uint_t scf_softintr(caddr_t arg);
+void scf_cmdwait_status_set(void);
+
+/*
+ * External function
+ */
+extern void scf_dscp_start(uint32_t factor);
+extern void scf_dscp_stop(uint32_t factor);
+extern void scf_dscp_intr(scf_state_t *state);
+extern void scf_dscp_callback(void);
+
+extern void do_shutdown(void);
+
+
+/*
+ * scf_intr()
+ *
+ * Description: Interrupt handler entry processing.
+ *
+ * arg: scf_state_t pointer of the interrupting SCF path (registered
+ * as the interrupt handler argument).
+ *
+ * return: always DDI_INTR_CLAIMED. Even interrupts that carry no
+ * factor bit are claimed; those are only counted in
+ * scf_comtbl.scf_unclamed_cnt together with a register snapshot.
+ */
+uint_t
+scf_intr(caddr_t arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_intr() "
+ scf_state_t *statep = (void *)arg;
+ int path_change = 0;
+ uint_t ret = DDI_INTR_CLAIMED;
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+ SC_DBG_DRV_TRACE(TC_INTR|TC_IN, __LINE__, &arg, sizeof (caddr_t));
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Path status check */
+ if (scf_check_state(statep) == PATH_STAT_EMPTY) {
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+ goto END_intr;
+ }
+
+ /*
+ * PANIC exec status : while panicking, only silence the hardware
+ * (disable + clear every interrupt source) and return; no event
+ * processing is done. The SCF_P_* access variants are used here.
+ */
+ if (scf_panic_exec_flag) {
+ /* SCF interrupt disable(CR) */
+ SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+ &statep->scf_regs_c->CONTROL, CONTROL_DISABLE);
+ /* Register read sync */
+ scf_rs16 = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+ &statep->scf_regs_c->CONTROL);
+
+ /* SCF Status register interrupt(STR) : clear */
+ SCF_P_DDI_PUT16(statep->scf_regs_handle,
+ &statep->scf_regs->STATUS, 0xffff);
+
+ /* SCF Status extended register(STExR) : interrupt clear */
+ SCF_P_DDI_PUT32(statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR, 0xffffffff);
+
+ /* DSCP buffer status register(DSR) : interrupt clear */
+ SCF_P_DDI_PUT8(statep->scf_regs_handle,
+ &statep->scf_regs->DSR, 0xff);
+
+ /*
+ * SCF interrupt status register(ISR) : interrupt clear
+ * NOTE(review): CONTROL_ALIVEINE is a CONTROL-register bit
+ * name used as an INT_ST mask here and below — presumably
+ * the bit positions coincide; confirm against the register
+ * specification.
+ */
+ SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST,
+ (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+ scf_rs16 = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST);
+ goto END_intr;
+ }
+
+ /* Check hard error after or interrupt disable status */
+ if ((statep->scf_herr & HERR_EXEC) ||
+ (!(statep->resource_flag & S_DID_REGENB))) {
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* Interrupt disable */
+ scf_forbid_intr(statep);
+
+ /* SCF Status register interrupt(STR) : clear */
+ SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS, 0xffff);
+
+ /* SCF Status extended register(STExR) : interrupt clear */
+ SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR, 0xffffffff);
+
+ /* DSCP buffer status register(DSR) : interrupt clear */
+ SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->DSR, 0xff);
+
+ /* SCF interrupt status register(ISR) : interrupt clear */
+ SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST,
+ (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+ scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST);
+ goto END_intr;
+ }
+
+ /* Get SCF interrupt register */
+ statep->reg_int_st = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST);
+ SC_DBG_DRV_TRACE(TC_R_INT_ST, __LINE__, &statep->reg_int_st,
+ sizeof (statep->reg_int_st));
+
+ /* SCF interrupt register interrupt clear (write back what we read) */
+ SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST, statep->reg_int_st);
+ SC_DBG_DRV_TRACE(TC_W_INT_ST, __LINE__, &statep->reg_int_st,
+ sizeof (statep->reg_int_st));
+ /* Register read sync */
+ scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST);
+
+ SCF_DBG_TEST_INTR(statep);
+
+ SCFDBGMSG1(SCF_DBGFLAG_REG, "ISR = 0x%04x", statep->reg_int_st);
+
+ /* Get SCF status register */
+ statep->reg_status = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle, &statep->scf_regs->STATUS);
+ SC_DBG_DRV_TRACE(TC_R_STATUS, __LINE__, &statep->reg_status,
+ sizeof (statep->reg_status));
+
+ /* Get SCF status extended register */
+ statep->reg_status_exr = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle, &statep->scf_regs->STATUS_ExR);
+ SC_DBG_DRV_TRACE(TC_R_STATUS_ExR, __LINE__, &statep->reg_status_exr,
+ sizeof (statep->reg_status_exr));
+
+ /* Get SCF command register */
+ statep->reg_command = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle, &statep->scf_regs->COMMAND);
+ SC_DBG_DRV_TRACE(TC_R_COMMAND, __LINE__, &statep->reg_command,
+ sizeof (statep->reg_command));
+
+ /* Get SCF command extended register */
+ statep->reg_command_exr = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->COMMAND_ExR);
+ SC_DBG_DRV_TRACE(TC_R_COMMAND_ExR, __LINE__, &statep->reg_command_exr,
+ sizeof (statep->reg_command_exr));
+
+ SCF_DBG_TEST_INTR_SCFINT(statep);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_INT);
+
+ /* Check SCF path change interrupt */
+ if (statep->reg_int_st & CONTROL_PATHCHGIE) {
+ /* Check interrupt SCF path */
+ if ((statep != scf_comtbl.scf_exec_p) &&
+ (statep != scf_comtbl.scf_path_p)) {
+ path_change = 1;
+ goto END_intr;
+ }
+ }
+
+ /* Check Alive Interrupt */
+ if (statep->reg_int_st & INT_ST_ALIVEINT) {
+ /* Check interrupt SCF path */
+ if ((statep == scf_comtbl.scf_exec_p) ||
+ (statep == scf_comtbl.scf_path_p)) {
+ /* Alive check interrupt */
+ scf_alivecheck_intr(statep);
+ } else {
+ /* not active SCF path */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+ /* Alive interrupt disable */
+ scf_alivecheck_stop(statep);
+ }
+ }
+
+ /* Check SCF interrupt */
+ if (statep->reg_int_st & INT_ST_SCFINT) {
+ SC_DBG_DRV_TRACE(TC_RSTS, __LINE__, &statep->reg_command,
+ TC_INFO_SIZE);
+
+ SCFDBGMSG2(SCF_DBGFLAG_REG, "STR = 0x%04x STExR = 0x%08x",
+ statep->reg_status, statep->reg_status_exr);
+
+ /* Check active SCF path */
+ if ((statep == scf_comtbl.scf_exec_p) ||
+ (statep == scf_comtbl.scf_path_p)) {
+
+ /* Mode changed bit valid */
+ if ((statep->reg_status & STATUS_MODE_CHANGED) ||
+ (statep->reg_status & STATUS_CMD_COMPLETE)) {
+ /* Check secure mode status */
+ if ((statep->reg_status & STATUS_SECURE_MODE) ==
+ STATUS_MODE_LOCK) {
+ /* Mode status LOCK */
+ scf_dm_secure_mode = SCF_STAT_MODE_LOCK;
+ if (((scf_comtbl.scf_mode_sw &
+ STATUS_SECURE_MODE) !=
+ STATUS_MODE_LOCK) &&
+ (scf_comtbl.alive_running ==
+ SCF_ALIVE_START)) {
+ /* Alive check start */
+ scf_comtbl.scf_alive_event_sub =
+ EVENT_SUB_ALST_WAIT;
+ }
+ } else {
+ scf_dm_secure_mode =
+ SCF_STAT_MODE_UNLOCK;
+ }
+ /* Remember current BOOT/SECURE mode bits */
+ scf_comtbl.scf_mode_sw =
+ (statep->reg_status &
+ (STATUS_BOOT_MODE |
+ STATUS_SECURE_MODE));
+ }
+
+ /* Check command complete */
+ if ((scf_comtbl.scf_cmd_exec_flag) &&
+ (statep->reg_status & STATUS_CMD_COMPLETE)) {
+ /* SCF command complete processing */
+ path_change = scf_intr_cmdcmp(statep);
+ if (path_change) {
+ goto END_intr;
+ }
+ }
+ } else {
+ /* SCF Status register interrupt clear */
+ SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS, statep->reg_status);
+ SC_DBG_DRV_TRACE(TC_W_STATUS, __LINE__,
+ &statep->reg_status,
+ sizeof (statep->reg_status));
+ /* Register read sync */
+ scf_rs16 = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->STATUS);
+
+ /* SCF Status extended register interrupt clear */
+ SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR,
+ statep->reg_status_exr);
+ SC_DBG_DRV_TRACE(TC_W_STATUS_ExR, __LINE__,
+ &statep->reg_status_exr,
+ sizeof (statep->reg_status_exr));
+ /* Register read sync */
+ scf_rs32 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR);
+ goto END_intr;
+ }
+
+ /* Check SCF status change */
+ if (statep->reg_status_exr & STATUS_SCF_STATUS_CHANGE) {
+ /* SCF status change processing */
+ scf_status_change(statep);
+ }
+
+ /* SCF Status register interrupt clear */
+ SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS, statep->reg_status);
+ SC_DBG_DRV_TRACE(TC_W_STATUS, __LINE__, &statep->reg_status,
+ sizeof (statep->reg_status));
+ /* Register read sync */
+ scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS);
+
+ /* SCF Status extended register interrupt clear */
+ SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR, statep->reg_status_exr);
+ SC_DBG_DRV_TRACE(TC_W_STATUS_ExR, __LINE__,
+ &statep->reg_status_exr,
+ sizeof (statep->reg_status_exr));
+ /* Register read sync */
+ scf_rs32 = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR);
+
+ /* SHUTDOWN/POFF/EVENT/ALIVE save */
+ if (statep->reg_status &
+ (STATUS_SHUTDOWN | STATUS_POFF | STATUS_EVENT)) {
+ scf_comtbl.scf_event_flag |= (statep->reg_status &
+ (STATUS_SHUTDOWN | STATUS_POFF | STATUS_EVENT));
+ }
+
+ /* POWER_FAILURE save : treated as a SHUTDOWN event */
+ if (statep->reg_status_exr & STATUS_POWER_FAILURE) {
+ scf_comtbl.scf_event_flag |= STATUS_SHUTDOWN;
+ }
+
+ /* Check next receive data timer exec */
+ if (scf_timer_check(SCF_TIMERCD_NEXTRECV) ==
+ SCF_TIMER_NOT_EXEC) {
+ /* Next command send check */
+ scf_next_cmd_check(statep);
+ }
+ }
+
+ /* Check next command send : wake a waiter if no command in flight */
+ if ((scf_comtbl.scf_cmd_exec_flag == 0) &&
+ (scf_comtbl.cmd_busy_wait != 0)) {
+ scf_comtbl.cmd_busy_wait = 0;
+ /* Signal to command wait */
+ cv_signal(&scf_comtbl.cmdwait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmdwait_cv,
+ sizeof (kcondvar_t));
+ }
+
+ /* Check DSCP Communication Buffer Interrupt */
+ if (statep->reg_int_st & INT_ST_IDBCINT) {
+ /* Check interrupt SCF path */
+ if ((statep == scf_comtbl.scf_exec_p) ||
+ (statep == scf_comtbl.scf_path_p)) {
+ scf_dscp_intr(statep);
+ } else {
+ /* not active SCF path */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+ /* DSCP buffer status register interrupt clear */
+ SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->DSR, 0xff);
+ /* Register read sync */
+ scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->DSR);
+ }
+ }
+
+ /*
+ * No factor bit at all : count the unclaimed interrupt and snapshot
+ * the registers into scf_comtbl.scf_unclamed for later diagnosis.
+ */
+ if ((statep->reg_int_st & INT_ST_ALL) == 0) {
+ /* Unclamed counter up */
+ scf_comtbl.scf_unclamed_cnt++;
+
+ /* Get control register */
+ statep->reg_control = SCF_DDI_GET16(statep,
+ statep->scf_regs_c_handle,
+ &statep->scf_regs_c->CONTROL);
+ SC_DBG_DRV_TRACE(TC_R_CONTROL, __LINE__, &statep->reg_control,
+ sizeof (statep->reg_control));
+ scf_comtbl.scf_unclamed.CONTROL = statep->reg_control;
+
+ scf_comtbl.scf_unclamed.INT_ST = statep->reg_int_st;
+
+ /* Get SCF command register */
+ statep->reg_command = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle, &statep->scf_regs->COMMAND);
+ SC_DBG_DRV_TRACE(TC_R_COMMAND, __LINE__, &statep->reg_command,
+ sizeof (statep->reg_command));
+ scf_comtbl.scf_unclamed.COMMAND = statep->reg_command;
+
+ /* Get SCF status register */
+ statep->reg_status = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle, &statep->scf_regs->STATUS);
+ SC_DBG_DRV_TRACE(TC_R_STATUS, __LINE__, &statep->reg_status,
+ sizeof (statep->reg_status));
+ scf_comtbl.scf_unclamed.STATUS = statep->reg_status;
+
+ /* Get SCF status extended register */
+ statep->reg_status_exr = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle, &statep->scf_regs->STATUS_ExR);
+ SC_DBG_DRV_TRACE(TC_R_STATUS_ExR, __LINE__,
+ &statep->reg_status_exr,
+ sizeof (statep->reg_status_exr));
+ scf_comtbl.scf_unclamed.STATUS_ExR = statep->reg_status_exr;
+
+ /* Get DSR register */
+ statep->reg_dsr = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->DSR);
+ SC_DBG_DRV_TRACE(TC_R_DSR, __LINE__, &statep->reg_dsr,
+ sizeof (statep->reg_dsr));
+ scf_comtbl.scf_unclamed.DSR = statep->reg_dsr;
+ }
+
+/*
+ * END_intr
+ */
+ END_intr:
+
+ /* Check SCF path change */
+ if (path_change) {
+ scf_path_change(statep);
+ }
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /*
+ * Timer stop : performed only after all_mutex is released —
+ * presumably so a pending timeout handler that takes all_mutex
+ * cannot deadlock against this untimeout; confirm against the
+ * scf_timer_untimeout() implementation.
+ */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ SC_DBG_DRV_TRACE(TC_INTR|TC_OUT, __LINE__, &ret, sizeof (uint_t));
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_intr_cmdcmp()
+ *
+ * Description: SCF command complete processing.
+ *
+ * statep: active SCF path state that raised the command-complete
+ * interrupt. Caller must hold scf_comtbl.all_mutex.
+ *
+ * return: 0 = completion handled normally;
+ * 1 = hard error latched (receive-data SUM retry-out or
+ * interface-error retry-out) — the caller must run SCF path
+ * change processing.
+ */
+int
+scf_intr_cmdcmp(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_intr_cmdcmp() "
+ struct scf_cmd *scfcmdp;
+ uint8_t sum;
+ uint32_t sum4;
+ uint8_t *wk_in_p8;
+ uint32_t *wk_in_p32;
+ uint8_t *wk_in_p;
+ uint8_t *wk_out_p;
+ uint_t wkleng;
+ uint_t wkleng2;
+ uint_t rcount;
+ uint_t rxbuff_cnt;
+ uint_t rxbuff_flag = 0; /* 1 = divided receive, more data follows */
+ char sumerr_msg[16]; /* "SCF device" or "SRAM" for the SUM warning */
+ int info_size;
+ int ii;
+ int ret = 0;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ if (scf_comtbl.scf_exec_cmd_id) {
+ /* SCF command start for ioctl */
+ scfcmdp = scf_comtbl.scf_cmdp;
+ } else {
+ /* SCF command start for interrupt */
+ scfcmdp = &scf_comtbl.scf_cmd_intr;
+ }
+ /* Extract the command return code field from the status register */
+ scfcmdp->stat0 = (statep->reg_status & STATUS_CMD_RTN_CODE) >> 4;
+ scfcmdp->status = statep->reg_status;
+
+ /* Command completed: stop the command-end watchdog timer */
+ scf_timer_stop(SCF_TIMERCD_CMDEND);
+ scf_comtbl.scf_cmd_exec_flag = 0;
+ statep->cmd_to_rcnt = 0;
+
+ /* Latch the four 32-bit receive data registers */
+ statep->reg_rdata[0] = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->RDATA0);
+ statep->reg_rdata[1] = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->RDATA1);
+ SC_DBG_DRV_TRACE(TC_R_RDATA0, __LINE__, & statep->reg_rdata[0],
+ sizeof (statep->reg_rdata[0]) + sizeof (statep->reg_rdata[1]));
+ statep->reg_rdata[2] = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->RDATA2);
+ statep->reg_rdata[3] = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+ &statep->scf_regs->RDATA3);
+ SC_DBG_DRV_TRACE(TC_R_RDATA2, __LINE__, &statep->reg_rdata[2],
+ sizeof (statep->reg_rdata[2]) + sizeof (statep->reg_rdata[3]));
+
+ SCF_DBG_TEST_INTR_CMDEND(statep);
+
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, "command complete status = 0x%04x",
+ scfcmdp->stat0);
+ SCFDBGMSG4(SCF_DBGFLAG_REG, "RxDR = 0x%08x 0x%08x 0x%08x 0x%08x",
+ statep->reg_rdata[0], statep->reg_rdata[1],
+ statep->reg_rdata[2], statep->reg_rdata[3]);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_RECVDATA);
+
+ SCF_DBG_XSCF_SET_STATUS
+
+ /* Check command return value */
+ switch (scfcmdp->stat0) {
+ case NORMAL_END:
+ /* Normal end */
+ statep->tesum_rcnt = 0;
+
+ SCF_DBG_XSCF_SET_LENGTH
+
+ /*
+ * Make Rx register sum : byte-wise sum of the first 15
+ * receive-data bytes plus the magic number; the 16th byte
+ * (where wk_in_p8 lands after the loop) holds the expected
+ * checksum.
+ */
+ sum = SCF_MAGICNUMBER_S;
+ wk_in_p8 = (uint8_t *)&statep->reg_rdata[0];
+ for (ii = 0; ii < SCF_S_CNT_15; ii++, wk_in_p8++) {
+ sum += *wk_in_p8;
+ }
+
+ SCF_DBG_MAKE_RXSUM(sum, *wk_in_p8);
+
+ /* Check Rx register sum */
+ if (sum != *wk_in_p8) {
+ SCFDBGMSG2(SCF_DBGFLAG_SYS,
+ "Rx sum failure 0x%02x 0x%02x", sum, *wk_in_p8);
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+ scfcmdp->stat0 = SCF_STAT0_RDATA_SUM;
+ strcpy(&sumerr_msg[0], "SCF device");
+ statep->resum_rcnt++;
+ goto CHECK_rxsum_start;
+ }
+
+ if (scfcmdp->flag == SCF_USE_SLBUF) {
+ /*
+ * SCF_USE_SLBUF : large receive via SRAM; RDATA0
+ * carries the receive length, RDATA2 the SRAM SUM.
+ */
+ scfcmdp->rbufleng = statep->reg_rdata[0];
+ if (scfcmdp->rbufleng > SCF_L_CNT_MAX) {
+ /* Invalid receive data length */
+ SCFDBGMSG1(SCF_DBGFLAG_SYS,
+ "Invalid receive data length = 0x%08x",
+ scfcmdp->rbufleng);
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+ scfcmdp->stat0 = SCF_STAT0_RDATA_SUM;
+ strcpy(&sumerr_msg[0], "SRAM");
+ statep->resum_rcnt++;
+ goto CHECK_rxsum_start;
+ }
+
+ if (scfcmdp->rbufleng == 0) {
+ statep->resum_rcnt = 0;
+ goto CHECK_rxsum_start;
+ }
+ /*
+ * Check receive data division mode : an ioctl
+ * receive larger than scf_rxbuff_max_size is split;
+ * the remainder is fetched later (rxbuff_flag = 1).
+ */
+ if ((scf_comtbl.scf_exec_cmd_id) &&
+ (scfcmdp->rbufleng > scf_rxbuff_max_size)) {
+ scf_comtbl.scf_rem_rxbuff_size =
+ scfcmdp->rbufleng - scf_rxbuff_max_size;
+ rxbuff_cnt = scf_rxbuff_max_size;
+ rxbuff_flag = 1;
+ } else {
+ rxbuff_cnt = scfcmdp->rbufleng;
+ rxbuff_flag = 0;
+ }
+
+ /* Receive data copy (byte-by-byte from SRAM) */
+ wk_in_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+ wk_out_p = (uint8_t *)&scfcmdp->rbuf[0];
+ for (ii = 0; ii < rxbuff_cnt;
+ ii++, wk_in_p++, wk_out_p++) {
+ *wk_out_p = SCF_DDI_GET8(statep,
+ statep->scf_sys_sram_handle, wk_in_p);
+ }
+
+ /* SRAM trace : log received data in INFO-sized runs */
+ if (rxbuff_cnt > scf_sram_trace_data_size) {
+ rcount = scf_sram_trace_data_size;
+ } else {
+ rcount = rxbuff_cnt;
+ }
+ wk_in_p = (uint8_t *)scfcmdp->rbuf;
+ info_size = sizeof (statep->memo_scf_drvtrc.INFO);
+ while (rcount != 0) {
+ bzero((void *)&statep->memo_scf_drvtrc.INFO[0],
+ info_size);
+ wk_out_p = &statep->memo_scf_drvtrc.INFO[0];
+ if (rcount > info_size) {
+ wkleng = info_size;
+ } else {
+ wkleng = rcount;
+ }
+ rcount -= wkleng;
+ bcopy(wk_in_p, wk_out_p, wkleng);
+ SCF_SRAM_TRACE(statep, DTC_RECVDATA_SRAM);
+ wk_in_p += wkleng;
+ }
+
+ /*
+ * Check receive data division mode : SUM check is
+ * deferred until the whole buffer has arrived.
+ */
+ if (rxbuff_flag != 0) {
+ goto CHECK_rxsum_start;
+ }
+
+ /*
+ * Make SRAM data sum : 32-bit word sum of the receive
+ * buffer plus the magic number; the trailing 1-3
+ * bytes are packed into the high-order bytes of one
+ * last word.
+ */
+ sum4 = SCF_MAGICNUMBER_L;
+ wkleng2 = scfcmdp->rbufleng;
+ wkleng = scfcmdp->rbufleng / 4;
+ wk_in_p32 = (void *)scfcmdp->rbuf;
+ for (ii = 0; ii < wkleng; ii++, wk_in_p32++) {
+ sum4 += *wk_in_p32;
+ }
+ if ((wkleng2 % 4) == 3) {
+ sum4 += ((scfcmdp->rbuf[wkleng2 - 3] << 24) |
+ (scfcmdp->rbuf[wkleng2 - 2] << 16) |
+ (scfcmdp->rbuf[wkleng2 - 1] << 8));
+
+ } else if ((wkleng2 % 4) == 2) {
+ sum4 += ((scfcmdp->rbuf[wkleng2 - 2] << 24) |
+ (scfcmdp->rbuf[wkleng2 - 1] << 16));
+ } else if ((wkleng2 % 4) == 1) {
+ sum4 += (scfcmdp->rbuf[wkleng2 - 1] << 24);
+ }
+
+ SCF_DBG_MAKE_RXSUM_L(sum4, statep->reg_rdata[2]);
+
+ /* Check SRAM data sum against RDATA2 */
+ if (sum4 == statep->reg_rdata[2]) {
+ statep->resum_rcnt = 0;
+ } else {
+ SCFDBGMSG2(SCF_DBGFLAG_SYS,
+ "Rx sum failure 0x%08x 0x%08x",
+ sum4, statep->reg_rdata[2]);
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR,
+ __LINE__, "intr ", 8);
+ scfcmdp->stat0 = SCF_STAT0_RDATA_SUM;
+ strcpy(&sumerr_msg[0], "SRAM");
+ statep->resum_rcnt++;
+ }
+ } else {
+ if ((scfcmdp->flag == SCF_USE_SSBUF) ||
+ (scfcmdp->flag == SCF_USE_LSBUF)) {
+ /*
+ * SCF_USE_SSBUF/SCF_USE_LSBUF : small receive,
+ * data comes directly from the RDATA registers
+ * (at most 16 bytes).
+ */
+ if (scfcmdp->rcount < SCF_S_CNT_16) {
+ wkleng = scfcmdp->rcount;
+ } else {
+ wkleng = SCF_S_CNT_16;
+ }
+ scfcmdp->rbufleng = wkleng;
+ if (wkleng != 0) {
+ /* Receive data copy */
+ bcopy((void *)&statep->reg_rdata[0],
+ (void *)scfcmdp->rbuf, wkleng);
+ }
+ } else {
+ /*
+ * SCF_USE_S_BUF/SCF_USE_L_BUF : no receive
+ * data expected.
+ */
+ scfcmdp->rbufleng = 0;
+ }
+ statep->resum_rcnt = 0;
+ }
+
+/*
+ * CHECK_rxsum_start
+ */
+ CHECK_rxsum_start:
+
+ /* Check Rx sum re-try out : escalate to hard error */
+ if ((scfcmdp->stat0 == SCF_STAT0_RDATA_SUM) &&
+ (statep->resum_rcnt > scf_resum_rcnt)) {
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_RSUMERR);
+
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+ cmn_err(CE_WARN,
+ "%s,Failed the receive data SUM of %s. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], &sumerr_msg[0],
+ scfcmdp->subcmd, scfcmdp->cmd);
+ statep->scf_herr |= HERR_RESUM;
+ ret = 1;
+ goto END_intr_cmdcmp;
+ }
+ break;
+
+ case INTERFACE:
+ /* Interface error */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_INTERFACE);
+
+ statep->tesum_rcnt++;
+ /* Check interface error re-try out : escalate to hard error */
+ if (statep->tesum_rcnt > scf_tesum_rcnt) {
+ cmn_err(CE_WARN,
+ "%s,Detected the interface error by XSCF. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd);
+ statep->scf_herr |= HERR_TESUM;
+ ret = 1;
+ goto END_intr_cmdcmp;
+ }
+ break;
+
+ case BUF_FUL:
+ /* Buff full */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_MSG, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_RCI_BUF_FUL);
+
+ break;
+
+ case RCI_BUSY:
+ /* RCI busy */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_MSG, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_RCI_BUSY);
+
+ break;
+
+ case E_NOT_SUPPORT:
+ /* Not support command/sub command */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_E_NOT_SUPPORT);
+
+ cmn_err(CE_WARN,
+ "%s,Detected the not support command by XSCF. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd);
+ break;
+
+ case E_PARAM:
+ /* Parameter error */
+
+ /*
+ * Check command is SB configuration change : for DR
+ * SB-configuration-change, E_PARAM still carries valid
+ * receive data in the RDATA registers, so copy it out
+ * instead of warning.
+ */
+ if ((scfcmdp->cmd == CMD_DR) && (scfcmdp->subcmd ==
+ SUB_SB_CONF_CHG)) {
+ scfcmdp->rbufleng = SCF_S_CNT_16;
+ /* Receive data copy */
+ if (scfcmdp->rcount < SCF_S_CNT_16) {
+ wkleng = scfcmdp->rcount;
+ } else {
+ wkleng = SCF_S_CNT_16;
+ }
+ if (wkleng != 0) {
+ bcopy((void *)&statep->reg_rdata[0],
+ (void *)scfcmdp->rbuf, wkleng);
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_E_PARAM);
+
+ cmn_err(CE_WARN,
+ "%s,Detected the invalid parameter by XSCF. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd);
+ }
+ break;
+
+ case E_RCI_ACCESS:
+ /* RCI access error */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_E_RCI_ACCESS);
+
+ cmn_err(CE_WARN,
+ "%s,RCI access error occurred in XSCF. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd);
+ break;
+
+ case E_SCFC_NOPATH:
+ /* No SCFC path : demote exec path and request path change */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_E_SCFC_PATH);
+
+ if (scf_comtbl.scf_exec_p) {
+ scf_chg_scf(scf_comtbl.scf_exec_p, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_path_p = scf_comtbl.scf_exec_p;
+ scf_comtbl.scf_exec_p = 0;
+ }
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+ break;
+
+ case RCI_NS:
+ /* Not support RCI */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_MSG, __LINE__, "intr ", 8);
+ cmn_err(CE_WARN,
+ "%s,Cannot use RCI interface. "
+ "SCF command = 0x%02x%02x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd);
+ break;
+
+ default:
+ /* Unknown return code : report and treat as not supported */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__, "intr ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_ERRRTN | scfcmdp->stat0);
+
+ cmn_err(CE_WARN,
+ "%s,Invalid status value was notified from XSCF. "
+ "SCF command = 0x%02x%02x, Status value = 0x%04x\n",
+ &statep->pathname[0], scfcmdp->subcmd,
+ scfcmdp->cmd, scfcmdp->status);
+ scfcmdp->stat0 = E_NOT_SUPPORT;
+ break;
+ }
+
+ /* Check SCF command start for interrupt */
+ if (scf_comtbl.scf_exec_cmd_id == 0) {
+ /* SCF command start for interrupt processing */
+ scf_intr_cmdcmp_driver(statep, scfcmdp);
+ } else {
+ /* Check ioctl command end wait */
+ if (scf_comtbl.cmd_end_wait != 0) {
+ /* Check command return value */
+ switch (scfcmdp->stat0) {
+ case NORMAL_END:
+ case BUF_FUL:
+ case RCI_BUSY:
+ case E_NOT_SUPPORT:
+ case E_PARAM:
+ case E_RCI_ACCESS:
+ case RCI_NS:
+ if ((scfcmdp->stat0 == NORMAL_END) &&
+ (scfcmdp->cmd == CMD_ALIVE_CHECK)) {
+ if (scfcmdp->subcmd ==
+ SUB_ALIVE_START) {
+ scf_alivecheck_start(statep);
+ } else {
+ scf_alivecheck_stop(statep);
+ }
+ }
+ /*
+ * Divided receive still in progress : do
+ * not wake the ioctl waiter yet.
+ */
+ if ((scfcmdp->stat0 == NORMAL_END) &&
+ (rxbuff_flag)) {
+ break;
+ }
+
+ scf_comtbl.cmd_end_wait = 0;
+ /* Signal to command end wait */
+ cv_signal(&scf_comtbl.cmdend_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.cmdend_cv,
+ sizeof (kcondvar_t));
+ break;
+
+ default:
+ /* INTERFACE */
+ /* E_SCFC_NOPATH */
+ /* Rx DATA SUM ERROR */
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |= RESEND_IOCTL;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Check receive data division mode : keep the command "in
+ * flight" and arm the next-receive timer for the remainder.
+ */
+ if (rxbuff_flag == 1) {
+ /* Next receive data timer start */
+ scf_timer_start(SCF_TIMERCD_NEXTRECV);
+ scf_comtbl.scf_cmd_exec_flag = 1;
+ }
+
+/*
+ * END_intr_cmdcmp
+ */
+ END_intr_cmdcmp:
+
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_intr_cmdcmp_driver()
+ *
+ * Description: SCF command complete. start for interrupt processing.
+ *
+ */
+void
+scf_intr_cmdcmp_driver(scf_state_t *statep, struct scf_cmd *scfcmdp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_intr_cmdcmp_driver() "
+ int shutdown_flag = 0;
+ int poff_flag;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Check command code */
+ switch (scfcmdp->cmd) {
+ case CMD_SCFI_PATH: /* SCF Path change command */
+ /* Check command return value */
+ if (scfcmdp->stat0 != NORMAL_END) {
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |= RESEND_PCHG;
+ break;
+ }
+
+ /* Check SCF path change status */
+ if (scf_comtbl.scf_path_p != NULL) {
+ scf_chg_scf(scf_comtbl.scf_path_p, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_exec_p = scf_comtbl.scf_path_p;
+ scf_comtbl.scf_path_p = NULL;
+ /* FMEMA interface */
+ scf_avail_cmd_reg_vaddr =
+ (caddr_t)&statep->scf_regs->COMMAND;
+
+ scf_comtbl.path_change_rcnt = 0;
+ }
+
+ /* Check Alive check exec */
+ if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALST_WAIT;
+ } else {
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_NONE;
+ if (scf_comtbl.suspend_wait == 1) {
+ scf_comtbl.suspend_wait = 0;
+ scf_comtbl.scf_suspend_sendstop = 1;
+ cv_signal(&scf_comtbl.suspend_wait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.suspend_wait_cv,
+ sizeof (kcondvar_t));
+ }
+ }
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_NONE;
+
+ scf_comtbl.scf_event_flag |= STATUS_EVENT;
+
+ SCF_DBG_NO_INT_REASON;
+
+ /* DSCP interface start */
+ scf_dscp_start(FACTOR_PATH_CHG);
+ break;
+
+ case CMD_PART_POW_CTR: /* power control command */
+ /* Check command return value */
+ if (scfcmdp->stat0 != NORMAL_END) {
+ switch (scfcmdp->stat0) {
+ case BUF_FUL:
+ case RCI_BUSY:
+ case E_NOT_SUPPORT:
+ case E_PARAM:
+ case E_RCI_ACCESS:
+ case RCI_NS:
+ scf_comtbl.scf_poff_event_sub = EVENT_SUB_NONE;
+ break;
+
+ default:
+ /* INTERFACE */
+ /* E_SCFC_NOPATH */
+ /* Rx DATA SUM ERROR */
+ scf_comtbl.scf_poff_event_sub =
+ EVENT_SUB_POFF_WAIT;
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |= RESEND_POFF;
+ break;
+ }
+ break;
+ }
+
+ poff_flag = 0;
+ scf_comtbl.scf_poff_id =
+ (uchar_t)((statep->reg_rdata[0] &
+ 0xFF000000) >> 24);
+
+ SCF_DBG_TEST_INTR_POFF;
+
+ if (scf_comtbl.scf_poff_id == POFF_ID_PANEL) {
+ /* PANEL */
+ if ((scf_comtbl.scf_mode_sw & STATUS_SECURE_MODE) !=
+ STATUS_MODE_LOCK) {
+ /* Not LOCK */
+ SC_DBG_DRV_TRACE(TC_INTR, __LINE__,
+ "intr ", 8);
+ if (scf_comtbl.scf_shutdownreason == 0) {
+ poff_flag = 1;
+ scf_comtbl.scf_shutdownreason =
+ REASON_XSCFPOFF;
+ }
+ cmn_err(CE_NOTE,
+ "%s: System shutdown by panel "
+ "request.\n", scf_driver_name);
+ }
+ } else if ((scf_comtbl.scf_poff_id & POFF_ID_MASK) ==
+ POFF_ID_RCI) {
+ /* RCI */
+ SC_DBG_DRV_TRACE(TC_INTR, __LINE__, "intr ", 8);
+ if (scf_comtbl.scf_shutdownreason == 0) {
+ poff_flag = 1;
+ scf_comtbl.scf_shutdownreason = REASON_RCIPOFF;
+ }
+ cmn_err(CE_NOTE,
+ "%s: System shutdown from RCI.\n",
+ scf_driver_name);
+ } else if (scf_comtbl.scf_poff_id == POFF_ID_XSCF) {
+ /* XSCF */
+ SC_DBG_DRV_TRACE(TC_INTR, __LINE__, "intr ", 8);
+ if (scf_comtbl.scf_shutdownreason == 0) {
+ poff_flag = 1;
+ scf_comtbl.scf_shutdownreason = REASON_XSCFPOFF;
+ }
+ cmn_err(CE_NOTE,
+ "%s: System shutdown by XSCF "
+ "request.\n", scf_driver_name);
+ }
+
+ if (poff_flag) {
+ cmn_err(CE_CONT,
+ "%s: Shutdown was executed.\n",
+ scf_driver_name);
+ /* System shutdown start */
+ do_shutdown();
+ }
+
+ scf_comtbl.scf_poff_event_sub = EVENT_SUB_NONE;
+ break;
+
+ case CMD_INT_REASON: /* Event information command */
+ /* Check command return value */
+ if ((scfcmdp->stat0 != NORMAL_END) &&
+ (scfcmdp->stat0 != SCF_STAT0_RDATA_SUM)) {
+ switch (scfcmdp->stat0) {
+ case BUF_FUL:
+ case RCI_BUSY:
+ case E_NOT_SUPPORT:
+ case E_PARAM:
+ case E_RCI_ACCESS:
+ case RCI_NS:
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_NONE;
+ break;
+
+ default:
+ /* INTERFACE */
+ /* E_SCFC_NORATH */
+ if (scf_comtbl.scf_shut_event_sub ==
+ EVENT_SUB_SHUT_EXEC) {
+ scf_comtbl.scf_shut_event_sub =
+ EVENT_SUB_SHUT_WAIT;
+ } else {
+ scf_comtbl.scf_shut_event_sub =
+ EVENT_SUB_WAIT;
+ }
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |= RESEND_SHUT;
+ break;
+ }
+ break;
+ }
+
+ /* Check factor detail disp */
+ if ((scfcmdp->stat0 == SCF_STAT0_RDATA_SUM) &&
+ (scfcmdp->subcmd == SUB_INT_REASON_DISP)) {
+ /* Send detail re-disp */
+ scf_comtbl.int_reason_retry = 1;
+ if (scf_comtbl.scf_shut_event_sub ==
+ EVENT_SUB_SHUT_EXEC) {
+ scf_comtbl.scf_shut_event_sub =
+ EVENT_SUB_SHUT_WAIT;
+ } else {
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_WAIT;
+ }
+ break;
+ }
+
+ if (scfcmdp->stat0 == NORMAL_END) {
+
+ SCF_DBG_TEST_DSENS(scfcmdp, (void *)scfcmdp->rbuf,
+ (int)scfcmdp->rbufleng);
+
+ scf_comtbl.int_reason_retry = 0;
+ /*
+ * Event interrupt factor check
+ * processing
+ */
+ shutdown_flag = scf_intr_dsens(scfcmdp,
+ (void *)scfcmdp->rbuf, (int)scfcmdp->rbufleng);
+ } else {
+ if (scf_comtbl.scf_shut_event_sub ==
+ EVENT_SUB_SHUT_EXEC) {
+ shutdown_flag = DEV_SENSE_SHUTDOWN;
+ }
+ }
+ if (shutdown_flag == DEV_SENSE_SHUTDOWN) {
+ cmn_err(CE_CONT,
+ "%s: Shutdown was executed.\n",
+ scf_driver_name);
+ /* System shutdown start */
+ do_shutdown();
+ }
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_NONE;
+ break;
+
+ case CMD_ALIVE_CHECK: /* Alive check command */
+ /* Check command return value */
+ switch (scfcmdp->stat0) {
+ case NORMAL_END:
+ case BUF_FUL:
+ case RCI_BUSY:
+ case E_NOT_SUPPORT:
+ case E_PARAM:
+ case E_SCFC_NOPATH:
+ case E_RCI_ACCESS:
+ case RCI_NS:
+ if (scfcmdp->stat0 == NORMAL_END) {
+ if (scfcmdp->subcmd == SUB_ALIVE_START) {
+ scf_alivecheck_start(statep);
+ } else {
+ scf_alivecheck_stop(statep);
+ }
+ }
+
+ if ((scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALSP_EXEC) &&
+ (scf_comtbl.suspend_wait)) {
+ /* Signal to suspend wait */
+ scf_comtbl.suspend_wait = 0;
+ scf_comtbl.scf_suspend_sendstop = 1;
+ cv_signal(&scf_comtbl.suspend_wait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.suspend_wait_cv,
+ sizeof (kcondvar_t));
+ }
+ if ((scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALST_EXEC) ||
+ (scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALSP_EXEC)) {
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_NONE;
+ }
+ break;
+
+ default:
+ /* INTERFACE */
+ /* Rx DATA SUM ERROR */
+ if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+ scf_comtbl.scf_alive_event_sub =
+ EVENT_SUB_ALST_WAIT;
+ } else {
+ scf_comtbl.scf_alive_event_sub =
+ EVENT_SUB_ALSP_WAIT;
+ }
+
+ /* Set command retry send flag */
+ if ((scfcmdp->subcmd == SUB_ALIVE_START) &&
+ (scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALST_WAIT)) {
+ scf_comtbl.scf_cmd_resend_req |= RESEND_ALST;
+ } else if ((scfcmdp->subcmd == SUB_ALIVE_STOP) &&
+ (scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALSP_WAIT)) {
+ scf_comtbl.scf_cmd_resend_req |= RESEND_ALSP;
+ }
+ break;
+ }
+ break;
+
+ case CMD_REPORT: /* Report command */
+ /* Check command return value */
+ switch (scfcmdp->stat0) {
+ case NORMAL_END:
+ case BUF_FUL:
+ case RCI_BUSY:
+ case E_NOT_SUPPORT:
+ case E_PARAM:
+ case E_SCFC_NOPATH:
+ case E_RCI_ACCESS:
+ case RCI_NS:
+ if ((scfcmdp->stat0 == NORMAL_END) &&
+ (scf_comtbl.alive_running == SCF_ALIVE_START)) {
+ /* Check Alive check exec */
+ scf_comtbl.scf_alive_event_sub =
+ EVENT_SUB_ALST_WAIT;
+ }
+
+ if ((scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_RUN_EXEC) ||
+ (scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_SHOT_EXEC)) {
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_NONE;
+ }
+
+ if (scfcmdp->stat0 == BUF_FUL) {
+ if (scf_comtbl.report_buf_ful_rcnt !=
+ scf_buf_ful_rcnt) {
+ scf_comtbl.report_buf_ful_rcnt++;
+ scf_timer_start(SCF_TIMERCD_BUF_FUL);
+ } else {
+ scf_comtbl.report_buf_ful_rcnt = 0;
+ }
+ } else {
+ scf_comtbl.report_buf_ful_rcnt = 0;
+ }
+
+ if (scfcmdp->stat0 == RCI_BUSY) {
+ if (scf_comtbl.report_rci_busy_rcnt !=
+ scf_rci_busy_rcnt) {
+ scf_comtbl.report_rci_busy_rcnt++;
+ scf_timer_start(SCF_TIMERCD_RCI_BUSY);
+ } else {
+ scf_comtbl.report_rci_busy_rcnt = 0;
+ }
+ } else {
+ scf_comtbl.report_rci_busy_rcnt = 0;
+ }
+ break;
+
+ default:
+ /* INTERFACE */
+ /* Rx DATA SUM ERROR */
+ if (scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_RUN_EXEC) {
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_REPORT_RUN_WAIT;
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |=
+ RESEND_REPORT_RUN;
+ } else {
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_REPORT_SHUT_WAIT;
+
+ /* Set command retry send flag */
+ scf_comtbl.scf_cmd_resend_req |=
+ RESEND_REPORT_SHUT;
+ }
+ break;
+ }
+ break;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_intr_dsens()
+ *
+ * Description: Event factor check processing.
+ *	Scans the device-sense entries received for a CMD_INT_REASON
+ *	command, logs each reported factor, wakes threads waiting on the
+ *	event / report-sense queues, and determines whether a system
+ *	shutdown is required.
+ *
+ *	scfcmdp	- SCF command block the sense data belongs to (unused).
+ *	int_rp	- receive buffer holding the sense entries.
+ *	len	- length in bytes of the receive buffer.
+ *
+ *	Returns DEV_SENSE_SHUTDOWN when a shutdown factor was detected,
+ *	otherwise 0.
+ */
+/* ARGSUSED */
+int
+scf_intr_dsens(struct scf_cmd *scfcmdp, scf_int_reason_t *int_rp, int len)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_intr_dsens() "
+ scf_int_reason_t *rbuf;
+ uchar_t category_code;
+ uchar_t sub_status;
+ uchar_t category_type;
+ time_t timestamp;
+ int ret = 0;
+ int ent = 0;
+ int getevent_flag;
+ int max_ent;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ rbuf = int_rp;
+ /* Number of sense entries in the buffer; at most 4 are processed */
+ max_ent = len / SCF_INT_REASON_SIZE;
+ if (max_ent > 4) {
+ max_ent = 4;
+ }
+
+ /* entry count loop */
+ /* b[4] holds the category code; a zero value terminates the list */
+ while ((rbuf->b[4]) && (ent < max_ent)) {
+ /* Save last event */
+ bcopy((void *)&rbuf->b[0], (void *)&scf_comtbl.last_event[0],
+ SCF_INT_REASON_SIZE);
+
+ /* Check SCFIOCEVENTLIST */
+ getevent_flag = scf_push_getevent(&rbuf->b[0]);
+ if (getevent_flag == 0) {
+ /* wake up waiting thread */
+ cv_signal(&scf_comtbl.getevent_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.getevent_cv, sizeof (kcondvar_t));
+ }
+
+ /* get category code of the sense */
+ category_code = rbuf->b[4] & (~DEV_SENSE_SHUTDOWN);
+ sub_status = rbuf->b[4];
+ category_type = rbuf->b[6];
+ SC_DBG_DRV_TRACE(TC_DSENS, __LINE__, &rbuf->b[0], 8);
+ SC_DBG_DRV_TRACE(TC_DSENS, __LINE__, &rbuf->b[8], 8);
+ SCFDBGMSG4(SCF_DBGFLAG_REG,
+ "SENSE = 0x%08x 0x%08x 0x%08x 0x%08x",
+ rbuf->four_bytes_access[0], rbuf->four_bytes_access[1],
+ rbuf->four_bytes_access[2], rbuf->four_bytes_access[3]);
+
+ switch (category_code) {
+ case DEV_SENSE_FANUNIT:
+ /* fan unit failure */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ /* DEV_SENSE_ATTR_OWN: factor is on this node, else remote RCI */
+ if (category_type == DEV_SENSE_ATTR_OWN) {
+ cmn_err(CE_WARN,
+ "%s: fan unit failure"
+ ", sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ /* First shutdown factor wins; later ones do not overwrite */
+ if ((sub_status & DEV_SENSE_SHUTDOWN) &&
+ (scf_comtbl.scf_shutdownreason == 0)) {
+ scf_comtbl.scf_shutdownreason =
+ REASON_SHUTDOWN_FAN;
+ ret = DEV_SENSE_SHUTDOWN;
+ }
+ } else {
+ cmn_err(CE_WARN,
+ "%s: fan unit failure on "
+ "RCI(addr = 0x%08x)"
+ ", FAN#%d, sub status = 0x%02x,\n"
+ "sense info ="
+ " 0x%02x 0x%02x 0x%02x 0x%02x"
+ " 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0],
+ rbuf->b[9], sub_status,
+ rbuf->b[0], rbuf->b[1],
+ rbuf->b[2], rbuf->b[3],
+ rbuf->b[8], rbuf->b[9],
+ rbuf->b[10], rbuf->b[11]);
+ }
+ break;
+
+ case DEV_SENSE_PWRUNIT:
+ /* power unit failure */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ if (category_type == DEV_SENSE_ATTR_OWN) {
+ cmn_err(CE_WARN,
+ "%s: power supply unit failure"
+ ", sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ if ((sub_status & DEV_SENSE_SHUTDOWN) &&
+ (scf_comtbl.scf_shutdownreason == 0)) {
+ scf_comtbl.scf_shutdownreason =
+ REASON_SHUTDOWN_PSU;
+ ret = DEV_SENSE_SHUTDOWN;
+ }
+ } else {
+ cmn_err(CE_WARN,
+ "%s: power supply unit failure on "
+ "RCI(addr = 0x%08x)"
+ ", FEP#%d, sub status = 0x%02x,\n"
+ "sense info ="
+ " 0x%02x 0x%02x 0x%02x 0x%02x"
+ " 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0],
+ rbuf->b[11],
+ sub_status,
+ rbuf->b[0], rbuf->b[1],
+ rbuf->b[2], rbuf->b[3],
+ rbuf->b[8], rbuf->b[9],
+ rbuf->b[10], rbuf->b[11]);
+ }
+ break;
+
+ case DEV_SENSE_UPS:
+ /* UPS failure */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ /* UPS factors are only reported for this node */
+ if (category_type != DEV_SENSE_ATTR_OWN) {
+ break;
+ }
+
+ switch (rbuf->b[8] & DEV_SENSE_UPS_MASK) {
+ case DEV_SENSE_UPS_LOWBAT:
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+ cmn_err(CE_WARN,
+ "%s: UPS low battery"
+ " was detected, sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR,
+ __LINE__, "intr_dse", 8);
+ cmn_err(CE_WARN,
+ "%s: UPS failure"
+ " was detected, sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ break;
+ }
+ if ((sub_status & DEV_SENSE_SHUTDOWN) &&
+ (scf_comtbl.scf_shutdownreason == 0)) {
+ scf_comtbl.scf_shutdownreason =
+ REASON_SHUTDOWN_UPS;
+ ret = DEV_SENSE_SHUTDOWN;
+ }
+ break;
+
+ case DEV_SENSE_THERMAL:
+ /* thermal failure */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ if (category_type == DEV_SENSE_ATTR_OWN) {
+ cmn_err(CE_WARN,
+ "%s: thermal alarm"
+ ", sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ if ((sub_status & DEV_SENSE_SHUTDOWN) &&
+ (scf_comtbl.scf_shutdownreason == 0)) {
+ scf_comtbl.scf_shutdownreason =
+ REASON_SHUTDOWN_THERMAL;
+ ret = DEV_SENSE_SHUTDOWN;
+ }
+ } else {
+ cmn_err(CE_WARN,
+ "%s: thermal alarm on "
+ "RCI(addr = 0x%08x)"
+ ", SENSOR#%d, sub status = 0x%02x,\n"
+ "sense info ="
+ " 0x%02x 0x%02x 0x%02x 0x%02x"
+ " 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0],
+ rbuf->b[9], sub_status,
+ rbuf->b[0], rbuf->b[1],
+ rbuf->b[2], rbuf->b[3],
+ rbuf->b[8], rbuf->b[9],
+ rbuf->b[10], rbuf->b[11]);
+ }
+ break;
+
+ case DEV_SENSE_PWRSR:
+ /* power stop */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ if (category_type != DEV_SENSE_ATTR_OWN) {
+ break;
+ }
+
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+ cmn_err(CE_WARN,
+ "%s: Input power down was detected. "
+ "UPS is activated"
+ ", sub status = 0x%02x\n",
+ scf_driver_name, sub_status);
+ if (sub_status & DEV_SENSE_SHUTDOWN) {
+ /* Record power-fail as the power-off factor only if no */
+ /* shutdown-start report has been sent yet */
+ if (scf_comtbl.shutdown_start_reported == 0) {
+ scf_comtbl.poff_factor =
+ SCF_POFF_FACTOR_PFAIL;
+ }
+ if (scf_comtbl.scf_shutdownreason == 0) {
+ scf_comtbl.scf_shutdownreason =
+ REASON_SHUTDOWN_UPS;
+ ret = DEV_SENSE_SHUTDOWN;
+ }
+ }
+ break;
+
+ case DEV_SENSE_NODE:
+ /* node error */
+ SC_DBG_DRV_TRACE(TC_INTR|TC_ERR, __LINE__,
+ "intr_dse", 8);
+
+ /* Node errors for this node itself are ignored here */
+ if (category_type == DEV_SENSE_ATTR_OWN) {
+ break;
+ }
+
+ cmn_err(CE_WARN,
+ "%s: node error on RCI(addr = 0x%08x)"
+ ", sub status = 0x%02x,\n"
+ "sense info ="
+ " 0x%02x 0x%02x 0x%02x 0x%02x"
+ " 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0],
+ sub_status,
+ rbuf->b[0], rbuf->b[1],
+ rbuf->b[2], rbuf->b[3],
+ rbuf->b[8], rbuf->b[9],
+ rbuf->b[10], rbuf->b[11]);
+ /* Status-check timeout: queue an RCI-down report and wake readers */
+ if (rbuf->b[9] == DEV_SENSE_NODE_STCKTO) {
+ scf_comtbl.rcidown_event_flag = 1;
+ scf_comtbl.scfreport_rcidown.rci_addr =
+ rbuf->four_bytes_access[0];
+ scf_comtbl.scfreport_rcidown.report_sense[0] =
+ REPORT_STAT_RCIDWN;
+ scf_comtbl.scfreport_rcidown.report_sense[1] =
+ rbuf->b[9];
+ scf_comtbl.scfreport_rcidown.report_sense[2] =
+ rbuf->b[10];
+ scf_comtbl.scfreport_rcidown.report_sense[3] =
+ rbuf->b[11];
+ scf_comtbl.scfreport_rcidown.timestamp =
+ ddi_get_time();
+ /* wake up waiting thread */
+ cv_signal(&scf_comtbl.rsense_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.rsense_cv,
+ sizeof (kcondvar_t));
+ }
+ break;
+
+ case DEV_SENSE_SYS_REPORT:
+ /* system status report */
+ timestamp = ddi_get_time();
+ (void) scf_push_reportsense(rbuf->four_bytes_access[0],
+ &rbuf->b[8], timestamp);
+ /* wake up waiting thread */
+ cv_signal(&scf_comtbl.rsense_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.rsense_cv, sizeof (kcondvar_t));
+ break;
+
+ case DEV_SENSE_PANIC_REQ:
+ /* panic request: deliberately panics this node */
+ cmn_err(CE_PANIC,
+ "%s: panic request from RCI(addr = 0x%08x)\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0]);
+ break;
+
+ case DEV_SENSE_IONODESTAT:
+ /* I/O node status */
+ if (category_type == DEV_SENSE_ATTR_OWN) {
+ break;
+ }
+
+ cmn_err(CE_NOTE,
+ "%s: I/O node status sense from "
+ "RCI(addr = 0x%08x), "
+ "sub status = 0x%02x,\n"
+ "sense info = 0x%02x 0x%02x 0x%02x 0x%02x"
+ " 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ scf_driver_name,
+ rbuf->four_bytes_access[0],
+ sub_status,
+ rbuf->b[0], rbuf->b[1],
+ rbuf->b[2], rbuf->b[3],
+ rbuf->b[8], rbuf->b[9],
+ rbuf->b[10], rbuf->b[11]);
+ break;
+
+ case DEV_SENSE_STATUS_RPT:
+ /* Device status report */
+ if (scf_comtbl.rdctrl_end_wait) {
+ /* rdctrl devsense? (for SCFIOCRDCTRL) */
+ /* keep devsense info */
+ scf_comtbl.rdctrl_sense_category_code =
+ category_code;
+ bcopy((void *)&rbuf->b[0],
+ (void *)&scf_comtbl.rdctrl_sense[0],
+ SCF_INT_REASON_SIZE);
+ cv_signal(&scf_comtbl.rdcsense_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.rdcsense_cv,
+ sizeof (kcondvar_t));
+ }
+ break;
+
+ default:
+ /* Device status print */
+ if (((category_code & 0xf0) == DEV_SENSE_RCI_PATH40) &&
+ (scf_comtbl.rdctrl_end_wait)) {
+ /* rdctrl devsense (for SCFIOCRDCTRL) */
+ /* keep devsense info */
+ scf_comtbl.rdctrl_sense_category_code =
+ category_code;
+ bcopy((void *)&rbuf->b[0],
+ (void *)&scf_comtbl.rdctrl_sense[0],
+ SCF_INT_REASON_SIZE);
+ cv_signal(&scf_comtbl.rdcsense_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.rdcsense_cv,
+ sizeof (kcondvar_t));
+ }
+ break;
+
+ }
+
+/*
+ * NEXT_intr_dsens
+ *
+ * NOTE(review): no goto targets this label within this function; it
+ * only marks the advance to the next sense entry. Candidate for
+ * removal (unused-label warning) — confirm no generated goto exists.
+ */
+ NEXT_intr_dsens:
+
+ rbuf = (void *)((char *)rbuf + SCF_INT_REASON_SIZE);
+ ent++;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_status_change()
+ *
+ * Description: SCF status change processing.
+ *	Examines statep->reg_status_exr (assumes the caller has already
+ *	refreshed it from the STATUS_ExR register — confirm) and moves the
+ *	driver between SCF_STATUS_ONLINE and SCF_STATUS_OFFLINE, managing
+ *	the DSCP interface and the online-wait timer accordingly.
+ *
+ *	statep - SCF path state the status change was detected on.
+ */
+void
+scf_status_change(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_status_change() "
+ uint8_t scf_unit;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Check SCF status */
+ if ((statep->reg_status_exr & STATUS_SCF_STATUS) == STATUS_SCF_ONLINE) {
+ /*
+ * SCF online
+ */
+ /* Log the transition only when coming from offline */
+ if (scf_comtbl.scf_status == SCF_STATUS_OFFLINE) {
+ cmn_err(CE_NOTE, "%s: SCF online.\n", scf_driver_name);
+ }
+ scf_comtbl.scf_status = SCF_STATUS_ONLINE;
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_ONLINE);
+
+ /* Check online wait timer exec */
+ if (scf_timer_check(SCF_TIMERCD_ONLINE) == SCF_TIMER_NOT_EXEC) {
+ /* DCSP interface re-start */
+ scf_dscp_stop(FACTOR_OFFLINE);
+ scf_dscp_start(FACTOR_ONLINE);
+ } else {
+ /* DCSP interface start */
+ scf_dscp_start(FACTOR_ONLINE);
+
+ /* SCF online timer stop */
+ scf_timer_stop(SCF_TIMERCD_ONLINE);
+
+ /* Check SCF command exec */
+ if (scf_comtbl.scf_cmd_exec_flag) {
+ /* Set command wait status */
+ scf_cmdwait_status_set();
+ scf_comtbl.scf_cmd_exec_flag = 0;
+ }
+
+ /* Check Alive check exec */
+ if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+ scf_comtbl.scf_alive_event_sub =
+ EVENT_SUB_ALST_WAIT;
+ }
+ }
+ } else {
+ /*
+ * SCF offline
+ */
+ /* Log the transition only when leaving the online state */
+ if (scf_comtbl.scf_status != SCF_STATUS_OFFLINE) {
+ if (statep->reg_status_exr & STATUS_SCF_NO) {
+ scf_unit = 1;
+ } else {
+ scf_unit = 0;
+ }
+ cmn_err(CE_WARN,
+ "%s: SCF went to offline mode. unit=%d",
+ scf_driver_name, scf_unit);
+ }
+ scf_comtbl.scf_status = SCF_STATUS_OFFLINE;
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_OFFLINE);
+
+ /* Check online wait timer exec */
+ if (scf_timer_check(SCF_TIMERCD_ONLINE) == SCF_TIMER_NOT_EXEC) {
+ /* DCSP interface stop */
+ scf_dscp_stop(FACTOR_OFFLINE);
+
+ statep->online_to_rcnt = 0;
+ /* SCF online timer start */
+ scf_timer_start(SCF_TIMERCD_ONLINE);
+ }
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_next_cmd_check()
+ *
+ * Description: Next command send and check processing.
+ *	When no command is in flight, dispatches the next pending SCF
+ *	command in fixed priority order: path change, shutdown event
+ *	information, power-off factor, event information, system report,
+ *	alive check, then any queued ioctl command; finally wakes any
+ *	thread waiting for the command register to become free.
+ *
+ *	statep - SCF path to send the command on.
+ */
+void
+scf_next_cmd_check(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_next_cmd_check() "
+ struct scf_cmd *scfcmdp = &scf_comtbl.scf_cmd_intr;
+ int offline_ret;
+ int cmdbusy_ret;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Check suspend send stop */
+ if (scf_comtbl.scf_suspend_sendstop) {
+ goto END_next_cmd_check;
+ }
+
+ /* Promote this path to active when a path change is pending */
+ if ((scf_comtbl.scf_path_p == NULL) &&
+ (scf_comtbl.scf_pchg_event_sub == EVENT_SUB_PCHG_WAIT)) {
+ scf_chg_scf(statep, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_path_p = statep;
+ }
+
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ /* send command for interrupt */
+ offline_ret = scf_offline_check(statep, FLAG_ON);
+ cmdbusy_ret = scf_cmdbusy_check(statep);
+
+ /* Nothing can be sent unless path is online and SCF is ready */
+ if ((offline_ret != SCF_PATH_ONLINE) ||
+ (cmdbusy_ret != SCF_COMMAND_READY)) {
+ goto END_next_cmd_check;
+ }
+ }
+
+ /* Check SCF Path change request */
+ if ((scf_comtbl.scf_cmd_exec_flag == 0) &&
+ (scf_comtbl.scf_pchg_event_sub == EVENT_SUB_PCHG_WAIT)) {
+ /* Send SCF Path change command */
+ scfcmdp->cmd = CMD_SCFI_PATH;
+ scfcmdp->subcmd = SUB_CMD_PATH;
+ bzero((void *)&scf_comtbl.scf_sbuf[0], SCF_S_CNT_16);
+ scf_comtbl.scf_sbuf[0] = CMD_PATH_TYPE_SCFD;
+ scfcmdp->sbuf = &scf_comtbl.scf_sbuf[0];
+ scfcmdp->scount = SCF_S_CNT_15;
+ scfcmdp->rcount = 0;
+ scfcmdp->flag = SCF_USE_S_BUF;
+ /* Consume the pending resend request, if any */
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_PCHG) != 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_PCHG;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_EXEC;
+ }
+
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ /* Check shutdown event information request */
+ if ((scf_comtbl.scf_shut_event_sub == EVENT_SUB_NONE) &&
+ (scf_comtbl.scf_event_flag & STATUS_SHUTDOWN)) {
+ scf_comtbl.scf_event_flag &=
+ (~(STATUS_SHUTDOWN | STATUS_EVENT));
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_SHUT_WAIT;
+ }
+ if (scf_comtbl.scf_shut_event_sub == EVENT_SUB_SHUT_WAIT) {
+ /* Send event information command */
+ scfcmdp->cmd = CMD_INT_REASON;
+ /* First attempt displays the factor; retries use re-disp */
+ if (scf_comtbl.int_reason_retry == 0) {
+ scfcmdp->subcmd = SUB_INT_REASON_DISP;
+ } else {
+ scfcmdp->subcmd = SUB_INT_REASON_RETRY;
+ }
+ scfcmdp->scount = 0;
+ scfcmdp->rbuf = &scf_comtbl.scf_rbuf[0];
+ scfcmdp->rcount = SCF_INT_CNT_MAX;
+ scfcmdp->flag = SCF_USE_SLBUF;
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_SHUT) !=
+ 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_SHUT;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_SHUT_EXEC;
+ }
+ }
+
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ /* Check power off factor request */
+ if ((scf_comtbl.scf_poff_event_sub == EVENT_SUB_NONE) &&
+ (scf_comtbl.scf_event_flag & STATUS_POFF)) {
+ scf_comtbl.scf_event_flag &= (~STATUS_POFF);
+ scf_comtbl.scf_poff_event_sub = EVENT_SUB_POFF_WAIT;
+ }
+ if (scf_comtbl.scf_poff_event_sub == EVENT_SUB_POFF_WAIT) {
+ /* Send power off factor command */
+ scfcmdp->cmd = CMD_PART_POW_CTR;
+ scfcmdp->subcmd = SUB_POFFID;
+ scfcmdp->scount = 0;
+ scfcmdp->rbuf = &scf_comtbl.scf_rbuf[0];
+ scfcmdp->rcount = SCF_S_CNT_15;
+ scfcmdp->flag = SCF_USE_SSBUF;
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_POFF) !=
+ 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_POFF;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_poff_event_sub = EVENT_SUB_POFF_EXEC;
+ }
+ }
+
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ /* Check event information request */
+ if ((scf_comtbl.scf_shut_event_sub == EVENT_SUB_NONE) &&
+ (scf_comtbl.scf_event_flag & STATUS_EVENT)) {
+ scf_comtbl.scf_event_flag &= (~STATUS_EVENT);
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_WAIT;
+ }
+ if (scf_comtbl.scf_shut_event_sub == EVENT_SUB_WAIT) {
+ /* Send event information command */
+ scfcmdp->cmd = CMD_INT_REASON;
+ if (scf_comtbl.int_reason_retry == 0) {
+ scfcmdp->subcmd = SUB_INT_REASON_DISP;
+ } else {
+ scfcmdp->subcmd = SUB_INT_REASON_RETRY;
+ }
+ scfcmdp->scount = 0;
+ scfcmdp->rbuf = &scf_comtbl.scf_rbuf[0];
+ scfcmdp->rcount = SCF_INT_CNT_MAX;
+ scfcmdp->flag = SCF_USE_SLBUF;
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_SHUT) !=
+ 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_SHUT;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_shut_event_sub = EVENT_SUB_EXEC;
+ }
+ }
+
+ /* Reports are held off while BUF_FUL / RCI_BUSY retry timers run */
+ if ((scf_comtbl.scf_cmd_exec_flag == 0) &&
+ ((scf_timer_check(SCF_TIMERCD_BUF_FUL) ==
+ SCF_TIMER_NOT_EXEC) &&
+ (scf_timer_check(SCF_TIMERCD_RCI_BUSY) ==
+ SCF_TIMER_NOT_EXEC))) {
+ /* Check report request */
+ if (scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_RUN_WAIT) {
+ /* Send report(System running) command */
+ scfcmdp->cmd = CMD_REPORT;
+ scfcmdp->subcmd = SUB_SYSTEM_STATUS_RPT;
+ bzero((void *)&scf_comtbl.scf_sbuf[0], SCF_S_CNT_16);
+ scf_comtbl.scf_sbuf[0] = REPORT_STAT_SYSTEM_RUNNING;
+ scf_comtbl.scf_sbuf[1] = 0;
+ scf_comtbl.scf_sbuf[2] = 0;
+ scf_comtbl.scf_sbuf[3] = 0;
+ scfcmdp->sbuf = &scf_comtbl.scf_sbuf[0];
+ scfcmdp->scount = SCF_S_CNT_15;
+ scfcmdp->rcount = 0;
+ scfcmdp->flag = SCF_USE_S_BUF;
+ if ((scf_comtbl.scf_cmd_resend_req &
+ RESEND_REPORT_RUN) != 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &=
+ ~RESEND_REPORT_RUN;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_REPORT_RUN_EXEC;
+ scf_comtbl.scf_last_report = SCF_SYSTEM_RUNNING;
+ } else if (scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_SHUT_WAIT) {
+ /* Send report(Shutdown start) command */
+ scfcmdp->cmd = CMD_REPORT;
+ scfcmdp->subcmd = SUB_SYSTEM_STATUS_RPT;
+ bzero((void *)&scf_comtbl.scf_sbuf[0],
+ SCF_S_CNT_16);
+ scf_comtbl.scf_sbuf[0] =
+ REPORT_STAT_SHUTDOWN_START;
+ /* Power-off factor code accompanies the shutdown report */
+ scf_comtbl.scf_sbuf[1] =
+ scf_poff_factor[scf_comtbl.poff_factor][0];
+ scf_comtbl.scf_sbuf[2] =
+ scf_poff_factor[scf_comtbl.poff_factor][1];
+ scf_comtbl.scf_sbuf[3] =
+ scf_poff_factor[scf_comtbl.poff_factor][2];
+ scfcmdp->sbuf = &scf_comtbl.scf_sbuf[0];
+ scfcmdp->scount = SCF_S_CNT_15;
+ scfcmdp->rcount = 0;
+ scfcmdp->flag = SCF_USE_S_BUF;
+ if ((scf_comtbl.scf_cmd_resend_req &
+ RESEND_REPORT_SHUT) != 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &=
+ ~RESEND_REPORT_SHUT;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_report_event_sub =
+ EVENT_SUB_REPORT_SHOT_EXEC;
+ scf_comtbl.scf_last_report = SCF_SHUTDOWN_START;
+ scf_comtbl.shutdown_start_reported = 1;
+ }
+ }
+
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ /* Check alive check request */
+ if (scf_comtbl.scf_alive_event_sub == EVENT_SUB_ALST_WAIT) {
+ /* Send alive check start command */
+ scfcmdp->cmd = CMD_ALIVE_CHECK;
+ scfcmdp->subcmd = SUB_ALIVE_START;
+ bzero((void *)&scf_comtbl.scf_sbuf[0], SCF_S_CNT_16);
+ scf_comtbl.scf_sbuf[0] = scf_alive_watch_code;
+ /* Phase code increments on every alive-check start */
+ scf_comtbl.scf_sbuf[1] = scf_alive_phase_code;
+ scf_alive_phase_code++;
+ scf_comtbl.scf_sbuf[2] = scf_alive_interval_time;
+ scf_comtbl.scf_sbuf[3] = scf_alive_monitor_time;
+ /* Panic time is a 16-bit value split into two bytes */
+ scf_comtbl.scf_sbuf[4] =
+ (uchar_t)(scf_alive_panic_time >> 8);
+ scf_comtbl.scf_sbuf[5] =
+ (uchar_t)(scf_alive_panic_time);
+ scfcmdp->sbuf = &scf_comtbl.scf_sbuf[0];
+ scfcmdp->scount = SCF_S_CNT_15;
+ scfcmdp->rcount = 0;
+ scfcmdp->flag = SCF_USE_S_BUF;
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_ALST) !=
+ 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_ALST;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALST_EXEC;
+ } else if (scf_comtbl.scf_alive_event_sub ==
+ EVENT_SUB_ALSP_WAIT) {
+ /* Send alive check stop command */
+ scfcmdp->cmd = CMD_ALIVE_CHECK;
+ scfcmdp->subcmd = SUB_ALIVE_STOP;
+ scfcmdp->scount = 0;
+ scfcmdp->rcount = 0;
+ scfcmdp->flag = SCF_USE_S_BUF;
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_ALSP) !=
+ 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_ALSP;
+ }
+ scf_i_send_cmd(scfcmdp, statep);
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALSP_EXEC;
+ }
+ }
+
+ /* Check send wait */
+ /* Lowest priority: a command queued by an ioctl caller */
+ if ((scf_comtbl.scf_cmd_exec_flag == 0) && (scf_comtbl.cmd_end_wait)) {
+ if ((scf_comtbl.scf_cmd_resend_req & RESEND_IOCTL) != 0) {
+ scf_comtbl.scf_cmd_resend_flag = 1;
+ scf_comtbl.scf_cmd_resend_req &= ~RESEND_IOCTL;
+ }
+ scf_i_send_cmd(scf_comtbl.scf_cmdp, statep);
+
+ scf_comtbl.scf_exec_cmd_id = 1;
+ }
+
+ /* Signal to command wait */
+ if ((scf_comtbl.scf_cmd_exec_flag == 0) &&
+ (scf_comtbl.cmd_busy_wait != 0)) {
+ scf_comtbl.cmd_busy_wait = 0;
+ cv_signal(&scf_comtbl.cmdwait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmdwait_cv,
+ sizeof (kcondvar_t));
+ }
+
+/*
+ * END_next_cmd_check
+ */
+ END_next_cmd_check:
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_next_rxdata_get()
+ *
+ * Description: Next receive data Read processing.
+ *	Copies the next segment of a large receive buffer out of the SCF
+ *	SRAM (bounded by scf_rxbuff_max_size per call). When all data has
+ *	been transferred, verifies the receive checksum; on a checksum
+ *	mismatch the command is retried or, after scf_resum_rcnt failures,
+ *	the SCF path is changed.
+ */
+void
+scf_next_rxdata_get(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_next_rxdata_get() "
+ scf_state_t *statep;
+ struct scf_cmd *scfcmdp;
+ uint32_t sum4;
+ uint8_t *wk_in_p;
+ uint8_t *wk_out_p;
+ uint32_t *wk_in_p32;
+ uint_t wk_leng;
+ uint_t rxbuff_cnt;
+ uint_t rxbuff_offset;
+ int path_ret;
+ int ii;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Check command send exec */
+ if (scf_comtbl.scf_cmd_exec_flag == 0) {
+ goto END_next_rxdata_get;
+ }
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(&statep);
+ /* Check SCF path status */
+ if (path_ret != SCF_PATH_ONLINE) {
+ goto END_next_rxdata_get;
+ }
+
+ /* Check remainder receive data size */
+ if (scf_comtbl.scf_rem_rxbuff_size == 0) {
+ goto END_next_rxdata_get;
+ }
+
+
+ scfcmdp = scf_comtbl.scf_cmdp;
+ /* Resume where the previous segment left off */
+ rxbuff_offset = scfcmdp->rbufleng - scf_comtbl.scf_rem_rxbuff_size;
+ if (scf_comtbl.scf_rem_rxbuff_size > scf_rxbuff_max_size) {
+ rxbuff_cnt = scf_rxbuff_max_size;
+ } else {
+ rxbuff_cnt = scf_comtbl.scf_rem_rxbuff_size;
+ }
+ scf_comtbl.scf_rem_rxbuff_size -= rxbuff_cnt;
+
+ /* Receive data copy */
+ /* Byte-wise DDI access from device SRAM into the command buffer */
+ wk_in_p = (uint8_t *)&statep->scf_sys_sram->DATA[rxbuff_offset];
+ wk_out_p = (uint8_t *)&scfcmdp->rbuf[rxbuff_offset];
+ for (ii = 0; ii < rxbuff_cnt; ii++, wk_in_p++, wk_out_p++) {
+ *wk_out_p = SCF_DDI_GET8(statep, statep->scf_sys_sram_handle,
+ wk_in_p);
+ }
+
+ /* Check remainder receive data size */
+ if (scf_comtbl.scf_rem_rxbuff_size != 0) {
+ /* Next receive data timer start */
+ scf_timer_start(SCF_TIMERCD_NEXTRECV);
+ goto END_next_rxdata_get;
+ }
+
+ /* Remainder receive data end */
+ scf_comtbl.scf_cmd_exec_flag = 0;
+
+ /* Make SRAM data sum */
+ /* 32-bit big-endian word sum seeded with the magic number */
+ sum4 = SCF_MAGICNUMBER_L;
+ wk_leng = scfcmdp->rbufleng / 4;
+ wk_in_p32 = (void *)scfcmdp->rbuf;
+ for (ii = 0; ii < wk_leng; ii++, wk_in_p32++) {
+ sum4 += *wk_in_p32;
+ }
+ /* Fold the 1-3 trailing bytes in as the high bytes of one word */
+ if ((scfcmdp->rbufleng % 4) == 3) {
+ sum4 += ((scfcmdp->rbuf[scfcmdp->rbufleng - 3] << 24) |
+ (scfcmdp->rbuf[scfcmdp->rbufleng - 2] << 16) |
+ (scfcmdp->rbuf[scfcmdp->rbufleng - 1] << 8));
+ } else if ((scfcmdp->rbufleng % 4) == 2) {
+ sum4 += ((scfcmdp->rbuf[scfcmdp->rbufleng - 2] << 24) |
+ (scfcmdp->rbuf[scfcmdp->rbufleng - 1] << 16));
+ } else if ((scfcmdp->rbufleng % 4) == 1) {
+ sum4 += (scfcmdp->rbuf[scfcmdp->rbufleng - 1] << 24);
+ }
+
+ SCF_DBG_MAKE_RXSUM_L(sum4, statep->reg_rdata[2]);
+
+ /* Check SRAM data sum */
+ if (sum4 == statep->reg_rdata[2]) {
+ statep->resum_rcnt = 0;
+
+ scf_comtbl.cmd_end_wait = 0;
+ /* Signal to command end wait */
+ cv_signal(&scf_comtbl.cmdend_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmdend_cv,
+ sizeof (kcondvar_t));
+
+ /* Next command send check */
+ scf_next_cmd_check(statep);
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "next_rx ", 8);
+ scfcmdp->stat0 = SCF_STAT0_RDATA_SUM;
+ statep->resum_rcnt++;
+
+ /* Check Rx sum re-try out */
+ if (statep->resum_rcnt > scf_resum_rcnt) {
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_RSUMERR);
+
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "next_rx ", 8);
+ cmn_err(CE_WARN,
+ "%s,Failed the receive data SUM of "
+ "SRAM. SCF command =0x%02x%02x\n",
+ &statep->pathname[0],
+ scfcmdp->subcmd, scfcmdp->cmd);
+ /* Hard error on this path: fail over to another path */
+ statep->scf_herr |= HERR_RESUM;
+ scf_path_change(statep);
+ } else {
+ /* Set command wait status */
+ scf_cmdwait_status_set();
+
+ scf_comtbl.scf_cmd_exec_flag = 0;
+
+ /* Next command send check */
+ scf_next_cmd_check(statep);
+ }
+ }
+
+/*
+ * END_next_rxdata_get
+ */
+ END_next_rxdata_get:
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_online_wait_tout()
+ *
+ * Description: SCF online monitor timeout processing.
+ *	Re-reads the STATUS_ExR register on the current path. If the SCF
+ *	came online, resumes normal command processing. If still offline,
+ *	probes any standby paths for an online SCF (failing over if one is
+ *	found), otherwise re-arms the online timer up to
+ *	scf_online_wait_rcnt times before declaring the path dead and
+ *	waking all waiters with an error.
+ */
+void
+scf_online_wait_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_online_wait_tout() "
+ scf_state_t *statep = NULL;
+ scf_state_t *wkstatep = NULL;
+ scf_state_t *wait_top_statep = NULL;
+ int online_flag = 0;
+ int ii;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Get SCF path status */
+ /* Prefer the executing path, else the path-change candidate */
+ statep = scf_comtbl.scf_exec_p;
+ if (statep == NULL) {
+ statep = scf_comtbl.scf_path_p;
+ }
+ /* Check SCF path status */
+ if (statep == NULL) {
+ goto END_online_wait_tout;
+ }
+
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "onlin_to", 8);
+ /* Get SCF status extended register */
+ statep->reg_status_exr = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle, &statep->scf_regs->STATUS_ExR);
+ SC_DBG_DRV_TRACE(TC_R_STATUS_ExR, __LINE__,
+ &statep->reg_status_exr, sizeof (statep->reg_status_exr));
+
+ /* Check SCF status */
+ if ((statep->reg_status_exr & STATUS_SCF_STATUS) == STATUS_SCF_ONLINE) {
+ /*
+ * SCF online
+ */
+ SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF path online");
+ if (scf_comtbl.scf_status == SCF_STATUS_OFFLINE) {
+ cmn_err(CE_NOTE, "%s: SCF online.\n", scf_driver_name);
+ }
+ scf_comtbl.scf_status = SCF_STATUS_ONLINE;
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_ONLINE);
+
+ /* DCSP interface start */
+ scf_dscp_start(FACTOR_ONLINE);
+
+ /* Check Alive check exec */
+ if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+ scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALST_WAIT;
+ }
+
+ /* Check command send exec */
+ if (scf_comtbl.scf_cmd_exec_flag) {
+ /* Set command wait status */
+ scf_cmdwait_status_set();
+ scf_comtbl.scf_cmd_exec_flag = 0;
+ }
+
+ /* Next command send check */
+ scf_next_cmd_check(statep);
+
+ /* Check next command send */
+ if (scf_comtbl.cmd_busy_wait != 0) {
+ scf_comtbl.cmd_busy_wait = 0;
+ /* Signal to command wait */
+ cv_signal(&scf_comtbl.cmdwait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.cmdwait_cv, sizeof (kcondvar_t));
+ }
+ } else {
+ /*
+ * SCF offline
+ */
+
+ /* Check standby path */
+ /* Probe each standby path once, stopping if we wrap around */
+ if ((wkstatep = scf_comtbl.scf_wait_p) != NULL) {
+ wait_top_statep = wkstatep;
+ for (ii = 0; ii < scf_path_change_max; ii++) {
+ /* Get SCF status extended register */
+ wkstatep->reg_status_exr =
+ SCF_DDI_GET32(wkstatep,
+ wkstatep->scf_regs_handle,
+ &wkstatep->scf_regs->STATUS_ExR);
+ SC_DBG_DRV_TRACE(TC_R_STATUS_ExR, __LINE__,
+ &wkstatep->reg_status_exr,
+ sizeof (wkstatep->reg_status_exr));
+
+ /* Check SCF status */
+ if ((wkstatep->reg_status_exr &
+ STATUS_SCF_STATUS) ==
+ STATUS_SCF_ONLINE) {
+ /* SCF path change process */
+ online_flag = 1;
+ scf_path_change(wkstatep);
+ break;
+ }
+
+ /* SCF path change */
+ /* Rotate this path back to standby and try the next */
+ scf_comtbl.scf_wait_p = wkstatep->next;
+ scf_chg_scf(wkstatep, PATH_STAT_STANDBY);
+
+ /* Check standby path */
+ wkstatep = scf_comtbl.scf_wait_p;
+ if (wkstatep == NULL) {
+ /* Not change path */
+ break;
+ }
+ if (wkstatep != wait_top_statep) {
+ /* Next SCF path */
+ continue;
+ } else {
+ /* Not change path */
+ break;
+ }
+ }
+ }
+
+ /* A standby path took over; nothing more to do here */
+ if (online_flag != 0) {
+ goto END_online_wait_tout;
+ }
+
+ scf_comtbl.scf_status = SCF_STATUS_OFFLINE;
+
+ statep->online_to_rcnt++;
+
+ /* Check re-try out */
+ if (statep->online_to_rcnt <= scf_online_wait_rcnt) {
+ /* SCF online timer start */
+ scf_timer_start(SCF_TIMERCD_ONLINE);
+ goto END_online_wait_tout;
+ }
+
+ /* Retries exhausted: abandon the path and fail all waiters */
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "onlin_to", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_ONLINETO);
+
+ scf_comtbl.scf_cmd_exec_flag = 0;
+
+ /* Timer stop */
+ scf_timer_stop(SCF_TIMERCD_CMDBUSY);
+ scf_timer_stop(SCF_TIMERCD_CMDEND);
+ scf_timer_stop(SCF_TIMERCD_ONLINE);
+ scf_timer_stop(SCF_TIMERCD_NEXTRECV);
+ scf_timer_stop(SCF_TIMERCD_BUF_FUL);
+ scf_timer_stop(SCF_TIMERCD_RCI_BUSY);
+
+ if (scf_comtbl.suspend_wait) {
+ /* Signal to suspend wait */
+ scf_comtbl.suspend_wait = 0;
+ scf_comtbl.scf_suspend_sendstop = 1;
+ cv_signal(&scf_comtbl.suspend_wait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.suspend_wait_cv,
+ sizeof (kcondvar_t));
+ }
+ if (scf_comtbl.cmd_end_wait != 0) {
+ /* Signal to command end wait */
+ /* Fail the in-flight ioctl command with "no path" */
+ scf_comtbl.cmd_end_wait = 0;
+ scf_comtbl.scf_cmdp->stat0 = SCF_STAT0_NOT_PATH;
+ cv_signal(&scf_comtbl.cmdend_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.cmdend_cv,
+ sizeof (kcondvar_t));
+ }
+ if (scf_comtbl.cmd_busy_wait != 0) {
+ /* Signal to command wait */
+ scf_comtbl.cmd_busy_wait = 0;
+ cv_signal(&scf_comtbl.cmdwait_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+ &scf_comtbl.cmdwait_cv,
+ sizeof (kcondvar_t));
+ }
+
+ /* DCSP interface stop */
+ scf_dscp_stop(FACTOR_PATH_STOP);
+ }
+
+/*
+ * END_online_wait_tout
+ */
+ END_online_wait_tout:
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_cmdbusy_tout()
+ *
+ * Description: SCF command busy monitor timeout processing.
+ *	Re-reads the COMMAND and COMMAND_ExR registers. If both busy bits
+ *	have cleared, resumes command dispatch; otherwise re-arms the busy
+ *	timer up to scf_devbusy_wait_rcnt times, after which the path is
+ *	marked with a hard error and a path change is forced.
+ */
+void
+scf_cmdbusy_tout(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_cmdbusy_tout() "
+ scf_state_t *statep;
+ int path_ret;
+ uint8_t wk_int8;
+ uint16_t wk_int16;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+ /* Get SCF path status */
+ path_ret = scf_path_check(&statep);
+ /* Check SCF path status */
+ if ((path_ret != SCF_PATH_ONLINE) && (path_ret != SCF_PATH_CHANGE)) {
+ goto END_cmdbusy_tout;
+ }
+
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "cmdbusy ", 8);
+
+ /* SRAM trace */
+ SCF_SRAM_TRACE(statep, DTC_CMDBUSYTO);
+
+ /* Get SCF command register */
+ wk_int16 = SCF_DDI_GET16(statep, statep->scf_regs_handle,
+ &statep->scf_regs->COMMAND);
+ SC_DBG_DRV_TRACE(TC_R_COMMAND, __LINE__, &wk_int16, sizeof (wk_int16));
+ wk_int8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+ &statep->scf_regs->COMMAND_ExR);
+ SC_DBG_DRV_TRACE(TC_R_COMMAND_ExR, __LINE__, &wk_int8,
+ sizeof (wk_int8));
+
+ /* Check busy flag */
+ /* Both the command register and the extended register must be idle */
+ if (((wk_int16 & COMMAND_BUSY) == 0x0000) &&
+ ((wk_int8 & COMMAND_ExR_BUSY) == 0x00)) {
+ SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF command ready");
+ /* Next command send check */
+ scf_next_cmd_check(statep);
+ } else {
+ statep->devbusy_to_rcnt++;
+
+ /* Check re-try out */
+ if (statep->devbusy_to_rcnt <= scf_devbusy_wait_rcnt) {
+ /* SCF online timer start */
+ scf_timer_start(SCF_TIMERCD_CMDBUSY);
+ goto END_cmdbusy_tout;
+ }
+
+ /* Distinguish a stuck SCF command from a stuck XSCF */
+ if ((wk_int16 & COMMAND_BUSY) != 0x0000) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "cmdbusy ", 8);
+ cmn_err(CE_WARN,
+ "%s,Busy state of SCF command is "
+ "not released.\n", &statep->pathname[0]);
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "cmdbusy ", 8);
+ cmn_err(CE_WARN,
+ "%s,Busy state of XSCF is not released.\n",
+ &statep->pathname[0]);
+ }
+ statep->scf_herr |= HERR_BUSY_RTO;
+ scf_path_change(statep);
+ }
+
+/*
+ * END_cmdbusy_tout
+ */
+ END_cmdbusy_tout:
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+/*
+ * scf_cmdend_tout()
+ *
+ * Description: SCF command complete monitor timeout processing.
+ *
+ */
+void
+scf_cmdend_tout(void)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_cmdend_tout() "
+	scf_state_t		*statep;
+	int			path_ret;
+	struct scf_cmd		*scfcmdp;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Get SCF path status */
+	path_ret = scf_path_check(&statep);
+	/* Check SCF path status */
+	if ((path_ret != SCF_PATH_ONLINE) && (path_ret != SCF_PATH_CHANGE)) {
+		goto END_cmdend_tout;
+	}
+
+	SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "cmd_to ", 8);
+
+	/* SRAM trace */
+	SCF_SRAM_TRACE(statep, DTC_CMDTO);
+
+	/* error memo */
+	scf_comtbl.memo_cmd_to_cnt++;
+	statep->memo_cmd_to_cnt++;
+
+	if (scf_comtbl.scf_exec_cmd_id) {
+		/* Command send for ioctl */
+		scfcmdp = scf_comtbl.scf_cmdp;
+	} else {
+		/* Command send for interrupt */
+		scfcmdp = &scf_comtbl.scf_cmd_intr;
+	}
+
+	statep->cmd_to_rcnt++;
+
+	/* Check re-try out */
+	if (statep->cmd_to_rcnt <= scf_cmd_to_rcnt) {
+		/* Set command wait status */
+		scf_cmdwait_status_set();
+
+		scf_comtbl.scf_cmd_exec_flag = 0;
+
+		/* Next command send check */
+		scf_next_cmd_check(statep);
+	} else {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "cmd_to ", 8);
+		cmn_err(CE_WARN,
+		    "%s,SCF command timeout occurred. "
+		    "SCF command = 0x%02x%02x\n",
+		    &statep->pathname[0],
+		    scfcmdp->subcmd, scfcmdp->cmd);
+		statep->scf_herr |= HERR_CMD_RTO;
+		scf_path_change(statep);
+	}
+
+/*
+ * END_cmdend_tout
+ */
+	END_cmdend_tout:
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_report_send_wait_tout()
+ *
+ * Description: Report command send wait timeout processing.
+ *
+ */
+void
+scf_report_send_wait_tout(void)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_report_send_wait_tout() "
+	scf_state_t		*statep;
+	int			next_send = 0;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Get SCF path status */
+	(void) scf_path_check(&statep);
+	/* Check SCF path status */
+	if (statep != NULL) {
+		/* Re-queue the last report for another send attempt */
+		if (scf_comtbl.scf_last_report == SCF_SYSTEM_RUNNING) {
+			scf_comtbl.scf_report_event_sub =
+			    EVENT_SUB_REPORT_RUN_WAIT;
+			next_send = 1;
+		} else if (scf_comtbl.scf_last_report ==
+		    EVENT_SUB_REPORT_SHOT_EXEC) {
+			scf_comtbl.scf_report_event_sub =
+			    EVENT_SUB_REPORT_SHUT_WAIT;
+			next_send = 1;
+		}
+		if (next_send) {
+			/* Next command send check */
+			scf_next_cmd_check(statep);
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_alivecheck_intr()
+ *
+ * Description: Alive check timeout interrupt processing.
+ *
+ */
+void
+scf_alivecheck_intr(scf_state_t *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_alivecheck_intr() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Only act while alive check monitoring is running */
+	if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+		scf_comtbl.scf_alive_int_count--;
+
+		if (scf_comtbl.scf_alive_int_count == 0) {
+			/* Alive check register (ACR) set: phase code + intr */
+			statep->reg_acr = scf_acr_phase_code | ACR_ALIVE_INT;
+			SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+			    &statep->scf_regs->ACR, statep->reg_acr);
+			SC_DBG_DRV_TRACE(TC_W_ACR, __LINE__, &statep->reg_acr,
+			    sizeof (statep->reg_acr));
+			/* Register read sync */
+			scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+			    &statep->scf_regs->ACR);
+
+			SCFDBGMSG1(SCF_DBGFLAG_REG, "ACR = 0x%02x",
+			    statep->reg_acr);
+
+			scf_acr_phase_code++;
+			scf_comtbl.scf_alive_int_count =
+			    scf_alive_interval_time / 3;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_path_change()
+ *
+ * Description: SCF path change processing.
+ *
+ */
+void
+scf_path_change(scf_state_t *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_path_change() "
+	scf_state_t		*wkstatep;
+	scf_state_t		*act_statep = NULL;
+	uint_t			path_change = 0;
+	uint_t			halt_flag = 0;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	if (statep->scf_herr != 0) {
+		act_statep = statep;
+
+		statep->scf_herr |= HERR_EXEC;
+
+		/* Interrupt disable */
+		scf_forbid_intr(statep);
+
+		/* Remove statep from active/standby bookkeeping */
+		if (scf_comtbl.scf_path_p == statep) {
+			scf_comtbl.scf_path_p = NULL;
+			path_change = FACTOR_PATH_HALT;
+		} else if (scf_comtbl.scf_exec_p == statep) {
+			scf_comtbl.scf_exec_p = NULL;
+			path_change = FACTOR_PATH_HALT;
+		} else {
+			if ((statep->path_status == PATH_STAT_STANDBY) ||
+			    (statep->path_status == PATH_STAT_STOP)) {
+				scf_del_queue(statep);
+			}
+		}
+		if ((statep->path_status == PATH_STAT_ACTIVE) ||
+		    (statep->path_status == PATH_STAT_STANDBY) ||
+		    (statep->path_status == PATH_STAT_STOP)) {
+			scf_chg_scf(statep, PATH_STAT_FAIL);
+		}
+	} else {
+		/* SCF path change interrupt or SCF online wait timeout */
+		if (scf_comtbl.scf_exec_p != NULL) {
+			act_statep = scf_comtbl.scf_exec_p;
+		} else if (scf_comtbl.scf_path_p != NULL) {
+			act_statep = scf_comtbl.scf_path_p;
+		}
+		if ((act_statep != NULL) && (scf_comtbl.scf_wait_p != NULL)) {
+			/* Interrupt disable */
+			scf_forbid_intr(act_statep);
+			/* Interrupt enable */
+			scf_permit_intr(act_statep, 1);
+
+			scf_comtbl.scf_exec_p = NULL;
+			scf_comtbl.scf_path_p = NULL;
+			path_change = FACTOR_PATH_STOP;
+
+			scf_chg_scf(act_statep, PATH_STAT_STANDBY);
+		}
+	}
+
+	if (path_change) {
+		/* FMEMA interface */
+		scf_avail_cmd_reg_vaddr = NULL;
+
+		/* Check standby path */
+		if ((wkstatep = scf_comtbl.scf_wait_p) != NULL) {
+			if (path_change == FACTOR_PATH_HALT) {
+				/* Check SCF path change retry */
+				if (scf_comtbl.path_change_rcnt <
+				    scf_path_change_max) {
+					scf_comtbl.path_change_rcnt++;
+				} else {
+					/* SCF path change retry over */
+					halt_flag = FACTOR_PATH_HALT;
+				}
+			}
+		} else {
+			/* No standby path to change to */
+			halt_flag = FACTOR_PATH_HALT;
+		}
+
+		if (halt_flag == 0) {
+			if (wkstatep != act_statep) {
+				cmn_err(CE_CONT,
+				    "%s: SCFC path changed. (%s --> %s)\n",
+				    scf_driver_name,
+				    &act_statep->pathname[0],
+				    &wkstatep->pathname[0]);
+			}
+
+			/* Timer stop */
+			scf_timer_stop(SCF_TIMERCD_CMDBUSY);
+			scf_timer_stop(SCF_TIMERCD_CMDEND);
+			scf_timer_stop(SCF_TIMERCD_ONLINE);
+			scf_timer_stop(SCF_TIMERCD_NEXTRECV);
+
+			/* Set command wait status */
+			scf_cmdwait_status_set();
+
+			scf_comtbl.scf_alive_event_sub = EVENT_SUB_NONE;
+			scf_comtbl.scf_cmd_exec_flag = 0;
+			/* Send path change */
+			scf_comtbl.scf_wait_p = wkstatep->next;
+			scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+			scf_next_cmd_check(wkstatep);
+
+			/* DSCP interface stop */
+			scf_dscp_stop(FACTOR_PATH_CHG);
+		}
+	}
+
+	if (halt_flag) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "path_chg", 8);
+
+		scf_comtbl.path_change_rcnt = 0;
+
+		scf_comtbl.scf_cmd_exec_flag = 0;
+
+		/* Timer stop */
+		scf_timer_stop(SCF_TIMERCD_CMDBUSY);
+		scf_timer_stop(SCF_TIMERCD_CMDEND);
+		scf_timer_stop(SCF_TIMERCD_ONLINE);
+		scf_timer_stop(SCF_TIMERCD_NEXTRECV);
+		scf_timer_stop(SCF_TIMERCD_BUF_FUL);
+		scf_timer_stop(SCF_TIMERCD_RCI_BUSY);
+
+		if (scf_comtbl.suspend_wait) {
+			/* Signal to suspend wait */
+			scf_comtbl.suspend_wait = 0;
+			scf_comtbl.scf_suspend_sendstop = 1;
+			cv_signal(&scf_comtbl.suspend_wait_cv);
+			SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+			    &scf_comtbl.suspend_wait_cv,
+			    sizeof (kcondvar_t));
+		}
+		if (scf_comtbl.cmd_end_wait != 0) {
+			/* Signal to command end wait */
+			scf_comtbl.cmd_end_wait = 0;
+			scf_comtbl.scf_cmdp->stat0 = SCF_STAT0_NOT_PATH;
+			cv_signal(&scf_comtbl.cmdend_cv);
+			SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+			    &scf_comtbl.cmdend_cv, sizeof (kcondvar_t));
+		}
+		if (scf_comtbl.cmd_busy_wait != 0) {
+			/* Signal to command wait */
+			scf_comtbl.cmd_busy_wait = 0;
+			cv_signal(&scf_comtbl.cmdwait_cv);
+			SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__,
+			    &scf_comtbl.cmdwait_cv, sizeof (kcondvar_t));
+		}
+
+		/* DSCP interface stop */
+		scf_dscp_stop(halt_flag);
+
+		if (halt_flag == FACTOR_PATH_HALT) {
+			/* Not change path(SCF HALT) */
+
+			/* FMEMA interface */
+			scf_avail_cmd_reg_vaddr = NULL;
+
+			cmn_err(CE_WARN,
+			    "%s: SCF HALT was detected.\n",
+			    scf_driver_name);
+
+			/* SCF HALT after processing */
+			scf_halt(scf_halt_proc_mode);
+		}
+
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_halt()
+ *
+ * Description: SCF HALT processing: shutdown or panic, selected by mode.
+ *
+ */
+void
+scf_halt(uint_t mode)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_halt() "
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	SC_DBG_DRV_TRACE(TC_ERRCD, __LINE__, &mode, sizeof (mode));
+
+	switch (mode) {
+	case HALTPROC_SHUTDOWN:
+		cmn_err(CE_CONT,
+		    "%s: Shutdown was executed.\n",
+		    scf_driver_name);
+		/* System shutdown start (only if not already in progress) */
+		if (scf_comtbl.scf_shutdownreason == 0) {
+			scf_comtbl.scf_shutdownreason = REASON_SHUTDOWN_HALT;
+			do_shutdown();
+		}
+		break;
+
+	case HALTPROC_PANIC:
+		/* System panic */
+		cmn_err(CE_PANIC,
+		    "%s: Executed panic by SCF HALT.\n",
+		    scf_driver_name);
+		break;
+
+	default:
+		break;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_panic_callb()
+ *
+ * Description: Panic entry processing.
+ *
+ */
+/* ARGSUSED */
+void
+scf_panic_callb(int code)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_panic_callb() "
+	scf_state_t		*statep;
+	scf_state_t		*save__exec_p;
+	struct scf_cmd		scf_cmd;
+	scf_short_buffer_t	sbuf;
+	unsigned short		status;
+	int			counter;
+	int			report_succeeded = 0;
+	int			path_flag = 0;
+	int			path_counter = 0;
+	int			report_counter = 0;
+	int			ii = 0;
+	int			new_report = 0;
+
+	SCFDBGMSG1(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": start code = %d",
+	    code);
+	SCF_PANIC_TRACE(__LINE__);
+
+	/* Check panic after */
+	if (scf_panic_reported) {
+		SCF_PANIC_TRACE(__LINE__);
+		return;
+	}
+
+	/* Check double panic */
+	if (scf_panic_exec_flag) {
+		SCF_PANIC_TRACE(__LINE__);
+		return;
+	}
+
+	/* Check shutdown exec */
+	if (scf_comtbl.scf_shutdown_exec_flag) {
+		SCF_PANIC_TRACE(__LINE__);
+		return;
+	}
+
+	/* Set panic exec flag */
+	scf_panic_exec_flag = 1;
+
+	save__exec_p = scf_comtbl.scf_exec_p;
+	/* wait */
+	drv_usecwait(SCF_MIL2MICRO(scf_panic_exec_wait_time));
+
+	if ((statep = scf_comtbl.scf_exec_p) != NULL) {
+		/* Exec SCF path interrupt disable */
+		/* SCF interrupt disable(CR) */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL, CONTROL_DISABLE);
+		/* Register read sync */
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL);
+
+		/* SCF Status register interrupt(STR) : clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+
+		/* SCF Status extended register(STExR) : interrupt clear */
+		SCF_P_DDI_PUT32(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS_ExR, 0xffffffff);
+
+		/* DSCP buffer status register(DSR) : interrupt clear */
+		SCF_P_DDI_PUT8(statep->scf_regs_handle,
+		    &statep->scf_regs->DSR, 0xff);
+
+		/* SCF interrupt status register(ISR) : interrupt clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST,
+		    (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST);
+	}
+
+	if ((statep = scf_comtbl.scf_path_p) != NULL) {
+		/* Path change SCF path interrupt disable */
+		/* SCF interrupt disable(CR) */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL, CONTROL_DISABLE);
+		/* Register read sync */
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL);
+
+		/* SCF Status register interrupt(STR) : clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+
+		/* SCF Status extended register(STExR) : interrupt clear */
+		SCF_P_DDI_PUT32(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS_ExR, 0xffffffff);
+
+		/* DSCP buffer status register(DSR) : interrupt clear */
+		SCF_P_DDI_PUT8(statep->scf_regs_handle,
+		    &statep->scf_regs->DSR, 0xff);
+
+		/* SCF interrupt status register(ISR) : interrupt clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST,
+		    (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST);
+	}
+
+	statep = scf_comtbl.scf_wait_p;
+	while (statep != NULL) {	/* Standby SCF path interrupt disable */
+		/* SCF interrupt disable(CR) */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL, CONTROL_DISABLE);
+		/* Register read sync */
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL);
+
+		/* SCF Status register interrupt(STR) : clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+
+		/* SCF Status extended register(STExR) : interrupt clear */
+		SCF_P_DDI_PUT32(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS_ExR, 0xffffffff);
+
+		/* DSCP buffer status register(DSR) : interrupt clear */
+		SCF_P_DDI_PUT8(statep->scf_regs_handle,
+		    &statep->scf_regs->DSR, 0xff);
+
+		/* SCF interrupt status register(ISR) : interrupt clear */
+		SCF_P_DDI_PUT16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST,
+		    (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+		status = SCF_P_DDI_GET16(statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST);
+
+		statep = statep->next;
+	}
+
+	status = 0;
+
+	/* Check command exec */
+	if (scf_comtbl.scf_cmd_exec_flag == 0) {
+		statep = scf_comtbl.scf_exec_p;
+		if (statep == NULL) {
+			statep = scf_comtbl.scf_path_p;
+			if (statep == NULL) {
+				statep = scf_comtbl.scf_wait_p;
+				if (statep == NULL) {
+					/* Not use SCF path */
+					SCF_PANIC_TRACE(__LINE__);
+					goto END_scf_panic_callb;
+				}
+				path_flag = 1;
+			}
+		}
+		goto START_scf_panic;
+	}
+
+	statep = scf_comtbl.scf_exec_p;
+	if (statep == NULL) {
+		statep = scf_comtbl.scf_path_p;
+		if (statep == NULL) {
+			statep = scf_comtbl.scf_wait_p;
+			if (statep == NULL) {
+				SCF_PANIC_TRACE(__LINE__);
+				goto END_scf_panic_callb;
+			}
+			/* wait */
+			drv_usecwait(SCF_MIL2MICRO((scf_cmdend_wait_time_panic *
+			    scf_cmdend_wait_rcnt_panic)));
+			path_flag = 1;
+		}
+	}
+	if (path_flag == 0) {
+		for (ii = 0; ii < scf_cmdend_wait_rcnt_panic; ii++) {
+			/* wait */
+			drv_usecwait
+			    (SCF_MIL2MICRO(scf_cmdend_wait_time_panic));
+
+			/* Get SCF status register */
+			status = SCF_P_DDI_GET16(statep->scf_regs_handle,
+			    &statep->scf_regs->STATUS);
+			scf_panic_trc_status = status;
+
+			if (status & STATUS_CMD_COMPLETE) {
+				/* Command complete */
+				break;
+			}
+		}
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+	}
+
+/*
+ * START_scf_panic
+ */
+	START_scf_panic:
+
+	counter = scf_panic_report_maxretry;
+	do {
+/*
+ * START_scf_panic_loop
+ */
+	START_scf_panic_loop:
+
+		/* Check SCF path change */
+		if (path_flag == 0) {
+			goto START_report_send;
+		}
+
+		scf_cmd.cmd = CMD_SCFI_PATH;
+		scf_cmd.subcmd = SUB_CMD_PATH;
+		bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+		sbuf.b[0] = REPORT_STAT_PANIC;
+		scf_cmd.scount = CMD_PATH_TYPE_SCFD;
+		scf_cmd.sbuf = &sbuf.b[0];
+		scf_cmd.rcount = 0;
+		scf_cmd.flag = SCF_USE_S_BUF;
+		scf_p_send_cmd(&scf_cmd, statep);
+
+		/* Check command complete */
+		for (ii = 0; ii < scf_cmdend_wait_rcnt_panic; ii++) {
+			/* wait */
+			drv_usecwait(SCF_MIL2MICRO(scf_cmdend_wait_time_panic));
+
+			/* Get SCF status register */
+			status = SCF_P_DDI_GET16(statep->scf_regs_handle,
+			    &statep->scf_regs->STATUS);
+			scf_panic_trc_status = status;
+
+			if (status & STATUS_CMD_COMPLETE) {
+				/* Command complete */
+				break;
+			}
+		}
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+
+		if (ii == scf_cmdend_wait_rcnt_panic) {
+			/* Not command complete */
+			if (path_counter < 1) {
+				path_counter++;
+				goto START_scf_panic_loop;
+			}
+			SCF_PANIC_TRACE(__LINE__);
+			goto END_scf_panic_callb;
+		}
+		switch ((status & STATUS_CMD_RTN_CODE) >> 4) {
+		case NORMAL_END:
+			counter = scf_panic_report_maxretry;
+			path_flag = 0;
+			report_counter = 0;
+			break;
+		default:
+			if (path_counter < 1) {
+				path_flag = 1;
+				path_counter++;
+				goto START_scf_panic_loop;
+			}
+			SCF_PANIC_TRACE(__LINE__);
+			goto END_scf_panic_callb;
+		}
+
+/*
+ * START_report_send
+ */
+	START_report_send:
+
+		if (new_report) {
+			/* new report panic */
+			scf_cmd.cmd = CMD_REPORT;
+			scf_cmd.subcmd = SUB_SYSTEM_STATUS_RPT_NOPATH;
+		} else {
+			/* report panic */
+			scf_cmd.cmd = CMD_REPORT;
+			scf_cmd.subcmd = SUB_SYSTEM_STATUS_RPT;
+		}
+		bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+		sbuf.b[0] = REPORT_STAT_PANIC;
+		scf_cmd.scount = SCF_S_CNT_15;
+		scf_cmd.sbuf = &sbuf.b[0];
+		scf_cmd.rcount = 0;
+		scf_cmd.flag = SCF_USE_S_BUF;
+		scf_p_send_cmd(&scf_cmd, statep);
+		scf_panic_exec_flag2 = 1;
+
+		/* Check command complete */
+		for (ii = 0; ii < scf_cmdend_wait_rcnt_panic; ii++) {
+			/* wait */
+			drv_usecwait(SCF_MIL2MICRO(scf_cmdend_wait_time_panic));
+
+			/* Get SCF status register */
+			status = SCF_P_DDI_GET16(statep->scf_regs_handle,
+			    &statep->scf_regs->STATUS);
+			scf_panic_trc_status = status;
+
+			if (status & STATUS_CMD_COMPLETE) {
+				/* Command complete */
+				break;
+			}
+		}
+		SCF_P_DDI_PUT16(statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+
+		if (ii == scf_cmdend_wait_rcnt_panic) {
+			/* Not command complete */
+			if (report_counter < 1) {
+				report_counter++;
+				goto START_scf_panic_loop;
+			}
+			SCF_PANIC_TRACE(__LINE__);
+			goto END_scf_panic_callb;
+		}
+
+		switch ((status & STATUS_CMD_RTN_CODE) >> 4) {
+		case NORMAL_END:
+			/* command success */
+			report_succeeded = 1;
+			SCF_PANIC_TRACE(__LINE__);
+			goto END_scf_panic_callb;
+
+		case BUF_FUL:
+			counter--;
+			if (counter > 0) {
+				drv_usecwait(SCF_MIL2MICRO(scf_buf_ful_rtime));
+			}
+			break;
+
+		case RCI_BUSY:
+			counter--;
+			if (counter > 0) {
+				drv_usecwait(SCF_MIL2MICRO(scf_rci_busy_rtime));
+			}
+			break;
+
+		case INTERFACE:
+			counter--;
+			break;
+
+		case E_SCFC_NOPATH:
+			if (new_report == 0) {
+				path_flag = 1;
+				path_counter = 0;
+				goto START_scf_panic_loop;
+			}
+			/* FALLTHROUGH: NOPATH on a NOPATH report is fatal */
+		default:
+			/* E_NOT_SUPPORT */
+			/* E_PARAM */
+			/* E_RCI_ACCESS */
+			/* RCI_NS */
+			goto END_scf_panic_callb;
+		}
+
+	} while (counter > 0);
+
+/*
+ * END_scf_panic_callb
+ */
+	END_scf_panic_callb:
+
+	scf_comtbl.scf_exec_p = save__exec_p;
+	if (report_succeeded) {
+		SCF_PANIC_TRACE(__LINE__);
+		scf_panic_reported = 1;
+	} else {
+		SCF_PANIC_TRACE(__LINE__);
+		cmn_err(CE_WARN, "%s: cannot report PANIC.\n", scf_driver_name);
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_shutdown_callb()
+ *
+ * Description: Shutdown entry processing.
+ *
+ */
+/* ARGSUSED */
+void
+scf_shutdown_callb(int code)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_shutdown_callb() "
+	scf_state_t		*statep;
+	int			ret;
+	struct scf_cmd		scf_cmd;
+	scf_short_buffer_t	sbuf;
+
+	SCFDBGMSG1(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": start code = %d",
+	    code);
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check panic exec or shutdown exec */
+	if ((scf_panic_exec_flag) || (scf_comtbl.scf_shutdown_exec_flag)) {
+		SCFDBGMSG(SCF_DBGFLAG_SYS, "After Panic or shutdown");
+		goto END_scf_shutdown_callb99;
+	}
+
+	scf_comtbl.scf_shutdown_exec_flag = 1;
+
+	/* SCF command transmit sync stop */
+	ret = scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+	if (ret != 0) {
+		SC_DBG_DRV_TRACE(TC_SHUTDOWN|TC_ERR, __LINE__, "i_ioctl ", 8);
+		goto END_scf_shutdown_callb;
+	}
+
+	scf_comtbl.shutdown_start_reported = 1;
+
+	bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+	sbuf.b[0] = REPORT_STAT_SHUTDOWN_START;
+	sbuf.b[1] = scf_poff_factor[scf_comtbl.poff_factor][0];
+	sbuf.b[2] = scf_poff_factor[scf_comtbl.poff_factor][1];
+	sbuf.b[3] = scf_poff_factor[scf_comtbl.poff_factor][2];
+	scf_cmd.cmd = CMD_REPORT;
+	scf_cmd.subcmd = SUB_SYSTEM_STATUS_RPT_NOPATH;
+	scf_cmd.scount = SCF_S_CNT_15;
+	scf_cmd.sbuf = &sbuf.b[0];
+	scf_cmd.rcount = 0;
+	scf_cmd.flag = (SCF_USE_S_BUF | SCF_USE_SP);
+	scf_comtbl.scf_last_report = SCF_SHUTDOWN_START;
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+	if (ret != 0) {
+		SC_DBG_DRV_TRACE(TC_SHUTDOWN|TC_ERR, __LINE__, "shutdown", 8);
+		goto END_scf_shutdown_callb;
+	}
+	/* SCF command send sync re-stop */
+	ret = scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+	if (ret != 0) {
+		SC_DBG_DRV_TRACE(TC_SHUTDOWN|TC_ERR, __LINE__, "shutdown", 8);
+		goto END_scf_shutdown_callb;
+	}
+
+	/* Check alive check exec; if running, tell XSCF to stop it */
+	if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+		scf_cmd.cmd = CMD_ALIVE_CHECK;
+		scf_cmd.subcmd = SUB_ALIVE_STOP;
+		scf_cmd.scount = 0;
+		scf_cmd.rcount = 0;
+		scf_cmd.flag = (SCF_USE_S_BUF | SCF_USE_SP);
+		ret = scf_send_cmd_check_bufful(&scf_cmd);
+		scf_comtbl.alive_running = SCF_ALIVE_STOP;
+		if (ret != 0) {
+			SC_DBG_DRV_TRACE(TC_SHUTDOWN|TC_ERR, __LINE__,
+			    "shutdown", 8);
+			goto END_scf_shutdown_callb;
+		}
+		/* SCF command send sync re-stop */
+		ret = scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+	}
+
+/*
+ * END_scf_shutdown_callb
+ */
+END_scf_shutdown_callb:
+
+	if ((statep = scf_comtbl.scf_exec_p) == NULL) {
+		statep = scf_comtbl.scf_path_p;
+	}
+	scf_comtbl.scf_exec_p = 0;
+	scf_comtbl.scf_path_p = 0;
+	if (statep) {
+		/* Exec device interrupt disable */
+		scf_forbid_intr(statep);
+		scf_chg_scf(statep, PATH_STAT_STOP);
+	}
+
+	while (scf_comtbl.scf_wait_p) {
+		/* Standby device interrupt disable */
+		statep = scf_comtbl.scf_wait_p;
+		scf_comtbl.scf_wait_p = statep->next;
+		scf_forbid_intr(statep);
+		scf_chg_scf(statep, PATH_STAT_STOP);
+	}
+
+	/* SCF command send sync start */
+	(void) scf_make_send_cmd(&scf_cmd, SCF_USE_START);
+
+	/* DSCP interface stop */
+	scf_dscp_stop(FACTOR_PATH_STOP);
+
+/*
+ * END_scf_shutdown_callb99
+ */
+END_scf_shutdown_callb99:
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	SCFDBGMSG(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_softintr()
+ *
+ * Description: Soft interrupt entry processing. (for DSCP callback)
+ *
+ */
+/* ARGSUSED */
+uint_t
+scf_softintr(caddr_t arg)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_softintr() "
+	uint_t			ret = DDI_INTR_CLAIMED;
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": start");
+
+	mutex_enter(&scf_comtbl.si_mutex);
+	if (scf_comtbl.scf_softintr_dscp_kicked == FLAG_ON) {
+		/* Lock driver mutex */
+		mutex_enter(&scf_comtbl.all_mutex);
+
+		/* Run the DSCP callback only outside panic/shutdown */
+		if ((scf_panic_exec_flag == 0) &&
+		    (scf_comtbl.scf_shutdown_exec_flag == 0)) {
+			scf_dscp_callback();
+		}
+
+		/* Unlock driver mutex */
+		mutex_exit(&scf_comtbl.all_mutex);
+
+		scf_comtbl.scf_softintr_dscp_kicked = FLAG_OFF;
+	}
+	mutex_exit(&scf_comtbl.si_mutex);
+
+	SCFDBGMSG(SCF_DBGFLAG_DSCP, SCF_FUNC_NAME ": end");
+	return (ret);
+}
+
+
+/*
+ * scf_cmdwait_status_set()
+ *
+ * Description: Check and setting command status.
+ *
+ */
+void
+scf_cmdwait_status_set(void)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_cmdwait_status_set() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Move each in-flight (EXEC) event back to WAIT for re-send */
+	if (scf_comtbl.scf_pchg_event_sub == EVENT_SUB_PCHG_EXEC) {
+		scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_PCHG;
+	}
+
+	if (scf_comtbl.scf_poff_event_sub == EVENT_SUB_POFF_EXEC) {
+		scf_comtbl.scf_poff_event_sub = EVENT_SUB_POFF_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_POFF;
+	}
+
+	switch (scf_comtbl.scf_shut_event_sub) {
+	case EVENT_SUB_SHUT_EXEC:
+		scf_comtbl.scf_shut_event_sub = EVENT_SUB_SHUT_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_SHUT;
+		break;
+
+	case EVENT_SUB_EXEC:
+		scf_comtbl.scf_shut_event_sub = EVENT_SUB_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_SHUT;
+		break;
+
+	default:
+		break;
+	}
+
+	switch (scf_comtbl.scf_alive_event_sub) {
+	case EVENT_SUB_ALST_EXEC:
+		scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALST_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_ALST;
+		break;
+
+	case EVENT_SUB_ALSP_EXEC:
+		scf_comtbl.scf_alive_event_sub = EVENT_SUB_ALSP_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_ALSP;
+		break;
+
+	default:
+		break;
+	}
+
+	switch (scf_comtbl.scf_report_event_sub) {
+	case EVENT_SUB_REPORT_RUN_EXEC:
+		scf_comtbl.scf_report_event_sub = EVENT_SUB_REPORT_RUN_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_REPORT_RUN;
+		break;
+
+	case EVENT_SUB_REPORT_SHOT_EXEC:
+		scf_comtbl.scf_report_event_sub = EVENT_SUB_REPORT_SHUT_WAIT;
+
+		/* Set command retry send flag */
+		scf_comtbl.scf_cmd_resend_req |= RESEND_REPORT_SHUT;
+		break;
+
+	default:
+		break;
+	}
+
+	if (scf_comtbl.scf_cmd_exec_flag) {
+		if (scf_comtbl.cmd_end_wait) {
+			/* Set command retry send flag */
+			scf_comtbl.scf_cmd_resend_req |= RESEND_IOCTL;
+		}
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfinit.c b/usr/src/uts/sun4u/opl/io/scfd/scfinit.c
new file mode 100644
index 0000000000..8b45427e2e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfinit.c
@@ -0,0 +1,301 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/conf.h>
+#include <sys/kmem.h>
+#include <sys/devops.h>
+#include <sys/modctl.h>
+#include <sys/cmn_err.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+
+/*
+ * character/block entry point structure
+ */
+static struct cb_ops scf_cb_ops = {
+	scf_open,			/* open */
+	scf_close,			/* close */
+	nodev,				/* strategy */
+	nodev,				/* print */
+	nodev,				/* dump */
+	nodev,				/* read */
+	nodev,				/* write */
+	scf_ioctl,			/* ioctl */
+	nodev,				/* devmap */
+	nodev,				/* mmap */
+	nodev,				/* segmap */
+	nochpoll,			/* poll */
+	ddi_prop_op,			/* prop_op */
+	(struct streamtab *)NULL,	/* streamtab */
+	D_NEW | D_MP			/* flag */
+};
+
+/*
+ * device operations structure
+ */
+static struct dev_ops scf_dev_ops = {
+	DEVO_REV,			/* dev_ops revision */
+	0,				/* reference count */
+	scf_getinfo,			/* getinfo */
+	nulldev,			/* identify */
+	nulldev,			/* probe */
+	scf_attach,			/* attach */
+	scf_detach,			/* detach */
+	nodev,				/* reset */
+	&scf_cb_ops,			/* cb_ops */
+	(struct bus_ops *)NULL,		/* bus_ops */
+	NULL				/* power entry */
+};
+
+/*
+ * linkage structure for loadable driver
+ */
+extern struct mod_ops mod_driverops;
+static struct modldrv scf_modldrv = {
+	&mod_driverops,			/* mod_driverops */
+	SCF_DRIVER_VERSION,		/* version number */
+	&scf_dev_ops			/* dev_ops */
+};
+
+/*
+ * module linkage structure
+ */
+static struct modlinkage scf_modlinkage = {
+	MODREV_1,			/* modlinkage revision */
+	(void *)&scf_modldrv,		/* linkage */
+	(void *)NULL			/* (end of linkage) */
+};
+
+/*
+ * Function list
+ */
+void	scf_free_resource(void);
+
+/*
+ * _init()
+ *
+ * Description: Install and initialization processing of module.
+ *
+ */
+int
+_init(void)
+{
+#define	SCF_FUNC_NAME		"_init() "
+	int			error;
+
+	SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+	/* initialize per-instance soft state allocator */
+	error = ddi_soft_state_init(&scfstate, sizeof (scf_state_t), 0);
+	if (error != 0) {
+		cmn_err(CE_WARN, "%s: _init: ddi_soft_state_init failed.\n",
+		    scf_driver_name);
+		goto END_init;
+	}
+
+	SCF_DBG_DRV_TRACE_INIT;
+
+	/* Last report code initialize */
+	scf_comtbl.scf_last_report = NOT_SEND_REPORT;
+
+	/* XSCF status initialize */
+	scf_comtbl.scf_status = SCF_STATUS_UNKNOWN;
+
+	/* allocate sense-report and event pools */
+	scf_comtbl.report_sensep =
+	    (scfreport_t *)kmem_zalloc((size_t)(sizeof (scfreport_t) *
+	    scf_report_sense_pool_max), KM_SLEEP);
+	scf_comtbl.getevent_sensep =
+	    (scfevent_t *)kmem_zalloc((size_t)(sizeof (scfevent_t) *
+	    scf_getevent_pool_max), KM_SLEEP);
+	scf_comtbl.resource_flag |= DID_ALLOCBUF;
+
+	/* initialize mutex */
+	mutex_init(&scf_comtbl.attach_mutex, NULL, MUTEX_DRIVER, 0);
+	scf_comtbl.resource_flag |= DID_MUTEX_ATH;
+	mutex_init(&scf_comtbl.si_mutex, NULL, MUTEX_DRIVER, 0);
+	scf_comtbl.resource_flag |= DID_MUTEX_SI;
+	mutex_init(&scf_comtbl.trc_mutex, NULL, MUTEX_DRIVER, 0);
+	scf_comtbl.resource_flag |= DID_MUTEX_TRC;
+
+	/* initialize cv */
+	cv_init(&scf_comtbl.cmd_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.cmdend_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.cmdwait_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.cmdbusy_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.rsense_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.rdcsense_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.rdctrl_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.getevent_cv, NULL, CV_DRIVER, NULL);
+	cv_init(&scf_comtbl.suspend_wait_cv, NULL, CV_DRIVER, NULL);
+	scf_comtbl.resource_flag |= DID_CV;
+
+	/* install module into system */
+	error = mod_install(&scf_modlinkage);
+	if (error != 0) {
+		cmn_err(CE_WARN, "%s: _init: mod_install failed.\n",
+		    scf_driver_name);
+		/* release driver resources */
+		scf_free_resource();
+	}
+
+/*
+ * END_init
+ */
+	END_init:
+
+	SCFDBGMSG1(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end return = %d", error);
+	return (error);
+}
+
+
+/*
+ * _fini()
+ *
+ * Description: Module unload processing.
+ *
+ */
+int
+_fini(void)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"_fini() "
+	int			error;
+
+	SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+	/* remove module from system */
+	error = mod_remove(&scf_modlinkage);
+	if (error == 0) {
+		/* release driver resources */
+		scf_free_resource();
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end return = %d", error);
+	return (error);
+}
+
+
+/*
+ * _info()
+ *
+ * Description: Return module information.
+ *
+ */
+int
+_info(struct modinfo *modinfop)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"_info() "
+	int			error;
+
+	SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+	/* return module information */
+	error = mod_info(&scf_modlinkage, modinfop);
+
+	SCFDBGMSG1(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end return = %d", error);
+	return (error);
+}
+
+
+/*
+ * scf_free_resource()
+ *
+ * Description: Release processing of driver resources.
+ *
+ */
+/* ARGSUSED */
+void
+scf_free_resource(void)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_free_resource() "
+
+	SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": start");
+
+	SCF_DBG_IOMP_FREE;
+
+	/* Release sense-report and event pools */
+	if (scf_comtbl.report_sensep != NULL) {
+		kmem_free((void *)scf_comtbl.report_sensep,
+		    (size_t)(sizeof (scfreport_t) *
+		    scf_report_sense_pool_max));
+		scf_comtbl.report_sensep = NULL;
+	}
+	if (scf_comtbl.getevent_sensep != NULL) {
+		kmem_free((void *)scf_comtbl.getevent_sensep,
+		    (size_t)(sizeof (scfevent_t) * scf_getevent_pool_max));
+		scf_comtbl.getevent_sensep = NULL;
+	}
+	scf_comtbl.resource_flag &= (~DID_ALLOCBUF);
+
+	/* destroy cv */
+	if (scf_comtbl.resource_flag & DID_CV) {
+		cv_destroy(&scf_comtbl.cmd_cv);
+		cv_destroy(&scf_comtbl.cmdend_cv);
+		cv_destroy(&scf_comtbl.cmdwait_cv);
+		cv_destroy(&scf_comtbl.cmdbusy_cv);
+		cv_destroy(&scf_comtbl.rsense_cv);
+		cv_destroy(&scf_comtbl.rdcsense_cv);
+		cv_destroy(&scf_comtbl.rdctrl_cv);
+		cv_destroy(&scf_comtbl.getevent_cv);
+		cv_destroy(&scf_comtbl.suspend_wait_cv);
+		scf_comtbl.resource_flag &= (~DID_CV);
+	}
+
+	/* remove softint */
+	if (scf_comtbl.resource_flag & DID_SOFTINTR) {
+		ddi_remove_softintr(scf_comtbl.scf_softintr_id);
+		scf_comtbl.resource_flag &= (~DID_SOFTINTR);
+	}
+
+	/* destroy mutex */
+	if (scf_comtbl.resource_flag & DID_MUTEX_TRC) {
+		mutex_destroy(&scf_comtbl.trc_mutex);
+		scf_comtbl.resource_flag &= (~DID_MUTEX_TRC);
+	}
+	if (scf_comtbl.resource_flag & DID_MUTEX_ALL) {
+		mutex_destroy(&scf_comtbl.all_mutex);
+		scf_comtbl.resource_flag &= (~DID_MUTEX_ALL);
+	}
+	if (scf_comtbl.resource_flag & DID_MUTEX_SI) {
+		mutex_destroy(&scf_comtbl.si_mutex);
+		scf_comtbl.resource_flag &= (~DID_MUTEX_SI);
+	}
+	if (scf_comtbl.resource_flag & DID_MUTEX_ATH) {
+		mutex_destroy(&scf_comtbl.attach_mutex);
+		scf_comtbl.resource_flag &= (~DID_MUTEX_ATH);
+	}
+
+	/* release soft state allocator */
+	ddi_soft_state_fini(&scfstate);
+
+	SCFDBGMSG(SCF_DBGFLAG_DDI, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfiomp.c b/usr/src/uts/sun4u/opl/io/scfd/scfiomp.c
new file mode 100644
index 0000000000..d427fcb399
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfiomp.c
@@ -0,0 +1,2286 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/uio.h>
+#include <sys/cred.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+#ifdef DEBUG
+/*
+ * Function list
+ */
+void scf_add_scf(scf_state_t *statep);
+void scf_del_scf(scf_state_t *statep);
+int scf_meta_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *cred_p, int *rval_p, int u_mode);
+int scf_inst_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *cred_p, int *rval_p, int u_mode);
+void scf_inst_getstat32(scf_state_t *statep,
+ struct fiompstatus_32 *status32_p, char *message_p, int flag);
+void scf_inst_getstat(scf_state_t *statep,
+ struct fiompstatus *status_p, char *message_p, int flag);
+void scf_path_stmsg(scf_state_t *statep, char *message_p);
+
+/*
+ * External function
+ */
+extern void scf_dscp_stop(uint32_t factor);
+
+
+/*
+ * Multi path control table add
+ */
+void
+scf_add_scf(scf_state_t *statep)
+{
+#define SCF_FUNC_NAME "scf_add_scf() "
+ scf_state_t **iomp_scf;
+ int alloc_size;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start instance = %d",
+ statep->instance);
+
+ alloc_size = (sizeof (scf_state_t *) * (statep->instance + 1));
+ if (alloc_size < (sizeof (scf_state_t *) * SCF_MAX_INSTANCE)) {
+ alloc_size = (sizeof (scf_state_t *) * SCF_MAX_INSTANCE);
+ }
+ if ((scf_comtbl.alloc_size < alloc_size) ||
+ (scf_comtbl.iomp_scf == 0)) {
+ /* IOMP control table re-get */
+ iomp_scf = (scf_state_t **)kmem_zalloc((size_t)(alloc_size),
+ KM_SLEEP);
+ if (scf_comtbl.alloc_size != 0) {
+ bcopy(scf_comtbl.iomp_scf, iomp_scf,
+ scf_comtbl.alloc_size);
+ kmem_free((void *)scf_comtbl.iomp_scf,
+ (size_t)scf_comtbl.alloc_size);
+ }
+ scf_comtbl.iomp_scf = iomp_scf;
+ scf_comtbl.alloc_size = alloc_size;
+ }
+ scf_comtbl.iomp_scf[statep->instance] = statep;
+ /* SCF path count up */
+ if (scf_comtbl.path_num < (statep->instance + 1)) {
+ scf_comtbl.path_num = statep->instance + 1;
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+/*
+ * Multi path control table delete
+ */
+void
+scf_del_scf(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_del_scf() "
+ int ii;
+ int path_num = 0;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start instance = %d",
+ statep->instance);
+
+ scf_comtbl.iomp_scf[statep->instance] = 0;
+
+	/* Recalculate SCF path count after removal */
+ for (ii = 0; ii < scf_comtbl.alloc_size / sizeof (scf_state_t *);
+ ii++) {
+ if (scf_comtbl.iomp_scf[ii]) {
+ path_num = scf_comtbl.iomp_scf[ii]->instance + 1;
+ }
+ }
+ scf_comtbl.path_num = path_num;
+
+ SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Meta management ioctl
+ */
+/* ARGSUSED */
+int
+scf_meta_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_meta_ioctl() "
+ int ret = 0;
+ int all_num;
+ int path_num;
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOMP,
+ SCF_FUNC_NAME ": start cmd = 0x%08x", (uint_t)cmd);
+
+ switch ((unsigned int)cmd) {
+ case FIOMPNEW:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPNEW proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPENCAP:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPENCAP proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPDEVINFO:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDEVINFO proc");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ struct fiomp_devinfo_32 *fiomp_devinfo32_p;
+
+ fiomp_devinfo32_p =
+ (struct fiomp_devinfo_32 *)kmem_zalloc
+ ((size_t)(sizeof (struct fiomp_devinfo_32)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg,
+ (void *)fiomp_devinfo32_p,
+ sizeof (struct fiomp_devinfo_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_DEVINFO32;
+ }
+ if (fiomp_devinfo32_p->inst_no != 0) {
+ /* Invalid inst_no */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EINVAL;
+ goto END_DEVINFO32;
+ }
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ path_num = 0;
+ } else {
+ /* Is attach device */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ path_num = scf_comtbl.path_num;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+ /* Set output information */
+ strcpy(fiomp_devinfo32_p->real_name, SCF_REAL_NAME);
+ strcpy(fiomp_devinfo32_p->user_path, SCF_USER_PATH);
+ fiomp_devinfo32_p->path_num = path_num;
+ fiomp_devinfo32_p->mpmode = FIOMP_FALSE;
+ fiomp_devinfo32_p->autopath = FIOMP_TRUE;
+ fiomp_devinfo32_p->block = FIOMP_TRUE;
+ fiomp_devinfo32_p->needsync = FIOMP_FALSE;
+ if (ddi_copyout((void *)fiomp_devinfo32_p,
+ (void *)arg, sizeof (struct fiomp_devinfo_32),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_DEVINFO32
+ */
+ END_DEVINFO32:
+
+ if (fiomp_devinfo32_p) {
+ kmem_free((void *)fiomp_devinfo32_p,
+ (size_t)(sizeof (struct fiomp_devinfo_32)));
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ struct fiomp_devinfo *fiomp_devinfo_p;
+
+ fiomp_devinfo_p =
+ (struct fiomp_devinfo *)kmem_zalloc
+ ((size_t)(sizeof (struct fiomp_devinfo)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)fiomp_devinfo_p,
+ sizeof (struct fiomp_devinfo), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_DEVINFO;
+ }
+ if (fiomp_devinfo_p->inst_no != 0) {
+ /* Invalid inst_no */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EINVAL;
+ goto END_DEVINFO;
+ }
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ path_num = 0;
+ } else {
+ /* Is attach device */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ path_num = scf_comtbl.path_num;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+ /* Set output information */
+ strcpy(fiomp_devinfo_p->real_name,
+ SCF_REAL_NAME);
+ strcpy(fiomp_devinfo_p->user_path,
+ SCF_USER_PATH);
+ fiomp_devinfo_p->path_num = path_num;
+ fiomp_devinfo_p->mpmode = FIOMP_FALSE;
+ fiomp_devinfo_p->autopath = FIOMP_TRUE;
+ fiomp_devinfo_p->block = FIOMP_TRUE;
+ fiomp_devinfo_p->needsync = FIOMP_FALSE;
+ if (ddi_copyout((void *)fiomp_devinfo_p,
+ (void *)arg, sizeof (struct fiomp_devinfo),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_DEVINFO
+ */
+ END_DEVINFO:
+
+ if (fiomp_devinfo_p) {
+ kmem_free((void *)fiomp_devinfo_p,
+ (size_t)(sizeof (struct fiomp_devinfo)));
+ }
+ }
+ break;
+
+ case FIOMPALLINSTNUM:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPALLINSTNUM proc");
+
+ mutex_enter(&scf_comtbl.attach_mutex);
+ /* Set output information */
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ all_num = 0;
+ } else {
+ /* Is attach device */
+ all_num = 1;
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+ if (ddi_copyout((void *)&all_num, (void *)arg,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ }
+ break;
+
+ case FIOMPALLDEVINFO:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPALLDEVINFO proc");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ struct fiomp_all_devinfo_32 fiomp_all_devinfo32;
+ struct fiomp_devinfo_32 *fiomp_devinfo32_p;
+
+ fiomp_devinfo32_p =
+ (struct fiomp_devinfo_32 *)kmem_zalloc
+ ((size_t)(sizeof (struct fiomp_devinfo_32)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)&fiomp_all_devinfo32,
+ sizeof (struct fiomp_all_devinfo_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO32;
+ }
+ if (fiomp_all_devinfo32.num != 1) {
+ /* Set 1 in num */
+ fiomp_all_devinfo32.num = 1;
+ } else {
+ if (ddi_copyin((void *)(uintptr_t)fiomp_all_devinfo32.devinfo,
+ (void *)fiomp_devinfo32_p,
+ sizeof (struct fiomp_devinfo_32),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO32;
+ }
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ path_num = 0;
+ } else {
+ /* Is attach device */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ path_num = scf_comtbl.path_num;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+ /* Set output information */
+ fiomp_devinfo32_p->inst_no = 0;
+ strcpy(fiomp_devinfo32_p->real_name, SCF_REAL_NAME);
+ strcpy(fiomp_devinfo32_p->user_path, SCF_USER_PATH);
+ fiomp_devinfo32_p->path_num = path_num;
+ fiomp_devinfo32_p->mpmode = FIOMP_FALSE;
+ fiomp_devinfo32_p->autopath = FIOMP_TRUE;
+ fiomp_devinfo32_p->block = FIOMP_TRUE;
+ fiomp_devinfo32_p->needsync = FIOMP_FALSE;
+ if (ddi_copyout((void *)fiomp_devinfo32_p,
+ (void *)(uintptr_t)fiomp_all_devinfo32.devinfo,
+ sizeof (struct fiomp_devinfo_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO32;
+ }
+ }
+ if (ddi_copyout((void *)&fiomp_all_devinfo32, (void *)arg,
+ sizeof (struct fiomp_all_devinfo_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "m_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_ALLDEVINFO32
+ */
+ END_ALLDEVINFO32:
+
+ if (fiomp_devinfo32_p) {
+ kmem_free((void *)fiomp_devinfo32_p,
+ (size_t)(sizeof (struct fiomp_devinfo_32)));
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ struct fiomp_all_devinfo fiomp_all_devinfo;
+ struct fiomp_devinfo *fiomp_devinfo_p;
+
+ fiomp_devinfo_p =
+ (struct fiomp_devinfo *)kmem_zalloc
+ ((size_t)(sizeof (struct fiomp_devinfo)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)&fiomp_all_devinfo,
+ sizeof (struct fiomp_all_devinfo), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO;
+ }
+ if (fiomp_all_devinfo.num != 1) {
+ /* Set 1 in num */
+ fiomp_all_devinfo.num = 1;
+ } else {
+ if (ddi_copyin((void *)fiomp_all_devinfo.devinfo,
+ (void *)fiomp_devinfo_p,
+ sizeof (struct fiomp_devinfo), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO;
+ }
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ path_num = 0;
+ } else {
+ /* Is attach device */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ path_num = scf_comtbl.path_num;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+ /* Set output information */
+ fiomp_devinfo_p->inst_no = 0;
+ strcpy(fiomp_devinfo_p->real_name, SCF_REAL_NAME);
+ strcpy(fiomp_devinfo_p->user_path, SCF_USER_PATH);
+ fiomp_devinfo_p->path_num = path_num;
+ fiomp_devinfo_p->mpmode = FIOMP_FALSE;
+ fiomp_devinfo_p->autopath = FIOMP_TRUE;
+ fiomp_devinfo_p->block = FIOMP_TRUE;
+ fiomp_devinfo_p->needsync = FIOMP_FALSE;
+ if (ddi_copyout((void *)fiomp_devinfo_p,
+ (void *)fiomp_all_devinfo.devinfo,
+ sizeof (struct fiomp_devinfo), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "m_ioctl ", 8);
+ ret = EFAULT;
+ goto END_ALLDEVINFO;
+ }
+ }
+ if (ddi_copyout((void *)&fiomp_all_devinfo, (void *)arg,
+ sizeof (struct fiomp_all_devinfo), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "m_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_ALLDEVINFO
+ */
+ END_ALLDEVINFO:
+
+ if (fiomp_devinfo_p) {
+ kmem_free((void *)fiomp_devinfo_p,
+ (size_t)(sizeof (struct fiomp_devinfo)));
+ }
+ }
+ break;
+
+ case FIOMPGETEVENT:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPGETEVENT proc");
+
+ ret = ENOTTY;
+ break;
+
+ default:
+ /* undefined */
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "undefined ioctl command");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "m_ioctl ", 8);
+ ret = ENOTTY;
+ }
+
+/*
+ * END_meta_ioctl
+ */
+ END_meta_ioctl:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+/*
+ * Instance management ioctl
+ */
+/* ARGSUSED */
+int
+scf_inst_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_inst_ioctl() "
+ scf_state_t *statep;
+ scf_state_t *wkstatep;
+ struct scf_cmd scf_cmd;
+ int ret = 0;
+ int all_num;
+ int pathnum;
+ int ii;
+ int jj;
+ int num_cmp_flag = 0;
+ int alloc_num;
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOMP,
+ SCF_FUNC_NAME ": start cmd = 0x%08x", (uint_t)cmd);
+
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ mutex_exit(&scf_comtbl.attach_mutex);
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = ENXIO;
+ goto END_inst_ioctl;
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+
+ switch ((unsigned int)cmd) {
+ case FIOMPMAXPATHNUM:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPMAXPATHNUM proc");
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ /* Set output information */
+ all_num = scf_comtbl.path_num;
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (ddi_copyout((void *)&all_num, (void *)arg,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+ break;
+
+ case FIOMPSETPROP:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPSETPROP proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPGETPROP:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPGETPROP proc");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ struct fiompprop_32 fiompprop32;
+ char *work_name_p = 0;
+ char *iomp_name_p = 0;
+ char *iomp_real_name_p = 0;
+ char *iomp_user_path_p = 0;
+ char *iomp_status_p = 0;
+ caddr32_t *iomp_path_p = 0;
+ caddr32_t *iomp_logical_path_p = 0;
+ caddr32_t *iomp_path_status_p = 0;
+ caddr32_t *iomp_path_block_p = 0;
+ char *iomp_path = 0;
+ char *iomp_logical_path = 0;
+ char *iomp_path_status = 0;
+ char *iomp_path_block = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiompprop32,
+ sizeof (struct fiompprop_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiompprop32.num;
+ iomp_name_p = (char *)kmem_zalloc((size_t)(FIOMP_MAX_STR),
+ KM_SLEEP);
+ iomp_real_name_p = (char *)kmem_zalloc((size_t)(FIOMP_MAX_STR),
+ KM_SLEEP);
+ iomp_user_path_p = (char *)kmem_zalloc((size_t)(FIOMP_MAX_STR),
+ KM_SLEEP);
+ iomp_status_p = (char *)kmem_zalloc((size_t)(FIOMP_MAX_STR),
+ KM_SLEEP);
+ if (fiompprop32.num != 0) {
+ /* buffer allocation */
+ work_name_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_path_p = (caddr32_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr32_t)) * alloc_num), KM_SLEEP);
+ iomp_logical_path_p = (caddr32_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr32_t)) * alloc_num), KM_SLEEP);
+ iomp_path_status_p = (caddr32_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr32_t)) * alloc_num), KM_SLEEP);
+ iomp_path_block_p = (caddr32_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr32_t)) * alloc_num), KM_SLEEP);
+ iomp_path = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_logical_path = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_path_status = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_path_block = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (fiompprop32.num != scf_comtbl.path_num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiompprop32.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Set output information */
+ strcpy(iomp_name_p, SCF_IOMP_NAME);
+ strcpy(iomp_real_name_p, SCF_REAL_NAME);
+ strcpy(iomp_user_path_p, SCF_USER_PATH);
+ if ((scf_comtbl.scf_path_p) || (scf_comtbl.scf_exec_p)) {
+ strcpy(iomp_status_p, "online");
+ } else if ((scf_comtbl.scf_stop_p) ||
+ (scf_comtbl.scf_err_p)) {
+ strcpy(iomp_status_p, "offline");
+ } else {
+ strcpy(iomp_status_p, "unconfigured");
+ }
+ for (ii = 0, jj = 0; ii < fiompprop32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ /* Output information setting every pass */
+ iomp_path[jj] = '\0';
+ iomp_logical_path[jj] = '\0';
+ iomp_path_status[jj] = '\0';
+ iomp_path_block[jj] = '\0';
+ if ((statep = scf_comtbl.iomp_scf[ii]) != 0) {
+ if (ddi_pathname(statep->dip,
+ work_name_p)) {
+ sprintf(&iomp_path[jj], "%s:scfc%d",
+ work_name_p, statep->instance);
+ }
+ sprintf(&iomp_logical_path[jj],
+ "/dev/FJSVhwr/scfc%d",
+ statep->instance);
+ switch (statep->path_status) {
+ case FIOMP_STAT_ACTIVE:
+ strcpy(&iomp_path_status[jj],
+ "active");
+ break;
+ case FIOMP_STAT_STANDBY:
+ strcpy(&iomp_path_status[jj],
+ "standby");
+ break;
+ case FIOMP_STAT_STOP:
+ strcpy(&iomp_path_status[jj],
+ "stop");
+ break;
+ case FIOMP_STAT_FAIL:
+ strcpy(&iomp_path_status[jj],
+ "fail");
+ break;
+ case FIOMP_STAT_DISCON:
+ strcpy(&iomp_path_status[jj],
+ "disconnected");
+ break;
+ default:
+ strcpy(&iomp_path_status[jj],
+ "empty");
+ }
+ strcpy(&iomp_path_block[jj], "block");
+ }
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (num_cmp_flag == 0) {
+ if (ddi_copyout((void *)iomp_name_p,
+ (void *)(uintptr_t)fiompprop32.iomp_name,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ if (ddi_copyout((void *)iomp_real_name_p,
+ (void *)(uintptr_t)fiompprop32.iomp_real_name,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ if (ddi_copyout((void *)iomp_user_path_p,
+ (void *)(uintptr_t)fiompprop32.iomp_user_path,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ if (ddi_copyout((void *)iomp_status_p,
+ (void *)(uintptr_t)fiompprop32.iomp_status,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ if (fiompprop32.num) {
+ if (fiompprop32.iomp_path) {
+ if (ddi_copyin((void *)(uintptr_t)fiompprop32.iomp_path,
+ (void *)iomp_path_p,
+ ((sizeof (caddr32_t)) * fiompprop32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (fiompprop32.iomp_logical_path) {
+ if (ddi_copyin((void *)(uintptr_t)fiompprop32.iomp_logical_path,
+ (void *)iomp_logical_path_p,
+ ((sizeof (caddr32_t)) * fiompprop32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (fiompprop32.iomp_path_status) {
+ if (ddi_copyin((void *)(uintptr_t)fiompprop32.iomp_path_status,
+ (void *)iomp_path_status_p,
+ ((sizeof (caddr32_t)) * fiompprop32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (fiompprop32.iomp_path_block) {
+ if (ddi_copyin((void *)(uintptr_t)fiompprop32.iomp_path_block,
+ (void *)iomp_path_block_p,
+ ((sizeof (caddr32_t)) * fiompprop32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ }
+ for (ii = 0, jj = 0; ii < fiompprop32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (iomp_path_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path[jj],
+ (void *)(uintptr_t)iomp_path_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (iomp_logical_path_p[ii]) {
+ if (ddi_copyout((void *)&iomp_logical_path[jj],
+ (void *)(uintptr_t)iomp_logical_path_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (iomp_path_status_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path_status[jj],
+ (void *)(uintptr_t)iomp_path_status_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ if (iomp_path_block_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path_block[jj],
+ (void *)(uintptr_t)iomp_path_block_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP32;
+ }
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiompprop32, (void *)arg,
+ sizeof (struct fiompprop_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_GETPROP32
+ */
+ END_GETPROP32:
+
+ /* Buffer release */
+ if (work_name_p) {
+ kmem_free((void *)work_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_name_p) {
+ kmem_free((void *)iomp_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_real_name_p) {
+ kmem_free((void *)iomp_real_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_user_path_p) {
+ kmem_free((void *)iomp_user_path_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_status_p) {
+ kmem_free((void *)iomp_status_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_path_p) {
+ kmem_free((void *)iomp_path_p,
+ (size_t)((sizeof (caddr32_t)) * alloc_num));
+ }
+ if (iomp_logical_path_p) {
+ kmem_free((void *)iomp_logical_path_p,
+ (size_t)((sizeof (caddr32_t)) * alloc_num));
+ }
+ if (iomp_path_status_p) {
+ kmem_free((void *)iomp_path_status_p,
+ (size_t)((sizeof (caddr32_t)) * alloc_num));
+ }
+ if (iomp_path_block_p) {
+ kmem_free((void *)iomp_path_block_p,
+ (size_t)((sizeof (caddr32_t)) * alloc_num));
+ }
+ if (iomp_path) {
+ kmem_free((void *)iomp_path,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_logical_path) {
+ kmem_free((void *)iomp_logical_path,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_path_status) {
+ kmem_free((void *)iomp_path_status,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_path_block) {
+ kmem_free((void *)iomp_path_block,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ struct fiompprop fiompprop;
+ char *work_name_p = 0;
+ char *iomp_name_p = 0;
+ char *iomp_real_name_p = 0;
+ char *iomp_user_path_p = 0;
+ char *iomp_status_p = 0;
+ caddr_t *iomp_path_p = 0;
+ caddr_t *iomp_logical_path_p = 0;
+ caddr_t *iomp_path_status_p = 0;
+ caddr_t *iomp_path_block_p = 0;
+ char *iomp_path = 0;
+ char *iomp_logical_path = 0;
+ char *iomp_path_status = 0;
+ char *iomp_path_block = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiompprop,
+ sizeof (struct fiompprop), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiompprop.num;
+ if (fiompprop.num != 0) {
+ /* Buffer allocation */
+ work_name_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_name_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_real_name_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_user_path_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_status_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ iomp_path_p = (caddr_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr_t)) * alloc_num), KM_SLEEP);
+ iomp_logical_path_p = (caddr_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr_t)) * alloc_num), KM_SLEEP);
+ iomp_path_status_p = (caddr_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr_t)) * alloc_num), KM_SLEEP);
+ iomp_path_block_p = (caddr_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr_t)) * alloc_num), KM_SLEEP);
+ iomp_path = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_logical_path = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_path_status = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ iomp_path_block = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (fiompprop.num != scf_comtbl.path_num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiompprop.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Set output information */
+ strcpy(iomp_name_p, SCF_IOMP_NAME);
+ strcpy(iomp_real_name_p, SCF_REAL_NAME);
+ strcpy(iomp_user_path_p, SCF_USER_PATH);
+ if ((scf_comtbl.scf_path_p) || (scf_comtbl.scf_exec_p)) {
+ strcpy(iomp_status_p, "online");
+ } else if ((scf_comtbl.scf_stop_p) || (scf_comtbl.scf_err_p)) {
+ strcpy(iomp_status_p, "offline");
+ } else {
+ strcpy(iomp_status_p, "unconfigured");
+ }
+ for (ii = 0, jj = 0; ii < fiompprop.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ /* Output information setting every pass */
+ iomp_path[jj] = '\0';
+ iomp_logical_path[jj] = '\0';
+ iomp_path_status[jj] = '\0';
+ iomp_path_block[jj] = '\0';
+ if ((statep = scf_comtbl.iomp_scf[ii]) != 0) {
+ if (ddi_pathname(statep->dip, work_name_p)) {
+ sprintf(&iomp_path[jj], "%s:scfc%d",
+ work_name_p, statep->instance);
+ }
+ sprintf(&iomp_logical_path[jj],
+ "/dev/FJSVhwr/scfc%d",
+ statep->instance);
+ switch (statep->path_status) {
+ case FIOMP_STAT_ACTIVE:
+ strcpy(&iomp_path_status[jj],
+ "active");
+ break;
+ case FIOMP_STAT_STANDBY:
+ strcpy(&iomp_path_status[jj],
+ "standby");
+ break;
+ case FIOMP_STAT_STOP:
+ strcpy(&iomp_path_status[jj],
+ "stop");
+ break;
+ case FIOMP_STAT_FAIL:
+ strcpy(&iomp_path_status[jj],
+ "fail");
+ break;
+ case FIOMP_STAT_DISCON:
+ strcpy(&iomp_path_status[jj],
+ "disconnected");
+ break;
+ default:
+ strcpy(&iomp_path_status[jj],
+ "empty");
+ }
+ strcpy(&iomp_path_block[jj], "block");
+ }
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (num_cmp_flag == 0) {
+ if (ddi_copyout((void *)iomp_name_p,
+ (void *)fiompprop.iomp_name,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ if (ddi_copyout((void *)iomp_real_name_p,
+ (void *)fiompprop.iomp_real_name,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ if (ddi_copyout((void *)iomp_user_path_p,
+ (void *)fiompprop.iomp_user_path,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ if (ddi_copyout((void *)iomp_status_p,
+ (void *)fiompprop.iomp_status,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ if (fiompprop.num) {
+ if (fiompprop.iomp_path) {
+ if (ddi_copyin((void *)fiompprop.iomp_path,
+ (void *)iomp_path_p,
+ ((sizeof (caddr_t)) * fiompprop.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (fiompprop.iomp_logical_path) {
+ if (ddi_copyin((void *)fiompprop.iomp_logical_path,
+ (void *)iomp_logical_path_p,
+ ((sizeof (caddr_t)) * fiompprop.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (fiompprop.iomp_path_status) {
+ if (ddi_copyin((void *)fiompprop.iomp_path_status,
+ (void *)iomp_path_status_p,
+ ((sizeof (caddr_t)) * fiompprop.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (fiompprop.iomp_path_block) {
+ if (ddi_copyin((void *)fiompprop.iomp_path_block,
+ (void *)iomp_path_block_p,
+ ((sizeof (caddr_t)) * fiompprop.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ }
+ for (ii = 0, jj = 0; ii < fiompprop.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (iomp_path_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path[jj],
+ (void *)iomp_path_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (iomp_logical_path_p[ii]) {
+ if (ddi_copyout((void *)&iomp_logical_path[jj],
+ (void *)iomp_logical_path_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (iomp_path_status_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path_status[jj],
+ (void *)iomp_path_status_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ if (iomp_path_block_p[ii]) {
+ if (ddi_copyout((void *)&iomp_path_block[jj],
+ (void *)iomp_path_block_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETPROP;
+ }
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiompprop, (void *)arg,
+ sizeof (struct fiompprop), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_GETPROP
+ */
+ END_GETPROP:
+
+ /* Buffer release */
+ if (work_name_p) {
+ kmem_free((void *)work_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_name_p) {
+ kmem_free((void *)iomp_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_real_name_p) {
+ kmem_free((void *)iomp_real_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_user_path_p) {
+ kmem_free((void *)iomp_user_path_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_status_p) {
+ kmem_free((void *)iomp_status_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (iomp_path_p) {
+ kmem_free((void *)iomp_path_p,
+ (size_t)((sizeof (caddr_t)) * alloc_num));
+ }
+ if (iomp_logical_path_p) {
+ kmem_free((void *)iomp_logical_path_p,
+ (size_t)((sizeof (caddr_t)) * alloc_num));
+ }
+ if (iomp_path_status_p) {
+ kmem_free((void *)iomp_path_status_p,
+ (size_t)((sizeof (caddr_t)) * alloc_num));
+ }
+ if (iomp_path_block_p) {
+ kmem_free((void *)iomp_path_block_p,
+ (size_t)((sizeof (caddr_t)) * alloc_num));
+ }
+ if (iomp_path) {
+ kmem_free((void *)iomp_path,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_logical_path) {
+ kmem_free((void *)iomp_logical_path,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_path_status) {
+ kmem_free((void *)iomp_path_status,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ if (iomp_path_block) {
+ kmem_free((void *)iomp_path_block,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ }
+ break;
+
+ case FIOMPDESTROY:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDESTROY proc");
+ ret = ENOTTY;
+ break;
+
+ case FIOMPSTOP:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPSTOP proc");
+
+ if (ddi_copyin((void *)arg, (void *)&pathnum,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (pathnum == FIOMP_PATH_ALL) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ } else {
+ /* PATH appointment */
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid PATH */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ /* SCF command send sync stop */
+ ret = scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+ if (ret != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ goto END_STOP;
+ }
+ if ((statep->path_status != FIOMP_STAT_ACTIVE) &&
+ (statep->path_status != FIOMP_STAT_STANDBY)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ } else {
+ if (statep->path_status == FIOMP_STAT_ACTIVE) {
+ /* Exec SCF device appointment */
+ if (scf_comtbl.scf_wait_p == 0) {
+					/* Last device stop is error */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ } else {
+ /* Device interrupt disable */
+ scf_forbid_intr(statep);
+ scf_chg_scf(statep, FIOMP_STAT_STOP);
+ /* Send path change command */
+ statep = scf_comtbl.scf_wait_p;
+ scf_comtbl.scf_wait_p = statep->next;
+ scf_chg_scf(statep, FIOMP_STAT_ACTIVE);
+ scf_comtbl.scf_exec_p = 0;
+ scf_comtbl.scf_path_p = 0;
+ scf_comtbl.scf_pchg_event_sub =
+ EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(statep);
+
+ /* DCSP interface stop */
+ scf_dscp_stop(FACTOR_PATH_CHG);
+ }
+ } else {
+ /* Not exec device appointment */
+ scf_del_queue(statep);
+ scf_forbid_intr(statep);
+ scf_chg_scf(statep, FIOMP_STAT_STOP);
+ }
+ }
+/*
+ * END_STOP
+ */
+ END_STOP:
+
+ /* SCF command send sync start */
+ (void) scf_make_send_cmd(&scf_cmd, SCF_USE_START);
+ } else {
+			/* Appointed path is already out of management */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case FIOMPSTART:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPSTART proc");
+
+ if (ddi_copyin((void *)arg, (void *)&pathnum,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (pathnum == FIOMP_PATH_ALL) {
+ /* PATH_ALL appointment */
+ if ((statep = scf_comtbl.scf_stop_p) != 0) {
+ /* Check stop queue */
+ scf_comtbl.scf_stop_p = 0;
+ while (statep) {
+ wkstatep = statep->next;
+				/* Interrupt enable */
+ scf_permit_intr(statep, 1);
+ if ((scf_comtbl.scf_path_p) ||
+ (scf_comtbl.scf_exec_p)) {
+ scf_chg_scf(statep, FIOMP_STAT_STANDBY);
+ } else {
+ if (scf_comtbl.watchdog_after_resume) {
+ scf_comtbl.alive_running = SCF_ALIVE_START;
+ scf_comtbl.watchdog_after_resume = 0;
+ }
+ scf_chg_scf(statep, FIOMP_STAT_ACTIVE);
+ /* Send path change command */
+ scf_comtbl.scf_exec_p = 0;
+ scf_comtbl.scf_path_p = 0;
+
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(statep);
+ }
+ statep = wkstatep;
+ }
+ }
+ } else {
+ /* PATH appointment */
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid PATH */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ if (statep->path_status == FIOMP_STAT_STOP) {
+ /* Check stop queue */
+ scf_del_queue(statep);
+ /* Interrupt enable */
+ scf_permit_intr(statep, 1);
+ if ((scf_comtbl.scf_path_p) ||
+ (scf_comtbl.scf_exec_p)) {
+ scf_chg_scf(statep, FIOMP_STAT_STANDBY);
+ } else {
+ if (scf_comtbl.watchdog_after_resume) {
+ scf_comtbl.alive_running = SCF_ALIVE_START;
+ scf_comtbl.watchdog_after_resume = 0;
+ }
+ scf_chg_scf(statep, FIOMP_STAT_ACTIVE);
+ /* Send path change command */
+ scf_comtbl.scf_exec_p = 0;
+ scf_comtbl.scf_path_p = 0;
+ scf_comtbl.scf_pchg_event_sub = EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(statep);
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ } else {
+			/* Appointed path is already out of management */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case FIOMPRECOVER:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPRECOVER proc");
+
+ if (ddi_copyin((void *)arg, (void *)&pathnum,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (pathnum == FIOMP_PATH_ALL) {
+ /* PATH_ALL appointment */
+
+ /* Check fail queue */
+ if ((statep = scf_comtbl.scf_err_p) != 0) {
+ scf_comtbl.scf_err_p = 0;
+ while (statep) {
+ wkstatep = statep->next;
+				/* Interrupt disable */
+ scf_forbid_intr(statep);
+ statep->scf_herr = 0;
+ statep->tesum_rcnt = 0;
+ statep->resum_rcnt = 0;
+ statep->cmd_to_rcnt = 0;
+ scf_chg_scf(statep, FIOMP_STAT_STOP);
+ statep = wkstatep;
+ }
+ }
+ } else {
+ /* PATH appointment */
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid PATH */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ if (statep->path_status == FIOMP_STAT_FAIL) {
+ scf_del_queue(statep);
+ scf_forbid_intr(statep);
+ statep->scf_herr = 0;
+ statep->tesum_rcnt = 0;
+ statep->resum_rcnt = 0;
+ statep->cmd_to_rcnt = 0;
+ scf_chg_scf(statep, FIOMP_STAT_STOP);
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ } else {
+			/* Appointed path is already out of management */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case FIOMPLIST:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPLIST proc");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ struct fiompdev_32 fiompdev32;
+ char *work_name_p = 0;
+ caddr32_t *devs_p = 0;
+ char *devs = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiompdev32,
+ sizeof (struct fiompdev_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ if (fiompdev32.api_level != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiompdev32.num;
+ if (fiompdev32.num != 0) {
+ /* Buffer allocation */
+ work_name_p = (char *)kmem_zalloc((size_t)(FIOMP_MAX_STR),
+ KM_SLEEP);
+ devs_p = (caddr32_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr32_t)) * alloc_num), KM_SLEEP);
+ devs = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (fiompdev32.num != scf_comtbl.path_num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiompdev32.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Set output information */
+ fiompdev32.inst_no = 0;
+ fiompdev32.inst_minor = SCF_INST_INSTANCE;
+ fiompdev32.user_minor = SCF_USER_INSTANCE;
+ fiompdev32.mpmode = FIOMP_FALSE;
+ fiompdev32.autopath = FIOMP_TRUE;
+ fiompdev32.needsync = FIOMP_FALSE;
+ for (ii = 0, jj = 0; ii < fiompdev32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ /* Output information setting every pass */
+ devs[jj] = '\0';
+ if ((statep = scf_comtbl.iomp_scf[ii]) != 0) {
+ if (ddi_pathname(statep->dip, work_name_p)) {
+ sprintf(&devs[jj], "%s:scfc%d",
+ work_name_p, statep->instance);
+ }
+ }
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if ((num_cmp_flag == 0) && (fiompdev32.num != 0)) {
+ if (fiompdev32.devs) {
+ if (ddi_copyin((void *)(uintptr_t)fiompdev32.devs,
+ (void *)devs_p,
+ ((sizeof (caddr32_t)) * fiompdev32.num),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_LIST32;
+ }
+ }
+ for (ii = 0, jj = 0; ii < fiompdev32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (devs_p[ii]) {
+ if (ddi_copyout((void *)&devs[jj],
+ (void *)(uintptr_t)devs_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_LIST32;
+ }
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiompdev32, (void *)arg,
+ sizeof (struct fiompdev_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_LIST32
+ */
+ END_LIST32:
+
+ /* Buffer release */
+ if (work_name_p) {
+ kmem_free((void *)work_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (devs_p) {
+ kmem_free((void *)devs_p,
+ (size_t)((sizeof (caddr32_t)) * alloc_num));
+ }
+ if (devs) {
+ kmem_free((void *)devs,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ struct fiompdev fiompdev;
+ char *work_name_p = 0;
+ caddr_t *devs_p = 0;
+ char *devs = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiompdev,
+ sizeof (struct fiompdev), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ if (fiompdev.api_level != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiompdev.num;
+ if (fiompdev.num != 0) {
+ /* Buffer allocation */
+ work_name_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+ devs_p = (caddr_t *)kmem_zalloc
+ ((size_t)((sizeof (caddr_t)) * alloc_num), KM_SLEEP);
+ devs = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (fiompdev.num != scf_comtbl.path_num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiompdev.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Set output information */
+ fiompdev.inst_no = 0;
+ fiompdev.inst_minor = SCF_INST_INSTANCE;
+ fiompdev.user_minor = SCF_USER_INSTANCE;
+ fiompdev.mpmode = FIOMP_FALSE;
+ fiompdev.autopath = FIOMP_TRUE;
+ fiompdev.needsync = FIOMP_FALSE;
+ for (ii = 0, jj = 0; ii < fiompdev.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ /* Output information setting every pass */
+ devs[jj] = '\0';
+ if ((statep = scf_comtbl.iomp_scf[ii]) != 0) {
+ if (ddi_pathname(statep->dip, work_name_p)) {
+ sprintf(&devs[jj], "%s:scfc%d",
+ work_name_p, statep->instance);
+ }
+ }
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if ((num_cmp_flag == 0) && (fiompdev.num != 0)) {
+ if (fiompdev.devs) {
+ if (ddi_copyin((void *)fiompdev.devs,
+ (void *)devs_p,
+ ((sizeof (caddr_t)) * fiompdev.num),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_LIST;
+ }
+ }
+ for (ii = 0, jj = 0; ii < fiompdev.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (devs_p[ii]) {
+ if (ddi_copyout((void *)&devs[jj],
+ (void *)devs_p[ii],
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_LIST;
+ }
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiompdev, (void *)arg,
+ sizeof (struct fiompdev), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_LIST
+ */
+ END_LIST:
+
+ /* Buffer release */
+ if (work_name_p) {
+ kmem_free((void *)work_name_p, (size_t)FIOMP_MAX_STR);
+ }
+ if (devs_p) {
+ kmem_free((void *)devs_p,
+ (size_t)((sizeof (caddr_t)) * alloc_num));
+ }
+ if (devs) {
+ kmem_free((void *)devs,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ }
+ break;
+
+ case FIOMPSTATUS:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPSTATUS proc");
+
+ if (u_mode == DDI_MODEL_ILP32) { /* DDI_MODEL_ILP32 */
+ struct fiompstatus_32 fiompstatus32;
+ char *message_p;
+
+ message_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)&fiompstatus32,
+ sizeof (struct fiompstatus_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_STATUS32;
+ }
+ pathnum = fiompstatus32.pathnum;
+ if (pathnum == FIOMP_PATH_ALL) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ goto END_STATUS32;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid path */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_STATUS32;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ scf_inst_getstat32(statep, &fiompstatus32,
+ message_p, 1);
+ } else {
+ /*
+ * Appointed path is already out of management
+ */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_STATUS32;
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (ddi_copyout((void *)message_p,
+ (void *)(uintptr_t)fiompstatus32.message,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_STATUS32;
+ }
+
+ if (ddi_copyout((void *)&fiompstatus32, (void *)arg,
+ sizeof (struct fiompstatus_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_STATUS32
+ */
+ END_STATUS32:
+
+ if (message_p) {
+ kmem_free((void *)message_p,
+ (size_t)FIOMP_MAX_STR);
+ }
+ } else { /* DDI_MODEL_NONE */
+ struct fiompstatus fiompstatus;
+ char *message_p;
+
+ message_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR), KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)&fiompstatus,
+ sizeof (struct fiompstatus), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_STATUS;
+ }
+ pathnum = fiompstatus.pathnum;
+ if (pathnum == FIOMP_PATH_ALL) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ goto END_STATUS;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid path */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_STATUS;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ scf_inst_getstat(statep, &fiompstatus,
+ message_p, 1);
+ } else {
+ /*
+				 * Appointed path is already out of management
+ */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_STATUS;
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (ddi_copyout((void *)message_p,
+ (void *)fiompstatus.message,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_STATUS;
+ }
+
+ if (ddi_copyout((void *)&fiompstatus,
+ (void *)arg, sizeof (struct fiompstatus),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_STATUS
+ */
+ END_STATUS:
+
+ if (message_p) {
+ kmem_free((void *)message_p,
+ (size_t)FIOMP_MAX_STR);
+ }
+ }
+ break;
+
+ case FIOMPADD:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPADD proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPDEL:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDEL proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPACTIVE:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPACTIVE proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPDISCONNECT:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDISCONNECT proc");
+
+ if (ddi_copyin((void *)arg, (void *)&pathnum,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (pathnum == FIOMP_PATH_ALL) {
+ /* PATH_ALL appointment */
+
+ /* Check stop queue */
+ if ((statep = scf_comtbl.scf_stop_p) != 0) {
+ scf_comtbl.scf_stop_p = 0;
+ while (statep) {
+ wkstatep = statep->next;
+ scf_chg_scf(statep, FIOMP_STAT_DISCON);
+ statep = wkstatep;
+ }
+ }
+ /* Check fail queue */
+ if ((statep = scf_comtbl.scf_err_p) != 0) {
+ scf_comtbl.scf_err_p = 0;
+ while (statep) {
+ wkstatep = statep->next;
+ scf_chg_scf(statep, FIOMP_STAT_DISCON);
+ statep = wkstatep;
+ }
+ }
+ } else {
+ /* PATH appointment */
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid path */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ if ((statep->path_status == FIOMP_STAT_STOP) ||
+ (statep->path_status ==
+ FIOMP_STAT_FAIL)) {
+ scf_del_queue(statep);
+ scf_chg_scf(statep, FIOMP_STAT_DISCON);
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ } else {
+			/* Appointed path is already out of management */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case FIOMPCONNECT:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPCONNECT proc");
+
+ if (ddi_copyin((void *)arg, (void *)&pathnum,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (pathnum == FIOMP_PATH_ALL) {
+ /* PATH_ALL appointment */
+
+ /* Check disconnect queue */
+ if ((statep = scf_comtbl.scf_disc_p) != 0) {
+ scf_comtbl.scf_disc_p = 0;
+ while (statep) {
+ wkstatep = statep->next;
+ if (statep->scf_herr) {
+ scf_chg_scf(statep,
+ FIOMP_STAT_FAIL);
+ } else {
+ scf_chg_scf(statep,
+ FIOMP_STAT_STOP);
+ }
+ statep = wkstatep;
+ }
+ }
+ } else {
+ /* PATH appointment */
+ if (scf_comtbl.path_num < (pathnum + 1)) {
+ /* Invalid path */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_inst_ioctl;
+ }
+ if ((statep = scf_comtbl.iomp_scf[pathnum]) != 0) {
+ if (statep->path_status == FIOMP_STAT_DISCON) {
+ scf_del_queue(statep);
+ if (statep->scf_herr) {
+ scf_chg_scf(statep,
+ FIOMP_STAT_FAIL);
+ } else {
+ scf_chg_scf(statep,
+ FIOMP_STAT_STOP);
+ }
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR,
+ __LINE__, "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ } else {
+			/* Appointed path is already out of management */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EINVAL;
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case FIOMPSTANDBY:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPSTANDBY proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPBLOCK:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPBLOCK proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPUNBLOCK:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPUNBLOCK proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPDIAGON:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDIAGON proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPDIAGOFF:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPDIAGOFF proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPGETALLSTAT:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPGETALLSTAT proc");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ struct fiomp_all_stat_32 fiomp_all_stat32;
+ struct fiompstatus_32 *fiompstatus32_p = 0;
+ char *message_p = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiomp_all_stat32,
+ sizeof (struct fiomp_all_stat_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiomp_all_stat32.num;
+ if (fiomp_all_stat32.num != 0) {
+ /* Buffer allocation */
+ fiompstatus32_p =
+ (struct fiompstatus_32 *)kmem_zalloc
+ ((size_t)((sizeof (struct fiompstatus_32)) * alloc_num),
+ KM_SLEEP);
+ message_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ if (fiomp_all_stat32.num != 0) {
+ if (ddi_copyin((void *)(uintptr_t)fiomp_all_stat32.status,
+ (void *)fiompstatus32_p,
+ ((sizeof (struct fiompstatus_32)) *
+ fiomp_all_stat32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT32;
+ }
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_comtbl.path_num != fiomp_all_stat32.num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiomp_all_stat32.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Output information setting every pass */
+ for (ii = 0, jj = 0; ii < fiomp_all_stat32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ statep = scf_comtbl.iomp_scf[ii];
+ scf_inst_getstat32(statep, &fiompstatus32_p[ii],
+ &message_p[jj], 1);
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (num_cmp_flag == 0 && fiomp_all_stat32.num != 0) {
+ if (ddi_copyout((void *)fiompstatus32_p,
+ (void *)(uintptr_t)fiomp_all_stat32.status,
+ ((sizeof (struct fiompstatus_32)) *
+ fiomp_all_stat32.num), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT32;
+ }
+ for (ii = 0, jj = 0; ii < fiomp_all_stat32.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (ddi_copyout((void *)&message_p[jj],
+ (void *)(uintptr_t)fiompstatus32_p[ii].message,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT32;
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiomp_all_stat32, (void *)arg,
+ sizeof (struct fiomp_all_stat_32), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_GETALLSTAT32
+ */
+ END_GETALLSTAT32:
+
+ /* Buffer release */
+ if (fiompstatus32_p) {
+ kmem_free((void *)fiompstatus32_p,
+ (size_t)((sizeof (struct fiompstatus_32)) * alloc_num));
+ }
+ if (message_p) {
+ kmem_free((void *)message_p,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ struct fiomp_all_stat fiomp_all_stat;
+ struct fiompstatus *fiompstatus_p = 0;
+ char *message_p = 0;
+
+ if (ddi_copyin((void *)arg, (void *)&fiomp_all_stat,
+ sizeof (struct fiomp_all_stat), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_inst_ioctl;
+ }
+ alloc_num = fiomp_all_stat.num;
+ if (fiomp_all_stat.num != 0) {
+ /* Buffer allocation */
+ fiompstatus_p =
+ (struct fiompstatus *)kmem_zalloc
+ ((size_t)((sizeof (struct fiompstatus)) * alloc_num),
+ KM_SLEEP);
+ message_p = (char *)kmem_zalloc
+ ((size_t)(FIOMP_MAX_STR * alloc_num), KM_SLEEP);
+ }
+
+ if (fiomp_all_stat.num != 0) {
+ if (ddi_copyin((void *)fiomp_all_stat.status,
+ (void *)fiompstatus_p,
+ ((sizeof (struct fiompstatus)) * fiomp_all_stat.num),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT;
+ }
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_comtbl.path_num != fiomp_all_stat.num) {
+ /*
+ * When different from appointed num, perform only num setting
+ */
+ fiomp_all_stat.num = scf_comtbl.path_num;
+ num_cmp_flag = 1;
+ } else {
+ /* Output information setting every pass */
+ for (ii = 0, jj = 0; ii < fiomp_all_stat.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ statep = scf_comtbl.iomp_scf[ii];
+ scf_inst_getstat(statep, &fiompstatus_p[ii],
+ &message_p[jj], 1);
+ }
+ }
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (num_cmp_flag == 0 && fiomp_all_stat.num != 0) {
+ if (ddi_copyout((void *)fiompstatus_p,
+ (void *)fiomp_all_stat.status,
+ ((sizeof (struct fiompstatus)) * fiomp_all_stat.num),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT;
+ }
+ for (ii = 0, jj = 0; ii < fiomp_all_stat.num;
+ ii++, jj += FIOMP_MAX_STR) {
+ if (ddi_copyout((void *)&message_p[jj],
+ (void *)fiompstatus_p[ii].message,
+ FIOMP_MAX_STR, mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "i_ioctl ", 8);
+ ret = EFAULT;
+ goto END_GETALLSTAT;
+ }
+ }
+ }
+ if (ddi_copyout((void *)&fiomp_all_stat, (void *)arg,
+ sizeof (struct fiomp_all_stat), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_GETALLSTAT
+ */
+ END_GETALLSTAT:
+
+ /* Buffer release */
+ if (fiompstatus_p) {
+ kmem_free((void *)fiompstatus_p,
+ (size_t)((sizeof (struct fiompstatus)) * alloc_num));
+ }
+ if (message_p) {
+ kmem_free((void *)message_p,
+ (size_t)(FIOMP_MAX_STR * alloc_num));
+ }
+ }
+ break;
+
+ case FIOMPCHG:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPCHG proc");
+
+ ret = ENOTTY;
+ break;
+
+ case FIOMPGETEVENT:
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "FIOMPGETEVENT proc");
+
+ ret = ENOTTY;
+ break;
+
+ default:
+ /* undefined */
+ SCFDBGMSG(SCF_DBGFLAG_IOMP, "undefined ioctl command");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "i_ioctl ", 8);
+ ret = ENOTTY;
+ }
+
+/*
+ * END_inst_ioctl
+ */
+ END_inst_ioctl:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+/*
+ * Make path status : FIOMPSTATUS, FIOMPGETALLSTAT : 32bit-64bit
+ */
+void
+scf_inst_getstat32(scf_state_t *statep, struct fiompstatus_32 *status32_p,
+	char *message_p, int flag)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_inst_getstat32() "
+	int stat;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": start");
+
+	if (statep == NULL) {
+		/* No state for this path: report an empty slot */
+		status32_p->status = FIOMP_STAT_EMPTY;
+		message_p[0] = '\0';
+	} else {
+		/* flag != 0 selects the current status, 0 the former one */
+		stat = flag ? statep->path_status : statep->old_path_status;
+		/* Any recognized state is passed through; else EMPTY */
+		switch (stat) {
+		case FIOMP_STAT_ACTIVE:
+		case FIOMP_STAT_STANDBY:
+		case FIOMP_STAT_STOP:
+		case FIOMP_STAT_DISCON:
+		case FIOMP_STAT_FAIL:
+			status32_p->status = stat;
+			break;
+		default:
+			status32_p->status = FIOMP_STAT_EMPTY;
+		}
+		/* Build the IOMP details message for this path */
+		scf_path_stmsg(statep, message_p);
+	}
+	status32_p->block_status = FIOMP_BSTAT_BLOCK;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": end");
+}
+
+/*
+ * Make path status : FIOMPSTATUS, FIOMPGETALLSTAT : 64bit-64bit/32bit-32bit
+ */
+void
+scf_inst_getstat(scf_state_t *statep, struct fiompstatus *status_p,
+	char *message_p, int flag)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_inst_getstat() "
+	int stat;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": start");
+
+	if (statep == NULL) {
+		/* No state for this path: report an empty slot */
+		status_p->status = FIOMP_STAT_EMPTY;
+		message_p[0] = '\0';
+	} else {
+		/* flag != 0 selects the current status, 0 the former one */
+		stat = flag ? statep->path_status : statep->old_path_status;
+		/* Any recognized state is passed through; else EMPTY */
+		switch (stat) {
+		case FIOMP_STAT_ACTIVE:
+		case FIOMP_STAT_STANDBY:
+		case FIOMP_STAT_STOP:
+		case FIOMP_STAT_DISCON:
+		case FIOMP_STAT_FAIL:
+			status_p->status = stat;
+			break;
+		default:
+			status_p->status = FIOMP_STAT_EMPTY;
+		}
+		/* Build the IOMP details message for this path */
+		scf_path_stmsg(statep, message_p);
+	}
+	status_p->block_status = FIOMP_BSTAT_BLOCK;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": end");
+}
+
+/*
+ * IOMP details message making
+ */
+void
+scf_path_stmsg(scf_state_t *statep, char *message_p)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_path_stmsg() "
+	/*
+	 * Hard-error bits mapped to their detail messages, checked in
+	 * priority order; the first bit found set wins.
+	 */
+	static const struct {
+		uint_t		err;
+		const char	*msg;
+	} scf_herr_msg[] = {
+		{ HERR_TESUM,		"Command error" },
+		{ HERR_RESUM,		"Sumcheck error" },
+		{ HERR_CMD_RTO,		"Command timeout" },
+		{ HERR_BUSY_RTO,	"Command busy timeout" },
+		{ HERR_DSCP_INTERFACE,	"SCF communication path error" },
+		{ HERR_DSCP_ACKTO,	"DSCP ack response timeout" },
+		{ HERR_DSCP_ENDTO,	"DSCP end response timeout" }
+	};
+	const char	*msg = "Good";
+	int		ii;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": start");
+
+	for (ii = 0;
+	    ii < (int)(sizeof (scf_herr_msg) / sizeof (scf_herr_msg[0]));
+	    ii++) {
+		if (statep->scf_herr & scf_herr_msg[ii].err) {
+			msg = scf_herr_msg[ii].msg;
+			break;
+		}
+	}
+	strcpy(message_p, msg);
+
+	SCFDBGMSG(SCF_DBGFLAG_IOMP, SCF_FUNC_NAME ": end");
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfkstat.c b/usr/src/uts/sun4u/opl/io/scfd/scfkstat.c
new file mode 100644
index 0000000000..be531bfedc
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfkstat.c
@@ -0,0 +1,219 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/kmem.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/cred.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/kstat.h>
+
+#include <sys/scfd/scfparam.h>
+
+
+/*
+ * for kstat_named_create(9F)
+ */
+/*
+ * One entry of a named-kstat description table: the statistic's name and
+ * its kstat data type.  Tables of these drive kstat_named_init() calls in
+ * scf_kstat_named_init().
+ */
+typedef struct _scf_kstat_named_list {
+	char *name;
+	uchar_t data_type;
+} scf_kstat_named_list;
+
+/* Named entries published under the "scf" system kstat (all char-typed) */
+static scf_kstat_named_list scf_kstat_system_list[] = {
+	{SCF_STATUS_KSTAT_NAMED, KSTAT_DATA_CHAR},
+	{SCF_BOOT_MODE_KSTAT_NAMED, KSTAT_DATA_CHAR},
+	{SCF_SECURE_MODE_KSTAT_NAMED, KSTAT_DATA_CHAR},
+	{SCF_EVENT_KSTAT_NAMED, KSTAT_DATA_CHAR},
+	{SCF_ALIVE_KSTAT_NAMED, KSTAT_DATA_CHAR},
+};
+
+
+/*
+ * prototype
+ */
+static kstat_t *scf_kstat_named_init(char *name,
+ scf_kstat_named_list *kstat_list, int ndata,
+ int (*update)(struct kstat *, int));
+static int scf_kstat_sys_update(kstat_t *ksp, int rw);
+
+
+/*
+ * from scf_attach()
+ */
+/* DDI_ATTACH */
+void
+scf_kstat_init()
+{
+#define	SCF_FUNC_NAME		"scf_kstat_init() "
+	scf_kstat_private_t	*priv_p;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.attach_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": start");
+
+	/* Allocate the driver-wide kstat bookkeeping area */
+	priv_p = kmem_zalloc(sizeof (scf_kstat_private_t), KM_SLEEP);
+	scf_comtbl.kstat_private = priv_p;
+
+	/* Create and install the named "scf" system kstat */
+	priv_p->ksp_scf = scf_kstat_named_init(SCF_SYSTEM_KSTAT_NAME,
+	    scf_kstat_system_list, SCF_KSTAT_SYS_NAMED_NDATA,
+	    scf_kstat_sys_update);
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * from scf_detach()
+ */
+/* DDI_DETACH */
+void
+scf_kstat_fini()
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_kstat_fini() "
+	scf_kstat_private_t	*priv_p = scf_comtbl.kstat_private;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.attach_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": start");
+
+	/* Tear down the named kstat if it was created at attach time */
+	if (priv_p->ksp_scf != NULL) {
+		kstat_delete(priv_p->ksp_scf);
+	}
+
+	/* Release the bookkeeping area and clear the global reference */
+	kmem_free(priv_p, sizeof (scf_kstat_private_t));
+	scf_comtbl.kstat_private = NULL;
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_kstat_named_init()
+ * kstat_create(KSTAT_TYPE_NAMED) + kstat_named_init() + kstat_install()
+ */
+static kstat_t *
+scf_kstat_named_init(char *name, scf_kstat_named_list *kstat_list, int ndata,
+	int (*update)(struct kstat *, int))
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_kstat_named_init() "
+	kstat_t		*ksp;
+	kstat_named_t	*knp;
+	int		ix;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.attach_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": start");
+
+	/* Create the named kstat under the scf driver's module name */
+	ksp = kstat_create(SCF_DRIVER_NAME, 0, name, "misc",
+	    KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
+	if (ksp == NULL) {
+		cmn_err(CE_WARN, "%s: kstat_create failed.", scf_driver_name);
+		return (NULL);
+	}
+
+	/* Initialize each named entry from the caller's description table */
+	knp = (kstat_named_t *)(ksp->ks_data);
+	for (ix = 0; ix < ndata; ix++) {
+		kstat_named_init(&knp[ix], kstat_list[ix].name,
+		    kstat_list[ix].data_type);
+	}
+
+	/* Hook up the update routine, driver-wide lock and private data */
+	ksp->ks_update = update;
+	ksp->ks_lock = (void *)&(scf_comtbl.all_mutex);
+	ksp->ks_private = &scf_comtbl;
+
+	kstat_install(ksp);
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": end");
+	return (ksp);
+}
+
+
+/*
+ * scf_kstat_sys_update()
+ *
+ * ks_update routine for the named "scf" system kstat.  Snapshots the
+ * current SCF status, boot-mode switch, secure-mode switch, the latest
+ * event code and the alive-check state into the named entries.  The
+ * kstat is read-only: KSTAT_WRITE is rejected with EACCES.  Runs with
+ * ks_lock (scf_comtbl.all_mutex) held by the kstat framework.
+ */
+static int
+scf_kstat_sys_update(kstat_t *ksp, int rw)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_kstat_sys_update() "
+	kstat_named_t	*sysksp;
+	scf_comtbl_t	*softsp;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": start");
+
+	sysksp = (kstat_named_t *)ksp->ks_data;
+	softsp = (scf_comtbl_t *)ksp->ks_private;
+
+	/* this is a read-only kstat */
+	if (rw == KSTAT_WRITE) {
+		return (EACCES);
+	}
+
+	/* SCF interface status: online or offline */
+	if (softsp->scf_status == SCF_STATUS_ONLINE) {
+		sysksp[SCF_KSTAT_SYS_NAMED_STATUS].value.c[0] =
+		    SCF_STAT_STATUS_ONLINE;
+	} else {
+		sysksp[SCF_KSTAT_SYS_NAMED_STATUS].value.c[0] =
+		    SCF_STAT_STATUS_OFFLINE;
+	}
+
+	/*
+	 * Boot-mode switch: auto-boot vs. stop at OBP.
+	 * (Fix: the boot-mode and secure-mode entries previously had
+	 * their value sets swapped - boot mode reported LOCK/UNLOCK
+	 * while secure mode reported AUTO_BOOT/OBP_STOP.)
+	 */
+	if ((softsp->scf_mode_sw & STATUS_BOOT_MODE) ==
+	    STATUS_MODE_AUTO_BOOT) {
+		sysksp[SCF_KSTAT_SYS_NAMED_BOOT_MODE].value.c[0] =
+		    SCF_STAT_MODE_AUTO_BOOT;
+	} else {
+		sysksp[SCF_KSTAT_SYS_NAMED_BOOT_MODE].value.c[0] =
+		    SCF_STAT_MODE_OBP_STOP;
+	}
+
+	/* Secure-mode switch: locked vs. unlocked */
+	if ((softsp->scf_mode_sw & STATUS_SECURE_MODE) == STATUS_MODE_LOCK) {
+		sysksp[SCF_KSTAT_SYS_NAMED_SECURE_MODE].value.c[0] =
+		    SCF_STAT_MODE_LOCK;
+	} else {
+		sysksp[SCF_KSTAT_SYS_NAMED_SECURE_MODE].value.c[0] =
+		    SCF_STAT_MODE_UNLOCK;
+	}
+
+	/* Latest event code (byte 4 of last_event) and alive-check state */
+	sysksp[SCF_KSTAT_SYS_NAMED_EVENT].value.c[0] =
+	    (char)softsp->last_event[4];
+	sysksp[SCF_KSTAT_SYS_NAMED_ALIVE].value.c[0] =
+	    softsp->alive_running;
+
+	SCFDBGMSG(SCF_DBGFLAG_KSTAT, SCF_FUNC_NAME ": end");
+	return (0);
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfops.c b/usr/src/uts/sun4u/opl/io/scfd/scfops.c
new file mode 100644
index 0000000000..3a8a194b8d
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfops.c
@@ -0,0 +1,3470 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/errno.h>
+#include <sys/open.h>
+#include <sys/uio.h>
+#include <sys/cred.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+#include <sys/scfd/scfio32.h>
+
+/*
+ * Function list
+ */
+int scf_open(dev_t *devp, int flag, int otyp, cred_t *cred_p);
+int scf_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
+int scf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p);
+int scf_ioc_reportstat(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_clearlcd(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_wrlcd(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_getdiskled(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_setdiskled(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_getsdownreason(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_optiondisp(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_getpciconfig(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_hac(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_hstadrsinfo(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_rdclistmax(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_rdclistx(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_rdctrl(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_opecall(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_getreport(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_rcipwr(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_panicreq(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_panicchk(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_parmset(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_parmget(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrset(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrget(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrclr(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrfpoff(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrexset(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_autopwrexget(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_dr(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_eventlist(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_getevent(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_setmadmevent(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_remcscmd(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_remcsfile(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_sparecmd(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_setphpinfo(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_ioc_pciresetreq(intptr_t arg, int mode, int *rval_p, int u_mode);
+int scf_push_reportsense(unsigned int rci_addr, unsigned char *sense,
+ time_t timestamp);
+int scf_pop_reportsense(scfreport_t *rsense);
+int scf_push_getevent(unsigned char *event_p);
+int scf_pop_getevent(scfevent_t *event_p);
+int scf_valid_date(int year, int month, int date);
+int scf_check_pon_time(scfautopwrtime_t *ptime);
+int scf_check_poff_time(scfautopwrtime_t *ptime);
+
+/*
+ * scf_open()
+ *
+ * Description: Driver open() entry processing.
+ * Validates the minor number and the open type; the driver keeps no
+ * per-open state, so nothing else is recorded here.  Returns 0 on
+ * success or EINVAL for a bad minor / non-character open.
+ */
+/* ARGSUSED */
+int
+scf_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
+{
+#define SCF_FUNC_NAME "scf_open() "
+ int ret = 0;
+ int instance;
+
+ SCFDBGMSG1(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": start instance = %d",
+ getminor(*devp));
+ SC_DBG_DRV_TRACE(TC_OPEN|TC_IN, __LINE__, NULL, 0);
+
+ /* get instance */
+ instance = getminor(*devp);
+ if (SCF_CHECK_INSTANCE(instance)) {
+ /* is the device character ? */
+ if (otyp != OTYP_CHR) {
+ SC_DBG_DRV_TRACE(TC_OPEN|TC_ERR, __LINE__,
+ "open ", 8);
+ ret = EINVAL;
+ }
+ } else {
+ /* minor number out of range */
+ SC_DBG_DRV_TRACE(TC_OPEN|TC_ERR, __LINE__, "open ", 8);
+ ret = EINVAL;
+ }
+
+ SC_DBG_DRV_TRACE(TC_OPEN|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG1(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_close()
+ *
+ * Description: Driver close() entry processing.
+ * The driver keeps no per-open state, so close only emits trace
+ * records and always returns 0.
+ */
+/* ARGSUSED */
+int
+scf_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_close() "
+ int ret = 0;
+
+ SCFDBGMSG1(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": start instance = %d",
+ getminor(dev));
+ SC_DBG_DRV_TRACE(TC_CLOSE|TC_IN, __LINE__, NULL, 0);
+
+ SC_DBG_DRV_TRACE(TC_CLOSE|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG(SCF_DBGFLAG_OPCLS, SCF_FUNC_NAME ": end");
+ return (ret);
+}
+
+
+/*
+ * scf_ioctl()
+ *
+ * Description: Driver ioctl() entry processing.
+ * For the user instance: verifies the device attached, checks
+ * privilege (three read-only commands are allowed without it),
+ * rejects calls during suspend, then dispatches to the matching
+ * scf_ioc_*() handler.  On the way out, timers that the handler
+ * marked stopped are untimeout()'ed with the driver mutex dropped
+ * (untimeout() while holding the mutex could deadlock against a
+ * running timeout handler).
+ */
+int
+scf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
+ int *rval_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioctl() "
+ int ret = 0;
+ int instance;
+ int u_mode;
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start instance = %d",
+ getminor(dev));
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_IN, __LINE__, &cmd, sizeof (int));
+
+#ifdef _MULTI_DATAMODEL
+ /* DDI_MODEL_ILP32: SCF driver 64bit, upper 32bit */
+ /* DDI_MODEL_NONE : SCF driver64bit, upper 64bit */
+ u_mode = ddi_model_convert_from(mode & FMODELS);
+#else /* ! _MULTI_DATAMODEL */
+ /* DDI_MODEL_NONE : SCF driver 32bit, upper 32bit */
+ u_mode = DDI_MODEL_NONE;
+#endif /* _MULTI_DATAMODEL */
+
+ /* get instance */
+ instance = getminor(dev);
+
+ SCF_DBG_IOMP_PROC;
+
+ if (instance == SCF_USER_INSTANCE) {
+
+ /* fail with ENXIO until attach has set up all mutexes */
+ mutex_enter(&scf_comtbl.attach_mutex);
+ if (!(scf_comtbl.resource_flag & DID_MUTEX_ALL)) {
+ /* Not attach device */
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, "Not attach device");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ mutex_exit(&scf_comtbl.attach_mutex);
+ ret = ENXIO;
+ goto END_ioctl;
+ }
+ mutex_exit(&scf_comtbl.attach_mutex);
+
+ if (drv_priv(cred_p) != 0) {
+ /* Not super-user: only read-only queries allowed */
+ if ((cmd != SCFIOCHSTADRSINFO) &&
+ (cmd != SCFIOCRDCLISTMAX) &&
+ (cmd != SCFIOCRDCLISTX)) {
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, "Not super-user");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EPERM;
+ goto END_ioctl;
+ }
+ }
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Suspend flag check */
+ if (scf_comtbl.suspend_flag) {
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, "suspend execute");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EBUSY;
+ goto END_ioctl;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ SCF_DBG_IOCTL_PROC;
+
+ /* handlers take/drop the driver mutex themselves */
+ switch ((uint_t)cmd) {
+ /*
+ * RAS control interface
+ */
+ case SCFIOCREPORTSTAT:
+ ret = scf_ioc_reportstat(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCCLEARLCD:
+ ret = scf_ioc_clearlcd(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCWRLCD:
+ ret = scf_ioc_wrlcd(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCGETDISKLED:
+ ret = scf_ioc_getdiskled(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCSETDISKLED:
+ ret = scf_ioc_setdiskled(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCGETSDOWNREASON:
+ ret = scf_ioc_getsdownreason(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * System information interface
+ */
+ case SCFIOCOPTIONDISP:
+ ret = scf_ioc_optiondisp(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCGETPCICONFIG:
+ ret = scf_ioc_getpciconfig(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * RCI control interface
+ */
+ case SCFIOCHAC:
+ ret = scf_ioc_hac(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCHSTADRSINFO:
+ ret = scf_ioc_hstadrsinfo(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCRDCLISTMAX:
+ ret = scf_ioc_rdclistmax(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCRDCLISTX:
+ ret = scf_ioc_rdclistx(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCRDCTRL:
+ ret = scf_ioc_rdctrl(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCOPECALL:
+ ret = scf_ioc_opecall(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCGETREPORT:
+ ret = scf_ioc_getreport(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCRCIPWR:
+ ret = scf_ioc_rcipwr(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCPANICREQ:
+ ret = scf_ioc_panicreq(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCPANICCHK:
+ ret = scf_ioc_panicchk(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCPARMSET:
+ ret = scf_ioc_parmset(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCPARMGET:
+ ret = scf_ioc_parmget(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * APCS control interface
+ */
+ case SCFIOCAUTOPWRSET:
+ ret = scf_ioc_autopwrset(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCAUTOPWRGET:
+ case SCFIOCSYSAUTOPWRGET:
+ ret = scf_ioc_autopwrget(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCSYSAUTOPWRCLR:
+ ret = scf_ioc_autopwrclr(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCAUTOPWRFPOFF:
+ ret = scf_ioc_autopwrfpoff(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCAUTOPWREXSET:
+ ret = scf_ioc_autopwrexset(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCAUTOPWREXGET:
+ ret = scf_ioc_autopwrexget(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * FJDR control interface
+ */
+ case SCFIOCDR:
+ ret = scf_ioc_dr(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * MADM REMCS interface
+ */
+ case SCFIOCEVENTLIST:
+ ret = scf_ioc_eventlist(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCGETEVENT:
+ ret = scf_ioc_getevent(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCSETMADMEVENT:
+ ret = scf_ioc_setmadmevent(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCREMCSCMD:
+ ret = scf_ioc_remcscmd(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCREMCSFILE:
+ ret = scf_ioc_remcsfile(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCSPARECMD:
+ ret = scf_ioc_sparecmd(arg, mode, rval_p, u_mode);
+ break;
+
+ /*
+ * Kernel interface
+ */
+ case SCFIOCSETPHPINFO:
+ ret = scf_ioc_setphpinfo(arg, mode, rval_p, u_mode);
+ break;
+
+ case SCFIOCPCIRESETREQ:
+ ret = scf_ioc_pciresetreq(arg, mode, rval_p, u_mode);
+ break;
+
+ default:
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, "undefined ioctl command");
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = ENOTTY;
+ break;
+ }
+ } else {
+ /* not the user instance */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = ENXIO;
+ }
+
+/*
+ * END_ioctl
+ */
+ END_ioctl:
+
+ if (scf_comtbl.resource_flag & DID_MUTEX_ALL) {
+ /*
+ * untimeout() processing of the timer which stopped a timer by
+ * ioctl processing.
+ * Calling untimeout() while holding the driver mutex is
+ * prohibited, so collect the ids under the mutex and stop
+ * the timers after dropping it.
+ */
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids,
+ SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+ }
+
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_OUT, __LINE__, &ret, sizeof (int));
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_reportstat()
+ *
+ * Description: SCFIOCREPORTSTAT ioctl command processing.
+ * Reports SHUTDOWN_START or SYSTEM_RUNNING system status to the SCF
+ * via CMD_REPORT.  Note the control flow: each valid case of the
+ * switch enters all_mutex and falls through to the common send code;
+ * the mutex is released only at the mutex_exit() before END_reportstat
+ * (the invalid-arg default jumps to END_reportstat without ever
+ * taking the mutex).  If the send fails with EBUSY because no SCF
+ * path is available and this is a shutdown report, the stopped path
+ * is re-activated and the report is retried with the NOPATH subcmd.
+ */
+/* ARGSUSED */
+int
+scf_ioc_reportstat(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_reportstat() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scf_state_t *wk_statep;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+ switch (arg) {
+ case SCF_SHUTDOWN_START:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* shutdown report carries the power-off factor bytes */
+ scf_comtbl.shutdown_start_reported = 1;
+ sbuf.b[0] = REPORT_STAT_SHUTDOWN_START;
+ sbuf.b[1] = scf_poff_factor[scf_comtbl.poff_factor][0];
+ sbuf.b[2] = scf_poff_factor[scf_comtbl.poff_factor][1];
+ sbuf.b[3] = scf_poff_factor[scf_comtbl.poff_factor][2];
+ break;
+
+ case SCF_SYSTEM_RUNNING:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ sbuf.b[0] = REPORT_STAT_SYSTEM_RUNNING;
+ sbuf.b[1] = 0;
+ sbuf.b[2] = 0;
+ sbuf.b[3] = 0;
+ break;
+
+ default:
+ /* invalid report code: mutex was never taken */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_reportstat;
+ }
+
+ /* a pending report wait is superseded by this explicit report */
+ if ((scf_comtbl.scf_report_event_sub == EVENT_SUB_REPORT_RUN_WAIT) ||
+ (scf_comtbl.scf_report_event_sub ==
+ EVENT_SUB_REPORT_SHUT_WAIT)) {
+ scf_comtbl.scf_report_event_sub = EVENT_SUB_NONE;
+ }
+
+ scf_cmd.cmd = CMD_REPORT;
+ scf_cmd.subcmd = SUB_SYSTEM_STATUS_RPT;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+ scf_comtbl.scf_last_report = arg;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ /*
+ * No active SCF path during a shutdown report: bring the stopped
+ * path back up and retry so the shutdown is still reported.
+ */
+ if ((ret == EBUSY) &&
+ (scf_cmd.stat0 == SCF_STAT0_NOT_PATH) &&
+ (arg == SCF_SHUTDOWN_START)) {
+ wk_statep = scf_comtbl.scf_stop_p;
+ ret = EIO;
+ if (wk_statep != NULL) {
+ scf_del_queue(wk_statep);
+ /* Interrupt enable */
+ scf_permit_intr(wk_statep, 1);
+ scf_chg_scf(wk_statep, PATH_STAT_ACTIVE);
+ scf_comtbl.scf_exec_p = wk_statep;
+ /* SCF command send sync stop */
+ ret = scf_make_send_cmd(&scf_cmd, SCF_USE_STOP);
+ if (ret == 0) {
+ /* new report shutdown */
+ scf_cmd.subcmd = SUB_SYSTEM_STATUS_RPT_NOPATH;
+ scf_cmd.flag = (SCF_USE_S_BUF | SCF_USE_SP);
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+ if (ret == 0) {
+ /*
+ * SCF command send sync re-stop
+ */
+ ret = scf_make_send_cmd(&scf_cmd,
+ SCF_USE_STOP);
+ }
+ }
+ /* FIOMPSTART exec status */
+ if ((wk_statep == scf_comtbl.scf_exec_p) ||
+ (wk_statep == scf_comtbl.scf_path_p)) {
+ if (scf_comtbl.watchdog_after_resume) {
+ /*
+ * Alive check status recovery
+ */
+ scf_comtbl.alive_running =
+ SCF_ALIVE_START;
+ scf_comtbl.watchdog_after_resume = 0;
+ }
+ scf_chg_scf(wk_statep, PATH_STAT_ACTIVE);
+ /* SCF path change send */
+ scf_comtbl.scf_exec_p = 0;
+ scf_comtbl.scf_path_p = 0;
+ scf_comtbl.scf_pchg_event_sub =
+ EVENT_SUB_PCHG_WAIT;
+ scf_next_cmd_check(wk_statep);
+ }
+ /* SCF command send sync start */
+ (void) scf_make_send_cmd(&scf_cmd, SCF_USE_START);
+ }
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_reportstat
+ */
+ END_reportstat:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_clearlcd()
+ *
+ * Description: SCFIOCCLEARLCD ioctl command processing.
+ * Clears the LCD sequence message: zeroes the cached copy in
+ * scf_comtbl.lcd_seq_mes[] and sends an empty (single NUL byte)
+ * SUB_PHASE_PRINT command to the SCF.  Only SCF_CLRLCD_SEQ is a
+ * valid argument; anything else returns EINVAL.
+ */
+/* ARGSUSED */
+int
+scf_ioc_clearlcd(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_clearlcd() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ int ii;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scf_cmd.cmd = CMD_PHASE;
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_L_BUF;
+
+ switch (arg) {
+ case SCF_CLRLCD_SEQ:
+ /* send a single NUL byte to blank the message */
+ scf_cmd.subcmd = SUB_PHASE_PRINT;
+ scf_cmd.scount = 1;
+ scf_cmd.sbuf = &sbuf.b[0];
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* clear both the cached message and the send buffer */
+ for (ii = 0; ii < SCF_WRLCD_MAX; ii++) {
+ scf_comtbl.lcd_seq_mes[ii] = '\0';
+ sbuf.b[ii] = '\0';
+ }
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ break;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_wrlcd()
+ *
+ * Description: SCFIOCWRLCD ioctl command processing.
+ * Copies a user-supplied LCD message in (with ILP32/LP64 model
+ * conversion of the scfwrlcd_t argument), truncates it to
+ * SCF_WRLCD_MAX bytes, caches it in scf_comtbl.lcd_seq_mes[] and
+ * sends it to the SCF with SUB_PHASE_PRINT.
+ */
+/* ARGSUSED */
+int
+scf_ioc_wrlcd(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_wrlcd() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfwrlcd_t scfwrlcd;
+ scfwrlcd32_t scfwrlcd32;
+ int ii;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32: widen the 32-bit layout to native */
+ if (ddi_copyin((void *)arg, (void *)&scfwrlcd32,
+ sizeof (scfwrlcd32_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_wrlcd;
+ }
+ scfwrlcd.lcd_type = scfwrlcd32.lcd_type;
+ scfwrlcd.length = scfwrlcd32.length;
+ scfwrlcd.string = (unsigned char *)(uintptr_t)scfwrlcd32.string;
+ } else {
+ /* DDI_MODEL_NONE */
+ if (ddi_copyin((void *)arg, (void *)&scfwrlcd,
+ sizeof (scfwrlcd_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_wrlcd;
+ }
+ }
+
+ /* negative length is invalid; over-long messages are truncated */
+ if (scfwrlcd.length < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_wrlcd;
+ }
+ if (scfwrlcd.length > SCF_WRLCD_MAX) {
+ scfwrlcd.length = SCF_WRLCD_MAX;
+ }
+ /* pre-clear so the message is always NUL-terminated */
+ for (ii = 0; ii < SCF_WRLCD_MAX + 1; ii++) {
+ sbuf.b[ii] = '\0';
+ }
+ sbuf.b[scfwrlcd.length] = '\0';
+
+ if (ddi_copyin((void *)scfwrlcd.string, (void *)&sbuf.b[0],
+ (size_t)scfwrlcd.length, mode)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_wrlcd;
+ }
+
+ /* send the message plus its terminating NUL */
+ scf_cmd.cmd = CMD_PHASE;
+ scf_cmd.scount = scfwrlcd.length + 1;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_L_BUF;
+
+ switch (scfwrlcd.lcd_type) {
+ case SCF_WRLCD_SEQ:
+ scf_cmd.subcmd = SUB_PHASE_PRINT;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+ /* cache the message for later re-display */
+ bcopy((void *)&sbuf.b[0], (void *)&scf_comtbl.lcd_seq_mes[0],
+ SCF_WRLCD_MAX);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ break;
+ }
+
+/*
+ * END_wrlcd
+ */
+ END_wrlcd:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_getdiskled()
+ *
+ * Description: SCFIOCGETDISKLED ioctl command processing.
+ * Queries the LED state for the disk identified by the user-supplied
+ * device path (SUB_DISK_LED_DISP) and copies the state back into the
+ * caller's scfiocgetdiskled_t.  The argument struct is heap-allocated
+ * because it carries a full device path and is large for the stack.
+ */
+/* ARGSUSED */
+int
+scf_ioc_getdiskled(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_getdiskled() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t rbuf;
+ scfiocgetdiskled_t *scfiocgetdiskled_p = NULL;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfiocgetdiskled_p = kmem_zalloc((size_t)(sizeof (scfiocgetdiskled_t)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)scfiocgetdiskled_p,
+ sizeof (scfiocgetdiskled_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getdiskled;
+ }
+
+ scf_cmd.cmd = CMD_DOMAIN_INFO;
+ scf_cmd.subcmd = SUB_DISK_LED_DISP;
+ scf_cmd.sbuf = &scfiocgetdiskled_p->path[0];
+ scf_cmd.scount = SCF_DISK_LED_PATH_MAX;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.rcount = SCF_S_CNT_15;
+ scf_cmd.flag = SCF_USE_LSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret == 0) {
+ /* first receive byte is the LED state */
+ scfiocgetdiskled_p->led = rbuf.b[0];
+
+ if (ddi_copyout((void *)scfiocgetdiskled_p, (void *)arg,
+ sizeof (scfiocgetdiskled_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_getdiskled
+ */
+ END_getdiskled:
+
+ if (scfiocgetdiskled_p) {
+ kmem_free((void *)scfiocgetdiskled_p,
+ (size_t)(sizeof (scfiocgetdiskled_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_setdiskled()
+ *
+ * Description: SCFIOCSETDISKLED ioctl command processing.
+ * Sets the LED for the disk identified by the user-supplied device
+ * path to on, blink or off; the requested state selects the
+ * SUB_DISK_LED_* subcommand.  Re-uses the scfiocgetdiskled_t layout
+ * for the input argument.
+ */
+/* ARGSUSED */
+int
+scf_ioc_setdiskled(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_setdiskled() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfiocgetdiskled_t *scfiocgetdiskled_p = NULL;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfiocgetdiskled_p = kmem_zalloc((size_t)(sizeof (scfiocgetdiskled_t)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)scfiocgetdiskled_p,
+ sizeof (scfiocgetdiskled_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_setdiskled;
+ }
+
+ /* map the requested LED state to an SCF subcommand */
+ switch (scfiocgetdiskled_p->led) {
+ case SCF_DISK_LED_ON:
+ scf_cmd.subcmd = SUB_DISK_LED_ON;
+ break;
+
+ case SCF_DISK_LED_BLINK:
+ scf_cmd.subcmd = SUB_DISK_LED_BLINK;
+ break;
+
+ case SCF_DISK_LED_OFF:
+ scf_cmd.subcmd = SUB_DISK_LED_OFF;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_setdiskled;
+ }
+
+ scf_cmd.cmd = CMD_DOMAIN_INFO;
+ scf_cmd.sbuf = &scfiocgetdiskled_p->path[0];
+ scf_cmd.scount = SCF_DISK_LED_PATH_MAX;
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_L_BUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_setdiskled
+ */
+ END_setdiskled:
+
+ if (scfiocgetdiskled_p) {
+ kmem_free((void *)scfiocgetdiskled_p,
+ (size_t)(sizeof (scfiocgetdiskled_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_getsdownreason()
+ *
+ * Description: SCFIOCGETSDOWNREASON ioctl command processing.
+ * Copies the cached shutdown-reason code (scf_comtbl.scf_shutdownreason,
+ * an int) out to the user buffer.  No SCF command is issued and no
+ * mutex is taken for this single-word read.
+ */
+/* ARGSUSED */
+int
+scf_ioc_getsdownreason(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_getsdownreason() "
+ int ret = 0;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyout((void *)&scf_comtbl.scf_shutdownreason,
+ (void *)arg, sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_optiondisp()
+ *
+ * Description: SCFIOCOPTIONDISP ioctl command processing.
+ * Issues SUB_OPTION_DISP (no send data) and, on success, copies the
+ * raw receive bytes back to the caller's scfoption_t.
+ */
+/* ARGSUSED */
+int
+scf_ioc_optiondisp(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_optiondisp() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfoption_t scfoption;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ bzero((void *)&scfoption, sizeof (scfoption_t));
+
+ scf_cmd.cmd = CMD_DOMAIN_INFO;
+ scf_cmd.subcmd = SUB_OPTION_DISP;
+ scf_cmd.sbuf = 0;
+ scf_cmd.scount = 0;
+ scf_cmd.rbuf = &scfoption.rbuf[0];
+ scf_cmd.rcount = SCF_S_CNT_15;
+ scf_cmd.flag = SCF_USE_SSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret == 0) {
+ if (ddi_copyout((void *)&scfoption, (void *)arg,
+ sizeof (scfoption_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_getpciconfig()
+ *
+ * Description: SCFIOCGETPCICONFIG ioctl command processing.
+ * Sends the caller-supplied selector bytes with SUB_PCI_DISP and
+ * returns the (large, up to SCF_L_CNT_MAX) receive buffer to the
+ * caller.  The argument struct is heap-allocated because of the
+ * large receive buffer it embeds.
+ */
+/* ARGSUSED */
+int
+scf_ioc_getpciconfig(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_getpciconfig() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfiocgetpciconfig_t *scfiocgetpciconfig_p = NULL;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfiocgetpciconfig_p =
+ kmem_zalloc((size_t)(sizeof (scfiocgetpciconfig_t)), KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)scfiocgetpciconfig_p,
+ sizeof (scfiocgetpciconfig_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getpciconfig;
+ }
+
+ scf_cmd.cmd = CMD_DOMAIN_INFO;
+ scf_cmd.subcmd = SUB_PCI_DISP;
+ scf_cmd.sbuf = &scfiocgetpciconfig_p->sbuf[0];
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.rbuf = &scfiocgetpciconfig_p->rbuf[0];
+ scf_cmd.rcount = SCF_L_CNT_MAX;
+ scf_cmd.flag = SCF_USE_SLBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret == 0) {
+ if (ddi_copyout((void *)scfiocgetpciconfig_p,
+ (void *)arg, sizeof (scfiocgetpciconfig_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_getpciconfig
+ */
+ END_getpciconfig:
+
+ if (scfiocgetpciconfig_p) {
+ kmem_free((void *)scfiocgetpciconfig_p,
+ (size_t)(sizeof (scfiocgetpciconfig_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_hac()
+ *
+ * Description: SCFIOCHAC ioctl command processing.
+ * Issues the RCI control subcommand named in scfhac.sub_command and
+ * copies the receive data back to the caller.  Special cases:
+ *  - SUB_HOSTADDR_DISP2 is served from the scf_save_hac cache when it
+ *    is valid; otherwise it is downgraded to SUB_HOSTADDR_DISP and a
+ *    successful reply refreshes the cache.
+ *  - SCF_SUB_REMOTE_POWCTL_SET repacks two argument bytes into a
+ *    fresh send buffer before issuing SUB_REMOTE_POWCTL_SET.
+ */
+/* ARGSUSED */
+int
+scf_ioc_hac(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_hac() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfhac_t scfhac;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfhac,
+ sizeof (scfhac_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_hac;
+ }
+
+ if (scfhac.sub_command == SUB_HOSTADDR_DISP2) {
+ /* serve from the cached host address when available */
+ mutex_enter(&scf_comtbl.all_mutex);
+ if (scf_save_hac_flag != 0) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (ddi_copyout((void *)&scf_save_hac, (void *)arg,
+ sizeof (scfhac_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ goto END_hac;
+ } else {
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+ scfhac.sub_command = SUB_HOSTADDR_DISP;
+ }
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = scfhac.sub_command;
+ scf_cmd.sbuf = &scfhac.sbuf[0];
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.rbuf = &scfhac.rbuf[0];
+ scf_cmd.rcount = SCF_S_CNT_15;
+
+ /* validate the subcommand and pick the buffer-usage flag */
+ switch (scfhac.sub_command) {
+ case SUB_REMOTE_POWCTL_SET:
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SCF_SUB_REMOTE_POWCTL_SET:
+ /* repack bytes 6/7 of the argument as the send data */
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+ sbuf.b[0] = scfhac.sbuf[6];
+ sbuf.b[1] = scfhac.sbuf[7];
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.subcmd = SUB_REMOTE_POWCTL_SET;
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SUB_HOSTADDR_DISP:
+ case SUB_DEVICE_INFO:
+ scf_cmd.flag = SCF_USE_SSBUF;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_hac;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret == 0) {
+ /* refresh the host-address cache on a successful DISP */
+ if (scfhac.sub_command == SUB_HOSTADDR_DISP) {
+ bcopy((void *)&scfhac, (void *)&scf_save_hac,
+ sizeof (scfhac_t));
+ scf_save_hac_flag = 1;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ddi_copyout((void *)&scfhac, (void *)arg, sizeof (scfhac_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ } else {
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+
+/*
+ * END_hac
+ */
+ END_hac:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_hstadrsinfo()
+ *
+ * Description: SCFIOCHSTADRSINFO ioctl command processing.
+ * Issues SUB_HOSTADDR_DISP and copies the SCF_S_CNT_12 receive bytes
+ * out to the caller; a successful reply also refreshes the
+ * scf_save_hac cache used by SUB_HOSTADDR_DISP2.
+ * This command is allowed without privilege (see scf_ioctl()).
+ */
+/* ARGSUSED */
+int
+scf_ioc_hstadrsinfo(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_hstadrsinfo() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t rbuf;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_HOSTADDR_DISP;
+ scf_cmd.scount = 0;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.rcount = SCF_S_CNT_12;
+ scf_cmd.flag = SCF_USE_SSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret == 0) {
+ /* refresh the host-address cache */
+ bcopy((void *)&rbuf.b[0], (void *)&scf_save_hac.rbuf[0],
+ SCF_S_CNT_12);
+ scf_save_hac_flag = 1;
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /*
+ * Check the copyout result with != 0, matching the
+ * convention used by every other handler in this file
+ * (ddi_copyout() returns 0 or -1, so this is equivalent
+ * to the previous "< 0" test).
+ */
+ if (ddi_copyout((void *)&rbuf.b[0], (void *)arg, SCF_S_CNT_12,
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ } else {
+ mutex_exit(&scf_comtbl.all_mutex);
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_rdclistmax()
+ *
+ * Description: SCFIOCRDCLISTMAX ioctl command processing.
+ * Reports the maximum number of RCI device-list entries
+ * (scf_rci_max * SCF_DEVLIST_MAXCNT) so the caller can size the
+ * buffer passed to SCFIOCRDCLISTX.  Allowed without privilege.
+ */
+/* ARGSUSED */
+int
+scf_ioc_rdclistmax(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_rdclistmax() "
+ int ret = 0;
+ int scfrdclistmax;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfrdclistmax = (scf_rci_max * SCF_DEVLIST_MAXCNT);
+
+ if (ddi_copyout((void *)&scfrdclistmax, (void *)arg, sizeof (int),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_rdclistx()
+ *
+ * Description: SCFIOCRDCLISTX ioctl command processing.
+ * Fetches the RCI device list with SUB_DEVICE_LIST, once for each of
+ * the two systems (sbuf.b[4] = 0 and 1), unpacks the raw 8-byte
+ * entries into scfrdclistx_t records, copies them out and returns
+ * the entry count via *rval_p.  Allowed without privilege.
+ */
+/* ARGSUSED */
+int
+scf_ioc_rdclistx(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_rdclistx() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfrdclistx_t *rdclistxp = NULL;
+ scfrdclistx_t *rdclistxp_wk = NULL;
+ /* overlays the raw 8-byte receive entries for field extraction */
+ union wk_buffer {
+ uchar_t b[8];
+ uint_t four_bytes_access[2];
+ } *rbuf_wk;
+ int ii;
+ int jj;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ rdclistxp =
+ (scfrdclistx_t *)kmem_zalloc((size_t)(sizeof (scfrdclistx_t) *
+ scf_rci_max * SCF_DEVLIST_MAXCNT), KM_SLEEP);
+ rdclistxp_wk =
+ (scfrdclistx_t *)kmem_zalloc((size_t)(sizeof (scfrdclistx_t) *
+ scf_rci_max), KM_SLEEP);
+
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+
+ /* set device class (all classes), first pass: system 0 */
+ sbuf.four_bytes_access[0] = 0x00000fff;
+ sbuf.b[4] = 0; /* 0 system */
+
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_DEVICE_LIST;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = scf_rci_max * SCF_DEVLIST_ENTSIZE;
+ scf_cmd.rbuf = (uchar_t *)rdclistxp_wk;
+ scf_cmd.flag = SCF_USE_SLBUF;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret != 0) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_rdclistx;
+ }
+
+ /* unpack system-0 entries; a zero rci_addr terminates the list */
+ rbuf_wk = (union wk_buffer *)rdclistxp_wk;
+ for (jj = 0; jj < (scf_cmd.rbufleng / SCF_DEVLIST_ENTSIZE);
+ jj++) {
+ if (rbuf_wk->four_bytes_access[0] == NULL) {
+ break;
+ }
+ rdclistxp[jj].rci_addr = rbuf_wk->four_bytes_access[0];
+ rdclistxp[jj].status = rbuf_wk->b[4];
+ rdclistxp[jj].dev_class =
+ ((ushort_t)rbuf_wk->b[5] << 8) +
+ (ushort_t)rbuf_wk->b[6];
+ rdclistxp[jj].sub_class = rbuf_wk->b[7];
+ rbuf_wk++;
+ }
+
+ /* set device class, second pass: system 1 */
+ sbuf.four_bytes_access[0] = 0x00000fff;
+ sbuf.b[4] = 1; /* 1 system */
+
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_DEVICE_LIST;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = scf_rci_max * SCF_DEVLIST_ENTSIZE;
+ scf_cmd.rbuf = (uchar_t *)rdclistxp_wk;
+ scf_cmd.flag = SCF_USE_SLBUF;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret != 0) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_rdclistx;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* unpack system-1 entries, appended after the jj system-0 ones */
+ rbuf_wk = (union wk_buffer *)rdclistxp_wk;
+ for (ii = 0; ii < (scf_cmd.rbufleng / SCF_DEVLIST_ENTSIZE); ii++) {
+ if (rbuf_wk->four_bytes_access[0] == NULL) {
+ break;
+ }
+ rdclistxp[ii + jj].rci_addr = rbuf_wk->four_bytes_access[0];
+ rdclistxp[ii + jj].status = rbuf_wk->b[4];
+ rdclistxp[ii + jj].dev_class =
+ ((ushort_t)rbuf_wk->b[5] << 8) +
+ (ushort_t)rbuf_wk->b[6];
+ rdclistxp[ii + jj].sub_class = rbuf_wk->b[7];
+ rbuf_wk++;
+ }
+
+ /* return number of data */
+ *rval_p = (ii + jj);
+
+ if (ii + jj) {
+ if (ddi_copyout((void *)rdclistxp, (void *)arg,
+ (size_t)(sizeof (scfrdclistx_t) * (ii + jj)),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_rdclistx
+ */
+ END_rdclistx:
+
+ if (rdclistxp) {
+ kmem_free((void *)rdclistxp, (size_t)(sizeof (scfrdclistx_t) *
+ scf_rci_max * SCF_DEVLIST_MAXCNT));
+ }
+ if (rdclistxp_wk) {
+ kmem_free((void *)rdclistxp_wk,
+ (size_t)(sizeof (scfrdclistx_t) * scf_rci_max));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_rdctrl()
+ *
+ * Description: SCFIOCRDCTRL ioctl command processing.
+ * Sends a CMD_RCI_CTL command (device status report or an RCI path
+ * control sub-command) and then waits for the matching asynchronous
+ * sense data to arrive before copying the result back to the caller.
+ *
+ * arg    - user address of a scfrdctrl_t (copied in and, on success,
+ *          back out with sense data filled in).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, or EFAULT/EINVAL/EINTR/ENODATA, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_rdctrl(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_rdctrl() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfrdctrl_t scfrdctrl;
+ int got_sense = 0; /* set once matching sense data was received */
+ clock_t lb;
+ int ii;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfrdctrl,
+ sizeof (scfrdctrl_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_rdctrl;
+ }
+ /*
+ * Accept only the device-status-report sub-commands or an RCI
+ * path sub-command; the OR with SCF_RCI_PATH_PARITY masks the
+ * parity variant so both path-40 flavors pass the check.
+ */
+ if ((scfrdctrl.sub_cmd != SUB_DEVICE_STATUS_RPT) &&
+ (scfrdctrl.sub_cmd != SCF_SUB_DEVICE_STATUS_RPT) &&
+ ((scfrdctrl.sub_cmd | SCF_RCI_PATH_PARITY) !=
+ SCF_RCI_PATH_40)) {
+ /* wrong sub_cmd */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_rdctrl;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /*
+ * Only one SCFIOCRDCTRL may be outstanding at a time: wait
+ * (interruptibly) until the previous holder drops rdctrl_busy.
+ */
+ while (scf_comtbl.rdctrl_busy) {
+ SC_DBG_DRV_TRACE(TC_W_SIG, __LINE__, &scf_comtbl.rdctrl_cv,
+ sizeof (kcondvar_t));
+ if (cv_wait_sig(&scf_comtbl.rdctrl_cv,
+ &scf_comtbl.all_mutex) == 0) {
+ SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+ &scf_comtbl.rdctrl_cv, sizeof (kcondvar_t));
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINTR;
+ goto END_rdctrl;
+ }
+ }
+ scf_comtbl.rdctrl_busy = 1;
+
+ /* zero-pad the send buffer beyond the caller-supplied bytes */
+ for (ii = scfrdctrl.scount; ii < SCF_S_CNT_32; ii++) {
+ scfrdctrl.sbuf[ii] = 0;
+ }
+ if ((scfrdctrl.sub_cmd == SUB_DEVICE_STATUS_RPT) ||
+ (scfrdctrl.sub_cmd == SCF_SUB_DEVICE_STATUS_RPT)) {
+ scf_cmd.flag = SCF_USE_S_BUF;
+ } else {
+ /* SUB_RCI_PATH_4* */
+ scf_cmd.flag = SCF_USE_L_BUF;
+ /* Parameter size set */
+ if (scfrdctrl.scount > 6) {
+ scfrdctrl.sbuf[5] = (scfrdctrl.scount - 6);
+ } else {
+ scfrdctrl.sbuf[5] = 0;
+ }
+ }
+ scf_cmd.cmd = CMD_RCI_CTL;
+ if (scfrdctrl.sub_cmd == SCF_SUB_DEVICE_STATUS_RPT) {
+ scf_cmd.subcmd = SUB_DEVICE_STATUS_RPT;
+ } else if (scfrdctrl.sub_cmd ==
+ (SCF_RCI_PATH_40 & (~SCF_RCI_PATH_PARITY))) {
+ scf_cmd.subcmd = SCF_RCI_PATH_40;
+ } else {
+ scf_cmd.subcmd = scfrdctrl.sub_cmd;
+ }
+ scf_comtbl.rdctrl_sense_category_code = 0;
+ scf_cmd.sbuf = &scfrdctrl.sbuf[0];
+ scf_cmd.scount = SCF_S_CNT_32;
+ scf_cmd.rcount = 0;
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret != 0) {
+ goto END_rdctrl_signal;
+ }
+
+ /*
+ * wait for sense: the interrupt path stores the category code
+ * into rdctrl_sense_category_code and signals rdcsense_cv.
+ * The deadline is absolute (lb taken once before the loop), so
+ * the overall wait is bounded by scf_rdctrl_sense_wait.
+ */
+ lb = ddi_get_lbolt();
+ while (scf_comtbl.rdctrl_sense_category_code == 0) {
+ SC_DBG_DRV_TRACE(TC_T_WAIT, __LINE__, &scf_comtbl.rdcsense_cv,
+ sizeof (kcondvar_t));
+ scf_comtbl.rdctrl_end_wait = 1;
+ if (cv_timedwait(&scf_comtbl.rdcsense_cv, &scf_comtbl.all_mutex,
+ drv_usectohz(scf_rdctrl_sense_wait) + lb) == (-1)) {
+ /* time out */
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = ENODATA;
+ goto END_rdctrl_signal;
+ }
+ }
+
+ /*
+ * check RCI-addr and category code: the sense data must be for
+ * the same RCI address (first 4 bytes) and for the sub-command
+ * we issued (ignoring the parity bit), or be a generic device
+ * status report when a status report was requested.
+ */
+ if ((bcmp((void *)&scf_comtbl.rdctrl_sense[0],
+ (void *)&scfrdctrl.sbuf[0], 4) == 0) &&
+ (((scf_comtbl.rdctrl_sense_category_code) ==
+ (scfrdctrl.sub_cmd & (~SCF_RCI_PATH_PARITY))) ||
+ ((scf_comtbl.rdctrl_sense_category_code ==
+ DEV_SENSE_STATUS_RPT) &&
+ ((scfrdctrl.sub_cmd == SUB_DEVICE_STATUS_RPT) ||
+ (scfrdctrl.sub_cmd == SCF_SUB_DEVICE_STATUS_RPT))))) {
+ bcopy((void *)&scf_comtbl.rdctrl_sense[0],
+ (void *)&scfrdctrl.sense[0], 4);
+ bcopy((void *)&scf_comtbl.rdctrl_sense[8],
+ (void *)&scfrdctrl.sense[4], (SCF_INT_REASON_SIZE - 4));
+ got_sense = 1;
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = ENODATA;
+ }
+
+/*
+ * END_rdctrl_signal: release the busy flag and wake any waiter,
+ * then copy the sense data out (outside the mutex) if we got it.
+ */
+ END_rdctrl_signal:
+
+ scf_comtbl.rdctrl_end_wait = 0;
+ scf_comtbl.rdctrl_busy = 0;
+ cv_signal(&scf_comtbl.rdctrl_cv);
+ SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.rdctrl_cv,
+ sizeof (kcondvar_t));
+ mutex_exit(&scf_comtbl.all_mutex);
+ if (got_sense) {
+ if (ddi_copyout((void *)&scfrdctrl, (void *)arg,
+ sizeof (scfrdctrl_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_rdctrl
+ */
+ END_rdctrl:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_opecall()
+ *
+ * Description: SCFIOCOPECALL ioctl command processing.
+ * Issues a CMD_REPORT command to set, clear, or display the
+ * "operator call" state, as selected by scfhac.sub_command.
+ *
+ * arg    - user address of a scfhac_t (copied in; copied back out on
+ *          success so DISP results reach the caller).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_opecall(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_opecall() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfhac_t scfhac;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfhac, sizeof (scfhac_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_opecall;
+ }
+
+ scf_cmd.cmd = CMD_REPORT;
+ scf_cmd.subcmd = scfhac.sub_command;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &scfhac.sbuf[0];
+ scf_cmd.rcount = SCF_S_CNT_15;
+ scf_cmd.rbuf = &scfhac.rbuf[0];
+
+ /* buffer usage depends on whether the sub-command returns data */
+ switch (scfhac.sub_command) {
+ case SUB_OPECALL_ON_SET:
+ case SUB_OPECALL_OFF_SET:
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SUB_OPECALL_DISP:
+ scf_cmd.flag = SCF_USE_SSBUF;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_opecall;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret == 0) {
+ if (ddi_copyout((void *)&scfhac, (void *)arg, sizeof (scfhac_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_opecall
+ */
+ END_opecall:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_getreport()
+ *
+ * Description: SCFIOCGETREPORT ioctl command processing.
+ * Pops one queued report-sense entry and returns it to the caller,
+ * optionally blocking until one is available.  Handles both the
+ * ILP32 and native data models.
+ *
+ * arg    - user address of a scfreport_t (or scfreport32_t for
+ *          ILP32 callers; only the 'flag' field is consumed on input).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * u_mode - caller's data model (DDI_MODEL_ILP32 or DDI_MODEL_NONE).
+ * rval_p - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, EINTR, ENODATA, or EOVERFLOW (ILP32
+ * caller whose timestamp does not fit in time32_t).
+ */
+/* ARGSUSED */
+int
+scf_ioc_getreport(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_getreport() "
+ scfreport_t scfreport;
+ scfreport32_t scfreport32;
+ scfreport_t *scfreport_p;
+ int ret = 0;
+ int loop_flag = 1;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ if (ddi_copyin((void *)arg, (void *)&scfreport32,
+ sizeof (scfreport32_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getreport;
+ }
+ scfreport.flag = scfreport32.flag;
+ } else {
+ /* DDI_MODEL_NONE */
+ if (ddi_copyin((void *)arg, (void *)&scfreport,
+ sizeof (scfreport_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getreport;
+ }
+ }
+
+ switch (scfreport.flag) {
+ case GETREPORT_WAIT:
+ case GETREPORT_WAIT_AND_RCIDWN:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* saved RCI-down event, consumed below when requested */
+ scfreport_p = (scfreport_t *)&scf_comtbl.scfreport_rcidown.flag;
+ while (loop_flag) {
+ /* a queued report satisfies the wait immediately */
+ if (scf_pop_reportsense(&scfreport) == 0) {
+ break;
+ }
+ /*
+ * WAIT_AND_RCIDWN also returns a pending RCI-down
+ * event (one-shot: the flag is cleared on delivery).
+ */
+ if ((scfreport.flag == GETREPORT_WAIT_AND_RCIDWN) &&
+ (scf_comtbl.rcidown_event_flag)) {
+ scfreport.rci_addr = scfreport_p->rci_addr;
+ scfreport.report_sense[0] =
+ scfreport_p->report_sense[0];
+ scfreport.report_sense[1] =
+ scfreport_p->report_sense[1];
+ scfreport.report_sense[2] =
+ scfreport_p->report_sense[2];
+ scfreport.report_sense[3] =
+ scfreport_p->report_sense[3];
+ scfreport.timestamp = scfreport_p->timestamp;
+ scf_comtbl.rcidown_event_flag = 0;
+ break;
+ }
+ SC_DBG_DRV_TRACE(TC_W_SIG, __LINE__,
+ &scf_comtbl.rsense_cv, sizeof (kcondvar_t));
+ if (cv_wait_sig(&scf_comtbl.rsense_cv,
+ &scf_comtbl.all_mutex) == 0) {
+ SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+ &scf_comtbl.rsense_cv,
+ sizeof (kcondvar_t));
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINTR;
+ goto END_getreport;
+ }
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case GETREPORT_NOWAIT:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (scf_pop_reportsense(&scfreport) < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = ENODATA;
+ goto END_getreport;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_getreport;
+ }
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32: timestamp must fit in time32_t */
+ if ((scfreport.timestamp < INT32_MIN) ||
+ (scfreport.timestamp > INT32_MAX)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EOVERFLOW;
+ goto END_getreport;
+ }
+
+ scfreport32.rci_addr = scfreport.rci_addr;
+ scfreport32.report_sense[0] = scfreport.report_sense[0];
+ scfreport32.report_sense[1] = scfreport.report_sense[1];
+ scfreport32.report_sense[2] = scfreport.report_sense[2];
+ scfreport32.report_sense[3] = scfreport.report_sense[3];
+ scfreport32.timestamp = (time32_t)scfreport.timestamp;
+
+ if (ddi_copyout((void *)&scfreport32, (void *)arg,
+ sizeof (scfreport32_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ if (ddi_copyout((void *)&scfreport, (void *)arg,
+ sizeof (scfreport_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_getreport
+ */
+ END_getreport:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_rcipwr()
+ *
+ * Description: SCFIOCRCIPWR ioctl command processing.
+ * Issues a power-on / power-off / reset request to a remote RCI
+ * device.  The local host address is fetched first so that requests
+ * targeting this host itself (or the broadcast system address) can
+ * be rejected.
+ *
+ * arg    - user address of a scfrcipwr_t (sub_cmd + target rci_addr).
+ * mode   - ddi_copyin() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_rcipwr(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_rcipwr() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scf_short_buffer_t rbuf;
+ scfrcipwr_t scfrcipwr;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfrcipwr,
+ sizeof (scfrcipwr_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_rcipwr;
+ }
+
+ /* first query our own RCI host address */
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_HOSTADDR_DISP;
+ scf_cmd.scount = 0;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.rcount = SCF_S_CNT_12;
+ scf_cmd.flag = SCF_USE_SSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret != 0) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_rcipwr;
+ }
+
+ /* check RCI-address: reject self and the broadcast address */
+ if ((scfrcipwr.rci_addr == rbuf.four_bytes_access[0]) ||
+ (scfrcipwr.rci_addr == SCF_CMD_SYSTEM_ADDR)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_rcipwr;
+ }
+
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+
+ /* map the ioctl sub-command to the SCF power-control subcommand */
+ switch (scfrcipwr.sub_cmd) {
+ case RCI_PWR_ON:
+ scf_cmd.subcmd = SUB_PON;
+ break;
+
+ case RCI_PWR_OFF:
+ scf_cmd.subcmd = SUB_FPOFF;
+ break;
+
+ case RCI_SYS_RESET:
+ scf_cmd.subcmd = SUB_RESET;
+ break;
+
+ case RCI_PWR_NOR_OFF:
+ scf_cmd.subcmd = SUB_POFF;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_rcipwr;
+ }
+
+ scf_cmd.cmd = CMD_PART_POW_CTR;
+ scf_cmd.scount = SCF_S_CNT_15;
+ sbuf.four_bytes_access[0] = scfrcipwr.rci_addr;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_rcipwr
+ */
+ END_rcipwr:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_panicreq()
+ *
+ * Description: SCFIOCPANICREQ ioctl command processing.
+ * Sends a panic request (CMD_RCI_CTL / SUB_PANIC) to a remote RCI
+ * device.  Note that 'arg' here is the target RCI address itself,
+ * not a user-space pointer.  Panicking this host or the broadcast
+ * address is rejected.
+ *
+ * arg    - target RCI address (value, not a pointer).
+ * mode, rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EINVAL, or an error from scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_panicreq(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_panicreq() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scf_short_buffer_t rbuf;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ /* first query our own RCI host address */
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_HOSTADDR_DISP;
+ scf_cmd.scount = 0;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.rcount = SCF_S_CNT_12;
+ scf_cmd.flag = SCF_USE_SSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ if (ret != 0) {
+ mutex_exit(&scf_comtbl.all_mutex);
+ goto END_panicreq;
+ }
+
+ /* check RCI-address: reject self and the broadcast address */
+ if (((uint_t)arg == rbuf.four_bytes_access[0]) ||
+ (arg == SCF_CMD_SYSTEM_ADDR)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINVAL;
+ goto END_panicreq;
+ }
+
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+ sbuf.four_bytes_access[0] = (unsigned int)arg;
+ scf_cmd.cmd = CMD_RCI_CTL;
+ scf_cmd.subcmd = SUB_PANIC;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_panicreq
+ */
+ END_panicreq:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_panicchk()
+ *
+ * Description: SCFIOCPANICCHK ioctl command processing.
+ * Copies the driver's panic-execution flag (scf_panic_exec_flag2,
+ * an int) out to the user buffer at 'arg'.
+ *
+ * Returns 0 or EFAULT.
+ */
+/* ARGSUSED */
+int
+scf_ioc_panicchk(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_panicchk() "
+ int ret = 0;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyout((void *)&scf_panic_exec_flag2, (void *)arg,
+ sizeof (int), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_parmset()
+ *
+ * Description: SCFIOCPARMSET ioctl command processing.
+ * Sets a driver tunable.  Only SCF_PARM_RDCTRL_TIMER is supported:
+ * the SCFIOCRDCTRL sense-wait timeout, accepted in the range
+ * 1..120 seconds (in microseconds) and rounded down to a multiple
+ * of 500 ms.
+ *
+ * arg    - user address of a scfparam_t (parm selector + value).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, or EINVAL.
+ */
+/* ARGSUSED */
+int
+scf_ioc_parmset(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_parmset() "
+ int ret = 0;
+ scfparam_t scfparam;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfparam,
+ sizeof (scfparam_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_parmset;
+ }
+
+ switch (scfparam.parm) {
+ case SCF_PARM_RDCTRL_TIMER:
+ if ((scfparam.value >= SCF_SEC2MICRO(1)) &&
+ (scfparam.value <= SCF_SEC2MICRO(120))) {
+ mutex_enter(&scf_comtbl.all_mutex);
+ /* round down to a 500 ms multiple */
+ scf_rdctrl_sense_wait =
+ scfparam.value - (scfparam.value % 500000);
+ mutex_exit(&scf_comtbl.all_mutex);
+ } else {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EINVAL;
+ goto END_parmset;
+ }
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_parmset;
+ }
+
+ /* echo the (unrounded) request back to the caller */
+ if (ddi_copyout((void *)&scfparam, (void *)arg,
+ sizeof (scfparam_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_parmset
+ */
+ END_parmset:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_parmget()
+ *
+ * Description: SCFIOCPARMGET ioctl command processing.
+ * Reads a driver tunable.  Only SCF_PARM_RDCTRL_TIMER (the
+ * SCFIOCRDCTRL sense-wait timeout, in microseconds) is supported.
+ *
+ * arg    - user address of a scfparam_t; 'parm' selects the tunable
+ *          and 'value' is filled in on output.
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, or EINVAL.
+ */
+/* ARGSUSED */
+int
+scf_ioc_parmget(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_parmget() "
+ int ret = 0;
+ scfparam_t scfparam;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfparam,
+ sizeof (scfparam_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_parmget;
+ }
+
+ switch (scfparam.parm) {
+ case SCF_PARM_RDCTRL_TIMER:
+ mutex_enter(&scf_comtbl.all_mutex);
+ scfparam.value = scf_rdctrl_sense_wait;
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_parmget;
+ }
+
+ if (ddi_copyout((void *)&scfparam, (void *)arg,
+ sizeof (scfparam_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_parmget
+ */
+ END_parmget:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrset()
+ *
+ * Description: SCFIOCAUTOPWRSET ioctl command processing.
+ * Validates up to 5 scheduled power-on/power-off time entries and
+ * packs each into a 16-byte on/off record (big-endian year, then
+ * month/date/hour/minute for power-on in bytes 0-5 and power-off in
+ * bytes 8-13) before sending SUB_SYS_AUTO_ONOFF_SET.
+ *
+ * arg    - user address of a scfautopwr_t.
+ * mode   - ddi_copyin() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrset(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrset() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfautopwr_t scfautopwr;
+ int ii;
+ int jj; /* byte offset of entry ii's 16-byte record in sbuf */
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfautopwr,
+ sizeof (scfautopwr_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_autopwrset;
+ }
+
+ /* at most 5 schedule entries are supported */
+ if ((scfautopwr.valid_entries < 0) || (scfautopwr.valid_entries > 5)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_autopwrset;
+ }
+
+ /* unused entries are sent as all-zero records */
+ bzero((void *)&sbuf.b[0], 5 * SCF_S_CNT_16);
+
+ for (ii = 0, jj = 0; ii < scfautopwr.valid_entries; ii++,
+ jj = ii * SCF_S_CNT_16) {
+ /* check pon time */
+ if (scf_check_pon_time(&scfautopwr.ptime[ii]) < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EINVAL;
+ goto END_autopwrset;
+ }
+
+ if (scf_check_poff_time(&scfautopwr.ptime[ii]) < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EINVAL;
+ goto END_autopwrset;
+ }
+
+ /* power-on time: year (big-endian), month, date, hour, min */
+ sbuf.b[jj] = (uchar_t)(scfautopwr.ptime[ii].pon_year >> 8);
+ sbuf.b[jj + 1] = (uchar_t)scfautopwr.ptime[ii].pon_year;
+ sbuf.b[jj + 2] = (uchar_t)scfautopwr.ptime[ii].pon_month;
+ sbuf.b[jj + 3] = (uchar_t)scfautopwr.ptime[ii].pon_date;
+ sbuf.b[jj + 4] = (uchar_t)scfautopwr.ptime[ii].pon_hour;
+ sbuf.b[jj + 5] = (uchar_t)scfautopwr.ptime[ii].pon_minute;
+ sbuf.b[jj + 6] = 0;
+ sbuf.b[jj + 7] = 0;
+
+ /* power-off time in the second half of the record */
+ sbuf.b[jj + 8] = (uchar_t)(scfautopwr.ptime[ii].poff_year >> 8);
+ sbuf.b[jj + 9] = (uchar_t)scfautopwr.ptime[ii].poff_year;
+ sbuf.b[jj + 10] = (uchar_t)scfautopwr.ptime[ii].poff_month;
+ sbuf.b[jj + 11] = (uchar_t)scfautopwr.ptime[ii].poff_date;
+ sbuf.b[jj + 12] = (uchar_t)scfautopwr.ptime[ii].poff_hour;
+ sbuf.b[jj + 13] = (uchar_t)scfautopwr.ptime[ii].poff_minute;
+ sbuf.b[jj + 14] = 0;
+ sbuf.b[jj + 15] = 0;
+ }
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_SYS_AUTO_ONOFF_SET;
+ scf_cmd.scount = 5 * SCF_S_CNT_16;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_L_BUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_autopwrset
+ */
+ END_autopwrset:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrget()
+ *
+ * Description: SCFIOCAUTOPWRGET or SCFIOCSYSAUTOPWRGET
+ * ioctl command processing.
+ * Reads the scheduled power-on/power-off table from the SCF
+ * (SUB_SYS_AUTO_ONOFF_DISP), decodes the 16-byte records (layout as
+ * written by scf_ioc_autopwrset()) and copies the result out.
+ *
+ * arg    - user address of a scfautopwr_t (output only).
+ * mode   - ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, or an error from scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrget(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrget() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t rbuf;
+ scfautopwr_t scfautopwr;
+ int ii;
+ int jj; /* byte offset of entry ii's 16-byte record in rbuf */
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ bzero((void *)&scfautopwr, sizeof (scfautopwr_t));
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_SYS_AUTO_ONOFF_DISP;
+ scf_cmd.scount = 0;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.rcount = 5 * SCF_S_CNT_16;
+ scf_cmd.flag = SCF_USE_SLBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret != 0) {
+ goto END_autopwrget;
+ }
+
+ /* decode only as many records as the SCF actually returned */
+ for (ii = 0, jj = 0; ii < (scf_cmd.rbufleng / SCF_S_CNT_16);
+ ii++, jj = ii * SCF_S_CNT_16) {
+ scfautopwr.ptime[ii].pon_year =
+ (int)(rbuf.b[jj] << 8) | (int)rbuf.b[jj + 1];
+ scfautopwr.ptime[ii].pon_month = (int)rbuf.b[jj + 2];
+ scfautopwr.ptime[ii].pon_date = (int)rbuf.b[jj + 3];
+ scfautopwr.ptime[ii].pon_hour = (int)rbuf.b[jj + 4];
+ scfautopwr.ptime[ii].pon_minute = (int)rbuf.b[jj + 5];
+
+ scfautopwr.ptime[ii].poff_year =
+ (int)(rbuf.b[jj + 8] << 8) | (int)rbuf.b[jj + 9];
+ scfautopwr.ptime[ii].poff_month = (int)rbuf.b[jj + 10];
+ scfautopwr.ptime[ii].poff_date = (int)rbuf.b[jj + 11];
+ scfautopwr.ptime[ii].poff_hour = (int)rbuf.b[jj + 12];
+ scfautopwr.ptime[ii].poff_minute = (int)rbuf.b[jj + 13];
+ }
+ /* full table size is always reported, unused entries are zero */
+ scfautopwr.valid_entries = 5;
+
+ if (ddi_copyout((void *)&scfautopwr, (void *)arg,
+ sizeof (scfautopwr_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_autopwrget
+ */
+ END_autopwrget:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrclr()
+ *
+ * Description: SCFIOCSYSAUTOPWRCLR ioctl command processing.
+ * Clears the entire scheduled power-on/power-off table on the SCF
+ * (SUB_SYS_AUTO_ONOFF_CLRAR).  Takes no input and returns no data.
+ *
+ * Returns 0 or an error from scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrclr(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrclr() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_SYS_AUTO_ONOFF_CLRAR;
+ scf_cmd.scount = 0;
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrfpoff()
+ *
+ * Description: SCFIOCAUTOPWRFPOFF ioctl command processing.
+ * Sets the forced power-off time (SUB_FORCED_POFF_SET) after
+ * validating it with scf_check_poff_time().  The time is packed as
+ * big-endian year followed by month/date/hour/minute.
+ *
+ * arg    - user address of a scfautopwrtime_t.
+ * mode   - ddi_copyin() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrfpoff(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrfpoff() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfautopwrtime_t scfautopwrtime;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (ddi_copyin((void *)arg, (void *)&scfautopwrtime,
+ sizeof (scfautopwrtime_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_autopwrfpoff;
+ }
+ bzero((void *)&sbuf.b[0], 5 * SCF_S_CNT_16);
+ if (scf_check_poff_time(&scfautopwrtime) < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_autopwrfpoff;
+ }
+ /* pack: year (big-endian), month, date, hour, minute */
+ sbuf.b[0] = (uchar_t)(scfautopwrtime.poff_year >> 8);
+ sbuf.b[1] = (uchar_t)scfautopwrtime.poff_year;
+ sbuf.b[2] = (uchar_t)scfautopwrtime.poff_month;
+ sbuf.b[3] = (uchar_t)scfautopwrtime.poff_date;
+ sbuf.b[4] = (uchar_t)scfautopwrtime.poff_hour;
+ sbuf.b[5] = (uchar_t)scfautopwrtime.poff_minute;
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_FORCED_POFF_SET;
+ scf_cmd.scount = SCF_S_CNT_15;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_autopwrfpoff
+ */
+ END_autopwrfpoff:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrexset()
+ *
+ * Description: SCFIOCAUTOPWREXSET ioctl command processing.
+ * Sets the power-restore mode (SUB_PRESET_MODE_SET) after
+ * validating rpwr_mode against the three supported values.
+ *
+ * arg    - user address of a scfautopwrex_t.
+ * mode   - ddi_copyin() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, or an error from
+ * scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrexset(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrexset() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t sbuf;
+ scfautopwrex_t scfautopwrex;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+ if (ddi_copyin((void *)arg, (void *)&scfautopwrex,
+ sizeof (scfautopwrex_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_autopwrexset;
+ }
+
+ /* only the three defined restore modes are accepted */
+ switch (scfautopwrex.rpwr_mode) {
+ case AUTOPWREX_RESTORE:
+ case AUTOPWREX_NOPON:
+ case AUTOPWREX_AUTOPON:
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_autopwrexset;
+ }
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_PRESET_MODE_SET;
+ scf_cmd.scount = SCF_S_CNT_15;
+ sbuf.b[0] = (unsigned char)scfautopwrex.rpwr_mode;
+ scf_cmd.sbuf = &sbuf.b[0];
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_autopwrexset
+ */
+ END_autopwrexset:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_autopwrexget()
+ *
+ * Description: SCFIOCAUTOPWREXGET ioctl command processing.
+ * Reads the current power-restore mode (SUB_PRESET_MODE_DISP, one
+ * byte of reply data) and copies it out as a scfautopwrex_t.
+ *
+ * arg    - user address of a scfautopwrex_t (output only).
+ * mode   - ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, or an error from scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_autopwrexget(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_autopwrexget() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scf_short_buffer_t rbuf;
+ scfautopwrex_t scfautopwrex;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ bzero((void *)&scfautopwrex, sizeof (scfautopwrex_t));
+
+ scf_cmd.cmd = CMD_SYS_AUTOPOW;
+ scf_cmd.subcmd = SUB_PRESET_MODE_DISP;
+ scf_cmd.scount = 0;
+ scf_cmd.rcount = SCF_S_CNT_15;
+ scf_cmd.rbuf = &rbuf.b[0];
+ scf_cmd.flag = SCF_USE_SSBUF;
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ if (ret == 0) {
+ /* reply byte 0 holds the restore mode */
+ scfautopwrex.rpwr_mode = (int)rbuf.b[0];
+
+ if (ddi_copyout((void *)&scfautopwrex, (void *)arg,
+ sizeof (scfautopwrex_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_dr()
+ *
+ * Description: SCFIOCDR ioctl command processing.
+ * Issues a dynamic-reconfiguration (CMD_DR) command; the reply
+ * buffer layout depends on the sub-command.  The scfdr_t is heap
+ * allocated (KM_SLEEP) because it is too large for the stack.
+ *
+ * arg    - user address of a scfdr_t (copied in, and copied back
+ *          out unless the command failed with a non-E_PARAM error).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL (bad sub-command, or SCF status
+ * E_PARAM), or an error from scf_send_cmd_check_bufful().
+ */
+/* ARGSUSED */
+int
+scf_ioc_dr(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_dr() "
+ int ret = 0;
+ struct scf_cmd scf_cmd;
+ scfdr_t *scfdr_p = NULL;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfdr_p = kmem_zalloc((size_t)(sizeof (scfdr_t)), KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)scfdr_p, sizeof (scfdr_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_dr;
+ }
+
+ scf_cmd.cmd = CMD_DR;
+ scf_cmd.subcmd = scfdr_p->sub_command;
+ scf_cmd.sbuf = &scfdr_p->sbuf[0];
+ scf_cmd.scount = SCF_S_CNT_15;
+
+ /* reply size/buffering differs per DR sub-command */
+ switch (scfdr_p->sub_command) {
+ case SUB_SB_CONF_CHG:
+ scf_cmd.rbuf = &scfdr_p->rbuf[0];
+ scf_cmd.rcount = SCF_S_CNT_16;
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SUB_SB_BUILD_COMP:
+ scf_cmd.rcount = 0;
+ scf_cmd.flag = SCF_USE_S_BUF;
+ break;
+
+ case SUB_SB_SENSE_ALL:
+ scf_cmd.rbuf = &scfdr_p->rbuf[0];
+ /*
+ * NOTE(review): rcount is sized from sbuf while the data
+ * lands in rbuf — presumably the two arrays are the same
+ * size; confirm against the scfdr_t declaration.
+ */
+ scf_cmd.rcount = sizeof (scfdr_p->sbuf);
+ scf_cmd.flag = SCF_USE_SLBUF;
+ break;
+
+ case SUB_SB_SENSE:
+ scf_cmd.rbuf = &scfdr_p->rbuf[0];
+ scf_cmd.rcount = SCF_S_CNT_15;
+ scf_cmd.flag = SCF_USE_SSBUF;
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_dr;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /*
+ * E_PARAM from the SCF is mapped to EINVAL but the (possibly
+ * partial) result is still copied out; other errors skip the
+ * copyout entirely.
+ */
+ if (ret != 0) {
+ if (scf_cmd.stat0 == E_PARAM) {
+ ret = EINVAL;
+ } else {
+ goto END_dr;
+ }
+ }
+
+ if (ddi_copyout((void *)scfdr_p, (void *)arg, sizeof (scfdr_t),
+ mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ }
+
+/*
+ * END_dr
+ */
+ END_dr:
+
+ if (scfdr_p) {
+ kmem_free((void *)scfdr_p, (size_t)(sizeof (scfdr_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_eventlist()
+ *
+ * Description: SCFIOCEVENTLIST ioctl command processing.
+ * Installs the caller's event-code subscription list into the
+ * driver's getevent table (used by SCFIOCGETEVENT filtering).
+ * The list is heap allocated while it is validated and copied.
+ *
+ * arg    - user address of a scfeventlist_t.
+ * mode   - ddi_copyin() flag.
+ * rval_p, u_mode - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, or EINVAL (listcnt too large).
+ */
+/* ARGSUSED */
+int
+scf_ioc_eventlist(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_eventlist() "
+ int ret = 0;
+ scfeventlist_t *scfeventlist_p = NULL;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ scfeventlist_p = kmem_zalloc((size_t)(sizeof (scfeventlist_t)),
+ KM_SLEEP);
+
+ if (ddi_copyin((void *)arg, (void *)scfeventlist_p,
+ sizeof (scfeventlist_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EFAULT;
+ goto END_eventlist;
+ }
+ if (scfeventlist_p->listcnt > SCF_EVENTLIST_MAX) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_eventlist;
+ }
+
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /*
+ * Copy the whole list structure over the driver table, starting
+ * at its leading 'listcnt' member.
+ */
+ bcopy((void *)scfeventlist_p, (void *)&scf_comtbl.getevent_tbl.listcnt,
+ sizeof (scfeventlist_t));
+
+ mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_eventlist
+ */
+ END_eventlist:
+
+ if (scfeventlist_p) {
+ kmem_free((void *)scfeventlist_p,
+ (size_t)(sizeof (scfeventlist_t)));
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_getevent()
+ *
+ * Description: SCFIOCGETEVENT ioctl command processing.
+ * Pops one queued SCF event and returns it to the caller, blocking
+ * (interruptibly) when GETEVENT_WAIT is requested.  Handles both
+ * the ILP32 and native data models.
+ *
+ * arg    - user address of a scfevent_t (or scfevent32_t for ILP32
+ *          callers; only the 'flag' field is consumed on input).
+ * mode   - ddi_copyin()/ddi_copyout() flag.
+ * u_mode - caller's data model (DDI_MODEL_ILP32 or DDI_MODEL_NONE).
+ * rval_p - unused (ARGSUSED).
+ *
+ * Returns 0, EFAULT, EINVAL, EINTR, ENODATA, or EOVERFLOW (ILP32
+ * caller whose timestamp does not fit in time32_t).
+ */
+/* ARGSUSED */
+int
+scf_ioc_getevent(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_getevent() "
+ int ret = 0;
+ scfevent_t scfevent;
+ scfevent32_t scfevent32;
+ int loop_flag = 1;
+
+ SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32 */
+ if (ddi_copyin((void *)arg, (void *)&scfevent32,
+ sizeof (scfevent32_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getevent;
+ }
+ scfevent.flag = scfevent32.flag;
+ } else {
+ /* DDI_MODEL_NONE */
+ if (ddi_copyin((void *)arg, (void *)&scfevent,
+ sizeof (scfevent_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ goto END_getevent;
+ }
+ }
+
+ switch (scfevent.flag) {
+ case GETEVENT_WAIT:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* block until an event is queued or a signal arrives */
+ while (loop_flag) {
+ if (scf_pop_getevent(&scfevent) == 0) {
+ break;
+ }
+ SC_DBG_DRV_TRACE(TC_W_SIG, __LINE__,
+ &scf_comtbl.getevent_cv, sizeof (kcondvar_t));
+ if (cv_wait_sig(&scf_comtbl.getevent_cv,
+ &scf_comtbl.all_mutex) == 0) {
+ SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+ &scf_comtbl.getevent_cv,
+ sizeof (kcondvar_t));
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = EINTR;
+ goto END_getevent;
+ }
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ case GETEVENT_NOWAIT:
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ if (scf_pop_getevent(&scfevent) < 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ ret = ENODATA;
+ goto END_getevent;
+ }
+
+ mutex_exit(&scf_comtbl.all_mutex);
+ break;
+
+ default:
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+ ret = EINVAL;
+ goto END_getevent;
+ }
+
+ if (u_mode == DDI_MODEL_ILP32) {
+ /* DDI_MODEL_ILP32: timestamp must fit in time32_t */
+ if ((scfevent.timestamp < INT32_MIN) ||
+ (scfevent.timestamp > INT32_MAX)) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EOVERFLOW;
+ goto END_getevent;
+ }
+ bcopy((void *)&scfevent.rci_addr, (void *)&scfevent32.rci_addr,
+ SCF_INT_REASON_SIZE);
+ scfevent32.timestamp = (time32_t)scfevent.timestamp;
+
+ if (ddi_copyout((void *)&scfevent32, (void *)arg,
+ sizeof (scfevent32_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ } else {
+ /* DDI_MODEL_NONE */
+ if (ddi_copyout((void *)&scfevent, (void *)arg,
+ sizeof (scfevent_t), mode) != 0) {
+ SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+ "ioctl ", 8);
+ ret = EFAULT;
+ }
+ }
+
+/*
+ * END_getevent
+ */
+ END_getevent:
+
+ SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_ioc_setmadmevent()
+ *
+ * Description: SCFIOCSETMADMEVENT ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_setmadmevent(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_setmadmevent() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scfiocsetmadmevent_t *scfiocsetmadmevent_p = NULL;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Request block is large; heap-allocate it rather than use the stack */
+	scfiocsetmadmevent_p =
+	    kmem_zalloc((size_t)(sizeof (scfiocsetmadmevent_t)),
+	    KM_SLEEP);
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)scfiocsetmadmevent_p,
+	    sizeof (scfiocsetmadmevent_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_setmadmevent;
+	}
+
+	/* Payload must fit in a large SCF transfer buffer */
+	if (scfiocsetmadmevent_p->size > SCF_L_CNT_MAX) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_setmadmevent;
+	}
+
+	/*
+	 * Build the SCF command: error log "set machine admin" with the
+	 * user data as send buffer, no receive data expected.
+	 * NOTE(review): scf_cmd is stack-allocated and only partially
+	 * initialized; presumably scf_send_cmd_check_bufful() reads only
+	 * the fields set here -- confirm.
+	 */
+	scf_cmd.cmd = CMD_ERRLOG;
+	scf_cmd.subcmd = SUB_ERRLOG_SET_MADMIN;
+	scf_cmd.sbuf = &scfiocsetmadmevent_p->buf[0];
+	scf_cmd.scount = scfiocsetmadmevent_p->size;
+	scf_cmd.rcount = 0;
+	scf_cmd.flag = SCF_USE_L_BUF;
+
+	/* Send under the driver-wide lock, retrying on "buffer full" */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_setmadmevent
+ */
+	END_setmadmevent:
+
+	if (scfiocsetmadmevent_p) {
+		kmem_free((void *)scfiocsetmadmevent_p,
+		    (size_t)(sizeof (scfiocsetmadmevent_t)));
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_ioc_remcscmd()
+ *
+ * Description: SCFIOCREMCSCMD ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_remcscmd(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_remcscmd() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scf_short_buffer_t rbuf;
+	scfiocremcscmd_t scfiocremcscmd;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)&scfiocremcscmd,
+	    sizeof (scfiocremcscmd_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_remcscmd;
+	}
+
+	/* Send data must fit in a small (15 byte) SCF buffer */
+	if (scfiocremcscmd.size > SCF_S_CNT_15) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_remcscmd;
+	}
+
+	scf_cmd.cmd = CMD_REMCS_SPT;
+
+	/* Only the SUB_CMD_EX_REMCS sub-command is accepted */
+	switch (scfiocremcscmd.sub_command) {
+	case SUB_CMD_EX_REMCS:
+		scf_cmd.subcmd = scfiocremcscmd.sub_command;
+		scf_cmd.scount = scfiocremcscmd.size;
+		scf_cmd.sbuf = &scfiocremcscmd.buf[0];
+		scf_cmd.rcount = SCF_S_CNT_15;
+		scf_cmd.rbuf = &rbuf.b[0];
+		scf_cmd.flag = SCF_USE_SSBUF;
+		break;
+	default:
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_remcscmd;
+	}
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/* On success, return the firmware reply (if any) to the caller */
+	if (ret == 0) {
+		scfiocremcscmd.size = scf_cmd.rbufleng;
+		if (scfiocremcscmd.size != 0) {
+			bcopy((void *)&rbuf.b[0],
+			    (void *)&scfiocremcscmd.buf[0],
+			    scfiocremcscmd.size);
+		}
+
+		if (ddi_copyout((void *)&scfiocremcscmd, (void *)arg,
+		    sizeof (scfiocremcscmd_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "ioctl ", 8);
+			ret = EFAULT;
+		}
+	}
+
+/*
+ * END_remcscmd
+ */
+	END_remcscmd:
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_ioc_remcsfile()
+ *
+ * Description: SCFIOCREMCSFILE ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_remcsfile(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_remcsfile() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scf_short_buffer_t rbuf;
+	scfiocremcsfile_t *scfiocremcsfile_p = NULL;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Request block is large; heap-allocate it rather than use the stack */
+	scfiocremcsfile_p = kmem_zalloc((size_t)(sizeof (scfiocremcsfile_t)),
+	    KM_SLEEP);
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)scfiocremcsfile_p,
+	    sizeof (scfiocremcsfile_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_remcsfile;
+	}
+
+	/* Payload must fit in a large SCF transfer buffer */
+	if (scfiocremcsfile_p->size > SCF_L_CNT_MAX) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_remcsfile;
+	}
+
+	scf_cmd.cmd = CMD_FILE_DOWNLOAD;
+	scf_cmd.subcmd = scfiocremcsfile_p->sub_command;
+
+	/* Set up buffers per sub-command of the file download protocol */
+	switch (scfiocremcsfile_p->sub_command) {
+	case SUB_FILEUP_READY:
+		/*
+		 * NOTE(review): this size check is redundant -- the same
+		 * SCF_L_CNT_MAX bound was already enforced above.
+		 */
+		if (scfiocremcsfile_p->size > SCF_L_CNT_MAX) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "ioctl ", 8);
+			ret = EINVAL;
+			goto END_remcsfile;
+		}
+		scf_cmd.scount = scfiocremcsfile_p->size;
+		scf_cmd.sbuf = &scfiocremcsfile_p->buf[0];
+		scf_cmd.rcount = SCF_S_CNT_15;
+		scf_cmd.rbuf = &rbuf.b[0];
+		scf_cmd.flag = SCF_USE_LSBUF;
+		break;
+
+	case SUB_FILEUP_SET:
+		/* Data transfer chunk: send only, no reply buffer */
+		scf_cmd.scount = scfiocremcsfile_p->size;
+		scf_cmd.sbuf = &scfiocremcsfile_p->buf[0];
+		scf_cmd.rcount = 0;
+		scf_cmd.flag = SCF_USE_L_BUF;
+		break;
+
+	case SUB_TRANSFER_STOP:
+		/* Abort the transfer: no data in either direction */
+		scf_cmd.scount = 0;
+		scf_cmd.rcount = 0;
+		scf_cmd.flag = SCF_USE_S_BUF;
+		break;
+
+	default:
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_remcsfile;
+	}
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/* On success, return the firmware reply (if any) to the caller */
+	if (ret == 0) {
+		scfiocremcsfile_p->size = scf_cmd.rbufleng;
+		if (scfiocremcsfile_p->size != 0) {
+			bcopy((void *)&rbuf.b[0],
+			    (void *)&scfiocremcsfile_p->buf[0],
+			    scfiocremcsfile_p->size);
+		}
+
+		if (ddi_copyout((void *)scfiocremcsfile_p, (void *)arg,
+		    sizeof (scfiocremcsfile_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "ioctl ", 8);
+			ret = EFAULT;
+		}
+	}
+
+/*
+ * END_remcsfile
+ */
+	END_remcsfile:
+
+	if (scfiocremcsfile_p) {
+		kmem_free((void *)scfiocremcsfile_p,
+		    (size_t)(sizeof (scfiocremcsfile_t)));
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_ioc_sparecmd()
+ *
+ * Description: SCFIOCSPARECMD ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_sparecmd(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_sparecmd() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scfiocsparecmd_t *scfiocsparecmd_p = NULL;
+	uint_t madm_scount;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Request block is large; heap-allocate it rather than use the stack */
+	scfiocsparecmd_p = kmem_zalloc((size_t)(sizeof (scfiocsparecmd_t)),
+	    KM_SLEEP);
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)scfiocsparecmd_p,
+	    sizeof (scfiocsparecmd_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_sparecmd;
+	}
+
+	/* Payload must fit in a large SCF transfer buffer */
+	if (scfiocsparecmd_p->size > SCF_L_CNT_MAX) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_sparecmd;
+	}
+
+	/*
+	 * The same user buffer serves as both send and receive buffer;
+	 * cexr[] carries the pass-through command/sub-command bytes.
+	 */
+	scf_cmd.cmd = CMD_SPARE;
+	scf_cmd.subcmd = scfiocsparecmd_p->spare_sub_command;
+	scf_cmd.sbuf = &scfiocsparecmd_p->buf[0];
+	scf_cmd.rbuf = &scfiocsparecmd_p->buf[0];
+	scf_cmd.cexr[0] = scfiocsparecmd_p->command;
+	scf_cmd.cexr[1] = scfiocsparecmd_p->sub_command;
+
+	/* Select buffer sizes per transfer shape: small/small, etc. */
+	switch (scfiocsparecmd_p->spare_sub_command) {
+	case SUB_SPARE_SS:
+		scf_cmd.scount = SCF_S_CNT_12;
+		scf_cmd.rcount = SCF_S_CNT_12;
+		scf_cmd.flag = SCF_USE_SSBUF;
+		break;
+
+	case SUB_SPARE_SL:
+		scf_cmd.scount = SCF_S_CNT_12;
+		scf_cmd.rcount = SCF_L_CNT_MAX;
+		scf_cmd.flag = SCF_USE_SLBUF;
+		break;
+
+	case SUB_SPARE_LS:
+		/*
+		 * Round the send count up to a 16-byte multiple
+		 * (SCF_LENGTH_16BYTE_CNV is presumably the ~0xf mask --
+		 * confirm against scfsys.h).
+		 */
+		madm_scount = (scfiocsparecmd_p->size + SCF_S_CNT_15) &
+		    SCF_LENGTH_16BYTE_CNV;
+		scf_cmd.scount = madm_scount;
+		scf_cmd.rcount = SCF_S_CNT_12;
+		scf_cmd.flag = SCF_USE_LSBUF;
+		break;
+
+	default:
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_sparecmd;
+	}
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/* On success, return the (in-place) reply and its length */
+	if (ret == 0) {
+		scfiocsparecmd_p->size = scf_cmd.rbufleng;
+		if (ddi_copyout((void *)scfiocsparecmd_p, (void *)arg,
+		    sizeof (scfiocsparecmd_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "ioctl ", 8);
+			ret = EFAULT;
+		}
+	}
+
+/*
+ * END_sparecmd
+ */
+	END_sparecmd:
+
+	if (scfiocsparecmd_p) {
+		kmem_free((void *)scfiocsparecmd_p,
+		    (size_t)(sizeof (scfiocsparecmd_t)));
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_ioc_setphpinfo()
+ *
+ * Description: SCFIOCSETPHPINFO ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_setphpinfo(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_setphpinfo() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scfsetphpinfo_t *scfsetphpinfo_p = NULL;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Request block is large; heap-allocate it rather than use the stack */
+	scfsetphpinfo_p = kmem_zalloc((size_t)(sizeof (scfsetphpinfo_t)),
+	    KM_SLEEP);
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)scfsetphpinfo_p,
+	    sizeof (scfsetphpinfo_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_setphpinfo;
+	}
+
+	/* Payload must fit in a large SCF transfer buffer */
+	if (scfsetphpinfo_p->size > SCF_L_CNT_MAX) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_setphpinfo;
+	}
+
+	/* PCI hot-plug configuration push: send only, no reply expected */
+	scf_cmd.cmd = CMD_DOMAIN_INFO;
+	scf_cmd.subcmd = SUB_PCI_HP_CONFIG;
+	scf_cmd.sbuf = &scfsetphpinfo_p->buf[0];
+	scf_cmd.scount = scfsetphpinfo_p->size;
+	scf_cmd.rcount = 0;
+	scf_cmd.flag = SCF_USE_L_BUF;
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+/*
+ * END_setphpinfo
+ */
+	END_setphpinfo:
+
+	if (scfsetphpinfo_p) {
+		kmem_free((void *)scfsetphpinfo_p,
+		    (size_t)(sizeof (scfsetphpinfo_t)));
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_ioc_pciresetreq()
+ *
+ * Description: SCFIOCPCIRESETREQ ioctl command processing.
+ *
+ */
+/* ARGSUSED */
+int
+scf_ioc_pciresetreq(intptr_t arg, int mode, int *rval_p, int u_mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_ioc_pciresetreq() "
+	int ret = 0;
+	struct scf_cmd scf_cmd;
+	scfpciresetreq_t *scfpciresetreq_p = NULL;
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* Request block is large; heap-allocate it rather than use the stack */
+	scfpciresetreq_p = kmem_zalloc((size_t)(sizeof (scfpciresetreq_t)),
+	    KM_SLEEP);
+
+	/* Copy in the user request; EFAULT on a bad user address */
+	if (ddi_copyin((void *)arg, (void *)scfpciresetreq_p,
+	    sizeof (scfpciresetreq_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EFAULT;
+		goto END_pciresetreq;
+	}
+
+	/* Payload must fit in a large SCF transfer buffer */
+	if (scfpciresetreq_p->size > SCF_L_CNT_MAX) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "ioctl ", 8);
+		ret = EINVAL;
+		goto END_pciresetreq;
+	}
+
+	/* PCI hot-plug reset request: large send, small (15 byte) reply */
+	scf_cmd.cmd = CMD_DOMAIN_INFO;
+	scf_cmd.subcmd = SUB_PHP_RESET;
+	scf_cmd.sbuf = &scfpciresetreq_p->sbuf[0];
+	scf_cmd.scount = scfpciresetreq_p->size;
+	scf_cmd.rbuf = &scfpciresetreq_p->rbuf[0];
+	scf_cmd.rcount = SCF_S_CNT_15;
+	scf_cmd.flag = SCF_USE_L_BUF;
+
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/* On success, copy the whole (reply-bearing) structure back out */
+	if (ret == 0) {
+		if (ddi_copyout((void *)scfpciresetreq_p, (void *)arg,
+		    sizeof (scfpciresetreq_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "ioctl ", 8);
+			ret = EFAULT;
+		}
+	}
+
+/*
+ * END_pciresetreq
+ */
+	END_pciresetreq:
+
+	if (scfpciresetreq_p) {
+		kmem_free((void *)scfpciresetreq_p,
+		    (size_t)(sizeof (scfpciresetreq_t)));
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_push_reportsense()
+ *
+ * Description: Set processing of SCFIOCGETREPORT information.
+ *
+ */
+int
+scf_push_reportsense(unsigned int rci_addr, unsigned char *sense,
+	time_t timestamp)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_push_reportsense() "
+	int top = scf_comtbl.report_sense_top;
+	scfreport_t *rsensep = scf_comtbl.report_sensep;
+	int overflow = 0;
+	int ret = 0;
+
+	/* Caller must hold the driver-wide lock protecting the ring */
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * A still-set flag at the write slot means the ring is full:
+	 * the oldest unread entry is about to be overwritten.
+	 */
+	if (rsensep[top].flag != 0) {
+		SCFDBGMSG(SCF_DBGFLAG_IOCTL, "reportsense overflow");
+		overflow = 1;
+		/* increment counter */
+		scf_comtbl.scf_rsense_overflow++;
+	}
+
+	/* Store the entry; only the first 4 sense bytes are kept */
+	rsensep[top].flag = 1;
+	rsensep[top].rci_addr = rci_addr;
+	bcopy((void *)&sense[0], (void *)&rsensep[top].report_sense[0], 4);
+	rsensep[top].timestamp = timestamp;
+
+	/* Advance the write index, wrapping at the pool size */
+	scf_comtbl.report_sense_top =
+	    ((scf_report_sense_pool_max - 1) == top) ? 0 : top + 1;
+
+	/* On overflow, drop the overwritten entry by moving "oldest" too */
+	if (overflow) {
+		scf_comtbl.report_sense_oldest = scf_comtbl.report_sense_top;
+	}
+
+	/* Always returns 0; ret exists for trace symmetry with the pops */
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_pop_reportsense()
+ *
+ * Description: Get processing of SCFIOCGETREPORT information.
+ *
+ */
+int
+scf_pop_reportsense(scfreport_t *rsense)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_pop_reportsense() "
+	int oldest = scf_comtbl.report_sense_oldest;
+	scfreport_t *drv_rsensep = scf_comtbl.report_sensep;
+	int ret = (-1);
+
+	/* Caller must hold the driver-wide lock protecting the ring */
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Return the oldest queued entry, if any.  Returns 0 with *rsense
+	 * filled in, or -1 when the ring is empty.
+	 */
+	if (drv_rsensep[oldest].flag != 0) {
+		rsense->rci_addr = drv_rsensep[oldest].rci_addr;
+		bcopy((void *)&drv_rsensep[oldest].report_sense[0],
+		    (void *)&rsense->report_sense[0], 4);
+		rsense->timestamp = drv_rsensep[oldest].timestamp;
+		/* clear flag */
+		drv_rsensep[oldest].flag = 0;
+		/* Advance the read index, wrapping at the pool size */
+		scf_comtbl.report_sense_oldest =
+		    ((scf_report_sense_pool_max - 1) == oldest)
+		    ? 0 : oldest + 1;
+		ret = 0;
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_push_getevent()
+ *
+ * Description: Set processing of SCFIOCGETEVENT information.
+ *
+ */
+int
+scf_push_getevent(unsigned char *event_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_push_getevent() "
+	int top = scf_comtbl.getevent_sense_top;
+	scfevent_t *scfevent_p = scf_comtbl.getevent_sensep;
+	int overflow = 0;
+	int ii;
+	time_t timestamp;
+	int ret = 1;
+
+	/* Caller must hold the driver-wide lock protecting the ring */
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Event check: only queue events whose code (byte 4 of the raw
+	 * interrupt-reason record) is in the registered code list.
+	 * Returns 0 if the event was queued, 1 if it was not of interest.
+	 */
+	for (ii = 0; ii < scf_comtbl.getevent_tbl.listcnt; ii++) {
+		if (event_p[4] == scf_comtbl.getevent_tbl.codelist[ii]) {
+			ret = 0;
+			break;
+		}
+	}
+	if (ret == 0) {
+		/* Event set */
+		if (scfevent_p[top].flag != 0) {
+			/* Ring full: the oldest unread entry is overwritten */
+			SCFDBGMSG(SCF_DBGFLAG_IOCTL, "getevent overflow");
+			overflow = 1;
+			/* increment counter */
+			scf_comtbl.scf_getevent_overflow++;
+		}
+		timestamp = ddi_get_time();
+		scfevent_p[top].flag = 1;
+		bcopy((void *)event_p, (void *)&scfevent_p[top].rci_addr,
+		    SCF_INT_REASON_SIZE);
+		scfevent_p[top].timestamp = timestamp;
+		/* Advance the write index, wrapping at the pool size */
+		scf_comtbl.getevent_sense_top =
+		    ((scf_getevent_pool_max - 1) == top) ? 0 : top + 1;
+		if (overflow) {
+			scf_comtbl.getevent_sense_oldest =
+			    scf_comtbl.getevent_sense_top;
+		}
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_pop_reportsense()
+ *
+ * Description: Get processing of SCFIOCGETEVENT information.
+ *
+ */
+int
+scf_pop_getevent(scfevent_t *event_p)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_pop_getevent() "
+	int oldest = scf_comtbl.getevent_sense_oldest;
+	scfevent_t *scfevent_p = scf_comtbl.getevent_sensep;
+	int ret = (-1);
+
+	/* Caller must hold the driver-wide lock protecting the ring */
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Return the oldest queued event, if any.  Returns 0 with
+	 * *event_p filled in, or -1 when the ring is empty.
+	 */
+	if (scfevent_p[oldest].flag != 0) {
+		bcopy((void *)&scfevent_p[oldest].rci_addr,
+		    (void *)&event_p->rci_addr, SCF_INT_REASON_SIZE);
+		event_p->timestamp = scfevent_p[oldest].timestamp;
+		/* clear flag */
+		scfevent_p[oldest].flag = 0;
+		/* Advance the read index, wrapping at the pool size */
+		scf_comtbl.getevent_sense_oldest =
+		    ((scf_getevent_pool_max - 1) == oldest)
+		    ? 0 : oldest + 1;
+		ret = 0;
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_valid_date()
+ *
+ * Description: Validity check processing of date.
+ *
+ */
+int
+scf_valid_date(int year, int month, int date)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_valid_date() "
+	int leap = 0;
+	int ret = 0;
+	/* Days per month: row 0 = common year, row 1 = leap year */
+	static int scf_m2d[2][12] = {
+		{ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 },
+		{ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }
+	};
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * Gregorian leap-year rule: divisible by 400 -> leap;
+	 * else divisible by 100 -> common; else divisible by 4 -> leap.
+	 */
+	if ((year % 400) == 0) {
+		leap = 1;
+	} else {
+		if ((year % 100) == 0) {
+			leap = 0;
+		} else {
+			if ((year % 4) == 0) {
+				leap = 1;
+			}
+		}
+	}
+	/*
+	 * Returns 0 if the day-of-month is valid, 1 if it is too large.
+	 * Caller must ensure 1 <= month <= 12: month indexes scf_m2d[].
+	 */
+	if (scf_m2d[leap][month - 1] < date) {
+		ret = 1;
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_check_pon_time()
+ *
+ * Description: Power-on time range check processing.
+ *
+ */
+int
+scf_check_pon_time(scfautopwrtime_t *ptime)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_check_pon_time() "
+	int ret = (-1);
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/* All-zero date/time means "no power-on timer set" and is valid */
+	if ((ptime->pon_year == 0) && (ptime->pon_month == 0) &&
+	    (ptime->pon_date == 0) && (ptime->pon_hour == 0) &&
+	    (ptime->pon_minute == 0)) {
+		ret = 0;
+		goto END_check_pon_time;
+	}
+
+	/*
+	 * Range-check each field; returns 0 when valid, -1 otherwise.
+	 * The < 0 checks imply the hour/minute fields are signed.
+	 */
+	if ((ptime->pon_year < 1970) || (ptime->pon_year > 9999)) {
+		goto END_check_pon_time;
+	}
+	if ((ptime->pon_month < 1) || (ptime->pon_month > 12)) {
+		goto END_check_pon_time;
+	}
+	if (ptime->pon_date < 1) {
+		goto END_check_pon_time;
+	}
+	if ((ptime->pon_hour < 0) || (ptime->pon_hour > 23)) {
+		goto END_check_pon_time;
+	}
+	if ((ptime->pon_minute < 0) || (ptime->pon_minute > 59)) {
+		goto END_check_pon_time;
+	}
+	/* Month is validated above, as scf_valid_date() requires */
+	if (scf_valid_date(ptime->pon_year,
+	    ptime->pon_month, ptime->pon_date)) {
+		goto END_check_pon_time;
+	}
+	ret = 0;
+
+/*
+ * END_check_pon_time
+ */
+	END_check_pon_time:
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_check_poff_time()
+ *
+ * Description: Power-off time range check processing.
+ *
+ */
+int
+scf_check_poff_time(scfautopwrtime_t *ptime)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_check_poff_time() "
+	int ret = (-1);
+
+	SCFDBGMSG(SCF_DBGFLAG_IOCTL, SCF_FUNC_NAME ": start");
+
+	/*
+	 * All-zero date/time means "no power-off timer set"; it is only
+	 * valid if the flag and sarea fields are also zero.
+	 */
+	/* all zero? */
+	if ((ptime->poff_year == 0) && (ptime->poff_month == 0) &&
+	    (ptime->poff_date == 0) && (ptime->poff_hour == 0) &&
+	    (ptime->poff_minute == 0)) {
+		if ((ptime->flag != 0) || (ptime->sarea != 0)) {
+			goto END_check_poff_time;
+		}
+		ret = 0;
+		goto END_check_poff_time;
+	}
+
+	/* Range-check each field; returns 0 when valid, -1 otherwise */
+	if ((ptime->poff_year < 1970) || (ptime->poff_year > 9999)) {
+		goto END_check_poff_time;
+	}
+	if ((ptime->poff_month < 1) || (ptime->poff_month > 12)) {
+		goto END_check_poff_time;
+	}
+	if (ptime->poff_date < 1) {
+		goto END_check_poff_time;
+	}
+	if ((ptime->poff_hour < 0) || (ptime->poff_hour > 23)) {
+		goto END_check_poff_time;
+	}
+	if ((ptime->poff_minute < 0) || (ptime->poff_minute > 59)) {
+		goto END_check_poff_time;
+	}
+	/* Month is validated above, as scf_valid_date() requires */
+	if (scf_valid_date(ptime->poff_year, ptime->poff_month,
+	    ptime->poff_date)) {
+		goto END_check_poff_time;
+	}
+	ret = 0;
+
+/*
+ * END_check_poff_time
+ */
+	END_check_poff_time:
+	SCFDBGMSG1(SCF_DBGFLAG_IOCTL,
+	    SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfostoescf.c b/usr/src/uts/sun4u/opl/io/scfd/scfostoescf.c
new file mode 100644
index 0000000000..8d56e398cf
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfostoescf.c
@@ -0,0 +1,307 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+
+/*
+ * scf_service_putinfo()
+ *
+ * Description: Data request to send processing from the OS to ESCF.
+ *
+ */
+/* ARGSUSED */
+int
+scf_service_putinfo(uint32_t key, uint8_t type, uint32_t transid,
+	uint32_t length, void *datap)
+{
+#define SCF_FUNC_NAME "scf_service_putinfo() "
+	scf_cmd_t scf_cmd; /* SCF command table */
+	uchar_t *bufp = NULL; /* Working value : buff addr */
+	int ret = 0; /* Return value */
+	timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+	int tm_stop_cnt;
+
+	SCFDBGMSG(SCF_DBGFLAG_SRV, SCF_FUNC_NAME ": start");
+	SC_DBG_DRV_TRACE(TC_S_PUTINFO | TC_IN, __LINE__, &key, sizeof (key));
+
+	/* SCF command table clear */
+	bzero((void *)&scf_cmd, sizeof (scf_cmd_t));
+
+	/* Lock driver mutex */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check "key": only the ESCF service key is accepted */
+	if (key != KEY_ESCF) {
+		/* Invalid "key" */
+		SC_DBG_DRV_TRACE(TC_S_PUTINFO | TC_ERRCD, __LINE__, &key,
+		    sizeof (key));
+		ret = EINVAL;
+		goto END_service_putinfo;
+	}
+
+	/* Check "length" and "datap": non-zero length needs a buffer */
+	if ((length != 0) && (datap == NULL)) {
+		/* Invalid "length" or "datap" */
+		SC_DBG_DRV_TRACE(TC_S_PUTINFO | TC_ERRCD, __LINE__, &length,
+		    sizeof (length));
+		ret = EINVAL;
+		goto END_service_putinfo;
+	}
+
+	/* Check "length" is max length */
+	if (length > SCF_L_CNT_MAX) {
+		/* Invalid "length" */
+		SC_DBG_DRV_TRACE(TC_S_PUTINFO | TC_ERRCD, __LINE__, &length,
+		    sizeof (length));
+		ret = EINVAL;
+		goto END_service_putinfo;
+	}
+
+	/*
+	 * Check putinfo exec flag: reject re-entry while another putinfo
+	 * is in progress (the flag is protected by all_mutex).
+	 */
+	if (scf_comtbl.putinfo_exec_flag == FLAG_ON) {
+		/* Multiplex, putinfo */
+		SC_DBG_DRV_TRACE(TC_S_PUTINFO, __LINE__,
+		    &scf_comtbl.putinfo_exec_flag,
+		    sizeof (scf_comtbl.putinfo_exec_flag));
+		ret = EBUSY;
+		goto END_service_putinfo;
+	}
+
+	/* putinfo exec flag ON */
+	scf_comtbl.putinfo_exec_flag = FLAG_ON;
+
+	/* Check "length" is 0 */
+	if (length != 0) {
+		/* Send buffer allocation */
+		bufp = (uchar_t *)kmem_zalloc(length, KM_SLEEP);
+
+		/* Data copy to send buffer */
+		bcopy(datap, bufp, length);
+	}
+
+	/* Make SCF command: send-only OS -> XSCF control request */
+	scf_cmd.flag = SCF_USE_L_BUF;
+	scf_cmd.cmd = CMD_OS_XSCF_CTL;
+	scf_cmd.subcmd = type;
+	scf_cmd.sbuf = bufp;
+	scf_cmd.scount = length;
+	scf_cmd.rbuf = NULL;
+	scf_cmd.rcount = 0;
+
+	/* Send SCF command */
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	/* Check send buffer */
+	if (bufp != NULL) {
+		/* Send data release */
+		kmem_free((void *)bufp, length);
+	}
+
+	/* putinfo exec flag OFF */
+	scf_comtbl.putinfo_exec_flag = FLAG_OFF;
+
+/*
+ * END_service_putinfo
+ */
+	END_service_putinfo:
+
+	/* Collect the timers which need to be stopped */
+	tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+	/* Unlock driver mutex */
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/*
+	 * Timer stop.  NOTE(review): untimeout is done after dropping
+	 * all_mutex, presumably to avoid deadlock with timeout handlers
+	 * that take the same mutex -- confirm.
+	 */
+	if (tm_stop_cnt != 0) {
+		scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+	}
+
+	SC_DBG_DRV_TRACE(TC_S_PUTINFO | TC_OUT, __LINE__, &ret, sizeof (ret));
+	SCFDBGMSG1(SCF_DBGFLAG_SRV, SCF_FUNC_NAME ": end return = %d", ret);
+
+	return (ret);
+}
+
+
+/*
+ * scf_service_getinfo()
+ *
+ * Description: Data request to receive processing from the OS to ESCF.
+ *
+ */
+int
+scf_service_getinfo(uint32_t key, uint8_t type, uint32_t transid,
+	uint32_t *lengthp, void *datap)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_service_getinfo() "
+	scf_cmd_t scf_cmd; /* SCF command table */
+	scf_short_buffer_t sbuf; /* Send buffer */
+	uchar_t *bufp = NULL; /* Working value : buff addr */
+	uint_t wkleng; /* Working value : length */
+	int ret = 0; /* Return value */
+	timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+	int tm_stop_cnt;
+
+	SCFDBGMSG(SCF_DBGFLAG_SRV, SCF_FUNC_NAME ": start");
+	SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_IN, __LINE__, &key, sizeof (key));
+
+	/* SCF command table/Send buffer clear */
+	bzero((void *)&scf_cmd, sizeof (scf_cmd_t));
+	bzero((void *)&sbuf.b[0], SCF_S_CNT_16);
+
+	/* Lock driver mutex */
+	mutex_enter(&scf_comtbl.all_mutex);
+
+	/* Check "key": only the ESCF service key is accepted */
+	if (key != KEY_ESCF) {
+		/* Invalid "key" */
+		SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_ERRCD, __LINE__, &key,
+		    sizeof (key));
+		ret = EINVAL;
+		goto END_service_getinfo;
+	}
+
+	/* Check "lengthp" and "datap" */
+	if (lengthp == NULL) {
+		/* Invalid "lengthp" */
+		SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_ERRCD, __LINE__, &lengthp,
+		    sizeof (lengthp));
+		ret = EINVAL;
+		goto END_service_getinfo;
+	}
+
+	/* Check "lengthp" is max length */
+	if (*lengthp > SCF_L_CNT_MAX) {
+		/* Invalid "lengthp" */
+		SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_ERRCD, __LINE__, lengthp,
+		    sizeof (*lengthp));
+		ret = EINVAL;
+		goto END_service_getinfo;
+	}
+
+	/* Check, parameter "length" and "datap" */
+	if ((*lengthp != 0) && (datap == NULL)) {
+		/* Invalid "lengthp" or "datap" */
+		SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_ERRCD, __LINE__, lengthp,
+		    sizeof (*lengthp));
+		ret = EINVAL;
+		goto END_service_getinfo;
+	}
+
+	/*
+	 * Check getinfo exec flag: reject re-entry while another getinfo
+	 * is in progress (the flag is protected by all_mutex).
+	 */
+	if (scf_comtbl.getinfo_exec_flag == FLAG_ON) {
+		/* Multiplex, getinfo */
+		SC_DBG_DRV_TRACE(TC_S_GETINFO, __LINE__,
+		    &scf_comtbl.getinfo_exec_flag,
+		    sizeof (scf_comtbl.getinfo_exec_flag));
+		ret = EBUSY;
+		goto END_service_getinfo;
+	}
+
+	/* getinfo exec flag ON */
+	scf_comtbl.getinfo_exec_flag = FLAG_ON;
+
+	/* Check "lengthp" is 0 */
+	if (*lengthp != 0) {
+		/*
+		 * Receive buffer allocation
+		 */
+		wkleng = *lengthp;
+		bufp = (uchar_t *)kmem_zalloc(wkleng, KM_SLEEP);
+	} else {
+		wkleng = 0;
+	}
+
+	/* Make SCF command: transaction id rides in the send buffer */
+	sbuf.four_bytes_access[0] = transid;
+	scf_cmd.flag = SCF_USE_SLBUF;
+	scf_cmd.cmd = CMD_OS_XSCF_CTL;
+	scf_cmd.subcmd = type;
+	scf_cmd.scount = SCF_S_CNT_15;
+	scf_cmd.sbuf = &sbuf.b[0];
+	scf_cmd.rcount = wkleng;
+	scf_cmd.rbuf = bufp;
+
+	/* Send SCF command */
+	ret = scf_send_cmd_check_bufful(&scf_cmd);
+
+	/* Check return code */
+	if (ret == 0) {
+		/* Set receive length: never report more than was received */
+		if (*lengthp > scf_cmd.rbufleng) {
+			/* Set receive data length */
+			*lengthp = scf_cmd.rbufleng;
+		}
+
+		/* Check receive data length is not 0 */
+		if (*lengthp != 0) {
+			/* Data copy to "datap" */
+			bcopy(bufp, datap, *lengthp);
+		}
+	}
+
+	/* Check receive buffer */
+	if (bufp != NULL) {
+		/*
+		 * Receive data release
+		 */
+		kmem_free((void *)bufp, wkleng);
+	}
+
+	/* getinfo exec flag OFF */
+	scf_comtbl.getinfo_exec_flag = FLAG_OFF;
+
+/*
+ * END_service_getinfo
+ */
+	END_service_getinfo:
+
+	/* Collect the timers which need to be stopped */
+	tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+	/* Unlock driver mutex */
+	mutex_exit(&scf_comtbl.all_mutex);
+
+	/*
+	 * Timer stop.  NOTE(review): untimeout is done after dropping
+	 * all_mutex, presumably to avoid deadlock with timeout handlers
+	 * that take the same mutex -- confirm.
+	 */
+	if (tm_stop_cnt != 0) {
+		scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+	}
+
+	SC_DBG_DRV_TRACE(TC_S_GETINFO | TC_OUT, __LINE__, &ret, sizeof (ret));
+	SCFDBGMSG1(SCF_DBGFLAG_SRV, SCF_FUNC_NAME ": end return = %d", ret);
+
+	return (ret);
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfparam.c b/usr/src/uts/sun4u/opl/io/scfd/scfparam.c
new file mode 100644
index 0000000000..d1cadc0598
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfparam.c
@@ -0,0 +1,147 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfsys.h>
+
+/*
+ * Common table
+ */
+scf_comtbl_t scf_comtbl; /* SCF driver common table */
+void *scfstate; /* root of soft state */
+char *scf_driver_name = SCF_DRIVER_NAME; /* SCF driver name */
+
+/*
+ * SCF driver control mode
+ */
+uint_t scf_halt_proc_mode = HALTPROC_STOP; /* SCFHALT after processing mode */
+uint_t scf_last_detach_mode = 0; /* Last detach mode */
+
+/*
+ * SRAM trace date size
+ */
+uint_t scf_sram_trace_data_size = 12; /* Get SRAM trace data size */
+
+/*
+ * Wait timer value (Micro-second)
+ */
+uint_t scf_rdctrl_sense_wait = 60000000;
+ /* SCFIOCRDCTRL wait timer value (60s) */
+
+/*
+ * Wait timer value (Milli-second)
+ */
+uint_t scf_buf_ful_rtime = 500;
+ /* Buff full wait retry timer value (500ms) */
+uint_t scf_rci_busy_rtime = 3000; /* RCI busy wait retry timer value (3s) */
+
+/*
+ * Re-try counter
+ */
+uint_t scf_buf_ful_rcnt = 10; /* Buff full retry counter */
+uint_t scf_rci_busy_rcnt = 15; /* RCI busy retry counter */
+uint_t scf_tesum_rcnt = 1; /* Tx sum retry counter */
+uint_t scf_resum_rcnt = 1; /* Rx sum retry counter */
+uint_t scf_cmd_to_rcnt = 1; /* Command to retry counter */
+uint_t scf_devbusy_wait_rcnt = 6; /* Command device busy retry counter */
+uint_t scf_online_wait_rcnt = 6; /* SCF online retry counter */
+uint_t scf_path_change_max = 4; /* SCF path change retry counter */
+
+/*
+ * Max value
+ */
+uint_t scf_report_sense_pool_max = 96; /* Report sense max */
+uint_t scf_getevent_pool_max = 96; /* SCFIOCGETEVENT max */
+uint_t scf_rci_max = 32 + 94; /* RCI device max */
+uint_t scf_rxbuff_max_size = 4096; /* SCF command data division max size */
+
+/*
+ * Poff factor (reported on shutdown start)
+ */
+unsigned char scf_poff_factor[2][3] = {
+ { 0x00, 0x00, 0x00 }, /* Shutdown (except pfail) */
+ { 0x01, 0x00, 0x00 }}; /* Shutdown by pfail */
+
+/*
+ * Alive check parameter
+ */
+uchar_t scf_alive_watch_code = 0x10; /* alive code for SCF driver */
+uchar_t scf_alive_phase_code = 0x00; /* alive phase code */
+uchar_t scf_alive_interval_time = INTERVAL_TIME_DEF; /* interval time */
+uchar_t scf_alive_monitor_time = MONITOR_TIME_DEF; /* monitor timeout */
+ushort_t scf_alive_panic_time = PANIC_TIME_DEF; /* panic timeout */
+
+uchar_t scf_acr_phase_code = 0x00; /* Alive check register phase code */
+
+/*
+ * FMEMA interface
+ */
+caddr_t scf_avail_cmd_reg_vaddr = 0; /* SCF Command register address */
+
+/*
+ * Send break interface
+ */
+int scf_dm_secure_mode = 0; /* secure mode */
+
+/*
+ * ioctl control value and flag
+ */
+int scf_save_hac_flag = 0; /* Host address disp flag */
+scfhac_t scf_save_hac; /* Host address disp save */
+
+/*
+ * Register read sync value
+ */
+uint8_t scf_rs8;
+uint16_t scf_rs16;
+uint32_t scf_rs32;
+
+/*
+ * Panic value
+ */
+uint_t scf_panic_reported = 0; /* Panic report after */
+uint_t scf_panic_report_maxretry = 15; /* Same as busy_maxretry */
+uint_t scf_cmdend_wait_time_panic = 1000;
+ /* SCF command end wait timer value (1s) */
+uint_t scf_cmdend_wait_rcnt_panic = 4; /* SCF command end retry counter */
+
+uint_t scf_panic_exec_wait_time = 100; /* Panic wait timer value (100ms) */
+uint_t scf_panic_exec_flag = 0; /* Panic exec flag */
+uint_t scf_panic_exec_flag2 = 0; /* Panic exec flag (report send) */
+
+/*
+ * Panic trace
+ */
+ushort_t scf_panic_trc_w_off = 0; /* Panic trcae next write offset */
+uint16_t scf_panic_trc_command = 0; /* Panic SCF command register memo */
+uint16_t scf_panic_trc_status = 0; /* Panic SCF status register memo */
+ushort_t scf_panic_trc[16]; /* Panic trace area */
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfreg.c b/usr/src/uts/sun4u/opl/io/scfd/scfreg.c
new file mode 100644
index 0000000000..bfb8d40eba
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfreg.c
@@ -0,0 +1,1977 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/kmem.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+
+/*
+ * SCF command send control area save.  scf_send_cmd() copies the
+ * caller's scf_cmd (and its Tx buffer) into these before handing the
+ * command to the hardware, and copies the results back after the
+ * completion wakeup; serialized by scf_comtbl.all_mutex.
+ */
+struct scf_cmd scfcmd_save;
+uchar_t scf_sbuf_save[SCF_L_CNT_MAX];
+uchar_t scf_rbuf_save[SCF_L_CNT_MAX];
+
+/*
+ * Function list (forward declarations for this file)
+ */
+int scf_map_regs(dev_info_t *dip, scf_state_t *statep);
+void scf_unmap_regs(scf_state_t *statep);
+int scf_send_cmd_check_bufful(struct scf_cmd *scfcmdp);
+int scf_send_cmd(struct scf_cmd *scfcmdp);
+void scf_i_send_cmd(struct scf_cmd *scfcmdp, struct scf_state *statep);
+void scf_p_send_cmd(struct scf_cmd *scfcmdp, struct scf_state *statep);
+int scf_path_check(scf_state_t **statep);
+int scf_offline_check(scf_state_t *statep, uint_t timer_exec_flag);
+int scf_cmdbusy_check(scf_state_t *statep);
+void scf_alivecheck_start(scf_state_t *statep);
+void scf_alivecheck_stop(scf_state_t *statep);
+void scf_forbid_intr(struct scf_state *statep);
+void scf_permit_intr(struct scf_state *statep, int flag);
+int scf_check_state(scf_state_t *statep);
+void scf_chg_scf(scf_state_t *statep, int status);
+void scf_del_queue(scf_state_t *statep);
+int scf_make_send_cmd(struct scf_cmd *scfcmdp, uint_t flag);
+void scf_sram_trace_init(struct scf_state *statep);
+void scf_sram_trace(struct scf_state *statep, uint8_t log_id);
+
+/*
+ * External function (DSCP interface stop; defined in scfdscp.c)
+ */
+extern void scf_dscp_stop(uint32_t factor);
+
+
+/*
+ * scf_map_regs()
+ *
+ * Description: Map the SCF device registers and SRAM areas into kernel
+ *		virtual address space and record every successful mapping
+ *		in statep->resource_flag (S_DID_REG1..S_DID_REG6) so that
+ *		scf_unmap_regs() can release exactly what was created.
+ *
+ * Arguments:	dip    - devinfo node of the SCF device.
+ *		statep - per-device soft state receiving the mappings.
+ *
+ * Returns:	0 on success, 1 on failure.  On failure, mappings that
+ *		were already made remain flagged in resource_flag; the
+ *		caller is expected to clean up via scf_unmap_regs().
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+int
+scf_map_regs(dev_info_t *dip, scf_state_t *statep)
+{
+#define	SCF_FUNC_NAME		"scf_map_regs() "
+	int ret = 1;
+	uint32_t wkoffset = 0;
+
+	/* Big-endian, strictly ordered access for all SCF mappings */
+	ddi_device_acc_attr_t access_attr = {
+		DDI_DEVICE_ATTR_V0,
+		DDI_STRUCTURE_BE_ACC,
+		DDI_STRICTORDER_ACC
+	};
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	SCF_DBG_TEST_MAP_REGS(statep);
+
+	/* map register 1 : SCF register */
+	if (ddi_regs_map_setup(dip, REG_INDEX_SCF,
+	    (caddr_t *)&statep->scf_regs, 0, 0, &access_attr,
+	    &statep->scf_regs_handle) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_regs_map_setup failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	statep->resource_flag |= S_DID_REG1;
+
+	/* map register 2 : SCF control register */
+	if (ddi_regs_map_setup(dip, REG_INDEX_SCFCNTL,
+	    (caddr_t *)&statep->scf_regs_c, 0, 0, &access_attr,
+	    &statep->scf_regs_c_handle) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_regs_map_setup failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	statep->resource_flag |= S_DID_REG2;
+
+	/*
+	 * get size of register 3 : SCF DSCP SRAM.  The reported size
+	 * must cover at least SRAM_MAX_DSCP; anything larger is clamped
+	 * so the mapping never exceeds what the driver uses.
+	 */
+	if (ddi_dev_regsize(dip, REG_INDEX_DSCPSRAM,
+	    &statep->scf_dscp_sram_len) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	/* check size */
+	if (statep->scf_dscp_sram_len < SRAM_MAX_DSCP) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	} else if (statep->scf_dscp_sram_len > SRAM_MAX_DSCP) {
+		statep->scf_dscp_sram_len = SRAM_MAX_DSCP;
+	}
+	/* map register 3 : SCF DSCP SRAM */
+	if (ddi_regs_map_setup(dip, REG_INDEX_DSCPSRAM,
+	    (caddr_t *)&statep->scf_dscp_sram, 0,
+	    (offset_t)statep->scf_dscp_sram_len, &access_attr,
+	    &statep->scf_dscp_sram_handle) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_regs_map_setup failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	statep->resource_flag |= S_DID_REG3;
+
+	/* get size of register 4 : SCF system SRAM (same clamp scheme) */
+	if (ddi_dev_regsize(dip, REG_INDEX_SYSTEMSRAM,
+	    &statep->scf_sys_sram_len) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	/* check size */
+	if (statep->scf_sys_sram_len < SRAM_MAX_SYSTEM) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	} else if (statep->scf_sys_sram_len > SRAM_MAX_SYSTEM) {
+		statep->scf_sys_sram_len = SRAM_MAX_SYSTEM;
+	}
+	/* map register 4 : SCF system SRAM */
+	if (ddi_regs_map_setup(dip, REG_INDEX_SYSTEMSRAM,
+	    (caddr_t *)&statep->scf_sys_sram, 0,
+	    (offset_t)statep->scf_sys_sram_len, &access_attr,
+	    &statep->scf_sys_sram_handle) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_regs_map_setup failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	statep->resource_flag |= S_DID_REG4;
+
+	/* get size of register 5 : SCF interface block */
+	if (ddi_dev_regsize(dip, REG_INDEX_INTERFACE,
+	    &statep->scf_interface_len) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	/* check size: must hold a full scf_interface_t; map only that */
+	if (statep->scf_interface_len < sizeof (scf_interface_t)) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_dev_regsize failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	} else {
+		statep->scf_interface_len = sizeof (scf_interface_t);
+	}
+	/* map register 5 : SCF interface block */
+	if (ddi_regs_map_setup(dip, REG_INDEX_INTERFACE,
+	    (caddr_t *)&statep->scf_interface, 0,
+	    (offset_t)statep->scf_interface_len, &access_attr,
+	    &statep->scf_interface_handle) != DDI_SUCCESS) {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+		cmn_err(CE_WARN,
+		    "%s: scf_map_regs: ddi_regs_map_setup failed.\n",
+		    scf_driver_name);
+		goto END_map_regs;
+	}
+	statep->resource_flag |= S_DID_REG5;
+
+	/*
+	 * get size of register : SRAM driver trace.  This area is
+	 * optional: it is mapped only when the interface block
+	 * advertises a non-zero offset and size for it.
+	 */
+	wkoffset = SCF_DDI_GET32(statep, statep->scf_interface_handle,
+	    &statep->scf_interface->DRVTRC_OFFSET);
+	statep->scf_reg_drvtrc_len =
+	    SCF_DDI_GET32(statep, statep->scf_interface_handle,
+	    &statep->scf_interface->DRVTRC_SIZE);
+
+	if ((wkoffset != 0) && (statep->scf_reg_drvtrc_len != 0)) {
+		/* map register : SRAM driver trace */
+		if (ddi_regs_map_setup(dip, REG_INDEX_INTERFACE,
+		    (caddr_t *)&statep->scf_reg_drvtrc, wkoffset,
+		    (offset_t)statep->scf_reg_drvtrc_len, &access_attr,
+		    &statep->scf_reg_drvtrc_handle) != DDI_SUCCESS) {
+			SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "map_regs", 8);
+			cmn_err(CE_WARN,
+			    "%s: scf_map_regs: "
+			    "ddi_regs_map_setup failed.\n",
+			    scf_driver_name);
+			goto END_map_regs;
+		}
+		statep->resource_flag |= S_DID_REG6;
+	}
+
+	/* SRAM trace initialize */
+	scf_sram_trace_init(statep);
+
+	ret = 0;
+
+/*
+ * END_map_regs
+ */
+	END_map_regs:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_unmap_regs()
+ *
+ * Description: Release every register/SRAM mapping that scf_map_regs()
+ *		recorded in statep->resource_flag, clearing each
+ *		S_DID_REG* bit as its handle is freed.  Safe to call
+ *		after a partial scf_map_regs() failure: only the
+ *		mappings actually flagged are freed.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+void
+scf_unmap_regs(scf_state_t *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_unmap_regs() "
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	SCF_DBG_TEST_UNMAP_REGS(statep);
+
+	/* Register and SRAM un-map */
+	if (statep->resource_flag & S_DID_REG1) {
+		ddi_regs_map_free(&statep->scf_regs_handle);
+		statep->resource_flag &= ~S_DID_REG1;
+	}
+
+	if (statep->resource_flag & S_DID_REG2) {
+		ddi_regs_map_free(&statep->scf_regs_c_handle);
+		statep->resource_flag &= ~S_DID_REG2;
+	}
+
+	if (statep->resource_flag & S_DID_REG3) {
+		ddi_regs_map_free(&statep->scf_dscp_sram_handle);
+		statep->resource_flag &= ~S_DID_REG3;
+	}
+
+	if (statep->resource_flag & S_DID_REG4) {
+		ddi_regs_map_free(&statep->scf_sys_sram_handle);
+		statep->resource_flag &= ~S_DID_REG4;
+	}
+
+	if (statep->resource_flag & S_DID_REG5) {
+		ddi_regs_map_free(&statep->scf_interface_handle);
+		statep->resource_flag &= ~S_DID_REG5;
+	}
+
+	if (statep->resource_flag & S_DID_REG6) {
+		ddi_regs_map_free(&statep->scf_reg_drvtrc_handle);
+		statep->resource_flag &= ~S_DID_REG6;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_send_cmd_check_bufful()
+ *
+ * Description: Send an SCF command via scf_send_cmd() and, while the
+ *		XSCF answers BUF_FUL (buffer full) or RCI_BUSY, sleep
+ *		for the tunable retry interval (scf_buf_ful_rtime /
+ *		scf_rci_busy_rtime, in ms) and resend, up to
+ *		scf_buf_ful_rcnt / scf_rci_busy_rcnt times.  When the
+ *		retries are exhausted, a warning naming the active SCF
+ *		path and the command code is logged.
+ *
+ * Returns:	the last scf_send_cmd() result, or EINTR when a signal
+ *		interrupts one of the retry waits.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed); the
+ *		mutex is dropped inside cv_timedwait_sig().
+ */
+int
+scf_send_cmd_check_bufful(struct scf_cmd *scfcmdp)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_send_cmd_check_bufful() "
+	int ret = 0;
+	int buf_ful_cnt = scf_buf_ful_rcnt;
+	int rci_busy_cnt = scf_rci_busy_rcnt;
+	clock_t lb;
+	/*
+	 * Must be initialized: when no SCF path is active neither
+	 * scf_exec_p nor scf_path_p is set, and the warning block at
+	 * the end would otherwise test an uninitialized pointer.
+	 */
+	struct scf_state *statep = NULL;
+	int cv_ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	ret = scf_send_cmd(scfcmdp);
+
+	while (((scfcmdp->stat0 == BUF_FUL) && (buf_ful_cnt != 0)) ||
+	    ((scfcmdp->stat0 == RCI_BUSY) && (rci_busy_cnt != 0))) {
+		if (scfcmdp->stat0 == BUF_FUL) {
+			buf_ful_cnt--;
+			/* Sleep scf_buf_ful_rtime ms before the resend */
+			lb = ddi_get_lbolt();
+			lb += drv_usectohz(SCF_MIL2MICRO(scf_buf_ful_rtime));
+			cv_ret = 0;
+			/* cv_timedwait_sig() returns -1 on timeout */
+			while (cv_ret != (-1)) {
+				SC_DBG_DRV_TRACE(TC_T_WAIT, __LINE__,
+				    &scf_comtbl.cmdbusy_cv,
+				    sizeof (kcondvar_t));
+				if ((cv_ret =
+				    cv_timedwait_sig(&scf_comtbl.cmdbusy_cv,
+				    &scf_comtbl.all_mutex, lb)) == 0) {
+					SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+					    &scf_comtbl.cmdbusy_cv,
+					    sizeof (kcondvar_t));
+					ret = EINTR;
+					goto END_send_cmd_check_bufful;
+				}
+			}
+		} else if (scfcmdp->stat0 == RCI_BUSY) {
+			rci_busy_cnt--;
+			/* Sleep scf_rci_busy_rtime ms before the resend */
+			lb = ddi_get_lbolt();
+			lb += drv_usectohz(SCF_MIL2MICRO(scf_rci_busy_rtime));
+			cv_ret = 0;
+			while (cv_ret != (-1)) {
+				SC_DBG_DRV_TRACE(TC_T_WAIT, __LINE__,
+				    &scf_comtbl.cmdbusy_cv,
+				    sizeof (kcondvar_t));
+				if ((cv_ret =
+				    cv_timedwait_sig(&scf_comtbl.cmdbusy_cv,
+				    &scf_comtbl.all_mutex, lb)) == 0) {
+					SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+					    &scf_comtbl.cmdbusy_cv,
+					    sizeof (kcondvar_t));
+					ret = EINTR;
+					goto END_send_cmd_check_bufful;
+				}
+			}
+		} else {
+			break;
+		}
+		ret = scf_send_cmd(scfcmdp);
+	}
+
+	/* Pick the active device path for the warning messages below */
+	if (scf_comtbl.scf_exec_p) {
+		statep = scf_comtbl.scf_exec_p;
+	} else if (scf_comtbl.scf_path_p) {
+		statep = scf_comtbl.scf_path_p;
+	}
+	if (statep != NULL) {
+		if ((scfcmdp->stat0 == BUF_FUL) && (buf_ful_cnt == 0)) {
+			cmn_err(CE_WARN,
+			    "%s,Buffer busy occurred in XSCF. "
+			    "SCF command = 0x%02x%02x\n",
+			    &statep->pathname[0],
+			    scfcmdp->subcmd, scfcmdp->cmd);
+		} else if ((scfcmdp->stat0 == RCI_BUSY) &&
+		    (rci_busy_cnt == 0)) {
+			cmn_err(CE_WARN,
+			    "%s,RCI busy occurred in XSCF. "
+			    "SCF command = 0x%02x%02x\n",
+			    &statep->pathname[0],
+			    scfcmdp->subcmd, scfcmdp->cmd);
+		}
+	}
+
+/*
+ * END_send_cmd_check_bufful
+ */
+	END_send_cmd_check_bufful:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+
+}
+
+
+/*
+ * scf_send_cmd()
+ *
+ * Description: Synchronized SCF command send processing.  Serializes
+ *		callers on cmd_busy/cmd_cv, waits until the SCF path is
+ *		online and the hardware command register is free, then
+ *		issues the command via scf_i_send_cmd() using the
+ *		scfcmd_save copy and sleeps on cmdend_cv until the
+ *		completion interrupt path wakes it and fills in the
+ *		results.  scfcmdp->flag also carries control requests:
+ *		SCF_USE_START / SCF_USE_STOP re-start / stop the send
+ *		sync, and SCF_USE_SP bypasses the caller queueing.
+ *
+ * Returns:	0 on success; EINTR when a signal interrupts a wait;
+ *		EBUSY while suspended (or path lost while waiting);
+ *		EIO when no SCF path exists or the command failed;
+ *		ENOTSUP when the XSCF reports the command unsupported.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex; the mutex is
+ *		dropped while sleeping in cv_wait_sig().
+ */
+int
+scf_send_cmd(struct scf_cmd *scfcmdp)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_send_cmd() "
+	struct scf_state *statep;
+	int ret = 0;
+	int offline_ret;
+	int cmdbusy_ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	scfcmdp->stat0 = 0;
+	/* Check SCF command send sync re-start */
+	if (scfcmdp->flag == SCF_USE_START) {
+		if (scf_comtbl.path_stop_flag != 0) {
+			/* Check path stop */
+			scf_comtbl.path_stop_flag = 0;
+			goto END_scf_send_cmd;
+		}
+		goto END_scf_send_cmd99;
+	}
+	/* Check SCF command send sync re-stop */
+	if ((scfcmdp->flag == SCF_USE_STOP) &&
+	    (scf_comtbl.path_stop_flag != 0)) {
+		goto STOP_scf_send_cmd;
+	}
+	/* Check SCF command send sync stop status */
+	if ((scfcmdp->flag & SCF_USE_SP) != 0) {
+		goto SP_scf_send_cmd;
+	}
+	/* IOCTL/DETACH/SUSPEND send sync: wait for the previous sender */
+	while (scf_comtbl.cmd_busy != 0) {
+		scf_comtbl.cmd_wait += 1;
+		SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmd_cv,
+		    sizeof (kcondvar_t));
+		if (cv_wait_sig(&scf_comtbl.cmd_cv,
+		    &scf_comtbl.all_mutex) == 0) {
+			SC_DBG_DRV_TRACE(TC_KILL, __LINE__, &scf_comtbl.cmd_cv,
+			    sizeof (kcondvar_t));
+			scf_comtbl.cmd_wait -= 1;
+			ret = EINTR;
+			goto END_scf_send_cmd99;
+		}
+		scf_comtbl.cmd_wait -= 1;
+	}
+	scf_comtbl.cmd_busy = 1;
+
+/*
+ * STOP_scf_send_cmd: re-entry point for an SCF_USE_STOP request while
+ * the send sync is already stopped.
+ */
+	STOP_scf_send_cmd:
+
+	/* Check SUSPEND flag */
+	if (scf_comtbl.suspend_flag) {
+		ret = EBUSY;
+		scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+		goto END_scf_send_cmd;
+	}
+
+	(void) scf_path_check(&statep);
+	if (statep == NULL) {
+		/* not exec SCF device */
+		ret = EIO;
+		scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+		goto END_scf_send_cmd;
+	}
+
+	offline_ret = scf_offline_check(statep, FLAG_ON);
+	cmdbusy_ret = scf_cmdbusy_check(statep);
+
+	/*
+	 * Wait until no command is outstanding, the path is online and
+	 * the hardware command register is free.  cmdwait_cv is
+	 * signalled by the interrupt/timer code when conditions change.
+	 */
+	while ((scf_comtbl.scf_cmd_exec_flag != 0) ||
+	    (offline_ret != SCF_PATH_ONLINE) ||
+	    (cmdbusy_ret != SCF_COMMAND_READY)) {
+		scf_comtbl.cmd_busy_wait = 1;
+		SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmdwait_cv,
+		    sizeof (kcondvar_t));
+		if (cv_wait_sig(&scf_comtbl.cmdwait_cv,
+		    &scf_comtbl.all_mutex) == 0) {
+			SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+			    &scf_comtbl.cmdwait_cv, sizeof (kcondvar_t));
+			scf_comtbl.cmd_busy_wait = 0;
+			ret = EINTR;
+			goto END_scf_send_cmd;
+		}
+		scf_comtbl.cmd_busy_wait = 0;
+
+		(void) scf_path_check(&statep);
+		if (statep == NULL) {
+			/* not exec SCF device */
+			ret = EIO;
+			scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+			goto END_scf_send_cmd;
+		}
+
+		offline_ret = scf_offline_check(statep, FLAG_ON);
+		cmdbusy_ret = scf_cmdbusy_check(statep);
+
+		/* Idle but offline: give up rather than wait forever */
+		if ((scf_comtbl.scf_cmd_exec_flag == 0) &&
+		    (offline_ret != SCF_PATH_ONLINE)) {
+			scf_timer_stop(SCF_TIMERCD_CMDBUSY);
+			scf_timer_stop(SCF_TIMERCD_ONLINE);
+			ret = EBUSY;
+			scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+			goto END_scf_send_cmd;
+		}
+	}
+
+/*
+ * SP_scf_send_cmd: entry point for SCF_USE_SP commands, which bypass
+ * the caller queueing above.
+ */
+	SP_scf_send_cmd:
+
+	/* Check SUSPEND flag */
+	if (scf_comtbl.suspend_flag) {
+		ret = EBUSY;
+		scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+		goto END_scf_send_cmd;
+	}
+	if ((statep = scf_comtbl.scf_exec_p) == 0) {
+		ret = EIO;
+		scfcmdp->stat0 = SCF_STAT0_NOT_PATH;
+		goto END_scf_send_cmd;
+	}
+
+	if (scfcmdp->flag == SCF_USE_STOP) {
+		/* SCF command send sync stop */
+		scf_comtbl.path_stop_flag = 1;
+		goto END_scf_send_cmd99;
+	}
+
+	/*
+	 * Work on a private copy (scfcmd_save + scf_sbuf_save /
+	 * scf_rbuf_save) so the caller's buffers are untouched until
+	 * the command completes.
+	 */
+	bcopy((char *)scfcmdp, (char *)&scfcmd_save, sizeof (struct scf_cmd));
+	if (scfcmdp->sbuf != NULL) {
+		scfcmd_save.sbuf = &scf_sbuf_save[0];
+		if (scfcmdp->scount) {
+			bcopy(scfcmdp->sbuf, scf_sbuf_save, scfcmdp->scount);
+		}
+	}
+	if (scfcmdp->rbuf != NULL) {
+		scfcmd_save.rbuf = &scf_rbuf_save[0];
+	}
+	scfcmd_save.flag &= (~SCF_USE_SP);
+	scf_i_send_cmd(&scfcmd_save, statep);
+	scf_comtbl.scf_cmdp = &scfcmd_save;
+
+	scf_comtbl.scf_exec_cmd_id = 1;
+
+	/* Sleep until the completion interrupt clears cmd_end_wait */
+	scf_comtbl.cmd_end_wait = 1;
+	while (scf_comtbl.cmd_end_wait != 0) {
+		SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmdend_cv,
+		    sizeof (kcondvar_t));
+		if (cv_wait_sig(&scf_comtbl.cmdend_cv,
+		    &scf_comtbl.all_mutex) == 0) {
+			SC_DBG_DRV_TRACE(TC_KILL, __LINE__,
+			    &scf_comtbl.cmdend_cv, sizeof (kcondvar_t));
+			scf_comtbl.cmd_end_wait = 0;
+			ret = EINTR;
+			goto END_scf_send_cmd;
+		}
+	}
+	/* Copy results (status and Rx data, clipped to rcount) back */
+	scfcmdp->stat0 = scfcmd_save.stat0;
+	scfcmdp->rbufleng = scfcmd_save.rbufleng;
+	scfcmdp->status = scfcmd_save.status;
+	if (scfcmdp->rbuf != NULL) {
+		if (scfcmdp->rbufleng < scfcmdp->rcount) {
+			bcopy(&scf_rbuf_save[0], scfcmdp->rbuf,
+			    scfcmdp->rbufleng);
+		} else {
+			bcopy(&scf_rbuf_save[0], scfcmdp->rbuf,
+			    scfcmdp->rcount);
+		}
+	}
+	scf_comtbl.cmd_end_wait = 0;
+
+	/* Map SCF completion status to an errno */
+	switch (scfcmdp->stat0) {
+	case NORMAL_END:
+		break;
+
+	case E_NOT_SUPPORT:
+	case RCI_NS:
+		ret = ENOTSUP;
+		break;
+
+	default:
+		/* BUF_FUL/RCI_BUSY/other */
+		ret = EIO;
+	}
+
+	if ((scfcmdp->flag & SCF_USE_SP) != 0) {
+		goto END_scf_send_cmd99;
+	}
+
+/*
+ * END_scf_send_cmd: release the send sync and wake any queued caller.
+ */
+	END_scf_send_cmd:
+
+	scf_comtbl.cmd_busy = 0;
+	if (scf_comtbl.cmd_wait) {
+		cv_signal(&scf_comtbl.cmd_cv);
+		SC_DBG_DRV_TRACE(TC_SIGNAL, __LINE__, &scf_comtbl.cmd_cv,
+		    sizeof (kcondvar_t));
+	}
+
+	if ((scfcmdp->flag & SCF_USE_SP) != 0) {
+		scf_comtbl.path_stop_flag = 0;
+	}
+
+/*
+ * END_scf_send_cmd99: exit without touching the send sync state.
+ */
+	END_scf_send_cmd99:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_i_send_cmd()
+ *
+ * Description: SCF command send processing (hardware access, interrupt
+ *		driven path).  Builds the 16-byte Tx data image in
+ *		sdata[]: for register-only commands the raw sbuf bytes,
+ *		for buffered commands the payload length and a 32-bit
+ *		big-endian word sum (seeded with SCF_MAGICNUMBER_L)
+ *		while the payload itself is written into system SRAM.
+ *		sdata[15] is a byte checksum of sdata[0..14] seeded with
+ *		SCF_MAGICNUMBER_S.  Loads TDATA0-3, the command extended
+ *		register, then the COMMAND register; writing the command
+ *		code byte triggers execution on the XSCF.  Completion is
+ *		reported asynchronously; this function starts the CMDEND
+ *		timer and records the request in the SRAM trace.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+void
+scf_i_send_cmd(struct scf_cmd *scfcmdp, struct scf_state *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_i_send_cmd() "
+	uint8_t sdata[16];
+	uint8_t *wk_charp;
+	uint8_t sum = SCF_MAGICNUMBER_S;
+	uint32_t sum4 = SCF_MAGICNUMBER_L;
+	int scount;
+	int wkleng;
+	int ii;
+	uint8_t *wk_in_p;
+	uint8_t *wk_out_p;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	bzero((void *)sdata, 16);
+
+	switch (scfcmdp->flag) {
+	case SCF_USE_S_BUF:
+	case SCF_USE_SSBUF:
+	case SCF_USE_SLBUF:
+		/*
+		 * Use Tx data register, Not use Tx buffer data
+		 */
+		/* Set Tx data register memo (at most 15 bytes; byte 15
+		 * is reserved for the checksum) */
+		wk_charp = (uint8_t *)&scfcmdp->sbuf[0];
+		if (scfcmdp->scount < SCF_S_CNT_16) {
+			scount = scfcmdp->scount;
+		} else {
+			scount = SCF_S_CNT_15;
+		}
+		for (ii = 0; ii < scount; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		/* Set CMD_SPARE data */
+		if (scfcmdp->cmd == CMD_SPARE) {
+			sdata[12] = scfcmdp->cexr[0];
+			sdata[13] = scfcmdp->cexr[1];
+		}
+
+		break;
+
+	case SCF_USE_L_BUF:
+	case SCF_USE_LSBUF:
+		/*
+		 * Use Tx data register, Use Tx buffer data
+		 */
+		/* Make Tx buffer data sum: big-endian 32-bit word sum,
+		 * with a partial trailing word padded with zeros */
+		for (ii = 0; ii < (scfcmdp->scount / 4); ii++) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16) |
+			    (scfcmdp->sbuf[ii * 4 + 2] << 8) |
+			    (scfcmdp->sbuf[ii * 4 + 3]));
+		}
+		if ((scfcmdp->scount % 4) == 3) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16) |
+			    (scfcmdp->sbuf[ii * 4 + 2] << 8));
+		} else if ((scfcmdp->scount % 4) == 2) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16));
+		} else if ((scfcmdp->scount % 4) == 1) {
+			sum4 += (scfcmdp->sbuf[ii * 4 + 0] << 24);
+		}
+
+		/* Set Tx data register memo : data length */
+		wk_charp = (uint8_t *)&scfcmdp->scount;
+		for (ii = 0; ii < 4; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		/* Set Tx data register memo : sum */
+		wk_charp = (uint8_t *)&sum4;
+		for (ii = 8; ii < 12; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		/* Set CMD_SPARE data */
+		if (scfcmdp->cmd == CMD_SPARE) {
+			sdata[12] = scfcmdp->cexr[0];
+			sdata[13] = scfcmdp->cexr[1];
+		}
+
+		/* SRAM data write: copy the payload into system SRAM */
+		wk_in_p = (uint8_t *)scfcmdp->sbuf;
+		wk_out_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+		for (ii = 0; ii < scfcmdp->scount;
+		    ii++, wk_in_p++, wk_out_p++) {
+			SCF_DDI_PUT8(statep, statep->scf_sys_sram_handle,
+			    wk_out_p, *wk_in_p);
+		}
+
+		break;
+	}
+
+	/* Make Tx data sum over sdata[0..14] */
+	for (ii = 0; ii < SCF_S_CNT_15; ii++) {
+		sum += sdata[ii];
+	}
+	/* Set Tx data sum */
+	sdata[15] = sum;
+
+	/* TxDATA register set (values also memoized in reg_tdata[]) */
+	statep->reg_tdata[0] =
+	    (sdata[0] << 24) | (sdata[1] << 16) |
+	    (sdata[2] << 8) | sdata[3];
+	SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TDATA0, statep->reg_tdata[0]);
+
+	statep->reg_tdata[1] =
+	    (sdata[4] << 24) | (sdata[5] << 16) |
+	    (sdata[6] << 8) | sdata[7];
+	SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TDATA1, statep->reg_tdata[1]);
+
+	SC_DBG_DRV_TRACE(TC_W_TDATA0, __LINE__, &statep->reg_tdata[0],
+	    sizeof (statep->reg_tdata[0]) + sizeof (statep->reg_tdata[1]));
+
+	statep->reg_tdata[2] =
+	    (sdata[8] << 24) | (sdata[9] << 16) |
+	    (sdata[10] << 8) | sdata[11];
+	SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TDATA2, statep->reg_tdata[2]);
+
+	statep->reg_tdata[3] =
+	    (sdata[12] << 24) | (sdata[13] << 16) |
+	    (sdata[14] << 8) | sdata[15];
+	SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->TDATA3, statep->reg_tdata[3]);
+
+	SC_DBG_DRV_TRACE(TC_W_TDATA2, __LINE__, &statep->reg_tdata[2],
+	    sizeof (statep->reg_tdata[2]) + sizeof (statep->reg_tdata[3]));
+
+	/* SCF command extended register set: flag a retried command */
+	if (scf_comtbl.scf_cmd_resend_flag == 0) {
+		statep->reg_command_exr = 0x00;
+	} else {
+		scf_comtbl.scf_cmd_resend_flag = 0;
+		statep->reg_command_exr = COMMAND_ExR_RETRY;
+	}
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND_ExR, statep->reg_command_exr);
+	SC_DBG_DRV_TRACE(TC_W_COMMAND_ExR, __LINE__, &statep->reg_command_exr,
+	    sizeof (statep->reg_command_exr));
+
+	/* SCF command register set */
+	statep->reg_command = ((scfcmdp->subcmd << 8) | scfcmdp->cmd);
+
+	/* Set sub command code first ... */
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    (uint8_t *)&statep->scf_regs->COMMAND,
+	    (uint8_t)(statep->reg_command >> 8));
+	/* ... then the command code byte, which triggers the SCF */
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    (uint8_t *)&statep->scf_regs->COMMAND + 1,
+	    (uint8_t)statep->reg_command);
+
+	SC_DBG_DRV_TRACE(TC_W_COMMAND, __LINE__, &statep->reg_command,
+	    sizeof (statep->reg_command));
+	/* Register read sync: read-back flushes the posted writes */
+	scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND);
+
+	SCFDBGMSG2(SCF_DBGFLAG_REG, "CMD = 0x%04x CMDExR = 0x%02x",
+	    statep->reg_command, statep->reg_command_exr);
+	SCFDBGMSG4(SCF_DBGFLAG_REG, "TxDR = 0x%08x 0x%08x 0x%08x 0x%08x",
+	    statep->reg_tdata[0], statep->reg_tdata[1],
+	    statep->reg_tdata[2], statep->reg_tdata[3]);
+
+	scf_comtbl.scf_cmd_exec_flag = 1;
+	scf_comtbl.scf_exec_cmd_id = 0;
+
+	/* SCF command timer start */
+	scf_timer_start(SCF_TIMERCD_CMDEND);
+
+	/* SRAM trace */
+	SCF_SRAM_TRACE(statep, DTC_CMD);
+	SCF_SRAM_TRACE(statep, DTC_SENDDATA);
+
+	/*
+	 * For buffered commands, also log the payload (clipped to
+	 * scf_sram_trace_data_size) to the SRAM trace in INFO-sized
+	 * chunks.
+	 */
+	if (((scfcmdp->flag == SCF_USE_L_BUF) ||
+	    (scfcmdp->flag == SCF_USE_LSBUF)) &&
+	    (scfcmdp->scount != 0)) {
+		if (scfcmdp->scount > scf_sram_trace_data_size) {
+			scount = scf_sram_trace_data_size;
+		} else {
+			scount = scfcmdp->scount;
+		}
+		wk_in_p = (uint8_t *)scfcmdp->sbuf;
+		while (scount != 0) {
+			bzero((void *)&statep->memo_scf_drvtrc.INFO[0],
+			    sizeof (statep->memo_scf_drvtrc.INFO));
+			wk_out_p = (uint8_t *)&statep->memo_scf_drvtrc.INFO[0];
+			if (scount > sizeof (statep->memo_scf_drvtrc.INFO)) {
+				wkleng = sizeof (statep->memo_scf_drvtrc.INFO);
+			} else {
+				wkleng = scount;
+			}
+			scount -= wkleng;
+			bcopy(wk_in_p, wk_out_p, wkleng);
+			SCF_SRAM_TRACE(statep, DTC_SENDDATA_SRAM);
+			wk_in_p += wkleng;
+		}
+	}
+
+	SC_DBG_DRV_TRACE(TC_SEND, __LINE__, &scfcmdp->flag, 8);
+	SC_DBG_DRV_TRACE(TC_SEND, __LINE__, &scfcmdp->scount, 8);
+
+	SCF_DBG_TEST_SEND_CMD(statep, scfcmdp);
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_p_send_cmd()
+ *
+ * Description: Panic-time variant of scf_i_send_cmd().  Builds the
+ *		same 16-byte Tx data image (length/word-sum for buffered
+ *		commands, raw bytes otherwise, byte checksum in
+ *		sdata[15]) and programs TDATA0-3 and the COMMAND
+ *		registers, but uses the panic-safe SCF_P_DDI_* accessors,
+ *		takes no mutex (no ASSERT), starts no timers and records
+ *		only scf_panic_trc_command.
+ *
+ * NOTE(review): unlike scf_i_send_cmd(), the short-buffer cases
+ *		(SCF_USE_S_BUF/SSBUF/SLBUF) do not load CMD_SPARE cexr
+ *		data here — presumably CMD_SPARE is never issued from
+ *		the panic path; confirm against the callers.
+ */
+void
+scf_p_send_cmd(struct scf_cmd *scfcmdp, struct scf_state *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_p_send_cmd() "
+	uint8_t sdata[16];
+	uint8_t *wk_charp;
+	uint8_t sum = SCF_MAGICNUMBER_S;
+	uint32_t sum4 = SCF_MAGICNUMBER_L;
+	int scount;
+	int ii;
+	uint8_t *wk_in_p;
+	uint8_t *wk_out_p;
+	uint16_t wk_int16;
+
+	SCFDBGMSG(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": start");
+
+	bzero((void *)sdata, 16);
+
+	switch (scfcmdp->flag) {
+	case SCF_USE_S_BUF:
+	case SCF_USE_SSBUF:
+	case SCF_USE_SLBUF:
+		/*
+		 * Use Tx data register, Not use Tx buffer data
+		 */
+		/* Set Tx data register memo (at most 15 bytes; byte 15
+		 * is reserved for the checksum) */
+		wk_charp = (uint8_t *)&scfcmdp->sbuf[0];
+		if (scfcmdp->scount < SCF_S_CNT_16) {
+			scount = scfcmdp->scount;
+		} else {
+			scount = SCF_S_CNT_15;
+		}
+		for (ii = 0; ii < scount; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		break;
+
+	case SCF_USE_L_BUF:
+	case SCF_USE_LSBUF:
+		/*
+		 * Use Tx data register, Use Tx buffer data
+		 */
+		/* Make Tx buffer data sum: big-endian 32-bit word sum,
+		 * with a partial trailing word padded with zeros */
+		for (ii = 0; ii < (scfcmdp->scount / 4); ii++) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16) |
+			    (scfcmdp->sbuf[ii * 4 + 2] << 8) |
+			    (scfcmdp->sbuf[ii * 4 + 3]));
+		}
+		if ((scfcmdp->scount % 4) == 3) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16) |
+			    (scfcmdp->sbuf[ii * 4 + 2] << 8));
+		} else if ((scfcmdp->scount % 4) == 2) {
+			sum4 += ((scfcmdp->sbuf[ii * 4 + 0] << 24) |
+			    (scfcmdp->sbuf[ii * 4 + 1] << 16));
+		} else if ((scfcmdp->scount % 4) == 1) {
+			sum4 += (scfcmdp->sbuf[ii * 4 + 0] << 24);
+		}
+
+		/* Set Tx data register memo : data length */
+		wk_charp = (uint8_t *)&scfcmdp->scount;
+		for (ii = 0; ii < 4; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		/* Set Tx data register memo : sum */
+		wk_charp = (uint8_t *)&sum4;
+		for (ii = 8; ii < 12; ii++, wk_charp++) {
+			sdata[ii] = *wk_charp;
+		}
+
+		/* Set CMD_SPARE data */
+		if (scfcmdp->cmd == CMD_SPARE) {
+			sdata[12] = scfcmdp->cexr[0];
+			sdata[13] = scfcmdp->cexr[1];
+		}
+
+		/* SRAM data write: copy the payload into system SRAM */
+		wk_in_p = (uint8_t *)scfcmdp->sbuf;
+		wk_out_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+		for (ii = 0; ii < scfcmdp->scount;
+		    ii++, wk_in_p++, wk_out_p++) {
+			SCF_P_DDI_PUT8(statep->scf_sys_sram_handle,
+			    wk_out_p, *wk_in_p);
+		}
+
+		break;
+	}
+
+	/* Make Tx data sum over sdata[0..14] */
+	for (ii = 0; ii < SCF_S_CNT_15; ii++) {
+		sum += sdata[ii];
+	}
+	/* Set Tx data sum */
+	sdata[15] = sum;
+
+	/* TxDATA register set */
+	SCF_P_DDI_PUT32(statep->scf_regs_handle, &statep->scf_regs->TDATA0,
+	    (sdata[0] << 24) | (sdata[1] << 16) |
+	    (sdata[2] << 8) | sdata[3]);
+	SCF_P_DDI_PUT32(statep->scf_regs_handle, &statep->scf_regs->TDATA1,
+	    (sdata[4] << 24) | (sdata[5] << 16) |
+	    (sdata[6] << 8) | sdata[7]);
+	SCF_P_DDI_PUT32(statep->scf_regs_handle, &statep->scf_regs->TDATA2,
+	    (sdata[8] << 24) | (sdata[9] << 16) |
+	    (sdata[10] << 8) | sdata[11]);
+	SCF_P_DDI_PUT32(statep->scf_regs_handle, &statep->scf_regs->TDATA3,
+	    (sdata[12] << 24) | (sdata[13] << 16) |
+	    (sdata[14] << 8) | sdata[15]);
+
+	/* SCF command extended register set: no retry flag at panic */
+	SCF_P_DDI_PUT8(statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND_ExR, 0x00);
+
+	/* SCF command register set: sub command byte, then the command
+	 * code byte, which triggers the SCF */
+	SCF_P_DDI_PUT8(statep->scf_regs_handle,
+	    (uint8_t *)&statep->scf_regs->COMMAND,
+	    (uint8_t)scfcmdp->subcmd);
+	SCF_P_DDI_PUT8(statep->scf_regs_handle,
+	    (uint8_t *)&statep->scf_regs->COMMAND + 1,
+	    (uint8_t)scfcmdp->cmd);
+	/* Register read sync: read-back flushes the posted writes and
+	 * is memoized in the panic trace */
+	wk_int16 = SCF_P_DDI_GET16(statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND);
+	scf_panic_trc_command = wk_int16;
+
+	SCFDBGMSG(SCF_DBGFLAG_FOCK, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_path_check()
+ *
+ * Description: Determine the current SCF path status and, when statep
+ *		is non-NULL, return the active device state through it.
+ *		Prefers the exec path (scf_exec_p), falls back to the
+ *		path-change candidate (scf_path_p), else reports halt.
+ *
+ * Returns:	the scf_offline_check() result for the selected path
+ *		(SCF_PATH_ONLINE / SCF_PATH_OFFLINE / SCF_PATH_OFFLINE_DRV),
+ *		SCF_PATH_CHANGE when the path-change candidate is
+ *		online, or SCF_PATH_HALT when no path exists (then
+ *		*statep is set to NULL).
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+int
+scf_path_check(scf_state_t **statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_path_check() "
+	int ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	if (scf_comtbl.scf_exec_p != NULL) {
+		/* SCF path exec status */
+		if (statep != NULL) {
+			*statep = scf_comtbl.scf_exec_p;
+		}
+
+		ret = scf_offline_check(scf_comtbl.scf_exec_p, FLAG_ON);
+
+	} else if (scf_comtbl.scf_path_p != NULL) {
+		/* SCF path change status */
+		if (statep != NULL) {
+			*statep = scf_comtbl.scf_path_p;
+		}
+
+		ret = scf_offline_check(scf_comtbl.scf_path_p, FLAG_ON);
+
+		/* An online change candidate is reported as CHANGE */
+		if (ret == SCF_PATH_ONLINE) {
+			SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF path change status");
+			ret = SCF_PATH_CHANGE;
+		}
+
+	} else {
+		/* SCF path halt status */
+		if (statep != NULL) {
+			*statep = NULL;
+		}
+
+		SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF path halt status");
+		ret = SCF_PATH_HALT;
+
+	}
+
+	SCF_DBG_MAKE_PATH_CHECK(ret);
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_offline_check()
+ *
+ * Description: Read STATUS_ExR and track online/offline transitions of
+ *		the ESCF, logging a console message on each transition.
+ *		On a fresh offline transition (timer_exec_flag ==
+ *		FLAG_ON and the ONLINE timer not yet running) the DSCP
+ *		interface is stopped and the SCF online-wait timer is
+ *		started.
+ *
+ * Arguments:	statep          - SCF path to examine.
+ *		timer_exec_flag - FLAG_ON to honor/manage the online
+ *		                  wait timer; otherwise online status
+ *		                  is reported unconditionally.
+ *
+ * Returns:	SCF_PATH_ONLINE, SCF_PATH_OFFLINE, or
+ *		SCF_PATH_OFFLINE_DRV when the hardware is online but
+ *		the driver's online-wait timer is still running.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+int
+scf_offline_check(scf_state_t *statep, uint_t timer_exec_flag)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_offline_check() "
+	int ret;
+	uint8_t scf_unit;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Get SCF Status extended register */
+	statep->reg_status_exr = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->STATUS_ExR);
+	SC_DBG_DRV_TRACE(TC_R_STATUS_ExR, __LINE__, &statep->reg_status_exr,
+	    sizeof (statep->reg_status_exr));
+
+	SCF_DBG_MAKE_ONLINE(statep->reg_status_exr);
+
+	/* Check SCF online */
+	if ((statep->reg_status_exr & STATUS_SCF_STATUS) == STATUS_SCF_ONLINE) {
+		/* Log the offline -> online transition */
+		if (scf_comtbl.scf_status == SCF_STATUS_OFFLINE) {
+			cmn_err(CE_NOTE, "%s: SCF online.\n", scf_driver_name);
+		}
+		scf_comtbl.scf_status = SCF_STATUS_ONLINE;
+
+		if (timer_exec_flag == FLAG_ON) {
+			/* Check online wait timer exec */
+			if (scf_timer_check(SCF_TIMERCD_ONLINE) ==
+			    SCF_TIMER_NOT_EXEC) {
+				ret = SCF_PATH_ONLINE;
+			} else {
+				/* Still inside the online wait window */
+				ret = SCF_PATH_OFFLINE_DRV;
+			}
+		} else {
+			ret = SCF_PATH_ONLINE;
+		}
+	} else {
+		/* Log the online -> offline transition with the unit */
+		if (scf_comtbl.scf_status != SCF_STATUS_OFFLINE) {
+			if (statep->reg_status_exr & STATUS_SCF_NO) {
+				scf_unit = 1;
+			} else {
+				scf_unit = 0;
+			}
+			cmn_err(CE_WARN,
+			    "%s: SCF went to offline mode. unit=%d",
+			    scf_driver_name, scf_unit);
+		}
+		scf_comtbl.scf_status = SCF_STATUS_OFFLINE;
+
+		if (timer_exec_flag == FLAG_ON) {
+			/* Check online wait timer exec */
+			if (scf_timer_check(SCF_TIMERCD_ONLINE) ==
+			    SCF_TIMER_NOT_EXEC) {
+				/* DCSP interface stop */
+				scf_dscp_stop(FACTOR_OFFLINE);
+
+				/* SCF online timer start */
+				statep->online_to_rcnt = 0;
+				scf_timer_start(SCF_TIMERCD_ONLINE);
+			}
+		}
+		SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF path offline");
+		ret = SCF_PATH_OFFLINE;
+	}
+
+	SCF_DBG_MAKE_OFFLINE_CHECK(ret);
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_cmdbusy_check()
+ *
+ * Description: Check whether the SCF command register is free to take
+ *		a new command.  Reads COMMAND and COMMAND_ExR; when both
+ *		busy bits are clear the path is ready, unless the
+ *		driver's busy timer is still running (then
+ *		SCF_COMMAND_BUSY_DRV).  When the hardware is busy and no
+ *		command of ours is outstanding, the busy timer is
+ *		started so a stuck SCF is eventually detected.
+ *
+ * Returns:	SCF_COMMAND_READY, SCF_COMMAND_BUSY or
+ *		SCF_COMMAND_BUSY_DRV.
+ *
+ * Context:	caller must hold scf_comtbl.all_mutex (ASSERTed).
+ */
+int
+scf_cmdbusy_check(scf_state_t *statep)
+{
+#undef	SCF_FUNC_NAME
+#define	SCF_FUNC_NAME		"scf_cmdbusy_check() "
+	int ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	SCF_DBG_RTN_MAKE_CMD_READY;
+
+	/* Get SCF command register */
+	statep->reg_command = SCF_DDI_GET16(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND);
+	SC_DBG_DRV_TRACE(TC_R_COMMAND, __LINE__, &statep->reg_command,
+	    sizeof (statep->reg_command));
+	statep->reg_command_exr = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->COMMAND_ExR);
+	SC_DBG_DRV_TRACE(TC_R_COMMAND_ExR, __LINE__, &statep->reg_command_exr,
+	    sizeof (statep->reg_command_exr));
+
+	SCF_DBG_MAKE_CMD_BUSY(statep->reg_command, statep->reg_command_exr);
+
+	/* Check busy flag */
+	if (((statep->reg_command & COMMAND_BUSY) == 0x0000) &&
+	    ((statep->reg_command_exr & COMMAND_ExR_BUSY) == 0x00)) {
+		/* Check busy timer exec */
+		if (scf_timer_check(SCF_TIMERCD_CMDBUSY) ==
+		    SCF_TIMER_NOT_EXEC) {
+			ret = SCF_COMMAND_READY;
+			/* debug messages fixed to match the status */
+			SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF command ready");
+		} else {
+			ret = SCF_COMMAND_BUSY_DRV;
+			SCFDBGMSG(SCF_DBGFLAG_SYS, "SCF command driver busy");
+		}
+	} else {
+		if (scf_comtbl.scf_cmd_exec_flag == FLAG_OFF) {
+			/* Check busy timer exec */
+			if (scf_timer_check(SCF_TIMERCD_CMDBUSY) ==
+			    SCF_TIMER_NOT_EXEC) {
+				/* busy timer start */
+				statep->devbusy_to_rcnt = 0;
+				scf_timer_start(SCF_TIMERCD_CMDBUSY);
+			}
+		}
+		ret = SCF_COMMAND_BUSY;
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_alivecheck_start()
+ *
+ * Start the alive-check cycle on this SCF path: reset the alive-check
+ * phase code and interval counter, stop/reprogram the alive timer (ATR),
+ * enable the alive interrupt (CONTROL_ALIVEINE) and write the first
+ * alive-check request to ACR. Each register write is followed by a
+ * read-back of the same register to flush the write.
+ * No-op unless scf_comtbl.alive_running == SCF_ALIVE_START.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_alivecheck_start(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_alivecheck_start() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Check alive check exec */
+	if (scf_comtbl.alive_running == SCF_ALIVE_START) {
+		/* Alive check value initialize */
+		scf_acr_phase_code = 0;
+		scf_comtbl.scf_alive_int_count = scf_alive_interval_time / 3;
+
+		/* Alive timer register initialize */
+		statep->reg_atr = ATR_INTERVAL_STOP;
+		SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ATR, statep->reg_atr);
+		SC_DBG_DRV_TRACE(TC_W_ATR, __LINE__, &statep->reg_atr,
+		    sizeof (statep->reg_atr));
+		/* Register read sync */
+		scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ATR);
+
+		/* Alive Interrupt enable */
+		statep->reg_control |= CONTROL_ALIVEINE;
+		SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL, statep->reg_control);
+		SC_DBG_DRV_TRACE(TC_W_CONTROL, __LINE__, &statep->reg_control,
+		    sizeof (statep->reg_control));
+		/* Register read sync */
+		scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->CONTROL);
+
+		/* Alive timer register set */
+		statep->reg_atr = ATR_INTERVAL_30S;
+		SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ATR, statep->reg_atr);
+		SC_DBG_DRV_TRACE(TC_W_ATR, __LINE__, &statep->reg_atr,
+		    sizeof (statep->reg_atr));
+		/* Register read sync */
+		scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ATR);
+
+		/* Alive check register set */
+		statep->reg_acr = scf_acr_phase_code | ACR_ALIVE_INT;
+		SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ACR, statep->reg_acr);
+		SC_DBG_DRV_TRACE(TC_W_ACR, __LINE__, &statep->reg_acr,
+		    sizeof (statep->reg_acr));
+		/* Register read sync */
+		scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->ACR);
+
+		SCFDBGMSG1(SCF_DBGFLAG_REG, "ACR = 0x%02x", statep->reg_acr);
+
+		/* Advance to the next alive-check phase for the next cycle */
+		scf_acr_phase_code++;
+
+		SCF_DBG_TEST_ALIVE_START(statep);
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_alivecheck_stop()
+ *
+ * Stop the alive-check cycle on this SCF path: disable the alive
+ * interrupt (clear CONTROL_ALIVEINE) and stop the alive timer (ATR).
+ * Each register write is followed by a read-back to flush the write.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_alivecheck_stop(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_alivecheck_stop() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Alive Interrupt disable */
+	statep->reg_control &= (~CONTROL_ALIVEINE);
+	SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL, statep->reg_control);
+	SC_DBG_DRV_TRACE(TC_W_CONTROL, __LINE__, &statep->reg_control,
+	    sizeof (statep->reg_control));
+	/* Register read sync */
+	scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL);
+
+	/* Alive timer register clear */
+	statep->reg_atr = ATR_INTERVAL_STOP;
+	SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->ATR, statep->reg_atr);
+	SC_DBG_DRV_TRACE(TC_W_ATR, __LINE__, &statep->reg_atr,
+	    sizeof (statep->reg_atr));
+	/* Register read sync */
+	scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+	    &statep->scf_regs->ATR);
+
+	SCF_DBG_TEST_ALIVE_STOP(statep);
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * forbid SCF interrupt
+ *
+ * Disable all SCF interrupts on this path (CONTROL <- CONTROL_DISABLE),
+ * stop the alive check, and clear the S_DID_REGENB resource flag so the
+ * path is recorded as having interrupts disabled.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_forbid_intr(struct scf_state *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_forbid_intr() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	/* Interrupt disable */
+	statep->reg_control = CONTROL_DISABLE;
+	SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL, statep->reg_control);
+	SC_DBG_DRV_TRACE(TC_W_CONTROL, __LINE__, &statep->reg_control,
+	    sizeof (statep->reg_control));
+	/* Register read sync */
+	scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL);
+
+	scf_alivecheck_stop(statep);
+
+	statep->resource_flag &= (~S_DID_REGENB);
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * permit SCF interrupt
+ *
+ * Enable SCF interrupts on this path (CONTROL <- CONTROL_ENABLE) and set
+ * the S_DID_REGENB resource flag. When 'flag' is non-zero, first clear
+ * any pending interrupt causes by writing all-ones to STATUS,
+ * STATUS_ExR, DSR and the relevant INT_ST bits. Every write is followed
+ * by a read-back of the same register to flush the write.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_permit_intr(struct scf_state *statep, int flag)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_permit_intr() "
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	if (flag) {
+		/* SCF Status register interrupt clear */
+		SCF_DDI_PUT16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS, 0xffff);
+		/* Register read sync */
+		scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS);
+
+		/* SCF Status extended register interrupt clear */
+		SCF_DDI_PUT32(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS_ExR, 0xffffffff);
+		/* Register read sync */
+		scf_rs32 = SCF_DDI_GET32(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->STATUS_ExR);
+
+		/* DSCP buffer status register interrupt clear */
+		SCF_DDI_PUT8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->DSR, 0xff);
+		/* Register read sync */
+		scf_rs8 = SCF_DDI_GET8(statep, statep->scf_regs_handle,
+		    &statep->scf_regs->DSR);
+
+		/* SCF interrupt status register interrupt clear */
+		SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST,
+		    (INT_ST_PATHCHGIE | CONTROL_ALIVEINE));
+		/* Register read sync */
+		scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+		    &statep->scf_regs_c->INT_ST);
+	}
+
+	/* Interrupt enable */
+	statep->reg_control = CONTROL_ENABLE;
+	SCF_DDI_PUT16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL, statep->reg_control);
+	SC_DBG_DRV_TRACE(TC_W_CONTROL, __LINE__, &statep->reg_control,
+	    sizeof (statep->reg_control));
+	/* Register read sync */
+	scf_rs16 = SCF_DDI_GET16(statep, statep->scf_regs_c_handle,
+	    &statep->scf_regs_c->CONTROL);
+
+	statep->resource_flag |= S_DID_REGENB;
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * Path status check
+ *
+ * Determine which multipath queue 'statep' currently belongs to and
+ * return the matching PATH_STAT_* code:
+ *   PATH_STAT_ACTIVE  - active (exec or path-change) path
+ *   PATH_STAT_STANDBY - on the wait queue
+ *   PATH_STAT_STOP    - on the stop queue
+ *   PATH_STAT_FAIL    - on the error queue
+ *   PATH_STAT_DISCON  - on the disconnect queue
+ *   PATH_STAT_EMPTY   - NULL statep, or on none of the above queues
+ *                       (i.e. the suspend queue)
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+int
+scf_check_state(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_check_state() "
+	scf_state_t *wkstatep;
+	int ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start");
+
+	if (statep != NULL) {
+		if ((statep == scf_comtbl.scf_exec_p) ||
+		    (statep == scf_comtbl.scf_path_p)) {
+			/* PATH_STAT_ACTIVE status */
+			ret = PATH_STAT_ACTIVE;
+		} else {
+			wkstatep = scf_comtbl.scf_wait_p;
+			while (wkstatep) {
+				if (statep == wkstatep) {
+					/* PATH_STAT_STANDBY status */
+					ret = PATH_STAT_STANDBY;
+					goto END_check_state;
+				} else {
+					wkstatep = wkstatep->next;
+				}
+			}
+			wkstatep = scf_comtbl.scf_stop_p;
+			while (wkstatep) {
+				if (statep == wkstatep) {
+					/* PATH_STAT_STOP status */
+					ret = PATH_STAT_STOP;
+					goto END_check_state;
+				} else {
+					wkstatep = wkstatep->next;
+				}
+			}
+			wkstatep = scf_comtbl.scf_err_p;
+			while (wkstatep) {
+				if (statep == wkstatep) {
+					/* PATH_STAT_FAIL status */
+					ret = PATH_STAT_FAIL;
+					goto END_check_state;
+				} else {
+					wkstatep = wkstatep->next;
+				}
+			}
+			wkstatep = scf_comtbl.scf_disc_p;
+			while (wkstatep) {
+				if (statep == wkstatep) {
+					/* PATH_STAT_DISCON status */
+					ret = PATH_STAT_DISCON;
+					goto END_check_state;
+				} else {
+					wkstatep = wkstatep->next;
+				}
+			}
+			/* scf_comtbl.scf_suspend_p queue */
+			/* PATH_STAT_EMPTY status */
+			ret = PATH_STAT_EMPTY;
+		}
+	} else {
+		/* PATH_STAT_EMPTY status */
+		ret = PATH_STAT_EMPTY;
+	}
+
+/*
+ * END_check_state
+ */
+	END_check_state:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * Multi path status change and queue change
+ *
+ * Record the new PATH_STAT_* 'status' in statep (remembering the old one
+ * in old_path_status) and append statep to the tail of the queue that
+ * corresponds to the new status. PATH_STAT_ACTIVE causes no queue move.
+ * The caller is expected to have already unlinked statep from its old
+ * queue (see scf_del_queue()).
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_chg_scf(scf_state_t *statep, int status)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_chg_scf() "
+	scf_state_t *wkstatep;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	/* NOTE: logs the current path_status, not the requested 'status' */
+	SCFDBGMSG2(SCF_DBGFLAG_SYS,
+	    SCF_FUNC_NAME ": start instance = %d status = %d",
+	    statep->instance, statep->path_status);
+
+	/* Set path status */
+	if (statep->path_status != status) {
+		statep->old_path_status = statep->path_status;
+		statep->path_status = status;
+	}
+	switch (status) {
+	case PATH_STAT_ACTIVE:
+		/* Not queue change */
+		break;
+
+	case PATH_STAT_EMPTY:
+		/* Change empty queue */
+		if (scf_comtbl.scf_suspend_p) {
+			wkstatep = scf_comtbl.scf_suspend_p;
+			while (wkstatep->next) {
+				wkstatep = wkstatep->next;
+			}
+			wkstatep->next = statep;
+		} else {
+			scf_comtbl.scf_suspend_p = statep;
+		}
+		statep->next = 0;
+		break;
+
+	case PATH_STAT_STANDBY:
+		/* Change standby queue */
+		if (scf_comtbl.scf_wait_p) {
+			wkstatep = scf_comtbl.scf_wait_p;
+			while (wkstatep->next) {
+				wkstatep = wkstatep->next;
+			}
+			wkstatep->next = statep;
+		} else {
+			scf_comtbl.scf_wait_p = statep;
+		}
+		statep->next = 0;
+		break;
+
+	case PATH_STAT_STOP:
+		/* Change stop queue */
+		if (scf_comtbl.scf_stop_p) {
+			wkstatep = scf_comtbl.scf_stop_p;
+			while (wkstatep->next) {
+				wkstatep = wkstatep->next;
+			}
+			wkstatep->next = statep;
+		} else {
+			scf_comtbl.scf_stop_p = statep;
+		}
+		statep->next = 0;
+		break;
+
+	case PATH_STAT_FAIL:
+		/* Change fail queue */
+		if (scf_comtbl.scf_err_p) {
+			wkstatep = scf_comtbl.scf_err_p;
+			while (wkstatep->next) {
+				wkstatep = wkstatep->next;
+			}
+			wkstatep->next = statep;
+		} else {
+			scf_comtbl.scf_err_p = statep;
+		}
+		statep->next = 0;
+		break;
+
+	case PATH_STAT_DISCON:
+		/* Change disconnect queue */
+		if (scf_comtbl.scf_disc_p) {
+			wkstatep = scf_comtbl.scf_disc_p;
+			while (wkstatep->next) {
+				wkstatep = wkstatep->next;
+			}
+			wkstatep->next = statep;
+		} else {
+			scf_comtbl.scf_disc_p = statep;
+		}
+		statep->next = 0;
+		break;
+	}
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end status = %d",
+	    statep->path_status);
+}
+
+/*
+ * Multi path queue check and delete queue
+ *
+ * Unlink statep from whichever multipath list it is currently on:
+ * active (exec / path-change) pointer, or the suspend, wait, error,
+ * stop or disconnect singly-linked queues. Searching stops at the
+ * first match.
+ * NOTE(review): every successful unlink returns early and so skips the
+ * trailing ": end" debug trace - only the not-found case logs it.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_del_queue(scf_state_t *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_del_queue() "
+	scf_state_t *wkstatep;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start instance = %d",
+	    statep->instance);
+
+	if ((wkstatep = scf_comtbl.scf_exec_p) == statep) {
+		/* Delete active(exec) queue */
+		scf_comtbl.scf_exec_p = NULL;
+		return;
+	} else if ((wkstatep = scf_comtbl.scf_path_p) == statep) {
+		/* Delete active(path change) queue */
+		scf_comtbl.scf_path_p = NULL;
+		return;
+	} else {
+		if ((wkstatep = scf_comtbl.scf_suspend_p) != 0) {
+			/* Delete empty(suspend) queue */
+			if (wkstatep == statep) {
+				scf_comtbl.scf_suspend_p = wkstatep->next;
+				return;
+			} else {
+				while (wkstatep->next) {
+					if (wkstatep->next == statep) {
+						wkstatep->next = statep->next;
+						return;
+					}
+					wkstatep = wkstatep->next;
+				}
+			}
+		}
+		if ((wkstatep = scf_comtbl.scf_wait_p) != 0) {
+			/* Delete standby(wait) queue */
+			if (wkstatep == statep) {
+				scf_comtbl.scf_wait_p = wkstatep->next;
+				return;
+			} else {
+				while (wkstatep->next) {
+					if (wkstatep->next == statep) {
+						wkstatep->next = statep->next;
+						return;
+					}
+					wkstatep = wkstatep->next;
+				}
+			}
+		}
+		if ((wkstatep = scf_comtbl.scf_err_p) != 0) {
+			/* Delete fail(error) queue */
+			if (wkstatep == statep) {
+				scf_comtbl.scf_err_p = wkstatep->next;
+				return;
+			} else {
+				while (wkstatep->next) {
+					if (wkstatep->next == statep) {
+						wkstatep->next = statep->next;
+						return;
+					}
+					wkstatep = wkstatep->next;
+				}
+			}
+		}
+		if ((wkstatep = scf_comtbl.scf_stop_p) != 0) {
+			/* Delete stop queue */
+			if (wkstatep == statep) {
+				scf_comtbl.scf_stop_p = wkstatep->next;
+				return;
+			} else {
+				while (wkstatep->next) {
+					if (wkstatep->next == statep) {
+						wkstatep->next = statep->next;
+						return;
+					}
+					wkstatep = wkstatep->next;
+				}
+			}
+		}
+		if ((wkstatep = scf_comtbl.scf_disc_p) != 0) {
+			/* Delete disconnect queue */
+			if (wkstatep == statep) {
+				scf_comtbl.scf_disc_p = wkstatep->next;
+				return;
+			} else {
+				while (wkstatep->next) {
+					if (wkstatep->next == statep) {
+						wkstatep->next = statep->next;
+						return;
+					}
+					wkstatep = wkstatep->next;
+				}
+			}
+		}
+	}
+	SCFDBGMSG(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * SCF command send sync
+ *
+ * Build a zeroed struct scf_cmd carrying only 'flag' and hand it to
+ * scf_send_cmd_check_bufful(); returns that function's result.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+int
+scf_make_send_cmd(struct scf_cmd *scfcmdp, uint_t flag)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_make_send_cmd() "
+	/* flag = SCF_USE_STOP : SCF command stop wait */
+	/* flag = SCF_USE_START : SCF_USE_STOP signal */
+
+	int ret;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": start flag = 0x%08x",
+	    flag);
+
+	scfcmdp->cmd = 0;
+	scfcmdp->subcmd = 0;
+	scfcmdp->scount = 0;
+	scfcmdp->sbuf = NULL;
+	scfcmdp->rcount = 0;
+	scfcmdp->rbuf = NULL;
+	scfcmdp->flag = flag;
+	ret = scf_send_cmd_check_bufful(scfcmdp);
+
+	SCFDBGMSG1(SCF_DBGFLAG_SYS, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_sram_trace_init()
+ *
+ * SRAM trace initialize processing.
+ *
+ * When the SRAM trace area is mapped (S_DID_REG6): validate the on-SRAM
+ * trace header (DRV_ID / DATA_TOP / DATA_LAST / DATA_WRITE) against the
+ * offsets remembered in statep, rebuild the header when it looks stale,
+ * and stamp the driver version string (space-padded to DRV_ID_SIZE)
+ * into DRV_ID. When the area is not mapped, clear the remembered
+ * offsets so scf_sram_trace() becomes a no-op.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_sram_trace_init(struct scf_state *statep)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_sram_trace_init() "
+	uint8_t wk_drv_id;
+	uint32_t wk_data_top;
+	uint32_t wk_data_last;
+	uint32_t wk_data_write;
+	off_t min_len;
+	uint8_t *wk_in_p;
+	uint8_t *wk_out_p;
+	int wk_leng;
+	int ii;
+	uint8_t drv_name[DRV_ID_SIZE];
+	uint8_t *wk_drv_vl = (uint8_t *)SCF_DRIVER_VERSION;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_TRACE, SCF_FUNC_NAME ": start");
+
+	/* Check SRAM map */
+	if (statep->resource_flag & S_DID_REG6) {
+		wk_drv_id =
+		    SCF_DDI_GET8(statep, statep->scf_reg_drvtrc_handle,
+		    &statep->scf_reg_drvtrc->DRV_ID[0]);
+		wk_data_top =
+		    SCF_DDI_GET32(statep, statep->scf_reg_drvtrc_handle,
+		    &statep->scf_reg_drvtrc->DATA_TOP);
+		wk_data_last =
+		    SCF_DDI_GET32(statep, statep->scf_reg_drvtrc_handle,
+		    &statep->scf_reg_drvtrc->DATA_LAST);
+		wk_data_write =
+		    SCF_DDI_GET32(statep, statep->scf_reg_drvtrc_handle,
+		    &statep->scf_reg_drvtrc->DATA_WRITE);
+		/*
+		 * NOTE(review): the last clause re-initializes when
+		 * DATA_WRITE lies INSIDE [DATA_TOP, DATA_LAST]; verify this
+		 * range test is intended - it reads as inverted.
+		 */
+		if ((wk_drv_id == 0) ||
+		    (wk_data_top != statep->memo_DATA_TOP) ||
+		    (wk_data_last != statep->memo_DATA_LAST) ||
+		    ((wk_data_write >= wk_data_top) &&
+		    (wk_data_write <= wk_data_last))) {
+			/* Make SRAM driver trace header */
+			min_len = (off_t)(sizeof (scf_if_drvtrc_t) +
+			    sizeof (scf_drvtrc_ent_t));
+			if (statep->scf_reg_drvtrc_len >= min_len) {
+				statep->memo_DATA_TOP =
+				    (uint32_t)(sizeof (scf_if_drvtrc_t));
+				statep->memo_DATA_WRITE =
+				    (uint32_t)(sizeof (scf_if_drvtrc_t));
+				statep->memo_DATA_LAST =
+				    (uint32_t)(statep->scf_reg_drvtrc_len -
+				    sizeof (scf_drvtrc_ent_t));
+			} else {
+				/* Area too small to hold even one entry */
+				statep->memo_DATA_TOP = 0;
+				statep->memo_DATA_WRITE = 0;
+				statep->memo_DATA_LAST = 0;
+			}
+			SCF_DDI_PUT32(statep, statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_TOP,
+			    statep->memo_DATA_TOP);
+			SCF_DDI_PUT32(statep, statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_WRITE,
+			    statep->memo_DATA_WRITE);
+			SCF_DDI_PUT32(statep, statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_LAST,
+			    statep->memo_DATA_LAST);
+		} else {
+			/* Header still valid: adopt the on-SRAM offsets */
+			statep->memo_DATA_TOP = SCF_DDI_GET32(statep,
+			    statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_TOP);
+			statep->memo_DATA_WRITE = SCF_DDI_GET32(statep,
+			    statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_WRITE);
+			statep->memo_DATA_LAST = SCF_DDI_GET32(statep,
+			    statep->scf_reg_drvtrc_handle,
+			    &statep->scf_reg_drvtrc->DATA_LAST);
+		}
+
+		/* Copy driver version, space-padded to DRV_ID_SIZE bytes */
+		wk_leng = sizeof (SCF_DRIVER_VERSION);
+		if (wk_leng > DRV_ID_SIZE) {
+			wk_leng = DRV_ID_SIZE;
+		}
+		wk_in_p = wk_drv_vl;
+		wk_out_p = (uint8_t *)&drv_name[0];
+		for (ii = 0; ii < wk_leng; ii++, wk_in_p++, wk_out_p++) {
+			*wk_out_p = *wk_in_p;
+		}
+		for (; ii < DRV_ID_SIZE; ii++, wk_out_p++) {
+			*wk_out_p = ' ';
+		}
+		wk_in_p = (uint8_t *)&drv_name[0];
+		wk_out_p = (uint8_t *)&statep->scf_reg_drvtrc->DRV_ID[0];
+		for (ii = 0; ii < DRV_ID_SIZE; ii++, wk_in_p++, wk_out_p++) {
+			SCF_DDI_PUT8(statep, statep->scf_reg_drvtrc_handle,
+			    wk_out_p, *wk_in_p);
+		}
+	} else {
+		statep->memo_DATA_TOP = 0;
+		statep->memo_DATA_WRITE = 0;
+		statep->memo_DATA_LAST = 0;
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_TRACE, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_sram_trace()
+ *
+ * SRAM trace get processing.
+ *
+ * Append one trace entry (log id, timestamp in 1/10s units derived from
+ * lbolt, plus per-log-id register snapshot) to the SRAM trace ring and
+ * advance DATA_WRITE, wrapping to DATA_TOP past DATA_LAST. No-op when
+ * memo_DATA_WRITE is 0 (trace area absent, see scf_sram_trace_init()).
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+void
+scf_sram_trace(struct scf_state *statep, uint8_t log_id)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_sram_trace() "
+	uint8_t *wk_in_p;
+	uint8_t *wk_out_p;
+	clock_t clock_val;
+	uint32_t log_time;
+	uint8_t wk_log_id;
+	int ii;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG(SCF_DBGFLAG_TRACE, SCF_FUNC_NAME ": start");
+
+	if (statep->memo_DATA_WRITE) {
+		statep->memo_scf_drvtrc.LOG_ID = log_id;
+		clock_val = ddi_get_lbolt();
+		log_time = (uint32_t)(drv_hztousec(clock_val) / 100000);
+		statep->memo_scf_drvtrc.LOG_TIME[0] = (uint8_t)(log_time >> 16);
+		statep->memo_scf_drvtrc.LOG_TIME[1] = (uint8_t)(log_time >> 8);
+		statep->memo_scf_drvtrc.LOG_TIME[2] = (uint8_t)(log_time);
+
+		/* All DTC_ERRRTN variants collapse to the one case below */
+		if ((log_id & DTC_MASK_HIGH) == DTC_ERRRTN) {
+			wk_log_id = DTC_ERRRTN;
+		} else {
+			wk_log_id = log_id;
+		}
+
+		/* Check log id */
+		switch (wk_log_id) {
+		case DTC_ONLINETO:	/* SCF online timeout */
+		case DTC_ONLINE:	/* SCF online start */
+		case DTC_OFFLINE:	/* SCF offline start */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA2_2(4, statep->reg_command,
+			    statep->reg_status);
+			SCF_SET_SRAM_DATA4_1(8, statep->reg_status_exr);
+			break;
+
+		case DTC_SENDDATA:	/* SCF send command data */
+			SCF_SET_SRAM_DATA4_3(0, statep->reg_tdata[0],
+			    statep->reg_tdata[2], statep->reg_tdata[3]);
+			break;
+
+		case DTC_RECVDATA:	/* SCF recv command data */
+			SCF_SET_SRAM_DATA4_3(0, statep->reg_rdata[0],
+			    statep->reg_rdata[2], statep->reg_rdata[3]);
+			break;
+
+		case DTC_ERRRTN:	/* SCF command return error */
+			SCF_SET_SRAM_DATA4_3(0, statep->reg_tdata[0],
+			    statep->reg_tdata[1], statep->reg_tdata[2]);
+			break;
+
+
+		case DTC_RSUMERR:	/* SCF command receive sum error */
+			SCF_SET_SRAM_DATA4_3(0, statep->reg_rdata[0],
+			    statep->reg_rdata[1], statep->reg_rdata[2]);
+			break;
+
+		case DTC_DSCP_TXREQ:	/* DSCP TxREQ request */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_txdcr_c_flag);
+			SCF_SET_SRAM_DATA4_1(8, statep->reg_txdcr_c_length);
+			break;
+
+		case DTC_DSCP_RXACK:	/* DSCP RxACK request */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_rxdcr_c_flag);
+			SCF_SET_SRAM_DATA2_2(8, statep->reg_rxdcr_c_offset, 0);
+			break;
+
+		case DTC_DSCP_RXEND:	/* DSCP RxEND request */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_rxdsr_c_flag);
+			SCF_SET_SRAM_DATA2_2(8, statep->reg_rxdsr_c_offset, 0);
+			break;
+
+		case DTC_DSCP_RXREQ:	/* DSCP RxREQ request */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_rxdcr_c_flag);
+			SCF_SET_SRAM_DATA4_1(8, statep->reg_rxdcr_c_length);
+			break;
+
+
+		case DTC_DSCP_TXACK:	/* DSCP TxACK interrupt */
+		case DTC_DSCP_ACKTO:	/* DSCP ACK timeout */
+		case DTC_DSCP_ENDTO:	/* DSCP END timeout */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_txdcr_c_flag);
+			SCF_SET_SRAM_DATA2_2(8, statep->reg_txdcr_c_offset, 0);
+			break;
+
+		case DTC_DSCP_TXEND:	/* DSCP TxEND interrupt */
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA1_2(4, statep->reg_dcr,
+			    statep->reg_dsr);
+			SCF_SET_SRAM_DATA2_1(6, statep->reg_txdsr_c_flag);
+			SCF_SET_SRAM_DATA2_2(8, statep->reg_txdsr_c_offset, 0);
+			break;
+
+		case DTC_SENDDATA_SRAM:	/* SCF send command data for SRAM */
+		case DTC_RECVDATA_SRAM:	/* SCF recv command data for SRAM */
+		case DTC_DSCP_SENDDATA:	/* DSCP send data */
+		case DTC_DSCP_RECVDATA:	/* DSCP send data */
+			/* Information is already set */
+			break;
+
+		case DTC_CMD:		/* SCF command start */
+		case DTC_INT:		/* SCF interrupt */
+		case DTC_CMDTO:		/* SCF command timeout */
+		case DTC_CMDBUSYTO:	/* SCF command busy timeout */
+		default:
+			SCF_SET_SRAM_DATA2_2(0, statep->reg_control,
+			    statep->reg_int_st);
+			SCF_SET_SRAM_DATA2_2(4, statep->reg_command,
+			    statep->reg_status);
+			SCF_SET_SRAM_DATA1_2(8, statep->reg_command_exr,
+			    (statep->reg_status_exr >> 24));
+			SCF_SET_SRAM_DATA1_2(10, statep->reg_acr,
+			    statep->reg_atr);
+			break;
+		}
+		/* Set trace data */
+		wk_in_p = (uint8_t *)&statep->memo_scf_drvtrc.LOG_ID;
+		wk_out_p = (uint8_t *)statep->scf_reg_drvtrc +
+		    statep->memo_DATA_WRITE;
+		for (ii = 0; ii < sizeof (scf_drvtrc_ent_t);
+		    ii++, wk_in_p++, wk_out_p++) {
+			SCF_DDI_PUT8(statep, statep->scf_reg_drvtrc_handle,
+			    wk_out_p, *wk_in_p);
+		}
+
+		/* Next offset update */
+		statep->memo_DATA_WRITE += sizeof (scf_drvtrc_ent_t);
+		if (statep->memo_DATA_WRITE > statep->memo_DATA_LAST) {
+			statep->memo_DATA_WRITE = statep->memo_DATA_TOP;
+		}
+		SCF_DDI_PUT32(statep, statep->scf_reg_drvtrc_handle,
+		    &statep->scf_reg_drvtrc->DATA_WRITE,
+		    statep->memo_DATA_WRITE);
+	}
+
+	SCFDBGMSG(SCF_DBGFLAG_TRACE, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scfsnap.c b/usr/src/uts/sun4u/opl/io/scfd/scfsnap.c
new file mode 100644
index 0000000000..e85a5e12e4
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scfsnap.c
@@ -0,0 +1,766 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/file.h>
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+#include <sys/scfd/scfdscp.h>
+
+#ifdef DEBUG
+/*
+ * Function list
+ */
+int scf_snapshotsize(intptr_t arg, int mode);
+int scf_get_snapize(int type, int info);
+int scf_snapshot(intptr_t arg, int mode);
+int scf_get_snap(int type, int info, scfsnap_value_t *snap_p,
+ int snap_size);
+
+
+/*
+ * External function
+ */
+extern scf_dscp_comtbl_t scf_dscp_comtbl;
+
+
+/*
+ * scf_snapshotsize()
+ *
+ * Snapshot-size ioctl service (DEBUG builds): copy in a scfsnapsize_t
+ * request, compute the snapshot size for the requested type/info via
+ * scf_get_snapize() under scf_comtbl.all_mutex (EBUSY when the mutex
+ * cannot be taken without blocking), and copy the size back out.
+ * Returns 0, or EFAULT / EINVAL / EBUSY.
+ */
+int
+scf_snapshotsize(intptr_t arg, int mode)
+{
+#define SCF_FUNC_NAME "scf_snapshotsize() "
+	int snap_size;
+	scfsnapsize_t scfsnapsize;
+	int ret = 0;
+
+	SCFDBGMSG(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": start");
+
+	if (ddi_copyin((void *)arg, (void *)&scfsnapsize,
+	    sizeof (scfsnapsize_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "dbg_snap", 8);
+		ret = EFAULT;
+		goto END_snapshotsize;
+	}
+
+	if (mutex_tryenter(&scf_comtbl.all_mutex) != 0) {
+		snap_size = scf_get_snapize(scfsnapsize.type, scfsnapsize.info);
+
+		mutex_exit(&scf_comtbl.all_mutex);
+
+		if (snap_size == (-1)) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			ret = EINVAL;
+			goto END_snapshotsize;
+		}
+
+		scfsnapsize.size = snap_size;
+
+		if (ddi_copyout((void *)&scfsnapsize, (void *)arg,
+		    sizeof (scfsnapsize_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			ret = EFAULT;
+		}
+	} else {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "dbg_snap", 8);
+		ret = EBUSY;
+	}
+
+/*
+ * END_snapshotsize
+ */
+	END_snapshotsize:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_get_snapize()
+ *
+ * Compute the byte size of the snapshot for the given SCFSNAPTYPE_*
+ * 'type' and path selector 'info' (SCFSNAPINFO_AUTO picks the active
+ * exec/path-change path; otherwise an index below path_num). Each
+ * section contributes a scfsnap_value_t header plus its payload rounded
+ * up to a 16-byte multiple. Returns the size, or -1 for an invalid
+ * type/info.
+ * Caller must hold scf_comtbl.all_mutex.
+ */
+int
+scf_get_snapize(int type, int info)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_get_snapize() "
+	scf_state_t *statep = NULL;
+	int wk_size;
+	int ii;
+	int snap_driver_size;
+	int snap_register_size;
+	int snap_sram_size;
+	int ret = 0;
+
+	ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+	SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": start type = %d", type);
+
+	if (info == SCFSNAPINFO_AUTO) {
+		statep = scf_comtbl.scf_exec_p;
+		if (statep == NULL) {
+			statep = scf_comtbl.scf_path_p;
+		}
+	} else if (info < scf_comtbl.path_num) {
+		statep = scf_comtbl.iomp_scf[info];
+	} else {
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "snapsize", 8);
+		ret = (-1);
+		goto END_get_snapize;
+	}
+
+	/* Set driver area size */
+	wk_size = DRV_ID_SIZE;
+	wk_size = sizeof (scfsnap_value_t) +
+	    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+	snap_driver_size = wk_size;
+
+	wk_size = sizeof (scf_timer);
+	wk_size = sizeof (scfsnap_value_t) +
+	    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+	snap_driver_size += wk_size;
+
+	wk_size = sizeof (scf_comtbl_t);
+	wk_size = sizeof (scfsnap_value_t) +
+	    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+	snap_driver_size += wk_size;
+
+	if (statep != NULL) {
+		wk_size = sizeof (scf_state_t);
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_driver_size += wk_size;
+	}
+
+	wk_size = sizeof (scf_dscp_comtbl_t);
+	wk_size = sizeof (scfsnap_value_t) +
+	    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+	snap_driver_size += wk_size;
+
+	if (scf_dscp_comtbl.tx_dscp != NULL) {
+		wk_size = scf_dscp_comtbl.tx_dscsize;
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_driver_size += wk_size;
+	}
+
+	if (scf_dscp_comtbl.rx_dscp != NULL) {
+		wk_size = scf_dscp_comtbl.rx_dscsize;
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_driver_size += wk_size;
+	}
+
+	if (scf_dscp_comtbl.tx_sramp != NULL) {
+		wk_size = scf_dscp_comtbl.tx_sramsize;
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_driver_size += wk_size;
+	}
+
+	/* Per-mailbox event queues and read buffers */
+	for (ii = 0; ii < MBIF_MAX; ii++) {
+		if (scf_dscp_comtbl.scf_dscp_main[ii].ev_quep != NULL) {
+			wk_size = scf_dscp_comtbl.scf_dscp_main[ii].ev_quesize;
+			wk_size = sizeof (scfsnap_value_t) +
+			    ((wk_size + SCF_S_CNT_15) &
+			    SCF_LENGTH_16BYTE_CNV);
+			snap_driver_size += wk_size;
+		}
+		if (scf_dscp_comtbl.scf_dscp_main[ii].rd_datap != NULL) {
+			wk_size = scf_dscp_comtbl.scf_dscp_main[ii].rd_datasize;
+			wk_size = sizeof (scfsnap_value_t) +
+			    ((wk_size + SCF_S_CNT_15) &
+			    SCF_LENGTH_16BYTE_CNV);
+			snap_driver_size += wk_size;
+		}
+	}
+
+	/* Set register area size */
+	if (statep != NULL) {
+		wk_size = sizeof (scf_regs_t) + sizeof (scf_regs_c_t);
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_register_size = wk_size;
+	} else {
+		snap_register_size = 0;
+	}
+
+	/* Set sram area size */
+	if (statep != NULL) {
+		wk_size = sizeof (scf_dscp_sram_t) +
+		    sizeof (scf_sys_sram_t) + statep->scf_reg_drvtrc_len;
+		wk_size = sizeof (scfsnap_value_t) +
+		    ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV);
+		snap_sram_size = wk_size;
+	} else {
+		snap_sram_size = 0;
+	}
+
+	switch (type) {
+	case SCFSNAPTYPE_ALL:
+		/* Set all area snap size */
+		if (statep != NULL) {
+			ret = snap_driver_size + snap_register_size +
+			    snap_sram_size;
+		} else {
+			ret = snap_driver_size;
+		}
+		break;
+
+	case SCFSNAPTYPE_DRIVER:
+		/* Set driver area snap size */
+		ret = snap_driver_size;
+		break;
+
+	case SCFSNAPTYPE_REGISTER:
+		/* Set register area snap size */
+		ret = snap_register_size;
+		break;
+
+	case SCFSNAPTYPE_SRAM:
+		/* Set use SRAM area snap size */
+		ret = snap_sram_size;
+		break;
+
+	default:
+		/* Invalid parameter */
+		SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "snapsize", 8);
+		ret = (-1);
+		break;
+	}
+
+/*
+ * END_get_snapize
+ */
+	END_get_snapize:
+
+	SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_snapshot()
+ *
+ * Snapshot ioctl service (DEBUG builds): copy in a scfsnap_t request
+ * (32-bit ILP32 layout converted when _MULTI_DATAMODEL), size the
+ * snapshot under scf_comtbl.all_mutex (EBUSY when the mutex cannot be
+ * taken without blocking), fill a kmem_zalloc'd buffer via
+ * scf_get_snap(), and copy it out to the user's ss_entries pointer.
+ * The buffer is always freed before return.
+ * Returns 0, or EFAULT / EINVAL / EBUSY / ENODATA.
+ */
+int
+scf_snapshot(intptr_t arg, int mode)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_snapshot() "
+	int snap_size;
+	scfsnap_t scfsnap;
+	scfsnap32_t scfsnap32;
+	scfsnap_value_t *scfsnap_p = NULL;
+	int ret = 0;
+
+	SCFDBGMSG(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": start");
+
+#ifdef _MULTI_DATAMODEL
+	switch (ddi_model_convert_from(mode & FMODELS)) {
+	case DDI_MODEL_ILP32:
+		if (ddi_copyin((void *)arg, (void *)&scfsnap32,
+		    sizeof (scfsnap32_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			ret = EFAULT;
+			goto END_snapshot;
+		}
+		scfsnap.type = scfsnap32.type;
+		scfsnap.info = scfsnap32.info;
+		break;
+
+	case DDI_MODEL_NONE:
+		if (ddi_copyin((void *)arg, (void *)&scfsnap,
+		    sizeof (scfsnap_t), mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			ret = EFAULT;
+			goto END_snapshot;
+		}
+		break;
+	}
+#else /* ! _MULTI_DATAMODEL */
+	if (ddi_copyin((void *)arg, (void *)&scfsnap,
+	    sizeof (scfsnap_t), mode) != 0) {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "dbg_snap", 8);
+		ret = EFAULT;
+		goto END_snapshot;
+	}
+#endif /* _MULTI_DATAMODEL */
+
+	if (mutex_tryenter(&scf_comtbl.all_mutex) != 0) {
+
+		snap_size = scf_get_snapize(scfsnap.type, scfsnap.info);
+
+		if (snap_size == (-1)) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			mutex_exit(&scf_comtbl.all_mutex);
+			ret = EINVAL;
+			goto END_snapshot;
+		}
+
+		if (snap_size != 0) {
+			scfsnap_p = kmem_zalloc((size_t)snap_size, KM_SLEEP);
+
+			ret = scf_get_snap(scfsnap.type, scfsnap.info,
+			    scfsnap_p, snap_size);
+		} else {
+			/* Nothing to snapshot for this type/info */
+			ret = ENODATA;
+		}
+
+		mutex_exit(&scf_comtbl.all_mutex);
+	} else {
+		SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__, "dbg_snap", 8);
+		ret = EBUSY;
+	}
+
+	if (ret == 0) {
+
+#ifdef _MULTI_DATAMODEL
+		switch (ddi_model_convert_from(mode & FMODELS)) {
+		case DDI_MODEL_ILP32:
+			if (ddi_copyout((void *)scfsnap_p,
+			    (void *)(uintptr_t)scfsnap32.ss_entries,
+			    (size_t)snap_size, mode) != 0) {
+				SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+				    "dbg_snap", 8);
+				ret = EFAULT;
+			}
+
+			break;
+
+		case DDI_MODEL_NONE:
+			if (ddi_copyout((void *)scfsnap_p,
+			    (void *)scfsnap.ss_entries,
+			    (size_t)snap_size, mode) != 0) {
+				SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+				    "dbg_snap", 8);
+				ret = EFAULT;
+			}
+			break;
+		}
+
+#else /* ! _MULTI_DATAMODEL */
+		if (ddi_copyout((void *)scfsnap_p, (void *)scfsnap.ss_entries,
+		    (size_t)snap_size, mode) != 0) {
+			SC_DBG_DRV_TRACE(TC_IOCTL|TC_ERR, __LINE__,
+			    "dbg_snap", 8);
+			ret = EFAULT;
+		}
+#endif /* _MULTI_DATAMODEL */
+	}
+
+/*
+ * END_snapshot
+ */
+	END_snapshot:
+
+	/* snap_size is always set by the time scfsnap_p is non-NULL */
+	if (scfsnap_p) {
+		kmem_free((void *)scfsnap_p,
+		    (size_t)snap_size);
+	}
+
+	SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": end return = %d", ret);
+	return (ret);
+}
+
+
+/*
+ * scf_get_snap()
+ */
+int
+scf_get_snap(int type, int info, scfsnap_value_t *snap_top_p, int snap_size)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_get_snap() "
+ scf_state_t *statep;
+ scfsnap_value_t *snap_p;
+ int wk_size;
+ int wk_nextoff;
+ int exec_model;
+ uint8_t *wk_in_p;
+ uint8_t *wk_out_p;
+ scf_dscp_main_t *mainp;
+ scf_regs_t *wk_regs_p;
+ scf_regs_c_t *wk_regs_c_p;
+ int ii;
+
+ int ret = 0;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": start type = %d", type);
+
+#ifdef _MULTI_DATAMODEL
+ exec_model = SCF_DRIVER_64BIT;
+#else /* ! _MULTI_DATAMODEL */
+ exec_model = SCF_DRIVER_32BIT;
+#endif /* _MULTI_DATAMODEL */
+
+ if ((scf_get_snapize(type, info) > snap_size) || (snap_size <= 0)) {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "snapshot", 8);
+ ret = EINVAL;
+ goto END_get_snap;
+ }
+
+ if (info == SCFSNAPINFO_AUTO) {
+ statep = scf_comtbl.scf_exec_p;
+ if (statep == NULL) {
+ statep = scf_comtbl.scf_path_p;
+ }
+ } else if (info < scf_comtbl.path_num) {
+ statep = scf_comtbl.iomp_scf[info];
+ } else {
+ SC_DBG_DRV_TRACE(TC_ERR, __LINE__, "snapshot", 8);
+ ret = EINVAL;
+ goto END_get_snap;
+ }
+
+ snap_p = snap_top_p;
+ wk_nextoff = 0;
+ if ((type == SCFSNAPTYPE_ALL) || (type == SCFSNAPTYPE_DRIVER)) {
+ /* Set driver vl area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SCF_DRIVER_VL);
+ wk_size = sizeof (SCF_DRIVER_VERSION);
+ if (wk_size > DRV_ID_SIZE) {
+ wk_size = DRV_ID_SIZE;
+ }
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)SCF_DRIVER_VERSION,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+
+ /* Set driver timer area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SCF_TIMER_TBL);
+ wk_size = sizeof (scf_timer);
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)scf_timer, (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+
+ /* Set driver common area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SCF_COMTBL);
+ wk_size = sizeof (scf_comtbl);
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)&scf_comtbl, (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+
+ if (statep != NULL) {
+ /* Set device area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SCF_STATE);
+ wk_size = sizeof (scf_state_t);
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)statep, (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ }
+
+ /* Set driver DSCP common area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SCF_DSCP_COMTBL);
+ wk_size = sizeof (scf_dscp_comtbl_t);
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) & SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)&scf_dscp_comtbl, (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+
+ /* Set driver DSCP TxDSC area */
+ if (scf_dscp_comtbl.tx_dscp != NULL) {
+ strcpy((char *)&snap_p->ss_name[0],
+ SNAP_SCF_DSCP_TXDSC);
+ wk_size = scf_dscp_comtbl.tx_dscsize;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)scf_dscp_comtbl.tx_dscp,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ }
+
+ /* Set driver DSCP RxDSC area */
+ if (scf_dscp_comtbl.rx_dscp != NULL) {
+ strcpy((char *)&snap_p->ss_name[0],
+ SNAP_SCF_DSCP_RXDSC);
+ wk_size = scf_dscp_comtbl.rx_dscsize;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)scf_dscp_comtbl.rx_dscp,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ }
+
+ /* Set driver DSCP Tx SRAM area */
+ if (scf_dscp_comtbl.tx_sramp != NULL) {
+ strcpy((char *)&snap_p->ss_name[0],
+ SNAP_SCF_DSCP_TXSRAM);
+ wk_size = scf_dscp_comtbl.tx_sramsize;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)scf_dscp_comtbl.tx_sramp,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ }
+
+ for (ii = 0; ii < MBIF_MAX; ii++) {
+ mainp = &scf_dscp_comtbl.scf_dscp_main[ii];
+ /* Set driver DSCP Event data area */
+ if (mainp->ev_quep != NULL) {
+ strcpy((char *)&snap_p->ss_name[0],
+ SNAP_SCF_DSCP_EVENT);
+ wk_size = mainp->ev_quesize;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)mainp->ev_quep,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p +
+ wk_nextoff);
+ }
+ /* Set driver DSCP Recv data area */
+ if (mainp->rd_datap != NULL) {
+ strcpy((char *)&snap_p->ss_name[0],
+ SNAP_SCF_DSCP_RDATA);
+ wk_size = mainp->rd_datasize;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+ bcopy((void *)mainp->rd_datap,
+ (void *)(snap_p + 1), wk_size);
+ snap_p = (void *)((caddr_t)snap_top_p +
+ wk_nextoff);
+ }
+ }
+ }
+
+ if ((type == SCFSNAPTYPE_ALL) || (type == SCFSNAPTYPE_REGISTER)) {
+ if (statep != NULL) {
+ /* Set register area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_REGISTER);
+ wk_size = sizeof (scf_regs_t) + sizeof (scf_regs_c_t);
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+
+ wk_regs_p = (scf_regs_t *)(snap_p + 1);
+ wk_regs_p->COMMAND = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->COMMAND);
+ wk_regs_p->STATUS = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->STATUS);
+ wk_regs_p->VERSION = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->VERSION);
+ wk_regs_p->TDATA0 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TDATA0);
+ wk_regs_p->TDATA1 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TDATA1);
+ wk_regs_p->TDATA2 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TDATA2);
+ wk_regs_p->TDATA3 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TDATA3);
+ wk_regs_p->RDATA0 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RDATA0);
+ wk_regs_p->RDATA1 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RDATA1);
+ wk_regs_p->RDATA2 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RDATA2);
+ wk_regs_p->RDATA3 = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RDATA3);
+ wk_regs_p->COMMAND_ExR = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->COMMAND_ExR);
+ wk_regs_p->ACR = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->ACR);
+ wk_regs_p->ATR = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->ATR);
+ wk_regs_p->STATUS_ExR = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->STATUS_ExR);
+ wk_regs_p->DCR = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->DCR);
+ wk_regs_p->DSR = SCF_DDI_GET8(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->DSR);
+ wk_regs_p->TxDCR_C_FLAG = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TxDCR_C_FLAG);
+ wk_regs_p->TxDCR_OFFSET = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TxDCR_OFFSET);
+ wk_regs_p->TxDCR_LENGTH = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TxDCR_LENGTH);
+ wk_regs_p->TxDSR_C_FLAG = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TxDSR_C_FLAG);
+ wk_regs_p->TxDSR_OFFSET = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->TxDSR_OFFSET);
+ wk_regs_p->RxDCR_C_FLAG = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RxDCR_C_FLAG);
+ wk_regs_p->RxDCR_OFFSET = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RxDCR_OFFSET);
+ wk_regs_p->RxDCR_LENGTH = SCF_DDI_GET32(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RxDCR_LENGTH);
+ wk_regs_p->RxDSR_C_FLAG = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RxDSR_C_FLAG);
+ wk_regs_p->RxDSR_OFFSET = SCF_DDI_GET16(statep,
+ statep->scf_regs_handle,
+ &statep->scf_regs->RxDSR_OFFSET);
+
+ wk_regs_c_p = (scf_regs_c_t *)(wk_regs_p + 1);
+ wk_regs_c_p->CONTROL = SCF_DDI_GET16(statep,
+ statep->scf_regs_c_handle,
+ &statep->scf_regs_c->CONTROL);
+ wk_regs_c_p->INT_ST = SCF_DDI_GET16(statep,
+ statep->scf_regs_c_handle,
+ &statep->scf_regs_c->INT_ST);
+
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ } else {
+ if (type == SCFSNAPTYPE_REGISTER) {
+ ret = ENODATA;
+ }
+ }
+ }
+
+
+ if ((type == SCFSNAPTYPE_ALL) || (type == SCFSNAPTYPE_SRAM)) {
+ if (statep != NULL) {
+ /* Set use SRAM area */
+ strcpy((char *)&snap_p->ss_name[0], SNAP_SRAM);
+ wk_size = sizeof (scf_dscp_sram_t) +
+ sizeof (scf_sys_sram_t) +
+ statep->scf_reg_drvtrc_len;
+ wk_nextoff += (sizeof (scfsnap_value_t) +
+ ((wk_size + SCF_S_CNT_15) &
+ SCF_LENGTH_16BYTE_CNV));
+ snap_p->ss_flag = exec_model;
+ snap_p->ss_size = wk_size;
+ snap_p->ss_nextoff = wk_nextoff;
+
+ wk_in_p = (uint8_t *)&statep->scf_dscp_sram->DATA[0];
+ wk_out_p = (uint8_t *)(snap_p + 1);
+ for (ii = 0; ii < sizeof (scf_dscp_sram_t);
+ ii++, wk_in_p++, wk_out_p++) {
+ *wk_out_p = SCF_DDI_GET8(statep,
+ statep->scf_dscp_sram_handle, wk_in_p);
+ }
+
+ wk_in_p = (uint8_t *)&statep->scf_sys_sram->DATA[0];
+ for (ii = 0; ii < sizeof (scf_sys_sram_t);
+ ii++, wk_in_p++, wk_out_p++) {
+ *wk_out_p = SCF_DDI_GET8(statep,
+ statep->scf_sys_sram_handle, wk_in_p);
+ }
+
+ wk_in_p = (uint8_t *)statep->scf_reg_drvtrc;
+ for (ii = 0; ii < statep->scf_reg_drvtrc_len;
+ ii++, wk_in_p++, wk_out_p++) {
+ *wk_out_p = SCF_DDI_GET8(statep,
+ statep->scf_reg_drvtrc_handle, wk_in_p);
+ }
+ snap_p = (void *)((caddr_t)snap_top_p + wk_nextoff);
+ } else {
+ if (type == SCFSNAPTYPE_SRAM) {
+ ret = ENODATA;
+ }
+ }
+ }
+
+/*
+ * END_get_snap
+ */
+ END_get_snap:
+
+ SCFDBGMSG1(SCF_DBGFLAG_SNAP, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+#endif /* DEBUG */
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scftimer.c b/usr/src/uts/sun4u/opl/io/scfd/scftimer.c
new file mode 100644
index 0000000000..104b31439e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scftimer.c
@@ -0,0 +1,603 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ksynch.h>
+#include <sys/types.h>
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/kmem.h>
+#include <sys/errno.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+/*
+ * Timer control table and control flag
+ */
+static int scf_timer_stop_flag = FLAG_OFF; /* Timer stop flag */
+scf_timer_t scf_timer[SCF_TIMERCD_MAX]; /* Timer control table */
+
+/*
+ * Timer value
+ */
+ /* SCF command busy watch timer */
+int scf_devbusy_wait_time = SCF_TIMER_VALUE_DEVBUSY;
+ /* SCF command completion watch timer */
+int scf_cmdend_wait_time = SCF_TIMER_VALUE_CMDEND;
+ /* SCF online watch timer */
+int scf_online_wait_time = SCF_TIMER_VALUE_ONLINE;
+ /* Next receive wait timer */
+int scf_rxbuff_wait_time = SCF_TIMER_VALUE_NEXTRCV;
+ /* DSCP interface TxACK watch timer */
+int scf_dscp_ack_wait_time = SCF_TIMER_VALUE_DSCP_ACK;
+ /* DSCP interface TxEND watch timer */
+int scf_dscp_end_wait_time = SCF_TIMER_VALUE_DSCP_END;
+ /* DSCP interface busy watch timer */
+int scf_dscp_txbusy_time = SCF_TIMER_VALUE_DSCP_BUSY;
+ /* DSCP interface callback timer */
+int scf_dscp_callback_time = SCF_TIMER_VALUE_DSCP_CALLBACK;
+
+/*
+ * Function list
+ */
+void scf_timer_init(void);
+void scf_timer_start(int tmcd);
+void scf_timer_stop(int tmcd);
+void scf_timer_all_stop(void);
+int scf_timer_check(int tmcd);
+uint32_t scf_timer_value_get(int tmcd);
+void scf_tout(void *arg);
+int scf_timer_stop_collect(timeout_id_t *tmids, int size);
+void scf_timer_untimeout(timeout_id_t *tmids, int size);
+
+/*
+ * External function
+ */
+extern void scf_cmdbusy_tout(void);
+extern void scf_cmdend_tout(void);
+extern void scf_online_wait_tout(void);
+extern void scf_next_rxdata_get(void);
+extern void scf_dscp_ack_tout(void);
+extern void scf_dscp_end_tout(void);
+extern void scf_dscp_busy_tout(void);
+extern void scf_dscp_callback_tout(void);
+extern void scf_report_send_wait_tout(void);
+
+/*
+ * scf_timer_init()
+ *
+ * Description: Timer initialization processing.
+ *
+ */
+void
+scf_timer_init(void)
+{
+#define SCF_FUNC_NAME "scf_timer_init() "
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start");
+
+ /* Set timer code and timer value */
+ scf_timer[SCF_TIMERCD_CMDBUSY].value = scf_devbusy_wait_time;
+ scf_timer[SCF_TIMERCD_CMDBUSY].tbl[0].code = SCF_TIMERCD_CMDBUSY;
+ scf_timer[SCF_TIMERCD_CMDBUSY].tbl[1].code = SCF_TIMERCD_CMDBUSY;
+
+ scf_timer[SCF_TIMERCD_CMDEND].value = scf_cmdend_wait_time;
+ scf_timer[SCF_TIMERCD_CMDEND].tbl[0].code = SCF_TIMERCD_CMDEND;
+ scf_timer[SCF_TIMERCD_CMDEND].tbl[1].code = SCF_TIMERCD_CMDEND;
+
+ scf_timer[SCF_TIMERCD_ONLINE].value = scf_online_wait_time;
+ scf_timer[SCF_TIMERCD_ONLINE].tbl[0].code = SCF_TIMERCD_ONLINE;
+ scf_timer[SCF_TIMERCD_ONLINE].tbl[1].code = SCF_TIMERCD_ONLINE;
+
+ scf_timer[SCF_TIMERCD_NEXTRECV].value = scf_rxbuff_wait_time;
+ scf_timer[SCF_TIMERCD_NEXTRECV].tbl[0].code = SCF_TIMERCD_NEXTRECV;
+ scf_timer[SCF_TIMERCD_NEXTRECV].tbl[1].code = SCF_TIMERCD_NEXTRECV;
+
+ scf_timer[SCF_TIMERCD_DSCP_ACK].value = scf_dscp_ack_wait_time;
+ scf_timer[SCF_TIMERCD_DSCP_ACK].tbl[0].code = SCF_TIMERCD_DSCP_ACK;
+ scf_timer[SCF_TIMERCD_DSCP_ACK].tbl[1].code = SCF_TIMERCD_DSCP_ACK;
+
+ scf_timer[SCF_TIMERCD_DSCP_END].value = scf_dscp_end_wait_time;
+ scf_timer[SCF_TIMERCD_DSCP_END].tbl[0].code = SCF_TIMERCD_DSCP_END;
+ scf_timer[SCF_TIMERCD_DSCP_END].tbl[1].code = SCF_TIMERCD_DSCP_END;
+
+ scf_timer[SCF_TIMERCD_DSCP_BUSY].value = scf_dscp_txbusy_time;
+ scf_timer[SCF_TIMERCD_DSCP_BUSY].tbl[0].code = SCF_TIMERCD_DSCP_BUSY;
+ scf_timer[SCF_TIMERCD_DSCP_BUSY].tbl[1].code = SCF_TIMERCD_DSCP_BUSY;
+
+ scf_timer[SCF_TIMERCD_DSCP_CALLBACK].value = scf_dscp_callback_time;
+ scf_timer[SCF_TIMERCD_DSCP_CALLBACK].tbl[0].code =
+ SCF_TIMERCD_DSCP_CALLBACK;
+ scf_timer[SCF_TIMERCD_DSCP_CALLBACK].tbl[1].code =
+ SCF_TIMERCD_DSCP_CALLBACK;
+
+ scf_timer[SCF_TIMERCD_BUF_FUL].value = scf_buf_ful_rtime;
+ scf_timer[SCF_TIMERCD_BUF_FUL].tbl[0].code = SCF_TIMERCD_BUF_FUL;
+ scf_timer[SCF_TIMERCD_BUF_FUL].tbl[1].code = SCF_TIMERCD_BUF_FUL;
+
+ scf_timer[SCF_TIMERCD_RCI_BUSY].value = scf_rci_busy_rtime;
+ scf_timer[SCF_TIMERCD_RCI_BUSY].tbl[0].code = SCF_TIMERCD_RCI_BUSY;
+ scf_timer[SCF_TIMERCD_RCI_BUSY].tbl[1].code = SCF_TIMERCD_RCI_BUSY;
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_timer_start()
+ *
+ * Description: Timer start subroutine.
+ *
+ */
+void
+scf_timer_start(int tmcd)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_start() "
+ scf_timer_t *tm_p; /* Timer table address */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d", tmcd);
+
+ /* Check timer code */
+ if (tmcd >= SCF_TIMERCD_MAX) {
+ goto END_timer_start;
+ }
+
+ /* Get timer table address */
+ tm_p = &scf_timer[tmcd];
+
+ /* Check timer value and timer start flag */
+ if ((tm_p->value == 0) || (tm_p->start == FLAG_ON)) {
+ goto END_timer_start;
+ }
+
+ /* Check timer stop flag */
+ if (tm_p->stop == FLAG_OFF) {
+ /*
+ * Timer start and judgment
+ */
+ /* Change timer table side */
+ tm_p->side = (tm_p->side == 0) ? 1 : 0;
+
+ /* timer start */
+ tm_p->tbl[tm_p->side].id = timeout(scf_tout,
+ &tm_p->tbl[tm_p->side],
+ drv_usectohz(SCF_MIL2MICRO(tm_p->value)));
+
+ /* Timer start flag ON */
+ tm_p->start = FLAG_ON;
+
+ SC_DBG_DRV_TRACE(TC_T_START, __LINE__, &tmcd, sizeof (tmcd));
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, "timeout() call");
+ } else {
+ /*
+ * Timer restart and judgment
+ */
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, "timer restart");
+
+ /* Check current table timer use */
+ if (tm_p->tbl[tm_p->side].id != 0) {
+ /* Change timer table side */
+ tm_p->side = (tm_p->side == 0) ? 1 : 0;
+ }
+
+ /* Timer start and restart flag ON */
+ tm_p->start = FLAG_ON;
+ tm_p->restart = FLAG_ON;
+ }
+
+/*
+ * END_timer_start
+ */
+ END_timer_start:
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_timer_stop()
+ *
+ * Description: Timer stop subroutine.
+ *
+ */
+void
+scf_timer_stop(int tmcd)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_stop() "
+ scf_timer_t *tm_p; /* Timer table address */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d", tmcd);
+
+ /* Check timer code */
+ if (tmcd < SCF_TIMERCD_MAX) {
+ /* Get timer table address */
+ tm_p = &scf_timer[tmcd];
+
+ /* Check timer start flag */
+ if (tm_p->start == FLAG_ON) {
+ /*
+ * Timer start and judgment
+ */
+
+ /* Timer start and restart flag OFF */
+ tm_p->start = FLAG_OFF;
+ tm_p->restart = FLAG_OFF;
+
+ /* Timer stop flag ON */
+ tm_p->stop = FLAG_ON;
+ scf_timer_stop_flag = FLAG_ON;
+ }
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_timer_all_stop()
+ *
+ * Description: Timer all stop subroutine.
+ *
+ */
+void
+scf_timer_all_stop(void)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_all_stop() "
+ int tm_cd;
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start");
+
+ for (tm_cd = 0; tm_cd < SCF_TIMERCD_MAX; tm_cd++) {
+ scf_timer_stop(tm_cd);
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_timer_check()
+ *
+ * Description: Timer status check subroutine.
+ *
+ */
+int
+scf_timer_check(int tmcd)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_check() "
+ scf_timer_t *tm_p; /* Timer table address */
+ int ret = SCF_TIMER_NOT_EXEC; /* Return value */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d", tmcd);
+
+ /* Check timer code */
+ if (tmcd < SCF_TIMERCD_MAX) {
+ /* Get timer table address */
+ tm_p = &scf_timer[tmcd];
+
+ /* Check timer start flag */
+ if (tm_p->start == FLAG_ON) {
+ /* Timer exec state */
+ ret = SCF_TIMER_EXEC;
+ }
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_timer_value_get()
+ *
+ * Description: Timer value get subroutine.
+ *
+ */
+uint32_t
+scf_timer_value_get(int tmcd)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_value_get() "
+ uint32_t ret = 0; /* Return value */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d", tmcd);
+
+ /* Check timer code */
+ if (tmcd < SCF_TIMERCD_MAX) {
+ /* Set timer value */
+ ret = scf_timer[tmcd].value;
+ }
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end return = %d", ret);
+ return (ret);
+}
+
+
+/*
+ * scf_tout()
+ *
+ * Description: Timeout main processing.
+ *
+ */
+void
+scf_tout(void *arg)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_tout() "
+ scf_timer_tbl_t *tm_tblp = (scf_timer_tbl_t *)arg;
+ scf_timer_t *tm_p; /* Timer table address */
+ timeout_id_t save_tmids[SCF_TIMERCD_MAX];
+ int tm_stop_cnt;
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start tmcd = %d",
+ tm_tblp->code);
+
+ SC_DBG_DRV_TRACE(TC_T_TOUT | TC_IN, __LINE__, &tm_tblp->code,
+ sizeof (tm_tblp->code));
+
+ /* Lock driver mutex */
+ mutex_enter(&scf_comtbl.all_mutex);
+
+ /* Get timer table address */
+ tm_p = &scf_timer[tm_tblp->code];
+
+ /* Check timer exec state */
+ if ((tm_p->start == FLAG_ON) && (tm_tblp->id != 0) &&
+ (tm_p->stop == FLAG_OFF)) {
+ /* Timer flag OFF and timer id clear */
+ tm_p->start = FLAG_OFF;
+ tm_tblp->id = 0;
+
+ /* Check timer code */
+ switch (tm_tblp->code) {
+ case SCF_TIMERCD_CMDBUSY:
+ /* SCF command busy watch timeout */
+ scf_cmdbusy_tout();
+ break;
+
+ case SCF_TIMERCD_CMDEND:
+ /* SCF command completion watch timeout */
+ scf_cmdend_tout();
+ break;
+
+ case SCF_TIMERCD_ONLINE:
+ /* SCF online watch timeout */
+ scf_online_wait_tout();
+ break;
+
+ case SCF_TIMERCD_NEXTRECV:
+ /* Next receive wait timeout */
+ scf_next_rxdata_get();
+ break;
+
+ case SCF_TIMERCD_DSCP_ACK:
+ /* DSCP interface TxACK watch timeout */
+ scf_dscp_ack_tout();
+ break;
+
+ case SCF_TIMERCD_DSCP_END:
+ /* DSCP interface TxEND watch timeout */
+ scf_dscp_end_tout();
+ break;
+
+ case SCF_TIMERCD_DSCP_BUSY:
+ /* DSCP interface busy watch timeout */
+ scf_dscp_busy_tout();
+ break;
+
+ case SCF_TIMERCD_DSCP_CALLBACK:
+ /* DSCP interface callback timeout */
+ scf_dscp_callback_tout();
+ break;
+
+ case SCF_TIMERCD_BUF_FUL:
+ /* SCF command BUF_FUL timeout */
+ case SCF_TIMERCD_RCI_BUSY:
+ /* SCF command RCI_BUSY timeout */
+ scf_report_send_wait_tout();
+ break;
+
+ default:
+ /* NOP */
+ break;
+ }
+ } else {
+ /* Timer flag OFF and timer id clear */
+ tm_p->stop = FLAG_OFF;
+ tm_tblp->id = 0;
+
+ /* Check timer restart flag */
+ if (tm_p->restart == FLAG_ON) {
+ /*
+ * Timer start and judgment
+ */
+ /* timer start */
+ tm_p->tbl[tm_p->side].id = timeout(scf_tout,
+ &tm_p->tbl[tm_p->side],
+ drv_usectohz(SCF_MIL2MICRO(tm_p->value)));
+
+ /* Timer start flag is already ON */
+
+ /* Timer restart flag OFF */
+ tm_p->restart = FLAG_OFF;
+
+ SC_DBG_DRV_TRACE(TC_T_START, __LINE__, &tm_tblp->code,
+ sizeof (tm_tblp->code));
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, "timeout() call");
+ }
+ }
+
+ /* Collect the timers which need to be stopped */
+ tm_stop_cnt = scf_timer_stop_collect(save_tmids, SCF_TIMERCD_MAX);
+
+ /* Unlock driver mutex */
+ mutex_exit(&scf_comtbl.all_mutex);
+
+ /* Timer stop */
+ if (tm_stop_cnt != 0) {
+ scf_timer_untimeout(save_tmids, SCF_TIMERCD_MAX);
+ }
+
+ SC_DBG_DRV_TRACE(TC_T_TOUT | TC_OUT, __LINE__, NULL, 0);
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
+
+
+/*
+ * scf_timer_stop_collect()
+ *
+ * Description: Collect the timers which need to be stopped.
+ *
+ */
+int
+scf_timer_stop_collect(timeout_id_t *tmids, int size)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_stop_collect() "
+ scf_timer_t *tm_p; /* Timer table address */
+ int ii; /* Working value : counter */
+ int tm_stop_cnt = 0; /* Timer stop counter */
+
+ ASSERT(MUTEX_HELD(&scf_comtbl.all_mutex));
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start");
+
+ /* Clear save timer table */
+ bzero((caddr_t)tmids, (sizeof (timeout_id_t) * size));
+
+ /* Check timer stop factor */
+ if (scf_timer_stop_flag == FLAG_OFF) {
+ goto END_timer_stop_collect;
+ }
+
+ /* Timer stop flag OFF */
+ scf_timer_stop_flag = FLAG_OFF;
+
+ /* Get timer table address */
+ tm_p = &scf_timer[0];
+
+ /* Check all timer table */
+ for (ii = 0; ii < size; ii++, tm_p++) {
+ /* Check timer stop flag */
+ if (tm_p->stop == FLAG_ON) {
+ /* Timer stop flag OFF */
+ tm_p->stop = FLAG_OFF;
+
+ /* Check timer side 0 table timer use */
+ if (tm_p->tbl[0].id != 0) {
+ /* Save stop timer id */
+ tmids[tm_stop_cnt++] = tm_p->tbl[0].id;
+
+ /* Timer id clear */
+ tm_p->tbl[0].id = 0;
+
+ SC_DBG_DRV_TRACE(TC_T_STOP, __LINE__, &ii,
+ sizeof (ii));
+ }
+
+ /* Check timer side 1 table timer use */
+ if (tm_p->tbl[1].id != 0) {
+ /* Save stop timer id */
+ tmids[tm_stop_cnt++] = tm_p->tbl[1].id;
+
+ /* Timer id clear */
+ tm_p->tbl[1].id = 0;
+
+ SC_DBG_DRV_TRACE(TC_T_STOP, __LINE__, &ii,
+ sizeof (ii));
+ }
+ }
+ /* Check timer restart flag */
+ if (tm_p->restart == FLAG_ON) {
+ /*
+ * Timer start and judgment
+ */
+
+ /* timer start */
+ tm_p->tbl[tm_p->side].id = timeout(scf_tout,
+ &tm_p->tbl[tm_p->side],
+ drv_usectohz(SCF_MIL2MICRO(tm_p->value)));
+
+ /* Timer start flag ON */
+ tm_p->restart = FLAG_OFF;
+
+ SC_DBG_DRV_TRACE(TC_T_START, __LINE__, &ii,
+ sizeof (ii));
+ }
+ }
+
+/*
+ * END_timer_stop_collect
+ */
+ END_timer_stop_collect:
+
+ SCFDBGMSG1(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end tm_stop_cnt = %d",
+ tm_stop_cnt);
+ return (tm_stop_cnt);
+}
+
+
+/*
+ * scf_timer_untimeout()
+ *
+ * Description: Timer stop subroutine.
+ *
+ */
+void
+scf_timer_untimeout(timeout_id_t *tmids, int size)
+{
+#undef SCF_FUNC_NAME
+#define SCF_FUNC_NAME "scf_timer_untimeout() "
+ int ii; /* Working value : counter */
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": start");
+
+ /* Save timer id stop */
+ for (ii = 0; ii < size; ii++) {
+ if (tmids[ii] != 0) {
+ (void) untimeout(tmids[ii]);
+ }
+ }
+
+ SCFDBGMSG(SCF_DBGFLAG_TIMER, SCF_FUNC_NAME ": end");
+}
diff --git a/usr/src/uts/sun4u/opl/io/scfd/scftrace.c b/usr/src/uts/sun4u/opl/io/scfd/scftrace.c
new file mode 100644
index 0000000000..1477d4f492
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/io/scfd/scftrace.c
@@ -0,0 +1,152 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/conf.h>
+#include <sys/cmn_err.h>
+#include <sys/errno.h>
+#include <sys/time.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+
+#include <sys/scfd/scfparam.h>
+
+#ifdef DEBUG
+/*
+ * SCF driver trace flag
+ */
+ushort_t scf_trace_exec = 1; /* 1:trace exec, 0:trace no exec */
+
+ushort_t scf_trace_flag = 0xff00;
+/*
+ * xxxx xxxx : scf_trace_flag
+ * 1 : Error trace exec
+ * 1 : Busy trace exec
+ * 1 : Message trace exec
+ * 1 : RD register trace exec
+ * 1 : WR register trace exec
+ * 1 : Timer trace exec
+ * 1 : Func out trace exec
+ * 1 : Func in trace exec
+ */
+
+/*
+ * SCF driver trace debug flag
+ */
+uint_t scf_trace_msg_flag = 0x00000000; /* trace message flag */
+
+
+/*
+ * Function list
+ */
+void scf_trace(ushort_t code, ushort_t line, uchar_t *info, ushort_t size);
+
+
+/*
+ * scf_trace()
+ *
+ * SCF Driver trace get processing.
+ *
+ * 0 +--------------+
+ * | source line | trace get source line
+ * 2 +--------------+
+ * | time | trace get time (100ms)
+ * 4 +--------------+
+ * | trigger code | trace trigger code
+ * 6 +--------------+
+ * | info size | information size
+ * 8 +--------------+
+ * | |
+ * A + +
+ * | |
+ * C + info + information
+ * | |
+ * E + +
+ * | |
+ * 10 +--------------+
+ *
+ */
+void
+scf_trace(ushort_t code, ushort_t line, uchar_t *info, ushort_t size)
+{
+ scf_trctbl_t trace_wk;
+ scf_trctbl_t *trcp;
+ uchar_t *in_p;
+ uchar_t *out_p;
+ clock_t clock_val;
+ int ii;
+ int trcflag = 0;
+
+ if ((scf_trace_exec) &&
+ ((code & scf_trace_flag) || (!(code & 0xFF00)))) {
+ if (scf_comtbl.resource_flag & DID_MUTEX_TRC) {
+ mutex_enter(&scf_comtbl.trc_mutex);
+ trcflag = 1;
+ }
+ }
+
+ if (!trcflag) {
+ return;
+ }
+
+ trcp = (scf_trctbl_t *)&trace_wk.line;
+ trcp->line = line;
+ clock_val = ddi_get_lbolt();
+ trcp->tmvl = (ushort_t)(drv_hztousec(clock_val) / 100000);
+ trcp->code = code;
+ trcp->size = size;
+ for (ii = 0; ii < sizeof (trace_wk.info); ii++) {
+ if (ii < size) {
+ trcp->info[ii] = *(info+ii);
+ } else {
+ trcp->info[ii] = 0;
+ }
+ }
+
+ if (trcflag) {
+ in_p = (uchar_t *)trcp;
+ out_p = (uchar_t *)scf_comtbl.trace_w;
+ scf_comtbl.trace_w++;
+ if (scf_comtbl.trace_w == scf_comtbl.trace_l) {
+ scf_comtbl.trace_w = scf_comtbl.trace_f;
+ }
+ for (ii = 0; ii < 16; ii++, in_p++, out_p++) *out_p = *in_p;
+ if (trcp->code & (TC_ERR | TC_ERRCD)) {
+ in_p = (uchar_t *)trcp;
+ out_p = (uchar_t *)scf_comtbl.err_trace_w;
+ for (ii = 0; ii < 16; ii++, in_p++, out_p++)
+ *out_p = *in_p;
+ scf_comtbl.err_trace_w++;
+ if (scf_comtbl.err_trace_w == scf_comtbl.err_trace_l) {
+ scf_comtbl.err_trace_w = scf_comtbl.err_trace_f;
+ }
+ }
+ }
+
+ if (trcflag) {
+ mutex_exit(&scf_comtbl.trc_mutex);
+ }
+}
+#endif
diff --git a/usr/src/uts/sun4u/opl/mc-opl/Makefile b/usr/src/uts/sun4u/opl/mc-opl/Makefile
new file mode 100644
index 0000000000..d089913fa3
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/mc-opl/Makefile
@@ -0,0 +1,98 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the mc-opl driver kernel
+# module.
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/mc-opl/Makefile
+#
+#
+#Path to the base of the uts directory tree(/usr/src/uts)
+#
+UTSBASE = ../../..
+
+#
+#Define the module and object file sets.
+#
+MODULE = mc-opl
+OBJECTS = $(MC_OPL_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(MC_OPL_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE) -I../sys
+
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+# CFLAGS
+# MC_EMULATION - emulate mac patrol registers
+#
+CFLAGS += -dalign -DMC_EMULATION
+#CFLAGS += -dalign
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/ml/drmach.il.cpp b/usr/src/uts/sun4u/opl/ml/drmach.il.cpp
new file mode 100644
index 0000000000..24d9c195c2
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/ml/drmach.il.cpp
@@ -0,0 +1,198 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+
+/*
+ * This file is through cpp before being used as
+ * an inline. It contains support routines used
+ * only by DR for the copy-rename sequence.
+ */
+
+#if defined(lint)
+#include <sys/types.h>
+#endif /* lint */
+
+#ifndef INLINE
+
+#include <sys/asm_linkage.h>
+
+#else /* INLINE */
+
+#define ENTRY_NP(x) .inline x,0
+#define retl /* nop */
+#define SET_SIZE(x) .end
+
+#endif /* INLINE */
+
+#include <sys/privregs.h>
+#include <sys/sun4asi.h>
+#include <sys/machparam.h>
+
+#include <sys/intreg.h>
+#include <sys/opl_olympus_regs.h>
+
+/*
+ * Bcopy routine used by DR to copy
+ * between physical addresses.
+ * Borrowed from Starfire DR 2.6.
+ */
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+bcopy32_il(uint64_t paddr1, uint64_t paddr2)
+{}
+
+#else /* lint */
+
+	ENTRY_NP(bcopy32_il)
+	.register %g2, #scratch
+	.register %g3, #scratch
+	rdpr	%pstate, %g0	! NOTE(review): result discarded into %g0 - presumably a serializing read; confirm intent
+	ldxa	[%o0]ASI_MEM, %o2	! load 32 bytes from physical src (%o0)
+	add	%o0, 8, %o0
+	ldxa	[%o0]ASI_MEM, %o3
+	add	%o0, 8, %o0
+	ldxa	[%o0]ASI_MEM, %g1
+	add	%o0, 8, %o0
+	ldxa	[%o0]ASI_MEM, %g2
+
+	stxa	%o2, [%o1]ASI_MEM	! store the 32 bytes to physical dst (%o1)
+	add	%o1, 8, %o1
+	stxa	%o3, [%o1]ASI_MEM
+	add	%o1, 8, %o1
+	stxa	%g1, [%o1]ASI_MEM
+	add	%o1, 8, %o1
+	stxa	%g2, [%o1]ASI_MEM
+
+	retl
+	nop
+	SET_SIZE(bcopy32_il)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/*ARGSUSED*/
+void
+flush_cache_il(void)
+{}
+
+#else /* lint */
+
+	ENTRY_NP(flush_cache_il)
+	rdpr	%pstate, %o3		! save current pstate
+	andn	%o3, PSTATE_IE, %o4
+	wrpr	%g0, %o4, %pstate	! disable interrupts around the flush
+	mov	ASI_L2_CTRL_U2_FLUSH, %o4
+	mov	ASI_L2_CTRL_RW_ADDR, %o5
+	stxa	%o4, [%o5]ASI_L2_CTRL	! kick off the L2 flush
+	! retl - NOTE(review): return commented out; harmless as a .inline (retl is a no-op there) but wrong if assembled out-of-line - confirm inline-only use
+	wrpr	%g0, %o3, %pstate	! restore earlier pstate
+	SET_SIZE(flush_cache_il)
+
+#endif /* lint */
+
+#if defined(lint)
+/* ARGUSED */
+uint64_t
+drmach_get_stick_il(void)
+{}
+
+#else /* lint */
+	ENTRY_NP(drmach_get_stick_il)
+	retl
+	rd	STICK, %o0		! (delay slot) return current %stick counter
+	SET_SIZE(drmach_get_stick_il)
+#endif /* lint */
+
+#if defined(lint)
+/* ARGUSED */
+void
+membar_sync_il(void)
+{}
+
+#else /* lint */
+	ENTRY_NP(membar_sync_il)
+	retl
+	membar	#Sync			! (delay slot) full memory barrier
+	SET_SIZE(membar_sync_il)
+#endif /* lint */
+
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+flush_instr_mem_il(caddr_t vaddr)
+{}
+
+#else /* lint */
+
+/*
+ * flush_instr_mem:
+ * Flush 1 page of the I-$ starting at vaddr
+ * %o0 vaddr
+ *
+ * SPARC64-VI maintains consistency of the on-chip Instruction Cache with
+ * the stores from all processors so that a FLUSH instruction is only needed
+ * to ensure pipeline is consistent. This means a single flush is sufficient at
+ * the end of a sequence of stores that updates the instruction stream to
+ * ensure correct operation.
+ */
+
+	ENTRY_NP(flush_instr_mem_il)
+	flush	%o0		! address irrelevant; a single flush syncs the pipeline
+	retl
+	nop
+	SET_SIZE(flush_instr_mem_il)
+
+#endif /* lint */
+
+#if defined(lint)
+
+/* ARGSUSED */
+void
+drmach_sleep_il(void)
+{}
+
+#else /* lint */
+
+/*
+ * drmach_sleep_il:
+ *
+ * A busy-wait loop can hurt performance of the sibling strand;
+ * the sleep instruction is used instead to avoid that.
+ */
+
+	ENTRY_NP(drmach_sleep_il)
+.word 0x81b01060		! sleep (opcode not known to the assembler)
+	retl
+	nop
+	SET_SIZE(drmach_sleep_il)
+
+#endif /* lint */
diff --git a/usr/src/uts/sun4u/opl/ml/drmach_asm.s b/usr/src/uts/sun4u/opl/ml/drmach_asm.s
new file mode 100644
index 0000000000..870b954b7c
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/ml/drmach_asm.s
@@ -0,0 +1,507 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * This file is through cpp before being used as
+ * an inline. It contains support routines used
+ * only by DR for the copy-rename sequence.
+ */
+
+#if defined(lint)
+#include <sys/types.h>
+#else
+#include "assym.h"
+#include "drmach_offsets.h"
+#endif /* lint */
+
+#include <sys/asm_linkage.h>
+#include <sys/param.h>
+#include <sys/privregs.h>
+#include <sys/spitregs.h>
+#include <sys/mmu.h>
+#include <sys/machthread.h>
+#include <sys/pte.h>
+#include <sys/stack.h>
+#include <sys/vis.h>
+#include <sys/intreg.h>
+#include <sys/cheetahregs.h>
+#include <sys/drmach.h>
+
+#if !defined(lint)
+
+/*
+ * turn off speculative mode to prevent unwanted memory access
+ * when we are in the FMEM loops
+ */
+
+#define FJSV_SPECULATIVE_OFF(reg, tmp1, tmp2) \
+ rdpr %pstate, reg ;\
+ andn reg, PSTATE_IE, tmp1 ;\
+ wrpr %g0, tmp1, %pstate ;\
+ ldxa [%g0]ASI_MCNTL, tmp1 ;\
+ set 1, tmp2 ;\
+ sllx tmp2, MCNTL_SPECULATIVE_SHIFT, tmp2 ;\
+ or tmp1, tmp2, tmp1 ;\
+ stxa tmp1, [%g0]ASI_MCNTL ;\
+ membar #Sync
+#endif
+
+
+#if defined(lint)
+/*ARGSUSED*/
+void
+drmach_fmem_loop_script(caddr_t critical, int size, caddr_t stat)
+{ return; }
+#else /* lint */
+	.align 8
+	ENTRY_NP(drmach_fmem_loop_script)
+	/* turn off speculative mode */
+	FJSV_SPECULATIVE_OFF(%o5, %o3, %o4);
+
+	/* read the critical region to get everything in the cache */
+	mov	%o0, %o3
+0:
+	ldx	[%o3], %o4
+	sub	%o1, 8, %o1		! %o1 = remaining byte count
+	brnz	%o1, 0b
+	add	%o3, 8, %o3		! (delay slot) advance to next doubleword
+
+	/* clear L2_CTRL_UGE_TRAP error bit */
+	mov	ASI_L2_CTRL_RW_ADDR, %o1
+	ldxa	[%o1]ASI_L2_CTRL, %o3
+	sethi	%hi(ASI_L2_CTRL_UGE_TRAP), %o4
+	btst	%o3, %o4
+	bz,pn	%xcc, 1f
+	nop
+	stxa	%o4, [%o1]ASI_L2_CTRL	! clear the stale error bit
+
+	/* now tell the master CPU that we are ready */
+1:
+	set	FMEM_LOOP_FMEM_READY, %o3
+	stb	%o3, [%o2]		! %o2 = mbox stat byte shared with master
+	membar	#Sync
+	ba	5f
+	nop
+
+	/*
+	 * note that we branch to 5f, which branches right back to 2 here.
+	 * The trick: by the time we get there, the master may have patched
+	 * that branch into a branch-to-itself - an infinite (sleep) loop.
+	 * The master thread will patch it back to "ba 2b" when it
+	 * completes.
+	 */
+
+	/* Once we are back, we first check if there has been any
+	 * L2_CTRL_UGE_TRAP errors, if so we have to fail the
+	 * operation. This will cause a panic because the system
+	 * is already in inconsistent state.
+	 */
+2:
+	mov	%g0, %o1
+	mov	ASI_L2_CTRL_RW_ADDR, %o3
+	ldxa	[%o3]ASI_L2_CTRL, %o3
+	sethi	%hi(ASI_L2_CTRP_UGE_TRAP), %o4
+	btst	%o3, %o4
+	bz,pn	%xcc, 3f
+	mov	%g0, %o4		! (delay slot) assume no error (0)
+	set	FMEM_HW_ERROR, %o4	! reached only when UGE_TRAP was set
+
+	/* set error code and stat code */
+3:
+	set	FMEM_LOOP_DONE, %o3
+	stb	%o3, [%o2]		! post completion status to the mbox
+
+	/* turn on speculative mode again */
+	ldxa	[%g0]ASI_MCNTL, %o0
+	set	1, %o1
+	sllx	%o1, MCNTL_SPECULATIVE_SHIFT, %o1
+	andn	%o0, %o1, %o0
+	ba	4f
+	nop
+.align 32
+4:
+	stxa	%o0, [%g0]ASI_MCNTL
+	membar	#Sync
+	wrpr	%g0, %o5, %pstate	! restore saved pstate (interrupts)
+	retl
+	mov	%o4, %o0		! return FMEM_* status code
+.align 8
+5:
+	ALTENTRY(drmach_fmem_loop_script_rtn)
+	/*
+	 * busy wait will affect sibling strands so
+	 * we put sleep instruction in the delay slot
+	 */
+	ba	2b
+.word 0x81b01060
+	nop
+	SET_SIZE(drmach_fmem_loop_script)
+#endif /* lint */
+
+#if defined(lint)
+/*ARGSUSED*/
+void
+drmach_flush_icache(void)
+{ return; }
+#else /* lint */
+	.align 8
+	ENTRY_NP(drmach_flush_icache)
+	stxa	%g0, [%g0]ASI_ALL_FLUSH_L1I	! flush the entire L1 I-cache
+	membar	#Sync
+	retl
+	nop
+	SET_SIZE(drmach_flush_icache)
+#endif
+
+#if defined(lint)
+/*ARGSUSED*/
+int
+drmach_fmem_exec_script(caddr_t critical, int size)
+{ return (0); }
+#else /* lint */
+.align 32
+ ENTRY_NP(drmach_fmem_exec_script)
+ /* turn off speculative mode */
+ FJSV_SPECULATIVE_OFF(%o5, %o3, %o4);
+ /* save locals to save area */
+ add %o0, SAVE_LOCAL, %o2
+ stx %l0, [%o2+8*0]
+ stx %l1, [%o2+8*1]
+ stx %l2, [%o2+8*2]
+ stx %l3, [%o2+8*3]
+ stx %l4, [%o2+8*4]
+ stx %l5, [%o2+8*5]
+ stx %l6, [%o2+8*6]
+ stx %l7, [%o2+8*7]
+ mov %o5, %l6
+ /* l7 is set only when FMEM cmd is issued to SCF */
+ mov %g0, %l7
+
+ /* read the critical region to put everything in the cache */
+ mov %o0, %o2
+0:
+ ldx [%o2], %o4
+ sub %o1, 8, %o1
+ brnz %o1, 0b
+ add %o2, 8, %o2
+ ba 4f
+ nop
+
+ /* we branch to 4f but eventually we branch back here to finish up */
+1:
+ mov %l6, %o5
+ /*
+ * save some registers for debugging
+ * l0 - SCF_REG_BASE
+ * l1 - SCF_TD
+ * l2 - SCF_TD + 8
+ * l5 - DELAY
+ */
+ add %o0, SAVE_LOG, %o1
+ stx %l0, [%o1+8*0]
+ stx %l1, [%o1+8*1]
+ stx %l2, [%o1+8*2]
+ stx %l5, [%o1+8*3]
+
+ add %o0, FMEM_ISSUED, %o1
+ st %l7, [%o1]
+
+ /* Check for L2_CTRL_UGE_TRAP error */
+ mov ASI_L2_CTRL_RW_ADDR, %l0
+ ldxa [%l0]ASI_L2_CTRL, %l1
+ sethi %hi(ASI_L2_CTRL_UGE_TRAP), %l2
+ btst %l1, %l2
+ bz,pn %xcc, 2f
+ nop
+ mov FMEM_HW_ERROR, %o4
+2:
+ /* restore all locals */
+ add %o0, SAVE_LOCAL, %o1
+ ldx [%o1+8*0], %l0
+ ldx [%o1+8*1], %l1
+ ldx [%o1+8*2], %l2
+ ldx [%o1+8*3], %l3
+ ldx [%o1+8*4], %l4
+ ldx [%o1+8*5], %l5
+ ldx [%o1+8*6], %l6
+ ldx [%o1+8*7], %l7
+
+ /* turn on speculative mode */
+ ldxa [%g0]ASI_MCNTL, %o1
+ set 1, %o2
+ sllx %o2, MCNTL_SPECULATIVE_SHIFT, %o2
+ andn %o1, %o2, %o1
+ ba 3f
+ nop
+.align 32
+3:
+ stxa %o1, [%g0]ASI_MCNTL
+ membar #Sync
+ /* return error code here */
+ mov %o4, %o0
+ retl
+ wrpr %g0, %o5, %pstate
+
+ /* clear L2_CTRL_UGE_TRAP error bit */
+4:
+ mov ASI_L2_CTRL_RW_ADDR, %l0
+ ldxa [%l0]ASI_L2_CTRL, %l1
+ sethi %hi(ASI_L2_CTRL_UGE_TRAP), %l2
+ btst %l1, %l2
+ bz,pn %xcc, 5f
+ nop
+ stxa %l2, [%l0]ASI_L2_CTRL
+5:
+ /* set up the register locations and parameters */
+ ldx [%o0 + SCF_REG_BASE], %l0
+ ldx [%o0 + SCF_TD], %l1
+ ldx [%o0 + SCF_TD+8], %l2
+ ldx [%o0 + DELAY], %l5
+
+ /* check if SCF is busy */
+ add %l0, SCF_COMMAND, %o1
+ lduha [%o1]ASI_IO, %o2
+ sethi %hi(SCF_CMD_BUSY), %o3
+ btst %o2, %o3
+ be %xcc, 6f
+ nop
+ ba 1b
+ mov FMEM_SCF_BUSY, %o4
+
+ /* clear STATUS bit */
+6:
+ add %l0, SCF_STATUS, %o1
+ lduha [%o1]ASI_IO, %o2
+ sethi %hi(SCF_STATUS_READY), %o3
+ btst %o2, %o3
+ be %xcc, 7f
+ nop
+ stha %o3, [%o1]ASI_IO
+
+ /* clear CMD_COMPLETE bit */
+7:
+ mov SCF_STATUS_CMD_COMPLETE, %o3
+ btst %o2, %o3
+ be,a %xcc, 8f
+ nop
+ stha %o3, [%o1]ASI_IO
+8:
+ add %l0, (SCF_TDATA+0xe), %o1
+ mov %l2, %o4
+ mov SCF_RETRY_CNT, %o5
+
+ sethi %hi(0xffff), %l2
+ or %l2, %lo(0xffff), %l2
+
+ and %o4, %l2, %o3
+
+	/*
+	 * o1 points to SCFBASE.SCF_TDATA[0xe]
+	 * l0 points to SCFBASE
+	 * critical->SCF_TD[0] = source board #
+	 * critical->SCF_TD[1] = target board #
+	 * l1 = critical->SCF_TD[0 - 7]
+	 * l2 = 0xffff
+	 * o4 = critical->SCF_TD[8 - 15]
+	 * o3 = (*o4) & 0xffff
+	 */
+ /*
+ * Because there is no parity protection on the ebus
+ * we read the data back after the write to verify
+ * we write 2 bytes at a time.
+ * If the data read is not the same as data written
+ * we retry up to a limit of FMEM_RETRY_OUT
+ */
+9:
+ stha %o3, [%o1]ASI_IO
+ lduha [%o1]ASI_IO, %o2
+ sub %o5, 1, %o5
+ brz,a %o5, 1b
+ mov FMEM_RETRY_OUT, %o4
+ cmp %o2, %o3
+ bne,a 9b
+ nop
+
+ sub %o1, %l0, %o2
+ cmp %o2, (SCF_TDATA+0x8)
+ bne %xcc, 2f
+ srlx %o4, 16, %o4
+ mov %l1, %o4
+
+ /* if we have reach TDATA+8, we switch to l1 */
+ /* XXX: Why we need 2 loops??? */
+2:
+ sub %o1, 2, %o1
+ mov SCF_RETRY_CNT, %o5
+ and %o4, %l2, %o3
+
+ sub %o1, %l0, %o2
+ cmp %o2, (SCF_TDATA)
+ bge,a 9b
+ nop
+
+ /* if we reach TDATA, we are done */
+
+ /* read from SCF back to our buffer for debugging */
+ add %l0, (SCF_TDATA), %o1
+ ldxa [%o1]ASI_IO, %o2
+ stx %o2, [%o0+SCF_TD]
+
+ add %l0, (SCF_TDATA+8), %o1
+ ldxa [%o1]ASI_IO, %o2
+ stx %o2, [%o0+SCF_TD+8]
+
+
+ rd STICK, %l1
+ add %l5, %l1, %l5
+
+ /* Now tell SCF to do it */
+ add %l0, SCF_COMMAND, %o1
+
+ /* 0x10A6 is the magic command */
+ sethi %hi(0x10A6), %o2
+ or %o2, %lo(0x10A6), %o2
+ stha %o2, [%o1]ASI_IO
+
+ mov 1, %l7 ! FMEM is issued
+
+ add %l0, SCF_STATUS, %o1
+ sethi %hi(SCF_STATUS_READY), %o2
+ mov SCF_STATUS_CMD_COMPLETE, %o3
+
+ /* read STATUS_READY bit and clear it only if it is set */
+ /* XXX: this STATUS_READY checking seems meaningless */
+3:
+ lduha [%o1]ASI_IO, %o4
+ btst %o2, %o4
+ be %xcc, 4f ! STATUS_READY is not set
+ nop
+ stha %o2, [%o1]ASI_IO ! Clear if the bit is set
+
+ /* check CMD_COMPLETE bit and clear */
+4:
+ btst %o3, %o4
+ be %xcc, 5f ! CMD_COMPLETE is not set
+ nop
+ stha %o3, [%o1]ASI_IO ! Now we are done and clear it
+ mov FMEM_NO_ERROR, %o4
+ ba %xcc, 6f
+ nop
+
+ /* timeout delay checking */
+5:
+ rd STICK, %l2
+ cmp %l5, %l2
+ bge %xcc, 3b
+ nop
+ mov FMEM_TIMEOUT, %o4
+
+ /* we are done or timed out */
+6:
+ ba,a 1b
+ nop
+ SET_SIZE(drmach_fmem_exec_script)
+#endif /* lint */
+
+#if defined(lint)
+/*ARGSUSED*/
+void
+drmach_fmem_exec_script_end(caddr_t critical, int size)
+{ return; }
+#else /* lint */
+	ENTRY_NP(drmach_fmem_exec_script_end)
+	nop			! marker only: labels the end of the exec script
+	SET_SIZE(drmach_fmem_exec_script_end)
+#endif /* lint */
+
+#if defined(lint)
+uint64_t
+patch_inst(uint64_t *x, uint64_t y)
+{
+ *x = y;
+ return (0);
+}
+
+#else /* lint */
+
+	ENTRY_NP(patch_inst)
+	ldx	[%o0], %o2		! read current doubleword at %o0
+	casx	[%o0], %o2, %o1		! atomically replace it with %o1
+	flush	%o0			! resync instruction fetch with the store
+	membar	#Sync
+	ldx	[%o0], %o2		! read back what is now in memory
+	retl
+	mov	%o2, %o0		! return the final in-memory value
+	SET_SIZE(patch_inst)
+
+#endif /* lint */
+
+#if defined(lint)
+void
+drmach_sys_trap()
+{
+}
+#else /* lint */
+	ENTRY_NP(drmach_sys_trap)
+	mov	-1, %g4		! NOTE(review): %g4 is sys_trap's PIL argument; -1 presumably means "current/max PIL" - confirm against sys_trap contract
+	set	sys_trap, %g5
+	jmp	%g5		! tail-jump into the common trap handler
+	nop
+	SET_SIZE(drmach_sys_trap)
+#endif /* lint */
+
+#if defined(lint)
+uint64_t
+drmach_get_stick()
+{
+ return (0);
+}
+#else /* lint */
+	ENTRY_NP(drmach_get_stick)
+	retl
+	rd	STICK, %o0		! (delay slot) return current %stick counter
+	SET_SIZE(drmach_get_stick)
+#endif /* lint */
+
+#if defined(lint)
+/*ARGSUSED*/
+void
+drmach_flush(void)
+{}
+
+#else /* lint */
+	ENTRY_NP(drmach_flush)
+	mov	%o0, %o2		! %o0 = start address, %o1 = length in bytes
+0:
+	flush	%o2			! flush one location per 8-byte step
+	sub	%o1, 8, %o1
+	brnz	%o1, 0b			! loop until the length is exhausted
+	add	%o2, 8, %o2		! (delay slot) advance the address
+	retl
+	nop
+	SET_SIZE(drmach_flush)
+#endif /* lint */
diff --git a/usr/src/uts/sun4u/opl/ml/drmach_offsets.in b/usr/src/uts/sun4u/opl/ml/drmach_offsets.in
new file mode 100644
index 0000000000..de88434a7e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/ml/drmach_offsets.in
@@ -0,0 +1,60 @@
+\
+\ Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+\ Use is subject to license terms.
+\
+\ CDDL HEADER START
+\
+\ The contents of this file are subject to the terms of the
+\ Common Development and Distribution License (the "License").
+\ You may not use this file except in compliance with the License.
+\
+\ You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+\ or http://www.opensolaris.org/os/licensing.
+\ See the License for the specific language governing permissions
+\ and limitations under the License.
+\
+\ When distributing Covered Code, include this CDDL HEADER in each
+\ file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+\ If applicable, add the following below this CDDL HEADER, with the
+\ fields enclosed by brackets "[]" replaced with your own identifying
+\ information: Portions Copyright [yyyy] [name of copyright owner]
+\
+\ CDDL HEADER END
+\
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/sysmacros.h>
+#include <sys/param.h>
+#include <sys/machparam.h>
+#include <sys/systm.h>
+#include <sys/machsystm.h>
+#include <sys/cpuvar.h>
+
+#include <sys/drmach.h>
+
+drmach_copy_rename_critical_t DRMACH_COPY_RENAME_CRITICAL_SIZE
+ scf_reg_base
+ scf_td
+ save_log
+ save_local
+ pstate
+ delay
+ run
+ fmem
+ loop
+ loop_rtn
+ inst_loop_ret
+ fmem_issued
+
+drmach_scf_regs_t SCF_REG_SIZE
+ scf_command
+ scf_status
+ scf_version
+ scf_tdata
+ scf_rdata
+
+drmach_fmem_mbox_t FMEM_MBOX_SIZE
+ stat
+ error
diff --git a/usr/src/uts/sun4u/opl/olympus_c/Makefile b/usr/src/uts/sun4u/opl/olympus_c/Makefile
new file mode 100644
index 0000000000..1cfb812323
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/olympus_c/Makefile
@@ -0,0 +1,116 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the OPL specific
+# Olympus-C driver module.
+#
+# uts/sun4u/opl/olympus_c/Makefile
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = FJSV,SPARC64-VI
+OBJECTS = $(OLYMPUS_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OLYMPUS_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_CPU_DIR)/$(MODULE)
+
+CPU_DIR = .
+HERE = ../olympus_c
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Override defaults
+#
+CLEANFILES += $(CPULIB) $(SYM_MOD)
+
+#
+# Define targets
+#
+ALL_TARGET = $(SYM_MOD)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = def $(BINARY) $(ROOTMODULE) $(ROOTSOFTLINKS)
+
+#
+# Overrides
+#
+ALL_BUILDS = $(ALL_BUILDSONLY64)
+DEF_BUILDS = $(DEF_BUILDSONLY64)
+CLEANLINTFILES += $(LINT32_FILES)
+
+#
+# lint pass one enforcement
+#
+OLYMPUS_C_DEFS += -DOLYMPUS_C
+CFLAGS += $(CCVERBOSE) $(OLYMPUS_C_DEFS)
+
+CPPFLAGS += -DCPU_MODULE -DOLYMPUS_C
+AS_CPPFLAGS += -DCPU_MODULE -DOLYMPUS_C
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+$(CPULIB): $(BINARY)
+ $(BUILD.SO) $(BINARY)
+
+$(SYM_MOD): $(UNIX_O) $(CPULIB)
+ @$(ECHO) "resolving symbols against unix.o"
+ @(cd $(UNIX_DIR); pwd; \
+ CPU_DIR=$(HERE) SYM_MOD=$(HERE)/$(SYM_MOD) $(MAKE) symcheck)
+
+$(ROOTSOFTLINKS): $(ROOTMODULE)
+ $(RM) $@; $(SYMLINK) $(MODULE) $@ $(CHOWNLINK) $(CHGRPLINK)
+
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/oplkmdrv/Makefile b/usr/src/uts/sun4u/opl/oplkmdrv/Makefile
new file mode 100644
index 0000000000..6645855b82
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/oplkmdrv/Makefile
@@ -0,0 +1,100 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the oplkmdrv driver
+# kernel module.
+#
+# sun4u implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+INC_PATH += -I$(UTSBASE)/sun4u/starcat/
+
+#
+# Define the module and object file sets.
+#
+
+MODULE = oplkmdrv
+OBJECTS = $(OPLKM_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPLKM_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS += -dalign
+
+#
+# Dependency
+#
+LDFLAGS += -dy -Ndrv/scfd
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/oplmsu/Makefile b/usr/src/uts/sun4u/opl/oplmsu/Makefile
new file mode 100644
index 0000000000..851e374f41
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/oplmsu/Makefile
@@ -0,0 +1,88 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident	"%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the oplmsu driver
+# loadable module.
+#
+# sun4u opl implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = oplmsu
+OBJECTS = $(OPLMSU_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPLMSU_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE) -I../sys
+LDFLAGS += -dy -Ndrv/su
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/oplpanel/Makefile b/usr/src/uts/sun4u/opl/oplpanel/Makefile
new file mode 100644
index 0000000000..5d2f757b40
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/oplpanel/Makefile
@@ -0,0 +1,87 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident	"%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the oplpanel driver
+# loadable module.
+#
+# sun4u opl implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = oplpanel
+OBJECTS = $(OPL_PANEL_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPL_PANEL_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io/oplpanel
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE) -I../sys
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/options/Makefile b/usr/src/uts/sun4u/opl/options/Makefile
new file mode 100644
index 0000000000..44b6f3a7b3
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/options/Makefile
@@ -0,0 +1,86 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# uts/sun4u/opl/options/Makefile
+#
+# This makefile drives the production of the opl options conf file
+#
+# sparc architecture dependent
+#
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = options
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(SRC_CONFFILE)
+LINT_TARGET =
+INSTALL_TARGET = $(ROOT_OPL_DRV_DIR) $(ROOT_CONFFILE)
+
+#
+# Overrides.
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sparc/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/os/opl.c b/usr/src/uts/sun4u/opl/os/opl.c
new file mode 100644
index 0000000000..c9cfe717ae
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/os/opl.c
@@ -0,0 +1,855 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/cpuvar.h>
+#include <sys/systm.h>
+#include <sys/sysmacros.h>
+#include <sys/promif.h>
+#include <sys/platform_module.h>
+#include <sys/cmn_err.h>
+#include <sys/errno.h>
+#include <sys/machsystm.h>
+#include <sys/bootconf.h>
+#include <sys/nvpair.h>
+#include <sys/kobj.h>
+#include <sys/mem_cage.h>
+#include <sys/opl.h>
+#include <sys/scfd/scfostoescf.h>
+#include <sys/cpu_sgnblk_defs.h>
+#include <sys/utsname.h>
+#include <sys/ddi.h>
+#include <sys/sunndi.h>
+#include <sys/lgrp.h>
+#include <sys/memnode.h>
+#include <sys/sysmacros.h>
+#include <vm/vm_dep.h>
+
+int (*opl_get_mem_unum)(int, uint64_t, char *, int, int *);
+
+/* Memory for fcode claims. 16k times # maximum possible IO units */
+#define EFCODE_SIZE (OPL_MAX_BOARDS * OPL_MAX_IO_UNITS_PER_BOARD * 0x4000)
+int efcode_size = EFCODE_SIZE;
+
+#define	OPL_MC_MEMBOARD_SHIFT	38	/* Boards on 256GB boundary */
+
+/* Set the maximum number of boards for DR */
+int opl_boards = OPL_MAX_BOARDS;
+
+void sgn_update_all_cpus(ushort_t, uchar_t, uchar_t);
+
+extern int tsb_lgrp_affinity;
+
+int opl_tsb_spares = (OPL_MAX_BOARDS) * (OPL_MAX_PCICH_UNITS_PER_BOARD) *
+ (OPL_MAX_TSBS_PER_PCICH);
+
+pgcnt_t opl_startup_cage_size = 0;
+
+static struct memlist *opl_memlist_per_board(struct memlist *ml);
+
+int
+set_platform_max_ncpus(void)
+{
+ return (OPL_MAX_CPU_PER_BOARD * OPL_MAX_BOARDS);
+}
+
+int
+set_platform_tsb_spares(void)
+{
+ return (MIN(opl_tsb_spares, MAX_UPA));
+}
+
+#pragma weak mmu_init_large_pages
+
+void
+set_platform_defaults(void)
+{
+ extern char *tod_module_name;
+ extern void cpu_sgn_update(ushort_t, uchar_t, uchar_t, int);
+ extern int ts_dispatch_extended;
+ extern void mmu_init_large_pages(size_t);
+
+ /* Set the CPU signature function pointer */
+ cpu_sgn_func = cpu_sgn_update;
+
+ /* Set appropriate tod module for OPL platform */
+ ASSERT(tod_module_name == NULL);
+ tod_module_name = "todopl";
+
+ /*
+ * Use the alternate TS dispatch table, which is better tuned
+ * for large servers.
+ */
+ if (ts_dispatch_extended == -1)
+ ts_dispatch_extended = 1;
+
+ if ((mmu_page_sizes == max_mmu_page_sizes) &&
+ (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
+ if (&mmu_init_large_pages)
+ mmu_init_large_pages(mmu_ism_pagesize);
+ }
+
+ tsb_lgrp_affinity = 1;
+}
+
+/*
+ * Convert a logical board number to a physical one.
+ */
+
+#define LSBPROP "board#"
+#define PSBPROP "physical-board#"
+
+int
+opl_get_physical_board(int id)
+{
+	dev_info_t *root_dip, *dip = NULL;
+	char *dname = NULL;
+	int circ;
+
+	pnode_t pnode;
+	char pname[MAXSYSNAME] = {0};	/* OBP node-name buffer */
+
+	int lsb_id;	/* Logical System Board ID */
+	int psb_id;	/* Physical System Board ID */
+
+
+	/*
+	 * This function is called at an early stage of boot, when the
+	 * kernel device tree is not yet initialized, and also later
+	 * when the device tree is up.  Try the (fast) kernel device
+	 * tree first, then fall back to the OBP tree below.
+	 */
+	root_dip = ddi_root_node();
+	if (root_dip) {
+		/* Get from devinfo node */
+		ndi_devi_enter(root_dip, &circ);
+		for (dip = ddi_get_child(root_dip); dip;
+		    dip = ddi_get_next_sibling(dip)) {
+
+			dname = ddi_node_name(dip);
+			if (strncmp(dname, "pseudo-mc", 9) != 0)
+				continue;
+
+			if ((lsb_id = (int)ddi_getprop(DDI_DEV_T_ANY, dip,
+			    DDI_PROP_DONTPASS, LSBPROP, -1)) == -1)
+				continue;
+
+			if (id == lsb_id) {
+				if ((psb_id = (int)ddi_getprop(DDI_DEV_T_ANY,
+				    dip, DDI_PROP_DONTPASS, PSBPROP, -1))
+				    == -1) {
+					ndi_devi_exit(root_dip, circ);
+					return (-1);
+				} else {
+					ndi_devi_exit(root_dip, circ);
+					return (psb_id);
+				}
+			}
+		}
+		ndi_devi_exit(root_dip, circ);
+	}
+
+	/*
+	 * The kernel device tree is unavailable, or the node was not
+	 * found there (e.g. the kernel device tree was modified);
+	 * fall back to the OBP device tree.
+	 */
+	pnode = prom_rootnode();
+	for (pnode = prom_childnode(pnode); pnode;
+	    pnode = prom_nextnode(pnode)) {
+
+		if ((prom_getprop(pnode, "name", (caddr_t)pname) == -1) ||
+		    (strncmp(pname, "pseudo-mc", 9) != 0))
+			continue;
+
+		if (prom_getprop(pnode, LSBPROP, (caddr_t)&lsb_id) == -1)
+			continue;
+
+		if (id == lsb_id) {
+			if (prom_getprop(pnode, PSBPROP,
+			    (caddr_t)&psb_id) == -1) {
+				return (-1);
+			} else {
+				return (psb_id);
+			}
+		}
+	}
+
+	return (-1);	/* board not found in either tree */
+}
+
+/*
+ * For OPL it's possible that memory from two or more successive boards
+ * will be contiguous across the boards, and therefore represented as a
+ * single chunk.
+ * This function splits such chunks down the board boundaries.
+ */
+static struct memlist *
+opl_memlist_per_board(struct memlist *ml)
+{
+	uint64_t ssize, low, high, boundary;
+	struct memlist *head, *tail, *new;
+
+	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);	/* one board's span */
+
+	head = tail = NULL;
+
+	for (; ml; ml = ml->next) {
+		low = (uint64_t)ml->address;
+		high = low+(uint64_t)(ml->size);	/* [low, high) */
+		while (low < high) {
+			boundary = roundup(low+1, ssize);	/* next board edge */
+			boundary = MIN(high, boundary);
+			new = kmem_zalloc(sizeof (struct memlist), KM_SLEEP);
+			new->address = low;
+			new->size = boundary - low;
+			if (head == NULL)
+				head = new;
+			if (tail) {
+				tail->next = new;
+				new->prev = tail;
+			}
+			tail = new;	/* append to doubly-linked result */
+			low = boundary;
+		}
+	}
+	return (head);	/* caller frees each element with kmem_free() */
+}
+
+void
+set_platform_cage_params(void)
+{
+	extern pgcnt_t total_pages;
+	extern struct memlist *phys_avail;
+	struct memlist *ml, *tml;
+	int ret;
+
+	if (kernel_cage_enable) {
+		pgcnt_t preferred_cage_size;
+
+		preferred_cage_size =
+			MAX(opl_startup_cage_size, total_pages / 256);
+
+		ml = opl_memlist_per_board(phys_avail);
+
+		kcage_range_lock();
+		/*
+		 * Note: we assume that POST has loaded the whole
+		 * show into the high end of memory.  Having taken
+		 * this leap, we copy the whole of phys_avail (split
+		 * per board above) into the glist and arrange for
+		 * the cage to grow downward (descending pfns).
+		 */
+		ret = kcage_range_init(ml, 1);	/* 1: descending order */
+
+		/* free the memlist */
+		do {
+			tml = ml->next;
+			kmem_free(ml, sizeof (struct memlist));
+			ml = tml;
+		} while (ml != NULL);
+
+		if (ret == 0)
+			kcage_init(preferred_cage_size);
+		kcage_range_unlock();
+	}
+
+	if (kcage_on)
+		cmn_err(CE_NOTE, "!DR Kernel Cage is ENABLED");
+	else
+		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
+}
+
+/*ARGSUSED*/
+int
+plat_cpu_poweron(struct cpu *cp)
+{
+ int (*opl_cpu_poweron)(struct cpu *) = NULL;
+
+ opl_cpu_poweron =
+ (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweron", 0);
+
+ if (opl_cpu_poweron == NULL)
+ return (ENOTSUP);
+ else
+ return ((opl_cpu_poweron)(cp));
+
+}
+
+/*ARGSUSED*/
+int
+plat_cpu_poweroff(struct cpu *cp)
+{
+ int (*opl_cpu_poweroff)(struct cpu *) = NULL;
+
+ opl_cpu_poweroff =
+ (int (*)(struct cpu *))kobj_getsymvalue("drmach_cpu_poweroff", 0);
+
+ if (opl_cpu_poweroff == NULL)
+ return (ENOTSUP);
+ else
+ return ((opl_cpu_poweroff)(cp));
+
+}
+
+int
+plat_max_boards(void)
+{
+ return (OPL_MAX_BOARDS);
+}
+
+int
+plat_max_cpu_units_per_board(void)
+{
+ return (OPL_MAX_CPU_PER_BOARD);
+}
+
+int
+plat_max_mem_units_per_board(void)
+{
+ return (OPL_MAX_MEM_UNITS_PER_BOARD);
+}
+
+int
+plat_max_io_units_per_board(void)
+{
+ return (OPL_MAX_IO_UNITS_PER_BOARD);
+}
+
+int
+plat_max_cmp_units_per_board(void)
+{
+ return (OPL_MAX_CMP_UNITS_PER_BOARD);
+}
+
+int
+plat_max_core_units_per_board(void)
+{
+ return (OPL_MAX_CORE_UNITS_PER_BOARD);
+}
+
+int
+plat_pfn_to_mem_node(pfn_t pfn)
+{
+ return (pfn >> mem_node_pfn_shift);
+}
+
+/* ARGSUSED */
+void
+plat_build_mem_nodes(u_longlong_t *list, size_t nelems)
+{
+	size_t elem;
+	pfn_t basepfn;
+	pgcnt_t npgs;
+	uint64_t boundary, ssize;
+	uint64_t low, high;
+
+	/*
+	 * OPL mem slices are always aligned on a 256GB boundary.
+	 */
+	mem_node_pfn_shift = OPL_MC_MEMBOARD_SHIFT - MMU_PAGESHIFT;
+	mem_node_physalign = 0;
+
+	/*
+	 * Boot install lists are arranged <addr, len>, <addr, len>, ...
+	 */
+	ssize = (1ull << OPL_MC_MEMBOARD_SHIFT);	/* board stride */
+	for (elem = 0; elem < nelems; elem += 2) {
+		low = (uint64_t)list[elem];
+		high = low+(uint64_t)(list[elem+1]);	/* [addr, addr+len) */
+		while (low < high) {
+			boundary = roundup(low+1, ssize);
+			boundary = MIN(high, boundary);
+			basepfn = btop(low);
+			npgs = btop(boundary - low);
+			mem_node_add_slice(basepfn, basepfn + npgs - 1);
+			low = boundary;	/* one slice per board-aligned chunk */
+		}
+	}
+}
+
+/*
+ * Find the CPU associated with a slice at boot-time.
+ */
+void
+plat_fill_mc(pnode_t nodeid)
+{
+ int board;
+ int memnode;
+ struct {
+ uint64_t addr;
+ uint64_t size;
+ } mem_range;
+
+ if (prom_getprop(nodeid, "board#", (caddr_t)&board) < 0) {
+ panic("Can not find board# property in mc node %x", nodeid);
+ }
+ if (prom_getprop(nodeid, "sb-mem-ranges", (caddr_t)&mem_range) < 0) {
+ panic("Can not find sb-mem-ranges property in mc node %x",
+ nodeid);
+ }
+ memnode = mem_range.addr >> OPL_MC_MEMBOARD_SHIFT;
+ plat_assign_lgrphand_to_mem_node(board, memnode);
+}
+
+/*
+ * Return the platform handle for the lgroup containing the given CPU
+ *
+ * For OPL, lgroup platform handle == board #.
+ */
+
+extern int mpo_disabled;
+extern lgrp_handle_t lgrp_default_handle;
+
+lgrp_handle_t
+plat_lgrp_cpu_to_hand(processorid_t id)
+{
+ lgrp_handle_t plathand;
+
+ /*
+ * Return the real platform handle for the CPU until
+ * such time as we know that MPO should be disabled.
+ * At that point, we set the "mpo_disabled" flag to true,
+ * and from that point on, return the default handle.
+ *
+ * By the time we know that MPO should be disabled, the
+ * first CPU will have already been added to a leaf
+ * lgroup, but that's ok. The common lgroup code will
+ * double check that the boot CPU is in the correct place,
+ * and in the case where mpo should be disabled, will move
+ * it to the root if necessary.
+ */
+ if (mpo_disabled) {
+ /* If MPO is disabled, return the default (UMA) handle */
+ plathand = lgrp_default_handle;
+ } else
+ plathand = (lgrp_handle_t)LSB_ID(id);
+ return (plathand);
+}
+
+/*
+ * Platform specific lgroup initialization
+ */
+void
+plat_lgrp_init(void)
+{
+ extern uint32_t lgrp_expand_proc_thresh;
+ extern uint32_t lgrp_expand_proc_diff;
+
+ /*
+ * Set tuneables for the OPL architecture
+ *
+ * lgrp_expand_proc_thresh is the minimum load on the lgroups
+ * this process is currently running on before considering
+ * expanding threads to another lgroup.
+ *
+ * lgrp_expand_proc_diff determines how much less the remote lgroup
+ * must be loaded before expanding to it.
+ *
+ * Since remote latencies can be costly, attempt to keep 3 threads
+ * within the same lgroup before expanding to the next lgroup.
+ */
+ lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX * 3;
+ lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
+}
+
+/*
+ * Platform notification of lgroup (re)configuration changes
+ */
+/*ARGSUSED*/
+void
+plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
+{
+	update_membounds_t *umb;
+	lgrp_config_mem_rename_t lmr;
+	int sbd, tbd;
+	lgrp_handle_t hand, shand, thand;
+	int mnode, snode, tnode;
+	pfn_t start, end;
+
+	if (mpo_disabled)
+		return;
+
+	switch (evt) {
+
+	case LGRP_CONFIG_MEM_ADD:
+		/*
+		 * Establish the lgroup handle to memnode translation.
+		 */
+		umb = (update_membounds_t *)arg;
+
+		hand = umb->u_board;
+		mnode = plat_pfn_to_mem_node(umb->u_base >> MMU_PAGESHIFT);
+		plat_assign_lgrphand_to_mem_node(hand, mnode);
+
+		break;
+
+	case LGRP_CONFIG_MEM_DEL:
+		/*
+		 * Special handling for possible memory holes.
+		 */
+		umb = (update_membounds_t *)arg;
+		hand = umb->u_board;
+		if ((mnode = plat_lgrphand_to_mem_node(hand)) != -1) {
+			if (mem_node_config[mnode].exists) {
+				start = mem_node_config[mnode].physbase;
+				end = mem_node_config[mnode].physmax;
+				mem_node_pre_del_slice(start, end);
+				mem_node_post_del_slice(start, end, 0);
+			}
+		}
+
+		break;
+
+	case LGRP_CONFIG_MEM_RENAME:
+		/*
+		 * During a DR copy-rename operation, all of the memory
+		 * on one board is moved to another board -- but the
+		 * addresses/pfns and memnodes don't change. This means
+		 * the memory has changed locations without changing identity.
+		 *
+		 * Source is where we are copying from and target is where we
+		 * are copying to. After source memnode is copied to target
+		 * memnode, the physical addresses of the target memnode are
+		 * renamed to match what the source memnode had. Then target
+		 * memnode can be removed and source memnode can take its
+		 * place.
+		 *
+		 * To do this, swap the lgroup handle to memnode mappings for
+		 * the boards, so target lgroup will have source memnode and
+		 * source lgroup will have empty target memnode which is where
+		 * its memory will go (if any is added to it later).
+		 *
+		 * Then source memnode needs to be removed from its lgroup
+		 * and added to the target lgroup where the memory was living
+		 * but under a different name/memnode. The memory was in the
+		 * target memnode and now lives in the source memnode with
+		 * different physical addresses even though it is the same
+		 * memory.
+		 */
+		sbd = arg & 0xffff;
+		tbd = (arg & 0xffff0000) >> 16;
+		shand = sbd;
+		thand = tbd;
+		snode = plat_lgrphand_to_mem_node(shand);
+		tnode = plat_lgrphand_to_mem_node(thand);
+
+		/*
+		 * Special handling for possible memory holes.
+		 */
+		if (tnode != -1 && mem_node_config[tnode].exists) {
+			start = mem_node_config[tnode].physbase;
+			end = mem_node_config[tnode].physmax;
+			mem_node_pre_del_slice(start, end);
+			mem_node_post_del_slice(start, end, 0);
+		}
+
+		plat_assign_lgrphand_to_mem_node(thand, snode);
+		plat_assign_lgrphand_to_mem_node(shand, tnode);
+
+		lmr.lmem_rename_from = shand;
+		lmr.lmem_rename_to = thand;
+
+		/*
+		 * Remove source memnode of copy rename from its lgroup
+		 * and add it to its new target lgroup
+		 */
+		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
+		    (uintptr_t)&lmr);
+
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Return latency between "from" and "to" lgroups
+ *
+ * This latency number can only be used for relative comparison
+ * between lgroups on the running system, cannot be used across platforms,
+ * and may not reflect the actual latency. It is platform and implementation
+ * specific, so platform gets to decide its value. It would be nice if the
+ * number was at least proportional to make comparisons more meaningful though.
+ * NOTE: The numbers below are supposed to be load latencies for uncached
+ * memory divided by 10.
+ *
+ * XXX latency values for Columbus, not Columbus2. Should be fixed later when
+ * we know the actual numbers for Columbus2.
+ */
+int
+plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
+{
+ /*
+ * Return min remote latency when there are more than two lgroups
+ * (root and child) and getting latency between two different lgroups
+ * or root is involved
+ */
+ if (lgrp_optimizations() && (from != to ||
+ from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
+ return (27);
+ else
+ return (25);
+}
+
+/*
+ * Return platform handle for root lgroup
+ */
+lgrp_handle_t
+plat_lgrp_root_hand(void)
+{
+ if (mpo_disabled)
+ return (lgrp_default_handle);
+
+ return (LGRP_DEFAULT_HANDLE);
+}
+
+/*ARGSUSED*/
+void
+plat_freelist_process(int mnode)
+{
+}
+
+void
+load_platform_drivers(void)
+{
+ (void) i_ddi_attach_pseudo_node("dr");
+}
+
+/*
+ * No platform drivers on this platform
+ */
+char *platform_module_list[] = {
+ (char *)0
+};
+
+/*ARGSUSED*/
+void
+plat_tod_fault(enum tod_fault_type tod_bad)
+{
+}
+
+/*ARGSUSED*/
+void
+cpu_sgn_update(ushort_t sgn, uchar_t state, uchar_t sub_state, int cpuid)
+{
+	static void (*scf_panic_callback)(int);
+	static void (*scf_shutdown_callback)(int);
+
+	/*
+	 * This is for notifying the SCF of system panic/shutdown.
+	 * In case of shutdown or panic, the SCF callback
+	 * function should be called.
+	 * <SCF callback functions>
+	 * scf_panic_callb() : panicsys()->panic_quiesce_hw()
+	 * scf_shutdown_callb(): halt() or power_down() or reboot_machine()
+	 * cpuid should be -1 and state should be SIGST_EXIT.
+	 */
+	if (state == SIGST_EXIT && cpuid == -1) {
+
+		/*
+		 * find the symbol for the SCF panic callback routine in driver
+		 */
+		if (scf_panic_callback == NULL)
+			scf_panic_callback = (void (*)(int))
+			    modgetsymvalue("scf_panic_callb", 0);
+		if (scf_shutdown_callback == NULL)
+			scf_shutdown_callback = (void (*)(int))
+			    modgetsymvalue("scf_shutdown_callb", 0);
+
+		switch (sub_state) {
+		case SIGSUBST_PANIC:
+			if (scf_panic_callback == NULL) {
+				cmn_err(CE_NOTE, "!cpu_sgn_update: "
+				    "scf_panic_callb not found\n");
+				return;
+			}
+			scf_panic_callback(SIGSUBST_PANIC);
+			break;
+
+		case SIGSUBST_HALT:
+			if (scf_shutdown_callback == NULL) {
+				cmn_err(CE_NOTE, "!cpu_sgn_update: "
+				    "scf_shutdown_callb not found\n");
+				return;
+			}
+			scf_shutdown_callback(SIGSUBST_HALT);
+			break;
+
+		case SIGSUBST_ENVIRON:
+			if (scf_shutdown_callback == NULL) {
+				cmn_err(CE_NOTE, "!cpu_sgn_update: "
+				    "scf_shutdown_callb not found\n");
+				return;
+			}
+			scf_shutdown_callback(SIGSUBST_ENVIRON);
+			break;
+
+		case SIGSUBST_REBOOT:
+			if (scf_shutdown_callback == NULL) {
+				cmn_err(CE_NOTE, "!cpu_sgn_update: "
+				    "scf_shutdown_callb not found\n");
+				return;
+			}
+			scf_shutdown_callback(SIGSUBST_REBOOT);
+			break;
+		}
+	}
+}
+
+/*ARGSUSED*/
+int
+plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
+ int flt_in_memory, ushort_t flt_status,
+ char *buf, int buflen, int *lenp)
+{
+ /*
+ * check if it's a Memory error.
+ */
+ if (flt_in_memory) {
+ if (opl_get_mem_unum != NULL) {
+ return (opl_get_mem_unum(synd_code, flt_addr,
+ buf, buflen, lenp));
+ } else {
+ return (ENOTSUP);
+ }
+ } else {
+ return (ENOTSUP);
+ }
+}
+
+/*ARGSUSED*/
+int
+plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
+{
+	int sb;	/* int, not uint_t: opl_get_physical_board() returns -1 */
+
+	sb = opl_get_physical_board(LSB_ID(cpuid));
+	if (sb == -1) {
+		return (ENXIO);
+	}
+
+	if (snprintf(buf, buflen, "CMU%d", sb) >= buflen) {
+		return (ENOSPC);	/* unum truncated */
+	} else {
+		if (lenp)
+			*lenp = strlen(buf);
+		return (0);
+	}
+}
+
+#define	SCF_PUTINFO(f, s, p)	\
+	f(KEY_ESCF, 0x01, 0, s, p)
+void
+plat_nodename_set(void)
+{
+	void *datap;
+	static int (*scf_service_function)(uint32_t, uint8_t,
+	    uint32_t, uint32_t, void *);
+	int counter = 5;	/* retries while SCF returns EBUSY */
+
+	/*
+	 * find the symbol for the SCF put routine in driver
+	 */
+	if (scf_service_function == NULL)
+		scf_service_function =
+		    (int (*)(uint32_t, uint8_t, uint32_t, uint32_t, void *))
+		    modgetsymvalue("scf_service_putinfo", 0);
+
+	/*
+	 * If the symbol was found, call it. Otherwise, log a note (but not to
+	 * the console).
+	 */
+
+	if (scf_service_function == NULL) {
+		cmn_err(CE_NOTE,
+		    "!plat_nodename_set: scf_service_putinfo not found\n");
+		return;
+	}
+
+	datap =
+	    (struct utsname *)kmem_zalloc(sizeof (struct utsname), KM_SLEEP);
+
+	if (datap == NULL) {	/* KM_SLEEP never fails; defensive only */
+		return;
+	}
+
+	bcopy((struct utsname *)&utsname,
+	    (struct utsname *)datap, sizeof (struct utsname));
+
+	while ((SCF_PUTINFO(scf_service_function,
+	    sizeof (struct utsname), datap) == EBUSY) && (counter-- > 0)) {
+		delay(10 * drv_usectohz(1000000));	/* 10s between tries */
+	}
+	if (counter < 0)	/* < 0: retries exhausted, still EBUSY */
+		cmn_err(CE_NOTE,
+		    "!plat_nodename_set: "
+		    "scf_service_putinfo not responding\n");
+
+	kmem_free(datap, sizeof (struct utsname));
+}
+
+caddr_t efcode_vaddr = NULL;
+
+/*
+ * Preallocate enough memory for fcode claims.
+ */
+
+caddr_t
+efcode_alloc(caddr_t alloc_base)
+{
+ caddr_t efcode_alloc_base = (caddr_t)roundup((uintptr_t)alloc_base,
+ MMU_PAGESIZE);
+ caddr_t vaddr;
+
+ /*
+ * allocate the physical memory for the Oberon fcode.
+ */
+ if ((vaddr = (caddr_t)BOP_ALLOC(bootops, efcode_alloc_base,
+ efcode_size, MMU_PAGESIZE)) == NULL)
+ cmn_err(CE_PANIC, "Cannot allocate Efcode Memory");
+
+ efcode_vaddr = vaddr;
+
+ return (efcode_alloc_base + efcode_size);
+}
+
+caddr_t
+plat_startup_memlist(caddr_t alloc_base)
+{
+ caddr_t tmp_alloc_base;
+
+ tmp_alloc_base = efcode_alloc(alloc_base);
+ tmp_alloc_base =
+ (caddr_t)roundup((uintptr_t)tmp_alloc_base, ecache_alignsize);
+ return (tmp_alloc_base);
+}
+
+void
+startup_platform(void)
+{
+}
diff --git a/usr/src/uts/sun4u/opl/pcicmu/Makefile b/usr/src/uts/sun4u/opl/pcicmu/Makefile
new file mode 100644
index 0000000000..f610c04f77
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/pcicmu/Makefile
@@ -0,0 +1,93 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the pcicmu driver kernel module
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/pcicmu/Makefile
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = pcicmu
+OBJECTS = $(PCICMU_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(PCICMU_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE) -I../sys
+
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS += -dalign
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/platmod/Makefile b/usr/src/uts/sun4u/opl/platmod/Makefile
new file mode 100644
index 0000000000..92016eac00
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/platmod/Makefile
@@ -0,0 +1,97 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the sun4u opl platform
+# module.
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/platmod/Makefile
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+#
+# Define the module and object file sets.
+#
+MODULE = platmod
+OBJECTS = $(OPL_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPL_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_MISC_DIR)/$(MODULE)
+PLAT_DIR = .
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Override defaults
+#
+CLEANFILES += $(PLATLIB)
+
+#
+# Define targets
+#
+ALL_TARGET = $(PLATLIB)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+$(PLATLIB): $(BINARY)
+ $(LD) -o $(PLATLIB) -G $(BINARY) -h misc/platmod
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/scfd/Makefile b/usr/src/uts/sun4u/opl/scfd/Makefile
new file mode 100644
index 0000000000..79c2ff8813
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/scfd/Makefile
@@ -0,0 +1,88 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the scfd driver
+# loadable module.
+#
+# sun4u opl implementation architecture dependent
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+# uts/sun4u/opl/scfd/Makefile
+
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = scfd
+OBJECTS = $(SCFD_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(SCFD_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_OPL_DRV_DIR)/$(MODULE)
+CONF_SRCDIR = $(UTSBASE)/sun4u/opl/io/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY) $(SRC_CONFFILE)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE) $(ROOT_CONFFILE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/sys/Makefile b/usr/src/uts/sun4u/opl/sys/Makefile
new file mode 100644
index 0000000000..254fe817e4
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/Makefile
@@ -0,0 +1,124 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# uts/sun4u/opl/sys/Makefile
+#
+
+
+UTSBASE = ../../..
+
+#
+# include global definitions
+#
+include ../Makefile.opl
+
+#
+# Override defaults.
+#
+FILEMODE = 644
+OWNER = bin
+GROUP = bin
+
+HDRS= opl_hwdesc.h
+
+CHKHDRS= dm2s.h \
+ drmach.h \
+ fiomp.h \
+ mc-opl.h \
+ oplkm.h \
+ oplkm_msg.h
+
+OPLMSUHDRS= oplmsu.h \
+ oplmsu_proto.h
+
+PCICMUHDRS= pcicmu.h \
+ pcmu_cb.h \
+ pcmu_counters.h \
+ pcmu_ecc.h \
+ pcmu_err.h \
+ pcmu_ib.h \
+ pcmu_pbm.h \
+ pcmu_types.h \
+ pcmu_util.h
+
+SCFDHDRS= iomp_drv.h \
+ opcio.h \
+ scfdebug.h \
+ scfdscp.h \
+ scfdscpif.h \
+ scfio32.h \
+ scfkstat.h \
+ scfostoescf.h \
+ scfparam.h \
+ scfreg.h \
+ scfsnap.h \
+ scfstate.h \
+ scfsys.h \
+ scftimer.h \
+ scftrace.h
+
+NGDRHDRS= $(UTSBASE)/sun4u/ngdr/sys/dr.h \
+ $(UTSBASE)/sun4u/ngdr/sys/dr_util.h
+
+ROOTHDRS= $(HDRS:%=$(USR_OPL_ISYS_DIR)/%)
+
+ROOTDIR= $(ROOT)/usr/share/src
+ROOTDIRS= $(ROOTDIR)/uts $(ROOTDIR)/uts/$(PLATFORM)
+
+ROOTLINK= $(ROOTDIR)/uts/$(PLATFORM)/sys
+LINKDEST= ../../../../platform/$(PLATFORM)/include/sys
+
+CHECKHDRS= $(HDRS:%.h=%.check) \
+ $(CHKHDRS:%.h=%.check) \
+ $(OPLMSUHDRS:%.h=oplmsu/%.check) \
+ $(PCICMUHDRS:%.h=pcicmu/%.check) \
+ $(SCFDHDRS:%.h=scfd/%.check) \
+ $(NGDRHDRS:%.h=%.check)
+
+.KEEP_STATE:
+
+.PARALLEL: $(CHECKHDRS) $(ROOTHDRS)
+
+install_h: $(ROOTDIRS) .WAIT $(ROOTHDRS) $(ROOTLINK)
+
+check: $(CHECKHDRS)
+
+#
+# install rules
+#
+$(USR_OPL_ISYS_DIR)/%: % $(USR_OPL_ISYS_DIR)
+ $(INS.file)
+
+$(ROOTDIRS):
+ $(INS.dir.bin.bin)
+
+# -r because this used to be a directory and is now a link.
+$(ROOTLINK): $(ROOTDIRS)
+ -$(RM) -r $@; $(SYMLINK) $(LINKDEST) $@ $(CHOWNLINK) $(CHGRPLINK)
+
+FRC:
+
+include ../Makefile.targ
diff --git a/usr/src/uts/sun4u/opl/sys/dm2s.h b/usr/src/uts/sun4u/opl/sys/dm2s.h
new file mode 100644
index 0000000000..9718a1a4d4
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/dm2s.h
@@ -0,0 +1,110 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _DM2S_H
+#define _DM2S_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define DM2S_MAX_SG 20 /* Max. scatter-gather elements */
+#define DM2S_MAX_RETRIES 3 /* Max. number of retries */
+
+/*
+ * Instance structure.
+ */
+typedef struct dm2s {
+ dev_info_t *ms_dip; /* Devinfo pointer */
+ major_t ms_major; /* Major number */
+ uint32_t ms_ppa; /* Device instance */
+ mkey_t ms_key; /* Mailbox key */
+ target_id_t ms_target; /* Target-id */
+
+ ddi_iblock_cookie_t ms_ibcookie; /* Interrupt block cookie */
+ kmutex_t ms_lock; /* Lock to protect this structure */
+ kcondvar_t ms_wait; /* Cond. var to signal events */
+
+ uint32_t ms_mtu; /* MTU supported */
+ queue_t *ms_rq; /* Read side queue */
+ queue_t *ms_wq; /* Write side queue */
+ uint32_t ms_state; /* State of the device */
+
+ uint32_t ms_retries; /* Number of retries */
+ timeout_id_t ms_rq_timeoutid; /* Timeout id for read queue */
+ timeout_id_t ms_wq_timeoutid; /* Timeout id for write queue */
+ bufcall_id_t ms_rbufcid; /* Bufcall-id for the read */
+
+ uint64_t ms_obytes; /* Number of output bytes */
+ uint64_t ms_ibytes; /* Number of input bytes */
+
+ uint32_t ms_clean; /* Cleanup flags */
+ mscat_gath_t ms_sg_rcv; /* Scatter-gather for receive */
+ mscat_gath_t ms_sg_tx[DM2S_MAX_SG]; /* scatter-gather for Tx */
+} dm2s_t;
+
+/* ms_state flags */
+#define DM2S_MB_INITED 0x00000001 /* Mailbox initialized */
+#define DM2S_MB_CONN 0x00000002 /* Mailbox in connected state */
+#define DM2S_MB_DISC 0x00000004 /* Mailbox is disconnected */
+#define DM2S_OPENED 0x00000008 /* Device opened */
+
+#define DM2S_MBOX_READY(x) ((x)->ms_state & DM2S_MB_CONN)
+
+/* ms_clean flags */
+#define DM2S_CLEAN_LOCK 0x00000001
+#define DM2S_CLEAN_CV 0x00000002
+#define DM2S_CLEAN_NODE 0x00000004
+
+#ifdef DEBUG
+/*
+ * Debug levels
+ */
+#define DBG_DRV 0x01 /* driver related traces */
+#define DBG_MBOX 0x02 /* Mailbox traces */
+#define DBG_MESG 0x04 /* Mailbox Message traces */
+#define DBG_WARN 0x10 /* warning type traces */
+
+static void dm2s_dump_bytes(char *str, uint32_t total_len,
+ uint32_t num_sg, mscat_gath_t *sgp);
+
+#define DPRINTF(f, x) if (f & dm2s_debug) printf x
+#define DMPBYTES(s, l, n, sg) dm2s_dump_bytes(s, l, n, sg)
+
+#else /* DEBUG */
+
+#define DPRINTF(f, x)
+#define DMPBYTES(s, l, n, sg)
+
+#endif /* DEBUG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DM2S_H */
diff --git a/usr/src/uts/sun4u/opl/sys/drmach.h b/usr/src/uts/sun4u/opl/sys/drmach.h
new file mode 100644
index 0000000000..3a336b6422
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/drmach.h
@@ -0,0 +1,355 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_DRMACH_H_
+#define _SYS_DRMACH_H_
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _ASM
+#include <sys/types.h>
+#include <sys/memlist.h>
+#include <sys/processor.h>
+#include <sys/cpuvar.h>
+#include <sys/sbd_ioctl.h>
+#include <sys/sysevent.h>
+#include <sys/ddi.h>
+#include <sys/sunddi.h>
+#include <sys/sunndi.h>
+#include <sys/ddi_impldefs.h>
+#include <sys/pte.h>
+#include <sys/opl.h>
+#endif
+
+
+#define MAX_BOARDS plat_max_boards()
+#define MAX_CPU_UNITS_PER_BOARD plat_max_cpu_units_per_board()
+#define MAX_MEM_UNITS_PER_BOARD plat_max_mem_units_per_board()
+#define MAX_IO_UNITS_PER_BOARD plat_max_io_units_per_board()
+#define MAX_CMP_UNITS_PER_BOARD plat_max_cmp_units_per_board()
+/*
+ * DR uses MAX_CORES_PER_CMP as number of virtual CPU within a CMP
+ */
+#define MAX_CORES_PER_CMP OPL_MAX_CPU_PER_CMP
+
+
+/* returned with drmach_board_find_devices callback */
+#define DRMACH_DEVTYPE_CPU "cpu"
+#define DRMACH_DEVTYPE_MEM "memory"
+#define DRMACH_DEVTYPE_PCI "pci"
+
+#define FMEM_LOOP_START 1
+#define FMEM_LOOP_COPY_READY 2
+#define FMEM_LOOP_COPY_DONE 3
+#define FMEM_LOOP_FMEM_READY 4
+#define FMEM_LOOP_RENAME_DONE 5
+#define FMEM_LOOP_DONE 6
+#define FMEM_LOOP_EXIT 7
+
+#define FMEM_NO_ERROR 0
+#define FMEM_OBP_FAIL 1
+#define FMEM_XC_TIMEOUT 2
+#define FMEM_COPY_TIMEOUT 3
+#define FMEM_SCF_BUSY 4
+#define FMEM_RETRY_OUT 5
+#define FMEM_TIMEOUT 6
+#define FMEM_HW_ERROR 7
+#define FMEM_TERMINATE 8
+#define FMEM_COPY_ERROR 9
+#define FMEM_SCF_ERR 10
+
+#define SCF_CMD_BUSY 0x8000
+#define SCF_STATUS_READY 0x8000
+#define SCF_STATUS_SHUTDOWN 0x4000
+#define SCF_STATUS_POFF 0x2000
+#define SCF_STATUS_EVENT 0x1000
+#define SCF_STATUS_TIMER_ADJUST 0x0800
+#define SCF_STATUS_ALIVE 0x0400
+#define SCF_STATUS_MODE_CHANGED 0x0200
+#define SCF_STATUS_CMD_U_PARITY 0x0100
+#define SCF_STATUS_CMD_RTN_CODE 0x00f0
+#define SCF_STATUS_MODE_SWITCH 0x000c
+#define SCF_STATUS_CMD_COMPLETE 0x0002
+#define SCF_STATUS_CMD_L_PARITY 0x0001
+
+#define SCF_RETRY_CNT 15
+
+#ifndef _ASM
+
+/*
+ * OPL platform specific routines currently only defined
+ * in opl.c and referenced by DR.
+ */
+
+typedef void *drmachid_t;
+
+/*
+ * We have to split up the copy rename data structure
+ * into several pieces:
+ * 1. critical region that must be locked in TLB and must
+ * be physically contiguous/no ecache conflict.
+ * This region contains the assembly code that handles
+ * the rename programming, the slave code that loops
+ * until the master script completes and all data
+ * required to do the programming.
+ *
+ * It also contains the status of each CPU because the
+ * master must wait for all the slaves to get ready before
+ * it can program the SCF.
+ *
+ * We do not need the error code in the critical section.
+ * It is not set until the FMEM is done.
+ * 2. relocatable section that must be locked in TLB. All data
+ * referenced in this section must also be locked in TLB to
+ * avoid tlbmiss.
+ *
+ * We will also put everything else in this section even if it
+ * does not need such protection.
+ */
+typedef struct {
+ int16_t scf_command;
+ int8_t scf_rsv1[2];
+ int16_t scf_status;
+ int8_t scf_rsv2[2];
+ int8_t scf_version;
+ int8_t scf_rsv3[3];
+ int8_t scf_rsv4[4];
+ uint8_t scf_tdata[16];
+ uint8_t scf_rdata[16];
+} drmach_scf_regs_t;
+
+
+
+typedef struct {
+ volatile uint_t stat;
+ volatile uint_t error;
+ int op;
+#define OPL_FMEM_SCF_START 0x1
+#define OPL_FMEM_MC_SUSPEND 0x2
+} drmach_fmem_mbox_t;
+
+typedef struct {
+ uint64_t scf_reg_base;
+ uint8_t scf_td[16];
+ uint64_t save_log[8];
+ uint64_t save_local[8];
+ uint64_t pstate;
+ uint64_t delay;
+ int (*run)(void *arg, int cpuid);
+ int (*fmem)(void *arg, size_t sz);
+ int (*loop)(void *arg1, size_t sz, void *arg2);
+ void (*loop_rtn)(void *arg);
+ uint64_t inst_loop_ret;
+ int fmem_issued;
+ volatile uchar_t stat[NCPU];
+} drmach_copy_rename_critical_t;
+
+typedef struct {
+ uint64_t s_copybasepa;
+ uint64_t t_copybasepa;
+ drmachid_t s_mem;
+ drmachid_t t_mem;
+ cpuset_t cpu_ready_set;
+ cpuset_t cpu_slave_set;
+ cpuset_t cpu_copy_set;
+ processorid_t cpuid;
+ drmach_fmem_mbox_t fmem_status;
+ volatile uchar_t error[NCPU];
+ struct memlist *c_ml;
+ struct memlist *cpu_ml[NCPU];
+ caddr_t locked_va;
+ tte_t locked_tte;
+ void (*mc_resume)(void);
+ int (*scf_fmem_end)(void);
+ int (*scf_fmem_cancel)(void);
+ uint64_t copy_delay;
+ uint64_t stick_freq;
+ uint64_t copy_wait_time;
+ processorid_t slowest_cpuid;
+} drmach_copy_rename_data_t;
+
+typedef struct {
+ uint64_t nbytes[NCPU];
+} drmach_cr_stat_t;
+
+typedef struct {
+ drmach_copy_rename_critical_t *critical;
+ drmach_copy_rename_data_t *data;
+ caddr_t memlist_buffer;
+ struct memlist *free_mlist;
+ drmach_cr_stat_t *stat;
+} drmach_copy_rename_program_t;
+
+#define DRMACH_FMEM_LOCKED_PAGES 4
+#define DRMACH_FMEM_DATA_PAGE 0
+#define DRMACH_FMEM_CRITICAL_PAGE 1
+#define DRMACH_FMEM_MLIST_PAGE 2
+#define DRMACH_FMEM_STAT_PAGE 3
+
+/*
+ * layout of the FMEM buffers:
+ * 1st 8k page
+ * +--------------------------------+
+ * |drmach_copy_rename_program_t |
+ * +--------------------------------+
+ * |drmach_copy_rename_data_t |
+ * | |
+ * +--------------------------------+
+ *
+ * 2nd 8k page
+ * +--------------------------------+
+ * |drmach_copy_rename_critical_t |
+ * | |
+ * +--------------------------------+
+ * |run (drmach_copy_rename_prog__relocatable)
+ * |(roundup boundary to 1K) |
+ * +--------------------------------+
+ * | fmem_script |
+ * |(roundup boundary to 1K) |
+ * +--------------------------------+
+ * |loop_script |
+ * | |
+ * +--------------------------------+
+ * |at least 1K NOP/0's |
+ * | |
+ * +--------------------------------+
+ *
+ * 3rd 8k page
+ * +--------------------------------+
+ * |memlist_buffer (free_mlist) |
+ * | |
+ * +--------------------------------+
+ *
+ * 4th 8k page - drmach_cr_stat_t.
+ *
+ */
+
+typedef struct {
+ boolean_t assigned;
+ boolean_t powered;
+ boolean_t configured;
+ boolean_t busy;
+ boolean_t empty;
+ sbd_cond_t cond;
+ char type[MAXNAMELEN];
+ char info[MAXPATHLEN]; /* TODO: what size? */
+} drmach_status_t;
+
+typedef struct {
+ int size;
+ char *copts;
+} drmach_opts_t;
+
+typedef struct {
+ uint64_t mi_basepa;
+ uint64_t mi_size;
+ uint64_t mi_slice_size;
+ uint64_t mi_alignment_mask;
+} drmach_mem_info_t;
+
+extern sbd_error_t *drmach_mem_get_info(drmachid_t, drmach_mem_info_t *);
+extern int drmach_board_is_floating(drmachid_t);
+
+extern sbd_error_t *drmach_copy_rename_init(
+ drmachid_t dst_id,
+ drmachid_t src_id, struct memlist *src_copy_ml,
+ drmachid_t *pgm_id);
+extern sbd_error_t *drmach_copy_rename_fini(drmachid_t id);
+extern void drmach_copy_rename(drmachid_t id);
+
+extern sbd_error_t *drmach_pre_op(int cmd, drmachid_t id,
+ drmach_opts_t *opts);
+extern sbd_error_t *drmach_post_op(int cmd, drmachid_t id,
+ drmach_opts_t *opts);
+
+extern sbd_error_t *drmach_board_assign(int bnum, drmachid_t *id);
+extern sbd_error_t *drmach_board_connect(drmachid_t id,
+ drmach_opts_t *opts);
+extern sbd_error_t *drmach_board_deprobe(drmachid_t id);
+extern sbd_error_t *drmach_board_disconnect(drmachid_t id,
+ drmach_opts_t *opts);
+extern sbd_error_t *drmach_board_find_devices(drmachid_t id, void *a,
+ sbd_error_t *(*found)(void *a, const char *, int, drmachid_t));
+extern int drmach_board_lookup(int bnum, drmachid_t *id);
+extern sbd_error_t *drmach_passthru(drmachid_t id,
+ drmach_opts_t *opts);
+
+extern sbd_error_t *drmach_board_name(int bnum, char *buf, int buflen);
+
+extern sbd_error_t *drmach_board_poweroff(drmachid_t id);
+extern sbd_error_t *drmach_board_poweron(drmachid_t id);
+extern sbd_error_t *drmach_board_test(drmachid_t id, drmach_opts_t *opts,
+ int force);
+
+extern sbd_error_t *drmach_board_unassign(drmachid_t id);
+
+extern sbd_error_t *drmach_configure(drmachid_t id, int flags);
+
+extern sbd_error_t *drmach_cpu_disconnect(drmachid_t id);
+extern sbd_error_t *drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid);
+extern sbd_error_t *drmach_cpu_get_impl(drmachid_t id, int *ip);
+extern void drmach_cpu_flush_ecache_sync(void);
+
+extern sbd_error_t *drmach_get_dip(drmachid_t id, dev_info_t **dip);
+
+extern sbd_error_t *drmach_io_is_attached(drmachid_t id, int *yes);
+extern sbd_error_t *drmach_io_post_attach(drmachid_t id);
+extern sbd_error_t *drmach_io_post_release(drmachid_t id);
+extern sbd_error_t *drmach_io_pre_release(drmachid_t id);
+extern sbd_error_t *drmach_io_unrelease(drmachid_t id);
+
+extern sbd_error_t *drmach_mem_add_span(drmachid_t id,
+ uint64_t basepa, uint64_t size);
+extern sbd_error_t *drmach_mem_del_span(drmachid_t id,
+ uint64_t basepa, uint64_t size);
+extern sbd_error_t *drmach_mem_disable(drmachid_t id);
+extern sbd_error_t *drmach_mem_enable(drmachid_t id);
+extern sbd_error_t *drmach_mem_get_base_physaddr(drmachid_t id,
+ uint64_t *pa);
+extern sbd_error_t *drmach_mem_get_memlist(drmachid_t id,
+ struct memlist **ml);
+extern sbd_error_t *drmach_mem_get_slice_size(drmachid_t, uint64_t *);
+
+extern sbd_error_t *drmach_release(drmachid_t id);
+extern sbd_error_t *drmach_status(drmachid_t id, drmach_status_t *stat);
+extern sbd_error_t *drmach_unconfigure(drmachid_t id, int flags);
+extern int drmach_log_sysevent(int board, char *hint, int flag,
+ int verbose);
+
+extern int drmach_verify_sr(dev_info_t *dip, int sflag);
+extern void drmach_suspend_last();
+extern void drmach_resume_first();
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_DRMACH_H_ */
diff --git a/usr/src/uts/sun4u/opl/sys/fiomp.h b/usr/src/uts/sun4u/opl/sys/fiomp.h
new file mode 100644
index 0000000000..b3110a6dab
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/fiomp.h
@@ -0,0 +1,240 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _FIOMP_H
+#define _FIOMP_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FIOMP_TRUE 1
+#define FIOMP_FALSE 0
+
+#define FIOMP_MAX_STR 1024 /* same as FILENAME_MAX */
+
+#define FIOMP_PATH_ALL (-1)
+
+/* ioctl base */
+#define FIOMPC ('f' << 8)
+
+/*
+ * ioctl for the meta management node
+ */
+
+/* create a new multi-path */
+#define FIOMPNEW (FIOMPC|0x0)
+/* encapsulate using devices */
+#define FIOMPENCAP (FIOMPC|0x1)
+struct fiompdev {
+ int api_level; /* API level = 0 */
+ int inst_no; /* instance number */
+ minor_t inst_minor; /* instance management node */
+ minor_t user_minor; /* user access node */
+ int num; /* number of devices */
+ char **devs; /* device names */
+ int mpmode; /* multi pathing */
+ int autopath; /* automatic path change */
+ int block; /* able to block physical device */
+ int needsync; /* need synchronize path status */
+ void *ext; /* for extension = NULL */
+};
+
+/* get an instance device's information */
+#define FIOMPDEVINFO (FIOMPC|0x2)
+struct fiomp_devinfo {
+ int inst_no; /* instance number */
+ char real_name[FIOMP_MAX_STR]; /* instance management node */
+ char user_path[FIOMP_MAX_STR]; /* user access path */
+ int path_num; /* number of paths */
+ int mpmode; /* multi pathing */
+ int autopath; /* automatic path change */
+ int block; /* able to block physical device */
+ int needsync; /* need synchronize path status */
+ void *ext; /* for extension = NULL */
+};
+
+/* get number of all instances */
+#define FIOMPALLINSTNUM (FIOMPC|0x3)
+
+/* get all device's informations */
+#define FIOMPALLDEVINFO (FIOMPC|0x4)
+struct fiomp_all_devinfo {
+ int num; /* number of instances */
+ struct fiomp_devinfo *devinfo; /* device informations */
+};
+
+/* keep 0x5 - 0xf for reserve */
+
+/*
+ * ioctl for instance management nodes
+ */
+/* get max number of paths */
+#define FIOMPMAXPATHNUM (FIOMPC|0x10)
+
+/* set the device's property */
+#define FIOMPSETPROP (FIOMPC|0x11)
+
+/* get the device's property */
+#define FIOMPGETPROP (FIOMPC|0x12)
+struct fiompprop {
+ char *iomp_name; /* instance name */
+ char *iomp_real_name;
+ /* instance management node (/devices) */
+ char *iomp_user_path; /* instance management node (/dev) */
+ char *iomp_status; /* status of the instance */
+ int num; /* number of paths */
+ char **iomp_path; /* target device nodes (/devices) */
+ char **iomp_logical_path; /* target device nodes (/dev) */
+ char **iomp_path_status; /* status of target devices */
+ char **iomp_path_block; /* access block */
+};
+
+/* destroy the instance */
+#define FIOMPDESTROY (FIOMPC|0x13)
+
+/* stop the path */
+#define FIOMPSTOP (FIOMPC|0x14)
+
+/* start the path */
+#define FIOMPSTART (FIOMPC|0x15)
+
+/* list all paths */
+#define FIOMPLIST (FIOMPC|0x16)
+
+/* get the path status */
+#define FIOMPSTATUS (FIOMPC|0x17)
+struct fiompstatus {
+ int pathnum; /* path number */
+ int status; /* FIOMP_STAT_xxxx */
+ char *message; /* some messages */
+ int block_status; /* access block status */
+ void *ext; /* reserved (= NULL) */
+};
+
+/* status */
+#define FIOMP_STAT_ACTIVE PATH_STAT_ACTIVE
+#define FIOMP_STAT_STANDBY PATH_STAT_STANDBY
+#define FIOMP_STAT_STOP PATH_STAT_STOP
+#define FIOMP_STAT_FAIL PATH_STAT_FAIL
+#define FIOMP_STAT_DISCON PATH_STAT_DISCON
+#define FIOMP_STAT_ENCAP PATH_STAT_ENCAP
+#define FIOMP_STAT_EMPTY PATH_STAT_EMPTY
+
+/* access block status */
+#define FIOMP_BSTAT_BLOCK 1
+#define FIOMP_BSTAT_UNBLOCK 0
+
+/* add, delete */
+#define FIOMPADD (FIOMPC|0x18)
+#define FIOMPDEL (FIOMPC|0x19)
+struct fiomppath {
+ int num; /* number of paths */
+ char **devs; /* device names */
+};
+
+/* active, standby */
+#define FIOMPACTIVE (FIOMPC|0x1a)
+#define FIOMPSTANDBY (FIOMPC|0x1b)
+
+/* block, unblock */
+#define FIOMPBLOCK (FIOMPC|0x1c)
+#define FIOMPUNBLOCK (FIOMPC|0x1d)
+
+/* diagnostic mode ON,OFF */
+#define FIOMPDIAGON (FIOMPC|0x1e)
+#define FIOMPDIAGOFF (FIOMPC|0x1f)
+struct fiomp_diag_mode {
+ int pathnum; /* path for diagnostic */
+ int level; /* = 0 */
+};
+
+/* get all status */
+#define FIOMPGETALLSTAT (FIOMPC|0x20)
+struct fiomp_all_stat {
+ int num; /* number of paths */
+ struct fiompstatus *status; /* path status */
+};
+
+/* change the status of paths */
+#define FIOMPCHG (FIOMPC|0x21)
+struct fiompchg {
+ int num; /* number of all paths */
+ struct fiompstatus *set_status; /* setting values */
+ struct fiompstatus *pre_status; /* previous values */
+ struct fiompstatus *status; /* current values */
+};
+
+/* recover the failed path */
+#define FIOMPRECOVER (FIOMPC|0x22)
+
+/* disconnect/reconnect the path */
+#define FIOMPDISCONNECT (FIOMPC|0x23)
+#define FIOMPCONNECT (FIOMPC|0x24)
+
+/* keep 0x25 - 0x2f for reserve */
+
+/*
+ * Common ioctl
+ */
+/* get event */
+#define FIOMPGETEVENT (FIOMPC|0x30)
+struct fiompevent {
+ int event; /* event type = FIOMP_EVT_xx */
+ int num; /* instance number(meta management) or */
+ /* number of all path(instance management) */
+ struct fiompstatus *pre_status; /* previous status */
+ struct fiompstatus *status; /* current status */
+};
+
+/* event type */
+#define FIOMP_EVT_NONE 0x0
+#define FIOMP_EVT_NEW 0x1
+#define FIOMP_EVT_DESTROY 0x2
+#define FIOMP_EVT_STAT 0x101
+#define FIOMP_EVT_PATHS 0x102
+
+/*
+ * Device property
+ */
+#define FIOMP_PROP_NAME "iomp-name"
+#define FIOMP_PROP_REAL_NAME "iomp-real-name"
+#define FIOMP_PROP_PATH_N "iomp-path-"
+#define FIOMP_PROP_USER_PATH "iomp-user-path"
+#define FIOMP_PROP_LOGIC_PATH_N "iomp-logical-path-"
+#define FIOMP_PROP_STATUS "iomp-status"
+#define FIOMP_PROP_PATH_NUM "iomp-path-num"
+#define FIOMP_PROP_STATUS_N "iomp-path-status-"
+
+#define FIOMP_PROP_BLOCK_N "iomp-path-block-"
+#define FIOMP_PROP_BLOCK_DEFAULT "iomp-path-block-default"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FIOMP_H */
diff --git a/usr/src/uts/sun4u/opl/sys/mc-opl.h b/usr/src/uts/sun4u/opl/sys/mc-opl.h
new file mode 100644
index 0000000000..b3aa57b2a9
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/mc-opl.h
@@ -0,0 +1,342 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_MC_OPL_H
+#define _SYS_MC_OPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/note.h>
+
+#ifdef DEBUG
+#define MC_LOG if (oplmc_debug) printf
+extern int oplmc_debug;
+#else
+#define MC_LOG _NOTE(CONSTANTCONDITION) if (0) printf
+#endif
+
+/*
+ * load/store MAC register
+ */
+extern uint32_t mc_ldphysio(uint64_t);
+extern void mc_stphysio(uint64_t, uint32_t);
+#define LD_MAC_REG(paddr) mc_ldphysio(paddr)
+#define ST_MAC_REG(paddr, data) mc_stphysio((paddr), (data))
+
+#define BANKNUM_PER_SB 8
+
+typedef struct scf_log {
+ struct scf_log *sl_next;
+ int sl_bank;
+ uint32_t sl_err_add;
+ uint32_t sl_err_log;
+} scf_log_t;
+
+typedef struct mc_opl_state {
+ struct mc_opl_state *next;
+ dev_info_t *mc_dip;
+ uint32_t mc_status;
+#define MC_POLL_RUNNING 0x1
+#define MC_SOFT_SUSPENDED 0x2 /* suspended by DR */
+#define MC_DRIVER_SUSPENDED 0x4 /* DDI_SUSPEND */
+ uint32_t mc_board_num; /* board# */
+ uint64_t mc_start_address; /* sb-mem-ranges */
+ uint64_t mc_size;
+ struct mc_bank {
+ uint32_t mcb_status;
+#define BANK_INSTALLED 0x80000000
+#define BANK_MIRROR_MODE 0x40000000 /* 0: normal 1: mirror */
+#define BANK_PTRL_RUNNING 0x00000001
+ uint64_t mcb_reg_base;
+ uint32_t mcb_ptrl_cntl;
+ } mc_bank[BANKNUM_PER_SB];
+ uchar_t mc_trans_table[2][64]; /* csX-mac-pa-trans-table */
+ clock_t mc_interval_hz;
+ timeout_id_t mc_tid;
+ kmutex_t mc_lock;
+ scf_log_t *mc_scf_log;
+ scf_log_t *mc_scf_log_tail;
+ int mc_scf_total;
+#define MAX_SCF_LOGS 64
+ struct mc_inst_list *mc_list;
+ struct memlist *mlist;
+ int mc_scf_retry[BANKNUM_PER_SB];
+ int mc_last_error;
+#define MAX_SCF_RETRY 10
+ uint64_t mc_period; /* number of times memory scanned */
+} mc_opl_t;
+
+typedef struct mc_inst_list {
+ struct mc_inst_list *next;
+ mc_opl_t *mc_opl;
+ uint32_t mc_board_num;
+ uint64_t mc_start_address;
+ uint64_t mc_size;
+} mc_inst_list_t;
+
+#define IS_MIRROR(mcp, bn) ((mcp)->mc_bank[bn].mcb_status\
+ & BANK_MIRROR_MODE)
+typedef struct mc_addr {
+ int ma_bd; /* board number */
+ int ma_bank; /* bank number */
+ uint32_t ma_dimm_addr; /* DIMM address (same format as ERR_ADD) */
+} mc_addr_t;
+
+typedef struct mc_addr_info {
+ struct mc_addr mi_maddr;
+ int mi_valid;
+ int mi_advance;
+} mc_addr_info_t;
+
+typedef struct mc_flt_stat {
+ uint32_t mf_type; /* fault type */
+#define FLT_TYPE_CMPE 0x0001
+#define FLT_TYPE_UE 0x0002
+#define FLT_TYPE_PERMANENT_CE 0x0003
+#define FLT_TYPE_INTERMITTENT_CE 0x0004
+#define FLT_TYPE_SUE 0x0005
+#define FLT_TYPE_MUE 0x0006
+ uint32_t mf_cntl; /* MAC_BANKm_PTRL_CNTL Register */
+ uint32_t mf_err_add; /* MAC_BANKm_{PTRL|MI}_ERR_ADD Register */
+ uint32_t mf_err_log; /* MAC_BANKm_{PTRL|MI}_ERR_LOG Register */
+ uint32_t mf_synd;
+ uchar_t mf_errlog_valid;
+ uchar_t mf_dimm_slot;
+ uchar_t mf_dram_place;
+ uint64_t mf_flt_paddr; /* faulty physical address */
+ mc_addr_t mf_flt_maddr; /* faulty DIMM address */
+} mc_flt_stat_t;
+
+typedef struct mc_aflt {
+ uint64_t mflt_id; /* gethrtime() at time of fault */
+ mc_opl_t *mflt_mcp; /* mc-opl structure */
+ char *mflt_erpt_class; /* ereport class name */
+ int mflt_is_ptrl; /* detected by PTRL or MI */
+ int mflt_nflts; /* 1 or 2 */
+ int mflt_pr; /* page retire flags */
+ mc_flt_stat_t *mflt_stat[2]; /* fault status */
+} mc_aflt_t;
+
+#define MAC_PTRL_STAT(mcp, i) (mcp->mc_bank[i].mcb_reg_base)
+#define MAC_PTRL_CNTL(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x10)
+#define MAC_PTRL_ERR_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x20)
+#define MAC_PTRL_ERR_LOG(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x24)
+#define MAC_MI_ERR_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x28)
+#define MAC_MI_ERR_LOG(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x2c)
+#define MAC_STATIC_ERR_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x30)
+#define MAC_STATIC_ERR_LOG(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x34)
+#define MAC_RESTART_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x40)
+#define MAC_REWRITE_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x44)
+#define MAC_EG_ADD(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x48)
+#define MAC_EG_CNTL(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x4c)
+#define MAC_MIRR(mcp, i) (mcp->mc_bank[i].mcb_reg_base + 0x50)
+
+/* use PA[37:6] */
+#define MAC_RESTART_PA(pa) ((pa >> 6) & 0xffffffff)
+/*
+ * MAC_BANKm_PTRL_STAT_Register
+ */
+#define MAC_STAT_PTRL_CE 0x00000020
+#define MAC_STAT_PTRL_UE 0x00000010
+#define MAC_STAT_PTRL_CMPE 0x00000008
+#define MAC_STAT_MI_CE 0x00000004
+#define MAC_STAT_MI_UE 0x00000002
+#define MAC_STAT_MI_CMPE 0x00000001
+
+#define MAC_STAT_PTRL_ERRS (MAC_STAT_PTRL_CE|MAC_STAT_PTRL_UE\
+ |MAC_STAT_PTRL_CMPE)
+#define MAC_STAT_MI_ERRS (MAC_STAT_MI_CE|MAC_STAT_MI_UE\
+ |MAC_STAT_MI_CMPE)
+
+/*
+ * MAC_BANKm_PTRL_CTRL_Register
+ */
+#define MAC_CNTL_PTRL_START 0x80000000
+#define MAC_CNTL_USE_RESTART_ADD 0x40000000
+#define MAC_CNTL_PTRL_STOP 0x20000000
+#define MAC_CNTL_PTRL_INTERVAL 0x1c000000
+#define MAC_CNTL_PTRL_RESET 0x02000000
+#define MAC_CNTL_PTRL_STATUS 0x01000000
+#define MAC_CNTL_REW_REQ 0x00800000
+#define MAC_CNTL_REW_RESET 0x00400000
+#define MAC_CNTL_CS0_DEG_MODE 0x00200000
+#define MAC_CNTL_PTRL_CE 0x00008000
+#define MAC_CNTL_PTRL_UE 0x00004000
+#define MAC_CNTL_PTRL_CMPE 0x00002000
+#define MAC_CNTL_MI_CE 0x00001000
+#define MAC_CNTL_MI_UE 0x00000800
+#define MAC_CNTL_MI_CMPE 0x00000400
+#define MAC_CNTL_REW_CE 0x00000200
+#define MAC_CNTL_REW_UE 0x00000100
+#define MAC_CNTL_REW_END 0x00000080
+#define MAC_CNTL_PTRL_ADD_MAX 0x00000040
+#define MAC_CNTL_REW_CMPE 0x00000020
+
+#define MAC_CNTL_PTRL_PRESERVE_BITS (MAC_CNTL_PTRL_INTERVAL)
+
+#define MAC_CNTL_PTRL_ERRS (MAC_CNTL_PTRL_CE|MAC_CNTL_PTRL_UE\
+ |MAC_CNTL_PTRL_CMPE)
+#define MAC_CNTL_MI_ERRS (MAC_CNTL_MI_CE|MAC_CNTL_MI_UE\
+ |MAC_CNTL_MI_CMPE)
+#define MAC_CNTL_REW_ERRS (MAC_CNTL_REW_CE|MAC_CNTL_REW_CMPE|\
+ MAC_CNTL_REW_UE|MAC_CNTL_REW_END)
+#define MAC_CNTL_ALL_ERRS (MAC_CNTL_PTRL_ERRS|\
+ MAC_CNTL_MI_ERRS|MAC_CNTL_REW_ERRS)
+
+#define MAC_ERRLOG_SYND_SHIFT 16
+#define MAC_ERRLOG_SYND_MASK 0xffff
+#define MAC_ERRLOG_DIMMSLOT_SHIFT 13
+#define MAC_ERRLOG_DIMMSLOT_MASK 0x7
+#define MAC_ERRLOG_DRAM_PLACE_SHIFT 8
+#define MAC_ERRLOG_DRAM_PLACE_MASK 0x1f
+
+#define MAC_SET_ERRLOG_INFO(flt_stat) \
+ (flt_stat)->mf_errlog_valid = 1; \
+ (flt_stat)->mf_synd = ((flt_stat)->mf_err_log >> \
+ MAC_ERRLOG_SYND_SHIFT) & \
+ MAC_ERRLOG_SYND_MASK; \
+ (flt_stat)->mf_dimm_slot = ((flt_stat)->mf_err_log >> \
+ MAC_ERRLOG_DIMMSLOT_SHIFT) & \
+ MAC_ERRLOG_DIMMSLOT_MASK; \
+ (flt_stat)->mf_dram_place = ((flt_stat)->mf_err_log >> \
+ MAC_ERRLOG_DRAM_PLACE_SHIFT) & \
+ MAC_ERRLOG_DRAM_PLACE_MASK;
+
+extern void mc_write_cntl(mc_opl_t *, int, uint32_t);
+#define MAC_CMD(mcp, i, cmd) mc_write_cntl(mcp, i, cmd)
+
+#define MAC_PTRL_START_ADD(mcp, i) MAC_CMD((mcp), (i),\
+ MAC_CNTL_PTRL_START|MAC_CNTL_USE_RESTART_ADD)
+#define MAC_PTRL_START(mcp, i) MAC_CMD((mcp), (i), MAC_CNTL_PTRL_START)
+#define MAC_PTRL_STOP(mcp, i) MAC_CMD((mcp), (i), MAC_CNTL_PTRL_STOP)
+#define MAC_PTRL_RESET(mcp, i) MAC_CMD((mcp), (i), MAC_CNTL_PTRL_RESET)
+#define MAC_REW_REQ(mcp, i) MAC_CMD((mcp), (i), MAC_CNTL_REW_REQ)
+#define MAC_REW_RESET(mcp, i) MAC_CMD((mcp), (i), MAC_CNTL_REW_RESET)
+#define MAC_CLEAR_ERRS(mcp, i, errs) MAC_CMD((mcp), (i), errs)
+#define MAC_CLEAR_ALL_ERRS(mcp, i) MAC_CMD((mcp), (i),\
+ MAC_CNTL_ALL_ERRS)
+#define MAC_CLEAR_MAX(mcp, i) \
+ MAC_CMD((mcp), (i), MAC_CNTL_PTRL_ADD_MAX)
+
+
+/*
+ * MAC_BANKm_PTRL/MI_ERR_ADD/LOG_Register
+ */
+#define MAC_ERR_ADD_INVALID 0x80000000
+#define MAC_ERR_LOG_INVALID 0x00000080
+
+/*
+ * MAC_BANKm_STATIC_ERR_ADD_Register
+ */
+#define MAC_STATIC_ERR_VLD 0x80000000
+
+/*
+ * MAC_BANKm_MIRR_Register
+ */
+#define MAC_MIRR_MIRROR_MODE 0x80000000
+#define MAC_MIRR_BANK_EXCLUSIVE 0x40000000
+
+#define OPL_BOARD_MAX 16
+#define OPL_BANK_MAX 8
+
+/*
+ * MAC_BANKm_EG_ADD_Register
+ */
+#define MAC_EG_ADD_MASK 0x7ffffffc
+/*
+ * To set the EG_CNTL register, bit[26-25] and
+ * bit[21-20] must be cleared. Then the other
+ * control bit should be set. Then the bit[26-25]
+ * and bit[21-20] should be set while other bits
+ * should be the same as before.
+ */
+#define MAC_EG_CNTL_MASK 0x06300000
+
+#define MAC_EG_ADD_FIX 0x80000000
+#define MAC_EG_FORCE_DERR00 0x40000000
+#define MAC_EG_FORCE_DERR16 0x20000000
+#define MAC_EG_FORCE_DERR64 0x10000000
+#define MAC_EG_FORCE_DERR80 0x08000000
+#define MAC_EG_DERR_ALWAYS 0x02000000
+#define MAC_EG_DERR_ONCE 0x04000000
+#define MAC_EG_DERR_NOP 0x06000000
+#define MAC_EG_FORCE_READ00 0x00800000
+#define MAC_EG_FORCE_READ16 0x00400000
+#define MAC_EG_RDERR_ALWAYS 0x00100000
+#define MAC_EG_RDERR_ONCE 0x00200000
+#define MAC_EG_RDERR_NOP 0x00300000
+
+#define MAC_EG_SETUP_MASK 0xf9cfffff
+
+/* For MAC-PA translation */
+#define MC_ADDRESS_BITS 31
+#define PA_BITS_FOR_MAC 39
+#define INDEX_OF_BANK_SUPPLEMENT_BIT 39
+#define MP_NONE 128
+#define MP_BANK_0 129
+#define MP_BANK_1 130
+#define MP_BANK_2 131
+
+#define CS_SHIFT 29
+#define MC_TT_ENTRIES 64
+#define MC_TT_CS 2
+
+
+#define MAX_MC_LOOP_COUNT 100
+
+/* export interface for error injection */
+extern int mc_inject_error(int error_type, uint64_t pa, uint32_t flags);
+
+#define MC_INJECT_NOP 0x0
+#define MC_INJECT_INTERMITTENT_CE 0x1
+#define MC_INJECT_PERMANENT_CE 0x2
+#define MC_INJECT_UE 0x3
+#define MC_INJECT_INTERMITTENT_MCE 0x11
+#define MC_INJECT_PERMANENT_MCE 0x12
+#define MC_INJECT_SUE 0x13
+#define MC_INJECT_MUE 0x14
+#define MC_INJECT_CMPE 0x15
+
+#define MC_INJECT_MIRROR_MODE 0x10
+#define MC_INJECT_MIRROR(x) (x & MC_INJECT_MIRROR_MODE)
+
+#define MC_INJECT_FLAG_NO_TRAP 0x1
+#define MC_INJECT_FLAG_RESTART 0x2
+#define MC_INJECT_FLAG_POLL 0x4
+#define MC_INJECT_FLAG_RESET 0x8
+#define MC_INJECT_FLAG_OTHER 0x10
+#define MC_INJECT_FLAG_LD 0x20
+#define MC_INJECT_FLAG_ST 0x40
+#define MC_INJECT_FLAG_PATH 0x80
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MC_OPL_H */
diff --git a/usr/src/uts/sun4u/opl/sys/opl_hwdesc.h b/usr/src/uts/sun4u/opl/sys/opl_hwdesc.h
new file mode 100644
index 0000000000..19fffe91df
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/opl_hwdesc.h
@@ -0,0 +1,511 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPL_HWDESC_H
+#define _SYS_OPL_HWDESC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Hardware Descriptor.
+ */
+
+#define HWD_SBS_PER_DOMAIN 32 /* System boards per domain */
+#define HWD_CPUS_PER_CORE 4 /* Strands per physical core */
+#define HWD_CORES_PER_CPU_CHIP 4 /* Cores per processor chip */
+#define HWD_CPU_CHIPS_PER_CMU 4 /* Processor chips per CMU */
+#define HWD_SCS_PER_CMU 4 /* System controllers per CMU */
+#define HWD_DIMMS_PER_CMU 32 /* Memory DIMMs per CMU */
+#define HWD_IOCS_PER_IOU 2 /* Oberon chips per I/O unit */
+#define HWD_PCI_CHANNELS_PER_IOC 2 /* PCI channels per Oberon chip */
+#define HWD_LEAVES_PER_PCI_CHANNEL 2 /* Leaves per PCI channel */
+#define HWD_PCI_CHANNELS_PER_SB 4 /* PCI channels per system board */
+#define HWD_CMU_CHANNEL 4 /* CMU channel number */
+#define HWD_IO_BOATS_PER_IOU 6 /* I/O boats per I/O unit */
+#define HWD_BANKS_PER_CMU 8 /* Memory banks per CMU */
+#define HWD_MAX_MEM_CHUNKS 8 /* Chunks per board */
+
+typedef uint32_t hwd_stat_t; /* component status */
+
+/*
+ * Values for hwd_stat_t.
+ */
+#define HWD_STAT_UNKNOWN 0x0000 /* No status yet */
+#define HWD_STAT_PRESENT 0x0001 /* Present */
+#define HWD_STAT_MISS 0x0002 /* Missing */
+#define HWD_STAT_MISCONFIG 0x0003 /* Misconfigured */
+#define HWD_STAT_PASS 0x0004 /* Ok */
+#define HWD_STAT_FAIL 0x0080 /* Failed by XSCF */
+#define HWD_STAT_FAIL_OBP 0x0081 /* Failed by POST/OBP */
+#define HWD_STAT_FAIL_OS 0x0082 /* Failed by OS */
+
+#define HWD_STAT_FAILED 0x0080
+
+#define HWD_MASK_NOT_USED 0x8000 /* If this bit is set, the component */
+ /* is not used (even if it is present) */
+
+#define HWD_STATUS_FAILED(stat) ((stat) & HWD_STAT_FAILED)
+#define HWD_STATUS_OK(stat) ((stat) == HWD_STAT_PASS)
+#define HWD_STATUS_PRESENT(stat) \
+ ((stat) & (HWD_STAT_PRESENT | HWD_STAT_PASS))
+#define HWD_STATUS_NONE(stat) \
+ (((stat) == HWD_STAT_UNKNOWN) || ((stat) == HWD_STAT_MISS))
+
+#define HWD_VERSION_MAJOR 1
+#define HWD_VERSION_MINOR 1
+
+/*
+ * Hardware Descriptor Header.
+ *
+ * Some fields occur repeatedly in different structures:
+ *
+ * spare* This field is for future use.
+ *
+ * filler* This field is used to show alignment. This could also
+ * be used in the future for something.
+ *
+ * check_sum This contains the check sum of the structure it resides in.
+ */
+typedef struct {
+ uint32_t hdr_magic; /* magic code ('HWDE') */
+ struct hwdesc_version {
+ uint16_t major;
+ uint16_t minor;
+ } hdr_version; /* structure version */
+
+ /*
+ * Domain Identifier. The OPL system can have
+ * up to 24 domains so domain id can be 0 - 23.
+ */
+ uint8_t hdr_domain_id;
+ char hdr_filler[3];
+
+ /*
+ * offsets from the beginning of the header to:
+ * - SB status information (hwd_sb_status_t)
+ * - domain information (hwd_domain_info_t)
+ * - SB information (hwd_sb_info_t).
+ */
+ uint32_t hdr_sb_status_offset;
+ uint32_t hdr_domain_info_offset;
+ uint32_t hdr_sb_info_offset;
+
+ uint32_t hdr_spare[9];
+ uint32_t hdr_check_sum;
+} hwd_header_t;
+
+/*
+ * SB Status
+ */
+typedef struct {
+ hwd_stat_t sb_status[HWD_SBS_PER_DOMAIN]; /* status of all LSBs */
+ uint32_t sb_spare[15];
+ uint32_t sb_check_sum;
+} hwd_sb_status_t;
+
+/*
+ * SP -> Domain Information.
+ */
+typedef struct {
+ uint32_t dinf_reset_factor; /* domain reset reason */
+ uint32_t dinf_host_id; /* domain unique id */
+ uint64_t dinf_system_frequency; /* Hz */
+ uint64_t dinf_stick_frequency; /* Hz */
+ uint32_t dinf_scf_command_timeout; /* SCF i/f timeout seconds */
+ uint32_t dinf_model_info; /* FF1/2 DC1/2/3 */
+ uint8_t dinf_mac_address[6]; /* system MAC address */
+ uint8_t dinf_filler1[10];
+ uint8_t dinf_dr_status; /* 0: DR capable, !0: no DR */
+ uint8_t dinf_filler2[7];
+ /*
+ * Specification of degeneracy operation of POST by XSCF
+ * 0x00: off
+ * 0x20: component
+ * 0x40: board
+ * 0x80: system
+ */
+ uint8_t dinf_config_policy;
+ /*
+ * Specification of diagnosis operation of POST by XSCF
+ * 0x00: off
+ * 0x20: min
+ * 0x40: max
+ */
+ uint8_t dinf_diag_level;
+ /*
+ * Specification of boot operation of OBP by XSCF
+ * 0x00: It follows other settings.
+ * 0x80: Auto boot is not done.
+ */
+ uint8_t dinf_boot_mode;
+ uint8_t dinf_spare1[5];
+ int64_t dinf_cpu_start_time; /* seconds since the Epoch */
+ char dinf_banner_name[64]; /* system banner string */
+ char dinf_platform_token[64]; /* platform name */
+ uint32_t dinf_floating_board_bitmap; /* bit 0 = SB0 ... */
+ uint32_t dinf_spare2[12];
+ uint32_t dinf_check_sum;
+} hwd_domain_info_t;
+
+/*
+ * CPU Strand
+ */
+typedef struct {
+ hwd_stat_t cpu_status;
+ char cpu_component_name[32];
+ uint16_t cpu_cpuid; /* 0x0000, 0x0001, ... 0x01ff */
+ uint16_t cpu_filler;
+ uint32_t cpu_spare[6];
+} hwd_cpu_t;
+
+/*
+ * CPU Core
+ */
+typedef struct {
+ hwd_stat_t core_status;
+ char core_component_name[32];
+ uint32_t core_filler1;
+ uint64_t core_frequency; /* Hz */
+ uint64_t core_config; /* bus config reg */
+ uint64_t core_version; /* processor VER */
+ uint16_t core_manufacturer; /* VER.manuf */
+ uint16_t core_implementation; /* VER.impl */
+ uint8_t core_mask; /* VER.mask */
+ uint8_t core_filler2[3];
+ uint32_t core_l1_icache_size;
+ uint16_t core_l1_icache_line_size;
+ uint16_t core_l1_icache_associativity;
+ uint32_t core_num_itlb_entries;
+ uint32_t core_l1_dcache_size;
+ uint16_t core_l1_dcache_line_size;
+ uint16_t core_l1_dcache_associativity;
+ uint32_t core_num_dtlb_entries;
+ uint32_t core_spare1[4];
+ uint32_t core_l2_cache_size;
+ uint16_t core_l2_cache_line_size;
+ uint16_t core_l2_cache_associativity;
+ uint32_t core_l2_cache_sharing; /* bit N:coreN */
+ uint32_t core_spare2[5];
+ hwd_cpu_t core_cpus[HWD_CPUS_PER_CORE];
+ uint32_t core_spare3[4];
+} hwd_core_t;
+
+/*
+ * CPU Chip
+ */
+typedef struct {
+ hwd_stat_t chip_status;
+ char chip_component_name[32]; /* example: "CPU#x" */
+ char chip_fru_name[32]; /* example: "CPU#x" */
+ char chip_compatible[32]; /* example: "FJSV,SPARC64-VI" */
+ /*
+ * Jupiter Bus Device ID
+ * 0x0400, 0x0408, ... , 0x05f8
+ */
+ uint16_t chip_portid;
+ uint16_t chip_filler;
+ uint32_t chip_spare1[6];
+ hwd_core_t chip_cores[HWD_CORES_PER_CPU_CHIP];
+ uint32_t chip_spare2[4];
+} hwd_cpu_chip_t;
+
+/*
+ * SC
+ */
+typedef struct {
+ hwd_stat_t sc_status;
+ uint32_t sc_filler;
+ /*
+ * Top address of SC registers in this XSB
+ */
+ uint64_t sc_register_address;
+} hwd_sc_t;
+
+/*
+ * Bank
+ */
+typedef struct {
+ hwd_stat_t bank_status;
+ hwd_stat_t bank_cs_status[2]; /* DIMM pair status */
+ uint32_t bank_filler1;
+ uint64_t bank_register_address; /* address of mem patrol regs */
+ uint8_t bank_mac_ocd; /* calibrated MAC OCD value */
+ uint8_t bank_filler2[3];
+ uint8_t bank_dimm_ocd[4][2]; /* calibrated DIMM OCD value */
+ uint32_t bank_tune; /* for POST use */
+ uint32_t bank_spare[2];
+} hwd_bank_t;
+
+/*
+ * Chunk
+ */
+typedef struct {
+ uint64_t chnk_start_address;
+ uint64_t chnk_size;
+} hwd_chunk_t;
+
+/*
+ * Dimm
+ */
+typedef struct {
+ hwd_stat_t dimm_status;
+ uint32_t dimm_filler1;
+ uint64_t dimm_capacity; /* bytes */
+ uint64_t dimm_available_capacity; /* bytes */
+ uint8_t dimm_rank; /* 1 or 2 */
+ uint8_t dimm_filler2[7];
+ char dimm_component_name[32]; /* "MEM#xyz" */
+ char dimm_fru_name[32]; /* "MEM#xyz" */
+} hwd_dimm_t;
+
+/*
+ * CS
+ */
+typedef struct {
+ hwd_stat_t cs_status;
+ uint8_t cs_number_of_dimms;
+ uint8_t cs_filler[3];
+ uint64_t cs_available_capacity;
+ uint64_t cs_dimm_capacity;
+ uint8_t cs_dimm_badd[8]; /* Value to initialize MAC by POST */
+ uint16_t cs_dimm_add[8]; /* Value to initialize MAC by POST */
+ uint8_t cs_pa_mac_table[64]; /* PA <-> MAC address conversion */
+} hwd_cs_t;
+
+/*
+ * Memory
+ */
+typedef struct {
+ uint64_t mem_start_address; /* Memory start for this LSB */
+ uint64_t mem_size; /* Memory size for this LSB */
+ hwd_bank_t mem_banks[HWD_BANKS_PER_CMU];
+ /*
+ * Mirroring mode:
+ * 0x00 or 0x01
+ * 0x00 : not 'memory mirror mode'
+ * 0x01 : 'memory mirror mode'
+ */
+ uint8_t mem_mirror_mode; /* mirroring mode */
+ /*
+ * Memory configuration:
+ * 0x01 : 1 divided mode
+ * 0x02 : 2 divided mode
+ * 0x04 : 4 divided mode
+ *
+ * It is always set to 0x04 at the XSB mode.
+ */
+ uint8_t mem_division_mode;
+ uint8_t mem_piece_number; /* 0-3 memory slot group used */
+ uint8_t mem_cs_interleave; /* 1:cs interleave, 0:not */
+ uint32_t mem_filler[3];
+ uint8_t mem_available_bitmap[512]; /* for POST use */
+ uint8_t mem_degrade_bitmap[16384]; /* for POST use */
+ hwd_chunk_t mem_chunks[HWD_MAX_MEM_CHUNKS];
+ hwd_dimm_t mem_dimms[HWD_DIMMS_PER_CMU];
+ hwd_cs_t mem_cs[2];
+} hwd_memory_t;
+
+typedef struct {
+ hwd_stat_t scf_status;
+ char scf_component_name[32]; /* "SCFI#z" */
+} hwd_scf_interface_t;
+
+typedef struct {
+ hwd_stat_t tty_status;
+ char tty_component_name[32]; /* "TTY#z" */
+} hwd_tty_t;
+
+typedef struct {
+ uint8_t fver_major; /* firmware major version */
+ uint8_t fver_minor; /* firmware minor version */
+ uint8_t fver_local; /* firmware local version */
+ uint8_t fver_filler;
+} hwd_fmem_version_t;
+
+typedef struct {
+ hwd_stat_t fmem_status; /* status of flash */
+ char fmem_component_name[32];
+ uint8_t fmem_used; /* non-zero: fmem is used */
+ uint8_t fmem_filler[3];
+ hwd_fmem_version_t fmem_version;
+ uint32_t fmem_spare;
+} hwd_fmem_t;
+
+/*
+ * CMU CH
+ */
+typedef struct {
+ hwd_stat_t chan_status;
+ /*
+ * CMU_CH port ID
+ * LSB0 is 0x0008, LSB1 is 0x0018, ... , LSB15 is 0x00f8
+ */
+ uint16_t chan_portid;
+ uint16_t chan_filler;
+ char chan_component_name[32]; /* "U2P#z" */
+ hwd_scf_interface_t chan_scf_interface;
+ hwd_tty_t chan_serial;
+ hwd_fmem_t chan_fmem[2];
+} hwd_cmu_chan_t;
+
+/*
+ * CMU
+ */
+typedef struct {
+ char cmu_component_name[32]; /* example: "CxS0y" */
+ char cmu_fru_name[32]; /* example: "Cabinet#x-CMU#y" */
+
+ hwd_cpu_chip_t cmu_cpu_chips[HWD_CPU_CHIPS_PER_CMU]; /* CPU */
+ hwd_sc_t cmu_scs[HWD_SCS_PER_CMU]; /* SC */
+ hwd_memory_t cmu_memory; /* Memory */
+ hwd_cmu_chan_t cmu_ch; /* CMU CH */
+ uint32_t cmu_spare[32];
+} hwd_cmu_t;
+
+typedef struct {
+ hwd_stat_t slot_status;
+ char slot_name[16];
+} hwd_slot_t;
+
+/*
+ * IO Boat
+ */
+typedef struct {
+ hwd_stat_t iob_status;
+ char iob_component_name[32];
+ char iob_fru_name[32];
+ /*
+ * IO_Boat type
+ * 0x01 : PCI-X Slot Type
+ * 0x02 : PCI Express Slot Type
+ */
+ uint32_t iob_type; /* PCI-X or PCI Express */
+ uint64_t iob_io_box_info; /* location of I/O */
+ /*
+ * Information of switch on IO_boat
+ * use only switch_status[0] when PCI-X type IO_boat
+ */
+ hwd_stat_t iob_switch_status[3]; /* PCIE switch statuses */
+ /*
+ * Information of bridge on IO_boat
+ * use only when PCI-X type IO_boat
+ */
+ hwd_stat_t iob_bridge_status[3]; /* PCIX bridge statuses */
+ hwd_slot_t iob_slot[6]; /* PCI slot names */
+ uint32_t iob_spare[8];
+} hwd_io_boat_t;
+
+/* IOU PCI Express Slot */
+typedef struct {
+ uint32_t iou_type; /* 0: empty, 1: card, 2: IO boat */
+ hwd_slot_t iou_slot;
+ hwd_io_boat_t iou_io_boat;
+} hwd_iou_slot_t;
+
+typedef struct {
+ hwd_stat_t ff_onb_switch_status;
+ uint8_t ff_onb_filler[64];
+ hwd_stat_t ff_onb_bridge_status;
+ hwd_stat_t ff_onb_sas_status;
+ hwd_stat_t ff_onb_gbe_status;
+ hwd_iou_slot_t ff_onb_slot;
+ hwd_slot_t ff_onb_xslot;
+} hwd_ff_onboard_t;
+
+typedef struct {
+ hwd_stat_t ioua_status; /* IOUA status */
+ char ioua_component_name[32];
+ char ioua_fru_name[32];
+ hwd_stat_t ioua_bridge_status;
+ hwd_stat_t ioua_sas_status;
+ hwd_stat_t ioua_gbe_status;
+} hwd_ioua_t;
+
+typedef struct {
+ uint8_t iou_desc_filler[80];
+ hwd_iou_slot_t iou_desc_slot;
+} hwd_iou_slot_desc_t;
+
+typedef struct {
+ hwd_stat_t leaf_status;
+ uint16_t leaf_port_id; /* portid (logical leaf id) */
+ uint8_t leaf_filler[6];
+ uint32_t leaf_slot_type; /* card or boat */
+ union {
+ hwd_ff_onboard_t leaf_ff_onboard;
+ hwd_ioua_t leaf_ioua;
+ hwd_iou_slot_desc_t leaf_iou_slot;
+ uint8_t leaf_spare[448];
+ } leaf_u;
+ uint64_t leaf_cfgio_offset; /* config space offset */
+ uint64_t leaf_cfgio_size; /* config space size */
+ uint64_t leaf_mem32_offset; /* offset of mem32 area */
+ uint64_t leaf_mem32_size; /* size of mem32 area */
+ uint64_t leaf_mem64_offset; /* offset of mem64 area */
+ uint64_t leaf_mem64_size; /* size of mem64 area */
+} hwd_leaf_t;
+
+/*
+ * PCI CH
+ */
+typedef struct {
+ hwd_stat_t pci_status; /* PCI CH status */
+ char pci_component_name[32];
+ char pci_fru_name[32];
+ uint8_t pci_filler[12];
+ hwd_leaf_t pci_leaf[HWD_LEAVES_PER_PCI_CHANNEL];
+} hwd_pci_ch_t;
+
+/*
+ * System Board
+ */
+typedef struct {
+ /*
+ * SB
+ */
+ hwd_stat_t sb_status;
+ uint8_t sb_mode; /* 0:PSB 1:XSB */
+ uint8_t sb_psb_number; /* PSB number for this LSB */
+ uint8_t sb_filler1[10];
+
+ hwd_cmu_t sb_cmu; /* CMU */
+
+ hwd_pci_ch_t sb_pci_ch[HWD_PCI_CHANNELS_PER_SB]; /* PCI CH */
+
+ uint32_t sb_spare[31];
+ uint32_t sb_check_sum;
+} hwd_sb_t;
+
+#define HWD_DATA_SIZE (36 * 1024) /* Size of HWD data from SCF */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPL_HWDESC_H */
diff --git a/usr/src/uts/sun4u/opl/sys/oplkm.h b/usr/src/uts/sun4u/opl/sys/oplkm.h
new file mode 100644
index 0000000000..30c613de5b
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/oplkm.h
@@ -0,0 +1,103 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPLKM_H
+#define _SYS_OPLKM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Device instance structure.
+ */
+typedef struct okms {
+ dev_info_t *km_dip; /* Devinfo pointer */
+ major_t km_major; /* Major number */
+ uint32_t km_inst; /* Device instance */
+ mkey_t km_key; /* Mailbox key */
+ target_id_t km_target; /* Target-id */
+
+ ddi_iblock_cookie_t km_ibcookie; /* Interrupt block cookie */
+ kmutex_t km_lock; /* Lock to protect this structure */
+ kcondvar_t km_wait; /* Cond. var to signal events */
+ uint32_t km_state; /* State of the device */
+ uint32_t km_maxsz; /* Max msg size */
+
+ uint32_t km_retries; /* Number of retries */
+ uint32_t km_clean; /* Cleanup flags */
+ mscat_gath_t km_sg_rcv; /* Scatter-gather for Rx */
+ mscat_gath_t km_sg_tx; /* Scatter-gather for Tx */
+
+ okm_req_hdr_t *km_reqp; /* Cached request */
+ int km_reqlen; /* Request length */
+} okms_t;
+
+/* km_state flags */
+#define OKM_MB_INITED 0x00000001 /* Mailbox initialized */
+#define OKM_MB_CONN 0x00000002 /* Mailbox in connected state */
+#define OKM_MB_DISC 0x00000004 /* Mailbox is disconnected */
+#define OKM_OPENED 0x00000008 /* Device opened */
+
+#define OKM_MBOX_READY(x) (((x)->km_state & OKM_MB_CONN) && \
+ !((x)->km_state & OKM_MB_DISC))
+
+/* km_clean flags */
+#define OKM_CLEAN_LOCK 0x00000001
+#define OKM_CLEAN_CV 0x00000002
+#define OKM_CLEAN_NODE 0x00000004
+
+#ifdef DEBUG
+/*
+ * Debug levels
+ */
+#define DBG_DRV 0x01 /* driver related traces */
+#define DBG_MBOX 0x02 /* Mailbox traces */
+#define DBG_MESG 0x04 /* Mailbox Message traces */
+#define DBG_WARN 0x10 /* warning type traces */
+
+static void okm_print_req(okm_req_hdr_t *reqp, uint32_t len);
+static void okm_print_rep(okm_rep_hdr_t *repp);
+
+#define DPRINTF(f, x) if (f & okm_debug) printf x
+#define DUMP_REQ(r, l) okm_print_req(r, l)
+#define DUMP_REPLY(r) okm_print_rep(r)
+
+#else /* DEBUG */
+
+#define DPRINTF(f, x)
+#define DUMP_REQ(r, l)
+#define DUMP_REPLY(r)
+
+#endif /* DEBUG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPLKM_H */
diff --git a/usr/src/uts/sun4u/opl/sys/oplkm_msg.h b/usr/src/uts/sun4u/opl/sys/oplkm_msg.h
new file mode 100644
index 0000000000..2e49a63e0f
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/oplkm_msg.h
@@ -0,0 +1,91 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPLKM_MSG_H
+#define _SYS_OPLKM_MSG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This header file describes the format of the mailbox messages
+ * exchanged between the OPL key management driver on a OPL Domain
+ * and the OPL Service Processor.
+ */
+
+
+/*
+ * Request message header.
+ */
+typedef struct okm_req_hdr {
+ uint32_t krq_version; /* Protocol version */
+ uint32_t krq_transid; /* Transaction ID */
+ uint32_t krq_cmd; /* Request */
+ uint32_t krq_reserved; /* Reserved */
+} okm_req_hdr_t;
+
+/*
+ * Reply message header.
+ */
+typedef struct okm_rep_hdr {
+ uint32_t krp_version; /* protocol version */
+ uint32_t krp_transid; /* Transaction ID */
+ uint32_t krp_status; /* Status */
+ uint32_t krp_sadb_errno; /* PF_KEY errno, if applicable */
+ uint32_t krp_sadb_version; /* PF_KEY version, if applicable */
+} okm_rep_hdr_t;
+
+/*
+ * Version of this current protocol.
+ */
+#define OKM_PROTOCOL_VERSION 1
+
+
+/*
+ * Message types.
+ */
+#define OKM_MSG_SADB 0x1 /* SADB message from SP */
+
+/*
+ * Values for sckm_msg_rep_hdr status field.
+ */
+#define OKM_SUCCESS 0x0 /* Operation succeeded */
+#define OKM_ERR_VERSION 0x1 /* Unexpected version */
+#define OKM_ERR_SADB_PFKEY 0x2 /* PF_KEY returned an error */
+#define OKM_ERR_SADB_MSG 0x3 /* bad SADB msg detect by driver */
+#define OKM_ERR_DAEMON 0x4 /* Error communicating with daemon */
+#define OKM_ERR_BAD_CMD 0x5 /* unknown command */
+#define OKM_ERR_SADB_VERSION 0x6 /* bad SADB version */
+#define OKM_ERR_SADB_TIMEOUT 0x7 /* no response from key engine */
+#define OKM_ERR_SADB_BAD_TYPE 0x8 /* bad SADB msg type */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPLKM_MSG_H */
diff --git a/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu.h b/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu.h
new file mode 100644
index 0000000000..f738979706
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu.h
@@ -0,0 +1,358 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _OPLMSU_H
+#define _OPLMSU_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ack_flag */
+#define ACK_RES 0
+#define NAK_RES -1
+
+/* active_flag */
+#define ACTIVE_RES 0
+#define NOT_ACTIVE_RES -1
+
+/* undefined path number */
+#define UNDEFINED -1
+
+/* sleep and wakeup control flag */
+#define CV_WAKEUP 0
+#define CV_SLEEP 1
+
+/* write/read control flag */
+#define MSU_WRITE_SIDE 0
+#define MSU_READ_SIDE 1
+
+/* message priority */
+#define MSU_HIGH 1
+#define MSU_NORM 0
+
+/* miscellaneous */
+#define SUCCESS 0
+#define FAILURE -1
+#if !defined(BUSY) /* warning: macro redefined */
+#define BUSY -2
+#endif
+
+/* timeout interval */
+#define MSU_TM_500MS 500000 /* 500ms */
+
+/* XON/XOFF code */
+#define MSU_XON 0x11
+#define MSU_XOFF 0x13
+#define MSU_XON_4 (MSU_XON << 24|MSU_XON << 16|MSU_XON << 8|MSU_XON)
+#define MSU_XOFF_4 (MSU_XOFF << 24|MSU_XOFF << 16|MSU_XOFF << 8|MSU_XOFF)
+
+/* main path code used by MSU_CMD_START ioctl */
+#define MAINPATHCODE 0x1000
+#define MSU_OBP_CONSOLE -2
+
+/* maximum number of minor device number */
+#define MAXDEVMINOR 256
+
+/* node mask */
+#define USER_NODE_MASK 0x00000000 /* user control node */
+#define META_NODE_MASK 0x00010000 /* meta control node */
+
+/* node_flag */
+#define MSU_NODE_USER 0 /* user control node */
+#define MSU_NODE_META 1 /* meta control node */
+
+/* node_flag check macro */
+#define MSU_NODE_TYPE(dev) \
+ (((dev) & (META_NODE_MASK|USER_NODE_MASK)) >> 16)
+
+/* termio_flag */
+#define MSU_TIOS_TCSETS 1 /* TCSETS */
+#define MSU_TIOS_MSET 2 /* TIOCMSET */
+#define MSU_TIOS_PPS 3 /* TIOCSPPS */
+#define MSU_TIOS_WINSZP 4 /* TIOCSWINSZ */
+#define MSU_TIOS_SOFTCAR 5 /* TIOCSSOFTCAR */
+#define MSU_TIOS_END 6 /* termios end */
+
+/* binding name */
+#define MSU_CMUCH_FF "pci10cf,138f"
+#define MSU_CMUCH_DC "pci10cf,1390"
+#ifdef DEBUG
+#define MSU_CMUCH_DBG "pci108e,8000"
+#endif
+
+/* tty-port# properties */
+#define MSU_TTY_PORT_PROP "tty-port#"
+
+/* board# properties */
+#define MSU_BOARD_PROP "board#"
+
+/*
+ * oplmsu command code
+ */
+#define MSU_CMD ('f' << 8)
+#define MSU_CMD_STOP (MSU_CMD|0x14)
+#define MSU_CMD_START (MSU_CMD|0x15)
+#define MSU_CMD_ACTIVE (MSU_CMD|0x1a)
+
+#define MSU_PATH_ALL (-1) /* path all instruction */
+
+/*
+ * oplmsu path status for status member on upper path info table
+ */
+#define MSU_PSTAT_EMPTY 0
+#define MSU_PSTAT_ACTIVE 1
+#define MSU_PSTAT_STANDBY 2
+#define MSU_PSTAT_STOP 3
+#define MSU_PSTAT_FAIL 4
+#define MSU_PSTAT_DISCON 5
+#define MSU_PSTAT_ENCAP 6
+
+/*
+ * oplmsu additional status for traditional_status member on
+ * upper path info table
+ */
+#define MSU_UNLINK 0 /* initial state */
+#define MSU_EMPTY 1 /* MSU_STAT_EMPTY(00) state */
+#define MSU_LINK_NU 2 /* link state(no link ID, no upper path info) */
+#define MSU_SETID_NU 3 /* set ID state(link ID, no upper path info) */
+#define MSU_MAKE_INST 4 /* create instance node state */
+#define MSU_STOP 5 /* MSU_STAT_STOP(03) state */
+#define MSU_WSTR_ACK 6 /* wait ack/nak of MSU_CMD_START state */
+#define MSU_STANDBY 7 /* MSU_STAT_STANDBY(02) state */
+#define MSU_WTCS_ACK 8 /* wait ack/nak of TCSETS state */
+#define MSU_WTMS_ACK 9 /* wait ack/nak of TIOCMSET state */
+#define MSU_WPPS_ACK 10 /* wait ack/nak of TIOCSPPS state */
+#define MSU_WWSZ_ACK 11 /* wait ack/nak of TIOCSWINSZ state */
+#define MSU_WCAR_ACK 12 /* wait ack/nak of TIOCSSOFTCAR state */
+#define MSU_ACTIVE 13 /* MSU_STAT_ACTIVE(01) state */
+#define MSU_WSTP_ACK 14 /* wait ack/nak of MSU_CMD_STOP state */
+#define MSU_FAIL 15 /* MSU_STAT_FAIL(04) state */
+#define MSU_WCHK_ACK 16 /* wait ack/nak of OPLMSUSELFTEST */
+#define MSU_SETID 17 /* set ID state(link ID, upper path info) */
+#define MSU_DISCON 18 /* MSU_STAT_DISCON(05) state */
+#define MSU_LINK 19 /* link state(no link ID, upper path info) */
+#define MSU_WPTH_CHG 20 /* wait ack/nak of OPLMSUPATHCHG state */
+
+/*
+ * oplmsu instance status for inst_status member on
+ * upper instance info table
+ */
+#define INST_STAT_BUSY -1 /* busy */
+#define INST_STAT_ONLINE 10 /* online */
+#define INST_STAT_OFFLINE 11 /* offline */
+#define INST_STAT_UNCONFIGURED 12 /* unconfigured */
+
+/*
+ * oplmsu lower path Info table ext status for ext member on
+ * lower path info table
+ */
+#define MSU_EXT_NOTUSED -1 /* not used (default) */
+#define MSU_EXT_ACTIVE_CANDIDATE -2 /* active path candidate by */
+ /* MSU_CMD_START */
+#define MSU_EXT_VOID -3 /* void status */
+
+/* oplmsu/su pathname size */
+#define MSU_PATHNAME_SIZE 128
+
+/* control block(path parameter) */
+struct msu_path {
+ int num; /* total number of paths */
+ int reserved; /* reserved */
+};
+
+/* control block(device parameter) */
+struct msu_dev {
+ dev_info_t *dip; /* pointer to dev_info_t */
+};
+
+/* serial device control block */
+typedef struct serial_devcb {
+ dev_info_t *dip; /* pointer to dev_info_t */
+ int lsb; /* LSB number */
+} ser_devcb_t;
+
+/* serial device control block list */
+typedef struct serial_devlist {
+ struct serial_devlist *next;
+ dev_info_t *dip; /* pointer to dev_info_t */
+} ser_devl_t;
+
+/* upper path table */
+typedef struct upper_path_table {
+ struct upper_path_table *u_next;
+ struct upper_path_table *u_prev;
+ struct lower_path_table *lpath;
+ int path_no;
+ int reserved;
+ int status;
+ int prev_status;
+ ulong_t traditional_status;
+ ser_devcb_t ser_devcb;
+} upath_t;
+
+/* lower path table */
+typedef struct lower_path_table {
+ struct lower_path_table *l_next;
+ struct lower_path_table *l_prev;
+ mblk_t *first_lpri_hi;
+ mblk_t *last_lpri_hi;
+ mblk_t *hndl_mp;
+ queue_t *hndl_uqueue;
+ queue_t *lower_queue;
+ queue_t *uwq_queue;
+ struct upper_instance_table *uinst;
+ char *abt_char;
+ struct buf_tbl *rbuftbl;
+ bufcall_id_t rbuf_id;
+ timeout_id_t rtout_id;
+ upath_t *src_upath;
+ long status;
+ int path_no;
+ int link_id;
+ int uwq_flag;
+ int sw_flag;
+ kcondvar_t sw_cv;
+} lpath_t;
+
+/* control table */
+typedef struct control_table {
+ struct control_table *c_next;
+ struct control_table *c_prev;
+ mblk_t *first_upri_hi;
+ mblk_t *last_upri_hi;
+ queue_t *queue;
+ queue_t *lrq_queue;
+ queue_t *wait_queue;
+ minor_t minor;
+ int node_type;
+ struct buf_tbl *wbuftbl;
+ bufcall_id_t wbuf_id;
+ timeout_id_t wtout_id;
+ int lrq_flag;
+ int sleep_flag;
+ kcondvar_t cvp;
+} ctrl_t;
+
+#define MSU_MAX_ABTSLEN 24 /* maximum length for abort sequence */
+
+/* upper instance table */
+typedef struct upper_instance_table {
+ upath_t *first_upath;
+ upath_t *last_upath;
+ lpath_t *first_lpath;
+ lpath_t *last_lpath;
+ ctrl_t *meta_ctrl;
+ ctrl_t *user_ctrl;
+ queue_t *lower_queue;
+ dev_info_t *msu_dip;
+ int inst_status;
+ int path_num;
+ int reserved[2];
+ krwlock_t lock;
+ kmutex_t u_lock;
+ kmutex_t l_lock;
+ kmutex_t c_lock;
+ mblk_t *tcsets_p;
+ mblk_t *tiocmset_p;
+ mblk_t *tiocspps_p;
+ mblk_t *tiocswinsz_p;
+ mblk_t *tiocssoftcar_p;
+ char abts[MSU_MAX_ABTSLEN];
+} uinst_t;
+
+/* queue table for bufcall() and timeout() */
+struct buf_tbl {
+ queue_t *q;
+ int rw_flag;
+};
+
+
+/* rwlock macro */
+#define OPLMSU_RWLOCK_UPGRADE() { \
+ if (rw_tryupgrade(&oplmsu_uinst->lock) == 0) { \
+ rw_exit(&oplmsu_uinst->lock); \
+ rw_enter(&oplmsu_uinst->lock, RW_WRITER); \
+ } \
+}
+
+#ifdef DEBUG
+typedef struct tracedata {
+ queue_t *q;
+ mblk_t *mp;
+ char op[3];
+ uchar_t msg_type;
+ int pathno;
+ int msg_cmd;
+ ulong_t data;
+} msu_trc_t;
+
+#define MSU_TRC_USER ('u' << 24|'s' << 16|'e' << 8|'r')
+#define MSU_TRC_META ('m' << 24|'e' << 16|'t' << 8|'a')
+
+/* oplmsu_trace_on */
+#define MSU_TRACE_OFF 0
+#define MSU_TRACE_ON 1
+
+/* oplmsu_debug_mode */
+#define MSU_DPRINT_ON 1 /* enable print log */
+
+/* op type */
+#define MSU_TRC_UI 0 /* upper input */
+#define MSU_TRC_UO 1 /* upper output */
+#define MSU_TRC_LI 2 /* lower input */
+#define MSU_TRC_LO 3 /* lower output */
+#define MSU_TRC_OPN 4 /* open */
+#define MSU_TRC_CLS 5 /* close */
+
+/* trace macro */
+#define OPLMSU_TRACE(q, mp, op) { \
+ if (oplmsu_trace_on == MSU_TRACE_ON) { \
+ oplmsu_cmn_trace(q, mp, op); \
+ } \
+}
+
+/* debug print macro */
+#define DBG_PRINT(args) { \
+ if (oplmsu_debug_mode & MSU_DPRINT_ON) { \
+ cmn_err args; \
+ } \
+}
+
+#else /* ! DEBUG */
+
+/* trace macro */
+#define OPLMSU_TRACE(q, mp, op)
+
+/* debug print macro */
+#define DBG_PRINT(args)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OPLMSU_H */
diff --git a/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu_proto.h b/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu_proto.h
new file mode 100644
index 0000000000..2481917f4e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/oplmsu/oplmsu_proto.h
@@ -0,0 +1,166 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _OPLMSU_PROTO_H
+#define _OPLMSU_PROTO_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PROTOTYPE DECLARATIONS
+ */
+
+int oplmsu_attach(dev_info_t *, ddi_attach_cmd_t);
+int oplmsu_detach(dev_info_t *, ddi_detach_cmd_t);
+int oplmsu_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
+int oplmsu_open(queue_t *, dev_t *, int, int, cred_t *);
+int oplmsu_close(queue_t *, int, cred_t *);
+int oplmsu_uwput(queue_t *, mblk_t *);
+int oplmsu_uwsrv(queue_t *);
+int oplmsu_lwsrv(queue_t *);
+int oplmsu_lrput(queue_t *, mblk_t *);
+int oplmsu_lrsrv(queue_t *);
+int oplmsu_ursrv(queue_t *);
+
+int oplmsu_open_msu(dev_info_t *, ldi_ident_t *, ldi_handle_t *);
+int oplmsu_plink_serial(dev_info_t *, ldi_handle_t, int *);
+int oplmsu_set_lpathnum(int, int);
+int oplmsu_dr_attach(dev_info_t *);
+int oplmsu_dr_detach(dev_info_t *);
+int oplmsu_find_serial(ser_devl_t **);
+dev_info_t *oplmsu_find_ser_dip(dev_info_t *);
+void oplmsu_conf_stream(uinst_t *);
+void oplmsu_unlinks(ldi_handle_t, int *, int);
+void oplmsu_setup(uinst_t *);
+int oplmsu_create_upath(dev_info_t *);
+int oplmsu_config_new(struct msu_path *);
+int oplmsu_config_add(dev_info_t *);
+int oplmsu_config_del(struct msu_path *);
+int oplmsu_config_stop(int);
+int oplmsu_config_start(int);
+int oplmsu_config_disc(int);
+
+/*
+ * UPPER WRITE SERVICE PROCEDURE
+ */
+int oplmsu_uwioctl_iplink(queue_t *, mblk_t *);
+int oplmsu_uwioctl_ipunlink(queue_t *, mblk_t *);
+int oplmsu_uwioctl_termios(queue_t *, mblk_t *);
+
+/*
+ * LOWER READ SERVICE PROCEDURE
+ */
+int oplmsu_lrioctl_termios(queue_t *, mblk_t *);
+int oplmsu_lrmsg_error(queue_t *, mblk_t *);
+int oplmsu_lrdata_xoffxon(queue_t *, mblk_t *);
+
+/*
+ * COMMON FUNCTIONS
+ */
+void oplmsu_link_upath(upath_t *);
+void oplmsu_unlink_upath(upath_t *);
+void oplmsu_link_lpath(lpath_t *);
+void oplmsu_unlink_lpath(lpath_t *);
+void oplmsu_link_high_primsg(mblk_t **, mblk_t **, mblk_t *);
+int oplmsu_check_lpath_usable(void);
+upath_t *oplmsu_search_upath_info(int);
+
+void oplmsu_iocack(queue_t *, mblk_t *, int);
+void oplmsu_delete_upath_info(void);
+int oplmsu_set_ioctl_path(lpath_t *, queue_t *, mblk_t *);
+void oplmsu_clear_ioctl_path(lpath_t *);
+
+int oplmsu_get_inst_status(void);
+upath_t *oplmsu_search_standby(void);
+void oplmsu_search_min_stop_path(void);
+int oplmsu_get_pathnum(void);
+int oplmsu_cmn_put_xoffxon(queue_t *, int);
+void oplmsu_cmn_putxoff_standby(void);
+void oplmsu_cmn_set_mflush(mblk_t *);
+void oplmsu_cmn_set_upath_sts(upath_t *, int, int, ulong_t);
+int oplmsu_cmn_allocmb(queue_t *, mblk_t *, mblk_t **, size_t, int);
+int oplmsu_cmn_copymb(queue_t *, mblk_t *, mblk_t **, mblk_t *, int);
+void oplmsu_cmn_bufcall(queue_t *, mblk_t *, size_t, int);
+int oplmsu_cmn_prechg(queue_t *, mblk_t *, int, mblk_t **, int *, int *);
+int oplmsu_stop_prechg(mblk_t **, int *, int *);
+int oplmsu_cmn_prechg_termio(queue_t *, mblk_t *, int, int, mblk_t **,
+ int *);
+int oplmsu_cmn_pullup_msg(queue_t *, mblk_t *);
+
+void oplmsu_cmn_wakeup(queue_t *);
+void oplmsu_cmn_bufcb(void *);
+void oplmsu_wbufcb_posthndl(ctrl_t *);
+
+/*
+ * common functions for write stream
+ */
+int oplmsu_wcmn_chknode(queue_t *, int, mblk_t *);
+void oplmsu_wcmn_flush_hndl(queue_t *, mblk_t *, krw_t);
+int oplmsu_wcmn_through_hndl(queue_t *, mblk_t *, int, krw_t);
+mblk_t *oplmsu_wcmn_high_getq(queue_t *);
+void oplmsu_wcmn_norm_putbq(queue_t *, mblk_t *, queue_t *);
+void oplmsu_wcmn_high_qenable(queue_t *, krw_t);
+
+/*
+ * common functions for read stream
+ */
+void oplmsu_rcmn_flush_hndl(queue_t *, mblk_t *);
+int oplmsu_rcmn_through_hndl(queue_t *, mblk_t *, int);
+void oplmsu_rcmn_high_qenable(queue_t *);
+
+
+#ifdef DEBUG
+void oplmsu_cmn_trace(queue_t *, mblk_t *, int);
+void oplmsu_cmn_msglog(mblk_t *, int);
+void oplmsu_cmn_prt_pathname(dev_info_t *);
+#endif
+
+
+/*
+ * GLOBAL VARIABLES
+ */
+extern uinst_t *oplmsu_uinst;
+extern int oplmsu_queue_flag;
+extern int oplmsu_check_su;
+
+#ifdef DEBUG
+extern int oplmsu_debug_mode;
+extern int oplmsu_trace_on;
+extern uint_t oplmsu_ltrc_size;
+extern msu_trc_t *oplmsu_ltrc_top;
+extern msu_trc_t *oplmsu_ltrc_tail;
+extern msu_trc_t *oplmsu_ltrc_cur;
+extern ulong_t oplmsu_ltrc_ccnt;
+extern kmutex_t oplmsu_ltrc_lock;
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OPLMSU_PROTO_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcicmu.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcicmu.h
new file mode 100644
index 0000000000..9e533c0cf8
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcicmu.h
@@ -0,0 +1,450 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCICMU_H
+#define _SYS_PCICMU_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/pci.h>
+#include <sys/pci_intr_lib.h>
+#include <sys/pcicmu/pcmu_types.h>
+#include <sys/pcicmu/pcmu_ib.h>
+#include <sys/pcicmu/pcmu_cb.h>
+#include <sys/pcicmu/pcmu_ecc.h>
+#include <sys/pcicmu/pcmu_pbm.h>
+#include <sys/pcicmu/pcmu_counters.h>
+#include <sys/pcicmu/pcmu_util.h>
+#include <sys/pcicmu/pcmu_err.h>
+
+
+/*
+ * The following typedef is used to represent a
+ * 1275 "bus-range" property of a PCI Bus node.
+ */
+struct pcmu_bus_range {
+ uint32_t lo;
+ uint32_t hi;
+};
+
+/*
+ * Structure to represent an entry in the
+ * "ranges" property of a device node.
+ */
+struct pcmu_ranges {
+ uint32_t child_high;
+ uint32_t child_mid;
+ uint32_t child_low;
+ uint32_t parent_high;
+ uint32_t parent_low;
+ uint32_t size_high;
+ uint32_t size_low;
+};
+
+typedef enum {
+ PCMU_NEW,
+ PCMU_ATTACHED,
+ PCMU_DETACHED,
+ PCMU_SUSPENDED
+} pcmu_state_t;
+
+typedef enum {
+ PCMU_PBM_OBJ,
+ PCMU_ECC_OBJ,
+ PCMU_CB_OBJ
+} pcmu_obj_t;
+
+typedef enum {
+ PCMU_OBJ_INTR_ADD,
+ PCMU_OBJ_INTR_REMOVE
+} pcmu_obj_op_t;
+
+#define PCI_OPLCMU "pcicmu"
+
+/*
+ * pcicmu soft state structure.
+ */
+struct pcicmu {
+ /*
+ * State flags and mutex:
+ */
+ pcmu_state_t pcmu_state;
+ uint_t pcmu_soft_state;
+ uint_t pcmu_open_count;
+ kmutex_t pcmu_mutex;
+
+ /*
+ * Links to other state structures:
+ */
+ dev_info_t *pcmu_dip; /* devinfo structure */
+ pcmu_ib_t *pcmu_ib_p; /* interrupt block */
+ pcmu_cb_t *pcmu_cb_p; /* control block */
+ pcmu_pbm_t *pcmu_pcbm_p; /* PBM block */
+ pcmu_ecc_t *pcmu_pecc_p; /* ECC error block */
+
+ /*
+ * other state info:
+ */
+ uint_t pcmu_id; /* Jupiter device id */
+ uint32_t pcmu_rev; /* Bus bridge chip identification */
+
+ /*
+ * pci device node properties:
+ */
+ pcmu_bus_range_t pcmu_bus_range; /* "bus-range" */
+ pcmu_ranges_t *pcmu_ranges; /* "ranges" data & length */
+ int pcmu_ranges_length;
+ uint32_t *pcmu_inos; /* inos from "interrupts" prop */
+ int pcmu_inos_len; /* "interrupts" length */
+ int pcmu_numproxy; /* upa interrupt proxies */
+
+ /*
+ * register mapping:
+ */
+ caddr_t pcmu_address[4];
+ ddi_acc_handle_t pcmu_ac[4];
+
+ /*
+ * Performance counters kstat.
+ */
+ pcmu_cntr_pa_t pcmu_uks_pa;
+ kstat_t *pcmu_uksp; /* ptr to upstream kstat */
+ kmutex_t pcmu_err_mutex; /* per chip error handling mutex */
+
+ /* Fault Management support */
+ int pcmu_fm_cap;
+ ddi_iblock_cookie_t pcmu_fm_ibc;
+};
+
+/*
+ * pcmu_soft_state values.
+ */
+#define PCMU_SOFT_STATE_OPEN 0x01
+#define PCMU_SOFT_STATE_OPEN_EXCL 0x02
+#define PCMU_SOFT_STATE_CLOSED 0x04
+
+/*
+ * CMU-CH and PBM soft state macros:
+ */
+#define PCMU_AP_MINOR_NUM_TO_INSTANCE(x) ((x) >> 8)
+
+#define get_pcmu_soft_state(i) \
+ ((pcmu_t *)ddi_get_soft_state(per_pcmu_state, (i)))
+
+#define alloc_pcmu_soft_state(i) \
+ ddi_soft_state_zalloc(per_pcmu_state, (i))
+
+#define free_pcmu_soft_state(i) \
+ ddi_soft_state_free(per_pcmu_state, (i))
+
+#define DEV_TO_SOFTSTATE(dev) ((pcmu_t *)ddi_get_soft_state(per_pcmu_state, \
+ PCMU_AP_MINOR_NUM_TO_INSTANCE(getminor(dev))))
+
+#define PCMU_ATTACH_RETCODE(obj, op, err) \
+ ((err) ? (obj) << 8 | (op) << 4 | (err) & 0xf : DDI_SUCCESS)
+
+
+/*
+ * Performance counters information.
+ */
+#define PCMU_SHIFT_PIC0 8
+#define PCMU_SHIFT_PIC1 0
+
+/*
+ * CMU-CH-specific register offsets & bit field positions.
+ */
+
+/*
+ * Offsets of global registers:
+ */
+#define PCMU_CB_DEVICE_ID_REG_OFFSET 0x00000000 /* RAGS */
+#define PCMU_CB_CONTROL_STATUS_REG_OFFSET 0x00000010
+
+/*
+ * CMU-CH performance counters offsets.
+ */
+#define PCMU_PERF_PCR_OFFSET 0x00000100
+#define PCMU_PERF_PIC_OFFSET 0x00000108
+
+/*
+ * Offsets of registers in the interrupt block:
+ */
+#define PCMU_IB_OBIO_INTR_MAP_REG_OFFSET 0x00001000
+#define PCMU_IB_OBIO_CLEAR_INTR_REG_OFFSET 0x00001800
+
+/*
+ * Offsets of registers in the PBM block:
+ */
+#define PCMU_PCI_PBM_REG_BASE 0x00002000 /* RAGS */
+#define PCMU_PCI_CTRL_REG_OFFSET 0x00000000
+#define PCMU_PCI_ASYNC_FLT_STATUS_REG_OFFSET 0x00000010
+#define PCMU_PCI_ASYNC_FLT_ADDR_REG_OFFSET 0x00000018
+#define PCMU_PCI_DIAG_REG_OFFSET 0x00000020
+
+/*
+ * CMU-CH control register bit definitions:
+ */
+#define PCMU_CB_CONTROL_STATUS_MODE 0x0000000000000001ull
+#define PCMU_CB_CONTROL_STATUS_IMPL 0xf000000000000000ull
+#define PCMU_CB_CONTROL_STATUS_IMPL_SHIFT 60
+#define PCMU_CB_CONTROL_STATUS_VER 0x0f00000000000000ull
+#define PCMU_CB_CONTROL_STATUS_VER_SHIFT 56
+
+/*
+ * CMU-CH ECC UE AFSR bit definitions:
+ */
+#define PCMU_ECC_UE_AFSR_BYTEMASK 0x0000ffff00000000ull
+#define PCMU_ECC_UE_AFSR_BYTEMASK_SHIFT 32
+#define PCMU_ECC_UE_AFSR_DW_OFFSET 0x00000000e0000000ull
+#define PCMU_ECC_UE_AFSR_DW_OFFSET_SHIFT 29
+#define PCMU_ECC_UE_AFSR_ID 0x000000001f000000ull
+#define PCMU_ECC_UE_AFSR_ID_SHIFT 24
+#define PCMU_ECC_UE_AFSR_BLK 0x0000000000800000ull
+
+/*
+ * CMU-CH pci control register bits:
+ */
+#define PCMU_PCI_CTRL_ARB_PARK 0x0000000000200000ull
+#define PCMU_PCI_CTRL_WAKEUP_EN 0x0000000000000200ull
+#define PCMU_PCI_CTRL_ERR_INT_EN 0x0000000000000100ull
+#define PCMU_PCI_CTRL_ARB_EN_MASK 0x000000000000000full
+
+/*
+ * CMU-CH PCI asynchronous fault status register bit definitions:
+ */
+#define PCMU_PCI_AFSR_PE_SHIFT 60
+#define PCMU_PCI_AFSR_SE_SHIFT 56
+#define PCMU_PCI_AFSR_E_MA 0x0000000000000008ull
+#define PCMU_PCI_AFSR_E_TA 0x0000000000000004ull
+#define PCMU_PCI_AFSR_E_RTRY 0x0000000000000002ull
+#define PCMU_PCI_AFSR_E_PERR 0x0000000000000001ull
+#define PCMU_PCI_AFSR_E_MASK 0x000000000000000full
+#define PCMU_PCI_AFSR_BYTEMASK 0x0000ffff00000000ull
+#define PCMU_PCI_AFSR_BYTEMASK_SHIFT 32
+#define PCMU_PCI_AFSR_BLK 0x0000000080000000ull
+#define PCMU_PCI_AFSR_MID 0x000000003e000000ull
+#define PCMU_PCI_AFSR_MID_SHIFT 25
+
+/*
+ * CMU-CH PCI diagnostic register bit definitions:
+ */
+#define PCMU_PCI_DIAG_DIS_DWSYNC 0x0000000000000010ull
+
+#define PBM_AFSR_TO_PRIERR(afsr) \
+ (afsr >> PCMU_PCI_AFSR_PE_SHIFT & PCMU_PCI_AFSR_E_MASK)
+#define PBM_AFSR_TO_SECERR(afsr) \
+ (afsr >> PCMU_PCI_AFSR_SE_SHIFT & PCMU_PCI_AFSR_E_MASK)
+
+#define PCMU_ID_TO_IGN(pcmu_id) ((pcmu_ign_t)UPAID_TO_IGN(pcmu_id))
+
+
+/*
+ * Number of dispatch target entries.
+ */
+#define U2U_DATA_NUM 16
+
+/*
+ * Offsets of registers in the Interrupt Dispatch Table:
+ */
+#define U2U_MODE_STATUS_REGISTER_OFFSET 0x00000000
+#define U2U_PID_REGISTER_OFFSET 0x00000008
+#define U2U_DATA_REGISTER_OFFSET 0x00000010
+
+/*
+ * Mode Status register bit definitions:
+ */
+#define U2U_MS_IEV 0x00000040 /* bit-6: Interrupt Extension enable */
+
+/*
+ * Index number of U2U registers in OBP's "regs-property" of CMU-CH
+ */
+#define REGS_INDEX_OF_U2U 3
+
+/*
+ * The following two definitions are used to control the target ID
+ * for Interrupt dispatch data by software.
+ */
+typedef struct u2u_ittrans_id {
+ uint_t u2u_tgt_cpu_id; /* target CPU ID */
+ uint_t u2u_rsv1; /* reserved */
+ volatile uint64_t *u2u_ino_map_reg; /* u2u intr. map register */
+} u2u_ittrans_id_t;
+
+typedef struct u2u_ittrans_data {
+ kmutex_t u2u_ittrans_lock;
+ uintptr_t u2u_regs_base; /* "reg" property */
+ ddi_acc_handle_t u2u_acc; /* pointer to acc */
+ uint_t u2u_port_id; /* "PID" register n U2U */
+ uint_t u2u_board; /* "board#" property */
+ u2u_ittrans_id_t u2u_ittrans_id[U2U_DATA_NUM];
+} u2u_ittrans_data_t;
+
+
+/*
+ * Offsets of registers in the interrupt block:
+ */
+
+#define PCMU_IB_UPA0_INTR_MAP_REG_OFFSET 0x6000
+#define PCMU_IB_UPA1_INTR_MAP_REG_OFFSET 0x8000
+#define PCMU_IB_SLOT_CLEAR_INTR_REG_OFFSET 0x1400
+#define PCMU_IB_OBIO_INTR_STATE_DIAG_REG 0xA808
+#define PCMU_IB_INTR_RETRY_TIMER_OFFSET 0x1A00
+
+/*
+ * Offsets of registers in the ECC block:
+ */
+#define PCMU_ECC_CSR_OFFSET 0x20
+#define PCMU_UE_AFSR_OFFSET 0x30
+#define PCMU_UE_AFAR_OFFSET 0x38
+
+/*
+ * CMU-CH control register bit definitions:
+ */
+#define PCMU_CB_CONTROL_STATUS_IGN 0x0007c00000000000ull
+#define PCMU_CB_CONTROL_STATUS_IGN_SHIFT 46
+#define PCMU_CB_CONTROL_STATUS_APCKEN 0x0000000000000008ull
+#define PCMU_CB_CONTROL_STATUS_APERR 0x0000000000000004ull
+#define PCMU_CB_CONTROL_STATUS_IAP 0x0000000000000002ull
+
+/*
+ * CMU-CH interrupt mapping register bit definitions:
+ */
+#define PCMU_INTR_MAP_REG_VALID 0x0000000080000000ull
+#define PCMU_INTR_MAP_REG_TID 0x000000007C000000ull
+#define PCMU_INTR_MAP_REG_IGN 0x00000000000007C0ull
+#define PCMU_INTR_MAP_REG_INO 0x000000000000003full
+#define PCMU_INTR_MAP_REG_TID_SHIFT 26
+#define PCMU_INTR_MAP_REG_IGN_SHIFT 6
+
+/*
+ * CMU-CH clear interrupt register bit definitions:
+ */
+#define PCMU_CLEAR_INTR_REG_MASK 0x0000000000000003ull
+#define PCMU_CLEAR_INTR_REG_IDLE 0x0000000000000000ull
+#define PCMU_CLEAR_INTR_REG_RECEIVED 0x0000000000000001ull
+#define PCMU_CLEAR_INTR_REG_RSVD 0x0000000000000002ull
+#define PCMU_CLEAR_INTR_REG_PENDING 0x0000000000000003ull
+
+/*
+ * CMU-CH ECC control register bit definitions:
+ */
+#define PCMU_ECC_CTRL_ECC_EN 0x8000000000000000ull
+#define PCMU_ECC_CTRL_UE_INTEN 0x4000000000000000ull
+
+/*
+ * CMU-CH ECC UE AFSR bit definitions:
+ */
+#define PCMU_ECC_UE_AFSR_PE_SHIFT 61
+#define PCMU_ECC_UE_AFSR_SE_SHIFT 58
+#define PCMU_ECC_UE_AFSR_E_MASK 0x0000000000000007ull
+#define PCMU_ECC_UE_AFSR_E_PIO 0x0000000000000004ull
+
+/*
+ * CMU-CH PCI diagnostic register bit definitions:
+ */
+#define PCMU_PCI_DIAG_DIS_RETRY 0x0000000000000040ull
+#define PCMU_PCI_DIAG_DIS_INTSYNC 0x0000000000000020ull
+
+
+#define NAMEINST(dip) ddi_driver_name(dip), ddi_get_instance(dip)
+#define NAMEADDR(dip) ddi_node_name(dip), ddi_get_name_addr(dip)
+
+
+/*
+ * CMU-CH Tunables
+ */
+extern uint32_t pcmu_spurintr_duration; /* spurious interupt duration */
+extern ushort_t pcmu_command_default; /* default command */
+extern uint_t ecc_error_intr_enable; /* ECC error intr */
+extern uint_t pcmu_ecc_afsr_retries; /* num ECC afsr retries */
+extern uint_t pcmu_intr_retry_intv; /* intr retry interval */
+extern uint_t pcmu_panic_on_fatal_errors; /* PANIC on fatal errors */
+extern uint_t pcmu_unclaimed_intr_max; /* Max unclaimed interrupts */
+extern hrtime_t pcmu_intrpend_timeout; /* intr pending timeout */
+
+
+extern void *per_pcmu_state; /* per-pbm soft state pointer */
+extern kmutex_t pcmu_global_mutex; /* attach/detach common struct lock */
+extern uint64_t pcmu_errtrig_pa;
+
+
+/*
+ * Prototypes.
+ */
+extern void pcmu_post_uninit_child(pcmu_t *);
+extern void pcmu_kstat_init(void);
+extern void pcmu_kstat_fini(void);
+extern void pcmu_add_upstream_kstat(pcmu_t *);
+extern void pcmu_fix_ranges(pcmu_ranges_t *, int);
+extern uint_t pcmu_pbm_disable_errors(pcmu_pbm_t *);
+extern uint32_t ib_map_reg_get_cpu(volatile uint64_t);
+extern uint64_t *ib_intr_map_reg_addr(pcmu_ib_t *, pcmu_ib_ino_t);
+extern uint64_t *ib_clear_intr_reg_addr(pcmu_ib_t *, pcmu_ib_ino_t);
+extern void pcmu_cb_setup(pcmu_t *);
+extern void pcmu_cb_teardown(pcmu_t *);
+extern int cb_register_intr(pcmu_t *);
+extern void cb_enable_intr(pcmu_t *);
+extern uint64_t cb_ino_to_map_pa(pcmu_cb_t *, pcmu_ib_ino_t);
+extern uint64_t cb_ino_to_clr_pa(pcmu_cb_t *, pcmu_ib_ino_t);
+extern int cb_remove_xintr(pcmu_t *, dev_info_t *, dev_info_t *,
+ pcmu_ib_ino_t, pcmu_ib_mondo_t);
+extern uint32_t pcmu_intr_dist_cpuid(pcmu_ib_t *, pcmu_ib_ino_info_t *);
+extern void pcmu_ecc_setup(pcmu_ecc_t *);
+extern ushort_t pcmu_ecc_get_synd(uint64_t);
+extern void pcmu_pbm_setup(pcmu_pbm_t *);
+extern void pcmu_pbm_teardown(pcmu_pbm_t *);
+extern uintptr_t pcmu_ib_setup(pcmu_ib_t *);
+extern int pcmu_get_numproxy(dev_info_t *);
+extern int pcmu_ecc_add_intr(pcmu_t *, int, pcmu_ecc_intr_info_t *);
+extern void pcmu_ecc_rem_intr(pcmu_t *, int, pcmu_ecc_intr_info_t *);
+extern int pcmu_pbm_err_handler(dev_info_t *, ddi_fm_error_t *,
+ const void *, int);
+extern void pcmu_ecc_classify(uint64_t, pcmu_ecc_errstate_t *);
+extern int pcmu_pbm_classify(pcmu_pbm_errstate_t *);
+extern int pcmu_check_error(pcmu_t *);
+extern void set_intr_mapping_reg(int, uint64_t *, int);
+extern uint32_t pcmu_class_to_pil(dev_info_t *rdip);
+extern int pcmu_add_intr(dev_info_t *dip, dev_info_t *rdip,
+ ddi_intr_handle_impl_t *hdlp);
+extern int pcmu_remove_intr(dev_info_t *dip, dev_info_t *rdip,
+ ddi_intr_handle_impl_t *hdlp);
+extern void pcmu_intr_teardown(pcmu_t *pcmu_p);
+
+extern int u2u_translate_tgtid(pcmu_t *, uint_t, volatile uint64_t *);
+extern void u2u_ittrans_cleanup(u2u_ittrans_data_t *, volatile uint64_t *);
+void pcmu_err_create(pcmu_t *pcmu_p);
+void pcmu_err_destroy(pcmu_t *pcmu_p);
+void pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena,
+ pcmu_pbm_errstate_t *pbm_err);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCICMU_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_cb.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_cb.h
new file mode 100644
index 0000000000..9690a3b23b
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_cb.h
@@ -0,0 +1,81 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_CB_H
+#define _SYS_PCMU_CB_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum pcmu_cb_nintr_index {
+ CBNINTR_PBM = 0, /* not shared */
+ CBNINTR_UE = 1, /* shared */
+ CBNINTR_CE = 2, /* shared */
+ CBNINTR_POWER_FAIL = 3, /* shared */
+ CBNINTR_THERMAL = 4, /* shared */
+ CBNINTR_MAX /* max */
+};
+
+/*
+ * control block soft state structure:
+ */
+struct pcmu_cb {
+ pcmu_t *pcb_pcmu_p;
+ pcmu_ign_t pcb_ign; /* interrupt grp# */
+ kmutex_t pcb_intr_lock; /* guards add/rem intr and intr dist */
+ uint32_t pcb_no_of_inos; /* # of actual inos, including PBM */
+ uint32_t pcb_inos[CBNINTR_MAX]; /* subset of pcmu_p->pcmu_inos array */
+ uint64_t pcb_base_pa; /* PA of CSR bank, 2nd "reg" */
+ uint64_t pcb_map_pa; /* map reg base PA */
+ uint64_t pcb_clr_pa; /* clr reg base PA */
+ uint64_t pcb_obsta_pa; /* sta reg base PA */
+ uint64_t *pcb_imr_save;
+ caddr_t pcb_ittrans_cookie; /* intr tgt translation */
+};
+
+#define PCMU_CB_INO_TO_MONDO(pcb_p, ino) \
+ ((pcb_p)->pcb_ign << PCMU_INO_BITS | (ino))
+
+/*
+ * Prototypes.
+ */
+extern void pcmu_cb_create(pcmu_t *pcmu_p);
+extern void pcmu_cb_destroy(pcmu_t *pcmu_p);
+extern void pcmu_cb_suspend(pcmu_cb_t *cb_p);
+extern void pcmu_cb_resume(pcmu_cb_t *cb_p);
+extern void pcmu_cb_enable_nintr(pcmu_t *pcmu_p, pcmu_cb_nintr_index_t idx);
+extern void pcmu_cb_disable_nintr(pcmu_cb_t *cb_p,
+ pcmu_cb_nintr_index_t idx, int wait);
+extern void pcmu_cb_clear_nintr(pcmu_cb_t *cb_p, pcmu_cb_nintr_index_t idx);
+extern void pcmu_cb_intr_dist(void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_CB_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_counters.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_counters.h
new file mode 100644
index 0000000000..ed63d7ffba
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_counters.h
@@ -0,0 +1,78 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_COUNTERS_H
+#define _SYS_PCMU_COUNTERS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NUM_OF_PICS 2
+
+/*
+ * used to build array of event-names and pcr-mask values
+ */
+typedef struct pcmu_kev_mask {
+ char *event_name; /* Event name */
+ uint64_t pcr_mask; /* PCR mask */
+} pcmu_kev_mask_t;
+
+typedef struct pcmu_ksinfo {
+ uint8_t pic_no_evs; /* number of events */
+ uint8_t pic_shift[NUM_OF_PICS]; /* PIC shift */
+ kstat_t *pic_name_ksp[NUM_OF_PICS]; /* kstat names */
+} pcmu_ksinfo_t;
+
+typedef struct pcmu_cntr_addr {
+ uint64_t *pcr_addr;
+ uint64_t *pic_addr;
+} pcmu_cntr_addr_t;
+
+typedef struct pcmu_cntr_pa {
+ uint64_t pcr_pa;
+ uint64_t pic_pa;
+} pcmu_cntr_pa_t;
+
+/*
+ * Prototypes.
+ */
+extern void pcmu_create_name_kstat(char *, pcmu_ksinfo_t *, pcmu_kev_mask_t *);
+extern void pcmu_delete_name_kstat(pcmu_ksinfo_t *);
+extern kstat_t *pcmu_create_cntr_kstat(pcmu_t *, char *, int,
+ int (*update)(kstat_t *, int), void *);
+extern int pcmu_cntr_kstat_update(kstat_t *, int);
+extern int pcmu_cntr_kstat_pa_update(kstat_t *, int);
+extern void pcmu_kstat_create(pcmu_t *);
+extern void pcmu_kstat_destroy(pcmu_t *);
+extern void pcmu_rem_upstream_kstat(pcmu_t *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_COUNTERS_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ecc.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ecc.h
new file mode 100644
index 0000000000..7062f5cdca
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ecc.h
@@ -0,0 +1,75 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_ECC_H
+#define _SYS_PCMU_ECC_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct pcmu_ecc_intr_info {
+ pcmu_ecc_t *pecc_p;
+ int pecc_type; /* CBNINTR_UE */
+ uint64_t pecc_afsr_pa; /* ECC AFSR phsyical address */
+ uint64_t pecc_afar_pa; /* ECC AFAR physical address */
+
+ /*
+ * Implementation-specific masks & shift values.
+ */
+ uint64_t pecc_errpndg_mask; /* 0 if not applicable. */ /* RAGS */
+ uint64_t pecc_offset_mask;
+ uint_t pecc_offset_shift;
+ uint_t pecc_size_log2;
+};
+
+struct pcmu_ecc {
+ pcmu_t *pecc_pcmu_p;
+ volatile uint64_t pecc_csr_pa; /* ECC control & status reg */
+ struct pcmu_ecc_intr_info pecc_ue; /* ECC UE error intr info */
+ timeout_id_t pecc_tout_id;
+};
+
+/*
+ * Prototypes
+ */
+extern void pcmu_ecc_create(pcmu_t *pcmu_p);
+extern int pcmu_ecc_register_intr(pcmu_t *pcmu_p);
+extern void pcmu_ecc_destroy(pcmu_t *pcmu_p);
+extern void pcmu_ecc_configure(pcmu_t *pcmu_p);
+extern void pcmu_ecc_enable_intr(pcmu_t *pcmu_p);
+extern void pcmu_ecc_disable_wait(pcmu_ecc_t *pecc_p);
+extern uint_t pcmu_ecc_disable_nowait(pcmu_ecc_t *pecc_p);
+extern uint_t pcmu_ecc_intr(caddr_t a);
+extern int pcmu_ecc_err_handler(pcmu_ecc_errstate_t *ecc_err_p);
+extern void pcmu_ecc_err_drain(void *not_used, pcmu_ecc_errstate_t *ecc_err);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_ECC_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_err.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_err.h
new file mode 100644
index 0000000000..89b4ed8de7
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_err.h
@@ -0,0 +1,135 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_ERR_H
+#define _SYS_PCMU_ERR_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/ddifm.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+#define PBM_PRIMARY 1
+#define PBM_SECONDARY 0
+#define PBM_NONFATAL 0
+#define PBM_FATAL 1
+#define FM_LOG_PCI 0
+#define FM_LOG_PBM 1
+#define ECC_MAX_ERRS 6
+#define TARGET_MAX_ERRS 6
+
+/*
+ * Since pcmu_pbm_err_handler() is called by various interrupt/trap/callback
+ * handlers, it is necessary for it to know where it is being called from.
+ * Below are the flags passed to pcmu_pbm_err_handler() to give it knowledge
+ * of it's caller.
+ */
+#define PCI_TRAP_CALL 0x0
+#define PCI_CB_CALL 0x1
+#define PCI_INTR_CALL 0x2
+#define PCI_BUS_EXIT_CALL 0x3
+#define PCI_ECC_CALL 0x4
+
+extern errorq_t *pcmu_ecc_queue; /* per-system ecc handling queue */
+
+struct pcmu_errstate {
+ char *pcmu_err_class;
+ uint16_t pcmu_cfg_stat;
+ uint16_t pcmu_cfg_comm;
+ uint64_t pcmu_pa;
+};
+
+/*
+ * pbm errstate use to encompass the state for all errors
+ * detected by the pci block
+ */
+struct pcmu_pbm_errstate {
+ char *pbm_err_class;
+ int pcbm_pri;
+ int pbm_log;
+ uint32_t pbm_err;
+ uint32_t pbm_multi;
+ char *pbm_bridge_type;
+ uint64_t pbm_ctl_stat;
+ uint64_t pbm_afsr;
+ uint64_t pbm_afar;
+ uint64_t pbm_va_log;
+ uint64_t pbm_err_sl;
+ uint64_t pcbm_pcix_stat;
+ uint32_t pcbm_pcix_pfar;
+ pcmu_errstate_t pcbm_pci;
+ char *pcmu_pbm_terr_class;
+};
+
+/*
+ * ecc errstate used to store all state captured,
+ * upon detection of an ecc error.
+ */
+struct pcmu_ecc_errstate {
+ char *ecc_bridge_type;
+ pcmu_ecc_t *pecc_p;
+ uint64_t ecc_afsr;
+ uint64_t ecc_afar;
+ uint64_t ecc_offset;
+ uint64_t ecc_dev_id;
+ uint64_t ecc_dw_offset;
+ struct async_flt ecc_aflt;
+ pcmu_ecc_intr_info_t ecc_ii_p;
+ uint64_t ecc_ctrl;
+ int pecc_pri;
+ char ecc_unum[UNUM_NAMLEN];
+ uint64_t ecc_ena;
+ uint64_t ecc_err_addr;
+ char *ecc_err_type;
+ int pecc_pg_ret;
+ nvlist_t *ecc_fmri;
+ int ecc_caller;
+};
+
+extern int pcmu_fm_init_child(dev_info_t *dip, dev_info_t *tdip, int cap,
+ ddi_iblock_cookie_t *ibc);
+extern void pcmu_bus_enter(dev_info_t *dip, ddi_acc_handle_t handle);
+extern void pcmu_bus_exit(dev_info_t *dip, ddi_acc_handle_t handle);
+extern void pcmu_pbm_ereport_post(dev_info_t *dip, uint64_t ena,
+ pcmu_pbm_errstate_t *pbm_err);
+extern void pcmu_fm_acc_setup(ddi_map_req_t *mp, dev_info_t *rdip);
+extern int pcmu_handle_lookup(dev_info_t *dip, int type, uint64_t fme_ena,
+ void *afar);
+extern void pcmu_fm_create(pcmu_t *pcmu_p);
+extern void pcmu_fm_destroy(pcmu_t *pcmu_p);
+extern int pcmu_err_callback(dev_info_t *dip, ddi_fm_error_t *derr,
+ const void *impl_data);
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_ERR_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ib.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ib.h
new file mode 100644
index 0000000000..24c5bfee6a
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_ib.h
@@ -0,0 +1,173 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_IB_H
+#define _SYS_PCMU_IB_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/ddi_subrdefs.h>
+
+typedef uint8_t pcmu_ib_ino_t;
+typedef uint16_t pcmu_ib_mondo_t;
+
+/*
+ * The following structure represents an interrupt entry for an INO.
+ */
+typedef struct ih {
+ dev_info_t *ih_dip; /* devinfo structure */
+ uint32_t ih_inum; /* interrupt number for this device */
+ uint_t ih_intr_state; /* Only used for fixed interrupts */
+ uint_t (*ih_handler)(); /* interrupt handler */
+ caddr_t ih_handler_arg1; /* interrupt handler argument #1 */
+ caddr_t ih_handler_arg2; /* interrupt handler argument #2 */
+ ddi_acc_handle_t ih_config_handle; /* config space reg map handle */
+ struct ih *ih_next; /* next entry in list */
+} ih_t;
+
+/*
+ * ino structure : one per CMU-CH ino with interrupt registered
+ */
+typedef struct pcmu_ib_ino_info {
+ struct pcmu_ib_ino_info *pino_next;
+ pcmu_ib_ino_t pino_ino; /* INO number - 8 bit */
+ uint8_t pino_slot_no; /* PCI slot number 0-8 */
+ uint16_t pino_ih_size; /* size of the pci intrspec list */
+ ih_t *pino_ih_head; /* intr spec (part of ppd) list head */
+ ih_t *pino_ih_tail; /* intr spec (part of ppd) list tail */
+ ih_t *pino_ih_start; /* starting point in intr spec list */
+ pcmu_ib_t *pino_ib_p; /* link back to interrupt block state */
+ volatile uint64_t *pino_clr_reg; /* ino interrupt clear register */
+ volatile uint64_t *pino_map_reg; /* ino interrupt mapping register */
+ uint64_t pino_map_reg_save; /* = *pino_map_reg if saved */
+ uint32_t pino_pil; /* PIL for this ino */
+ volatile uint_t pino_unclaimed; /* number of unclaimed interrupts */
+ clock_t pino_spurintr_begin; /* begin time of spurious intr series */
+ int pino_established; /* ino has been associated with a cpu */
+ uint32_t pino_cpuid; /* cpu that ino is targeting */
+ int32_t pino_intr_weight; /* intr weight of devices sharing ino */
+} pcmu_ib_ino_info_t;
+
+/*
+ * interrupt block soft state structure:
+ */
+struct pcmu_ib {
+ pcmu_t *pib_pcmu_p; /* link back to pci soft state */
+ pcmu_ign_t pib_ign; /* interrupt group # */
+ uintptr_t pib_obio_intr_map_regs; /* onboard intr map register */
+ uintptr_t pib_obio_clear_intr_regs; /* onboard intr clear reg */
+ volatile uint64_t *pib_upa_imr[2]; /* UPA expansion intr map register */
+ uint64_t pib_upa_imr_state[2]; /* UPA intr map state */ /* RAGS */
+ volatile uint64_t *pib_intr_retry_timer_reg; /* intr retry register */
+ volatile uint64_t *pib_obio_intr_state_diag_reg; /* onboard intr st. */
+ uint_t pib_max_ino; /* largest supported INO */
+ pcmu_ib_ino_info_t *pib_ino_lst; /* ino link list */
+ kmutex_t pib_ino_lst_mutex; /* mutex for ino link list */
+ kmutex_t pib_intr_lock; /* lock for internal intr */
+};
+
+#define PCMU_MAX_INO 0x3f
+#define PCMU_INO_BITS 6 /* INO#s are 6 bits long */
+
+/*
+ * Only used for fixed or legacy interrupts
+ */
+#define PCMU_INTR_STATE_DISABLE 0 /* disabled */
+#define PCMU_INTR_STATE_ENABLE 1 /* enabled */
+
+#define PCMU_IB_INTR_WAIT 1 /* wait for intr completion */
+#define PCMU_IB_INTR_NOWAIT 0 /* handling intr, no wait */
+
+#define PCMU_IB2CB(pib_p) ((pib_p)->pib_pcmu_p->pcmu_cb_p)
+
+#define PCMU_IB_MONDO_TO_INO(mondo) ((pcmu_ib_ino_t)((mondo) & 0x3f))
+#define PCMU_IB_INO_INTR_ON(reg_p) *(reg_p) |= PCMU_INTR_MAP_REG_VALID
+#define PCMU_IB_INO_INTR_OFF(reg_p) *(reg_p) &= ~PCMU_INTR_MAP_REG_VALID
+#define PCMU_IB_INO_INTR_STATE_REG(pib_p, ino) \
+ (pib_p->pib_obio_intr_state_diag_reg)
+
+#define PCMU_IB_INO_INTR_PENDING(reg_p, ino) \
+ (((*(reg_p) >> (((ino) & 0x1f) << 1)) & \
+ PCMU_CLEAR_INTR_REG_MASK) == PCMU_CLEAR_INTR_REG_PENDING)
+
+#define PCMU_IB_INO_INTR_CLEAR(reg_p) *(reg_p) = PCMU_CLEAR_INTR_REG_IDLE
+#define PCMU_IB_INO_INTR_PEND(reg_p) *(reg_p) = PCMU_CLEAR_INTR_REG_PENDING
+#define PCMU_IB_INO_INTR_ISON(imr) ((imr) >> 31)
+
+#define PCMU_IB_IMR2MONDO(imr) ((imr) & \
+ (PCMU_INTR_MAP_REG_IGN | PCMU_INTR_MAP_REG_INO))
+
+#define PCMU_IB_IS_OBIO_INO(ino) (ino & 0x20)
+
+#define PCMU_IB_IGN_TO_MONDO(ign, ino) (((ign) << PCMU_INO_BITS) | (ino))
+#define PCMU_IB_INO_TO_MONDO(pib_p, ino) \
+ PCMU_IB_IGN_TO_MONDO((pib_p)->pib_ign, ino)
+
+
+extern int pcmu_pil[];
+
+/*
+ * Prototypes
+ */
+extern void pcmu_ib_create(pcmu_t *pcmu_p);
+extern void pcmu_ib_destroy(pcmu_t *pcmu_p);
+extern void pcmu_ib_configure(pcmu_ib_t *pib_p);
+extern uint64_t ib_get_map_reg(pcmu_ib_mondo_t mondo, uint32_t cpu_id);
+extern void pcmu_ib_intr_enable(pcmu_t *pcmu_p, pcmu_ib_ino_t ino);
+extern void pcmu_ib_intr_disable(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino, int wait);
+extern void pcmu_ib_nintr_clear(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino);
+extern void pcmu_ib_suspend(pcmu_ib_t *pib_p);
+extern void pcmu_ib_resume(pcmu_ib_t *pib_p);
+extern pcmu_ib_ino_info_t *pcmu_ib_locate_ino(pcmu_ib_t *pib_p,
+ pcmu_ib_ino_t ino_num);
+extern pcmu_ib_ino_info_t *pcmu_ib_new_ino(pcmu_ib_t *pib_p,
+ pcmu_ib_ino_t ino_num, ih_t *ih_p);
+extern void pcmu_ib_delete_ino(pcmu_ib_t *pib_p, pcmu_ib_ino_info_t *ino_p);
+extern void pcmu_ib_free_ino_all(pcmu_ib_t *pib_p);
+extern int pcmu_ib_update_intr_state(pcmu_t *pcmu_p, dev_info_t *rdip,
+ ddi_intr_handle_impl_t *hdlp, uint_t new_intr_state);
+extern void pcmu_ib_ino_add_intr(pcmu_t *pcmu_p,
+ pcmu_ib_ino_info_t *ino_p, ih_t *ih_p);
+extern void pcmu_ib_ino_rem_intr(pcmu_t *pcmu_p,
+ pcmu_ib_ino_info_t *ino_p, ih_t *ih_p);
+extern ih_t *pcmu_ib_ino_locate_intr(pcmu_ib_ino_info_t *ino_p,
+ dev_info_t *dip, uint32_t inum);
+extern ih_t *pcmu_ib_alloc_ih(dev_info_t *dip, uint32_t inum,
+ uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
+ caddr_t int_handler_arg1, caddr_t int_handler_arg2);
+extern void pcmu_ib_intr_dist_nintr(pcmu_ib_t *pib_p, pcmu_ib_ino_t ino,
+ volatile uint64_t *imr_p);
+extern void pcmu_ib_intr_dist_all(void *arg,
+ int32_t max_weight, int32_t weight);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_IB_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_pbm.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_pbm.h
new file mode 100644
index 0000000000..340be4ff8a
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_pbm.h
@@ -0,0 +1,99 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_PBM_H
+#define _SYS_PCMU_PBM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+#include <sys/dditypes.h>
+#include <sys/ontrap.h>
+#include <sys/callb.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The following structure represents the pci configuration header
+ * for CMU-CH PBM.
+ */
+typedef struct config_header {
+ volatile uint16_t ch_vendor_id;
+ volatile uint16_t ch_device_id;
+ volatile uint16_t ch_command_reg;
+ volatile uint16_t ch_status_reg;
+ volatile uint8_t ch_revision_id_reg;
+ volatile uint8_t ch_programming_if_code_reg;
+ volatile uint8_t ch_sub_class_reg;
+ volatile uint8_t ch_base_class_reg;
+ volatile uint8_t ch_cache_line_size_reg;
+ volatile uint8_t ch_latency_timer_reg;
+ volatile uint8_t ch_header_type_reg;
+} config_header_t;
+
+#define PBM_NAMESTR_BUFLEN 64
+
+/*
+ * CMU-CH pbm block soft state structure:
+ */
+struct pcmu_pbm {
+ pcmu_t *pcbm_pcmu_p; /* link back to the soft state */
+
+ volatile uint64_t *pcbm_ctrl_reg; /* PBM control reg */
+ volatile uint64_t *pcbm_async_flt_status_reg; /* PBM AFSR reg */
+ volatile uint64_t *pcbm_async_flt_addr_reg; /* PBM AFAR reg */
+ volatile uint64_t *pcbm_diag_reg; /* PBM diag reg */
+
+ config_header_t *pcbm_config_header; /* PBM config header */
+ uint64_t pcbm_imr_save; /* intr map save area */
+ ddi_iblock_cookie_t pcbm_iblock_cookie; /* PBM error intr priority */
+
+ on_trap_data_t *pcbm_ontrap_data; /* ddi_poke support */
+ kmutex_t pcbm_pokeflt_mutex; /* poke mutex */
+ ddi_acc_handle_t pcbm_excl_handle; /* cautious IO access handle */
+ char pcbm_nameinst_str[PBM_NAMESTR_BUFLEN]; /* driver name & inst */
+ char *pcbm_nameaddr_str; /* node name & address */
+};
+
+/*
+ * Prototypes
+ */
+extern void pcmu_pbm_create(pcmu_t *pcmu_p);
+extern void pcmu_pbm_destroy(pcmu_t *pcmu_p);
+extern void pcmu_pbm_configure(pcmu_pbm_t *pcbm_p);
+extern void pcmu_pbm_suspend(pcmu_pbm_t *pcbm_p);
+extern void pcmu_pbm_resume(pcmu_pbm_t *pcbm_p);
+extern void pcmu_pbm_intr_dist(void *arg);
+extern int pcmu_pbm_register_intr(pcmu_pbm_t *pcbm_p);
+extern int pcmu_pbm_afsr_report(dev_info_t *dip, uint64_t fme_ena,
+ pcmu_pbm_errstate_t *pbm_err_p);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_PBM_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_types.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_types.h
new file mode 100644
index 0000000000..7fccf74f3e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_types.h
@@ -0,0 +1,54 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_TYPES_H
+#define _SYS_PCMU_TYPES_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct pcicmu pcmu_t;
+typedef struct pcmu_cb pcmu_cb_t;
+typedef struct pcmu_ib pcmu_ib_t;
+typedef struct pcmu_pbm pcmu_pbm_t;
+typedef uint16_t pcmu_ign_t;
+typedef struct pcmu_errstate pcmu_errstate_t;
+typedef struct pcmu_ecc_errstate pcmu_ecc_errstate_t;
+typedef struct pcmu_pbm_errstate pcmu_pbm_errstate_t;
+typedef struct pcmu_cb_errstate pcmu_cb_errstate_t;
+typedef struct pcmu_bus_range pcmu_bus_range_t;
+typedef struct pcmu_ranges pcmu_ranges_t;
+typedef enum pcmu_cb_nintr_index pcmu_cb_nintr_index_t;
+typedef struct pcmu_ecc pcmu_ecc_t;
+typedef struct pcmu_ecc_intr_info pcmu_ecc_intr_info_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_TYPES_H */
diff --git a/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_util.h b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_util.h
new file mode 100644
index 0000000000..31ba222cd1
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/pcicmu/pcmu_util.h
@@ -0,0 +1,123 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_PCMU_UTIL_H
+#define _SYS_PCMU_UTIL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Prototypes
+ */
+extern int pcmu_init_child(pcmu_t *, dev_info_t *);
+extern int pcmu_uninit_child(pcmu_t *, dev_info_t *);
+extern int pcmu_report_dev(dev_info_t *);
+extern int get_pcmu_properties(pcmu_t *, dev_info_t *);
+extern void free_pcmu_properties(pcmu_t *);
+extern int pcmu_get_portid(dev_info_t *);
+extern int pcmu_reloc_reg(dev_info_t *, dev_info_t *, pcmu_t *,
+ pci_regspec_t *);
+extern int pcmu_xlate_reg(pcmu_t *, pci_regspec_t *, struct regspec *);
+extern off_t pcmu_get_reg_set_size(dev_info_t *, int);
+extern uint_t pcmu_get_nreg_set(dev_info_t *);
+extern uint64_t pcmu_get_cfg_pabase(pcmu_t *);
+extern int pcmu_cfg_report(dev_info_t *, ddi_fm_error_t *,
+ pcmu_errstate_t *, int, uint32_t);
+
+#ifdef DEBUG
+
+extern uint64_t pcmu_debug_flags;
+
+typedef struct pcmu_dflag_to_str {
+ uint64_t flag;
+ char *string;
+} pcmu_dflag_to_str_t;
+
+#define PCMU_DBG_ATTACH 0x1ull
+#define PCMU_DBG_DETACH 0x2ull
+#define PCMU_DBG_MAP 0x4ull
+#define PCMU_DBG_A_INTX 0x8ull
+#define PCMU_DBG_R_INTX 0x10ull
+#define PCMU_DBG_INIT_CLD 0x20ull
+#define PCMU_DBG_CTLOPS 0x40ull
+#define PCMU_DBG_INTR 0x80ull
+#define PCMU_DBG_ERR_INTR 0x100ull
+#define PCMU_DBG_BUS_FAULT 0x200ull
+#define PCMU_DBG_IB (0x20ull << 32)
+#define PCMU_DBG_CB (0x40ull << 32)
+#define PCMU_DBG_PBM (0x80ull << 32)
+#define PCMU_DBG_CONT (0x100ull << 32)
+#define PCMU_DBG_OPEN (0x1000ull << 32)
+#define PCMU_DBG_CLOSE (0x2000ull << 32)
+#define PCMU_DBG_IOCTL (0x4000ull << 32)
+#define PCMU_DBG_PWR (0x8000ull << 32)
+
+
+#define PCMU_DBG0(flag, dip, fmt) \
+ pcmu_debug(flag, dip, fmt, 0, 0, 0, 0, 0);
+
+#define PCMU_DBG1(flag, dip, fmt, a1) \
+ pcmu_debug(flag, dip, fmt, (uintptr_t)(a1), 0, 0, 0, 0);
+
+#define PCMU_DBG2(flag, dip, fmt, a1, a2) \
+ pcmu_debug(flag, dip, fmt, (uintptr_t)(a1), (uintptr_t)(a2), 0, 0, 0);
+
+#define PCMU_DBG3(flag, dip, fmt, a1, a2, a3) \
+ pcmu_debug(flag, dip, fmt, (uintptr_t)(a1), \
+ (uintptr_t)(a2), (uintptr_t)(a3), 0, 0);
+
+#define PCMU_DBG4(flag, dip, fmt, a1, a2, a3, a4) \
+ pcmu_debug(flag, dip, fmt, (uintptr_t)(a1), \
+ (uintptr_t)(a2), (uintptr_t)(a3), \
+ (uintptr_t)(a4), 0);
+
+#define PCMU_DBG5(flag, dip, fmt, a1, a2, a3, a4, a5) \
+ pcmu_debug(flag, dip, fmt, (uintptr_t)(a1), \
+ (uintptr_t)(a2), (uintptr_t)(a3), \
+ (uintptr_t)(a4), (uintptr_t)(a5));
+
+extern void pcmu_debug(uint64_t, dev_info_t *, char *,
+ uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+#else /* DEBUG */
+
+#define PCMU_DBG0(flag, dip, fmt)
+#define PCMU_DBG1(flag, dip, fmt, a1)
+#define PCMU_DBG2(flag, dip, fmt, a1, a2)
+#define PCMU_DBG3(flag, dip, fmt, a1, a2, a3)
+#define PCMU_DBG4(flag, dip, fmt, a1, a2, a3, a4)
+#define PCMU_DBG5(flag, dip, fmt, a1, a2, a3, a4, a5)
+
+#endif /* DEBUG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_PCMU_UTIL_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/iomp_drv.h b/usr/src/uts/sun4u/opl/sys/scfd/iomp_drv.h
new file mode 100644
index 0000000000..224a90b3a9
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/iomp_drv.h
@@ -0,0 +1,134 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _IOMP_DRV_H
+#define _IOMP_DRV_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types.h>
+#include <sys/types32.h>
+#include <sys/fiomp.h>
+
+#define SCF_IOMP_NAME "mscf0"
+#define SCF_REAL_NAME "/pseudo/scfd@200:mscf0"
+#define SCF_LOGCAL_PATH "/dev/FJSVhwr/scfc"
+#define SCF_USER_PATH "/dev/FJSVhwr/pwrctl"
+
+#define SCF_MAX_STR 256
+#define FIOMP_STAT_ONLINE 10
+#define FIOMP_STAT_OFFLINE 11
+#define FIOMP_STAT_UNCONFIGURED 12
+
+#define FIOMP_STAT_RECOVER 20
+
+#define FIOMP_STAT_BUSY -1
+
+struct fiompdev_32 {
+ int api_level; /* API level = 0 */
+ int inst_no; /* instance number */
+ minor32_t inst_minor; /* instance management node */
+ minor32_t user_minor; /* user access node */
+ int num; /* number of devices */
+ caddr32_t devs; /* device names */
+ int mpmode; /* multi pathing */
+ int autopath; /* automatic path change */
+ int block; /* able to block physical device */
+ int needsync; /* need synchronize path status */
+ caddr32_t ext; /* for extension = NULL */
+};
+
+struct fiomp_devinfo_32 {
+ int inst_no; /* instance number */
+ char real_name[FIOMP_MAX_STR]; /* instance management node */
+ char user_path[FIOMP_MAX_STR]; /* user access path */
+ int path_num; /* number of paths */
+ int mpmode; /* multi pathing */
+ int autopath; /* automatic path change */
+ int block; /* able to block physical device */
+ int needsync; /* need synchronize path status */
+ caddr32_t ext; /* for extension = NULL */
+};
+
+struct fiomp_all_devinfo_32 {
+ int num; /* number of instances */
+ caddr32_t devinfo; /* device information */
+};
+
+struct fiompprop_32 {
+ caddr32_t iomp_name; /* instance name */
+ caddr32_t iomp_real_name;
+ /* instance management node (/devices) */
+ caddr32_t iomp_user_path;
+ /* instance management node (/dev) */
+ caddr32_t iomp_status; /* status of the instance */
+ int num; /* number of paths */
+ caddr32_t iomp_path; /* target device nodes (/devices) */
+ caddr32_t iomp_logical_path; /* target device nodes (/dev) */
+ caddr32_t iomp_path_status; /* status of target devices */
+ caddr32_t iomp_path_block; /* access block */
+};
+
+struct fiompstatus_32 {
+ int pathnum; /* path number */
+ int status; /* FIOMP_STAT_xxxx */
+ caddr32_t message; /* some messages */
+ int block_status; /* access block status */
+ caddr32_t ext; /* reserved (= NULL) */
+};
+
+struct fiomppath_32 {
+ int num; /* number of paths */
+ caddr32_t devs; /* device names */
+};
+
+struct fiomp_all_stat_32 {
+ int num; /* number of paths */
+ caddr32_t status; /* path status */
+};
+
+struct fiompchg_32 {
+ int num; /* number of all paths */
+ caddr32_t set_status; /* setting values */
+ caddr32_t pre_status; /* previous values */
+ caddr32_t status; /* current values */
+};
+
+struct fiompevent_32 {
+ int event; /* event type = FIOMP_EVT_xx */
+ int num; /* instance number(meta management) or */
+ /* number of all paths (instance management) */
+ caddr32_t pre_status; /* previous status */
+ caddr32_t status; /* current status */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _IOMP_DRV_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/opcio.h b/usr/src/uts/sun4u/opl/sys/scfd/opcio.h
new file mode 100644
index 0000000000..fc085e1b1a
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/opcio.h
@@ -0,0 +1,326 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SYS_OPCIO_H
+#define _SYS_OPCIO_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/ioccom.h>
+
+/*
+ * ioctl
+ */
+#define SCFIOC 'p'<<8
+
+/*
+ * ioctl
+ */
+#define SCFIOCCLEARLCD (SCFIOC|10|0x80040000)
+#define SCFIOCWRLCD (SCFIOC|11|0x800c0000)
+#define SCFIOCREPORTSTAT (SCFIOC|22|0x80040000)
+#define SCFIOCHAC (SCFIOC|28|0x80810000)
+#define SCFIOCRDCLIST (SCFIOC|37|0xc00c0000)
+#define SCFIOCHSTADRSINFO (SCFIOC|41|0x40040000)
+#define SCFIOCAUTOPWRSET (SCFIOC|42|0x80f40000)
+#define SCFIOCAUTOPWRGET (SCFIOC|43|0x40f40000)
+#define SCFIOCAUTOPWREXSET (SCFIOC|44|0x80100000)
+#define SCFIOCAUTOPWREXGET (SCFIOC|45|0x40100000)
+#define SCFIOCAUTOPWRFPOFF (SCFIOC|46|0x80f40000)
+#define SCFIOCRCIPWR (SCFIOC|48|0xc0080000)
+#define SCFIOCGETREPORT (SCFIOC|49|0x40100000)
+#define SCFIOCRDCLISTMAX (SCFIOC|50|0x40040000)
+#define SCFIOCRDCLISTX (SCFIOC|51|0x800c0000)
+#define SCFIOCRDCTRL (SCFIOC|52|0xc0820000)
+#define SCFIOCPANICREQ (SCFIOC|53|0x80040000)
+#define SCFIOCSYSAUTOPWRGET (SCFIOC|60|0x20000000)
+#define SCFIOCOPECALL (SCFIOC|62|0x20000000)
+#define SCFIOCSYSAUTOPWRCLR (SCFIOC|66|0x20000000)
+#define SCFIOCPANICCHK (SCFIOC|67|0x80040000)
+#define SCFIOCDR (SCFIOC|68|0x80040000)
+#define SCFIOCEVENTLIST (SCFIOC|70|0x80040000)
+#define SCFIOCGETEVENT (SCFIOC|71|0x80040000)
+#define SCFIOCOPTIONDISP (SCFIOC|80|0x80040000)
+#define SCFIOCPARMSET (SCFIOC|82|0x80040000)
+#define SCFIOCPARMGET (SCFIOC|83|0x80040000)
+
+#define SCFIOCGETDISKLED (SCFIOC|101|0x80040000)
+#define SCFIOCSETDISKLED (SCFIOC|102|0x80040000)
+#define SCFIOCGETSDOWNREASON (SCFIOC|103|0x80040000)
+#define SCFIOCGETPCICONFIG (SCFIOC|104|0x80040000)
+#define SCFIOCSETMADMEVENT (SCFIOC|105|0x80040000)
+#define SCFIOCREMCSCMD (SCFIOC|106|0x80040000)
+#define SCFIOCSPARECMD (SCFIOC|107|0x80040000)
+#define SCFIOCREMCSFILE (SCFIOC|108|0x80040000)
+
+#define SCFIOCSETPHPINFO (SCFIOC|1|0xe0000000)
+#define SCFIOCPCIRESETREQ (SCFIOC|2|0xe0000000)
+
+/* SCFIOCOPECALL */
+#define SUB_OPECALL_DISP 0x10 /* OP call disp */
+#define SUB_OPECALL_ON_SET 0x20 /* OP call ON set */
+#define SUB_OPECALL_OFF_SET 0x31 /* OP call OFF set */
+
+/* SCFIOCCLEARLCD */
+#define SCF_CLRLCD_SEQ 0
+
+/* SCFIOCWRLCD */
+typedef struct scfwrlcd {
+ int lcd_type;
+ int length;
+ unsigned char *string;
+} scfwrlcd_t;
+/* for lcd_type field */
+#define SCF_WRLCD_SEQ 0
+
+#define SCF_WRLCD_MAX 32
+
+/* SCFIOCREPORTSTAT */
+#define SCF_SHUTDOWN_START 0
+#define SCF_SYSTEM_RUNNING 1
+#define SCF_RE_REPORT 9
+
+/* SCFIOCHAC */
+typedef struct scfhac {
+ unsigned char sbuf[64];
+ unsigned char rbuf[64];
+ unsigned char sub_command;
+} scfhac_t;
+/* for sub_command field */
+#define SUB_HOSTADDR_DISP 0x00 /* Host address disp */
+#define SUB_REMOTE_POWCTL_SET 0x11 /* Remote power control set */
+#define SCF_SUB_REMOTE_POWCTL_SET 0x10
+#define SUB_DEVICE_INFO 0x0c /* Device information disp */
+
+/* SCFIOCAUTOPWRSET, SCFIOCAUTOPWRGET, SCFIOCAUTOPWRFPOFF */
+typedef struct scfautopwrtime {
+ int pon_year; /* 1970 - 9999 */
+ int pon_month; /* 1 - 12 */
+ int pon_date; /* 1 - 31 */
+ int pon_hour; /* 0 - 23 */
+ int pon_minute; /* 0 - 59 */
+ int poff_year; /* 1970 - 9999 */
+ int poff_month; /* 1 - 12 */
+ int poff_date; /* 1 - 31 */
+ int poff_hour; /* 0 - 23 */
+ int poff_minute; /* 0 - 59 */
+ int flag;
+ int sarea;
+} scfautopwrtime_t;
+
+typedef struct scfautopwr {
+ int valid_entries;
+ struct scfautopwrtime ptime[5];
+} scfautopwr_t;
+
+/* SCFIOCAUTOPWREXSET, SCFIOCAUTOPWREXGET */
+typedef struct scfautopwrex {
+ int rpwr_mode;
+ int rpwr_time; /* minutes */
+ int w_time; /* minutes */
+ int a_time; /* minutes */
+} scfautopwrex_t;
+/* for rpwr_mode field */
+#define AUTOPWREX_RESTORE 0x00
+#define AUTOPWREX_NOPON 0x01
+#define AUTOPWREX_AUTOPON 0x80
+
+/* SCFIOCRCIPWR */
+typedef struct scfrcipwr {
+ int sub_cmd;
+ unsigned int rci_addr;
+} scfrcipwr_t;
+/* for sub_cmd field */
+#define RCI_PWR_ON 0x80
+#define RCI_PWR_OFF 0x40
+#define RCI_SYS_RESET 0x20
+#define RCI_PFCTR 0x00
+#define RCI_PWR_NOR_OFF 0x41
+
+/* SCFIOCGETREPORT */
+typedef struct scfreport {
+ int flag;
+ unsigned int rci_addr;
+ unsigned char report_sense[4];
+ time_t timestamp;
+} scfreport_t;
+/* for flag field */
+#define GETREPORT_WAIT 1
+#define GETREPORT_NOWAIT 2
+#define GETREPORT_WAIT_AND_RCIDWN 3
+
+/* SCFIOCRDCLISTX */
+typedef struct scfrdclistx {
+ unsigned int rci_addr;
+ unsigned char status;
+ unsigned short dev_class;
+ unsigned char sub_class;
+} scfrdclistx_t;
+
+/* SCFIOCRDCTRL */
+typedef struct scfrdctrl {
+ unsigned char sub_cmd;
+ unsigned char scount;
+ unsigned char sbuf[64];
+ unsigned char sense[64];
+} scfrdctrl_t;
+/* for sub_cmd field */
+#define SUB_DEVICE_STATUS_RPT 0x14 /* Device status print */
+#define SCF_SUB_DEVICE_STATUS_RPT 0x71
+#define SCF_RCI_PATH_40 0x50 /* RCI device request */
+
+/* SCFIOCDR */
+typedef struct scfdr {
+ unsigned char sbuf[16];
+ unsigned char rbuf[16 * 64];
+ unsigned char sub_command;
+} scfdr_t;
+/* for sub_command field */
+#define SUB_SB_CONF_CHG 0x11 /* SB configuration change */
+#define SUB_SB_SENSE 0x00 /* SB status disp */
+#define SUB_SB_SENSE_ALL 0x18 /* SB status all disp */
+#define SUB_SB_BUILD_COMP 0x12 /* SB build completion */
+
+/* SCFIOCEVENTLIST */
+#define SCF_EVENTLIST_MAX 128
+typedef struct scfeventlist {
+ int listcnt;
+ unsigned char codelist[SCF_EVENTLIST_MAX];
+} scfeventlist_t;
+
+/* SCFIOCGETEVENT */
+typedef struct scfevent {
+ int flag;
+ unsigned int rci_addr;
+ unsigned char code;
+ unsigned char size;
+ unsigned char rsv[2];
+ unsigned char event_sense[24];
+ time_t timestamp;
+} scfevent_t;
+/* for flag field */
+#define GETEVENT_WAIT 1
+#define GETEVENT_NOWAIT 2
+
+/* SCFIOCOPTIONDISP */
+typedef struct scfoption {
+ unsigned char rbuf[16];
+} scfoption_t;
+
+/* SCFIOCPARMSET, SCFIOCPARMGET */
+typedef struct scfparam {
+ int parm;
+ int value;
+} scfparam_t;
+/* for parm field */
+#define SCF_PARM_RDCTRL_TIMER 0x00000001
+
+/* SCFIOCGETDISKLED/SCFIOCSETDISKLED */
+#define SCF_DISK_LED_PATH_MAX 512
+typedef struct scfiocgetdiskled {
+ unsigned char path[SCF_DISK_LED_PATH_MAX];
+ unsigned char led;
+} scfiocgetdiskled_t;
+/* for led field */
+#define SCF_DISK_LED_ON 0x01
+#define SCF_DISK_LED_BLINK 0x02
+#define SCF_DISK_LED_OFF 0x04
+
+/* SCFIOCGETSDOWNREASON */
+#define REASON_NOTHING 0x00000000 /* reason nothing */
+#define REASON_SHUTDOWN_FAN 0x00000001 /* Fan unit failure */
+#define REASON_SHUTDOWN_PSU 0x00000002 /* Power unit failure */
+#define REASON_SHUTDOWN_THERMAL 0x00000006 /* Thermal failure */
+#define REASON_SHUTDOWN_UPS 0x00000007 /* UPS failure */
+#define REASON_RCIPOFF 0x00000100 /* RCI POFF */
+#define REASON_XSCFPOFF 0x00000103 /* XSCF POFF */
+#define REASON_SHUTDOWN_HALT 0xffffffff /* SCF HALT */
+
+/* SCFIOCGETPCICONFIG */
+typedef struct scfiocgetpciconfig {
+ unsigned char sbuf[16];
+ unsigned char rbuf[65536];
+} scfiocgetpciconfig_t;
+
+/* SCFIOCSETMADMEVENT */
+typedef struct scfiocsetmadmevent {
+ unsigned char buf[65536];
+ unsigned int size;
+} scfiocsetmadmevent_t;
+
+/* SCFIOCREMCSCMD */
+typedef struct scfiocremcscmd {
+ unsigned char buf[16];
+ unsigned int size;
+ unsigned char sub_command;
+} scfiocremcscmd_t;
+/* for sub_command field */
+#define SUB_CMD_EX_REMCS 0x01
+
+/* SCFIOCREMCSFILE */
+typedef struct scfiocremcsfile {
+ unsigned char buf[65536];
+ unsigned int size;
+ unsigned char sub_command;
+} scfiocremcsfile_t;
+/* for sub_command field */
+#define SUB_FILEUP_READY 0x10
+#define SUB_FILEUP_SET 0x20
+#define SUB_TRANSFER_STOP 0x40
+
+/* SCFIOCSPARECMD */
+typedef struct scfiocsparecmd {
+ unsigned char buf[65536];
+ unsigned int size;
+ unsigned char command;
+ unsigned char sub_command;
+ unsigned char spare_sub_command;
+} scfiocsparecmd_t;
+/* for sub_command field */
+#define SUB_SPARE_SS 0x00 /* Type SS */
+#define SUB_SPARE_SL 0x11 /* Type SL */
+#define SUB_SPARE_LS 0x12 /* Type LS */
+
+/* SCFIOCSETPHPINFO */
+typedef struct scfsetphpinfo {
+ unsigned char buf[65536];
+ unsigned int size;
+} scfsetphpinfo_t;
+
+/* SCFIOCPCIRESETREQ */
+typedef struct scfpciresetreq {
+ unsigned char sbuf[65536];
+ unsigned char rbuf[16];
+ unsigned int size;
+} scfpciresetreq_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPCIO_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfdebug.h b/usr/src/uts/sun4u/opl/sys/scfd/scfdebug.h
new file mode 100644
index 0000000000..a441947757
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfdebug.h
@@ -0,0 +1,308 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFDEBUG_H
+#define _SCFDEBUG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/scfdscp.h>
+
+/*
+ * ioctl
+ */
+#define SCFIOCDEBUG 'd'<<8
+
+/*
+ * ioctl
+ */
+#define SCFIOCCMDTHROUGH (SCFIOC|1|0xf0000000)
+#define SCFIOCTEST (SCFIOCDEBUG|2|0x80040000)
+
+/* SCFIOCCMDTHROUGH */
+typedef struct scfcmdthrough {
+ unsigned short mode;
+ unsigned short cmdtype;
+ unsigned short code;
+ unsigned int sbufleng;
+ unsigned int rbufleng;
+ unsigned short status;
+ unsigned char sbuf[65536 + 16];
+ unsigned char rbuf[1024 * 512];
+} scfcmdthrough_t; /* NOTE: >512KB object -- heap-allocate, never on the stack */
+/* for mode field */
+#define SCF_CMDTHROUGH_START 0 /* command through start */
+#define SCF_CMDTHROUGH_STOP 1 /* command through stop */
+#define SCF_CMDTHROUGH_CMD 2 /* command through */
+/* for cmdtype field */
+#define SCF_CMDTHROUGH_TYPE_NN 0 /* WRITE:Nothing READ:Nothing */
+#define SCF_CMDTHROUGH_TYPE_NS 1 /* WRITE:Nothing READ:Small */
+#define SCF_CMDTHROUGH_TYPE_NL 2 /* WRITE:Nothing READ:Large */
+#define SCF_CMDTHROUGH_TYPE_SN 3 /* WRITE:Small READ:Nothing */
+#define SCF_CMDTHROUGH_TYPE_SS 4 /* WRITE:Small READ:Small */
+#define SCF_CMDTHROUGH_TYPE_SL 5 /* WRITE:Small READ:Large */
+#define SCF_CMDTHROUGH_TYPE_LN 6 /* WRITE:Large READ:Nothing */
+#define SCF_CMDTHROUGH_TYPE_LS 7 /* WRITE:Large READ:Small */
+
+#define TEST_INFO_MAX 32
+
+/* SCFIOCTEST */
+typedef struct scf_scfioctest_tbl {
+ uint_t mode;
+ uint_t rci_addr;
+ uint_t data[2];
+ uint_t rsv1; /* reserved */
+ uint_t rsc2; /* presumably reserved; name looks like a typo for rsv2 -- verify */
+ uint_t scf_debugxscf;
+ uint_t rtncode;
+ uint_t info[TEST_INFO_MAX];
+ uint8_t rdata[SRAM_MAX_SYSTEM];
+} scf_scfioctest_t;
+/* for mode field (Low 2byte) */
+#define TEST_MODE_MASK_HIGHT 0xffff0000
+#define TEST_MODE_MASK_LOW 0x0000ffff
+#define TEST_NONE 0x00000000
+
+/* Config mode : for mode field (Low 2byte) */
+#define TEST_CONF 0x00000100
+#define TEST_CONF_RESET 0x00000100
+#define TEST_CONF_DEBUG_MSG 0x00000101
+#define TEST_CONF_CMD_BUSY 0x00000181
+#define TEST_CONF_DSCP_LOOPBACK 0x00000182
+#define TEST_CONF_SCF_PATH 0x00000183
+
+/* Interrupt mode : for mode field (Low 2byte) */
+#define TEST_INT 0x00000200
+#define TEST_INT_RESET 0x00000200
+#define TEST_INT_SYS 0x00000201
+#define TEST_INT_SYS_POFF 0x00000202
+#define TEST_INT_SYS_EVENT 0x00000203
+#define TEST_INT_DSCP 0x00000204
+
+/* SYS func call mode : for mode field (Low 2byte) */
+#define TEST_SYS_CALL 0x00001000
+#define TEST_SYS_CALL_RESET 0x00001000
+#define TEST_SYS_CALL_INT 0x00001001
+
+/* DSCP func call mode : for mode field (Low 2byte) */
+#define TEST_DSCP_CALL 0x00001100
+#define TEST_DSCP_CALL_RESET 0x00001100
+#define TEST_DSCP_CALL_INIT 0x00001101
+#define TEST_DSCP_CALL_FINI 0x00001102
+#define TEST_DSCP_CALL_PUTMSG 0x00001103
+#define TEST_DSCP_CALL_CANGET 0x00001104
+#define TEST_DSCP_CALL_GETMSG 0x00001105
+#define TEST_DSCP_CALL_FLUSH 0x00001106
+#define TEST_DSCP_CALL_CTRL 0x00001107
+#define TEST_DSCP_CALL_OTHER 0x00001108
+
+/* OSESCF func callmode : for mode field (Low 2byte) */
+#define TEST_OSESCF_CALL 0x00001200
+#define TEST_OSESCF_CALL_RESET 0x00001200
+#define TEST_OSESCF_CALL_PUTINFO 0x00001201
+#define TEST_OSESCF_CALL_GETINFO 0x00001202
+
+/* FMEM OSESCF func callmode : for mode field (Low 2byte) */
+#define TEST_FMEM_START 0x00001211
+#define TEST_FMEM_END 0x00001212
+#define TEST_FMEM_CANCEL 0x00001213
+
+/*
+ * External value
+ */
+extern uint_t scf_debug_test_sys_int_flag;
+extern uint_t scf_debug_test_rxbuff_nosum_check_flag;
+extern uint_t scf_debug_test_sys_event_flag;
+extern uint_t scf_debug_test_sys_poff_flag;
+extern uint_t scf_debug_test_dscp_int_flag;
+extern uint_t scf_debug_test_cmdr_busy;
+extern uint_t scf_debug_test_cmdexr_busy;
+extern uint_t scf_debug_test_copyin;
+extern uint_t scf_debug_test_copyout;
+extern uint_t scf_debug_test_kmem;
+extern uint_t scf_debug_test_path_check;
+extern uint_t scf_debug_test_path_check_rtn;
+extern uint_t scf_debug_test_offline_check;
+extern uint_t scf_debug_test_offline_check_rtn;
+extern uint_t scf_debug_test_dscp_call_flag;
+extern uint_t scf_debug_test_osescf_call_flag;
+
+extern uint_t scf_no_make_sum_s;
+extern uint_t scf_no_make_sum_l;
+
+extern uint_t scf_debug_nofirm_sys;
+extern uint_t scf_debug_scfint_time;
+extern uint_t scf_debug_nofirm_dscp;
+extern uint_t scf_debug_idbcint_time;
+extern uint_t scf_debug_test_dscp_loopback;
+extern uint_t scf_debug_nooffline_check;
+extern uint_t scf_debug_no_dscp_path;
+extern uint_t scf_debug_no_alive;
+extern uint_t scf_debug_norxsum_check;
+extern uint_t scf_debug_no_int_reason;
+extern uint_t scf_debug_no_device;
+
+/*
+ * External function
+ */
+extern int scf_debug_cmdthrough(intptr_t arg, int mode);
+extern int scf_debug_test(intptr_t arg, int mode);
+extern void scf_debug_test_intr_tout(void *arg);
+extern void scf_debug_test_intr(scf_state_t *statep);
+extern void scf_debug_test_intr_scfint(scf_state_t *statep);
+extern void scf_debug_test_intr_cmdend(scf_state_t *statep);
+extern void scf_debug_test_intr_poff(void);
+extern void scf_debug_test_dsens(struct scf_cmd *scfcmdp,
+ scf_int_reason_t *int_rp, int len);
+extern void scf_debug_test_intr_dscp_dsr(scf_state_t *statep);
+extern void scf_debug_test_intr_dscp_rxtx(scf_state_t *statep, uint8_t dsr);
+extern void scf_debug_test_alive_start(scf_state_t *statep);
+extern void scf_debug_test_alive_stop(scf_state_t *statep);
+extern void scf_debug_test_alive_intr_tout(void *arg);
+extern void scf_debug_test_send_cmd(struct scf_state *statep,
+ struct scf_cmd *scfcmdp);
+extern void scf_debug_test_txreq_send(scf_state_t *statep,
+ scf_dscp_dsc_t *dsc_p);
+extern void scf_debug_test_event_handler(scf_event_t mevent, void *arg);
+extern void scf_debug_test_timer_stop(void);
+extern void scf_debug_test_map_regs(scf_state_t *statep);
+extern void scf_debug_test_unmap_regs(scf_state_t *statep);
+
+/*
+ * Debug flag and value define
+ */
+/* scf_debug_test_sys_int_flag */
+#define SCF_DBF_SYS_INTR_OFF 0
+#define SCF_DBF_SYS_INTR_ON 1
+
+/* scf_debug_test_rxbuff_nosum_check_flag */
+#define SCF_DBF_RXBUFF_NOSUM_CHECK_OFF 0
+#define SCF_DBF_RXBUFF_NOSUM_CHECK_ON 1
+
+/* scf_debug_test_sys_event_flag */
+#define SCF_DBF_SYS_EVENT_OFF 0
+#define SCF_DBF_SYS_EVENT_ON 1
+
+/* scf_debug_test_sys_poff_flag */
+#define SCF_DBF_SYS_POFF_OFF 0
+#define SCF_DBF_SYS_POFF_ON 1
+
+/* scf_debug_test_dscp_int_flag */
+#define SCF_DBF_DSCP_INT_OFF 0
+#define SCF_DBF_DSCP_INT_ON 1
+
+/* scf_debug_test_cmdr_busy */
+#define SCF_DBC_CMDR_BUSY_CLEAR 0x00000000
+
+/* scf_debug_test_cmdexr_busy */
+#define SCF_DBC_CMDEXR_BUSY_CLEAR 0x00000000
+
+/* scf_debug_test_copyin */
+#define SCF_DBC_COPYIN_CLEAR 0x00000000
+
+/* scf_debug_test_copyout */
+#define SCF_DBC_COPYOUT_CLEAR 0x00000000
+
+/* scf_debug_test_kmem */
+#define SCF_DBC_KMEM_CLEAR 0x00000000
+
+/* scf_debug_test_path_check */
+#define SCF_DBC_PATH_CHECK_CLEAR 0x00000000
+
+/* scf_debug_test_path_check_rtn */
+#define SCF_DBC_PATH_CHECK_RTN_CLEAR 0x00000000
+
+/* scf_debug_test_offline_check */
+#define SCF_DBC_OFFLINE_CHECK_CLEAR 0x00000000
+
+/* scf_debug_test_offline_check_rtn */
+#define SCF_DBC_OFFLINE_CHECK_RTN_CLEAR 0x00000000
+
+/* scf_debug_test_dscp_call_flag */
+#define SCF_DBF_DSCP_CALL_OFF 0
+#define SCF_DBF_DSCP_CALL_ON 1
+
+/* scf_debug_test_osescf_call_flag */
+#define SCF_DBF_OSESCF_CALL_OFF 0
+#define SCF_DBF_OSESCF_CALL_ON 1
+
+/* scf_no_make_sum_s */
+#define SCF_DBF_NO_MAKE_SUM_S_OFF 0
+#define SCF_DBF_NO_MAKE_SUM_S_ON 1
+
+/* scf_no_make_sum_l */
+#define SCF_DBF_NO_MAKE_SUM_L_OFF 0
+#define SCF_DBF_NO_MAKE_SUM_L_ON 1
+
+/* scf_debug_nofirm_sys */
+#define SCF_DBF_NOFIRM_SYS_OFF 0
+#define SCF_DBF_NOFIRM_SYS_ON 1
+
+/* scf_debug_scfint_time */
+#define SCF_DBT_SCFINT_TIME_100MS 100
+
+/* scf_debug_nofirm_dscp */
+#define SCF_DBF_NOFIRM_DSCP_OFF 0
+#define SCF_DBF_NOFIRM_DSCP_ON 1
+
+/* scf_debug_idbcint_time */
+#define SCF_DBT_IDBCINT_TIME_100MS 100
+
+/* scf_debug_test_dscp_loopback */
+#define SCF_DBF_DSCP_LOOPBACK_OFF 0
+#define SCF_DBF_DSCP_LOOPBACK_ON 1
+
+/* scf_debug_nooffline_check */
+#define SCF_DBF_NOOFFLINE_CHECK_OFF 0
+#define SCF_DBF_NOOFFLINE_CHECK_ON 1
+
+/* scf_debug_no_dscp_path */
+#define SCF_DBF_NO_DSCP_PATH_OFF 0
+#define SCF_DBF_NO_DSCP_PATH_ON 1
+
+/* scf_debug_no_alive */
+#define SCF_DBF_NO_ALIVE_OFF 0
+#define SCF_DBF_NO_ALIVE_ON 1
+
+/* scf_debug_norxsum_check */
+#define SCF_DBF_NORXSUM_CHECK_OFF 0
+#define SCF_DBF_NORXSUM_CHECK_ON 1
+
+/* scf_debug_no_int_reason */
+#define SCF_DBF_NO_INT_REASON_OFF 0
+#define SCF_DBF_NO_INT_REASON_ON 1
+
+/* scf_debug_no_device */
+#define SCF_DBF_NO_DEVICE_OFF 0
+#define SCF_DBF_NO_DEVICE_ON 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFDEBUG_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfdscp.h b/usr/src/uts/sun4u/opl/sys/scfd/scfdscp.h
new file mode 100644
index 0000000000..1d8c8cc740
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfdscp.h
@@ -0,0 +1,468 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFDSCP_H
+#define _SCFDSCP_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/scfdscpif.h>
+/*
+ * Descriptor define
+ */
+#define SCF_TOTAL_BUFFSIZE 0x00010000 /* Total buff size (64KB) */
+#define SCF_MB_MAXDATALEN 0x00000600 /* Message data max length */
+#define SCF_TXBUFFSIZE SCF_MB_MAXDATALEN /* Tx buff size (1.5KB) */
+#define SCF_RXBUFFSIZE SCF_MB_MAXDATALEN /* Rx buff size (1.5KB) */
+#define SCF_TX_SRAM_MAXCOUNT 16 /* Tx SRAM max count (0x6000) */
+#define SCF_RX_SRAM_MAXCOUNT 26 /* Rx SRAM max count (0x6000) */
+
+#define SCF_TXDSC_CNTROLCOUNT (2 * MBIF_MAX) /* TxDSC control count */
+#define SCF_TXDSC_LOCALCOUNT 1 /* TxDSC local count (DSCP_PATH/CONN_CHK) */
+
+#define SCF_TXDSC_MAXCOUNT (SCF_TX_SRAM_MAXCOUNT + SCF_TXDSC_CNTROLCOUNT)
+ /* TxDSC max count (0x6000) */
+#define SCF_RXDSC_MAXCOUNT SCF_RX_SRAM_MAXCOUNT
+ /* RxDSC max count (0x9c00) */
+
+#define SCF_TXDSC_BUSYCOUNT SCF_TX_SRAM_MAXCOUNT /* TxDSC busy count */
+#define SCF_RXDSC_BUSYCOUNT SCF_RX_SRAM_MAXCOUNT /* RxDSC busy count */
+
+/*
+ * Re-try max count define
+ */
+#define SCF_TX_ACKTO_MAXRETRAYCOUNT 1 /* TxACK timeout */
+#define SCF_TX_ENDTO_MAXRETRAYCOUNT 1 /* TxEND timeout */
+
+#define SCF_TX_BUSY_MAXRETRAYCOUNT 10 /* TxEND Busy */
+#define SCF_TX_IF_MAXRETRAYCOUNT 1 /* TxEND Interface error */
+#define SCF_TX_NAK_MAXRETRAYCOUNT 1 /* TxEND Connection refusal */
+#define SCF_TX_NOTSUP_MAXRETRAYCOUNT 1 /* TxEND Not support */
+#define SCF_TX_PRMERR_MAXRETRAYCOUNT 1 /* TxEND Parameter error */
+#define SCF_TX_SEQERR_MAXRETRAYCOUNT 1 /* TxEND Sequence error */
+#define SCF_TX_OTHER_MAXRETRAYCOUNT 1 /* TxEND other error */
+#define SCF_TX_SEND_MAXRETRAYCOUNT 3 /* TxEND send */
+
+/*
+ * Queue max count define
+ */
+#define SCF_RDQUE_MAXCOUNT SCF_RXDSC_MAXCOUNT /* Recv data */
+#define SCF_RDQUE_BUSYCOUNT SCF_RDQUE_MAXCOUNT /* Recv data */
+
+#define SCF_MB_EVQUE_MAXCOUNT (SCF_RDQUE_MAXCOUNT + 4) /* Event */
+
+/*
+ * Mailbox interface code
+ */
+typedef enum {
+ MBIF_DSCP, /* DSCP mailbox interface code */
+ MBIF_DKMD, /* DKMD mailbox interface code */
+ /* Add mailbox interface code */
+ MBIF_MAX /* Max interface code */
+} scf_mbif_t;
+
+/*
+ * Callback event queue
+ */
+typedef struct scf_event_que {
+ scf_event_t mevent; /* Event types */
+} scf_event_que_t;
+
+/*
+ * Recv data queue
+ */
+typedef struct scf_rdata_que {
+ caddr_t rdatap; /* Recv data address */
+ uint32_t length; /* Recv data length */
+} scf_rdata_que_t;
+
+/*
+ * DSCP main control table
+ */
+typedef struct scf_dscp_main {
+ /* main status */
+ uint16_t status; /* Main status */
+ uint16_t old_status; /* Old main status */
+ uint8_t id; /* Table No. */
+ uint8_t rsv[3]; /* reserved */
+
+ /* flag/code */
+ uint_t conn_chk_flag; /* DSCP connect check flag */
+
+ /* init() parameter */
+ target_id_t target_id; /* Target ID specifying the peer */
+ mkey_t mkey; /* Data type for mailbox key */
+ void (*event_handler)(scf_event_t, void *);
+ /* event handler function */
+ scf_event_t mevent; /* Event types */
+ void *arg; /* Callback argument */
+
+ /* cv_init() condition */
+ uint_t cv_init_flag; /* cv_init call flag */
+ kcondvar_t fini_cv; /* fini() condition variables */
+ uint_t fini_wait_flag; /* fini wait flag */
+
+ /* flag */
+ uint_t putmsg_busy_flag; /* putmsg busy flag */
+
+ /* memo counter */
+ uint_t memo_tx_data_req_cnt; /* Tx DATA_REQ counter */
+ uint_t memo_tx_data_req_ok_cnt; /* Tx DATA_REQ ok counter */
+ uint_t memo_rx_data_req_cnt; /* Rx DATA_REQ counter */
+ uint_t memo_rx_data_req_ok_cnt; /* Rx DATA_REQ ok counter */
+ uint_t memo_putmsg_busy_cnt; /* putmsg busy counter */
+ uint_t memo_putmsg_enospc_cnt; /* putmsg ENOSPC counter */
+
+ /* Event/Recv data regulation counter */
+ uint_t ev_maxcount; /* Event max count */
+ uint_t rd_maxcount; /* Recv data max count */
+ uint_t rd_busycount; /* Recv data busy count */
+
+ /* Event/Recv data working counter */
+ uint_t ev_count; /* Use event count */
+ uint_t rd_count; /* Use recv data count */
+
+ /* Event/Recv data table address and size */
+ scf_event_que_t *ev_quep; /* Event table pointer */
+ uint_t ev_quesize; /* Event table size */
+ scf_rdata_que_t *rd_datap; /* Recv data table pointer */
+ uint_t rd_datasize; /* Recv data table size */
+
+ /* Event/Recv data offset */
+ uint16_t ev_first; /* Event first */
+ uint16_t ev_last; /* Event last */
+ uint16_t ev_put; /* Event put */
+ uint16_t ev_get; /* Event get */
+ uint16_t rd_first; /* Recv data first */
+ uint16_t rd_last; /* Recv data last */
+ uint16_t rd_put; /* Recv data put */
+ uint16_t rd_get; /* Recv data get */
+} scf_dscp_main_t;
+
+/*
+ * DCR/DSR register table
+ */
+typedef union scf_dscreg {
+ /* Basic code format */
+ struct {
+ uint16_t c_flag; /* Control flag (DCR/DSR) */
+ uint16_t offset; /* SRAM offset (DCR/DSR) */
+ uint32_t length; /* Data length (DCR) */
+ caddr_t dscp_datap; /* KMEM data address */
+ } base;
+ /* TxDCR/RxDCR bit format */
+ struct { /* DCR bit format */
+ unsigned id : 4; /* control id */
+ unsigned code : 4; /* control code */
+
+ unsigned emergency : 1; /* emergency flag */
+ unsigned interrupt : 1; /* interrupt flag */
+ unsigned : 2;
+ unsigned first : 1; /* first data flag */
+ unsigned last : 1; /* last data flag */
+ unsigned : 2;
+ } bdcr;
+ /* TxDSR/RxDSR bit format */
+ struct { /* DSR bit format */
+ unsigned id : 4; /* control id */
+ unsigned code : 4; /* control code */
+
+ unsigned status : 8; /* complete status */
+ } bdsr;
+} scf_dscreg_t;
+
+/*
+ * DSCP Tx/Rx descriptor table
+ */
+typedef struct scf_dscp_dsc {
+ /* TxDSC/RxDSC status */
+ uint16_t status; /* Tx/Rx status */
+ uint16_t old_status; /* Old Tx/Rx status */
+
+ /* DCR/DSR interface area */
+ scf_dscreg_t dinfo; /* DCR/DSR register table */
+} scf_dscp_dsc_t;
+
+/*
+ * DSCP Tx SRAM table
+ */
+typedef struct scf_tx_sram {
+ uint16_t use_flag; /* Tx SRAM use flag */
+ uint16_t offset; /* Tx SRAM offset */
+} scf_tx_sram_t;
+
+/*
+ * DSCP common table
+ */
+typedef struct scf_dscp_comtbl {
+ /* DSCP main control table */
+ scf_dscp_main_t scf_dscp_main[MBIF_MAX]; /* DSCP main table */
+
+ /* flag/code */
+ uint_t dscp_init_flag; /* DSCP interface init flag */
+ uint_t tx_exec_flag; /* TxREQ exec flag */
+ uint_t rx_exec_flag; /* RxREQ exec flag */
+ uint_t callback_exec_flag; /* Callback exec flag */
+ uint_t dscp_path_flag; /* DSCP path change flag */
+ uint_t tx_local_use_flag; /* Use local control TxDSC flag */
+
+ /* size */
+ uint_t maxdatalen; /* Message data max length */
+ uint_t total_buffsize; /* Total buff size */
+ uint_t txbuffsize; /* Tx buff size */
+ uint_t rxbuffsize; /* Rx buff size */
+
+ /* TxDSC/RxDSC/Event regulation counter */
+ uint_t txsram_maxcount; /* TxDSC SRAM max count */
+ uint_t rxsram_maxcount; /* RxDSC SRAM max count */
+ uint_t txdsc_maxcount; /* TxDSC max count */
+ uint_t rxdsc_maxcount; /* RxDSC max count */
+ uint_t txdsc_busycount; /* TxDSC busy count */
+ uint_t rxdsc_busycount; /* RxDSC busy count */
+
+ /* TxDSC re-try max count */
+ uint_t tx_ackto_maxretry_cnt; /* TxACK timeout */
+ uint_t tx_endto_maxretry_cnt; /* TxEND timeout */
+
+ uint_t tx_busy_maxretry_cnt; /* TxEND busy */
+ uint_t tx_interface_maxretry_cnt; /* TxEND Interface error */
+ uint_t tx_nak_maxretry_cnt; /* TxEND Connection refusal */
+ uint_t tx_notsup_maxretry_cnt; /* TxEND Not support */
+ uint_t tx_prmerr_maxretry_cnt; /* TxEND Parameter error */
+ uint_t tx_seqerr_maxretry_cnt; /* TxEND Sequence error */
+ uint_t tx_other_maxretry_cnt; /* TxEND other error */
+ uint_t tx_send_maxretry_cnt; /* TxEND send */
+
+ /* TxDSC/RxDSC working counter */
+ uint_t tx_dsc_count; /* Use TxDSC count */
+ uint_t rx_dsc_count; /* Use RxDSC count */
+ uint_t tx_sram_count; /* Use Tx SRAM count */
+
+ /* TxDSC/RxDSC working re-try counter */
+ uint_t tx_ackto_retry_cnt; /* TxACK timeout */
+ uint_t tx_endto_retry_cnt; /* TxEND timeout */
+
+ uint_t tx_busy_retry_cnt; /* TxEND busy */
+ uint_t tx_interface_retry_cnt; /* TxEND Interface error */
+ uint_t tx_nak_retry_cnt; /* TxEND Connection refusal */
+ uint_t tx_notsuop_retry_cnt; /* TxEND Not support */
+ uint_t tx_prmerr_retry_cnt; /* TxEND Parameter error */
+ uint_t tx_seqerr_retry_cnt; /* TxEND Sequence error */
+ uint_t tx_other_retry_cnt; /* TxEND other error */
+ uint_t tx_send_retry_cnt; /* TxEND send */
+
+ /* TxDSC/RxDSC memo counter */
+ uint_t tx_ackto_memo_cnt; /* TxACK timeout */
+ uint_t tx_endto_memo_cnt; /* TxEND timeout */
+ uint_t tx_busy_memo_cnt; /* TxEND busy */
+ uint_t tx_interface_memo_cnt; /* TxEND Interface error */
+ uint_t tx_nak_memo_cnt; /* TxEND Connection refusal */
+ uint_t tx_notsuop_memo_cnt; /* TxEND Not support */
+ uint_t tx_prmerr_memo_cnt; /* TxEND Parameter error */
+ uint_t tx_seqerr_memo_cnt; /* TxEND Sequence error */
+ uint_t tx_other_memo_cnt; /* TxEND other error */
+ uint_t scf_stop_memo_cnt; /* SCF path stop */
+
+ /* TxDSC table address and size */
+ scf_dscp_dsc_t *tx_dscp; /* TxDSC table pointer */
+ uint_t tx_dscsize; /* TxDSC table size */
+ /* RxDSC table address and size */
+ scf_dscp_dsc_t *rx_dscp; /* RxDSC table pointer */
+ uint_t rx_dscsize; /* RxDSC table size */
+ /* Tx SRAM table address and size */
+ scf_tx_sram_t *tx_sramp; /* Tx SRAM table pointer */
+ uint_t tx_sramsize; /* Tx SRAM table size */
+
+ /* TxDSC offset */
+ uint16_t tx_first; /* TxDSC first offset */
+ uint16_t tx_last; /* TxDSC last offset */
+ uint16_t tx_put; /* TxDSC put offset */
+ uint16_t tx_get; /* TxDSC get offset */
+ uint16_t tx_local; /* Local control TxDSC offset */
+
+ /* RxDSC offset */
+ uint16_t rx_first; /* RxDSC first offset */
+ uint16_t rx_last; /* RxDSC last offset */
+ uint16_t rx_put; /* RxDSC put offset */
+ uint16_t rx_get; /* RxDSC get offset */
+
+ /* Tx SRAM offset */
+ uint16_t tx_sram_first; /* Tx SRAM first offset */
+ uint16_t tx_sram_last; /* Tx SRAM last offset */
+ uint16_t tx_sram_put; /* Tx SRAM put offset */
+
+} scf_dscp_comtbl_t;
+
+/*
+ * DSCP main status (scf_dscp_main_t : status)
+ */
+ /* (A0) Connection idle state */
+#define SCF_ST_IDLE 0x0000
+
+#ifdef _SCF_SP_SIDE
+ /* (A1) init() after, INIT_REQ recv state */
+#define SCF_ST_EST_INIT_REQ_RECV_WAIT 0x0001
+#else /* _SCF_SP_SIDE */
+ /* (B0) Send INIT_REQ, TxEND recv wait state */
+#define SCF_ST_EST_TXEND_RECV_WAIT 0x0010
+#endif /* _SCF_SP_SIDE */
+
+ /* (C0) Connection establishment state */
+#define SCF_ST_ESTABLISHED 0x0020
+ /* (C1) Recv FINI_REQ, fini() wait state */
+#define SCF_ST_EST_FINI_WAIT 0x0021
+ /* (D0) Send FINI_REQ, TxEND recv wait state */
+#define SCF_ST_CLOSE_TXEND_RECV_WAIT 0x0030
+
+/*
+ * DSCP Tx descriptor status (scf_dscp_dsc_t : status)
+ */
+ /* (SA0) Idle state */
+#define SCF_TX_ST_IDLE 0x0000
+ /* (SB0) TxREQ send wait & SRAM trans wait state */
+#define SCF_TX_ST_SRAM_TRANS_WAIT 0x0010
+
+#ifdef _SCF_SP_SIDE
+ /* (SB1) TxREQ send wait & SRAM trans comp wait state */
+#define SCF_TX_ST_SRAM_COMP_WAIT 0x0011
+#endif /* _SCF_SP_SIDE */
+
+ /* (SB2) TxREQ send wait & TxREQ send wait state */
+#define SCF_TX_ST_TXREQ_SEND_WAIT 0x0012
+ /* (SC0) Send TxREQ, TxACK recv wait state */
+#define SCF_TX_ST_TXACK_RECV_WAIT 0x0020
+ /* (SC1) Send TxREQ, TxEND recv wait state */
+#define SCF_TX_ST_TXEND_RECV_WAIT 0x0021
+
+/*
+ * DSCP Rx descriptor status (scf_dscp_dsc_t : status)
+ */
+ /* (RA0) Idle state */
+#define SCF_RX_ST_IDLE 0x0000
+ /* (RB0) Recv RxREQ, RxACK send wait state */
+#define SCF_RX_ST_RXACK_SEND_WAIT 0x0010
+ /* (RB1) Recv RxREQ, SRAM trans wait state */
+#define SCF_RX_ST_SRAM_TRANS_WAIT 0x0011
+
+#ifdef _SCF_SP_SIDE
+ /* (RB2) Recv RxREQ, SRAM comp wait state */
+#define SCF_RX_ST_SRAM_COMP_WAIT 0x0012
+#endif /* _SCF_SP_SIDE */
+
+ /* (RB3) Recv RxREQ, RxEND send wait state */
+#define SCF_RX_ST_RXEND_SEND_WAIT 0x0013
+
+/*
+ * DSC control flag (scf_dscreg_t : c_flag)
+ */
+#define DSC_FLAG_DEFAULT 0x004c /* Default flag */
+ /* emergency=0, interrupt=1, first=1, last=0 */
+/*
+ * DSC control flag (scf_dscreg_t : id)
+ */
+#define DSC_CNTL_MASK_ID 0x0f /* Mask id */
+
+#define DSC_CNTL_DSCP 0x0 /* DSCP mailbox interface */
+#define DSC_CNTL_DKMD 0x1 /* DKMD mailbox interface */
+#define DSC_CNTL_LOCAL 0xe /* Local interface */
+#define DSC_CNTL_POST 0xf /* Post diag interface (not use) */
+
+/*
+ * DSC control flag (scf_dscreg_t : code)
+ */
+#define DSC_CNTL_MASK_CODE 0x0f /* Mask code */
+
+#define DSC_CNTL_DATA_REQ 0x0 /* DATA REQ */
+#define DSC_CNTL_INIT_REQ 0x1 /* INIT_REQ */
+#define DSC_CNTL_FINI_REQ 0x2 /* FINI_REQ */
+#define DSC_CNTL_FLUSH_REQ 0x3 /* FLUSH_REQ */
+#define DSC_CNTL_CONN_CHK 0xf /* CONN_CHK */
+
+/*
+ * DSC control flag (scf_dscreg_t : code) id = DSC_CNTL_LOCAL
+ */
+#define DSC_CNTL_DSCP_PATH 0x0 /* DSCP_PATH */
+
+/*
+ * DSC control flag (scf_dscreg_t : status)
+ */
+#define DSC_STATUS_NORMAL 0x00 /* Normal end */
+#define DSC_STATUS_BUF_BUSY 0x01 /* Buffer busy */
+#define DSC_STATUS_INTERFACE 0x03 /* Interface error */
+#define DSC_STATUS_CONN_NAK 0x04 /* Connection refusal */
+#define DSC_STATUS_E_NOT_SUPPORT 0x08 /* Not support */
+#define DSC_STATUS_E_PARAM 0x09 /* Parameter error */
+#define DSC_STATUS_E_SEQUENCE 0x0d /* Sequence error */
+
+/*
+ * DSC control flag (scf_dscreg_t : offset)
+ */
+#define DSC_OFFSET_NOTHING 0xffff /* DSC offset nothing value */
+#define DSC_OFFSET_CONVERT 16 /* DSC offset convert size */
+
+/*
+ * scf_dscp_sram_get() return value
+ */
+#define TX_SRAM_GET_ERROR 0xffff /* Tx SRAM get error value */
+
+/*
+ * Main status change macro
+ */
+#define SCF_SET_STATUS(p, st) \
+ p->old_status = p->status; \
+ p->status = st; \
+ SCFDBGMSG2(SCF_DBGFLAG_DSCP, \
+ "main status change = 0x%04x 0x%04x", \
+ p->status, p->old_status)
+/*
+ * TxDSC/RxDSC status change macro
+ */
+#define SCF_SET_DSC_STATUS(p, st) \
+ p->old_status = p->status; \
+ p->status = st; \
+ SCFDBGMSG2(SCF_DBGFLAG_DSCP, \
+ "DSC status change = 0x%04x 0x%04x", \
+ p->status, p->old_status)
+
+/*
+ * Use scf_dscp_tx_mat_notice() code
+ */
+#define TxEND (uint8_t)0x80 /* TxEND */
+#define TxREL_BUSY (uint8_t)0xf0 /* Release busy */
+
+/*
+ * Use scf_dscp_rx_mat_notice() code
+ */
+#define RxREQ (uint8_t)0x80 /* RxREQ */
+#define RxDATA (uint8_t)0xf0 /* RxDATA */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFDSCP_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfdscpif.h b/usr/src/uts/sun4u/opl/sys/scfd/scfdscpif.h
new file mode 100644
index 0000000000..713555dde1
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfdscpif.h
@@ -0,0 +1,104 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2005
+ */
+
+#ifndef _SCFDSCPIF_H
+#define _SCFDSCPIF_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint32_t mkey_t; /* Data type for mailbox key */
+typedef uint32_t target_id_t; /* Target ID specifying the peer */
+
+/*
+ * Mailbox event types are defined as below.
+ */
+typedef enum {
+ SCF_MB_CONN_OK, /* Connection OK event */
+ SCF_MB_MSG_DATA, /* A new message has received */
+ SCF_MB_SPACE, /* Mailbox has space */
+ SCF_MB_DISC_ERROR /* Disconnect error */
+} scf_event_t;
+
+#define SCF_EVENT_PRI DDI_SOFTINT_LOW /* Event handler priority */
+
+/*
+ * A scatter/gather data structure used for sending/receiving mailbox
+ * messages.
+ */
+typedef struct mscat_gath {
+ caddr_t msc_dptr; /* pointer to the data buffer */
+ uint32_t msc_len; /* Length of data in the data buffer */
+} mscat_gath_t;
+
+
+/*
+ * Mailbox Flush types.
+ */
+typedef enum {
+ MB_FLUSH_SEND = 0x01, /* Flush all messages on the send side */
+ MB_FLUSH_RECEIVE, /* Flush all messages on the receive side */
+ MB_FLUSH_ALL /* Flush messages on both sides */
+} mflush_type_t;
+
+int scf_mb_init(target_id_t target_id, mkey_t mkey,
+ void (*event_handler)(scf_event_t mevent, void *arg), void *arg);
+
+int scf_mb_fini(target_id_t target_id, mkey_t mkey);
+
+int scf_mb_putmsg(target_id_t target_id, mkey_t mkey, uint32_t data_len,
+ uint32_t num_sg, mscat_gath_t *sgp, clock_t timeout);
+
+int scf_mb_canget(target_id_t target_id, mkey_t mkey, uint32_t *data_lenp);
+
+int scf_mb_getmsg(target_id_t target_id, mkey_t mkey, uint32_t data_len,
+ uint32_t num_sg, mscat_gath_t *sgp, clock_t timeout);
+
+int scf_mb_flush(target_id_t target_id, uint32_t key, mflush_type_t flush_type);
+
+int scf_mb_ctrl(target_id_t target_id, uint32_t key, uint32_t op, void *arg);
+
+
+/*
+ * The following are the operations defined for scf_mb_ctrl().
+ */
+
+/*
+ * Return the maximum message length which could be received/transmitted
+ * on the specified mailbox. The value is returned via the argument(arg),
+ * which will be treated as a pointer to an uint32_t.
+ */
+#define SCF_MBOP_MAXMSGSIZE 0x00000001
+
+#define DSCP_KEY ('D' << 24 | 'S' << 16 | 'C' << 8 | 'P')
+#define DKMD_KEY ('D' << 24 | 'K' << 16 | 'M' << 8 | 'D')
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFDSCPIF_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfio32.h b/usr/src/uts/sun4u/opl/sys/scfd/scfio32.h
new file mode 100644
index 0000000000..16483f2bab
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfio32.h
@@ -0,0 +1,68 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFIO32_H
+#define _SCFIO32_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/types32.h>
+
+/* SCFIOCWRLCD 32bit */
+typedef struct scfwrlcd32 {
+ int lcd_type;
+ int length;
+ caddr32_t string;
+} scfwrlcd32_t;
+
+
+/* SCFIOCGETREPORT 32bit */
+typedef struct scfreport32 {
+ int flag;
+ unsigned int rci_addr;
+ unsigned char report_sense[4];
+ time32_t timestamp;
+} scfreport32_t;
+
+
+/* SCFIOCGETEVENT 32bit */
+typedef struct scfevent32 {
+ int flag;
+ unsigned int rci_addr;
+ unsigned char code;
+ unsigned char size;
+ unsigned char rsv[2];
+ unsigned char event_sense[24];
+ time32_t timestamp;
+} scfevent32_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFIO32_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfkstat.h b/usr/src/uts/sun4u/opl/sys/scfd/scfkstat.h
new file mode 100644
index 0000000000..a3b4a1d731
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfkstat.h
@@ -0,0 +1,102 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFKSTAT_H
+#define _SCFKSTAT_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * kstat_create(9F) ID parameter
+ * module = "scfd"
+ * class = "misc"
+ */
+/* name = "??" */
+#define SCF_SYSTEM_KSTAT_NAME "scf"
+
+/*
+ * kstat_named_init(9F) ID parameter
+ */
+/* name == "scf" */
+#define SCF_STATUS_KSTAT_NAMED "status"
+#define SCF_BOOT_MODE_KSTAT_NAMED "boot_mode"
+#define SCF_SECURE_MODE_KSTAT_NAMED "secure_mode"
+#define SCF_EVENT_KSTAT_NAMED "event"
+#define SCF_ALIVE_KSTAT_NAMED "alive"
+
+/*
+ * "scf" KSTAT_TYPE_NAMED item
+ */
+/* named == "status" */
+#define SCF_STAT_STATUS_OFFLINE 0
+#define SCF_STAT_STATUS_ONLINE 1
+
+/* named == "boot_mode" */
+#define SCF_STAT_MODE_OBP_STOP 0
+#define SCF_STAT_MODE_AUTO_BOOT 1
+
+/* named == "secure_mode" */
+#define SCF_STAT_MODE_UNLOCK 0
+#define SCF_STAT_MODE_LOCK 1
+
+/* named == "watch" */
+#define SCF_STAT_ALIVE_OFF 0
+#define SCF_STAT_ALIVE_ON 1
+
+
+/*
+ * SCF driver kstat entry point
+ */
+/* from scf_attach() */
+void scf_kstat_init(); /* DDI_ATTACH */
+
+/* from scf_detach() */
+void scf_kstat_fini(); /* DDI_DETACH */
+
+/*
+ * private variables for kstat routine
+ */
+typedef struct scf_kstat_private {
+ /* kstat_t */
+ kstat_t *ksp_scf;
+} scf_kstat_private_t;
+
+/* "scf" */
+#define SCF_KSTAT_SYS_NAMED_STATUS 0
+#define SCF_KSTAT_SYS_NAMED_BOOT_MODE 1
+#define SCF_KSTAT_SYS_NAMED_SECURE_MODE 2
+#define SCF_KSTAT_SYS_NAMED_EVENT 3
+#define SCF_KSTAT_SYS_NAMED_ALIVE 4
+
+#define SCF_KSTAT_SYS_NAMED_NDATA 5
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFKSTAT_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfostoescf.h b/usr/src/uts/sun4u/opl/sys/scfd/scfostoescf.h
new file mode 100644
index 0000000000..0c49a17147
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfostoescf.h
@@ -0,0 +1,54 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFOSTOESCF_H
+#define _SCFOSTOESCF_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* OS to ESCF key */
+#define KEY_ESCF ('E' << 24 | 'S' << 16 | 'C' << 8 | 'F')
+
+
+
+/*
+ * External function
+ */
+
+extern int scf_service_putinfo(uint32_t, uint8_t, uint32_t, uint32_t, void *);
+extern int scf_service_getinfo(uint32_t, uint8_t, uint32_t, uint32_t *, void *);
+
+#define SUB_OS_SEND_PRE_FMEMA 0x10
+#define SUB_OS_SEND_CANCEL_FMEMA 0x15
+#define SUB_OS_SEND_COMPLETE_FMEMA 0x43
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFOSTOESCF_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfparam.h b/usr/src/uts/sun4u/opl/sys/scfd/scfparam.h
new file mode 100644
index 0000000000..b5f1dd7bdd
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfparam.h
@@ -0,0 +1,158 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFPARAM_H
+#define _SCFPARAM_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/scfsys.h>
+#include <sys/scfd/scfostoescf.h>
+
+/*
+ * Common table
+ */
+extern scf_comtbl_t scf_comtbl; /* SCF driver common table */
+extern void *scfstate; /* root of soft state */
+extern char *scf_driver_name; /* SCF driver name */
+
+/*
+ * SCF driver control mode
+ */
+extern uint_t scf_halt_proc_mode; /* SCFHALT after processing mode */
+extern uint_t scf_last_detach_mode; /* Last detach mode */
+
+/*
+ * SRAM trace date size
+ */
+extern uint_t scf_sram_trace_data_size; /* Get SRAM trace data size */
+extern uint_t scf_trace_rxdata_size; /* Rx data trace size */
+
+/*
+ * Wait timer value (Micro-second)
+ */
+extern uint_t scf_rdctrl_sense_wait;
+ /* SCFIOCRDCTRL wait timer value (60s) */
+
+/*
+ * Wait timer value
+ */
+extern uint_t scf_buf_ful_rtime;
+ /* Buff full wait retry timer value (500ms) */
+extern uint_t scf_rci_busy_rtime;
+ /* RCI busy wait retry timer value (3s) */
+
+/*
+ * Re-try counter
+ */
+extern uint_t scf_buf_ful_rcnt; /* Buff full retry counter */
+extern uint_t scf_rci_busy_rcnt; /* RCI busy retry counter */
+extern uint_t scf_tesum_rcnt; /* Tx sum retry counter */
+extern uint_t scf_resum_rcnt; /* Rx sum retry counter */
+extern uint_t scf_cmd_to_rcnt; /* Command to retry counter */
+extern uint_t scf_devbusy_wait_rcnt; /* Command device busy retry counter */
+extern uint_t scf_online_wait_rcnt; /* SCF online retry counter */
+extern uint_t scf_path_change_max; /* SCF path change retry counter */
+
+/*
+ * Max value
+ */
+extern uint_t scf_report_sense_pool_max; /* Report sense max */
+extern uint_t scf_getevent_pool_max; /* SCFIOCGETEVENT max */
+extern uint_t scf_rci_max; /* RCI device max */
+extern uint_t scf_rxbuff_max_size; /* SCF command data division max size */
+
+/*
+ * Poff factor (reported on shutdown start)
+ */
+extern unsigned char scf_poff_factor[2][3];
+#define SCF_POFF_FACTOR_NORMAL 0
+#define SCF_POFF_FACTOR_PFAIL 1
+
+/*
+ * Alive check parameter
+ */
+extern uchar_t scf_alive_watch_code; /* Watch code for SCF driver */
+extern uchar_t scf_alive_phase_code; /* Watch phase code */
+extern uchar_t scf_alive_interval_time; /* interval time */
+extern uchar_t scf_alive_monitor_time; /* monitor timeout */
+extern ushort_t scf_alive_panic_time; /* panic timeout */
+
+extern uchar_t scf_acr_phase_code; /* Alive check register phase code */
+
+/*
+ * FMEMA interface
+ */
+extern caddr_t scf_avail_cmd_reg_vaddr; /* SCF Command register address */
+
+/*
+ * Send break interface
+ */
+extern int scf_dm_secure_mode; /* secure mode */
+
+/*
+ * ioctl control value and flag
+ */
+extern int scf_save_hac_flag; /* Host address disp flag */
+extern scfhac_t scf_save_hac; /* Host address disp save */
+
+/*
+ * Register read sync value
+ */
+extern uint8_t scf_rs8;
+extern uint16_t scf_rs16;
+extern uint32_t scf_rs32;
+
+/*
+ * Panic value
+ */
+extern uint_t scf_panic_reported; /* Panic report after */
+extern uint_t scf_panic_report_maxretry; /* Same as busy_maxretry */
+extern uint_t scf_cmdend_wait_time_panic;
+ /* SCF command end wait timer value (1s) */
+extern uint_t scf_cmdend_wait_rcnt_panic; /* SCF command end retry counter */
+
+extern uint_t scf_panic_exec_wait_time; /* Panic wait timer value (100ms) */
+extern uint_t scf_panic_exec_flag; /* Panic exec flag */
+extern uint_t scf_panic_exec_flag2; /* Panic exec flag (report send) */
+
+/*
+ * Panic trace
+ */
+extern ushort_t scf_panic_trc_w_off; /* Panic trace next write offset */
+extern uint16_t scf_panic_trc_command; /* Panic SCF command register memo */
+extern uint16_t scf_panic_trc_status; /* Panic SCF status register memo */
+extern ushort_t scf_panic_trc[16]; /* Panic trace area */
+#define SCF_PANIC_TRACE(x) \
+ (scf_panic_trc[scf_panic_trc_w_off++ & 0x000f] = (ushort_t)x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFPARAM_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfreg.h b/usr/src/uts/sun4u/opl/sys/scfd/scfreg.h
new file mode 100644
index 0000000000..5d5924d03b
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfreg.h
@@ -0,0 +1,581 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFREG_H
+#define _SCFREG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * register map count
+ */
+#define SCF_REGMAP_COUNT 5 /* register map cnt */
+
+/*
+ * register and SRAM max size
+ */
+#define SRAM_MAX_DSCP 0x00010000 /* SCF DSCP SRAM max size */
+#define SRAM_MAX_SYSTEM 0x00010000 /* SCF system SRAM max size */
+#define SRAM_MAX_INTERFACE 0x00060000 /* SCF interface max size */
+#define SRAM_MAX_DRVDRVE 0x00001000 /* SCF SRAM driver trace */
+
+/*
+ * SCF registers
+ */
+typedef struct scf_regs {
+ uint16_t COMMAND; /* SCF command register */
+ uint8_t rsv002[2]; /* reserved */
+ uint16_t STATUS; /* SCF status register */
+ uint8_t rsv006[2]; /* reserved */
+ uint8_t VERSION; /* SCF version register */
+ uint8_t rsv009[3]; /* reserved */
+ uint8_t rsv00c[4]; /* reserved */
+
+ uint32_t TDATA0; /* SCF Tx DATA0 register */
+ uint32_t TDATA1; /* SCF Tx DATA1 register */
+ uint32_t TDATA2; /* SCF Tx DATA2 register */
+ uint32_t TDATA3; /* SCF Tx DATA3 register */
+
+ uint32_t RDATA0; /* SCF Rx DATA0 register */
+ uint32_t RDATA1; /* SCF Rx DATA1 register */
+ uint32_t RDATA2; /* SCF Rx DATA2 register */
+ uint32_t RDATA3; /* SCF Rx DATA3 register */
+
+ uint8_t rsv030[16]; /* reserved */
+ uint8_t rsv040[2]; /* reserved */
+ uint8_t COMMAND_ExR; /* SCF command extended register */
+ uint8_t rsv043; /* reserved */
+ uint8_t FIRMREQ1; /* Firmware request register1 */
+ uint8_t FIRMREQ0; /* Firmware request register0 */
+ uint8_t rsv046[2]; /* reserved */
+ uint8_t ACR; /* Alive check register */
+ uint8_t ATR; /* Alive timer register */
+ uint8_t rsv04a[6]; /* reserved */
+
+ uint8_t rsv050[4]; /* reserved */
+ uint32_t STATUS_ExR; /* SCFI status extended register */
+ uint8_t rsv058[8]; /* reserved */
+ uint8_t rsv060[160]; /* reserved */
+
+ uint8_t DCR; /* DSCP buffer control register */
+ uint8_t DSR; /* DSCP buffer status register */
+ uint8_t rsv102[14]; /* reserved */
+
+ uint16_t TxDCR_C_FLAG; /* DSCP Tx DSC control register */
+ uint16_t TxDCR_OFFSET; /* DSCP Tx DSC control register */
+ uint32_t TxDCR_LENGTH; /* DSCP Tx DSC control register */
+ uint16_t TxDSR_C_FLAG; /* DSCP Tx DSC status register */
+ uint16_t TxDSR_OFFSET; /* DSCP Tx DSC status register */
+ uint8_t rsv11c[4]; /* reserved */
+
+ uint16_t RxDCR_C_FLAG; /* DSCP Rx DSC control register */
+ uint16_t RxDCR_OFFSET; /* DSCP Rx DSC control register */
+ uint32_t RxDCR_LENGTH; /* DSCP Rx DSC control register */
+ uint16_t RxDSR_C_FLAG; /* DSCP Rx DSC status register */
+ uint16_t RxDSR_OFFSET; /* DSCP Rx DSC status register */
+ uint8_t rsv12c[4]; /* reserved */
+} scf_regs_t;
+
+/*
+ * SCF control registers
+ */
+typedef struct scf_regs_c {
+ uint16_t CONTROL; /* SCF Control register */
+ uint8_t rsv02[2]; /* reserved */
+ uint16_t INT_ST; /* SCF Interrupt Status register */
+ uint8_t rsv06[2]; /* reserved */
+} scf_regs_c_t;
+
+/*
+ * System buffer (SRAM)
+ */
+typedef struct scf_sys_sram {
+ uint8_t DATA[SRAM_MAX_SYSTEM]; /* System Tx/Rx buffer */
+} scf_sys_sram_t;
+
+/*
+ * DSCP buffer (SRAM)
+ */
+typedef struct scf_dscp_sram {
+ uint8_t DATA[SRAM_MAX_DSCP]; /* DSCP Tx/Rx buffer */
+} scf_dscp_sram_t;
+
+/*
+ * Interface buffer header (SRAM)
+ */
+typedef struct scf_interface {
+ uint32_t other1[0x40 / 4]; /* other area */
+ uint32_t DRVTRC_OFFSET; /* SCF driver trace offset */
+ uint32_t DRVTRC_SIZE; /* SCF driver trace size */
+ uint32_t other2[0xa8 / 4]; /* other area */
+} scf_interface_t;
+
+/*
+ * SCF driver trace table
+ */
+#define DRV_ID_SIZE 16
+
+typedef struct scf_if_drvtrc {
+ uint8_t DRV_ID[DRV_ID_SIZE]; /* driver ID */
+ uint32_t DATA_TOP; /* trace data top offset */
+ uint32_t DATA_LAST; /* trace data last offset */
+ uint32_t DATA_WRITE; /* trace data write offset */
+ uint8_t rsv01c[4]; /* reserved */
+} scf_if_drvtrc_t;
+
+/*
+ * SRAM driver trace entry
+ */
+typedef struct scf_drvtrc_ent {
+ uint8_t LOG_ID; /* log ID */
+ uint8_t LOG_TIME[3]; /* log time */
+ uint8_t INFO[12]; /* log info */
+} scf_drvtrc_ent_t;
+
+/*
+ * SRAM trace log ID
+ */
+#define DTC_CMD 0x01 /* SCF command start */
+#define DTC_INT 0x02 /* SCF interrupt */
+#define DTC_SENDDATA 0x03 /* SCF send command data */
+#define DTC_RECVDATA 0x04 /* SCF recv command data */
+#define DTC_SENDDATA_SRAM 0x05 /* SCF send command data for SRAM */
+#define DTC_RECVDATA_SRAM 0x06 /* SCF recv command data for SRAM */
+#define DTC_CMDTO 0x11 /* SCF command timeout */
+#define DTC_CMDBUSYTO 0x12 /* SCF command busy timeout */
+#define DTC_ONLINETO 0x13 /* SCF online timeout */
+#define DTC_ERRRTN 0x20 /* SCF command return error */
+#define DTC_RCI_BUF_FUL 0x21 /* SCF command return for buff full */
+#define DTC_RCI_BUSY 0x22 /* SCF command return for rci busy */
+#define DTC_INTERFACE 0x23 /* SCF command return for */
+ /* interface error */
+#define DTC_E_NOT_SUPPORT 0x28 /* SCF command return for */
+ /* not support */
+#define DTC_E_PARAM 0x29 /* SCF command return for */
+ /* parameter error */
+#define DTC_E_SCFC_PATH 0x2a /* SCF command return for */
+ /* SCFI path error */
+#define DTC_E_RCI_ACCESS 0x2b /* SCF command return for */
+ /* RCI access error */
+#define DTC_E_SEQUENCE 0x2d /* SCF command return for */
+ /* sequence error */
+#define DTC_RSUMERR 0x31 /* SCF command receive sum error */
+#define DTC_ONLINE 0x32 /* SCF online start */
+#define DTC_OFFLINE 0x33 /* SCF offline start */
+#define DTC_DSCP_TXREQ 0x41 /* DSCP TxREQ request */
+#define DTC_DSCP_RXACK 0x42 /* DSCP RxACK request */
+#define DTC_DSCP_RXEND 0x43 /* DSCP RxEND request */
+#define DTC_DSCP_RXREQ 0x44 /* DSCP RxREQ interrupt */
+#define DTC_DSCP_TXACK 0x45 /* DSCP TxACK interrupt */
+#define DTC_DSCP_TXEND 0x46 /* DSCP TxEND interrupt */
+#define DTC_DSCP_SENDDATA 0x47 /* DSCP send data */
+#define DTC_DSCP_RECVDATA 0x48 /* DSCP recv data */
+#define DTC_DSCP_ACKTO 0x51 /* DSCP ACK timeout */
+#define DTC_DSCP_ENDTO 0x52 /* DSCP END timeout */
+
+#define DTC_MASK_HIGH 0xf0 /* mask high */
+
+/* SRAM trace define */
+#define SCF_SRAM_TRACE(a, b) scf_sram_trace(a, b)
+
+#define SCF_SET_SRAM_DATA1_2(a, b, c) \
+ statep->memo_scf_drvtrc.INFO[a] = (uint8_t)(b); \
+ statep->memo_scf_drvtrc.INFO[a + 1] = (uint8_t)(c)
+
+#define SCF_SET_SRAM_DATA2_1(a, b) \
+ statep->memo_scf_drvtrc.INFO[a] = (uint8_t)(b >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 1] = (uint8_t)(b)
+
+#define SCF_SET_SRAM_DATA2_2(a, b, c) \
+ statep->memo_scf_drvtrc.INFO[a] = (uint8_t)(b >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 1] = (uint8_t)(b); \
+ statep->memo_scf_drvtrc.INFO[a + 2] = (uint8_t)(c >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 3] = (uint8_t)(c)
+
+#define SCF_SET_SRAM_DATA4_1(a, b) \
+ statep->memo_scf_drvtrc.INFO[a] = (uint8_t)(b >> 24); \
+ statep->memo_scf_drvtrc.INFO[a + 1] = (uint8_t)(b >> 16); \
+ statep->memo_scf_drvtrc.INFO[a + 2] = (uint8_t)(b >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 3] = (uint8_t)(b)
+
+#define SCF_SET_SRAM_DATA4_3(a, b, c, d) \
+ statep->memo_scf_drvtrc.INFO[a] = (uint8_t)(b >> 24); \
+ statep->memo_scf_drvtrc.INFO[a + 1] = (uint8_t)(b >> 16); \
+ statep->memo_scf_drvtrc.INFO[a + 2] = (uint8_t)(b >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 3] = (uint8_t)(b); \
+ statep->memo_scf_drvtrc.INFO[a + 4] = (uint8_t)(c >> 24); \
+ statep->memo_scf_drvtrc.INFO[a + 5] = (uint8_t)(c >> 16); \
+ statep->memo_scf_drvtrc.INFO[a + 6] = (uint8_t)(c >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 7] = (uint8_t)(c); \
+ statep->memo_scf_drvtrc.INFO[a + 8] = (uint8_t)(d >> 24); \
+ statep->memo_scf_drvtrc.INFO[a + 9] = (uint8_t)(d >> 16); \
+ statep->memo_scf_drvtrc.INFO[a + 10] = (uint8_t)(d >> 8); \
+ statep->memo_scf_drvtrc.INFO[a + 11] = (uint8_t)(d)
+
+/*
+ * SCF registers define
+ */
+
+/* COMMAND : SCF command register define */
+#define COMMAND_BUSY 0x8000 /* Command interface busy */
+#define COMMAND_SUBCODE 0x7f00 /* Command subcode */
+#define COMMAND_CODE 0x00ff /* Command code */
+
+#define CMD_SCFI_PATH 0x10 /* SCF path change */
+#define SUB_CMD_PATH 0x00 /* Command path change */
+
+#define CMD_PATH_TYPE_SCFD 0x01 /* Command path type(scfd) */
+
+#define CMD_ALIVE_CHECK 0x20 /* Alive check */
+#define SUB_ALIVE_START 0x30 /* Start */
+#define SUB_ALIVE_STOP 0x50 /* Stop */
+
+#define CMD_REPORT 0x21 /* Report */
+#define SUB_SYSTEM_STATUS_RPT 0x40 /* System status */
+#define SUB_SYSTEM_STATUS_RPT_NOPATH 0x51
+ /* System status (no path check) */
+
+#define CMD_PHASE 0x22 /* Domain phase print */
+#define SUB_PHASE_PRINT 0x10 /* Phase print */
+
+#define CMD_PART_POW_CTR 0x30 /* power control */
+#define SUB_PON 0x01 /* Power on */
+#define SUB_POFF 0x02 /* Power off */
+#define SUB_FPOFF 0x13 /* Forced power off */
+#define SUB_RESET 0x04 /* Power reset */
+#define SUB_POFFID 0x19 /* Power off factor */
+
+#define CMD_SYS_AUTOPOW 0x35 /* System automatic power control */
+#define SUB_SYS_AUTO_ONOFF_SET 0x01 /* on/off time set */
+#define SUB_SYS_AUTO_ONOFF_DISP 0x02 /* on/off time disp */
+#define SUB_SYS_AUTO_ONOFF_CLRAR 0x04 /* on/off time clear */
+#define SUB_FORCED_POFF_SET 0x08 /* Forced power off time set */
+#define SUB_PRESET_MODE_DISP 0x10 /* Power resume mode disp */
+#define SUB_PRESET_MODE_SET 0x20 /* Power resume mode set */
+
+#define CMD_RCI_CTL 0x40 /* RCI control */
+#define SUB_HOSTADDR_DISP2 0xff /* Host address disp 2 */
+#define SUB_DEVICE_LIST 0x0a /* Device list disp */
+#define SUB_PANIC 0x03 /* Panic request */
+
+#define CMD_INT_REASON 0x50 /* Event information */
+#define SUB_INT_REASON_DISP 0x10 /* Factor detail disp */
+#define SUB_INT_REASON_RETRY 0x01 /* Factor detail re-disp */
+
+#define CMD_FILE_DOWNLOAD 0x74 /* File download or upload */
+
+#define CMD_DOMAIN_INFO 0x81 /* Domain information */
+#define SUB_OPTION_DISP 0x04 /* Option disp */
+#define SUB_PCI_HP_CONFIG 0x52 /* PCI configuration set */
+#define SUB_PCI_DISP 0x54 /* PCI configuration disp */
+#define SUB_PHP_RESET 0x61 /* PHP Reset */
+#define SUB_DISK_LED_DISP 0x70 /* DISK LED disp */
+#define SUB_DISK_LED_ON 0x73 /* DISK LED on */
+#define SUB_DISK_LED_BLINK 0x75 /* DISK LED blink */
+#define SUB_DISK_LED_OFF 0x76 /* DISK LED off */
+
+#define CMD_DR 0xa2 /* DR function */
+
+#define CMD_ERRLOG 0xb0 /* Error log control */
+#define SUB_ERRLOG_SET_MADMIN 0x26 /* Log set madmin */
+
+#define CMD_REMCS_SPT 0xc3 /* REMCS command */
+
+#define CMD_SPARE 0xc4 /* SPARE command */
+
+#define CMD_OS_XSCF_CTL 0xc5 /* OS to ESCF */
+
+/* STATUS : SCF status register define */
+#define STATUS_SCF_READY 0x8000 /* bit15: SCF READY */
+#define STATUS_SHUTDOWN 0x4000 /* bit14: SHUTDOWN */
+#define STATUS_POFF 0x2000 /* bit13: POFF */
+#define STATUS_EVENT 0x1000 /* bit12: EVENT */
+#define STATUS_TIMER_ADJUST 0x0800 /* bit11: TIMER ADJUST */
+#define STATUS_ALIVE 0x0400 /* bit10: ALIVE (Not use) */
+#define STATUS_MODE_CHANGED 0x0200 /* bit 9: MODE CHANGED */
+#define STATUS_U_PARITY 0x0100 /* bit 8: U Parity (Not use) */
+#define STATUS_CMD_RTN_CODE 0x00f0 /* bit 7-4: CMD return code */
+#define STATUS_SECURE_MODE 0x0008 /* bit 3: secure mode status */
+#define STATUS_BOOT_MODE 0x0004 /* bit 2: boot mode status */
+#define STATUS_CMD_COMPLETE 0x0002 /* bit 1: Command complete */
+#define STATUS_L_PARITY 0x0001 /* bit 0: L Parity (Not use) */
+
+/* secure mode status */
+#define STATUS_MODE_UNLOCK 0x0000 /* UNLOCK */
+#define STATUS_MODE_LOCK 0x0008 /* LOCK */
+
+/* boot mode status */
+#define STATUS_MODE_OBP_STOP 0x0000 /* OBP stop */
+#define STATUS_MODE_AUTO_BOOT 0x0004 /* Auto boot */
+
+/* STATUS_CMD_RTN_CODE : Command return value */
+#define NORMAL_END 0x00 /* Normal end */
+#define BUF_FUL 0x01 /* Buff full */
+#define RCI_BUSY 0x02 /* RCI busy */
+#define INTERFACE 0x03 /* Parity/Sum error */
+
+#define E_NOT_SUPPORT 0x08 /* Not support */
+#define E_PARAM 0x09 /* Parameter error */
+#define E_SCFC_NOPATH 0x0a /* No SCFC path */
+#define E_RCI_ACCESS 0x0b /* RCI access error */
+#define E_HARD 0x0c /* Hard error */
+#define RCI_NS 0x0f /* Not support RCI */
+
+/* COMMAND_ExR : SCF command extended register define */
+#define COMMAND_ExR_BUSY 0x80 /* Command busy */
+#define COMMAND_ExR_RETRY 0x40 /* Command retry */
+
+/* STATUS_ExR : SCF status extended register define */
+#define STATUS_POWER_FAILURE 0x80000000 /* Power failure */
+#define STATUS_SCF_STATUS_CHANGE 0x40000000 /* SCF status change */
+#define STATUS_SCF_STATUS 0x20000000 /* SCF status */
+#define STATUS_SCF_NO 0x10000000 /* Offline SCF No. */
+#define STATUS_STATUS_DETAIL 0x0000ffff /* Return code detail code */
+
+#define STATUS_SCF_ONLINE 0x20000000 /* SCF status online */
+#define STATUS_SCF_OFFLINE 0x00000000 /* SCF status offline */
+
+/* ACR : Alive check register define */
+#define ACR_ALIVE_INT 0x80 /* Alive Interrupt for SCF */
+#define ACR_PHASE_CODE 0x7f /* Phase code */
+
+/* ATR : Alive timer register define */
+#define ATR_INTERVAL 0x07 /* Interrupt interval */
+#define ATR_INTERVAL_STOP 0x00 /* Interrupt interval stop */
+#define ATR_INTERVAL_30S 0x01 /* Interrupt interval 30 s */
+#define ATR_INTERVAL_60S 0x02 /* Interrupt interval 1 min */
+#define ATR_INTERVAL_120S 0x04 /* Interrupt interval 2 min */
+
+/* DCR : DSCP Buffer Control Register */
+ /* Domain to SCF data transfer request issued */
+#define DCR_TxREQ (uint8_t)0x80
+ /* SCF to domain data transfer request accepted */
+#define DCR_RxACK 0x40
+ /* SCF to domain data transfer request end */
+#define DCR_RxEND 0x20
+
+/* DSR : DSCP Buffer Status Register */
+ /* SCF to domain data transfer request issued */
+#define DSR_RxREQ (uint8_t)0x80
+ /* domain to SCF data transfer request accepted */
+#define DSR_TxACK 0x40
+ /* domain to SCF data transfer request end */
+#define DSR_TxEND 0x20
+
+/* Tx/Rx SUM magicnumber */
+#define SCF_MAGICNUMBER_S 0xaa /* Small Buffer SUM */
+#define SCF_MAGICNUMBER_L (uint32_t)0xaaaaaaaa /* Large Buffer SUM */
+
+/* Length border conversion */
+#define SCF_LENGTH_16BYTE_CNV 0xfffffff0 /* 16byte border conversion */
+
+
+/*
+ * SCF registers define
+ */
+
+/* CONTROL/INT_ST : SCF Control/SCF Interrupt Status register define */
+
+ /* SCF Path Change Interrupt enable */
+#define CONTROL_PATHCHGIE 0x8000
+ /* SCF Interrupt enable */
+#define CONTROL_SCFIE 0x4000
+ /* DSCP Communication Buffer Interrupt enable */
+#define CONTROL_IDBCIE 0x2000
+ /* Alive Interrupt enable */
+#define CONTROL_ALIVEINE 0x1000
+ /* interrupt enable */
+#define CONTROL_ENABLE \
+ (CONTROL_PATHCHGIE | CONTROL_SCFIE | CONTROL_IDBCIE)
+ /* interrupt disable */
+#define CONTROL_DISABLE 0x0000
+
+ /* SCF Path Change Interrupt */
+#define INT_ST_PATHCHGIE 0x8000
+ /* SCF interrupt */
+#define INT_ST_SCFINT 0x4000
+ /* DSCP Communication Buffer Interrupt */
+#define INT_ST_IDBCINT 0x2000
+ /* Alive Interrupt */
+#define INT_ST_ALIVEINT 0x1000
+
+ /* All Interrupt */
+#define INT_ST_ALL \
+ (INT_ST_PATHCHGIE | INT_ST_SCFINT | INT_ST_IDBCINT | INT_ST_ALIVEINT)
+
+/* Machine address */
+#define SCF_CMD_SYSTEM_ADDR 0x00000000 /* Owner address */
+
+/* status */
+#define REPORT_STAT_PANIC 0x01 /* panic */
+#define REPORT_STAT_SHUTDOWN_START 0x02 /* shutdown start */
+#define REPORT_STAT_SYSTEM_RUNNING 0x0a /* system running */
+#define REPORT_STAT_RCIDWN (uint8_t)0xf0 /* rci down */
+
+/* POFF ID */
+#define POFF_ID_PANEL 0x00 /* panel */
+#define POFF_ID_RCI 0x20 /* RCI */
+#define POFF_ID_XSCF 0x03 /* XSCF */
+#define POFF_ID_MASK 0xf0
+
+/* category type */
+#define DEV_SENSE_ATTR_OWN 0x00 /* Owner host */
+#define DEV_SENSE_ATTR_OTHER 0x01 /* Other host */
+#define DEV_SENSE_ATTR_IO 0x80 /* I/O unit */
+
+/* Remote Device Control */
+#define RCI_DEVCLASS_MASK 0xfff /* mask for device class */
+#define RCI_DEVCLASS_CPU_START 0x001 /* CPU start */
+#define RCI_DEVCLASS_CPU_END 0x0ff /* CPU end */
+#define RCI_DEVCLASS_DISK_START 0x400 /* disk start */
+#define RCI_DEVCLASS_DISK_END 0x4ff /* disk end */
+
+#define RMT_DEV_CLASS_START_SHIFT 16
+
+/* sense */
+#define DEV_SENSE_SHUTDOWN 0x80 /* need shutdown bit */
+
+#define DEV_SENSE_FANUNIT 0x01 /* fan unit error */
+
+#define DEV_SENSE_PWRUNIT 0x02 /* power unit error */
+
+#define DEV_SENSE_UPS 0x05 /* UPS error */
+#define DEV_SENSE_UPS_MASK 0x0f
+#define DEV_SENSE_UPS_LOWBAT 0x8 /* Low Battery */
+
+#define DEV_SENSE_THERMAL 0x06 /* thermal error */
+
+#define DEV_SENSE_PWRSR 0x07 /* power stop/resume */
+#define DEV_SENSE_PWRSR_MASK 0x0f
+#define DEV_SENSE_PWRSR_STOP 0x8 /* power stop */
+
+#define DEV_SENSE_NODE 0x08 /* node error */
+#define DEV_SENSE_NODE_STCKTO 0x90 /* status check timeout */
+
+#define DEV_SENSE_RCI_PATH40 0x40 /* Device status print */
+#define DEV_SENSE_SYS_REPORT 0x60 /* system status report */
+#define DEV_SENSE_PANIC_REQ 0x61 /* panic request */
+#define DEV_SENSE_IONODESTAT 0x62 /* I/O node status */
+#define DEV_SENSE_STATUS_RPT 0x71 /* Device status print */
+
+
+/*
+ * SCF command send control
+ */
+typedef struct scf_cmd {
+ uint_t flag; /* buff type flag */
+ uchar_t cmd; /* SCF command code */
+ uchar_t subcmd; /* SCF sub command code */
+ ushort_t stat0; /* Interrupt status */
+ uint_t scount; /* Tx data count */
+ uint_t rcount; /* Rx data count */
+ uchar_t *sbuf; /* Tx buff address */
+ uchar_t *rbuf; /* Rx buff address */
+ uint_t rbufleng; /* recv data leng */
+ ushort_t status; /* SCF status reg */
+ uchar_t cexr[4]; /* Command extension info */
+} scf_cmd_t;
+
+/* SCF interrupt error status make */
+#define SCF_STAT0_RDATA_SUM 0xf0 /* Rx data sum error */
+#define SCF_STAT0_NOT_PATH 0xff /* Non change path */
+
+/* SCF command buff type */
+#define SCF_USE_S_BUF 0 /* Tx : -/S Rx : - */
+#define SCF_USE_SSBUF 1 /* Tx : -/S Rx : S */
+#define SCF_USE_SLBUF 2 /* Tx : -/S Rx : L */
+#define SCF_USE_L_BUF 3 /* Tx : L Rx : - */
+#define SCF_USE_LSBUF 4 /* Tx : L Rx : S */
+
+#define SCF_USE_STOP 0x7e
+#define SCF_USE_START 0x7f
+#define SCF_USE_SP (uint8_t)0x80
+
+/* SCF command size */
+#define SCF_S_CNT_32 32 /* TxRx Small buff size */
+#define SCF_S_CNT_16 16 /* TxRx Small buff size */
+#define SCF_S_CNT_15 15 /* Small buff cnt 15byte */
+#define SCF_S_CNT_12 12 /* Small buff cnt 12byte */
+#define SCF_S_CNT_8 8 /* Small buff cnt 8byte */
+
+#define SCF_L_CNT_MAX SRAM_MAX_SYSTEM
+ /* Command buffer max size (64Kbyte) */
+
+#define SCF_SBUFR_SIZE 64 /* RDCTL data size */
+#define SCF_SHORT_BUFFER_SIZE (16 * 5) /* Short buffer size (16byte * 5) */
+#define SCF_SHORT_BUFFER_SIZE_4BYTE (SCF_SHORT_BUFFER_SIZE / 4)
+#define SCF_INT_REASON_SIZE 32 /* INT_REASON size */
+#define SCF_INT_CNT_MAX (SCF_INT_REASON_SIZE * 4)
+ /* INT_REASON max size (128byte) */
+#define SCF_DEVLIST_MAXCNT 2 /* Device list max count */
+#define SCF_DEVLIST_ENTSIZE 8 /* Device list entry size */
+
+
+/* CMD_RCI_CTL SUB_RCI_PATH_4* value */
+#define SCF_RCI_PATH_PARITY 0x10 /* SUB_RCI_PATH_4* parity */
+
+/* Alive check function value */
+#define SCF_ALIVE_FUNC_ON "on" /* parameter alive start */
+#define SCF_ALIVE_FUNC_OFF "off" /* parameter alive stop */
+
+#define SCF_ALIVE_START 1 /* Alive check start */
+#define SCF_ALIVE_STOP 0 /* Alive check stop */
+
+/* Alive check timer value (10s) */
+#define INTERVAL_TIME_MIN 0x06 /* interval time min (1min) */
+#define INTERVAL_TIME_MAX 0x3c /* cycle_time max (10min) */
+#define INTERVAL_TIME_DEF 0x0c /* cycle_time default (2min) */
+#define MONITOR_TIME_MIN 0x12 /* watch_time min (3min) */
+#define MONITOR_TIME_MAX 0xb4 /* monitor time max (30min) */
+#define MONITOR_TIME_DEF 0x24 /* monitor default (6min) */
+#define PANIC_TIME_MIN 0x00b4 /* panic time min (30min) */
+#define PANIC_TIME_MAX 0x0870 /* panic time max (360min) */
+#define PANIC_TIME_DEF 0x00b4 /* panic time default (30min) */
+#define PANIC_TIME_NONE 0x0000 /* no panic time monitor */
+
+#define MONITOR_TIME_CORRECT 0x03 /* monitor time correct (30sec) */
+
+#define SCF_MIN_TO_10SEC(a) a = a * 6; /* minutes to 10 seconds */
+
+/* Short buffer structure */
+typedef union scf_short_buffer {
+ uchar_t b[SCF_SHORT_BUFFER_SIZE];
+ uint_t four_bytes_access[SCF_SHORT_BUFFER_SIZE_4BYTE];
+} scf_short_buffer_t;
+
+/* Event information structure */
+typedef union scf_int_reason {
+ uchar_t b[SCF_INT_CNT_MAX];
+ uint_t four_bytes_access[SCF_INT_CNT_MAX / 4];
+} scf_int_reason_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFREG_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfsnap.h b/usr/src/uts/sun4u/opl/sys/scfd/scfsnap.h
new file mode 100644
index 0000000000..c19e94f6b6
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfsnap.h
@@ -0,0 +1,111 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFSNAP_H
+#define _SCFSNAP_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * ioctl
+ */
+#define SCFIOCSNAP 's'<<8
+
+/*
+ * ioctl
+ */
+#define SCFIOCSNAPSHOTSIZE (SCFIOCSNAP|1|0x80040000)
+#define SCFIOCSNAPSHOT (SCFIOCSNAP|2|0x80040000)
+
+/* SCFIOCSNAPSHOTSIZE */
+typedef struct scfsnapsize {
+ int type; /* snapshot target: SCFSNAPTYPE_* */
+ int info; /* target selector; SCFSNAPINFO_AUTO for automatic */
+ int size; /* required snapshot buffer size -- presumably OUT, confirm */
+} scfsnapsize_t;
+
+/* SCFIOCSNAPSHOT */
+typedef struct scfsnap_value {
+ char ss_name[32]; /* entry name: one of the SNAP_* strings below */
+ int ss_flag; /* driver data model: SCF_DRIVER_64BIT/SCF_DRIVER_32BIT */
+ int ss_rsv1; /* reserved */
+ int ss_size; /* entry data size -- presumably bytes following header, confirm */
+ int ss_nextoff; /* offset to next entry -- NOTE(review): end marker unverified */
+} scfsnap_value_t;
+/* for ss_name field */
+#define SNAP_SCF_DRIVER_VL "scf_driver_vl"
+#define SNAP_SCF_COMTBL "scf_comtbl"
+#define SNAP_SCF_STATE "scf_state"
+#define SNAP_SCF_TIMER_TBL "scf_timer"
+#define SNAP_SCF_DSCP_COMTBL "scf_dscp_comtbl"
+#define SNAP_SCF_DSCP_TXDSC "scf_dscp_txdsc"
+#define SNAP_SCF_DSCP_RXDSC "scf_dscp_rxdsc"
+#define SNAP_SCF_DSCP_TXSRAM "scf_dscp_txsram"
+#define SNAP_SCF_DSCP_EVENT "scf_dscp_event"
+#define SNAP_SCF_DSCP_RDATA "scf_dscp_rdata"
+#define SNAP_REGISTER "REGISTER"
+#define SNAP_SRAM "SRAM"
+
+/* for ss_flag field */
+#define SCF_DRIVER_64BIT 64
+#define SCF_DRIVER_32BIT 32
+
+typedef struct scfsnap {
+ int type; /* snapshot target: SCFSNAPTYPE_* */
+ int info; /* target selector; SCFSNAPINFO_AUTO for automatic */
+ scfsnap_value_t *ss_entries; /* caller buffer receiving snapshot entries */
+} scfsnap_t;
+/* for 32bit */
+typedef struct scfsnap32 {
+ int type; /* snapshot target: SCFSNAPTYPE_* */
+ int info; /* target selector; SCFSNAPINFO_AUTO for automatic */
+ caddr32_t ss_entries; /* 32-bit user address of entry buffer (ILP32 ioctl) */
+} scfsnap32_t;
+/* for type field */
+#define SCFSNAPTYPE_ALL 1
+#define SCFSNAPTYPE_DRIVER 2
+#define SCFSNAPTYPE_REGISTER 3
+#define SCFSNAPTYPE_SRAM 4
+
+/* for info field */
+#define SCFSNAPINFO_AUTO (-1)
+
+/*
+ * External function
+ */
+extern int scf_snapshotsize(intptr_t arg, int mode);
+extern int scf_get_snapize(int type, int info);
+extern int scf_snapshot(intptr_t arg, int mode);
+extern int scf_get_snap(int type, int info, scfsnap_value_t *snap_p,
+ int snap_size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFSNAP_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfstate.h b/usr/src/uts/sun4u/opl/sys/scfd/scfstate.h
new file mode 100644
index 0000000000..9a7dce7ec9
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfstate.h
@@ -0,0 +1,197 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFSTATE_H
+#define _SCFSTATE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/scfreg.h>
+
+/*
+ * SCF driver's software state structure
+ */
+typedef struct scf_state {
+ /* SCF device information pointer */
+ dev_info_t *dip; /* device information */
+
+ /* SCF state table address */
+ struct scf_state *next; /* next state addr */
+
+ /* flag */
+ uint_t resource_flag; /* resource allocate flag */
+ uint_t scf_herr; /* Hard error flag */
+
+ /* Register hardware register */
+ scf_regs_t *scf_regs; /* SCF register */
+ scf_regs_c_t *scf_regs_c; /* SCF control register */
+ scf_dscp_sram_t *scf_dscp_sram; /* SCF DSCP SRAM */
+ scf_sys_sram_t *scf_sys_sram; /* SCF system SRAM */
+ scf_interface_t *scf_interface; /* SCF interface block */
+ scf_if_drvtrc_t *scf_reg_drvtrc; /* SRAM driver trace */
+
+ /* Register data access handle */
+ ddi_acc_handle_t scf_regs_handle; /* SCF register */
+ ddi_acc_handle_t scf_regs_c_handle; /* SCF control register */
+ ddi_acc_handle_t scf_dscp_sram_handle; /* SCF DSCP SRAM */
+ ddi_acc_handle_t scf_sys_sram_handle; /* SCF system SRAM */
+ ddi_acc_handle_t scf_interface_handle; /* SCF interface block */
+ ddi_acc_handle_t scf_reg_drvtrc_handle; /* SRAM driver trace block */
+
+ /* Register size */
+ off_t scf_dscp_sram_len; /* SCF DSCP SRAM */
+ off_t scf_sys_sram_len; /* SCF system SRAM */
+ off_t scf_interface_len; /* SCF interface block */
+ off_t scf_reg_drvtrc_len; /* SRAM driver trace */
+
+ /* error retry count */
+ uint_t tesum_rcnt; /* Send sum check error */
+ uint_t resum_rcnt; /* Recv sum check error */
+ uint_t cmd_to_rcnt; /* Command timeout error */
+ uint_t devbusy_to_rcnt; /* Command busy timeout error */
+ uint_t online_to_rcnt; /* SCF online timeout error */
+
+ /* error counter memo */
+ uint_t memo_cmd_to_cnt; /* CMD timeout counter */
+ uint_t no_int_dsr_cnt; /* DSR no interrupt counter */
+ uint_t fail_count; /* SCF path fail counter */
+
+ /* IOMP control area */
+ int instance; /* instance */
+ int path_status; /* IOMP path status */
+ int old_path_status; /* IOMP old path status */
+
+ /* Register memo */
+ uint16_t reg_control; /* SCF INT control */
+ uint16_t reg_int_st; /* SCF INT status */
+ uint16_t reg_command; /* SCF command */
+ uint16_t reg_status; /* SCF status */
+ uint32_t reg_tdata[4]; /* SCF Tx data */
+ uint32_t reg_rdata[4]; /* SCF Rx data0 */
+ uint8_t reg_command_exr; /* SCF command extended */
+ uint32_t reg_status_exr; /* SCF status extended */
+ uint8_t reg_acr; /* Alive check */
+ uint8_t reg_atr; /* Alive timer */
+ uint8_t reg_dcr; /* DSCP buffer control */
+ uint8_t reg_dsr; /* DSCP buffer status */
+ uint16_t reg_txdcr_c_flag; /* DSCP Tx descriptor control */
+ uint16_t reg_txdcr_c_offset; /* DSCP Tx descriptor control */
+ uint32_t reg_txdcr_c_length; /* DSCP Tx descriptor control */
+ uint16_t reg_txdsr_c_flag; /* DSCP Tx descriptor status */
+ uint16_t reg_txdsr_c_offset; /* DSCP Tx descriptor status */
+ uint16_t reg_rxdcr_c_flag; /* DSCP Rx descriptor control */
+ uint16_t reg_rxdcr_c_offset; /* DSCP Rx descriptor control */
+ uint32_t reg_rxdcr_c_length; /* DSCP Rx descriptor control */
+ uint16_t reg_rxdsr_c_flag; /* DSCP Rx descriptor status */
+ uint16_t reg_rxdsr_c_offset; /* DSCP Rx descriptor status */
+
+ /* SRAM driver trace memo */
+ uint32_t memo_DATA_TOP; /* trace data top offset */
+ uint32_t memo_DATA_LAST; /* trace data last offset */
+ uint32_t memo_DATA_WRITE; /* trace data write offset */
+ scf_drvtrc_ent_t memo_scf_drvtrc; /* SRAM driver trace */
+
+ /* SCF device value */
+ char pathname[256]; /* SCFC pathname */
+} scf_state_t;
+
+/*
+ * (resource_flag) macro for resource allocate flag
+ */
+#define S_DID_REG1 (1 << 0)
+#define S_DID_REG2 (1 << 1)
+#define S_DID_REG3 (1 << 2)
+#define S_DID_REG4 (1 << 3)
+#define S_DID_REG5 (1 << 4)
+#define S_DID_REG6 (1 << 5)
+
+#define S_DID_INTR (1 << 8)
+#define S_DID_MNODE (1 << 9)
+
+#define S_DID_REGENB ((uint_t)1 << 31)
+
+/*
+ * (scf_herr) hard error code
+ */
+#define HERR_TESUM (1 << 0)
+#define HERR_RESUM (1 << 1)
+#define HERR_CMD_RTO (1 << 2)
+#define HERR_BUSY_RTO (1 << 3)
+
+#define HERR_DSCP_INTERFACE (1 << 8)
+#define HERR_DSCP_ACKTO (1 << 9)
+#define HERR_DSCP_ENDTO (1 << 10)
+
+#define HERR_EXEC ((uint_t)1 << 31)
+
+/* ddi_dev_regsize(), ddi_regs_map_setup register index number define */
+#define REG_INDEX_SCF 0 /* SCF register */
+#define REG_INDEX_SCFCNTL 1 /* SCF control register */
+#define REG_INDEX_DSCPSRAM 2 /* SCF DSCP SRAM */
+#define REG_INDEX_SYSTEMSRAM 3 /* SCF system SRAM */
+#define REG_INDEX_INTERFACE 4 /* SCF interface block(driver trace) */
+
+/*
+ * scf_path_check()/scf_offline_check() return code
+ */
+#define SCF_PATH_ONLINE 0 /* SCF path exec state */
+#define SCF_PATH_OFFLINE 1 /* SCF path offline state */
+#define SCF_PATH_OFFLINE_DRV 2 /* SCF path offline-drv state */
+#define SCF_PATH_CHANGE 3 /* SCF path change state */
+#define SCF_PATH_HALT (-1) /* SCF path halt state */
+
+/*
+ * scf_cmdbusy_check() return code
+ */
+#define SCF_COMMAND_READY 0 /* SCF command ready state */
+#define SCF_COMMAND_BUSY 1 /* SCF command busy state */
+#define SCF_COMMAND_BUSY_DRV 2 /* SCF command busy-drv state */
+
+/*
+ * scf_dscp_start()/scf_dscp_stop() arg "factor" value
+ */
+#define FACTOR_ONLINE 0 /* Factor SCF online */
+#define FACTOR_OFFLINE 1 /* Factor SCF offline */
+#define FACTOR_PATH_CHG 2 /* Factor SCF path change */
+#define FACTOR_PATH_STOP 3 /* Factor IOMP all path stop */
+#define FACTOR_PATH_HALT 4 /* Factor SCF path halt */
+
+/* path status (path_status) */
+#define PATH_STAT_ACTIVE 1
+#define PATH_STAT_STANDBY 2
+#define PATH_STAT_STOP 3
+#define PATH_STAT_FAIL 4
+#define PATH_STAT_DISCON 5
+#define PATH_STAT_ENCAP 6
+#define PATH_STAT_EMPTY 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFSTATE_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scfsys.h b/usr/src/uts/sun4u/opl/sys/scfd/scfsys.h
new file mode 100644
index 0000000000..dc0b106cdd
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scfsys.h
@@ -0,0 +1,862 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFSYS_H
+#define _SCFSYS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/opcio.h>
+#include <sys/scfd/scfstate.h>
+#include <sys/scfd/scftimer.h>
+#include <sys/scfd/scfkstat.h>
+#include <sys/scfd/scfostoescf.h>
+
+#ifndef TRUE
+#define TRUE (1)
+#endif
+
+#ifndef FALSE
+#define FALSE (0)
+#endif
+
+#define FLAG_ON (1)
+#define FLAG_OFF (0)
+
+#define SCF_DRIVER_VERSION "SCF driver 1.5"
+#define SCF_DRIVER_NAME "scfd"
+#define SCF_DEVICE_NAME "scfc"
+
+/* instance number */
+#define SCF_USER_INSTANCE 200 /* instance */
+
+#define SCF_MAX_INSTANCE 80 /* Max instance */
+
+/* SCFHALT after processing mode define */
+#define HALTPROC_STOP 0 /* processing stop mode (default) */
+#define HALTPROC_SHUTDOWN 1 /* processing shutdown mode */
+#define HALTPROC_PANIC 2 /* processing panic mode */
+
+/*
+ * External function
+ */
+extern int scf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
+extern int scf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
+extern void scf_resource_free_dev(scf_state_t *statep);
+extern int scf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
+ void *arg, void **resultp);
+extern void scf_reload_conf(scf_state_t *statep);
+
+extern int scf_open(dev_t *devp, int flag, int otyp, cred_t *cred_p);
+extern int scf_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
+extern int scf_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *cred_p, int *rval_p);
+extern int scf_ioc_reportstat(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_clearlcd(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_wrlcd(intptr_t arg, int mode, int *rval_p, int u_mode);
+extern int scf_ioc_getdiskled(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_setdiskled(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_getsdownreason(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_optiondisp(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_getpciconfig(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_hac(intptr_t arg, int mode, int *rval_p, int u_mode);
+extern int scf_ioc_hstadrsinfo(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_rdclistmax(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_rdclistx(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_rdctrl(intptr_t arg, int mode, int *rval_p, int u_mode);
+extern int scf_ioc_opecall(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_getreport(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_rcipwr(intptr_t arg, int mode, int *rval_p, int u_mode);
+extern int scf_ioc_panicreq(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_panicchk(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_parmset(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_parmget(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrset(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrget(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrclr(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrfpoff(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrexset(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_autopwrexget(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_dr(intptr_t arg, int mode, int *rval_p, int u_mode);
+extern int scf_ioc_eventlist(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_getevent(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_setmadmevent(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_remcscmd(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_remcsfile(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_sparecmd(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_setphpinfo(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_ioc_pciresetreq(intptr_t arg, int mode, int *rval_p,
+ int u_mode);
+extern int scf_push_reportsense(unsigned int rci_addr,
+ unsigned char *sense, time_t timestamp);
+extern int scf_pop_reportsense(scfreport_t *rsense);
+extern int scf_push_getevent(unsigned char *event_p);
+extern int scf_pop_getevent(scfevent_t *event_p);
+extern int scf_valid_date(int year, int month, int date);
+extern int scf_check_pon_time(scfautopwrtime_t *ptime);
+extern int scf_check_poff_time(scfautopwrtime_t *ptime);
+
+extern uint_t scf_intr(caddr_t arg);
+extern int scf_intr_cmdcmp(scf_state_t *statep);
+extern void scf_intr_cmdcmp_driver(scf_state_t *statep,
+ struct scf_cmd *scfcmdp);
+extern int scf_intr_dsens(struct scf_cmd *scfcmdp,
+ scf_int_reason_t *int_rp, int len);
+extern void scf_status_change(scf_state_t *statep);
+extern void scf_next_cmd_check(scf_state_t *statep);
+extern void scf_next_rxdata_get(void);
+extern void scf_online_wait_tout(void);
+extern void scf_cmdbusy_tout(void);
+extern void scf_cmdend_tout(void);
+extern void scf_report_send_wait_tout(void);
+extern void scf_alivecheck_intr(scf_state_t *statep);
+extern void scf_path_change(scf_state_t *statep);
+extern void scf_halt(uint_t mode);
+extern void scf_panic_callb(int code);
+extern void scf_shutdown_callb(int code);
+extern uint_t scf_softintr(caddr_t arg);
+extern void scf_cmdwait_status_set(void);
+
+extern int scf_map_regs(dev_info_t *dip, scf_state_t *statep);
+extern void scf_unmap_regs(scf_state_t *statep);
+extern int scf_send_cmd_check_bufful(struct scf_cmd *scfcmdp);
+extern int scf_send_cmd(struct scf_cmd *scfcmdp);
+extern void scf_i_send_cmd(struct scf_cmd *scfcmdp,
+ struct scf_state *statep);
+extern void scf_p_send_cmd(struct scf_cmd *scfcmdp,
+ struct scf_state *statep);
+extern int scf_path_check(scf_state_t **statep);
+extern int scf_offline_check(scf_state_t *statep, uint_t timer_exec_flag);
+extern int scf_cmdbusy_check(scf_state_t *statep);
+extern void scf_alivecheck_start(scf_state_t *statep);
+extern void scf_alivecheck_stop(scf_state_t *statep);
+extern void scf_forbid_intr(struct scf_state *statep);
+extern void scf_permit_intr(struct scf_state *statep, int flag);
+extern int scf_check_state(scf_state_t *statep);
+extern void scf_chg_scf(scf_state_t *statep, int status);
+extern void scf_del_queue(scf_state_t *statep);
+extern int scf_make_send_cmd(struct scf_cmd *scfcmdp, uint_t flag);
+extern void scf_sram_trace_init(struct scf_state *statep);
+extern void scf_sram_trace(struct scf_state *statep, uint8_t log_id);
+
+#ifdef DEBUG
+#include <sys/scfd/scftrace.h>
+#include <sys/scfd/iomp_drv.h>
+#include <sys/scfd/scfdebug.h>
+#include <sys/scfd/scfsnap.h>
+
+#define SCF_META_INSTANCE (SCF_USER_INSTANCE + 1)
+ /* meta management instance */
+#define SCF_INST_INSTANCE (SCF_USER_INSTANCE + 2)
+ /* instance management instance */
+
+extern void scf_add_scf(scf_state_t *statep);
+extern void scf_del_scf(scf_state_t *statep);
+extern int scf_meta_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *cred_p, int *rval_p, int u_mode);
+extern int scf_inst_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
+ cred_t *cred_p, int *rval_p, int u_mode);
+extern void scf_inst_getstat32(scf_state_t *statep,
+ struct fiompstatus_32 *status32_p, char *message_p,
+ int flag);
+extern void scf_inst_getstat(scf_state_t *statep,
+ struct fiompstatus *status_p, char *message_p,
+ int flag);
+
+#define SCF_DDI_PUT8(a, b, c, d) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint8_t *cp = (uint8_t *)c; \
+ *cp = d; \
+ } else { \
+ ddi_put8(b, c, d); \
+ }
+
+#define SCF_DDI_PUT16(a, b, c, d) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint16_t *cp = (uint16_t *)c; \
+ *cp = d; \
+ } else { \
+ ddi_put16(b, c, d); \
+ }
+
+#define SCF_DDI_PUT32(a, b, c, d) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint32_t *cp = (uint32_t *)c; \
+ *cp = d; \
+ } else { \
+ ddi_put32(b, c, d); \
+ }
+
+#define SCF_DDI_GET8(a, b, c) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint8_t *)c) : ddi_get8(b, c)
+
+#define SCF_DDI_GET16(a, b, c) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint16_t *)c) : ddi_get16(b, c)
+
+#define SCF_DDI_GET32(a, b, c) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint32_t *)c) : ddi_get32(b, c)
+
+#define SCF_P_DDI_PUT8(a, b, c) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint8_t *bp = (uint8_t *)b; \
+ *bp = c; \
+ } else { \
+ ddi_put8(a, b, c); \
+ }
+
+#define SCF_P_DDI_PUT16(a, b, c) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint16_t *bp = (uint16_t *)b; \
+ *bp = c; \
+ } else { \
+ ddi_put16(a, b, c); \
+ }
+
+#define SCF_P_DDI_PUT32(a, b, c) \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ uint32_t *bp = (uint32_t *)b; \
+ *bp = c; \
+ } else { \
+ ddi_put32(a, b, c); \
+ }
+
+#define SCF_P_DDI_GET8(a, b) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint8_t *)b) : ddi_get8(a, b)
+
+#define SCF_P_DDI_GET16(a, b) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint16_t *)b) : ddi_get16(a, b)
+
+#define SCF_P_DDI_GET32(a, b) \
+ (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) ? \
+ *((uint32_t *)b) : ddi_get32(a, b)
+
+#define SCF_CHECK_INSTANCE(a) \
+ (a == SCF_USER_INSTANCE) || \
+ (a == SCF_META_INSTANCE) || \
+ (a == SCF_INST_INSTANCE)
+
+#define SCF_DBG_DRV_TRACE_TBL \
+ scf_trctbl_t *trace_f; \
+ scf_trctbl_t *trace_l; \
+ scf_trctbl_t *trace_w; \
+ scf_trctbl_t *err_trace_f; \
+ scf_trctbl_t *err_trace_l; \
+ scf_trctbl_t *err_trace_w; \
+ scf_trctbl_t trace_table[TC_NRM_CNT]; \
+ scf_trctbl_t err_trace_table[TC_ERR_CNT]; \
+ int path_num; \
+ int alloc_size; \
+ scf_state_t **iomp_scf;
+
+#define SC_DBG_DRV_TRACE(a, b, c, d) \
+ scf_trace((ushort_t)(a), (ushort_t)(b), (uchar_t *)(c), (ushort_t)(d))
+
+#define SCF_DBG_DRV_TRACE_INIT \
+ scf_comtbl.trace_f = (scf_trctbl_t *)&scf_comtbl.trace_table[0]; \
+ scf_comtbl.trace_l \
+ = (scf_trctbl_t *)&scf_comtbl.trace_table[TC_NRM_CNT]; \
+ scf_comtbl.trace_w = (scf_trctbl_t *)&scf_comtbl.trace_table[0]; \
+ scf_comtbl.err_trace_f \
+ = (scf_trctbl_t *)&scf_comtbl.err_trace_table[0]; \
+ scf_comtbl.err_trace_l \
+ = (scf_trctbl_t *)&scf_comtbl.err_trace_table[TC_ERR_CNT]; \
+ scf_comtbl.err_trace_w \
+ = (scf_trctbl_t *)&scf_comtbl.err_trace_table[0];
+
+#define SCFDBGMSG(f, s) if (f & scf_trace_msg_flag) \
+ cmn_err(CE_CONT, "scfd:debug:%04d: " s, __LINE__)
+#define SCFDBGMSG1(f, s, a) if (f & scf_trace_msg_flag) \
+ cmn_err(CE_CONT, "scfd:debug:%04d: " s, __LINE__, a)
+#define SCFDBGMSG2(f, s, a, b) if (f & scf_trace_msg_flag) \
+ cmn_err(CE_CONT, "scfd:debug:%04d: " s, __LINE__, a, b)
+#define SCFDBGMSG3(f, s, a, b, c) if (f & scf_trace_msg_flag) \
+ cmn_err(CE_CONT, "scfd:debug:%04d: " s, __LINE__, a, b, c)
+#define SCFDBGMSG4(f, s, a, b, c, d) if (f & scf_trace_msg_flag) \
+ cmn_err(CE_CONT, "scfd:debug:%04d: " s, __LINE__, a, b, c, d)
+
+#define SCF_DBG_IOMP_INSTANCE \
+ { "mscf0", S_IFCHR, SCF_INST_INSTANCE }, \
+ { "mscf", S_IFCHR, SCF_META_INSTANCE },
+
+#define SCF_DBG_IOMP_PROC \
+ if (instance == SCF_META_INSTANCE) { \
+ if (drv_priv(cred_p) != 0) { \
+ ret = EPERM; \
+ goto END_ioctl; \
+ } \
+ ret = scf_meta_ioctl(dev, cmd, arg, mode, cred_p, rval_p, \
+ u_mode); \
+ goto END_ioctl; \
+ } else if (instance == SCF_INST_INSTANCE) { \
+ if (drv_priv(cred_p) != 0) { \
+ ret = EPERM; \
+ goto END_ioctl; \
+ } \
+ ret = scf_inst_ioctl(dev, cmd, arg, mode, cred_p, rval_p, \
+ u_mode); \
+ goto END_ioctl; \
+ }
+
+#define SCF_DBG_IOMP_ADD(a) \
+ { \
+ scf_add_scf(a); \
+ }
+
+#define SCF_DBG_IOMP_DEL(a) \
+ { \
+ scf_del_scf(a); \
+ }
+
+#define SCF_DBG_IOMP_FREE \
+ { \
+ if (scf_comtbl.iomp_scf != NULL) { \
+ kmem_free((void *)scf_comtbl.iomp_scf, \
+ (size_t)scf_comtbl.alloc_size); \
+ scf_comtbl.iomp_scf = NULL; \
+ scf_comtbl.alloc_size = 0; \
+ scf_comtbl.path_num = 0; \
+ } \
+ }
+
+#define SCF_DBG_IOCTL_PROC \
+ if ((uint_t)cmd == SCFIOCCMDTHROUGH) { \
+ ret = scf_debug_cmdthrough(arg, mode); \
+ goto END_ioctl; \
+ } else if ((uint_t)cmd == SCFIOCTEST) { \
+ ret = scf_debug_test(arg, mode); \
+ goto END_ioctl; \
+ } else if ((uint_t)cmd == SCFIOCSNAPSHOTSIZE) { \
+ ret = scf_snapshotsize(arg, mode); \
+ goto END_ioctl; \
+ } else if ((uint_t)cmd == SCFIOCSNAPSHOT) { \
+ ret = scf_snapshot(arg, mode); \
+ goto END_ioctl; \
+ }
+
+#define SCF_DBG_TEST_TIMER_STOP \
+ { \
+ scf_debug_test_timer_stop(); \
+ }
+
+#define SCF_DBG_TEST_INTR(a) \
+ { \
+ if ((scf_debug_test_sys_int_flag == SCF_DBF_SYS_INTR_ON) || \
+ (scf_debug_test_dscp_int_flag == SCF_DBF_DSCP_INT_ON)) \
+ scf_debug_test_intr(a); \
+ }
+
+#define SCF_DBG_TEST_INTR_SCFINT(a) \
+ { \
+ if (scf_debug_test_sys_int_flag == SCF_DBF_SYS_INTR_ON) \
+ scf_debug_test_intr_scfint(a); \
+ }
+
+#define SCF_DBG_TEST_INTR_CMDEND(a) \
+ { \
+ if (scf_debug_test_sys_int_flag == SCF_DBF_SYS_INTR_ON) \
+ scf_debug_test_intr_cmdend(a); \
+ }
+
+#define SCF_DBG_MAKE_RXSUM(a, b) \
+ { \
+ if (scf_debug_norxsum_check != SCF_DBF_NORXSUM_CHECK_OFF) \
+ a = b; \
+ }
+
+#define SCF_DBG_MAKE_RXSUM_L(a, b) \
+ { \
+ if (scf_debug_test_rxbuff_nosum_check_flag == \
+ SCF_DBF_RXBUFF_NOSUM_CHECK_ON) { \
+ a = b; \
+ scf_debug_test_rxbuff_nosum_check_flag \
+ = SCF_DBF_RXBUFF_NOSUM_CHECK_OFF; \
+ } \
+ }
+
+#define SCF_DBG_NO_INT_REASON \
+ { \
+ if (scf_debug_no_int_reason) \
+ scf_comtbl.scf_event_flag &= (~STATUS_EVENT); \
+ }
+
+#define SCF_DBG_TEST_INTR_POFF \
+ { \
+ if (scf_debug_test_sys_poff_flag == SCF_DBF_NO_INT_REASON_ON) \
+ scf_debug_test_intr_poff(); \
+ }
+
+#define SCF_DBG_TEST_DSENS(a, b, c) \
+ { \
+ if (scf_debug_test_sys_event_flag == SCF_DBF_SYS_EVENT_ON) \
+ scf_debug_test_dsens(a, b, c); \
+ }
+
+#define SCF_DBG_TEST_SEND_CMD(a, b) \
+ { \
+ if (scf_debug_nofirm_sys == SCF_DBF_NOFIRM_SYS_ON) \
+ scf_debug_test_send_cmd(a, b); \
+ }
+
+#define SCF_DBG_MAKE_PATH_CHECK(a) \
+ { \
+ if (scf_debug_test_path_check != SCF_DBC_PATH_CHECK_CLEAR) { \
+ scf_debug_test_path_check--; \
+ if (scf_debug_test_path_check == SCF_DBC_PATH_CHECK_CLEAR) \
+ a = scf_debug_test_path_check_rtn; \
+ } \
+ }
+
+#define SCF_DBG_MAKE_ONLINE(a) \
+ { \
+ if (scf_debug_nooffline_check != SCF_DBF_NOOFFLINE_CHECK_OFF) \
+ a = STATUS_SCF_ONLINE; \
+ }
+
+#define SCF_DBG_MAKE_OFFLINE_CHECK(a) \
+ { \
+ if (scf_debug_test_offline_check != SCF_DBC_OFFLINE_CHECK_CLEAR) { \
+ scf_debug_test_offline_check--; \
+ if (scf_debug_test_offline_check \
+ == SCF_DBC_OFFLINE_CHECK_CLEAR) \
+ a = scf_debug_test_offline_check_rtn; \
+ } \
+ }
+
+#define SCF_DBG_RTN_MAKE_CMD_READY \
+ { \
+ if (scf_debug_nofirm_sys == SCF_DBF_NOFIRM_SYS_ON) \
+ return (SCF_COMMAND_READY); \
+ }
+
+#define SCF_DBG_MAKE_CMD_BUSY(a, b) \
+ { \
+ if (scf_debug_test_cmdr_busy != SCF_DBC_CMDR_BUSY_CLEAR) { \
+ scf_debug_test_cmdr_busy--; \
+ if (scf_debug_test_cmdr_busy == SCF_DBC_CMDR_BUSY_CLEAR) \
+ a |= COMMAND_BUSY; \
+ } \
+ if (scf_debug_test_cmdexr_busy != SCF_DBC_CMDEXR_BUSY_CLEAR) { \
+ scf_debug_test_cmdexr_busy--; \
+ if (scf_debug_test_cmdexr_busy == SCF_DBC_CMDEXR_BUSY_CLEAR) \
+ b |= COMMAND_ExR_BUSY; \
+ } \
+ }
+
+#define SCF_DBG_TEST_ALIVE_START(a) \
+ { \
+ if (scf_debug_no_alive == SCF_DBF_NO_ALIVE_ON) \
+ scf_debug_test_alive_start(a); \
+ }
+
+#define SCF_DBG_TEST_ALIVE_STOP(a) \
+ { \
+ if (scf_debug_no_alive == SCF_DBF_NO_ALIVE_ON) \
+ scf_debug_test_alive_stop(a); \
+ }
+
+#define SCF_DBG_TEST_INTR_DSCP_DSR(a) \
+ { \
+ if (scf_debug_test_dscp_int_flag == SCF_DBF_DSCP_INT_ON) \
+ scf_debug_test_intr_dscp_dsr(a); \
+ }
+
+#define SCF_DBG_TEST_INTR_DSCP_RXTX(a, b) \
+ { \
+ if (scf_debug_test_dscp_int_flag == SCF_DBF_DSCP_INT_ON) \
+ scf_debug_test_intr_dscp_rxtx(a, b); \
+ }
+
+#define SCF_DBG_MAKE_LOOPBACK(a) \
+ { \
+ if (scf_debug_test_dscp_loopback == SCF_DBF_DSCP_LOOPBACK_ON) \
+ a = 0; \
+ }
+
+#define SCF_DBG_MAKE_NO_DSCP_PATH(a) \
+ { \
+ if (scf_debug_no_dscp_path == SCF_DBF_NO_DSCP_PATH_ON) \
+ a = FLAG_OFF; \
+ }
+
+#define SCF_DBG_TEST_TXREQ_SEND(a, b) \
+ { \
+ if (scf_debug_nofirm_dscp == SCF_DBF_NOFIRM_DSCP_ON) \
+ scf_debug_test_txreq_send(a, b); \
+ }
+
+#define SCF_DBG_TEST_MAP_REGS(a) \
+ { \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ scf_debug_test_map_regs(a); \
+ scf_sram_trace_init(a); \
+ ret = 0; \
+ goto END_map_regs; \
+ } \
+ }
+
+#define SCF_DBG_TEST_UNMAP_REGS(a) \
+ { \
+ if (scf_debug_no_device == SCF_DBF_NO_DEVICE_ON) { \
+ scf_debug_test_unmap_regs(a); \
+ SCFDBGMSG(SCF_DBGFLAG_SYS, "scf_unmap_regs(): end"); \
+ return; \
+ } \
+ }
+
+#define SCF_DBG_XSCF_SET_STATUS \
+ if (scf_comtbl.debugxscf_flag) { \
+ if (scfcmdp->stat0 != NORMAL_END) { \
+ scfcmdp->rbufleng = 0; \
+ scfcmdp->stat0 = NORMAL_END; \
+ } \
+ }
+
+#define SCF_DBG_XSCF_SET_LENGTH \
+ if (scf_comtbl.debugxscf_flag) { \
+ if (((scfcmdp->status & STATUS_CMD_RTN_CODE) >> 4) != \
+ NORMAL_END) { \
+ scfcmdp->rbufleng = 0; \
+ break; \
+ } \
+ }
+
+#define SCF_DBG_CHECK_NODEVICE \
+ scf_debug_no_device == SCF_DBF_NO_DEVICE_ON
+
+#else /* DEBUG */
+
+#define SCF_DDI_PUT8(a, b, c, d) ddi_put8(b, c, d)
+#define SCF_DDI_PUT16(a, b, c, d) ddi_put16(b, c, d)
+#define SCF_DDI_PUT32(a, b, c, d) ddi_put32(b, c, d)
+#define SCF_DDI_GET8(a, b, c) ddi_get8(b, c)
+#define SCF_DDI_GET16(a, b, c) ddi_get16(b, c)
+#define SCF_DDI_GET32(a, b, c) ddi_get32(b, c)
+
+#define SCF_P_DDI_PUT8(a, b, c) ddi_put8(a, b, c)
+#define SCF_P_DDI_PUT16(a, b, c) ddi_put16(a, b, c)
+#define SCF_P_DDI_PUT32(a, b, c) ddi_put32(a, b, c)
+#define SCF_P_DDI_GET8(a, b) ddi_get8(a, b)
+#define SCF_P_DDI_GET16(a, b) ddi_get16(a, b)
+#define SCF_P_DDI_GET32(a, b) ddi_get32(a, b)
+
+#define SCF_CHECK_INSTANCE(a) (a == SCF_USER_INSTANCE)
+
+#define SCF_DBG_DRV_TRACE_TBL
+#define SC_DBG_DRV_TRACE(a, b, c, d)
+#define SCF_DBG_DRV_TRACE_INIT
+
+#define SCFDBGMSG(f, s)
+#define SCFDBGMSG1(f, s, a)
+#define SCFDBGMSG2(f, s, a, b)
+#define SCFDBGMSG3(f, s, a, b, c)
+#define SCFDBGMSG4(f, s, a, b, c, d)
+
+#define SCF_DBG_INIT
+#define SCF_DBG_IOMP_INSTANCE
+#define SCF_DBG_IOMP_PROC
+#define SCF_DBG_IOCTL_PROC
+#define SCF_DBG_IOMP_ADD(a)
+#define SCF_DBG_IOMP_DEL(a)
+#define SCF_DBG_IOMP_FREE
+#define SCF_DBG_TEST_TIMER_STOP
+#define SCF_DBG_TEST_INTR(a)
+#define SCF_DBG_TEST_INTR_SCFINT(a)
+#define SCF_DBG_TEST_INTR_CMDEND(a)
+#define SCF_DBG_MAKE_RXSUM(a, b)
+#define SCF_DBG_MAKE_RXSUM_L(a, b)
+#define SCF_DBG_NO_INT_REASON
+#define SCF_DBG_TEST_INTR_POFF
+#define SCF_DBG_TEST_DSENS(a, b, c)
+#define SCF_DBG_TEST_SEND_CMD(a, b)
+#define SCF_DBG_MAKE_PATH_CHECK(a)
+#define SCF_DBG_MAKE_ONLINE(a)
+#define SCF_DBG_MAKE_OFFLINE_CHECK(a)
+#define SCF_DBG_RTN_MAKE_CMD_READY
+#define SCF_DBG_MAKE_CMD_BUSY(a, b)
+#define SCF_DBG_TEST_ALIVE_START(a)
+#define SCF_DBG_TEST_ALIVE_STOP(a)
+#define SCF_DBG_TEST_INTR_DSCP_DSR(a)
+#define SCF_DBG_TEST_INTR_DSCP_RXTX(a, b)
+#define SCF_DBG_MAKE_LOOPBACK(a)
+#define SCF_DBG_MAKE_NO_DSCP_PATH(a)
+#define SCF_DBG_TEST_TXREQ_SEND(a, b)
+#define SCF_DBG_TEST_MAP_REGS(a)
+#define SCF_DBG_TEST_UNMAP_REGS(a)
+#define SCF_DBG_DDI_PUT(a, b)
+#define SCF_DBG_DDI_GET(a)
+#define SCF_DBG_XSCF_SET_STATUS
+#define SCF_DBG_XSCF_SET_LENGTH
+
+#endif /* DEBUG */
+
+/*
+ * SCF driver common table
+ */
+typedef struct scf_comtbl {
+ /* mutex resource */
+ kmutex_t all_mutex; /* scf driver mutex */
+ kmutex_t trc_mutex; /* scf driver trace mutex */
+ kmutex_t attach_mutex; /* attach mutex */
+ kmutex_t si_mutex; /* softintr mutex */
+
+ /* cookie */
+ ddi_iblock_cookie_t iblock_cookie; /* SCFI-Interrupt */
+ ddi_iblock_cookie_t soft_iblock_cookie; /* softintr cookie */
+
+ /* condition variables */
+ kcondvar_t cmd_cv; /* command send */
+ kcondvar_t cmdwait_cv; /* command send wait */
+ kcondvar_t cmdend_cv; /* command send end */
+ kcondvar_t cmdbusy_cv; /* command busy send wait */
+ kcondvar_t rsense_cv; /* report sense */
+ kcondvar_t rdcsense_cv; /* SCFIOCRDCTRL & sense */
+ kcondvar_t rdctrl_cv; /* SCFIOCRDCTRL command */
+ kcondvar_t getevent_cv; /* SCFIOCGETEVENT */
+ kcondvar_t suspend_wait_cv; /* suspend cv */
+
+ /* ID */
+ ddi_softintr_t scf_softintr_id; /* softintr id */
+
+ /* SCF state table address */
+ scf_state_t *scf_pseudo_p; /* pseudo device state */
+ scf_state_t *scf_exec_p; /* SCF exec state */
+ scf_state_t *scf_path_p; /* SCF path change state */
+ scf_state_t *scf_wait_p; /* Standby state */
+ scf_state_t *scf_stop_p; /* Stop state */
+ scf_state_t *scf_err_p; /* error state */
+ scf_state_t *scf_disc_p; /* Disconnect state */
+ scf_state_t *scf_suspend_p; /* Suspend state */
+
+ /* flag and counter */
+ uint_t resource_flag; /* resource allocate flag */
+ uint_t scf_event_flag; /* SCF event flag */
+ uint_t poff_factor; /* Shutdown factor */
+ uint_t cmd_busy; /* cmd send busy flag */
+ uint_t cmd_wait; /* cmd send wait counter */
+ uint_t cmd_busy_wait; /* cmd busy send wait flag */
+ uint_t cmd_end_wait; /* cmd send end wait flag */
+ uint_t rdctrl_busy; /* SCFIOCRDCTRL busy flag */
+ uint_t rdctrl_end_wait; /* SCFIOCRDCTRL cmd end wait flag */
+ uint_t alive_running; /* Alive check exec flag */
+ uint_t watchdog_after_resume; /* watch cpu after resume flag */
+ uint_t scf_shutdown_exec_flag; /* SCFIOCSHUTDOWN call flag */
+ uint_t shutdown_start_reported; /* SCFIOCREPORTSTAT(shutdown) call */
+ uint_t scf_exec_cmd_id; /* SCF command exec id */
+ uint_t scf_cmd_exec_flag; /* SCF command exec flag */
+ uint_t putinfo_exec_flag; /* scf_service_putinfo() exec flag */
+ uint_t getinfo_exec_flag; /* scf_service_getinfo() exec flag */
+ uint_t debugxscf_flag; /* SCFIOCCMDTHROUGH exec flag */
+ uint_t suspend_wait; /* Suspend wait flag */
+ uint_t suspend_flag; /* Suspend flag */
+ uint_t scf_suspend_sendstop; /* Suspend send stop flag */
+ uint_t scf_softintr_dscp_kicked; /* Softintr DSCP kick flag */
+ uint_t int_reason_retry; /* INT_REASON retry flag */
+ uint_t scf_alive_int_count; /* Alive check interrupt counter */
+ uint_t reload_conf_flag; /* configuration file load flag */
+ uint_t scf_cmd_resend_flag; /* SCF command re send flag */
+ uint_t scf_cmd_resend_req; /* SCF command re send flag */
+ uint_t path_stop_flag; /* command send stop flag */
+
+ uint_t report_buf_ful_rcnt; /* Report command BUF_FUL */
+ /* retry counter */
+ uint_t report_rci_busy_rcnt; /* Report command RCI_BUSY */
+ /* retry counter */
+ uint_t path_change_rcnt; /* SCF path change retry counter */
+
+ /* status information */
+ ushort_t scf_mode_sw; /* Mode switch status */
+ uchar_t scf_poff_id; /* POFF interrupt id */
+ uint_t scf_shutdownreason; /* Shutdown reason */
+ uint_t scf_status; /* XSCF status */
+
+ /* SCF command control code */
+ uint_t scf_pchg_event_sub; /* SCF path change status */
+ uint_t scf_poff_event_sub; /* POFF event status */
+ uint_t scf_shut_event_sub; /* SHUTDOWN event status */
+ uint_t scf_alive_event_sub; /* ALIVE event status */
+ uint_t scf_report_event_sub; /* REPORT processing status */
+
+ /* SCF command control */
+ scf_cmd_t *scf_cmdp; /* SCF command table address */
+ uint_t scf_last_report; /* Last report */
+ uint_t scf_rem_rxbuff_size; /* remainder receive data size */
+
+ /* SCF command interrupt area */
+ scf_cmd_t scf_cmd_intr; /* SCF command table(Interrupt use) */
+ uchar_t scf_sbuf[SCF_S_CNT_16]; /* Tx Small buffer table */
+ uchar_t scf_rbuf[SCF_INT_CNT_MAX]; /* Rx Large buffer table */
+ uchar_t last_event[SCF_INT_REASON_SIZE]; /* Last event table */
+
+ /* ioctl control area */
+ scfreport_t *report_sensep; /* SCFIOCGETREPORT save address */
+ int report_sense_top; /* SCFIOCGETREPORT save offset(push) */
+ int report_sense_oldest; /* SCFIOCGETREPORT save offset(pop) */
+ uint_t rcidown_event_flag; /* SCFIOCGETREPORT rci down flag */
+ scfreport_t scfreport_rcidown; /* SCFIOCGETREPORT rci down area */
+
+ scfevent_t *getevent_sensep; /* SCFIOCGETEVENT save address */
+ int getevent_sense_top; /* SCFIOCGETEVENT save offset(push) */
+ int getevent_sense_oldest; /* SCFIOCGETEVENT save offset(pop) */
+
+ scfeventlist_t getevent_tbl; /* SCFIOCEVENTLIST list table */
+
+ uchar_t lcd_seq_mes[SCF_WRLCD_MAX];
+ /* SCFIOCCLEARLCD and SCFIOCWRLCD message */
+
+ uchar_t rdctrl_sense_category_code; /* SCFIOCRDCTRL sub command */
+ uchar_t rdctrl_sense[SCF_SBUFR_SIZE]; /* SCFIOCRDCTRL information */
+
+ /* memo counter */
+ uint_t attach_count; /* SCF attach count */
+
+ /* error memo counter */
+ uint_t memo_cmd_to_cnt; /* CMD timeout memo */
+ uint_t scf_rsense_overflow; /* SCFIOCGETREPORT overflow memo */
+ uint_t scf_getevent_overflow; /* SCFIOCGETEVENT overflow memo */
+
+ /* Unclaimed interrupt register log */
+ uint_t scf_unclamed_cnt; /* Unclaimed interrupt counter */
+ struct {
+ uint16_t CONTROL; /* SCF Control register */
+ uint16_t INT_ST; /* SCF Interrupt Status register */
+ uint16_t COMMAND; /* SCF command register */
+ uint16_t STATUS; /* SCF status register */
+ uint32_t STATUS_ExR; /* SCFI status extended register */
+ uint8_t DSR; /* DSCP buffer status register */
+ } scf_unclamed;
+
+ /* kstat private area */
+ scf_kstat_private_t *kstat_private; /* for kstat area */
+
+ SCF_DBG_DRV_TRACE_TBL /* SCF driver trace */
+} scf_comtbl_t;
+
+
+/*
+ * (resource_flag) macro for resource allocate flag
+ */
+#define DID_MUTEX_ATH (1 << 0)
+#define DID_MUTEX_ALL (1 << 1)
+#define DID_MUTEX_TRC (1 << 2)
+#define DID_MUTEX_SI (1 << 3)
+
+#define DID_CV (1 << 4)
+#define DID_KSTAT (1 << 5)
+#define DID_ALLOCBUF (1 << 6)
+#define DID_ALIVECHECK (1 << 7)
+
+#define DID_MNODE (1 << 8)
+#define DID_SOFTINTR (1 << 9)
+
+#define DID_DSCPINIT (1 << 10)
+
+/*
+ * XSCF status define (scf_status)
+ */
+#define SCF_STATUS_OFFLINE 0 /* XSCF status is offline */
+#define SCF_STATUS_ONLINE 1 /* XSCF status is online */
+#define SCF_STATUS_UNKNOWN 2 /* XSCF status is unknown */
+
+/*
+ * SCF command control status
+ */
+#define EVENT_SUB_NONE 0 /* None status */
+/* (scf_pchg_event_sub) SCF path change status */
+#define EVENT_SUB_PCHG_WAIT 1 /* SCF path change command wait */
+#define EVENT_SUB_PCHG_EXEC 2 /* SCF path change command exec */
+/* (scf_poff_event_sub) POFF event status */
+#define EVENT_SUB_POFF_WAIT 1 /* POFF factor command wait */
+#define EVENT_SUB_POFF_EXEC 2 /* POFF factor command exec */
+/* (scf_shut_event_sub) SHUTDOWN event status */
+#define EVENT_SUB_SHUT_WAIT 1 /* command wait (SHUTDOWN) */
+#define EVENT_SUB_SHUT_EXEC 2 /* command exec (SHUTDOWN) */
+#define EVENT_SUB_WAIT 3 /* command wait (EVENT) */
+#define EVENT_SUB_EXEC 4 /* command exec (EVENT) */
+/* (scf_alive_event_sub) ALIVE event status */
+#define EVENT_SUB_ALST_WAIT 1 /* Alive start command wait */
+#define EVENT_SUB_ALST_EXEC 2 /* Alive start command exec */
+#define EVENT_SUB_ALSP_WAIT 3 /* Alive stop command wait */
+#define EVENT_SUB_ALSP_EXEC 4 /* Alive stop command exec */
+/* (scf_report_event_sub) REPORT processing status */
+#define EVENT_SUB_REPORT_RUN_WAIT 1 /* Report (running) send wait */
+#define EVENT_SUB_REPORT_RUN_EXEC 2 /* Report (running) send exec */
+#define EVENT_SUB_REPORT_SHUT_WAIT 3 /* Report (shutdown) send wait */
+#define EVENT_SUB_REPORT_SHOT_EXEC 4 /* Report (shutdown) send exec */
+
+/* (scf_last_report) define */
+#define NOT_SEND_REPORT 0xffffffff /* Not report send */
+
+/* scf_cmd_resend_req define */
+#define RESEND_IOCTL (1 << 0) /* Command from ioctl */
+#define RESEND_PCHG (1 << 1) /* SCF Path change command */
+#define RESEND_POFF (1 << 2) /* Power supply control command */
+#define RESEND_SHUT (1 << 3) /* Event information command */
+#define RESEND_ALST (1 << 4) /* Alive check command (start) */
+#define RESEND_ALSP (1 << 5) /* Alive check command (stop) */
+#define RESEND_REPORT_RUN (1 << 6) /* Report command (system running) */
+#define RESEND_REPORT_SHUT (1 << 7) /* Report command (shutdown start) */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFSYS_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scftimer.h b/usr/src/uts/sun4u/opl/sys/scfd/scftimer.h
new file mode 100644
index 0000000000..d8f356974b
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scftimer.h
@@ -0,0 +1,123 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFTIMER_H
+#define _SCFTIMER_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Timer code
+ */
+typedef enum {
+ SCF_TIMERCD_CMDBUSY, /* SCF command busy watch timer */
+ SCF_TIMERCD_CMDEND, /* SCF command completion watch timer */
+ SCF_TIMERCD_ONLINE, /* SCF online watch timer */
+ SCF_TIMERCD_NEXTRECV, /* Next receive wait timer */
+ SCF_TIMERCD_DSCP_ACK, /* DSCP interface TxACK watch timer */
+ SCF_TIMERCD_DSCP_END, /* DSCP interface TxEND watch timer */
+ SCF_TIMERCD_DSCP_BUSY, /* DSCP interface busy watch timer */
+ SCF_TIMERCD_DSCP_CALLBACK, /* DSCP interface callback timer */
+ SCF_TIMERCD_BUF_FUL, /* SCF command BUF_FUL retry timer */
+ SCF_TIMERCD_RCI_BUSY, /* SCF command RCI_BUSY retry timer */
+ SCF_TIMERCD_MAX /* Max timer code */
+} scf_tm_code_t;
+
+/*
+ * Timer table
+ */
+typedef struct scf_timer_tbl {
+ uint8_t code; /* Timer code */
+ uint8_t rsv[3]; /* reserved */
+ timeout_id_t id; /* Timer ID */
+} scf_timer_tbl_t;
+
+/*
+ * Timer control table
+ */
+typedef struct scf_timer {
+ scf_timer_tbl_t tbl[2];
+ uint8_t start; /* Timer start flag */
+ uint8_t restart; /* Timer restart flag */
+ uint8_t stop; /* Timer stop flag */
+ uint8_t side; /* Use table side */
+ uint32_t value; /* Timer value */
+} scf_timer_t;
+
+/*
+ * scf_timer_check() return value
+ */
+#define SCF_TIMER_NOT_EXEC 0
+#define SCF_TIMER_EXEC 1
+
+/*
+ * Timer value (ms)
+ */
+ /* SCF command busy timer value (10s) */
+#define SCF_TIMER_VALUE_DEVBUSY 10000
+ /* SCF command completion timer value (60s) */
+#define SCF_TIMER_VALUE_CMDEND 60000
+ /* SCF online timer value (10s) */
+#define SCF_TIMER_VALUE_ONLINE 10000
+ /* Next receive timer value (20ms) */
+#define SCF_TIMER_VALUE_NEXTRCV 20
+ /* DSCP interface TxACK timer value (60s) */
+#define SCF_TIMER_VALUE_DSCP_ACK 60000
+ /* DSCP interface TxEND timer value (60s) */
+#define SCF_TIMER_VALUE_DSCP_END 60000
+ /* DSCP interface busy timer value (2s) */
+#define SCF_TIMER_VALUE_DSCP_BUSY 2000
+ /* DSCP interface callback timer value (20ms) */
+#define SCF_TIMER_VALUE_DSCP_CALLBACK 20
+
+/*
+ * Timer value convert macro
+ */
+#define SCF_MIL2MICRO(x) ((x) * 1000)
+#define SCF_SEC2MICRO(x) ((x) * 1000000)
+
+/*
+ * External function
+ */
+extern void scf_timer_init(void);
+extern void scf_timer_start(int);
+extern void scf_timer_stop(int);
+extern void scf_timer_all_stop(void);
+extern int scf_timer_check(int);
+extern uint32_t scf_timer_value_get(int);
+extern void scf_tout(void *);
+extern int scf_timer_stop_collect(timeout_id_t *tmids, int size);
+extern void scf_timer_untimeout(timeout_id_t *tmids, int size);
+
+extern scf_timer_t scf_timer[SCF_TIMERCD_MAX];
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFTIMER_H */
diff --git a/usr/src/uts/sun4u/opl/sys/scfd/scftrace.h b/usr/src/uts/sun4u/opl/sys/scfd/scftrace.h
new file mode 100644
index 0000000000..42f504370e
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/sys/scfd/scftrace.h
@@ -0,0 +1,215 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * All Rights Reserved, Copyright (c) FUJITSU LIMITED 2006
+ */
+
+#ifndef _SCFTRACE_H
+#define _SCFTRACE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/scfd/scfstate.h>
+
+#define TC_INFO_SIZE 8
+
+/* SCF driver trace table */
+typedef struct scf_trctbl {
+ ushort_t line; /* source line No. */
+ ushort_t tmvl; /* trace time (100ms) */
+ ushort_t code; /* trace code */
+ ushort_t size; /* info size */
+ uchar_t info[TC_INFO_SIZE]; /* detail info */
+} scf_trctbl_t;
+
+#define TC_NRM_CNT 1920 /* normal trace entry count */
+#define TC_ERR_CNT 128 /* error trace entry count */
+
+#define SCF_DBGFLAG_REG 0x00000001
+
+#define SCF_DBGFLAG_IOCTL 0x00000010
+#define SCF_DBGFLAG_SYS 0x00000020
+#define SCF_DBGFLAG_DSCP 0x00000040
+#define SCF_DBGFLAG_SRV 0x00000080
+
+#define SCF_DBGFLAG_IOMP 0x00000100
+#define SCF_DBGFLAG_KSTAT 0x00000200
+#define SCF_DBGFLAG_FOCK 0x00000400
+
+#define SCF_DBGFLAG_DDI 0x00001000
+#define SCF_DBGFLAG_OPCLS 0x00002000
+#define SCF_DBGFLAG_TIMER 0x00004000
+
+#define SCF_DBGFLAG_SNAP 0x00010000
+#define SCF_DBGFLAG_TRACE 0x00020000
+#define SCF_DBGFLAG_DBG 0x00080000
+
+#define SCF_DBGFLAG_ALL 0xffffffff
+
+/*
+ * trace code define
+ */
+#define TC_ERR 0x8000
+#define TC_ERRCD 0x4000
+#define TC_MSG 0x2000
+#define TC_R_REG 0x1000
+#define TC_W_REG 0x0800
+#define TC_R_CONTROL (TC_R_REG | 0x0001)
+#define TC_R_INT_ST (TC_R_REG | 0x0002)
+#define TC_R_COMMAND (TC_R_REG | 0x0011)
+#define TC_R_COMMAND_ExR (TC_R_REG | 0x0012)
+#define TC_R_STATUS (TC_R_REG | 0x0013)
+#define TC_R_STATUS_ExR (TC_R_REG | 0x0014)
+#define TC_R_TDATA0 (TC_R_REG | 0x0020)
+#define TC_R_TDATA1 (TC_R_REG | 0x0021)
+#define TC_R_TDATA2 (TC_R_REG | 0x0022)
+#define TC_R_TDATA3 (TC_R_REG | 0x0023)
+#define TC_R_RDATA0 (TC_R_REG | 0x0030)
+#define TC_R_RDATA1 (TC_R_REG | 0x0031)
+#define TC_R_RDATA2 (TC_R_REG | 0x0032)
+#define TC_R_RDATA3 (TC_R_REG | 0x0033)
+#define TC_R_ACR (TC_R_REG | 0x0040)
+#define TC_R_ATR (TC_R_REG | 0x0041)
+#define TC_R_DCR (TC_R_REG | 0x0050)
+#define TC_R_DSR (TC_R_REG | 0x0051)
+#define TC_R_TxDCR_C_FLAG (TC_R_REG | 0x0052)
+#define TC_R_TxDCR_OFFSET (TC_R_REG | 0x0053)
+#define TC_R_TxDCR_LENGTH (TC_R_REG | 0x0054)
+#define TC_R_TxDSR_C_FLAG (TC_R_REG | 0x0055)
+#define TC_R_TxDSR_OFFSET (TC_R_REG | 0x0056)
+#define TC_R_RxDCR_C_FLAG (TC_R_REG | 0x0057)
+#define TC_R_RxDCR_OFFSET (TC_R_REG | 0x0058)
+#define TC_R_RxDCR_LENGTH (TC_R_REG | 0x0059)
+#define TC_R_RxDSR_C_FLAG (TC_R_REG | 0x005a)
+#define TC_R_RxDSR_OFFSET (TC_R_REG | 0x005b)
+
+#define TC_W_CONTROL (TC_W_REG | 0x0001)
+#define TC_W_INT_ST (TC_W_REG | 0x0002)
+#define TC_W_COMMAND (TC_W_REG | 0x0011)
+#define TC_W_COMMAND_ExR (TC_W_REG | 0x0012)
+#define TC_W_STATUS (TC_W_REG | 0x0013)
+#define TC_W_STATUS_ExR (TC_W_REG | 0x0014)
+#define TC_W_TDATA0 (TC_W_REG | 0x0020)
+#define TC_W_TDATA1 (TC_W_REG | 0x0021)
+#define TC_W_TDATA2 (TC_W_REG | 0x0022)
+#define TC_W_TDATA3 (TC_W_REG | 0x0023)
+#define TC_W_RDATA0 (TC_W_REG | 0x0030)
+#define TC_W_RDATA1 (TC_W_REG | 0x0031)
+#define TC_W_RDATA2 (TC_W_REG | 0x0032)
+#define TC_W_RDATA3 (TC_W_REG | 0x0033)
+#define TC_W_ACR (TC_W_REG | 0x0040)
+#define TC_W_ATR (TC_W_REG | 0x0041)
+#define TC_W_DCR (TC_W_REG | 0x0050)
+#define TC_W_DSR (TC_W_REG | 0x0051)
+#define TC_W_TxDCR_C_FLAG (TC_W_REG | 0x0052)
+#define TC_W_TxDCR_OFFSET (TC_W_REG | 0x0053)
+#define TC_W_TxDCR_LENGTH (TC_W_REG | 0x0054)
+#define TC_W_TxDSR_C_FLAG (TC_W_REG | 0x0055)
+#define TC_W_TxDSR_OFFSET (TC_W_REG | 0x0056)
+#define TC_W_RxDCR_C_FLAG (TC_W_REG | 0x0057)
+#define TC_W_RxDCR_OFFSET (TC_W_REG | 0x0058)
+#define TC_W_RxDCR_LENGTH (TC_W_REG | 0x0059)
+#define TC_W_RxDSR_C_FLAG (TC_W_REG | 0x005a)
+#define TC_W_RxDSR_OFFSET (TC_W_REG | 0x005b)
+
+#define TC_TIMER 0x0400
+#define TC_T_TOUT (TC_TIMER | 0x0001)
+#define TC_T_START (TC_TIMER | 0x0002)
+#define TC_T_STOP (TC_TIMER | 0x0003)
+
+#define TC_OUT 0x0200
+#define TC_IN 0x0100
+
+/* scfconf.c */
+#define TC_PROBE 0x0001
+#define TC_ATTACH 0x0002
+#define TC_DETACH 0x0003
+#define TC_GETINFO 0x0004
+
+/* scfopt.c */
+#define TC_OPEN 0x0011
+#define TC_CLOSE 0x0012
+#define TC_IOCTL 0x0013
+#define TC_CHPOLL 0x0014
+
+/* scfhandler.c */
+#define TC_INTR 0x0021
+#define TC_DSENS 0x0022
+#define TC_SHUTDOWN 0x0023
+
+/* scfreg.c scfhandler.c */
+#define TC_SEND 0x0031
+#define TC_RSTS 0x0032
+
+/* kernel function code */
+#define TC_SIGNAL 0x0041
+#define TC_W_SIG 0x0042
+#define TC_T_WAIT 0x0043
+#define TC_KILL 0x004f
+
+/* DSCP function code */
+#define TC_MB_INIT 0x0081
+#define TC_MB_FINI 0x0082
+#define TC_MB_PUTMSG 0x0083
+#define TC_MB_CANGET 0x0084
+#define TC_MB_GETMSG 0x0085
+#define TC_MB_FLUSH 0x0086
+#define TC_MB_CTRL 0x0087
+#define TC_MB_INTR 0x0088
+#define TC_MB_CALLBACK 0x0089
+
+#define TC_TxREQ 0x00a1
+#define TC_RxACK 0x00a5
+#define TC_RxEND 0x00a6
+#define TC_RxREQ 0x00a4
+#define TC_TxACK 0x00a2
+#define TC_TxEND 0x00a3
+
+/* OS to SCF function code */
+#define TC_S_PUTINFO 0x0091
+#define TC_S_GETINFO 0x0092
+
+/*
+ * SCF driver trace flag
+ */
+extern ushort_t scf_trace_exec; /* 1:trace exec, 0:Trace no exec */
+extern ushort_t scf_trace_flag;
+
+/*
+ * SCF driver trace debug flag
+ */
+extern uint_t scf_trace_msg_flag; /* trace message flag */
+
+/*
+ * External function
+ */
+extern void scf_trace(ushort_t code, ushort_t line,
+ uchar_t *info, ushort_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SCFTRACE_H */
diff --git a/usr/src/uts/sun4u/opl/unix/Makefile b/usr/src/uts/sun4u/opl/unix/Makefile
new file mode 100644
index 0000000000..0be4aed67f
--- /dev/null
+++ b/usr/src/uts/sun4u/opl/unix/Makefile
@@ -0,0 +1,191 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of unix (and unix.o).
+#
+# sun4u opl implementation architecture dependent
+#
+# uts/sun4u/opl/unix/Makefile
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../../..
+
+#
+# Define the module and object file sets.
+#
+UNIX = unix
+OBJECTS = $(SPECIAL_OBJS:%=$(OBJS_DIR)/%) \
+ $(CORE_OBJS:%=$(OBJS_DIR)/%) \
+ $(MACH_NOT_YET_KMODS:%=$(OBJS_DIR)/%)
+LINTS = $(SPECIAL_OBJS:%.o=$(LINTS_DIR)/%.ln) \
+ $(CORE_OBJS:%.o=$(LINTS_DIR)/%.ln) \
+ $(MACH_NOT_YET_KMODS:%.o=$(LINTS_DIR)/%.ln) \
+ $(LINTS_DIR)/vers.ln \
+ $(LINTS_DIR)/modstubs.ln
+ROOTMODULE = $(ROOT_OPL_KERN_DIR)/$(UNIX)
+UNIX_BIN = $(OBJS_DIR)/$(UNIX)
+
+KRTLD_32 = misc/krtld
+KRTLD_64 = misc/$(SUBDIR64)/krtld
+KRTLD = $(KRTLD_$(CLASS))
+
+LIBS = $(GENLIB) $(PLATLIB) $(CPULIB)
+
+GENUNIX = genunix
+GENUNIX_DIR = ../../$(GENUNIX)
+GENOPTS = -L $(GENUNIX_DIR)/$(OBJS_DIR) -l $(GENUNIX)
+
+CPU_DIR = .
+CPUOPTS = -L $(CPU_DIR)/$(OBJS_DIR) -l $(CPUNAME)
+
+PLAT_DIR = ../../platmod
+PLATOPTS = -L $(PLAT_DIR)/$(OBJS_DIR) -l $(PLATMOD)
+
+LIBOPTS = $(GENOPTS) $(PLATOPTS) $(CPUOPTS)
+
+CTFEXTRAOBJS = $(OBJS_DIR)/vers.o
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.opl
+
+#
+# Define targets
+#
+ALL_TARGET = $(UNIX_BIN)
+LINT_TARGET = $(LINT_LIB)
+INSTALL_TARGET = $(UNIX_BIN) $(ROOTMODULE)
+
+#
+# Overrides
+#
+ALL_BUILDS = $(ALL_BUILDSONLY64)
+DEF_BUILDS = $(DEF_BUILDSONLY64)
+SYM_BUILDS = $(DEF_BUILDSONLY64)
+CLEANLINTFILES += $(LINT32_FILES)
+
+#
+# This is UNIX_DIR. Use a short path.
+#
+UNIX_DIR = .
+
+#
+# Overrides
+#
+CLEANFILES += $(UNIX_O) $(MODSTUBS_O) $(OBJS_DIR)/vers.c \
+ $(OBJS_DIR)/vers.o $(CPU_OBJ) $(CPULIB) \
+ $(DTRACESTUBS_O) $(DTRACESTUBS)
+
+CLOBBERFILES = $(CLEANFILES) $(UNIX_BIN)
+CLEANLINTFILES += $(LINT_LIB)
+
+#
+# lint pass one enforcement
+# Turn on doubleword alignment for 64 bit counter timer registers
+#
+CFLAGS += $(CCVERBOSE) -dalign
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+symcheck: $(SYM_DEPS)
+
+$(UNIX_BIN): $(UNIX_O) $(MODSTUBS_O) $(MAPFILE) $(LIBS) $(DTRACESTUBS)
+ $(LD) -dy -b -o $@ -e _start -I $(KRTLD) -M $(MAPFILE) \
+ $(UNIX_O) $(MODSTUBS_O) $(LIBOPTS) $(DTRACESTUBS)
+ $(CTFMERGE_UNIQUIFY_AGAINST_GENUNIX)
+ $(POST_PROCESS)
+
+symcheck.targ: $(UNIX_O) $(MODSTUBS_O) $(LIBS) $(DTRACESTUBS)
+ $(LD) -dy -b -o $(SYM_MOD) -M $(MAPFILE) \
+ $(UNIX_O) $(MODSTUBS_O) $(LIBOPTS) $(DTRACESTUBS)
+
+$(UNIX_O): $(OBJECTS) $(OBJS_DIR)/vers.o
+ $(LD) -r -o $@ $(OBJECTS) $(OBJS_DIR)/vers.o
+
+#
+# Special rules for generating assym.h for inclusion in assembly files.
+#
+$(DSF_DIR)/$(OBJS_DIR)/assym.h: FRC
+ @cd $(DSF_DIR); pwd; $(MAKE) all.targ
+
+$(GENLIB): FRC
+ @(cd $(GENLIB_DIR); pwd; $(MAKE) all.targ)
+ @pwd
+
+$(PLATLIB):
+ ?@(cd $(PLAT_DIR); pwd; $(MAKE) all.targ)
+ ?@pwd
+
+#
+# CPU_OBJ now comprises of 2 object files which come from sun4 common
+# and from architecture dependent code. OBJS_DIR is prepended where
+# CPU_OBJ is defined to allow for building multiple CPU_OBJ's
+#
+$(CPULIB): $(CPU_OBJ)
+ $(LD) -o $@ -G -h 'cpu/$$CPU' $(CPU_OBJ)
+
+#
+# The global lint target builds the kernel lint library (llib-lunix.ln)
+# which is equivalent to a lint of /unix.o. Then all kernel modules for
+# this architecture are linted against the kernel lint library.
+#
+# Note: lint errors in the kernel lint library will be repeated for
+# each module. It is important that the kernel lint library
+# be clean to keep the textual output to a reasonable level.
+#
+
+$(LINT_LIB): $(LINT_LIB_DIR) $(LINTS)
+ @-$(ECHO) "\n$(UNIX): (library construction):"
+ @$(LINT) -o $(UNIX) $(LINTFLAGS) $(LINTS)
+ @$(MV) $(@F) $@
+
+lintlib: $(LINT_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/opl/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl_cfg/Makefile b/usr/src/uts/sun4u/opl_cfg/Makefile
new file mode 100644
index 0000000000..88416310b1
--- /dev/null
+++ b/usr/src/uts/sun4u/opl_cfg/Makefile
@@ -0,0 +1,103 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# uts/sun4u/opl_cfg/Makefile
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+# ident "%Z%%M% %I% %E% SMI"
+#
+# This makefile drives the production of the opl_cfg Jupiter Bus
+# Configurator
+#
+# sun4u implementation architecture dependent
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = opl_cfg
+OBJECTS = $(OPLCFG_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPLCFG_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_PSM_MISC_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/Makefile.sun4u
+
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# module dependencies
+#
+LDFLAGS += -dy -Nmisc/fcodem -Nmisc/busra
+
+#
+# OPL specific header files
+#
+INC_PATH += -I$(UTSBASE)/sun4u/opl/
+#
+# Turn on doubleword alignment for 64 bit registers
+#
+CFLAGS += -dalign
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/Makefile.targ
diff --git a/usr/src/uts/sun4u/opl_pcbe/Makefile b/usr/src/uts/sun4u/opl_pcbe/Makefile
new file mode 100644
index 0000000000..51a9c171e7
--- /dev/null
+++ b/usr/src/uts/sun4u/opl_pcbe/Makefile
@@ -0,0 +1,84 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#ident "%Z%%M% %I% %E% SMI"
+#
+# uts/sun4u/opl_pcbe/Makefile
+#
+# This Makefile builds the SPARC64 VI Performance Counter BackEnd (PCBE).
+#
+
+UTSBASE = ../..
+
+#
+# Define module and object file sets.
+#
+MODULE = pcbe.4.6
+OBJECTS = $(OPL_PCBE_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(OPL_PCBE_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(USR_PCBE_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/Makefile.sun4u
+
+#
+# Define targets.
+#
+ALL_TARGET = $(BINARY)
+LINT_MODULE = opl_pcbe
+LINT_TARGET = $(LINT_MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# lint pass one enforcement
+#
+CFLAGS += $(CCVERBOSE)
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+all: $(ALL_DEPS)
+
+def: $(DEF_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/Makefile.targ
diff --git a/usr/src/uts/sun4u/os/cmp.c b/usr/src/uts/sun4u/os/cmp.c
index 792ccddc6f..22f7a0a772 100644
--- a/usr/src/uts/sun4u/os/cmp.c
+++ b/usr/src/uts/sun4u/os/cmp.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -88,9 +87,11 @@ cmp_delete_cpu(processorid_t cpuid)
* Register is set to either the lowest numbered on-line sibling core, if
* one exists, or else to this core.
*/
+/* ARGSUSED */
void
cmp_error_resteer(processorid_t cpuid)
{
+#ifndef _CMP_NO_ERROR_STEERING
cpuset_t mycores;
cpu_t *cpu;
chipid_t chipid;
@@ -120,6 +121,10 @@ cmp_error_resteer(processorid_t cpuid)
if (i == NCPU) {
xc_one(cpuid, (xcfunc_t *)set_cmp_error_steering, 0, 0);
}
+#else
+ /* Not all CMP's support (e.g. Olympus-C by Fujitsu) error steering */
+ return;
+#endif /* _CMP_NO_ERROR_STEERING */
}
chipid_t
@@ -147,12 +152,27 @@ chip_plat_get_chipid(cpu_t *cp)
}
/*
- * We don't have any multi-threaded cores on sun4u yet.
+ * Return the "core id" for the given cpu_t
+ * The "core id" space spans uniquely across all
+ * cpu chips.
*/
id_t
chip_plat_get_coreid(cpu_t *cp)
{
- return (cp->cpu_id);
+ int impl;
+
+ impl = cpunodes[cp->cpu_id].implementation;
+
+ if (IS_OLYMPUS_C(impl)) {
+ /*
+ * Currently only Fujitsu Olympus-c processor supports
+ * multi-stranded cores. Return the cpu_id with
+ * the strand bit masked out.
+ */
+ return ((id_t)((uint_t)cp->cpu_id & ~(0x1)));
+ } else {
+ return (cp->cpu_id);
+ }
}
void
@@ -167,7 +187,7 @@ chip_plat_define_chip(cpu_t *cp, chip_def_t *cd)
if (IS_JAGUAR(impl)) {
cd->chipd_type = CHIP_CMP_SPLIT_CACHE;
- } else if (IS_PANTHER(impl)) {
+ } else if (IS_PANTHER(impl) || IS_OLYMPUS_C(impl)) {
cd->chipd_type = CHIP_CMP_SHARED_CACHE;
} else {
cd->chipd_type = CHIP_DEFAULT;
diff --git a/usr/src/uts/sun4u/os/cpr_impl.c b/usr/src/uts/sun4u/os/cpr_impl.c
index d3b451381e..b66b18d098 100644
--- a/usr/src/uts/sun4u/os/cpr_impl.c
+++ b/usr/src/uts/sun4u/os/cpr_impl.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1714,7 +1713,7 @@ i_cpr_prom_pages(int action)
static void
i_cpr_save_tlbinfo(void)
{
- cti_t cti;
+ cti_t cti = {0};
/*
* during resume - shortly after jumping into the cpr module,
@@ -1731,8 +1730,13 @@ i_cpr_save_tlbinfo(void)
cti.writer = NULL;
cti.filter = i_cpr_lnb;
cti.index = cpunodes[CPU->cpu_id].dtlb_size - 1;
- cti.skip = (1 << utsb_dtlb_ttenum);
- cti.skip |= (1 << utsb4m_dtlb_ttenum);
+
+ if (utsb_dtlb_ttenum != -1)
+ cti.skip = (1 << utsb_dtlb_ttenum);
+
+ if (utsb4m_dtlb_ttenum != -1)
+ cti.skip |= (1 << utsb4m_dtlb_ttenum);
+
i_cpr_scan_tlb(&cti);
i_cpr_make_tte(&cti, &i_cpr_data_page, datava);
i_cpr_make_tte(&cti, curthread, datava);
diff --git a/usr/src/uts/sun4u/os/fillsysinfo.c b/usr/src/uts/sun4u/os/fillsysinfo.c
index fcdfbf7fed..5ab2acb13d 100644
--- a/usr/src/uts/sun4u/os/fillsysinfo.c
+++ b/usr/src/uts/sun4u/os/fillsysinfo.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -357,7 +356,9 @@ int
get_portid(pnode_t node, pnode_t *cmpp)
{
int portid;
+ int i;
char dev_type[OBP_MAXPROPNAME];
+ pnode_t cpu_parent;
if (cmpp != NULL)
*cmpp = OBP_NONODE;
@@ -369,14 +370,24 @@ get_portid(pnode_t node, pnode_t *cmpp)
if (GETPROP(node, "device_type", (caddr_t)&dev_type) == -1)
return (-1);
- /* On a CMP core, the "portid" is in the parent */
+ /*
+ * For a virtual cpu node that is a CMP core, the "portid"
+ * is in the parent node.
+ * For a virtual cpu node that is a CMT strand, the "portid" is
+ * in its grandparent node.
+ * So we iterate up as far as 2 levels to get the "portid".
+ */
if (strcmp(dev_type, "cpu") == 0) {
- node = prom_parentnode(node);
- if (GETPROP(node, "portid", (caddr_t)&portid) != -1) {
- if (cmpp != NULL)
- *cmpp = node;
-
- return (portid);
+ cpu_parent = node = prom_parentnode(node);
+ for (i = 0; i < 2; i++) {
+ if (node == OBP_NONODE || node == OBP_BADNODE)
+ break;
+ if (GETPROP(node, "portid", (caddr_t)&portid) != -1) {
+ if (cmpp != NULL)
+ *cmpp = cpu_parent;
+ return (portid);
+ }
+ node = prom_parentnode(node);
}
}
@@ -458,6 +469,7 @@ fill_cpu(pnode_t node)
cpunode = &cpunodes[cpuid];
cpunode->portid = portid;
+ cpunode->nodeid = node;
if (cpu_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, &size) != 0) {
cpunode->fru_fmri[0] = '\0';
@@ -466,10 +478,24 @@ fill_cpu(pnode_t node)
"%s%s", CPU_FRU_FMRI, unum);
}
+ if (cmpnode) {
+ /*
+ * For the CMT case, the parent "core" node contains
+ * properties needed below, use it instead of the
+ * cpu node.
+ */
+ if ((GETPROP(cmpnode, "device_type", namebuf) > 0) &&
+ (strcmp(namebuf, "core") == 0)) {
+ node = cmpnode;
+ }
+ }
+
(void) GETPROP(node, (cmpnode ? "compatible" : "name"), namebuf);
namebufp = namebuf;
if (strncmp(namebufp, "SUNW,", 5) == 0)
namebufp += 5;
+ else if (strncmp(namebufp, "FJSV,", 5) == 0)
+ namebufp += 5;
(void) strcpy(cpunode->name, namebufp);
(void) GETPROP(node, "implementation#",
@@ -507,8 +533,6 @@ fill_cpu(pnode_t node)
ASSERT(tlbsize < USHRT_MAX); /* since we cast it */
cpunode->dtlb_size = (ushort_t)tlbsize;
- cpunode->nodeid = node;
-
if (cmpnode != OBP_NONODE) {
/*
* If the CPU has a level 3 cache, then it will be the
@@ -580,32 +604,42 @@ int
get_portid_ddi(dev_info_t *dip, dev_info_t **cmpp)
{
int portid;
+ int i;
char dev_type[OBP_MAXPROPNAME];
int len = OBP_MAXPROPNAME;
+ dev_info_t *cpu_parent;
if (cmpp != NULL)
*cmpp = NULL;
if ((portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "portid", -1)) != -1)
+ DDI_PROP_DONTPASS, "portid", -1)) != -1)
return (portid);
if ((portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "upa-portid", -1)) != -1)
+ DDI_PROP_DONTPASS, "upa-portid", -1)) != -1)
return (portid);
if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
- DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
- &len) != 0)
+ DDI_PROP_DONTPASS, "device_type", (caddr_t)dev_type,
+ &len) != 0)
return (-1);
- /* On a CMP core, the "portid" is in the parent */
+ /*
+ * For a virtual cpu node that is a CMP core, the "portid"
+ * is in the parent node.
+ * For a virtual cpu node that is a CMT strand, the "portid" is
+ * in its grandparent node.
+ * So we iterate up as far as 2 levels to get the "portid".
+ */
if (strcmp(dev_type, "cpu") == 0) {
- dip = ddi_get_parent(dip);
- if ((portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
- DDI_PROP_DONTPASS, "portid", -1)) != -1) {
- if (cmpp != NULL)
- *cmpp = dip;
-
- return (portid);
+ cpu_parent = dip = ddi_get_parent(dip);
+ for (i = 0; dip != NULL && i < 2; i++) {
+ if ((portid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ DDI_PROP_DONTPASS, "portid", -1)) != -1) {
+ if (cmpp != NULL)
+ *cmpp = cpu_parent;
+ return (portid);
+ }
+ dip = ddi_get_parent(dip);
}
}
@@ -618,7 +652,6 @@ get_portid_ddi(dev_info_t *dip, dev_info_t **cmpp)
* since it is called very early in the boot cycle before (before
* setup_ddi()). Sigh...someday this will all be cleaned up.
*/
-
void
fill_cpu_ddi(dev_info_t *dip)
{
@@ -626,11 +659,12 @@ fill_cpu_ddi(dev_info_t *dip)
struct cpu_node *cpunode;
processorid_t cpuid;
int portid;
- int len;
+ int len = OBP_MAXPROPNAME;
int tlbsize;
dev_info_t *cmpnode;
char namebuf[OBP_MAXPROPNAME], unum[UNUM_NAMLEN];
char *namebufp;
+ char dev_type[OBP_MAXPROPNAME];
if ((portid = get_portid_ddi(dip, &cmpnode)) == -1) {
cmn_err(CE_PANIC, "portid not found");
@@ -649,6 +683,20 @@ fill_cpu_ddi(dev_info_t *dip)
cpunode = &cpunodes[cpuid];
cpunode->portid = portid;
+ cpunode->nodeid = ddi_get_nodeid(dip);
+
+ if (cmpnode != NULL) {
+ /*
+ * For the CMT case, the parent "core" node contains
+ * properties needed below, use it instead of the
+ * cpu node.
+ */
+ if ((ddi_prop_op(DDI_DEV_T_ANY, cmpnode, PROP_LEN_AND_VAL_BUF,
+ DDI_PROP_DONTPASS, "device_type",
+ (caddr_t)dev_type, &len) == DDI_PROP_SUCCESS) &&
+ (strcmp(dev_type, "core") == 0))
+ dip = cmpnode;
+ }
if (cpu_get_cpu_unum(cpuid, unum, UNUM_NAMLEN, &len) != 0) {
cpunode->fru_fmri[0] = '\0';
@@ -665,6 +713,8 @@ fill_cpu_ddi(dev_info_t *dip)
namebufp = namebuf;
if (strncmp(namebufp, "SUNW,", 5) == 0)
namebufp += 5;
+ else if (strncmp(namebufp, "FJSV,", 5) == 0)
+ namebufp += 5;
(void) strcpy(cpunode->name, namebufp);
cpunode->implementation = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
@@ -678,7 +728,7 @@ fill_cpu_ddi(dev_info_t *dip)
cpunode->version = REMAP_CHEETAH_MASK(cpunode->version);
}
- cpunode->clock_freq = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
+ cpunode->clock_freq = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "clock-frequency", 0);
ASSERT(cpunode->clock_freq != 0);
@@ -700,8 +750,6 @@ fill_cpu_ddi(dev_info_t *dip)
ASSERT(tlbsize < USHRT_MAX); /* since we cast it */
cpunode->dtlb_size = (ushort_t)tlbsize;
- cpunode->nodeid = ddi_get_nodeid(dip);
-
if (cmpnode != NULL) {
/*
* If the CPU has a level 3 cache, then that is it's
diff --git a/usr/src/uts/sun4u/os/mach_startup.c b/usr/src/uts/sun4u/os/mach_startup.c
index 1b09512a33..816a93c19c 100644
--- a/usr/src/uts/sun4u/os/mach_startup.c
+++ b/usr/src/uts/sun4u/os/mach_startup.c
@@ -29,6 +29,7 @@
#include <sys/archsystm.h>
#include <sys/vm.h>
#include <sys/cpu.h>
+#include <sys/cpupart.h>
#include <sys/atomic.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
@@ -71,6 +72,22 @@ struct fpras_chkfngrp *fpras_chkfngrps_base;
int fpras_frequency = -1;
int64_t fpras_interval = -1;
+/*
+ * Halt idling cpus optimization
+ *
+ * This optimization is only enabled on platforms that have
+ * the CPU halt support. The cpu_halt_cpu() support is provided
+ * in the cpu module and it is referenced here with a pragma weak.
+ * The presence of this routine automatically enables the halt idling
+ * cpus functionality if the global switch enable_halt_idle_cpus
+ * is set (default is set).
+ *
+ */
+#pragma weak cpu_halt_cpu
+extern void cpu_halt_cpu();
+
+int enable_halt_idle_cpus = 1; /* global switch */
+
void
setup_trap_table(void)
{
@@ -174,18 +191,206 @@ mach_memscrub(void)
* Startup memory scrubber, if not running fpu emulation code.
*/
+#ifndef _HW_MEMSCRUB_SUPPORT
if (fpu_exists) {
if (memscrub_init()) {
cmn_err(CE_WARN,
"Memory scrubber failed to initialize");
}
}
+#endif /* _HW_MEMSCRUB_SUPPORT */
+}
+
+/*
+ * Halt the calling CPU until awoken via an interrupt
+ * This routine should only be invoked if cpu_halt_cpu()
+ * exists and is supported, see mach_cpu_halt_idle()
+ */
+static void
+cpu_halt(void)
+{
+ cpu_t *cpup = CPU;
+ processorid_t cpun = cpup->cpu_id;
+ cpupart_t *cp = cpup->cpu_part;
+ int hset_update = 1;
+ uint_t pstate;
+ extern uint_t getpstate(void);
+ extern void setpstate(uint_t);
+
+ /*
+ * If this CPU is online, and there's multiple CPUs
+ * in the system, then we should notate our halting
+ * by adding ourselves to the partition's halted CPU
+ * bitmap. This allows other CPUs to find/awaken us when
+ * work becomes available.
+ */
+ if (CPU->cpu_flags & CPU_OFFLINE || ncpus == 1)
+ hset_update = 0;
+
+ /*
+ * Add ourselves to the partition's halted CPUs bitmask
+ * and set our HALTED flag, if necessary.
+ *
+ * When a thread becomes runnable, it is placed on the queue
+ * and then the halted cpuset is checked to determine who
+ * (if anyone) should be awoken. We therefore need to first
+ * add ourselves to the halted cpuset, and then check if there
+ * is any work available.
+ */
+ if (hset_update) {
+ cpup->cpu_disp_flags |= CPU_DISP_HALTED;
+ membar_producer();
+ CPUSET_ATOMIC_ADD(cp->cp_haltset, cpun);
+ }
+
+ /*
+ * Check to make sure there's really nothing to do.
+ * Work destined for this CPU may become available after
+ * this check. We'll be notified through the clearing of our
+ * bit in the halted CPU bitmask, and a poke.
+ */
+ if (disp_anywork()) {
+ if (hset_update) {
+ cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
+ CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
+ }
+ return;
+ }
+
+ /*
+ * We're on our way to being halted.
+ *
+ * Disable interrupts now, so that we'll awaken immediately
+ * after halting if someone tries to poke us between now and
+ * the time we actually halt.
+ *
+ * We check for the presence of our bit after disabling interrupts.
+ * If it's cleared, we'll return. If the bit is cleared after
+ * we check then the poke will pop us out of the halted state.
+ *
+ * The ordering of the poke and the clearing of the bit by cpu_wakeup
+ * is important.
+ * cpu_wakeup() must clear, then poke.
+ * cpu_halt() must disable interrupts, then check for the bit.
+ */
+ pstate = getpstate();
+ setpstate(pstate & ~PSTATE_IE);
+
+ if (hset_update && !CPU_IN_SET(cp->cp_haltset, cpun)) {
+ cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
+ setpstate(pstate);
+ return;
+ }
+
+ /*
+ * The check for anything locally runnable is here for performance
+ * and isn't needed for correctness. disp_nrunnable ought to be
+ * in our cache still, so it's inexpensive to check, and if there
+ * is anything runnable we won't have to wait for the poke.
+ */
+ if (cpup->cpu_disp->disp_nrunnable != 0) {
+ if (hset_update) {
+ cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
+ CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
+ }
+ setpstate(pstate);
+ return;
+ }
+
+ /*
+ * Halt the strand.
+ */
+ if (&cpu_halt_cpu)
+ cpu_halt_cpu();
+
+ /*
+ * We're no longer halted
+ */
+ setpstate(pstate);
+ if (hset_update) {
+ cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
+ CPUSET_ATOMIC_DEL(cp->cp_haltset, cpun);
+ }
+}
+
+/*
+ * If "cpu" is halted, then wake it up clearing its halted bit in advance.
+ * Otherwise, see if other CPUs in the cpu partition are halted and need to
+ * be woken up so that they can steal the thread we placed on this CPU.
+ * This function is only used on MP systems.
+ * This function should only be invoked if cpu_halt_cpu()
+ * exists and is supported, see mach_cpu_halt_idle()
+ */
+static void
+cpu_wakeup(cpu_t *cpu, int bound)
+{
+ uint_t cpu_found;
+ int result;
+ cpupart_t *cp;
+
+ cp = cpu->cpu_part;
+ if (CPU_IN_SET(cp->cp_haltset, cpu->cpu_id)) {
+ /*
+ * Clear the halted bit for that CPU since it will be
+ * poked in a moment.
+ */
+ CPUSET_ATOMIC_DEL(cp->cp_haltset, cpu->cpu_id);
+ /*
+ * We may find the current CPU present in the halted cpuset
+ * if we're in the context of an interrupt that occurred
+ * before we had a chance to clear our bit in cpu_halt().
+ * Poking ourself is obviously unnecessary, since if
+ * we're here, we're not halted.
+ */
+ if (cpu != CPU)
+ poke_cpu(cpu->cpu_id);
+ return;
+ } else {
+ /*
+ * This cpu isn't halted, but it's idle or undergoing a
+ * context switch. No need to awaken anyone else.
+ */
+ if (cpu->cpu_thread == cpu->cpu_idle_thread ||
+ cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
+ return;
+ }
+
+ /*
+ * No need to wake up other CPUs if the thread we just enqueued
+ * is bound.
+ */
+ if (bound)
+ return;
+
+ /*
+ * See if there's any other halted CPUs. If there are, then
+ * select one, and awaken it.
+ * It's possible that after we find a CPU, somebody else
+ * will awaken it before we get the chance.
+ * In that case, look again.
+ */
+ do {
+ CPUSET_FIND(cp->cp_haltset, cpu_found);
+ if (cpu_found == CPUSET_NOTINSET)
+ return;
+
+ ASSERT(cpu_found >= 0 && cpu_found < NCPU);
+ CPUSET_ATOMIC_XDEL(cp->cp_haltset, cpu_found, result);
+ } while (result < 0);
+
+ if (cpu_found != CPU->cpu_id)
+ poke_cpu(cpu_found);
}
void
mach_cpu_halt_idle()
{
- /* no suport for halting idle CPU */
+ if (enable_halt_idle_cpus) {
+ if (&cpu_halt_cpu) {
+ idle_cpu = cpu_halt;
+ disp_enq_thread = cpu_wakeup;
+ }
+ }
}
/*ARGSUSED*/
diff --git a/usr/src/uts/sun4u/pcbe/opl_pcbe.c b/usr/src/uts/sun4u/pcbe/opl_pcbe.c
new file mode 100644
index 0000000000..084043e38e
--- /dev/null
+++ b/usr/src/uts/sun4u/pcbe/opl_pcbe.c
@@ -0,0 +1,619 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * SPARC64 VI Performance Counter Backend
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/cpuvar.h>
+#include <sys/systm.h>
+#include <sys/cmn_err.h>
+#include <sys/cpc_impl.h>
+#include <sys/cpc_pcbe.h>
+#include <sys/modctl.h>
+#include <sys/machsystm.h>
+#include <sys/sdt.h>
+#include <sys/cpu_impl.h>
+
+static int opl_pcbe_init(void);
+static uint_t opl_pcbe_ncounters(void);
+static const char *opl_pcbe_impl_name(void);
+static const char *opl_pcbe_cpuref(void);
+static char *opl_pcbe_list_events(uint_t picnum);
+static char *opl_pcbe_list_attrs(void);
+static uint64_t opl_pcbe_event_coverage(char *event);
+static uint64_t opl_pcbe_overflow_bitmap(void);
+static int opl_pcbe_configure(uint_t picnum, char *event, uint64_t preset,
+ uint32_t flags, uint_t nattrs, kcpc_attr_t *attrs, void **data,
+ void *token);
+static void opl_pcbe_program(void *token);
+static void opl_pcbe_allstop(void);
+static void opl_pcbe_sample(void *token);
+static void opl_pcbe_free(void *config);
+
+extern void ultra_setpcr(uint64_t);
+extern uint64_t ultra_getpcr(void);
+extern void ultra_setpic(uint64_t);
+extern uint64_t ultra_getpic(void);
+extern uint64_t ultra_gettick(void);
+
+pcbe_ops_t opl_pcbe_ops = {
+ PCBE_VER_1,
+ CPC_CAP_OVERFLOW_INTERRUPT,
+ opl_pcbe_ncounters,
+ opl_pcbe_impl_name,
+ opl_pcbe_cpuref,
+ opl_pcbe_list_events,
+ opl_pcbe_list_attrs,
+ opl_pcbe_event_coverage,
+ opl_pcbe_overflow_bitmap,
+ opl_pcbe_configure,
+ opl_pcbe_program,
+ opl_pcbe_allstop,
+ opl_pcbe_sample,
+ opl_pcbe_free
+};
+
+typedef struct _opl_pcbe_config {
+ uint8_t opl_picno; /* From 0 to 7 */
+ uint32_t opl_bits; /* %pcr event code unshifted */
+ uint32_t opl_flags; /* user/system/priv */
+ uint32_t opl_pic; /* unshifted raw %pic value */
+} opl_pcbe_config_t;
+
+struct nametable {
+ const uint8_t bits;
+ const char *name;
+};
+
+#define PIC_MASK (((uint64_t)1 << 32) - 1)
+
+#define SPARC64_VI_PCR_PRIVPIC UINT64_C(1)
+
+#define CPC_SPARC64_VI_PCR_USR_SHIFT 2
+#define CPC_SPARC64_VI_PCR_SYS_SHIFT 1
+
+#define CPC_SPARC64_VI_PCR_PICL_SHIFT 4
+#define CPC_SPARC64_VI_PCR_PICU_SHIFT 11
+#define CPC_SPARC64_VI_PCR_PIC_MASK UINT64_C(0x3f)
+
+#define CPC_SPARC64_VI_NPIC 8
+
+#define CPC_SPARC64_VI_PCR_ULRO_SHIFT 3
+#define CPC_SPARC64_VI_PCR_SC_SHIFT 18
+#define CPC_SPARC64_VI_PCR_SC_MASK UINT64_C(0x7)
+#define CPC_SPARC64_VI_PCR_NC_SHIFT 22
+#define CPC_SPARC64_VI_PCR_NC_MASK UINT64_C(0x7)
+#define CPC_SPARC64_VI_PCR_OVRO_SHIFT 26
+#define CPC_SPARC64_VI_PCR_OVF_SHIFT 32
+#define CPC_SPARC64_VI_PCR_OVF_MASK UINT64_C(0xffff)
+
+#define SPARC64_VI_PCR_SYS (UINT64_C(1) << CPC_SPARC64_VI_PCR_SYS_SHIFT)
+#define SPARC64_VI_PCR_USR (UINT64_C(1) << CPC_SPARC64_VI_PCR_USR_SHIFT)
+#define SPARC64_VI_PCR_ULRO (UINT64_C(1) << CPC_SPARC64_VI_PCR_ULRO_SHIFT)
+#define SPARC64_VI_PCR_OVRO (UINT64_C(1) << CPC_SPARC64_VI_PCR_OVRO_SHIFT)
+#define SPARC64_VI_PCR_OVF (CPC_SPARC64_VI_PCR_OVF_MASK << \
+ CPC_SPARC64_VI_PCR_OVF_SHIFT)
+
+#define SPARC64_VI_NUM_PIC_PAIRS 4
+
+#define SPARC64_VI_PCR_SEL_PIC(pcr, picno) { \
+ pcr &= ~((CPC_SPARC64_VI_PCR_SC_MASK \
+ << CPC_SPARC64_VI_PCR_SC_SHIFT)); \
+ \
+ pcr |= (((picno) & CPC_SPARC64_VI_PCR_SC_MASK) \
+ << CPC_SPARC64_VI_PCR_SC_SHIFT); \
+}
+
+#define SPARC64_VI_PCR_SEL_EVENT(pcr, sl, su) { \
+ pcr &= ~((CPC_SPARC64_VI_PCR_PIC_MASK \
+ << CPC_SPARC64_VI_PCR_PICL_SHIFT) \
+ | (CPC_SPARC64_VI_PCR_PIC_MASK \
+ << CPC_SPARC64_VI_PCR_PICU_SHIFT)); \
+ \
+ pcr |= (((sl) & CPC_SPARC64_VI_PCR_PIC_MASK) \
+ << CPC_SPARC64_VI_PCR_PICL_SHIFT); \
+ pcr |= (((su) & CPC_SPARC64_VI_PCR_PIC_MASK) \
+ << CPC_SPARC64_VI_PCR_PICU_SHIFT); \
+}
+
+#define NT_END 0xFF
+
+static const uint64_t allstopped = SPARC64_VI_PCR_PRIVPIC |
+ SPARC64_VI_PCR_ULRO | SPARC64_VI_PCR_OVRO;
+
+#define SPARC64_VI_EVENTS_comm \
+ {0x0, "cycle_counts"}, \
+ {0x1, "instruction_counts"}, \
+ {0x8, "load_store_instructions"}, \
+ {0x9, "branch_instructions"}, \
+ {0xa, "floating_instructions"}, \
+ {0xb, "impdep2_instructions"}, \
+ {0xc, "prefetch_instructions"}
+
+static const struct nametable SPARC64_VI_names_l0[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_int_vector"},
+ {0x20, "write_op_uTLB"},
+ {0x30, "sx_miss_wait_pf"},
+ {0x31, "jbus_cpi_count"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_u0[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_all"},
+ {0x20, "write_if_uTLB"},
+ {0x30, "sx_miss_wait_dm"},
+ {0x31, "jbus_bi_count"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_l1[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_spill"},
+ {0x20, "write_op_uTLB"},
+ {0x30, "sx_miss_count_pf"},
+ {0x31, "jbus_cpd_count"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_u1[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_int_level"},
+ {0x20, "write_if_uTLB"},
+ {0x30, "sx_miss_count_dm"},
+ {0x31, "jbus_cpb_count"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_l2[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_trap_inst"},
+ {0x20, "op_r_iu_req_mi_go"},
+ {0x30, "sx_read_count_pf"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_u2[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_fill"},
+ {0x20, "if_r_iu_req_mi_go"},
+ {0x30, "sx_read_count_dm"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_l3[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_DMMU_miss"},
+ {0x20, "op_wait_all"},
+ {0x30, "dvp_count_pf"},
+ {NT_END, ""}
+};
+
+static const struct nametable SPARC64_VI_names_u3[] = {
+ SPARC64_VI_EVENTS_comm,
+ {0x16, "trap_IMMU_miss"},
+ {0x20, "if_wait_all"},
+ {0x30, "dvp_count_dm"},
+ {NT_END, ""}
+};
+
+#undef SPARC64_VI_EVENTS_comm
+
+static const struct nametable *SPARC64_VI_names[CPC_SPARC64_VI_NPIC] = {
+ SPARC64_VI_names_l0,
+ SPARC64_VI_names_u0,
+ SPARC64_VI_names_l1,
+ SPARC64_VI_names_u1,
+ SPARC64_VI_names_l2,
+ SPARC64_VI_names_u2,
+ SPARC64_VI_names_l3,
+ SPARC64_VI_names_u3
+};
+
+opl_pcbe_config_t nullpic[CPC_SPARC64_VI_NPIC] = {
+ {0, 0x3f, 0, 0},
+ {1, 0x3f, 0, 0},
+ {2, 0x3f, 0, 0},
+ {3, 0x3f, 0, 0},
+ {4, 0x3f, 0, 0},
+ {5, 0x3f, 0, 0},
+ {6, 0x3f, 0, 0},
+ {7, 0x3f, 0, 0}
+};
+
+static const struct nametable **events;
+static const char *opl_impl_name;
+static const char *opl_cpuref;
+static char *pic_events[CPC_SPARC64_VI_NPIC];
+
+static const char *sp_6_ref = "See the \"SPARC64 VI User's Manual\" "
+ "for descriptions of these events.";
+
+static int
+opl_pcbe_init(void)
+{
+ const struct nametable *n;
+ int i;
+ size_t size;
+
+ /*
+ * Discover type of CPU
+ *
+ * Point nametable to that CPU's table
+ */
+ switch (ULTRA_VER_IMPL(ultra_getver())) {
+ case OLYMPUS_C_IMPL:
+ events = SPARC64_VI_names;
+ opl_impl_name = "SPARC64 VI";
+ opl_cpuref = sp_6_ref;
+ break;
+ default:
+ return (-1);
+ }
+
+ /*
+ * Initialize the list of events for each PIC.
+ * Do two passes: one to compute the size necessary and another
+ * to copy the strings. Need room for event, comma, and NULL terminator.
+ */
+ for (i = 0; i < CPC_SPARC64_VI_NPIC; i++) {
+ size = 0;
+ for (n = events[i]; n->bits != NT_END; n++)
+ size += strlen(n->name) + 1;
+ pic_events[i] = kmem_alloc(size + 1, KM_SLEEP);
+ *pic_events[i] = '\0';
+ for (n = events[i]; n->bits != NT_END; n++) {
+ (void) strcat(pic_events[i], n->name);
+ (void) strcat(pic_events[i], ",");
+ }
+ /*
+ * Remove trailing comma.
+ */
+ pic_events[i][size - 1] = '\0';
+ }
+
+ return (0);
+}
+
+static uint_t
+opl_pcbe_ncounters(void)
+{
+ return (CPC_SPARC64_VI_NPIC);
+}
+
+static const char *
+opl_pcbe_impl_name(void)
+{
+ return (opl_impl_name);
+}
+
+static const char *
+opl_pcbe_cpuref(void)
+{
+ return (opl_cpuref);
+}
+
+static char *
+opl_pcbe_list_events(uint_t picnum)
+{
+ ASSERT(picnum >= 0 && picnum < cpc_ncounters);
+
+ return (pic_events[picnum]);
+}
+
+static char *
+opl_pcbe_list_attrs(void)
+{
+ return ("");
+}
+
+static const struct nametable *
+find_event(int regno, char *name)
+{
+ const struct nametable *n;
+
+ n = events[regno];
+
+ for (; n->bits != NT_END; n++)
+ if (strcmp(name, n->name) == 0)
+ return (n);
+
+ return (NULL);
+}
+
+static uint64_t
+opl_pcbe_event_coverage(char *event)
+{
+ uint64_t bitmap = 0;
+
+ int i;
+ for (i = 0; i < CPC_SPARC64_VI_NPIC; i++) {
+ if (find_event(i, event) != NULL)
+ bitmap |= (1 << i);
+ }
+
+ return (bitmap);
+}
+
+/*
+ * XXX: Need to check if overflow bits can be cleared here.
+ */
+static uint64_t
+opl_pcbe_overflow_bitmap(void)
+{
+ uint64_t pcr;
+
+ pcr = ultra_getpcr();
+ return ((pcr & SPARC64_VI_PCR_OVF) >> CPC_SPARC64_VI_PCR_OVF_SHIFT);
+}
+
+/*ARGSUSED*/
+static int
+opl_pcbe_configure(uint_t picnum, char *event, uint64_t preset, uint32_t flags,
+ uint_t nattrs, kcpc_attr_t *attrs, void **data, void *token)
+{
+ opl_pcbe_config_t *conf;
+ const struct nametable *n;
+ opl_pcbe_config_t *other_config;
+
+ /*
+ * If we've been handed an existing configuration, we need only preset
+ * the counter value.
+ */
+ if (*data != NULL) {
+ conf = *data;
+ conf->opl_pic = (uint32_t)preset;
+ return (0);
+ }
+
+ if (picnum < 0 || picnum >= CPC_SPARC64_VI_NPIC)
+ return (CPC_INVALID_PICNUM);
+
+ if (nattrs != 0)
+ return (CPC_INVALID_ATTRIBUTE);
+
+ /*
+ * Find other requests that will be programmed with this one, and ensure
+ * the flags don't conflict.
+ */
+ if (((other_config = kcpc_next_config(token, NULL, NULL)) != NULL) &&
+ (other_config->opl_flags != flags))
+ return (CPC_CONFLICTING_REQS);
+
+ if ((n = find_event(picnum, event)) == NULL)
+ return (CPC_INVALID_EVENT);
+
+ conf = kmem_alloc(sizeof (opl_pcbe_config_t), KM_SLEEP);
+
+ conf->opl_picno = picnum;
+ conf->opl_bits = (uint32_t)n->bits;
+ conf->opl_flags = flags;
+ conf->opl_pic = (uint32_t)preset;
+
+ *data = conf;
+ return (0);
+}
+
+static void
+opl_pcbe_program(void *token)
+{
+ opl_pcbe_config_t *pic[CPC_SPARC64_VI_NPIC];
+ opl_pcbe_config_t *firstconfig;
+ opl_pcbe_config_t *tmp;
+ uint64_t pcr;
+ uint64_t curpic;
+ uint8_t bitmap = 0; /* for used pic config */
+ int i;
+ opl_pcbe_config_t dummypic[CPC_SPARC64_VI_NPIC];
+
+ /* Get next pic config */
+ firstconfig = tmp = kcpc_next_config(token, NULL, NULL);
+
+ while (tmp != NULL) {
+ ASSERT(tmp->opl_picno < CPC_SPARC64_VI_NPIC);
+ ASSERT(firstconfig->opl_flags == tmp->opl_flags);
+ pic[tmp->opl_picno] = tmp;
+ bitmap |= (uint8_t)(1 << tmp->opl_picno);
+ tmp = kcpc_next_config(token, tmp, NULL);
+ }
+ if (bitmap == 0)
+ panic("opl_pcbe: token %p has no configs", token);
+
+ /* Fill in unused pic configs with flag-matched null events */
+ for (i = 0; i < CPC_SPARC64_VI_NPIC; i++) {
+ if (bitmap & (1 << i))
+ continue;
+
+ dummypic[i] = nullpic[i];
+ dummypic[i].opl_flags = firstconfig->opl_flags;
+ pic[i] = &dummypic[i];
+ }
+
+ /*
+ * For each counter pair, initialize event settings and
+ * counter values.
+ */
+ ultra_setpcr(allstopped);
+ pcr = allstopped;
+ pcr &= ~SPARC64_VI_PCR_ULRO;
+ for (i = 0; i < SPARC64_VI_NUM_PIC_PAIRS; i++) {
+ SPARC64_VI_PCR_SEL_PIC(pcr, i);
+ SPARC64_VI_PCR_SEL_EVENT(pcr, pic[i*2]->opl_bits,
+ pic[i*2 + 1]->opl_bits);
+
+ ultra_setpcr(pcr);
+ curpic = (uint64_t)(pic[i*2]->opl_pic |
+ ((uint64_t)pic[i*2 + 1]->opl_pic << 32));
+ ultra_setpic(curpic);
+ }
+
+ /*
+ * For each counter pair, enable the trace flags to start
+ * counting. Re-read the counters to sample the counter value now
+ * and use that as the baseline for future samples.
+ */
+
+ /* Set pcr */
+ pcr = ultra_getpcr();
+ pcr |= (SPARC64_VI_PCR_ULRO | SPARC64_VI_PCR_OVRO);
+ if (pic[0]->opl_flags & CPC_COUNT_USER)
+ pcr |= SPARC64_VI_PCR_USR;
+ if (pic[0]->opl_flags & CPC_COUNT_SYSTEM)
+ pcr |= SPARC64_VI_PCR_SYS;
+
+ /* Set counter values */
+ for (i = 0; i < SPARC64_VI_NUM_PIC_PAIRS; i++) {
+ SPARC64_VI_PCR_SEL_PIC(pcr, i);
+ SPARC64_VI_PCR_SEL_EVENT(pcr, pic[i*2]->opl_bits,
+ pic[i*2 + 1]->opl_bits);
+
+ ultra_setpcr(pcr);
+ curpic = ultra_getpic();
+ pic[i*2]->opl_pic = (uint32_t)(curpic & PIC_MASK);
+ pic[i*2 + 1]->opl_pic = (uint32_t)(curpic >> 32);
+ }
+
+}
+
+static void
+opl_pcbe_allstop(void)
+{
+ ultra_setpcr(allstopped);
+}
+
+
+static void
+opl_pcbe_sample(void *token)
+{
+ uint64_t curpic;
+ uint64_t pcr;
+ int64_t diff;
+ uint64_t *pic_data[CPC_SPARC64_VI_NPIC];
+ uint64_t *dtmp;
+ opl_pcbe_config_t *pic[CPC_SPARC64_VI_NPIC];
+ opl_pcbe_config_t *ctmp;
+ opl_pcbe_config_t *firstconfig;
+ uint8_t bitmap = 0; /* for used pic config */
+ int i;
+ opl_pcbe_config_t dummypic[CPC_SPARC64_VI_NPIC];
+ uint64_t dummypic_data[CPC_SPARC64_VI_NPIC];
+
+ /* Get next pic config */
+ firstconfig = ctmp = kcpc_next_config(token, NULL, &dtmp);
+
+ while (ctmp != NULL) {
+ ASSERT(ctmp->opl_picno < CPC_SPARC64_VI_NPIC);
+ ASSERT(firstconfig->opl_flags == ctmp->opl_flags);
+ pic[ctmp->opl_picno] = ctmp;
+ pic_data[ctmp->opl_picno] = dtmp;
+ bitmap |= (uint8_t)(1 << ctmp->opl_picno);
+ ctmp = kcpc_next_config(token, ctmp, &dtmp);
+ }
+ if (bitmap == 0)
+ panic("opl_pcbe: token %p has no configs", token);
+
+ /* Fill in unused pic config */
+ for (i = 0; i < CPC_SPARC64_VI_NPIC; i++) {
+ if (bitmap & (1 << i))
+ continue;
+
+ dummypic[i] = nullpic[i];
+ dummypic[i].opl_flags = firstconfig->opl_flags;
+ pic[i] = &dummypic[i];
+
+ dummypic_data[i] = 0;
+ pic_data[i] = &dummypic_data[i];
+ }
+
+ pcr = ultra_getpcr();
+ pcr |= (SPARC64_VI_PCR_ULRO | SPARC64_VI_PCR_OVRO);
+
+ for (i = 0; i < SPARC64_VI_NUM_PIC_PAIRS; i++) {
+ SPARC64_VI_PCR_SEL_PIC(pcr, i);
+ SPARC64_VI_PCR_SEL_EVENT(pcr, pic[i*2]->opl_bits,
+ pic[i*2 + 1]->opl_bits);
+
+ ultra_setpcr(pcr);
+ curpic = ultra_getpic();
+
+ diff = (int64_t)((uint32_t)(curpic & PIC_MASK) -
+ pic[i*2]->opl_pic);
+ if (diff < 0)
+ diff += (1ll << 32);
+ *pic_data[i*2] += diff;
+
+ diff = (int64_t)((uint32_t)(curpic >> 32) -
+ pic[i*2 + 1]->opl_pic);
+ if (diff < 0)
+ diff += (1ll << 32);
+ *pic_data[i*2 + 1] += diff;
+
+ pic[i*2]->opl_pic = (uint32_t)(curpic & PIC_MASK);
+ pic[i*2 + 1]->opl_pic = (uint32_t)(curpic >> 32);
+ }
+
+}
+
+static void
+opl_pcbe_free(void *config)
+{
+ kmem_free(config, sizeof (opl_pcbe_config_t));
+}
+
+
+static struct modlpcbe modlpcbe = {
+ &mod_pcbeops,
+ "SPARC64 VI Performance Counters v%I%",
+ &opl_pcbe_ops
+};
+
+static struct modlinkage modl = {
+ MODREV_1,
+ &modlpcbe,
+};
+
+int
+_init(void)
+{
+ if (opl_pcbe_init() != 0)
+ return (ENOTSUP);
+ return (mod_install(&modl));
+}
+
+int
+_fini(void)
+{
+ return (mod_remove(&modl));
+}
+
+int
+_info(struct modinfo *mi)
+{
+ return (mod_info(&modl, mi));
+}
diff --git a/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c b/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
index 633ba07a6a..a11ad815a5 100644
--- a/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
+++ b/usr/src/uts/sun4u/serengeti/io/sbdp_mem.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1495,7 +1495,7 @@ mc_check_sibling_cpu(pnode_t nodeid)
* be attempted if any of the cores are marked
* as being in reset.
*/
- for (i = 0; i < SBD_MAX_CORES_PER_CMP; i++) {
+ for (i = 0; i < SBDP_MAX_CORES_PER_CMP; i++) {
unit = SG_PORTID_TO_CPU_UNIT(portid, i);
if (sbdp_is_cpu_present(wnode, bd, unit) &&
sbdp_is_cpu_in_reset(wnode, bd, unit)) {
diff --git a/usr/src/uts/sun4u/serengeti/sys/sbdp_mem.h b/usr/src/uts/sun4u/serengeti/sys/sbdp_mem.h
index 31fb658c84..c84e84d242 100644
--- a/usr/src/uts/sun4u/serengeti/sys/sbdp_mem.h
+++ b/usr/src/uts/sun4u/serengeti/sys/sbdp_mem.h
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -38,6 +38,7 @@ extern "C" {
#define SBDP_MAX_MCS_PER_NODE 4
#define SBDP_MAX_MEM_NODES_PER_BOARD 4
+#define SBDP_MAX_CORES_PER_CMP 2
typedef uint64_t mc_dc_regs_t[SBDP_MAX_MCS_PER_NODE];
diff --git a/usr/src/uts/sun4u/starcat/io/drmach.c b/usr/src/uts/sun4u/starcat/io/drmach.c
index c967a81fe2..417991781b 100644
--- a/usr/src/uts/sun4u/starcat/io/drmach.c
+++ b/usr/src/uts/sun4u/starcat/io/drmach.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -2841,65 +2840,6 @@ drmach_fini(void)
mutex_destroy(&drmach_i_lock);
}
-static struct memlist *
-memlist_add_span(struct memlist *mlist, uint64_t base, uint64_t len)
-{
- struct memlist *ml, *tl, *nl;
-
- if (len == 0ull)
- return (NULL);
-
- if (mlist == NULL) {
- mlist = GETSTRUCT(struct memlist, 1);
- mlist->address = base;
- mlist->size = len;
- mlist->next = mlist->prev = NULL;
-
- return (mlist);
- }
-
- for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
- if (base < ml->address) {
- if ((base + len) < ml->address) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = ml;
- if ((nl->prev = ml->prev) != NULL)
- nl->prev->next = nl;
- ml->prev = nl;
- if (mlist == ml)
- mlist = nl;
- } else {
- ml->size = MAX((base + len),
- (ml->address + ml->size)) -
- base;
- ml->address = base;
- }
- break;
-
- } else if (base <= (ml->address + ml->size)) {
- ml->size = MAX((base + len),
- (ml->address + ml->size)) -
- MIN(ml->address, base);
- ml->address = MIN(ml->address, base);
- break;
- }
- }
- if (ml == NULL) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = NULL;
- nl->prev = tl;
- tl->next = nl;
- }
-
- memlist_coalesce(mlist);
-
- return (mlist);
-}
-
static void
drmach_mem_read_madr(drmach_mem_t *mp, int bank, uint64_t *madr)
{
@@ -5032,14 +4972,14 @@ sbd_error_t *
drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
{
drmach_board_t *bp;
- drmach_device_t *dp[SBD_MAX_CORES_PER_CMP];
+ drmach_device_t *dp[MAX_CORES_PER_CMP];
dr_mbox_msg_t *obufp;
sbd_error_t *err;
dr_testboard_reply_t tbr;
int cpylen;
char *copts;
int is_io;
- cpu_flag_t oflags[SBD_MAX_CORES_PER_CMP];
+ cpu_flag_t oflags[MAX_CORES_PER_CMP];
if (!DRMACH_IS_BOARD_ID(id))
return (drerr_new(0, ESTC_INAPPROP, NULL));
@@ -5137,7 +5077,7 @@ drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
int i;
mutex_enter(&cpu_lock);
- for (i = 0; i < SBD_MAX_CORES_PER_CMP; i++) {
+ for (i = 0; i < MAX_CORES_PER_CMP; i++) {
if (dp[i] != NULL) {
(void) drmach_iocage_cpu_return(dp[i],
oflags[i]);
@@ -7641,7 +7581,7 @@ drmach_iocage_cmp_acquire(drmach_device_t **dpp, cpu_flag_t *oflags)
* and attempt to acquire them. Bail out if an
* error is encountered.
*/
- for (curr = 0; curr < SBD_MAX_CORES_PER_CMP; curr++) {
+ for (curr = 0; curr < MAX_CORES_PER_CMP; curr++) {
/* check for the end of the list */
if (dpp[curr] == NULL) {
diff --git a/usr/src/uts/sun4u/starcat/io/fcgp2.c b/usr/src/uts/sun4u/starcat/io/fcgp2.c
index a557d00df3..8635cde443 100644
--- a/usr/src/uts/sun4u/starcat/io/fcgp2.c
+++ b/usr/src/uts/sun4u/starcat/io/fcgp2.c
@@ -20,7 +20,7 @@
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -120,9 +120,9 @@ struct gfc_ops_v gp2_pov[] = {
{ "rb!", gfc_register_store},
{ "claim-address", gfc_claim_address},
{ "master-interrupt", gfc_master_intr},
- { "schizo,claim-memory", gfc_claim_memory},
- { "schizo,release-memory", gfc_release_memory},
- { "schizo,vtop", gfc_vtop},
+ { "claim-memory", gfc_claim_memory},
+ { "release-memory", gfc_release_memory},
+ { "vtop", gfc_vtop},
{ FC_CONFIG_CHILD, gfc_config_child},
{ FC_GET_FCODE_SIZE, gfc_get_fcode_size},
{ FC_GET_FCODE, gfc_get_fcode},
@@ -653,7 +653,7 @@ gfc_claim_address(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
/*
* gfc_claim_memory
*
- * schizo,claim-memory ( align size vhint -- vaddr)
+ * claim-memory ( align size vhint -- vaddr)
*/
static int
gfc_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
@@ -723,7 +723,7 @@ gfc_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
/*
* gfc_release_memory
*
- * schizo,release-memory ( size vaddr -- )
+ * release-memory ( size vaddr -- )
*/
static int
gfc_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
@@ -777,7 +777,7 @@ gfc_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
/*
* gfc_vtop
*
- * schizo,vtop ( vaddr -- paddr.lo paddr.hi)
+ * vtop ( vaddr -- paddr.lo paddr.hi)
*/
static int
gfc_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
diff --git a/usr/src/uts/sun4u/starcat/os/starcat.c b/usr/src/uts/sun4u/starcat/os/starcat.c
index 16de2c1046..207e3b6cdb 100644
--- a/usr/src/uts/sun4u/starcat/os/starcat.c
+++ b/usr/src/uts/sun4u/starcat/os/starcat.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -1236,7 +1235,7 @@ efcode_alloc(caddr_t alloc_base)
}
caddr_t
-starcat_startup_memlist(caddr_t alloc_base)
+plat_startup_memlist(caddr_t alloc_base)
{
caddr_t tmp_alloc_base;
diff --git a/usr/src/uts/sun4u/starfire/io/drmach.c b/usr/src/uts/sun4u/starfire/io/drmach.c
index d57939fdce..ed322c68a7 100644
--- a/usr/src/uts/sun4u/starfire/io/drmach.c
+++ b/usr/src/uts/sun4u/starfire/io/drmach.c
@@ -1142,65 +1142,6 @@ drmach_write_mc_asr(drmachid_t id, uint_t mcreg)
return (err);
}
-static struct memlist *
-memlist_add_span(struct memlist *mlist, uint64_t base, uint64_t len)
-{
- struct memlist *ml, *tl, *nl;
-
- if (len == 0ull)
- return (NULL);
-
- if (mlist == NULL) {
- mlist = GETSTRUCT(struct memlist, 1);
- mlist->address = base;
- mlist->size = len;
- mlist->next = mlist->prev = NULL;
-
- return (mlist);
- }
-
- for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
- if (base < ml->address) {
- if ((base + len) < ml->address) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = ml;
- if ((nl->prev = ml->prev) != NULL)
- nl->prev->next = nl;
- ml->prev = nl;
- if (mlist == ml)
- mlist = nl;
- } else {
- ml->size = MAX((base + len),
- (ml->address + ml->size)) -
- base;
- ml->address = base;
- }
- break;
-
- } else if (base <= (ml->address + ml->size)) {
- ml->size = MAX((base + len),
- (ml->address + ml->size)) -
- MIN(ml->address, base);
- ml->address = MIN(ml->address, base);
- break;
- }
- }
- if (ml == NULL) {
- nl = GETSTRUCT(struct memlist, 1);
- nl->address = base;
- nl->size = len;
- nl->next = NULL;
- nl->prev = tl;
- tl->next = nl;
- }
-
- memlist_coalesce(mlist);
-
- return (mlist);
-}
-
static sbd_error_t *
drmach_prep_rename_script(drmach_device_t *s_mem, drmach_device_t *t_mem,
uint64_t t_slice_offset, caddr_t buf, int buflen)
@@ -3696,7 +3637,6 @@ drmach_unconfigure(drmachid_t id, int flags)
drmach_device_t *dp;
pnode_t nodeid;
dev_info_t *dip, *fdip = NULL;
- uint_t ddi_flags;
if (!DRMACH_IS_DEVICE_ID(id))
return (drerr_new(0, ESTF_INAPPROP, NULL));
@@ -3717,17 +3657,15 @@ drmach_unconfigure(drmachid_t id, int flags)
*/
ddi_release_devi(dip);
- ddi_flags = 0;
-
- if (flags & DRMACH_DEVI_REMOVE)
- ddi_flags |= DEVI_BRANCH_DESTROY | DEVI_BRANCH_EVENT;
+ if (flags & DEVI_BRANCH_DESTROY)
+ flags |= DEVI_BRANCH_EVENT;
/*
* Force flag is no longer necessary. See starcat/io/drmach.c
* for details.
*/
ASSERT(e_ddi_branch_held(dip));
- if (e_ddi_branch_unconfigure(dip, &fdip, ddi_flags)) {
+ if (e_ddi_branch_unconfigure(dip, &fdip, flags)) {
sbd_error_t *err;
char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
diff --git a/usr/src/uts/sun4u/sys/Makefile b/usr/src/uts/sun4u/sys/Makefile
index 0de14065cc..ae9c0bf71c 100644
--- a/usr/src/uts/sun4u/sys/Makefile
+++ b/usr/src/uts/sun4u/sys/Makefile
@@ -75,6 +75,7 @@ $(CLOSED_BUILD)CLOSED_SUN4_HDRS= \
HDRS= \
cheetahregs.h \
cpr_impl.h \
+ cpu_impl.h \
ecc_kstat.h \
envctrl.h \
envctrl_gen.h \
@@ -90,6 +91,7 @@ HDRS= \
machsystm.h \
machthread.h \
mmu.h \
+ opl_module.h \
prom_plat.h \
pte.h \
sbd_ioctl.h \
@@ -116,6 +118,7 @@ $(CLOSED_BUILD)CLOSED_HDRS= \
memtestio_chp.h \
memtestio_ja.h \
memtestio_jg.h \
+ memtestio_oc.h \
memtestio_pn.h \
memtestio_sf.h \
memtestio_sr.h \
diff --git a/usr/src/uts/sun4u/sys/cheetahregs.h b/usr/src/uts/sun4u/sys/cheetahregs.h
index 8d0878be3e..e932d96d2e 100644
--- a/usr/src/uts/sun4u/sys/cheetahregs.h
+++ b/usr/src/uts/sun4u/sys/cheetahregs.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -30,6 +29,7 @@
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/machasi.h>
+#include <sys/cpu_impl.h>
#ifdef _KERNEL
#include <sys/fpras.h>
#endif /* _KERNEL */
@@ -43,66 +43,6 @@ extern "C" {
#endif
/*
- * Definitions of UltraSparc III cpu implementations as specified
- * in version register
- */
-#define CHEETAH_IMPL 0x14
-#define IS_CHEETAH(impl) ((impl) == CHEETAH_IMPL)
-#define CHEETAH_MAJOR_VERSION(rev) (((rev) >> 4) & 0xf)
-#define CHEETAH_MINOR_VERSION(rev) ((rev) & 0xf)
-
-/*
- * Definitions of UltraSPARC III+ cpu implementation as specified
- * in version register
- */
-#define CHEETAH_PLUS_IMPL 0x15
-#define IS_CHEETAH_PLUS(impl) ((impl) == CHEETAH_PLUS_IMPL)
-#define CHEETAH_PLUS_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
-#define CHEETAH_PLUS_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
-
-/*
- * Definitions of UltraSPARC IIIi cpu implementation as specified
- * in version register. Jalapeno major and minor rev's are in
- * the same location and are the same size as Cheetah/Cheetah+.
- */
-#define JALAPENO_IMPL 0x16
-#define IS_JALAPENO(impl) ((impl) == JALAPENO_IMPL)
-#define JALAPENO_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
-#define JALAPENO_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
-
-/*
- * Definitions of UltraSPARC IV cpu implementation as specified
- * in version register. Jaguar major and minor rev's are in
- * the same location and are the same size as Cheetah/Cheetah+.
- */
-#define JAGUAR_IMPL 0x18
-#define IS_JAGUAR(impl) ((impl) == JAGUAR_IMPL)
-#define JAGUAR_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
-#define JAGUAR_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
-
-/*
- * Definitions of UltraSPARC IIIi+ cpu implementation as specified
- * in version register. Serrano major and minor rev's are in
- * the same location and are the same size as Cheetah/Cheetah+.
- */
-#define SERRANO_IMPL 0x22
-#define IS_SERRANO(impl) ((impl) == SERRANO_IMPL)
-#define SERRANO_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
-#define SERRANO_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
-
-/*
- * Definitions of UltraSPARC IV+ cpu implementation as specified
- * in version register. Panther major and minor rev's are in
- * the same location and are the same size as Cheetah/Cheetah+.
- */
-#define PANTHER_IMPL 0x19
-#define IS_PANTHER(impl) ((impl) == PANTHER_IMPL)
-#define PANTHER_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
-#define PANTHER_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
-
-#define CPU_IMPL_IS_CMP(impl) (IS_JAGUAR(impl) || IS_PANTHER(impl))
-
-/*
* Cheetah includes the process info in its mask to make things
* more difficult. The process is the low bit of the major mask,
* so to convert to the netlist major:
diff --git a/usr/src/uts/sun4u/sys/cpu_impl.h b/usr/src/uts/sun4u/sys/cpu_impl.h
new file mode 100644
index 0000000000..6984b921e5
--- /dev/null
+++ b/usr/src/uts/sun4u/sys/cpu_impl.h
@@ -0,0 +1,110 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_CPU_IMPL_H
+#define _SYS_CPU_IMPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Definitions of UltraSparc III cpu implementations as specified
+ * in version register
+ */
+#define CHEETAH_IMPL 0x14
+#define IS_CHEETAH(impl) ((impl) == CHEETAH_IMPL)
+#define CHEETAH_MAJOR_VERSION(rev) (((rev) >> 4) & 0xf)
+#define CHEETAH_MINOR_VERSION(rev) ((rev) & 0xf)
+
+/*
+ * Definitions of UltraSPARC III+ cpu implementation as specified
+ * in version register
+ */
+#define CHEETAH_PLUS_IMPL 0x15
+#define IS_CHEETAH_PLUS(impl) ((impl) == CHEETAH_PLUS_IMPL)
+#define CHEETAH_PLUS_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
+#define CHEETAH_PLUS_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
+
+/*
+ * Definitions of UltraSPARC IIIi cpu implementation as specified
+ * in version register. Jalapeno major and minor rev's are in
+ * the same location and are the same size as Cheetah/Cheetah+.
+ */
+#define JALAPENO_IMPL 0x16
+#define IS_JALAPENO(impl) ((impl) == JALAPENO_IMPL)
+#define JALAPENO_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
+#define JALAPENO_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
+
+/*
+ * Definitions of UltraSPARC IV cpu implementation as specified
+ * in version register. Jaguar major and minor rev's are in
+ * the same location and are the same size as Cheetah/Cheetah+.
+ */
+#define JAGUAR_IMPL 0x18
+#define IS_JAGUAR(impl) ((impl) == JAGUAR_IMPL)
+#define JAGUAR_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
+#define JAGUAR_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
+
+/*
+ * Definitions of UltraSPARC IIIi+ cpu implementation as specified
+ * in version register. Serrano major and minor rev's are in
+ * the same location and are the same size as Cheetah/Cheetah+.
+ */
+#define SERRANO_IMPL 0x22
+#define IS_SERRANO(impl) ((impl) == SERRANO_IMPL)
+#define SERRANO_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
+#define SERRANO_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
+
+/*
+ * Definitions of UltraSPARC IV+ cpu implementation as specified
+ * in version register. Panther major and minor rev's are in
+ * the same location and are the same size as Cheetah/Cheetah+.
+ */
+#define PANTHER_IMPL 0x19
+#define IS_PANTHER(impl) ((impl) == PANTHER_IMPL)
+#define PANTHER_MAJOR_VERSION(rev) CHEETAH_MAJOR_VERSION(rev)
+#define PANTHER_MINOR_VERSION(rev) CHEETAH_MINOR_VERSION(rev)
+
+
+/*
+ * Definitions of Olympus-C cpu implementations as specified
+ * in version register
+ */
+#define OLYMPUS_C_IMPL 0x6
+#define IS_OLYMPUS_C(impl) ((impl) == OLYMPUS_C_IMPL)
+#define OLYMPUS_REV_MASK(x) (((x) >> 28) & 0x7)
+#define OLYMPUS_C_A 0
+
+#define CPU_IMPL_IS_CMP(impl) (IS_JAGUAR(impl) || \
+ IS_PANTHER(impl) || IS_OLYMPUS_C(impl))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_CPU_IMPL_H */
diff --git a/usr/src/uts/sun4u/sys/cpu_module.h b/usr/src/uts/sun4u/sys/cpu_module.h
index a6ed4be5a6..0df1b445c7 100644
--- a/usr/src/uts/sun4u/sys/cpu_module.h
+++ b/usr/src/uts/sun4u/sys/cpu_module.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -61,9 +60,10 @@ void cpu_kdi_init(struct kdi *);
*
* set Panther CPU implementation details
*
- * mmu_init_mmu_page_sizes changes the mmu_page_sizes variable from
- * The default 4 page sizes to 6 page sizes for Panther-only domains,
- * and is called from fillsysinfo.c:check_cpus_set at early bootup time.
+ * On Panther-only domains and Olympus-C, mmu_init_mmu_page_sizes
+ * changes the mmu_page_sizes variable from the default 4 page sizes
+ * to 6 page sizes and is called from fillsysinfo.c:check_cpus_set
+ * at early bootup time.
*/
void cpu_fiximp(pnode_t dnode);
#pragma weak cpu_fix_allpanther
diff --git a/usr/src/uts/sun4u/sys/machasi.h b/usr/src/uts/sun4u/sys/machasi.h
index 0d87c1d6c7..8b37b98c18 100644
--- a/usr/src/uts/sun4u/sys/machasi.h
+++ b/usr/src/uts/sun4u/sys/machasi.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -58,6 +57,8 @@ extern "C" {
#define ASI_INTR_DISPATCH_STATUS 0x48 /* interrupt vector dispatch status */
#define ASI_INTR_RECEIVE_STATUS 0x49 /* interrupt vector receive status */
+#define ASI_SCRATCHPAD 0x4F /* Scratchpad registers ASI */
+
#define ASI_BLK_AIUP 0x70 /* block as if user primary */
#define ASI_BLK_AIUS 0x71 /* block as if user secondary */
diff --git a/usr/src/uts/sun4u/sys/machclock.h b/usr/src/uts/sun4u/sys/machclock.h
index 90dda0c282..3bd3ad7e7b 100644
--- a/usr/src/uts/sun4u/sys/machclock.h
+++ b/usr/src/uts/sun4u/sys/machclock.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -52,7 +51,7 @@ extern "C" {
* (see <sys/clock.h> file for more information)
*/
-#if defined(CHEETAH) || defined(HUMMINGBIRD)
+#if defined(CHEETAH) || defined(HUMMINGBIRD) || defined(OLYMPUS_C)
/*
* At least 3.9MHz, for slower %stick-based systems.
diff --git a/usr/src/uts/sun4u/sys/machparam.h b/usr/src/uts/sun4u/sys/machparam.h
index fe7fc55757..de12ef0ec1 100644
--- a/usr/src/uts/sun4u/sys/machparam.h
+++ b/usr/src/uts/sun4u/sys/machparam.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -291,7 +290,7 @@ extern "C" {
#define PFN_TO_BUSTYPE(pfn) (((pfn) >> 19) & 0x1FF)
#define BUSTYPE_TO_PFN(btype, pfn) \
- (((btype) << 19) | ((pfn) & 0x7FFFF))
+ (((pfn_t)(btype) << 19) | ((pfn) & 0x7FFFF))
#define IO_BUSTYPE(pfn) ((PFN_TO_BUSTYPE(pfn) & 0x100) >> 8)
#ifdef _STARFIRE
diff --git a/usr/src/uts/sun4u/sys/machthread.h b/usr/src/uts/sun4u/sys/machthread.h
index 0b9648002a..d2f0a31e0a 100644
--- a/usr/src/uts/sun4u/sys/machthread.h
+++ b/usr/src/uts/sun4u/sys/machthread.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -33,6 +32,7 @@
#include <sys/sun4asi.h>
#include <sys/machasi.h>
#include <sys/bitmap.h>
+#include <sys/opl_olympus_regs.h>
#ifdef __cplusplus
extern "C" {
@@ -75,6 +75,15 @@ extern "C" {
lduwa [r]ASI_IO, r; \
wrpr scr, 0, %pstate
+#elif defined(_OPL)
+/*
+ * For OPL platform, we get CPU_INDEX from ASI_EIDR.
+ */
+#define CPU_INDEX(r, scr) \
+ ldxa [%g0]ASI_EIDR, r; \
+ and r, 0xfff, r
+
+
#else /* _STARFIRE */
/*
diff --git a/usr/src/uts/sun4u/sys/opl.h b/usr/src/uts/sun4u/sys/opl.h
new file mode 100644
index 0000000000..69fef17266
--- /dev/null
+++ b/usr/src/uts/sun4u/sys/opl.h
@@ -0,0 +1,80 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _OPL_H
+#define _OPL_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define OPL_MAX_CPU_PER_CMP 4
+#define OPL_MAX_CORES_PER_CMP 4
+#define OPL_MAX_STRANDS_PER_CORE 2
+#define OPL_MAX_CMP_UNITS_PER_BOARD 4
+#define OPL_MAX_BOARDS 16
+#define OPL_MAX_CPU_PER_BOARD \
+ (OPL_MAX_CPU_PER_CMP * OPL_MAX_CMP_UNITS_PER_BOARD)
+#define OPL_MAX_MEM_UNITS_PER_BOARD 1
+#define OPL_MAX_IO_UNITS_PER_BOARD 16
+#define OPL_MAX_PCICH_UNITS_PER_BOARD 4
+#define OPL_MAX_TSBS_PER_PCICH 2
+#define OPL_MAX_CORE_UNITS_PER_BOARD \
+ (OPL_MAX_CORES_PER_CMP * OPL_MAX_CMP_UNITS_PER_BOARD)
+
+#define OPL_MAX_COREID_PER_CMP 4
+#define OPL_MAX_STRANDID_PER_CORE 2
+#define OPL_MAX_CPUID_PER_CMP (OPL_MAX_COREID_PER_CMP * \
+ OPL_MAX_STRANDID_PER_CORE)
+#define OPL_MAX_CMPID_PER_BOARD 4
+#define OPL_MAX_CPUID_PER_BOARD \
+ (OPL_MAX_CPUID_PER_CMP * OPL_MAX_CMPID_PER_BOARD)
+#define OPL_MAX_COREID_PER_BOARD \
+ (OPL_MAX_COREID_PER_CMP * OPL_MAX_CMPID_PER_BOARD)
+/*
+ * Macros to extract LSB_ID, CHIP_ID, CORE_ID, and STRAND_ID
+ * from the given cpuid.
+ */
+#define LSB_ID(x) (((uint_t)(x)/OPL_MAX_CPUID_PER_BOARD) & \
+ (OPL_MAX_BOARDS - 1))
+#define CHIP_ID(x) (((uint_t)(x)/OPL_MAX_CPUID_PER_CMP) & \
+ (OPL_MAX_CMPID_PER_BOARD - 1))
+#define CORE_ID(x) (((uint_t)(x)/OPL_MAX_STRANDID_PER_CORE) & \
+ (OPL_MAX_COREID_PER_CMP - 1))
+#define STRAND_ID(x) ((uint_t)(x) & (OPL_MAX_STRANDID_PER_CORE - 1))
+
+extern int plat_max_boards(void);
+extern int plat_max_cpu_units_per_board(void);
+extern int plat_max_mem_units_per_board(void);
+extern int plat_max_io_units_per_board(void);
+extern int plat_max_cmp_units_per_board(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OPL_H */
diff --git a/usr/src/uts/sun4u/sys/opl_cfg.h b/usr/src/uts/sun4u/sys/opl_cfg.h
new file mode 100644
index 0000000000..021c3881c5
--- /dev/null
+++ b/usr/src/uts/sun4u/sys/opl_cfg.h
@@ -0,0 +1,306 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPL_CFG_H
+#define _SYS_OPL_CFG_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Hardware Descriptor.
+ */
+
+#include <sys/opl_hwdesc.h>
+
+#define OPL_PSB_MODE 0x0
+#define OPL_XSB_MODE 0x1
+
+#define OPL_LSBID_MASK 0x1f
+
+/*
+ * CPU device portid:
+ *
+ * 1 0 0 0 0 0 0 0 0 0 0
+ * 0 9 8 7 6 5 4 3 2 1 0
+ * ---------------------------------------
+ * | 1 | LSB ID | CHIP | CORE | CPU |
+ * ---------------------------------------
+ */
+#define OPL_CPUID_TO_LSB(devid) ((devid >> 5) & OPL_LSBID_MASK)
+#define OPL_CPU_CHIP(devid) ((devid >> 3) & 0x3)
+#define OPL_CORE(devid) ((devid >> 1) & 0x3)
+#define OPL_CPU(devid) ((devid & 0x001))
+
+#define OPL_PORTID(board, chip) ((1 << 10) | (board << 5) | (chip << 3))
+
+#define OPL_CPUID(board, chip, core, cpu) \
+ \
+ ((board << 5) | (chip << 3) | (core << 1) | (cpu))
+
+/*
+ * Dummy address space for a chip.
+ */
+#define OPL_PROC_AS(board, chip) \
+ \
+ ((1ULL << 46) | ((uint64_t)board << 40) | (1ULL << 39) | \
+ (1ULL << 33) | ((uint64_t)chip << 4))
+
+/*
+ * pseudo-mc portid:
+ *
+ * 1 0 0 0 0 0 0 0 0 0 0
+ * 0 9 8 7 6 5 4 3 2 1 0
+ * -------------------------------------
+ * | 0 | 1 | LSB ID | 0 |
+ * -------------------------------------
+ */
+#define OPL_LSB_TO_PSEUDOMC_PORTID(board) ((1 << 9) | (board << 4))
+
+/*
+ * Dummy address space for a pseudo memory node
+ */
+#define OPL_MC_AS(board) \
+ \
+ ((1ULL << 46) | ((uint64_t)board << 40) | (1ULL << 39) | \
+ (1ULL << 33))
+
+/*
+ * Defines used by the Jupiter bus-specific library (lfc_jupiter.so).
+ * This library gets loaded into the user-level fcode interpreter
+ * and provides bus-specific methods that are used by the Oberon
+ * and the CMU-channel fcode drivers.
+ */
+/*
+ *
+ * IO port id:
+ *
+ * 1 0 0 0 0 0 0 0 0 0 0
+ * 0 9 8 7 6 5 4 3 2 1 0
+ * ---------------------------------------
+ * | 0 0 | LSB ID | IO CHAN | LEAF |
+ * ---------------------------------------
+ */
+#define OPL_PORTID_MASK 0x7FF
+#define OPL_IO_PORTID_TO_LSB(portid) (((portid) >> 4) & OPL_LSBID_MASK)
+#define OPL_PORTID_TO_CHANNEL(portid) (((portid) >> 1) & 0x7)
+#define OPL_PORTID_TO_LEAF(portid) ((portid) & 0x1)
+#define OPL_IO_PORTID(lsb, ch, leaf) \
+ (((lsb & OPL_LSBID_MASK) << 4) | ((ch & 0x7) << 1) | (leaf & 0x1))
+
+#define OPL_ADDR_TO_LSB(hi) (((hi) >> 8) & OPL_LSBID_MASK)
+#define OPL_ADDR_TO_CHANNEL(hi) (((hi) >> 5) & 0x7)
+#define OPL_ADDR_TO_LEAF(hi, lo) \
+ (!(((hi) >> 7) & 0x1) && (((lo) >> 20) == 0x7))
+
+#define OPL_ADDR_HI(lsb, ch) \
+ ((1 << 14) | ((lsb & OPL_LSBID_MASK) << 8) | ((ch & 0x7) << 5))
+
+#define OPL_CMU_CHANNEL 4
+#define OPL_OBERON_CHANNEL(ch) ((ch >= 0) && (ch <= 3))
+#define OPL_VALID_CHANNEL(ch) ((ch >= 0) && (ch <= 4))
+#define OPL_VALID_LEAF(leaf) ((leaf == 0) || (leaf == 1))
+
+#if defined(_KERNEL)
+
+/*
+ * We store the pointers to the following device nodes in this structure:
+ * "pseudo-mc"
+ * "cmp"
+ * "pci"
+ *
+ * These nodes represent the different branches we create in the device
+ * tree for each board during probe. We store them so that when a board
+ * is unprobed, we can easily locate the branches and destroy them.
+ */
+typedef struct {
+ dev_info_t *cfg_pseudo_mc;
+ dev_info_t *cfg_cpu_chips[HWD_CPU_CHIPS_PER_CMU];
+ dev_info_t *cfg_cmuch_leaf;
+ fco_handle_t cfg_cmuch_handle;
+ char *cfg_cmuch_probe_str;
+ dev_info_t *cfg_pcich_leaf[HWD_PCI_CHANNELS_PER_SB]
+ [HWD_LEAVES_PER_PCI_CHANNEL];
+ fco_handle_t cfg_pcich_handle[HWD_PCI_CHANNELS_PER_SB]
+ [HWD_LEAVES_PER_PCI_CHANNEL];
+ char *cfg_pcich_probe_str[HWD_PCI_CHANNELS_PER_SB]
+ [HWD_LEAVES_PER_PCI_CHANNEL];
+ void *cfg_hwd;
+} opl_board_cfg_t;
+
+/*
+ * Prototypes for the callback functions used in the DDI functions
+ * used to perform device tree operations.
+ *
+ * init functions are used to find device nodes that are created
+ * by Solaris during boot.
+ *
+ * create functions are used to initialize device nodes during DR.
+ */
+typedef int (*opl_init_func_t)(dev_info_t *, char *, int);
+typedef int (*opl_create_func_t)(dev_info_t *, void *, uint_t);
+
+/*
+ * The following probe structure carries all the information required
+ * at various points during probe. This structure serves two purposes:
+ *
+ * 1. It allows us to streamline functions and have them accept just
+ * a single argument.
+ *
+ * 2. It allows us to pass information to the DDI callbacks. DDI
+ * callbacks are allowed only one argument. It also allows
+ * us to return information from those callbacks.
+ *
+ * The probe structure carries a snapshot of the hardware descriptor
+ * taken at the beginning of a probe.
+ */
+typedef struct {
+ hwd_header_t *pr_hdr;
+ hwd_sb_status_t *pr_sb_status;
+ hwd_domain_info_t *pr_dinfo;
+ hwd_sb_t *pr_sb;
+
+ int pr_board;
+ int pr_cpu_chip;
+ int pr_core;
+ int pr_cpu;
+ int pr_channel;
+ int pr_channel_status;
+ int pr_leaf;
+ int pr_leaf_status;
+
+ opl_create_func_t pr_create;
+ dev_info_t *pr_parent;
+ dev_info_t *pr_node;
+ int pr_hold;
+} opl_probe_t;
+
+#define OPL_STR_LEN 256
+
+#define OPL_HI(value) ((uint32_t)((uint64_t)(value) >> 32))
+#define OPL_LO(value) ((uint32_t)(value))
+
+typedef struct {
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+} opl_addr_t;
+
+typedef struct {
+ uint32_t rg_addr_hi;
+ uint32_t rg_addr_lo;
+ uint32_t rg_size_hi;
+ uint32_t rg_size_lo;
+} opl_range_t;
+
+typedef struct {
+ int mc_bank;
+ uint32_t mc_hi;
+ uint32_t mc_lo;
+} opl_mc_addr_t;
+
+/*
+ * Convenience macros for DDI property operations. The functions that
+ * DDI provides for getting and updating properties are not symmetric
+ * either in their names or in the number of arguments. These macros
+ * hide the gory details and provide a symmetric way to get and
+ * set properties.
+ */
+#define opl_prop_get_string(dip, name, bufp, lenp) \
+ ddi_getlongprop(DDI_DEV_T_ANY, dip, \
+ DDI_PROP_DONTPASS, name, (caddr_t)bufp, lenp)
+
+#define opl_prop_get_int(dip, name, value, defvalue) \
+( \
+ *(value) = ddi_getprop(DDI_DEV_T_ANY, dip, \
+ DDI_PROP_DONTPASS, name, defvalue), \
+ (*(value) == defvalue) ? DDI_PROP_NOT_FOUND : DDI_PROP_SUCCESS \
+)
+
+#define opl_prop_get_int_array(dip, name, data, nelems) \
+ ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, \
+ DDI_PROP_DONTPASS, name, (int **)&data, (uint_t *)&nelems)
+
+#define OPL_GET_PROP(type, dip, name, value, arg) \
+ opl_prop_get_##type(dip, name, value, arg)
+
+#define OPL_GET_PROP_ARRAY(type, dip, name, values, nvalues) \
+ opl_prop_get_##type##_array(dip, name, values, nvalues)
+
+#define OPL_FREE_PROP(data) \
+ ddi_prop_free((void *)data)
+
+#define OPL_UPDATE_PROP_ERR(ret, name) \
+ if (ret != DDI_PROP_SUCCESS) { \
+ cmn_err(CE_WARN, "%s (%d): %s update property error (%d)",\
+ __FILE__, __LINE__, name, ret); \
+ return (DDI_WALK_ERROR); \
+ }
+
+#define OPL_UPDATE_PROP(type, dip, name, value) \
+ ret = ndi_prop_update_##type(DDI_DEV_T_NONE, dip, name, value); \
+ OPL_UPDATE_PROP_ERR(ret, name)
+
+
+#define OPL_UPDATE_PROP_ARRAY(type, dip, name, values, nvalues) \
+ ret = ndi_prop_update_##type##_array(DDI_DEV_T_NONE, dip, \
+ name, values, nvalues); \
+ OPL_UPDATE_PROP_ERR(ret, name)
+
+/*
+ * Node names for the different nodes supported in OPL.
+ */
+#define OPL_PSEUDO_MC_NODE "pseudo-mc"
+#define OPL_CPU_CHIP_NODE "cmp"
+#define OPL_CORE_NODE "core"
+#define OPL_CPU_NODE "cpu"
+#define OPL_PCI_LEAF_NODE "pci"
+
+typedef struct {
+ char *fc_service;
+ fc_ops_t *fc_op;
+} opl_fc_ops_t;
+
+/*
+ * Functions used by drmach
+ */
+extern int opl_probe_sb(int);
+extern int opl_unprobe_sb(int);
+extern int opl_read_hwd(int, hwd_header_t **, hwd_sb_status_t **,
+ hwd_domain_info_t **, hwd_sb_t **);
+extern void opl_hold_devtree(void);
+extern void opl_release_devtree(void);
+extern int oplcfg_pa_swap(int from, int to);
+extern int opl_init_cfg();
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPL_CFG_H */
diff --git a/usr/src/uts/sun4u/sys/opl_module.h b/usr/src/uts/sun4u/sys/opl_module.h
new file mode 100644
index 0000000000..80ef0acb31
--- /dev/null
+++ b/usr/src/uts/sun4u/sys/opl_module.h
@@ -0,0 +1,159 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPL_MODULE_H
+#define _SYS_OPL_MODULE_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/async.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _KERNEL
+
+/*
+ * Sets trap table entry ttentry by overwriting eight instructions from ttlabel.
+ */
+#define OPL_SET_TRAP(ttentry, ttlabel) \
+ bcopy((const void *)&ttlabel, &ttentry, 32); \
+ flush_instr_mem((caddr_t)&ttentry, 32);
+
+/*
+ * Define for max size of "reason" string in panic flows. Since this is on
+ * the stack, we want to keep it as small as is reasonable.
+ */
+#define MAX_REASON_STRING 40
+
+/*
+ * These error types are specific to Olympus and are used internally for the
+ * opl fault structure flt_type field.
+ */
+#define OPL_CPU_SYNC_UE 1
+#define OPL_CPU_SYNC_OTHERS 2
+#define OPL_CPU_URGENT 3
+#define OPL_CPU_INV_SFSR 4
+#define OPL_CPU_INV_UGESR 5
+
+#ifndef _ASM
+
+/*
+ * Define Olympus family (SPARC64-VI) specific asynchronous error structure
+ */
+typedef struct olympus_async_flt {
+ struct async_flt cmn_asyncflt; /* common - see sun4u/sys/async.h */
+ ushort_t flt_type; /* types of faults - cpu specific */
+ uint64_t flt_bit; /* fault bit for this log msg */
+ ushort_t flt_eid_mod; /* module ID (type of hardware) */
+ ushort_t flt_eid_sid; /* source ID */
+} opl_async_flt_t;
+
+/*
+ * Error type table struct.
+ */
+typedef struct ecc_type_to_info {
+ uint64_t ec_afsr_bit; /* SFSR bit of error */
+ char *ec_reason; /* Short error description */
+ uint_t ec_flags; /* Trap type error should be seen at */
+ int ec_flt_type; /* Used for error logging */
+ char *ec_desc; /* Long error description */
+ uint64_t ec_err_payload; /* FM ereport payload information */
+ char *ec_err_class; /* FM ereport class */
+} ecc_type_to_info_t;
+
+/*
+ * Redefine fault status bit field definitions taken from
+ * "async.h". Reused reserved Ultrasparc3 specific fault status
+ * bits here since they are by definition mutually exclusive
+ * w.r.t. OPL
+ */
+#define OPL_ECC_ISYNC_TRAP 0x0100
+#define OPL_ECC_DSYNC_TRAP 0x0200
+#define OPL_ECC_SYNC_TRAP (OPL_ECC_ISYNC_TRAP|OPL_ECC_DSYNC_TRAP)
+#define OPL_ECC_URGENT_TRAP 0x0400
+
+#define TRAP_TYPE_URGENT 0x40
+
+/*
+ * Since all the files share a bunch of routines between each other
+ * we will put all the "extern" definitions in this header file so that we
+ * don't have to repeat it all in every file.
+ */
+
+/*
+ * functions that are defined in the OPL,SPARC64-VI cpu module:
+ */
+extern void shipit(int, int);
+extern void cpu_page_retire(opl_async_flt_t *opl_flt);
+extern void cpu_init_trap(void);
+extern void cpu_error_ecache_flush(void);
+extern void flush_ecache(uint64_t physaddr, size_t ecachesize, size_t linesize);
+extern void stick_adj(int64_t skew);
+extern void stick_timestamp(int64_t *ts);
+extern void hwblkpagecopy(const void *src, void *dst);
+extern void opl_error_setup(uint64_t);
+extern void opl_mpg_enable(void);
+extern int cpu_queue_events(opl_async_flt_t *, char *, uint64_t);
+extern void ras_cntr_reset(void *);
+
+/*
+ * variables and structures that are defined outside the FJSV,SPARC64-VI
+ * cpu module:
+ */
+extern uint64_t xc_tick_limit;
+extern uint64_t xc_tick_jump_limit;
+
+/*
+ * Labels used for the trap_table patching
+ */
+extern uint32_t tt0_iae;
+extern uint32_t tt1_iae;
+extern uint32_t tt0_dae;
+extern uint32_t tt1_dae;
+extern uint32_t tt0_asdat;
+extern uint32_t tt1_asdat;
+
+extern void opl_serr_instr(void);
+extern void opl_ugerr_instr(void);
+
+/*
+ * D$ and I$ global parameters.
+ */
+extern int dcache_size;
+extern int dcache_linesize;
+extern int icache_size;
+extern int icache_linesize;
+
+#endif /* _ASM */
+
+#endif /* _KERNEL */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPL_MODULE_H */
diff --git a/usr/src/uts/sun4u/sys/opl_olympus_regs.h b/usr/src/uts/sun4u/sys/opl_olympus_regs.h
new file mode 100644
index 0000000000..ff482ce8a6
--- /dev/null
+++ b/usr/src/uts/sun4u/sys/opl_olympus_regs.h
@@ -0,0 +1,311 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _SYS_OPL_OLYMPUS_REGS_H
+#define _SYS_OPL_OLYMPUS_REGS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/machasi.h>
+#include <sys/cpu_impl.h>
+
+/*
+ * This file is cpu dependent.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _ASM
+/*
+ * assembler doesn't understand the 'ull' suffix for C constants so
+ * use the inttypes.h macros and undefine them here for assembly code
+ */
+#undef INT64_C
+#define INT64_C(x) (x)
+#undef UINT64_C
+#define UINT64_C(x) (x)
+#endif /* _ASM */
+
+/*
+ * Synchronous Fault Physical Address Register
+ */
+#define OPL_MMU_SFPAR 0x78
+
+/*
+ * ASI_MCNTL: MEMORY CONTROL Register layout (ASI 0x45, VA 8)
+ *
+ * +-------------------------+---------+--------+--------+-----+---------+
+ * | reserved [63:17] | NC_Cache|fw_fITLB|fw_fDTLB|00000|JPS1_TSBP|
+ * +-------------------------+---------+--------+--------+-----+---------+
+ * 16 15 14 13-9 8
+ * +---------+---------+------+
+ * |mpg_sITLB|mpg_sDTLB|000000|
+ * +---------+---------+------+
+ * 7 6 5-0
+ */
+#define ASI_MCNTL 0x45
+#define LSU_MCNTL 0x8 /* vaddr offset of ASI_MCNTL */
+#define MCNTL_FW_FDTLB INT64_C(0x0000000000004000)
+#define MCNTL_FW_FITLB INT64_C(0x0000000000008000)
+#define MCNTL_JPS1_TSBP INT64_C(0x0000000000000100)
+#define MCNTL_MPG_SITLB INT64_C(0x0000000000000080)
+#define MCNTL_MPG_SDTLB INT64_C(0x0000000000000040)
+#define MCNTL_SPECULATIVE_SHIFT 41 /* bit 41 is speculative mode bit */
+
+/*
+ * ASI_UGESR: URGENT ERROR STATES layout (ASI 0x4C, VA 0x8)
+ *
+ * +--------+---+----+----+---+---+--+--+---+---+-+----+----+----+
+ * |0[63:23]|CRE|TSBX|TSBP|PST|TST| F| R|SDC|WDT|0|DTLB|ITLB|CORE|
+ * +--------+---+----+----+---+---+--+--+---+---+-+----+----+----+
+ * 22 21 20 19 18 17 16 15 14 10 9 8
+ * +-------+----+---+---+---+
+ * |INSTEND|PRIV|DAE|IAE|UGE|
+ * +-------+----+---+---+---+
+ * 5 4 3 2 1 0
+ *
+ */
+#define ASI_UGERSR 0x8
+#define UGESR_IAUG_CRE INT64_C(0x0000000000400000)
+#define UGESR_IAUG_TSBCTXT INT64_C(0x0000000000200000)
+#define UGESR_IUG_TSBP INT64_C(0x0000000000100000)
+#define UGESR_IUG_PSTATE INT64_C(0x0000000000080000)
+#define UGESR_IUG_TSTATE INT64_C(0x0000000000040000)
+#define UGESR_IUG_F INT64_C(0x0000000000020000)
+#define UGESR_IUG_R INT64_C(0x0000000000010000)
+#define UGESR_AUG_SDC INT64_C(0x0000000000008000)
+#define UGESR_IUG_WDT INT64_C(0x0000000000004000)
+#define UGESR_IUG_DTLB INT64_C(0x0000000000000400)
+#define UGESR_IUG_ITLB INT64_C(0x0000000000000200)
+#define UGESR_IUG_COREERR INT64_C(0x0000000000000100)
+#define UGESR_PRIV INT64_C(0x0000000000000008)
+#define UGESR_MULTI_DAE INT64_C(0x0000000000000004)
+#define UGESR_MULTI_IAE INT64_C(0x0000000000000002)
+#define UGESR_MULTI_UGE INT64_C(0x0000000000000001)
+
+#define UGESR_CAN_RECOVER (UGESR_IUG_DTLB | \
+ UGESR_IUG_ITLB | \
+ UGESR_IUG_COREERR)
+
+#define UGESR_MULTI (UGESR_MULTI_DAE | \
+ UGESR_MULTI_IAE | \
+ UGESR_MULTI_UGE)
+
+#define UGESR_NOSYNC_PANIC (UGESR_IAUG_CRE | \
+ UGESR_AUG_SDC | \
+ UGESR_MULTI_DAE | \
+ UGESR_MULTI_IAE | \
+ UGESR_MULTI_UGE)
+/*
+ * 10000 STICK ticks per 10 ms (presumably assumes a 1 MHz STICK -- verify).
+ */
+#define OPL_UGER_STICK_DIFF 10000
+
+
+/*
+ * ASI_ECR: Control of Error Action layout (ASI 0x4C, VA 0x10)
+ *
+ * +-------------------------+------+--------+-----+-------+-----------+
+ * | reserved [63:10] |RTE_UE|RTE_CEDG|0...0|WEAK_ED|UGE_HANDLER|
+ * +-------------------------+------+--------+-----+-------+-----------+
+ * 9 8 7 - 2 1 0
+ *
+ */
+#define ASI_ECR ASI_AFSR
+#define AFSR_ECR 0x10
+#define ASI_ECR_RTE_UE INT64_C(0x0000000000000200)
+#define ASI_ECR_RTE_CEDG INT64_C(0x0000000000000100)
+#define ASI_ECR_WEAK_ED INT64_C(0x0000000000000002)
+#define ASI_ECR_UGE_HANDLER INT64_C(0x0000000000000001)
+
+
+/*
+ * ASI_L2_CTRL: Level-2 Cache Control Register (ASI 0x6A, VA 0x10)
+ *
+ * +---------------------+--------+-----+---------+----+--------+
+ * | reserved[63:25] |UGE_TRAP|0...0|NUMINSWAY|0..0|U2_FLUSH|
+ * +---------------------+--------+-----+---------+----+--------+
+ * 24 23 19 18 16 15 1 0
+ *
+ */
+#define ASI_L2_CTRL 0x6A /* L2$ Control Register */
+#define ASI_L2_CTRL_RW_ADDR 0x10
+#define ASI_L2_CTRL_UGE_TRAP INT64_C(0x0000000001000000)
+#define ASI_L2_CTRL_NUMINSWAY_MASK INT64_C(0x0000000000070000)
+#define ASI_L2_CTRL_U2_FLUSH INT64_C(0x0000000000000001)
+
+
+/*
+ * Synchronous Fault Status Register Layout (ASI 0x50/0x58, VA 0x18)
+ *
+ * IMMU and DMMU maintain their own SFSR Register
+ *
+ * +----+----+-----+----+--+-----+--+---+-+----+--+--+-----+--+-+
+ * |TLB#|0..0|index|0..0|MK| EID |UE|UPA|0|mTLB|NC|NF| ASI |TM|0|
+ * +----+----+-----+----+--+-----+--+---+-+----+--+--+-----+--+-+
+ * 63 62 61 58 48 46 45 32 31 30 28 27 25 24 23 16 15 14
+ * +----+-+---+--+-+--+--+
+ * | FT |E| CT|PR|W|OW|FV|
+ * +----+-+---+--+-+--+--+
+ * 13 7 6 5 4 3 2 1 0
+ *
+ */
+#define SFSR_MK_UE INT64_C(0x0000400000000000)
+#define SFSR_EID_MOD INT64_C(0x0000300000000000)
+#define SFSR_EID_SID INT64_C(0x00000FFF00000000)
+#define SFSR_UE INT64_C(0x0000000080000000)
+#define SFSR_BERR INT64_C(0x0000000040000000)
+#define SFSR_TO INT64_C(0x0000000020000000)
+#define SFSR_TLB_MUL INT64_C(0x0000000008000000)
+#define SFSR_TLB_PRT INT64_C(0x0000000004000000)
+
+#define SFSR_EID_MOD_SHIFT 44
+#define SFSR_EID_SID_SHIFT 32
+
+/*
+ * Error Mark ID: Module Type
+ */
+#define OPL_ERRID_MEM 0
+#define OPL_ERRID_CHANNEL 1
+#define OPL_ERRID_CPU 2
+#define OPL_ERRID_PATH 3
+
+
+#define SFSR_ERRS (SFSR_UE | SFSR_BERR | \
+ SFSR_TO | SFSR_TLB_MUL | \
+ SFSR_TLB_PRT)
+
+#define SFSR_MEMORY (SFSR_UE | \
+ SFSR_BERR | \
+ SFSR_TO)
+
+/*
+ * Miscellaneous ASI definitions
+ */
+#define ASI_IIU_INST_TRAP 0x60 /* Instruction breakpoint */
+#define ASI_ALL_FLUSH_L1I 0x67 /* Flush Level-1 Inst. cache */
+#define ASI_L2_TAG_READ 0x6B /* L2 Diagnostics Tag Read */
+#define ASI_L2_TAG_READ_REG 0x6C /* L2 Diagnostics Tag Read Register */
+#define ASI_EIDR 0x6E /* Urgent errors */
+#define ASI_CACHE_INV 0x74 /* Cache invalidation */
+#define ASI_ERR_INJCT 0x76 /* Error injection */
+
+/*
+ * VA offsets of the scratchpad registers within ASI_SCRATCHPAD (ASI 0x4F).
+ */
+#define OPL_SCRATCHPAD_SAVE_AG1 0x00 /* used for saving global registers */
+#define OPL_SCRATCHPAD_SAVE_AG2 0x08 /* used for saving global registers */
+#define OPL_SCRATCHPAD_SAVE_AG3 0x10 /* used for saving global registers */
+#define OPL_SCRATCHPAD_ERRLOG 0x18 /* keeps EIDR, log's PA & err counter */
+#define OPL_SCRATCHPAD_UTSBREG4 0x20
+#define OPL_SCRATCHPAD_UNUSED5 0x28
+#define OPL_SCRATCHPAD_UNUSED6 0x30
+#define OPL_SCRATCHPAD_UNUSED7 0x38
+
+/*
+ * Error log scratchpad register format.
+ *
+ * +--------+-------------------+----------+
+ * |ASI_EIDR| PA to logging buf | # of err |
+ * +--------+-------------------+----------+
+ * 63 50 49 6 5 0
+ *
+ */
+
+#define ERRLOG_REG_LOGPA_MASK INT64_C(0x0003ffffffffffc0) /* PA to log */
+#define ERRLOG_REG_NUMERR_MASK INT64_C(0x000000000000003f) /* Counter */
+#define ERRLOG_REG_EIDR_MASK INT64_C(0x0000000000003fff) /* EIDR */
+
+#define ERRLOG_REG_EIDR_SHIFT 50
+#define ERRLOG_REG_ERR_SHIFT 6
+#define ERRLOG_REG_EIDR(reg) ((reg >> ERRLOG_REG_EIDR_SHIFT) & \
+ ERRLOG_REG_EIDR_MASK)
+#define ERRLOG_REG_LOGPA(reg) (reg & ERRLOG_REG_LOGPA_MASK)
+#define ERRLOG_REG_NUMERR(reg) (reg & ERRLOG_REG_NUMERR_MASK)
+
+#define ERRLOG_BUFSZ 0x2000
+#define ERRLOG_SZ (1 << ERRLOG_REG_ERR_SHIFT)
+#define ERRLOG_ALLOC_SZ (ERRLOG_BUFSZ * 512)
+
+/*
+ * Olympus-C default cache parameters.
+ */
+#define OPL_DCACHE_SIZE 0x20000
+#define OPL_DCACHE_LSIZE 0x40
+#define OPL_ICACHE_SIZE 0x20000
+#define OPL_ICACHE_LSIZE 0x40
+#define OPL_ECACHE_SIZE 0x600000
+#define OPL_ECACHE_LSIZE 0x100
+#define OPL_ECACHE_NWAY 12
+#define OPL_ECACHE_SETSIZE 0x80000
+
+/*
+ * The minimum size needed to ensure consistency on a virtually addressed
+ * cache. Computed by taking the largest virtually indexed cache and dividing
+ * by its associativity.
+ */
+#define OPL_VAC_SIZE 0x4000
+
+/* these are field offsets for opl_errlog structure */
+#define LOG_STICK_OFF 0x0
+#define LOG_TL_OFF 0x8
+#define LOG_ASI3_OFF 0x10
+#define LOG_SFSR_OFF 0x18
+#define LOG_SFAR_OFF 0x20
+
+#define LOG_UGER_OFF 0x18
+#define LOG_TSTATE_OFF 0x20
+#define LOG_TPC_OFF 0x28
+
+#ifndef _ASM
+typedef struct opl_errlog {
+ uint64_t stick;
+ uint32_t tl;
+ uint32_t tt;
+ uint64_t asi3;
+ union {
+ struct {
+ uint64_t sfsr;
+ union {
+ uint64_t sfar;
+ uint64_t sfpar;
+ } sync_addr;
+ } sync;
+ struct {
+ uint64_t ugesr;
+ uint64_t tstate;
+ } ugesr;
+ } reg;
+ uint64_t tpc;
+} opl_errlog_t;
+#endif /* _ASM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_OPL_OLYMPUS_REGS_H */
diff --git a/usr/src/uts/sun4u/sys/prom_plat.h b/usr/src/uts/sun4u/sys/prom_plat.h
index c557eee0a1..646da1c7cc 100644
--- a/usr/src/uts/sun4u/sys/prom_plat.h
+++ b/usr/src/uts/sun4u/sys/prom_plat.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -265,6 +264,15 @@ extern int prom_starfire_move_cpu0(uint_t cpuid);
extern void prom_starfire_init_console(uint_t cpuid);
/*
+ * OPL-specific routines
+ */
+extern void prom_opl_get_tod(time_t *time, int64_t *stickval);
+extern void prom_opl_set_diff(int64_t diff);
+extern int prom_attach_notice(int bn);
+extern int prom_detach_notice(int bn);
+extern int prom_opl_switch_console(int bn);
+
+/*
* The client program implementation is required to provide a wrapper
* to the client handler, for the 32 bit client program to 64 bit cell-sized
* client interface handler (switch stack, etc.). This function is not
diff --git a/usr/src/uts/sun4u/sys/pte.h b/usr/src/uts/sun4u/sys/pte.h
index 988b57fb68..0734bf5411 100644
--- a/usr/src/uts/sun4u/sys/pte.h
+++ b/usr/src/uts/sun4u/sys/pte.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -43,7 +42,9 @@ extern "C" {
* use a V8 compiler all manipulations in C will be done using the bit fields
* or as 2 integers. In assembly code we will deal with it as a double (using
* ldx and stx). The structure is defined to force a double alignment.
- * Note that USIIi uses bits <47:40> for diag, and <49:48> are reserved.
+ * Note that USIIi uses bits [47:41] for diag, and [49:48] are reserved.
+ * Note that pa[46:32] includes USIIi diag bits [46:41] and USIII reserved
+ * bits [46:43].
*/
typedef union {
struct tte {
@@ -55,9 +56,9 @@ typedef union {
uint32_t hmenum:3; /* sw - # of hment in hme_blk */
uint32_t rsv:7; /* former rsv:1 lockcnt:6 */
- uint32_t sz2:1; /* Panther sz2[48] */
- uint32_t diag:5; /* See USII Note above. */
- uint32_t pahi:11; /* pa[42:32] */
+ uint32_t sz2:1; /* sz2[48] Panther, Olympus-C */
+ uint32_t diag:1; /* See USII Note above. */
+ uint32_t pahi:15; /* pa[46:32] See Note above */
uint32_t palo:19; /* pa[31:13] */
uint32_t no_sync:1; /* sw - ghost unload */
@@ -146,16 +147,20 @@ typedef union {
#define TTE_PAGEMASK(sz) (~TTE_PAGE_OFFSET(sz))
#define TTE_PFNMASK(sz) (~(TTE_PAGE_OFFSET(sz) >> MMU_PAGESHIFT))
-#define TTE_PA_LSHIFT 21 /* used to do sllx on tte to get pa */
+#define TTE_PA_LSHIFT 17 /* used to do sllx on tte to get pa */
#ifndef _ASM
#define TTE_PASHIFT 19 /* used to manage pahi and palo */
#define TTE_PALOMASK ((1 << TTE_PASHIFT) -1)
-/* PFN is defined as bits [40-13] of the physical address */
+/*
+ * Spitfire PFN is defined as bits [40:13] of the physical address.
+ * Cheetah PFN is defined as bits [42:13] of the physical address.
+ * Olympus-C PFN is defined as bits [46:13] of the physical address.
+ */
#define TTE_TO_TTEPFN(ttep) \
- ((((ttep)->tte_pahi << TTE_PASHIFT) | (ttep)->tte_palo) & \
- TTE_PFNMASK(TTE_CSZ(ttep)))
+ (((((pfn_t)((ttep)->tte_pahi)) << TTE_PASHIFT) | \
+ (ttep)->tte_palo) & TTE_PFNMASK(TTE_CSZ(ttep)))
/*
* This define adds the vaddr page offset to obtain a correct pfn
*/
@@ -198,11 +203,11 @@ typedef union {
#define TTE_PROT_INT (TTE_WRPRM_INT | TTE_PRIV_INT)
/*
- * Define to clear the high-order 2 bits of the 43-bit PA in a tte. The
- * Spitfire tte has PFN in [40-13] and uses [42-41] as part of Diag bits.
+ * Define to clear the high-order 6 bits of the 47-bit PA in a tte. The
+ * Spitfire tte has PFN in [40:13] and uses [46:41] as part of Diag bits.
*/
-#define TTE_SPITFIRE_PFNHI_CLEAR 0x3
-#define TTE_SPITFIRE_PFNHI_SHIFT 32+9
+#define TTE_SPITFIRE_PFNHI_CLEAR 0x3f
+#define TTE_SPITFIRE_PFNHI_SHIFT 41
#ifndef ASM
@@ -265,7 +270,7 @@ typedef union {
(ttep)->tte_bit.nfo = 1; \
(ttep)->tte_bit.ie = 1; \
(ttep)->tte_bit.sz2 = 1; \
- (ttep)->tte_bit.pahi = 0x7ff; \
+ (ttep)->tte_bit.pahi = 0x7fff; \
(ttep)->tte_bit.palo = 0x7ffff; \
(ttep)->tte_bit.exec_perm = 1; \
(ttep)->tte_bit.l = 1; \
diff --git a/usr/src/uts/sun4u/sys/sbd_ioctl.h b/usr/src/uts/sun4u/sys/sbd_ioctl.h
index 6826e28e9f..a917f3833f 100644
--- a/usr/src/uts/sun4u/sys/sbd_ioctl.h
+++ b/usr/src/uts/sun4u/sys/sbd_ioctl.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -148,7 +147,9 @@ typedef struct {
#define is_suspend is_cm.c_sflags
#define is_time is_cm.c_time
-#define SBD_MAX_CORES_PER_CMP 2
+/* Must be the maximum number of cores per CMP across all supported platforms */
+
+#define SBD_MAX_CORES_PER_CMP 8
typedef struct {
sbd_cm_stat_t ps_cm;
@@ -577,6 +578,23 @@ typedef struct {
#define ESGT_NOT_SUPP 4027 /* Operation not supported */
#define ESGT_NO_MEM 4028 /* No Memory */
+/* opl error codes */
+
+#define EOPL_GETPROP 5001 /* Cannot read property value */
+#define EOPL_BNUM 5002 /* Invalid board number */
+#define EOPL_CONFIGBUSY 5003
+ /* Cannot proceed; Board is configured or busy */
+#define EOPL_PROBE 5004 /* Firmware probe failed */
+#define EOPL_DEPROBE 5005 /* Firmware deprobe failed */
+#define EOPL_SUPPORT 5006 /* Operation not supported */
+#define EOPL_DRVFAIL 5007 /* Device driver failure */
+#define EOPL_UNKPTCMD 5008 /* Unrecognized platform command */
+#define EOPL_NOTID 5009 /* drmach parameter is not a valid ID */
+#define EOPL_INAPPROP 5010
+ /* drmach parameter is inappropriate for operation */
+#define EOPL_INTERNAL 5011 /* Unexpected internal condition */
+#define EOPL_FINDDEVICE 5012 /* Firmware cannot find node. */
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/sun4u/todopl/Makefile b/usr/src/uts/sun4u/todopl/Makefile
new file mode 100644
index 0000000000..28db86792f
--- /dev/null
+++ b/usr/src/uts/sun4u/todopl/Makefile
@@ -0,0 +1,88 @@
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#
+# Copyright 2006 Sun Microsystems, Inc. All rights reserved.
+# Use is subject to license terms.
+#
+#pragma ident "%Z%%M% %I% %E% SMI"
+#
+# uts/sun4u/todopl/Makefile
+#
+# This makefile drives the production of the todopl
+# kernel module.
+#
+
+#
+# Path to the base of the uts directory tree (usually /usr/src/uts).
+#
+UTSBASE = ../..
+
+#
+# Define the module and object file sets.
+#
+MODULE = todopl
+OBJECTS = $(TODOPL_OBJS:%=$(OBJS_DIR)/%)
+LINTS = $(TODOPL_OBJS:%.o=$(LINTS_DIR)/%.ln)
+ROOTMODULE = $(ROOT_PSM_TOD_DIR)/$(MODULE)
+
+#
+# Include common rules.
+#
+include $(UTSBASE)/sun4u/Makefile.sun4u
+
+INC_PATH += -I$(UTSBASE)/sun4u/opl
+#
+# Define targets
+#
+ALL_TARGET = $(BINARY)
+LINT_TARGET = $(MODULE).lint
+INSTALL_TARGET = $(BINARY) $(ROOTMODULE)
+
+#
+# module dependencies
+#
+LDFLAGS += -dy
+
+#
+# Default build targets.
+#
+.KEEP_STATE:
+
+def: $(DEF_DEPS)
+
+all: $(ALL_DEPS)
+
+clean: $(CLEAN_DEPS)
+
+clobber: $(CLOBBER_DEPS)
+
+lint: $(LINT_DEPS)
+
+modlintlib: $(MODLINTLIB_DEPS)
+
+clean.lint: $(CLEAN_LINT_DEPS)
+
+install: $(INSTALL_DEPS)
+
+#
+# Include common targets.
+#
+include $(UTSBASE)/sun4u/Makefile.targ
diff --git a/usr/src/uts/sun4u/vm/mach_sfmmu.c b/usr/src/uts/sun4u/vm/mach_sfmmu.c
index fae0ac0caf..b9cf3266f0 100644
--- a/usr/src/uts/sun4u/vm/mach_sfmmu.c
+++ b/usr/src/uts/sun4u/vm/mach_sfmmu.c
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -191,6 +190,7 @@ sfmmu_remap_kernel(void)
sfmmu_set_tlb();
}
+#ifndef UTSB_PHYS
/*
* Unmap all references to user TSBs from the TLB of the current processor.
*/
@@ -216,6 +216,7 @@ sfmmu_clear_user_tsbs()
va += MMU_PAGESIZE;
}
}
+#endif /* UTSB_PHYS */
/*
* Setup the kernel's locked tte's
@@ -242,9 +243,11 @@ sfmmu_set_tlb(void)
(void) prom_dtlb_load(index - 2, *(uint64_t *)&ktext_tte, textva);
index -= 3;
+#ifndef UTSB_PHYS
utsb_dtlb_ttenum = index--;
utsb4m_dtlb_ttenum = index--;
sfmmu_clear_user_tsbs();
+#endif /* UTSB_PHYS */
if (!ktsb_phys && enable_bigktsb) {
int i;
diff --git a/usr/src/uts/sun4u/vm/mach_sfmmu.h b/usr/src/uts/sun4u/vm/mach_sfmmu.h
index 730162e8fe..66640afb9e 100644
--- a/usr/src/uts/sun4u/vm/mach_sfmmu.h
+++ b/usr/src/uts/sun4u/vm/mach_sfmmu.h
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -40,15 +39,33 @@
#include <sys/x_call.h>
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>
+#include <sys/opl_olympus_regs.h>
+
#ifdef __cplusplus
extern "C" {
#endif
/*
- * Define UTSB_PHYS if user TSB is always accessed via physical address.
- * On sun4u platform, user TSB is accessed via virtual address.
+ * On sun4u platforms, user TSBs are accessed via virtual address by default.
+ * Platforms that support ASI_SCRATCHPAD registers can define UTSB_PHYS in the
+ * platform Makefile to access user TSBs via physical address but must also
+ * designate one ASI_SCRATCHPAD register to hold the second user TSB. To
+ * designate the user TSB scratchpad register, platforms must provide a
+ * definition for SCRATCHPAD_UTSBREG below.
+ *
+ * Platforms that use UTSB_PHYS do not allocate 2 locked TLB entries to access
+ * the user TSBs.
*/
-#undef UTSB_PHYS
+#if defined(UTSB_PHYS)
+
+#if defined(_OPL)
+#define SCRATCHPAD_UTSBREG OPL_SCRATCHPAD_UTSBREG4
+#else
+#error "Compiling UTSB_PHYS but no SCRATCHPAD_UTSBREG specified"
+#endif
+
+#endif /* UTSB_PHYS */
+
#ifdef _ASM
@@ -80,8 +97,6 @@ extern "C" {
movrnz qlp, ASI_MEM, tmp; \
mov tmp, %asi
-#define SETUP_UTSB_ATOMIC_ASI(tmp1, tmp2) \
- mov ASI_NQUAD_LD, %asi
/*
* Macro to swtich to alternate global register on sun4u platforms
* (not applicable to sun4v platforms)
@@ -225,6 +240,8 @@ label/**/1:
bnz,pt %xcc, label/**/4; /* if ref bit set-skip ahead */ \
nop; \
GET_CPU_IMPL(tmp1); \
+ cmp tmp1, SPITFIRE_IMPL; \
+ blt %icc, label/**/2; /* skip flush if FJ-OPL cpus */ \
cmp tmp1, CHEETAH_IMPL; \
bl,a %icc, label/**/1; \
/* update reference bit */ \
@@ -272,6 +289,8 @@ label/**/4: \
bnz,pn %xcc, label/**/4; /* nothing to do */ \
nop; \
GET_CPU_IMPL(tmp1); \
+ cmp tmp1, SPITFIRE_IMPL; \
+ blt %icc, label/**/2; /* skip flush if FJ-OPL cpus */ \
cmp tmp1, CHEETAH_IMPL; \
bl,a %icc, label/**/1; \
/* update reference bit */ \
@@ -294,6 +313,8 @@ label/**/4: \
/* END CSTYLED */
+#ifndef UTSB_PHYS
+
/*
* Synthesize TSB base register contents for a process with
* a single TSB.
@@ -359,21 +380,6 @@ label/**/_tsbreg_vamask: ;\
or tsbreg, tmp3, tsbreg ;\
/* END CSTYLED */
-/*
- * Load TSB base register. In the single TSB case this register
- * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
- * TSB size code in bits 2:0. See hat_sfmmu.h for the layout in
- * the case where we have multiple TSBs per process.
- *
- * In:
- * tsbreg = value to load (ro)
- */
-#define LOAD_TSBREG(tsbreg, tmp1, tmp2) \
- mov MMU_TSB, tmp1; \
- sethi %hi(FLUSH_ADDR), tmp2; \
- stxa tsbreg, [tmp1]ASI_DMMU; /* dtsb reg */ \
- stxa tsbreg, [tmp1]ASI_IMMU; /* itsb reg */ \
- flush tmp2
/*
* Load the locked TSB TLB entry.
@@ -424,7 +430,7 @@ label/**/_resv_offset: ;\
sllx tmp1, (64 - MMU_PAGESHIFT4M), tmp1 ;\
srlx tmp1, (64 - MMU_PAGESHIFT4M), tmp1 ;\
or tmp1, resva, resva ;\
-9: /* END CSYLED */
+9: /* END CSTYLED */
/*
* Determine the pointer of the entry in the first TSB to probe given
@@ -438,7 +444,7 @@ label/**/_resv_offset: ;\
* Out: tsbe_ptr = TSB entry address
*
* Note: This function is patched at runtime for performance reasons.
- * Any changes here require sfmmu_patch_utsb fixed.
+ * Any changes here require sfmmu_patch_utsb fixed.
*/
#define GET_1ST_TSBE_PTR(tsbp8k, tsbe_ptr, tmp, label) \
@@ -454,54 +460,6 @@ label/**/_get_1st_tsbe_ptr: ;\
or tsbe_ptr, tmp, tsbe_ptr \
/* END CSTYLED */
-
-/*
- * Will probe the first TSB, and if it finds a match, will insert it
- * into the TLB and retry.
- *
- * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
- * vpg_4m = 4M virtual page number for tag matching (in, ro)
- * label = where to branch to if this is a miss (text)
- * %asi = atomic ASI to use for the TSB access
- *
- * For trapstat, we have to explicily use these registers.
- * g4 = location tag will be retrieved into from TSB (out)
- * g5 = location data(tte) will be retrieved into from TSB (out)
- */
-#define PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label) /* g4/g5 clobbered */ \
- /* BEGIN CSTYLED */ \
- ldda [tsbe_ptr]%asi, %g4 /* g4 = tag, g5 = data */ ;\
- cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
- bne,pn %xcc, label/**/1 /* branch if !match */ ;\
- nop ;\
- TT_TRACE(trace_tsbhit) ;\
- DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
- /* trapstat expects tte in %g5 */ ;\
- retry /* retry faulted instruction */ ;\
-label/**/1: \
- /* END CSTYLED */
-
-
-/*
- * Same as above, only if the TTE doesn't have the execute
- * bit set, will branch to exec_fault directly.
- */
-#define PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label) \
- /* BEGIN CSTYLED */ \
- ldda [tsbe_ptr]%asi, %g4 /* g4 = tag, g5 = data */ ;\
- cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
- bne,pn %xcc, label/**/1 /* branch if !match */ ;\
- nop ;\
- andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\
- bz,pn %icc, exec_fault ;\
- nop ;\
- TT_TRACE(trace_tsbhit) ;\
- ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
- retry /* retry faulted instruction */ ;\
-label/**/1: \
- /* END CSTYLED */
-
-
/*
* Determine the base address of the second TSB given the 8K TSB
* pointer register contents.
@@ -565,6 +523,196 @@ label/**/_get_2nd_tsb_base: ;\
/* tmp1 = TSB size code */ \
GET_TSBE_POINTER(MMU_PAGESHIFT4M, tsbe_ptr, tagacc, tmp1, tmp2)
+#endif /* UTSB_PHYS */
+
+
+#ifdef UTSB_PHYS
+
+/*
+ * Synthesize a TSB base register contents for a process.
+ *
+ * In:
+ * tsbinfo = TSB info pointer (ro)
+ * tsbreg, tmp1 = scratch registers
+ * Out:
+ * tsbreg = value to program into TSB base register
+ */
+
+#define MAKE_UTSBREG_PHYS(tsbinfo, tsbreg, tmp1) \
+ ldx [tsbinfo + TSBINFO_PADDR], tsbreg; \
+ lduh [tsbinfo + TSBINFO_SZCODE], tmp1; \
+ and tmp1, TSB_SOFTSZ_MASK, tmp1; \
+ or tsbreg, tmp1, tsbreg; \
+
+/*
+ * Load TSB base register into a dedicated scratchpad register.
+ * This register contains utsb_pabase in bits 63:13, and TSB size
+ * code in bits 2:0.
+ *
+ * In:
+ * tsbreg = value to load (ro)
+ * regnum = constant or register
+ * tmp1 = scratch register
+ * Out:
+ * Specified scratchpad register updated
+ *
+ * Note: If this is enabled on Panther, a membar #Sync is required
+ * following an ASI store to the scratchpad registers.
+ */
+
+#define SET_UTSBREG(regnum, tsbreg, tmp1) \
+ mov regnum, tmp1; \
+ stxa tsbreg, [tmp1]ASI_SCRATCHPAD; /* save tsbreg */ \
+
+/*
+ * Get TSB base register from the scratchpad
+ *
+ * In:
+ * regnum = constant or register
+ * tsbreg = scratch
+ * Out:
+ * tsbreg = tsbreg from the specified scratchpad register
+ */
+
+#define GET_UTSBREG(regnum, tsbreg) \
+ mov regnum, tsbreg; \
+ ldxa [tsbreg]ASI_SCRATCHPAD, tsbreg
+
+/*
+ * Determine the pointer of the entry in the first TSB to probe given
+ * the 8K TSB pointer register contents.
+ *
+ * In:
+ * tagacc = tag access register
+ * tsbe_ptr = 8K TSB pointer register
+ * tmp = scratch registers
+ *
+ * Out: tsbe_ptr = TSB entry address
+ *
+ * Note: This macro is a nop since the 8K TSB pointer register
+ * is the entry pointer and does not need to be decoded.
+ * It is defined to allow for code sharing with sun4v.
+ */
+
+#define GET_1ST_TSBE_PTR(tagacc, tsbe_ptr, tmp1, tmp2)
+
+/*
+ * Get the location in the 2nd TSB of the tsbe for this fault.
+ * Assumes that the second TSB only contains 4M mappings.
+ *
+ * In:
+ * tagacc = tag access register (not clobbered)
+ * tsbe = 2nd TSB base register
+ * tmp1, tmp2 = scratch registers
+ * Out:
+ * tsbe = pointer to the tsbe in the 2nd TSB
+ */
+
+#define GET_2ND_TSBE_PTR(tagacc, tsbe, tmp1, tmp2) \
+ and tsbe, TSB_SOFTSZ_MASK, tmp2; /* tmp2=szc */ \
+ andn tsbe, TSB_SOFTSZ_MASK, tsbe; /* tsbbase */ \
+ mov TSB_ENTRIES(0), tmp1; /* nentries in TSB size 0 */ \
+ sllx tmp1, tmp2, tmp1; /* tmp1 = nentries in TSB */ \
+ sub tmp1, 1, tmp1; /* mask = nentries - 1 */ \
+ srlx tagacc, MMU_PAGESHIFT4M, tmp2; \
+ and tmp2, tmp1, tmp1; /* tsbent = virtpage & mask */ \
+ sllx tmp1, TSB_ENTRY_SHIFT, tmp1; /* entry num --> ptr */ \
+ add tsbe, tmp1, tsbe /* add entry offset to TSB base */
+
+/*
+ * Read the 2nd TSB base register. This is not done in GET_2ND_TSBE_PTR as
+ * an optimization since the TLB miss trap handler entries have potentially
+ * already loaded the 2nd TSB base reg when we invoke GET_2ND_TSBE_PTR.
+ *
+ * Out:
+ * tsbreg = contents of the 2nd TSB base register
+ */
+#define GET_2ND_TSBREG(tsbreg) \
+ GET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg);
+
+/*
+ * Load the 2nd TSB base into a dedicated scratchpad register which
+ * is used as a pseudo TSB base register.
+ *
+ * In:
+ * tsbreg = value to load (ro)
+ * regnum = constant or register
+ * tmp1 = scratch register
+ * Out:
+ * Specified scratchpad register updated
+ */
+#define LOAD_2ND_TSBREG(tsbreg, tmp1) \
+ SET_UTSBREG(SCRATCHPAD_UTSBREG, tsbreg, tmp1);
+
+#endif /* UTSB_PHYS */
+
+
+/*
+ * Load TSB base register. In the single TSB case this register
+ * contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
+ * TSB size code in bits 2:0. See hat_sfmmu.h for the layout in
+ * the case where we have multiple TSBs per process.
+ *
+ * In:
+ * tsbreg = value to load (ro)
+ */
+#define LOAD_TSBREG(tsbreg, tmp1, tmp2) \
+ mov MMU_TSB, tmp1; \
+ sethi %hi(FLUSH_ADDR), tmp2; \
+ stxa tsbreg, [tmp1]ASI_DMMU; /* dtsb reg */ \
+ stxa tsbreg, [tmp1]ASI_IMMU; /* itsb reg */ \
+ flush tmp2
+
+#ifdef UTSB_PHYS
+#define UTSB_PROBE_ASI ASI_QUAD_LDD_PHYS
+#else
+#define UTSB_PROBE_ASI ASI_NQUAD_LD
+#endif
+
+/*
+ * Will probe the first TSB, and if it finds a match, will insert it
+ * into the TLB and retry.
+ *
+ * tsbe_ptr = precomputed first TSB entry pointer (in, ro)
+ * vpg_4m = 4M virtual page number for tag matching (in, ro)
+ * label = where to branch to if this is a miss (text)
+ * %asi = atomic ASI to use for the TSB access
+ *
+ * For trapstat, we have to explicitly use these registers.
+ * g4 = location tag will be retrieved into from TSB (out)
+ * g5 = location data(tte) will be retrieved into from TSB (out)
+ */
+#define PROBE_1ST_DTSB(tsbe_ptr, vpg_4m, label) /* g4/g5 clobbered */ \
+ /* BEGIN CSTYLED */ \
+ ldda [tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
+ cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
+ bne,pn %xcc, label/**/1 /* branch if !match */ ;\
+ nop ;\
+ TT_TRACE(trace_tsbhit) ;\
+ DTLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
+ /* trapstat expects tte in %g5 */ ;\
+ retry /* retry faulted instruction */ ;\
+label/**/1: \
+ /* END CSTYLED */
+
+/*
+ * Same as above, only if the TTE doesn't have the execute
+ * bit set, will branch to exec_fault directly.
+ */
+#define PROBE_1ST_ITSB(tsbe_ptr, vpg_4m, label) \
+ /* BEGIN CSTYLED */ \
+ ldda [tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
+ cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
+ bne,pn %xcc, label/**/1 /* branch if !match */ ;\
+ nop ;\
+ andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\
+ bz,pn %icc, exec_fault ;\
+ nop ;\
+ TT_TRACE(trace_tsbhit) ;\
+ ITLB_STUFF(%g5, %g1, %g2, %g3, %g4) ;\
+ retry /* retry faulted instruction */ ;\
+label/**/1: \
+ /* END CSTYLED */
/*
* vpg_4m = 4M virtual page number for tag matching (in)
@@ -577,7 +725,7 @@ label/**/_get_2nd_tsb_base: ;\
*/
#define PROBE_2ND_DTSB(tsbe_ptr, vpg_4m, label) \
/* BEGIN CSTYLED */ \
- ldda [tsbe_ptr]%asi, %g4 /* g4 = tag, g5 = data */ ;\
+ ldda [tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
/* since we are looking at 2nd tsb, if it's valid, it must be 4M */ ;\
cmp %g4, vpg_4m ;\
bne,pn %xcc, label/**/1 ;\
@@ -599,7 +747,7 @@ label/**/1: \
*/
#define PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label) \
/* BEGIN CSTYLED */ \
- ldda [tsbe_ptr]%asi, %g4 /* g4 = tag, g5 = data */ ;\
+ ldda [tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
bne,pn %xcc, sfmmu_tsb_miss_tt /* branch if !match */ ;\
or %g0, TTE4M, %g6 ;\
@@ -621,7 +769,7 @@ label/**/1: ;\
*/
#define PROBE_2ND_ITSB(tsbe_ptr, vpg_4m, label) \
/* BEGIN CSTYLED */ \
- ldda [tsbe_ptr]%asi, %g4 /* g4 = tag, g5 = data */ ;\
+ ldda [tsbe_ptr]UTSB_PROBE_ASI, %g4 /* g4 = tag, g5 = data */ ;\
cmp %g4, vpg_4m /* compare tag w/ TSB */ ;\
bne,pn %xcc, sfmmu_tsb_miss_tt /* branch if !match */ ;\
or %g0, TTE4M, %g6 ;\
diff --git a/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s b/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
index 18ba9d3441..0a68ce88f7 100644
--- a/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
+++ b/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
@@ -2,9 +2,8 @@
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
- * Common Development and Distribution License, Version 1.0 only
- * (the "License"). You may not use this file except in compliance
- * with the License.
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
@@ -20,7 +19,7 @@
* CDDL HEADER END
*/
/*
- * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
@@ -338,7 +337,11 @@ sfmmu_load_mmustate(sfmmu_t *sfmmup)
or %o0, %lo(sfmmu_panic5), %o0 ! give up.
4:
stxa %o0,[%o5]ASI_DMMU ! Setup tag access
+#ifdef OLYMPUS_SHARED_FTLB
+ stxa %g1,[%g0]ASI_DTLB_IN
+#else
stxa %g1,[%g3]ASI_DTLB_ACCESS ! Displace entry at idx
+#endif
membar #Sync
retl
wrpr %g0, %o3, %pstate ! enable interrupts
@@ -399,16 +402,24 @@ sfmmu_load_mmustate(sfmmu_t *sfmmup)
/*
* set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
* returns the detection value in %o0.
+ *
+ * Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
+ * - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
+ * - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
+ *
*/
ENTRY_NP(sfmmu_setup_4lp)
GET_CPU_IMPL(%o0);
cmp %o0, CHEETAH_PLUS_IMPL
- blt,a,pt %icc, 4f
+ bge,pt %icc, 4f
+ mov 1, %o1
+ cmp %o0, SPITFIRE_IMPL
+ bge,a,pn %icc, 3f
clr %o1
+4:
set ktsb_phys, %o2
- mov 1, %o1
st %o1, [%o2]
-4: retl
+3: retl
mov %o1, %o0
SET_SIZE(sfmmu_setup_4lp)
@@ -446,7 +457,24 @@ sfmmu_load_mmustate(sfmmu_t *sfmmup)
*/
ldx [%o0 + SFMMU_TSB], %o1 ! %o1 = first tsbinfo
ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second tsbinfo
- brz,pt %g2, 4f
+
+#ifdef UTSB_PHYS
+ /*
+ * UTSB_PHYS accesses user TSBs via physical addresses. The first
+ * TSB is in the MMU I/D TSB Base registers. The second TSB uses a
+ * designated ASI_SCRATCHPAD register as a pseudo TSB base register.
+ */
+ MAKE_UTSBREG_PHYS(%o1, %o2, %o3) ! %o2 = first utsbreg
+ LOAD_TSBREG(%o2, %o3, %o4) ! write TSB base register
+
+ brz,a,pt %g2, 2f
+ mov -1, %o2 ! use -1 if no second TSB
+
+ MAKE_UTSBREG_PHYS(%g2, %o2, %o3) ! %o2 = second utsbreg
+2:
+ LOAD_2ND_TSBREG(%o2, %o3) ! write 2nd pseudo TSB base register
+#else /* UTSB_PHYS */
+ brz,pt %g2, 4f
nop
/*
* We have a second TSB for this process, so we need to
@@ -485,6 +513,7 @@ sfmmu_load_mmustate(sfmmu_t *sfmmup)
sll %o2, DTACC_SHIFT, %o2 ! %o1 = first TSB TLB index
RESV_OFFSET(%o1, %g3, %o3, sfmmu_tsb_1st) ! or-in bits of TSB VA
LOAD_TSBTTE(%o1, %o2, %g3, %o4) ! load first TSB locked TTE
+#endif /* UTSB_PHYS */
6: ldx [%o0 + SFMMU_ISMBLKPA], %o1 ! copy members of sfmmu
CPU_TSBMISS_AREA(%o2, %o3) ! we need to access from
@@ -639,4 +668,3 @@ prefetch_tsbe_write(struct tsbe *tsbep)
#ifndef lint
#endif /* lint */
-
diff --git a/usr/src/uts/sun4v/Makefile.workarounds b/usr/src/uts/sun4v/Makefile.workarounds
index 0945c5cfcb..40a76c9693 100644
--- a/usr/src/uts/sun4v/Makefile.workarounds
+++ b/usr/src/uts/sun4v/Makefile.workarounds
@@ -33,3 +33,4 @@
WORKAROUND_DEFS =
WORKAROUND_DEFS += -DQCN_POLLING # XXXQ
+WORKAROUND_DEFS += -DDO_CORELEVEL_LOADBAL
diff --git a/usr/src/uts/sun4v/io/px/px_lib4v.c b/usr/src/uts/sun4v/io/px/px_lib4v.c
index 188a5d8d9d..d0477106d0 100644
--- a/usr/src/uts/sun4v/io/px/px_lib4v.c
+++ b/usr/src/uts/sun4v/io/px/px_lib4v.c
@@ -419,6 +419,17 @@ px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
return (DDI_SUCCESS);
}
+/*
+ * Fetch the chip's "ranges" property value. For sun4v, the config space
+ * base is not used (pxtool_get_phys_addr will return zero), so just
+ * return zero from px_get_range_prop().
+ */
+/*ARGSUSED*/
+uint64_t
+px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
+{
+ return (0);
+}
/*
* Checks dma attributes against system bypass ranges
@@ -427,7 +438,8 @@ px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p,
*/
/*ARGSUSED*/
int
-px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attr_p, uint64_t *lo_p, uint64_t *hi_p)
+px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p,
+ uint64_t *lo_p, uint64_t *hi_p)
{
if ((attr_p->dma_attr_addr_lo != 0ull) ||
(attr_p->dma_attr_addr_hi != UINT64_MAX)) {