author     Mike Zeller <mike@mikezeller.net>  2020-04-30 12:05:53 -0400
committer  Mike Zeller <mike@mikezeller.net>  2020-04-30 12:05:53 -0400
commit     ac392c47376cdb46e02be40735cc74d5d100fe6c (patch)
tree       450ea4b694961d00606485aa323234bb2ad69851 /usr/src/uts/sun4
parent     95a46e0bd62ba0e68db9fa0b958dc5313920e6fe (diff)
parent     0f8413a98e7949bf0a6a2c24153f2928c7eb5cfb (diff)
download   illumos-joyent-OS-8165.tar.gz

Merge branch 'master' into OS-8165
Diffstat (limited to 'usr/src/uts/sun4')
-rw-r--r--  usr/src/uts/sun4/io/ivintr.c     |  2
-rw-r--r--  usr/src/uts/sun4/io/pcicfg.c     | 24
-rw-r--r--  usr/src/uts/sun4/os/machdep.c    |  9
-rw-r--r--  usr/src/uts/sun4/os/mlsetup.c    |  2
-rw-r--r--  usr/src/uts/sun4/os/mp_states.c  |  4
-rw-r--r--  usr/src/uts/sun4/os/x_call.c     | 19
6 files changed, 28 insertions, 32 deletions
diff --git a/usr/src/uts/sun4/io/ivintr.c b/usr/src/uts/sun4/io/ivintr.c
index 1a6cd93eaf..0a80e9da55 100644
--- a/usr/src/uts/sun4/io/ivintr.c
+++ b/usr/src/uts/sun4/io/ivintr.c
@@ -268,7 +268,7 @@ add_softintr(uint_t pil, softintrfunc intr_handler, caddr_t intr_arg1,
intr_vec_t *iv_p;
if (pil > PIL_MAX)
- return (NULL);
+ return ((uint64_t)NULL);
iv_p = iv_alloc(type);
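
(Note: the add_softintr() hunk above stops returning a bare NULL from a function whose return value is an integer cookie; judging by the cast added, the return type is uint64_t. A minimal standalone sketch of the pattern, not the illumos code itself:

#include <stddef.h>
#include <stdint.h>

/*
 * If NULL expands to ((void *)0), "return (NULL)" from an integer-typed
 * function implicitly converts a pointer to an integer and draws a
 * warning; the explicit cast (or a plain 0) keeps the conversion visible.
 */
uint64_t
get_cookie(int fail)
{
	if (fail)
		return ((uint64_t)NULL);
	return (42);
}

get_cookie() here is a hypothetical stand-in used only to illustrate the warning being silenced.)
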
diff --git a/usr/src/uts/sun4/io/pcicfg.c b/usr/src/uts/sun4/io/pcicfg.c
index b1bb75ab1c..e2f2e42d59 100644
--- a/usr/src/uts/sun4/io/pcicfg.c
+++ b/usr/src/uts/sun4/io/pcicfg.c
@@ -577,24 +577,24 @@ pcicfg_get_nslots(dev_info_t *dip, ddi_acc_handle_t handle)
&cap_ptr)) == DDI_SUCCESS) {
uint32_t config;
- PCI_CAP_PUT8(handle, NULL, cap_ptr, PCI_HP_DWORD_SELECT_OFF,
+ PCI_CAP_PUT8(handle, 0, cap_ptr, PCI_HP_DWORD_SELECT_OFF,
PCI_HP_SLOT_CONFIGURATION_REG);
- config = PCI_CAP_GET32(handle, NULL, cap_ptr,
+ config = PCI_CAP_GET32(handle, 0, cap_ptr,
PCI_HP_DWORD_DATA_OFF);
num_slots = config & 0x1F;
} else if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_SLOT_ID, &cap_ptr))
== DDI_SUCCESS) {
- uint8_t esr_reg = PCI_CAP_GET8(handle, NULL,
+ uint8_t esr_reg = PCI_CAP_GET8(handle, 0,
cap_ptr, PCI_CAP_ID_REGS_OFF);
num_slots = PCI_CAPSLOT_NSLOTS(esr_reg);
} else if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
== DDI_SUCCESS) {
- int port_type = PCI_CAP_GET16(handle, NULL, cap_ptr,
+ int port_type = PCI_CAP_GET16(handle, 0, cap_ptr,
PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
if ((port_type == PCIE_PCIECAP_DEV_TYPE_DOWN) &&
- (PCI_CAP_GET16(handle, NULL, cap_ptr, PCIE_PCIECAP)
+ (PCI_CAP_GET16(handle, 0, cap_ptr, PCIE_PCIECAP)
& PCIE_PCIECAP_SLOT_IMPL))
num_slots = 1;
}
@@ -614,7 +614,7 @@ pcicfg_is_chassis(dev_info_t *dip, ddi_acc_handle_t handle)
if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_SLOT_ID, &cap_ptr)) !=
DDI_FAILURE) {
- uint8_t esr_reg = PCI_CAP_GET8(handle, NULL, cap_ptr, 2);
+ uint8_t esr_reg = PCI_CAP_GET8(handle, 0, cap_ptr, 2);
if (PCI_CAPSLOT_FIC(esr_reg))
return (B_TRUE);
}
@@ -665,7 +665,7 @@ pcicfg_pcie_port_type(dev_info_t *dip, ddi_acc_handle_t handle)
if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr)) !=
DDI_FAILURE)
- port_type = PCI_CAP_GET16(handle, NULL,
+ port_type = PCI_CAP_GET16(handle, 0,
cap_ptr, PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
return (port_type);
@@ -3535,11 +3535,11 @@ pcicfg_set_standard_props(dev_info_t *dip, ddi_acc_handle_t config_handle,
ret = PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr);
if (pcie_dev && (ret == DDI_SUCCESS)) {
- val = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
+ val = PCI_CAP_GET16(config_handle, 0, cap_ptr,
PCIE_PCIECAP) & PCIE_PCIECAP_SLOT_IMPL;
/* if slot implemented, get physical slot number */
if (val) {
- wordval = (PCI_CAP_GET32(config_handle, NULL,
+ wordval = (PCI_CAP_GET32(config_handle, 0,
cap_ptr, PCIE_SLOTCAP) >>
PCIE_SLOTCAP_PHY_SLOT_NUM_SHIFT) &
PCIE_SLOTCAP_PHY_SLOT_NUM_MASK;
@@ -3977,13 +3977,13 @@ pcicfg_disable_bridge_probe_err(dev_info_t *dip, ddi_acc_handle_t h,
return;
regs->pcie_cap_off = cap_ptr;
- regs->devctl = devctl = PCI_CAP_GET16(h, NULL, cap_ptr,
+ regs->devctl = devctl = PCI_CAP_GET16(h, 0, cap_ptr,
PCIE_DEVCTL);
devctl &= ~(PCIE_DEVCTL_UR_REPORTING_EN |
PCIE_DEVCTL_CE_REPORTING_EN |
PCIE_DEVCTL_NFE_REPORTING_EN |
PCIE_DEVCTL_FE_REPORTING_EN);
- PCI_CAP_PUT16(h, NULL, cap_ptr, PCIE_DEVCTL, devctl);
+ PCI_CAP_PUT16(h, 0, cap_ptr, PCIE_DEVCTL, devctl);
}
}
@@ -4635,7 +4635,7 @@ pcicfg_fcode_probe(dev_info_t *parent, uint_t bus, uint_t device,
* the status property if it exists.
*/
if (ddi_prop_lookup_string(DDI_DEV_T_ANY,
- new_child, NULL, "status", &status_prop) ==
+ new_child, 0, "status", &status_prop) ==
DDI_PROP_SUCCESS) {
if ((strncmp("disabled", status_prop, 8) ==
0) || (strncmp("fail", status_prop, 4) ==
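
(Note: the pcicfg.c hunks above all follow the same pattern: the second parameter of the PCI_CAP_GET*/PCI_CAP_PUT* macros and the flags argument of ddi_prop_lookup_string() take integers, so passing NULL, which may be a pointer constant, draws a warning; 0 is the correct spelling. A standalone sketch of the idea; cap_get16() is a stand-in, not the DDI macro:

#include <stdint.h>

/* Stand-in accessor with an integer index parameter, for illustration only. */
static uint16_t
cap_get16(uint32_t index, uint16_t base, uint16_t off)
{
	return ((uint16_t)(index + base + off));	/* placeholder body */
}

int
main(void)
{
	/* 0, not NULL: the parameter is an integer, not a pointer. */
	(void) cap_get16(0, 0x40, 0x02);
	return (0);
}

)
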
diff --git a/usr/src/uts/sun4/os/machdep.c b/usr/src/uts/sun4/os/machdep.c
index 970d5a4125..ca06f151c9 100644
--- a/usr/src/uts/sun4/os/machdep.c
+++ b/usr/src/uts/sun4/os/machdep.c
@@ -452,7 +452,7 @@ cpu_create_intrstat(cpu_t *cp)
zoneid = ALL_ZONES;
intr_ksp = kstat_create_zone("cpu", cp->cpu_id, "intrstat", "misc",
- KSTAT_TYPE_NAMED, PIL_MAX * 2, NULL, zoneid);
+ KSTAT_TYPE_NAMED, PIL_MAX * 2, 0, zoneid);
/*
* Initialize each PIL's named kstat
@@ -901,11 +901,6 @@ lbolt_softint_post(void)
}
void
-thread_splitstack_run(caddr_t addr, void (*func)(void *), void *)
+do_hotinlines(struct module *mp __unused)
{
- panic("thread_splitstack() not supported on SPARC");
}
-
-void
-thread_splitstack_cleanup(void)
-{}
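
(Note: the machdep.c hunk above drops the thread_splitstack stubs and adds do_hotinlines() as an empty sun4 stub whose parameter is marked __unused so the build stays warning-free even though the argument is never referenced. A minimal sketch of that pattern, assuming a GCC-style attribute; illumos supplies its own __unused macro in its compile headers:

/* Portable stand-in for the illumos __unused annotation. */
#define	__unused	__attribute__((__unused__))

struct module;	/* opaque here; the real definition lives in kernel headers */

/* Empty stub: nothing to do on this platform, but the symbol must exist. */
void
do_hotinlines(struct module *mp __unused)
{
}

)
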
diff --git a/usr/src/uts/sun4/os/mlsetup.c b/usr/src/uts/sun4/os/mlsetup.c
index b9ade98a26..a4c4d4059a 100644
--- a/usr/src/uts/sun4/os/mlsetup.c
+++ b/usr/src/uts/sun4/os/mlsetup.c
@@ -408,7 +408,7 @@ kobj_start(void *cif)
prom_panic("no ELF image");
ehdr = (Ehdr *)(uintptr_t)eadr;
for (i = 0; i < BA_NUM; i++)
- bootaux[i].ba_val = NULL;
+ bootaux[i].ba_val = 0;
bootaux[BA_PHNUM].ba_val = ehdr->e_phnum;
bootaux[BA_PHENT].ba_val = ehdr->e_phentsize;
bootaux[BA_LDNAME].ba_ptr = NULL;
diff --git a/usr/src/uts/sun4/os/mp_states.c b/usr/src/uts/sun4/os/mp_states.c
index d5f55006b4..d615011f9a 100644
--- a/usr/src/uts/sun4/os/mp_states.c
+++ b/usr/src/uts/sun4/os/mp_states.c
@@ -102,7 +102,7 @@ idle_other_cpus(void)
return;
xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
- (uint64_t)cpu_idle_self, NULL);
+ (uint64_t)cpu_idle_self, 0);
for (i = 0; i < NCPU; i++) {
if (!CPU_IN_SET(cpu_idle_set, i))
@@ -215,7 +215,7 @@ mp_cpu_quiesce(cpu_t *cp0)
volatile cpu_t *cp = (volatile cpu_t *) cp0;
int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
int cpuid = cp->cpu_id;
- int found_intr = 1;
+ int found_intr = 1;
static fn_t f = "mp_cpu_quiesce";
ASSERT(CPU->cpu_id != cpuid);
diff --git a/usr/src/uts/sun4/os/x_call.c b/usr/src/uts/sun4/os/x_call.c
index 521f740c82..4583ca32cb 100644
--- a/usr/src/uts/sun4/os/x_call.c
+++ b/usr/src/uts/sun4/os/x_call.c
@@ -90,7 +90,8 @@ void send_mondo_set(cpuset_t set);
* values.
*/
static int
-xc_func_timeout_adj(cpu_setup_t what, int cpuid) {
+xc_func_timeout_adj(cpu_setup_t what, int cpuid)
+{
uint64_t freq = cpunodes[cpuid].clock_freq;
switch (what) {
@@ -756,7 +757,7 @@ xc_attention(cpuset_t cpuset)
CPUSET_DEL(xc_cpuset, lcx);
XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
- XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, NULL, NULL);
+ XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, 0, 0);
if (CPUSET_ISNULL(xc_cpuset))
return;
@@ -843,7 +844,7 @@ xc_dismissed(cpuset_t cpuset)
* exclude itself
*/
CPUSET_DEL(xc_cpuset, lcx);
- XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, NULL, NULL);
+ XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, 0, 0);
if (CPUSET_ISNULL(xc_cpuset)) {
xc_holder = -1;
mutex_exit(&xc_sys_mutex);
@@ -963,9 +964,9 @@ xc_loop(void)
*
* The owner of xc_sys_mutex (or xc_holder) can expect
* its xc/xt requests are handled as follows:
- * xc requests use xc_mbox's handshaking for their services
- * xt requests at TL>0 will be handled immediately
- * xt requests at TL=0:
+ * xc requests use xc_mbox's handshaking for their services
+ * xt requests at TL>0 will be handled immediately
+ * xt requests at TL=0:
* if their handlers'pils are <= XCALL_PIL, then
* they will be handled after xc_loop exits
* (so, they probably should not be used)
@@ -976,7 +977,7 @@ xc_loop(void)
* the requests will be handled as follows:
* xc requests will be handled after they grab xc_sys_mutex
* xt requests at TL>0 will be handled immediately
- * xt requests at TL=0:
+ * xt requests at TL=0:
* if their handlers'pils are <= XCALL_PIL, then
* they will be handled after xc_loop exits
* else they will be handled immediately
@@ -988,7 +989,7 @@ xc_loop(void)
CPUSET_ADD(tset, lcx);
membar_stld();
XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
- XC_TRACE(XC_LOOP_ENTER, &tset, NULL, NULL, NULL);
+ XC_TRACE(XC_LOOP_ENTER, &tset, NULL, 0, 0);
while (xmp->xc_state != XC_EXIT) {
if (xmp->xc_state == XC_DOIT) {
func = xmp->xc_func;
@@ -1026,7 +1027,7 @@ xc_loop(void)
}
ASSERT(xmp->xc_state == XC_EXIT);
ASSERT(xc_holder != -1);
- XC_TRACE(XC_LOOP_EXIT, &tset, NULL, NULL, NULL);
+ XC_TRACE(XC_LOOP_EXIT, &tset, NULL, 0, 0);
xmp->xc_state = XC_IDLE;
membar_stld();
return (1);