author    | Patrick Mooney <pmooney@pfmooney.com> | 2019-05-08 17:34:13 +0000
committer | Gordon Ross <gwr@nexenta.com>         | 2019-05-10 17:51:12 -0400
commit    | 89574a1f89d2af2d1755c4e854b150d6113e0564 (patch)
tree      | e160bfc4ca014c31bf98699403a0bbfcb2572f12
parent    | 1974da4becc1ffc39a0e1f1580681b049134dd9f (diff)
download  | illumos-joyent-89574a1f89d2af2d1755c4e854b150d6113e0564.tar.gz
10932 want _MACHDEP-free cpuset_t access
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>
Reviewed by: Andy Fiddaman <andy@omniosce.org>
Approved by: Gordon Ross <gwr@nexenta.com>
-rw-r--r-- | usr/src/psm/stand/cpr/sparcv9/sun4u/machdep.c |  13
-rw-r--r-- | usr/src/uts/common/os/cpu.c                   | 156
-rw-r--r-- | usr/src/uts/common/sys/cpuvar.h               | 181
3 files changed, 205 insertions, 145 deletions
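
Editor's note: the sketch below is not part of the commit; it is a minimal illustration, assuming a kernel consumer built without _MACHDEP, of how the interfaces introduced by this change are meant to be used. Because struct cpuset is opaque outside _MACHDEP code, the set is heap-allocated through cpuset_alloc() and manipulated only through the cpuset_* functions declared in cpuvar.h below. The example_* name and the choice of KM_SLEEP are illustrative assumptions.

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>

/*
 * Hypothetical non-_MACHDEP consumer: track membership of one CPU without
 * knowing the layout of cpuset_t.
 */
static void
example_track_cpu(uint_t cpuid)
{
        cpuset_t *set = cpuset_alloc(KM_SLEEP);  /* opaque type; cannot live on the stack here */

        cpuset_zero(set);
        cpuset_add(set, cpuid);          /* VERIFYs cpuid < NCPU */

        if (cpu_in_set(set, cpuid)) {
                /* ... the CPU is a member of the set ... */
        }

        cpuset_free(set);
}
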
diff --git a/usr/src/psm/stand/cpr/sparcv9/sun4u/machdep.c b/usr/src/psm/stand/cpr/sparcv9/sun4u/machdep.c
index 95e4b4e143..640dbc4902 100644
--- a/usr/src/psm/stand/cpr/sparcv9/sun4u/machdep.c
+++ b/usr/src/psm/stand/cpr/sparcv9/sun4u/machdep.c
@@ -22,16 +22,15 @@
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
+ * Copyright 2016 Joyent, Inc.
 */

-#pragma ident "%Z%%M% %I% %E% SMI"
-
#include <sys/types.h>
#include <sys/cpr.h>
#include <sys/promimpl.h>
#include <sys/privregs.h>
#include <sys/stack.h>
-#include <sys/cpuvar.h>
+#include <sys/bitmap.h>
#include "cprboot.h"

@@ -54,7 +53,7 @@ uint_t cpu_delay;
 */
typedef void (*tlb_func_t)(int, caddr_t, tte_t *);
static uint_t mdlen;
-static cpuset_t slave_set;
+static ulong_t slave_set[BT_BITOUL(NCPU)];
static int has_scbc;

@@ -234,7 +233,7 @@ slave_init(int cpu_id)
{
        restore_tlb(mdinfo.dtte, cpu_id);
        restore_tlb(mdinfo.itte, cpu_id);
-       CPUSET_ADD(slave_set, cpu_id);
+       BT_SET(slave_set, cpu_id);
        membar_stld();
        if (has_scbc) {
                /* just spin, master will park this cpu */
@@ -282,7 +281,7 @@ cb_mpsetup(void)
         * and wait about a second for them to checkin with slave_set
         */
        ncpu = 0;
-       CPUSET_ZERO(slave_set);
+       bzero(slave_set, sizeof (slave_set));
        for (scip = mdinfo.sci, tail = scip + NCPU; scip < tail; scip++) {
                if (scip->node == 0 || scip->cpu_id == cb_mid)
                        continue;
@@ -290,7 +289,7 @@ cb_mpsetup(void)
                    (caddr_t)cpu_launch, scip->cpu_id);

                for (timeout = TIMEOUT_MSECS; timeout; timeout--) {
-                       if (CPU_IN_SET(slave_set, scip->cpu_id))
+                       if (BT_TEST(slave_set, scip->cpu_id))
                                break;
                        cb_usec_wait(MILLISEC);
                }
diff --git a/usr/src/uts/common/os/cpu.c b/usr/src/uts/common/os/cpu.c
index 87c0896814..8612ff62b1 100644
--- a/usr/src/uts/common/os/cpu.c
+++ b/usr/src/uts/common/os/cpu.c
@@ -21,6 +21,7 @@
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright 2019 Joyent, Inc.
 */

/*
@@ -108,6 +109,7 @@ kmutex_t cpu_lock;
cpu_t *cpu_list;                /* list of all CPUs */
cpu_t *clock_cpu_list;          /* used by clock to walk CPUs */
cpu_t *cpu_active;              /* list of active CPUs */
+cpuset_t cpu_active_set;        /* cached set of active CPUs */
static cpuset_t cpu_available;  /* set of available CPUs */
cpuset_t cpu_seqid_inuse;       /* which cpu_seqids are in use */

@@ -1194,7 +1196,7 @@ cpu_online(cpu_t *cp)
         * Handle on-line request.
         *      This code must put the new CPU on the active list before
         *      starting it because it will not be paused, and will start
-        *      using the active list immediately. The real start occurs
+        *      using the active list immediately. The real start occurs
         *      when the CPU_QUIESCED flag is turned off.
         */
@@ -1724,6 +1726,7 @@ cpu_list_init(cpu_t *cp)
        cp->cpu_part = &cp_default;

        CPUSET_ADD(cpu_available, cp->cpu_id);
+       CPUSET_ADD(cpu_active_set, cp->cpu_id);
}

/*
@@ -1895,6 +1898,7 @@ cpu_add_active_internal(cpu_t *cp)
        cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
        cpu_active->cpu_prev_onln->cpu_next_onln = cp;
        cpu_active->cpu_prev_onln = cp;
+       CPUSET_ADD(cpu_active_set, cp->cpu_id);

        if (pp->cp_cpulist) {
                cp->cpu_next_part = pp->cp_cpulist;
@@ -1965,6 +1969,7 @@ cpu_remove_active(cpu_t *cp)
        }
        cp->cpu_next_onln = cp;
        cp->cpu_prev_onln = cp;
+       CPUSET_DEL(cpu_active_set, cp->cpu_id);

        cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
        cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
@@ -2704,13 +2709,18 @@ cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
        return (0);
}

-#if CPUSET_WORDS > 1
-/*
- * Functions for implementing cpuset operations when a cpuset is more
- * than one word. On platforms where a cpuset is a single word these
- * are implemented as macros in cpuvar.h.
- */
+cpuset_t *
+cpuset_alloc(int kmflags)
+{
+       return (kmem_alloc(sizeof (cpuset_t), kmflags));
+}
+
+void
+cpuset_free(cpuset_t *s)
+{
+       kmem_free(s, sizeof (cpuset_t));
+}

void
cpuset_all(cpuset_t *s)
@@ -2722,43 +2732,66 @@ cpuset_all(cpuset_t *s)
}

void
-cpuset_all_but(cpuset_t *s, uint_t cpu)
+cpuset_all_but(cpuset_t *s, const uint_t cpu)
{
        cpuset_all(s);
        CPUSET_DEL(*s, cpu);
}

void
-cpuset_only(cpuset_t *s, uint_t cpu)
+cpuset_only(cpuset_t *s, const uint_t cpu)
{
        CPUSET_ZERO(*s);
        CPUSET_ADD(*s, cpu);
}

+long
+cpu_in_set(const cpuset_t *s, const uint_t cpu)
+{
+       VERIFY(cpu < NCPU);
+       return (BT_TEST(s->cpub, cpu));
+}
+
+void
+cpuset_add(cpuset_t *s, const uint_t cpu)
+{
+       VERIFY(cpu < NCPU);
+       BT_SET(s->cpub, cpu);
+}
+
+void
+cpuset_del(cpuset_t *s, const uint_t cpu)
+{
+       VERIFY(cpu < NCPU);
+       BT_CLEAR(s->cpub, cpu);
+}
+
int
-cpuset_isnull(cpuset_t *s)
+cpuset_isnull(const cpuset_t *s)
{
        int i;

-       for (i = 0; i < CPUSET_WORDS; i++)
+       for (i = 0; i < CPUSET_WORDS; i++) {
                if (s->cpub[i] != 0)
                        return (0);
+       }
        return (1);
}

int
-cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
+cpuset_isequal(const cpuset_t *s1, const cpuset_t *s2)
{
        int i;

-       for (i = 0; i < CPUSET_WORDS; i++)
+       for (i = 0; i < CPUSET_WORDS; i++) {
                if (s1->cpub[i] != s2->cpub[i])
                        return (0);
+       }
        return (1);
}

uint_t
-cpuset_find(cpuset_t *s)
+cpuset_find(const cpuset_t *s)
{
        uint_t i;

@@ -2778,7 +2811,7 @@ cpuset_find(cpuset_t *s)
}

void
-cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
+cpuset_bounds(const cpuset_t *s, uint_t *smallestid, uint_t *largestid)
{
        int i, j;
        uint_t bit;
@@ -2822,7 +2855,72 @@ cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
        *smallestid = *largestid = CPUSET_NOTINSET;
}

-#endif /* CPUSET_WORDS */
+void
+cpuset_atomic_del(cpuset_t *s, const uint_t cpu)
+{
+       VERIFY(cpu < NCPU);
+       BT_ATOMIC_CLEAR(s->cpub, (cpu))
+}
+
+void
+cpuset_atomic_add(cpuset_t *s, const uint_t cpu)
+{
+       VERIFY(cpu < NCPU);
+       BT_ATOMIC_SET(s->cpub, (cpu))
+}
+
+long
+cpuset_atomic_xadd(cpuset_t *s, const uint_t cpu)
+{
+       long res;
+
+       VERIFY(cpu < NCPU);
+       BT_ATOMIC_SET_EXCL(s->cpub, cpu, res);
+       return (res);
+}
+
+long
+cpuset_atomic_xdel(cpuset_t *s, const uint_t cpu)
+{
+       long res;
+
+       VERIFY(cpu < NCPU);
+       BT_ATOMIC_CLEAR_EXCL(s->cpub, cpu, res);
+       return (res);
+}
+
+void
+cpuset_or(cpuset_t *dst, cpuset_t *src)
+{
+       for (int i = 0; i < CPUSET_WORDS; i++) {
+               dst->cpub[i] |= src->cpub[i];
+       }
+}
+
+void
+cpuset_xor(cpuset_t *dst, cpuset_t *src)
+{
+       for (int i = 0; i < CPUSET_WORDS; i++) {
+               dst->cpub[i] ^= src->cpub[i];
+       }
+}
+
+void
+cpuset_and(cpuset_t *dst, cpuset_t *src)
+{
+       for (int i = 0; i < CPUSET_WORDS; i++) {
+               dst->cpub[i] &= src->cpub[i];
+       }
+}
+
+void
+cpuset_zero(cpuset_t *dst)
+{
+       for (int i = 0; i < CPUSET_WORDS; i++) {
+               dst->cpub[i] = 0;
+       }
+}
+

/*
 * Unbind threads bound to specified CPU.
@@ -3112,9 +3210,9 @@ cpu_get_state_str(cpu_t *cpu)
static void
cpu_stats_kstat_create(cpu_t *cp)
{
-       int instance = cp->cpu_id;
-       char *module = "cpu";
-       char *class = "misc";
+       int instance = cp->cpu_id;
+       char *module = "cpu";
+       char *class = "misc";
        kstat_t *ksp;
        zoneid_t zoneid;
@@ -3350,18 +3448,18 @@ cpu_stat_ks_update(kstat_t *ksp, int rw)
                cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
        if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
                cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
-       cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
-       cso->cpu_sysinfo.wait[W_IO] = 0;
+       cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
+       cso->cpu_sysinfo.wait[W_IO] = 0;
        cso->cpu_sysinfo.wait[W_SWAP] = 0;
        cso->cpu_sysinfo.wait[W_PIO] = 0;
-       cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
-       cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
-       cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
-       cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
-       cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
-       cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
-       cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
-       cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
+       cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
+       cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
+       cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
+       cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
+       cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
+       cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
+       cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
+       cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
        cso->cpu_sysinfo.intr = 0;
        for (i = 0; i < PIL_MAX; i++)
                cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
diff --git a/usr/src/uts/common/sys/cpuvar.h b/usr/src/uts/common/sys/cpuvar.h
index 8565ca053e..8adc73bdcd 100644
--- a/usr/src/uts/common/sys/cpuvar.h
+++ b/usr/src/uts/common/sys/cpuvar.h
@@ -24,6 +24,7 @@
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Igor Kozhukhov <ikozhukhov@gmail.com>.
 * Copyright 2017 RackTop Systems.
+ * Copyright 2019 Joyent, Inc.
 */

#ifndef _SYS_CPUVAR_H
@@ -98,11 +99,11 @@ typedef struct cpu {
        /*
         * Links to other CPUs. It is safe to walk these lists if
         * one of the following is true:
-        *      - cpu_lock held
-        *      - preemption disabled via kpreempt_disable
-        *      - PIL >= DISP_LEVEL
-        *      - acting thread is an interrupt thread
-        *      - all other CPUs are paused
+        *      - cpu_lock held
+        *      - preemption disabled via kpreempt_disable
+        *      - PIL >= DISP_LEVEL
+        *      - acting thread is an interrupt thread
+        *      - all other CPUs are paused
         */
        struct cpu *cpu_next;           /* next existing CPU */
        struct cpu *cpu_prev;           /* prev existing CPU */
@@ -130,7 +131,7 @@ typedef struct cpu {
         */
        char cpu_runrun;                /* scheduling flag - set to preempt */
        char cpu_kprunrun;              /* force kernel preemption */
-       pri_t cpu_chosen_level;         /* priority at which cpu */
+       pri_t cpu_chosen_level;         /* priority at which cpu */
                                        /* was chosen for scheduling */
        kthread_t *cpu_dispthread;      /* thread selected for dispatch */
        disp_lock_t cpu_thread_lock;    /* dispatcher lock on current thread */
@@ -286,7 +287,7 @@ extern cpu_core_t cpu_core[];
 * list in avintr.c.
 */
#define INTR_ACTIVE(cpup, level)        \
-       ((level) <= LOCK_LEVEL ?        \
+       ((level) <= LOCK_LEVEL ?        \
        ((cpup)->cpu_intr_actv & (1 << (level))) : (CPU_ON_INTR(cpup)))

/*
@@ -388,9 +389,6 @@ extern cpu_core_t cpu_core[];
#define CPU_DISP_DONTSTEAL      0x01    /* CPU undergoing context swtch */
#define CPU_DISP_HALTED         0x02    /* CPU halted waiting for interrupt */

-/* Note: inside ifdef: _KERNEL || _KMEMUSER || _BOOT */
-#if defined(_MACHDEP)
-
/*
 * Macros for manipulating sets of CPUs as a bitmap. Note that this
 * bitmap may vary in size depending on the maximum CPU id a specific
@@ -405,34 +403,60 @@ extern cpu_core_t cpu_core[];
#define CPUSET_WORDS    BT_BITOUL(NCPU)
#define CPUSET_NOTINSET ((uint_t)-1)

-#if CPUSET_WORDS > 1
-
-typedef struct cpuset {
+#if defined(_MACHDEP)
+struct cpuset {
        ulong_t cpub[CPUSET_WORDS];
-} cpuset_t;
+};
+#else
+struct cpuset;
+#endif
+
+typedef struct cpuset cpuset_t;
+
+extern cpuset_t *cpuset_alloc(int);
+extern void cpuset_free(cpuset_t *);

/*
- * Private functions for manipulating cpusets that do not fit in a
- * single word. These should not be used directly; instead the
- * CPUSET_* macros should be used so the code will be portable
- * across different definitions of NCPU.
+ * Functions for manipulating cpusets. These were previously considered
+ * private when some cpuset_t handling was performed in the CPUSET_* macros.
+ * They are now acceptable to use in non-_MACHDEP code.
+ */
+extern void cpuset_all(cpuset_t *);
+extern void cpuset_all_but(cpuset_t *, const uint_t);
+extern int cpuset_isnull(const cpuset_t *);
+extern int cpuset_isequal(const cpuset_t *, const cpuset_t *);
+extern void cpuset_only(cpuset_t *, const uint_t);
+extern long cpu_in_set(const cpuset_t *, const uint_t);
+extern void cpuset_add(cpuset_t *, const uint_t);
+extern void cpuset_del(cpuset_t *, const uint_t);
+extern uint_t cpuset_find(const cpuset_t *);
+extern void cpuset_bounds(const cpuset_t *, uint_t *, uint_t *);
+extern void cpuset_atomic_del(cpuset_t *, const uint_t);
+extern void cpuset_atomic_add(cpuset_t *, const uint_t);
+extern long cpuset_atomic_xadd(cpuset_t *, const uint_t);
+extern long cpuset_atomic_xdel(cpuset_t *, const uint_t);
+extern void cpuset_or(cpuset_t *, cpuset_t *);
+extern void cpuset_xor(cpuset_t *, cpuset_t *);
+extern void cpuset_and(cpuset_t *, cpuset_t *);
+extern void cpuset_zero(cpuset_t *);
+
+
+#if defined(_MACHDEP)
+
+/*
+ * Prior to the cpuset_t restructuring, the CPUSET_* macros contained
+ * significant logic, rather than directly invoking the backend functions.
+ * They are maintained here so that existing _MACHDEP code can use them.
 */
-extern void cpuset_all(cpuset_t *);
-extern void cpuset_all_but(cpuset_t *, uint_t);
-extern int cpuset_isnull(cpuset_t *);
-extern int cpuset_cmp(cpuset_t *, cpuset_t *);
-extern void cpuset_only(cpuset_t *, uint_t);
-extern uint_t cpuset_find(cpuset_t *);
-extern void cpuset_bounds(cpuset_t *, uint_t *, uint_t *);

#define CPUSET_ALL(set)                 cpuset_all(&(set))
#define CPUSET_ALL_BUT(set, cpu)        cpuset_all_but(&(set), cpu)
#define CPUSET_ONLY(set, cpu)           cpuset_only(&(set), cpu)
-#define CPU_IN_SET(set, cpu)            BT_TEST((set).cpub, cpu)
-#define CPUSET_ADD(set, cpu)            BT_SET((set).cpub, cpu)
-#define CPUSET_DEL(set, cpu)            BT_CLEAR((set).cpub, cpu)
+#define CPU_IN_SET(set, cpu)            cpu_in_set(&(set), cpu)
+#define CPUSET_ADD(set, cpu)            cpuset_add(&(set), cpu)
+#define CPUSET_DEL(set, cpu)            cpuset_del(&(set), cpu)
#define CPUSET_ISNULL(set)              cpuset_isnull(&(set))
-#define CPUSET_ISEQUAL(set1, set2)      cpuset_cmp(&(set1), &(set2))
+#define CPUSET_ISEQUAL(set1, set2)      cpuset_isequal(&(set1), &(set2))

/*
 * Find one CPU in the cpuset.
@@ -460,100 +484,33 @@ extern void cpuset_bounds(cpuset_t *, uint_t *, uint_t *);
 * deleting a cpu that's not in the cpuset)
 */

-#define CPUSET_ATOMIC_DEL(set, cpu)     BT_ATOMIC_CLEAR((set).cpub, (cpu))
-#define CPUSET_ATOMIC_ADD(set, cpu)     BT_ATOMIC_SET((set).cpub, (cpu))
-
-#define CPUSET_ATOMIC_XADD(set, cpu, result) \
-       BT_ATOMIC_SET_EXCL((set).cpub, cpu, result)
-
-#define CPUSET_ATOMIC_XDEL(set, cpu, result) \
-       BT_ATOMIC_CLEAR_EXCL((set).cpub, cpu, result)
-
-
-#define CPUSET_OR(set1, set2) {                         \
-       int _i;                                         \
-       for (_i = 0; _i < CPUSET_WORDS; _i++)           \
-               (set1).cpub[_i] |= (set2).cpub[_i];     \
-}
-
-#define CPUSET_XOR(set1, set2) {                        \
-       int _i;                                         \
-       for (_i = 0; _i < CPUSET_WORDS; _i++)           \
-               (set1).cpub[_i] ^= (set2).cpub[_i];     \
-}
-
-#define CPUSET_AND(set1, set2) {                        \
-       int _i;                                         \
-       for (_i = 0; _i < CPUSET_WORDS; _i++)           \
-               (set1).cpub[_i] &= (set2).cpub[_i];     \
-}
-
-#define CPUSET_ZERO(set) {                              \
-       int _i;                                         \
-       for (_i = 0; _i < CPUSET_WORDS; _i++)           \
-               (set).cpub[_i] = 0;                     \
-}
-
-#elif CPUSET_WORDS == 1
-
-typedef ulong_t cpuset_t;       /* a set of CPUs */
-
-#define CPUSET(cpu)                     (1UL << (cpu))
-
-#define CPUSET_ALL(set)                 ((void)((set) = ~0UL))
-#define CPUSET_ALL_BUT(set, cpu)        ((void)((set) = ~CPUSET(cpu)))
-#define CPUSET_ONLY(set, cpu)           ((void)((set) = CPUSET(cpu)))
-#define CPU_IN_SET(set, cpu)            ((set) & CPUSET(cpu))
-#define CPUSET_ADD(set, cpu)            ((void)((set) |= CPUSET(cpu)))
-#define CPUSET_DEL(set, cpu)            ((void)((set) &= ~CPUSET(cpu)))
-#define CPUSET_ISNULL(set)              ((set) == 0)
-#define CPUSET_ISEQUAL(set1, set2)      ((set1) == (set2))
-#define CPUSET_OR(set1, set2)           ((void)((set1) |= (set2)))
-#define CPUSET_XOR(set1, set2)          ((void)((set1) ^= (set2)))
-#define CPUSET_AND(set1, set2)          ((void)((set1) &= (set2)))
-#define CPUSET_ZERO(set)                ((void)((set) = 0))
+#define CPUSET_ATOMIC_DEL(set, cpu)     cpuset_atomic_del(&(set), cpu)
+#define CPUSET_ATOMIC_ADD(set, cpu)     cpuset_atomic_add(&(set), cpu)

-#define CPUSET_FIND(set, cpu) {                         \
-       cpu = (uint_t)(lowbit(set) - 1);                \
-}
+#define CPUSET_ATOMIC_XADD(set, cpu, result) \
+       (result) = cpuset_atomic_xadd(&(set), cpu)

-#define CPUSET_BOUNDS(set, smallest, largest) {         \
-       smallest = (uint_t)(lowbit(set) - 1);           \
-       largest = (uint_t)(highbit(set) - 1);           \
-}
+#define CPUSET_ATOMIC_XDEL(set, cpu, result) \
+       (result) = cpuset_atomic_xdel(&(set), cpu)

-#define CPUSET_ATOMIC_DEL(set, cpu)     atomic_and_ulong(&(set), ~CPUSET(cpu))
-#define CPUSET_ATOMIC_ADD(set, cpu)     atomic_or_ulong(&(set), CPUSET(cpu))
+#define CPUSET_OR(set1, set2)           cpuset_or(&(set1), &(set2))

-#define CPUSET_ATOMIC_XADD(set, cpu, result) \
-       { result = atomic_set_long_excl(&(set), (cpu)); }
+#define CPUSET_XOR(set1, set2)          cpuset_xor(&(set1), &(set2))

-#define CPUSET_ATOMIC_XDEL(set, cpu, result) \
-       { result = atomic_clear_long_excl(&(set), (cpu)); }
+#define CPUSET_AND(set1, set2)          cpuset_and(&(set1), &(set2))

-#else   /* CPUSET_WORDS <= 0 */
+#define CPUSET_ZERO(set)                cpuset_zero(&(set))

-#error NCPU is undefined or invalid
+#endif  /* defined(_MACHDEP) */

-#endif  /* CPUSET_WORDS */

extern cpuset_t cpu_seqid_inuse;

-#endif  /* _MACHDEP */
-#endif  /* _KERNEL || _KMEMUSER || _BOOT */
-
-#define CPU_CPR_OFFLINE         0x0
-#define CPU_CPR_ONLINE          0x1
-#define CPU_CPR_IS_OFFLINE(cpu) (((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
-#define CPU_CPR_IS_ONLINE(cpu)  ((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
-#define CPU_SET_CPR_FLAGS(cpu, flag)    ((cpu)->cpu_cpr_flags |= flag)
-
-#if defined(_KERNEL) || defined(_KMEMUSER)
-
extern struct cpu *cpu[];       /* indexed by CPU number */
extern struct cpu **cpu_seq;    /* indexed by sequential CPU id */
extern cpu_t *cpu_list;         /* list of CPUs */
extern cpu_t *cpu_active;       /* list of active CPUs */
+extern cpuset_t cpu_active_set; /* cached set of active CPUs */
extern int ncpus;               /* number of CPUs present */
extern int ncpus_online;        /* number of CPUs not quiesced */
extern int max_ncpus;           /* max present before ncpus is known */
@@ -613,7 +570,13 @@ extern struct cpu *curcpup(void);
 */
#define CPU_NEW_GENERATION(cp)  ((cp)->cpu_generation++)

-#endif /* _KERNEL || _KMEMUSER */
+#endif /* defined(_KERNEL) || defined(_KMEMUSER) */
+
+#define CPU_CPR_OFFLINE         0x0
+#define CPU_CPR_ONLINE          0x1
+#define CPU_CPR_IS_OFFLINE(cpu) (((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
+#define CPU_CPR_IS_ONLINE(cpu)  ((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
+#define CPU_SET_CPR_FLAGS(cpu, flag)    ((cpu)->cpu_cpr_flags |= flag)

/*
 * CPU support routines (not for genassym.c)
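
A second sketch, also not part of the commit: under _MACHDEP the full struct cpuset definition and the CPUSET_* macros remain available, but the macros now delegate to the functions above. Assuming a caller racing to claim a CPU, the exclusive-add pattern looks roughly like this; the example_* names are hypothetical.

#include <sys/types.h>
#include <sys/cpuvar.h>

/* Visible by value only because this file would be compiled with -D_MACHDEP. */
static cpuset_t example_claimed_set;

static int
example_try_claim_cpu(uint_t cpuid)
{
        long result;

        /*
         * CPUSET_ATOMIC_XADD() now expands to cpuset_atomic_xadd(), which is
         * built on BT_ATOMIC_SET_EXCL(); a non-zero result means the CPU was
         * already in the set, i.e. some other caller claimed it first.
         */
        CPUSET_ATOMIC_XADD(example_claimed_set, cpuid, result);

        return (result == 0);
}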