author     Robert Mustacchi <rm@joyent.com>    2011-06-06 15:45:28 -0700
committer  Robert Mustacchi <rm@joyent.com>    2011-06-06 15:45:28 -0700
commit     8fd82e2f770502da502987b7529d137dc2962db4 (patch)
tree       4911aff6cc31a7fe08a4609ba27cbdb6f5895393
parent     18e279fbbd087ed9d6d3ce36a0623ca20ec62e0a (diff)
download   illumos-kvm-8fd82e2f770502da502987b7529d137dc2962db4.tar.gz
HVM-310 Cleanup includes and extern function definitions in kvm_vmx.c
-rw-r--r--  kvm.c            42
-rw-r--r--  kvm_impl.h       59
-rw-r--r--  kvm_subr.c       48
-rw-r--r--  kvm_vmx.c       109
-rw-r--r--  kvm_x86.c         6
-rw-r--r--  kvm_x86host.h     4
-rw-r--r--  kvm_x86impl.h    63
-rw-r--r--  msr.h             7
8 files changed, 114 insertions, 224 deletions
diff --git a/kvm.c b/kvm.c
index ed45e36..e03c3b0 100644
--- a/kvm.c
+++ b/kvm.c
@@ -54,6 +54,7 @@
#include "kvm_i8254.h"
#include "kvm_mmu.h"
#include "kvm_cache_regs.h"
+#include "kvm_x86impl.h"
#undef DEBUG
@@ -573,8 +574,6 @@ kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
-extern void kvm_xcall(processorid_t cpu, kvm_xcall_t func, void *arg);
-
int
kvm_init(void *opaque, unsigned int vcpu_size)
{
@@ -2001,45 +2000,6 @@ __vmwrite(unsigned long field, unsigned long value)
}
}
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialization
- */
-static unsigned long __force_order;
-
-unsigned long
-native_read_cr0(void)
-{
- unsigned long val;
- __asm__ volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
- return (val);
-}
-
-#define read_cr0() (native_read_cr0())
-
-unsigned long
-native_read_cr4(void)
-{
- unsigned long val;
- __asm__ volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
- return (val);
-}
-
-#define read_cr4() (native_read_cr4())
-
-unsigned long
-native_read_cr3(void)
-{
- unsigned long val;
- __asm__ volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
- return (val);
-}
-
-#define read_cr3() (native_read_cr3())
-
inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu);
void
diff --git a/kvm_impl.h b/kvm_impl.h
index 3b2142b..02960ca 100644
--- a/kvm_impl.h
+++ b/kvm_impl.h
@@ -93,63 +93,4 @@ typedef struct kvm_stats {
#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))
#endif /*ARRAY_SIZE*/
-#define KVM_CPUALL -1
-
-typedef void (*kvm_xcall_t)(void *);
-
-/*
- * XXX
- * All the follwoing definitions are ones that are expected to just be in
- * x86/x86.c by Linux. However we currently have the things that need them
- * spread out across two files. For now we are putting them here, but this
- * should not last very long.
- */
-#define KVM_NR_SHARED_MSRS 16
-
-typedef struct kvm_shared_msrs_global {
- int nr;
- uint32_t msrs[KVM_NR_SHARED_MSRS];
-} kvm_shared_msrs_global_t;
-
-struct kvm_vcpu;
-
-typedef struct kvm_user_return_notifier {
- void (*on_user_return)(struct kvm_vcpu *,
- struct kvm_user_return_notifier *);
-} kvm_user_return_notifier_t;
-
-typedef struct kvm_shared_msrs {
- struct kvm_user_return_notifier urn;
- int registered;
- struct kvm_shared_msr_values {
- uint64_t host;
- uint64_t curr;
- } values[KVM_NR_SHARED_MSRS];
-} kvm_shared_msrs_t;
-
-/*
- * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
- * we have asm/x86/processor.h
- */
-typedef struct fxsave {
- uint16_t cwd;
- uint16_t swd;
- uint16_t twd;
- uint16_t fop;
- uint64_t rip;
- uint64_t rdp;
- uint32_t mxcsr;
- uint32_t mxcsr_mask;
- uint32_t st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
-#ifdef CONFIG_X86_64
- uint32_t xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
-#else
- uint32_t xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
-#endif
-} fxsave_t;
-
-#ifndef offsetof
-#define offsetof(s, m) ((size_t)(&((s *)0)->m))
-#endif
-
#endif
diff --git a/kvm_subr.c b/kvm_subr.c
index a4230ce..6bcd74a 100644
--- a/kvm_subr.c
+++ b/kvm_subr.c
@@ -22,6 +22,7 @@
#include "kvm_vmx.h"
#include "irqflags.h"
#include "kvm_iodev.h"
+#include "kvm_x86impl.h"
#include "kvm_host.h"
#include "kvm_x86host.h"
#include "kvm.h"
@@ -149,20 +150,6 @@ rdmsrl_safe(unsigned msr, unsigned long long *p)
return (err);
}
-int
-rdmsr_on_cpu(unsigned int cpu, uint32_t msr_no, uint32_t *l, uint32_t *h)
-{
- rdmsr(msr_no, *l, *h);
- return (0);
-}
-
-int
-wrmsr_on_cpu(unsigned int cpu, uint32_t msr_no, uint32_t l, uint32_t h)
-{
- wrmsr(msr_no, l, h);
- return (0);
-}
-
unsigned long
read_msr(unsigned long msr)
{
@@ -311,3 +298,36 @@ zero_constructor(void *buf, void *arg, int tags)
bzero(buf, (size_t)arg);
return (0);
}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads stores around it, which can hurt performance. Solution is to
+ * use a variable and mimic reads and writes to it to enforce serialization
+ */
+static unsigned long __force_order;
+
+unsigned long
+native_read_cr0(void)
+{
+ unsigned long val;
+ __asm__ volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return (val);
+}
+
+unsigned long
+native_read_cr4(void)
+{
+ unsigned long val;
+ __asm__ volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return (val);
+}
+
+unsigned long
+native_read_cr3(void)
+{
+ unsigned long val;
+ __asm__ volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return (val);
+}
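
For reference, a minimal standalone sketch of the __force_order idiom moved into kvm_subr.c above: each asm statement names the same dummy variable as an operand, so the compiler cannot reorder the control-register accesses with respect to one another, without the cost of a full "memory" clobber. The write_cr4_serialized() helper below is hypothetical and not part of this commit; it only illustrates how the same trick applies to writes.

static unsigned long __force_order;

static inline unsigned long
read_cr4_serialized(void)
{
	unsigned long val;
	/* Fake store to __force_order orders this asm against its peers. */
	__asm__ volatile("mov %%cr4, %0" : "=r" (val), "=m" (__force_order));
	return (val);
}

static inline void
write_cr4_serialized(unsigned long val)
{
	/* Fake load from __force_order gives the write the same ordering. */
	__asm__ volatile("mov %0, %%cr4" : : "r" (val), "m" (__force_order));
}
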
diff --git a/kvm_vmx.c b/kvm_vmx.c
index dd2aef5..abce161 100644
--- a/kvm_vmx.c
+++ b/kvm_vmx.c
@@ -19,113 +19,25 @@
#include <sys/types.h>
#include <sys/mach_mmu.h>
+#include <asm/cpu.h>
+#include <sys/x86_archext.h>
-/*
- * XXX Need proper header files!
- */
#include "kvm_bitops.h"
-#include "processor-flags.h"
#include "msr.h"
#include "kvm_cpuid.h"
-#include "irqflags.h"
+#include "kvm_impl.h"
+#include "kvm_x86impl.h"
+#include "kvm_cache_regs.h"
#include "kvm_host.h"
-#include "kvm_x86host.h"
#include "kvm_iodev.h"
-#include "kvm.h"
-#include "kvm_apicdef.h"
-#include "kvm_ioapic.h"
-#include "kvm_lapic.h"
#include "kvm_irq.h"
#include "kvm_mmu.h"
#include "kvm_vmx.h"
-/*
- * XXX
- * The fact that I'm externing these is a sign of failure
- */
-extern void kvm_xcall(processorid_t, kvm_xcall_t, void *);
-extern int is_long_mode(struct kvm_vcpu *vcpu);
-extern void kvm_migrate_timers(struct kvm_vcpu *vcpu);
-extern ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask);
-extern void kvm_rip_write(struct kvm_vcpu *, unsigned long);
-extern int kvm_exception_is_soft(unsigned int);
-extern uint64_t kvm_va2pa(caddr_t va);
-extern int kvm_get_msr_common(struct kvm_vcpu *, uint32_t, uint64_t *);
-extern int kvm_set_msr_common(struct kvm_vcpu *, uint32_t, uint64_t);
-extern int getcr4(void);
-extern void setcr4(ulong_t val);
-extern void kvm_enable_efer_bits(uint64_t);
-extern int is_paging(struct kvm_vcpu *);
-extern int is_pae(struct kvm_vcpu *vcpu);
-extern ulong kvm_read_cr4(struct kvm_vcpu *);
-extern int is_protmode(struct kvm_vcpu *vcpu);
+/* XXX These shouldn't need to be static */
extern kmutex_t vmx_vpid_lock;
extern ulong_t *vmx_vpid_bitmap;
extern size_t vpid_bitmap_words;
-extern unsigned long native_read_cr0(void);
-#define read_cr0() (native_read_cr0())
-extern unsigned long native_read_cr4(void);
-#define read_cr4() (native_read_cr4())
-extern unsigned long native_read_cr3(void);
-#define read_cr3() (native_read_cr3())
-extern void kvm_set_cr8(struct kvm_vcpu *, unsigned long);
-extern void kvm_set_apic_base(struct kvm_vcpu *, uint64_t);
-extern void fx_init(struct kvm_vcpu *);
-extern void kvm_register_write(struct kvm_vcpu *vcpu,
- enum kvm_reg reg, unsigned long val);
-extern ulong kvm_read_cr0(struct kvm_vcpu *vcpu);
-extern int emulate_instruction(struct kvm_vcpu *, unsigned long,
- uint16_t, int);
-extern void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
-extern int kvm_event_needs_reinjection(struct kvm_vcpu *);
-extern int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *, gva_t);
-extern int kvm_mmu_page_fault(struct kvm_vcpu *, gva_t, uint32_t);
-extern int kvm_emulate_halt(struct kvm_vcpu *);
-extern int kvm_emulate_pio(struct kvm_vcpu *, int, int, unsigned);
-extern unsigned long kvm_register_read(struct kvm_vcpu *, enum kvm_reg);
-extern void kvm_set_cr0(struct kvm_vcpu *, unsigned long);
-extern void kvm_set_cr3(struct kvm_vcpu *, unsigned long);
-extern void kvm_set_cr4(struct kvm_vcpu *, unsigned long);
-extern void kvm_set_cr8(struct kvm_vcpu *, unsigned long);
-extern unsigned long kvm_get_cr8(struct kvm_vcpu *);
-extern void kvm_lmsw(struct kvm_vcpu *, unsigned long);
-extern ulong kvm_read_cr4_bits(struct kvm_vcpu *, ulong);
-extern int kvm_require_cpl(struct kvm_vcpu *, int);
-extern void kvm_emulate_cpuid(struct kvm_vcpu *);
-extern int kvm_emulate_hypercall(struct kvm_vcpu *);
-extern void kvm_mmu_invlpg(struct kvm_vcpu *, gva_t);
-extern void kvm_clear_interrupt_queue(struct kvm_vcpu *);
-extern void kvm_clear_exception_queue(struct kvm_vcpu *);
-extern int kvm_task_switch(struct kvm_vcpu *, uint16_t, int);
-extern int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *,
- uint64_t, uint64_t sptes[4]);
-extern void kvm_queue_interrupt(struct kvm_vcpu *, uint8_t, int);
-extern int kvm_vcpu_init(struct kvm_vcpu *, struct kvm *, unsigned);
-extern int irqchip_in_kernel(struct kvm *kvm);
-extern unsigned long kvm_rip_read(struct kvm_vcpu *);
-extern struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- uint32_t function, uint32_t index);
-
-
-/* These are the region types */
-#define MTRR_TYPE_UNCACHABLE 0
-#define MTRR_TYPE_WRCOMB 1
-#define MTRR_TYPE_WRTHROUGH 4
-#define MTRR_TYPE_WRPROT 5
-#define MTRR_TYPE_WRBACK 6
-#define MTRR_NUM_TYPES 7
-
-extern uint8_t kvm_get_guest_memory_type(struct kvm_vcpu *, gfn_t);
-extern uint32_t bit(int);
-extern int kvm_init(void *, unsigned int);
-extern void kvm_enable_tdp(void);
-extern void kvm_disable_tdp(void);
-
-/*
- * XXX These should be from <asm/cpu.h>
- */
-extern void cli(void);
-extern void sti(void);
static int bypass_guest_pf = 1;
/* XXX This should be static */
@@ -143,11 +55,12 @@ static int emulate_invalid_guest_state = 0;
* is loaded on linux.
*/
-struct vmcs **vmxarea; /* 1 per cpu */
-struct vmcs **current_vmcs;
+static struct vmcs **vmxarea; /* 1 per cpu */
+static struct vmcs **current_vmcs;
+/* XXX Should shared_msrs be static? */
struct kvm_shared_msrs **shared_msrs;
-list_t **vcpus_on_cpu;
-uint64_t *vmxarea_pa; /* physical address of each vmxarea */
+static list_t **vcpus_on_cpu;
+static uint64_t *vmxarea_pa; /* physical address of each vmxarea */
#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
diff --git a/kvm_x86.c b/kvm_x86.c
index 1ff0d06..3177a39 100644
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -40,11 +40,10 @@
#include "kvm_i8254.h"
#include "kvm_lapic.h"
#include "kvm_cache_regs.h"
+#include "kvm_x86impl.h"
#undef DEBUG
-extern struct vmcs **vmxarea;
-
static int vcpuid;
extern uint64_t native_read_msr_safe(unsigned int msr, int *err);
extern int native_write_msr_safe(unsigned int msr, unsigned low, unsigned high);
@@ -315,9 +314,6 @@ extern uint64_t shadow_dirty_mask;
extern pfn_t hat_getpfnum(hat_t *hat, caddr_t addr);
extern inline void ept_sync_global(void);
-extern uint64_t *vmxarea_pa;
-extern list_t **vcpus_on_cpu;
-
extern struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu);
extern void vmcs_writel(unsigned long field, unsigned long value);
diff --git a/kvm_x86host.h b/kvm_x86host.h
index 60e6247..e0a054a 100644
--- a/kvm_x86host.h
+++ b/kvm_x86host.h
@@ -12,6 +12,10 @@
#include "kvm.h"
#include "kvm_types.h"
+#ifndef offsetof
+#define offsetof(s, m) ((size_t)(&((s *)0)->m))
+#endif
+
#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
diff --git a/kvm_x86impl.h b/kvm_x86impl.h
index 12014d0..a59f177 100644
--- a/kvm_x86impl.h
+++ b/kvm_x86impl.h
@@ -32,4 +32,67 @@ extern void bitmap_zero(unsigned long *, int);
extern page_t *pfn_to_page(pfn_t);
extern int zero_constructor(void *, void *, int);
+#define KVM_CPUALL -1
+
+typedef void (*kvm_xcall_t)(void *);
+extern void kvm_xcall(processorid_t cpu, kvm_xcall_t func, void *arg);
+
+/*
+ * All the following definitions are ones that are expected to just be in
+ * x86/x86.c by Linux. However we currently have the things that need them
+ * spread out across two files. For now we are putting them here, but this
+ * should not last very long.
+ */
+#define KVM_NR_SHARED_MSRS 16
+
+typedef struct kvm_shared_msrs_global {
+ int nr;
+ uint32_t msrs[KVM_NR_SHARED_MSRS];
+} kvm_shared_msrs_global_t;
+
+struct kvm_vcpu;
+
+typedef struct kvm_user_return_notifier {
+ void (*on_user_return)(struct kvm_vcpu *,
+ struct kvm_user_return_notifier *);
+} kvm_user_return_notifier_t;
+
+typedef struct kvm_shared_msrs {
+ struct kvm_user_return_notifier urn;
+ int registered;
+ struct kvm_shared_msr_values {
+ uint64_t host;
+ uint64_t curr;
+ } values[KVM_NR_SHARED_MSRS];
+} kvm_shared_msrs_t;
+
+/*
+ * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
+ * we have asm/x86/processor.h
+ */
+typedef struct fxsave {
+ uint16_t cwd;
+ uint16_t swd;
+ uint16_t twd;
+ uint16_t fop;
+ uint64_t rip;
+ uint64_t rdp;
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ uint32_t st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+#ifdef CONFIG_X86_64
+ uint32_t xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+#else
+ uint32_t xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+#endif
+} fxsave_t;
+
+unsigned long native_read_cr0(void);
+#define read_cr0() (native_read_cr0())
+unsigned long native_read_cr4(void);
+#define read_cr4() (native_read_cr4())
+unsigned long native_read_cr3(void);
+#define read_cr3() (native_read_cr3())
+
+uint32_t bit(int bitno);
#endif
diff --git a/msr.h b/msr.h
index 7578627..fe833d0 100644
--- a/msr.h
+++ b/msr.h
@@ -94,8 +94,6 @@ do { \
(val2) = (uint32_t)(__val >> 32); \
} while (0)
-extern void wrmsr(unsigned msr, unsigned low, unsigned high);
-
#define rdmsrl(msr, val) \
((val) = native_read_msr((msr)))
@@ -156,11 +154,6 @@ do { \
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
-extern int rdmsr_on_cpu(unsigned int cpu,
- uint32_t msr_no, uint32_t *l, uint32_t *h);
-extern int wrmsr_on_cpu(unsigned int cpu,
- uint32_t msr_no, uint32_t l, uint32_t h);
-
#endif /* _KERNEL */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */