summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  kvm.c      21
-rw-r--r--  kvm_vmx.c  32
-rw-r--r--  kvm_x86.c  18
-rw-r--r--  kvm_x86.h  13
4 files changed, 60 insertions(+), 24 deletions(-)
diff --git a/kvm.c b/kvm.c
index 2772439..a53a35a 100644
--- a/kvm.c
+++ b/kvm.c
@@ -1372,7 +1372,7 @@ hardware_enable(void *junk)
}
}
-static void
+void
hardware_disable(void *junk)
{
int cpu = curthread->t_cpu->cpu_id;
@@ -1384,19 +1384,6 @@ hardware_disable(void *junk)
kvm_arch_hardware_disable(NULL);
}
-/*
- * The following needs to run on each cpu. Currently,
- * wait is always 1, so we use the kvm_xcall() routine which
- * calls xc_sync. Later, if needed, the implementation can be
- * changed to use xc_call or xc_call_nowait.
- */
-#define on_each_cpu(func, info, wait) \
- /*CSTYLED*/ \
- ({ \
- kvm_xcall(KVM_CPUALL, func, info); \
- 0; \
- })
-
static void
hardware_disable_all_nolock(void)
{
@@ -1801,9 +1788,13 @@ kvm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
kvm_dip = NULL;
hardware_disable_all();
+ kvm_arch_hardware_unsetup();
+ kvm_arch_exit();
+ kmem_free(bad_page_kma, PAGESIZE);
+
+ vmx_fini();
mutex_destroy(&kvm_lock);
ddi_soft_state_fini(&kvm_state);
- vmx_fini();
return (DDI_SUCCESS);
}
diff --git a/kvm_vmx.c b/kvm_vmx.c
index fcfb11f..9e09aed 100644
--- a/kvm_vmx.c
+++ b/kvm_vmx.c
@@ -1530,6 +1530,22 @@ alloc_kvm_area(void)
return (0);
}
+static void
+free_vmcs(struct vmcs *vmcs)
+{
+ kmem_free(vmcs, PAGESIZE);
+}
+
+static void
+free_kvm_area(void)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < ncpus; cpu++) {
+ free_vmcs(vmxarea[cpu]);
+ vmxarea[cpu] = NULL;
+ }
+}
static int
vmx_hardware_setup(void)
@@ -1570,6 +1586,12 @@ vmx_hardware_setup(void)
}
static void
+vmx_hardware_unsetup(void)
+{
+ free_kvm_area();
+}
+
+static void
fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -4481,7 +4503,7 @@ struct kvm_x86_ops vmx_x86_ops = {
.hardware_setup = vmx_hardware_setup,
- .hardware_unsetup = (void(*)(void))nulldev, /* XXX: hardware_unsetup? */
+ .hardware_unsetup = vmx_hardware_unsetup,
.cpu_has_accelerated_tpr = report_flexpriority,
.vcpu_create = vmx_create_vcpu,
@@ -4633,12 +4655,4 @@ vmx_fini(void)
vpid_bitmap_words);
}
kmem_cache_destroy(kvm_vcpu_cache);
-#ifdef XXX
- kvm_on_each_cpu(hardware_disable, NULL, 1);
- kvm_arch_hardware_unsetup();
- kvm_arch_exit();
-#else
- XXX_KVM_PROBE;
-#endif
- kmem_free(bad_page_kma, PAGESIZE);
}
diff --git a/kvm_x86.c b/kvm_x86.c
index 04dba65..25d350d 100644
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -4818,6 +4818,24 @@ kvm_arch_hardware_setup(void)
}
void
+kvm_arch_hardware_unsetup(void)
+{
+ kvm_x86_ops->hardware_unsetup();
+}
+
+void
+kvm_arch_exit(void)
+{
+ /*
+ * XXX if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+ * cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+ * CPUFREQ_TRANSITION_NOTIFIER);
+ * kvm_x86_ops = NULL;
+ * XXX kvm_mmu_module_exit();
+ */
+}
+
+void
kvm_arch_check_processor_compat(void *rtn)
{
kvm_x86_ops->check_processor_compatibility(rtn);
diff --git a/kvm_x86.h b/kvm_x86.h
index ff98454..a245b6f 100644
--- a/kvm_x86.h
+++ b/kvm_x86.h
@@ -256,4 +256,17 @@ typedef struct kvm_vcpu_events {
uint32_t reserved[10];
} kvm_vcpu_events_t;
+/*
+ * The following needs to run on each cpu. Currently,
+ * wait is always 1, so we use the kvm_xcall() routine which
+ * calls xc_sync. Later, if needed, the implementation can be
+ * changed to use xc_call or xc_call_nowait.
+ */
+#define on_each_cpu(func, info, wait) \
+ /*CSTYLED*/ \
+ ({ \
+ kvm_xcall(KVM_CPUALL, func, info); \
+ 0; \
+ })
+
#endif /* __KVM_X86_H */