-rw-r--r--   kvm.c       | 21
-rw-r--r--   kvm_vmx.c   | 32
-rw-r--r--   kvm_x86.c   | 18
-rw-r--r--   kvm_x86.h   | 13
4 files changed, 60 insertions, 24 deletions
diff --git a/kvm.c b/kvm.c
--- a/kvm.c
+++ b/kvm.c
@@ -1372,7 +1372,7 @@ hardware_enable(void *junk)
 	}
 }
 
-static void
+void
 hardware_disable(void *junk)
 {
 	int cpu = curthread->t_cpu->cpu_id;
@@ -1384,19 +1384,6 @@ hardware_disable(void *junk)
 	kvm_arch_hardware_disable(NULL);
 }
 
-/*
- * The following needs to run on each cpu. Currently,
- * wait is always 1, so we use the kvm_xcall() routine which
- * calls xc_sync. Later, if needed, the implementation can be
- * changed to use xc_call or xc_call_nowait.
- */
-#define	on_each_cpu(func, info, wait)	\
-	/*CSTYLED*/	\
-	({	\
-		kvm_xcall(KVM_CPUALL, func, info);	\
-		0;	\
-	})
-
 static void
 hardware_disable_all_nolock(void)
 {
@@ -1801,9 +1788,13 @@ kvm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 	kvm_dip = NULL;
 
 	hardware_disable_all();
+	kvm_arch_hardware_unsetup();
+	kvm_arch_exit();
+	kmem_free(bad_page_kma, PAGESIZE);
+
+	vmx_fini();
 	mutex_destroy(&kvm_lock);
 	ddi_soft_state_fini(&kvm_state);
-	vmx_fini();
 
 	return (DDI_SUCCESS);
 }
diff --git a/kvm_vmx.c b/kvm_vmx.c
--- a/kvm_vmx.c
+++ b/kvm_vmx.c
@@ -1530,6 +1530,22 @@ alloc_kvm_area(void)
 	return (0);
 }
 
+static void
+free_vmcs(struct vmcs *vmcs)
+{
+	kmem_free(vmcs, PAGESIZE);
+}
+
+static void
+free_kvm_area(void)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < ncpus; cpu++) {
+		free_vmcs(vmxarea[cpu]);
+		vmxarea[cpu] = NULL;
+	}
+}
 
 static int
 vmx_hardware_setup(void)
@@ -1570,6 +1586,12 @@ vmx_hardware_setup(void)
 }
 
 static void
+vmx_hardware_unsetup(void)
+{
+	free_kvm_area();
+}
+
+static void
 fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -4481,7 +4503,7 @@ struct kvm_x86_ops vmx_x86_ops = {
 
 	.hardware_setup = vmx_hardware_setup,
-	.hardware_unsetup = (void(*)(void))nulldev, /* XXX: hardware_unsetup? */
+	.hardware_unsetup = vmx_hardware_unsetup,
 
 	.cpu_has_accelerated_tpr = report_flexpriority,
 
 	.vcpu_create = vmx_create_vcpu,
@@ -4633,12 +4655,4 @@ vmx_fini(void)
 		    vpid_bitmap_words);
 	}
 	kmem_cache_destroy(kvm_vcpu_cache);
-#ifdef XXX
-	kvm_on_each_cpu(hardware_disable, NULL, 1);
-	kvm_arch_hardware_unsetup();
-	kvm_arch_exit();
-#else
-	XXX_KVM_PROBE;
-#endif
-	kmem_free(bad_page_kma, PAGESIZE);
 }
diff --git a/kvm_x86.c b/kvm_x86.c
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -4818,6 +4818,24 @@ kvm_arch_hardware_setup(void)
 }
 
 void
+kvm_arch_hardware_unsetup(void)
+{
+	kvm_x86_ops->hardware_unsetup();
+}
+
+void
+kvm_arch_exit(void)
+{
+	/*
+	 * XXX if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+	 *	cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+	 *	    CPUFREQ_TRANSITION_NOTIFIER);
+	 * kvm_x86_ops = NULL;
+	 * XXX kvm_mmu_module_exit();
+	 */
+}
+
+void
 kvm_arch_check_processor_compat(void *rtn)
 {
 	kvm_x86_ops->check_processor_compatibility(rtn);
diff --git a/kvm_x86.h b/kvm_x86.h
--- a/kvm_x86.h
+++ b/kvm_x86.h
@@ -256,4 +256,17 @@ typedef struct kvm_vcpu_events {
 	uint32_t reserved[10];
 } kvm_vcpu_events_t;
 
+/*
+ * The following needs to run on each cpu. Currently,
+ * wait is always 1, so we use the kvm_xcall() routine which
+ * calls xc_sync. Later, if needed, the implementation can be
+ * changed to use xc_call or xc_call_nowait.
+ */
+#define	on_each_cpu(func, info, wait)	\
+	/*CSTYLED*/	\
+	({	\
+		kvm_xcall(KVM_CPUALL, func, info);	\
+		0;	\
+	})
+
 #endif /* __KVM_X86_H */
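
Note on the on_each_cpu() move: the macro now lives in kvm_x86.h so that callers outside kvm.c can reach it, and hardware_disable() is made non-static for the same reason. A minimal sketch of a call site, assuming the driver's kvm_xcall()/xc_sync() machinery; the wrapper function below is hypothetical and not part of this change:

#include "kvm_x86.h"	/* now provides on_each_cpu() */

extern void hardware_disable(void *);	/* made non-static in kvm.c above */

/* Hypothetical helper: synchronously disable VMX on every CPU. */
static void
disable_hw_on_all_cpus(void)
{
	/*
	 * wait is always 1 today, so on_each_cpu() expands to
	 * kvm_xcall(KVM_CPUALL, ...), which cross-calls each CPU
	 * via xc_sync(); the macro's trailing 0 is its return value.
	 */
	(void) on_each_cpu(hardware_disable, NULL, 1);
}

The removed #ifdef XXX block in vmx_fini() called kvm_on_each_cpu(hardware_disable, NULL, 1) with the same shape; that work now happens via hardware_disable_all() in kvm_detach().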
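The substantive change is that .hardware_unsetup no longer points at a nulldev stub, so kvm_detach() can tear down the per-CPU VMCS area through the arch layer: kvm_arch_hardware_unsetup() -> kvm_x86_ops->hardware_unsetup() -> vmx_hardware_unsetup() -> free_kvm_area(). A self-contained user-space sketch of that function-pointer dispatch, for illustration only; printf() stands in for the real kmem_free() loop and the main() harness is not driver code:

#include <stdio.h>

/*
 * Mirrors the driver's ops-table pattern: arch-generic code calls
 * through a per-vendor function pointer, here wired to the VMX
 * implementation instead of a nulldev stub.
 */
struct kvm_x86_ops {
	void (*hardware_unsetup)(void);
};

static void
free_kvm_area(void)
{
	/* Stands in for the per-CPU free_vmcs()/kmem_free() loop. */
	printf("freeing per-CPU VMCS pages\n");
}

static void
vmx_hardware_unsetup(void)
{
	free_kvm_area();
}

static struct kvm_x86_ops vmx_x86_ops = {
	.hardware_unsetup = vmx_hardware_unsetup,	/* was (void(*)(void))nulldev */
};

static struct kvm_x86_ops *kvm_x86_ops = &vmx_x86_ops;

/* In the driver, this is what kvm_detach() now calls. */
static void
kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

int
main(void)
{
	kvm_arch_hardware_unsetup();
	return (0);
}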