 kvm.c          | 23
 kvm_host.h     |  3
 kvm_irq_comm.c | 12
 kvm_mmu.c      | 65
 kvm_vmx.c      |  9
 5 files changed, 89 insertions(+), 23 deletions(-)
diff --git a/kvm.c b/kvm.c
index 45fd028..2772439 100644
--- a/kvm.c
+++ b/kvm.c
@@ -352,8 +352,12 @@ kvm_destroy_vm(struct kvm *kvmp)
if (kvmp->kvm_kstat != NULL)
kstat_delete(kvmp->kvm_kstat);
- kvm_arch_destroy_vm_comps(kvmp);
+ kvm_arch_flush_shadow(kvmp); /* clean up shadow page tables */
+ kvm_arch_destroy_vm_comps(kvmp);
+ kvm_free_irq_routing(kvmp);
+ kvm_destroy_pic(kvmp);
+ kvm_ioapic_destroy(kvmp);
kvm_coalesced_mmio_free(kvmp);
list_remove(&vm_list, kvmp);
@@ -384,6 +388,7 @@ kvm_destroy_vm(struct kvm *kvmp)
*/
list_destroy(&kvmp->irq_ack_notifier_list);
list_destroy(&kvmp->mask_notifier_list);
+
kvm_arch_destroy_vm(kvmp);
}
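
Note on the ordering added above: tearing down shadow page tables walks the reverse-map arrays that hang off the memory slots, so kvm_arch_flush_shadow() must run before anything that frees slot state, and the routing/PIC/IOAPIC state is released while the VM is still otherwise intact. A minimal user-space sketch of that consumer-before-resource teardown, with hypothetical names (an illustration, not the kernel code):

    #include <stdlib.h>

    /* Hypothetical stand-ins for the per-VM resources being torn down. */
    struct vm {
            unsigned long   **rmaps;        /* per-slot reverse maps */
            int             nslots;
            void            *shadow;        /* MMU state that references rmaps */
    };

    static void
    flush_shadow(struct vm *vm)
    {
            /* Must run first: real shadow teardown dereferences vm->rmaps. */
            free(vm->shadow);
            vm->shadow = NULL;
    }

    static void
    destroy_vm(struct vm *vm)
    {
            int i;

            flush_shadow(vm);               /* 1: retire the consumers */
            for (i = 0; i < vm->nslots; i++)
                    free(vm->rmaps[i]);     /* 2: then the rmaps themselves */
            free(vm->rmaps);
            free(vm);
    }

    int
    main(void)
    {
            struct vm *vm = calloc(1, sizeof (*vm));

            vm->nslots = 1;
            vm->rmaps = calloc(1, sizeof (*vm->rmaps));
            vm->rmaps[0] = calloc(16, sizeof (unsigned long));
            vm->shadow = calloc(1, 64);
            destroy_vm(vm);
            return (0);
    }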
@@ -487,23 +492,21 @@ kvm_free_physmem_slot(struct kvm_memory_slot *free,
if (!dont || free->rmap != dont->rmap)
kmem_free(free->rmap, free->npages * sizeof (struct page *));
-#ifdef XXX
- if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
- kmem_free(free->dirty_bitmap);
+ if ((!dont || free->dirty_bitmap != dont->dirty_bitmap) &&
+ free->dirty_bitmap)
+ kmem_free(free->dirty_bitmap, free->dirty_bitmap_sz);
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
- if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
- vfree(free->lpage_info[i]);
+ if ((!dont || free->lpage_info[i] != dont->lpage_info[i]) &&
+ free->lpage_info[i]) {
+ kmem_free(free->lpage_info[i], free->lpage_info_sz[i]);
free->lpage_info[i] = NULL;
}
}
free->npages = 0;
free->dirty_bitmap = NULL;
-#else
free->rmap = NULL;
- XXX_KVM_PROBE;
-#endif
}
void
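
The new _sz bookkeeping in this hunk exists because illumos kmem_free() takes the size of the original allocation as its second argument, where the upstream Linux code could call kfree()/vfree() with just the pointer. The pattern, sketched in user-space C with a hypothetical sized_buf type (free() here stands in for kmem_free(), which would actually consume sb_sz):

    #include <stdlib.h>
    #include <string.h>

    struct sized_buf {
            void    *sb_ptr;        /* the allocation, or NULL */
            size_t  sb_sz;          /* size recorded at allocation time */
    };

    static void
    sized_alloc(struct sized_buf *sb, size_t sz)
    {
            sb->sb_ptr = calloc(1, sz);     /* ~ kmem_zalloc(sz, KM_SLEEP) */
            sb->sb_sz = sz;                 /* remember sz for the free side */
    }

    static void
    sized_free(struct sized_buf *sb)
    {
            if (sb->sb_ptr != NULL)
                    free(sb->sb_ptr);       /* ~ kmem_free(sb_ptr, sb_sz) */
            sb->sb_ptr = NULL;
            sb->sb_sz = 0;
    }

    int
    main(void)
    {
            struct sized_buf sb;

            sized_alloc(&sb, 128);
            memset(sb.sb_ptr, 0xab, sb.sb_sz);
            sized_free(&sb);
            return (0);
    }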
@@ -629,6 +632,7 @@ __kvm_set_memory_region(struct kvm *kvmp,
new.lpage_info[i] =
kmem_zalloc(lpages * sizeof (*new.lpage_info[i]), KM_SLEEP);
+ new.lpage_info_sz[i] = lpages * sizeof (*new.lpage_info[i]);
if (base_gfn % KVM_PAGES_PER_HPAGE(level))
new.lpage_info[i][0].write_count = 1;
@@ -653,6 +657,7 @@ skip_lpage:
unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
new.dirty_bitmap = kmem_zalloc(dirty_bytes, KM_SLEEP);
+ new.dirty_bitmap_sz = dirty_bytes;
/* destroy any largepage mappings for dirty tracking */
if (old.npages)
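
For reference on the dirty_bitmap_sz value recorded above: the bitmap holds one bit per guest page, rounded up to whole longs, so kvm_dirty_bitmap_bytes() in the Linux-derived code works out to roughly ALIGN(npages, BITS_PER_LONG) / 8. A standalone sketch of that arithmetic (the helper name here is hypothetical):

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * sizeof (unsigned long))

    static unsigned long
    dirty_bitmap_bytes(unsigned long npages)
    {
            /* round npages up to whole longs, one bit per page */
            return (((npages + BITS_PER_LONG - 1) / BITS_PER_LONG) *
                sizeof (unsigned long));
    }

    int
    main(void)
    {
            /* 1000 pages -> 16 longs -> 128 bytes on an LP64 system */
            printf("%lu\n", dirty_bitmap_bytes(1000));
            return (0);
    }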
diff --git a/kvm_host.h b/kvm_host.h
index 95e63cc..1fe76ba 100644
--- a/kvm_host.h
+++ b/kvm_host.h
@@ -142,10 +142,12 @@ typedef struct kvm_memory_slot {
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
+ size_t dirty_bitmap_sz;
struct {
unsigned long rmap_pde;
int write_count;
} *lpage_info[KVM_NR_PAGE_SIZES];
+ size_t lpage_info_sz[KVM_NR_PAGE_SIZES];
unsigned long userspace_addr;
int user_alloc;
} kvm_memory_slot_t;
@@ -210,6 +212,7 @@ typedef struct kvm {
kmutex_t irq_lock;
struct kvm_irq_routing_table *irq_routing;
+ int irq_routing_sz;
list_t mask_notifier_list;
list_t irq_ack_notifier_list;
diff --git a/kvm_irq_comm.c b/kvm_irq_comm.c
index 3f46de5..d01accd 100644
--- a/kvm_irq_comm.c
+++ b/kvm_irq_comm.c
@@ -392,6 +392,17 @@ out:
return (r);
}
+void
+kvm_free_irq_routing(struct kvm *kvm)
+{
+ /*
+ * Called only during vm destruction. Nobody can use the pointer
+ * at this stage.
+ */
+ kmem_free(kvm->irq_routing->rt_entries, kvm->irq_routing_sz);
+ kmem_free(kvm->irq_routing, sizeof (struct kvm_irq_routing_table));
+}
+
int
kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *ue,
unsigned nr, unsigned flags)
@@ -452,6 +463,7 @@ kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *ue,
#else
XXX_KVM_SYNC_PROBE;
kvm->irq_routing = new;
+ kvm->irq_routing_sz = sz * nr;
#endif
mutex_exit(&kvm->irq_lock);
#ifdef XXX
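
The new kvm_free_irq_routing() only works because kvm_set_irq_routing() now records sz * nr in irq_routing_sz at the moment the flat entry array is built; kmem_free() needs that exact byte count back. The alloc/free pairing, sketched in user-space C with hypothetical names and malloc/free standing in for the kmem routines:

    #include <stdlib.h>
    #include <string.h>

    struct route_entry {
            unsigned        gsi;
            unsigned        type;
    };

    struct route_table {
            struct route_entry      *rt_entries;
            size_t                  rt_entries_sz;  /* ~ irq_routing_sz */
    };

    static struct route_table *
    set_routing(const struct route_entry *ue, unsigned nr)
    {
            struct route_table *new = calloc(1, sizeof (*new));

            new->rt_entries_sz = nr * sizeof (*ue); /* recorded for the free */
            new->rt_entries = malloc(new->rt_entries_sz);
            memcpy(new->rt_entries, ue, new->rt_entries_sz);
            return (new);
    }

    static void
    free_routing(struct route_table *rt)
    {
            /* kernel analog: kmem_free(rt_entries, rt_entries_sz) */
            free(rt->rt_entries);
            free(rt);
    }

    int
    main(void)
    {
            struct route_entry ue[2] = { { 0, 0 }, { 1, 0 } };
            struct route_table *rt = set_routing(ue, 2);

            free_routing(rt);
            return (0);
    }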
diff --git a/kvm_mmu.c b/kvm_mmu.c
index 2a52fc4..3d0da5f 100644
--- a/kvm_mmu.c
+++ b/kvm_mmu.c
@@ -1753,6 +1753,43 @@ set_pte:
return (ret);
}
+static int
+kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long data)
+{
+ uint64_t *spte;
+ int need_tlb_flush = 0;
+
+ while ((spte = rmap_next(kvm, rmapp, NULL))) {
+ if (!(*spte & PT_PRESENT_MASK)) {
+ cmn_err(CE_PANIC,
+ "kvm_unmap_rmapp: spte = %p, *spte = %lx\n",
+ spte, *spte);
+ }
+ rmap_remove(kvm, spte);
+ __set_spte(spte, shadow_trap_nonpresent_pte);
+ need_tlb_flush = 1;
+ }
+ return (need_tlb_flush);
+}
+
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+static void
+rmap_recycle(struct kvm_vcpu *vcpu, uint64_t *spte, gfn_t gfn)
+{
+ unsigned long *rmapp;
+ struct kvm_mmu_page *sp;
+
+ sp = page_header(vcpu->kvm, kvm_va2pa((caddr_t)spte));
+
+ gfn = unalias_gfn(vcpu->kvm, gfn);
+ rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+
+ kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
+ kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
static void
mmu_set_spte(struct kvm_vcpu *vcpu, uint64_t *sptep, unsigned pt_access,
unsigned pte_access, int user_fault, int write_fault, int dirty,
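
kvm_unmap_rmapp() above drains every spte reachable from one reverse-map head and reports whether anything was torn down, so the caller knows a TLB flush is owed. A simplified user-space model of that loop's shape; the plain linked list here is hypothetical and much simpler than KVM's packed rmap encoding:

    #include <stdlib.h>
    #include <stdint.h>

    struct mapping {                        /* hypothetical stand-in for an spte */
            uint64_t        spte;
            struct mapping  *next;
    };

    static int
    drain_rmap(struct mapping **head)
    {
            int need_tlb_flush = 0;
            struct mapping *m;

            while ((m = *head) != NULL) {   /* like rmap_next() until empty */
                    *head = m->next;        /* rmap_remove() */
                    m->spte = 0;            /* __set_spte(nonpresent) */
                    free(m);
                    need_tlb_flush = 1;     /* stale entries may be cached */
            }
            return (need_tlb_flush);
    }

    int
    main(void)
    {
            struct mapping *head = NULL;
            int i;

            for (i = 0; i < 3; i++) {
                    struct mapping *m = calloc(1, sizeof (*m));

                    m->spte = (0x1000 * (i + 1)) | 1;
                    m->next = head;
                    head = m;
            }
            return (drain_rmap(&head) ? 0 : 1);     /* expect a flush */
    }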
@@ -1796,12 +1833,8 @@ mmu_set_spte(struct kvm_vcpu *vcpu, uint64_t *sptep, unsigned pt_access,
if (!was_rmapped) {
rmap_count = rmap_add(vcpu, sptep, gfn);
kvm_release_pfn_clean(pfn);
-#ifdef XXX
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
-#else
- XXX_KVM_PROBE;
-#endif
} else {
if (was_writable)
kvm_release_pfn_dirty(pfn);
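
Re-enabling rmap_recycle() restores a safety valve: once one gfn accumulates more than RMAP_RECYCLE_THRESHOLD reverse mappings, the whole chain is zapped and remote TLBs flushed instead of letting it grow without bound. The trigger reduces to a counter check, sketched here with a hypothetical chain type:

    #include <stdio.h>

    #define RECYCLE_THRESHOLD       1000

    struct chain {
            int     count;          /* mappings currently on this gfn's chain */
    };

    static void
    zap_all(struct chain *c)
    {
            c->count = 0;           /* analog of unmap + remote TLB flush */
    }

    static void
    add_mapping(struct chain *c)
    {
            if (++c->count > RECYCLE_THRESHOLD)
                    zap_all(c);     /* cap the chain rather than grow forever */
    }

    int
    main(void)
    {
            struct chain c = { 0 };
            int i;

            for (i = 0; i < 1500; i++)
                    add_mapping(&c);
            printf("%d\n", c.count);        /* 499: recycled once at 1001 */
            return (0);
    }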
@@ -2480,21 +2513,27 @@ mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
paging64_update_pte(vcpu, sp, spte, new);
}
-
+static int
+need_remote_flush(uint64_t old, uint64_t new)
+{
+ if (!is_shadow_present_pte(old))
+ return (0);
+ if (!is_shadow_present_pte(new))
+ return (1);
+ if ((old ^ new) & PT64_BASE_ADDR_MASK)
+ return (1);
+ old ^= PT64_NX_MASK;
+ new ^= PT64_NX_MASK;
+ return ((old & ~new & PT64_PERM_MASK) != 0);
+}
static void
mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, uint64_t old, uint64_t new)
{
-#ifdef XXX
if (need_remote_flush(old, new))
kvm_flush_remote_tlbs(vcpu->kvm);
- else {
-#else
- {
- XXX_KVM_PROBE;
-#endif
+ else
kvm_mmu_flush_tlb(vcpu);
- }
}
static int
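
need_remote_flush(), added above, answers one question: could another VCPU's TLB still hold a translation the new spte no longer grants? Only a torn-down mapping, a retargeted frame, or a revoked permission forces the expensive remote flush; granting extra rights never does. XOR-ing both values with PT64_NX_MASK flips NX from a deny bit into an allow bit, so the final old & ~new test treats all four permission bits uniformly. A standalone check of that logic; the mask values match the usual x86 definitions, and is_shadow_present_pte() is deliberately simplified (real KVM also excludes the nonpresent marker patterns):

    #include <assert.h>
    #include <stdint.h>

    #define PT_PRESENT_MASK         (1ULL << 0)
    #define PT_WRITABLE_MASK        (1ULL << 1)
    #define PT_USER_MASK            (1ULL << 2)
    #define PT64_NX_MASK            (1ULL << 63)
    #define PT64_BASE_ADDR_MASK     (((1ULL << 52) - 1) & ~0xfffULL)
    #define PT64_PERM_MASK          (PT_PRESENT_MASK | PT_WRITABLE_MASK | \
                                    PT_USER_MASK | PT64_NX_MASK)

    static int
    is_shadow_present_pte(uint64_t pte)
    {
            return ((pte & PT_PRESENT_MASK) != 0);  /* simplified */
    }

    static int
    need_remote_flush(uint64_t old, uint64_t new)
    {
            if (!is_shadow_present_pte(old))
                    return (0);             /* nothing could be cached */
            if (!is_shadow_present_pte(new))
                    return (1);             /* mapping torn down */
            if ((old ^ new) & PT64_BASE_ADDR_MASK)
                    return (1);             /* points at a different frame */
            old ^= PT64_NX_MASK;            /* flip NX so every PERM bit */
            new ^= PT64_NX_MASK;            /* now means "right granted" */
            return ((old & ~new & PT64_PERM_MASK) != 0);
    }

    int
    main(void)
    {
            uint64_t base = 0x1000 | PT_PRESENT_MASK;

            assert(!need_remote_flush(0, base));            /* was absent */
            assert(need_remote_flush(base | PT_WRITABLE_MASK, base));
            assert(!need_remote_flush(base, base | PT_WRITABLE_MASK));
            assert(need_remote_flush(base, base | PT64_NX_MASK));
            return (0);
    }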
@@ -2724,7 +2763,7 @@ __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
!list_is_empty(&vcpu->kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *sp;
- sp = list_head(&vcpu->kvm->arch.active_mmu_pages);
+ sp = list_remove_tail(&vcpu->kvm->arch.active_mmu_pages);
kvm_mmu_zap_page(vcpu->kvm, sp);
KVM_KSTAT_INC(vcpu->kvm, kvmks_mmu_recycled);
}
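
The list_head() to list_remove_tail() change fixes the reclaim direction: in the Linux code this was ported from, freshly allocated shadow pages are inserted at the head of active_mmu_pages, so the tail holds the coldest page and is the right victim; peeking at the head would have sacrificed the most recently allocated page instead. A user-space sketch of head-insert / tail-evict, with hypothetical types:

    #include <stdio.h>
    #include <stdlib.h>

    struct page {
            int             id;
            struct page     *prev, *next;
    };

    struct list {
            struct page     *head, *tail;
    };

    static void
    insert_head(struct list *l, struct page *p)     /* newest at the head */
    {
            p->prev = NULL;
            p->next = l->head;
            if (l->head != NULL)
                    l->head->prev = p;
            else
                    l->tail = p;
            l->head = p;
    }

    static struct page *
    remove_tail(struct list *l)                     /* evict the coldest */
    {
            struct page *p = l->tail;

            if (p == NULL)
                    return (NULL);
            l->tail = p->prev;
            if (l->tail != NULL)
                    l->tail->next = NULL;
            else
                    l->head = NULL;
            return (p);
    }

    int
    main(void)
    {
            struct list l = { NULL, NULL };
            struct page *p;
            int i;

            for (i = 0; i < 3; i++) {
                    p = calloc(1, sizeof (*p));
                    p->id = i;
                    insert_head(&l, p);
            }
            while ((p = remove_tail(&l)) != NULL) {
                    printf("evict %d\n", p->id);    /* 0, 1, 2: oldest first */
                    free(p);
            }
            return (0);
    }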
diff --git a/kvm_vmx.c b/kvm_vmx.c
index ab0e0d6..fcfb11f 100644
--- a/kvm_vmx.c
+++ b/kvm_vmx.c
@@ -4627,11 +4627,18 @@ out:
void
vmx_fini(void)
{
- XXX_KVM_PROBE;
if (enable_vpid) {
mutex_destroy(&vmx_vpid_lock);
kmem_free(vmx_vpid_bitmap, sizeof (ulong_t) *
vpid_bitmap_words);
}
kmem_cache_destroy(kvm_vcpu_cache);
+#ifdef XXX
+ kvm_on_each_cpu(hardware_disable, NULL, 1);
+ kvm_arch_hardware_unsetup();
+ kvm_arch_exit();
+#else
+ XXX_KVM_PROBE;
+#endif
+ kmem_free(bad_page_kma, PAGESIZE);
}