author    Max Bruning <max@joyent.com>    2011-08-01 07:28:04 -0700
committer Max Bruning <max@joyent.com>    2011-08-01 07:29:42 -0700
commit    8d3d4a53bb8de182a3686485ee6a0f6acaa8c3e4 (patch)
tree      1b27676be896bb1cd8be66fac84f4c8c840815ff
parent    d158c438dca2eb7ee01426c87daa76fc4ea2a335 (diff)
download  illumos-kvm-8d3d4a53bb8de182a3686485ee6a0f6acaa8c3e4.tar.gz
HVM-538 Implement smp_rmb/smp_wmb in kvm
-rw-r--r--  kvm.c                 | 19
-rw-r--r--  kvm_coalesced_mmio.c  |  5
-rw-r--r--  kvm_ioapic.c          |  9
-rw-r--r--  kvm_irq.c             |  4
-rw-r--r--  kvm_mmu.c             |  7
-rw-r--r--  kvm_paging_tmpl.h     |  2
-rw-r--r--  kvm_x86.c             |  5
-rw-r--r--  kvm_x86.h             |  8

8 files changed, 18 insertions(+), 41 deletions(-)
diff --git a/kvm.c b/kvm.c
index ff803ea..eaf655b 100644
--- a/kvm.c
+++ b/kvm.c
@@ -496,11 +496,7 @@ vcpu_load(struct kvm_vcpu *vcpu)
struct kvm_vcpu *
kvm_get_vcpu(struct kvm *kvm, int i)
{
-#ifdef XXX
smp_rmb();
-#else
- XXX_KVM_PROBE;
-#endif
return (kvm->vcpus[i]);
}
@@ -1555,11 +1551,8 @@ kvm_vm_ioctl_create_vcpu(struct kvm *kvm, uint32_t id, int *rval_p)
/* XXX need to protect online_vcpus */
kvm->vcpus[kvm->online_vcpus] = vcpu;
-#ifdef XXX
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
+
atomic_inc_32(&kvm->online_vcpus);
if (kvm->bsp_vcpu_id == id)
@@ -2438,17 +2431,11 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
}
} else
goto create_irqchip_unlock;
-#ifdef XXX
+
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
kvmp->arch.vpic = vpic;
-#ifdef XXX
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
+
rval = kvm_setup_default_irq_routing(kvmp);
if (rval) {
mutex_enter(&kvmp->irq_lock);
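
The kvm.c hunks above restore both halves of a classic publish/consume pairing: kvm_vm_ioctl_create_vcpu() stores the new vcpu pointer, issues smp_wmb(), and only then bumps online_vcpus, while kvm_get_vcpu() issues the matching smp_rmb() before dereferencing vcpus[i]. The same store/smp_wmb() versus smp_rmb()/load pairing recurs in the coalesced-MMIO, ioapic, and irqchip hunks below. A minimal standalone sketch of the pattern, with hypothetical names rather than the driver's actual structures:

    /*
     * Publish/consume sketch (hypothetical names). smp_wmb()/smp_rmb()
     * are the compiler barriers this commit adds in kvm_x86.h.
     */
    #define smp_wmb() __asm__ __volatile__("" ::: "memory")
    #define smp_rmb() __asm__ __volatile__("" ::: "memory")

    struct vcpu;                            /* opaque for the sketch */
    static struct vcpu *vcpus[64];
    static volatile unsigned int online;    /* published entry count */

    static void
    publish_vcpu(struct vcpu *v)
    {
            vcpus[online] = v;      /* 1. store the payload */
            smp_wmb();              /* 2. payload visible before count */
            online++;               /* 3. publish the slot */
    }

    static struct vcpu *
    get_vcpu(unsigned int i)
    {
            if (i >= online)
                    return (NULL);
            smp_rmb();              /* pairs with smp_wmb() in publisher */
            return (vcpus[i]);
    }

Without the barriers, the compiler is free to sink the vcpus[] store below the counter increment (or hoist the reader's load above the check), letting a consumer observe the new count before the pointer it guards.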
diff --git a/kvm_coalesced_mmio.c b/kvm_coalesced_mmio.c
index 7c097e3..d231c2e 100644
--- a/kvm_coalesced_mmio.c
+++ b/kvm_coalesced_mmio.c
@@ -72,11 +72,8 @@ coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr,
ring->coalesced_mmio[ring->last].phys_addr = addr;
ring->coalesced_mmio[ring->last].len = len;
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-#ifdef XXX
+
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
mutex_exit(&dev->lock);
return (0);
diff --git a/kvm_ioapic.c b/kvm_ioapic.c
index 05a5323..b8fe29a 100644
--- a/kvm_ioapic.c
+++ b/kvm_ioapic.c
@@ -112,11 +112,8 @@ update_handled_vectors(struct kvm_ioapic *ioapic)
__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
memcpy(ioapic->handled_vectors, handled_vectors,
sizeof (handled_vectors));
-#ifdef XXX
+
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
}
static void
@@ -265,11 +262,7 @@ kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-#ifdef XXX
smp_rmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
if (!test_bit(vector, ioapic->handled_vectors))
return;
diff --git a/kvm_irq.c b/kvm_irq.c
index 8772afc..1e8eb8f 100644
--- a/kvm_irq.c
+++ b/kvm_irq.c
@@ -37,11 +37,7 @@ irqchip_in_kernel(struct kvm *kvm)
int ret;
ret = (pic_irqchip(kvm) != NULL);
-#ifdef XXX
smp_rmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
return (ret);
}
diff --git a/kvm_mmu.c b/kvm_mmu.c
index 6e97936..6d60ed1 100644
--- a/kvm_mmu.c
+++ b/kvm_mmu.c
@@ -1844,10 +1844,10 @@ nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
#ifdef XXX
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- smp_rmb();
#else
XXX_KVM_PROBE;
#endif
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
/* mmio */
@@ -2069,11 +2069,10 @@ tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, uint32_t error_code)
#ifdef XXX
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- smp_rmb();
#else
XXX_KVM_PROBE;
#endif
-
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
if (is_error_pfn(pfn)) {
kvm_release_pfn_clean(pfn);
@@ -2508,10 +2507,10 @@ mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
#ifdef XXX
vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
- smp_rmb();
#else
XXX_KVM_PROBE;
#endif
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, gfn);
if (is_error_pfn(pfn)) {
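
In the three kvm_mmu.c hunks the smp_rmb() moves out of the #ifdef XXX block, so the read barrier now executes even though the mmu-notifier sequence check it supports is still stubbed out in this port. Upstream, the barrier orders the mmu_notifier_seq sample before the gfn_to_pfn() lookup so that a later retry test is sound. A simplified sketch of that upstream pattern (names follow Linux KVM; pfn_t, kvm->mmu_lock, and the retry half are assumptions here, not yet ported):

    /*
     * Sketch of the mmu-notifier retry pattern. The sequence sample
     * must happen before the pfn lookup, hence the smp_rmb().
     */
    static pfn_t
    map_gfn(struct kvm *kvm, gfn_t gfn)
    {
            unsigned long mmu_seq;
            pfn_t pfn;

    retry:
            mmu_seq = kvm->mmu_notifier_seq;  /* sample the counter */
            smp_rmb();                        /* sample before the lookup */
            pfn = gfn_to_pfn(kvm, gfn);       /* may race with an unmap */

            mutex_enter(&kvm->mmu_lock);
            if (mmu_seq != kvm->mmu_notifier_seq) {
                    /* an invalidation ran in between: drop and retry */
                    mutex_exit(&kvm->mmu_lock);
                    kvm_release_pfn_clean(pfn);
                    goto retry;
            }
            /* ... install the mapping under the lock ... */
            mutex_exit(&kvm->mmu_lock);
            return (pfn);
    }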
diff --git a/kvm_paging_tmpl.h b/kvm_paging_tmpl.h
index 46f26fe..84a1c85 100644
--- a/kvm_paging_tmpl.h
+++ b/kvm_paging_tmpl.h
@@ -471,8 +471,8 @@ FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
}
#ifdef XXX
mmu_seq = vcpu->kvm->mmu_notifier_seq;
- smp_rmb();
#endif /* XXX */
+ smp_rmb();
pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
/* mmio */
diff --git a/kvm_x86.c b/kvm_x86.c
index a1ab497..7a661a6 100644
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -137,12 +137,9 @@ kvm_define_shared_msr(unsigned slot, uint32_t msr)
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
shared_msrs_global.msrs[slot] = msr;
-#ifdef XXX
+
/* we need to ensure shared_msrs_global has been updated */
smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
}
static void
diff --git a/kvm_x86.h b/kvm_x86.h
index af6a38b..bbe2daf 100644
--- a/kvm_x86.h
+++ b/kvm_x86.h
@@ -274,4 +274,12 @@ typedef struct kvm_vcpu_events {
0; \
})
+/*
+ * The following should provide an optimization barrier.
+ * If the system does reorder loads and stores, this needs to be changed.
+ */
+
+#define smp_wmb() __asm__ __volatile__("" ::: "memory")
+#define smp_rmb() __asm__ __volatile__("" ::: "memory")
+
#endif /* __KVM_X86_H */
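
The two macros are compiler-only barriers: an empty asm with a "memory" clobber forbids the compiler from caching values in registers or reordering memory accesses across it, and on x86 that is enough because the hardware already keeps loads ordered with loads and stores ordered with stores. Cases the compiler barrier does not cover, such as ordering non-temporal (write-combining) stores, would call for real fence instructions; hypothetical fenced variants, not part of this commit, might look like:

    /* Hypothetical fenced variants, e.g. for non-temporal stores. */
    #define smp_wmb_fence() __asm__ __volatile__("sfence" ::: "memory")
    #define smp_rmb_fence() __asm__ __volatile__("lfence" ::: "memory")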