-rw-r--r--  kvm.c        20
-rw-r--r--  kvm_host.h    7
-rw-r--r--  kvm_mdb.c     6
-rw-r--r--  kvm_x86.c    29
4 files changed, 52 insertions(+), 10 deletions(-)
diff --git a/kvm.c b/kvm.c
index 2383b1d..4cefa5b 100644
--- a/kvm.c
+++ b/kvm.c
@@ -426,12 +426,23 @@ kvm_ringbuf_record(kvm_ringbuf_t *ringbuf, uint32_t tag, uint64_t payload)
{
kvm_ringbuf_entry_t *ent = &ringbuf->kvmr_buf[ringbuf->kvmr_ent++ &
(KVM_RINGBUF_NENTRIES - 1)];
+ int id = curthread->t_cpu->cpu_id;
+ hrtime_t tsc = gethrtime_unscaled();
ent->kvmre_tag = tag;
- ent->kvmre_cpuid = curthread->t_cpu->cpu_id;
+ ent->kvmre_cpuid = id;
ent->kvmre_thread = (uintptr_t)curthread;
- ent->kvmre_tsc = gethrtime_unscaled();
+ ent->kvmre_tsc = tsc;
ent->kvmre_payload = payload;
+
+ ent = &ringbuf->kvmr_taglast[tag];
+ ent->kvmre_tag = tag;
+ ent->kvmre_cpuid = id;
+ ent->kvmre_thread = (uintptr_t)curthread;
+ ent->kvmre_tsc = tsc;
+ ent->kvmre_payload = payload;
+
+ ringbuf->kvmr_tagcount[tag]++;
}
/*
@@ -479,10 +490,11 @@ vcpu_load(struct kvm_vcpu *vcpu)
int cpu;
mutex_enter(&vcpu->mutex);
- kpreempt_disable();
- cpu = CPU->cpu_seqid;
installctx(curthread, vcpu, kvm_ctx_save, kvm_ctx_restore, NULL,
NULL, NULL, NULL);
+
+ kpreempt_disable();
+ cpu = CPU->cpu_seqid;
kvm_arch_vcpu_load(vcpu, cpu);
kvm_ringbuf_record(&vcpu->kvcpu_ringbuf,
KVM_RINGBUF_TAG_VCPULOAD, vcpu->cpu);
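
kvm_ringbuf_record() appends into a power-of-two ring: kvmr_ent++ &
(KVM_RINGBUF_NENTRIES - 1) wraps without a modulo, and kvmr_ent itself keeps
counting, so a consumer can tell whether and how far the ring has wrapped.
The new code additionally shadows each entry into a per-tag slot and bumps a
per-tag counter. A minimal consumer-side sketch of the indexing; the walk
helper below is hypothetical (not part of this change) and assumes a quiesced
ring, e.g. one copied out of a crash dump, whose kvmr_ent has not overflowed:

static void
kvm_ringbuf_walk(const kvm_ringbuf_t *rb,
    void (*func)(const kvm_ringbuf_entry_t *, void *), void *arg)
{
	uint32_t first = 0, n = rb->kvmr_ent, i;

	if (n > KVM_RINGBUF_NENTRIES) {
		/* wrapped: oldest entry sits at kvmr_ent & (NENTRIES - 1) */
		first = n - KVM_RINGBUF_NENTRIES;
		n = KVM_RINGBUF_NENTRIES;
	}

	for (i = 0; i < n; i++)
		func(&rb->kvmr_buf[(first + i) &
		    (KVM_RINGBUF_NENTRIES - 1)], arg);
}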
diff --git a/kvm_host.h b/kvm_host.h
index 6d8b3da..d9fee36 100644
--- a/kvm_host.h
+++ b/kvm_host.h
@@ -109,6 +109,11 @@ extern int kvm_io_bus_unregister_dev(struct kvm *, enum kvm_bus,
#define KVM_RINGBUF_TAG_VCPUCLEAR 5
#define KVM_RINGBUF_TAG_VCPULOAD 6
#define KVM_RINGBUF_TAG_VCPUPUT 7
+#define KVM_RINGBUF_TAG_RELOAD 8
+#define KVM_RINGBUF_TAG_EMUFAIL0 9
+#define KVM_RINGBUF_TAG_EMUFAIL1 10
+#define KVM_RINGBUF_TAG_EMUFAIL2 11
+#define KVM_RINGBUF_TAG_MAX 11
typedef struct kvm_ringbuf_entry {
uint32_t kvmre_tag; /* tag for this entry */
@@ -120,6 +125,8 @@ typedef struct kvm_ringbuf_entry {
typedef struct kvm_ringbuf {
kvm_ringbuf_entry_t kvmr_buf[KVM_RINGBUF_NENTRIES]; /* ring buffer */
+ kvm_ringbuf_entry_t kvmr_taglast[KVM_RINGBUF_TAG_MAX + 1];
+ uint32_t kvmr_tagcount[KVM_RINGBUF_TAG_MAX + 1]; /* count of tags */
uint32_t kvmr_ent; /* current entry */
} kvm_ringbuf_t;
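
With kvmr_taglast and kvmr_tagcount in place, "when did this tag last fire,
and how often?" can be answered without walking the ring. A hypothetical
reader, again not part of this change (the caller is assumed to own the
ringbuf):

static void
kvm_ringbuf_tagstat(const kvm_ringbuf_t *rb, uint32_t tag)
{
	const kvm_ringbuf_entry_t *last;

	ASSERT(tag <= KVM_RINGBUF_TAG_MAX);
	last = &rb->kvmr_taglast[tag];

	cmn_err(CE_CONT, "!tag %u: %u hit(s), last on CPU %u at tsc %llx\n",
	    tag, rb->kvmr_tagcount[tag], last->kvmre_cpuid,
	    (u_longlong_t)last->kvmre_tsc);
}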
diff --git a/kvm_mdb.c b/kvm_mdb.c
index 9a681b3..8acab63 100644
--- a/kvm_mdb.c
+++ b/kvm_mdb.c
@@ -346,7 +346,11 @@ kvm_mdb_ringbuf_entry(uintptr_t addr, uint_t flags, int argc,
ent.kvmre_tag == KVM_RINGBUF_TAG_VCPUMIGRATE ? "migrate" :
ent.kvmre_tag == KVM_RINGBUF_TAG_VCPUCLEAR ? "clear" :
ent.kvmre_tag == KVM_RINGBUF_TAG_VCPULOAD ? "load" :
- ent.kvmre_tag == KVM_RINGBUF_TAG_VCPUPUT ? "put" : "????",
+ ent.kvmre_tag == KVM_RINGBUF_TAG_VCPUPUT ? "put" :
+ ent.kvmre_tag == KVM_RINGBUF_TAG_RELOAD ? "reload" :
+ ent.kvmre_tag == KVM_RINGBUF_TAG_EMUFAIL0 ? "efail-0" :
+ ent.kvmre_tag == KVM_RINGBUF_TAG_EMUFAIL1 ? "efail-1" :
+ ent.kvmre_tag == KVM_RINGBUF_TAG_EMUFAIL2 ? "efail-2" : "????",
ent.kvmre_payload);
return (DCMD_OK);
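
Every new tag extends the ternary chain above; the same mapping could be a
table indexed by tag. A sketch of that alternative (purely a style choice;
tags not visible in this diff fall through to the "????" default):

static const char *
kvm_ringbuf_tagname(uint32_t tag)
{
	static const char *names[KVM_RINGBUF_TAG_MAX + 1] = {
		[KVM_RINGBUF_TAG_VCPUMIGRATE] = "migrate",
		[KVM_RINGBUF_TAG_VCPUCLEAR] = "clear",
		[KVM_RINGBUF_TAG_VCPULOAD] = "load",
		[KVM_RINGBUF_TAG_VCPUPUT] = "put",
		[KVM_RINGBUF_TAG_RELOAD] = "reload",
		[KVM_RINGBUF_TAG_EMUFAIL0] = "efail-0",
		[KVM_RINGBUF_TAG_EMUFAIL1] = "efail-1",
		[KVM_RINGBUF_TAG_EMUFAIL2] = "efail-2",
	};

	if (tag > KVM_RINGBUF_TAG_MAX || names[tag] == NULL)
		return ("????");

	return (names[tag]);
}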
diff --git a/kvm_x86.c b/kvm_x86.c
index 3c4839a..4f553d5 100644
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -2577,16 +2577,17 @@ get_segment_base(struct kvm_vcpu *vcpu, int seg)
void
kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
- uint8_t opcodes[4];
+ uint64_t ops, ctx = (uint64_t)context;
unsigned long rip = kvm_rip_read(vcpu);
unsigned long rip_linear;
rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
- kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
+ kvm_read_guest_virt(rip_linear, &ops, 8, vcpu, NULL);
- cmn_err(CE_CONT, "!emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
- context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+ kvm_ringbuf_record(&vcpu->kvcpu_ringbuf, KVM_RINGBUF_TAG_EMUFAIL0, ctx);
+ kvm_ringbuf_record(&vcpu->kvcpu_ringbuf, KVM_RINGBUF_TAG_EMUFAIL1, rip);
+ kvm_ringbuf_record(&vcpu->kvcpu_ringbuf, KVM_RINGBUF_TAG_EMUFAIL2, ops);
}
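
Rather than cmn_err()ing on each failure, an emulation failure now lands in
the ring buffer as three consecutive records: EMUFAIL0 carries the context
string pointer, EMUFAIL1 the guest %rip, and EMUFAIL2 the opcode bytes at
that %rip, with the guest read widened from four bytes to the eight that fit
in a 64-bit payload. A hypothetical decoder for the opcode payload (x86 is
little-endian, so byte 0 is the byte at the failing %rip):

static void
kvm_emufail_opcodes(uint64_t payload, uint8_t opcodes[8])
{
	int i;

	for (i = 0; i < 8; i++)
		opcodes[i] = (payload >> (i * 8)) & 0xff;
}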
static struct x86_emulate_ops emulate_ops = {
@@ -3364,7 +3365,7 @@ native_set_debugreg(int regno, unsigned long value)
static int
vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
- int r;
+ int r, loaded;
int req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
@@ -3417,6 +3418,7 @@ vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
+ loaded = CPU->cpu_id;
clear_bit(KVM_REQ_KICK, &vcpu->requests);
if (vcpu->requests || issig(JUSTLOOKING)) {
@@ -3428,6 +3430,23 @@ vcpu_enter_guest(struct kvm_vcpu *vcpu)
inject_pending_event(vcpu);
+ if (CPU->cpu_id != loaded) {
+ /*
+ * The kpreempt_disable(), above, disables kernel migration --
+ * but it doesn't disable migration when we block. The call
+ * to inject_pending_event() can, through a circuitous path,
+ * block, and we may therefore have moved to a different CPU.
+ * That's actually okay -- we just need to reload our state
+ * in this case.
+ */
+ kvm_ringbuf_record(&vcpu->kvcpu_ringbuf,
+ KVM_RINGBUF_TAG_RELOAD, loaded);
+ kvm_x86_ops->prepare_guest_switch(vcpu);
+
+ if (vcpu->fpu_active)
+ kvm_load_guest_fpu(vcpu);
+ }
+
cli();
/* enable NMI/IRQ window open exits if needed */
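
The block above is the heart of the change: kpreempt_disable() prevents
involuntary migration, but a voluntary block, here buried inside
inject_pending_event(), can still move the thread to another CPU, leaving the
prepared per-CPU guest state (and the latched CPU id) stale. The vcpu_load()
reorder earlier in this diff looks like the same fix: CPU->cpu_seqid is now
latched only after installctx(), which can block. In schematic form, with
hypothetical helpers standing in for the real calls:

static void
enter_guest_schematic(struct kvm_vcpu *vcpu)
{
	int loaded;

	kpreempt_disable();
	loaded = CPU->cpu_id;
	load_percpu_guest_state(vcpu);	/* hypothetical helper */

	do_work_that_may_block(vcpu);	/* may block, and thus migrate, us */

	if (CPU->cpu_id != loaded) {
		/* we blocked and migrated; redo the per-CPU setup */
		load_percpu_guest_state(vcpu);
	}

	cli();		/* interrupts off: no migration past this point */
	/* ... enter the guest ... */
}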