author		Bryan Cantrill <bryan@joyent.com>	2011-08-08 22:15:48 -0700
committer	Bryan Cantrill <bryan@joyent.com>	2011-08-08 22:15:48 -0700
commit		1fcd3312a197a56661bbaecc7027fa3823ee2e6f (patch)
tree		0c00c5584807a74e4e1b0dac48fa537fa08f8818
parent		aca837db5c91a16208314a26872fc144cc54d35f (diff)
download	illumos-kvm-1fcd3312a197a56661bbaecc7027fa3823ee2e6f.tar.gz
HVM-582 remove more XXXs
HVM-583 KVM DTrace probes should all use KVM_TRACE macros
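
HVM-583 moves the module's DTrace probes onto the KVM_TRACE*() wrappers from
kvm_impl.h instead of open-coded DTRACE_PROBEn() calls, so every probe carries
the kvm__ name prefix; HVM-582 drops more of the XXX placeholder blocks
outright. A minimal sketch of the wrapper pattern, assuming only what this
diff shows (KVM_TRACE and KVM_TRACE1 are copied from the kvm_impl.h hunk; the
two call sites below are hypothetical stand-ins for kvm_cpu_vmxoff() and the
default: arm of kvm_ioctl()):

    /* Wrappers as defined in kvm_impl.h; the SDT machinery is in <sys/sdt.h>. */
    #include <sys/sdt.h>

    #define KVM_TRACE(name) \
            DTRACE_PROBE(kvm__##name);

    #define KVM_TRACE1(name, type1, arg1) \
            DTRACE_PROBE1(kvm__##name, type1, arg1);

    /* Hypothetical call sites: the caller supplies only the probe suffix. */
    static void
    example_vmxoff(void)
    {
            KVM_TRACE(vmx__vmxoff);         /* fires the SDT probe kvm__vmx__vmxoff */
    }

    static void
    example_bad_ioctl(int cmd)
    {
            KVM_TRACE1(bad__ioctl, int, cmd);
    }

Keeping the kvm__ prefix inside the macro means a call site cannot misname a
probe, and converting the remaining DTRACE_PROBEn() sites (vmclear, vmptrld,
vmxon, vexit, vrun, and friends in the diff below) makes the probe namespace
uniform.
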
-rw-r--r--	kvm.c		 66
-rw-r--r--	kvm_host.h	  4
-rw-r--r--	kvm_impl.h	  3
-rw-r--r--	kvm_vmx.c	 27
-rw-r--r--	kvm_x86.c	144
5 files changed, 50 insertions, 194 deletions
diff --git a/kvm.c b/kvm.c
@@ -449,16 +449,10 @@ kvm_ctx_restore(void *arg)
 	kvm_arch_vcpu_load(vcpu, cpu);
 }
 
-#ifdef XXX_KVM_DECLARATION
-#define	pfn_valid(pfn) ((pfn < physmax) && (pfn != PFN_INVALID))
-#else
-#define	pfn_valid(pfn) (pfn != PFN_INVALID)
-#endif
-
 inline int
 kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (pfn_valid(pfn)) {
+	if (pfn != PFN_INVALID) {
 #ifdef XXX
 		struct page *page = compound_head(pfn_to_page(pfn));
 		return (PageReserved(page));
@@ -936,8 +930,6 @@ skip_lpage:
 	 *	- kvm_is_visible_gfn (mmu_check_roots)
 	 */
 	kvm_arch_flush_shadow(kvmp);
-
-	/* XXX: how many bytes to free??? */
 	kmem_free(old_memslots, sizeof (struct kvm_memslots));
 }
@@ -1299,18 +1291,8 @@ kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 	if (kvm_is_error_hva(addr))
 		return (-EFAULT);
-#ifdef XXX
-	pagefault_disable();
-#else
-	XXX_KVM_PROBE;
-#endif
-
 	r = copyin((caddr_t)addr + offset, data, len);
-#ifdef XXX
-	pagefault_enable();
-#else
-	XXX_KVM_PROBE;
-#endif
+
 	if (r)
 		return (-EFAULT);
@@ -1329,7 +1311,6 @@ kvm_write_guest_page(struct kvm *kvm,
 	if (kvm_is_error_hva(addr))
 		return (-EFAULT);
-	/* XXX - addr could be user or kernel */
 	if (addr >= kernelbase) {
 		bcopy(data, (caddr_t)(addr + offset), len);
 	} else {
@@ -1778,25 +1759,15 @@ out_fail:
 
 void
-kvm_guest_exit(void)
+kvm_guest_exit(struct kvm_vcpu *vcpu)
 {
-#ifdef XXX
-	account_system_vtime(current);
-	current->flags &= ~PF_VCPU;
-#else
-	XXX_KVM_PROBE;
-#endif
+	KVM_TRACE1(guest__exit, struct kvm_vcpu *, vcpu);
 }
 
 void
-kvm_guest_enter(void)
+kvm_guest_enter(struct kvm_vcpu *vcpu)
 {
-#ifdef XXX
-	account_system_vtime(current);
-	current->flags |= PF_VCPU;
-#else
-	XXX_KVM_PROBE;
-#endif
+	KVM_TRACE1(guest__entry, struct kvm_vcpu *, vcpu);
 }
 
 /*
@@ -2017,21 +1988,13 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 	minor_t minor;
 	kvm_devstate_t *ksp;
 	void *argp = (void *)arg;
+	struct kvm_pit_config pit;
 
 	minor = getminor(dev);
 	ksp = ddi_get_soft_state(kvm_state, minor);
 	if (ksp == NULL)
 		return (ENXIO);
 
-	union {
-		struct kvm_pit_state ps;
-		struct kvm_pit_state2 ps2;
-#ifdef XXX_KVM_DECLARATION
-		struct kvm_memory_alias alias;
-#endif
-		struct kvm_pit_config pit_config;
-	} u;
-
 	struct {
 		int cmd;		/* command */
 		void *func;		/* function to call */
@@ -2211,8 +2174,7 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 		break;
 
 	case KVM_CREATE_PIT2:
-		if (copyin(argp, &u.pit_config,
-		    sizeof (struct kvm_pit_config)) != 0) {
+		if (copyin(argp, &pit, sizeof (struct kvm_pit_config)) != 0) {
 			rval = EFAULT;
 			break;
 		}
@@ -2227,7 +2189,7 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 		}
 
 		if (cmd == KVM_CREATE_PIT) {
-			u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
+			pit.flags = KVM_PIT_SPEAKER_DUMMY;
 		} else {
 			ASSERT(cmd == KVM_CREATE_PIT2);
 		}
@@ -2237,7 +2199,7 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 		if (kvmp->arch.vpit != NULL) {
 			rval = EEXIST;
 		} else if ((kvmp->arch.vpit = kvm_create_pit(kvmp,
-		    u.pit_config.flags)) == NULL) {
+		    pit.flags)) == NULL) {
 			rval = ENOMEM;
 		}
@@ -2739,11 +2701,9 @@ kvm_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
 		*rv = 0;
 		break;
 	}
+
 	default:
-#ifndef XXX
-		XXX_KVM_PROBE;
-		DTRACE_PROBE1(kvm__xxx__ioctl, int, cmd);
-#endif
+		KVM_TRACE1(bad__ioctl, int, cmd);
 		rval = EINVAL;  /* x64, others may do other things... */
 	}
@@ -2820,7 +2780,7 @@ kvm_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
 		    PAGESIZE*2, PROT_READ | PROT_WRITE | PROT_USER,
 		    DEVMAP_DEFAULTS, NULL);
 
-		*maplen = PAGESIZE*2;
+		*maplen = PAGESIZE * 2;
 
 		return (res);
 	}
diff --git a/kvm_host.h b/kvm_host.h
@@ -444,8 +444,8 @@ extern void kvm_free_irq_source_id(struct kvm *, int);
 /* For vcpu->arch.iommu_flags */
 #define	KVM_IOMMU_CACHE_COHERENCY	0x1
 
-extern void kvm_guest_enter(void);
-extern void kvm_guest_exit(void);
+extern void kvm_guest_enter(struct kvm_vcpu *);
+extern void kvm_guest_exit(struct kvm_vcpu *);
 
 #ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
 #define	unalias_gfn_instantiation	unalias_gfn
diff --git a/kvm_impl.h b/kvm_impl.h
@@ -13,6 +13,9 @@
 #define	XXX_KVM_SYNC_PROBE	DTRACE_PROBE2(kvm__xxx__sync, \
 	char *, __FILE__, int, __LINE__)
 
+#define	KVM_TRACE(name) \
+	DTRACE_PROBE(kvm__##name);
+
 #define	KVM_TRACE1(name, type1, arg1) \
 	DTRACE_PROBE1(kvm__##name, type1, arg1);
diff --git a/kvm_vmx.c b/kvm_vmx.c
@@ -433,7 +433,7 @@ __invvpid(int ext, uint16_t vpid, gva_t gva)
 		uint64_t gva;
 	} operand = { vpid, 0, gva };
 
-	DTRACE_PROBE2(kvm__vmx__invvpid, int, vpid, uint64_t, gva);
+	KVM_TRACE2(vmx__invvpid, int, vpid, uint64_t, gva);
 
 	/* BEGIN CSTYLED */
 	__asm__ volatile (ASM_VMX_INVVPID
@@ -450,7 +450,7 @@ __invept(int ext, uint64_t eptp, gpa_t gpa)
 		uint64_t eptp, gpa;
 	} operand = {eptp, gpa};
 
-	DTRACE_PROBE2(kvm__vmx__invept, uint64_t, eptp, uint64_t, gpa);
+	KVM_TRACE2(vmx__invept, uint64_t, eptp, uint64_t, gpa);
 
 	/* BEGIN CSTYLED */
 	__asm__ volatile (ASM_VMX_INVEPT
@@ -477,7 +477,7 @@ vmcs_clear(uint64_t vmcs_pa)
 {
 	unsigned char error;
 
-	DTRACE_PROBE1(kvm__vmx__vmclear, uint64_t, vmcs_pa);
+	KVM_TRACE1(vmx__vmclear, uint64_t, vmcs_pa);
 
 	/*CSTYLED*/
 	__asm__ volatile (__ex(ASM_VMX_VMCLEAR_RAX) "\n\tsetna %0\n"
@@ -552,7 +552,7 @@ vmcs_readl(unsigned long field)
 	__asm__ volatile (ASM_VMX_VMREAD_RDX_RAX
 	    : "=a"(value) : "d"(field) : "cc");
 
-	DTRACE_PROBE2(kvm__vmx__vmread, long, field, long, value);
+	KVM_TRACE2(vmx__vmread, long, field, long, value);
 
 	return (value);
 }
@@ -594,7 +594,7 @@ __vmwrite(unsigned long field, unsigned long value)
 	    : "=q"(err) : "a" (value), "d" (field)
 	    : "cc", "memory");
 
-	DTRACE_PROBE3(kvm__vmx__vmwrite, long, field,
+	KVM_TRACE3(vmx__vmwrite, long, field,
 	    long, value, uint8_t, err);
 
 	/* XXX the following should be ifdef debug... */
@@ -841,7 +841,7 @@ vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		current_vmcs[cpu] = vmx->vmcs;
 
-		DTRACE_PROBE1(kvm__vmx__vmptrld, uint64_t, phys_addr);
+		KVM_TRACE1(vmx__vmptrld, uint64_t, phys_addr);
 
 		/*CSTYLED*/
 		__asm__ volatile (ASM_VMX_VMPTRLD_RAX "; setna %0"
@@ -1308,7 +1308,7 @@ vmx_hardware_enable(void *garbage)
 		    FEATURE_CONTROL_VMXON_ENABLED);
 	}
 
-	DTRACE_PROBE1(kvm__vmx__vmxon, uint64_t, phys_addr);
+	KVM_TRACE1(vmx__vmxon, uint64_t, phys_addr);
 
 	setcr4(getcr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
 	/* BEGIN CSTYLED */
@@ -1329,7 +1329,7 @@ vmx_hardware_enable(void *garbage)
 static void
 kvm_cpu_vmxoff(void)
 {
-	DTRACE_PROBE(kvm__vmx__vmxoff);
+	KVM_TRACE(vmx__vmxoff);
 
 	/* BEGIN CSTYLED */
 	__asm__ volatile ((ASM_VMX_VMXOFF) : : : "cc");
@@ -3254,8 +3254,7 @@ handle_cr(struct kvm_vcpu *vcpu)
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	cr = exit_qualification & 15;
 	reg = (exit_qualification >> 8) & 15;
-	DTRACE_PROBE3(kvm__cr, int, cr, int, reg, int,
-	    (exit_qualification >> 4) & 3);
+	KVM_TRACE3(cr, int, cr, int, reg, int, (exit_qualification >> 4) & 3);
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
 		val = kvm_register_read(vcpu, reg);
@@ -3903,7 +3902,7 @@ vmx_handle_exit(struct kvm_vcpu *vcpu)
 	/* Always read the guest rip when exiting */
 	rip = vmcs_readl(GUEST_RIP);
 
-	DTRACE_PROBE2(kvm__vexit, unsigned long, rip, uint32_t, exit_reason);
+	KVM_TRACE2(vexit, unsigned long, rip, uint32_t, exit_reason);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -4128,7 +4127,7 @@ vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
-	DTRACE_PROBE1(kvm__vrun, unsigned long, vcpu->arch.regs[VCPU_REGS_RIP]);
+	KVM_TRACE1(vrun, unsigned long, vcpu->arch.regs[VCPU_REGS_RIP]);
 
 	/*
 	 * When single-stepping over STI and MOV SS, we must clear the
@@ -4146,9 +4145,9 @@ vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmcs_writel(HOST_CR0, read_cr0());
 
 	if (vmx->launched) {
-		DTRACE_PROBE1(kvm__vmx__vmresume, struct vcpu_vmx *, vmx);
+		KVM_TRACE1(vmx__vmresume, struct vcpu_vmx *, vmx);
 	} else {
-		DTRACE_PROBE1(kvm__vmx__vmlaunch, struct vcpu_vmx *, vmx);
+		KVM_TRACE1(vmx__vmlaunch, struct vcpu_vmx *, vmx);
 	}
 
 	__asm__(
diff --git a/kvm_x86.c b/kvm_x86.c
@@ -37,12 +37,13 @@
 #include "kvm_mmu.h"
 #include "kvm_cache_regs.h"
 
-/* XXX These don't belong here! */
 extern caddr_t smmap64(caddr_t addr, size_t len, int prot, int flags,
     int fd, off_t pos);
 extern int lwp_sigmask(int, uint_t, uint_t, uint_t, uint_t);
+extern uint64_t cpu_freq_hz;
 
 static unsigned long empty_zero_page[PAGESIZE / sizeof (unsigned long)];
+static uint64_t cpu_tsc_khz;
 
 /*
  * Globals
@@ -726,10 +727,6 @@ kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
 	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
 }
 
-static uint64_t cpu_tsc_khz;
-/* XXX extern?! */
-extern uint64_t cpu_freq_hz;
-
 static void
 kvm_write_guest_time(struct kvm_vcpu *v)
 {
@@ -1492,21 +1489,11 @@ __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 	vcpu_load(vcpu);
 
-#ifdef XXX
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
 	for (i = 0; i < msrs->nmsrs; i++) {
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
 	}
-#ifdef XXX
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
 	vcpu_put(vcpu);
 
 	return (i);
@@ -3077,12 +3064,7 @@ kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = 0;
 		break;
 	case KVM_HC_MMU_OP:
-#ifdef XXX
-		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
-#else
-		XXX_KVM_PROBE;
 		ret = -ENOSYS;
-#endif
 		break;
 	default:
 		ret = -ENOSYS;
@@ -3242,18 +3224,9 @@ vapic_exit(struct kvm_vcpu *vcpu)
 	if (!apic || !apic->vapic_addr)
 		return;
-#ifdef XXX
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
+
 	kvm_release_page_dirty(apic->vapic_page);
 	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGESHIFT);
-#ifdef XXX
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
 }
 
 static void
@@ -3442,11 +3415,6 @@ vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	cli();
 	clear_bit(KVM_REQ_KICK, &vcpu->requests);
-#ifdef XXX
-	smp_mb__after_clear_bit();
-#else
-	XXX_KVM_PROBE;
-#endif
 
 	if (vcpu->requests || issig(JUSTLOOKING)) {
 		set_bit(KVM_REQ_KICK, &vcpu->requests);
@@ -3468,12 +3436,8 @@ vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		update_cr8_intercept(vcpu);
 		kvm_lapic_sync_to_vapic(vcpu);
 	}
-#ifdef XXX
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-#else
-	XXX_KVM_PROBE;
-#endif
-	kvm_guest_enter();
+
+	kvm_guest_enter(vcpu);
 
 	if (vcpu->arch.switch_db_regs) {
 		set_debugreg(0, 7);
@@ -3503,30 +3467,10 @@ vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	sti();
 
-#ifdef XXX
-	local_irq_enable(); /* XXX - should be ok with kpreempt_enable below */
-
-	barrier();
-#else
-	XXX_KVM_PROBE;
-#endif
 	KVM_VCPU_KSTAT_INC(vcpu, kvmvs_exits);
-	kvm_guest_exit();
+	kvm_guest_exit(vcpu);
 	kpreempt_enable();
-#ifdef XXX
-	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-
-	/*
-	 * Profile KVM exit RIPs:
-	 */
-	if (unlikely(prof_on == KVM_PROFILING)) {
-		unsigned long rip = kvm_rip_read(vcpu);
-		profile_hit(KVM_PROFILING, (void *)rip);
-	}
-#else
-	XXX_KVM_PROBE;
-#endif
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	r = kvm_x86_ops->handle_exit(vcpu);
@@ -3550,11 +3494,6 @@ __vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 	}
 
-#ifdef XXX
-	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
 	vapic_enter(vcpu);
 
 	r = 1;
@@ -3562,17 +3501,8 @@ __vcpu_run(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
 			r = vcpu_enter_guest(vcpu);
 		else {
-#ifdef XXX
-			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-#else
-			XXX_KVM_SYNC_PROBE;
-#endif
 			kvm_vcpu_block(vcpu);
-#ifdef XXX
-			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-#else
-			XXX_KVM_SYNC_PROBE;
-#endif
+
 			if (test_and_clear_bit(KVM_REQ_UNHALT,
 			    &vcpu->requests)) {
 				switch (vcpu->arch.mp_state) {
@@ -3616,11 +3546,7 @@ __vcpu_run(struct kvm_vcpu *vcpu)
 			KVM_VCPU_KSTAT_INC(vcpu, kvmvs_signal_exits);
 		}
 	}
-#ifdef XXX
-	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
+	KVM_TRACE3(vcpu__run, char *, __FILE__, int, __LINE__, uint64_t, vcpu);
 
 	post_kvm_run_save(vcpu);
@@ -3653,36 +3579,18 @@ kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		kvm_set_cr8(vcpu, kvm_run->cr8);
 
 	if (vcpu->arch.pio.cur_count) {
-#ifdef XXX
-		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-#else
-		XXX_KVM_SYNC_PROBE;
-#endif
-		r = complete_pio(vcpu);
-#ifdef XXX
-		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-#else
-		XXX_KVM_SYNC_PROBE;
-#endif
-		if (r)
+		if ((r = complete_pio(vcpu)) != 0)
 			goto out;
 	}
+
 	if (vcpu->mmio_needed) {
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
-#ifdef XXX
-		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-#else
-		XXX_KVM_SYNC_PROBE;
-#endif
-		r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
-		    EMULTYPE_NO_DECODE);
-#ifdef XXX
-		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-#else
-		XXX_KVM_SYNC_PROBE;
-#endif
+
+		r = emulate_instruction(vcpu,
+		    vcpu->arch.mmio_fault_cr2, 0, EMULTYPE_NO_DECODE);
+
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write. Back to userspace.
@@ -4598,18 +4506,8 @@ void
 fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;
-#ifdef XXX
-	/*
-	 * Touch the fpu the first time in non atomic context as if
-	 * this is the first fpu instruction the exception handler
-	 * will fire before the instruction returns and it'll have to
-	 * allocate ram with GFP_KERNEL.
-	 */
-	if (!used_math())
-#else
-	XXX_KVM_PROBE;
-#endif
-		kvm_fx_save(&vcpu->arch.host_fx_image);
+
+	kvm_fx_save(&vcpu->arch.host_fx_image);
 
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	kpreempt_disable();
@@ -4809,10 +4707,6 @@ kvm_arch_hardware_unsetup(void)
 void
 kvm_arch_exit(void)
 {
-	/*
-	 * kvm_x86_ops = NULL;
-	 * XXX kvm_mmu_module_exit();
-	 */
 }
 
 void
@@ -4930,7 +4824,7 @@ kvm_free_vcpus(struct kvm *kvmp)
 	int ii, maxcpus;
 
 	maxcpus = kvmp->online_vcpus;
-	XXX_KVM_SYNC_PROBE;
+
 	for (ii = 0; ii < maxcpus; ii++)
 		kvm_unload_vcpu_mmu(kvmp->vcpus[ii]);
@@ -5244,7 +5138,7 @@ native_read_msr_safe(unsigned int msr, int *err)
 		ret = native_read_msr(msr);
 		*err = 0;
 	} else {
-		*err = EINVAL; /* XXX probably not right... */
+		*err = EINVAL;
 	}
 	no_trap();
@@ -5261,7 +5155,7 @@ native_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
 		native_write_msr(msr, low, high);
 	} else {
-		err = EINVAL; /* XXX probably not right... */
+		err = EINVAL;
 	}
 	no_trap();