/*
* Ported from Linux by Joyent.
* Copyright 2011 Joyent, Inc.
*/

#include "kvm_bitops.h"
#include "kvm_host.h"
#include "kvm_cache_regs.h"
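
/*
 * Read a general-purpose guest register.  If the register has not yet
 * been read out of hardware for this vcpu (its bit in regs_avail is
 * clear), have the vendor module cache it first.
 */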
unsigned long
kvm_register_read(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return (vcpu->arch.regs[reg]);
}
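
/*
 * Store a new value for a guest register, marking it both available and
 * dirty so that it is written back to hardware before the next guest
 * entry.
 */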
void
kvm_register_write(struct kvm_vcpu *vcpu, enum kvm_reg reg, unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
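
/*
 * Convenience accessors for the guest instruction pointer.  A
 * hypothetical caller advancing %rip past an emulated instruction of
 * length insn_len would do:
 *
 *	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */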
unsigned long
kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RIP));
}

void
kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
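
/*
 * Read one of the four PAE page-directory-pointer-table entries,
 * refreshing the cached copies from hardware first if they are not
 * marked available for this vcpu.
 */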
uint64_t
kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
	    (unsigned long *)&vcpu->arch.regs_avail)) {
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
	}

	return (vcpu->arch.pdptrs[index]);
}
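
/*
 * Read CR0, or a subset of its bits.  Bits the guest is allowed to own
 * may be stale in the cached copy, so if the caller asks for any
 * guest-owned bit, decache CR0 from hardware before answering.
 */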
ulong
kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);

	return (vcpu->arch.cr0 & mask);
}

ulong
kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return (kvm_read_cr0_bits(vcpu, ~0UL));
}
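
/*
 * Read CR4, or a subset of its bits, decaching guest-owned bits from
 * hardware first, exactly as for CR0 above.
 */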
ulong
kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;

	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	return (vcpu->arch.cr4 & mask);
}

ulong
kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return (kvm_read_cr4_bits(vcpu, ~0UL));
}