/*
 * This header file has been slimmed down and adopted from the Linux Kernel's
 * <asm-x86/msr.h>
 */
#ifndef _KVM_MSR_H
#define	_KVM_MSR_H

#include "msr-index.h"

#include <sys/ontrap.h>
#include <sys/errno.h>

/*
 * A 64-bit MSR value, accessible either as the whole quantity (b.q) or
 * as its 32-bit halves (b.l / b.h) through the anonymous struct inside
 * the union.  l/h correspond to the EAX/EDX halves used by the rdmsr
 * and wrmsr instructions (little-endian x86 layout).
 */
typedef struct msr {
	union {
		struct {
			uint32_t l;	/* low 32 bits (EAX) */
			uint32_t h;	/* high 32 bits (EDX) */
		};
		uint64_t q;		/* full 64-bit value */
	}b;
} msr_t;

/*
 * Bundle describing a single MSR access: the MSR number, an inline
 * value, an optional pointer to caller-provided msr storage, and an
 * error code filled in by the access routine.
 * NOTE(review): presumably used as a payload for per-CPU/cross-call MSR
 * accesses, as in the Linux original — confirm against callers.
 */
typedef struct msr_info {
	uint32_t msr_no;	/* MSR index to read/write */
	struct msr reg;		/* in/out value for the access */
	struct msr *msrs;	/* optional external value storage */
	int err;		/* result/error code of the access */
} msr_info_t;

/*
 * Register-array variant of msr_info: points at caller-provided 32-bit
 * register storage, plus an error code filled in by the access routine.
 */
typedef struct msr_regs_info {
	uint32_t *regs;		/* caller-provided register array */
	int err;		/* result/error code of the access */
} msr_regs_info_t;

/*
 * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
/* Declare the two 32-bit halves used to carry a 64-bit MSR value. */
#define	DECLARE_ARGS(val, low, high)	unsigned low, high
/* Assemble the edx:eax halves into one 64-bit value. */
#define	EAX_EDX_VAL(val, low, high)	((low) | ((uint64_t)(high) << 32))
/* Inline-asm input operands: eax <- low, edx <- high. */
#define	EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
/* Inline-asm output operands: low <- eax, high <- edx. */
#define	EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)

/*
 * Low-level MSR accessors, implemented elsewhere.  The _safe variants
 * report failure through an int error channel (out-parameter or return
 * value) rather than letting a bad MSR access fault unhandled —
 * NOTE(review): presumably via on_trap(), given the <sys/ontrap.h>
 * include above; confirm in the implementation.
 */
extern unsigned long long native_read_msr(unsigned int);
extern uint64_t native_read_msr_safe(unsigned int, int *);
extern int native_write_msr_safe(unsigned int, unsigned, unsigned);
extern void native_write_msr(unsigned int, unsigned, unsigned);

/* Read the time-stamp counter (RDTSC). */
extern unsigned long long native_read_tsc(void);
extern unsigned long long __native_read_tsc(void);

/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

/* Read MSR `msr`: val1 gets the low 32 bits, val2 the high 32 bits. */
#define	rdmsr(msr, val1, val2)						\
do {									\
	uint64_t __val = native_read_msr((msr));			\
	(val1) = (uint32_t)__val;					\
	(val2) = (uint32_t)(__val >> 32);				\
} while (0)

/* Read MSR `msr` as a single 64-bit value into val. */
#define	rdmsrl(msr, val)						\
	((val) = native_read_msr((msr)))

/* Write the 64-bit value val to MSR `msr` (split into edx:eax halves). */
#define	wrmsrl(msr, val)						\
	native_write_msr((msr), (uint32_t)((uint64_t)(val)),		\
	    (uint32_t)((uint64_t)(val) >> 32))

/* see comment above for wrmsr() */
/*
 * wrmsr with exception handling: writes high:low to `msr`, returning an
 * error code instead of faulting on a bad MSR — NOTE(review): confirm
 * return convention (0 on success) in the implementation.
 */
extern int wrmsr_safe(unsigned msr, unsigned low, unsigned high);

/* rdmsr with exception handling */
/* BEGIN CSTYLED */
/*
 * Read MSR `msr`, storing the low 32 bits through p1 and the high 32
 * bits through p2.  Evaluates (GNU statement expression) to the error
 * code produced by native_read_msr_safe().  p1 and p2 are fully
 * parenthesized before dereference so that expression arguments
 * (e.g. `base + off`) expand correctly.
 */
#define	rdmsr_safe(msr, p1, p2)					\
({								\
	int __err;						\
	uint64_t __val = native_read_msr_safe((msr), &__err);	\
	(*(p1)) = (uint32_t)__val;				\
	(*(p2)) = (uint32_t)(__val >> 32);			\
	__err;							\
})
/* END CSTYLED */

/*
 * 64-bit safe accessors: read/write the whole MSR value, returning an
 * error code instead of faulting.  The _amd_ variants use AMD-specific
 * access semantics — NOTE(review): presumably the 0x9c5a203a passcode
 * in ECX, as in the Linux original; confirm in the implementation.
 */
extern int rdmsrl_safe(unsigned, unsigned long long *);
extern int rdmsrl_amd_safe(unsigned, unsigned long long *);
extern int wrmsrl_amd_safe(unsigned, unsigned long long);

/* Read only the low 32 bits of the time-stamp counter into `low`. */
#define	rdtscl(low)						\
	((low) = (uint32_t)__native_read_tsc())

/* Read the full 64-bit time-stamp counter into `val`. */
#define	rdtscll(val)						\
	((val) = __native_read_tsc())

#endif /* _KVM_MSR_H */