diff options
Diffstat (limited to 'linux/x86/mmutrace.h')
-rw-r--r-- | linux/x86/mmutrace.h | 95 |
1 file changed, 45 insertions(+), 50 deletions(-)
diff --git a/linux/x86/mmutrace.h b/linux/x86/mmutrace.h
index b60b4fd..3e4a5c6 100644
--- a/linux/x86/mmutrace.h
+++ b/linux/x86/mmutrace.h
@@ -6,12 +6,14 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE mmutrace
 
 #define KVM_MMU_PAGE_FIELDS \
 	__field(__u64, gfn) \
 	__field(__u32, role) \
 	__field(__u32, root_count) \
-	__field(bool, unsync)
+	__field(__u32, unsync)
 
 #define KVM_MMU_PAGE_ASSIGN(sp)	\
 	__entry->gfn = sp->gfn;	\
@@ -28,14 +30,14 @@
 	\
 	role.word = __entry->role;	\
 	\
-	trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s"	\
+	trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge"	\
 			 " %snxe root %u %s%c",	\
-			 __entry->gfn, role.level,	\
-			 role.cr4_pae ? " pae" : "",	\
+			 __entry->gfn, role.level, role.glevels,	\
 			 role.quadrant,	\
 			 role.direct ? " direct" : "",	\
 			 access_str[role.access],	\
 			 role.invalid ? " invalid" : "",	\
+			 role.cr4_pge ? "" : "!",	\
 			 role.nxe ? "" : "!",	\
 			 __entry->root_count,	\
 			 __entry->unsync ? "unsync" : "sync", 0);	\
@@ -92,15 +94,15 @@ TRACE_EVENT(
 	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
 );
 
-DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
-
+/* We set a pte accessed bit */
+TRACE_EVENT(
+	kvm_mmu_set_accessed_bit,
 	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
-
 	TP_ARGS(table_gfn, index, size),
 
 	TP_STRUCT__entry(
 		__field(__u64, gpa)
-	),
+		),
 
 	TP_fast_assign(
 		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
@@ -110,20 +112,22 @@ DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,
 	TP_printk("gpa %llx", __entry->gpa)
 );
 
-/* We set a pte accessed bit */
-DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,
-
+/* We set a pte dirty bit */
+TRACE_EVENT(
+	kvm_mmu_set_dirty_bit,
 	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+	TP_ARGS(table_gfn, index, size),
 
-	TP_ARGS(table_gfn, index, size)
-);
-
-/* We set a pte dirty bit */
-DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,
+	TP_STRUCT__entry(
+		__field(__u64, gpa)
+		),
 
-	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
+	TP_fast_assign(
+		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
+			+ index * size;
+		),
 
-	TP_ARGS(table_gfn, index, size)
+	TP_printk("gpa %llx", __entry->gpa)
 );
 
 TRACE_EVENT(
@@ -162,64 +166,55 @@ TRACE_EVENT(
 		  __entry->created ? "new" : "existing")
 );
 
-DECLARE_EVENT_CLASS(kvm_mmu_page_class,
-
+TRACE_EVENT(
+	kvm_mmu_sync_page,
 	TP_PROTO(struct kvm_mmu_page *sp),
 	TP_ARGS(sp),
 
 	TP_STRUCT__entry(
 		KVM_MMU_PAGE_FIELDS
-	),
+		),
 
 	TP_fast_assign(
 		KVM_MMU_PAGE_ASSIGN(sp)
-	),
+		),
 
 	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
 );
 
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
-	TP_PROTO(struct kvm_mmu_page *sp),
-
-	TP_ARGS(sp)
-);
-
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
+TRACE_EVENT(
+	kvm_mmu_unsync_page,
 	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
 
-	TP_ARGS(sp)
-);
+	TP_STRUCT__entry(
+		KVM_MMU_PAGE_FIELDS
+		),
 
-DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
-	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_fast_assign(
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
 
-	TP_ARGS(sp)
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
 );
 
 TRACE_EVENT(
-	kvm_mmu_audit,
-	TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
-	TP_ARGS(vcpu, audit_point),
+	kvm_mmu_zap_page,
+	TP_PROTO(struct kvm_mmu_page *sp),
+	TP_ARGS(sp),
 
 	TP_STRUCT__entry(
-		__field(struct kvm_vcpu *, vcpu)
-		__field(int, audit_point)
-	),
+		KVM_MMU_PAGE_FIELDS
+		),
 
 	TP_fast_assign(
-		__entry->vcpu = vcpu;
-		__entry->audit_point = audit_point;
-	),
+		KVM_MMU_PAGE_ASSIGN(sp)
+		),
 
-	TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
-		  audit_point_name[__entry->audit_point])
+	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
 );
 
-#endif /* _TRACE_KVMMMU_H */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mmutrace
+#endif /* _TRACE_KVMMMU_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>