diff options
| author | Robert Mustacchi <rm@joyent.com> | 2011-07-26 18:10:40 +0000 |
|---|---|---|
| committer | Robert Mustacchi <rm@joyent.com> | 2011-07-26 18:12:57 +0000 |
| commit | 002dd17038da793ca80490b77aac5c7a6c50f4ef (patch) | |
| tree | 24ba4eab50e8dcd323fd87564e3c9db71306fb27 | |
| parent | 6e29ce562aba212ac6fc25678dd595ba312295e4 (diff) | |
| download | illumos-kvm-002dd17038da793ca80490b77aac5c7a6c50f4ef.tar.gz | |
HVM-528 kvm_tss.h should use the same structure names as <sys/tss.h>
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | kvm_tss.h | 124 |
| -rw-r--r-- | kvm_x86.c | 168 |

2 files changed, 148 insertions, 144 deletions
@@ -4,69 +4,73 @@ #include <sys/stdint.h> typedef struct tss_segment_32 { - uint16_t prev_task_link; /* 16-bit prior TSS selector */ - uint16_t rsvd0; /* reserved, ignored */ - uint32_t esp0; - uint16_t ss0; - uint16_t rsvd1; /* reserved, ignored */ - uint32_t esp1; - uint16_t ss1; - uint16_t rsvd2; /* reserved, ignored */ - uint32_t esp2; - uint16_t ss2; - uint16_t rsvd3; /* reserved, ignored */ - uint32_t cr3; - uint32_t eip; - uint32_t eflags; - uint32_t eax; - uint32_t ecx; - uint32_t edx; - uint32_t ebx; - uint32_t esp; - uint32_t ebp; - uint32_t esi; - uint32_t edi; - uint16_t es; - uint16_t rsvd4; /* reserved, ignored */ - uint16_t cs; - uint16_t rsvd5; /* reserved, ignored */ - uint16_t ss; - uint16_t rsvd6; /* reserved, ignored */ - uint16_t ds; - uint16_t rsvd7; /* reserved, ignored */ - uint16_t fs; - uint16_t rsvd8; /* reserved, ignored */ - uint16_t gs; - uint16_t rsvd9; /* reserved, ignored */ - uint16_t ldt_selector; - uint16_t rsvd10; /* reserved, ignored */ - uint16_t rsvd11; /* reserved, ignored */ - uint16_t io_map; /* io permission bitmap base address */ + uint16_t tss_link; /* 16-bit prior TSS selector */ + uint16_t tss_rsvd0; /* reserved, ignored */ + uint32_t tss_esp0; + uint16_t tss_ss0; + uint16_t tss_rsvd1; /* reserved, ignored */ + uint32_t tss_esp1; + uint16_t tss_ss1; + uint16_t tss_rsvd2; /* reserved, ignored */ + uint32_t tss_esp2; + uint16_t tss_ss2; + uint16_t tss_rsvd3; /* reserved, ignored */ + uint32_t tss_cr3; + uint32_t tss_eip; + uint32_t tss_eflags; + uint32_t tss_eax; + uint32_t tss_ecx; + uint32_t tss_edx; + uint32_t tss_ebx; + uint32_t tss_esp; + uint32_t tss_ebp; + uint32_t tss_esi; + uint32_t tss_edi; + uint16_t tss_es; + uint16_t tss_rsvd4; /* reserved, ignored */ + uint16_t tss_cs; + uint16_t tss_rsvd5; /* reserved, ignored */ + uint16_t tss_ss; + uint16_t tss_rsvd6; /* reserved, ignored */ + uint16_t tss_ds; + uint16_t tss_rsvd7; /* reserved, ignored */ + uint16_t tss_fs; + uint16_t tss_rsvd8; /* reserved, ignored */ 
+ uint16_t tss_gs; + uint16_t tss_rsvd9; /* reserved, ignored */ + uint16_t tss_ldt; + uint16_t tss_rsvd10; /* reserved, ignored */ + uint16_t tss_rsvd11; /* reserved, ignored */ + uint16_t tss_bitmapbase; /* io permission bitmap base address */ } tss_segment_32_t; +/* + * Based on data from Intel Manual 3a, Intel 64 and IA-32 Architectures Software + * Developer’s Manual Volume 3A: System Programming Guide, Part 1, Section 7.6 + */ typedef struct tss_segment_16 { - uint16_t prev_task_link; - uint16_t sp0; - uint16_t ss0; - uint16_t sp1; - uint16_t ss1; - uint16_t sp2; - uint16_t ss2; - uint16_t ip; - uint16_t flag; - uint16_t ax; - uint16_t cx; - uint16_t dx; - uint16_t bx; - uint16_t sp; - uint16_t bp; - uint16_t si; - uint16_t di; - uint16_t es; - uint16_t cs; - uint16_t ss; - uint16_t ds; - uint16_t ldt; + uint16_t tss_link; + uint16_t tss_sp0; + uint16_t tss_ss0; + uint16_t tss_sp1; + uint16_t tss_ss1; + uint16_t tss_sp2; + uint16_t tss_ss2; + uint16_t tss_ip; + uint16_t tss_flag; + uint16_t tss_ax; + uint16_t tss_cx; + uint16_t tss_dx; + uint16_t tss_bx; + uint16_t tss_sp; + uint16_t tss_bp; + uint16_t tss_si; + uint16_t tss_di; + uint16_t tss_es; + uint16_t tss_cs; + uint16_t tss_ss; + uint16_t tss_ds; + uint16_t tss_ldt; } tss_segment_16_t; #endif @@ -4134,24 +4134,24 @@ exception: static void save_state_to_tss32(struct kvm_vcpu *vcpu, struct tss_segment_32 *tss) { - tss->cr3 = vcpu->arch.cr3; - tss->eip = kvm_rip_read(vcpu); - tss->eflags = kvm_get_rflags(vcpu); - tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); - tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); - tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); - tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX); - tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP); - tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP); - tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI); - tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI); - tss->es = get_segment_selector(vcpu, VCPU_SREG_ES); - tss->cs = 
get_segment_selector(vcpu, VCPU_SREG_CS); - tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS); - tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS); - tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS); - tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS); - tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR); + tss->tss_cr3 = vcpu->arch.cr3; + tss->tss_eip = kvm_rip_read(vcpu); + tss->tss_eflags = kvm_get_rflags(vcpu); + tss->tss_eax = kvm_register_read(vcpu, VCPU_REGS_RAX); + tss->tss_ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); + tss->tss_edx = kvm_register_read(vcpu, VCPU_REGS_RDX); + tss->tss_ebx = kvm_register_read(vcpu, VCPU_REGS_RBX); + tss->tss_esp = kvm_register_read(vcpu, VCPU_REGS_RSP); + tss->tss_ebp = kvm_register_read(vcpu, VCPU_REGS_RBP); + tss->tss_esi = kvm_register_read(vcpu, VCPU_REGS_RSI); + tss->tss_edi = kvm_register_read(vcpu, VCPU_REGS_RDI); + tss->tss_es = get_segment_selector(vcpu, VCPU_SREG_ES); + tss->tss_cs = get_segment_selector(vcpu, VCPU_SREG_CS); + tss->tss_ss = get_segment_selector(vcpu, VCPU_SREG_SS); + tss->tss_ds = get_segment_selector(vcpu, VCPU_SREG_DS); + tss->tss_fs = get_segment_selector(vcpu, VCPU_SREG_FS); + tss->tss_gs = get_segment_selector(vcpu, VCPU_SREG_GS); + tss->tss_ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR); } static void @@ -4166,56 +4166,56 @@ kvm_load_segment_selector(struct kvm_vcpu *vcpu, uint16_t sel, int seg) static int load_state_from_tss32(struct kvm_vcpu *vcpu, struct tss_segment_32 *tss) { - kvm_set_cr3(vcpu, tss->cr3); + kvm_set_cr3(vcpu, tss->tss_cr3); - kvm_rip_write(vcpu, tss->eip); - kvm_set_rflags(vcpu, tss->eflags | 2); + kvm_rip_write(vcpu, tss->tss_eip); + kvm_set_rflags(vcpu, tss->tss_eflags | 2); - kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); - kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); - kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx); - kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx); - kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp); - 
kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp); - kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi); - kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi); + kvm_register_write(vcpu, VCPU_REGS_RAX, tss->tss_eax); + kvm_register_write(vcpu, VCPU_REGS_RCX, tss->tss_ecx); + kvm_register_write(vcpu, VCPU_REGS_RDX, tss->tss_edx); + kvm_register_write(vcpu, VCPU_REGS_RBX, tss->tss_ebx); + kvm_register_write(vcpu, VCPU_REGS_RSP, tss->tss_esp); + kvm_register_write(vcpu, VCPU_REGS_RBP, tss->tss_ebp); + kvm_register_write(vcpu, VCPU_REGS_RSI, tss->tss_esi); + kvm_register_write(vcpu, VCPU_REGS_RDI, tss->tss_edi); /* * SDM says that segment selectors are loaded before segment * descriptors */ - kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR); - kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES); - kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS); - kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS); - kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS); - kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS); - kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS); + kvm_load_segment_selector(vcpu, tss->tss_ldt, VCPU_SREG_LDTR); + kvm_load_segment_selector(vcpu, tss->tss_es, VCPU_SREG_ES); + kvm_load_segment_selector(vcpu, tss->tss_cs, VCPU_SREG_CS); + kvm_load_segment_selector(vcpu, tss->tss_ss, VCPU_SREG_SS); + kvm_load_segment_selector(vcpu, tss->tss_ds, VCPU_SREG_DS); + kvm_load_segment_selector(vcpu, tss->tss_fs, VCPU_SREG_FS); + kvm_load_segment_selector(vcpu, tss->tss_gs, VCPU_SREG_GS); /* * Now load segment descriptors. 
If fault happenes at this stage * it is handled in a context of new task */ if (kvm_load_segment_descriptor(vcpu, - tss->ldt_selector, VCPU_SREG_LDTR)) + tss->tss_ldt, VCPU_SREG_LDTR)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_es, VCPU_SREG_ES)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_cs, VCPU_SREG_CS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_ss, VCPU_SREG_SS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_ds, VCPU_SREG_DS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_fs, VCPU_SREG_FS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_gs, VCPU_SREG_GS)) return (1); return (0); @@ -4224,65 +4224,65 @@ load_state_from_tss32(struct kvm_vcpu *vcpu, struct tss_segment_32 *tss) static void save_state_to_tss16(struct kvm_vcpu *vcpu, struct tss_segment_16 *tss) { - tss->ip = kvm_rip_read(vcpu); - tss->flag = kvm_get_rflags(vcpu); - tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); - tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); - tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); - tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX); - tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP); - tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP); - tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI); - tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI); + tss->tss_ip = kvm_rip_read(vcpu); + tss->tss_flag = kvm_get_rflags(vcpu); + tss->tss_ax = kvm_register_read(vcpu, VCPU_REGS_RAX); + tss->tss_cx = kvm_register_read(vcpu, VCPU_REGS_RCX); + tss->tss_dx = kvm_register_read(vcpu, VCPU_REGS_RDX); + tss->tss_bx = 
kvm_register_read(vcpu, VCPU_REGS_RBX); + tss->tss_sp = kvm_register_read(vcpu, VCPU_REGS_RSP); + tss->tss_bp = kvm_register_read(vcpu, VCPU_REGS_RBP); + tss->tss_si = kvm_register_read(vcpu, VCPU_REGS_RSI); + tss->tss_di = kvm_register_read(vcpu, VCPU_REGS_RDI); - tss->es = get_segment_selector(vcpu, VCPU_SREG_ES); - tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS); - tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS); - tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS); - tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR); + tss->tss_es = get_segment_selector(vcpu, VCPU_SREG_ES); + tss->tss_cs = get_segment_selector(vcpu, VCPU_SREG_CS); + tss->tss_ss = get_segment_selector(vcpu, VCPU_SREG_SS); + tss->tss_ds = get_segment_selector(vcpu, VCPU_SREG_DS); + tss->tss_ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR); } static int load_state_from_tss16(struct kvm_vcpu *vcpu, struct tss_segment_16 *tss) { - kvm_rip_write(vcpu, tss->ip); - kvm_set_rflags(vcpu, tss->flag | 2); - kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); - kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); - kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); - kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx); - kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp); - kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp); - kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si); - kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di); + kvm_rip_write(vcpu, tss->tss_ip); + kvm_set_rflags(vcpu, tss->tss_flag | 2); + kvm_register_write(vcpu, VCPU_REGS_RAX, tss->tss_ax); + kvm_register_write(vcpu, VCPU_REGS_RCX, tss->tss_cx); + kvm_register_write(vcpu, VCPU_REGS_RDX, tss->tss_dx); + kvm_register_write(vcpu, VCPU_REGS_RBX, tss->tss_bx); + kvm_register_write(vcpu, VCPU_REGS_RSP, tss->tss_sp); + kvm_register_write(vcpu, VCPU_REGS_RBP, tss->tss_bp); + kvm_register_write(vcpu, VCPU_REGS_RSI, tss->tss_si); + kvm_register_write(vcpu, VCPU_REGS_RDI, tss->tss_di); /* * SDM says that segment selectors are loaded before segment * 
descriptors */ - kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR); - kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES); - kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS); - kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS); - kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS); + kvm_load_segment_selector(vcpu, tss->tss_ldt, VCPU_SREG_LDTR); + kvm_load_segment_selector(vcpu, tss->tss_es, VCPU_SREG_ES); + kvm_load_segment_selector(vcpu, tss->tss_cs, VCPU_SREG_CS); + kvm_load_segment_selector(vcpu, tss->tss_ss, VCPU_SREG_SS); + kvm_load_segment_selector(vcpu, tss->tss_ds, VCPU_SREG_DS); /* * Now load segment descriptors. If fault happenes at this stage * it is handled in a context of new task */ - if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_ldt, VCPU_SREG_LDTR)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_es, VCPU_SREG_ES)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_cs, VCPU_SREG_CS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_ss, VCPU_SREG_SS)) return (1); - if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS)) + if (kvm_load_segment_descriptor(vcpu, tss->tss_ds, VCPU_SREG_DS)) return (1); return (0); @@ -4310,11 +4310,11 @@ kvm_task_switch_16(struct kvm_vcpu *vcpu, uint16_t tss_selector, goto out; if (old_tss_sel != 0xffff) { - tss_segment_16.prev_task_link = old_tss_sel; + tss_segment_16.tss_link = old_tss_sel; if (kvm_write_guest(vcpu->kvm, get_tss_base_addr_write(vcpu, - nseg_desc), &tss_segment_16.prev_task_link, - sizeof (tss_segment_16.prev_task_link))) + nseg_desc), &tss_segment_16.tss_link, + sizeof (tss_segment_16.tss_link))) goto out; } @@ -4348,11 +4348,11 @@ kvm_task_switch_32(struct kvm_vcpu *vcpu, uint16_t 
tss_selector, goto out; if (old_tss_sel != 0xffff) { - tss_segment_32.prev_task_link = old_tss_sel; + tss_segment_32.tss_link = old_tss_sel; if (kvm_write_guest(vcpu->kvm, get_tss_base_addr_write(vcpu, - nseg_desc), &tss_segment_32.prev_task_link, - sizeof (tss_segment_32.prev_task_link))) + nseg_desc), &tss_segment_32.tss_link, + sizeof (tss_segment_32.tss_link))) goto out; } |