author | Robert Mustacchi <rm@joyent.com> | 2012-03-08 00:41:46 +0000
---|---|---
committer | Robert Mustacchi <rm@joyent.com> | 2012-03-08 00:41:46 +0000
commit | 9621d5228ac4dbdd99cdfe8f2946e7315261a893 (patch)
tree | 488c0b64195ad83c993bb2bf67a12763937811ee
parent | 76be9d4d13f5c5d32d915820bbf956b73fd89aab (diff)
download | illumos-kvm-9621d5228ac4dbdd99cdfe8f2946e7315261a893.tar.gz
HVM-719 kvm should build with gcc 4.4.4
-rw-r--r-- | Makefile      |  16
-rw-r--r-- | kvm.c         |   6
-rw-r--r-- | kvm_bitops.c  | 255
-rw-r--r-- | kvm_bitops.h  | 173
-rw-r--r-- | kvm_lapic.c   |  39
-rw-r--r-- | kvm_vmx.c     |   4
-rw-r--r-- | kvm_x86.c     |  20
-rw-r--r-- | kvm_x86host.h |   5
-rw-r--r-- | kvm_x86impl.h |  36

9 files changed, 190 insertions, 364 deletions
diff --git a/Makefile b/Makefile
@@ -1,11 +1,11 @@
 #
-# Copyright (c) 2010 Joyent Inc., All Rights Reserved.
+# Copyright (c) 2012 Joyent Inc., All Rights Reserved.
 #
 # Use the gcc compiler and Sun linker.
 KERNEL_SOURCE=$(PWD)/../../illumos
 PROTO_AREA=$(PWD)/../../../proto
-GCC=/usr/sfw/bin/gcc
+GCC=/opt/gcc/4.4.4/bin/gcc
 CC=$(GCC) -m64 -mcmodel=kernel
 LD=/usr/bin/ld
 CTFBINDIR=$(KERNEL_SOURCE)/usr/src/tools/proto/*/opt/onbld/bin/i386
@@ -69,7 +69,7 @@ HDRCHK_SYSHDRS= \
 
 world: kvm kvm.so JOY_kvm_link.so
 
-kvm: kvm.c kvm_x86.c kvm_emulate.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c $(HEADERS)
+kvm: kvm.c kvm_x86.c kvm_emulate.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c $(HEADERS)
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_x86.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_emulate.c
@@ -84,7 +84,6 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c k
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_coalesced_mmio.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_irq_comm.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_cache_regs.c
-	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_bitops.c
 	$(CTFCONVERT) -i -L VERSION kvm.o
 	$(CTFCONVERT) -i -L VERSION kvm_x86.o
 	$(CTFCONVERT) -i -L VERSION kvm_emulate.o
@@ -99,9 +98,8 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c k
 	$(CTFCONVERT) -i -L VERSION kvm_coalesced_mmio.o
 	$(CTFCONVERT) -i -L VERSION kvm_irq_comm.o
 	$(CTFCONVERT) -i -L VERSION kvm_cache_regs.o
-	$(CTFCONVERT) -i -L VERSION kvm_bitops.o
-	$(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
-	$(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
+	$(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o
+	$(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o
 
 kvm.so: kvm_mdb.c
 	$(GCC) -m64 -shared \
@@ -121,8 +119,8 @@ install: world
 	@cp JOY_kvm_link.so $(DESTDIR)/usr/lib/devfsadm/linkmod
 
 check:
-	@$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c $(HEADERS) kvm_link.c
-	@./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
+	@$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c $(HEADERS) kvm_link.c
+	@./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c
 	@$(HDRCHK) $(HDRCHK_USRFLAG) $(HDRCHK_USRHDRS)
 	@$(HDRCHK) $(HDRCHK_SYSFLAG) $(HDRCHK_SYSHDRS)
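The compiler move above, from the old /usr/sfw gcc (a 3.4-era compiler on these systems) to gcc 4.4.4, drives the rest of this change: the two compilers apply different GNU89 versus C99 rules for `inline` and `extern inline`, so the driver's old split of `inline` definitions in kvm_bitops.c plus `extern inline` declarations in the header stops building cleanly (the exact failure varies with -std). A minimal sketch of the portable pattern the commit converges on, with hypothetical names rather than the driver's actual code:

```c
/*
 * Under both gnu89 and c99 rules, a `static inline` definition in a
 * header gives every including translation unit its own internal-linkage
 * copy, so no single .c file has to emit the out-of-line symbol that
 * the two `extern inline` dialects disagree about.
 */
static inline int
example_bit_is_set(unsigned long word, int nr)
{
	return ((word >> nr) & 1);
}
```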
diff --git a/kvm.c b/kvm.c
@@ -24,7 +24,7 @@
  * Yaniv Kamay <yaniv@qumranet.com>
  *
  * Ported to illumos by Joyent
- * Copyright 2011 Joyent, Inc. All rights reserved.
+ * Copyright (c) 2012 Joyent, Inc. All rights reserved.
  *
  * Authors:
  * Max Bruning <max@joyent.com>
@@ -783,7 +783,7 @@ kvm_free_physmem(struct kvm *kvm)
 void
 kvm_get_kvm(struct kvm *kvm)
 {
-	atomic_inc_32(&kvm->users_count);
+	atomic_inc_32((volatile uint32_t *)&kvm->users_count);
 }
 
 unsigned long
@@ -1449,7 +1449,7 @@ kvm_vm_ioctl_create_vcpu(struct kvm *kvm, uint32_t id, int *rval_p)
 
 	smp_wmb();
 
-	atomic_inc_32(&kvm->online_vcpus);
+	atomic_inc_32((volatile uint32_t *)&kvm->online_vcpus);
 
 	if (kvm->bsp_vcpu_id == id)
 		kvm->bsp_vcpu = vcpu;
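The casts added to kvm.c satisfy gcc 4.4's stricter pointer-compatibility checking: illumos declares atomic_inc_32() as taking a volatile uint32_t *, while these counters appear to be plain int fields. A minimal sketch of the pattern, assuming a plain int counter as a stand-in for kvm->users_count:

```c
#include <sys/types.h>
#include <sys/atomic.h>

static int users_count;		/* stand-in for kvm->users_count */

static void
hold_example(void)
{
	/*
	 * Without the cast, gcc 4.4 (with warnings treated as errors)
	 * rejects passing an int * where volatile uint32_t * is expected.
	 */
	atomic_inc_32((volatile uint32_t *)&users_count);
}
```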
diff --git a/kvm_bitops.c b/kvm_bitops.c
deleted file mode 100644
index 5dcf71b..0000000
--- a/kvm_bitops.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * GPL HEADER START
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * GPL HEADER END
- *
- * Copyright 2011 various Linux Kernel contributors.
- * Copyright 2011 Joyent, Inc. All Rights Reserved.
- */
-/*
- * Copyright 1992, Linus Torvalds.
- * Copyright 2011 Joyent, Inc.
- *
- * Note: inlines with more than a single statement should be marked
- * __always_inline to avoid problems with older gcc's inlining heuristics.
- */
-
-#include "kvm_bitops.h"
-
-#include "kvm_impl.h"
-
-#define	ADDR	BITOP_ADDR(addr)
-
-/*
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-inline void
-set_bit(unsigned int nr, volatile unsigned long *addr)
-{
-	if (IS_IMMEDIATE(nr)) {
-		__asm__ volatile("lock orb %1,%0"
-		    : CONST_MASK_ADDR(nr, addr)
-		    : "iq" ((uint8_t)CONST_MASK(nr))
-		    : "memory");
-	} else {
-		__asm__ volatile("lock bts %1,%0"
-		    : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
-	}
-}
-
-/*
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-inline void
-__set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
-}
-
-/*
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
- */
-inline void
-clear_bit(int nr, volatile unsigned long *addr)
-{
-	if (IS_IMMEDIATE(nr)) {
-		__asm__ volatile("lock andb %1,%0"
-		    : CONST_MASK_ADDR(nr, addr)
-		    : "iq" ((uint8_t)~CONST_MASK(nr)));
-	} else {
-		__asm__ volatile("lock btr %1,%0"
-		    : BITOP_ADDR(addr)
-		    : "Ir" (nr));
-	}
-}
-
-inline void
-__clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ volatile("btr %1,%0" : ADDR : "Ir" (nr));
-}
-
-/*
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-inline int
-test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("lock bts %2,%1\n\t"
-	    "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-	return (oldbit);
-}
-
-/*
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-inline int
-__test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__("bts %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr));
-	return (oldbit);
-}
-
-/*
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-inline int
-test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("lock btr %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-	return (oldbit);
-}
-
-/*
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-inline int
-__test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("btr %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr));
-
-	return (oldbit);
-}
-
-inline int
-constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
-	return (((1UL << (nr % 64)) &
-	    (((unsigned long *)addr)[nr / 64])) != 0);
-}
-
-inline int
-variable_test_bit(int nr, volatile const unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("bt %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit)
-	    : "m" (*(unsigned long *)addr), "Ir" (nr));
-
-	return (oldbit);
-}
-
-/*
- * __ffs - find first set bit in word
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-inline unsigned long
-__ffs(unsigned long word)
-{
-	__asm__("bsf %1,%0"
-	    : "=r" (word)
-	    : "rm" (word));
-	return (word);
-}
-
-/*
- * ffz - find first zero bit in word
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-inline unsigned long
-ffz(unsigned long word)
-{
-	__asm__("bsf %1,%0"
-	    : "=r" (word)
-	    : "r" (~word));
-	return (word);
-}
-
-/*
- * __fls: find last set bit in word
- * @word: The word to search
- *
- * Undefined if no set bit exists, so code should check against 0 first.
- */
-inline unsigned long
-__fls(unsigned long word)
-{
-	__asm__("bsr %1,%0"
-	    : "=r" (word)
-	    : "rm" (word));
-	return (word);
-}
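The deleted file's implementations reappear below as static inlines in kvm_bitops.h, unchanged in substance. One idiom worth calling out: `bts`, `btr`, and `bt` copy the addressed bit into the carry flag before (or instead of) modifying it, and the following `sbb %0,%0` subtracts a register from itself with borrow, materializing the carry flag as 0 or -1. A non-atomic C sketch of what test_and_set_bit() computes (illustration only; the real routine is atomic by virtue of the lock prefix):

```c
static int
test_and_set_bit_sketch(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + (nr / 64);  /* amd64 longs */
	unsigned long mask = 1UL << (nr % 64);
	unsigned long old = *word;	/* racy read: sketch only */

	*word = old | mask;
	return ((old & mask) ? -1 : 0);	/* sbb yields 0 or all-ones */
}
```

Callers only test the result against zero, so returning -1 rather than 1 for a previously set bit is harmless.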
diff --git a/kvm_bitops.h b/kvm_bitops.h
index 0dc7fd1..f45ccc3 100644
--- a/kvm_bitops.h
+++ b/kvm_bitops.h
@@ -22,12 +22,14 @@
 /*
  * Copyright 1992, Linus Torvalds.
- * Copyright 2011, Joyent, Inc.
+ * Copyright (c) 2012, Joyent, Inc.
  *
  * Note: inlines with more than a single statement should be marked
  * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
+#include <sys/types.h>
+
 #define	DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
 #define	BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, 8 * sizeof (long))
@@ -48,6 +50,8 @@
 #define	BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
+#define	ADDR	BITOP_ADDR(addr)
+
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
@@ -72,7 +76,19 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-extern inline void set_bit(unsigned int, volatile unsigned long *);
+static inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+	if (IS_IMMEDIATE(nr)) {
+		__asm__ volatile("lock orb %1,%0"
+		    : CONST_MASK_ADDR(nr, addr)
+		    : "iq" ((uint8_t)CONST_MASK(nr))
+		    : "memory");
+	} else {
+		__asm__ volatile("lock bts %1,%0"
+		    : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
+}
 
 /*
  * __set_bit - Set a bit in memory
@@ -83,7 +99,11 @@ extern inline void set_bit(unsigned int, volatile unsigned long *);
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-extern inline void __set_bit(int, volatile unsigned long *);
+static inline void
+__set_bit(int nr, volatile unsigned long *addr)
+{
+	__asm__ volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+}
 
 /*
  * clear_bit - Clears a bit in memory
@@ -95,8 +115,25 @@ extern inline void __set_bit(int, volatile unsigned long *);
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-extern inline void clear_bit(int, volatile unsigned long *);
-extern inline void __clear_bit(int, volatile unsigned long *);
+static inline void
+clear_bit(int nr, volatile unsigned long *addr)
+{
+	if (IS_IMMEDIATE(nr)) {
+		__asm__ volatile("lock andb %1,%0"
+		    : CONST_MASK_ADDR(nr, addr)
+		    : "iq" ((uint8_t)~CONST_MASK(nr)));
+	} else {
+		__asm__ volatile("lock btr %1,%0"
+		    : BITOP_ADDR(addr)
+		    : "Ir" (nr));
+	}
+}
+
+static inline void
+__clear_bit(int nr, volatile unsigned long *addr)
+{
+	__asm__ volatile("btr %1,%0" : ADDR : "Ir" (nr));
+}
 
 /*
  * test_and_set_bit - Set a bit and return its old value
@@ -106,7 +143,16 @@ extern inline void __clear_bit(int, volatile unsigned long *);
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern inline int test_and_set_bit(int, volatile unsigned long *);
+static inline int
+test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("lock bts %2,%1\n\t"
+	    "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
 
 /*
  * __test_and_set_bit - Set a bit and return its old value
@@ -117,7 +163,17 @@ extern inline int test_and_set_bit(int, volatile unsigned long *);
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
 */
-extern inline int __test_and_set_bit(int, volatile unsigned long *);
+static inline int
+__test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
+	return (oldbit);
+}
 
 /*
  * test_and_clear_bit - Clear a bit and return its old value
@@ -127,7 +183,17 @@ extern inline int __test_and_set_bit(int, volatile unsigned long *);
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-extern inline int test_and_clear_bit(int, volatile unsigned long *);
+static inline int
+test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("lock btr %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
 
 /*
  * __test_and_clear_bit - Clear a bit and return its old value
@@ -138,11 +204,38 @@ extern inline int test_and_clear_bit(int, volatile unsigned long *);
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-extern inline int __test_and_clear_bit(int, volatile unsigned long *);
+static inline int
+__test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("btr %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
+
+	return (oldbit);
+}
+
+static inline int
+constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+	return (((1UL << (nr % 64)) &
+	    (((unsigned long *)addr)[nr / 64])) != 0);
+}
+
+static inline int
+variable_test_bit(int nr, volatile const unsigned long *addr)
+{
+	int oldbit;
 
-extern inline int constant_test_bit(unsigned int,
-    const volatile unsigned long *);
-extern inline int variable_test_bit(int, volatile const unsigned long *);
+	__asm__ volatile("bt %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit)
+	    : "m" (*(unsigned long *)addr), "Ir" (nr));
+
+	return (oldbit);
+}
 
 /*
  * test_bit - Determine whether a bit is set
@@ -161,7 +254,14 @@ extern inline int variable_test_bit(int, volatile const unsigned long *);
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __ffs(unsigned long);
+static inline unsigned long
+__ffs(unsigned long word)
+{
+	__asm__("bsf %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
+	return (word);
+}
 
 /*
  * ffz - find first zero bit in word
@@ -169,7 +269,14 @@ extern inline unsigned long __ffs(unsigned long);
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-extern inline unsigned long ffz(unsigned long);
+static inline unsigned long
+ffz(unsigned long word)
+{
+	__asm__("bsf %1,%0"
+	    : "=r" (word)
+	    : "r" (~word));
+	return (word);
+}
 
 /*
  * __fls: find last set bit in word
@@ -177,35 +284,13 @@ extern inline unsigned long ffz(unsigned long);
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __fls(unsigned long);
-
-#ifdef __KERNEL__
-/*
- * ffs - find first set bit in word
- * @x: the word to search
- *
- * This is defined the same way as the libc and compiler builtin ffs
- * routines, therefore differs in spirit from the other bitops.
- *
- * ffs(value) returns 0 if value is 0 or the position of the first
- * set bit if value is nonzero. The first (least significant) bit
- * is at position 1.
- */
-extern inline int ffs(int);
-
-/*
- * fls - find last set bit in word
- * @x: the word to search
- *
- * This is defined in a similar way as the libc and compiler builtin
- * ffs, but returns the position of the most significant set bit.
- *
- * fls(value) returns 0 if value is 0 or the position of the last
- * set bit if value is nonzero. The last (most significant) bit is
- * at position 32.
- */
-extern inline int fls(int);
-
-#endif /* __KERNEL__ */
+static inline unsigned long
+__fls(unsigned long word)
+{
+	__asm__("bsr %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
+	return (word);
+}
 
 #endif /* _ASM_X86_BITOPS_H */
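With the definitions now living in the header, any file in the module can use the bit operations without a corresponding object file. A short usage sketch against the API as declared above (the `pending` bitmap is hypothetical, but mirrors how the APIC code keeps one bit per vector):

```c
static unsigned long pending[BITS_TO_LONGS(256)];	/* 256 vectors */

static void
example(void)
{
	set_bit(42, pending);			/* atomic set */
	if (test_and_clear_bit(42, pending)) {
		/* vector 42 was pending and has now been claimed */
	}
}
```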
diff --git a/kvm_lapic.c b/kvm_lapic.c
index 5a9d07e..6a3550d 100644
--- a/kvm_lapic.c
+++ b/kvm_lapic.c
@@ -15,7 +15,7 @@
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
  *
- * Copyright 2011 Joyent, Inc. All rights reserved.
+ * Copyright (c) 2012 Joyent, Inc. All rights reserved.
  */
 
 #include <sys/types.h>
 #include <sys/atomic.h>
@@ -54,7 +54,7 @@ static int __apic_accept_irq(struct kvm_lapic *, int, int, int, int);
 
 #define	REG_POS(v) (((v) >> 5) << 4)
 
-inline uint32_t
+uint32_t
 apic_get_reg(struct kvm_lapic *apic, int reg_off)
 {
 	return (*((uint32_t *)((uintptr_t)apic->regs + reg_off)));
@@ -66,45 +66,45 @@ apic_set_reg(struct kvm_lapic *apic, int reg_off, uint32_t val)
 	*((uint32_t *)((uintptr_t)apic->regs + reg_off)) = val;
 }
 
-static inline int
+static int
 apic_test_and_set_vector(int vec, caddr_t bitmap)
 {
 	return (test_and_set_bit(VEC_POS(vec),
 	    (unsigned long *)(bitmap + REG_POS(vec))));
 }
 
-static inline int
+static int
 apic_test_and_clear_vector(int vec, caddr_t bitmap)
 {
 	return (test_and_clear_bit(VEC_POS(vec),
 	    (unsigned long *)(bitmap + REG_POS(vec))));
 }
 
-inline void
+void
 apic_set_vector(int vec, caddr_t bitmap)
 {
 	set_bit(VEC_POS(vec), (unsigned long *)(bitmap + REG_POS(vec)));
 }
 
-inline void
+void
 apic_clear_vector(int vec, caddr_t bitmap)
 {
 	clear_bit(VEC_POS(vec), (unsigned long *)(bitmap + REG_POS(vec)));
 }
 
-inline int
+int
 apic_hw_enabled(struct kvm_lapic *apic)
 {
 	return ((apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
 }
 
-inline int
+int
 apic_sw_enabled(struct kvm_lapic *apic)
 {
 	return (apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED);
 }
 
-inline int
+int
 apic_enabled(struct kvm_lapic *apic)
 {
 	return (apic_sw_enabled(apic) && apic_hw_enabled(apic));
@@ -128,7 +128,7 @@ apic_lvtt_period(struct kvm_lapic *apic)
 	return (apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC);
 }
 
-static inline int
+static int
 apic_lvt_nmi_mode(uint32_t lvt_val)
 {
 	return ((lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI);
@@ -150,7 +150,7 @@ kvm_apic_set_version(struct kvm_vcpu *vcpu)
 	apic_set_reg(apic, APIC_LVR, v);
 }
 
-static inline int
+static int
 apic_x2apic_mode(struct kvm_lapic *apic)
 {
 	return (apic->vcpu->arch.apic_base & X2APIC_ENABLE);
@@ -211,7 +211,7 @@ find_highest_vector(void *bitmap)
 		return (fls(word[word_offset << 2]) - 1 + (word_offset << 5));
 }
 
-static inline int
+static int
 apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = 1;
@@ -219,14 +219,14 @@ apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 	    APIC_IRR)));
 }
 
-static inline int
+static int
 apic_search_irr(struct kvm_lapic *apic)
 {
 	return (find_highest_vector((void *)((uintptr_t)apic->regs +
 	    APIC_IRR)));
 }
 
-static inline int
+static int
 apic_find_highest_irr(struct kvm_lapic *apic)
 {
 	int result;
@@ -240,7 +240,7 @@ apic_find_highest_irr(struct kvm_lapic *apic)
 	return (result);
 }
 
-static inline void
+static void
 apic_clear_irr(int vec, struct kvm_lapic *apic)
 {
 	apic->irr_pending = 0;
@@ -278,7 +278,7 @@ kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
 	    irq->level, irq->trig_mode));
 }
 
-inline int
+int
 apic_find_highest_isr(struct kvm_lapic *apic)
 {
 	int ret;
@@ -553,7 +553,7 @@ __report_tpr_access(struct kvm_lapic *apic, int write)
 	run->tpr_access.is_write = write;
 }
 
-static inline void
+static void
 report_tpr_access(struct kvm_lapic *apic, int write)
 {
 	if (apic->vcpu->arch.tpr_access_reporting)
@@ -596,7 +596,7 @@ __apic_read(struct kvm_lapic *apic, unsigned int offset)
 	return (val);
 }
 
-static inline struct kvm_lapic *
+static struct kvm_lapic *
 to_lapic(struct kvm_io_device *dev)
 {
 	return ((struct kvm_lapic *)((uintptr_t)dev -
@@ -1170,7 +1170,8 @@ kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 
 	if (apic && apic->lapic_timer.pending > 0) {
 		if (kvm_apic_local_deliver(apic, APIC_LVTT))
-			atomic_dec_32(&apic->lapic_timer.pending);
+			atomic_dec_32((volatile uint32_t *)&apic->
+			    lapic_timer.pending);
 	}
 }
diff --git a/kvm_vmx.c b/kvm_vmx.c
@@ -13,7 +13,7 @@
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
  *
- * Copyright 2011 Joyent, Inc. All Rights Reserved.
+ * Copyright (c) 2012 Joyent, Inc. All rights reserved.
  */
 
 #include <sys/sysmacros.h>
@@ -1376,7 +1376,7 @@ kvm_cpu_vmxoff(void)
 	KVM_TRACE(vmx__vmxoff);
 
 	/* BEGIN CSTYLED */
-	__asm__ volatile ((ASM_VMX_VMXOFF) : : : "cc");
+	__asm__ volatile (ASM_VMX_VMXOFF : : : "cc");
 	/* END CSTYLED */
 	setcr4(getcr4() & ~X86_CR4_VMXE);
 }
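The dropped parentheses in kvm_cpu_vmxoff() matter because gcc 4.4 is stricter about the asm template operand: it must be a bare string literal (ASM_VMX_VMXOFF presumably expands to one), and wrapping the expansion in parentheses turns it into a parenthesized expression that the newer parser rejects. A compile-time illustration (do not actually execute vmxoff outside VMX root operation):

```c
static void
vmxoff_example(void)
{
	/* accepted: the template is a bare string literal */
	__asm__ volatile ("vmxoff" : : : "cc");
	/*
	 * rejected by gcc 4.4's stricter parser:
	 * __asm__ volatile (("vmxoff") : : : "cc");
	 */
}
```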
diff --git a/kvm_x86.c b/kvm_x86.c
@@ -17,7 +17,7 @@
 * GPL HEADER END
 *
 * Copyright 2011 various Linux Kernel contributors.
- * Copyright 2012 Joyent, Inc. All Rights Reserved.
+ * Copyright (c) 2012 Joyent, Inc. All Rights Reserved.
 */
 
 #include <sys/types.h>
@@ -2887,7 +2887,7 @@ kvm_timer_fire(void *arg)
 	mutex_enter(&vcpu->kvcpu_kick_lock);
 
 	if (timer->reinject || !timer->pending) {
-		atomic_add_32(&timer->pending, 1);
+		atomic_add_32((volatile uint32_t *)&timer->pending, 1);
 		set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
 	}
 
@@ -5056,7 +5056,7 @@ kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	kvm_x86_ops->set_rflags(vcpu, rflags);
 }
 
-inline gpa_t
+gpa_t
 gfn_to_gpa(gfn_t gfn)
 {
 	return ((gpa_t)gfn << PAGESHIFT);
@@ -5280,7 +5280,7 @@ native_read_cr3(void)
 	return (val);
 }
 
-inline unsigned long
+unsigned long
 get_desc_limit(const struct desc_struct *desc)
 {
 	return (desc->c.b.limit0 | (desc->c.b.limit << 16));
@@ -5293,13 +5293,13 @@ get_desc_base(const struct desc_struct *desc)
 	    ((desc->c.b.base2) << 24));
 }
 
-inline void
+void
 kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.exception.pending = 0;
 }
 
-inline void
+void
 kvm_queue_interrupt(struct kvm_vcpu *vcpu, uint8_t vector, int soft)
 {
 	vcpu->arch.interrupt.pending = 1;
@@ -5307,7 +5307,7 @@ kvm_queue_interrupt(struct kvm_vcpu *vcpu, uint8_t vector, int soft)
 	vcpu->arch.interrupt.nr = vector;
 }
 
-inline void
+void
 kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.interrupt.pending = 0;
@@ -5320,13 +5320,13 @@ kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
 	    vcpu->arch.nmi_injected);
 }
 
-inline int
+int
 kvm_exception_is_soft(unsigned int nr)
 {
 	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
 }
 
-inline int
+int
 is_protmode(struct kvm_vcpu *vcpu)
 {
 	return (kvm_read_cr0_bits(vcpu, X86_CR0_PE));
@@ -5338,7 +5338,7 @@ is_long_mode(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.efer & EFER_LMA);
 }
 
-inline int
+int
 is_pae(struct kvm_vcpu *vcpu)
 {
 	return (kvm_read_cr4_bits(vcpu, X86_CR4_PAE));
 }
diff --git a/kvm_x86host.h b/kvm_x86host.h
index 449c689..c2fcc68 100644
--- a/kvm_x86host.h
+++ b/kvm_x86host.h
@@ -17,7 +17,7 @@
 * GPL HEADER END
 *
 * Copyright 2011 various Linux Kernel contributors.
- * Copyright 2011 Joyent, Inc. All Rights Reserved.
+ * Copyright (c) 2012 Joyent, Inc. All Rights Reserved.
 */
 
 #ifndef __KVM_X86_HOST_H
@@ -100,9 +100,6 @@
 #define	KVM_NR_FIXED_MTRR_REGION 88
 #define	KVM_NR_VAR_MTRR 8
 
-extern kmutex_t kvm_lock;
-extern list_t vm_list;
-
 struct kvm_vcpu;
 struct kvm;
diff --git a/kvm_x86impl.h b/kvm_x86impl.h
index faede86..6fdcfd3 100644
--- a/kvm_x86impl.h
+++ b/kvm_x86impl.h
@@ -17,7 +17,7 @@
 * GPL HEADER END
 *
 * Copyright 2011 various Linux Kernel contributors.
- * Copyright 2011 Joyent, Inc. All Rights Reserved.
+ * Copyright (c) 2012 Joyent, Inc. All Rights Reserved.
 */
 
 #ifndef __KVM_X86_IMPL_H
@@ -30,18 +30,18 @@
 #include "kvm_x86.h"
 #include "kvm_cache_regs.h"
 
-extern inline void kvm_clear_exception_queue(struct kvm_vcpu *);
-extern inline void kvm_queue_interrupt(struct kvm_vcpu *, uint8_t, int);
-extern inline void kvm_clear_interrupt_queue(struct kvm_vcpu *);
-extern inline int kvm_event_needs_reinjection(struct kvm_vcpu *);
-extern inline int kvm_exception_is_soft(unsigned int nr);
+extern void kvm_clear_exception_queue(struct kvm_vcpu *);
+extern void kvm_queue_interrupt(struct kvm_vcpu *, uint8_t, int);
+extern void kvm_clear_interrupt_queue(struct kvm_vcpu *);
+extern int kvm_event_needs_reinjection(struct kvm_vcpu *);
+extern int kvm_exception_is_soft(unsigned int nr);
 
 extern kvm_cpuid_entry2_t *kvm_find_cpuid_entry(struct kvm_vcpu *,
     uint32_t, uint32_t);
 
-extern inline int is_protmode(struct kvm_vcpu *);
-extern inline int is_long_mode(struct kvm_vcpu *);
-extern inline int is_pae(struct kvm_vcpu *);
-extern inline int is_pse(struct kvm_vcpu *);
-extern inline int is_paging(struct kvm_vcpu *);
+extern int is_protmode(struct kvm_vcpu *);
+extern int is_long_mode(struct kvm_vcpu *);
+extern int is_pae(struct kvm_vcpu *);
+extern int is_pse(struct kvm_vcpu *);
+extern int is_paging(struct kvm_vcpu *);
 
 extern caddr_t page_address(page_t *);
 extern page_t *alloc_page(int, void **);
@@ -55,18 +55,18 @@ typedef void (*kvm_xcall_t)(void *);
 extern void kvm_xcall(processorid_t, kvm_xcall_t, void *);
 extern int kvm_xcall_func(kvm_xcall_t, void *);
 
-unsigned long native_read_cr0(void);
+extern unsigned long native_read_cr0(void);
 #define	read_cr0()	(native_read_cr0())
-unsigned long native_read_cr4(void);
+extern unsigned long native_read_cr4(void);
 #define	read_cr4()	(native_read_cr4())
-unsigned long native_read_cr3(void);
+extern unsigned long native_read_cr3(void);
 #define	read_cr3()	(native_read_cr3())
 
-inline page_t *compound_head(page_t *);
-inline void get_page(page_t *);
-inline unsigned long get_desc_limit(const struct desc_struct *);
+extern page_t *compound_head(page_t *);
+extern void get_page(page_t *);
+extern unsigned long get_desc_limit(const struct desc_struct *);
 extern unsigned long get_desc_base(const struct desc_struct *);
 
-uint32_t bit(int);
+extern uint32_t bit(int);
 
 #endif
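The header cleanup in kvm_x86impl.h is the flip side of the de-inlining in kvm_x86.c and kvm_lapic.c above: once each function has a single out-of-line definition, the header only needs ordinary extern declarations, with no `inline` keyword whose meaning shifts between dialects. A reduced sketch of the resulting split (hypothetical miniature, not the driver's actual types):

```c
/* in the header: a plain declaration, no inline anywhere */
extern int is_pae_example(unsigned long cr4);

/* in exactly one .c file: the single out-of-line definition */
int
is_pae_example(unsigned long cr4)
{
	return ((cr4 & 0x20) != 0);	/* X86_CR4_PAE is bit 5 of %cr4 */
}
```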