-rw-r--r--   Makefile                                12
-rw-r--r--   kvm.c                                    2
-rw-r--r--   kvm_bitops.c                           378
-rw-r--r--   kvm_bitops.h (renamed from bitops.h)   265
-rw-r--r--   kvm_cache_regs.c                         2
-rw-r--r--   kvm_ioapic.c                             2
-rw-r--r--   kvm_irq_comm.c                           2
-rw-r--r--   kvm_lapic.c                              2
-rw-r--r--   kvm_mmu.c                                2
-rw-r--r--   kvm_vmx.c                                2
-rw-r--r--   kvm_x86.c                                2
11 files changed, 416 insertions, 255 deletions
@@ -16,7 +16,7 @@ CSTYLE=$(KERNEL_SOURCE)/usr/src/tools/scripts/cstyle
 
 all: kvm kvm.so
 
-kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c
+kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h kvm_bitops.h kvm_subr.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_x86.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_emulate.c
@@ -32,6 +32,7 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_coalesced_mmio.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_irq_comm.c
 	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_cache_regs.c
+	$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_bitops.c
 	$(CTFCONVERT) -i -L VERSION kvm.o
 	$(CTFCONVERT) -i -L VERSION kvm_x86.o
 	$(CTFCONVERT) -i -L VERSION kvm_emulate.o
@@ -47,8 +48,9 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c
 	$(CTFCONVERT) -i -L VERSION kvm_coalesced_mmio.o
 	$(CTFCONVERT) -i -L VERSION kvm_irq_comm.o
 	$(CTFCONVERT) -i -L VERSION kvm_cache_regs.o
-	$(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o
-	$(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o
+	$(CTFCONVERT) -i -L VERSION kvm_bitops.o
+	$(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
+	$(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o kvm_irq_comm.o kvm_cache_regs.o kvm_bitops.o
 
 kvm.so: kvm_mdb.c
 	gcc -m64 -shared \
@@ -61,8 +63,8 @@ install: kvm
 	@pfexec cp kvm.conf /usr/kernel/drv
 
 check:
-	@$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_subr.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c
-	@./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c
+	@$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_subr.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
+	@./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c kvm_irq_comm.c kvm_cache_regs.c kvm_bitops.c
 
 load: install
 	@echo "==> Loading kvm module"
@@ -34,7 +34,7 @@
 #include <sys/strsubr.h>
 #include <sys/stream.h>
 
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "vmx.h"
 #include "msr-index.h"
 #include "msr.h"
diff --git a/kvm_bitops.c b/kvm_bitops.c
new file mode 100644
index 0000000..04b3ff8
--- /dev/null
+++ b/kvm_bitops.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
+ */
+
+#include "kvm_bitops.h"
+
+#include "kvm_impl.h"
+
+#define	ADDR	BITOP_ADDR(addr)
+
+/*
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+	if (IS_IMMEDIATE(nr)) {
+		__asm__ volatile("lock orb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((uint8_t)CONST_MASK(nr))
+			: "memory");
+	} else {
+		__asm__ volatile("lock bts %1,%0"
+			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+	}
+}
+
+/*
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+inline void
+__set_bit(int nr, volatile unsigned long *addr)
+{
+	__asm__ volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+}
+
+/*
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+inline void
+clear_bit(int nr, volatile unsigned long *addr)
+{
+	if (IS_IMMEDIATE(nr)) {
+		__asm__ volatile("lock andb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((uint8_t)~CONST_MASK(nr)));
+	} else {
+		__asm__ volatile("lock btr %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
+}
+
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+inline void
+clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+{
+#ifdef XXX
+	barrier();
+#else
+	XXX_KVM_SYNC_PROBE;
+#endif
+	clear_bit(nr, addr);
+}
+
+inline void
+__clear_bit(int nr, volatile unsigned long *addr)
+{
+	__asm__ volatile("btr %1,%0" : ADDR : "Ir" (nr));
+}
+
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+inline void
+__clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+{
+#ifdef XXX
+	barrier();
+#else
+	XXX_KVM_SYNC_PROBE;
+#endif
+	__clear_bit(nr, addr);
+}
+
+#define	smp_mb__before_clear_bit()	barrier()
+#define	smp_mb__after_clear_bit()	barrier()
+
+/*
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+inline void
+__change_bit(int nr, volatile unsigned long *addr)
+{
+	__asm__ volatile("btc %1,%0" : ADDR : "Ir" (nr));
+}
+
+/*
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+inline void
+change_bit(int nr, volatile unsigned long *addr)
+{
+	if (IS_IMMEDIATE(nr)) {
+		__asm__ volatile("lock xorb %1,%0"
+			: CONST_MASK_ADDR(nr, addr)
+			: "iq" ((uint8_t)CONST_MASK(nr)));
+	} else {
+		__asm__ volatile("lock btc %1,%0"
+			: BITOP_ADDR(addr)
+			: "Ir" (nr));
+	}
+}
+
+/*
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+inline int
+test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("lock bts %2,%1\n\t"
+	    "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
+
+/*
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86.
+ */
+inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+{
+	return (test_and_set_bit(nr, addr));
+}
+
+/*
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+inline int
+__test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__("bts %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
+	return (oldbit);
+}
+
+/*
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+inline int
+test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("lock btr %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
+
+/*
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+inline int
+__test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("btr %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr));
+
+	return (oldbit);
+}
+
+/* WARNING: non atomic and it can be reordered! */
+inline int
+__test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("btc %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR
+	    : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
+
+/*
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+inline int
+test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("lock btc %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
+
+	return (oldbit);
+}
+
+inline int
+constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+	return (((1UL << (nr % 64)) &
+	    (((unsigned long *)addr)[nr / 64])) != 0);
+}
+
+inline int
+variable_test_bit(int nr, volatile const unsigned long *addr)
+{
+	int oldbit;
+
+	__asm__ volatile("bt %2,%1\n\t"
+	    "sbb %0,%0"
+	    : "=r" (oldbit)
+	    : "m" (*(unsigned long *)addr), "Ir" (nr));
+
+	return (oldbit);
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/*
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+int test_bit(int nr, const volatile unsigned long *addr);
+#endif
+
+#define	test_bit(nr, addr)			\
+	(__builtin_constant_p((nr))		\
+	? constant_test_bit((nr), (addr))	\
+	: variable_test_bit((nr), (addr)))
+
+/*
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+inline unsigned long
+__ffs(unsigned long word)
+{
+	__asm__("bsf %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
+	return (word);
+}
+
+/*
+ * ffz - find first zero bit in word
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+inline unsigned long
+ffz(unsigned long word)
+{
+	__asm__("bsf %1,%0"
+	    : "=r" (word)
+	    : "r" (~word));
+	return (word);
+}
+
+/*
+ * __fls: find last set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+inline unsigned long
+__fls(unsigned long word)
+{
+	__asm__("bsr %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
+	return (word);
+}
@@ -8,17 +8,6 @@
  * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
-#ifdef XXX
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-#include <asm/alternative.h>
-#endif /*XXX*/
-
-#include "kvm_impl.h"
-
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, 8 * sizeof(long))
@@ -38,8 +27,6 @@
 #define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
 #endif
 
-#define ADDR	BITOP_ADDR(addr)
-
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
@@ -63,19 +50,7 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void
-set_bit(unsigned int nr, volatile unsigned long *addr)
-{
-	if (IS_IMMEDIATE(nr)) {
-		__asm__ volatile("lock orb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((uint8_t)CONST_MASK(nr))
-			: "memory");
-	} else {
-		__asm__ volatile("lock bts %1,%0"
-			: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
-	}
-}
+inline void set_bit(unsigned int nr, volatile unsigned long *addr);
 
 /**
  * __set_bit - Set a bit in memory
@@ -86,10 +61,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
-}
+inline void __set_bit(int nr, volatile unsigned long *addr);
 
 /**
  * clear_bit - Clears a bit in memory
@@ -101,19 +73,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void
-clear_bit(int nr, volatile unsigned long *addr)
-{
-	if (IS_IMMEDIATE(nr)) {
-		__asm__ volatile("lock andb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((uint8_t)~CONST_MASK(nr)));
-	} else {
-		__asm__ volatile("lock btr %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
-	}
-}
+inline void clear_bit(int nr, volatile unsigned long *addr);
 
 /*
  * clear_bit_unlock - Clears a bit in memory
@@ -123,21 +83,9 @@ clear_bit(int nr, volatile unsigned long *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
-{
-#ifdef XXX
-	barrier();
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
-	clear_bit(nr, addr);
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ volatile("btr %1,%0" : ADDR : "Ir" (nr));
-}
+inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr);
+
+inline void __clear_bit(int nr, volatile unsigned long *addr);
 
 /*
  * __clear_bit_unlock - Clears a bit in memory
@@ -151,15 +99,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
-{
-#ifdef XXX
-	barrier();
-#else
-	XXX_KVM_SYNC_PROBE;
-#endif
-	__clear_bit(nr, addr);
-}
+inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr);
 
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
@@ -173,10 +113,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	__asm__ volatile("btc %1,%0" : ADDR : "Ir" (nr));
-}
+inline void __change_bit(int nr, volatile unsigned long *addr);
 
 /**
  * change_bit - Toggle a bit in memory
@@ -187,18 +124,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(int nr, volatile unsigned long *addr)
-{
-	if (IS_IMMEDIATE(nr)) {
-		__asm__ volatile("lock xorb %1,%0"
-			: CONST_MASK_ADDR(nr, addr)
-			: "iq" ((uint8_t)CONST_MASK(nr)));
-	} else {
-		__asm__ volatile("lock btc %1,%0"
-			: BITOP_ADDR(addr)
-			: "Ir" (nr));
-	}
-}
+inline void change_bit(int nr, volatile unsigned long *addr);
 
 /**
  * test_and_set_bit - Set a bit and return its old value
@@ -208,15 +134,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
 */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("lock bts %2,%1\n\t"
-	    "sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-	return oldbit;
-}
+inline int test_and_set_bit(int nr, volatile unsigned long *addr);
 
 /**
  * test_and_set_bit_lock - Set a bit and return its old value for lock
@@ -225,11 +143,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int
-test_and_set_bit_lock(int nr, volatile unsigned long *addr)
-{
-	return test_and_set_bit(nr, addr);
-}
+inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr);
 
 /**
  * __test_and_set_bit - Set a bit and return its old value
@@ -240,16 +154,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__("bts %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr));
-	return oldbit;
-}
+inline int __test_and_set_bit(int nr, volatile unsigned long *addr);
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
@@ -259,16 +164,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("lock btr %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-	return oldbit;
-}
+inline int test_and_clear_bit(int nr, volatile unsigned long *addr);
 
 /**
  * __test_and_clear_bit - Clear a bit and return its old value
@@ -279,29 +175,10 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail. You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("btr %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr));
-	return oldbit;
-}
+inline int __test_and_clear_bit(int nr, volatile unsigned long *addr);
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("btc %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR
-	    : "Ir" (nr) : "memory");
-
-	return oldbit;
-}
+inline int __test_and_change_bit(int nr, volatile unsigned long *addr);
 
 /**
  * test_and_change_bit - Change a bit and return its old value
@@ -311,34 +188,11 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("lock btc %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
-
-	return oldbit;
-}
+inline int test_and_change_bit(int nr, volatile unsigned long *addr);
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr % 64)) &
-	    (((unsigned long *)addr)[nr / 64])) != 0;
-}
+inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr);
 
-static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
-{
-	int oldbit;
-
-	__asm__ volatile("bt %2,%1\n\t"
-	    "sbb %0,%0"
-	    : "=r" (oldbit)
-	    : "m" (*(unsigned long *)addr), "Ir" (nr));
-
-	return oldbit;
-}
+inline int variable_test_bit(int nr, volatile const unsigned long *addr);
 
 #if 0 /* Fool kernel-doc since it doesn't do macros yet */
 /**
@@ -346,7 +200,7 @@ static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile unsigned long *addr);
+int test_bit(int nr, const volatile unsigned long *addr);
 #endif
 
 #define test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	? constant_test_bit((nr), (addr))	\
	: variable_test_bit((nr), (addr)))
@@ -360,13 +214,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
-{
-	__asm__("bsf %1,%0"
-	    : "=r" (word)
-	    : "rm" (word));
-	return word;
-}
+inline unsigned long __ffs(unsigned long word);
 
 /**
  * ffz - find first zero bit in word
@@ -374,13 +222,7 @@ static inline unsigned long __ffs(unsigned long word)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
-{
-	__asm__("bsf %1,%0"
-	    : "=r" (word)
-	    : "r" (~word));
-	return word;
-}
+inline unsigned long ffz(unsigned long word);
 
 /*
 * __fls: find last set bit in word
@@ -388,13 +230,7 @@ static inline unsigned long ffz(unsigned long word)
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
-{
-	__asm__("bsr %1,%0"
-	    : "=r" (word)
-	    : "rm" (word));
-	return word;
-}
+inline unsigned long __fls(unsigned long word);
 
 #ifdef __KERNEL__
 /**
@@ -408,21 +244,7 @@ static inline unsigned long __fls(unsigned long word)
  * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
  */
-static inline int ffs(int x)
-{
-	int r;
-#ifdef CONFIG_X86_CMOV
-	__asm__("bsfl %1,%0\n\t"
-	    "cmovzl %2,%0"
-	    : "=r" (r) : "rm" (x), "r" (-1));
-#else
-	__asm__("bsfl %1,%0\n\t"
-	    "jnz 1f\n\t"
-	    "movl $-1,%0\n"
-	    "1:" : "=r" (r) : "rm" (x));
-#endif
-	return r + 1;
-}
+inline int ffs(int x);
 
 /**
  * fls - find last set bit in word
@@ -435,49 +257,8 @@ static inline int ffs(int x)
  * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
  */
-static inline int fls(int x)
-{
-	int r;
-#ifdef CONFIG_X86_CMOV
-	__asm__("bsrl %1,%0\n\t"
-	    "cmovzl %2,%0"
-	    : "=&r" (r) : "rm" (x), "rm" (-1));
-#else
-	__asm__("bsrl %1,%0\n\t"
-	    "jnz 1f\n\t"
-	    "movl $-1,%0\n"
-	    "1:" : "=r" (r) : "rm" (x));
-#endif
-	return r + 1;
-}
-#endif /* __KERNEL__ */
-
-#undef ADDR
-
-#ifdef __KERNEL__
-
-#include <__asm__-generic/bitops/sched.h>
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include <__asm__-generic/bitops/hweight.h>
+inline int fls(int x);
 
 #endif /* __KERNEL__ */
 
-#ifdef XXX
-#include <__asm__-generic/bitops/fls64.h>
-#endif /*XXX*/
-
-#ifdef __KERNEL__
-
-#include <__asm__-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
-	test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
-	test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
 #endif /* _ASM_X86_BITOPS_H */
diff --git a/kvm_cache_regs.c b/kvm_cache_regs.c
index cc34963..a73fd60 100644
--- a/kvm_cache_regs.c
+++ b/kvm_cache_regs.c
@@ -5,7 +5,7 @@
 /*
  * XXX Need proper header files!
  */
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "msr.h"
 #include "irqflags.h"
 #include "kvm_host.h"
diff --git a/kvm_ioapic.c b/kvm_ioapic.c
index 4628f56..ead53e6 100644
--- a/kvm_ioapic.c
+++ b/kvm_ioapic.c
@@ -31,7 +31,7 @@
 /*
  * XXX Need proper header files!
  */
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "msr.h"
 #include "irqflags.h"
 #include "kvm_host.h"
diff --git a/kvm_irq_comm.c b/kvm_irq_comm.c
index aa2ef74..db9ea50 100644
--- a/kvm_irq_comm.c
+++ b/kvm_irq_comm.c
@@ -24,7 +24,7 @@
 /*
  * XXX Need proper header files!
  */
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "msr.h"
 #include "irqflags.h"
 #include "kvm_msidef.h"
diff --git a/kvm_lapic.c b/kvm_lapic.c
index 986c59d..a4cdbfd 100644
--- a/kvm_lapic.c
+++ b/kvm_lapic.c
@@ -24,7 +24,7 @@
 /*
  * XXX Need proper header files!
  */
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "kvm_cpuid.h"
 #include "msr.h"
 #include "irqflags.h"
@@ -1,7 +1,7 @@
 #include <sys/sysmacros.h>
 
 #include "processor-flags.h"
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "msr.h"
 #include "irqflags.h"
 #include "kvm_host.h"
@@ -23,7 +23,7 @@
 /*
  * XXX Need proper header files!
  */
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "processor-flags.h"
 #include "msr.h"
 #include "kvm_cpuid.h"
@@ -24,7 +24,7 @@
 #include <sys/int_limits.h>
 #include <sys/x_call.h>
 
-#include "bitops.h"
+#include "kvm_bitops.h"
 #include "msr-index.h"
 #include "msr.h"
 #include "vmx.h"
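The hunks above move the bit-operation implementations out of line into kvm_bitops.c and leave only declarations in kvm_bitops.h, with callers switched from "bitops.h" to "kvm_bitops.h". For orientation, here is a small usage sketch of that interface; it is illustrative only, not part of the commit, and the bitmap name and size below are made up. BITS_TO_LONGS() is the helper defined in kvm_bitops.h.

/*
 * Hypothetical caller of the kvm_bitops.h interface (not part of this
 * change).  EXAMPLE_NBITS and example_map are invented for illustration.
 */
#include "kvm_bitops.h"

#define	EXAMPLE_NBITS	64

static unsigned long example_map[BITS_TO_LONGS(EXAMPLE_NBITS)];

static void
example_usage(void)
{
	set_bit(3, example_map);			/* atomic, locked set */
	__set_bit(5, example_map);			/* non-atomic variant */

	if (test_bit(5, example_map))			/* constant nr resolves to constant_test_bit() */
		(void) __ffs(example_map[0]);		/* index of lowest set bit */

	if (test_and_clear_bit(3, example_map))		/* returns the previous value */
		clear_bit_unlock(5, example_map);	/* release-style clear */
}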