summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Makefile12
-rw-r--r--kvm.c2
-rw-r--r--kvm_coalesced_mmio.c222
-rw-r--r--kvm_coalesced_mmio.h (renamed from coalesced_mmio.h)25
-rw-r--r--kvm_i8259.c3
-rw-r--r--kvm_x86.c200
6 files changed, 243 insertions, 221 deletions
diff --git a/Makefile b/Makefile
index 1c0e578..e9994c8 100644
--- a/Makefile
+++ b/Makefile
@@ -16,7 +16,7 @@ CSTYLE=$(KERNEL_SOURCE)/usr/src/tools/scripts/cstyle
all: kvm kvm.so
-kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c
+kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c kvm_irq.c kvm_i8254.c kvm_lapic.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_x86.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_emulate.c
@@ -29,6 +29,7 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_ioapic.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_vmx.c
$(CC) $(CFLAGS) $(INCLUDEDIR) kvm_i8259.c
+ $(CC) $(CFLAGS) $(INCLUDEDIR) kvm_coalesced_mmio.c
$(CTFCONVERT) -i -L VERSION kvm.o
$(CTFCONVERT) -i -L VERSION kvm_x86.o
$(CTFCONVERT) -i -L VERSION kvm_emulate.o
@@ -41,8 +42,9 @@ kvm: kvm.c kvm_x86.c kvm_emulate.c kvm.h kvm_x86host.h msr.h bitops.h kvm_subr.c
$(CTFCONVERT) -i -L VERSION kvm_ioapic.o
$(CTFCONVERT) -i -L VERSION kvm_vmx.o
$(CTFCONVERT) -i -L VERSION kvm_i8259.o
- $(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o
- $(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o
+	$(CTFCONVERT) -i -L VERSION kvm_coalesced_mmio.o
+ $(LD) -r -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o
+ $(CTFMERGE) -L VERSION -o kvm kvm.o kvm_x86.o kvm_emulate.o kvm_subr.o kvm_irq.o kvm_i8254.o kvm_lapic.o kvm_mmu.o kvm_iodev.o kvm_ioapic.o kvm_vmx.o kvm_i8259.o kvm_coalesced_mmio.o
kvm.so: kvm_mdb.c
gcc -m64 -shared \
@@ -55,8 +57,8 @@ install: kvm
@pfexec cp kvm.conf /usr/kernel/drv
check:
- @$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_subr.c kvm_ioapic.c kvm_vmx.c
- @./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c
+ @$(CSTYLE) kvm.c kvm_mdb.c kvm_emulate.c kvm_x86.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_subr.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c
+ @./tools/xxxcheck kvm_x86.c kvm.c kvm_irq.c kvm_lapic.c kvm_i8254.c kvm_mmu.c kvm_iodev.c kvm_ioapic.c kvm_vmx.c kvm_i8259.c kvm_coalesced_mmio.c
load: install
@echo "==> Loading kvm module"
diff --git a/kvm.c b/kvm.c
index 0785d22..60cbd28 100644
--- a/kvm.c
+++ b/kvm.c
@@ -50,7 +50,7 @@
#include "irq.h"
#include "tss.h"
#include "kvm_ioapic.h"
-#include "coalesced_mmio.h"
+#include "kvm_coalesced_mmio.h"
#include "kvm_mmu.h"
#undef DEBUG
diff --git a/kvm_coalesced_mmio.c b/kvm_coalesced_mmio.c
new file mode 100644
index 0000000..3635c49
--- /dev/null
+++ b/kvm_coalesced_mmio.c
@@ -0,0 +1,222 @@
+/*
+ * KVM coalesced MMIO
+ *
+ * Copyright (c) 2008 Bull S.A.S.
+ *
+ * Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ *
+ * Ported to illumos by Joyent.
+ * Copyright 2011 Joyent, Inc. All Rights Reserved.
+ */
+
+/*
+ * XXX Need proper header files!
+ */
+#include "msr.h"
+#include "irqflags.h"
+#include "kvm_host.h"
+#include "kvm_x86host.h"
+#include "kvm_iodev.h"
+#include "kvm.h"
+#include "apicdef.h"
+#include "kvm_ioapic.h"
+#include "irq.h"
+#include "kvm_iodev.h"
+#include "kvm_coalesced_mmio.h"
+
+static struct kvm_coalesced_mmio_dev *
+to_mmio(struct kvm_io_device *dev)
+{
+#ifdef XXX
+ return (container_of(dev, struct kvm_coalesced_mmio_dev, dev));
+#else
+ XXX_KVM_PROBE;
+ return ((struct kvm_coalesced_mmio_dev *)dev);
+#endif
+}
+
+static int
+coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len)
+{
+ struct kvm_coalesced_mmio_zone *zone;
+ struct kvm_coalesced_mmio_ring *ring;
+ unsigned avail;
+ int i;
+
+ /* Are we able to batch it ? */
+
+ /*
+ * last is the first free entry
+ * check if we don't meet the first used entry
+ * there is always one unused entry in the buffer
+ */
+ ring = dev->kvm->coalesced_mmio_ring;
+ avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
+ if (avail < KVM_MAX_VCPUS) {
+ /* full */
+ return (0);
+ }
+
+ /* is it in a batchable area ? */
+
+ for (i = 0; i < dev->nb_zones; i++) {
+ zone = &dev->zone[i];
+
+ /*
+ * (addr,len) is fully included in (zone->addr, zone->size)
+ */
+ if (zone->addr <= addr && addr + len <= zone->addr + zone->size)
+ return (1);
+ }
+ return (0);
+}
+
+static int
+coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr,
+ int len, const void *val)
+{
+ struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+ struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
+ if (!coalesced_mmio_in_range(dev, addr, len))
+ return (-EOPNOTSUPP);
+
+ mutex_enter(&dev->lock);
+
+ /* copy data in first free entry of the ring */
+
+ ring->coalesced_mmio[ring->last].phys_addr = addr;
+ ring->coalesced_mmio[ring->last].len = len;
+ memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+#ifdef XXX
+ smp_wmb();
+#else
+ XXX_KVM_SYNC_PROBE;
+#endif
+ ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+ mutex_exit(&dev->lock);
+ return (0);
+}
+
+/*
+ * We used to free the struct that contained us. We don't do that any more. It's
+ * just wrong in this case.
+ */
+static void
+coalesced_mmio_destructor(struct kvm_io_device *this)
+{
+
+}
+
+static const struct kvm_io_device_ops coalesced_mmio_ops = {
+ .write = coalesced_mmio_write,
+ .destructor = coalesced_mmio_destructor,
+};
+
+int
+kvm_coalesced_mmio_init(struct kvm *kvm)
+{
+ struct kvm_coalesced_mmio_dev *dev;
+ page_t *page;
+ int ret;
+
+ kvm->coalesced_mmio_ring =
+ ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP, &kvm->mmio_cookie);
+
+ ret = -ENOMEM;
+ dev = kmem_zalloc(sizeof (struct kvm_coalesced_mmio_dev), KM_SLEEP);
+ if (!dev)
+ goto out_free_page;
+ mutex_init(&dev->lock, NULL, MUTEX_DRIVER, 0);
+ kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+ dev->kvm = kvm;
+ kvm->coalesced_mmio_dev = dev;
+
+ mutex_enter(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ mutex_exit(&kvm->slots_lock);
+ if (ret < 0)
+ goto out_free_dev;
+
+ return (ret);
+
+out_free_dev:
+ kmem_free(dev, sizeof (struct kvm_coalesced_mmio_dev));
+out_free_page:
+#ifdef XXX
+ kmem_free(page, PAGESIZE);
+#else
+ XXX_KVM_PROBE;
+#endif
+ ddi_umem_free(kvm->mmio_cookie);
+ return (ret);
+}
+
+void
+kvm_coalesced_mmio_free(struct kvm *kvmp)
+{
+ struct kvm_coalesced_mmio_dev *dev = kvmp->coalesced_mmio_dev;
+ mutex_destroy(&dev->lock);
+ mutex_enter(&kvmp->slots_lock);
+ kvm_io_bus_unregister_dev(kvmp, KVM_MMIO_BUS, &dev->dev);
+ mutex_exit(&kvmp->slots_lock);
+ kvm_iodevice_destructor(&dev->dev);
+ kmem_free(dev, sizeof (struct kvm_coalesced_mmio_dev));
+ if (kvmp->coalesced_mmio_ring)
+ ddi_umem_free(kvmp->mmio_cookie);
+}
+
+int
+kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
+ struct kvm_coalesced_mmio_zone *zone)
+{
+ struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+
+ if (dev == NULL)
+ return (-EINVAL);
+
+ mutex_enter(&kvm->slots_lock);
+ if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
+ mutex_exit(&kvm->slots_lock);
+ return (-ENOBUFS);
+ }
+
+ bcopy(zone, &dev->zone[dev->nb_zones],
+ sizeof (struct kvm_coalesced_mmio_zone));
+ dev->nb_zones++;
+
+ mutex_exit(&kvm->slots_lock);
+ return (0);
+}
+
+int
+kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ struct kvm_coalesced_mmio_zone *zone)
+{
+ int i;
+ struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+ struct kvm_coalesced_mmio_zone *z;
+
+ if (dev == NULL)
+ return (-EINVAL);
+
+ mutex_enter(&kvm->slots_lock);
+
+ i = dev->nb_zones;
+ while (i) {
+ z = &dev->zone[i - 1];
+
+ /*
+ * Unregister all zones included in (zone->addr, zone->size)
+ */
+ if (zone->addr <= z->addr &&
+ z->addr + z->size <= zone->addr + zone->size) {
+ dev->nb_zones--;
+ *z = dev->zone[dev->nb_zones];
+ }
+ i--;
+ }
+
+ mutex_exit(&kvm->slots_lock);
+
+ return (0);
+}
diff --git a/coalesced_mmio.h b/kvm_coalesced_mmio.h
index fc2942f..58d7a31 100644
--- a/coalesced_mmio.h
+++ b/kvm_coalesced_mmio.h
@@ -10,6 +10,8 @@
*
*/
+/* XXX Linux doesn't define these structures here so why do we? */
+
typedef struct kvm_coalesced_mmio_zone {
uint64_t addr;
uint32_t size;
@@ -27,9 +29,6 @@ typedef struct kvm_coalesced_mmio_zone_ioc {
/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
-
-#ifdef _KERNEL
-
typedef struct kvm_coalesced_mmio_dev {
struct kvm_io_device dev;
struct kvm *kvm;
@@ -38,19 +37,17 @@ typedef struct kvm_coalesced_mmio_dev {
struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
} kvm_coalesced_mmio_dev_t;
-int kvm_coalesced_mmio_init(struct kvm *kvm);
-void kvm_coalesced_mmio_free(struct kvm *kvm);
-int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
-int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone);
-#endif /*_KERNEL*/
+extern int kvm_coalesced_mmio_init(struct kvm *);
+extern void kvm_coalesced_mmio_free(struct kvm *);
+extern int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *,
+ struct kvm_coalesced_mmio_zone *);
+extern int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *,
+ struct kvm_coalesced_mmio_zone *);
#else
-static int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
-static void kvm_coalesced_mmio_free(struct kvm *kvm) { }
+#error "CONFIG_KVM_MMIO must be supported"
-#endif
+#endif /* CONFIG_KVM_MMIO */
-#endif
+#endif /* __KVM_COALESCED_MMIO_H__ */
diff --git a/kvm_i8259.c b/kvm_i8259.c
index d3ea651..e6d5e55 100644
--- a/kvm_i8259.c
+++ b/kvm_i8259.c
@@ -26,7 +26,7 @@
* Port from Qemu.
*
* Ported from Linux to illumos by Joyent.
- * Copyright 2011 Joyent, Inc. All Rights Reserved.
+ * Copyright 2011 Joyent, Inc. All Rights Reserved.
*/
/* XXX Includes are always in bad shape, what a surprise! */
@@ -616,4 +616,3 @@ kvm_destroy_pic(struct kvm *kvm)
kmem_free(vpic, sizeof (struct kvm_pic));
}
}
-
diff --git a/kvm_x86.c b/kvm_x86.c
index 2ff1f67..f30f474 100644
--- a/kvm_x86.c
+++ b/kvm_x86.c
@@ -38,7 +38,7 @@
#define PER_CPU_DEF_ATTRIBUTES
#define PER_CPU_BASE_SECTION ".data"
#include "percpu-defs.h"
-#include "coalesced_mmio.h"
+#include "kvm_coalesced_mmio.h"
#include "kvm.h"
#include "kvm_ioapic.h"
#include "irq.h"
@@ -667,14 +667,6 @@ kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
kvm_mmu_destroy(vcpu);
}
-static int coalesced_mmio_write(struct kvm_io_device *this,
- gpa_t addr, int len, const void *val);
-static void coalesced_mmio_destructor(struct kvm_io_device *this);
-
-static const struct kvm_io_device_ops coalesced_mmio_ops = {
- .write = coalesced_mmio_write,
- .destructor = coalesced_mmio_destructor,
-};
int
kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
@@ -1514,52 +1506,6 @@ kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
return (kvm_set_memory_region(kvm, mem, user_alloc));
}
-static inline struct kvm_coalesced_mmio_dev *
-to_mmio(struct kvm_io_device *dev)
-{
-#ifdef XXX
- return (container_of(dev, struct kvm_coalesced_mmio_dev, dev));
-#else
- XXX_KVM_PROBE;
- return ((struct kvm_coalesced_mmio_dev *)dev);
-#endif
-}
-
-static int
-coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len)
-{
- struct kvm_coalesced_mmio_zone *zone;
- struct kvm_coalesced_mmio_ring *ring;
- unsigned avail;
- int i;
-
- /* Are we able to batch it ? */
-
- /*
- * last is the first free entry
- * check if we don't meet the first used entry
- * there is always one unused entry in the buffer
- */
- ring = dev->kvm->coalesced_mmio_ring;
- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
- if (avail < KVM_MAX_VCPUS) {
- /* full */
- return (0);
- }
-
- /* is it in a batchable area ? */
-
- for (i = 0; i < dev->nb_zones; i++) {
- zone = &dev->zone[i];
-
- /*
- * (addr,len) is fully included in (zone->addr, zone->size)
- */
- if (zone->addr <= addr && addr + len <= zone->addr + zone->size)
- return (1);
- }
- return (0);
-}
/* Caller must hold slots_lock. */
int
@@ -1630,150 +1576,6 @@ kvm_io_bus_unregister_dev(struct kvm *kvm,
return (r);
}
-static int
-coalesced_mmio_write(struct kvm_io_device *this, gpa_t addr,
- int len, const void *val)
-{
- struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
- struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
- if (!coalesced_mmio_in_range(dev, addr, len))
- return (-EOPNOTSUPP);
-
- mutex_enter(&dev->lock);
-
- /* copy data in first free entry of the ring */
-
- ring->coalesced_mmio[ring->last].phys_addr = addr;
- ring->coalesced_mmio[ring->last].len = len;
- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-#ifdef XXX
- smp_wmb();
-#else
- XXX_KVM_SYNC_PROBE;
-#endif
- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
- mutex_exit(&dev->lock);
- return (0);
-}
-
-/*
- * We used to free the struct that contained us. We don't do that any more. It's
- * just wrong in this case.
- */
-static void
-coalesced_mmio_destructor(struct kvm_io_device *this)
-{
-}
-
-int
-kvm_coalesced_mmio_init(struct kvm *kvm)
-{
- struct kvm_coalesced_mmio_dev *dev;
- page_t *page;
- int ret;
-
- kvm->coalesced_mmio_ring =
- ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP, &kvm->mmio_cookie);
-
- ret = -ENOMEM;
- dev = kmem_zalloc(sizeof (struct kvm_coalesced_mmio_dev), KM_SLEEP);
- if (!dev)
- goto out_free_page;
- mutex_init(&dev->lock, NULL, MUTEX_DRIVER, 0);
- kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
- dev->kvm = kvm;
- kvm->coalesced_mmio_dev = dev;
-
- mutex_enter(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
- mutex_exit(&kvm->slots_lock);
- if (ret < 0)
- goto out_free_dev;
-
- return (ret);
-
-out_free_dev:
- kmem_free(dev, sizeof (struct kvm_coalesced_mmio_dev));
-out_free_page:
-#ifdef XXX
- kmem_free(page, PAGESIZE);
-#else
- XXX_KVM_PROBE;
-#endif
- ddi_umem_free(kvm->mmio_cookie);
- return (ret);
-}
-
-void
-kvm_coalesced_mmio_free(struct kvm *kvmp)
-{
- struct kvm_coalesced_mmio_dev *dev = kvmp->coalesced_mmio_dev;
- mutex_destroy(&dev->lock);
- mutex_enter(&kvmp->slots_lock);
- kvm_io_bus_unregister_dev(kvmp, KVM_MMIO_BUS, &dev->dev);
- mutex_exit(&kvmp->slots_lock);
- kvm_iodevice_destructor(&dev->dev);
- kmem_free(dev, sizeof (struct kvm_coalesced_mmio_dev));
- if (kvmp->coalesced_mmio_ring)
- ddi_umem_free(kvmp->mmio_cookie);
-}
-
-int
-kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone)
-{
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-
- if (dev == NULL)
- return (-EINVAL);
-
- mutex_enter(&kvm->slots_lock);
- if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- mutex_exit(&kvm->slots_lock);
- return (-ENOBUFS);
- }
-
- bcopy(zone, &dev->zone[dev->nb_zones],
- sizeof (struct kvm_coalesced_mmio_zone));
- dev->nb_zones++;
-
- mutex_exit(&kvm->slots_lock);
- return (0);
-}
-
-int
-kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
- struct kvm_coalesced_mmio_zone *zone)
-{
- int i;
- struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
- struct kvm_coalesced_mmio_zone *z;
-
- if (dev == NULL)
- return (-EINVAL);
-
- mutex_enter(&kvm->slots_lock);
-
- i = dev->nb_zones;
- while (i) {
- z = &dev->zone[i - 1];
-
- /*
- * Unregister all zones included in (zone->addr, zone->size)
- */
- if (zone->addr <= z->addr &&
- z->addr + z->size <= zone->addr + zone->size) {
- dev->nb_zones--;
- *z = dev->zone[dev->nb_zones];
- }
- i--;
- }
-
- mutex_exit(&kvm->slots_lock);
-
- return (0);
-}
-
long
kvm_vm_ioctl(struct kvm *kvmp, unsigned int ioctl, unsigned long arg, int mode)
{