summaryrefslogtreecommitdiff
path: root/usr/src
diff options
context:
space:
mode:
authorPatrick Mooney <pmooney@pfmooney.com>2022-01-19 03:14:37 +0000
committerPatrick Mooney <pmooney@oxide.computer>2022-01-24 21:03:34 +0000
commitcc7a5a3be88241742ab4e0d7a14a0291f4e32183 (patch)
tree464ac032d45251b3e685b51a418270f85b256252 /usr/src
parent2b91c4a58ba8e370434f09715e0270ff57ca30e3 (diff)
downloadillumos-gate-cc7a5a3be88241742ab4e0d7a14a0291f4e32183.tar.gz
14423 bhyve vm can wedge on shutdown
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Andy Fiddaman <andy@omnios.org>
Reviewed by: Mike Zeller <mike.zeller@joyent.com>
Reviewed by: Vitaliy Gusev <gusev.vitaliy@gmail.com>
Approved by: Gordon Ross <gordon.w.ross@gmail.com>
Diffstat (limited to 'usr/src')
-rw-r--r--usr/src/uts/i86pc/io/vmm/vmm_vm.c10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/usr/src/uts/i86pc/io/vmm/vmm_vm.c b/usr/src/uts/i86pc/io/vmm/vmm_vm.c
index e37488900b..c87c8a62d3 100644
--- a/usr/src/uts/i86pc/io/vmm/vmm_vm.c
+++ b/usr/src/uts/i86pc/io/vmm/vmm_vm.c
@@ -208,8 +208,6 @@ struct vm_page {
int vmp_prot;
};
-#define VMC_IS_ACTIVE(vmc) (((vmc)->vmc_state & VCS_ACTIVE) != 0)
-
static vmspace_mapping_t *vm_mapping_find(vmspace_t *, uintptr_t, size_t);
static void vmspace_hold_enter(vmspace_t *);
static void vmspace_hold_exit(vmspace_t *, bool);
@@ -300,8 +298,12 @@ vmspace_track_dirty(vmspace_t *vms, uint64_t gpa, size_t len, uint8_t *bitmap)
{
/*
* Accumulate dirty bits into the given bit vector. Note that this
- * races both against hardware writes from running VCPUs and
+ * races both against hardware writes from running vCPUs and
* reflections from userspace.
+ *
+ * Called from a userspace-visible ioctl, this depends on the VM
+ * instance being read-locked to prevent vmspace_map/vmspace_unmap
+ * operations from changing the page tables during the walk.
*/
for (size_t offset = 0; offset < len; offset += PAGESIZE) {
bool bit = false;
@@ -861,6 +863,7 @@ vmc_activate(vm_client_t *vmc)
mutex_enter(&vmc->vmc_lock);
VERIFY0(vmc->vmc_state & VCS_ACTIVE);
if ((vmc->vmc_state & VCS_ORPHANED) != 0) {
+ mutex_exit(&vmc->vmc_lock);
return (ENXIO);
}
while ((vmc->vmc_state & VCS_HOLD) != 0) {
@@ -974,6 +977,7 @@ vmc_space_release(vm_client_t *vmc, bool kick_on_cpu)
* VMC_HOLD must be done atomically here.
*/
atomic_and_uint(&vmc->vmc_state, ~VCS_HOLD);
+ cv_broadcast(&vmc->vmc_cv);
mutex_exit(&vmc->vmc_lock);
}