Diffstat (limited to 'usr/src/cmd/bhyve/virtio.c')
 usr/src/cmd/bhyve/virtio.c | 68
 1 file changed, 54 insertions(+), 14 deletions(-)
diff --git a/usr/src/cmd/bhyve/virtio.c b/usr/src/cmd/bhyve/virtio.c
index c3b11dc439..47a3ed29ba 100644
--- a/usr/src/cmd/bhyve/virtio.c
+++ b/usr/src/cmd/bhyve/virtio.c
@@ -1,6 +1,9 @@
/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
* Copyright (c) 2013 Chris Torek <torek @ torek net>
* All rights reserved.
+ * Copyright (c) 2019 Joyent, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,11 +28,13 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/usr.sbin/bhyve/virtio.c 270326 2014-08-22 13:01:22Z tychon $");
+__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/uio.h>
+#include <machine/atomic.h>
+
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
@@ -49,7 +54,7 @@ __FBSDID("$FreeBSD: head/usr.sbin/bhyve/virtio.c 270326 2014-08-22 13:01:22Z tyc
* front of virtio-based device softc" constraint, let's use
* this to convert.
*/
-#define DEV_SOFTC(vs) ((void *)(vs))
+#define DEV_SOFTC(vs) ((void *)(vs))
/*
* Link a virtio_softc to its constants, the device softc, and
@@ -97,6 +102,7 @@ vi_reset_dev(struct virtio_softc *vs)
for (vq = vs->vs_queues, i = 0; i < nvq; vq++, i++) {
vq->vq_flags = 0;
vq->vq_last_avail = 0;
+ vq->vq_save_used = 0;
vq->vq_pfn = 0;
vq->vq_msix_idx = VIRTIO_MSI_NO_VECTOR;
}
@@ -147,8 +153,13 @@ vi_intr_init(struct virtio_softc *vs, int barnum, int use_msix)
return (1);
} else
vs->vs_flags &= ~VIRTIO_USE_MSIX;
+
/* Only 1 MSI vector for bhyve */
pci_emul_add_msicap(vs->vs_pi, 1);
+
+ /* Legacy interrupts are mandatory for virtio devices */
+ pci_lintr_request(vs->vs_pi);
+
return (0);
}
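
For context, this is roughly how a device model's init routine reaches vi_intr_init(); with the change above, the MSI fallback path now also has a legacy INTx pin requested for it. A sketch only: the vtdemo_ names and softc layout are hypothetical, while vi_softc_linkup(), vi_intr_init(), vi_set_io_bar() and fbsdrun_virtio_msix() are existing bhyve interfaces.

	static int
	vtdemo_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
	{
		struct vtdemo_softc *sc;

		sc = calloc(1, sizeof (struct vtdemo_softc));

		/* Link the generic virtio state to this device's softc and queue. */
		vi_softc_linkup(&sc->vsc_vs, &vtdemo_vi_consts, sc, pi, &sc->vsc_vq);

		/* BAR 1 for MSI-X tables; otherwise fall back to MSI plus INTx. */
		if (vi_intr_init(&sc->vsc_vs, 1, fbsdrun_virtio_msix()))
			return (1);

		vi_set_io_bar(&sc->vsc_vs, 0);	/* legacy config registers in BAR 0 */
		return (0);
	}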
@@ -188,6 +199,7 @@ vi_vq_init(struct virtio_softc *vs, uint32_t pfn)
/* Mark queue as allocated, and start at 0 when we use it. */
vq->vq_flags = VQ_ALLOC;
vq->vq_last_avail = 0;
+ vq->vq_save_used = 0;
}
/*
@@ -247,12 +259,12 @@ _vq_record(int i, volatile struct virtio_desc *vd, struct vmctx *ctx,
* that vq_has_descs() does one).
*/
int
-vq_getchain(struct vqueue_info *vq,
+vq_getchain(struct vqueue_info *vq, uint16_t *pidx,
struct iovec *iov, int n_iov, uint16_t *flags)
{
int i;
u_int ndesc, n_indir;
- u_int idx, head, next;
+ u_int idx, next;
volatile struct virtio_desc *vdir, *vindir, *vp;
struct vmctx *ctx;
struct virtio_softc *vs;
@@ -295,8 +307,8 @@ vq_getchain(struct vqueue_info *vq,
* index, but we just abort if the count gets excessive.
*/
ctx = vs->vs_pi->pi_vmctx;
- head = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
- next = head;
+ *pidx = next = vq->vq_avail->va_ring[idx & (vq->vq_qsize - 1)];
+ vq->vq_last_avail++;
for (i = 0; i < VQ_MAX_DESCRIPTORS; next = vdir->vd_next) {
if (next >= vq->vq_qsize) {
fprintf(stderr,
@@ -309,7 +321,7 @@ vq_getchain(struct vqueue_info *vq,
if ((vdir->vd_flags & VRING_DESC_F_INDIRECT) == 0) {
_vq_record(i, vdir, ctx, iov, n_iov, flags);
i++;
- } else if ((vs->vs_negotiated_caps &
+ } else if ((vs->vs_vc->vc_hv_caps &
VIRTIO_RING_F_INDIRECT_DESC) == 0) {
fprintf(stderr,
"%s: descriptor has forbidden INDIRECT flag, "
@@ -370,16 +382,29 @@ loopy:
}
/*
- * Return the currently-first request chain to the guest, setting
- * its I/O length to the provided value.
+ * Return the currently-first request chain back to the available queue.
*
* (This chain is the one you handled when you called vq_getchain()
* and used its positive return value.)
*/
void
-vq_relchain(struct vqueue_info *vq, uint32_t iolen)
+vq_retchain(struct vqueue_info *vq)
{
- uint16_t head, uidx, mask;
+
+ vq->vq_last_avail--;
+}
+
+/*
+ * Return specified request chain to the guest, setting its I/O length
+ * to the provided value.
+ *
+ * (This chain is the one you handled when you called vq_getchain()
+ * and used its positive return value.)
+ */
+void
+vq_relchain(struct vqueue_info *vq, uint16_t idx, uint32_t iolen)
+{
+ uint16_t uidx, mask;
volatile struct vring_used *vuh;
volatile struct virtio_used *vue;
@@ -395,12 +420,17 @@ vq_relchain(struct vqueue_info *vq, uint32_t iolen)
*/
mask = vq->vq_qsize - 1;
vuh = vq->vq_used;
- head = vq->vq_avail->va_ring[vq->vq_last_avail++ & mask];
uidx = vuh->vu_idx;
vue = &vuh->vu_ring[uidx++ & mask];
- vue->vu_idx = head; /* ie, vue->id = head */
+ vue->vu_idx = idx;
vue->vu_tlen = iolen;
+
+ /*
+ * Ensure the used descriptor is visible before updating the index.
+ * This is necessary on ISAs with memory ordering less strict than x86.
+ */
+ atomic_thread_fence_rel();
vuh->vu_idx = uidx;
}
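
Taken together, the vq_getchain()/vq_retchain()/vq_relchain() changes above move head-index bookkeeping to the caller: vq_getchain() now reports the head descriptor through *pidx, and that same index must be passed back to vq_relchain(). A minimal sketch of the resulting device-side pattern, assuming a hypothetical vtdemo queue-notify handler (only the vq_* entry points are from this file):

	/* Hypothetical queue-notify handler illustrating the post-change API. */
	static void
	vtdemo_notify(void *vsc, struct vqueue_info *vq)
	{
		struct iovec iov[8];
		uint16_t idx;
		int n;

		while (vq_has_descs(vq)) {
			/* vq_getchain() now reports the head descriptor in idx. */
			n = vq_getchain(vq, &idx, iov, 8, NULL);
			if (n <= 0)
				break;

			/* ... process the n buffers described by iov[0..n-1] ... */

			/* Hand the same head index back once the chain is done. */
			vq_relchain(vq, idx, 0 /* iolen */);
		}
		vq_endchains(vq, 1);	/* interrupt the guest if it asked for one */
	}

A device that cannot complete a chain immediately can instead call vq_retchain(vq) to push the head back onto the available ring and retry later.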
@@ -436,8 +466,15 @@ vq_endchains(struct vqueue_info *vq, int used_all_avail)
* entire avail was processed, we need to interrupt always.
*/
vs = vq->vq_vs;
- new_idx = vq->vq_used->vu_idx;
old_idx = vq->vq_save_used;
+ vq->vq_save_used = new_idx = vq->vq_used->vu_idx;
+
+ /*
+ * Use full memory barrier between vu_idx store from preceding
+ * vq_relchain() call and the loads from VQ_USED_EVENT_IDX() or
+ * va_flags below.
+ */
+ atomic_thread_fence_seq_cst();
if (used_all_avail &&
(vs->vs_negotiated_caps & VIRTIO_F_NOTIFY_ON_EMPTY))
intr = 1;
@@ -698,6 +735,9 @@ bad:
switch (offset) {
case VTCFG_R_GUESTCAP:
vs->vs_negotiated_caps = value & vc->vc_hv_caps;
+ if (vc->vc_apply_features)
+ (*vc->vc_apply_features)(DEV_SOFTC(vs),
+ vs->vs_negotiated_caps);
break;
case VTCFG_R_PFN:
if (vs->vs_curq >= vc->vc_nvq)
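The VTCFG_R_GUESTCAP change above gives device models a hook that fires once feature negotiation settles. A minimal sketch of such a callback, assuming a hypothetical vtdemo device that simply records the negotiated bits (the vc_apply_features member is the one introduced here; the vtdemo_ names are illustrative):

	/* Hypothetical callback wired up via the new vc_apply_features member. */
	static void
	vtdemo_apply_features(void *vsc, uint64_t negotiated_features)
	{
		struct vtdemo_softc *sc = vsc;

		/* Remember what the guest accepted, e.g. indirect descriptor support. */
		sc->vsc_features = negotiated_features;
	}

A device points the vc_apply_features member of its struct virtio_consts at such a function; devices that do not care can leave the member NULL, since the register-write path above checks it before calling.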