Diffstat (limited to 'usr/src/uts')
 -rw-r--r--  usr/src/uts/common/fs/zfs/dsl_scan.c         |   8
 -rw-r--r--  usr/src/uts/common/fs/zfs/range_tree.c       |  23
 -rw-r--r--  usr/src/uts/common/fs/zfs/sys/range_tree.h   |   3
 -rw-r--r--  usr/src/uts/common/fs/zfs/sys/vdev_removal.h |   3
 -rw-r--r--  usr/src/uts/common/fs/zfs/vdev.c             |   2
 -rw-r--r--  usr/src/uts/common/fs/zfs/vdev_label.c       |  53
 -rw-r--r--  usr/src/uts/common/fs/zfs/vdev_removal.c     | 208
 -rw-r--r--  usr/src/uts/common/fs/zfs/zcp.c              |   9
 -rw-r--r--  usr/src/uts/common/io/ldterm.c               |   3
 -rw-r--r--  usr/src/uts/common/io/ptem.c                 |   3
 -rw-r--r--  usr/src/uts/common/io/ttcompat.c             |   3
 -rw-r--r--  usr/src/uts/common/io/usb/clients/hid/hid.c  |   7
 -rw-r--r--  usr/src/uts/common/os/streamio.c             |  25
 -rw-r--r--  usr/src/uts/common/os/strsubr.c              |  18
 -rw-r--r--  usr/src/uts/common/sys/conf.h                |   3
 -rw-r--r--  usr/src/uts/common/sys/stream.h              |   3
 -rw-r--r--  usr/src/uts/i86pc/io/apix/apix.c             |   2
 17 files changed, 312 insertions(+), 64 deletions(-)
diff --git a/usr/src/uts/common/fs/zfs/dsl_scan.c b/usr/src/uts/common/fs/zfs/dsl_scan.c
index cf93849a62..6fd97d9bfc 100644
--- a/usr/src/uts/common/fs/zfs/dsl_scan.c
+++ b/usr/src/uts/common/fs/zfs/dsl_scan.c
@@ -1951,14 +1951,14 @@ dsl_scan_scrub_cb(dsl_pool_t *dp,
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
int scan_delay = 0;
+ count_block(dp->dp_blkstats, bp);
+
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg)
return (0);
- count_block(dp->dp_blkstats, bp);
-
- if (BP_IS_EMBEDDED(bp))
- return (0);
+ /* Embedded BPs have phys_birth==0, so we reject them above. */
+ ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
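Two things fall out of this reordering: count_block() now records every block the scrub callback visits in dp_blkstats, not just those inside the scan's txg range, and the embedded-BP early return becomes an assertion because the txg bounds already exclude them. A short sketch of that invariant, using the names from the hunk (commentary, not commit code):

	/*
	 * Embedded bps store their payload inside the bp itself and
	 * have no physical birth txg, so phys_birth == 0 for them.
	 * scn_min_txg is never negative, which makes
	 *	phys_birth <= scn->scn_phys.scn_min_txg
	 * always true for an embedded bp -- the function has already
	 * returned by the time the ASSERT runs.
	 */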
diff --git a/usr/src/uts/common/fs/zfs/range_tree.c b/usr/src/uts/common/fs/zfs/range_tree.c
index 7c6ce90e18..a0b9e5f9a1 100644
--- a/usr/src/uts/common/fs/zfs/range_tree.c
+++ b/usr/src/uts/common/fs/zfs/range_tree.c
@@ -298,7 +298,6 @@ range_tree_remove(void *arg, uint64_t start, uint64_t size)
static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
- avl_index_t where;
range_seg_t rsearch;
uint64_t end = start + size;
@@ -306,7 +305,7 @@ range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
rsearch.rs_start = start;
rsearch.rs_end = end;
- return (avl_find(&rt->rt_root, &rsearch, &where));
+ return (avl_find(&rt->rt_root, &rsearch, NULL));
}
static range_seg_t *
@@ -407,3 +406,23 @@ range_tree_is_empty(range_tree_t *rt)
ASSERT(rt != NULL);
return (range_tree_space(rt) == 0);
}
+
+uint64_t
+range_tree_min(range_tree_t *rt)
+{
+ range_seg_t *rs = avl_first(&rt->rt_root);
+ return (rs != NULL ? rs->rs_start : 0);
+}
+
+uint64_t
+range_tree_max(range_tree_t *rt)
+{
+ range_seg_t *rs = avl_last(&rt->rt_root);
+ return (rs != NULL ? rs->rs_end : 0);
+}
+
+uint64_t
+range_tree_span(range_tree_t *rt)
+{
+ return (range_tree_max(rt) - range_tree_min(rt));
+}
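The three accessors make the distinction between occupied space and covered span explicit; a hypothetical illustration (the segment values are invented):

	/*
	 * For a range_tree_t *rt holding [0, 10K) and [30K, 40K):
	 *
	 *	range_tree_min(rt)   == 0	first segment's start
	 *	range_tree_max(rt)   == 40K	last segment's end
	 *	range_tree_span(rt)  == 40K	max - min, gaps included
	 *	range_tree_space(rt) == 20K	allocated bytes only
	 *
	 * For an empty tree, min and max both return 0, so span is 0.
	 */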
diff --git a/usr/src/uts/common/fs/zfs/sys/range_tree.h b/usr/src/uts/common/fs/zfs/sys/range_tree.h
index 847774fb6a..9360e01509 100644
--- a/usr/src/uts/common/fs/zfs/sys/range_tree.h
+++ b/usr/src/uts/common/fs/zfs/sys/range_tree.h
@@ -86,6 +86,9 @@ boolean_t range_tree_is_empty(range_tree_t *rt);
void range_tree_verify(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
void range_tree_stat_verify(range_tree_t *rt);
+uint64_t range_tree_min(range_tree_t *rt);
+uint64_t range_tree_max(range_tree_t *rt);
+uint64_t range_tree_span(range_tree_t *rt);
void range_tree_add(void *arg, uint64_t start, uint64_t size);
void range_tree_remove(void *arg, uint64_t start, uint64_t size);
diff --git a/usr/src/uts/common/fs/zfs/sys/vdev_removal.h b/usr/src/uts/common/fs/zfs/sys/vdev_removal.h
index 5fcd40b6e7..3962237afd 100644
--- a/usr/src/uts/common/fs/zfs/sys/vdev_removal.h
+++ b/usr/src/uts/common/fs/zfs/sys/vdev_removal.h
@@ -86,6 +86,9 @@ extern void spa_vdev_remove_suspend(spa_t *);
extern int spa_vdev_remove_cancel(spa_t *);
extern void spa_vdev_removal_destroy(spa_vdev_removal_t *svr);
+extern int vdev_removal_max_span;
+extern int zfs_remove_max_segment;
+
#ifdef __cplusplus
}
#endif
diff --git a/usr/src/uts/common/fs/zfs/vdev.c b/usr/src/uts/common/fs/zfs/vdev.c
index db2d12db96..43c6e4c9a1 100644
--- a/usr/src/uts/common/fs/zfs/vdev.c
+++ b/usr/src/uts/common/fs/zfs/vdev.c
@@ -168,7 +168,7 @@ vdev_dbgmsg_print_tree(vdev_t *vd, int indent)
}
zfs_dbgmsg("%*svdev %u: %s%s, guid: %llu, path: %s, %s", indent,
- "", vd->vdev_id, vd->vdev_ops->vdev_op_type,
+ "", (int)vd->vdev_id, vd->vdev_ops->vdev_op_type,
vd->vdev_islog ? " (log)" : "",
(u_longlong_t)vd->vdev_guid,
vd->vdev_path ? vd->vdev_path : "N/A", state);
diff --git a/usr/src/uts/common/fs/zfs/vdev_label.c b/usr/src/uts/common/fs/zfs/vdev_label.c
index d906860346..8d5f17c15f 100644
--- a/usr/src/uts/common/fs/zfs/vdev_label.c
+++ b/usr/src/uts/common/fs/zfs/vdev_label.c
@@ -33,15 +33,15 @@
* 1. Uniquely identify this device as part of a ZFS pool and confirm its
* identity within the pool.
*
- * 2. Verify that all the devices given in a configuration are present
+ * 2. Verify that all the devices given in a configuration are present
* within the pool.
*
- * 3. Determine the uberblock for the pool.
+ * 3. Determine the uberblock for the pool.
*
- * 4. In case of an import operation, determine the configuration of the
+ * 4. In case of an import operation, determine the configuration of the
* toplevel vdev of which it is a part.
*
- * 5. If an import operation cannot find all the devices in the pool,
+ * 5. If an import operation cannot find all the devices in the pool,
* provide enough information to the administrator to determine which
* devices are missing.
*
@@ -77,9 +77,9 @@
* In order to identify which labels are valid, the labels are written in the
* following manner:
*
- * 1. For each vdev, update 'L1' to the new label
- * 2. Update the uberblock
- * 3. For each vdev, update 'L2' to the new label
+ * 1. For each vdev, update 'L1' to the new label
+ * 2. Update the uberblock
+ * 3. For each vdev, update 'L2' to the new label
*
* Given arbitrary failure, we can determine the correct label to use based on
* the transaction group. If we fail after updating L1 but before updating the
@@ -117,19 +117,19 @@
*
* The nvlist describing the pool and vdev contains the following elements:
*
- * version ZFS on-disk version
- * name Pool name
- * state Pool state
- * txg Transaction group in which this label was written
- * pool_guid Unique identifier for this pool
- * vdev_tree An nvlist describing vdev tree.
+ * version ZFS on-disk version
+ * name Pool name
+ * state Pool state
+ * txg Transaction group in which this label was written
+ * pool_guid Unique identifier for this pool
+ * vdev_tree An nvlist describing vdev tree.
* features_for_read
* An nvlist of the features necessary for reading the MOS.
*
* Each leaf device label also contains the following:
*
- * top_guid Unique ID for top-level vdev in which this is contained
- * guid Unique ID for the leaf vdev
+ * top_guid Unique ID for top-level vdev in which this is contained
+ * guid Unique ID for the leaf vdev
*
* The 'vs' configuration follows the format described in 'spa_config.c'.
*/
@@ -390,22 +390,33 @@ vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
* histograms.
*/
uint64_t seg_count = 0;
+ uint64_t to_alloc = vd->vdev_stat.vs_alloc;
/*
* There are the same number of allocated segments
* as free segments, so we will have at least one
- * entry per free segment.
+ * entry per free segment. However, small free
+ * segments (smaller than vdev_removal_max_span)
+ * will be combined with adjacent allocated segments
+ * as a single mapping.
*/
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
- seg_count += vd->vdev_mg->mg_histogram[i];
+ if (1ULL << (i + 1) < vdev_removal_max_span) {
+ to_alloc +=
+ vd->vdev_mg->mg_histogram[i] <<
+ (i + 1);
+ } else {
+ seg_count +=
+ vd->vdev_mg->mg_histogram[i];
+ }
}
/*
- * The maximum length of a mapping is SPA_MAXBLOCKSIZE,
- * so we need at least one entry per SPA_MAXBLOCKSIZE
- * of allocated data.
+ * The maximum length of a mapping is
+ * zfs_remove_max_segment, so we need at least one entry
+ * per zfs_remove_max_segment of allocated data.
*/
- seg_count += vd->vdev_stat.vs_alloc / SPA_MAXBLOCKSIZE;
+ seg_count += to_alloc / zfs_remove_max_segment;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
seg_count *
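A worked sketch of the revised estimate, using the default tunables (vdev_removal_max_span = 32K, zfs_remove_max_segment = 1M); the histogram contents are invented for illustration:

	/*
	 * Say vs_alloc = 10G, with 1000 free segments in histogram
	 * bucket i = 12 (sizes 4K..8K) and 500 in bucket i = 17
	 * (sizes 128K..256K).
	 *
	 * Bucket 12: 1 << 13 = 8K < 32K, so each small free segment
	 * is assumed to be copied along with its neighbors:
	 *	to_alloc = 10G + (1000 << 13) = 10G + 8M
	 *
	 * Bucket 17: 1 << 18 = 256K >= 32K, so each of these free
	 * segments can end a mapping entry:
	 *	seg_count = 500
	 *
	 * Plus one entry per zfs_remove_max_segment of data:
	 *	seg_count += (10G + 8M) / 1M == 10248
	 *
	 * for roughly 10748 mapping entries in total.
	 */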
diff --git a/usr/src/uts/common/fs/zfs/vdev_removal.c b/usr/src/uts/common/fs/zfs/vdev_removal.c
index 06e25a59ad..c9af0e0729 100644
--- a/usr/src/uts/common/fs/zfs/vdev_removal.c
+++ b/usr/src/uts/common/fs/zfs/vdev_removal.c
@@ -106,6 +106,24 @@ int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
int zfs_remove_max_segment = 1024 * 1024;
/*
+ * Allow a remap segment to span free chunks of at most this size. The main
+ * impact of a larger span is that we will read and write larger, more
+ * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
+ * for iops. The value here was chosen to align with
+ * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
+ * reads (but there's no reason it has to be the same).
+ *
+ * Additionally, a higher span will have the following relatively minor
+ * effects:
+ * - the mapping will be smaller, since one entry can cover more allocated
+ * segments
+ * - more of the fragmentation in the removing device will be preserved
+ * - we'll do larger allocations, which may fail and fall back on smaller
+ * allocations
+ */
+int vdev_removal_max_span = 32 * 1024;
+
+/*
* This is used by the test suite so that it can ensure that certain
* actions happen while in the middle of a removal.
*/
@@ -726,13 +744,52 @@ vdev_mapping_sync(void *arg, dmu_tx_t *tx)
spa_sync_removing_state(spa, tx);
}
+typedef struct vdev_copy_segment_arg {
+ spa_t *vcsa_spa;
+ dva_t *vcsa_dest_dva;
+ uint64_t vcsa_txg;
+ range_tree_t *vcsa_obsolete_segs;
+} vdev_copy_segment_arg_t;
+
+static void
+unalloc_seg(void *arg, uint64_t start, uint64_t size)
+{
+ vdev_copy_segment_arg_t *vcsa = arg;
+ spa_t *spa = vcsa->vcsa_spa;
+ blkptr_t bp = { 0 };
+
+ BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
+ BP_SET_LSIZE(&bp, size);
+ BP_SET_PSIZE(&bp, size);
+ BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
+ BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
+ BP_SET_TYPE(&bp, DMU_OT_NONE);
+ BP_SET_LEVEL(&bp, 0);
+ BP_SET_DEDUP(&bp, 0);
+ BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
+
+ DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
+ DVA_SET_OFFSET(&bp.blk_dva[0],
+ DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
+ DVA_SET_ASIZE(&bp.blk_dva[0], size);
+
+ zio_free(spa, vcsa->vcsa_txg, &bp);
+}
+
/*
* All reads and writes associated with a call to spa_vdev_copy_segment()
* are done.
*/
static void
-spa_vdev_copy_nullzio_done(zio_t *zio)
+spa_vdev_copy_segment_done(zio_t *zio)
{
+ vdev_copy_segment_arg_t *vcsa = zio->io_private;
+
+ range_tree_vacate(vcsa->vcsa_obsolete_segs,
+ unalloc_seg, vcsa);
+ range_tree_destroy(vcsa->vcsa_obsolete_segs);
+ kmem_free(vcsa, sizeof (*vcsa));
+
spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}
@@ -849,7 +906,8 @@ spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
* read from the old location and write to the new location.
*/
static int
-spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
+spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
+ uint64_t maxalloc, uint64_t txg,
vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
metaslab_group_t *mg = vd->vdev_mg;
@@ -857,8 +915,39 @@ spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_indirect_mapping_entry_t *entry;
dva_t dst = { 0 };
+ uint64_t start = range_tree_min(segs);
- ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+ ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
+
+ uint64_t size = range_tree_span(segs);
+ if (range_tree_span(segs) > maxalloc) {
+ /*
+ * We can't allocate all the segments. Prefer to end
+ * the allocation at the end of a segment, thus avoiding
+ * additional split blocks.
+ */
+ range_seg_t search;
+ avl_index_t where;
+ search.rs_start = start + maxalloc;
+ search.rs_end = search.rs_start;
+ range_seg_t *rs = avl_find(&segs->rt_root, &search, &where);
+ if (rs == NULL) {
+ rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE);
+ } else {
+ rs = AVL_PREV(&segs->rt_root, rs);
+ }
+ if (rs != NULL) {
+ size = rs->rs_end - start;
+ } else {
+ /*
+ * There are no segments that end before maxalloc.
+ * I.e. the first segment is larger than maxalloc,
+ * so we must split it.
+ */
+ size = maxalloc;
+ }
+ }
+ ASSERT3U(size, <=, maxalloc);
/*
* We use allocator 0 for this I/O because we don't expect device remap
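A walk-through of the truncation logic above, with invented values:

	/*
	 * Suppose segs holds [0, 100K) and [150K, 200K) and
	 * maxalloc = 180K.  The span (200K) exceeds maxalloc, so we
	 * look near offset start + maxalloc = 180K:
	 *
	 * - avl_find() matches [150K, 200K) (overlapping segments
	 *   compare equal in a range tree), AVL_PREV steps back to
	 *   [0, 100K), and size = 100K -- the copy ends exactly on a
	 *   segment boundary, avoiding a split block.
	 *
	 * - Had maxalloc landed in the gap (say 120K), avl_find()
	 *   would miss and avl_nearest(AVL_BEFORE) would select
	 *   [0, 100K) directly, with the same result.
	 *
	 * - Only if the first segment itself exceeds maxalloc do we
	 *   split it and take size = maxalloc.
	 */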
@@ -873,6 +962,31 @@ spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
return (error);
/*
+ * Determine the ranges that are not actually needed. Offsets are
+ * relative to the start of the range to be copied (i.e. relative to the
+ * local variable "start").
+ */
+ range_tree_t *obsolete_segs = range_tree_create(NULL, NULL);
+
+ range_seg_t *rs = avl_first(&segs->rt_root);
+ ASSERT3U(rs->rs_start, ==, start);
+ uint64_t prev_seg_end = rs->rs_end;
+ while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) {
+ if (rs->rs_start >= start + size) {
+ break;
+ } else {
+ range_tree_add(obsolete_segs,
+ prev_seg_end - start,
+ rs->rs_start - prev_seg_end);
+ }
+ prev_seg_end = rs->rs_end;
+ }
+ /* We don't end in the middle of an obsolete range */
+ ASSERT3U(start + size, <=, prev_seg_end);
+
+ range_tree_clear(segs, start, size);
+
+ /*
* We can't have any padding of the allocated size, otherwise we will
* misunderstand what's allocated, and the size of the mapping.
* The caller ensures this will be true by passing in a size that is
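Continuing the invented numbers, the obsolete-segment bookkeeping works out as follows:

	/*
	 * If the chunk copied is [start, start + 200K) but only
	 * [0, 100K) and [150K, 200K) (start-relative) are allocated,
	 * the loop records the gap:
	 *
	 *	obsolete_segs = { [100K, 150K) }
	 *
	 * Those 50K are still read and written (the bandwidth-for-
	 * iops trade), but when the copy zio completes,
	 * spa_vdev_copy_segment_done() vacates obsolete_segs through
	 * unalloc_seg(), freeing them again on the destination, and
	 * vime_obsolete_count carries them for the
	 * SPA_FEATURE_OBSOLETE_COUNTS accounting.
	 */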
@@ -883,13 +997,22 @@ spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
entry->vime_mapping.vimep_dst = dst;
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ entry->vime_obsolete_count = range_tree_space(obsolete_segs);
+ }
+
+ vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
+ vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
+ vcsa->vcsa_obsolete_segs = obsolete_segs;
+ vcsa->vcsa_spa = spa;
+ vcsa->vcsa_txg = txg;
/*
* See comment before spa_vdev_copy_one_child().
*/
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
- spa_vdev_copy_nullzio_done, NULL, 0);
+ spa_vdev_copy_segment_done, vcsa, 0);
vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
if (dest_vd->vdev_ops == &vdev_mirror_ops) {
for (int i = 0; i < dest_vd->vdev_children; i++) {
@@ -1092,39 +1215,78 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
mutex_enter(&svr->svr_lock);
- range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
- if (rs == NULL) {
+ /*
+ * Determine how big of a chunk to copy. We can allocate up
+ * to max_alloc bytes, and we can span up to vdev_removal_max_span
+ * bytes of unallocated space at a time. "segs" will track the
+ * allocated segments that we are copying. We may also be copying
+ * free segments (of up to vdev_removal_max_span bytes).
+ */
+ range_tree_t *segs = range_tree_create(NULL, NULL);
+ for (;;) {
+ range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
+ if (rs == NULL)
+ break;
+
+ uint64_t seg_length;
+
+ if (range_tree_is_empty(segs)) {
+ /* need to truncate the first seg based on max_alloc */
+ seg_length =
+ MIN(rs->rs_end - rs->rs_start, *max_alloc);
+ } else {
+ if (rs->rs_start - range_tree_max(segs) >
+ vdev_removal_max_span) {
+ /*
+ * Including this segment would cause us to
+ * copy a larger unneeded chunk than is allowed.
+ */
+ break;
+ } else if (rs->rs_end - range_tree_min(segs) >
+ *max_alloc) {
+ /*
+ * This additional segment would extend past
+ * max_alloc. Rather than splitting this
+ * segment, leave it for the next mapping.
+ */
+ break;
+ } else {
+ seg_length = rs->rs_end - rs->rs_start;
+ }
+ }
+
+ range_tree_add(segs, rs->rs_start, seg_length);
+ range_tree_remove(svr->svr_allocd_segs,
+ rs->rs_start, seg_length);
+ }
+
+ if (range_tree_is_empty(segs)) {
mutex_exit(&svr->svr_lock);
+ range_tree_destroy(segs);
return;
}
- uint64_t offset = rs->rs_start;
- uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc);
-
- range_tree_remove(svr->svr_allocd_segs, offset, length);
if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
svr, 0, ZFS_SPACE_CHECK_NONE, tx);
}
- svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length;
+ svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
/*
* Note: this is the amount of *allocated* space
* that we are taking care of each txg.
*/
- svr->svr_bytes_done[txg & TXG_MASK] += length;
+ svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
mutex_exit(&svr->svr_lock);
zio_alloc_list_t zal;
metaslab_trace_init(&zal);
- uint64_t thismax = *max_alloc;
- while (length > 0) {
- uint64_t mylen = MIN(length, thismax);
-
+ uint64_t thismax = SPA_MAXBLOCKSIZE;
+ while (!range_tree_is_empty(segs)) {
int error = spa_vdev_copy_segment(vd,
- offset, mylen, txg, vca, &zal);
+ segs, thismax, txg, vca, &zal);
if (error == ENOSPC) {
/*
@@ -1138,18 +1300,17 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
*/
ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
- thismax = P2ROUNDUP(mylen / 2,
+ uint64_t attempted =
+ MIN(range_tree_span(segs), thismax);
+ thismax = P2ROUNDUP(attempted / 2,
1 << spa->spa_max_ashift);
- ASSERT3U(thismax, <, mylen);
/*
* The minimum-size allocation can not fail.
*/
- ASSERT3U(mylen, >, 1 << spa->spa_max_ashift);
- *max_alloc = mylen - (1 << spa->spa_max_ashift);
+ ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
+ *max_alloc = attempted - (1 << spa->spa_max_ashift);
} else {
ASSERT0(error);
- length -= mylen;
- offset += mylen;
/*
* We've performed an allocation, so reset the
@@ -1160,6 +1321,7 @@ spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
}
}
metaslab_trace_fini(&zal);
+ range_tree_destroy(segs);
}
/*
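Taken together, the loop above accumulates one chunk per mapping entry with two greedy stopping rules; a condensed paraphrase (pseudocode, not commit code):

	/*
	 *	segs = empty;
	 *	while ((rs = first remaining allocated segment)) {
	 *		if (segs is empty)
	 *			take min(len(rs), *max_alloc) of rs;
	 *		else if (gap between segs and rs
	 *		    > vdev_removal_max_span)
	 *			break;	-- too much unneeded copying
	 *		else if (span including rs > *max_alloc)
	 *			break;	-- leave rs for the next entry
	 *		else
	 *			take rs whole;
	 *	}
	 *
	 * Each chunk becomes a single indirect-mapping entry, which
	 * is why bridging small gaps shrinks the mapping at the cost
	 * of copying some free space.
	 */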
diff --git a/usr/src/uts/common/fs/zfs/zcp.c b/usr/src/uts/common/fs/zfs/zcp.c
index 5ef77044e6..bad8a56f58 100644
--- a/usr/src/uts/common/fs/zfs/zcp.c
+++ b/usr/src/uts/common/fs/zfs/zcp.c
@@ -426,7 +426,7 @@ zcp_lua_to_nvlist_impl(lua_State *state, int index, nvlist_t *nvl,
/*
* Convert a lua value to an nvpair, adding it to an nvlist with the given key.
*/
-void
+static void
zcp_lua_to_nvlist(lua_State *state, int index, nvlist_t *nvl, const char *key)
{
/*
@@ -438,7 +438,7 @@ zcp_lua_to_nvlist(lua_State *state, int index, nvlist_t *nvl, const char *key)
(void) lua_error(state);
}
-int
+static int
zcp_lua_to_nvlist_helper(lua_State *state)
{
nvlist_t *nv = (nvlist_t *)lua_touserdata(state, 2);
@@ -447,11 +447,12 @@ zcp_lua_to_nvlist_helper(lua_State *state)
return (0);
}
-void
+static void
zcp_convert_return_values(lua_State *state, nvlist_t *nvl,
const char *key, zcp_eval_arg_t *evalargs)
{
int err;
+ VERIFY3U(1, ==, lua_gettop(state));
lua_pushcfunction(state, zcp_lua_to_nvlist_helper);
lua_pushlightuserdata(state, (char *)key);
lua_pushlightuserdata(state, nvl);
@@ -897,6 +898,7 @@ zcp_eval_impl(dmu_tx_t *tx, boolean_t sync, zcp_eval_arg_t *evalargs)
ZCP_RET_RETURN, evalargs);
} else if (return_count > 1) {
evalargs->ea_result = SET_ERROR(ECHRNG);
+ lua_settop(state, 0);
(void) lua_pushfstring(state, "Multiple return "
"values not supported");
zcp_convert_return_values(state, evalargs->ea_outnvl,
@@ -958,6 +960,7 @@ static void
zcp_pool_error(zcp_eval_arg_t *evalargs, const char *poolname)
{
evalargs->ea_result = SET_ERROR(ECHRNG);
+ lua_settop(evalargs->ea_state, 0);
(void) lua_pushfstring(evalargs->ea_state, "Could not open pool: %s",
poolname);
zcp_convert_return_values(evalargs->ea_state, evalargs->ea_outnvl,
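The two lua_settop() calls pair with the new VERIFY3U(): the error paths must hand zcp_convert_return_values() a stack holding exactly one value. A sketch of the Lua C-API reasoning (lua_settop() and lua_pushfstring() are standard Lua 5.x calls):

	/*
	 *	lua_settop(state, 0);		-- discard any leftovers
	 *	(void) lua_pushfstring(state, "...");
	 *
	 * leaves lua_gettop(state) == 1, so the VERIFY3U holds and
	 * the error string becomes the single value converted into
	 * the output nvlist.
	 */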
diff --git a/usr/src/uts/common/io/ldterm.c b/usr/src/uts/common/io/ldterm.c
index 6c396db601..1af97c86ea 100644
--- a/usr/src/uts/common/io/ldterm.c
+++ b/usr/src/uts/common/io/ldterm.c
@@ -22,6 +22,7 @@
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -381,7 +382,7 @@ static struct streamtab ldtrinfo;
static struct fmodsw fsw = {
"ldterm",
&ldtrinfo,
- D_MTQPAIR | D_MP
+ D_MTQPAIR | D_MP | _D_SINGLE_INSTANCE
};
static struct modlstrmod modlstrmod = {
diff --git a/usr/src/uts/common/io/ptem.c b/usr/src/uts/common/io/ptem.c
index 481169daf3..e4dc15a3ac 100644
--- a/usr/src/uts/common/io/ptem.c
+++ b/usr/src/uts/common/io/ptem.c
@@ -26,6 +26,7 @@
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/*
@@ -62,7 +63,7 @@ extern struct streamtab pteminfo;
static struct fmodsw fsw = {
"ptem",
&pteminfo,
- D_MTQPAIR | D_MP
+ D_MTQPAIR | D_MP | _D_SINGLE_INSTANCE
};
static struct modlstrmod modlstrmod = {
diff --git a/usr/src/uts/common/io/ttcompat.c b/usr/src/uts/common/io/ttcompat.c
index ab420c82e7..22b1442e50 100644
--- a/usr/src/uts/common/io/ttcompat.c
+++ b/usr/src/uts/common/io/ttcompat.c
@@ -21,6 +21,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -73,7 +74,7 @@ static struct streamtab ttcoinfo;
static struct fmodsw fsw = {
"ttcompat",
&ttcoinfo,
- D_MTQPAIR | D_MP
+ D_MTQPAIR | D_MP | _D_SINGLE_INSTANCE
};
/*
diff --git a/usr/src/uts/common/io/usb/clients/hid/hid.c b/usr/src/uts/common/io/usb/clients/hid/hid.c
index 2c9c88a3ec..eccd48bf08 100644
--- a/usr/src/uts/common/io/usb/clients/hid/hid.c
+++ b/usr/src/uts/common/io/usb/clients/hid/hid.c
@@ -441,6 +441,7 @@ hid_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
mutex_enter(&hidp->hid_mutex);
if (usb_ep_xdescr_fill(USB_EP_XDESCR_CURRENT_VERSION, dip, ep_data,
&hidp->hid_ep_intr_xdescr) != USB_SUCCESS) {
+ mutex_exit(&hidp->hid_mutex);
goto fail;
}
@@ -1253,11 +1254,11 @@ hid_wput(queue_t *q, mblk_t *mp)
{
hid_state_t *hidp = (hid_state_t *)q->q_ptr;
int error = USB_SUCCESS;
- struct iocblk *iocbp;
+ struct iocblk *iocbp;
mblk_t *datap;
int direction;
struct copyresp *crp;
- queue_t *tmpq;
+ queue_t *tmpq;
int flag;
USB_DPRINTF_L4(PRINT_MASK_ALL, hidp->hid_log_handle,
@@ -2772,7 +2773,7 @@ hid_send_async_ctrl_request(hid_default_pipe_arg_t *hid_default_pipe_arg,
}
ctrl_req->ctrl_bmRequestType = request_type;
- ctrl_req->ctrl_bRequest = (uint8_t)request_request;
+ ctrl_req->ctrl_bRequest = (uint8_t)request_request;
ctrl_req->ctrl_wValue = hid_request->hid_req_wValue;
ctrl_req->ctrl_wIndex = request_index;
ctrl_req->ctrl_wLength = hid_request->hid_req_wLength;
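The one-line hid_attach() change fixes an error path that previously jumped to fail with hid_mutex still held; the fix restores the usual pattern:

	/*
	 *	mutex_enter(&hidp->hid_mutex);
	 *	if (usb_ep_xdescr_fill(...) != USB_SUCCESS) {
	 *		mutex_exit(&hidp->hid_mutex);	-- drop the lock
	 *		goto fail;			-- before bailing
	 *	}
	 */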
diff --git a/usr/src/uts/common/os/streamio.c b/usr/src/uts/common/os/streamio.c
index 21ec25b5b3..8ec67ea76a 100644
--- a/usr/src/uts/common/os/streamio.c
+++ b/usr/src/uts/common/os/streamio.c
@@ -25,6 +25,7 @@
/*
* Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2017 Joyent, Inc.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
#include <sys/types.h>
@@ -3821,6 +3822,30 @@ strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag,
TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH,
"I_PUSH:fp %p stp %p", fp, stp);
+ /*
+ * If the module is flagged as single-instance, then check
+ * to see if the module is already pushed. If it is, return
+ * as if the push was successful.
+ */
+ if (fp->f_qflag & _QSINGLE_INSTANCE) {
+ queue_t *q;
+
+ claimstr(stp->sd_wrq);
+ for (q = stp->sd_wrq->q_next; q; q = q->q_next) {
+ if (q->q_flag & QREADR) {
+ q = NULL;
+ break;
+ }
+ if (strcmp(mname, Q2NAME(q)) == 0)
+ break;
+ }
+ releasestr(stp->sd_wrq);
+ if (q != NULL) {
+ fmodsw_rele(fp);
+ return (0);
+ }
+ }
+
if (error = strstartplumb(stp, flag, cmd)) {
fmodsw_rele(fp);
return (error);
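From userland, the new check makes pushing a single-instance module idempotent; a hypothetical illustration (error handling omitted):

	#include <fcntl.h>
	#include <stropts.h>

	int fd = open("/dev/pts/0", O_RDWR);	/* some slave pty */
	(void) ioctl(fd, I_PUSH, "ptem");
	(void) ioctl(fd, I_PUSH, "ldterm");
	/* An xpg4 libc may already have pushed these; with
	   _D_SINGLE_INSTANCE a duplicate I_PUSH returns 0 without
	   stacking a second instance. */
	(void) ioctl(fd, I_PUSH, "ldterm");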
diff --git a/usr/src/uts/common/os/strsubr.c b/usr/src/uts/common/os/strsubr.c
index 0d0d290fcc..959e5576f0 100644
--- a/usr/src/uts/common/os/strsubr.c
+++ b/usr/src/uts/common/os/strsubr.c
@@ -27,6 +27,7 @@
* Use is subject to license terms.
* Copyright (c) 2016 by Delphix. All rights reserved.
* Copyright 2018 Joyent, Inc.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
#include <sys/types.h>
@@ -951,7 +952,7 @@ str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
*/
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
- uchar_t band, int error)
+ uchar_t band, int error)
{
ASSERT(MUTEX_HELD(&proc->p_lock));
@@ -2351,7 +2352,7 @@ mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
*/
int
devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
- uint32_t *sqtypep)
+ uint32_t *sqtypep)
{
uint32_t qflag = 0;
uint32_t sqtype = 0;
@@ -2465,6 +2466,17 @@ devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
qflag |= _QDIRECT;
}
+ /*
+ * Private flag used to indicate that a streams module should only
+ * be pushed once. The TTY streams modules have this flag since if
+ * libc believes itself to be an xpg4 process then it will
+ * automatically and unconditionally push them when a PTS device is
+ * opened. If an application is not aware of this then without this
+ * flag we would end up with duplicate modules.
+ */
+ if (devflag & _D_SINGLE_INSTANCE)
+ qflag |= _QSINGLE_INSTANCE;
+
*qflagp = qflag;
*sqtypep = sqtype;
return (0);
@@ -8088,7 +8100,7 @@ strflushrq(vnode_t *vp, int flag)
void
strsetrputhooks(vnode_t *vp, uint_t flags,
- msgfunc_t protofunc, msgfunc_t miscfunc)
+ msgfunc_t protofunc, msgfunc_t miscfunc)
{
struct stdata *stp = vp->v_stream;
diff --git a/usr/src/uts/common/sys/conf.h b/usr/src/uts/common/sys/conf.h
index 7447db408f..4bf3d5c7e3 100644
--- a/usr/src/uts/common/sys/conf.h
+++ b/usr/src/uts/common/sys/conf.h
@@ -24,6 +24,7 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
#ifndef _SYS_CONF_H
@@ -221,6 +222,8 @@ extern int cdev_prop_op(dev_t, dev_info_t *, ddi_prop_op_t,
#define D_OPEN_RETURNS_EINTR 0x100000 /* EINTR expected from open(9E) */
+#define _D_SINGLE_INSTANCE 0x200000 /* Module may only be pushed once */
+
#endif /* !defined(_XPG4_2) || defined(__EXTENSIONS__) */
#ifdef __cplusplus
diff --git a/usr/src/uts/common/sys/stream.h b/usr/src/uts/common/sys/stream.h
index a45030ff7e..33f7f571fb 100644
--- a/usr/src/uts/common/sys/stream.h
+++ b/usr/src/uts/common/sys/stream.h
@@ -22,6 +22,7 @@
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright 2015 Joyent, Inc. All rights reserved.
+ * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
@@ -190,6 +191,8 @@ typedef struct queue {
#define _QASSOCIATED 0x10000000 /* queue is associated with a device */
#define _QDIRECT 0x20000000 /* Private; transport module uses */
/* direct interface to/from sockfs */
+#define _QSINGLE_INSTANCE 0x40000000 /* Private; module may only */
+ /* be pushed once */
/* queue sqflags (protected by SQLOCK). */
#define Q_SQQUEUED 0x01 /* Queue is in the syncq list */
diff --git a/usr/src/uts/i86pc/io/apix/apix.c b/usr/src/uts/i86pc/io/apix/apix.c
index 545d3a0d05..a5a4823a91 100644
--- a/usr/src/uts/i86pc/io/apix/apix.c
+++ b/usr/src/uts/i86pc/io/apix/apix.c
@@ -269,7 +269,7 @@ apix_probe()
*
* Please remove when/if the issue is resolved.
*/
- if (get_hwenv() == HW_XEN_HVM)
+ if (get_hwenv() & HW_XEN_HVM)
return (PSM_FAILURE);
/* check for hw features if specified */
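The apix_probe() change reflects that get_hwenv() reports the hardware environment as a bit field, so more than one HW_* bit may be set; an equality test then misses:

	/*
	 * If get_hwenv() returns HW_XEN_HVM combined with another
	 * HW_* bit:
	 *
	 *	get_hwenv() == HW_XEN_HVM	-- false, workaround skipped
	 *	get_hwenv() & HW_XEN_HVM	-- still detects Xen HVM
	 */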