author     Jerry Jelinek <jerry.jelinek@joyent.com>    2017-03-15 11:37:23 +0000
committer  Jerry Jelinek <jerry.jelinek@joyent.com>    2017-03-15 11:37:23 +0000
commit     65901b7e8233a3b0d360a62789ca81efa02f81ff (patch)
tree       1ac82d70de1ab204b26000e119c3b912883479a3
parent     cbdf1a6587f8ce64ca2522ea298c7a4928358eaf (diff)
parent     b0c42cd4706ba01ce158bd2bb1004f7e59eca5fe (diff)
[illumos-gate merge]  (tag: release-20170316)
commit b0c42cd4706ba01ce158bd2bb1004f7e59eca5fe
    7801 add more by-dnode routines
commit fae4f9e0b50e2f5af5fce78f508f8669dc2b1e48
    7899 loader: update symlink support in zfs reader
commit 3e2079808737e33bb0613ed71ef53a52af0d4c12
    7267 SMF is fast and loose with optional dependencies
commit 709db5a3061407b4bc5fa69b7edf4c86f58ce9f6
    7871 libm: misleading-indentation errors
-rw-r--r--  usr/src/boot/Makefile.version              |   6
-rw-r--r--  usr/src/boot/sys/boot/zfs/zfsimpl.c        | 204
-rw-r--r--  usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h   |   1
-rw-r--r--  usr/src/cmd/svc/startd/graph.c             | 198
-rw-r--r--  usr/src/cmd/svc/startd/transition.c        |  18
-rw-r--r--  usr/src/lib/libm/common/C/sincospi.c       |  67
-rw-r--r--  usr/src/uts/common/fs/zfs/dmu.c            |  77
-rw-r--r--  usr/src/uts/common/fs/zfs/dmu_object.c     |   9
-rw-r--r--  usr/src/uts/common/fs/zfs/dmu_tx.c         | 152
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/dmu.h        |  11
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/dmu_tx.h     |   2
-rw-r--r--  usr/src/uts/common/fs/zfs/sys/zap.h        |   4
-rw-r--r--  usr/src/uts/common/fs/zfs/zap_micro.c      |  96
13 files changed, 560 insertions(+), 285 deletions(-)
diff --git a/usr/src/boot/Makefile.version b/usr/src/boot/Makefile.version
index 68f1901c81..95fa3c253a 100644
--- a/usr/src/boot/Makefile.version
+++ b/usr/src/boot/Makefile.version
@@ -29,4 +29,8 @@
# boot loader code updates.
LOADER_VERSION = 1.1
-BOOT_VERSION = $(LOADER_VERSION)-2017.0.0.1
+
+# Use date like formatting here, YYYY.MM.DD.XX, without leading zeroes.
+# The version is processed from left to right, the version number can only
+# be increased.
+BOOT_VERSION = $(LOADER_VERSION)-2017.3.14.1
diff --git a/usr/src/boot/sys/boot/zfs/zfsimpl.c b/usr/src/boot/sys/boot/zfs/zfsimpl.c
index 1243ce1c85..87a3a34efe 100644
--- a/usr/src/boot/sys/boot/zfs/zfsimpl.c
+++ b/usr/src/boot/sys/boot/zfs/zfsimpl.c
@@ -2183,6 +2183,61 @@ zfs_dnode_stat(const spa_t *spa, dnode_phys_t *dn, struct stat *sb)
return (0);
}
+static int
+zfs_dnode_readlink(const spa_t *spa, dnode_phys_t *dn, char *path, size_t psize)
+{
+ int rc = 0;
+
+ if (dn->dn_bonustype == DMU_OT_SA) {
+ sa_hdr_phys_t *sahdrp = NULL;
+ size_t size = 0;
+ void *buf = NULL;
+ int hdrsize;
+ char *p;
+
+ if (dn->dn_bonuslen != 0)
+ sahdrp = (sa_hdr_phys_t *)DN_BONUS(dn);
+ else {
+ blkptr_t *bp;
+
+ if ((dn->dn_flags & DNODE_FLAG_SPILL_BLKPTR) == 0)
+ return (EIO);
+ bp = &dn->dn_spill;
+
+ size = BP_GET_LSIZE(bp);
+ buf = zfs_alloc(size);
+ rc = zio_read(spa, bp, buf);
+ if (rc != 0) {
+ zfs_free(buf, size);
+ return (rc);
+ }
+ sahdrp = buf;
+ }
+ hdrsize = SA_HDR_SIZE(sahdrp);
+ p = (char *)((uintptr_t)sahdrp + hdrsize + SA_SYMLINK_OFFSET);
+ memcpy(path, p, psize);
+ if (buf != NULL)
+ zfs_free(buf, size);
+ return (0);
+ }
+ /*
+ * Second test is purely to silence bogus compiler
+ * warning about accessing past the end of dn_bonus.
+ */
+ if (psize + sizeof(znode_phys_t) <= dn->dn_bonuslen &&
+ sizeof(znode_phys_t) <= sizeof(dn->dn_bonus)) {
+ memcpy(path, &dn->dn_bonus[sizeof(znode_phys_t)], psize);
+ } else {
+ rc = dnode_read(spa, dn, 0, path, psize);
+ }
+ return (rc);
+}
+
+struct obj_list {
+ uint64_t objnum;
+ STAILQ_ENTRY(obj_list) entry;
+};
+
/*
* Lookup a file and return its dnode.
*/
@@ -2190,7 +2245,7 @@ static int
zfs_lookup(const struct zfsmount *mnt, const char *upath, dnode_phys_t *dnode)
{
int rc;
- uint64_t objnum, rootnum, parentnum;
+ uint64_t objnum;
const spa_t *spa;
dnode_phys_t dn;
const char *p, *q;
@@ -2198,6 +2253,8 @@ zfs_lookup(const struct zfsmount *mnt, const char *upath, dnode_phys_t *dnode)
char path[1024];
int symlinks_followed = 0;
struct stat sb;
+ struct obj_list *entry, *tentry;
+ STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache);
spa = mnt->spa;
if (mnt->objset.os_type != DMU_OST_ZFS) {
@@ -2206,87 +2263,119 @@ zfs_lookup(const struct zfsmount *mnt, const char *upath, dnode_phys_t *dnode)
return (EIO);
}
+ if ((entry = malloc(sizeof(struct obj_list))) == NULL)
+ return (ENOMEM);
+
/*
* Get the root directory dnode.
*/
rc = objset_get_dnode(spa, &mnt->objset, MASTER_NODE_OBJ, &dn);
- if (rc)
+ if (rc) {
+ free(entry);
return (rc);
+ }
- rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, &rootnum);
- if (rc)
+ rc = zap_lookup(spa, &dn, ZFS_ROOT_OBJ, &objnum);
+ if (rc) {
+ free(entry);
return (rc);
+ }
+ entry->objnum = objnum;
+ STAILQ_INSERT_HEAD(&on_cache, entry, entry);
- rc = objset_get_dnode(spa, &mnt->objset, rootnum, &dn);
- if (rc)
- return (rc);
+ rc = objset_get_dnode(spa, &mnt->objset, objnum, &dn);
+ if (rc != 0)
+ goto done;
- objnum = rootnum;
p = upath;
while (p && *p) {
+ rc = objset_get_dnode(spa, &mnt->objset, objnum, &dn);
+ if (rc != 0)
+ goto done;
+
while (*p == '/')
p++;
- if (!*p)
+ if (*p == '\0')
break;
- q = strchr(p, '/');
- if (q) {
- memcpy(element, p, q - p);
- element[q - p] = 0;
- p = q;
- } else {
- strcpy(element, p);
- p = 0;
- }
+ q = p;
+ while (*q != '\0' && *q != '/')
+ q++;
- rc = zfs_dnode_stat(spa, &dn, &sb);
- if (rc)
- return (rc);
- if (!S_ISDIR(sb.st_mode))
- return (ENOTDIR);
+ /* skip dot */
+ if (p + 1 == q && p[0] == '.') {
+ p++;
+ continue;
+ }
+ /* double dot */
+ if (p + 2 == q && p[0] == '.' && p[1] == '.') {
+ p += 2;
+ if (STAILQ_FIRST(&on_cache) ==
+ STAILQ_LAST(&on_cache, obj_list, entry)) {
+ rc = ENOENT;
+ goto done;
+ }
+ entry = STAILQ_FIRST(&on_cache);
+ STAILQ_REMOVE_HEAD(&on_cache, entry);
+ free(entry);
+ objnum = (STAILQ_FIRST(&on_cache))->objnum;
+ continue;
+ }
+ if (q - p + 1 > sizeof(element)) {
+ rc = ENAMETOOLONG;
+ goto done;
+ }
+ memcpy(element, p, q - p);
+ element[q - p] = 0;
+ p = q;
+
+ if ((rc = zfs_dnode_stat(spa, &dn, &sb)) != 0)
+ goto done;
+ if (!S_ISDIR(sb.st_mode)) {
+ rc = ENOTDIR;
+ goto done;
+ }
- parentnum = objnum;
rc = zap_lookup(spa, &dn, element, &objnum);
if (rc)
- return (rc);
+ goto done;
objnum = ZFS_DIRENT_OBJ(objnum);
+ if ((entry = malloc(sizeof(struct obj_list))) == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ entry->objnum = objnum;
+ STAILQ_INSERT_HEAD(&on_cache, entry, entry);
rc = objset_get_dnode(spa, &mnt->objset, objnum, &dn);
if (rc)
- return (rc);
+ goto done;
/*
* Check for symlink.
*/
rc = zfs_dnode_stat(spa, &dn, &sb);
if (rc)
- return (rc);
+ goto done;
if (S_ISLNK(sb.st_mode)) {
- if (symlinks_followed > 10)
- return (EMLINK);
+ if (symlinks_followed > 10) {
+ rc = EMLINK;
+ goto done;
+ }
symlinks_followed++;
/*
* Read the link value and copy the tail of our
* current path onto the end.
*/
- if (p)
- strcpy(&path[sb.st_size], p);
- else
- path[sb.st_size] = 0;
- /*
- * Second test is purely to silence bogus compiler
- * warning about accessing past the end of dn_bonus.
- */
- if (sb.st_size + sizeof(znode_phys_t) <=
- dn.dn_bonuslen && sizeof(znode_phys_t) <=
- sizeof(dn.dn_bonus)) {
- memcpy(path, &dn.dn_bonus[sizeof(znode_phys_t)],
- sb.st_size);
- } else {
- rc = dnode_read(spa, &dn, 0, path, sb.st_size);
- if (rc)
- return (rc);
+ if (sb.st_size + strlen(p) + 1 > sizeof(path)) {
+ rc = ENAMETOOLONG;
+ goto done;
}
+ strcpy(&path[sb.st_size], p);
+
+ rc = zfs_dnode_readlink(spa, &dn, path, sb.st_size);
+ if (rc != 0)
+ goto done;
/*
* Restart with the new path, starting either at
@@ -2294,14 +2383,25 @@ zfs_lookup(const struct zfsmount *mnt, const char *upath, dnode_phys_t *dnode)
* not the link is relative.
*/
p = path;
- if (*p == '/')
- objnum = rootnum;
- else
- objnum = parentnum;
- objset_get_dnode(spa, &mnt->objset, objnum, &dn);
+ if (*p == '/') {
+ while (STAILQ_FIRST(&on_cache) !=
+ STAILQ_LAST(&on_cache, obj_list, entry)) {
+ entry = STAILQ_FIRST(&on_cache);
+ STAILQ_REMOVE_HEAD(&on_cache, entry);
+ free(entry);
+ }
+ } else {
+ entry = STAILQ_FIRST(&on_cache);
+ STAILQ_REMOVE_HEAD(&on_cache, entry);
+ free(entry);
+ }
+ objnum = (STAILQ_FIRST(&on_cache))->objnum;
}
}
*dnode = dn;
- return (0);
+done:
+ STAILQ_FOREACH_SAFE(entry, &on_cache, entry, tentry)
+ free(entry);
+ return (rc);
}
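The rewritten zfs_lookup() above replaces the single rootnum/parentnum pair with a STAILQ of every directory object number visited, so "." is skipped and ".." pops back to the correct parent even after following symlinks. The following is a minimal, self-contained sketch of that push/pop pattern using <sys/queue.h>; obj_list and on_cache mirror the loader's names, while the helper functions and the object numbers are purely illustrative.

/*
 * Illustrative only: a tail queue of visited directory object numbers,
 * as used by the new zfs_lookup().  The loader never pops the last
 * element (it checks STAILQ_FIRST == STAILQ_LAST before handling "..").
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct obj_list {
	uint64_t objnum;
	STAILQ_ENTRY(obj_list) entry;
};

static STAILQ_HEAD(, obj_list) on_cache = STAILQ_HEAD_INITIALIZER(on_cache);

/* Push a directory's object number as the lookup descends into it. */
static int
cache_push(uint64_t objnum)
{
	struct obj_list *e = malloc(sizeof (struct obj_list));

	if (e == NULL)
		return (-1);
	e->objnum = objnum;
	STAILQ_INSERT_HEAD(&on_cache, e, entry);
	return (0);
}

/* Pop one level for ".."; the new head is the parent directory. */
static uint64_t
cache_pop(void)
{
	struct obj_list *e = STAILQ_FIRST(&on_cache);

	STAILQ_REMOVE_HEAD(&on_cache, entry);
	free(e);
	return (STAILQ_FIRST(&on_cache)->objnum);
}

int
main(void)
{
	(void) cache_push(34);		/* hypothetical root directory */
	(void) cache_push(57);		/* hypothetical /usr */
	(void) cache_push(102);		/* hypothetical /usr/bin */
	printf("after \"..\" we are back at object %llu\n",
	    (unsigned long long)cache_pop());	/* prints 57 */
	return (0);
}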
diff --git a/usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h b/usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h
index 0ddc96ad93..a88d256f8a 100644
--- a/usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h
+++ b/usr/src/boot/sys/cddl/boot/zfs/zfsimpl.h
@@ -1081,6 +1081,7 @@ typedef struct sa_hdr_phys {
#define SA_UID_OFFSET 24
#define SA_GID_OFFSET 32
#define SA_PARENT_OFFSET 40
+#define SA_SYMLINK_OFFSET 160
/*
* Intent log header - this on disk structure holds fields to manage
diff --git a/usr/src/cmd/svc/startd/graph.c b/usr/src/cmd/svc/startd/graph.c
index fa20e56775..30881ea34a 100644
--- a/usr/src/cmd/svc/startd/graph.c
+++ b/usr/src/cmd/svc/startd/graph.c
@@ -24,6 +24,7 @@
* Copyright 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2015, Syneto S.R.L. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
+ * Copyright 2016 RackTop Systems.
*/
/*
@@ -211,9 +212,12 @@ int info_events_all;
#define is_depgrp_bypassed(v) ((v->gv_type == GVT_GROUP) && \
((v->gv_depgroup == DEPGRP_EXCLUDE_ALL) || \
- (v->gv_depgroup == DEPGRP_OPTIONAL_ALL) || \
(v->gv_restart < RERR_RESTART)))
+#define is_inst_bypassed(v) ((v->gv_type == GVT_INST) && \
+ ((v->gv_flags & GV_TODISABLE) || \
+ (v->gv_flags & GV_TOOFFLINE)))
+
static uu_list_pool_t *graph_edge_pool, *graph_vertex_pool;
static uu_list_t *dgraph;
static pthread_mutex_t dgraph_lock;
@@ -1301,7 +1305,7 @@ require_any_satisfied(graph_vertex_t *groupv, boolean_t satbility)
satisfiable = B_TRUE;
}
- return (!satbility || satisfiable ? 0 : -1);
+ return ((!satbility || satisfiable) ? 0 : -1);
}
/*
@@ -1332,33 +1336,23 @@ optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
switch (v->gv_type) {
case GVT_INST:
- /* Skip missing or disabled instances */
- if ((v->gv_flags & (GV_CONFIGURED | GV_ENABLED)) !=
- (GV_CONFIGURED | GV_ENABLED))
+ /* Skip missing instances */
+ if ((v->gv_flags & GV_CONFIGURED) == 0)
continue;
if (v->gv_state == RESTARTER_STATE_MAINT)
continue;
- if (v->gv_flags & GV_TOOFFLINE)
- continue;
-
any_qualified = B_TRUE;
- if (v->gv_state == RESTARTER_STATE_OFFLINE) {
+ if (v->gv_state == RESTARTER_STATE_OFFLINE ||
+ v->gv_state == RESTARTER_STATE_DISABLED) {
/*
- * For offline dependencies, treat unsatisfiable
- * as satisfied.
+ * For offline/disabled dependencies,
+ * treat unsatisfiable as satisfied.
*/
i = dependency_satisfied(v, B_TRUE);
if (i == -1)
i = 1;
- } else if (v->gv_state == RESTARTER_STATE_DISABLED) {
- /*
- * The service is enabled, but hasn't
- * transitioned out of disabled yet. Treat it
- * as unsatisfied (not unsatisfiable).
- */
- i = 0;
} else {
i = dependency_satisfied(v, satbility);
}
@@ -1371,68 +1365,9 @@ optional_all_satisfied(graph_vertex_t *groupv, boolean_t satbility)
break;
case GVT_SVC: {
- boolean_t svc_any_qualified;
- boolean_t svc_satisfied;
- boolean_t svc_satisfiable;
- graph_vertex_t *v2;
- graph_edge_t *e2;
-
- svc_any_qualified = B_FALSE;
- svc_satisfied = B_FALSE;
- svc_satisfiable = B_FALSE;
-
- for (e2 = uu_list_first(v->gv_dependencies);
- e2 != NULL;
- e2 = uu_list_next(v->gv_dependencies, e2)) {
- v2 = e2->ge_vertex;
- assert(v2->gv_type == GVT_INST);
-
- if ((v2->gv_flags &
- (GV_CONFIGURED | GV_ENABLED)) !=
- (GV_CONFIGURED | GV_ENABLED))
- continue;
-
- if (v2->gv_state == RESTARTER_STATE_MAINT)
- continue;
-
- if (v2->gv_flags & GV_TOOFFLINE)
- continue;
-
- svc_any_qualified = B_TRUE;
-
- if (v2->gv_state == RESTARTER_STATE_OFFLINE) {
- /*
- * For offline dependencies, treat
- * unsatisfiable as satisfied.
- */
- i = dependency_satisfied(v2, B_TRUE);
- if (i == -1)
- i = 1;
- } else if (v2->gv_state ==
- RESTARTER_STATE_DISABLED) {
- i = 0;
- } else {
- i = dependency_satisfied(v2, satbility);
- }
-
- if (i == 1) {
- svc_satisfied = B_TRUE;
- break;
- }
- if (i == 0)
- svc_satisfiable = B_TRUE;
- }
-
- if (!svc_any_qualified)
- continue;
any_qualified = B_TRUE;
- if (svc_satisfied) {
- i = 1;
- } else if (svc_satisfiable) {
- i = 0;
- } else {
- i = -1;
- }
+ i = optional_all_satisfied(v, satbility);
+
break;
}
@@ -1612,24 +1547,35 @@ dependency_satisfied(graph_vertex_t *v, boolean_t satbility)
}
/*
- * Any vertex with the GV_TOOFFLINE flag set is guaranteed
- * to have its dependencies unsatisfiable.
+ * Vertices may be transitioning so we try to figure out if
+ * the end state is likely to satisfy the dependency instead
+ * of assuming the dependency is unsatisfied/unsatisfiable.
+ *
+ * Support for optional_all dependencies depends on us getting
+ * this right because unsatisfiable dependencies are treated
+ * as being satisfied.
*/
- if (v->gv_flags & GV_TOOFFLINE)
- return (-1);
-
switch (v->gv_state) {
case RESTARTER_STATE_ONLINE:
case RESTARTER_STATE_DEGRADED:
+ if (v->gv_flags & GV_TODISABLE)
+ return (-1);
+ if (v->gv_flags & GV_TOOFFLINE)
+ return (0);
return (1);
case RESTARTER_STATE_OFFLINE:
- if (!satbility)
- return (0);
+ if (!satbility || v->gv_flags & GV_TODISABLE)
+ return (satbility ? -1 : 0);
return (instance_satisfied(v, satbility) != -1 ?
0 : -1);
case RESTARTER_STATE_DISABLED:
+ if (!satbility || !(v->gv_flags & GV_ENABLED))
+ return (satbility ? -1 : 0);
+ return (instance_satisfied(v, satbility) != -1 ?
+ 0 : -1);
+
case RESTARTER_STATE_MAINT:
return (-1);
@@ -1732,6 +1678,9 @@ graph_start_if_satisfied(graph_vertex_t *v)
static int
satbility_cb(graph_vertex_t *v, void *arg)
{
+ if (is_inst_bypassed(v))
+ return (UU_WALK_NEXT);
+
if (v->gv_type == GVT_INST)
graph_start_if_satisfied(v);
@@ -1746,13 +1695,34 @@ propagate_satbility(graph_vertex_t *v)
static void propagate_stop(graph_vertex_t *, void *);
-/* ARGSUSED */
+/*
+ * propagate_start()
+ *
+ * This function is used to propagate a start event to the dependents of the
+ * given vertex. Any dependents that are offline but have their dependencies
+ * satisfied are started. Any dependents that are online and have restart_on
+ * set to "restart" or "refresh" are restarted because their dependencies have
+ * just changed. This only happens with optional_all dependencies.
+ */
static void
propagate_start(graph_vertex_t *v, void *arg)
{
+ restarter_error_t err = (restarter_error_t)arg;
+
+ if (is_inst_bypassed(v))
+ return;
+
switch (v->gv_type) {
case GVT_INST:
- graph_start_if_satisfied(v);
+ /* Restarter */
+ if (inst_running(v)) {
+ if (err == RERR_RESTART || err == RERR_REFRESH) {
+ vertex_send_event(v,
+ RESTARTER_EVENT_TYPE_STOP_RESET);
+ }
+ } else {
+ graph_start_if_satisfied(v);
+ }
break;
case GVT_GROUP:
@@ -1761,10 +1731,11 @@ propagate_start(graph_vertex_t *v, void *arg)
(void *)RERR_RESTART);
break;
}
+ err = v->gv_restart;
/* FALLTHROUGH */
case GVT_SVC:
- graph_walk_dependents(v, propagate_start, NULL);
+ graph_walk_dependents(v, propagate_start, (void *)err);
break;
case GVT_FILE:
@@ -1784,13 +1755,23 @@ propagate_start(graph_vertex_t *v, void *arg)
}
}
+/*
+ * propagate_stop()
+ *
+ * This function is used to propagate a stop event to the dependents of the
+ * given vertex. Any dependents that are online (or in degraded state) with
+ * the restart_on property set to "restart" or "refresh" will be stopped as
+ * their dependencies have just changed, propagate_start() will start them
+ * again once their dependencies have been re-satisfied.
+ */
static void
propagate_stop(graph_vertex_t *v, void *arg)
{
- graph_edge_t *e;
- graph_vertex_t *svc;
restarter_error_t err = (restarter_error_t)arg;
+ if (is_inst_bypassed(v))
+ return;
+
switch (v->gv_type) {
case GVT_INST:
/* Restarter */
@@ -1818,26 +1799,15 @@ propagate_stop(graph_vertex_t *v, void *arg)
case GVT_GROUP:
if (v->gv_depgroup == DEPGRP_EXCLUDE_ALL) {
- graph_walk_dependents(v, propagate_start, NULL);
+ graph_walk_dependents(v, propagate_start,
+ (void *)RERR_NONE);
break;
}
if (err == RERR_NONE || err > v->gv_restart)
break;
- assert(uu_list_numnodes(v->gv_dependents) == 1);
- e = uu_list_first(v->gv_dependents);
- svc = e->ge_vertex;
-
- if (inst_running(svc)) {
- if (err == RERR_RESTART || err == RERR_REFRESH) {
- vertex_send_event(svc,
- RESTARTER_EVENT_TYPE_STOP_RESET);
- } else {
- vertex_send_event(svc,
- RESTARTER_EVENT_TYPE_STOP);
- }
- }
+ graph_walk_dependents(v, propagate_stop, arg);
break;
default:
@@ -4396,9 +4366,9 @@ insubtree_dependents_down(graph_vertex_t *v)
return (B_FALSE);
} else {
/*
- * Skip all excluded and optional_all dependencies
- * and decide whether to offline the service based
- * on restart_on attribute.
+ * Skip all excluded dependents and decide whether
+ * to offline the service based on the restart_on
+ * attribute.
*/
if (is_depgrp_bypassed(vv))
continue;
@@ -4977,7 +4947,7 @@ graph_transition_propagate(graph_vertex_t *v, propagate_event_t type,
if (type == PROPAGATE_STOP) {
graph_walk_dependents(v, propagate_stop, (void *)rerr);
} else if (type == PROPAGATE_START || type == PROPAGATE_SAT) {
- graph_walk_dependents(v, propagate_start, NULL);
+ graph_walk_dependents(v, propagate_start, (void *)RERR_NONE);
if (type == PROPAGATE_SAT)
propagate_satbility(v);
@@ -5048,7 +5018,7 @@ dgraph_remove_instance(const char *fmri, scf_handle_t *h)
v->gv_flags &= ~GV_CONFIGURED;
v->gv_flags &= ~GV_DEATHROW;
- graph_walk_dependents(v, propagate_start, NULL);
+ graph_walk_dependents(v, propagate_start, (void *)RERR_NONE);
propagate_satbility(v);
/*
@@ -5464,8 +5434,8 @@ mark_subtree(graph_edge_t *e, void *arg)
switch (v->gv_type) {
case GVT_INST:
- /* If the instance is already disabled, skip it. */
- if (!(v->gv_flags & GV_ENABLED))
+ /* If the instance is already offline, skip it. */
+ if (!inst_running(v))
return (UU_WALK_NEXT);
v->gv_flags |= GV_TOOFFLINE;
@@ -5473,8 +5443,8 @@ mark_subtree(graph_edge_t *e, void *arg)
break;
case GVT_GROUP:
/*
- * Skip all excluded and optional_all dependencies and decide
- * whether to offline the service based on restart_on attribute.
+ * Skip all excluded dependents and decide whether to offline
+ * the service based on the restart_on attribute.
*/
if (is_depgrp_bypassed(v))
return (UU_WALK_NEXT);
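The graph.c changes for 7267 drop the blanket "GV_TOOFFLINE means unsatisfiable" rule and instead reason about the state a transitioning vertex is heading toward, which is what lets optional_all groups settle correctly while dependencies are being disabled or taken offline. Below is a compile-only sketch of that end-state mapping, using startd's tri-state convention (1 = satisfied, 0 = unsatisfied, -1 = unsatisfiable); the enum and flag values are stand-ins for the real startd/librestart definitions, and the helper itself is illustrative rather than code from the patch.

/* Stand-in definitions; the real ones live in startd.h/librestart.h. */
typedef enum { B_FALSE = 0, B_TRUE = 1 } boolean_t;
enum {
	RESTARTER_STATE_ONLINE, RESTARTER_STATE_DEGRADED,
	RESTARTER_STATE_OFFLINE, RESTARTER_STATE_DISABLED,
	RESTARTER_STATE_MAINT
};
#define	GV_ENABLED	0x01	/* illustrative flag values */
#define	GV_TOOFFLINE	0x02
#define	GV_TODISABLE	0x04

/*
 * Sketch of the end-state reasoning in dependency_satisfied():
 * returns 1 (satisfied), 0 (unsatisfied) or -1 (unsatisfiable).
 */
int
end_state_satisfied(int state, unsigned int flags, boolean_t satbility)
{
	switch (state) {
	case RESTARTER_STATE_ONLINE:
	case RESTARTER_STATE_DEGRADED:
		if (flags & GV_TODISABLE)
			return (-1);	/* will end up disabled */
		if (flags & GV_TOOFFLINE)
			return (0);	/* will end up offline */
		return (1);
	case RESTARTER_STATE_OFFLINE:
		if (!satbility || (flags & GV_TODISABLE))
			return (satbility ? -1 : 0);
		return (0);	/* the real code also asks instance_satisfied() */
	case RESTARTER_STATE_DISABLED:
		if (!satbility || !(flags & GV_ENABLED))
			return (satbility ? -1 : 0);
		return (0);	/* enabled, but not yet out of disabled */
	default:		/* maintenance */
		return (-1);
	}
}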
diff --git a/usr/src/cmd/svc/startd/transition.c b/usr/src/cmd/svc/startd/transition.c
index 4e7a884719..08da2e9486 100644
--- a/usr/src/cmd/svc/startd/transition.c
+++ b/usr/src/cmd/svc/startd/transition.c
@@ -21,6 +21,8 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright 2016 RackTop Systems.
*/
@@ -137,6 +139,13 @@ gt_enter_maint(scf_handle_t *h, graph_vertex_t *v,
"%s.\n", v->gv_name);
graph_transition_propagate(v, PROPAGATE_STOP, rerr);
+
+ /*
+ * The maintenance transition may satisfy optional_all/restart
+ * dependencies and should be propagated to determine
+ * whether new dependents are satisfiable.
+ */
+ graph_transition_propagate(v, PROPAGATE_SAT, rerr);
} else {
log_framework(LOG_DEBUG, "Propagating maintenance of %s.\n",
v->gv_name);
@@ -154,6 +163,7 @@ gt_enter_offline(scf_handle_t *h, graph_vertex_t *v,
restarter_instance_state_t old_state, restarter_error_t rerr)
{
int to_offline = v->gv_flags & GV_TOOFFLINE;
+ int to_disable = v->gv_flags & GV_TODISABLE;
v->gv_flags &= ~GV_TOOFFLINE;
@@ -164,7 +174,7 @@ gt_enter_offline(scf_handle_t *h, graph_vertex_t *v,
* remains offline until the disable process completes.
*/
if (v->gv_flags & GV_ENABLED) {
- if (to_offline == 0)
+ if (to_offline == 0 && to_disable == 0)
graph_start_if_satisfied(v);
} else {
if (gt_running(old_state) && v->gv_post_disable_f)
@@ -267,6 +277,12 @@ gt_enter_disabled(scf_handle_t *h, graph_vertex_t *v,
graph_transition_propagate(v, PROPAGATE_STOP, rerr);
+ /*
+ * The disable transition may satisfy optional_all/restart
+ * dependencies and should be propagated to determine
+ * whether new dependents are satisfiable.
+ */
+ graph_transition_propagate(v, PROPAGATE_SAT, rerr);
} else {
log_framework(LOG_DEBUG, "Propagating disable of %s.\n",
v->gv_name);
diff --git a/usr/src/lib/libm/common/C/sincospi.c b/usr/src/lib/libm/common/C/sincospi.c
index fe339b3b35..cb49fb500e 100644
--- a/usr/src/lib/libm/common/C/sincospi.c
+++ b/usr/src/lib/libm/common/C/sincospi.c
@@ -84,43 +84,40 @@ static const double
/* INDENT ON */
void
-sincospi(double x, double *s, double *c) {
+sincospi(double x, double *s, double *c)
+{
double y, z, t;
int n, ix, k;
- int hx = ((int *) &x)[HIWORD];
- unsigned h, lx = ((unsigned *) &x)[LOWORD];
+ int hx = ((int *)&x)[HIWORD];
+ unsigned h, lx = ((unsigned *)&x)[LOWORD];
ix = hx & ~0x80000000;
n = (ix >> 20) - 0x3ff;
if (n >= 51) { /* |x| >= 2**51 */
- if (n >= 1024)
+ if (n >= 1024) {
#if defined(FPADD_TRAPS_INCOMPLETE_ON_NAN)
*s = *c = ix >= 0x7ff80000 ? x : x - x;
/* assumes sparc-like QNaN */
#else
*s = *c = x - x;
#endif
- else {
- if (n >= 53) {
+ } else {
+ if (n >= 53) {
*s = 0.0;
*c = 1.0;
- }
- else if (n == 52) {
+ } else if (n == 52) {
if ((lx & 1) == 0) {
*s = 0.0;
*c = 1.0;
- }
- else {
+ } else {
*s = -0.0;
*c = -1.0;
}
- }
- else { /* n == 51 */
+ } else { /* n == 51 */
if ((lx & 1) == 0) {
*s = 0.0;
*c = 1.0;
- }
- else {
+ } else {
*s = 1.0;
*c = 0.0;
}
@@ -130,57 +127,53 @@ sincospi(double x, double *s, double *c) {
}
}
}
- }
- else if (n < -2) /* |x| < 0.25 */
+ } else if (n < -2) /* |x| < 0.25 */
*s = __k_sincos(pi * fabs(x), 0.0, c);
else {
/* y = |4x|, z = floor(y), and n = (int)(z mod 8.0) */
if (ix < 0x41C00000) { /* |x| < 2**29 */
y = 4.0 * fabs(x);
- n = (int) y; /* exact */
- z = (double) n;
+ n = (int)y; /* exact */
+ z = (double)n;
k = z == y;
t = (y - z) * 0.25;
- }
- else { /* 2**29 <= |x| < 2**51 */
+ } else { /* 2**29 <= |x| < 2**51 */
y = fabs(x);
k = 50 - n;
n = lx >> k;
h = n << k;
- ((unsigned *) &z)[LOWORD] = h;
- ((int *) &z)[HIWORD] = ix;
+ ((unsigned *)&z)[LOWORD] = h;
+ ((int *)&z)[HIWORD] = ix;
k = h == lx;
t = y - z;
}
if (k) { /* x = N/4 */
- if ((n & 1) != 0)
+ if ((n & 1) != 0) {
*s = *c = sqrth_h + sqrth_l;
- else
+ } else {
if ((n & 2) == 0) {
*s = 0.0;
*c = 1.0;
- }
- else {
+ } else {
*s = 1.0;
*c = 0.0;
}
- y = (n & 2) == 0 ? 0.0 : 1.0;
- if ((n & 4) != 0)
- *s = -*s;
- if (((n + 1) & 4) != 0)
- *c = -*c;
- }
- else {
+ }
+ if ((n & 4) != 0)
+ *s = -*s;
+ if (((n + 1) & 4) != 0)
+ *c = -*c;
+ } else {
if ((n & 1) != 0)
t = 0.25 - t;
if (((n + (n & 1)) & 2) == 0)
*s = __k_sincos(pi * t, 0.0, c);
else
*c = __k_sincos(pi * t, 0.0, s);
- if ((n & 4) != 0)
- *s = -*s;
- if (((n + 2) & 4) != 0)
- *c = -*c;
+ if ((n & 4) != 0)
+ *s = -*s;
+ if (((n + 2) & 4) != 0)
+ *c = -*c;
}
}
if (hx < 0)
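sincospi(x, &s, &c) returns sin(pi*x) and cos(pi*x) in one call; the hunk above only adds braces and reflows indentation for the 7871 -Wmisleading-indentation cleanup, so results are unchanged. A small usage sketch follows; the explicit prototype is an assumption for the case where the headers in use do not expose the declaration, and it matches the definition above.

#include <math.h>
#include <stdio.h>

/* Assumed declaration, matching the definition in sincospi.c. */
extern void sincospi(double, double *, double *);

int
main(void)
{
	double s, c;

	sincospi(0.5, &s, &c);	/* sin(pi/2) = 1, cos(pi/2) = 0 */
	printf("sincospi(0.5):  s = %g, c = %g\n", s, c);

	sincospi(0.25, &s, &c);	/* both are sqrt(2)/2, about 0.70711 */
	printf("sincospi(0.25): s = %g, c = %g\n", s, c);

	return (0);	/* build with: cc example.c -lm */
}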
diff --git a/usr/src/uts/common/fs/zfs/dmu.c b/usr/src/uts/common/fs/zfs/dmu.c
index f62e719377..f7ef30548d 100644
--- a/usr/src/uts/common/fs/zfs/dmu.c
+++ b/usr/src/uts/common/fs/zfs/dmu.c
@@ -871,17 +871,12 @@ dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
return (0);
}
-int
-dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
+static int
+dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
- dnode_t *dn;
dmu_buf_t **dbp;
- int numbufs, err;
-
- err = dnode_hold(os, object, FTAG, &dn);
- if (err)
- return (err);
+ int numbufs, err = 0;
/*
* Deal with odd block sizes, where there can't be data past the first
@@ -926,22 +921,37 @@ dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
- dnode_rele(dn, FTAG);
return (err);
}
-void
-dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
- const void *buf, dmu_tx_t *tx)
+int
+dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
+ void *buf, uint32_t flags)
{
- dmu_buf_t **dbp;
- int numbufs, i;
+ dnode_t *dn;
+ int err;
- if (size == 0)
- return;
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err != 0)
+ return (err);
- VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
- FALSE, FTAG, &numbufs, &dbp));
+ err = dmu_read_impl(dn, offset, size, buf, flags);
+ dnode_rele(dn, FTAG);
+ return (err);
+}
+
+int
+dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
+ uint32_t flags)
+{
+ return (dmu_read_impl(dn, offset, size, buf, flags));
+}
+
+static void
+dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
+ const void *buf, dmu_tx_t *tx)
+{
+ int i;
for (i = 0; i < numbufs; i++) {
int tocpy;
@@ -969,6 +979,37 @@ dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
size -= tocpy;
buf = (char *)buf + tocpy;
}
+}
+
+void
+dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
+ const void *buf, dmu_tx_t *tx)
+{
+ dmu_buf_t **dbp;
+ int numbufs;
+
+ if (size == 0)
+ return;
+
+ VERIFY0(dmu_buf_hold_array(os, object, offset, size,
+ FALSE, FTAG, &numbufs, &dbp));
+ dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
+ dmu_buf_rele_array(dbp, numbufs, FTAG);
+}
+
+void
+dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
+ const void *buf, dmu_tx_t *tx)
+{
+ dmu_buf_t **dbp;
+ int numbufs;
+
+ if (size == 0)
+ return;
+
+ VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
+ FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
+ dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
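The 7801 change splits dmu_read()/dmu_write() into *_impl() helpers so that new by-dnode entry points can skip the dnode_hold() lookup when the caller already holds the dnode. The sketch below shows the intended call pattern; it is kernel-context code that assumes the ZFS headers, the wrapper name is illustrative, and DMU_READ_PREFETCH, dnode_hold() and dnode_rele() are existing DMU interfaces.

#include <sys/dmu.h>
#include <sys/dnode.h>

/*
 * Illustrative: read two adjacent ranges through one dnode hold instead
 * of resolving (objset, object) twice via dmu_read().
 */
static int
read_two_ranges(objset_t *os, uint64_t object, void *buf1, void *buf2,
    uint64_t len)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_by_dnode(dn, 0, len, buf1, DMU_READ_PREFETCH);
	if (err == 0)
		err = dmu_read_by_dnode(dn, len, len, buf2,
		    DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);
	return (err);
}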
diff --git a/usr/src/uts/common/fs/zfs/dmu_object.c b/usr/src/uts/common/fs/zfs/dmu_object.c
index 3e4171ce6c..40898ef26d 100644
--- a/usr/src/uts/common/fs/zfs/dmu_object.c
+++ b/usr/src/uts/common/fs/zfs/dmu_object.c
@@ -93,11 +93,11 @@ dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
}
dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
- dnode_rele(dn, FTAG);
-
mutex_exit(&os->os_obj_lock);
- dmu_tx_add_new_object(tx, os, object);
+ dmu_tx_add_new_object(tx, dn);
+ dnode_rele(dn, FTAG);
+
return (object);
}
@@ -115,9 +115,10 @@ dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
if (err)
return (err);
dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
+ dmu_tx_add_new_object(tx, dn);
+
dnode_rele(dn, FTAG);
- dmu_tx_add_new_object(tx, os, object);
return (0);
}
diff --git a/usr/src/uts/common/fs/zfs/dmu_tx.c b/usr/src/uts/common/fs/zfs/dmu_tx.c
index 88748595b6..34d96b6132 100644
--- a/usr/src/uts/common/fs/zfs/dmu_tx.c
+++ b/usr/src/uts/common/fs/zfs/dmu_tx.c
@@ -93,21 +93,14 @@ dmu_tx_private_ok(dmu_tx_t *tx)
}
static dmu_tx_hold_t *
-dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
- enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
+dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
+ uint64_t arg1, uint64_t arg2)
{
dmu_tx_hold_t *txh;
- dnode_t *dn = NULL;
- int err;
-
- if (object != DMU_NEW_OBJECT) {
- err = dnode_hold(os, object, tx, &dn);
- if (err) {
- tx->tx_err = err;
- return (NULL);
- }
- if (err == 0 && tx->tx_txg != 0) {
+ if (dn != NULL) {
+ (void) refcount_add(&dn->dn_holds, tx);
+ if (tx->tx_txg != 0) {
mutex_enter(&dn->dn_mtx);
/*
* dn->dn_assigned_txg == tx->tx_txg doesn't pose a
@@ -134,17 +127,36 @@ dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
return (txh);
}
+static dmu_tx_hold_t *
+dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
+ enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
+{
+ dnode_t *dn = NULL;
+ dmu_tx_hold_t *txh;
+ int err;
+
+ if (object != DMU_NEW_OBJECT) {
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err != 0) {
+ tx->tx_err = err;
+ return (NULL);
+ }
+ }
+ txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
+ if (dn != NULL)
+ dnode_rele(dn, FTAG);
+ return (txh);
+}
+
void
-dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
+dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
/*
* If we're syncing, they can manipulate any object anyhow, and
* the hold on the dnode_t can cause problems.
*/
- if (!dmu_tx_is_syncing(tx)) {
- (void) dmu_tx_hold_object_impl(tx, os,
- object, THT_NEWOBJECT, 0, 0);
- }
+ if (!dmu_tx_is_syncing(tx))
+ (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
/*
@@ -284,11 +296,26 @@ dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_WRITE, off, len);
- if (txh == NULL)
- return;
+ if (txh != NULL) {
+ dmu_tx_count_write(txh, off, len);
+ dmu_tx_count_dnode(txh);
+ }
+}
- dmu_tx_count_write(txh, off, len);
- dmu_tx_count_dnode(txh);
+void
+dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
+{
+ dmu_tx_hold_t *txh;
+
+ ASSERT0(tx->tx_txg);
+ ASSERT3U(len, <=, DMU_MAX_ACCESS);
+ ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
+
+ txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
+ if (txh != NULL) {
+ dmu_tx_count_write(txh, off, len);
+ dmu_tx_count_dnode(txh);
+ }
}
/*
@@ -305,18 +332,18 @@ dmu_tx_mark_netfree(dmu_tx_t *tx)
tx->tx_netfree = B_TRUE;
}
-void
-dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
+static void
+dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
+ dmu_tx_t *tx;
+ dnode_t *dn;
int err;
+ zio_t *zio;
+ tx = txh->txh_tx;
ASSERT(tx->tx_txg == 0);
- dmu_tx_hold_t *txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
- object, THT_FREE, off, len);
- if (txh == NULL)
- return;
- dnode_t *dn = txh->txh_dnode;
+ dn = txh->txh_dnode;
dmu_tx_count_dnode(txh);
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
@@ -397,17 +424,36 @@ dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
}
void
-dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
+dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
+{
+ dmu_tx_hold_t *txh;
+
+ txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
+ object, THT_FREE, off, len);
+ if (txh != NULL)
+ (void) dmu_tx_hold_free_impl(txh, off, len);
+}
+
+void
+dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
+{
+ dmu_tx_hold_t *txh;
+
+ txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
+ if (txh != NULL)
+ (void) dmu_tx_hold_free_impl(txh, off, len);
+}
+
+static void
+dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, int add, const char *name)
{
+ dmu_tx_t *tx = txh->txh_tx;
+ dnode_t *dn;
int err;
ASSERT(tx->tx_txg == 0);
- dmu_tx_hold_t *txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
- object, THT_ZAP, add, (uintptr_t)name);
- if (txh == NULL)
- return;
- dnode_t *dn = txh->txh_dnode;
+ dn = txh->txh_dnode;
dmu_tx_count_dnode(txh);
@@ -451,6 +497,32 @@ dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
}
void
+dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
+{
+ dmu_tx_hold_t *txh;
+
+ ASSERT0(tx->tx_txg);
+
+ txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
+ object, THT_ZAP, add, (uintptr_t)name);
+ if (txh != NULL)
+ dmu_tx_hold_zap_impl(txh, add, name);
+}
+
+void
+dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
+{
+ dmu_tx_hold_t *txh;
+
+ ASSERT0(tx->tx_txg);
+ ASSERT(dn != NULL);
+
+ txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
+ if (txh != NULL)
+ dmu_tx_hold_zap_impl(txh, add, name);
+}
+
+void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
@@ -464,6 +536,18 @@ dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
}
void
+dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
+{
+ dmu_tx_hold_t *txh;
+
+ ASSERT0(tx->tx_txg);
+
+ txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
+ if (txh)
+ dmu_tx_count_dnode(txh);
+}
+
+void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
dmu_tx_hold_t *txh;
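The by-dnode tx holds added above mirror the object-number variants, so a caller that already holds the dnode can declare the dirty range against dn directly and then write through the same dnode. A kernel-context sketch of that flow follows (the wrapper is illustrative; dmu_tx_create(), dmu_tx_assign(TXG_WAIT), dmu_tx_commit() and dmu_tx_abort() are existing DMU interfaces, and dmu_write_by_dnode() comes from the dmu.c hunk earlier in this patch).

#include <sys/dmu.h>
#include <sys/dnode.h>

/* Illustrative: dirty and write a range of an already-held dnode. */
static int
write_through_dnode(objset_t *os, dnode_t *dn, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write_by_dnode(tx, dn, off, len);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_by_dnode(dn, off, len, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}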
diff --git a/usr/src/uts/common/fs/zfs/sys/dmu.h b/usr/src/uts/common/fs/zfs/sys/dmu.h
index 140726af9a..7eb6549cce 100644
--- a/usr/src/uts/common/fs/zfs/sys/dmu.h
+++ b/usr/src/uts/common/fs/zfs/sys/dmu.h
@@ -667,10 +667,17 @@ void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
+void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
+ int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
uint64_t len);
+void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
+ uint64_t len);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
+void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
+ const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
+void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
@@ -720,8 +727,12 @@ int dmu_free_long_object(objset_t *os, uint64_t object);
#define DMU_READ_NO_PREFETCH 1 /* don't prefetch */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags);
+int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
+ uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
+void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
+ const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
diff --git a/usr/src/uts/common/fs/zfs/sys/dmu_tx.h b/usr/src/uts/common/fs/zfs/sys/dmu_tx.h
index d9abdcd879..afc97994ef 100644
--- a/usr/src/uts/common/fs/zfs/sys/dmu_tx.h
+++ b/usr/src/uts/common/fs/zfs/sys/dmu_tx.h
@@ -136,7 +136,7 @@ extern dmu_tx_t *dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg);
dmu_tx_t *dmu_tx_create_dd(dsl_dir_t *dd);
int dmu_tx_is_syncing(dmu_tx_t *tx);
int dmu_tx_private_ok(dmu_tx_t *tx);
-void dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object);
+void dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_dirty_buf(dmu_tx_t *tx, struct dmu_buf_impl *db);
void dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space);
diff --git a/usr/src/uts/common/fs/zfs/sys/zap.h b/usr/src/uts/common/fs/zfs/sys/zap.h
index de4c7cced1..10cb6b449b 100644
--- a/usr/src/uts/common/fs/zfs/sys/zap.h
+++ b/usr/src/uts/common/fs/zfs/sys/zap.h
@@ -220,6 +220,9 @@ int zap_count_write_by_dnode(dnode_t *dn, const char *name,
int zap_add(objset_t *ds, uint64_t zapobj, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx);
+int zap_add_by_dnode(dnode_t *dn, const char *key,
+ int integer_size, uint64_t num_integers,
+ const void *val, dmu_tx_t *tx);
int zap_add_uint64(objset_t *ds, uint64_t zapobj, const uint64_t *key,
int key_numints, int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx);
@@ -259,6 +262,7 @@ int zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int zap_remove(objset_t *ds, uint64_t zapobj, const char *name, dmu_tx_t *tx);
int zap_remove_norm(objset_t *ds, uint64_t zapobj, const char *name,
matchtype_t mt, dmu_tx_t *tx);
+int zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx);
int zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, dmu_tx_t *tx);
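zap_add_by_dnode() and zap_remove_by_dnode(), declared above and implemented in the zap_micro.c hunk below, take an already-held ZAP dnode rather than an (objset, object) pair. A kernel-context usage sketch follows; the wrapper name and the replace-on-EEXIST policy are illustrative, and the transaction is assumed to already hold the ZAP (for example via dmu_tx_hold_zap_by_dnode()).

#include <sys/zap.h>
#include <sys/dnode.h>
#include <sys/errno.h>

/* Illustrative: store a single uint64 value under "name" in a ZAP. */
static int
zap_store_u64(dnode_t *zap_dn, const char *name, uint64_t value,
    dmu_tx_t *tx)
{
	int err;

	err = zap_add_by_dnode(zap_dn, name, sizeof (uint64_t), 1,
	    &value, tx);
	if (err == EEXIST) {
		/* Entry already present: drop it and re-add the new value. */
		err = zap_remove_by_dnode(zap_dn, name, tx);
		if (err == 0)
			err = zap_add_by_dnode(zap_dn, name,
			    sizeof (uint64_t), 1, &value, tx);
	}
	return (err);
}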
diff --git a/usr/src/uts/common/fs/zfs/zap_micro.c b/usr/src/uts/common/fs/zfs/zap_micro.c
index 4632518798..15dda6856b 100644
--- a/usr/src/uts/common/fs/zfs/zap_micro.c
+++ b/usr/src/uts/common/fs/zfs/zap_micro.c
@@ -1109,34 +1109,30 @@ again:
ASSERT(!"out of entries!");
}
-int
-zap_add(objset_t *os, uint64_t zapobj, const char *key,
+static int
+zap_add_impl(zap_t *zap, const char *key,
int integer_size, uint64_t num_integers,
- const void *val, dmu_tx_t *tx)
+ const void *val, dmu_tx_t *tx, void *tag)
{
- zap_t *zap;
- int err;
+ int err = 0;
mzap_ent_t *mze;
const uint64_t *intval = val;
zap_name_t *zn;
- err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
- if (err)
- return (err);
zn = zap_name_alloc(zap, key, 0);
if (zn == NULL) {
- zap_unlockdir(zap, FTAG);
+ zap_unlockdir(zap, tag);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
- err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
+ err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
} else if (integer_size != 8 || num_integers != 1 ||
strlen(key) >= MZAP_NAME_LEN) {
- err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
+ err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
if (err == 0) {
err = fzap_add(zn, integer_size, num_integers, val,
- FTAG, tx);
+ tag, tx);
}
zap = zn->zn_zap; /* fzap_add() may change zap */
} else {
@@ -1150,7 +1146,39 @@ zap_add(objset_t *os, uint64_t zapobj, const char *key,
ASSERT(zap == zn->zn_zap);
zap_name_free(zn);
if (zap != NULL) /* may be NULL if fzap_add() failed */
- zap_unlockdir(zap, FTAG);
+ zap_unlockdir(zap, tag);
+ return (err);
+}
+
+int
+zap_add(objset_t *os, uint64_t zapobj, const char *key,
+ int integer_size, uint64_t num_integers,
+ const void *val, dmu_tx_t *tx)
+{
+ zap_t *zap;
+ int err;
+
+ err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
+ if (err != 0)
+ return (err);
+ err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
+ /* zap_add_impl() calls zap_unlockdir() */
+ return (err);
+}
+
+int
+zap_add_by_dnode(dnode_t *dn, const char *key,
+ int integer_size, uint64_t num_integers,
+ const void *val, dmu_tx_t *tx)
+{
+ zap_t *zap;
+ int err;
+
+ err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
+ if (err != 0)
+ return (err);
+ err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
+ /* zap_add_impl() calls zap_unlockdir() */
return (err);
}
@@ -1268,23 +1296,17 @@ zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
return (zap_remove_norm(os, zapobj, name, 0, tx));
}
-int
-zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
+static int
+zap_remove_impl(zap_t *zap, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
- zap_t *zap;
- int err;
mzap_ent_t *mze;
zap_name_t *zn;
+ int err = 0;
- err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
- if (err)
- return (err);
zn = zap_name_alloc(zap, name, mt);
- if (zn == NULL) {
- zap_unlockdir(zap, FTAG);
+ if (zn == NULL)
return (SET_ERROR(ENOTSUP));
- }
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
@@ -1299,6 +1321,34 @@ zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
}
}
zap_name_free(zn);
+ return (err);
+}
+
+int
+zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
+ matchtype_t mt, dmu_tx_t *tx)
+{
+ zap_t *zap;
+ int err;
+
+ err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
+ if (err)
+ return (err);
+ err = zap_remove_impl(zap, name, mt, tx);
+ zap_unlockdir(zap, FTAG);
+ return (err);
+}
+
+int
+zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
+{
+ zap_t *zap;
+ int err;
+
+ err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
+ if (err)
+ return (err);
+ err = zap_remove_impl(zap, name, 0, tx);
zap_unlockdir(zap, FTAG);
return (err);
}