summaryrefslogtreecommitdiff
path: root/usr/src/lib/libzfs/common
diff options
context:
space:
mode:
Diffstat (limited to 'usr/src/lib/libzfs/common')
-rw-r--r--usr/src/lib/libzfs/common/libzfs.h298
-rw-r--r--usr/src/lib/libzfs/common/libzfs_changelist.c416
-rw-r--r--usr/src/lib/libzfs/common/libzfs_config.c309
-rw-r--r--usr/src/lib/libzfs/common/libzfs_dataset.c2939
-rw-r--r--usr/src/lib/libzfs/common/libzfs_graph.c527
-rw-r--r--usr/src/lib/libzfs/common/libzfs_impl.h103
-rw-r--r--usr/src/lib/libzfs/common/libzfs_import.c753
-rw-r--r--usr/src/lib/libzfs/common/libzfs_mount.c558
-rw-r--r--usr/src/lib/libzfs/common/libzfs_pool.c1154
-rw-r--r--usr/src/lib/libzfs/common/libzfs_status.c248
-rw-r--r--usr/src/lib/libzfs/common/libzfs_util.c204
-rw-r--r--usr/src/lib/libzfs/common/llib-lzfs32
12 files changed, 7541 insertions, 0 deletions
diff --git a/usr/src/lib/libzfs/common/libzfs.h b/usr/src/lib/libzfs/common/libzfs.h
new file mode 100644
index 0000000000..a9caff662c
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs.h
@@ -0,0 +1,298 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _LIBZFS_H
+#define _LIBZFS_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <libnvpair.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/varargs.h>
+#include <sys/fs/zfs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Miscellaneous ZFS constants
+ */
+#define ZFS_MAXNAMELEN MAXNAMELEN
+#define ZPOOL_MAXNAMELEN MAXNAMELEN
+#define ZFS_MAXPROPLEN MAXPATHLEN
+
+/*
+ * Basic handle types
+ */
+typedef struct zfs_handle zfs_handle_t;
+typedef struct zpool_handle zpool_handle_t;
+
+/*
+ * Basic handle functions
+ */
+extern zpool_handle_t *zpool_open(const char *);
+extern zpool_handle_t *zpool_open_canfail(const char *);
+extern void zpool_close(zpool_handle_t *);
+extern const char *zpool_get_name(zpool_handle_t *);
+extern uint64_t zpool_get_guid(zpool_handle_t *);
+extern uint64_t zpool_get_space_used(zpool_handle_t *);
+extern uint64_t zpool_get_space_total(zpool_handle_t *);
+extern int zpool_get_root(zpool_handle_t *, char *, size_t);
+extern int zpool_get_state(zpool_handle_t *);
+
+/*
+ * Iterate over all active pools in the system.
+ */
+typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
+extern int zpool_iter(zpool_iter_f, void *);
+
+/*
+ * Functions to create and destroy pools
+ */
+extern int zpool_create(const char *, nvlist_t *, const char *);
+extern int zpool_destroy(zpool_handle_t *);
+extern int zpool_add(zpool_handle_t *, nvlist_t *);
+
+/*
+ * Functions to manipulate pool and vdev state
+ */
+extern int zpool_scrub(zpool_handle_t *, pool_scrub_type_t);
+
+extern int zpool_vdev_online(zpool_handle_t *, const char *);
+extern int zpool_vdev_offline(zpool_handle_t *, const char *);
+extern int zpool_vdev_attach(zpool_handle_t *, const char *, const char *,
+ nvlist_t *, int);
+extern int zpool_vdev_detach(zpool_handle_t *, const char *);
+
+/*
+ * Pool health statistics.
+ */
+typedef enum {
+ /*
+ * The following correspond to faults as defined in the (fault.fs.zfs.*)
+ * event namespace. Each is associated with a corresponding message ID.
+ */
+ ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
+ ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
+ ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
+ ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
+ ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
+ ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
+ ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
+ ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
+ ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
+ ZPOOL_STATUS_VERSION_MISMATCH, /* bad on-disk version */
+
+ /*
+ * The following are not faults per se, but still an error possibly
+ * requiring administrative attention. There is no corresponding
+ * message ID.
+ */
+ ZPOOL_STATUS_RESILVERING, /* device being resilvered */
+ ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
+
+ /*
+ * Finally, the following indicates a healthy pool.
+ */
+ ZPOOL_STATUS_OK
+} zpool_status_t;
+
+extern zpool_status_t zpool_get_status(zpool_handle_t *, char **msgid);
+extern zpool_status_t zpool_import_status(nvlist_t *, char **msgid);
+
+/*
+ * Statistics and configuration functions.
+ */
+extern nvlist_t *zpool_get_config(zpool_handle_t *);
+extern int zpool_refresh_stats(zpool_handle_t *,
+ nvlist_t **oldconfig, nvlist_t **newconfig);
+
+/*
+ * Import and export functions
+ */
+extern int zpool_export(zpool_handle_t *);
+extern int zpool_import(nvlist_t *, const char *, const char *);
+
+/*
+ * Search for pools to import
+ */
+extern nvlist_t *zpool_find_import(int argc, char **argv);
+
+/*
+ * Basic handle manipulations. These functions do not create or destroy the
+ * underlying datasets, only the references to them.
+ */
+extern zfs_handle_t *zfs_open(const char *, int);
+extern void zfs_close(zfs_handle_t *);
+extern zfs_type_t zfs_get_type(const zfs_handle_t *);
+extern const char *zfs_get_name(const zfs_handle_t *);
+
+typedef enum {
+ ZFS_SRC_NONE = 0x1,
+ ZFS_SRC_DEFAULT = 0x2,
+ ZFS_SRC_TEMPORARY = 0x4,
+ ZFS_SRC_LOCAL = 0x8,
+ ZFS_SRC_INHERITED = 0x10
+} zfs_source_t;
+
+#define ZFS_SRC_ALL 0x1f
+
+/*
+ * Property management functions. Some functions are shared with the kernel,
+ * and are found in fs/zfs.h.
+ */
+const char *zfs_prop_to_name(zfs_prop_t);
+int zfs_prop_set(zfs_handle_t *, zfs_prop_t, const char *);
+int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t, zfs_source_t *,
+ char *, size_t, int);
+int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *, zfs_source_t *,
+ char *, size_t);
+uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
+int zfs_prop_validate(zfs_prop_t, const char *, uint64_t *);
+int zfs_prop_inheritable(zfs_prop_t);
+int zfs_prop_inherit(zfs_handle_t *, zfs_prop_t);
+const char *zfs_prop_values(zfs_prop_t);
+int zfs_prop_valid_for_type(zfs_prop_t, int);
+void zfs_prop_default_string(zfs_prop_t prop, char *buf, size_t buflen);
+uint64_t zfs_prop_default_numeric(zfs_prop_t);
+int zfs_prop_is_string(zfs_prop_t prop);
+const char *zfs_prop_column_name(zfs_prop_t);
+const char *zfs_prop_column_format(zfs_prop_t);
+char ** zfs_prop_column_subopts(void);
+char ** zfs_prop_column_short_subopts(void);
+
+#define ZFS_MOUNTPOINT_NONE "none"
+#define ZFS_MOUNTPOINT_LEGACY "legacy"
+
+/*
+ * Iterator functions.
+ */
+typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
+extern int zfs_iter_root(zfs_iter_f, void *);
+extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_dependents(zfs_handle_t *, zfs_iter_f, void *);
+
+/*
+ * Functions to create and destroy datasets.
+ */
+extern int zfs_create(const char *, zfs_type_t, const char *, const char *);
+extern int zfs_destroy(zfs_handle_t *);
+extern int zfs_clone(zfs_handle_t *, const char *);
+extern int zfs_snapshot(const char *);
+extern int zfs_rollback(zfs_handle_t *);
+extern int zfs_rename(zfs_handle_t *, const char *);
+extern int zfs_backup(zfs_handle_t *, zfs_handle_t *);
+extern int zfs_restore(const char *, int, int, int);
+
+/*
+ * Miscellaneous functions.
+ */
+extern const char *zfs_type_to_name(zfs_type_t);
+extern void zfs_refresh_properties(zfs_handle_t *);
+extern int zfs_name_valid(const char *, zfs_type_t);
+
+/*
+ * Mount support functions.
+ */
+extern int zfs_is_mounted(zfs_handle_t *, char **);
+extern int zfs_mount(zfs_handle_t *, const char *, int);
+extern int zfs_unmount(zfs_handle_t *, const char *, int);
+extern int zfs_unmountall(zfs_handle_t *, int);
+
+/*
+ * Share support functions.
+ */
+extern int zfs_is_shared(zfs_handle_t *, char **);
+extern int zfs_share(zfs_handle_t *);
+extern int zfs_unshare(zfs_handle_t *, const char *);
+extern int zfs_unshareall(zfs_handle_t *);
+
+/*
+ * For clients that need to capture error output.
+ */
+extern void zfs_set_error_handler(void (*)(const char *, va_list));
+
+/*
+ * When dealing with nvlists, verify() is extremely useful
+ */
+#ifdef NDEBUG
+#define verify(EX) ((void)(EX))
+#else
+#define verify(EX) assert(EX)
+#endif
+
+/*
+ * Utility function to convert a number to a human-readable form.
+ */
+extern void zfs_nicenum(uint64_t, char *, size_t);
+extern int zfs_nicestrtonum(const char *, uint64_t *);
+
+/*
+ * Pool destroy special. Remove the device information without destroying
+ * the underlying dataset.
+ */
+extern int zfs_remove_link(zfs_handle_t *);
+
+/*
+ * Given a device or file, determine if it is part of a pool.
+ */
+extern int zpool_in_use(int fd, char **state,
+ char **name);
+
+/*
+ * ftyp special. Read the label from a given device.
+ */
+extern nvlist_t *zpool_read_label(int fd);
+
+/*
+ * Create and remove zvol /dev links
+ */
+extern int zpool_create_zvol_links(zpool_handle_t *);
+extern int zpool_remove_zvol_links(zpool_handle_t *);
+
+/*
+ * zoneadmd hack
+ */
+extern void zfs_init(void);
+
+/*
+ * Useful defines
+ */
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LIBZFS_H */
diff --git a/usr/src/lib/libzfs/common/libzfs_changelist.c b/usr/src/lib/libzfs/common/libzfs_changelist.c
new file mode 100644
index 0000000000..497461e19f
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_changelist.c
@@ -0,0 +1,416 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <libintl.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <zone.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+/*
+ * Structure to keep track of dataset state. Before changing the 'sharenfs' or
+ * 'mountpoint' property, we record whether the filesystem was previously
+ * mounted/shared. This prior state dictates whether we remount/reshare the
+ * dataset after the property has been changed.
+ *
+ * The interface consists of the following sequence of functions:
+ *
+ * changelist_gather()
+ * changelist_prefix()
+ * < change property >
+ * changelist_postfix()
+ * changelist_free()
+ *
+ * Other interfaces:
+ *
+ * changelist_rename() - renames all datasets appropriately when doing a rename
+ * changelist_unshare() - unshares all the nodes in a given changelist
+ * changelist_haszonedchild() - check if there is any child exported to
+ * a local zone
+ */
+typedef struct prop_changenode {
+ zfs_handle_t *cn_handle;
+ int cn_shared;
+ int cn_mounted;
+ int cn_zoned;
+ uu_list_node_t cn_listnode;
+} prop_changenode_t;
+
+struct prop_changelist {
+ zfs_prop_t cl_prop;
+ zfs_prop_t cl_realprop;
+ uu_list_pool_t *cl_pool;
+ uu_list_t *cl_list;
+ int cl_waslegacy;
+ int cl_allchildren;
+ int cl_flags;
+ int cl_haszonedchild;
+};
+
+/*
+ * If the property is 'mountpoint', go through and unmount filesystems as
+ * necessary. We don't do the same for 'sharenfs', because we can just re-share
+ * with different options without interrupting service.
+ */
+int
+changelist_prefix(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ int ret = 0;
+
+ if (clp->cl_prop != ZFS_PROP_MOUNTPOINT)
+ return (0);
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+ /*
+ * if we are in a global zone, but this dataset is exported to
+ * a local zone, do nothing.
+ */
+ if ((getzoneid() == GLOBAL_ZONEID) && cn->cn_zoned)
+ continue;
+
+ /*
+ * If we have a volume and this was a rename, remove the
+ * /dev/zvol links
+ */
+ if (cn->cn_handle->zfs_volblocksize &&
+ clp->cl_realprop == ZFS_PROP_NAME) {
+ if (zvol_remove_link(cn->cn_handle->zfs_name) != 0)
+ ret = -1;
+ } else if (zfs_unmount(cn->cn_handle, NULL, clp->cl_flags) != 0)
+ ret = -1;
+ }
+
+ return (ret);
+}
+
+/*
+ * If the property is 'mountpoint' or 'sharenfs', go through and remount and/or
+ * reshare the filesystems as necessary. In changelist_gather() we recorded
+ * whether the filesystem was previously shared or mounted. The action we take
+ * depends on the previous state, and whether the value was previously 'legacy'.
+ * For non-legacy properties, we only remount/reshare the filesystem if it was
+ * previously mounted/shared. Otherwise, we always remount/reshare the
+ * filesystem.
+ */
+int
+changelist_postfix(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ int ret = 0;
+
+ /*
+ * If we're changing the mountpoint, attempt to destroy the underlying
+ * mountpoint. All other datasets will have inherited from this dataset
+ * (in which case their mountpoints exist in the filesystem in the new
+ * location), or have explicit mountpoints set (in which case they won't
+ * be in the changelist).
+ */
+ if ((cn = uu_list_last(clp->cl_list)) == NULL)
+ return (0);
+
+ if (clp->cl_prop == ZFS_PROP_MOUNTPOINT)
+ remove_mountpoint(cn->cn_handle);
+
+ /*
+ * We walk the datasets in reverse, because we want to mount any parent
+ * datasets before mounting the children.
+ */
+ for (cn = uu_list_last(clp->cl_list); cn != NULL;
+ cn = uu_list_prev(clp->cl_list, cn)) {
+ /*
+ * if we are in a global zone, but this dataset is exported to
+ * a local zone, do nothing.
+ */
+ if ((getzoneid() == GLOBAL_ZONEID) && cn->cn_zoned)
+ continue;
+
+ zfs_refresh_properties(cn->cn_handle);
+
+ /*
+ * If this is a volume and we're doing a rename, recreate the
+ * /dev/zvol links.
+ */
+ if (cn->cn_handle->zfs_volblocksize &&
+ clp->cl_realprop == ZFS_PROP_NAME) {
+ if (zvol_create_link(cn->cn_handle->zfs_name) != 0)
+ ret = -1;
+ continue;
+ }
+
+ if ((clp->cl_waslegacy || cn->cn_mounted) &&
+ !zfs_is_mounted(cn->cn_handle, NULL) &&
+ zfs_mount(cn->cn_handle, NULL, 0) != 0)
+ ret = -1;
+
+ /*
+ * We always re-share even if the filesystem is currently
+ * shared, so that we can adopt any new options.
+ */
+ if ((cn->cn_shared ||
+ (clp->cl_prop == ZFS_PROP_SHARENFS && clp->cl_waslegacy))) {
+ char shareopts[ZFS_MAXPROPLEN];
+ if (zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS,
+ shareopts, sizeof (shareopts), NULL, NULL, 0,
+ FALSE) == 0 && strcmp(shareopts, "off") == 0)
+ ret = zfs_unshare(cn->cn_handle, NULL);
+ else
+ ret = zfs_share(cn->cn_handle);
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * If we rename a filesystem, child filesystem handles are no longer valid,
+ * since we identify datasets by their name in the ZFS namespace. So, we have
+ * to go through and fix up all the names appropriately. We could do this
+ * automatically if libzfs kept track of all open handles, but this is a lot
+ * less work.
+ */
+void
+changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
+{
+ prop_changenode_t *cn;
+ char newname[ZFS_MAXNAMELEN];
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+ /*
+ * Destroy the previous mountpoint if needed.
+ */
+ remove_mountpoint(cn->cn_handle);
+
+ (void) strlcpy(newname, dst, sizeof (newname));
+ (void) strcat(newname, cn->cn_handle->zfs_name + strlen(src));
+
+ (void) strlcpy(cn->cn_handle->zfs_name, newname,
+ sizeof (cn->cn_handle->zfs_name));
+ }
+}
+
+/*
+ * Given a gathered changelist for the "sharenfs" property,
+ * unshare all the nodes in the list.
+ */
+int
+changelist_unshare(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ int ret = 0;
+
+ if (clp->cl_prop != ZFS_PROP_SHARENFS)
+ return (0);
+
+ for (cn = uu_list_first(clp->cl_list); cn != NULL;
+ cn = uu_list_next(clp->cl_list, cn)) {
+
+ if (zfs_unshare(cn->cn_handle, NULL) != 0)
+ ret = -1;
+ }
+
+ return (ret);
+}
+
+/*
+ * Check if there is any child exported to a local zone in a
+ * given changelist. This information has already been recorded
+ * while gathering the changelist via changelist_gather().
+ */
+int
+changelist_haszonedchild(prop_changelist_t *clp)
+{
+ return (clp->cl_haszonedchild);
+}
+
+/*
+ * Release any memory associated with a changelist.
+ */
+void
+changelist_free(prop_changelist_t *clp)
+{
+ prop_changenode_t *cn;
+ uu_list_walk_t *walk;
+
+ verify((walk = uu_list_walk_start(clp->cl_list,
+ UU_WALK_ROBUST)) != NULL);
+
+ while ((cn = uu_list_walk_next(walk)) != NULL) {
+
+ uu_list_remove(clp->cl_list, cn);
+
+ zfs_close(cn->cn_handle);
+ free(cn);
+ }
+
+ uu_list_pool_destroy(clp->cl_pool);
+
+ free(clp);
+}
+
+static int
+change_one(zfs_handle_t *zhp, void *data)
+{
+ prop_changelist_t *clp = data;
+ char property[ZFS_MAXPROPLEN];
+ char where[64];
+ prop_changenode_t *cn;
+ zfs_source_t sourcetype;
+
+ /*
+ * We only want to unmount/unshare those filesystems which may
+ * inherit from the target filesystem. If we find any filesystem
+ * with a locally set mountpoint, we ignore any children since changing
+ * the property will not affect them. If this is a rename, we iterate
+ * over all children regardless, since we need them unmounted in order
+ * to do the rename. Also, if this is a volume and we're doing a
+ * rename, then always add it to the changelist.
+ */
+
+ if (!(zhp->zfs_volblocksize && clp->cl_realprop == ZFS_PROP_NAME) &&
+ zfs_prop_get(zhp, clp->cl_prop, property,
+ sizeof (property), &sourcetype, where, sizeof (where),
+ FALSE) != 0)
+ return (0);
+
+ if (clp->cl_allchildren || sourcetype == ZFS_SRC_DEFAULT ||
+ sourcetype == ZFS_SRC_INHERITED) {
+ cn = zfs_malloc(sizeof (prop_changenode_t));
+
+ cn->cn_handle = zhp;
+ cn->cn_mounted = zfs_is_mounted(zhp, NULL);
+ cn->cn_shared = zfs_is_shared(zhp, NULL);
+ cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+
+ /* indicate if any child is exported to a local zone */
+ if ((getzoneid() == GLOBAL_ZONEID) && cn->cn_zoned)
+ clp->cl_haszonedchild = TRUE;
+
+ uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+ verify(uu_list_insert_before(clp->cl_list,
+ uu_list_first(clp->cl_list), cn) == 0);
+
+ return (zfs_iter_children(zhp, change_one, data));
+ } else {
+ zfs_close(zhp);
+ }
+
+ return (0);
+}
+
+
+/*
+ * Given a ZFS handle and a property, construct a complete list of datasets that
+ * need to be modified as part of this process. For anything but the
+ * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
+ * Otherwise, we iterate over all children and look for any datasets which
+ * inherit this property. For each such dataset, we add it to the list and mark
+ * whether it was shared beforehand.
+ */
+prop_changelist_t *
+changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int flags)
+{
+ prop_changelist_t *clp = zfs_malloc(sizeof (prop_changelist_t));
+ prop_changenode_t *cn;
+ zfs_handle_t *temp;
+ char property[ZFS_MAXPROPLEN];
+
+ clp->cl_pool = uu_list_pool_create("changelist_pool",
+ sizeof (prop_changenode_t),
+ offsetof(prop_changenode_t, cn_listnode),
+ NULL, 0);
+ assert(clp->cl_pool != NULL);
+
+ clp->cl_list = uu_list_create(clp->cl_pool, NULL, 0);
+ clp->cl_flags = flags;
+
+ /*
+ * If this is a rename or the 'zoned' property, we pretend we're
+ * changing the mountpoint and flag it so we can catch all children in
+ * change_one().
+ */
+ if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED) {
+ clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+ clp->cl_allchildren = TRUE;
+ } else {
+ clp->cl_prop = prop;
+ }
+ clp->cl_realprop = prop;
+
+ if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
+ clp->cl_prop != ZFS_PROP_SHARENFS)
+ return (clp);
+
+ if (zfs_iter_children(zhp, change_one, clp) != 0) {
+ changelist_free(clp);
+ return (NULL);
+ }
+
+ /*
+ * We have to re-open ourselves because we auto-close all the handles
+ * and can't tell the difference.
+ */
+ if ((temp = zfs_open(zfs_get_name(zhp), ZFS_TYPE_ANY)) == NULL) {
+ free(clp);
+ return (NULL);
+ }
+
+ /*
+ * Always add ourself to the list. We add ourselves to the end so that
+ * we're the last to be unmounted.
+ */
+ cn = zfs_malloc(sizeof (prop_changenode_t));
+ cn->cn_handle = temp;
+ cn->cn_mounted = zfs_is_mounted(temp, NULL);
+ cn->cn_shared = zfs_is_shared(temp, NULL);
+ cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+
+ uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+ verify(uu_list_insert_after(clp->cl_list,
+ uu_list_last(clp->cl_list), cn) == 0);
+
+ /*
+ * If the property was previously 'legacy' or 'none', record this fact,
+ * as the behavior of changelist_postfix() will be different.
+ */
+ if (zfs_prop_get(zhp, prop, property, sizeof (property),
+ NULL, NULL, 0, FALSE) == 0 &&
+ (strcmp(property, "legacy") == 0 || strcmp(property, "none") == 0 ||
+ strcmp(property, "off") == 0))
+ clp->cl_waslegacy = TRUE;
+
+ return (clp);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_config.c b/usr/src/lib/libzfs/common/libzfs_config.c
new file mode 100644
index 0000000000..4c5a22a459
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_config.c
@@ -0,0 +1,309 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * The pool configuration repository is stored in /etc/zfs/zpool.cache as a
+ * single packed nvlist. While it would be nice to just read in this
+ * file from userland, this wouldn't work from a local zone. So we have to have
+ * a zpool ioctl to return the complete configuration for all pools. In the
+ * global zone, this will be identical to reading the file and unpacking it in
+ * userland.
+ */
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <libintl.h>
+#include <libuutil.h>
+
+#include "libzfs_impl.h"
+
+static uu_avl_t *namespace_avl;
+static uint64_t namespace_generation;
+
+typedef struct config_node {
+ char *cn_name;
+ nvlist_t *cn_config;
+ uu_avl_node_t cn_avl;
+} config_node_t;
+
+/* ARGSUSED */
+static int
+config_node_compare(const void *a, const void *b, void *unused)
+{
+ int ret;
+
+ const config_node_t *ca = (config_node_t *)a;
+ const config_node_t *cb = (config_node_t *)b;
+
+ ret = strcmp(ca->cn_name, cb->cn_name);
+
+ if (ret < 0)
+ return (-1);
+ else if (ret > 0)
+ return (1);
+ else
+ return (0);
+}
+
+/*
+ * Loads the pool namespace, or re-loads it if the cache has changed.
+ */
+static void
+namespace_reload()
+{
+ nvlist_t *config;
+ config_node_t *cn;
+ nvpair_t *elem;
+ zfs_cmd_t zc = { 0 };
+ uu_avl_walk_t *walk;
+
+ if (namespace_generation == 0) {
+ /*
+ * This is the first time we've accessed the configuration
+ * cache. Initialize the AVL tree and then fall through to the
+ * common code.
+ */
+ uu_avl_pool_t *pool;
+
+ if ((pool = uu_avl_pool_create("config_pool",
+ sizeof (config_node_t),
+ offsetof(config_node_t, cn_avl),
+ config_node_compare, UU_DEFAULT)) == NULL)
+ no_memory();
+
+ if ((namespace_avl = uu_avl_create(pool, NULL,
+ UU_DEFAULT)) == NULL)
+ no_memory();
+ }
+
+ /*
+ * Issue the ZFS_IOC_POOL_CONFIGS ioctl.
+ * This can fail for one of two reasons:
+ *
+ * EEXIST The generation counts match, nothing to do.
+ * ENOMEM The zc_config_dst buffer isn't large enough to
+ * hold the config; zc_config_dst_size will have
+ * been modified to tell us how much to allocate.
+ */
+ zc.zc_config_dst_size = 1024;
+ zc.zc_config_dst = (uint64_t)(uintptr_t)
+ zfs_malloc(zc.zc_config_dst_size);
+ for (;;) {
+ zc.zc_cookie = namespace_generation;
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
+ switch (errno) {
+ case EEXIST:
+ /*
+ * The namespace hasn't changed.
+ */
+ free((void *)(uintptr_t)zc.zc_config_dst);
+ return;
+
+ case ENOMEM:
+ free((void *)(uintptr_t)zc.zc_config_dst);
+ zc.zc_config_dst = (uint64_t)(uintptr_t)
+ zfs_malloc(zc.zc_config_dst_size);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ } else {
+ namespace_generation = zc.zc_cookie;
+ break;
+ }
+ }
+
+ verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
+ zc.zc_config_dst_size, &config, 0) == 0);
+
+ free((void *)(uintptr_t)zc.zc_config_dst);
+
+ /*
+ * Clear out any existing configuration information.
+ */
+ if ((walk = uu_avl_walk_start(namespace_avl, UU_WALK_ROBUST)) == NULL)
+ no_memory();
+
+ while ((cn = uu_avl_walk_next(walk)) != NULL) {
+ uu_avl_remove(namespace_avl, cn);
+ nvlist_free(cn->cn_config);
+ free(cn->cn_name);
+ free(cn);
+ }
+
+ elem = NULL;
+ while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
+ nvlist_t *child;
+ uu_avl_index_t where;
+
+ cn = zfs_malloc(sizeof (config_node_t));
+ cn->cn_name = zfs_strdup(nvpair_name(elem));
+
+ verify(nvpair_value_nvlist(elem, &child) == 0);
+ verify(nvlist_dup(child, &cn->cn_config, 0) == 0);
+ verify(uu_avl_find(namespace_avl, cn, NULL, &where) == NULL);
+
+ uu_avl_insert(namespace_avl, cn, where);
+ }
+
+ nvlist_free(config);
+}
+
+/*
+ * Retrieve the configuration for the given pool. The configuration is an nvlist
+ * describing the vdevs, as well as the statistics associated with each one.
+ */
+nvlist_t *
+zpool_get_config(zpool_handle_t *zhp)
+{
+ return (zhp->zpool_config);
+}
+
+/*
+ * Refresh the vdev statistics associated with the given pool. This is used in
+ * iostat to show configuration changes and determine the delta from the last
+ * time the function was called. This function can fail, in case the pool has
+ * been destroyed.
+ */
+int
+zpool_refresh_stats(zpool_handle_t *zhp, nvlist_t **oldconfig,
+ nvlist_t **newconfig)
+{
+ zfs_cmd_t zc = { 0 };
+ int error;
+
+ (void) strcpy(zc.zc_name, zhp->zpool_name);
+
+ if (zhp->zpool_config_size == 0)
+ zhp->zpool_config_size = 1 << 16;
+
+ zc.zc_config_dst_size = zhp->zpool_config_size;
+ zc.zc_config_dst = (uint64_t)(uintptr_t)
+ zfs_malloc(zc.zc_config_dst_size);
+
+ while ((error = ioctl(zfs_fd, ZFS_IOC_POOL_STATS, &zc)) != 0) {
+ error = errno;
+
+ if (error == ENXIO) {
+ /*
+ * We can't open one or more top-level vdevs,
+ * but we have the config.
+ */
+ break;
+ }
+
+ free((void *)(uintptr_t)zc.zc_config_dst);
+
+ if (error == ENOENT || error == EINVAL) {
+ /*
+ * There's no such pool (ENOENT)
+ * or the config is bogus (EINVAL).
+ */
+ return (error);
+ }
+
+ if (error != ENOMEM)
+ zfs_baderror(error);
+
+ zc.zc_config_dst =
+ (uint64_t)(uintptr_t)zfs_malloc(zc.zc_config_dst_size);
+ }
+
+ verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
+ zc.zc_config_dst_size, newconfig, 0) == 0);
+
+ zhp->zpool_config_size = zc.zc_config_dst_size;
+ free((void *)(uintptr_t)zc.zc_config_dst);
+
+ set_pool_health(*newconfig);
+
+ if (oldconfig != NULL)
+ *oldconfig = zhp->zpool_config;
+ else
+ nvlist_free(zhp->zpool_config);
+
+ zhp->zpool_config = *newconfig;
+
+ return (error);
+}
+
+/*
+ * Iterate over all pools in the system.
+ */
+int
+zpool_iter(zpool_iter_f func, void *data)
+{
+ config_node_t *cn;
+ zpool_handle_t *zhp;
+ int ret;
+
+ namespace_reload();
+
+ for (cn = uu_avl_first(namespace_avl); cn != NULL;
+ cn = uu_avl_next(namespace_avl, cn)) {
+
+ if ((zhp = zpool_open_silent(cn->cn_name)) == NULL)
+ continue;
+
+ if ((ret = func(zhp, data)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Iterate over root datasets, calling the given function for each. The zfs
+ * handle passed each time must be explicitly closed by the callback.
+ */
+int
+zfs_iter_root(zfs_iter_f func, void *data)
+{
+ config_node_t *cn;
+ zfs_handle_t *zhp;
+ int ret;
+
+ namespace_reload();
+
+ for (cn = uu_avl_first(namespace_avl); cn != NULL;
+ cn = uu_avl_next(namespace_avl, cn)) {
+
+ if ((zhp = make_dataset_handle(cn->cn_name)) == NULL)
+ continue;
+
+ if ((ret = func(zhp, data)) != 0)
+ return (ret);
+ }
+
+ return (0);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_dataset.c b/usr/src/lib/libzfs/common/libzfs_dataset.c
new file mode 100644
index 0000000000..5a4b1d92be
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_dataset.c
@@ -0,0 +1,2939 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libdevinfo.h>
+#include <libintl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <zone.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+
+#include <sys/spa.h>
+#include <sys/zio.h>
+#include <libzfs.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+
+/*
+ * Given a single type (not a mask of types), return the type in a human
+ * readable form.
+ */
+const char *
+zfs_type_to_name(zfs_type_t type)
+{
+ switch (type) {
+ case ZFS_TYPE_FILESYSTEM:
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+ case ZFS_TYPE_SNAPSHOT:
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ case ZFS_TYPE_VOLUME:
+ return (dgettext(TEXT_DOMAIN, "volume"));
+ }
+
+ zfs_baderror(type);
+ return (NULL);
+}
+
+/*
+ * Given a path and mask of ZFS types, return a string describing this dataset.
+ * This is used when we fail to open a dataset and we cannot get an exact type.
+ * We guess what the type would have been based on the path and the mask of
+ * acceptable types.
+ */
+static const char *
+path_to_str(const char *path, int types)
+{
+ /*
+ * When given a single type, always report the exact type.
+ */
+ if (types == ZFS_TYPE_SNAPSHOT)
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ if (types == ZFS_TYPE_FILESYSTEM)
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+ if (types == ZFS_TYPE_VOLUME)
+ return (dgettext(TEXT_DOMAIN, "volume"));
+
+ /*
+ * The user is requesting more than one type of dataset. If this is the
+ * case, consult the path itself. If we're looking for a snapshot, and
+ * a '@' is found, then report it as "snapshot". Otherwise, remove the
+ * snapshot attribute and try again.
+ */
+ if (types & ZFS_TYPE_SNAPSHOT) {
+ if (strchr(path, '@') != NULL)
+ return (dgettext(TEXT_DOMAIN, "snapshot"));
+ return (path_to_str(path, types & ~ZFS_TYPE_SNAPSHOT));
+ }
+
+
+ /*
+ * The user has requested either filesystems or volumes.
+ * We have no way of knowing a priori what type this would be, so always
+ * report it as "filesystem" or "volume", our two primitive types.
+ */
+ if (types & ZFS_TYPE_FILESYSTEM)
+ return (dgettext(TEXT_DOMAIN, "filesystem"));
+
+ assert(types & ZFS_TYPE_VOLUME);
+ return (dgettext(TEXT_DOMAIN, "volume"));
+}
+
+/*
+ * Validate a ZFS path. This is used even before trying to open the dataset, to
+ * provide a more meaningful error message. We place a more useful message in
+ * 'buf' detailing exactly why the name was not valid.
+ */
+static int
+zfs_validate_name(const char *path, int type, char *buf, size_t buflen)
+{
+ namecheck_err_t why;
+ char what;
+
+ if (dataset_namecheck(path, &why, &what) != 0) {
+ if (buf != NULL) {
+ switch (why) {
+ case NAME_ERR_LEADING_SLASH:
+ (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+ "leading slash"), buflen);
+ break;
+
+ case NAME_ERR_EMPTY_COMPONENT:
+ (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+ "empty component"), buflen);
+ break;
+
+ case NAME_ERR_TRAILING_SLASH:
+ (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+ "trailing slash"), buflen);
+ break;
+
+ case NAME_ERR_INVALCHAR:
+ (void) snprintf(buf, buflen,
+ dgettext(TEXT_DOMAIN, "invalid character "
+ "'%c'"), what);
+ break;
+
+ case NAME_ERR_MULTIPLE_AT:
+ (void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+ "multiple '@' delimiters"), buflen);
+ break;
+ }
+ }
+
+ return (0);
+ }
+
+ if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
+ if (buf != NULL)
+ (void) strlcpy(buf,
+ dgettext(TEXT_DOMAIN,
+ "snapshot delimiter '@'"), buflen);
+ return (0);
+ }
+
+ return (1);
+}
+
+int
+zfs_name_valid(const char *name, zfs_type_t type)
+{
+ return (zfs_validate_name(name, type, NULL, NULL));
+}
+
/*
 * Utility function to gather stats (objset and zpl) for the given object.
 *
 * Issues a single ZFS_IOC_OBJSET_STATS ioctl for zhp->zfs_name and caches
 * the returned DMU stats, ZPL stats, and volume geometry in the handle so
 * later property lookups need no further kernel calls.  Returns 0 on
 * success, -1 if the ioctl fails (errno is left as set by ioctl()).
 */
static int
get_stats(zfs_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	/*
	 * get the generic DMU stats and per-type (zfs, zvol) stats
	 */
	if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
		return (-1);

	/* cache everything the ioctl returned in the handle */
	bcopy(&zc.zc_objset_stats, &zhp->zfs_dmustats,
	    sizeof (zc.zc_objset_stats));

	bcopy(&zc.zc_zfs_stats, &zhp->zfs_zplstats, sizeof (zc.zc_zfs_stats));

	zhp->zfs_volsize = zc.zc_volsize;
	zhp->zfs_volblocksize = zc.zc_volblocksize;

	return (0);
}
+
/*
 * Refresh the properties currently stored in the handle.
 *
 * Best-effort: a failure of the underlying stats ioctl is deliberately
 * ignored, leaving the previously cached values in place.
 */
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
	(void) get_stats(zhp);
}
+
+/*
+ * Makes a handle from the given dataset name. Used by zfs_open() and
+ * zfs_iter_* to create child handles on the fly.
+ */
+zfs_handle_t *
+make_dataset_handle(const char *path)
+{
+ zfs_handle_t *zhp = zfs_malloc(sizeof (zfs_handle_t));
+
+ (void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
+
+ if (get_stats(zhp) != 0) {
+ free(zhp);
+ return (NULL);
+ }
+
+ /*
+ * We've managed to open the dataset and gather statistics. Determine
+ * the high-level type.
+ */
+ if (zhp->zfs_dmustats.dds_is_snapshot)
+ zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
+ else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
+ zhp->zfs_type = ZFS_TYPE_VOLUME;
+ else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
+ zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
+ else
+ /* we should never see any other dataset types */
+ zfs_baderror(zhp->zfs_dmustats.dds_type);
+
+ return (zhp);
+}
+
/*
 * Opens the given snapshot, filesystem, or volume.   The 'types'
 * argument is a mask of acceptable types.  The function will print an
 * appropriate error message and return NULL if it can't be opened.
 */
zfs_handle_t *
zfs_open(const char *path, int types)
{
	zfs_handle_t *zhp;

	/*
	 * If the path is longer than the maximum dataset length, treat it as
	 * ENOENT because we know there can't be any dataset with that path.
	 */
	if (strlen(path) >= ZFS_MAXNAMELEN) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot open '%s': no such %s"), path,
		    path_to_str(path, types));
		return (NULL);
	}

	/*
	 * Validate the name before we even try to open it.  We don't care about
	 * the verbose invalid messages here; just report a generic error.
	 */
	if (!zfs_validate_name(path, types, NULL, 0)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot open '%s': invalid %s name"), path,
		    path_to_str(path, types));
		return (NULL);
	}

	/*
	 * Try to get stats for the dataset, which will tell us if it exists.
	 * Clear errno first so the failure below can be classified by the
	 * errno left behind by the stats ioctl.
	 */
	errno = 0;
	if ((zhp = make_dataset_handle(path)) == NULL) {
		switch (errno) {
		case ENOENT:
			/*
			 * The dataset doesn't exist.
			 */
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': no such %s"), path,
			    path_to_str(path, types));
			break;

		case EBUSY:
			/*
			 * We were able to open the dataset but couldn't
			 * get the stats.
			 */
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': %s is busy"), path,
			    path_to_str(path, types));
			break;

		default:
			/* any other errno is an internal error; abort */
			zfs_baderror(errno);

		}
		return (NULL);
	}

	/* the dataset exists but isn't of a type the caller asked for */
	if (!(types & zhp->zfs_type)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': operation "
		    "not supported for %ss"), path,
		    zfs_type_to_name(zhp->zfs_type));
		free(zhp);
		return (NULL);
	}

	return (zhp);
}
+
+/*
+ * Release a ZFS handle. Nothing to do but free the associated memory.
+ */
+void
+zfs_close(zfs_handle_t *zhp)
+{
+ if (zhp->zfs_mntopts)
+ free(zhp->zfs_mntopts);
+ free(zhp);
+}
+
/*
 * Lookup tables mapping the user-visible string value of each indexed
 * property to the numeric value handed to the kernel.  Each table is
 * terminated by an entry with a NULL name; zfs_prop_validate() and
 * zfs_prop_get() scan them linearly in the order given here.
 */

/* 'checksum' property values */
struct {
	const char *name;
	uint64_t value;
} checksum_table[] = {
	{ "on",		ZIO_CHECKSUM_ON },
	{ "off",	ZIO_CHECKSUM_OFF },
	{ "fletcher2",	ZIO_CHECKSUM_FLETCHER_2 },
	{ "fletcher4",	ZIO_CHECKSUM_FLETCHER_4 },
	{ "sha256",	ZIO_CHECKSUM_SHA256 },
	{ NULL }
};

/* 'compression' property values */
struct {
	const char *name;
	uint64_t value;
} compress_table[] = {
	{ "on",		ZIO_COMPRESS_ON },
	{ "off",	ZIO_COMPRESS_OFF },
	{ "lzjb",	ZIO_COMPRESS_LZJB },
	{ NULL }
};

/* 'snapdir' property values */
struct {
	const char *name;
	uint64_t value;
} snapdir_table[] = {
	{ "hidden",	HIDDEN },
	{ "visible",	VISIBLE },
	{ NULL }
};

/* 'aclmode' property values */
struct {
	const char *name;
	uint64_t value;
} acl_mode_table[] = {
	{ "discard",	DISCARD },
	{ "groupmask",	GROUPMASK },
	{ "passthrough", PASSTHROUGH },
	{ NULL }
};

/* 'aclinherit' property values */
struct {
	const char *name;
	uint64_t value;
} acl_inherit_table[] = {
	{ "discard",	DISCARD },
	{ "noallow",	NOALLOW },
	{ "secure",	SECURE },
	{ "passthrough", PASSTHROUGH },
	{ NULL }
};
+
+
+/*
+ * Given a numeric suffix, convert the value into a number of bits that the
+ * resulting value must be shifted.
+ */
+static int
+str2shift(const char *buf, char *reason, size_t len)
+{
+ const char *ends = "BKMGTPEZ";
+ int i;
+
+ if (buf[0] == '\0')
+ return (0);
+ for (i = 0; i < strlen(ends); i++) {
+ if (toupper(buf[0]) == ends[i])
+ break;
+ }
+ if (i == strlen(ends)) {
+ (void) snprintf(reason, len, dgettext(TEXT_DOMAIN, "invalid "
+ "numeric suffix '%s'"), buf);
+ return (-1);
+ }
+
+ /*
+ * We want to allow trailing 'b' characters for 'GB' or 'Mb'. But don't
+ * allow 'BB' - that's just weird.
+ */
+ if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0' &&
+ toupper(buf[0]) != 'B')) {
+ return (10*i);
+ }
+
+ (void) snprintf(reason, len, dgettext(TEXT_DOMAIN, "invalid numeric "
+ "suffix '%s'"), buf);
+ return (-1);
+}
+
/*
 * Convert a string of the form '100G' into a real number.  Used when setting
 * properties or creating a volume.  'buf' is used to place an extended error
 * message for the caller to use.
 *
 * Accepts an optional decimal fraction ("1.5G") followed by a single binary
 * suffix understood by str2shift().  On success, stores the result in *num
 * and returns 0; on failure, copies a message into 'buf' and returns -1.
 */
static int
nicestrtonum(const char *value, uint64_t *num, char *buf, size_t buflen)
{
	char *end;
	int shift;

	*num = 0;

	/* Check to see if this looks like a number. */
	if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
		(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
		    "must be a numeric value"), buflen);
		return (-1);
	}

	/* Rely on strtoll() to process the numeric portion. */
	errno = 0;
	*num = strtoll(value, &end, 10);

	/*
	 * Check for ERANGE, which indicates that the value is too large to fit
	 * in a 64-bit value.
	 */
	if (errno == ERANGE) {
		(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
		    "value is too large"), buflen);
		return (-1);
	}

	/*
	 * If we have a decimal value, then do the computation with floating
	 * point arithmetic.  Otherwise, use standard arithmetic.
	 */
	if (*end == '.') {
		double fval = strtod(value, &end);

		if ((shift = str2shift(end, buf, buflen)) == -1)
			return (-1);

		fval *= pow(2, shift);

		/* reject results that cannot be represented in 64 bits */
		if (fval > UINT64_MAX) {
			(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
			    "value is too large"), buflen);
			return (-1);
		}

		*num = (uint64_t)fval;
	} else {
		if ((shift = str2shift(end, buf, buflen)) == -1)
			return (-1);

		/* Check for overflow */
		if (shift >= 64 || (*num << shift) >> shift != *num) {
			(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
			    "value is too large"), buflen);
			return (-1);
		}

		*num <<= shift;
	}

	return (0);
}
+
/*
 * Convenience wrapper around nicestrtonum() for callers that don't need
 * the extended error message (a 1-byte buffer truncates it to nothing).
 */
int
zfs_nicestrtonum(const char *str, uint64_t *val)
{
	char unused[1];

	return (nicestrtonum(str, val, unused, sizeof (unused)));
}
+
/*
 * Given a property type and value, verify that the value is appropriate.  Used
 * by zfs_prop_set() and some libzfs consumers.
 *
 * On success returns 0 and, if 'intval' is non-NULL, stores the numeric form
 * of the value there (boolean 0/1, parsed number, or index-table value).
 * On failure prints an error via zfs_error() and returns -1.
 *
 * NOTE(review): for pure string properties (e.g. mountpoint, sharenfs)
 * 'number' is never assigned, so *intval is meaningless in that case —
 * callers appear to consult it only for numeric/index properties; confirm.
 */
int
zfs_prop_validate(zfs_prop_t prop, const char *value, uint64_t *intval)
{
	const char *propname = zfs_prop_to_name(prop);
	uint64_t number;
	char reason[64];
	int i;

	/*
	 * Check to see if this a read-only property.
	 */
	if (zfs_prop_readonly(prop)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot set %s property: read-only property"), propname);
		return (-1);
	}

	/* See if the property value is too long */
	if (strlen(value) >= ZFS_MAXPROPLEN) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "bad %s value '%s': value is too long"), propname,
		    value);
		return (-1);
	}

	/* Perform basic checking based on property type */
	switch (zfs_prop_get_type(prop)) {
	case prop_type_boolean:
		if (strcmp(value, "on") == 0) {
			number = 1;
		} else if (strcmp(value, "off") == 0) {
			number = 0;
		} else {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "bad %s value '%s': must be 'on' or 'off'"),
			    propname, value);
			return (-1);
		}
		break;

	case prop_type_number:
		/* treat 'none' as 0 */
		if (strcmp(value, "none") == 0) {
			number = 0;
			break;
		}

		if (nicestrtonum(value, &number, reason,
		    sizeof (reason)) != 0) {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "bad %s value '%s': %s"), propname, value,
			    reason);
			return (-1);
		}

		/* don't allow 0 for quota, use 'none' instead */
		if (prop == ZFS_PROP_QUOTA && number == 0 &&
		    strcmp(value, "none") != 0) {
			zfs_error(dgettext(TEXT_DOMAIN,
			    "bad %s value '%s': use '%s=none' to disable"),
			    propname, value, propname);
			return (-1);
		}

		/* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */
		if (prop == ZFS_PROP_RECORDSIZE ||
		    prop == ZFS_PROP_VOLBLOCKSIZE) {
			if (number < SPA_MINBLOCKSIZE ||
			    number > SPA_MAXBLOCKSIZE || !ISP2(number)) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': "
				    "must be power of 2 from %u to %uk"),
				    propname, value,
				    (uint_t)SPA_MINBLOCKSIZE,
				    (uint_t)SPA_MAXBLOCKSIZE >> 10);
				return (-1);
			}
		}

		break;

	case prop_type_string:
	case prop_type_index:
		/*
		 * The two writable string values, 'mountpoint' and
		 * 'checksum' need special consideration.  The 'index' types are
		 * specified as strings by the user, but passed to the kernel as
		 * integers.
		 */
		switch (prop) {
		case ZFS_PROP_MOUNTPOINT:
			/* 'none' and 'legacy' are accepted verbatim */
			if (strcmp(value, ZFS_MOUNTPOINT_NONE) == 0 ||
			    strcmp(value, ZFS_MOUNTPOINT_LEGACY) == 0)
				break;

			if (value[0] != '/') {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be an absolute "
				    "path, 'none', or 'legacy'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_CHECKSUM:
			/* linear scan of the NULL-terminated value table */
			for (i = 0; checksum_table[i].name != NULL; i++) {
				if (strcmp(value, checksum_table[i].name)
				    == 0) {
					number = checksum_table[i].value;
					break;
				}
			}

			if (checksum_table[i].name == NULL) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be 'on', 'off', "
				    "'fletcher2', 'fletcher4', or 'sha256'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_COMPRESSION:
			for (i = 0; compress_table[i].name != NULL; i++) {
				if (strcmp(value, compress_table[i].name)
				    == 0) {
					number = compress_table[i].value;
					break;
				}
			}

			if (compress_table[i].name == NULL) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be 'on', 'off', "
				    "or 'lzjb'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_SNAPDIR:
			for (i = 0; snapdir_table[i].name != NULL; i++) {
				if (strcmp(value, snapdir_table[i].name) == 0) {
					number = snapdir_table[i].value;
					break;
				}
			}

			if (snapdir_table[i].name == NULL) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be 'hidden' "
				    "or 'visible'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_ACLMODE:
			for (i = 0; acl_mode_table[i].name != NULL; i++) {
				if (strcmp(value, acl_mode_table[i].name)
				    == 0) {
					number = acl_mode_table[i].value;
					break;
				}
			}

			if (acl_mode_table[i].name == NULL) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be 'discard', "
				    "'groupmask' or 'passthrough'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_ACLINHERIT:
			for (i = 0; acl_inherit_table[i].name != NULL; i++) {
				if (strcmp(value, acl_inherit_table[i].name)
				    == 0) {
					number = acl_inherit_table[i].value;
					break;
				}
			}

			if (acl_inherit_table[i].name == NULL) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "bad %s value '%s': must be 'discard', "
				    "'noallow', 'groupmask' or 'passthrough'"),
				    propname, value);
				return (-1);
			}
			break;

		case ZFS_PROP_SHARENFS:
			/*
			 * Nothing to do for 'sharenfs', this gets passed on to
			 * share(1M) verbatim.
			 */
			break;
		}
	}

	if (intval != NULL)
		*intval = number;

	return (0);
}
+
/*
 * Given a property name and value, set the property for the given dataset.
 *
 * Validates the value, checks zone restrictions for mountpoint/sharenfs,
 * unmounts/unshares affected datasets (changelist_prefix), issues the
 * appropriate ioctl, then remounts/reshares (changelist_postfix) and
 * refreshes the cached stats.  Returns 0 on success, non-zero on failure
 * (after printing an error via zfs_error()).
 */
int
zfs_prop_set(zfs_handle_t *zhp, zfs_prop_t prop, const char *propval)
{
	const char *propname = zfs_prop_to_name(prop);
	uint64_t number;
	zfs_cmd_t zc = { 0 };
	int ret;
	prop_changelist_t *cl;

	if (zfs_prop_validate(prop, propval, &number) != 0)
		return (-1);

	/*
	 * Check to see if the value applies to this type
	 */
	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot set %s for '%s': property does not apply to %ss"),
		    propname, zhp->zfs_name, zfs_type_to_name(zhp->zfs_type));
		return (-1);
	}

	/*
	 * For the mountpoint and sharenfs properties, check if it can be set
	 * in a global/non-global zone based on the zoned property value:
	 *
	 *		global zone	    non-global zone
	 * -----------------------------------------------------
	 * zoned=on	mountpoint (no)	    mountpoint (yes)
	 *		sharenfs (no)	    sharenfs (no)
	 *
	 * zoned=off	mountpoint (yes)	N/A
	 *		sharenfs (yes)
	 */
	if (prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS) {
		if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
			if (getzoneid() == GLOBAL_ZONEID) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "cannot set %s for '%s', "
				    "dataset is used in a non-global zone"),
				    propname, zhp->zfs_name);
				return (-1);
			} else if (prop == ZFS_PROP_SHARENFS) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "cannot set %s for '%s', filesystems "
				    "cannot be shared in a non-global zone"),
				    propname, zhp->zfs_name);
				return (-1);
			}
		} else if (getzoneid() != GLOBAL_ZONEID) {
			/*
			 * If zoned property is 'off', this must be in
			 * a global zone.  If not, something is wrong.
			 */
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot set %s for '%s', dataset is "
			    "used in a non-global zone, but 'zoned' "
			    "property is not set"),
			    propname, zhp->zfs_name);
			return (-1);
		}
	}

	/* gather the datasets whose mounts/shares this change affects */
	if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
		return (-1);

	if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot set %s for '%s', "
		    "child dataset with inherited mountpoint is used "
		    "in a non-global zone"),
		    propname, zhp->zfs_name);
		ret = -1;
		goto error;
	}

	/* unmount/unshare everything affected before the change */
	if ((ret = changelist_prefix(cl)) != 0)
		goto error;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));

	switch (prop) {
	case ZFS_PROP_QUOTA:
		zc.zc_cookie = number;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_QUOTA, &zc);
		break;
	case ZFS_PROP_RESERVATION:
		zc.zc_cookie = number;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_RESERVATION, &zc);
		break;
	case ZFS_PROP_MOUNTPOINT:
	case ZFS_PROP_SHARENFS:
		/*
		 * These properties are passed down as real strings.
		 */
		(void) strlcpy(zc.zc_prop_name, propname,
		    sizeof (zc.zc_prop_name));
		(void) strlcpy(zc.zc_prop_value, propval,
		    sizeof (zc.zc_prop_value));
		zc.zc_intsz = 1;
		zc.zc_numints = strlen(propval) + 1;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_PROP, &zc);
		break;
	case ZFS_PROP_VOLSIZE:
		zc.zc_volsize = number;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_VOLSIZE, &zc);
		break;
	case ZFS_PROP_VOLBLOCKSIZE:
		zc.zc_volblocksize = number;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_VOLBLOCKSIZE, &zc);
		break;
	default:
		/* everything else is passed as a single 64-bit integer */
		(void) strlcpy(zc.zc_prop_name, propname,
		    sizeof (zc.zc_prop_name));
		/* LINTED - alignment */
		*(uint64_t *)zc.zc_prop_value = number;
		zc.zc_intsz = 8;
		zc.zc_numints = 1;
		ret = ioctl(zfs_fd, ZFS_IOC_SET_PROP, &zc);
		break;
	}

	if (ret != 0) {
		switch (errno) {

		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot set %s for '%s': permission "
			    "denied"), propname, zhp->zfs_name);
			break;

		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': no such %s"), zhp->zfs_name,
			    zfs_type_to_name(zhp->zfs_type));
			break;

		case ENOSPC:
			/*
			 * For quotas and reservations, ENOSPC indicates
			 * something different; setting a quota or reservation
			 * doesn't use any disk space.
			 */
			switch (prop) {
			case ZFS_PROP_QUOTA:
				zfs_error(dgettext(TEXT_DOMAIN, "cannot set %s "
				    "for '%s': size is less than current "
				    "used or reserved space"), propname,
				    zhp->zfs_name);
				break;

			case ZFS_PROP_RESERVATION:
				zfs_error(dgettext(TEXT_DOMAIN, "cannot set %s "
				    "for '%s': size is greater than available "
				    "space"), propname, zhp->zfs_name);
				break;

			default:
				zfs_error(dgettext(TEXT_DOMAIN,
				    "cannot set %s for '%s': out of space"),
				    propname, zhp->zfs_name);
				break;
			}
			break;

		case EBUSY:
			if (prop == ZFS_PROP_VOLBLOCKSIZE) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "cannot set %s for '%s': "
				    "volume already contains data"),
				    propname, zhp->zfs_name);
			} else {
				zfs_baderror(errno);
			}
			break;

		case EOVERFLOW:
			/*
			 * This platform can't address a volume this big.
			 */
#ifdef _ILP32
			if (prop == ZFS_PROP_VOLSIZE) {
				zfs_error(dgettext(TEXT_DOMAIN,
				    "cannot set %s for '%s': "
				    "max volume size is 1TB on 32-bit systems"),
				    propname, zhp->zfs_name);
				break;
			}
#endif
			/*
			 * NOTE(review): no break here, so this falls into
			 * the default case below; harmless only if
			 * zfs_baderror() does not return — confirm.
			 */
			zfs_baderror(errno);
		default:
			zfs_baderror(errno);
		}
	} else {
		/*
		 * Refresh the statistics so the new property value
		 * is reflected.
		 */
		if ((ret = changelist_postfix(cl)) != 0)
			goto error;

		(void) get_stats(zhp);
	}

error:
	changelist_free(cl);
	return (ret);
}
+
/*
 * Given a property, inherit the value from the parent dataset.
 *
 * Rejects read-only and non-inheritable properties and zone-restricted
 * mountpoint changes, unmounts affected datasets, clears the local value
 * via ZFS_IOC_SET_PROP with zc_numints = 0, then remounts and refreshes
 * the cached stats.  Returns 0 on success, non-zero on failure.
 */
int
zfs_prop_inherit(zfs_handle_t *zhp, zfs_prop_t prop)
{
	const char *propname = zfs_prop_to_name(prop);
	zfs_cmd_t zc = { 0 };
	int ret;
	prop_changelist_t *cl;

	/*
	 * Verify that this property is inheritable.
	 */
	if (zfs_prop_readonly(prop)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot inherit %s for '%s': property is read-only"),
		    propname, zhp->zfs_name);
		return (-1);
	}

	if (!zfs_prop_inheritable(prop)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot inherit %s for '%s': property is not inheritable"),
		    propname, zhp->zfs_name);
		return (-1);
	}

	/*
	 * Check to see if the value applies to this type
	 */
	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type)) {
		zfs_error(dgettext(TEXT_DOMAIN,
		    "cannot inherit %s for '%s': property does "
		    "not apply to %ss"), propname, zhp->zfs_name,
		    zfs_type_to_name(zhp->zfs_type));
		return (-1);
	}

	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_prop_name, propname, sizeof (zc.zc_prop_name));

	/* a zoned dataset's mountpoint is owned by the non-global zone */
	if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot inherit %s for '%s', "
		    "dataset is used in a non-global zone"), propname,
		    zhp->zfs_name);
		return (-1);
	}

	/*
	 * Determine datasets which will be affected by this change, if any.
	 */
	if ((cl = changelist_gather(zhp, prop, 0)) == NULL)
		return (-1);

	if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
		zfs_error(dgettext(TEXT_DOMAIN, "cannot inherit %s for '%s', "
		    "child dataset with inherited mountpoint is "
		    "used in a non-global zone"),
		    propname, zhp->zfs_name);
		ret = -1;
		goto error;
	}

	/* unmount/unshare everything affected before the change */
	if ((ret = changelist_prefix(cl)) != 0)
		goto error;

	/* zc_numints == 0 tells the kernel to clear the local value */
	zc.zc_numints = 0;

	if ((ret = ioctl(zfs_fd, ZFS_IOC_SET_PROP, &zc)) != 0) {
		switch (errno) {
		case EPERM:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot inherit %s for '%s': permission "
			    "denied"), propname, zhp->zfs_name);
			break;
		case ENOENT:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot open '%s': no such %s"), zhp->zfs_name,
			    zfs_type_to_name(zhp->zfs_type));
			break;
		case ENOSPC:
			zfs_error(dgettext(TEXT_DOMAIN,
			    "cannot inherit %s for '%s': "
			    "out of space"), propname, zhp->zfs_name);
			break;
		default:
			zfs_baderror(errno);
		}

	} else {

		if ((ret = changelist_postfix(cl)) != 0)
			goto error;

		/*
		 * Refresh the statistics so the new property is reflected.
		 */
		(void) get_stats(zhp);
	}


error:
	changelist_free(cl);
	return (ret);
}
+
/*
 * Render a boolean property value as the canonical "on"/"off" string,
 * truncating safely to the caller's buffer.
 */
static void
nicebool(int value, char *buf, size_t buflen)
{
	(void) snprintf(buf, buflen, "%s", value ? "on" : "off");
}
+
/*
 * Internal function for getting a numeric property.  Both zfs_prop_get() and
 * zfs_prop_get_int() are built using this interface.
 *
 * Certain properties can be overridden using 'mount -o'.  In this case, scan
 * the contents of the /etc/mnttab entry, searching for the appropriate options.
 * If they differ from the on-disk values, report the current values and mark
 * the source "temporary".
 *
 * On return, *source points at the setpoint string for inherited values,
 * "" for defaults, the dataset's own name for local values, or NULL when
 * the property has no meaningful source.  'src' (if non-NULL) is set to
 * ZFS_SRC_TEMPORARY when a mount option overrides the stored value.
 */
static uint64_t
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zfs_source_t *src,
    char **source)
{
	uint64_t val;
	struct mnttab mnt;

	*source = NULL;

	/* hasmntopt() needs a (possibly empty) option string to scan */
	if (zhp->zfs_mntopts == NULL)
		mnt.mnt_mntopts = "";
	else
		mnt.mnt_mntopts = zhp->zfs_mntopts;

	switch (prop) {
	case ZFS_PROP_ATIME:
		*source = zhp->zfs_zplstats.zs_atime_setpoint;
		/*
		 * FIX: this case previously loaded zs_devices into 'val'
		 * and then returned the raw zs_atime, so a temporary
		 * "-o atime"/"-o noatime" mount override was tested
		 * against the wrong field and never reflected in the
		 * returned value.  Use zs_atime throughout, matching the
		 * devices/exec/readonly/setuid cases below.
		 */
		val = zhp->zfs_zplstats.zs_atime;

		if (hasmntopt(&mnt, MNTOPT_ATIME) && !val) {
			val = TRUE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, MNTOPT_NOATIME) && val) {
			val = FALSE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		}
		return (val);

	case ZFS_PROP_AVAILABLE:
		return (zhp->zfs_dmustats.dds_available);

	case ZFS_PROP_DEVICES:
		*source = zhp->zfs_zplstats.zs_devices_setpoint;
		val = zhp->zfs_zplstats.zs_devices;

		if (hasmntopt(&mnt, MNTOPT_DEVICES) && !val) {
			val = TRUE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, MNTOPT_NODEVICES) && val) {
			val = FALSE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		}
		return (val);

	case ZFS_PROP_EXEC:
		*source = zhp->zfs_zplstats.zs_exec_setpoint;
		val = zhp->zfs_zplstats.zs_exec;

		if (hasmntopt(&mnt, MNTOPT_EXEC) && !val) {
			val = TRUE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, MNTOPT_NOEXEC) && val) {
			val = FALSE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		}
		return (val);

	case ZFS_PROP_RECORDSIZE:
		*source = zhp->zfs_zplstats.zs_recordsize_setpoint;
		return (zhp->zfs_zplstats.zs_recordsize);

	case ZFS_PROP_COMPRESSION:
		*source = zhp->zfs_dmustats.dds_compression_setpoint;
		return (zhp->zfs_dmustats.dds_compression);

	case ZFS_PROP_READONLY:
		*source = zhp->zfs_zplstats.zs_readonly_setpoint;
		val = zhp->zfs_zplstats.zs_readonly;

		if (hasmntopt(&mnt, MNTOPT_RO) && !val) {
			val = TRUE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, MNTOPT_RW) && val) {
			val = FALSE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		}
		return (val);

	case ZFS_PROP_QUOTA:
		/* a zero quota means "not set", i.e. the default */
		if (zhp->zfs_dmustats.dds_quota == 0)
			*source = "";	/* default */
		else
			*source = zhp->zfs_name;
		return (zhp->zfs_dmustats.dds_quota);

	case ZFS_PROP_RESERVATION:
		if (zhp->zfs_dmustats.dds_reserved == 0)
			*source = "";	/* default */
		else
			*source = zhp->zfs_name;
		return (zhp->zfs_dmustats.dds_reserved);

	case ZFS_PROP_COMPRESSRATIO:
		/*
		 * Using physical space and logical space, calculate the
		 * compression ratio.  We return the number as a multiple of
		 * 100, so '2.5x' would be returned as 250.
		 */
		if (zhp->zfs_dmustats.dds_compressed_bytes == 0)
			return (100ULL);
		else
			return (zhp->zfs_dmustats.dds_uncompressed_bytes * 100 /
			    zhp->zfs_dmustats.dds_compressed_bytes);

	case ZFS_PROP_REFERENCED:
		/*
		 * 'referenced' refers to the amount of physical space
		 * referenced (possibly shared) by this object.
		 */
		return (zhp->zfs_dmustats.dds_space_refd);

	case ZFS_PROP_SETUID:
		*source = zhp->zfs_zplstats.zs_setuid_setpoint;
		val = zhp->zfs_zplstats.zs_setuid;

		if (hasmntopt(&mnt, MNTOPT_SETUID) && !val) {
			val = TRUE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		} else if (hasmntopt(&mnt, MNTOPT_NOSETUID) && val) {
			val = FALSE;
			if (src)
				*src = ZFS_SRC_TEMPORARY;
		}
		return (val);

	case ZFS_PROP_VOLSIZE:
		return (zhp->zfs_volsize);

	case ZFS_PROP_VOLBLOCKSIZE:
		return (zhp->zfs_volblocksize);

	case ZFS_PROP_ZONED:
		*source = zhp->zfs_dmustats.dds_zoned_setpoint;
		return (zhp->zfs_dmustats.dds_zoned);

	case ZFS_PROP_USED:
		return (zhp->zfs_dmustats.dds_space_used);

	case ZFS_PROP_CREATETXG:
		return (zhp->zfs_dmustats.dds_creation_txg);

	case ZFS_PROP_MOUNTED:
		/*
		 * Unlike other properties, we defer calculation of 'MOUNTED'
		 * until actually requested.  This is because the getmntany()
		 * call can be extremely expensive on systems with a large
		 * number of filesystems, and the property isn't needed in
		 * normal use cases.
		 */
		if (zhp->zfs_mntopts == NULL) {
			struct mnttab search = { 0 }, entry;

			search.mnt_special = (char *)zhp->zfs_name;
			rewind(mnttab_file);

			if (getmntany(mnttab_file, &entry, &search) == 0)
				zhp->zfs_mntopts =
				    zfs_strdup(entry.mnt_mntopts);
		}
		return (zhp->zfs_mntopts != NULL);

	default:
		/* non-numeric or unknown property: programming error */
		zfs_baderror(EINVAL);
	}

	return (0);
}
+
+/*
+ * Calculate the source type, given the raw source string.
+ */
+static void
+get_source(zfs_handle_t *zhp, zfs_source_t *srctype, char *source,
+ char *statbuf, size_t statlen)
+{
+ if (statbuf == NULL || *srctype == ZFS_SRC_TEMPORARY)
+ return;
+
+ if (source == NULL) {
+ *srctype = ZFS_SRC_NONE;
+ } else if (source[0] == '\0') {
+ *srctype = ZFS_SRC_DEFAULT;
+ } else {
+ if (strcmp(source, zhp->zfs_name) == 0) {
+ *srctype = ZFS_SRC_LOCAL;
+ } else {
+ (void) strlcpy(statbuf, source, statlen);
+ *srctype = ZFS_SRC_INHERITED;
+ }
+ }
+
+}
+
+/*
+ * Retrieve a property from the given object. If 'literal' is specified, then
+ * numbers are left as exact values. Otherwise, numbers are converted to a
+ * human-readable form.
+ *
+ * Returns 0 on success, or -1 on error.
+ */
+int
+zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
+ zfs_source_t *src, char *statbuf, size_t statlen, int literal)
+{
+ char *source = NULL;
+ uint64_t val;
+ char *str;
+ int i;
+ const char *root;
+
+ /*
+ * Check to see if this property applies to our object
+ */
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ return (-1);
+
+ if (src)
+ *src = ZFS_SRC_NONE;
+
+ switch (prop) {
+ case ZFS_PROP_ATIME:
+ case ZFS_PROP_READONLY:
+ case ZFS_PROP_SETUID:
+ case ZFS_PROP_ZONED:
+ case ZFS_PROP_DEVICES:
+ case ZFS_PROP_EXEC:
+ /*
+ * Basic boolean values are built on top of
+ * get_numeric_property().
+ */
+ nicebool(get_numeric_property(zhp, prop, src, &source),
+ propbuf, proplen);
+
+ break;
+
+ case ZFS_PROP_AVAILABLE:
+ case ZFS_PROP_RECORDSIZE:
+ case ZFS_PROP_CREATETXG:
+ case ZFS_PROP_REFERENCED:
+ case ZFS_PROP_USED:
+ case ZFS_PROP_VOLSIZE:
+ case ZFS_PROP_VOLBLOCKSIZE:
+ /*
+ * Basic numeric values are built on top of
+ * get_numeric_property().
+ */
+ val = get_numeric_property(zhp, prop, src, &source);
+ if (literal)
+ (void) snprintf(propbuf, proplen, "%llu", val);
+ else
+ zfs_nicenum(val, propbuf, proplen);
+ break;
+
+ case ZFS_PROP_COMPRESSION:
+ for (i = 0; compress_table[i].name != NULL; i++) {
+ if (compress_table[i].value ==
+ zhp->zfs_dmustats.dds_compression)
+ break;
+ }
+ assert(compress_table[i].name != NULL);
+ (void) strlcpy(propbuf, compress_table[i].name, proplen);
+ source = zhp->zfs_dmustats.dds_compression_setpoint;
+ break;
+
+ case ZFS_PROP_CHECKSUM:
+ for (i = 0; checksum_table[i].name != NULL; i++) {
+ if (checksum_table[i].value ==
+ zhp->zfs_dmustats.dds_checksum)
+ break;
+ }
+ assert(checksum_table[i].name != NULL);
+ (void) strlcpy(propbuf, checksum_table[i].name, proplen);
+ source = zhp->zfs_dmustats.dds_checksum_setpoint;
+ break;
+
+ case ZFS_PROP_SNAPDIR:
+ for (i = 0; snapdir_table[i].name != NULL; i++) {
+ if (snapdir_table[i].value ==
+ zhp->zfs_zplstats.zs_snapdir)
+ break;
+ }
+ assert(snapdir_table[i].name != NULL);
+ (void) strlcpy(propbuf, snapdir_table[i].name, proplen);
+ source = zhp->zfs_zplstats.zs_snapdir_setpoint;
+ break;
+
+ case ZFS_PROP_ACLMODE:
+ for (i = 0; acl_mode_table[i].name != NULL; i++) {
+ if (acl_mode_table[i].value ==
+ zhp->zfs_zplstats.zs_acl_mode)
+ break;
+ }
+ assert(acl_mode_table[i].name != NULL);
+ (void) strlcpy(propbuf, acl_mode_table[i].name, proplen);
+ source = zhp->zfs_zplstats.zs_acl_mode_setpoint;
+ break;
+
+ case ZFS_PROP_ACLINHERIT:
+ for (i = 0; acl_inherit_table[i].name != NULL; i++) {
+ if (acl_inherit_table[i].value ==
+ zhp->zfs_zplstats.zs_acl_inherit)
+ break;
+ }
+ assert(acl_inherit_table[i].name != NULL);
+ (void) strlcpy(propbuf, acl_inherit_table[i].name, proplen);
+ source = zhp->zfs_zplstats.zs_acl_inherit_setpoint;
+ break;
+
+ case ZFS_PROP_CREATION:
+ /*
+ * 'creation' is a time_t stored in the statistics. We convert
+ * this into a string unless 'literal' is specified.
+ */
+ {
+ time_t time = (time_t)
+ zhp->zfs_dmustats.dds_creation_time;
+ struct tm t;
+
+ if (literal ||
+ localtime_r(&time, &t) == NULL ||
+ strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
+ &t) == 0)
+ (void) snprintf(propbuf, proplen, "%llu",
+ zhp->zfs_dmustats.dds_creation_time);
+ }
+ break;
+
+ case ZFS_PROP_MOUNTPOINT:
+ /*
+ * Getting the precise mountpoint can be tricky.
+ *
+ * - for 'none' or 'legacy', return those values.
+ * - for default mountpoints, construct it as /zfs/<dataset>
+ * - for inherited mountpoints, we want to take everything
+ * after our ancestor and append it to the inherited value.
+ *
+ * If the pool has an alternate root, we want to prepend that
+ * root to any values we return.
+ */
+ root = zhp->zfs_dmustats.dds_altroot;
+
+ if (zhp->zfs_zplstats.zs_mountpoint[0] == '\0') {
+ (void) snprintf(propbuf, proplen, "%s/zfs/%s",
+ root, zhp->zfs_name);
+ } else if (zhp->zfs_zplstats.zs_mountpoint[0] == '/') {
+ const char *relpath = zhp->zfs_name +
+ strlen(zhp->zfs_zplstats.zs_mountpoint_setpoint);
+ const char *mntpoint = zhp->zfs_zplstats.zs_mountpoint;
+
+ if (relpath[0] == '/')
+ relpath++;
+ if (mntpoint[1] == '\0')
+ mntpoint++;
+
+ if (relpath[0] == '\0')
+ (void) snprintf(propbuf, proplen, "%s%s",
+ root, mntpoint);
+ else
+ (void) snprintf(propbuf, proplen, "%s%s%s%s",
+ root, mntpoint,
+ relpath[0] == '@' ? "" : "/",
+ relpath);
+ } else {
+ /* 'legacy' or 'none' */
+ (void) strlcpy(propbuf, zhp->zfs_zplstats.zs_mountpoint,
+ proplen);
+ }
+
+ source = zhp->zfs_zplstats.zs_mountpoint_setpoint;
+ break;
+
+ case ZFS_PROP_SHARENFS:
+ (void) strlcpy(propbuf, zhp->zfs_zplstats.zs_sharenfs, proplen);
+ source = zhp->zfs_zplstats.zs_sharenfs_setpoint;
+ break;
+
+ case ZFS_PROP_ORIGIN:
+ (void) strlcpy(propbuf, zhp->zfs_dmustats.dds_clone_of,
+ proplen);
+ /*
+ * If there is no parent at all, return failure to indicate that
+ * it doesn't apply to this dataset.
+ */
+ if (propbuf[0] == '\0')
+ return (-1);
+ break;
+
+ case ZFS_PROP_QUOTA:
+ case ZFS_PROP_RESERVATION:
+ val = get_numeric_property(zhp, prop, src, &source);
+
+ /*
+ * If quota or reservation is 0, we translate this into 'none'
+ * (unless literal is set), and indicate that it's the default
+ * value. Otherwise, we print the number nicely and indicate
+ * that its set locally.
+ */
+ if (val == 0) {
+ if (literal)
+ (void) strlcpy(propbuf, "0", proplen);
+ else
+ (void) strlcpy(propbuf, "none", proplen);
+ } else {
+ if (literal)
+ (void) snprintf(propbuf, proplen, "%llu", val);
+ else
+ zfs_nicenum(val, propbuf, proplen);
+ }
+ break;
+
+ case ZFS_PROP_COMPRESSRATIO:
+ val = get_numeric_property(zhp, prop, src, &source);
+ (void) snprintf(propbuf, proplen, "%lld.%02lldx", val / 100,
+ val % 100);
+ break;
+
+ case ZFS_PROP_TYPE:
+ switch (zhp->zfs_type) {
+ case ZFS_TYPE_FILESYSTEM:
+ str = "filesystem";
+ break;
+ case ZFS_TYPE_VOLUME:
+ str = "volume";
+ break;
+ case ZFS_TYPE_SNAPSHOT:
+ str = "snapshot";
+ break;
+ default:
+ zfs_baderror(zhp->zfs_type);
+ }
+ (void) snprintf(propbuf, proplen, "%s", str);
+ break;
+
+ case ZFS_PROP_MOUNTED:
+ /*
+ * The 'mounted' property is a pseudo-property that described
+ * whether the filesystem is currently mounted. Even though
+ * it's a boolean value, the typical values of "on" and "off"
+ * don't make sense, so we translate to "yes" and "no".
+ */
+ if (get_numeric_property(zhp, ZFS_PROP_MOUNTED, src, &source))
+ (void) strlcpy(propbuf, "yes", proplen);
+ else
+ (void) strlcpy(propbuf, "no", proplen);
+ break;
+
+ case ZFS_PROP_NAME:
+ /*
+ * The 'name' property is a pseudo-property derived from the
+ * dataset name. It is presented as a real property to simplify
+ * consumers.
+ */
+ (void) strlcpy(propbuf, zhp->zfs_name, proplen);
+ break;
+
+ default:
+ zfs_baderror(EINVAL);
+ }
+
+ get_source(zhp, src, source, statbuf, statlen);
+
+ return (0);
+}
+
+/*
+ * Utility function to get the given numeric property. Does no validation that
+ * the given property is the appropriate type; should only be used with
+ * hard-coded property types.
+ */
+uint64_t
+zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
+{
+ char *source;
+ zfs_source_t sourcetype = ZFS_SRC_NONE;
+
+ return (get_numeric_property(zhp, prop, &sourcetype, &source));
+}
+
+/*
+ * Similar to zfs_prop_get(), but returns the value as an integer.
+ */
+int
+zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
+ zfs_source_t *src, char *statbuf, size_t statlen)
+{
+ char *source;
+
+ /*
+ * Check to see if this property applies to our object
+ */
+ if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+ return (-1);
+
+ if (src)
+ *src = ZFS_SRC_NONE;
+
+ *value = get_numeric_property(zhp, prop, src, &source);
+
+ get_source(zhp, src, source, statbuf, statlen);
+
+ return (0);
+}
+
+/*
+ * Returns the name of the given zfs handle.
+ */
+const char *
+zfs_get_name(const zfs_handle_t *zhp)
+{
+ return (zhp->zfs_name);
+}
+
+/*
+ * Returns the type of the given zfs handle.
+ */
+zfs_type_t
+zfs_get_type(const zfs_handle_t *zhp)
+{
+ return (zhp->zfs_type);
+}
+
+/*
+ * Iterate over all children, datasets and snapshots.
+ */
+int
+zfs_iter_children(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+ zfs_cmd_t zc = { 0 };
+ zfs_handle_t *nzhp;
+ int ret;
+
+ for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ ioctl(zfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) {
+ /*
+ * Ignore private dataset names.
+ */
+ if (dataset_name_hidden(zc.zc_name))
+ continue;
+
+ /*
+ * Silently ignore errors, as the only plausible explanation is
+ * that the pool has since been removed.
+ */
+ if ((nzhp = make_dataset_handle(zc.zc_name)) == NULL)
+ continue;
+
+ if ((ret = func(nzhp, data)) != 0)
+ return (ret);
+ }
+
+ /*
+ * An errno value of ESRCH indicates normal completion. If ENOENT is
+ * returned, then the underlying dataset has been removed since we
+ * obtained the handle.
+ */
+ if (errno != ESRCH && errno != ENOENT)
+ zfs_baderror(errno);
+
+ bzero(&zc, sizeof (zc));
+
+ for ((void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+ ioctl(zfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0;
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name))) {
+
+ if ((nzhp = make_dataset_handle(zc.zc_name)) == NULL)
+ continue;
+
+ if ((ret = func(nzhp, data)) != 0)
+ return (ret);
+ }
+
+ /*
+ * An errno value of ESRCH indicates normal completion. If ENOENT is
+ * returned, then the underlying dataset has been removed since we
+ * obtained the handle. Silently ignore this case, and return success.
+ */
+ if (errno != ESRCH && errno != ENOENT)
+ zfs_baderror(errno);
+
+ return (0);
+}
+
+/*
+ * Given a complete name, return just the portion that refers to the parent.
+ * Can return NULL if this is a pool.
+ */
+static int
+parent_name(const char *path, char *buf, size_t buflen)
+{
+ char *loc;
+
+ if ((loc = strrchr(path, '/')) == NULL)
+ return (-1);
+
+ (void) strncpy(buf, path, MIN(buflen, loc - path));
+ buf[loc - path] = '\0';
+
+ return (0);
+}
+
+/*
+ * Checks to make sure that the given path has a parent, and that it exists.
+ */
+static int
+check_parents(const char *path, zfs_type_t type)
+{
+ zfs_cmd_t zc = { 0 };
+ char parent[ZFS_MAXNAMELEN];
+ char *slash;
+
+ /* get parent, and check to see if this is just a pool */
+ if (parent_name(path, parent, sizeof (parent)) != 0) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': missing dataset name"),
+ path, zfs_type_to_name(type));
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "use 'zpool create' to create a storage pool"));
+ return (-1);
+ }
+
+ /* check to see if the pool exists */
+ if ((slash = strchr(parent, '/')) == NULL)
+ slash = parent + strlen(parent);
+ (void) strncpy(zc.zc_name, parent, slash - parent);
+ zc.zc_name[slash - parent] = '\0';
+ if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
+ errno == ENOENT) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': no such pool '%s'"), path, zc.zc_name);
+ return (-1);
+ }
+
+ /* check to see if the parent dataset exists */
+ (void) strlcpy(zc.zc_name, parent, sizeof (zc.zc_name));
+ if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+ switch (errno) {
+ case ENOENT:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': parent does not exist"), path);
+ return (-1);
+
+ default:
+ zfs_baderror(errno);
+ }
+ }
+
+ /* we are in a non-global zone, but parent is in the global zone */
+ if (getzoneid() != GLOBAL_ZONEID && !zc.zc_objset_stats.dds_zoned) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': permission denied"), path);
+ return (-1);
+ }
+
+ /* make sure parent is a filesystem */
+ if (zc.zc_objset_stats.dds_type != DMU_OST_ZFS) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': parent is not a filesystem"),
+ path);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Create a new filesystem or volume. 'sizestr' and 'blocksizestr' are used
+ * only for volumes, and indicate the size and blocksize of the volume.
+ */
+int
+zfs_create(const char *path, zfs_type_t type,
+ const char *sizestr, const char *blocksizestr)
+{
+ char reason[64];
+ zfs_cmd_t zc = { 0 };
+ int ret;
+ uint64_t size = 0;
+ uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
+
+ /* convert sizestr into integer size */
+ if (sizestr != NULL && nicestrtonum(sizestr, &size,
+ reason, sizeof (reason)) != 0) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "bad volume size '%s': %s"), sizestr, reason);
+ return (-1);
+ }
+
+ /* convert blocksizestr into integer blocksize */
+ if (blocksizestr != NULL && nicestrtonum(blocksizestr, &blocksize,
+ reason, sizeof (reason)) != 0) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "bad volume blocksize '%s': %s"), blocksizestr, reason);
+ return (-1);
+ }
+
+ /* make sure the name is not too long */
+ if (strlen(path) >= ZFS_MAXNAMELEN) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': %s name is too long"),
+ path, zfs_type_to_name(type));
+ return (-1);
+ }
+
+ /* validate the path, taking care to note the extended error message */
+ if (!zfs_validate_name(path, type, reason, sizeof (reason))) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': %s in %s name"), path, reason,
+ zfs_type_to_name(type));
+ if (strstr(reason, "snapshot") != NULL)
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "use 'zfs snapshot' to create a snapshot"));
+ return (-1);
+ }
+
+ /* validate parents exist */
+ if (check_parents(path, type) != 0)
+ return (-1);
+
+ /*
+ * The failure modes when creating a dataset of a different type over
+ * one that already exists is a little strange. In particular, if you
+ * try to create a dataset on top of an existing dataset, the ioctl()
+ * will return ENOENT, not EEXIST. To prevent this from happening, we
+ * first try to see if the dataset exists.
+ */
+ (void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
+ if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': dataset exists"), path);
+ return (-1);
+ }
+
+ if (type == ZFS_TYPE_VOLUME)
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ if (type == ZFS_TYPE_VOLUME) {
+ if (size == 0) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "bad volume size '%s': cannot be zero"), sizestr);
+ return (-1);
+ }
+
+ zc.zc_volsize = size;
+ zc.zc_volblocksize = blocksize;
+ }
+
+ /* create the dataset */
+
+ ret = ioctl(zfs_fd, ZFS_IOC_CREATE, &zc);
+
+ if (ret == 0 && type == ZFS_TYPE_VOLUME)
+ ret = zvol_create_link(path);
+
+ /* check for failure */
+ if (ret != 0) {
+ char parent[ZFS_MAXNAMELEN];
+ (void) parent_name(path, parent, sizeof (parent));
+
+ switch (errno) {
+ case ENOENT:
+ /*
+ * The parent dataset has been deleted since our
+ * previous check.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': no such parent '%s'"),
+ path, parent);
+ break;
+
+ case EPERM:
+ /*
+ * The user doesn't have permission to create a new
+ * dataset here.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': permission denied"), path);
+ break;
+
+ case EDQUOT:
+ case ENOSPC:
+ /*
+ * The parent dataset does not have enough free space
+ * to create a new dataset.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': not enough space in '%s'"),
+ path, parent);
+ break;
+
+ case EEXIST:
+ /*
+ * The target dataset already exists. We should have
+ * caught this above, but there may be some unexplained
+ * race condition.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': dataset exists"), path);
+ break;
+
+ case EINVAL:
+ /*
+ * The target dataset does not support children.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': children unsupported in '%s'"),
+ path, parent);
+ break;
+
+ case EDOM:
+ zfs_error(dgettext(TEXT_DOMAIN, "bad %s value '%s': "
+ "must be power of 2 from %u to %uk"),
+ zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
+ blocksizestr ? blocksizestr : "<unknown>",
+ (uint_t)SPA_MINBLOCKSIZE,
+ (uint_t)SPA_MAXBLOCKSIZE >> 10);
+ break;
+#ifdef _ILP32
+ case EOVERFLOW:
+ /*
+ * This platform can't address a volume this big.
+ */
+ if (type == ZFS_TYPE_VOLUME) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': "
+ "max volume size is 1TB on 32-bit systems"),
+ path);
+ break;
+ }
+#endif
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Destroys the given dataset. The caller must make sure that the filesystem
+ * isn't mounted, and that there are no active dependents.
+ */
+int
+zfs_destroy(zfs_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ /*
+ * We use the check for 'zfs_volblocksize' instead of ZFS_TYPE_VOLUME
+ * so that we do the right thing for snapshots of volumes.
+ */
+ if (zhp->zfs_volblocksize != 0) {
+ if (zvol_remove_link(zhp->zfs_name) != 0)
+ return (-1);
+
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ } else {
+ zc.zc_objset_type = DMU_OST_ZFS;
+ }
+
+ ret = ioctl(zfs_fd, ZFS_IOC_DESTROY, &zc);
+
+ if (ret != 0) {
+ switch (errno) {
+
+ case EPERM:
+ /*
+ * We don't have permission to destroy this dataset.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': permission denied"),
+ zhp->zfs_name);
+ break;
+
+ case ENOENT:
+ /*
+ * We've hit a race condition where the dataset has been
+ * destroyed since we opened it.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': no such %s"),
+ zhp->zfs_name, zfs_type_to_name(zhp->zfs_type));
+ break;
+
+ case EBUSY:
+ /*
+ * Even if we destroy all children, there is a chance we
+ * can hit this case if:
+ *
+ * - A child dataset has since been created
+ * - A filesystem is mounted
+ *
+ * This error message is awful, but hopefully we've
+ * already caught the common cases (and aborted more
+ * appropriately) before calling this function. There's
+ * nothing else we can do at this point.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': %s is busy"),
+ zhp->zfs_name, zfs_type_to_name(zhp->zfs_type));
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (-1);
+ }
+
+ remove_mountpoint(zhp);
+
+ return (0);
+}
+
+/*
+ * Clones the given dataset. The target must be of the same type as the source.
+ */
+int
+zfs_clone(zfs_handle_t *zhp, const char *target)
+{
+ char reason[64];
+ zfs_cmd_t zc = { 0 };
+ char parent[ZFS_MAXNAMELEN];
+ int ret;
+
+ assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+ /* validate the target name */
+ if (!zfs_validate_name(target, ZFS_TYPE_FILESYSTEM, reason,
+ sizeof (reason))) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': %s in filesystem name"), target,
+ reason, zfs_type_to_name(ZFS_TYPE_FILESYSTEM));
+ return (-1);
+ }
+
+ /* validate parents exist */
+ if (check_parents(target, zhp->zfs_type) != 0)
+ return (-1);
+
+ (void) parent_name(target, parent, sizeof (parent));
+
+ /* do the clone */
+ if (zhp->zfs_volblocksize != 0)
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ (void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name));
+ (void) strlcpy(zc.zc_filename, zhp->zfs_name, sizeof (zc.zc_filename));
+ ret = ioctl(zfs_fd, ZFS_IOC_CREATE, &zc);
+
+ if (ret != 0) {
+ switch (errno) {
+ case EPERM:
+ /*
+ * The user doesn't have permission to create the clone.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': permission denied"),
+ target);
+ break;
+
+ case ENOENT:
+ /*
+ * The parent doesn't exist. We should have caught this
+ * above, but there may a race condition that has since
+ * destroyed the parent.
+ *
+ * At this point, we don't know whether it's the source
+ * that doesn't exist anymore, or whether the target
+ * dataset doesn't exist.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': no such parent '%s'"),
+ target, parent);
+ break;
+
+ case EDQUOT:
+ case ENOSPC:
+ /*
+ * There is not enough space in the target dataset
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': not enough space in '%s'"),
+ target, parent);
+ break;
+
+ case EEXIST:
+ /*
+ * The target already exists.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot create '%s': dataset exists"), target);
+ break;
+
+ case EXDEV:
+ /*
+ * The source and target pools differ.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "source and target pools differ"), target);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ } else if (zhp->zfs_volblocksize != 0) {
+ ret = zvol_create_link(target);
+ }
+
+ return (ret);
+}
+
+/*
+ * Takes a snapshot of the given dataset
+ */
+int
+zfs_snapshot(const char *path)
+{
+ char reason[64];
+ const char *delim;
+ char *parent;
+ zfs_handle_t *zhp;
+ zfs_cmd_t zc = { 0 };
+ int ret;
+
+ /* validate the snapshot name */
+ if (!zfs_validate_name(path, ZFS_TYPE_SNAPSHOT, reason,
+ sizeof (reason))) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot snapshot '%s': %s in snapshot name"), path,
+ reason);
+ return (-1);
+ }
+
+ /* make sure we have a snapshot */
+ if ((delim = strchr(path, '@')) == NULL) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot snapshot '%s': missing '@' delim in snapshot "
+ "name"), path);
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "use 'zfs create' to create a filesystem"));
+ return (-1);
+ }
+
+ /* make sure the parent exists and is of the appropriate type */
+ parent = zfs_malloc(delim - path + 1);
+ (void) strncpy(parent, path, delim - path);
+ parent[delim - path] = '\0';
+
+ if ((zhp = zfs_open(parent, ZFS_TYPE_FILESYSTEM |
+ ZFS_TYPE_VOLUME)) == NULL) {
+ free(parent);
+ return (-1);
+ }
+
+ (void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
+
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME)
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ ret = ioctl(zfs_fd, ZFS_IOC_CREATE, &zc);
+
+ if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ ret = zvol_create_link(path);
+ if (ret != 0)
+ (void) ioctl(zfs_fd, ZFS_IOC_DESTROY, &zc);
+ }
+
+ if (ret != 0) {
+ switch (errno) {
+ case EPERM:
+ /*
+ * User doesn't have permission to create a snapshot
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "permission denied"), path);
+ break;
+
+ case EDQUOT:
+ case ENOSPC:
+ /*
+ * Out of space in parent.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "not enough space in '%s'"), path, parent);
+ break;
+
+ case EEXIST:
+ /*
+ * Snapshot already exists.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "snapshot exists"), path);
+ break;
+
+ case ENOENT:
+ /*
+ * Shouldn't happen because we verified the parent
+ * above. But there may be a race condition where it
+ * has since been removed.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': "
+ "no such %s"), parent,
+ zfs_type_to_name(zhp->zfs_type));
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ }
+
+ free(parent);
+ zfs_close(zhp);
+
+ return (ret);
+}
+
+/*
+ * Dumps a backup of tosnap, incremental from fromsnap if it isn't NULL.
+ */
+int
+zfs_backup(zfs_handle_t *zhp_to, zfs_handle_t *zhp_from)
+{
+ zfs_cmd_t zc = { 0 };
+ int ret;
+
+ /* do the ioctl() */
+ (void) strlcpy(zc.zc_name, zhp_to->zfs_name, sizeof (zc.zc_name));
+ if (zhp_from) {
+ (void) strlcpy(zc.zc_prop_value, zhp_from->zfs_name,
+ sizeof (zc.zc_name));
+ } else {
+ zc.zc_prop_value[0] = '\0';
+ }
+ zc.zc_cookie = STDOUT_FILENO;
+
+ ret = ioctl(zfs_fd, ZFS_IOC_SENDBACKUP, &zc);
+ if (ret != 0) {
+ switch (errno) {
+ case EPERM:
+ /*
+ * User doesn't have permission to do a backup
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot backup '%s': "
+ "permission denied"), zhp_to->zfs_name);
+ break;
+
+ case EXDEV:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot do incremental backup from %s:\n"
+ "it is not an earlier snapshot from the "
+ "same fs as %s"),
+ zhp_from->zfs_name, zhp_to->zfs_name);
+ break;
+
+ case ENOENT:
+ /*
+ * Shouldn't happen because we verified the parent
+ * above. But there may be a race condition where it
+ * has since been removed.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot open: "
+ "no such snapshot"));
+ break;
+
+ case EDQUOT:
+ case EFBIG:
+ case EIO:
+ case ENOLINK:
+ case ENOSPC:
+ case ENOSTR:
+ case ENXIO:
+ case EPIPE:
+ case ERANGE:
+ case EFAULT:
+ case EROFS:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot write backup stream: %s"),
+ strerror(errno));
+ break;
+
+ case EINTR:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "backup failed: signal recieved"));
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Restores a backup of tosnap from stdin.
+ */
+int
+zfs_restore(const char *tosnap, int isprefix, int verbose, int dryrun)
+{
+ zfs_cmd_t zc = { 0 };
+ time_t begin_time;
+ int err, bytes;
+ char *cp;
+ dmu_replay_record_t drr;
+ struct drr_begin *drrb = &zc.zc_begin_record;
+
+ begin_time = time(NULL);
+
+ /* trim off snapname, if any */
+ (void) strcpy(zc.zc_name, tosnap);
+ cp = strchr(zc.zc_name, '@');
+ if (cp)
+ *cp = '\0';
+
+ /* read in the BEGIN record */
+ cp = (char *)&drr;
+ bytes = 0;
+ do {
+ err = read(STDIN_FILENO, cp, sizeof (drr) - bytes);
+ cp += err;
+ bytes += err;
+ } while (err > 0);
+
+ if (err < 0 || bytes != sizeof (drr)) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: invalid backup stream "
+ "(couldn't read first record)"));
+ return (-1);
+ }
+
+ zc.zc_begin_record = drr.drr_u.drr_begin;
+
+ if (drrb->drr_magic != DMU_BACKUP_MAGIC &&
+ drrb->drr_magic != BSWAP_64(DMU_BACKUP_MAGIC)) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: invalid backup stream "
+ "(invalid magic number)"));
+ return (-1);
+ }
+
+ if (drrb->drr_version != DMU_BACKUP_VERSION &&
+ drrb->drr_version != BSWAP_64(DMU_BACKUP_VERSION)) {
+ if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
+ drrb->drr_version = BSWAP_64(drrb->drr_version);
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: only backup version 0x%llx is supported, "
+ "stream is version %llx."),
+ DMU_BACKUP_VERSION, drrb->drr_version);
+ return (-1);
+ }
+
+ /*
+ * Determine name of destination snapshot.
+ */
+ (void) strcpy(drrb->drr_toname, tosnap);
+ if (isprefix) {
+ if (strchr(tosnap, '@') != NULL) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "argument to -d must be a filesystem"));
+ return (-1);
+ }
+
+ cp = strchr(drr.drr_u.drr_begin.drr_toname, '/');
+ if (cp == NULL)
+ cp = drr.drr_u.drr_begin.drr_toname;
+ else
+ cp++;
+
+ (void) strcat(drrb->drr_toname, "/");
+ (void) strcat(drrb->drr_toname, cp);
+ } else if (strchr(tosnap, '@') == NULL) {
+ /*
+ * they specified just a filesystem; tack on the
+ * snapname from the backup.
+ */
+ cp = strchr(drr.drr_u.drr_begin.drr_toname, '@');
+ if (cp == NULL || strlen(tosnap) + strlen(cp) >= MAXNAMELEN) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: invalid backup stream "
+ "(invalid snapshot name)"));
+ return (-1);
+ }
+ (void) strcat(drrb->drr_toname, cp);
+ }
+
+ if (drrb->drr_fromguid) {
+ zfs_handle_t *h;
+ /* incremental backup stream */
+
+ /* do the ioctl to the containing fs */
+ (void) strcpy(zc.zc_name, drrb->drr_toname);
+ cp = strchr(zc.zc_name, '@');
+ *cp = '\0';
+
+ /* make sure destination fs exists */
+ h = zfs_open(zc.zc_name, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (h == NULL) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore incrememtal backup: destination\n"
+ "filesystem %s does not exist"),
+ zc.zc_name);
+ return (-1);
+ }
+ /* unmount destination fs */
+ if (!dryrun)
+ (void) zfs_unmount(h, NULL, 0);
+ zfs_close(h);
+
+
+ } else {
+ /* full backup stream */
+
+ /* do the ioctl to the containing fs's parent */
+ (void) strcpy(zc.zc_name, drrb->drr_toname);
+ cp = strrchr(zc.zc_name, '/');
+ if (cp == NULL) {
+ cp = strchr(zc.zc_name, '@');
+ if (cp)
+ *cp = '\0';
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: destination fs %s already exists"),
+ zc.zc_name);
+ return (-1);
+ }
+ *cp = '\0';
+
+ /* make sure destination fs exists */
+
+ if (isprefix) {
+ /* make sure prefix exists */
+ zfs_handle_t *h;
+
+ /* make sure destination fs exists */
+ h = zfs_open(tosnap, ZFS_TYPE_FILESYSTEM |
+ ZFS_TYPE_VOLUME);
+ if (h == NULL) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore:"
+ "filesystem %s does not exist"),
+ tosnap);
+ return (-1);
+ }
+
+ /* create any necessary ancestors up to prefix */
+ cp = zc.zc_name + strlen(tosnap) + 1;
+ while (cp = strchr(cp, '/')) {
+ *cp = '\0';
+ err = ioctl(zfs_fd, ZFS_IOC_CREATE, &zc);
+ if (err && err != ENOENT && err != EEXIST) {
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore:"
+ "couldn't create ancestor %s"),
+ zc.zc_name);
+ return (-1);
+ }
+ }
+ }
+ }
+
+ (void) strcpy(zc.zc_prop_value, tosnap);
+ zc.zc_cookie = STDIN_FILENO;
+ zc.zc_intsz = isprefix;
+ if (verbose) {
+ (void) printf("%s %s backup of %s into %s\n",
+ dryrun ? "would restore" : "restoring",
+ drrb->drr_fromguid ? "incremental" : "full",
+ drr.drr_u.drr_begin.drr_toname,
+ zc.zc_begin_record.drr_toname);
+ (void) fflush(stdout);
+ }
+ if (dryrun)
+ return (0);
+ err = ioctl(zfs_fd, ZFS_IOC_RECVBACKUP, &zc);
+ if (err != 0) {
+ switch (errno) {
+ case ENODEV:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Most recent snapshot does not "
+ "match incremental backup source"));
+ break;
+ case ETXTBSY:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Destination has been modified since "
+ "most recent snapshot.\n"
+ "Use 'zfs rollback' to discard changes."));
+ break;
+ case EEXIST:
+ if (drrb->drr_fromguid == 0) {
+ /* it's the containing fs that exists */
+ cp = strchr(drrb->drr_toname, '@');
+ *cp = '\0';
+ }
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore to %s: Destination already exists"),
+ drrb->drr_toname);
+ break;
+ case ENOENT:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Destination does not exist"));
+ break;
+ case EBUSY:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Destination is in use"));
+ break;
+ case ENOSPC:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Out of space"));
+ break;
+ case EDQUOT:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "Quota exceeded"));
+ break;
+ case EINTR:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Restore failed: signal recieved"));
+ break;
+ case EINVAL:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "Can't restore: "
+ "invalid backup stream"));
+ break;
+ default:
+ zfs_baderror(errno);
+ }
+ }
+
+ /*
+ * Mount or recreate the /dev links for the target filesystem.
+ */
+ cp = strchr(drrb->drr_toname, '@');
+ if (cp && (err == 0 || drrb->drr_fromguid)) {
+ zfs_handle_t *h;
+
+ *cp = '\0';
+ h = zfs_open(drrb->drr_toname,
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (h) {
+ if (h->zfs_type == ZFS_TYPE_FILESYSTEM)
+ err = zfs_mount(h, NULL, 0);
+ else
+ err = zvol_create_link(h->zfs_name);
+ zfs_close(h);
+ }
+ }
+
+ /*
+ * If the destination snapshot was also specified, and it was a volume,
+ * make sure that the appropriate /dev link was created as well.
+ */
+ if (err == 0) {
+ zfs_handle_t *h;
+
+ if (cp)
+ *cp = '@';
+
+ h = zfs_open(drrb->drr_toname, ZFS_TYPE_SNAPSHOT |
+ ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+ if (h) {
+ if (h->zfs_volblocksize)
+ err = zvol_create_link(h->zfs_name);
+ zfs_close(h);
+ }
+ }
+
+ if (err)
+ return (err);
+
+ if (verbose) {
+ char buf1[64];
+ char buf2[64];
+ uint64_t bytes = zc.zc_cookie;
+ time_t delta = time(NULL) - begin_time;
+ if (delta == 0)
+ delta = 1;
+ zfs_nicenum(bytes, buf1, sizeof (buf1));
+ zfs_nicenum(bytes/delta, buf2, sizeof (buf1));
+
+ (void) printf("restored %sb backup in %lu seconds (%sb/sec)\n",
+ buf1, delta, buf2);
+ }
+ return (0);
+}
+
+/*
+ * Rollback the given dataset to the previous snapshot. It is up to the caller
+ * to verify that there is a previous snapshot available.
+ */
+int
+zfs_rollback(zfs_handle_t *zhp)
+{
+ int ret;
+ zfs_cmd_t zc = { 0 };
+
+ assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
+ zhp->zfs_type == ZFS_TYPE_VOLUME);
+
+ if (zhp->zfs_type == ZFS_TYPE_VOLUME &&
+ zvol_remove_link(zhp->zfs_name) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+ if (zhp->zfs_volblocksize != 0)
+ zc.zc_objset_type = DMU_OST_ZVOL;
+ else
+ zc.zc_objset_type = DMU_OST_ZFS;
+
+ /*
+ * We rely on the consumer to verify that there are no newer snapshots
+ * for the given dataset. Given these constraints, we can simply pass
+ * the name on to the ioctl() call. There is still an unlikely race
+ * condition where the user has taken a snapshot since we verified that
+ * this was the most recent.
+ */
+ if ((ret = ioctl(zfs_fd, ZFS_IOC_ROLLBACK, &zc)) != 0) {
+ switch (errno) {
+ case EPERM:
+ /*
+ * The user doesn't have permission to rollback the
+ * given dataset.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot rollback '%s': "
+ "permission denied"), zhp->zfs_name);
+ break;
+
+ case EDQUOT:
+ case ENOSPC:
+ /*
+ * The parent dataset doesn't have enough space to
+ * rollback to the last snapshot.
+ */
+ {
+ char parent[ZFS_MAXNAMELEN];
+ (void) parent_name(zhp->zfs_name, parent,
+ sizeof (parent));
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot "
+ "rollback '%s': out of space"), parent);
+ }
+ break;
+
+ case ENOENT:
+ /*
+ * The dataset doesn't exist. This shouldn't happen
+ * except in race conditions.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot rollback '%s': "
+ "no such %s"), zhp->zfs_name,
+ zfs_type_to_name(zhp->zfs_type));
+ break;
+
+ case EBUSY:
+ /*
+ * The filesystem is busy. This should have been caught
+ * by the caller before getting here, but there may be
+ * an unexpected problem.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot rollback '%s': "
+ "%s is busy"), zhp->zfs_name,
+ zfs_type_to_name(zhp->zfs_type));
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ } else if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+ ret = zvol_create_link(zhp->zfs_name);
+ }
+
+ return (ret);
+}
+
+/*
+ * Iterate over all dependents for a given dataset. This includes both
+ * hierarchical dependents (children) and data dependents (snapshots and
+ * clones). The bulk of the processing occurs in get_dependents() in
+ * libzfs_graph.c.
+ */
+int
+zfs_iter_dependents(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+	char **list;
+	size_t total, n;
+	zfs_handle_t *dep;
+	int result = 0;
+
+	list = get_dependents(zhp->zfs_name, &total);
+
+	/*
+	 * Walk the dependents in order, stopping at the first callback
+	 * failure.  Names that can no longer be opened are skipped.
+	 */
+	for (n = 0; n < total; n++) {
+		dep = make_dataset_handle(list[n]);
+		if (dep == NULL)
+			continue;
+
+		result = func(dep, data);
+		if (result != 0)
+			break;
+	}
+
+	/* The name array is owned here; free it regardless of outcome. */
+	for (n = 0; n < total; n++)
+		free(list[n]);
+	free(list);
+
+	return (result);
+}
+
+/*
+ * Renames the given dataset.
+ */
+int
+zfs_rename(zfs_handle_t *zhp, const char *target)
+{
+	int ret;
+	zfs_cmd_t zc = { 0 };
+	char reason[64];
+	char *delim;
+	prop_changelist_t *cl;
+	char parent[ZFS_MAXNAMELEN];
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_prop_value, target, sizeof (zc.zc_prop_value));
+
+	/* if we have the same exact name, just return success */
+	if (strcmp(zhp->zfs_name, target) == 0)
+		return (0);
+
+	/*
+	 * Make sure the target name is valid
+	 */
+	if (!zfs_validate_name(target, zhp->zfs_type, reason,
+	    sizeof (reason))) {
+		zfs_error(dgettext(TEXT_DOMAIN,
+		    "cannot create '%s': %s in %s name"), target, reason,
+		    zfs_type_to_name(zhp->zfs_type));
+		return (-1);
+	}
+
+	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+		/* snapshots can only be renamed within their dataset */
+		if ((delim = strchr(target, '@')) == NULL) {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot rename to '%s': not a snapshot"), target);
+			return (-1);
+		}
+
+		/*
+		 * Make sure we're renaming within the same dataset.
+		 */
+		if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
+		    zhp->zfs_name[delim - target] != '@') {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot rename to '%s': snapshots must be part "
+			    "of same dataset"), target);
+			return (-1);
+		}
+
+		(void) strncpy(parent, target, delim - target);
+		parent[delim - target] = '\0';
+	} else {
+		/* validate parents */
+		if (check_parents(target, zhp->zfs_type) != 0)
+			return (-1);
+
+		(void) parent_name(target, parent, sizeof (parent));
+
+		/* make sure we're in the same pool */
+		verify((delim = strchr(target, '/')) != NULL);
+		if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
+		    zhp->zfs_name[delim - target] != '/') {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot rename to '%s': "
+			    "datasets must be within same pool"), target);
+			return (-1);
+		}
+	}
+
+	/* zoned datasets can only be renamed from within the zone */
+	if (getzoneid() == GLOBAL_ZONEID &&
+	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot rename %s, "
+		    "dataset is used in a non-global zone"), zhp->zfs_name);
+		return (-1);
+	}
+
+	/*
+	 * Gather the set of mounted filesystems affected by the rename.
+	 * FIX: return -1 on failure, consistent with every other error
+	 * path in this function (this previously returned 1).
+	 */
+	if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
+		return (-1);
+
+	if (changelist_haszonedchild(cl)) {
+		zfs_error(dgettext(TEXT_DOMAIN,
+		    "cannot rename '%s': child dataset with inherited "
+		    "mountpoint is used in a non-global zone"), zhp->zfs_name);
+		ret = -1;
+		goto error;
+	}
+
+	/* unmount/unshare everything that will change names */
+	if ((ret = changelist_prefix(cl)) != 0)
+		goto error;
+
+	if (zhp->zfs_volblocksize != 0)
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	else
+		zc.zc_objset_type = DMU_OST_ZFS;
+
+	if ((ret = ioctl(zfs_fd, ZFS_IOC_RENAME, &zc)) != 0) {
+		switch (errno) {
+		case EPERM:
+			/*
+			 * The user doesn't have permission to rename the
+			 * given dataset.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot rename '%s': "
+			    "permission denied"), zhp->zfs_name);
+			break;
+
+		case EDQUOT:
+		case ENOSPC:
+			/*
+			 * Not enough space in the parent dataset.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot "
+			    "rename '%s': not enough space in '%s'"),
+			    zhp->zfs_name, parent);
+			break;
+
+		case ENOENT:
+			/*
+			 * The destination doesn't exist.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot rename '%s' "
+			    "to '%s': destination doesn't exist"),
+			    zhp->zfs_name, target);
+			break;
+
+		case EEXIST:
+			/*
+			 * The destination already exists.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot rename '%s' "
+			    "to '%s': destination already exists"),
+			    zhp->zfs_name, target);
+			break;
+
+		case EBUSY:
+			/*
+			 * The filesystem is busy. This should have been caught
+			 * by the caller before getting here, but there may be
+			 * an unexpected problem.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot rename '%s': "
+			    "%s is busy"), zhp->zfs_name,
+			    zfs_type_to_name(zhp->zfs_type));
+			break;
+
+		default:
+			zfs_baderror(errno);
+		}
+
+		/*
+		 * On failure, we still want to remount any filesystems that
+		 * were previously mounted, so we don't alter the system state.
+		 */
+		(void) changelist_postfix(cl);
+	} else {
+		changelist_rename(cl, zfs_get_name(zhp), target);
+
+		ret = changelist_postfix(cl);
+	}
+
+error:
+	changelist_free(cl);
+	return (ret);
+}
+
+/*
+ * Given a zvol dataset, issue the ioctl to create the appropriate minor node,
+ * poke devfsadm to create the /dev link, and then wait for the link to appear.
+ */
+int
+zvol_create_link(const char *dataset)
+{
+	zfs_cmd_t zc = { 0 };
+	di_devlink_handle_t hdl;
+
+	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+
+	/*
+	 * Issue the appropriate ioctl.
+	 */
+	if (ioctl(zfs_fd, ZFS_IOC_CREATE_MINOR, &zc) != 0) {
+		switch (errno) {
+		case EPERM:
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot create "
+			    "device links for '%s': permission denied"),
+			    dataset);
+			break;
+
+		case EEXIST:
+			/*
+			 * Silently ignore the case where the link already
+			 * exists. This allows 'zfs volinit' to be run multiple
+			 * times without errors.
+			 */
+			return (0);
+
+		default:
+			zfs_baderror(errno);
+		}
+
+		return (-1);
+	}
+
+	/*
+	 * Call devfsadm and wait for the links to magically appear.
+	 * On failure, undo the minor-node creation above so we don't
+	 * leave a half-created volume behind.
+	 */
+	if ((hdl = di_devlink_init(ZFS_DRIVER, DI_MAKE_LINK)) == NULL) {
+		zfs_error(dgettext(TEXT_DOMAIN,
+		    "cannot create device links for '%s'"), dataset);
+		(void) ioctl(zfs_fd, ZFS_IOC_REMOVE_MINOR, &zc);
+		return (-1);
+	} else {
+		(void) di_devlink_fini(&hdl);
+	}
+
+	return (0);
+}
+
+/*
+ * Remove a minor node for the given zvol and the associated /dev links.
+ */
+int
+zvol_remove_link(const char *dataset)
+{
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+
+	/* The common case: the minor node was removed cleanly. */
+	if (ioctl(zfs_fd, ZFS_IOC_REMOVE_MINOR, &zc) == 0)
+		return (0);
+
+	switch (errno) {
+	case ENXIO:
+		/*
+		 * Silently ignore the case where the link no longer
+		 * exists, so that 'zfs volfini' can be run multiple
+		 * times without errors.
+		 */
+		return (0);
+
+	case EPERM:
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot remove "
+		    "device links for '%s': permission denied"),
+		    dataset);
+		break;
+
+	case EBUSY:
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot remove "
+		    "device links for '%s': volume is in use"),
+		    dataset);
+		break;
+
+	default:
+		zfs_baderror(errno);
+	}
+
+	return (-1);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_graph.c b/usr/src/lib/libzfs/common/libzfs_graph.c
new file mode 100644
index 0000000000..65b115879b
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_graph.c
@@ -0,0 +1,527 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Iterate over all children of the current object. This includes the normal
+ * dataset hierarchy, but also arbitrary hierarchies due to clones. We want to
+ * walk all datasets in the pool, and construct a directed graph of the form:
+ *
+ * home
+ * |
+ * +----+----+
+ * | |
+ * v v ws
+ * bar baz |
+ * | |
+ * v v
+ * @yesterday ----> foo
+ *
+ * In order to construct this graph, we have to walk every dataset in the pool,
+ * because the clone parent is stored as a property of the child, not the
+ * parent. The parent only keeps track of the number of clones.
+ *
+ * In the normal case (without clones) this would be rather expensive. To avoid
+ * unnecessary computation, we first try a walk of the subtree hierarchy
+ * starting from the initial node. At each dataset, we construct a node in the
+ * graph and an edge leading from its parent. If we don't see any snapshots
+ * with a non-zero clone count, then we are finished.
+ *
+ * If we do find a cloned snapshot, then we finish the walk of the current
+ * subtree, but indicate that we need to do a complete walk. We then perform a
+ * global walk of all datasets, avoiding the subtree we already processed.
+ *
+ * At the end of this, we'll end up with a directed graph of all relevant (and
+ * possibly some irrelevant) datasets in the system. We need to both find our
+ * limiting subgraph and determine a safe ordering in which to destroy the
+ * datasets. We do a topological ordering of our graph starting at our target
+ * dataset, and then walk the results in reverse.
+ *
+ * When removing datasets, we want to destroy the snapshots in chronological
+ * order (because this is the most efficient method). In order to accomplish
+ * this, we store the creation transaction group with each vertex and keep each
+ * vertex's edges sorted according to this value. The topological sort will
+ * automatically walk the snapshots in the correct order.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+#include "zfs_namecheck.h"
+
+#define MIN_EDGECOUNT 4
+
+/*
+ * Vertex structure. Indexed by dataset name, this structure maintains a list
+ * of edges to other vertices.
+ */
+struct zfs_edge;
+typedef struct zfs_vertex {
+	char			zv_dataset[ZFS_MAXNAMELEN];	/* hash key */
+	struct zfs_vertex	*zv_next;	/* next vertex in hash chain */
+	int			zv_visited;	/* 0=unseen 1=walked 2=sorted */
+	uint64_t		zv_txg;		/* creation txg (sort key) */
+	struct zfs_edge		**zv_edges;	/* outgoing edges */
+	int			zv_edgecount;	/* edges in use */
+	int			zv_edgealloc;	/* edges allocated */
+} zfs_vertex_t;
+
+/*
+ * Edge structure. Simply maintains a pointer to the destination vertex. There
+ * is no need to store the source vertex, since we only use edges in the context
+ * of the source vertex.
+ */
+typedef struct zfs_edge {
+	zfs_vertex_t	*ze_dest;	/* destination vertex */
+	struct zfs_edge	*ze_next;	/* NOTE(review): never linked here */
+} zfs_edge_t;
+
+#define	ZFS_GRAPH_SIZE		1027	/* this could be dynamic some day */
+
+/*
+ * Graph structure. Vertices are maintained in a hash indexed by dataset name.
+ */
+typedef struct zfs_graph {
+	zfs_vertex_t	**zg_hash;	/* bucket array of vertex chains */
+	size_t		zg_size;	/* number of buckets */
+	size_t		zg_nvertex;	/* total vertices in the graph */
+} zfs_graph_t;
+
+/*
+ * Allocate a new edge pointing to the target vertex.
+ */
+static zfs_edge_t *
+zfs_edge_create(zfs_vertex_t *dest)
+{
+	zfs_edge_t *edge;
+
+	/* zfs_malloc() aborts on failure, so no NULL check is needed */
+	edge = zfs_malloc(sizeof (zfs_edge_t));
+	edge->ze_dest = dest;
+
+	return (edge);
+}
+
+/*
+ * Destroy an edge.
+ */
+static void
+zfs_edge_destroy(zfs_edge_t *zep)
+{
+	/* An edge owns no other resources; just release the structure. */
+	free(zep);
+}
+
+/*
+ * Allocate a new vertex with the given name.
+ */
+static zfs_vertex_t *
+zfs_vertex_create(const char *dataset)
+{
+	zfs_vertex_t *vertex;
+
+	assert(strlen(dataset) < ZFS_MAXNAMELEN);
+
+	vertex = zfs_malloc(sizeof (zfs_vertex_t));
+	(void) strlcpy(vertex->zv_dataset, dataset,
+	    sizeof (vertex->zv_dataset));
+
+	/* start with a small edge array; it is doubled on demand */
+	vertex->zv_edgealloc = MIN_EDGECOUNT;
+	vertex->zv_edges = zfs_malloc(MIN_EDGECOUNT * sizeof (void *));
+
+	return (vertex);
+}
+
+/*
+ * Destroy a vertex. Frees up any associated edges.
+ */
+static void
+zfs_vertex_destroy(zfs_vertex_t *zvp)
+{
+	int idx;
+
+	/* release every outgoing edge before the vertex itself */
+	for (idx = 0; idx < zvp->zv_edgecount; idx++)
+		zfs_edge_destroy(zvp->zv_edges[idx]);
+
+	free(zvp->zv_edges);
+	free(zvp);
+}
+
+/*
+ * Given a vertex, add an edge to the destination vertex.
+ */
+static void
+zfs_vertex_add_edge(zfs_vertex_t *zvp, zfs_vertex_t *dest)
+{
+	zfs_edge_t *edge = zfs_edge_create(dest);
+
+	/* double the edge array whenever it fills up */
+	if (zvp->zv_edgecount == zvp->zv_edgealloc) {
+		zfs_edge_t **grown;
+
+		grown = zfs_malloc(zvp->zv_edgealloc * 2 * sizeof (void *));
+		bcopy(zvp->zv_edges, grown,
+		    zvp->zv_edgealloc * sizeof (void *));
+
+		free(zvp->zv_edges);
+		zvp->zv_edges = grown;
+		zvp->zv_edgealloc *= 2;
+	}
+
+	zvp->zv_edges[zvp->zv_edgecount++] = edge;
+}
+
+/*
+ * qsort(3C) comparator: order edges by the creation txg of their
+ * destination vertex.  Returns -1/0/1 rather than subtracting, since
+ * the txgs are unsigned 64-bit values.
+ */
+static int
+zfs_edge_compare(const void *a, const void *b)
+{
+	uint64_t atxg = (*((zfs_edge_t **)a))->ze_dest->zv_txg;
+	uint64_t btxg = (*((zfs_edge_t **)b))->ze_dest->zv_txg;
+
+	if (atxg == btxg)
+		return (0);
+	return (atxg < btxg ? -1 : 1);
+}
+
+/*
+ * Sort the given vertex edges according to the creation txg of each vertex.
+ */
+static void
+zfs_vertex_sort_edges(zfs_vertex_t *zvp)
+{
+	/* nothing to sort for a vertex with no outgoing edges */
+	if (zvp->zv_edgecount != 0)
+		qsort(zvp->zv_edges, zvp->zv_edgecount, sizeof (void *),
+		    zfs_edge_compare);
+}
+
+/*
+ * Construct a new graph object. We allow the size to be specified as a
+ * parameter so in the future we can size the hash according to the number of
+ * datasets in the pool.
+ */
+static zfs_graph_t *
+zfs_graph_create(size_t size)
+{
+	zfs_graph_t *graph;
+
+	/*
+	 * The empty hash chains rely on zfs_malloc() handing back zeroed
+	 * memory -- the same assumption the rest of this file makes.
+	 */
+	graph = zfs_malloc(sizeof (zfs_graph_t));
+	graph->zg_hash = zfs_malloc(size * sizeof (zfs_vertex_t *));
+	graph->zg_size = size;
+
+	return (graph);
+}
+
+/*
+ * Destroy a graph object. We have to iterate over all the hash chains,
+ * destroying each vertex in the process.
+ */
+/*
+ * Destroy a graph object. We have to iterate over all the hash chains,
+ * destroying each vertex in the process.  The index is a size_t to match
+ * zg_size and avoid a signed/unsigned comparison (was 'int').
+ */
+static void
+zfs_graph_destroy(zfs_graph_t *zgp)
+{
+	size_t i;
+	zfs_vertex_t *current, *next;
+
+	for (i = 0; i < zgp->zg_size; i++) {
+		current = zgp->zg_hash[i];
+		while (current != NULL) {
+			/* grab the successor before freeing the vertex */
+			next = current->zv_next;
+			zfs_vertex_destroy(current);
+			current = next;
+		}
+	}
+
+	free(zgp->zg_hash);
+	free(zgp);
+}
+
+/*
+ * Graph hash function. Classic bernstein k=33 hash function, taken from
+ * usr/src/cmd/sgs/tools/common/strhash.c
+ */
+static size_t
+zfs_graph_hash(zfs_graph_t *zgp, const char *str)
+{
+	size_t h = 5381;
+	int c;
+
+	/* classic Bernstein k=33 hash, reduced to a bucket index */
+	while ((c = *str++) != 0)
+		h = h * 33 + c;
+
+	return (h % zgp->zg_size);
+}
+
+/*
+ * Given a dataset name, finds the associated vertex, creating it if necessary.
+ */
+static zfs_vertex_t *
+zfs_graph_lookup(zfs_graph_t *zgp, const char *dataset, uint64_t txg)
+{
+	size_t idx = zfs_graph_hash(zgp, dataset);
+	zfs_vertex_t *zvp;
+
+	/* search the hash chain for an existing vertex with this name */
+	for (zvp = zgp->zg_hash[idx]; zvp != NULL; zvp = zvp->zv_next) {
+		if (strcmp(zvp->zv_dataset, dataset) == 0) {
+			/*
+			 * A vertex first created as an edge destination may
+			 * not yet know its creation txg; back-fill it the
+			 * first time a caller supplies one.
+			 */
+			if (zvp->zv_txg == 0)
+				zvp->zv_txg = txg;
+			return (zvp);
+		}
+	}
+
+	/* not found: create a new vertex and link it into the chain */
+	zvp = zfs_vertex_create(dataset);
+	zvp->zv_next = zgp->zg_hash[idx];
+	zvp->zv_txg = txg;
+	zgp->zg_hash[idx] = zvp;
+	zgp->zg_nvertex++;
+
+	return (zvp);
+}
+
+/*
+ * Given two dataset names, create an edge between them. For the source vertex,
+ * mark 'zv_visited' to indicate that we have seen this vertex, and not simply
+ * created it as a destination of another edge. If 'dest' is NULL, then this
+ * is an individual vertex (i.e. the starting vertex), so don't add an edge.
+ */
+static void
+zfs_graph_add(zfs_graph_t *zgp, const char *source, const char *dest,
+    uint64_t txg)
+{
+	zfs_vertex_t *sv;
+
+	/* the source is a vertex we have actually walked, so mark it */
+	sv = zfs_graph_lookup(zgp, source, 0);
+	sv->zv_visited = 1;
+
+	/* a NULL destination means a standalone vertex: no edge to add */
+	if (dest == NULL)
+		return;
+
+	zfs_vertex_add_edge(sv, zfs_graph_lookup(zgp, dest, txg));
+}
+
+/*
+ * Iterate over all children of the given dataset, adding any vertices as
+ * necessary. Returns 0 if no cloned snapshots were seen, 1 otherwise. This is
+ * a simple recursive algorithm - the ZFS namespace typically is very flat. We
+ * manually invoke the necessary ioctl() calls to avoid the overhead and
+ * additional semantics of zfs_open().
+ */
+static int
+iterate_children(zfs_graph_t *zgp, const char *dataset)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret = 0;
+	zfs_vertex_t *zvp;
+
+	/*
+	 * Look up the source vertex, and avoid it if we've seen it before.
+	 */
+	zvp = zfs_graph_lookup(zgp, dataset, 0);
+	if (zvp->zv_visited)
+		return (0);
+
+	/*
+	 * The LIST_NEXT ioctl uses zc as a cursor: the parent name is
+	 * restored on each pass and the ioctl fills zc_name with the next
+	 * child, advancing an internal cookie in zc.
+	 */
+	for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+	    ioctl(zfs_fd, ZFS_IOC_DATASET_LIST_NEXT, &zc) == 0;
+	    (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) {
+
+		/*
+		 * Ignore private dataset names.
+		 */
+		if (dataset_name_hidden(zc.zc_name))
+			continue;
+
+		/*
+		 * Get statistics for this dataset, to determine the type of the
+		 * dataset and clone statistics. If this fails, the dataset has
+		 * since been removed, and we're pretty much screwed anyway.
+		 */
+		if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
+			continue;
+
+		/*
+		 * Add an edge between the parent and the child.
+		 */
+		zfs_graph_add(zgp, dataset, zc.zc_name,
+		    zc.zc_objset_stats.dds_creation_txg);
+
+		/*
+		 * If this dataset has a clone parent, add an appropriate edge.
+		 */
+		if (zc.zc_objset_stats.dds_clone_of[0] != '\0')
+			zfs_graph_add(zgp, zc.zc_objset_stats.dds_clone_of,
+			    zc.zc_name, zc.zc_objset_stats.dds_creation_txg);
+
+		/*
+		 * Iterate over all children
+		 */
+		ret |= iterate_children(zgp, zc.zc_name);
+
+		/*
+		 * Indicate if we found a dataset with a non-zero clone count.
+		 */
+		if (zc.zc_objset_stats.dds_num_clones != 0)
+			ret |= 1;
+	}
+
+	/*
+	 * Now iterate over all snapshots.  The cursor must be reset first,
+	 * since the dataset walk above left state in zc.
+	 */
+	bzero(&zc, sizeof (zc));
+
+	for ((void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+	    ioctl(zfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc) == 0;
+	    (void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name))) {
+
+		/*
+		 * Get statistics for this dataset, to determine the type of the
+		 * dataset and clone statistics. If this fails, the dataset has
+		 * since been removed, and we're pretty much screwed anyway.
+		 */
+		if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0)
+			continue;
+
+		/*
+		 * Add an edge between the parent and the child.
+		 */
+		zfs_graph_add(zgp, dataset, zc.zc_name,
+		    zc.zc_objset_stats.dds_creation_txg);
+
+		/*
+		 * Indicate if we found a dataset with a non-zero clone count.
+		 */
+		if (zc.zc_objset_stats.dds_num_clones != 0)
+			ret |= 1;
+	}
+
+	/* mark this subtree as fully processed */
+	zvp->zv_visited = 1;
+
+	return (ret);
+}
+
+/*
+ * Construct a complete graph of all necessary vertices. First, we iterate over
+ * only our object's children. If we don't find any cloned snapshots, then we
+ * simply return that. Otherwise, we have to start at the pool root and iterate
+ * over all datasets.
+ */
+static zfs_graph_t *
+construct_graph(const char *dataset)
+{
+	zfs_graph_t *zgp = zfs_graph_create(ZFS_GRAPH_SIZE);
+	zfs_cmd_t zc = { 0 };
+
+	/*
+	 * We need to explicitly check whether this dataset has clones or not,
+	 * since iterate_children() only checks the children.
+	 */
+	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+	(void) ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc);
+
+	if (zc.zc_objset_stats.dds_num_clones != 0 ||
+	    iterate_children(zgp, dataset) != 0) {
+		/*
+		 * Determine pool name and try again.  The pool name is
+		 * everything before the first '/' (or '@' for a snapshot of
+		 * a pool-level dataset); a bare pool name has neither, in
+		 * which case the walk above already covered everything.
+		 */
+		char *pool, *slash;
+
+		if ((slash = strchr(dataset, '/')) != NULL ||
+		    (slash = strchr(dataset, '@')) != NULL) {
+			pool = zfs_malloc(slash - dataset + 1);
+			(void) strncpy(pool, dataset, slash - dataset);
+			pool[slash - dataset] = '\0';
+
+			/*
+			 * Walk the whole pool; subtrees already visited are
+			 * skipped via each vertex's zv_visited marker.
+			 */
+			(void) iterate_children(zgp, pool);
+			zfs_graph_add(zgp, pool, NULL, 0);
+
+			free(pool);
+		}
+	}
+	/* make sure the starting dataset itself exists as a vertex */
+	zfs_graph_add(zgp, dataset, NULL, 0);
+
+	return (zgp);
+}
+
+/*
+ * Given a graph, do a recursive topological sort into the given array. This is
+ * really just a depth first search, so that the deepest nodes appear first.
+ * We hijack the 'zv_visited' marker to avoid visiting the same vertex twice.
+ */
+static void
+topo_sort(char **result, size_t *idx, zfs_vertex_t *zgv)
+{
+ int i;
+
+ /* avoid doing a search if we don't have to */
+ if (zgv->zv_visited == 2)
+ return;
+
+ zfs_vertex_sort_edges(zgv);
+ for (i = 0; i < zgv->zv_edgecount; i++)
+ topo_sort(result, idx, zgv->zv_edges[i]->ze_dest);
+
+ /* we may have visited this in the course of the above */
+ if (zgv->zv_visited == 2)
+ return;
+
+ result[*idx] = zfs_malloc(strlen(zgv->zv_dataset) + 1);
+ (void) strcpy(result[*idx], zgv->zv_dataset);
+ *idx += 1;
+ zgv->zv_visited = 2;
+}
+
+/*
+ * The only public interface for this file. Do the dirty work of constructing a
+ * child list for the given object. Construct the graph, do the topological
+ * sort, and then return the array of strings to the caller.
+ */
+char **
+get_dependents(const char *dataset, size_t *count)
+{
+	char **result;
+	zfs_graph_t *zgp;
+	zfs_vertex_t *zvp;
+
+	/* build the dependency graph; zg_nvertex bounds the result size */
+	zgp = construct_graph(dataset);
+	result = zfs_malloc(zgp->zg_nvertex * sizeof (char *));
+
+	zvp = zfs_graph_lookup(zgp, dataset, 0);
+
+	*count = 0;
+	topo_sort(result, count, zvp);
+
+	/*
+	 * Get rid of the last entry, which is our starting vertex and not
+	 * strictly a dependent.  topo_sort() always emits the starting
+	 * vertex last, so *count is at least 1 here.
+	 */
+	assert(*count > 0);
+	free(result[*count - 1]);
+	(*count)--;
+
+	zfs_graph_destroy(zgp);
+
+	/* caller owns the returned array and each name in it */
+	return (result);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_impl.h b/usr/src/lib/libzfs/common/libzfs_impl.h
new file mode 100644
index 0000000000..3fdd98c997
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_impl.h
@@ -0,0 +1,103 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	_LIBFS_IMPL_H
+#define	_LIBFS_IMPL_H
+/* NOTE(review): guard says LIBFS but the file is libzfs_impl.h -- typo? */
+
+#pragma ident	"%Z%%M%	%I%	%E%	SMI"
+
+#include <sys/dmu.h>
+#include <sys/fs/zfs.h>
+#include <sys/zfs_ioctl.h>
+#include <sys/zfs_acl.h>
+#include <sys/nvpair.h>
+
+#include <libzfs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Private state behind the opaque zfs_handle_t of libzfs.h. */
+struct zfs_handle {
+	char zfs_name[ZFS_MAXNAMELEN];
+	zfs_type_t zfs_type;
+	dmu_objset_stats_t zfs_dmustats;
+	zfs_stats_t zfs_zplstats;
+	uint64_t zfs_volsize;
+	uint64_t zfs_volblocksize;	/* non-zero only for volumes */
+	char *zfs_mntopts;
+};
+
+/* Private state behind the opaque zpool_handle_t of libzfs.h. */
+struct zpool_handle {
+	char zpool_name[ZPOOL_MAXNAMELEN];
+	int zpool_state;
+	size_t zpool_config_size;
+	nvlist_t *zpool_config;
+};
+
+void zfs_error(const char *, ...);
+void zfs_fatal(const char *, ...);
+void *zfs_malloc(size_t);
+char *zfs_strdup(const char *);
+void no_memory(void);
+
+/* abort with file/line context on an errno no caller expects */
+#define	zfs_baderror(err)						\
+	(zfs_fatal(dgettext(TEXT_DOMAIN,				\
+	"internal error: unexpected error %d at line %d of %s"),	\
+	(err), (__LINE__), (__FILE__)))
+
+/*
+ * NOTE(review): 'zfs_fd', 'mnttab_file' and 'sharetab_file' are tentative
+ * definitions in a header, so every .c file including this emits a
+ * definition; this relies on common-symbol merging (-fcommon).  Under
+ * C17/-fno-common this is a multiple-definition error.  Consider 'extern'
+ * declarations here plus a single definition in one .c file -- confirm no
+ * translation unit depends on the current behavior first.
+ */
+int zfs_fd;
+
+char **get_dependents(const char *, size_t *);
+
+FILE *mnttab_file;
+FILE *sharetab_file;
+
+/* Opaque mount/share changelist used by rename and property changes. */
+typedef struct prop_changelist prop_changelist_t;
+
+int changelist_prefix(prop_changelist_t *);
+int changelist_postfix(prop_changelist_t *);
+void changelist_rename(prop_changelist_t *, const char *, const char *);
+void changelist_free(prop_changelist_t *);
+prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int);
+int changelist_unshare(prop_changelist_t *);
+int changelist_haszonedchild(prop_changelist_t *);
+
+void remove_mountpoint(zfs_handle_t *);
+
+zfs_handle_t *make_dataset_handle(const char *);
+void set_pool_health(nvlist_t *config);
+
+zpool_handle_t *zpool_open_silent(const char *pool);
+
+int zvol_create_link(const char *dataset);
+int zvol_remove_link(const char *dataset);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* _LIBFS_IMPL_H */
diff --git a/usr/src/lib/libzfs/common/libzfs_import.c b/usr/src/lib/libzfs/common/libzfs_import.c
new file mode 100644
index 0000000000..c71bc437f5
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_import.c
@@ -0,0 +1,753 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Pool import support functions.
+ *
+ * To import a pool, we rely on reading the configuration information from the
+ * ZFS label of each device. If we successfully read the label, then we
+ * organize the configuration information in the following hierarchy:
+ *
+ * pool guid -> toplevel vdev guid -> label txg
+ *
+ * Duplicate entries matching this same tuple will be discarded. Once we have
+ * examined every device, we pick the best label txg config for each toplevel
+ * vdev. We then arrange these toplevel vdevs into a complete pool config, and
+ * update any paths that have changed. Finally, we attempt to import the pool
+ * using our derived config, and record the results.
+ */
+
+#include <devid.h>
+#include <dirent.h>
+#include <errno.h>
+#include <libintl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <sys/vdev_impl.h>
+
+#include "libzfs.h"
+#include "libzfs_impl.h"
+
+/*
+ * Intermediate structures used to gather configuration information.
+ */
+typedef struct config_entry {
+	uint64_t		ce_txg;		/* label txg of this config */
+	nvlist_t		*ce_config;	/* unpacked label contents */
+	struct config_entry	*ce_next;
+} config_entry_t;
+
+/* one toplevel vdev, with every distinct label txg config seen for it */
+typedef struct vdev_entry {
+	uint64_t		ve_guid;
+	config_entry_t		*ve_configs;
+	struct vdev_entry	*ve_next;
+} vdev_entry_t;
+
+/* one pool guid, with every toplevel vdev discovered for it */
+typedef struct pool_entry {
+	uint64_t		pe_guid;
+	vdev_entry_t		*pe_vdevs;
+	struct pool_entry	*pe_next;
+} pool_entry_t;
+
+/* leaf vdev guid -> device path mapping, used to fix up stale paths */
+typedef struct name_entry {
+	const char		*ne_name;
+	uint64_t		ne_guid;
+	struct name_entry	*ne_next;
+} name_entry_t;
+
+/* everything gathered while scanning labels: pools plus path mappings */
+typedef struct pool_list {
+	pool_entry_t		*pools;
+	name_entry_t		*names;
+} pool_list_t;
+
+/*
+ * Return the devid string (devid_str_encode()) for the device at 'path',
+ * or NULL if the device cannot be opened or has no devid.  The caller
+ * frees the result with devid_str_free().
+ */
+static char *
+get_devid(const char *path)
+{
+	int fd;
+	ddi_devid_t devid;
+	char *minor, *ret;
+
+	if ((fd = open(path, O_RDONLY)) < 0)
+		return (NULL);
+
+	minor = NULL;
+	ret = NULL;
+	if (devid_get(fd, &devid) == 0) {
+		if (devid_get_minor_name(fd, &minor) == 0)
+			ret = devid_str_encode(devid, minor);
+		if (minor != NULL)
+			devid_str_free(minor);
+		devid_free(devid);
+	}
+
+	/* FIX: the descriptor was previously leaked on every path */
+	(void) close(fd);
+
+	return (ret);
+}
+
+
+/*
+ * Go through and fix up any path and/or devid information for the given vdev
+ * configuration.
+ */
+static void
+fix_paths(nvlist_t *nv, name_entry_t *names)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	uint64_t guid;
+	name_entry_t *ne;
+	char *devid;
+
+	/* interior vdev: recurse into the children and stop */
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			fix_paths(child[c], names);
+		return;
+	}
+
+	/*
+	 * This is a leaf (file or disk) vdev. In either case, go through
+	 * the name list and see if we find a matching guid. If so, replace
+	 * the path and see if we can calculate a new devid.
+	 */
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
+
+	for (ne = names; ne != NULL; ne = ne->ne_next)
+		if (ne->ne_guid == guid)
+			break;
+
+	/* no scanned device matched this guid; leave the config alone */
+	if (ne == NULL)
+		return;
+
+	verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, ne->ne_name) == 0);
+
+	/* a stale devid is worse than none: drop it if we can't rebuild it */
+	if ((devid = get_devid(ne->ne_name)) == NULL) {
+		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
+	} else {
+		verify(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) == 0);
+		devid_str_free(devid);
+	}
+}
+
+/*
+ * Add the given configuration to the list of known devices.
+ */
+static void
+add_config(pool_list_t *pl, const char *path, nvlist_t *config)
+{
+	uint64_t pool_guid, vdev_guid, top_guid, txg;
+	pool_entry_t *pe;
+	vdev_entry_t *ve;
+	config_entry_t *ce;
+	name_entry_t *ne;
+
+	/*
+	 * If we have a valid config but cannot read any of these fields, then
+	 * it means we have a half-initialized label. In vdev_label_init()
+	 * we write a label with txg == 0 so that we can identify the device
+	 * in case the user refers to the same disk later on. If we fail to
+	 * create the pool, we'll be left with a label in this state
+	 * which should not be considered part of a valid pool.
+	 *
+	 * Note: this function takes ownership of 'config'; it is freed on
+	 * every path that does not store it in the list.
+	 */
+	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+	    &pool_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+	    &vdev_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
+	    &top_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
+	    &txg) != 0 || txg == 0) {
+		nvlist_free(config);
+		return;
+	}
+
+	/*
+	 * First, see if we know about this pool. If not, then add it to the
+	 * list of known pools.
+	 */
+	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
+		if (pe->pe_guid == pool_guid)
+			break;
+	}
+
+	if (pe == NULL) {
+		pe = zfs_malloc(sizeof (pool_entry_t));
+		pe->pe_guid = pool_guid;
+		pe->pe_next = pl->pools;
+		pl->pools = pe;
+	}
+
+	/*
+	 * Second, see if we know about this toplevel vdev. Add it if it's
+	 * missing.
+	 */
+	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
+		if (ve->ve_guid == top_guid)
+			break;
+	}
+
+	if (ve == NULL) {
+		ve = zfs_malloc(sizeof (vdev_entry_t));
+		ve->ve_guid = top_guid;
+		ve->ve_next = pe->pe_vdevs;
+		pe->pe_vdevs = ve;
+	}
+
+	/*
+	 * Third, see if we have a config with a matching transaction group. If
+	 * so, then we do nothing. Otherwise, add it to the list of known
+	 * configs.
+	 */
+	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
+		if (ce->ce_txg == txg)
+			break;
+	}
+
+	if (ce == NULL) {
+		ce = zfs_malloc(sizeof (config_entry_t));
+		ce->ce_txg = txg;
+		ce->ce_config = config;	/* list now owns 'config' */
+		ce->ce_next = ve->ve_configs;
+		ve->ve_configs = ce;
+	} else {
+		/* duplicate (pool, vdev, txg) tuple: discard this copy */
+		nvlist_free(config);
+	}
+
+	/*
+	 * At this point we've successfully added our config to the list of
+	 * known configs. The last thing to do is add the vdev guid -> path
+	 * mappings so that we can fix up the configuration as necessary before
+	 * doing the import.
+	 */
+	ne = zfs_malloc(sizeof (name_entry_t));
+
+	ne->ne_name = zfs_strdup(path);
+	ne->ne_guid = vdev_guid;
+	ne->ne_next = pl->names;
+	pl->names = ne;
+}
+
+/*
+ * Convert our list of pools into the definitive set of configurations.  We
+ * start by picking the best config for each toplevel vdev.  Once that's done,
+ * we assemble the toplevel vdevs into a full config for the pool.  We make a
+ * pass to fix up any incorrect paths, and then add it to the main list to
+ * return to the user.
+ *
+ * All pool_entry_t / vdev_entry_t / config_entry_t state in 'pl' is consumed
+ * and freed as a side effect.  Returns an nvlist mapping pool name -> config;
+ * the caller owns the result.
+ */
+static nvlist_t *
+get_configs(pool_list_t *pl)
+{
+	pool_entry_t *pe, *penext;
+	vdev_entry_t *ve, *venext;
+	config_entry_t *ce, *cenext;
+	nvlist_t *ret, *config, *tmp, *nvtop, *nvroot;
+	int config_seen;
+	uint64_t best_txg;
+	char *name;
+	zfs_cmd_t zc = { 0 };
+	uint64_t guid;
+	char *packed;
+	size_t len;
+	int err;
+
+	verify(nvlist_alloc(&ret, 0, 0) == 0);
+
+	for (pe = pl->pools; pe != NULL; pe = penext) {
+		uint_t c;
+		uint_t children = 0;
+		uint64_t id;
+		nvlist_t **child = NULL;
+
+		penext = pe->pe_next;
+
+		verify(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
+		config_seen = FALSE;
+
+		/*
+		 * Iterate over all toplevel vdevs.  Grab the pool configuration
+		 * from the first one we find, and then go through the rest and
+		 * add them as necessary to the 'vdevs' member of the config.
+		 */
+		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
+			venext = ve->ve_next;
+
+			/*
+			 * Determine the best configuration for this vdev by
+			 * selecting the config with the latest transaction
+			 * group.  Note that 'best_txg' must be updated inside
+			 * the loop; previously it never was, which selected
+			 * the last config in the list rather than the newest.
+			 * (add_config() guarantees at least one config per
+			 * vdev entry, so 'tmp' is always assigned.)
+			 */
+			best_txg = 0;
+			for (ce = ve->ve_configs; ce != NULL;
+			    ce = ce->ce_next) {
+
+				if (ce->ce_txg > best_txg) {
+					tmp = ce->ce_config;
+					best_txg = ce->ce_txg;
+				}
+			}
+
+			if (!config_seen) {
+				/*
+				 * Copy the relevant pieces of data to the pool
+				 * configuration:
+				 *
+				 *	pool guid
+				 *	name
+				 *	pool state
+				 */
+				uint64_t state;
+
+				verify(nvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
+				verify(nvlist_add_uint64(config,
+				    ZPOOL_CONFIG_POOL_GUID, guid) == 0);
+				verify(nvlist_lookup_string(tmp,
+				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
+				verify(nvlist_add_string(config,
+				    ZPOOL_CONFIG_POOL_NAME, name) == 0);
+				verify(nvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
+				verify(nvlist_add_uint64(config,
+				    ZPOOL_CONFIG_POOL_STATE, state) == 0);
+
+				config_seen = TRUE;
+			}
+
+			/*
+			 * Add this top-level vdev to the child array.
+			 */
+			verify(nvlist_lookup_nvlist(tmp,
+			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
+			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
+			    &id) == 0);
+			if (id >= children) {
+				nvlist_t **newchild;
+
+				newchild = zfs_malloc((id + 1) *
+				    sizeof (nvlist_t *));
+
+				for (c = 0; c < children; c++)
+					newchild[c] = child[c];
+
+				free(child);
+				child = newchild;
+				children = id + 1;
+			}
+			verify(nvlist_dup(nvtop, &child[id], 0) == 0);
+
+			/*
+			 * Go through and free all config information.
+			 */
+			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
+				cenext = ce->ce_next;
+
+				nvlist_free(ce->ce_config);
+				free(ce);
+			}
+
+			/*
+			 * Free this vdev entry, since it has now been merged
+			 * into the main config.
+			 */
+			free(ve);
+		}
+
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+		    &guid) == 0);
+
+		/*
+		 * Look for any missing top-level vdevs.  If this is the case,
+		 * create a faked up 'missing' vdev as a placeholder.  We cannot
+		 * simply compress the child array, because the kernel performs
+		 * certain checks to make sure the vdev IDs match their location
+		 * in the configuration.
+		 */
+		for (c = 0; c < children; c++)
+			if (child[c] == NULL) {
+				nvlist_t *missing;
+				verify(nvlist_alloc(&missing, NV_UNIQUE_NAME,
+				    0) == 0);
+				verify(nvlist_add_string(missing,
+				    ZPOOL_CONFIG_TYPE, VDEV_TYPE_MISSING) == 0);
+				verify(nvlist_add_uint64(missing,
+				    ZPOOL_CONFIG_ID, c) == 0);
+				verify(nvlist_add_uint64(missing,
+				    ZPOOL_CONFIG_GUID, 0ULL) == 0);
+				child[c] = missing;
+			}
+
+		/*
+		 * Put all of this pool's top-level vdevs into a root vdev.
+		 */
+		verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
+		verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
+		    VDEV_TYPE_ROOT) == 0);
+		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
+		verify(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) == 0);
+		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+		    child, children) == 0);
+
+		for (c = 0; c < children; c++)
+			nvlist_free(child[c]);
+		free(child);
+
+		/*
+		 * Go through and fix up any paths and/or devids based on our
+		 * known list of vdev GUID -> path mappings.
+		 */
+		fix_paths(nvroot, pl->names);
+
+		/*
+		 * Add the root vdev to this pool's configuration.
+		 */
+		verify(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    nvroot) == 0);
+		nvlist_free(nvroot);
+
+		/*
+		 * Free this pool entry.
+		 */
+		free(pe);
+
+		/*
+		 * Determine if this pool is currently active, in which case we
+		 * can't actually import it.
+		 */
+		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+		    &guid) == 0);
+
+		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+		if (ioctl(zfs_fd, ZFS_IOC_POOL_GUID, &zc) == 0 &&
+		    guid == zc.zc_pool_guid) {
+			nvlist_free(config);
+			continue;
+		}
+
+		/*
+		 * Try to do the import in order to get vdev state.
+		 */
+		if ((err = nvlist_size(config, &len, NV_ENCODE_NATIVE)) != 0)
+			zfs_baderror(err);
+
+		packed = zfs_malloc(len);
+
+		if ((err = nvlist_pack(config, &packed, &len,
+		    NV_ENCODE_NATIVE, 0)) != 0)
+			zfs_baderror(err);
+
+		nvlist_free(config);
+		config = NULL;
+
+		zc.zc_config_src_size = len;
+		zc.zc_config_src = (uint64_t)(uintptr_t)packed;
+
+		zc.zc_config_dst_size = 2 * len;
+		zc.zc_config_dst = (uint64_t)(uintptr_t)
+		    zfs_malloc(zc.zc_config_dst_size);
+
+		/*
+		 * NOTE(review): this retry loop assumes the kernel updates
+		 * zc_config_dst_size to the required size on ENOMEM --
+		 * verify against the ZFS_IOC_POOL_TRYIMPORT handler.
+		 */
+		while ((err = ioctl(zfs_fd, ZFS_IOC_POOL_TRYIMPORT,
+		    &zc)) != 0 && errno == ENOMEM) {
+			free((void *)(uintptr_t)zc.zc_config_dst);
+			zc.zc_config_dst = (uint64_t)(uintptr_t)
+			    zfs_malloc(zc.zc_config_dst_size);
+		}
+
+		free(packed);
+
+		if (err)
+			zfs_baderror(errno);
+
+		verify(nvlist_unpack((void *)(uintptr_t)zc.zc_config_dst,
+		    zc.zc_config_dst_size, &config, 0) == 0);
+
+		set_pool_health(config);
+
+		/*
+		 * Re-lookup the name from the newly unpacked config: the old
+		 * 'name' pointed into the nvlist freed above, so using it for
+		 * nvlist_add_nvlist() was a use-after-free.
+		 */
+		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+
+		/*
+		 * Add this pool to the list of configs.
+		 */
+		verify(nvlist_add_nvlist(ret, name, config) == 0);
+
+		nvlist_free(config);
+
+		free((void *)(uintptr_t)zc.zc_config_dst);
+	}
+
+	return (ret);
+}
+
+/*
+ * Return the byte offset of label 'l' on a device of the given size.  The
+ * first half of the labels live at the front of the device; the remaining
+ * labels are packed at the very end.
+ */
+static uint64_t
+label_offset(size_t size, int l)
+{
+	uint64_t offset;
+
+	offset = (uint64_t)l * sizeof (vdev_label_t);
+	if (l >= VDEV_LABELS / 2)
+		offset += size - VDEV_LABELS * sizeof (vdev_label_t);
+
+	return (offset);
+}
+
+/*
+ * Given a file descriptor, read the label information and return an nvlist
+ * describing the configuration, if there is one.  Returns NULL if no label
+ * with a usable config can be read; the caller must nvlist_free() a non-NULL
+ * result.
+ */
+nvlist_t *
+zpool_read_label(int fd)
+{
+	struct stat64 statbuf;
+	int l;
+	vdev_label_t *label;
+	nvlist_t *config;
+	uint64_t version, state, txg;
+
+	if (fstat64(fd, &statbuf) == -1)
+		return (NULL);
+
+	label = zfs_malloc(sizeof (vdev_label_t));
+
+	/* Try each of the VDEV_LABELS copies until one passes all checks. */
+	for (l = 0; l < VDEV_LABELS; l++) {
+		if (pread(fd, label, sizeof (vdev_label_t),
+		    label_offset(statbuf.st_size, l)) != sizeof (vdev_label_t))
+			continue;
+
+		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
+		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0)
+			continue;
+
+		/* Reject labels from a different on-disk format version. */
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+		    &version) != 0 || version != UBERBLOCK_VERSION) {
+			nvlist_free(config);
+			continue;
+		}
+
+		/* Reject pool states beyond the ones we understand. */
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+		    &state) != 0 || state > POOL_STATE_EXPORTED) {
+			nvlist_free(config);
+			continue;
+		}
+
+		/* A txg of 0 is treated as an invalid label. */
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
+		    &txg) != 0 || txg == 0) {
+			nvlist_free(config);
+			continue;
+		}
+
+		free(label);
+		return (config);
+	}
+
+	free(label);
+	return (NULL);
+}
+
+/*
+ * Given a list of directories to search, find all pools stored on disk.  This
+ * includes partial pools which are not available to import.  If no args are
+ * given (argc is 0), then the default directory (/dev/dsk) is searched.
+ * Returns an nvlist of pool name -> config mappings (from get_configs()), or
+ * NULL if a directory could not be opened.
+ */
+nvlist_t *
+zpool_find_import(int argc, char **argv)
+{
+	int i;
+	DIR *dirp;
+	struct dirent64 *dp;
+	char path[MAXPATHLEN];
+	struct stat64 statbuf;
+	nvlist_t *ret, *config;
+	static char *default_dir = "/dev/dsk";
+	int fd;
+	pool_list_t pools = { 0 };
+
+	if (argc == 0) {
+		argc = 1;
+		argv = &default_dir;
+	}
+
+	/*
+	 * Go through and read the label configuration information from every
+	 * possible device, organizing the information according to pool GUID
+	 * and toplevel GUID.
+	 */
+	for (i = 0; i < argc; i++) {
+		if (argv[i][0] != '/') {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot open '%s': must be an absolute path"),
+			    argv[i]);
+			return (NULL);
+		}
+
+		if ((dirp = opendir(argv[i])) == NULL) {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot open '%s': %s"), argv[i],
+			    strerror(errno));
+			return (NULL);
+		}
+
+		/*
+		 * This is not MT-safe, but we have no MT consumers of libzfs
+		 */
+		while ((dp = readdir64(dirp)) != NULL) {
+
+			(void) snprintf(path, sizeof (path), "%s/%s",
+			    argv[i], dp->d_name);
+
+			if (stat64(path, &statbuf) != 0)
+				continue;
+
+			/*
+			 * Ignore directories (which includes "." and "..").
+			 */
+			if (S_ISDIR(statbuf.st_mode))
+				continue;
+
+			if ((fd = open64(path, O_RDONLY)) < 0)
+				continue;
+
+			config = zpool_read_label(fd);
+
+			(void) close(fd);
+
+			if (config != NULL)
+				add_config(&pools, path, config);
+		}
+
+		/* Done with this directory; don't leak the stream. */
+		(void) closedir(dirp);
+	}
+
+	ret = get_configs(&pools);
+
+	return (ret);
+}
+
+/*
+ * Search the given vdev tree for a vdev with the given guid.  Returns TRUE if
+ * this vdev or any of its descendents matches, FALSE otherwise.
+ */
+int
+find_guid(nvlist_t *nv, uint64_t guid)
+{
+	uint64_t this_guid;
+	nvlist_t **children;
+	uint_t i, nchildren;
+
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &this_guid) == 0);
+	if (this_guid == guid)
+		return (TRUE);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &children, &nchildren) != 0)
+		return (FALSE);
+
+	for (i = 0; i < nchildren; i++) {
+		if (find_guid(children[i], guid))
+			return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*
+ * Determines if the pool is in use.  If so, it returns TRUE and the state of
+ * the pool as well as the name of the pool.  Both strings are allocated and
+ * must be freed by the caller.  On FALSE, *statestr and *namestr are left
+ * untouched.
+ */
+int
+zpool_in_use(int fd, char **statestr, char **namestr)
+{
+	nvlist_t *config;
+	uint64_t state;
+	char *name;
+	int ret;
+	zfs_cmd_t zc = { 0 };
+	uint64_t guid, vdev_guid;
+	zpool_handle_t *zhp;
+	nvlist_t *pool_config;
+
+	if ((config = zpool_read_label(fd)) == NULL)
+		return (FALSE);
+
+	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+	    &name) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+	    &state) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+	    &guid) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+	    &vdev_guid) == 0);
+
+	switch (state) {
+	case POOL_STATE_EXPORTED:
+		*statestr = zfs_strdup(dgettext(TEXT_DOMAIN, "exported"));
+		*namestr = zfs_strdup(name);
+		ret = TRUE;
+		break;
+
+	case POOL_STATE_ACTIVE:
+		/*
+		 * For an active pool, we have to determine if it's really part
+		 * of an active pool (in which case the pool will exist and the
+		 * guid will be the same), or whether it's part of an active
+		 * pool that was disconnected without being explicitly exported.
+		 *
+		 * We use the direct ioctl() first to avoid triggering an error
+		 * message if the pool cannot be opened.
+		 */
+		(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+		if (ioctl(zfs_fd, ZFS_IOC_POOL_GUID, &zc) == 0 &&
+		    guid == zc.zc_pool_guid) {
+			/*
+			 * Because the device may have been removed while
+			 * offlined, we only report it as active if the vdev is
+			 * still present in the config.  Otherwise, pretend like
+			 * it's not in use.
+			 */
+			ret = FALSE;
+			if ((zhp = zpool_open_canfail(name)) != NULL) {
+				if ((pool_config =
+				    zpool_get_config(zhp)) != NULL) {
+					nvlist_t *nvroot;
+
+					verify(nvlist_lookup_nvlist(
+					    pool_config,
+					    ZPOOL_CONFIG_VDEV_TREE,
+					    &nvroot) == 0);
+					if (find_guid(nvroot, vdev_guid)) {
+						*statestr = zfs_strdup(
+						    dgettext(TEXT_DOMAIN,
+						    "active"));
+						*namestr = zfs_strdup(name);
+						ret = TRUE;
+					}
+				}
+
+				/*
+				 * Close the handle once we're done with its
+				 * config; it was previously leaked here.
+				 */
+				zpool_close(zhp);
+			}
+		} else {
+			*statestr = zfs_strdup(dgettext(TEXT_DOMAIN,
+			    "potentially active"));
+			*namestr = zfs_strdup(name);
+			ret = TRUE;
+		}
+		break;
+
+	default:
+		ret = FALSE;
+	}
+
+	nvlist_free(config);
+	return (ret);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_mount.c b/usr/src/lib/libzfs/common/libzfs_mount.c
new file mode 100644
index 0000000000..1f4bec2499
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_mount.c
@@ -0,0 +1,558 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Routines to manage ZFS mounts. We separate all the nasty routines that have
+ * to deal with the OS. The main entry points are:
+ *
+ * zfs_is_mounted()
+ * zfs_mount()
+ * zfs_unmount()
+ * zfs_unmountall()
+ *
+ * These functions are used by mount and unmount, and when changing a
+ * filesystem's mountpoint. This file also contains the functions used to
+ * manage sharing filesystems via NFS:
+ *
+ * zfs_is_shared()
+ * zfs_share()
+ * zfs_unshare()
+ * zfs_unshareall()
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <zone.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+
+/*
+ * The following two files are opened as part of zfs_init().  It's OK for
+ * the sharetab to be NULL, but mnttab must always be non-NULL.
+ */
+FILE *mnttab_file;
+FILE *sharetab_file;
+
+/*
+ * Search the sharetab for the given mountpoint, returning 1 if it is found
+ * and 0 otherwise (including when no sharetab is open).
+ */
+static int
+is_shared(const char *mountpoint)
+{
+	char line[MAXPATHLEN];
+	char *sep;
+
+	if (sharetab_file == NULL)
+		return (0);
+
+	/* Always scan from the top of the sharetab. */
+	(void) fseek(sharetab_file, 0, SEEK_SET);
+
+	while (fgets(line, sizeof (line), sharetab_file) != NULL) {
+		/*
+		 * The shared path is the first tab-delimited field on each
+		 * line.
+		 */
+		sep = strchr(line, '\t');
+		if (sep == NULL)
+			continue;
+
+		*sep = '\0';
+		if (strcmp(line, mountpoint) == 0)
+			return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * Returns TRUE if the specified directory is empty.  If we can't open the
+ * directory at all, return TRUE so that the mount can fail with a more
+ * informative error message.
+ */
+static int
+dir_is_empty(const char *dirname)
+{
+	DIR *dirp;
+	struct dirent64 *entry;
+	int empty = TRUE;
+
+	if ((dirp = opendir(dirname)) == NULL)
+		return (TRUE);
+
+	/* Anything other than "." and ".." means the directory is in use. */
+	while (empty && (entry = readdir64(dirp)) != NULL) {
+		if (strcmp(entry->d_name, ".") != 0 &&
+		    strcmp(entry->d_name, "..") != 0)
+			empty = FALSE;
+	}
+
+	(void) closedir(dirp);
+	return (empty);
+}
+
+/*
+ * Checks to see if the mount is active.  If the filesystem is mounted, fill
+ * in 'where' (when non-NULL) with a freshly allocated copy of the current
+ * mountpoint and return TRUE; otherwise return FALSE.
+ */
+int
+zfs_is_mounted(zfs_handle_t *zhp, char **where)
+{
+	struct mnttab want = { 0 };
+	struct mnttab found;
+
+	/*
+	 * Match on the special device (the dataset name) rather than the
+	 * mountpoint; this also finds mounts whose mountpoint is 'legacy'.
+	 */
+	want.mnt_special = (char *)zfs_get_name(zhp);
+
+	rewind(mnttab_file);
+	if (getmntany(mnttab_file, &found, &want) != 0)
+		return (FALSE);
+
+	if (where != NULL)
+		*where = zfs_strdup(found.mnt_mountp);
+
+	return (TRUE);
+}
+
+/*
+ * Mount the given filesystem at the path given by its 'mountpoint' property.
+ * 'options' is an optional comma-separated option string; 'flags' is passed
+ * through to mount(2).  Returns 0 on success (including the no-op cases) and
+ * 1 on failure, after printing an error.
+ */
+int
+zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
+{
+	struct stat buf;
+	char mountpoint[ZFS_MAXPROPLEN];
+	char mntopts[MNT_LINE_MAX];
+
+	if (options == NULL)
+		mntopts[0] = '\0';
+	else
+		(void) strlcpy(mntopts, options, sizeof (mntopts));
+
+	/* ignore non-filesystems */
+	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+	    sizeof (mountpoint), NULL, NULL, 0, FALSE) != 0)
+		return (0);
+
+	/* return success if there is no mountpoint set */
+	if (strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) == 0 ||
+	    strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0)
+		return (0);
+
+	/*
+	 * If the 'zoned' property is set, and we're in the global zone, simply
+	 * return success.
+	 */
+	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+		char zonename[ZONENAME_MAX];
+		if (getzonenamebyid(getzoneid(), zonename,
+		    sizeof (zonename)) < 0) {
+			zfs_error(dgettext(TEXT_DOMAIN, "internal error: "
+			    "cannot determine current zone"));
+			return (1);
+		}
+
+		if (strcmp(zonename, "global") == 0)
+			return (0);
+	}
+
+	/* Create the directory if it doesn't already exist */
+	if (lstat(mountpoint, &buf) != 0) {
+		if (mkdirp(mountpoint, 0755) != 0) {
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot mount '%s': "
+			    "unable to create mountpoint"), mountpoint);
+			return (1);
+		}
+	}
+
+	/*
+	 * Determine if the mountpoint is empty.  If so, refuse to perform the
+	 * mount.  We don't perform this check if MS_OVERLAY is specified, which
+	 * would defeat the point.  We also avoid this check if 'remount' is
+	 * specified.
+	 */
+	if ((flags & MS_OVERLAY) == 0 &&
+	    strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
+	    !dir_is_empty(mountpoint)) {
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot mount '%s': "
+		    "directory is not empty"), mountpoint);
+		zfs_error(dgettext(TEXT_DOMAIN, "use legacy mountpoint to "
+		    "allow this behavior, or use the -O flag"));
+		return (1);
+	}
+
+	/* perform the mount */
+	if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
+	    MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
+		/*
+		 * Generic errors are nasty, but there are just way too many
+		 * from mount(), and they're well-understood.  We pick a few
+		 * common ones to improve upon.
+		 */
+		switch (errno) {
+		case EBUSY:
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot mount '%s': "
+			    "mountpoint '%s' is busy"), zhp->zfs_name,
+			    mountpoint);
+			break;
+		case EPERM:
+		case EACCES:
+			/*
+			 * The format string takes a single argument; the
+			 * stray 'mountpoint' argument has been removed.
+			 */
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot mount '%s': "
+			    "permission denied"), zhp->zfs_name);
+			break;
+		default:
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot mount '%s': %s"),
+			    mountpoint, strerror(errno));
+			break;
+		}
+		return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * Unmount the given filesystem.  If 'mountpoint' is NULL, the current
+ * mountpoint is looked up in mnttab; if the dataset is not a mounted
+ * filesystem this is a successful no-op.  Returns 0 on success, -1 on
+ * failure.
+ */
+int
+zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
+{
+	struct mnttab search = { 0 }, entry;
+
+	/* check to see if we need to unmount the filesystem */
+	search.mnt_special = (char *)zfs_get_name(zhp);
+	rewind(mnttab_file);
+	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+	    getmntany(mnttab_file, &entry, &search) == 0)) {
+
+		if (mountpoint == NULL)
+			mountpoint = entry.mnt_mountp;
+
+		/*
+		 * Always unshare the filesystem first.
+		 */
+		if (zfs_unshare(zhp, mountpoint) != 0)
+			return (-1);
+
+		/*
+		 * Try to unmount the filesystem.  There is no reason to try a
+		 * forced unmount because the vnodes will still carry a
+		 * reference to the underlying dataset, so we can't destroy it
+		 * anyway.
+		 *
+		 * In the unmount case, we print out a slightly more informative
+		 * error message, though we'll be relying on the poor error
+		 * semantics from the kernel.
+		 */
+		if (umount2(mountpoint, flags) != 0) {
+			zfs_error(dgettext(TEXT_DOMAIN,
+			    "cannot unmount '%s': %s"),
+			    mountpoint, strerror(errno));
+			return (-1);
+		}
+
+		/*
+		 * Don't actually destroy the underlying directory
+		 */
+	}
+
+	return (0);
+}
+
+/*
+ * Unmount this filesystem and any children inheriting the mountpoint property.
+ * To do this, just act like we're changing the mountpoint property, but don't
+ * remount the filesystems afterwards.
+ */
+int
+zfs_unmountall(zfs_handle_t *zhp, int flags)
+{
+	prop_changelist_t *list;
+	int error;
+
+	if ((list = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
+	    flags)) == NULL)
+		return (-1);
+
+	error = changelist_prefix(list);
+	changelist_free(list);
+
+	return (error);
+}
+
+/*
+ * Check to see if the filesystem is currently shared.  On TRUE, a non-NULL
+ * 'where' receives the allocated mountpoint (caller frees); otherwise it is
+ * freed here.
+ */
+int
+zfs_is_shared(zfs_handle_t *zhp, char **where)
+{
+	char *mntpt;
+
+	if (!zfs_is_mounted(zhp, &mntpt))
+		return (FALSE);
+
+	if (!is_shared(mntpt)) {
+		free(mntpt);
+		return (FALSE);
+	}
+
+	if (where == NULL)
+		free(mntpt);
+	else
+		*where = mntpt;
+
+	return (TRUE);
+}
+
+/*
+ * Share the given filesystem according to the options in 'sharenfs'.  We rely
+ * on share(1M) to do the dirty work for us.  Returns 0 on success (including
+ * the various no-op cases) and -1 on failure.
+ */
+int
+zfs_share(zfs_handle_t *zhp)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	char shareopts[ZFS_MAXPROPLEN];
+	char buf[MAXPATHLEN];
+	FILE *fp;
+
+	/* ignore non-filesystems */
+	if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM)
+		return (0);
+
+	/* return success if there is no mountpoint set */
+	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
+	    mountpoint, sizeof (mountpoint), NULL, NULL, 0, FALSE) != 0 ||
+	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) == 0 ||
+	    strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0)
+		return (0);
+
+	/* return success if there are no share options */
+	if (zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts, sizeof (shareopts),
+	    NULL, NULL, 0, FALSE) != 0 ||
+	    strcmp(shareopts, "off") == 0)
+		return (0);
+
+	/*
+	 * If the 'zoned' property is set, simply return success since:
+	 * 1. in a global zone, a dataset should not be shared if it's
+	 *    managed in a local zone.
+	 * 2. in a local zone, NFS server is not available.
+	 */
+	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+		return (0);
+	}
+
+	/*
+	 * Invoke the share(1M) command.  We always do this, even if it's
+	 * currently shared, as the options may have changed.
+	 */
+	if (strcmp(shareopts, "on") == 0)
+		(void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
+		    "-F nfs \"%s\" 2>&1", mountpoint);
+	else
+		(void) snprintf(buf, sizeof (buf), "/usr/sbin/share "
+		    "-F nfs -o \"%s\" \"%s\" 2>&1", shareopts,
+		    mountpoint);
+
+	if ((fp = popen(buf, "r")) == NULL) {
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot share '%s': "
+		    "share(1M) failed"), zfs_get_name(zhp));
+		return (-1);
+	}
+
+	/*
+	 * share(1M) should only produce output if there is some kind
+	 * of error.  All output begins with "share_nfs: ", so we trim
+	 * this off to get to the real error.
+	 *
+	 * NOTE(review): 'colon + 2' assumes the output always has ": "
+	 * followed by a message; if ':' were the final character this
+	 * would read past the message text -- verify share(1M) output
+	 * format.
+	 */
+	if (fgets(buf, sizeof (buf), fp) != NULL) {
+		char *colon = strchr(buf, ':');
+
+		/* strip any trailing newlines before printing */
+		while (buf[strlen(buf) - 1] == '\n')
+			buf[strlen(buf) - 1] = '\0';
+
+		if (colon == NULL)
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot share "
+			    "'%s': share(1M) failed"),
+			    zfs_get_name(zhp));
+		else
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot share "
+			    "'%s': %s"), zfs_get_name(zhp),
+			    colon + 2);
+
+		verify(pclose(fp) != 0);
+		return (-1);
+	}
+
+	verify(pclose(fp) == 0);
+
+	return (0);
+}
+
+/*
+ * Unshare the given filesystem, invoking unshare(1M) to do the work.  If
+ * 'mountpoint' is NULL, the current mountpoint is looked up in mnttab.
+ * Returns 0 on success (including when not shared), -1 on failure.
+ */
+int
+zfs_unshare(zfs_handle_t *zhp, const char *mountpoint)
+{
+	char buf[MAXPATHLEN];
+	struct mnttab search = { 0 }, entry;
+
+	/* determine the mountpoint: caller-supplied or looked up in mnttab */
+	search.mnt_special = (char *)zfs_get_name(zhp);
+	rewind(mnttab_file);
+	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+	    getmntany(mnttab_file, &entry, &search) == 0)) {
+
+		if (mountpoint == NULL)
+			mountpoint = entry.mnt_mountp;
+
+		if (is_shared(mountpoint)) {
+			FILE *fp;
+
+			(void) snprintf(buf, sizeof (buf),
+			    "/usr/sbin/unshare \"%s\" 2>&1",
+			    mountpoint);
+
+			if ((fp = popen(buf, "r")) == NULL) {
+				zfs_error(dgettext(TEXT_DOMAIN, "cannot "
+				    "unshare '%s': unshare(1M) failed"),
+				    zfs_get_name(zhp));
+				return (-1);
+			}
+
+			/*
+			 * unshare(1M) should only produce output if there is
+			 * some kind of error.  All output begins with "unshare
+			 * nfs: ", so we trim this off to get to the real error.
+			 *
+			 * NOTE(review): 'colon + 2' assumes ": " is always
+			 * followed by message text -- verify unshare(1M)
+			 * output format.
+			 */
+			if (fgets(buf, sizeof (buf), fp) != NULL) {
+				char *colon = strchr(buf, ':');
+
+				while (buf[strlen(buf) - 1] == '\n')
+					buf[strlen(buf) - 1] = '\0';
+
+				if (colon == NULL)
+					zfs_error(dgettext(TEXT_DOMAIN,
+					    "cannot unshare '%s': unshare(1M) "
+					    "failed"), zfs_get_name(zhp));
+				else
+					zfs_error(dgettext(TEXT_DOMAIN,
+					    "cannot unshare '%s': %s"),
+					    zfs_get_name(zhp), colon + 2);
+
+				verify(pclose(fp) != 0);
+				return (-1);
+			}
+
+			verify(pclose(fp) == 0);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Same as zfs_unmountall(), but for unshares.
+ */
+int
+zfs_unshareall(zfs_handle_t *zhp)
+{
+	prop_changelist_t *list;
+	int error;
+
+	if ((list = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0)) == NULL)
+		return (-1);
+
+	error = changelist_unshare(list);
+	changelist_free(list);
+
+	return (error);
+}
+
+/*
+ * Remove the mountpoint associated with the current dataset, if necessary.
+ * We only remove the underlying directory if:
+ *
+ *	- The mountpoint is not 'none' or 'legacy'
+ *	- The mountpoint is non-empty
+ *	- The mountpoint is the default or inherited
+ *	- The 'zoned' property is set, or we're in a local zone
+ *
+ * Any other directories we leave alone.
+ */
+void
+remove_mountpoint(zfs_handle_t *zhp)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	char source[ZFS_MAXNAMELEN];
+	zfs_source_t sourcetype;
+	char zonename[ZONENAME_MAX];
+
+	/* ignore non-filesystems */
+	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+	    sizeof (mountpoint), &sourcetype, source, sizeof (source),
+	    FALSE) != 0)
+		return;
+
+	/* zfs_fatal() does not return, so zonename is valid below */
+	if (getzonenamebyid(getzoneid(), zonename, sizeof (zonename)) < 0)
+		zfs_fatal(dgettext(TEXT_DOMAIN, "internal error: "
+		    "cannot determine current zone"));
+
+	/* apply the conditions listed in the block comment above */
+	if (strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0 &&
+	    strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
+	    (sourcetype == ZFS_SRC_DEFAULT ||
+	    sourcetype == ZFS_SRC_INHERITED) &&
+	    (!zfs_prop_get_int(zhp, ZFS_PROP_ZONED) ||
+	    strcmp(zonename, "global") != 0)) {
+
+		/*
+		 * Try to remove the directory, silently ignoring any errors.
+		 * The filesystem may have since been removed or moved around,
+		 * and this isn't really useful to the administrator in any
+		 * way.
+		 */
+		(void) rmdir(mountpoint);
+	}
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_pool.c b/usr/src/lib/libzfs/common/libzfs_pool.c
new file mode 100644
index 0000000000..6b6f381bb1
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_pool.c
@@ -0,0 +1,1154 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <devid.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/zfs_ioctl.h>
+
+#include "zfs_namecheck.h"
+#include "libzfs_impl.h"
+
+/*
+ * Validate the given pool name, optionally putting an extended error message
+ * in 'buf'.  Returns TRUE if the name is valid, FALSE otherwise; 'buf' may be
+ * NULL if no message is wanted.
+ */
+static int
+zpool_name_valid(const char *pool, char *buf, size_t buflen)
+{
+	namecheck_err_t why;
+	char what;
+
+	if (strlen(pool) >= ZPOOL_MAXNAMELEN) {
+		if (buf)
+			(void) snprintf(buf, buflen,
+			    dgettext(TEXT_DOMAIN, "name is too long"));
+		return (FALSE);
+	}
+
+	if (pool_namecheck(pool, &why, &what) != 0) {
+		if (buf != NULL) {
+			switch (why) {
+			case NAME_ERR_INVALCHAR:
+				(void) snprintf(buf, buflen,
+				    dgettext(TEXT_DOMAIN, "invalid character "
+				    "'%c' in pool name"), what);
+				break;
+
+			case NAME_ERR_NOLETTER:
+				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+				    "name must begin with a letter"), buflen);
+				break;
+
+			case NAME_ERR_RESERVED:
+				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+				    "name is reserved\n"
+				    "pool name may have been omitted"), buflen);
+				break;
+
+			case NAME_ERR_DISKLIKE:
+				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+				    "pool name is reserved\n"
+				    "pool name may have been omitted"), buflen);
+				break;
+
+			default:
+				/*
+				 * Previously there was no default case, so
+				 * 'buf' (often an uninitialized stack buffer
+				 * in callers) was left untouched for any
+				 * other namecheck error.
+				 */
+				(void) strlcpy(buf, dgettext(TEXT_DOMAIN,
+				    "invalid pool name"), buflen);
+				break;
+			}
+		}
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+/*
+ * Set the pool-wide health based on the vdev state of the root vdev, storing
+ * it in the config as ZPOOL_CONFIG_POOL_HEALTH.
+ */
+void
+set_pool_health(nvlist_t *config)
+{
+	nvlist_t *vdev_root;
+	vdev_stat_t *stats;
+	uint_t nstats;
+	char *health;
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &vdev_root) == 0);
+	verify(nvlist_lookup_uint64_array(vdev_root, ZPOOL_CONFIG_STATS,
+	    (uint64_t **)&stats, &nstats) == 0);
+
+	switch (stats->vs_state) {
+	case VDEV_STATE_CLOSED:
+	case VDEV_STATE_CANT_OPEN:
+	case VDEV_STATE_OFFLINE:
+		health = dgettext(TEXT_DOMAIN, "FAULTED");
+		break;
+
+	case VDEV_STATE_DEGRADED:
+		health = dgettext(TEXT_DOMAIN, "DEGRADED");
+		break;
+
+	case VDEV_STATE_HEALTHY:
+		health = dgettext(TEXT_DOMAIN, "ONLINE");
+		break;
+
+	default:
+		/* any other state is a programming error */
+		zfs_baderror(stats->vs_state);
+	}
+
+	verify(nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH,
+	    health) == 0);
+}
+
+/*
+ * Open a handle to the given pool, even if the pool is currently in the
+ * FAULTED state (the handle's zpool_state is set to POOL_STATE_UNAVAIL in
+ * that case).  Returns NULL, with an error printed, if the name is invalid
+ * or the pool does not exist.
+ */
+zpool_handle_t *
+zpool_open_canfail(const char *pool)
+{
+	zpool_handle_t *zhp;
+	nvlist_t *newconfig;
+	int error;
+
+	/*
+	 * Make sure the pool name is valid.
+	 */
+	if (!zpool_name_valid(pool, NULL, 0)) {
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': invalid "
+		    "pool name"), pool);
+		return (NULL);
+	}
+
+	zhp = zfs_malloc(sizeof (zpool_handle_t));
+
+	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+	/*
+	 * NOTE(review): 'newconfig' is never examined or freed here --
+	 * confirm zpool_refresh_stats() ownership semantics.
+	 */
+	if ((error = zpool_refresh_stats(zhp, NULL, &newconfig)) != 0) {
+		if (error == ENOENT || error == EINVAL) {
+			zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': no "
+			    "such pool"), pool);
+			free(zhp);
+			return (NULL);
+		} else {
+			/* pool exists but couldn't be opened cleanly */
+			zhp->zpool_state = POOL_STATE_UNAVAIL;
+		}
+	} else {
+		zhp->zpool_state = POOL_STATE_ACTIVE;
+	}
+
+	return (zhp);
+}
+
+/*
+ * Like the above, but silent on error.  Used when iterating over pools (because
+ * the configuration cache may be out of date).  Unlike zpool_open_canfail(),
+ * the pool name is not validated here.
+ */
+zpool_handle_t *
+zpool_open_silent(const char *pool)
+{
+	zpool_handle_t *zhp;
+	nvlist_t *newconfig;
+	int error;
+
+	zhp = zfs_malloc(sizeof (zpool_handle_t));
+
+	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+	/*
+	 * NOTE(review): 'newconfig' is never examined or freed here --
+	 * confirm zpool_refresh_stats() ownership semantics.
+	 */
+	if ((error = zpool_refresh_stats(zhp, NULL, &newconfig)) != 0) {
+		if (error == ENOENT || error == EINVAL) {
+			/* no such pool; fail silently */
+			free(zhp);
+			return (NULL);
+		} else {
+			zhp->zpool_state = POOL_STATE_UNAVAIL;
+		}
+	} else {
+		zhp->zpool_state = POOL_STATE_ACTIVE;
+	}
+
+	return (zhp);
+}
+
+/*
+ * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
+ * state, printing an error and returning NULL instead.
+ */
+zpool_handle_t *
+zpool_open(const char *pool)
+{
+	zpool_handle_t *zhp;
+
+	if ((zhp = zpool_open_canfail(pool)) == NULL)
+		return (NULL);
+
+	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
+		/* message previously read "' %s'" with a stray space */
+		zfs_error(dgettext(TEXT_DOMAIN, "cannot open '%s': pool is "
+		    "currently unavailable\n"), zhp->zpool_name);
+		zfs_error(dgettext(TEXT_DOMAIN, "run 'zpool status -v %s' for "
+		    "detailed information\n"), zhp->zpool_name);
+		zpool_close(zhp);
+		return (NULL);
+	}
+
+	return (zhp);
+}
+
+/*
+ * Close the handle.  Simply frees the memory associated with the handle;
+ * the pool itself is untouched.
+ */
+void
+zpool_close(zpool_handle_t *zhp)
+{
+	if (zhp->zpool_config != NULL)
+		nvlist_free(zhp->zpool_config);
+
+	free(zhp);
+}
+
+/*
+ * Return the name of the pool.  The string lives inside the handle and
+ * remains valid until zpool_close().
+ */
+const char *
+zpool_get_name(zpool_handle_t *zhp)
+{
+	return (zhp->zpool_name);
+}
+
+/*
+ * Return the GUID of the pool, as recorded in the handle's cached
+ * configuration.
+ */
+uint64_t
+zpool_get_guid(zpool_handle_t *zhp)
+{
+	uint64_t pool_guid;
+
+	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
+	    &pool_guid) == 0);
+
+	return (pool_guid);
+}
+
+/*
+ * Return the amount of space currently consumed by the pool, taken from the
+ * root vdev's stats in the cached configuration.
+ */
+uint64_t
+zpool_get_space_used(zpool_handle_t *zhp)
+{
+	nvlist_t *root;
+	vdev_stat_t *stats;
+	uint_t nstats;
+
+	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &root) == 0);
+	verify(nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_STATS,
+	    (uint64_t **)&stats, &nstats) == 0);
+
+	return (stats->vs_alloc);
+}
+
+/*
+ * Return the total space in the pool, taken from the root vdev's stats in the
+ * cached configuration.
+ */
+uint64_t
+zpool_get_space_total(zpool_handle_t *zhp)
+{
+	nvlist_t *root;
+	vdev_stat_t *stats;
+	uint_t nstats;
+
+	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &root) == 0);
+	verify(nvlist_lookup_uint64_array(root, ZPOOL_CONFIG_STATS,
+	    (uint64_t **)&stats, &nstats) == 0);
+
+	return (stats->vs_space);
+}
+
+/*
+ * Return the alternate root for this pool, if any.
+ */
+int
+zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
+{
+ zfs_cmd_t zc = { 0 };
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ if (ioctl(zfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
+ zc.zc_objset_stats.dds_altroot[0] == '\0')
+ return (-1);
+
+ (void) strlcpy(buf, zc.zc_objset_stats.dds_altroot, buflen);
+
+ return (0);
+}
+
/*
 * Return the state of the pool (POOL_STATE_ACTIVE or POOL_STATE_UNAVAIL),
 * as recorded when the handle was opened.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
+
+/*
+ * Create the named pool, using the provided vdev list. It is assumed
+ * that the consumer has already validated the contents of the nvlist, so we
+ * don't have to worry about error semantics.
+ */
+int
+zpool_create(const char *pool, nvlist_t *nvroot, const char *altroot)
+{
+ zfs_cmd_t zc = { 0 };
+ char *packed;
+ size_t len;
+ int err;
+ char reason[64];
+
+ if (!zpool_name_valid(pool, reason, sizeof (reason))) {
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': %s"),
+ pool, reason);
+ return (-1);
+ }
+
+ if (altroot != NULL && altroot[0] != '/') {
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': alternate "
+ "root '%s' must be a complete path"), pool, altroot);
+ return (-1);
+ }
+
+ if ((err = nvlist_size(nvroot, &len, NV_ENCODE_NATIVE)) != 0)
+ zfs_baderror(err);
+
+ packed = zfs_malloc(len);
+
+ if ((err = nvlist_pack(nvroot, &packed, &len,
+ NV_ENCODE_NATIVE, 0)) != 0)
+ zfs_baderror(err);
+
+ (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
+ zc.zc_config_src = (uint64_t)(uintptr_t)packed;
+ zc.zc_config_src_size = len;
+
+ if (altroot != NULL)
+ (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
+
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
+ switch (errno) {
+ case EEXIST:
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "pool exists"), pool);
+ break;
+
+ case EPERM:
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "permission denied"), pool);
+ break;
+
+ case EBUSY:
+ /*
+ * This can happen if the user has specified the same
+ * device multiple times. We can't reliably detect this
+ * until we try to add it and see we already have a
+ * label.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "one or more vdevs refer to the same device"),
+ pool);
+ break;
+
+ case EOVERFLOW:
+ /*
+ * This occurrs when one of the devices is below
+ * SPA_MINDEVSIZE. Unfortunately, we can't detect which
+ * device was the problem device since there's no
+ * reliable way to determine device size from userland.
+ */
+ {
+ char buf[64];
+
+ zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot "
+ "create '%s': one or more devices is less "
+ "than the minimum size (%s)"), pool,
+ buf);
+ }
+ break;
+
+ case ENAMETOOLONG:
+ /*
+ * One of the vdevs has exceeded VDEV_SPEC_MAX length in
+ * its plaintext representation.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "too many devices in a single vdev"), pool);
+ break;
+
+ case EIO:
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "I/O error on one or more devices"), pool);
+ break;
+
+ case ENXIO:
+ /*
+ * This is unlikely to happen since we've verified that
+ * all the devices can be opened from userland, but it's
+ * still possible in some circumstances.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "one or more devices is unavailable"), pool);
+ break;
+
+ case ENOSPC:
+ /*
+ * This can occur if we were incapable of writing to a
+ * file vdev because the underlying filesystem is out of
+ * space. This is very similar to EOVERFLOW, but we'll
+ * produce a slightly different message.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot create '%s': "
+ "one or more devices is out of space"), pool);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (-1);
+ }
+
+ free(packed);
+
+ /*
+ * If this is an alternate root pool, then we automatically set the
+ * moutnpoint of the root dataset to be '/'.
+ */
+ if (altroot != NULL) {
+ zfs_handle_t *zhp;
+
+ verify((zhp = zfs_open(pool, ZFS_TYPE_ANY)) != NULL);
+ verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);
+
+ zfs_close(zhp);
+ }
+
+ return (0);
+}
+
+/*
+ * Destroy the given pool. It is up to the caller to ensure that there are no
+ * datasets left in the pool.
+ */
+int
+zpool_destroy(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+ zfs_handle_t *zfp = NULL;
+
+ if (zhp->zpool_state == POOL_STATE_ACTIVE &&
+ (zfp = zfs_open(zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
+ return (-1);
+
+ if (zpool_remove_zvol_links(zhp) != NULL)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
+ switch (errno) {
+ case EPERM:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': permission denied"),
+ zhp->zpool_name);
+ break;
+
+ case EBUSY:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': pool busy"),
+ zhp->zpool_name);
+ break;
+
+ case ENOENT:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': no such pool"),
+ zhp->zpool_name);
+ break;
+
+ case EROFS:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot destroy '%s': one or more devices is "
+ "read only, or '/' is mounted read only"),
+ zhp->zpool_name);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ if (zfp)
+ zfs_close(zfp);
+ return (-1);
+ }
+
+ if (zfp) {
+ remove_mountpoint(zfp);
+ zfs_close(zfp);
+ }
+
+ return (0);
+}
+
+/*
+ * Add the given vdevs to the pool. The caller must have already performed the
+ * necessary verification to ensure that the vdev specification is well-formed.
+ */
+int
+zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
+{
+ char *packed;
+ size_t len;
+ zfs_cmd_t zc;
+
+ verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);
+
+ packed = zfs_malloc(len);
+
+ verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_config_src = (uint64_t)(uintptr_t)packed;
+ zc.zc_config_src_size = len;
+
+ if (ioctl(zfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
+ switch (errno) {
+ case EPERM:
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
+ "permission denied"), zhp->zpool_name);
+ break;
+
+ case EBUSY:
+ /*
+ * This can happen if the user has specified the same
+ * device multiple times. We can't reliably detect this
+ * until we try to add it and see we already have a
+ * label.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
+ "one or more vdevs refer to the same device"),
+ zhp->zpool_name);
+ break;
+
+ case ENAMETOOLONG:
+ /*
+ * One of the vdevs has exceeded VDEV_SPEC_MAX length in
+ * its plaintext representation.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
+ "too many devices in a single vdev"),
+ zhp->zpool_name);
+ break;
+
+ case ENXIO:
+ /*
+ * This is unlikely to happen since we've verified that
+ * all the devices can be opened from userland, but it's
+ * still possible in some circumstances.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot add to '%s': "
+ "one or more devices is unavailable"),
+ zhp->zpool_name);
+ break;
+
+ case EOVERFLOW:
+ /*
+ * This occurrs when one of the devices is below
+ * SPA_MINDEVSIZE. Unfortunately, we can't detect which
+ * device was the problem device since there's no
+ * reliable way to determine device size from userland.
+ */
+ {
+ char buf[64];
+
+ zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot "
+ "add to '%s': one or more devices is less "
+ "than the minimum size (%s)"),
+ zhp->zpool_name, buf);
+ }
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (-1);
+ }
+
+ free(packed);
+
+ return (0);
+}
+
+/*
+ * Exports the pool from the system. The caller must ensure that there are no
+ * mounted datasets in the pool.
+ */
+int
+zpool_export(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc = { 0 };
+
+ if (zpool_remove_zvol_links(zhp) != 0)
+ return (-1);
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
+ switch (errno) {
+ case EPERM:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot export '%s': permission denied"),
+ zhp->zpool_name);
+ break;
+
+ case EBUSY:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot export '%s': pool is in use"),
+ zhp->zpool_name);
+ break;
+
+ case ENOENT:
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "cannot export '%s': no such pool"),
+ zhp->zpool_name);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * Import the given pool using the known configuration. The configuration
+ * should have come from zpool_find_import(). The 'newname' and 'altroot'
+ * parameters control whether the pool is imported with a different name or with
+ * an alternate root, respectively.
+ */
+int
+zpool_import(nvlist_t *config, const char *newname, const char *altroot)
+{
+ zfs_cmd_t zc;
+ char *packed;
+ size_t len;
+ char *thename;
+ char *origname;
+ int ret;
+
+ verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+ &origname) == 0);
+
+ if (newname != NULL) {
+ if (!zpool_name_valid(newname, NULL, 0)) {
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': "
+ "invalid pool name"), newname);
+ return (-1);
+ }
+ thename = (char *)newname;
+ } else {
+ thename = origname;
+ }
+
+ if (altroot != NULL && altroot[0] != '/') {
+ zfs_error(dgettext(TEXT_DOMAIN, "cannot import '%s': alternate "
+ "root '%s' must be a complete path"), thename,
+ altroot);
+ return (-1);
+ }
+
+ (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
+
+ if (altroot != NULL)
+ (void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));
+ else
+ zc.zc_root[0] = '\0';
+
+ verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+ &zc.zc_pool_guid) == 0);
+
+ verify(nvlist_size(config, &len, NV_ENCODE_NATIVE) == 0);
+
+ packed = zfs_malloc(len);
+
+ verify(nvlist_pack(config, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
+
+ zc.zc_config_src = (uint64_t)(uintptr_t)packed;
+ zc.zc_config_src_size = len;
+
+ ret = 0;
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
+ char desc[1024];
+ if (newname == NULL)
+ (void) snprintf(desc, sizeof (desc),
+ dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+ thename);
+ else
+ (void) snprintf(desc, sizeof (desc),
+ dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
+ origname, thename);
+
+ switch (errno) {
+ case EEXIST:
+ /*
+ * A pool with that name already exists.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: pool exists"),
+ desc);
+ break;
+
+ case EPERM:
+ /*
+ * The user doesn't have permission to create pools.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: permission "
+ "denied"), desc);
+ break;
+
+ case ENXIO:
+ case EDOM:
+ /*
+ * Device is unavailable, or vdev sum didn't match.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: one or more "
+ "devices is unavailable"),
+ desc);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ ret = -1;
+ } else {
+ zpool_handle_t *zhp;
+ /*
+ * This should never fail, but play it safe anyway.
+ */
+ if ((zhp = zpool_open_silent(thename)) != NULL) {
+ ret = zpool_create_zvol_links(zhp);
+ zpool_close(zhp);
+ }
+ }
+
+ free(packed);
+ return (ret);
+}
+
+/*
+ * Scrub the pool.
+ */
+int
+zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_cookie = type;
+
+ if (ioctl(zfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
+ return (0);
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
+
+ switch (errno) {
+ case EPERM:
+ /*
+ * No permission to scrub this pool.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
+ break;
+
+ case EBUSY:
+ /*
+ * Resilver in progress.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: currently resilvering"),
+ msg);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ return (-1);
+}
+
+/*
+ * Bring the specified vdev online
+ */
+int
+zpool_vdev_online(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
+ "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);
+
+ if (ioctl(zfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
+ return (0);
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot online %s"), zc.zc_prop_value);
+
+ switch (errno) {
+ case ENODEV:
+ /*
+ * Device doesn't exist
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
+ break;
+
+ case EPERM:
+ /*
+ * No permission to bring this vdev online.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ return (-1);
+}
+
+/*
+ * Take the specified vdev offline
+ */
+int
+zpool_vdev_offline(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
+ "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);
+
+ if (ioctl(zfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
+ return (0);
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot offline %s"), zc.zc_prop_value);
+
+ switch (errno) {
+ case ENODEV:
+ /*
+ * Device doesn't exist
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
+ break;
+
+ case EPERM:
+ /*
+ * No permission to take this vdev offline.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
+ break;
+
+ case EBUSY:
+ /*
+ * There are no other replicas of this device.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+ return (-1);
+}
+
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 *
 * Returns 0 on success, nonzero (with an error printed) on failure.
 * NOTE(review): the failure value is 1 here, while most siblings return -1 —
 * confirm callers only test for nonzero.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	char *packed;
	int ret;
	size_t len;

	/* bare device names are interpreted relative to /dev/dsk */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
	    "%s%s", old_disk[0] == '/' ? "" : "/dev/dsk/", old_disk);
	zc.zc_cookie = replacing;

	/* pack the replacement vdev spec for delivery to the kernel */
	verify(nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) == 0);

	packed = zfs_malloc(len);

	verify(nvlist_pack(nvroot, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);

	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	ret = ioctl(zfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);

	free(packed);

	if (ret == 0)
		return (0);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	switch (errno) {
	case EPERM:
		/*
		 * No permission to mess with the config.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
		break;

	case ENODEV:
		/*
		 * Device doesn't exist.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s not in pool"),
		    msg, old_disk);
		break;

	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing)
			zfs_error(dgettext(TEXT_DOMAIN,
			    "%s: cannot replace a replacing device"), msg);
		else
			zfs_error(dgettext(TEXT_DOMAIN,
			    "%s: attach is only applicable to mirrors"), msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: <new_device> must be a single disk"), msg);
		break;

	case ENXIO:
		/*
		 * This is unlikely to happen since we've verified that
		 * all the devices can be opened from userland, but it's
		 * still possible in some circumstances.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is unavailable"),
		    msg, new_disk);
		break;

	case EBUSY:
		/*
		 * The new device is in use.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s busy"), msg, new_disk);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error(dgettext(TEXT_DOMAIN, "%s: %s is too small"),
		    msg, new_disk);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: devices have different sector alignment"), msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		zfs_error(dgettext(TEXT_DOMAIN,
		    "%s: too many devices in a single vdev"), msg);
		break;

	default:
		zfs_baderror(errno);
	}

	return (1);
}
+
+/*
+ * Detach the specified device.
+ */
+int
+zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
+{
+ zfs_cmd_t zc = { 0 };
+ char msg[1024];
+
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ (void) snprintf(zc.zc_prop_value, sizeof (zc.zc_prop_value),
+ "%s%s", path[0] == '/' ? "" : "/dev/dsk/", path);
+
+ if (ioctl(zfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
+ return (0);
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot detach %s"), zc.zc_prop_value);
+
+ switch (errno) {
+ case EPERM:
+ /*
+ * No permission to mess with the config.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: permission denied"), msg);
+ break;
+
+ case ENODEV:
+ /*
+ * Device doesn't exist.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: device not in pool"), msg);
+ break;
+
+ case ENOTSUP:
+ /*
+ * Can't detach from this type of vdev.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN,
+ "%s: only applicable to mirror and replacing vdevs"), msg);
+ break;
+
+ case EBUSY:
+ /*
+ * There are no other replicas of this device.
+ */
+ zfs_error(dgettext(TEXT_DOMAIN, "%s: no valid replicas"), msg);
+ break;
+
+ default:
+ zfs_baderror(errno);
+ }
+
+ return (1);
+}
+
+static int
+do_zvol(zfs_handle_t *zhp, void *data)
+{
+ int linktype = (int)(uintptr_t)data;
+ int ret;
+
+ /*
+ * We check for volblocksize intead of ZFS_TYPE_VOLUME so that we
+ * correctly handle snapshots of volumes.
+ */
+ if (zhp->zfs_volblocksize != 0) {
+ if (linktype)
+ ret = zvol_create_link(zhp->zfs_name);
+ else
+ ret = zvol_remove_link(zhp->zfs_name);
+ }
+
+ ret = zfs_iter_children(zhp, do_zvol, data);
+
+ zfs_close(zhp);
+ return (ret);
+}
+
+/*
+ * Iterate over all zvols in the pool and make any necessary minor nodes.
+ */
+int
+zpool_create_zvol_links(zpool_handle_t *zhp)
+{
+ zfs_handle_t *zfp;
+ int ret;
+
+ /*
+ * If the pool is unavailable, just return success.
+ */
+ if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
+ return (0);
+
+ ret = zfs_iter_children(zfp, do_zvol, (void *)TRUE);
+
+ zfs_close(zfp);
+ return (ret);
+}
+
+/*
+ * Iterate over all zvols in the poool and remove any minor nodes.
+ */
+int
+zpool_remove_zvol_links(zpool_handle_t *zhp)
+{
+ zfs_handle_t *zfp;
+ int ret;
+
+ /*
+ * If the pool is unavailable, just return success.
+ */
+ if ((zfp = make_dataset_handle(zhp->zpool_name)) == NULL)
+ return (0);
+
+ ret = zfs_iter_children(zfp, do_zvol, (void *)FALSE);
+
+ zfs_close(zfp);
+ return (ret);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_status.c b/usr/src/lib/libzfs/common/libzfs_status.c
new file mode 100644
index 0000000000..27a86d0c3c
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_status.c
@@ -0,0 +1,248 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool.  This status is independent (to a certain degree) from
 * the state of the pool.  A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data.  The status
 * describes the overall status of devices.  A pool that is online can still
 * have a device that is experiencing errors.
+ *
+ * Only a subset of the possible faults can be detected using 'zpool status',
+ * and not all possible errors correspond to a FMA message ID. The explanation
+ * is left up to the caller, depending on whether it is a live pool or an
+ * import.
+ */
+
+#include <libzfs.h>
+#include <string.h>
+#include "libzfs_impl.h"
+
/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *msgid_table[] = {
	"ZFS-8000-14",
	"ZFS-8000-2Q",
	"ZFS-8000-3C",
	"ZFS-8000-4J",
	"ZFS-8000-5E",
	"ZFS-8000-6X",
	"ZFS-8000-72",
	"ZFS-8000-8A",
	"ZFS-8000-9P",
	"ZFS-8000-A5"
};

/* number of statuses with FMA message IDs; statuses >= NMSGID have none */
#define NMSGID (sizeof (msgid_table) / sizeof (msgid_table[0]))
+
+/* ARGSUSED */
+static int
+vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_CANT_OPEN &&
+ aux == VDEV_AUX_OPEN_FAILED);
+}
+
+/* ARGSUSED */
+static int
+vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (errs != 0);
+}
+
+/* ARGSUSED */
+static int
+vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_CANT_OPEN);
+}
+
+/* ARGSUSED */
+static int
+vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
+{
+ return (state == VDEV_STATE_OFFLINE);
+}
+
+/*
+ * Detect if any leaf devices that have seen errors or could not be opened.
+ */
+static int
+find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
+{
+ nvlist_t **child;
+ vdev_stat_t *vs;
+ uint_t c, children;
+ char *type;
+
+ /*
+ * Ignore problems within a 'replacing' vdev, since we're presumably in
+ * the process of repairing any such errors, and don't want to call them
+ * out again. We'll pick up the fact that a resilver is happening
+ * later.
+ */
+ verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
+ if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
+ return (FALSE);
+
+ if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
+ &children) == 0) {
+ for (c = 0; c < children; c++)
+ if (find_vdev_problem(child[c], func))
+ return (TRUE);
+ } else {
+ verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vs, &c) == 0);
+
+ if (func(vs->vs_state, vs->vs_aux,
+ vs->vs_read_errors +
+ vs->vs_write_errors +
+ vs->vs_checksum_errors))
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any missing devices
 *	- Look for any devices showing errors
 *	- Check for any data errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 *
 * 'isimport' is nonzero when the config describes an exported pool being
 * evaluated for import; in that case per-device error counts are skipped.
 */
static zpool_status_t
check_status(nvlist_t *config, int isimport)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	/* the root vdev's stats summarize the overall pool state */
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM) {
		return (ZPOOL_STATUS_BAD_GUID_SUM);
	}

	/*
	 * Missing devices.  _NR (non-replicated) when the whole pool cannot
	 * open as a result, _R when redundancy covers the loss.
	 */
	if (find_vdev_problem(nvroot, vdev_missing)) {
		if (vs->vs_state == VDEV_STATE_CANT_OPEN)
			return (ZPOOL_STATUS_MISSING_DEV_NR);
		else
			return (ZPOOL_STATUS_MISSING_DEV_R);
	}

	/*
	 * Devices with corrupted labels.
	 */
	if (find_vdev_problem(nvroot, vdev_broken)) {
		if (vs->vs_state == VDEV_STATE_CANT_OPEN)
			return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
		else
			return (ZPOOL_STATUS_CORRUPT_LABEL_R);
	}

	/*
	 * Devices with errors (skipped when evaluating an import).
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Currently resilvering
	 */
	if (!vs->vs_scrub_complete && vs->vs_scrub_type == POOL_SCRUB_RESILVER)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * We currently have no way to detect the following errors:
	 *
	 *	CORRUPT_CACHE
	 *	VERSION_MISMATCH
	 *	CORRUPT_POOL
	 *	CORRUPT_DATA
	 */

	return (ZPOOL_STATUS_OK);
}
+
+zpool_status_t
+zpool_get_status(zpool_handle_t *zhp, char **msgid)
+{
+ zpool_status_t ret = check_status(zhp->zpool_config, FALSE);
+
+ if (ret >= NMSGID)
+ *msgid = NULL;
+ else
+ *msgid = msgid_table[ret];
+
+ return (ret);
+}
+
+zpool_status_t
+zpool_import_status(nvlist_t *config, char **msgid)
+{
+ zpool_status_t ret = check_status(config, TRUE);
+
+ if (ret >= NMSGID)
+ *msgid = NULL;
+ else
+ *msgid = msgid_table[ret];
+
+ return (ret);
+}
diff --git a/usr/src/lib/libzfs/common/libzfs_util.c b/usr/src/lib/libzfs/common/libzfs_util.c
new file mode 100644
index 0000000000..2f5c538212
--- /dev/null
+++ b/usr/src/lib/libzfs/common/libzfs_util.c
@@ -0,0 +1,204 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*
+ * Internal utility routines for the ZFS library.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/mnttab.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+int zfs_fd;
+
+void (*error_func)(const char *, va_list);
+
+/*
+ * All error handling is kept within libzfs where we have the most information
+ * immediately available. While this may not be suitable for a general purpose
+ * library, it greatly simplifies our commands. This command name is used to
+ * prefix all error messages appropriately.
+ */
+void
+zfs_error(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ if (error_func != NULL) {
+ error_func(fmt, ap);
+ } else {
+ (void) vfprintf(stderr, fmt, ap);
+ (void) fprintf(stderr, "\n");
+ }
+
+ va_end(ap);
+}
+
+/*
+ * An internal error is something that we cannot recover from, and should never
+ * happen (such as running out of memory). It should only be used in
+ * exceptional circumstances.
+ */
+void
+zfs_fatal(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ if (error_func != NULL) {
+ error_func(fmt, ap);
+ } else {
+ (void) vfprintf(stderr, fmt, ap);
+ (void) fprintf(stderr, "\n");
+ }
+
+ va_end(ap);
+
+ exit(1);
+}
+
/*
 * Consumers (such as the JNI interface) that need to capture error output can
 * override the default error handler using this function.  Passing NULL
 * restores the default (print to stderr) behavior.
 */
void
zfs_set_error_handler(void (*func)(const char *, va_list))
{
	error_func = func;
}
+
/*
 * Display an out of memory error message and abort the current program.
 * Callers must only invoke this when errno is ENOMEM (asserted below).
 */
void
no_memory(void)
{
	assert(errno == ENOMEM);
	zfs_fatal(dgettext(TEXT_DOMAIN, "internal error: out of memory\n"));
}
+
/*
 * A safe form of malloc() which will die if the allocation fails.
 * The returned memory is zero-filled (allocated via calloc).
 */
void *
zfs_malloc(size_t size)
{
	void *p = calloc(1, size);

	if (p == NULL)
		no_memory();

	return (p);
}
+
/*
 * A safe form of strdup() which will die if the allocation fails.
 */
char *
zfs_strdup(const char *str)
{
	char *copy = strdup(str);

	if (copy == NULL)
		no_memory();

	return (copy);
}
+
/*
 * Initialize the library.  Sets the command name used when reporting errors.
 * This command name is used to prefix all error messages appropriately.
 * Also opens /dev/zfs and dies if it cannot be opened.
 */
#pragma init(zfs_init)
void
zfs_init(void)
{
	/* zfs_fd is the global /dev/zfs descriptor used for all ioctls */
	if ((zfs_fd = open(ZFS_DEV, O_RDWR)) < 0)
		zfs_fatal(dgettext(TEXT_DOMAIN,
		    "internal error: cannot open zfs device"));

	/* the mount table is required; failing to open it is fatal */
	if ((mnttab_file = fopen(MNTTAB, "r")) == NULL)
		zfs_fatal(dgettext(TEXT_DOMAIN, "internal error: unable to "
		    "open %s\n"), MNTTAB);

	/*
	 * sharetab may legitimately be absent, so its handle may be NULL —
	 * NOTE(review): confirm all readers of sharetab_file check for NULL.
	 */
	sharetab_file = fopen("/etc/dfs/sharetab", "r");
}
+
/*
 * Cleanup function for library.  Simply close the /dev/zfs descriptor that we
 * opened as part of zfs_init().
 * NOTE(review): mnttab_file and sharetab_file are not closed here — confirm
 * whether that is intentional.
 */
#pragma fini(zfs_fini)
void
zfs_fini(void)
{
	(void) close(zfs_fd);
}
+
/*
 * Convert a number to an appropriately human-readable output, e.g.
 * 1536 -> "1.50K".  Exact powers of 1024 are printed without a fraction.
 */
void
zfs_nicenum(uint64_t num, char *buf, size_t buflen)
{
	uint64_t scaled = num;
	int shift = 0;
	char unit;

	/* scale down by 1024 per unit, rounding to the nearest */
	while (scaled >= 1024) {
		scaled = (scaled + 512) / 1024;
		shift++;
	}

	unit = " KMGTPE"[shift];

	if (shift == 0)
		(void) snprintf(buf, buflen, "%llu", scaled);
	else if (scaled < 10 && (num & (num - 1)) != 0)
		(void) snprintf(buf, buflen, "%.2f%c",
		    (double)num / (1ULL << 10 * shift), unit);
	else if (scaled < 100 && (num & (num - 1)) != 0)
		(void) snprintf(buf, buflen, "%.1f%c",
		    (double)num / (1ULL << 10 * shift), unit);
	else
		(void) snprintf(buf, buflen, "%llu%c", scaled, unit);
}
diff --git a/usr/src/lib/libzfs/common/llib-lzfs b/usr/src/lib/libzfs/common/llib-lzfs
new file mode 100644
index 0000000000..83ac1841a8
--- /dev/null
+++ b/usr/src/lib/libzfs/common/llib-lzfs
@@ -0,0 +1,32 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+/*LINTLIBRARY*/
+/*PROTOLIB1*/
+
+#include <libzfs.h>