Diffstat (limited to 'usr/src/uts/common/fs/zfs/dnode.c')

-rw-r--r--  usr/src/uts/common/fs/zfs/dnode.c  28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/usr/src/uts/common/fs/zfs/dnode.c b/usr/src/uts/common/fs/zfs/dnode.c
index 5b5ece424a..417e219b88 100644
--- a/usr/src/uts/common/fs/zfs/dnode.c
+++ b/usr/src/uts/common/fs/zfs/dnode.c
@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@@ -1031,12 +1031,12 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
dn = (object == DMU_USERUSED_OBJECT) ?
DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
if (dn == NULL)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
- return (ENOENT);
+ return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
- return (EEXIST);
+ return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
(void) refcount_add(&dn->dn_holds, tag);
*dnp = dn;
@@ -1044,7 +1044,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
}
if (object == 0 || object >= DN_MAX_OBJECT)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
@@ -1062,7 +1062,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL)
- return (EIO);
+ return (SET_ERROR(EIO));
err = dbuf_read(db, NULL, DB_RF_CANFAIL);
if (err) {
dbuf_rele(db, FTAG);
@@ -1370,7 +1370,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
fail:
rw_exit(&dn->dn_struct_rwlock);
- return (ENOTSUP);
+ return (SET_ERROR(ENOTSUP));
}
/* read-holding callers must not rely on the lock being continuously held */
@@ -1856,7 +1856,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* at the pointer to this block in its parent, and its
* going to be unallocated, so we will skip over it.
*/
- return (ESRCH);
+ return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
if (error) {
@@ -1872,7 +1872,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
span = DNODE_SHIFT;
@@ -1885,7 +1885,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset += (1ULL << span) * inc;
}
if (i < 0 || i == blkfill)
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
@@ -1917,7 +1917,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset = start;
}
if (i < 0 || i >= epb)
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
}
if (db)
@@ -1961,7 +1961,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
goto out;
}
@@ -1970,7 +1970,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
}
goto out;
}
@@ -1991,7 +1991,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
- error = ESRCH;
+ error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
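
The change itself is mechanical: every site in dnode.c that previously returned or assigned a bare errno constant now routes the value through SET_ERROR(). In illumos this macro fires a static DTrace probe with the error value before yielding it unchanged, so the origin of an EIO, ESRCH, or similar error can be traced without altering control flow. The snippet below is only a minimal user-space sketch of that idea, assuming a hypothetical trace_error() hook and lookup() caller; it is not the actual sys/sdt.h definition of the macro.

	#include <errno.h>
	#include <stdio.h>

	/*
	 * Hypothetical stand-in for the kernel's static trace probe:
	 * record the errno value as the error is set.  A real tracer
	 * would also capture the call site.
	 */
	static inline int
	trace_error(int err)
	{
		(void) fprintf(stderr, "set-error: %d\n", err);
		return (err);
	}

	/*
	 * Sketch of the SET_ERROR() pattern: evaluate to the error value
	 * itself, firing the trace hook as a side effect.  The illumos
	 * macro uses an SDT probe rather than a function call.
	 */
	#define	SET_ERROR(err)	(trace_error(err))

	/* Illustrative caller, mirroring the converted return sites above. */
	static int
	lookup(int object)
	{
		if (object == 0)
			return (SET_ERROR(EINVAL));	/* traceable origin */
		return (0);
	}

	int
	main(void)
	{
		return (lookup(0) == EINVAL ? 0 : 1);
	}

With the real probe in place, a DTrace one-liner on the corresponding sdt probe (aggregating stack() by error value, for instance) can show exactly which return site produced a given errno, which is the motivation for converting bare returns such as "return (ENOENT)" throughout this file.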