summary | refs | log | tree | commit | diff
path: root/usr/src/uts/common/os
diff options
context:
space:
mode:
author	Richard Lowe <richlowe@richlowe.net>	2014-04-16 02:39:14 +0100
committer	Richard Lowe <richlowe@richlowe.net>	2016-10-15 12:02:16 -0400
commit	d2a70789f056fc6c9ce3ab047b52126d80b0e3da (patch)
tree	bcf5eedbc5aeec80cac59ea37052e3b87108c253 /usr/src/uts/common/os
parent	8ab1c3f559468e655c4eb8acce993320403dd72b (diff)
download	illumos-joyent-d2a70789f056fc6c9ce3ab047b52126d80b0e3da.tar.gz
7029 want per-process exploit mitigation features (secflags)
7030 want basic address space layout randomization (ASLR)
7031 noexec_user_stack should be a security-flag
7032 want a means to forbid mappings around NULL
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
Reviewed by: Patrick Mooney <pmooney@joyent.com>
Approved by: Dan McDonald <danmcd@omniti.com>
Diffstat (limited to 'usr/src/uts/common/os')
-rw-r--r--usr/src/uts/common/os/cred.c6
-rw-r--r--usr/src/uts/common/os/exec.c84
-rw-r--r--usr/src/uts/common/os/fork.c5
-rw-r--r--usr/src/uts/common/os/grow.c47
-rw-r--r--usr/src/uts/common/os/mmapobj.c35
-rw-r--r--usr/src/uts/common/os/policy.c19
-rw-r--r--usr/src/uts/common/os/priv_defs5
-rw-r--r--usr/src/uts/common/os/proc.c12
-rw-r--r--usr/src/uts/common/os/sysent.c7
-rw-r--r--usr/src/uts/common/os/zone.c55
10 files changed, 239 insertions(+), 36 deletions(-)
diff --git a/usr/src/uts/common/os/cred.c b/usr/src/uts/common/os/cred.c
index 733fd03a92..25727d54c5 100644
--- a/usr/src/uts/common/os/cred.c
+++ b/usr/src/uts/common/os/cred.c
@@ -215,6 +215,8 @@ cred_init(void)
priv_fillset(&CR_LPRIV(kcred));
CR_IPRIV(kcred) = *priv_basic;
+ priv_addset(&CR_IPRIV(kcred), PRIV_PROC_SECFLAGS);
+
/* Not a basic privilege, if chown is not restricted add it to I0 */
if (!rstchown)
priv_addset(&CR_IPRIV(kcred), PRIV_FILE_CHOWN_SELF);
@@ -1255,7 +1257,7 @@ eph_gid_alloc(zone_t *zone, int flags, gid_t *start, int count)
void
get_ephemeral_data(zone_t *zone, uid_t *min_uid, uid_t *last_uid,
- gid_t *min_gid, gid_t *last_gid)
+ gid_t *min_gid, gid_t *last_gid)
{
ephemeral_zsd_t *eph_zsd = get_ephemeral_zsd(zone);
@@ -1274,7 +1276,7 @@ get_ephemeral_data(zone_t *zone, uid_t *min_uid, uid_t *last_uid,
void
set_ephemeral_data(zone_t *zone, uid_t min_uid, uid_t last_uid,
- gid_t min_gid, gid_t last_gid)
+ gid_t min_gid, gid_t last_gid)
{
ephemeral_zsd_t *eph_zsd = get_ephemeral_zsd(zone);
diff --git a/usr/src/uts/common/os/exec.c b/usr/src/uts/common/os/exec.c
index 172fce8d89..2aaa6a9076 100644
--- a/usr/src/uts/common/os/exec.c
+++ b/usr/src/uts/common/os/exec.c
@@ -69,6 +69,7 @@
#include <sys/sdt.h>
#include <sys/brand.h>
#include <sys/klpd.h>
+#include <sys/random.h>
#include <c2/audit.h>
@@ -99,6 +100,21 @@ uint_t auxv_hwcap32_2 = 0; /* 32-bit version of auxv_hwcap2 */
#define PSUIDFLAGS (SNOCD|SUGID)
/*
+ * These are consumed within the specific exec modules, but are defined here
+ * because
+ *
+ * 1) The exec modules are unloadable, which would make this near useless.
+ *
+ * 2) We want them to be common across all of them, should more than ELF come
+ * to support them.
+ *
+ * All must be powers of 2.
+ */
+size_t aslr_max_brk_skew = 16 * 1024 * 1024; /* 16MB */
+#pragma weak exec_stackgap = aslr_max_stack_skew /* Old, compatible name */
+size_t aslr_max_stack_skew = 64 * 1024; /* 64KB */
+
+/*
* exece() - system call wrapper around exec_common()
*/
int
@@ -560,6 +576,9 @@ gexec(
int privflags = 0;
int setidfl;
priv_set_t fset;
+ secflagset_t old_secflags;
+
+ secflags_copy(&old_secflags, &pp->p_secflags.psf_effective);
/*
* If the SNOCD or SUGID flag is set, turn it off and remember the
@@ -660,6 +679,9 @@ gexec(
priv_adjust_PA(cred);
}
+ /* The new image gets the inheritable secflags as its secflags */
+ secflags_promote(pp);
+
/* SunOS 4.x buy-back */
if ((vp->v_vfsp->vfs_flag & VFS_NOSETUID) &&
(vattr.va_mode & (VSUID|VSGID))) {
@@ -720,7 +742,8 @@ gexec(
* Use /etc/system variable to determine if the stack
* should be marked as executable by default.
*/
- if (noexec_user_stack)
+ if ((noexec_user_stack != 0) ||
+ secflag_enabled(pp, PROC_SEC_NOEXECSTACK))
args->stk_prot &= ~PROT_EXEC;
args->execswp = eswp; /* Save execsw pointer in uarg for exec_func */
@@ -876,11 +899,17 @@ bad_noclose:
if (error == 0)
error = ENOEXEC;
+ mutex_enter(&pp->p_lock);
if (suidflags) {
- mutex_enter(&pp->p_lock);
pp->p_flag |= suidflags;
- mutex_exit(&pp->p_lock);
}
+ /*
+ * Restore the effective secflags, to maintain the invariant they
+ * never change for a given process
+ */
+ secflags_copy(&pp->p_secflags.psf_effective, &old_secflags);
+ mutex_exit(&pp->p_lock);
+
return (error);
}
@@ -1787,6 +1816,44 @@ stk_copyout(uarg_t *args, char *usrstack, void **auxvpp, user_t *up)
}
/*
+ * Though the actual stack base is constant, slew the %sp by a random aligned
+ * amount in [0,aslr_max_stack_skew). Mostly, this makes life slightly more
+ * complicated for buffer overflows hoping to overwrite the return address.
+ *
+ * On some platforms this helps avoid cache thrashing when identical processes
+ * simultaneously share caches that don't provide enough associativity
+ * (e.g. sun4v systems). In this case stack slewing makes the same hot stack
+ * variables in different processes live in different cache sets increasing
+ * effective associativity.
+ */
+size_t
+exec_get_spslew(void)
+{
+#ifdef sun4v
+ static uint_t sp_color_stride = 16;
+ static uint_t sp_color_mask = 0x1f;
+ static uint_t sp_current_color = (uint_t)-1;
+#endif
+ size_t off;
+
+ ASSERT(ISP2(aslr_max_stack_skew));
+
+ if ((aslr_max_stack_skew == 0) ||
+ !secflag_enabled(curproc, PROC_SEC_ASLR)) {
+#ifdef sun4v
+ uint_t spcolor = atomic_inc_32_nv(&sp_current_color);
+ return ((size_t)((spcolor & sp_color_mask) *
+ SA(sp_color_stride)));
+#else
+ return (0);
+#endif
+ }
+
+ (void) random_get_pseudo_bytes((uint8_t *)&off, sizeof (off));
+ return (SA(P2PHASE(off, aslr_max_stack_skew)));
+}
+
+/*
* Initialize a new user stack with the specified arguments and environment.
* The initial user stack layout is as follows:
*
@@ -2016,17 +2083,10 @@ exec_args(execa_t *uap, uarg_t *args, intpdata_t *intp, void **auxvpp)
p->p_flag |= SAUTOLPG; /* kernel controls page sizes */
mutex_exit(&p->p_lock);
- /*
- * Some platforms may choose to randomize real stack start by adding a
- * small slew (not more than a few hundred bytes) to the top of the
- * stack. This helps avoid cache thrashing when identical processes
- * simultaneously share caches that don't provide enough associativity
- * (e.g. sun4v systems). In this case stack slewing makes the same hot
- * stack variables in different processes to live in different cache
- * sets increasing effective associativity.
- */
sp_slew = exec_get_spslew();
ASSERT(P2PHASE(sp_slew, args->stk_align) == 0);
+ /* Be certain we don't underflow */
+ VERIFY((curproc->p_usrstack - (size + sp_slew)) < curproc->p_usrstack);
exec_set_sp(size + sp_slew);
as = as_alloc();
diff --git a/usr/src/uts/common/os/fork.c b/usr/src/uts/common/os/fork.c
index f835981610..ce0913a52a 100644
--- a/usr/src/uts/common/os/fork.c
+++ b/usr/src/uts/common/os/fork.c
@@ -1095,6 +1095,11 @@ getproc(proc_t **cpp, pid_t pid, uint_t flags)
cp->p_ppid = pp->p_pid;
cp->p_ancpid = pp->p_pid;
cp->p_portcnt = pp->p_portcnt;
+ /*
* Security flags are preserved on fork; the inherited copy comes into
* effect on exec.
+ */
+ cp->p_secflags = pp->p_secflags;
/*
* Initialize watchpoint structures
diff --git a/usr/src/uts/common/os/grow.c b/usr/src/uts/common/os/grow.c
index f5e92cfd94..647bca2542 100644
--- a/usr/src/uts/common/os/grow.c
+++ b/usr/src/uts/common/os/grow.c
@@ -62,10 +62,16 @@
int use_brk_lpg = 1;
int use_stk_lpg = 1;
+/*
+ * If set, we will not randomize mappings where the 'addr' argument is
+ * non-NULL and not an alignment.
+ */
+int aslr_respect_mmap_hint = 1;
+
static int brk_lpg(caddr_t nva);
static int grow_lpg(caddr_t sp);
-int
+intptr_t
brk(caddr_t nva)
{
int error;
@@ -77,6 +83,17 @@ brk(caddr_t nva)
* and p_brkpageszc.
*/
as_rangelock(p->p_as);
+
+ /*
+ * As a special case to aid the implementation of sbrk(3C), if given a
+ * new brk of 0, return the current brk. We'll hide this in brk(3C).
+ */
+ if (nva == 0) {
+ intptr_t base = (intptr_t)(p->p_brkbase + p->p_brksize);
+ as_rangeunlock(p->p_as);
+ return (base);
+ }
+
if (use_brk_lpg && (p->p_flag & SAUTOLPG) != 0) {
error = brk_lpg(nva);
} else {
@@ -490,10 +507,10 @@ grow_internal(caddr_t sp, uint_t growszc)
}
/*
- * Find address for user to map.
- * If MAP_FIXED is not specified, we can pick any address we want, but we will
- * first try the value in *addrp if it is non-NULL. Thus this is implementing
- * a way to try and get a preferred address.
+ * Find address for user to map. If MAP_FIXED is not specified, we can pick
+ * any address we want, but we will first try the value in *addrp if it is
+ * non-NULL and _MAP_RANDOMIZE is not set. Thus this is implementing a way to
+ * try and get a preferred address.
*/
int
choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
@@ -506,7 +523,8 @@ choose_addr(struct as *as, caddr_t *addrp, size_t len, offset_t off,
if (flags & MAP_FIXED) {
(void) as_unmap(as, *addrp, len);
return (0);
- } else if (basep != NULL && ((flags & MAP_ALIGN) == 0) &&
+ } else if (basep != NULL &&
+ ((flags & (MAP_ALIGN | _MAP_RANDOMIZE)) == 0) &&
!as_gap(as, len, &basep, &lenp, 0, *addrp)) {
/* User supplied address was available */
*addrp = basep;
@@ -587,6 +605,9 @@ zmap(struct as *as, caddr_t *addrp, size_t len, uint_t uprot, int flags,
return (as_map(as, *addrp, len, segvn_create, &vn_a));
}
+#define RANDOMIZABLE_MAPPING(addr, flags) (((flags & MAP_FIXED) == 0) && \
+ !(((flags & MAP_ALIGN) == 0) && (addr != 0) && aslr_respect_mmap_hint))
+
static int
smmap_common(caddr_t *addrp, size_t len,
int prot, int flags, struct file *fp, offset_t pos)
@@ -612,6 +633,19 @@ smmap_common(caddr_t *addrp, size_t len,
return (EINVAL);
}
+ if ((flags & (MAP_FIXED | _MAP_RANDOMIZE)) ==
+ (MAP_FIXED | _MAP_RANDOMIZE)) {
+ return (EINVAL);
+ }
+
+ /*
+ * If it's not a fixed allocation and mmap ASLR is enabled, randomize
+ * it.
+ */
+ if (RANDOMIZABLE_MAPPING(*addrp, flags) &&
+ secflag_enabled(curproc, PROC_SEC_ASLR))
+ flags |= _MAP_RANDOMIZE;
+
#if defined(__sparc)
/*
* See if this is an "old mmap call". If so, remember this
@@ -630,7 +664,6 @@ smmap_common(caddr_t *addrp, size_t len,
if (flags & MAP_ALIGN) {
-
if (flags & MAP_FIXED)
return (EINVAL);
diff --git a/usr/src/uts/common/os/mmapobj.c b/usr/src/uts/common/os/mmapobj.c
index b6b5446d71..80f556a2de 100644
--- a/usr/src/uts/common/os/mmapobj.c
+++ b/usr/src/uts/common/os/mmapobj.c
@@ -68,8 +68,9 @@
*
* Having mmapobj interpret and map objects will allow the kernel to make the
* best decision for where to place the mappings for said objects. Thus, we
- * can make optimizations inside of the kernel for specific platforms or
- * cache mapping information to make mapping objects faster.
+ * can make optimizations inside of the kernel for specific platforms or cache
+ * mapping information to make mapping objects faster. The cache is ignored
+ * if ASLR is enabled.
*
* The lib_va_hash will be one such optimization. For each ELF object that
* mmapobj is asked to interpret, we will attempt to cache the information
@@ -718,7 +719,7 @@ mmapobj_lookup_start_addr(struct lib_va *lvp)
*/
static caddr_t
mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
- size_t align, vattr_t *vap)
+ int randomize, size_t align, vattr_t *vap)
{
proc_t *p = curproc;
struct as *as = p->p_as;
@@ -733,6 +734,7 @@ mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
size_t lib_va_len;
ASSERT(lvpp != NULL);
+ ASSERT((randomize & use_lib_va) != 1);
MOBJ_STAT_ADD(alloc_start);
model = get_udatamodel();
@@ -748,6 +750,10 @@ mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
if (align > 1) {
ma_flags |= MAP_ALIGN;
}
+
+ if (randomize != 0)
+ ma_flags |= _MAP_RANDOMIZE;
+
if (use_lib_va) {
/*
* The first time through, we need to setup the lib_va arenas.
@@ -861,7 +867,14 @@ nolibva:
* If we don't have an expected base address, or the one that we want
* to use is not available or acceptable, go get an acceptable
* address range.
+ *
+ * If ASLR is enabled, we should never have used the cache, and should
+ * also start our real work here, in the consequent of the next
+ * condition.
*/
+ if (randomize != 0)
+ ASSERT(base == NULL);
+
if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
@@ -1525,7 +1538,7 @@ check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr)
* Return 0 on success or error on failure.
*/
static int
-process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
+process_phdrs(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
{
int i;
@@ -1581,7 +1594,7 @@ process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
}
}
- if (padding != 0) {
+ if ((padding != 0) || secflag_enabled(curproc, PROC_SEC_ASLR)) {
use_lib_va = 0;
}
if (e_type == ET_DYN) {
@@ -1591,7 +1604,8 @@ process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
return (error);
}
/* Check to see if we already have a description for this lib */
- lvp = lib_va_find(&vattr);
+ if (!secflag_enabled(curproc, PROC_SEC_ASLR))
+ lvp = lib_va_find(&vattr);
if (lvp != NULL) {
MOBJ_STAT_ADD(lvp_found);
@@ -1701,7 +1715,9 @@ process_phdr(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
*/
ASSERT(lvp ? use_lib_va == 0 : 1);
start_addr = mmapobj_alloc_start_addr(&lvp, len,
- use_lib_va, align, &vattr);
+ use_lib_va,
+ secflag_enabled(curproc, PROC_SEC_ASLR),
+ align, &vattr);
if (start_addr == NULL) {
if (lvp) {
lib_va_release(lvp);
@@ -2026,7 +2042,7 @@ doelfwork(Ehdr *ehdrp, vnode_t *vp, mmapobj_result_t *mrp,
}
/* Now process the phdr's */
- error = process_phdr(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
+ error = process_phdrs(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
padding, fcred);
kmem_free(phbasep, phsizep);
return (error);
@@ -2312,7 +2328,8 @@ mmapobj_map_interpret(vnode_t *vp, mmapobj_result_t *mrp,
* for this library. This is the fast path and only used for
* ET_DYN ELF files (dynamic libraries).
*/
- if (padding == 0 && (lvp = lib_va_find(&vattr)) != NULL) {
+ if (padding == 0 && !secflag_enabled(curproc, PROC_SEC_ASLR) &&
+ ((lvp = lib_va_find(&vattr)) != NULL)) {
int num_segs;
model = get_udatamodel();
diff --git a/usr/src/uts/common/os/policy.c b/usr/src/uts/common/os/policy.c
index 07bc2920da..06f7fb0f4f 100644
--- a/usr/src/uts/common/os/policy.c
+++ b/usr/src/uts/common/os/policy.c
@@ -1384,9 +1384,9 @@ secpolicy_xvattr(xvattr_t *xvap, uid_t owner, cred_t *cr, vtype_t vtype)
int
secpolicy_vnode_setattr(cred_t *cr, struct vnode *vp, struct vattr *vap,
- const struct vattr *ovap, int flags,
- int unlocked_access(void *, int, cred_t *),
- void *node)
+ const struct vattr *ovap, int flags,
+ int unlocked_access(void *, int, cred_t *),
+ void *node)
{
int mask = vap->va_mask;
int error = 0;
@@ -1728,6 +1728,19 @@ secpolicy_pset(const cred_t *cr)
return (PRIV_POLICY(cr, PRIV_SYS_RES_CONFIG, B_FALSE, EPERM, NULL));
}
+/* Process security flags */
+int
+secpolicy_psecflags(const cred_t *cr, proc_t *tp, proc_t *sp)
+{
+ if (PRIV_POLICY(cr, PRIV_PROC_SECFLAGS, B_FALSE, EPERM, NULL) != 0)
+ return (EPERM);
+
+ if (!prochasprocperm(tp, sp, cr))
+ return (EPERM);
+
+ return (0);
+}
+
/*
* Processor set binding.
*/
diff --git a/usr/src/uts/common/os/priv_defs b/usr/src/uts/common/os/priv_defs
index a3cdaccc2a..bc1787c9ca 100644
--- a/usr/src/uts/common/os/priv_defs
+++ b/usr/src/uts/common/os/priv_defs
@@ -318,6 +318,11 @@ privilege PRIV_PROC_PRIOCNTL
Allows a process to change its scheduling class to any scheduling class,
including the RT class.
+basic privilege PRIV_PROC_SECFLAGS
+
+ Allows a process to manipulate the secflags of processes (subject to,
+ additionally, the ability to signal that process)
+
basic privilege PRIV_PROC_SESSION
Allows a process to send signals or trace processes outside its
diff --git a/usr/src/uts/common/os/proc.c b/usr/src/uts/common/os/proc.c
index 3d4c82fcad..c699744733 100644
--- a/usr/src/uts/common/os/proc.c
+++ b/usr/src/uts/common/os/proc.c
@@ -159,3 +159,15 @@ freepctx(proc_t *p, int isexec)
}
kpreempt_enable();
}
+
+boolean_t
+secflag_enabled(proc_t *p, secflag_t flag)
+{
+ return (secflag_isset(p->p_secflags.psf_effective, flag));
+}
+
+void
+secflags_promote(proc_t *p)
+{
+ secflags_copy(&p->p_secflags.psf_effective, &p->p_secflags.psf_inherit);
+}
diff --git a/usr/src/uts/common/os/sysent.c b/usr/src/uts/common/os/sysent.c
index 0d1bb6a8a1..b3861dec03 100644
--- a/usr/src/uts/common/os/sysent.c
+++ b/usr/src/uts/common/os/sysent.c
@@ -63,7 +63,7 @@ int alarm(int);
int auditsys(struct auditcalls *, rval_t *);
int64_t brandsys(int, uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t,
uintptr_t);
-int brk(caddr_t);
+intptr_t brk(caddr_t);
int chdir(char *);
int chmod(char *, int);
int chown(char *, uid_t, gid_t);
@@ -112,6 +112,7 @@ long pcsample(void *, long);
int privsys(int, priv_op_t, priv_ptype_t, void *, size_t, int);
int profil(unsigned short *, size_t, ulong_t, uint_t);
ssize_t pread(int, void *, size_t, off_t);
+int psecflags(procset_t *, psecflagwhich_t, secflagdelta_t *);
ssize_t pwrite(int, void *, size_t, off_t);
ssize_t read(int, void *, size_t);
int rename(char *, char *);
@@ -439,7 +440,7 @@ struct sysent sysent[NSYSCALL] =
SYSENT_NOSYS(),
SYSENT_C("indir", indir, 1)),
/* 1 */ SYSENT_CI("exit", rexit, 1),
- /* 2 */ SYSENT_LOADABLE(), /* (was forkall) */
+ /* 2 */ SYSENT_CI("psecflags", psecflags, 3),
/* 3 */ SYSENT_CL("read", read, 3),
/* 4 */ SYSENT_CL("write", write, 3),
/* 5 */ SYSENT_CI("open", open, 3),
@@ -820,7 +821,7 @@ struct sysent sysent32[NSYSCALL] =
{
/* 0 */ SYSENT_C("indir", indir, 1),
/* 1 */ SYSENT_CI("exit", (int (*)())rexit, 1),
- /* 2 */ SYSENT_LOADABLE32(), /* (was forkall) */
+ /* 2 */ SYSENT_CI("psecflags", psecflags, 3),
/* 3 */ SYSENT_CI("read", read32, 3),
/* 4 */ SYSENT_CI("write", write32, 3),
/* 5 */ SYSENT_CI("open", open32, 3),
diff --git a/usr/src/uts/common/os/zone.c b/usr/src/uts/common/os/zone.c
index 20d7311af7..d966b5e72a 100644
--- a/usr/src/uts/common/os/zone.c
+++ b/usr/src/uts/common/os/zone.c
@@ -2080,6 +2080,7 @@ zone_zsd_init(void)
zone0.zone_domain = srpc_domain;
zone0.zone_hostid = HW_INVALID_HOSTID;
zone0.zone_fs_allowed = NULL;
+ psecflags_default(&zone0.zone_secflags);
zone0.zone_ref = 1;
zone0.zone_id = GLOBAL_ZONEID;
zone0.zone_status = ZONE_IS_RUNNING;
@@ -2527,6 +2528,32 @@ zone_set_brand(zone_t *zone, const char *brand)
}
static int
+zone_set_secflags(zone_t *zone, const psecflags_t *zone_secflags)
+{
+ int err = 0;
+ psecflags_t psf;
+
+ ASSERT(zone != global_zone);
+
+ if ((err = copyin(zone_secflags, &psf, sizeof (psf))) != 0)
+ return (err);
+
+ if (zone_status_get(zone) > ZONE_IS_READY)
+ return (EINVAL);
+
+ if (!psecflags_validate(&psf))
+ return (EINVAL);
+
+ (void) memcpy(&zone->zone_secflags, &psf, sizeof (psf));
+
+ /* Set security flags on the zone's zsched */
+ (void) memcpy(&zone->zone_zsched->p_secflags, &zone->zone_secflags,
+ sizeof (zone->zone_zsched->p_secflags));
+
+ return (0);
+}
+
+static int
zone_set_fs_allowed(zone_t *zone, const char *zone_fs_allowed)
{
char *buf = kmem_zalloc(ZONE_FS_ALLOWED_MAX, KM_SLEEP);
@@ -3988,6 +4015,7 @@ zsched(void *arg)
mutex_exit(&pp->p_lock);
}
}
+
/*
* Tell the world that we're done setting up.
*
@@ -4491,6 +4519,12 @@ zone_create(const char *zone_name, const char *zone_root,
zone->zone_ipc.ipcq_msgmni = 0;
zone->zone_bootargs = NULL;
zone->zone_fs_allowed = NULL;
+
+ secflags_zero(&zone0.zone_secflags.psf_lower);
+ secflags_zero(&zone0.zone_secflags.psf_effective);
+ secflags_zero(&zone0.zone_secflags.psf_inherit);
+ secflags_fullset(&zone0.zone_secflags.psf_upper);
+
zone->zone_initname =
kmem_alloc(strlen(zone_default_initname) + 1, KM_SLEEP);
(void) strcpy(zone->zone_initname, zone_default_initname);
@@ -5612,6 +5646,13 @@ zone_getattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
error = EFAULT;
}
break;
+ case ZONE_ATTR_SECFLAGS:
+ size = sizeof (zone->zone_secflags);
+ if (bufsize > size)
+ bufsize = size;
+ if ((err = copyout(&zone->zone_secflags, buf, bufsize)) != 0)
+ error = EFAULT;
+ break;
case ZONE_ATTR_NETWORK:
zbuf = kmem_alloc(bufsize, KM_SLEEP);
if (copyin(buf, zbuf, bufsize) != 0) {
@@ -5696,6 +5737,9 @@ zone_setattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
case ZONE_ATTR_FS_ALLOWED:
err = zone_set_fs_allowed(zone, (const char *)buf);
break;
+ case ZONE_ATTR_SECFLAGS:
+ err = zone_set_secflags(zone, (psecflags_t *)buf);
+ break;
case ZONE_ATTR_PHYS_MCAP:
err = zone_set_phys_mcap(zone, (const uint64_t *)buf);
break;
@@ -6183,6 +6227,17 @@ zone_enter(zoneid_t zoneid)
zone_chdir(vp, &PTOU(pp)->u_rdir, pp);
/*
+ * Change process security flags. Note that the _effective_ flags
+ * cannot change
+ */
+ secflags_copy(&pp->p_secflags.psf_lower,
+ &zone->zone_secflags.psf_lower);
+ secflags_copy(&pp->p_secflags.psf_upper,
+ &zone->zone_secflags.psf_upper);
+ secflags_copy(&pp->p_secflags.psf_inherit,
+ &zone->zone_secflags.psf_inherit);
+
+ /*
* Change process credentials
*/
newcr = cralloc();