#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/policy.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/cmn_err.h>
#include <sys/acct.h>
#include <sys/tuneable.h>
#include <sys/class.h>
#include <sys/kmem.h>
#include <sys/session.h>
#include <sys/ucontext.h>
#include <sys/stack.h>
#include <sys/procfs.h>
#include <sys/prsystm.h>
#include <sys/vmsystm.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/shm_impl.h>
#include <sys/door_data.h>
#include <vm/as.h>
#include <vm/rm.h>
#include <c2/audit.h>
#include <sys/var.h>
#include <sys/schedctl.h>
#include <sys/utrap.h>
#include <sys/task.h>
#include <sys/resource.h>
#include <sys/cyclic.h>
#include <sys/lgrp.h>
#include <sys/rctl.h>
#include <sys/contract_impl.h>
#include <sys/contract/process_impl.h>
#include <sys/list.h>
#include <sys/dtrace.h>
#include <sys/pool.h>
#include <sys/zone.h>
#include <sys/sdt.h>
#include <sys/class.h>
#include <sys/corectl.h>
#include <sys/brand.h>
#include <sys/fork.h>
static int64_t cfork(int, int, int);
static int getproc(proc_t **, pid_t, uint_t);

/* getproc() flags: clone from curproc, or from p0 for a kernel process. */
#define GETPROC_USER 0x0
#define GETPROC_KERNEL 0x1

static void fork_fail(proc_t *);
static void forklwp_fail(proc_t *);

/* Number of forks currently stalled in getproc()'s failure-delay path. */
int fork_fail_pending;

extern struct kmem_cache *process_cache;
/*
 * vfork(2): create a child that borrows the parent's address space
 * (cfork() with both isvfork and isfork1 set).
 */
int64_t
vfork(void)
{
	/*
	 * NOTE(review): setting t_post_sys presumably forces the full
	 * post-syscall path so vfork-specific return handling runs on
	 * the way back to userland -- confirm against syscall trap code.
	 */
	curthread->t_post_sys = 1;
	return (cfork(1, 1, 0));
}
/*
 * forksys(2) dispatcher: map the subcode onto the appropriate
 * cfork() variant, passing the caller's FORK_* flags through.
 *
 *	subcode 0 - duplicate only the calling lwp
 *	subcode 1 - duplicate all lwps
 *	subcode 2 - vfork-style: share the address space, one lwp
 *
 * Any other subcode fails with EINVAL.
 */
int64_t
forksys(int subcode, int flags)
{
	if (subcode == 0)
		return (cfork(0, 1, flags));

	if (subcode == 1)
		return (cfork(0, 0, flags));

	if (subcode == 2) {
		/* See vfork(): force post-syscall handling on return. */
		curthread->t_post_sys = 1;
		return (cfork(1, 1, flags));
	}

	return ((int64_t)set_errno(EINVAL));
}
/*
 * Detach child cp from parent pp: remove it from the parent's
 * singly-linked orphan list and from the doubly-linked sibling list.
 * The caller must hold pidlock.
 */
static void
disown_proc(proc_t *pp, proc_t *cp)
{
	proc_t **opp;

	ASSERT(MUTEX_HELD(&pidlock));

	/* Walk the orphan chain to the link pointing at cp, then unlink. */
	for (opp = &pp->p_orphan; *opp != cp; opp = &(*opp)->p_nextorph)
		;
	*opp = cp->p_nextorph;

	/* Splice cp out of the sibling list, fixing the head if needed. */
	if (pp->p_child == cp)
		pp->p_child = cp->p_sibling;
	if (cp->p_sibling != NULL)
		cp->p_sibling->p_psibling = cp->p_psibling;
	if (cp->p_psibling != NULL)
		cp->p_psibling->p_sibling = cp->p_sibling;
}
/*
 * cfork() - the common implementation underlying vfork() and forksys().
 *
 *	isvfork	- child borrows the parent's address space (SVFORK)
 *	isfork1	- duplicate only the calling lwp, not every lwp
 *	flags	- FORK_NOSIGCHLD and/or FORK_WAITPID
 *
 * On success the parent receives the child's pid (r_val2 == 0); on
 * failure errno is set and -1 is returned.  The zone_ff* counters
 * record the reason for each fork failure.
 */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	clone = NULL;

	/* Reject any flag bits we do not define. */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		atomic_inc_32(&curproc->p_zone->zone_ffmisc);
		goto forkerr;
	}

	/* fork is not supported from the /proc agent lwp. */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		atomic_inc_32(&curproc->p_zone->zone_ffmisc);
		goto forkerr;
	}

	if ((error = secpolicy_basic_fork(CRED())) != 0) {
		atomic_inc_32(&p->p_zone->zone_ffmisc);
		goto forkerr;
	}

	/*
	 * Hold the process's other lwps quiescent for the duration:
	 * SHOLDFORK for a full duplicate, SHOLDFORK1 when only the
	 * caller is cloned.  holdlwps() fails if interrupted.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
		aston(curthread);
		error = EINTR;
		atomic_inc_32(&p->p_zone->zone_ffmisc);
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Flush register windows to the stack so the child sees a
	 * consistent copy of the parent's stack.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * A vfork parent must not stop at a hold point while the child
	 * has its address space, so clear TP_HOLDLWP here.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;

	/* Keep pool bindings stable while the fork is in progress. */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/* Allocate and minimally initialize the child proc structure. */
	if (getproc(&cp, 0, GETPROC_USER) < 0) {
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

	if (isvfork) {
		/*
		 * vfork: lend the parent's address space to the child.
		 * Watched pages are disarmed and stashed in p_wpage for
		 * the duration of the loan (restored in relvm()).
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, RW_WRITER);
			as_clearwatch(as);
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as);
		}
		cp->p_as = as;
		cp->p_flag |= SVFORK;

		/* The child shares the parent's shm accounting too. */
		cp->p_segacct = p->p_segacct;
	} else {
		/*
		 * Normal fork: duplicate the parent's address space.
		 * SFORKING, set while the process is sprlock'd, marks
		 * the duplication window around as_dup().
		 */
		mutex_enter(&p->p_lock);
		sprlock_proc(p);
		p->p_flag |= SFORKING;
		mutex_exit(&p->p_lock);

		error = as_dup(p->p_as, cp);
		if (error != 0) {
			/*
			 * as_dup() failed: tear down the partly-built
			 * child and resume the parent's held lwps.
			 */
			mutex_enter(&p->p_lock);
			sprunlock(p);
			fork_fail(cp);
			mutex_enter(&pidlock);
			disown_proc(p, cp);
			mutex_enter(&cp->p_lock);
			tk = cp->p_task;
			task_detach(cp);
			ASSERT(cp->p_pool->pool_ref > 0);
			atomic_dec_32(&cp->p_pool->pool_ref);
			mutex_exit(&cp->p_lock);
			pid_exit(cp, tk);
			mutex_exit(&pidlock);
			task_rele(tk);

			mutex_enter(&p->p_lock);
			p->p_flag &= ~SFORKING;
			pool_barrier_exit();
			continuelwps(p);
			mutex_exit(&p->p_lock);
			error = (error == ENOMEM) ? ENOMEM : EAGAIN;
			atomic_inc_32(&p->p_zone->zone_ffnomem);
			goto forkerr;
		}

		/* Duplicate DTrace fasttrap tracepoints into the child. */
		if (p->p_dtrace_count > 0)
			dtrace_fasttrap_fork(p, cp);

		mutex_enter(&p->p_lock);
		sprunlock(p);

		/* Duplicate System V shared-memory accounting. */
		if (p->p_segacct)
			shmfork(p, cp);

		/* Duplicate DTrace helper providers, if present. */
		if (p->p_dtrace_helpers != NULL) {
			ASSERT(dtrace_helpers_fork != NULL);
			(*dtrace_helpers_fork)(p, cp);
		}

		mutex_enter(&p->p_lock);
		p->p_flag &= ~SFORKING;
		mutex_exit(&p->p_lock);
	}

	/*
	 * Duplicate the parent's resource-control set.  The prealloc
	 * group must be retried until it is large enough for the
	 * current contents of p_rctls, which can change between passes.
	 */
	dup_set = rctl_set_create();
	for (;;) {
		dup_gp = rctl_set_dup_prealloc(p->p_rctls);
		mutex_enter(&p->p_rctls->rcs_lock);
		if (rctl_set_dup_ready(p->p_rctls, dup_gp))
			break;
		mutex_exit(&p->p_rctls->rcs_lock);
		rctl_prealloc_destroy(dup_gp);
	}
	e.rcep_p.proc = cp;
	e.rcep_t = RCENTITY_PROCESS;
	cp->p_rctls = rctl_set_dup(p->p_rctls, p, cp, &e, dup_set, dup_gp,
	    RCD_DUP | RCD_CALLBACK);
	mutex_exit(&p->p_rctls->rcs_lock);
	rctl_prealloc_destroy(dup_gp);

	/*
	 * Allocate the child's lwp directory and tid hash: minimal for
	 * a single-lwp fork, sized like the parent's for forkall.
	 */
	if (isfork1)
		cp->p_lwpdir_sz = 2;
	else
		cp->p_lwpdir_sz = p->p_lwpdir_sz;
	cp->p_lwpdir = cp->p_lwpfree = ldp =
	    kmem_zalloc(cp->p_lwpdir_sz * sizeof (lwpdir_t), KM_SLEEP);
	/* Thread the free list through the zeroed directory slots. */
	for (i = 1; i < cp->p_lwpdir_sz; i++, ldp++)
		ldp->ld_next = ldp + 1;
	cp->p_tidhash_sz = (cp->p_lwpdir_sz + 2) / 2;
	cp->p_tidhash =
	    kmem_zalloc(cp->p_tidhash_sz * sizeof (tidhash_t), KM_SLEEP);

	klgrpset_clear(cp->p_lgrpset);

	if (isfork1) {
		/* fork1/vfork: replicate only the calling lwp. */
		clone = forklwp(ttolwp(curthread), cp, curthread->t_tid);
		if (clone == NULL)
			goto forklwperr;
		lwptot(clone)->t_proc_flag |=
		    (curthread->t_proc_flag & TP_TWAIT);
	} else {
		/* forkall: replicate every lwp and zombie-lwp entry. */
		ASSERT(p->p_lwpwait == 0 && p->p_lwpdwait == 0);
		for (i = 0, ldp = p->p_lwpdir; i < p->p_lwpdir_sz; i++, ldp++) {
			klwp_t *clwp;
			kthread_t *ct;

			if ((lep = ldp->ld_entry) == NULL)
				continue;

			if ((t = lep->le_thread) != NULL) {
				clwp = forklwp(ttolwp(t), cp, t->t_tid);
				if (clwp == NULL)
					goto forklwperr;
				ct = lwptot(clwp);
				ct->t_proc_flag |=
				    (t->t_proc_flag & (TP_TWAIT|TP_DAEMON));
				if (t == curthread)
					clone = clwp;
				else
					ct->t_flag |= T_FORKALL;
			} else {
				/*
				 * Zombie lwp: copy just its directory
				 * entry into the child's tid hash.
				 */
				clep = kmem_zalloc(sizeof (*clep), KM_SLEEP);
				clep->le_lwpid = lep->le_lwpid;
				clep->le_start = lep->le_start;
				lwp_hash_in(cp, clep,
				    cp->p_tidhash, cp->p_tidhash_sz, 0);
			}
		}
	}

	/* Place the child in (or under) the parent's process contract. */
	if (contract_process_fork(NULL, cp, p, B_TRUE) == NULL) {
		atomic_inc_32(&p->p_zone->zone_ffmisc);
		goto forklwperr;
	}

	cp->p_lwpid = p->p_lwpid;
	if (!isfork1) {
		cp->p_lwpdaemon = p->p_lwpdaemon;
		cp->p_zombcnt = p->p_zombcnt;
		cp->p_flag |= p->p_flag & SLWPWRAP;
	}

	/* Inherit the core-file path and content settings (refcounted). */
	mutex_enter(&p->p_lock);
	corectl_path_hold(cp->p_corefile = p->p_corefile);
	corectl_content_hold(cp->p_content = p->p_content);
	mutex_exit(&p->p_lock);

	/* Duplicate process context operations, if any are installed. */
	if (p->p_pctx)
		forkpctx(p, cp);

#ifdef __sparc
	utrap_dup(p, cp);
#endif

	/*
	 * If /proc requested a stop on exit from this syscall, arrange
	 * for the child's lwps to stop before they first run...
	 */
	if (PTOU(cp)->u_systrap &&
	    prismember(&PTOU(cp)->u_exitmask, curthread->t_sysnum)) {
		mutex_enter(&cp->p_lock);
		t = cp->p_tlist;
		do {
			t->t_proc_flag |= TP_PRSTOP;
			aston(t);
		} while ((t = t->t_forw) != cp->p_tlist);
		mutex_exit(&cp->p_lock);
	}
	/* ...and likewise for the parent, unless async-stop is in effect. */
	if (!(p->p_proc_flag & P_PR_ASYNC) && PTOU(p)->u_systrap &&
	    prismember(&PTOU(p)->u_exitmask, curthread->t_sysnum)) {
		mutex_enter(&p->p_lock);
		t = p->p_tlist;
		do {
			t->t_proc_flag |= TP_PRSTOP;
			aston(t);
		} while ((t = t->t_forw) != p->p_tlist);
		mutex_exit(&p->p_lock);
	}

	/*
	 * Set the child clone's syscall return values (the parent's
	 * pid, with the second value 1); branded processes go through
	 * their brand's hook.
	 */
	if (PROC_IS_BRANDED(p))
		BROP(p)->b_lwp_setrval(clone, p->p_pid, 1);
	else
		lwp_setrval(clone, p->p_pid, 1);

	/* The parent returns the child's pid, with the second value 0. */
	r.r_val1 = (int)cp->p_pid;
	r.r_val2 = 0;

	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	mutex_exit(&p->p_lock);

	mutex_enter(&pidlock);
	mutex_enter(&cp->p_lock);

	/* Record the caller's FORK_* options on the child. */
	if (flags & FORK_NOSIGCHLD)
		cp->p_pidflag |= CLDNOSIGCHLD;
	if (flags & FORK_WAITPID)
		cp->p_pidflag |= CLDWAITPID;

	/* Join the parent's process group and mark the child runnable. */
	pgjoin(cp, p->p_pgidp);
	cp->p_stat = SRUN;

	/* Complete creation of every lwp in the child. */
	t = cp->p_tlist;
	do {
		if (t->t_proc_flag & TP_HOLDLWP)
			lwp_create_done(t);
		else {
			thread_lock(t);
			ASSERT(t->t_state == TS_STOPPED &&
			    !(t->t_schedflag & (TS_CREATE|TS_CSTART)));
			t->t_schedflag |= TS_CREATE;
			thread_unlock(t);
		}
	} while ((t = t->t_forw) != cp->p_tlist);
	mutex_exit(&cp->p_lock);

	if (isvfork) {
		/*
		 * vfork: mark the parent waiting (SVFWAIT) and start the
		 * child; the parent blocks in vfwait() until the child
		 * execs or exits and returns the address space.
		 */
		CPU_STATS_ADDQ(CPU, sys, sysvfork, 1);
		mutex_enter(&p->p_lock);
		p->p_flag |= SVFWAIT;
		curthread->t_flag |= T_VFPARENT;
		DTRACE_PROC1(create, proc_t *, cp);
		cv_broadcast(&pr_pid_cv[p->p_slot]);
		mutex_exit(&p->p_lock);
		mutex_enter(&cp->p_lock);
		mutex_exit(&pidlock);
		sigdefault(cp);
		continuelwps(cp);
		mutex_exit(&cp->p_lock);
	} else {
		CPU_STATS_ADDQ(CPU, sys, sysfork, 1);
		DTRACE_PROC1(create, proc_t *, cp);
		/* Let the scheduling class decide who runs next. */
		CL_FORKRET(curthread, cp->p_tlist);
		schedctl_set_cidpri(curthread);
		ASSERT(MUTEX_NOT_HELD(&pidlock));
	}

	return (r.r_vals);

forklwperr:
	/*
	 * An lwp failed to replicate (or the contract fork failed):
	 * unwind everything built so far, in reverse order.
	 */
	if (isvfork) {
		/* Give the stashed watched pages back and re-arm them. */
		if (avl_numnodes(&p->p_wpage) != 0) {
			as = p->p_as;
			AS_LOCK_ENTER(as, RW_WRITER);
			as->a_wpage = p->p_wpage;
			avl_create(&p->p_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			as_setwatch(as);
			AS_LOCK_EXIT(as);
		}
	} else {
		/* Tear down the duplicated address space. */
		if (cp->p_segacct)
			shmexit(cp);
		as = cp->p_as;
		cp->p_as = &kas;
		as_free(as);
	}

	/* Free the child's lwp directory and tid hash. */
	if (cp->p_lwpdir) {
		for (i = 0, ldp = cp->p_lwpdir; i < cp->p_lwpdir_sz; i++, ldp++)
			if ((lep = ldp->ld_entry) != NULL)
				kmem_free(lep, sizeof (*lep));
		kmem_free(cp->p_lwpdir,
		    cp->p_lwpdir_sz * sizeof (*cp->p_lwpdir));
	}
	cp->p_lwpdir = NULL;
	cp->p_lwpfree = NULL;
	cp->p_lwpdir_sz = 0;

	if (cp->p_tidhash)
		kmem_free(cp->p_tidhash,
		    cp->p_tidhash_sz * sizeof (*cp->p_tidhash));
	cp->p_tidhash = NULL;
	cp->p_tidhash_sz = 0;

	/* Free any lwps already cloned, then the proc-level state. */
	forklwp_fail(cp);
	fork_fail(cp);
	if (cp->p_dtrace_helpers != NULL) {
		ASSERT(dtrace_helpers_cleanup != NULL);
		(*dtrace_helpers_cleanup)(cp);
	}
	rctl_set_free(cp->p_rctls);
	mutex_enter(&pidlock);
	mutex_enter(&cp->p_lock);
	tk = cp->p_task;
	task_detach(cp);
	ASSERT(cp->p_pool->pool_ref > 0);
	atomic_dec_32(&cp->p_pool->pool_ref);
	mutex_exit(&cp->p_lock);

	disown_proc(p, cp);
	pid_exit(cp, tk);
	mutex_exit(&pidlock);
	task_rele(tk);

	/* Resume the parent's held lwps. */
	mutex_enter(&p->p_lock);
	pool_barrier_exit();
	continuelwps(p);
	mutex_exit(&p->p_lock);
	error = EAGAIN;
forkerr:
	return ((int64_t)set_errno(error));
}
/*
 * Undo the per-process resource grabs made by getproc() for a child
 * that cannot be completed: the file-table count, queued signals,
 * per-user process count, credential hold, file list, and the
 * vnode/refstr holds taken on the parent's directories.
 */
static void
fork_fail(proc_t *cp)
{
	uf_info_t *fip = P_FINFO(cp);

	fcnt_add(fip, -1);
	sigdelq(cp, NULL, 0);

	/* The upcount was bumped in getproc(); release it under pidlock. */
	mutex_enter(&pidlock);
	upcount_dec(crgetruid(cp->p_cred), crgetzoneid(cp->p_cred));
	mutex_exit(&pidlock);

	crfree(cp->p_cred);
	kmem_free(fip->fi_list, fip->fi_nfiles * sizeof (uf_entry_t));

	/*
	 * Release the holds getproc() took on the current process's
	 * cwd/root vnodes and cwd refstr, and on the child's exec
	 * vnodes.
	 */
	VN_RELE(PTOU(curproc)->u_cdir);
	if (PTOU(curproc)->u_rdir)
		VN_RELE(PTOU(curproc)->u_rdir);
	if (cp->p_exec)
		VN_RELE(cp->p_exec);
	if (cp->p_execdir)
		VN_RELE(cp->p_execdir);
	if (PTOU(curproc)->u_cwd)
		refstr_rele(PTOU(curproc)->u_cwd);
	if (PROC_IS_BRANDED(cp)) {
		brand_clearbrand(cp, B_TRUE);
	}
}
/*
 * Free all of the (stopped, never-run) lwps created for a child
 * process whose fork failed part way through.
 */
static void
forklwp_fail(proc_t *p)
{
	kthread_t *t;
	task_t *tk;
	int branded = 0;

	if (PROC_IS_BRANDED(p))
		branded = 1;

	while ((t = p->p_tlist) != NULL) {
		/* Unlink t from the process's circular thread list. */
		if (t != t->t_forw)
			p->p_tlist = t->t_forw;
		else
			p->p_tlist = NULL;
		p->p_lwpcnt--;
		t->t_forw->t_back = t->t_back;
		t->t_back->t_forw = t->t_forw;

		/* Return the lwp counts charged to task/project/zone. */
		tk = p->p_task;
		mutex_enter(&p->p_zone->zone_nlwps_lock);
		tk->tk_nlwps--;
		tk->tk_proj->kpj_nlwps--;
		p->p_zone->zone_nlwps--;
		mutex_exit(&p->p_zone->zone_nlwps_lock);

		ASSERT(t->t_schedctl == NULL);

		/* Give the brand a chance to free its per-lwp state. */
		if (branded)
			BROP(p)->b_freelwp(ttolwp(t));

		if (t->t_door != NULL) {
			kmem_free(t->t_door, sizeof (door_data_t));
			t->t_door = NULL;
		}
		lwp_ctmpl_clear(ttolwp(t));

		/* Remove t from the global thread list and its class. */
		mutex_enter(&pidlock);
		t->t_next->t_prev = t->t_prev;
		t->t_prev->t_next = t->t_next;
		CL_EXIT(t);
		cv_broadcast(&t->t_joincv);
		mutex_exit(&pidlock);

		/* Detach from its lgroup before freeing the thread. */
		kpreempt_disable();
		lgrp_move_thread(t, NULL, 1);
		kpreempt_enable();

		t->t_state = TS_FREE;
		thread_rele(t);
		thread_free(t);
	}
}
extern struct as kas;

/*
 * newproc() - create a new process running pc(arg) in scheduling class
 * cid at priority pri.  If pid != 0 that specific pid is requested
 * from pid_allocate() (via getproc()).
 *
 * Kernel-class processes are cloned from p0 and attached to task0;
 * user-class processes are cloned from curproc, moved into a fresh
 * task, and joined to a new process contract, which is returned via
 * *ct when ct != NULL.
 *
 * Returns 0 on success or EAGAIN on failure.
 */
int
newproc(void (*pc)(), caddr_t arg, id_t cid, int pri, struct contract **ct,
    pid_t pid)
{
	proc_t *p;
	struct user *up;
	kthread_t *t;
	cont_process_t *ctp = NULL;
	rctl_entity_p_t e;

	ASSERT(cid != sysdccid);
	ASSERT(cid != syscid || ct == NULL);
	if (CLASS_KERNEL(cid)) {
		rctl_alloc_gp_t *init_gp;
		rctl_set_t *init_set;

		ASSERT(pid != 1);

		if (getproc(&p, pid, GETPROC_KERNEL) < 0)
			return (EAGAIN);

		/*
		 * Drop the exec-vnode holds inherited through getproc();
		 * kernel processes have no exec file.
		 */
		if (p->p_execdir != NULL)
			VN_RELE(p->p_execdir);
		if (p->p_exec != NULL)
			VN_RELE(p->p_exec);
		p->p_flag |= SNOWAIT;
		p->p_exec = NULL;
		p->p_execdir = NULL;

		init_set = rctl_set_create();
		init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS);

		/* Clear signal/fault masks and /proc tracing state. */
		sigemptyset(&p->p_sigmask);
		premptyset(&p->p_fltmask);
		up = PTOU(p);
		up->u_systrap = 0;
		premptyset(&(up->u_entrymask));
		premptyset(&(up->u_exitmask));
		mutex_enter(&p->p_lock);
		e.rcep_p.proc = p;
		e.rcep_t = RCENTITY_PROCESS;
		p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set,
		    init_gp);
		mutex_exit(&p->p_lock);

		rctl_prealloc_destroy(init_gp);

		t = lwp_kernel_create(p, pc, arg, TS_STOPPED, pri);
	} else {
		rctl_alloc_gp_t *init_gp, *default_gp;
		rctl_set_t *init_set;
		task_t *tk, *tk_old;
		klwp_t *lwp;

		if (getproc(&p, pid, GETPROC_USER) < 0)
			return (EAGAIN);
		/*
		 * Move the new process out of the task getproc()
		 * attached it to and into a new task in the same zone.
		 */
		tk = task_create(0, p->p_zone);
		mutex_enter(&tk->tk_zone->zone_nlwps_lock);
		tk->tk_proj->kpj_ntasks++;
		tk->tk_nprocs++;
		mutex_exit(&tk->tk_zone->zone_nlwps_lock);

		default_gp = rctl_rlimit_set_prealloc(RLIM_NLIMITS);
		init_gp = rctl_set_init_prealloc(RCENTITY_PROCESS);
		init_set = rctl_set_create();

		mutex_enter(&pidlock);
		mutex_enter(&p->p_lock);
		tk_old = p->p_task;	/* the task set by getproc() */
		task_detach(p);
		task_begin(tk, p);
		mutex_exit(&pidlock);

		/* Return the nprocs charge taken against the old task. */
		mutex_enter(&tk_old->tk_zone->zone_nlwps_lock);
		tk_old->tk_nprocs--;
		mutex_exit(&tk_old->tk_zone->zone_nlwps_lock);

		e.rcep_p.proc = p;
		e.rcep_t = RCENTITY_PROCESS;
		p->p_rctls = rctl_set_init(RCENTITY_PROCESS, p, &e, init_set,
		    init_gp);
		rctlproc_default_init(p, default_gp);
		mutex_exit(&p->p_lock);

		task_rele(tk_old);
		rctl_prealloc_destroy(default_gp);
		rctl_prealloc_destroy(init_gp);

		if ((lwp = lwp_create(pc, arg, 0, p, TS_STOPPED, pri,
		    &curthread->t_hold, cid, 1)) == NULL) {
			task_t *tk;

			/* lwp creation failed: dismantle the process. */
			fork_fail(p);
			mutex_enter(&pidlock);
			disown_proc(p->p_parent, p);

			mutex_enter(&p->p_lock);
			tk = p->p_task;
			task_detach(p);
			ASSERT(p->p_pool->pool_ref > 0);
			atomic_add_32(&p->p_pool->pool_ref, -1);
			mutex_exit(&p->p_lock);

			pid_exit(p, tk);
			mutex_exit(&pidlock);
			task_rele(tk);
			return (EAGAIN);
		}
		t = lwptot(lwp);

		/* Join the new process to a fresh process contract. */
		ctp = contract_process_fork(sys_process_tmpl, p, curproc,
		    B_FALSE);
		ASSERT(ctp != NULL);
		if (ct != NULL)
			*ct = &ctp->conp_contract;
	}

	ASSERT3U(t->t_tid, ==, 1);
	p->p_lwpid = 1;
	mutex_enter(&pidlock);
	pgjoin(p, p->p_parent->p_pgidp);
	p->p_stat = SRUN;
	mutex_enter(&p->p_lock);

	/* Release the hold on the new lwp and let it run. */
	t->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(t);
	mutex_exit(&p->p_lock);
	mutex_exit(&pidlock);
	return (0);
}
/*
 * getproc() - allocate and minimally initialize a new proc structure,
 * cloned from curproc (GETPROC_USER) or from p0 (GETPROC_KERNEL).
 * A pid of 0 means "any pid"; otherwise that pid is requested from
 * pid_allocate().
 *
 * Returns 0 with *cpp set on success, or -1 on failure.  On the
 * resource-limited failure paths the caller is deliberately delayed
 * in proportion to the number of concurrently failing forks.
 */
static int
getproc(proc_t **cpp, pid_t pid, uint_t flags)
{
	proc_t *pp, *cp;
	pid_t newpid;
	struct user *uarea;
	extern uint_t nproc;
	struct cred *cr;
	uid_t ruid;
	zoneid_t zoneid;
	task_t *task;
	kproject_t *proj;
	zone_t *zone;
	int rctlfail = 0;

	/* No new processes once the zone begins shutting down. */
	if (zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)
		return (-1);

	pp = (flags & GETPROC_KERNEL) ? &p0 : curproc;
	task = pp->p_task;
	proj = task->tk_proj;
	zone = pp->p_zone;

	mutex_enter(&pp->p_lock);
	mutex_enter(&zone->zone_nlwps_lock);

	/*
	 * Test the task/project/zone process-count resource controls;
	 * the system project (proj0p) is exempt.
	 */
	if (proj != proj0p) {
		if (task->tk_nprocs >= task->tk_nprocs_ctl)
			if (rctl_test(rc_task_nprocs, task->tk_rctls,
			    pp, 1, 0) & RCT_DENY)
				rctlfail = 1;

		if (proj->kpj_nprocs >= proj->kpj_nprocs_ctl)
			if (rctl_test(rc_project_nprocs, proj->kpj_rctls,
			    pp, 1, 0) & RCT_DENY)
				rctlfail = 1;

		if (zone->zone_nprocs >= zone->zone_nprocs_ctl)
			if (rctl_test(rc_zone_nprocs, zone->zone_rctls,
			    pp, 1, 0) & RCT_DENY)
				rctlfail = 1;

		if (rctlfail) {
			mutex_exit(&zone->zone_nlwps_lock);
			mutex_exit(&pp->p_lock);
			atomic_inc_32(&zone->zone_ffcap);
			goto punish;
		}
	}

	/* Charge the new process to its task, project and zone. */
	task->tk_nprocs++;
	proj->kpj_nprocs++;
	zone->zone_nprocs++;
	mutex_exit(&zone->zone_nlwps_lock);
	mutex_exit(&pp->p_lock);

	cp = kmem_cache_alloc(process_cache, KM_SLEEP);
	bzero(cp, sizeof (proc_t));

	/* Initialize the embedded locks and basic identity fields. */
	mutex_init(&cp->p_splock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cp->p_crlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cp->p_pflock, NULL, MUTEX_DEFAULT, NULL);
#if defined(__x86)
	mutex_init(&cp->p_ldtlock, NULL, MUTEX_DEFAULT, NULL);
#endif
	mutex_init(&cp->p_maplock, NULL, MUTEX_DEFAULT, NULL);
	cp->p_stat = SIDL;
	cp->p_mstart = gethrtime();
	cp->p_as = &kas;
	cp->p_zone = pp->p_zone;
	cp->p_t1_lgrpid = LGRP_NONE;
	cp->p_tr_lgrpid = LGRP_NONE;

	if ((newpid = pid_allocate(cp, pid, PID_ALLOC_PROC)) == -1) {
		if (nproc == v.v_proc) {
			CPU_STATS_ADDQ(CPU, sys, procovf, 1);
			cmn_err(CE_WARN, "out of processes");
		}
		goto bad;
	}

	/*
	 * Inherit the parent's exec file and directory, taking a fresh
	 * hold and re-opening the exec vnode; a VOP_OPEN failure aborts
	 * the fork.
	 */
	mutex_enter(&pp->p_lock);
	cp->p_exec = pp->p_exec;
	cp->p_execdir = pp->p_execdir;
	mutex_exit(&pp->p_lock);

	if (cp->p_exec) {
		VN_HOLD(cp->p_exec);
		if (VOP_OPEN(&cp->p_exec, FREAD, CRED(), NULL) != 0) {
			VN_RELE(cp->p_exec);
			cp->p_exec = NULLVP;
			cp->p_execdir = NULLVP;
			goto bad;
		}
	}
	if (cp->p_execdir)
		VN_HOLD(cp->p_execdir);

	mutex_enter(&pidlock);

	ASSERT(nproc < v.v_proc);	/* pid_allocate() succeeded above */
	cr = CRED();
	ruid = crgetruid(cr);
	zoneid = crgetzoneid(cr);

	/*
	 * Enforce the per-user process limits (v_maxup / v_maxupttl)
	 * unless policy grants this credential an exemption.
	 */
	if (nproc >= v.v_maxup &&
	    (nproc >= v.v_maxupttl ||
	    upcount_get(ruid, zoneid) >= v.v_maxup) &&
	    secpolicy_newproc(cr) != 0) {
		mutex_exit(&pidlock);
		zcmn_err(zoneid, CE_NOTE,
		    "out of per-user processes for uid %d", ruid);
		goto bad;
	}

	/* Account for the new process and link it onto practive. */
	nproc++;
	upcount_inc(ruid, zoneid);
	cp->p_next = practive;
	practive->p_prev = cp;
	practive = cp;

	/* Inherit signal dispositions, session and brand state. */
	cp->p_ignore = pp->p_ignore;
	cp->p_siginfo = pp->p_siginfo;
	cp->p_flag = pp->p_flag & (SJCTL|SNOWAIT|SNOCD);
	cp->p_sessp = pp->p_sessp;
	sess_hold(pp);
	cp->p_brand = pp->p_brand;
	if (PROC_IS_BRANDED(pp))
		BROP(pp)->b_copy_procdata(cp, pp);

	/* Inherit address-space layout bookkeeping. */
	cp->p_bssbase = pp->p_bssbase;
	cp->p_brkbase = pp->p_brkbase;
	cp->p_brksize = pp->p_brksize;
	cp->p_brkpageszc = pp->p_brkpageszc;
	cp->p_stksize = pp->p_stksize;
	cp->p_stkpageszc = pp->p_stkpageszc;
	cp->p_stkprot = pp->p_stkprot;
	cp->p_datprot = pp->p_datprot;
	cp->p_usrstack = pp->p_usrstack;
	cp->p_model = pp->p_model;
	cp->p_ppid = pp->p_pid;
	cp->p_ancpid = pp->p_pid;
	cp->p_portcnt = pp->p_portcnt;
	cp->p_secflags = pp->p_secflags;

	/* The child starts with an empty watched-area tree. */
	avl_create(&cp->p_warea, wa_compare, sizeof (struct watched_area),
	    offsetof(struct watched_area, wa_link));

	/* Inherit fixed resource-control values. */
	cp->p_stk_ctl = pp->p_stk_ctl;
	cp->p_fsz_ctl = pp->p_fsz_ctl;
	cp->p_vmem_ctl = pp->p_vmem_ctl;
	cp->p_fno_ctl = pp->p_fno_ctl;

	/* Link the child into the parent's child/orphan lists. */
	cp->p_sibling = pp->p_child;
	if (pp->p_child)
		pp->p_child->p_psibling = cp;
	cp->p_parent = pp;
	pp->p_child = cp;
	cp->p_child_ns = NULL;
	cp->p_sibling_ns = NULL;
	cp->p_nextorph = pp->p_orphan;
	cp->p_nextofkin = pp;
	pp->p_orphan = cp;

	/* Inherit profiling state; no rprof cyclic is armed yet. */
	cp->p_prof = pp->p_prof;
	cp->p_rprof_cyclic = CYCLIC_NONE;

	/* Bind the child to a pool and attach it to a task. */
	mutex_enter(&pp->p_lock);
	if (flags & GETPROC_KERNEL) {
		cp->p_pool = pool_default;
		cp->p_flag |= SSYS;
	} else {
		cp->p_pool = pp->p_pool;
	}
	atomic_inc_32(&cp->p_pool->pool_ref);
	mutex_exit(&pp->p_lock);

	mutex_enter(&cp->p_lock);
	if (flags & GETPROC_KERNEL)
		task_attach(task0p, cp);
	else
		task_attach(pp->p_task, cp);
	mutex_exit(&cp->p_lock);
	mutex_exit(&pidlock);

	avl_create(&cp->p_ct_held, contract_compar, sizeof (contract_t),
	    offsetof(contract_t, ct_ctlist));

	if (audit_active)
		audit_newproc(cp);

	crhold(cp->p_cred = cr);

	/* Duplicate the u-area and the open-file table. */
	fcnt_add(P_FINFO(pp), 1);

	if (PTOU(pp)->u_cdir) {
		VN_HOLD(PTOU(pp)->u_cdir);
	} else {
		ASSERT(pp == &p0);	/* only p0 may have no cwd */
	}
	if (PTOU(pp)->u_rdir)
		VN_HOLD(PTOU(pp)->u_rdir);
	if (PTOU(pp)->u_cwd)
		refstr_hold(PTOU(pp)->u_cwd);

	uarea = PTOU(cp);
	bcopy(PTOU(pp), uarea, sizeof (*uarea));
	flist_fork(P_FINFO(pp), P_FINFO(cp));

	gethrestime(&uarea->u_start);
	uarea->u_ticks = ddi_get_lbolt();
	uarea->u_mem = rm_asrss(pp->p_as);
	uarea->u_acflag = AFORK;

	/*
	 * If the parent is traced across fork (P_PR_FORK), inherit its
	 * tracing flags and masks; otherwise start tracing-clean.
	 */
	if ((pp->p_proc_flag & P_PR_FORK) != 0) {
		cp->p_proc_flag |= pp->p_proc_flag & (P_PR_TRACE|P_PR_FORK);
		cp->p_sigmask = pp->p_sigmask;
		cp->p_fltmask = pp->p_fltmask;
	} else {
		sigemptyset(&cp->p_sigmask);
		premptyset(&cp->p_fltmask);
		uarea->u_systrap = 0;
		premptyset(&uarea->u_entrymask);
		premptyset(&uarea->u_exitmask);
	}
	if ((pp->p_flag & SMSFORK) != 0)
		cp->p_flag |= pp->p_flag & (SMSFORK|SMSACCT);

	cp->p_fixalignment = pp->p_fixalignment;

	*cpp = cp;
	return (0);

bad:
	ASSERT(MUTEX_NOT_HELD(&pidlock));

	mutex_destroy(&cp->p_crlock);
	mutex_destroy(&cp->p_pflock);
#if defined(__x86)
	mutex_destroy(&cp->p_ldtlock);
#endif
	if (newpid != -1) {
		proc_entry_free(cp->p_pidp);
		(void) pid_rele(cp->p_pidp);
	}
	kmem_cache_free(process_cache, cp);

	/* Return the nprocs charges taken above. */
	mutex_enter(&zone->zone_nlwps_lock);
	task->tk_nprocs--;
	proj->kpj_nprocs--;
	zone->zone_nprocs--;
	mutex_exit(&zone->zone_nlwps_lock);

	atomic_inc_32(&zone->zone_ffnoproc);

punish:
	/*
	 * Delay the failing forker, scaled by the number of forks
	 * currently failing, to throttle repeated failing forks.
	 */
	INCR_COUNT(&fork_fail_pending, &pidlock);
	delay(fork_fail_pending / ncpus + 1);
	DECR_COUNT(&fork_fail_pending, &pidlock);
	return (-1);
}
/*
 * Release the current process's address space.  A vfork child
 * (SVFORK) returns the borrowed address space to its parent and wakes
 * it; any other process frees its own address space.
 */
void
relvm()
{
	proc_t *p = curproc;

	ASSERT((unsigned)p->p_lwpcnt <= 1);

	prrelvm();	/* inform /proc */

	if (p->p_flag & SVFORK) {
		proc_t *pp = p->p_parent;
		/*
		 * Both p_lock's are needed to hand the address space
		 * back; back off and retry if the parent is /proc
		 * locked (P_PR_LOCK), waiting at its barrier first.
		 */
	try_again:
		mutex_enter(&p->p_lock);
		prbarrier(p);
		mutex_enter(&pp->p_lock);
		if (pp->p_proc_flag & P_PR_LOCK) {
			mutex_exit(&p->p_lock);
			prbarrier(pp);
			mutex_exit(&pp->p_lock);
			goto try_again;
		}
		p->p_flag &= ~SVFORK;

		/* Detach this thread from the borrowed address space. */
		kpreempt_disable();
		p->p_as = &kas;
		hat_thread_exit(curthread);
		kpreempt_enable();

		/*
		 * Propagate brk/stack sizes and the shm accounting
		 * (which may have changed while we held the as) back
		 * to the parent.
		 */
		pp->p_brkbase = p->p_brkbase;
		pp->p_brksize = p->p_brksize;
		pp->p_stksize = p->p_stksize;
		pp->p_segacct = p->p_segacct;
		p->p_segacct = NULL;

		pp->p_flag &= ~SVFWAIT;

		/*
		 * Restore the watched-page list stashed by cfork();
		 * vfwait() re-arms the watchpoints.
		 */
		if (avl_numnodes(&pp->p_wpage) != 0) {
			pp->p_as->a_wpage = pp->p_wpage;
			avl_create(&pp->p_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
		}
		cv_signal(&pp->p_cv);	/* wake the parent in vfwait() */
		mutex_exit(&pp->p_lock);
		mutex_exit(&p->p_lock);
	} else {
		if (p->p_as != &kas) {
			struct as *as;

			if (p->p_segacct)
				shmexit(p);

			/* Switch to kas before freeing our own as. */
			kpreempt_disable();
			mutex_enter(&p->p_lock);
			prbarrier(p);	/* keep /proc out while switching */
			as = p->p_as;
			p->p_as = &kas;
			mutex_exit(&p->p_lock);
			hat_thread_exit(curthread);
			kpreempt_enable();
			as_free(as);
			p->p_tr_lgrpid = LGRP_NONE;
		}
	}
}
/*
 * Wait for the vfork child with the given pid to give back our
 * address space (clear SVFORK in relvm()), then re-arm any
 * watchpoints and resume this process's held lwps.
 */
void
vfwait(pid_t pid)
{
	int signalled = 0;
	proc_t *pp = ttoproc(curthread);
	proc_t *cp;

	/*
	 * Loop until SVFORK is clear on the child, or until the child
	 * has exited or been reparented away from us.
	 */
	for (;;) {
		mutex_enter(&pidlock);
		cp = prfind(pid);
		if (cp == NULL || cp->p_parent != pp) {
			/* Child is gone or no longer ours. */
			mutex_exit(&pidlock);
			break;
		}
		mutex_enter(&cp->p_lock);
		mutex_exit(&pidlock);
		if (!(cp->p_flag & SVFORK)) {
			/* The child has returned the address space. */
			mutex_exit(&cp->p_lock);
			break;
		}
		mutex_enter(&pp->p_lock);
		mutex_exit(&cp->p_lock);
		/*
		 * Wait interruptibly until a signal is noted; after
		 * that, wait uninterruptibly so the hand-back always
		 * completes before we proceed.
		 */
		if (signalled)
			cv_wait(&pp->p_cv, &pp->p_lock);
		else
			signalled = !cv_wait_sig(&pp->p_cv, &pp->p_lock);
		mutex_exit(&pp->p_lock);
	}

	/* Re-establish watchpoints disarmed for the vfork hand-off. */
	if (pr_watch_active(pp)) {
		struct as *as = pp->p_as;

		AS_LOCK_ENTER(as, RW_WRITER);
		as_setwatch(as);
		AS_LOCK_EXIT(as);
	}

	/* Restart any lwps held across the vfork. */
	mutex_enter(&pp->p_lock);
	prbarrier(pp);
	continuelwps(pp);
	mutex_exit(&pp->p_lock);
}