/*
 * cfork: common guts of fork(2), fork1(2), forkall(2) and vfork(2).
 *
 *   isvfork  - nonzero for vfork(): the child borrows the parent's
 *              address space (SVFORK) instead of getting a duplicate.
 *   isfork1  - nonzero for fork1(): only the calling lwp is replicated;
 *              forkall() replicates every lwp in the process.
 *   flags    - FORK_NOSIGCHLD and/or FORK_WAITPID only; anything else
 *              is rejected with EINVAL.
 *
 * On failure, returns via forkerr with errno-style `error` set
 * (EINVAL, ENOTSUP, EINTR, EAGAIN, or a secpolicy error).
 *
 * NOTE(review): this block is truncated in the current view — it ends
 * mid-function at the start of the non-vfork (address-space duplication)
 * branch, which continues beyond what is shown here.
 */
/* ARGSUSED */ static int64_t cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp, **orphpp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	/*
	 * Allow only these two flags.
	 */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		goto forkerr;
	}

	/*
	 * fork is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		goto forkerr;
	}

	/* Basic privilege check; propagates the policy's error code. */
	if ((error = secpolicy_basic_fork(CRED())) != 0)
		goto forkerr;

	/*
	 * If the calling lwp is doing a fork1() then the
	 * other lwps in this process are not duplicated and
	 * don't need to be held where their kernel stacks can be
	 * cloned.  If doing forkall(), the process is held with
	 * SHOLDFORK, so that the lwps are at a point where their
	 * stacks can be copied which is on entry or exit from
	 * the kernel.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
		/* Interrupted while trying to hold lwps: post an AST and bail. */
		aston(curthread);
		error = EINTR;
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully constructed
	 * before creating the child process structure.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * If this is vfork(), cancel any suspend request we might
	 * have gotten from some other thread via lwp_suspend().
	 * Otherwise we could end up with a deadlock on return
	 * from the vfork() in both the parent and the child.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;

	/*
	 * Prevent our resource set associations from being changed during fork.
	 */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Create a child proc struct.  Place a VN_HOLD on appropriate vnodes.
	 */
	if (getproc(&cp, 0) < 0) {
		/* Out of proc slots: undo the barrier, release held lwps, fail. */
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

	/*
	 * Assign an address space to child
	 */
	if (isvfork) {
		/*
		 * Clear any watched areas and remember the
		 * watched pages for restoring in vfwait().
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as_clearwatch(as);
			/* Stash the parent's watched-page tree; start a fresh one. */
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as, &as->a_lock);
		}
		/* vfork child shares the parent's address space until exec/exit. */
		cp->p_as = as;
		cp->p_flag |= SVFORK;
	} else {
/*
 * taskid_t tasksys_settaskid(projid_t projid, uint_t flags);
 *
 * Overview
 *   Place the calling process in a new task if sufficiently privileged.  If
 *   the present task is finalized, the process may not create a new task.
 *
 *   The new task is created in project `projid` within the caller's zone.
 *   Before the switch, the new project's resource controls (lwp count, task
 *   count, locked memory, crypto memory) are tested against the process's
 *   current consumption; if any would be exceeded and its rctl denies the
 *   request, the call fails without changing anything.
 *
 * Return values
 *   0 on success, errno on failure (via set_errno):
 *     EPERM  - caller lacks the tasksys privilege
 *     EINVAL - projid out of range, or flags other than TASK_FINAL
 *     EACCES - the current task is finalized (TASK_FINAL)
 *     EINTR  - failed to hold the process's other lwps
 *     EAGAIN - a project resource control denied the move
 *
 * Locking (as visible here): pidlock is taken briefly to check TASK_FINAL;
 * then p_lock -> zone_nlwps_lock -> zone_mem_lock -> kpd_crypto_lock for
 * the rctl tests and accounting transfer.  task_join() is documented below
 * as returning with p_lock held.
 */
static long
tasksys_settaskid(projid_t projid, uint_t flags)
{
	proc_t *p = ttoproc(curthread);
	kproject_t *oldpj;
	kproject_t *kpj;
	task_t *tk, *oldtk;
	rctl_entity_p_t e;
	zone_t *zone;
	int rctlfail = 0;

	if (secpolicy_tasksys(CRED()) != 0)
		return (set_errno(EPERM));

	if (projid < 0 || projid > MAXPROJID)
		return (set_errno(EINVAL));

	/* TASK_FINAL is the only flag accepted. */
	if (flags & ~TASK_FINAL)
		return (set_errno(EINVAL));

	/* A finalized task may not spawn a new task. */
	mutex_enter(&pidlock);
	if (p->p_task->tk_flags & TASK_FINAL) {
		mutex_exit(&pidlock);
		return (set_errno(EACCES));
	}
	mutex_exit(&pidlock);

	/*
	 * Try to stop all other lwps in the process while we're changing
	 * our project.  This way, curthread doesn't need to grab its own
	 * thread_lock to find its project ID (see curprojid()).  If this
	 * is the /proc agent lwp, we know that the other lwps are already
	 * held.  If we failed to hold all lwps, bail out and return EINTR.
	 */
	if (curthread != p->p_agenttp && !holdlwps(SHOLDFORK1))
		return (set_errno(EINTR));

	/*
	 * Put a hold on our new project and make sure that nobody is
	 * trying to bind it to a pool while we're joining.
	 */
	kpj = project_hold_by_id(projid, p->p_zone, PROJECT_HOLD_INSERT);
	e.rcep_p.proj = kpj;
	e.rcep_t = RCENTITY_PROJECT;

	mutex_enter(&p->p_lock);
	oldpj = p->p_task->tk_proj;
	zone = p->p_zone;

	mutex_enter(&zone->zone_nlwps_lock);
	mutex_enter(&zone->zone_mem_lock);

	/*
	 * Test each project rctl only when the post-move total would exceed
	 * the cached control value; RCT_DENY from the rctl vetoes the move.
	 */
	if (kpj->kpj_nlwps + p->p_lwpcnt > kpj->kpj_nlwps_ctl)
		if (rctl_test_entity(rc_project_nlwps, kpj->kpj_rctls, p, &e,
		    p->p_lwpcnt, 0) & RCT_DENY)
			rctlfail = 1;

	if (kpj->kpj_ntasks + 1 > kpj->kpj_ntasks_ctl)
		if (rctl_test_entity(rc_project_ntasks, kpj->kpj_rctls, p, &e,
		    1, 0) & RCT_DENY)
			rctlfail = 1;

	if (kpj->kpj_data.kpd_locked_mem + p->p_locked_mem >
	    kpj->kpj_data.kpd_locked_mem_ctl)
		if (rctl_test_entity(rc_project_locked_mem, kpj->kpj_rctls,
		    p, &e, p->p_locked_mem, 0) & RCT_DENY)
			rctlfail = 1;

	/* Crypto-memory accounting has its own per-project lock. */
	mutex_enter(&(kpj->kpj_data.kpd_crypto_lock));
	if (kpj->kpj_data.kpd_crypto_mem + p->p_crypto_mem >
	    kpj->kpj_data.kpd_crypto_mem_ctl)
		if (rctl_test_entity(rc_project_crypto_mem, kpj->kpj_rctls,
		    p, &e, p->p_crypto_mem, 0) & RCT_DENY)
			rctlfail = 1;

	if (rctlfail) {
		/* Denied: drop locks in reverse order, restart held lwps. */
		mutex_exit(&(kpj->kpj_data.kpd_crypto_lock));
		mutex_exit(&zone->zone_mem_lock);
		mutex_exit(&zone->zone_nlwps_lock);
		if (curthread != p->p_agenttp)
			continuelwps(p);
		mutex_exit(&p->p_lock);
		return (set_errno(EAGAIN));
	}

	/*
	 * Approved: move this process's consumption from the old project's
	 * books onto the new project's.
	 */
	kpj->kpj_data.kpd_crypto_mem += p->p_crypto_mem;
	mutex_exit(&(kpj->kpj_data.kpd_crypto_lock));
	kpj->kpj_data.kpd_locked_mem += p->p_locked_mem;
	kpj->kpj_nlwps += p->p_lwpcnt;
	kpj->kpj_ntasks++;

	oldpj->kpj_data.kpd_locked_mem -= p->p_locked_mem;
	mutex_enter(&(oldpj->kpj_data.kpd_crypto_lock));
	oldpj->kpj_data.kpd_crypto_mem -= p->p_crypto_mem;
	mutex_exit(&(oldpj->kpj_data.kpd_crypto_lock));
	oldpj->kpj_nlwps -= p->p_lwpcnt;

	mutex_exit(&zone->zone_mem_lock);
	mutex_exit(&zone->zone_nlwps_lock);
	mutex_exit(&p->p_lock);

	/* Block pool rebinding of the new project while we join its task. */
	mutex_enter(&kpj->kpj_poolbind);
	tk = task_create(projid, curproc->p_zone);
	mutex_enter(&cpu_lock);
	/*
	 * Returns with p_lock held.
	 */
	oldtk = task_join(tk, flags);
	if (curthread != p->p_agenttp)
		continuelwps(p);
	mutex_exit(&p->p_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&kpj->kpj_poolbind);
	/* Drop the old task's reference; our project_hold_by_id hold too. */
	task_rele(oldtk);
	project_rele(kpj);
	return (tk->tk_tkid);
}