Example #1
static int
splat_cred_test1(struct file *file, void *arg)
{
	char str[GROUP_STR_SIZE];
	uid_t uid, ruid, suid;
	gid_t gid, rgid, sgid, *groups;
	int ngroups, i, count = 0;

	uid  = crgetuid(CRED());
	ruid = crgetruid(CRED());
	suid = crgetsuid(CRED());

	gid  = crgetgid(CRED());
	rgid = crgetrgid(CRED());
	sgid = crgetsgid(CRED());

	crhold(CRED());
	ngroups = crgetngroups(CRED());
	groups  = crgetgroups(CRED());

	memset(str, 0, GROUP_STR_SIZE);
	for (i = 0; i < ngroups; i++) {
		count += sprintf(str + count, "%d ", groups[i]);

		if (count > (GROUP_STR_SIZE - GROUP_STR_REDZONE)) {
			splat_vprint(file, SPLAT_CRED_TEST1_NAME,
				     "Failed too many group entries for temp "
				     "buffer: %d, %s\n", ngroups, str);
			return -ENOSPC;
		}
	}

	crfree(CRED());

	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "uid: %d ruid: %d suid: %d "
		     "gid: %d rgid: %d sgid: %d\n",
		     uid, ruid, suid, gid, rgid, sgid);
	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "ngroups: %d groups: %s\n", ngroups, str);

	if (uid || ruid || suid || gid || rgid || sgid) {
		splat_vprint(file, SPLAT_CRED_TEST1_NAME,
			     "Failed expected all uids+gids to be %d\n", 0);
		return -EIDRM;
	}

	if (ngroups > NGROUPS_MAX) {
		splat_vprint(file, SPLAT_CRED_TEST1_NAME,
			     "Failed ngroups must not exceed NGROUPS_MAX: "
			     "%d > %d\n", ngroups, NGROUPS_MAX);
		return -EIDRM;
	}

	splat_vprint(file, SPLAT_CRED_TEST1_NAME,
		     "Success sane CRED(): %d\n", 0);

	return 0;
} /* splat_cred_test1() */
Example #2
/*
 *
 * Cachefs used to know too much about how creds looked; since it's
 * committed to persistent storage, we can't change the layout so
 * it now has a "dl_cred_t" which (unsurprisingly) looks exactly like
 * an old credential.
 *
 * The dst argument needs to point to:
 *		struct dl_cred_t;
 *		<buffer space>			buffer for groups
 *
 * The source is a proper kernel cred_t.
 *
 */
static size_t
copy_cred(cred_t *src, dl_cred_t *dst)
{
	int n;
	const gid_t *sgrp = crgetgroups(src);

	n = MIN(NGROUPS_MAX_DEFAULT, crgetngroups(src));

	/* copy the fixed fields */
	dst->cr_uid = crgetuid(src);
	dst->cr_ruid = crgetruid(src);
	dst->cr_suid = crgetsuid(src);
	dst->cr_gid = crgetgid(src);
	dst->cr_rgid = crgetrgid(src);
	dst->cr_sgid = crgetsgid(src);
	dst->cr_groups[0] = sgrp[0];

	dst->cr_ngroups = n;
	bcopy(sgrp, (void *)(dst + 1), (n - 1) * sizeof (gid_t));
	return (sizeof (dl_cred_t) + (n - 1) * sizeof (gid_t));
}
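For orientation, the block comment above only describes the destination layout in words. The struct below is an illustrative reconstruction inferred from the assignments in copy_cred(); it is not the actual cachefs header definition, and the real field order and types may differ.

/*
 * Illustrative reconstruction of the destination layout used by copy_cred()
 * above -- inferred from the assignments, NOT the real cachefs definition.
 * The first supplementary group is stored inline; any remaining groups are
 * written into the buffer space immediately following the structure, which
 * is why the function returns sizeof (dl_cred_t) + (n - 1) * sizeof (gid_t).
 */
typedef struct dl_cred {
	uid_t	cr_uid;		/* effective user id */
	uid_t	cr_ruid;	/* real user id */
	uid_t	cr_suid;	/* saved user id */
	gid_t	cr_gid;		/* effective group id */
	gid_t	cr_rgid;	/* real group id */
	gid_t	cr_sgid;	/* saved group id */
	uint_t	cr_ngroups;	/* number of groups in cr_groups[] */
	gid_t	cr_groups[1];	/* first group; the rest follow the struct */
} dl_cred_t;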
Example #3
/*
 * Allocate a new lxproc node
 *
 * This also allocates the vnode associated with it
 */
lxpr_node_t *
lxpr_getnode(vnode_t *dp, lxpr_nodetype_t type, proc_t *p, int fd)
{
	lxpr_node_t *lxpnp;
	vnode_t *vp;
	user_t *up;
	timestruc_t now;

	/*
	 * Allocate a new node.  It is deallocated in vop_inactive.
	 */
	lxpnp = kmem_cache_alloc(lxpr_node_cache, KM_SLEEP);

	/*
	 * Set defaults (may be overridden below)
	 */
	gethrestime(&now);
	lxpnp->lxpr_type = type;
	lxpnp->lxpr_realvp = NULL;
	lxpnp->lxpr_parent = dp;
	VN_HOLD(dp);
	if (p != NULL) {
		lxpnp->lxpr_pid = ((p->p_pid ==
		    curproc->p_zone->zone_proc_initpid) ? 1 : p->p_pid);

		lxpnp->lxpr_time = PTOU(p)->u_start;
		lxpnp->lxpr_uid = crgetruid(p->p_cred);
		lxpnp->lxpr_gid = crgetrgid(p->p_cred);
		lxpnp->lxpr_ino = lxpr_inode(type, p->p_pid, fd);
	} else {
		/* Pretend files without a proc belong to sched */
		lxpnp->lxpr_pid = 0;
		lxpnp->lxpr_time = now;
		lxpnp->lxpr_uid = lxpnp->lxpr_gid = 0;
		lxpnp->lxpr_ino = lxpr_inode(type, 0, 0);
	}

	/* initialize the vnode data */
	vp = lxpnp->lxpr_vnode;
	vn_reinit(vp);
	vp->v_flag = VNOCACHE|VNOMAP|VNOSWAP|VNOMOUNT;
	vp->v_vfsp = dp->v_vfsp;

	/*
	 * Do node-specific initialization
	 */
	switch (type) {
	case LXPR_PROCDIR:
		vp->v_flag |= VROOT;
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by everyone */
		break;

	case LXPR_PID_CURDIR:
		ASSERT(p != NULL);

		/*
		 * Zombie check.  p_stat is officially protected by pidlock,
		 * but we can't grab pidlock here because we already hold
		 * p_lock.  Luckily if we look at the process exit code
		 * we see that p_stat only transitions from SRUN to SZOMB
		 * while p_lock is held.  Aside from this, the only other
		 * p_stat transition that we need to be aware of is
		 * SIDL to SRUN, but that's not a problem since lxpr_lock()
		 * ignores nodes in the SIDL state so we'll never get a node
		 * that isn't already in the SRUN state.
		 */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			lxpnp->lxpr_realvp = up->u_cdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone can do anything */
		break;

	case LXPR_PID_ROOTDIR:
		ASSERT(p != NULL);
		/* Zombie check.  see locking comment above */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			lxpnp->lxpr_realvp =
			    up->u_rdir != NULL ? up->u_rdir : rootdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone can do anything */
		break;

	case LXPR_PID_EXE:
		ASSERT(p != NULL);
		lxpnp->lxpr_realvp = p->p_exec;
		if (lxpnp->lxpr_realvp != NULL) {
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;
		break;

	case LXPR_SELF:
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone can do anything */
		break;

	case LXPR_PID_FD_FD:
		ASSERT(p != NULL);
		/* lxpr_realvp is set after we return */
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0700;	/* read-write-execute by owner only */
		break;

	case LXPR_PID_FDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0500;	/* read-search by owner only */
		break;

	case LXPR_PIDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0511;	/* read-search by owner, search by all */
		break;

	case LXPR_SYSDIR:
	case LXPR_SYS_FSDIR:
	case LXPR_SYS_FS_INOTIFYDIR:
	case LXPR_SYS_KERNELDIR:
	case LXPR_NETDIR:
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by all */
		break;

	case LXPR_PID_ENV:
	case LXPR_PID_MEM:
		ASSERT(p != NULL);
		/*FALLTHRU*/
	case LXPR_KCORE:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0400;	/* read-only by owner only */
		break;

	default:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0444;	/* read-only by all */
		break;
	}

	return (lxpnp);
}
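For context, a hypothetical caller of lxpr_getnode() might look like the sketch below. The helper name is invented for illustration; the real lxprocfs lookup code adds the locking (lxpr_lock()/lxpr_unlock()) and error handling that are omitted here.

/*
 * Hypothetical caller sketch (invented for illustration, not the real
 * lxprocfs lookup path): build a /proc/<pid>/exe node under the directory
 * vnode `dp' for an already-located process `p', and hand back the vnode
 * embedded in the new node.
 */
static vnode_t *
lxpr_make_exe_node(vnode_t *dp, proc_t *p)
{
	lxpr_node_t *lxpnp;

	/* fd is only meaningful for fd-related node types; pass 0 here */
	lxpnp = lxpr_getnode(dp, LXPR_PID_EXE, p, 0);
	return (lxpnp->lxpr_vnode);
}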
Example #4
/**
 * open() worker.
 */
static int VBoxDrvSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    const bool          fUnrestricted = getminor(*pDev) == 0;
    PSUPDRVSESSION      pSession;
    int                 rc;

    LogFlowFunc(("VBoxDrvSolarisOpen: pDev=%p:%#x\n", pDev, *pDev));

    /*
     * Validate input
     */
    if (   (getminor(*pDev) != 0 && getminor(*pDev) != 1)
        || fType != OTYP_CHR)
        return EINVAL; /* See mmopen for precedent. */

#ifndef USE_SESSION_HASH
    /*
     * Locate a new device open instance.
     *
     * For each open call we'll allocate an item in the soft state of the device.
     * The item index is stored in the minor number of the dev_t.
     */
    vbox_devstate_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        LogRel(("VBoxDrvSolarisOpen: too many open instances.\n"));
        return ENXIO;
    }

    /*
     * Create a new session.
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        pState->pSession = pSession;
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        LogFlow(("VBoxDrvSolarisOpen: Dev=%#x pSession=%p pid=%d r0proc=%p thread=%p\n",
                 *pDev, pSession, RTProcSelf(), RTR0ProcHandleSelf(), RTThreadNativeSelf() ));
        return 0;
    }

    /* failed - clean up */
    ddi_soft_state_free(g_pVBoxDrvSolarisState, iOpenInstance);

#else
    /*
     * Create a new session.
     * Sessions in the Solaris driver are mostly useless; however, one is
     * needed by VBoxDrvSolarisIOCtlSlow() when it calls supdrvIOCtl().
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        unsigned        iHash;

        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        /*
         * Insert it into the hash table.
         */
# error "Only one entry per process!"
        iHash = SESSION_HASH(pSession->Process);
        RTSpinlockAcquire(g_Spinlock);
        pSession->pNextHash = g_apSessionHashTab[iHash];
        g_apSessionHashTab[iHash] = pSession;
        RTSpinlockRelease(g_Spinlock);
        LogFlow(("VBoxDrvSolarisOpen success\n"));
    }

    int instance;
    for (instance = 0; instance < DEVICE_MAXINSTANCES; instance++)
    {
        vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
        if (pState)
            break;
    }

    if (instance >= DEVICE_MAXINSTANCES)
    {
        LogRel(("VBoxDrvSolarisOpen: All instances exhausted\n"));
        return ENXIO;
    }

    *pDev = makedevice(getmajor(*pDev), instance);
#endif

    return VBoxSupDrvErr2SolarisErr(rc);
}
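Because the open path above stores the soft-state index in the minor number it hands back, a matching close path can recover everything it needs from the dev_t alone. The following is a hypothetical sketch of that counterpart, not the driver's actual close routine; the session teardown step is deliberately left as a placeholder comment.

/**
 * Hypothetical close-side counterpart to the non-USE_SESSION_HASH open path
 * above (sketch only, not the actual VBoxDrvSolarisClose).
 */
static int vboxDrvSolarisCloseSketch(dev_t Dev, int fFlag, int fType, cred_t *pCred)
{
    const unsigned   iOpenInstance = getminor(Dev);
    vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance);

    if (!pState)
        return ENXIO;

    /* ... tear down pState->pSession via the SUPDrv session API here ... */

    /* Release the per-open soft-state slot allocated in the open routine. */
    ddi_soft_state_free(g_pVBoxDrvSolarisState, iOpenInstance);
    return 0;
}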
Example #5
void
exacct_calculate_proc_usage(proc_t *p, proc_usage_t *pu, ulong_t *mask,
    int flag, int wstat)
{
	timestruc_t ts, ts_run;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Convert CPU and execution times to sec/nsec format.
	 */
	if (BT_TEST(mask, AC_PROC_CPU)) {
		hrt2ts(mstate_aggr_state(p, LMS_USER), &ts);
		pu->pu_utimesec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_utimensec = (uint64_t)(ulong_t)ts.tv_nsec;
		hrt2ts(mstate_aggr_state(p, LMS_SYSTEM), &ts);
		pu->pu_stimesec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_stimensec = (uint64_t)(ulong_t)ts.tv_nsec;
	}
	if (BT_TEST(mask, AC_PROC_TIME)) {
		gethrestime(&ts);
		pu->pu_finishsec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_finishnsec = (uint64_t)(ulong_t)ts.tv_nsec;
		hrt2ts(gethrtime() - p->p_mstart, &ts_run);
		ts.tv_sec -= ts_run.tv_sec;
		ts.tv_nsec -= ts_run.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_sec--;
			if ((ts.tv_nsec = ts.tv_nsec + NANOSEC) >= NANOSEC) {
				ts.tv_sec++;
				ts.tv_nsec -= NANOSEC;
			}
		}
		pu->pu_startsec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_startnsec = (uint64_t)(ulong_t)ts.tv_nsec;
	}

	pu->pu_pid = p->p_pidp->pid_id;
	pu->pu_acflag = p->p_user.u_acflag;
	pu->pu_projid = p->p_task->tk_proj->kpj_id;
	pu->pu_taskid = p->p_task->tk_tkid;
	pu->pu_major = getmajor(p->p_sessp->s_dev);
	pu->pu_minor = getminor(p->p_sessp->s_dev);
	pu->pu_ancpid = p->p_ancpid;
	pu->pu_wstat = wstat;
	/*
	 * Compute average RSS in K.  The denominator is the number of
	 * samples:  the number of clock ticks plus the initial value.
	 */
	pu->pu_mem_rss_avg = (PTOU(p)->u_mem / (p->p_stime + p->p_utime + 1)) *
	    (PAGESIZE / 1024);
	pu->pu_mem_rss_max = PTOU(p)->u_mem_max * (PAGESIZE / 1024);
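	/*
	 * Worked example with illustrative numbers: on a system with a 4K
	 * PAGESIZE, u_mem = 50000 page-samples accumulated over
	 * p_stime + p_utime = 99 ticks averages to 50000 / (99 + 1) = 500
	 * resident pages, reported as 500 * (4096 / 1024) = 2000K above.
	 */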

	mutex_enter(&p->p_crlock);
	pu->pu_ruid = crgetruid(p->p_cred);
	pu->pu_rgid = crgetrgid(p->p_cred);
	mutex_exit(&p->p_crlock);

	bcopy(p->p_user.u_comm, pu->pu_command, strlen(p->p_user.u_comm) + 1);
	bcopy(p->p_zone->zone_name, pu->pu_zonename,
	    strlen(p->p_zone->zone_name) + 1);
	bcopy(p->p_zone->zone_nodename, pu->pu_nodename,
	    strlen(p->p_zone->zone_nodename) + 1);

	/*
	 * Calculate microstate accounting data for a process that is still
	 * running.  Presently, we explicitly collect all of the LWP usage into
	 * the proc usage structure here.
	 */
	if (flag & EW_PARTIAL)
		exacct_calculate_proc_mstate(p, pu);
	if (flag & EW_FINAL)
		exacct_copy_proc_mstate(p, pu);
}