Example No. 1
static int
pts_alloc(int fflags, struct thread *td, struct file *fp)
{
	int unit, ok, error;
	struct tty *tp;
	struct pts_softc *psc;
	struct proc *p = td->td_proc;
	struct ucred *cred = td->td_ucred;

	/* Resource limiting. */
	PROC_LOCK(p);
	error = racct_add(p, RACCT_NPTS, 1);
	if (error != 0) {
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	ok = chgptscnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPTS));
	if (!ok) {
		racct_sub(p, RACCT_NPTS, 1);
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	PROC_UNLOCK(p);

	/* Try to allocate a new pts unit number. */
	unit = alloc_unr(pts_pool);
	if (unit < 0) {
		racct_sub(p, RACCT_NPTS, 1);
		chgptscnt(cred->cr_ruidinfo, -1, 0);
		return (EAGAIN);
	}

	/* Allocate TTY and softc. */
	psc = malloc(sizeof(struct pts_softc), M_PTS, M_WAITOK|M_ZERO);
	cv_init(&psc->pts_inwait, "ptsin");
	cv_init(&psc->pts_outwait, "ptsout");

	psc->pts_unit = unit;
	psc->pts_cred = crhold(cred);

	tp = tty_alloc(&pts_class, psc);
	knlist_init_mtx(&psc->pts_inpoll.si_note, tp->t_mtx);
	knlist_init_mtx(&psc->pts_outpoll.si_note, tp->t_mtx);

	/* Expose the slave device as well. */
	tty_makedev(tp, td->td_ucred, "pts/%u", psc->pts_unit);

	finit(fp, fflags, DTYPE_PTS, tp, &ptsdev_ops);

	return (0);
}
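A note on the example above: both knlists are initialized against the TTY lock (tp->t_mtx), so the driver's wakeup paths can post kqueue events while that lock is held. The following is a hedged sketch of such a wakeup path; the function name is illustrative and only the locking relationship is taken from the example.
/*
 * Minimal sketch, assuming the standard tty_softc() accessor; not part of
 * the example above.
 */
static void
ptsdrv_outwakeup_sketch(struct tty *tp)
{
	struct pts_softc *psc = tty_softc(tp);

	/* Wake up sleeping writers and poll/select waiters. */
	cv_broadcast(&psc->pts_outwait);
	selwakeup(&psc->pts_outpoll);
	/* The knlist's lock (tp->t_mtx) is held here, so use the locked form. */
	KNOTE_LOCKED(&psc->pts_outpoll.si_note, 0);
}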
Example No. 2
/* Create a struct for tracking per-device suspend notification. */
static struct apm_clone_data *
apm_create_clone(struct cdev *dev, struct acpi_softc *acpi_sc)
{
	struct apm_clone_data *clone;

	clone = malloc(sizeof(*clone), M_APMDEV, M_WAITOK);
	clone->cdev = dev;
	clone->acpi_sc = acpi_sc;
	clone->notify_status = APM_EV_NONE;
	bzero(&clone->sel_read, sizeof(clone->sel_read));
	knlist_init_mtx(&clone->sel_read.si_note, &acpi_mutex);

	/*
	 * The acpi device is always managed by devd(8) and is considered
	 * writable (i.e., ack is required to allow suspend to proceed.)
	 */
	if (strcmp("acpi", devtoname(dev)) == 0)
		clone->flags = ACPI_EVF_DEVD | ACPI_EVF_WRITE;
	else
		clone->flags = ACPI_EVF_NONE;

	ACPI_LOCK(acpi);
	STAILQ_INSERT_TAIL(&acpi_sc->apm_cdevs, clone, entries);
	ACPI_UNLOCK(acpi);
	return (clone);
}
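For completeness, a hedged sketch of the matching teardown follows; the function name is an assumption, but a knlist initialized this way does need knlist_destroy() before the clone is freed.
static void
apm_destroy_clone_sketch(struct acpi_softc *acpi_sc, struct apm_clone_data *clone)
{

	/* Unlink the clone before tearing down its notification state. */
	ACPI_LOCK(acpi);
	STAILQ_REMOVE(&acpi_sc->apm_cdevs, clone, apm_clone_data, entries);
	ACPI_UNLOCK(acpi);

	/* Drain select/poll waiters, then release the knlist and the clone. */
	seldrain(&clone->sel_read);
	knlist_destroy(&clone->sel_read.si_note);
	free(clone, M_APMDEV);
}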
Example No. 3
int
pts_alloc_external(int fflags, struct thread *td, struct file *fp,
    struct cdev *dev, const char *name)
{
	int ok, error;
	struct tty *tp;
	struct pts_softc *psc;
	struct proc *p = td->td_proc;
	struct ucred *cred = td->td_ucred;

	/* Resource limiting. */
	PROC_LOCK(p);
	error = racct_add(p, RACCT_NPTS, 1);
	if (error != 0) {
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	ok = chgptscnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPTS));
	if (!ok) {
		racct_sub(p, RACCT_NPTS, 1);
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	PROC_UNLOCK(p);

	/* Allocate TTY and softc. */
	psc = malloc(sizeof(struct pts_softc), M_PTS, M_WAITOK|M_ZERO);
	cv_init(&psc->pts_inwait, "ptsin");
	cv_init(&psc->pts_outwait, "ptsout");

	psc->pts_unit = -1;
	psc->pts_cdev = dev;
	psc->pts_cred = crhold(cred);

	tp = tty_alloc(&pts_class, psc);
	knlist_init_mtx(&psc->pts_inpoll.si_note, tp->t_mtx);
	knlist_init_mtx(&psc->pts_outpoll.si_note, tp->t_mtx);

	/* Expose the slave device as well. */
	tty_makedev(tp, td->td_ucred, "%s", name);

	finit(fp, fflags, DTYPE_PTS, tp, &ptsdev_ops);

	return (0);
}
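Both allocation paths above charge resource limits, take a unit number or record an external cdev, and initialize two knlists. A hedged sketch of the corresponding cleanup, loosely modeled on what a tty class free callback would have to undo (the function name is illustrative):
static void
ptsdrv_free_sketch(void *softc)
{
	struct pts_softc *psc = softc;

	/* Return the unit number unless this was an external cdev. */
	if (psc->pts_unit >= 0)
		free_unr(pts_pool, psc->pts_unit);

	/* Undo the resource accounting performed at allocation time. */
	chgptscnt(psc->pts_cred->cr_ruidinfo, -1, 0);
	racct_sub_cred(psc->pts_cred, RACCT_NPTS, 1);
	crfree(psc->pts_cred);

	/* Tear down the selinfo/knlist pairs set up in pts_alloc*(). */
	seldrain(&psc->pts_inpoll);
	seldrain(&psc->pts_outpoll);
	knlist_destroy(&psc->pts_inpoll.si_note);
	knlist_destroy(&psc->pts_outpoll.si_note);

	/* An externally supplied cdev (Example No. 3) is destroyed separately. */
	if (psc->pts_cdev != NULL)
		destroy_dev_sched(psc->pts_cdev);

	free(psc, M_PTS);
}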
Example No. 4
static void
log_drvinit(void *unused)
{

	cv_init(&log_wakeup, "klog");
	callout_init_mtx(&logsoftc.sc_callout, &msgbuf_lock, 0);
	knlist_init_mtx(&logsoftc.sc_selp.si_note, &msgbuf_lock);
	make_dev_credf(MAKEDEV_ETERNAL, &log_cdevsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "klog");
}
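Since the klog knlist is tied to msgbuf_lock, readability can later be announced from a context that already holds that lock. A hedged sketch of such a notification step; the helper name is illustrative and only the locking relationship is taken from the example.
static void
log_notify_sketch(void)
{

	mtx_lock(&msgbuf_lock);
	/* msgbuf_lock protects the knlist, so the locked KNOTE form applies. */
	selwakeup(&logsoftc.sc_selp);
	KNOTE_LOCKED(&logsoftc.sc_selp.si_note, 0);
	mtx_unlock(&msgbuf_lock);
}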
Example No. 5
void nm_os_selinfo_init(NM_SELINFO_T *si) {
	struct mtx *m = &si->m;
	mtx_init(m, "nm_kn_lock", NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, m);
}
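The inverse operation is sketched below under the same assumptions about NM_SELINFO_T (an embedded selinfo si and mutex m, as in the example); the function name is illustrative.
void nm_os_selinfo_uninit_sketch(NM_SELINFO_T *si) {
	/* Detach remaining knotes, then destroy the knlist and its mutex. */
	knlist_delete(&si->si.si_note, curthread, 0 /* not locked */);
	knlist_destroy(&si->si.si_note);
	mtx_destroy(&si->m);
}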
Example No. 6
int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct thread *td2;
	struct vmspace *vm2;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new, ok;
	static int curfail;
	static struct timeval lastfail;
	int flags, pages;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

#ifdef PAX_SEGVGUARD
	if (td->td_proc->p_pid != 0) {
		error = pax_segvguard_check(curthread, curthread->td_proc->p_textvp,
				td->td_proc->p_comm);
		if (error)
			return (error);
	}
#endif

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/* Can't request a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);
	}

	p1 = td->td_proc;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}

	fp_procdesc = NULL;
	newproc = NULL;
	vm2 = NULL;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create. There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if ((nprocs_new >= maxproc - 10 && priv_check_cred(td->td_ucred,
	    PRIV_MAXPROC, 0) != 0) || nprocs_new >= maxproc) {
		error = EAGAIN;
		sx_xlock(&allproc_lock);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("maxproc limit exceeded by uid %u (pid %d); "
			    "see tuning(7) and login.conf(5)\n",
			    td->td_ucred->cr_ruid, p1->p_pid);
		}
		sx_xunlock(&allproc_lock);
		goto fail2;
	}

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong. We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = falloc_caps(td, &fp_procdesc, fr->fr_pd_fd, 0,
		    fr->fr_pd_fcaps);
		if (error != 0)
			goto fail2;
	}

	mem_charged = 0;
	if (pages == 0)
		pages = kstack_pages;
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		if (td2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		proc_linkup(newproc, td2);
	} else {
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				error = ENOMEM;
				goto fail2;
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		if (vm2 == NULL) {
			error = ENOMEM;
			goto fail2;
		}
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed. The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			error = ENOMEM;
			goto fail2;
		}
	} else
		vm2 = NULL;

	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 *      per-cred resource counters.
	 */
	proc_set_cred_init(newproc, crhold(td->td_ucred));

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);
	if (error != 0) {
		error = EAGAIN;
		goto fail1;
	}

#ifdef MAC
	mac_proc_init(newproc);
#endif
	knlist_init_mtx(&newproc->p_klist, &newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	sx_xlock(&allproc_lock);

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, 0);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(td, RLIMIT_NPROC));
	}
	if (ok) {
		do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
		return (0);
	}

	error = EAGAIN;
	sx_sunlock(&proctree_lock);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_proc_destroy(newproc);
#endif
	racct_proc_exit(newproc);
fail1:
	crfree(newproc->p_ucred);
	newproc->p_ucred = NULL;
fail2:
	if (vm2 != NULL)
		vmspace_free(vm2);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	pause("fork", hz / 2);
	return (error);
}
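The knlist_init_mtx() call in fork1() ties newproc->p_klist to the process mutex, which is what later lets EVFILT_PROC events be posted while the process is locked. A hedged sketch of such a notification site; the helper name is illustrative and the surrounding exit or signal path is elided.
static void
proc_exit_note_sketch(struct proc *p)
{

	/* p_klist was initialized with p_mtx, so PROC_LOCK covers the knlist. */
	PROC_LOCK(p);
	KNOTE_LOCKED(&p->p_klist, NOTE_EXIT);
	PROC_UNLOCK(p);
}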
Example No. 7
static int
cyapa_attach(device_t dev)
{
	struct cyapa_softc *sc;
	struct cyapa_cap cap;
	int unit;
	int addr;

	sc = device_get_softc(dev);
	sc->reporting_mode = 1;

	unit = device_get_unit(dev);
	addr = smbus_get_addr(dev);

	if (init_device(dev, &cap, addr, 0))
		return (ENXIO);

	mtx_init(&sc->mutex, "cyapa", NULL, MTX_DEF);

	sc->dev = dev;
	sc->addr = addr;

	knlist_init_mtx(&sc->selinfo.si_note, &sc->mutex);

	sc->cap_resx = ((cap.max_abs_xy_high << 4) & 0x0F00) |
	    cap.max_abs_x_low;
	sc->cap_resy = ((cap.max_abs_xy_high << 8) & 0x0F00) |
	    cap.max_abs_y_low;
	sc->cap_phyx = ((cap.phy_siz_xy_high << 4) & 0x0F00) |
	    cap.phy_siz_x_low;
	sc->cap_phyy = ((cap.phy_siz_xy_high << 8) & 0x0F00) |
	    cap.phy_siz_y_low;
	sc->cap_buttons = cap.buttons;

	device_printf(dev, "%5.5s-%6.6s-%2.2s buttons=%c%c%c res=%dx%d\n",
	    cap.prod_ida, cap.prod_idb, cap.prod_idc,
	    ((cap.buttons & CYAPA_FNGR_LEFT) ? 'L' : '-'),
	    ((cap.buttons & CYAPA_FNGR_MIDDLE) ? 'M' : '-'),
	    ((cap.buttons & CYAPA_FNGR_RIGHT) ? 'R' : '-'),
	    sc->cap_resx, sc->cap_resy);

	sc->hw.buttons = 5;
	sc->hw.iftype = MOUSE_IF_PS2;
	sc->hw.type = MOUSE_MOUSE;
	sc->hw.model = MOUSE_MODEL_INTELLI;
	sc->hw.hwid = addr;

	sc->mode.protocol = MOUSE_PROTO_PS2;
	sc->mode.rate = 100;
	sc->mode.resolution = 4;
	sc->mode.accelfactor = 1;
	sc->mode.level = 0;
	sc->mode.packetsize = MOUSE_PS2_PACKETSIZE;

	sc->drag_state = D_IDLE;
	sc->draglock_ticks = -1;
	sc->dragwait_ticks = -1;
	sc->tft_state = T_IDLE;
	sc->tft_ticks = -1;
	sc->send_but = 0;

	/* Setup input event tracking */
	cyapa_set_power_mode(sc, CMD_POWER_MODE_IDLE);

	/* Start the polling thread */
	kthread_add(cyapa_poll_thread, sc, NULL, NULL,
	    0, 0, "cyapa-poll");

	sc->devnode = make_dev(&cyapa_cdevsw, unit,
	    UID_ROOT, GID_WHEEL, 0600, "cyapa%d", unit);

	sc->devnode->si_drv1 = sc;


	return (0);
}
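Finally, the knlist bound to sc->mutex above is what a kqueue filter attach routine would add knotes to. A hedged sketch of that attach path; cyapa_kqfilter_sketch and cyapa_read_filterops are assumed names, with the filterops presumed to be defined elsewhere in the driver.
static int
cyapa_kqfilter_sketch(struct cdev *dev, struct knote *kn)
{
	struct cyapa_softc *sc = dev->si_drv1;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		/* cyapa_read_filterops (f_detach/f_event) assumed elsewhere. */
		kn->kn_fop = &cyapa_read_filterops;
		kn->kn_hook = sc;
		/* sc->mutex guards this knlist, per knlist_init_mtx() above. */
		knlist_add(&sc->selinfo.si_note, kn, 0);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}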