/*
 * Sync the filesystem.  Currently we have to run it twice; the second
 * pass advances the undo start index to the end index so that, if a
 * crash occurs, no undos will be run on mount.
 *
 * We do not sync the filesystem if we are called from a panic.  If we
 * did, we might end up blowing up a sync that was already in progress.
 */
static int
hammer_vfs_sync(struct mount *mp, int waitfor)
{
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	int error;

	lwkt_gettoken(&hmp->fs_token);
	if (panicstr == NULL) {
		error = hammer_sync_hmp(hmp, waitfor);
	} else {
		error = EIO;
	}
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
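Every example in this listing follows the same LWKT token bracket: acquire the token, do the serialized work, and release the token on every return path. Below is a minimal sketch of that pattern; the token, counter, and function names are illustrative and not taken from any of the examples here.

#include <sys/param.h>
#include <sys/thread.h>	/* struct lwkt_token, lwkt_gettoken(), lwkt_reltoken() */

/* Hypothetical shared state serialized by a private token. */
static struct lwkt_token my_token = LWKT_TOKEN_INITIALIZER(my_token);
static int my_counter;

static int
my_serialized_bump(void)
{
	int value;

	lwkt_gettoken(&my_token);
	/*
	 * A token can be temporarily lost whenever its holder blocks, so
	 * token-protected state must be revalidated after any sleep (see
	 * the vn_syncer_remove() examples below).  This sketch never
	 * blocks, so no recheck is needed here.
	 */
	value = ++my_counter;
	lwkt_reltoken(&my_token);

	return (value);
}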
Example #2
/*
 * Use the device/inum pair to find the in-core inode and return a
 * pointer to its vnode.  If the inode is in core it is returned even if
 * it is locked.
 */
struct vnode *
ext2_ihashlookup(cdev_t dev, ino_t inum)
{
	struct inode *ip;

	lwkt_gettoken(&ext2_ihash_token);
	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	lwkt_reltoken(&ext2_ihash_token);
	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}
Example #3
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
Example #4
/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}
Example #5
static int
snpwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct snoop *snp;
	struct tty *tp;
	int error, i, len;
	unsigned char c[SNP_INPUT_BUF];

	lwkt_gettoken(&tty_token);
	snp = dev->si_drv1;
	tp = snp->snp_tty;
	if (tp == NULL) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}
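	/*
	 * Only accept the write while this snoop device is still hooked to
	 * the tty through the snoop line discipline.
	 */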
	if ((tp->t_sc == snp) && (tp->t_state & TS_SNOOP) &&
	    tp->t_line == snooplinedisc)
		goto tty_input;

	kprintf("Snoop: attempt to write to bad tty.\n");
	lwkt_reltoken(&tty_token);
	return (EIO);

tty_input:
	if (!(tp->t_state & TS_ISOPEN)) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}

	while (uio->uio_resid > 0) {
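		/*
		 * Move the next chunk (at most SNP_INPUT_BUF bytes) of user
		 * data into the local buffer and feed it to the snooped tty
		 * byte-by-byte via ttyinput().
		 */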
		len = (int)szmin(uio->uio_resid, SNP_INPUT_BUF);
		if ((error = uiomove(c, (size_t)len, uio)) != 0) {
			lwkt_reltoken(&tty_token);
			return (error);
		}
		for (i=0; i < len; i++) {
			if (ttyinput(c[i], tp)) {
				lwkt_reltoken(&tty_token);
				return (EIO);
			}
		}
	}
	lwkt_reltoken(&tty_token);
	return (0);
}
Example #6
/*
 * The strategy function is typically only called when memory pressure
 * forces the system to attempt to pageout pages.  It can also be called
 * by [n]vtruncbuf() when a truncation cuts a page in half.  Normal write
 * operations
 */
static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	int i;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	lwkt_gettoken(&vp->v_mount->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Don't bother flushing to swap if there is no swap, just
	 * ensure that the pages are marked as needing a commit (still).
	 */
	if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		nbio = push_bio(bio);
		nbio->bio_done = tmpfs_strategy_done;
		nbio->bio_offset = bio->bio_offset;
		swap_pager_strategy(uobj, nbio);
	}

	lwkt_reltoken(&vp->v_mount->mnt_token);
	return 0;
}
Example #7
/*
 * Write to pseudo-tty.
 * Wakeups of controlling tty will happen
 * indirectly, when tty driver calls ptsstart.
 */
static	int
ptswrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp;
	int ret;

	lwkt_gettoken(&tty_token);
	tp = dev->si_tty;
	if (tp->t_oproc == NULL) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}
	ret = ((*linesw[tp->t_line].l_write)(tp, ap->a_uio, ap->a_ioflag));
	lwkt_reltoken(&tty_token);
	return ret;
}
Example #8
void
kproc_suspend_loop(void)
{
	struct thread *td = curthread;

	if (td->td_mpflags & TDF_MP_STOPREQ) {
		lwkt_gettoken(&kpsus_token);
		atomic_clear_int(&td->td_mpflags, TDF_MP_STOPREQ);
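		/*
		 * The wakeup() tells the requesting thread we have stopped;
		 * we then sleep until the resuming side sets TDF_MP_WAKEREQ.
		 */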
		while ((td->td_mpflags & TDF_MP_WAKEREQ) == 0) {
			wakeup(td);
			tsleep(td, 0, "kpsusp", 0);
		}
		atomic_clear_int(&td->td_mpflags, TDF_MP_WAKEREQ);
		wakeup(td);
		lwkt_reltoken(&kpsus_token);
	}
}
Example #9
/*
 * Get login name, if available.
 */
int
sys_getlogin(struct getlogin_args *uap)
{
	struct proc *p = curproc;
	char buf[MAXLOGNAME];
	int error;

	if (uap->namelen > MAXLOGNAME)		/* namelen is unsigned */
		uap->namelen = MAXLOGNAME;
	bzero(buf, sizeof(buf));
	lwkt_gettoken(&proc_token);
	bcopy(p->p_pgrp->pg_session->s_login, buf, uap->namelen);
	lwkt_reltoken(&proc_token);

	error = copyout(buf, uap->namebuf, uap->namelen);
	return (error);
}
Example #10
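/*
 * Line discipline close on a snooped tty: detach the snoop device from
 * the tty via snp_down() and then perform the normal tty line discipline
 * close.
 */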
static int
snplclose(struct tty *tp, int flag)
{
	struct snoop *snp;
	int error;

	lwkt_gettoken(&tty_token);
	snp = tp->t_sc;
	error = snp_down(snp);
	if (error != 0) {
		lwkt_reltoken(&tty_token);
		return (error);
	}
	error = ttylclose(tp, flag);
	lwkt_reltoken(&tty_token);
	return (error);
}
Example #11
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    lwkt_gettoken(&vm_token);
    crit_enter();
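    /*
     * Walk the buffer one page at a time, holding each underlying vm_page.
     * 'n' is the number of bytes contributed by the current page.
     */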
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_npages = i;

    /*
     * If a failure occurred, clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
Example #12
/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be made while holding the syncer
 * context token.
 *
 *	sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);
	KKASSERT(vp->v_mount->mnt_syncer != vp);
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vclrflags(vp, VONWORKLST);
	}
	lwkt_reltoken(&ctx->sc_token);

	return (0);
}
Example #13
static int
snpfilter_rd(struct knote *kn, long hint)
{
	struct snoop *snp = (struct snoop *)kn->kn_hook;
	int ready = 0;

	lwkt_gettoken(&tty_token);
	/*
	 * If the snoop device is down we don't want the caller to poll
	 * forever, so we return 1.  The caller should check the down state
	 * via the FIONREAD ioctl(), which returns -1 in that case.
	 */
	if (snp->snp_flags & SNOOP_DOWN || snp->snp_len > 0)
		ready = 1;

	lwkt_reltoken(&tty_token);
	return (ready);
}
Example #14
/*
 * Faultin the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
	if (p->p_flags & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flags &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}
Example #15
int
sys_setgroups(struct setgroups_args *uap)
{
	struct proc *p = curproc;
	struct ucred *cr;
	u_int ngrp;
	int error;

	lwkt_gettoken(&proc_token);
	cr = p->p_ucred;

	if ((error = priv_check_cred(cr, PRIV_CRED_SETGROUPS, 0)))
		goto done;
	ngrp = uap->gidsetsize;
	if (ngrp > NGROUPS) {
		error = EINVAL;
		goto done;
	}
	/*
	 * XXX A little bit lazy here.  We could test if anything has
	 * changed before cratom() and setting P_SUGID.
	 */
	cr = cratom(&p->p_ucred);
	if (ngrp < 1) {
		/*
		 * setgroups(0, NULL) is a legitimate way of clearing the
		 * groups vector on non-BSD systems (which generally do not
		 * have the egid in the groups[0]).  We risk security holes
		 * when running non-BSD software if we do not do the same.
		 */
		cr->cr_ngroups = 1;
	} else {
		error = copyin(uap->gidset, cr->cr_groups,
			       ngrp * sizeof(gid_t));
		if (error)
			goto done;
		cr->cr_ngroups = ngrp;
	}
	setsugid();
	error = 0;
done:
	lwkt_reltoken(&proc_token);
	return (error);
}
Example #16
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flags & P_SWAPWAIT) {
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB
		 */
		lwkt_gettoken(&p->p_vmspace->vm_map.token);
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
				(1024 * 1024 / PAGE_SIZE);
		}
		lwkt_reltoken(&p->p_vmspace->vm_map.token);

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			if (info->pp)
				PRELE(info->pp);
			PHOLD(p);
			info->pp = p;
			info->ppri = pri;
		}
	}
	return(0);
}
Example #17
/*
 * vm_contig_pg_free:
 *
 * Remove pages previously allocated by vm_contig_pg_alloc, and
 * assume all references to the pages have been removed, and that
 * it is OK to add them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.
 */
void
vm_contig_pg_free(int start, u_long size)
{
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int i;
	
	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_free: size must not be 0");

	lwkt_gettoken(&vm_token);
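	/*
	 * Each page is busied before being handed to vm_page_free(), which
	 * expects a busied page.
	 */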
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		m = &pga[i];
		vm_page_busy(m);
		vm_page_free(m);
	}
	lwkt_reltoken(&vm_token);
}
Example #18
/*
 * Free an entry, unblock any waiters.  Allow NULL.
 */
void
mpipe_free(malloc_pipe_t mpipe, void *buf)
{
    int n;

    if (buf == NULL)
	return;

    lwkt_gettoken(&mpipe->token);
    if ((n = mpipe->free_count) < mpipe->ary_count) {
	/*
	 * Free slot available in free array (LIFO)
	 */
	mpipe->array[n] = buf;
	++mpipe->free_count;
	if ((mpipe->mpflags & (MPF_CACHEDATA|MPF_NOZERO)) == 0) 
	    bzero(buf, mpipe->bytes);
	if (mpipe->mpflags & MPF_QUEUEWAIT) {
		mpipe->mpflags &= ~MPF_QUEUEWAIT;
		lwkt_reltoken(&mpipe->token);
		wakeup(&mpipe->queue);
	} else {
		lwkt_reltoken(&mpipe->token);
	}
	/*
	 * Wakeup anyone blocked in mpipe_alloc_*().
	 */
	if (mpipe->pending) {
	    mpipe->pending = 0;
	    wakeup(mpipe);
	}
    } else {
	/*
	 * All the free slots are full, free the buffer directly.
	 */
	--mpipe->total_count;
	KKASSERT(mpipe->total_count >= mpipe->free_count);
	if (mpipe->deconstruct)
	    mpipe->deconstruct(buf, mpipe->priv);
	lwkt_reltoken(&mpipe->token);
	kfree(buf, mpipe->type);
    }
}
Example #19
/*
 * Start output on pseudo-tty.
 * Wake up process selecting or sleeping for input from controlling tty.
 */
static void
ptsstart(struct tty *tp)
{
	lwkt_gettoken(&tty_token);
	struct pt_ioctl *pti = tp->t_dev->si_drv1;

	if (tp->t_state & TS_TTSTOP) {
		lwkt_reltoken(&tty_token);
		return;
	}
	if (pti) {
		if (pti->pt_flags & PF_STOPPED) {
			pti->pt_flags &= ~PF_STOPPED;
			pti->pt_send = TIOCPKT_START;
		}
	}
	ptcwakeup(tp, FREAD);
	lwkt_reltoken(&tty_token);
}
Example #20
/*
 * setresgid(rgid, egid, sgid) is like setregid except control over the
 * saved gid is explicit.
 */
int
sys_setresgid(struct setresgid_args *uap)
{
	struct proc *p = curproc;
	struct ucred *cr;
	gid_t rgid, egid, sgid;
	int error;

	lwkt_gettoken(&proc_token);
	cr = p->p_ucred;
	rgid = uap->rgid;
	egid = uap->egid;
	sgid = uap->sgid;
	if (((rgid != (gid_t)-1 && rgid != cr->cr_rgid && rgid != cr->cr_svgid &&
	      rgid != cr->cr_groups[0]) ||
	     (egid != (gid_t)-1 && egid != cr->cr_rgid && egid != cr->cr_svgid &&
	      egid != cr->cr_groups[0]) ||
	     (sgid != (gid_t)-1 && sgid != cr->cr_rgid && sgid != cr->cr_svgid &&
	      sgid != cr->cr_groups[0])) &&
	    (error = priv_check_cred(cr, PRIV_CRED_SETRESGID, 0)) != 0) {
		goto done;
	}

	if (egid != (gid_t)-1 && cr->cr_groups[0] != egid) {
		cr = cratom(&p->p_ucred);
		cr->cr_groups[0] = egid;
		setsugid();
	}
	if (rgid != (gid_t)-1 && cr->cr_rgid != rgid) {
		cr = cratom(&p->p_ucred);
		cr->cr_rgid = rgid;
		setsugid();
	}
	if (sgid != (gid_t)-1 && cr->cr_svgid != sgid) {
		cr = cratom(&p->p_ucred);
		cr->cr_svgid = sgid;
		setsugid();
	}
	error = 0;
done:
	lwkt_reltoken(&proc_token);
	return (error);
}
Example #21
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Degenerate case
	 */
	if (node == NULL) {
		vrecycle(vp);
		lwkt_reltoken(&mp->mnt_token);
		return(0);
	}

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0)
	{
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	return 0;
}
Example #22
/*
 * Obtain a vnode for the specified inode number.  An exclusively locked
 * vnode is returned.
 */
int
hammer_vfs_vget(struct mount *mp, struct vnode *dvp,
		ino_t ino, struct vnode **vpp)
{
	struct hammer_transaction trans;
	struct hammer_mount *hmp = (void *)mp->mnt_data;
	struct hammer_inode *ip;
	int error;
	u_int32_t localization;

	lwkt_gettoken(&hmp->fs_token);
	hammer_simple_transaction(&trans, hmp);

	/*
	 * If a directory vnode is supplied (mainly NFS) then we can acquire
	 * the PFS domain from it.  Otherwise we would only be able to vget
	 * inodes in the root PFS.
	 */
	if (dvp) {
		localization = HAMMER_DEF_LOCALIZATION +
				VTOI(dvp)->obj_localization;
	} else {
		localization = HAMMER_DEF_LOCALIZATION;
	}

	/*
	 * Lookup the requested HAMMER inode.  The structure must be
	 * left unlocked while we manipulate the related vnode to avoid
	 * a deadlock.
	 */
	ip = hammer_get_inode(&trans, NULL, ino,
			      hmp->asof, localization,
			      0, &error);
	if (ip == NULL) {
		*vpp = NULL;
	} else {
		error = hammer_get_vnode(ip, vpp);
		hammer_rel_inode(ip, 0);
	}
	hammer_done_transaction(&trans);
	lwkt_reltoken(&hmp->fs_token);
	return (error);
}
Example #23
/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static struct tcpcb *
tcp_disconnect(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (tp->t_state < TCPS_ESTABLISHED) {
		tp = tcp_close(tp);
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		tp = tcp_drop(tp, 0);
	} else {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		soisdisconnecting(so);
		sbflush(&so->so_rcv.sb);
		tp = tcp_usrclosed(tp);
		if (tp)
			tcp_output(tp);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}
	return (tp);
}
Example #24
/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary.
 */
int
suspend_kproc(struct thread *td, int timo)
{
	if (td->td_proc == NULL) {
		lwkt_gettoken(&kpsus_token);
		/* request thread pause */
		atomic_set_int(&td->td_mpflags, TDF_MP_STOPREQ);
		wakeup(td);
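		/*
		 * Wait for the target thread to acknowledge the request by
		 * clearing TDF_MP_STOPREQ in kproc_suspend_loop().
		 */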
		while (td->td_mpflags & TDF_MP_STOPREQ) {
			int error = tsleep(td, 0, "suspkp", timo);
			if (error == EWOULDBLOCK)
				break;
		}
		atomic_clear_int(&td->td_mpflags, TDF_MP_STOPREQ);
		lwkt_reltoken(&kpsus_token);
		return(0);
	} else {
		return(EINVAL);	/* not a kernel thread */
	}
}
Example #25
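/*
 * setsid() - create a new session.  Fails with EPERM if the calling
 * process is already a process group leader or if a process group with
 * its pid already exists.
 */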
int
sys_setsid(struct setsid_args *uap)
{
	struct proc *p = curproc;
	struct pgrp *pg = NULL;
	int error;

	lwkt_gettoken(&p->p_token);
	if (p->p_pgid == p->p_pid || (pg = pgfind(p->p_pid)) != NULL) {
		error = EPERM;
		if (pg)
			pgrel(pg);
	} else {
		enterpgrp(p, p->p_pid, 1);
		uap->sysmsg_result = p->p_pid;
		error = 0;
	}
	lwkt_reltoken(&p->p_token);
	return (error);
}
Example #26
void
hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		       u64 range_ns, const enum hrtimer_mode mode)
{
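	/*
	 * Convert the requested expiry from nanoseconds to callout ticks;
	 * in absolute mode the expiry is measured against the current tick
	 * count.
	 */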
	int expire_ticks = tim.tv64 / (NSEC_PER_SEC / hz);

	if (mode == HRTIMER_MODE_ABS)
		expire_ticks -= ticks;

	if (expire_ticks <= 0)
		expire_ticks = 1;

	lwkt_gettoken(&timer->timer_token);

	timer->active = true;
	callout_reset(&timer->timer_callout,
		      expire_ticks, __hrtimer_function, timer);

	lwkt_reltoken(&timer->timer_token);
}
Example #27
/*
 * Allocate an entry, block until the allocation succeeds.  This may cause
 * us to block waiting for a prior allocation to be freed.
 */
void *
mpipe_alloc_waitok(malloc_pipe_t mpipe)
{
    void *buf;
    int mfailed;

    lwkt_gettoken(&mpipe->token);
    mfailed = 0;
    while ((buf = _mpipe_alloc_locked(mpipe, mfailed)) == NULL) {
	/*
	 * Block if we have hit our limit
	 */
	mpipe->pending = 1;
	tsleep(mpipe, 0, "mpipe1", 0);
	mfailed = 1;
    }
    lwkt_reltoken(&mpipe->token);

    return(buf);
}
Example #28
/*
 * setresuid(ruid, euid, suid) is like setreuid except control over the
 * saved uid is explicit.
 */
int
sys_setresuid(struct setresuid_args *uap)
{
	struct proc *p = curproc;
	struct ucred *cr;
	uid_t ruid, euid, suid;
	int error;

	lwkt_gettoken(&proc_token);
	cr = p->p_ucred;

	ruid = uap->ruid;
	euid = uap->euid;
	suid = uap->suid;
	if (((ruid != (uid_t)-1 && ruid != cr->cr_ruid && ruid != cr->cr_svuid &&
	      ruid != cr->cr_uid) ||
	     (euid != (uid_t)-1 && euid != cr->cr_ruid && euid != cr->cr_svuid &&
	      euid != cr->cr_uid) ||
	     (suid != (uid_t)-1 && suid != cr->cr_ruid && suid != cr->cr_svuid &&
	      suid != cr->cr_uid)) &&
	    (error = priv_check_cred(cr, PRIV_CRED_SETRESUID, 0)) != 0) {
		goto done;
	}
	if (euid != (uid_t)-1 && cr->cr_uid != euid) {
		cr = change_euid(euid);
		setsugid();
	}
	if (ruid != (uid_t)-1 && cr->cr_ruid != ruid) {
		cr = change_ruid(ruid);
		setsugid();
	}
	if (suid != (uid_t)-1 && cr->cr_svuid != suid) {
		cr = cratom(&p->p_ucred);
		cr->cr_svuid = suid;
		setsugid();
	}
	error = 0;
done:
	lwkt_reltoken(&proc_token);
	return (error);
}
Example #29
static int
snplwrite(struct tty *tp, struct uio *uio, int flag)
{
	struct iovec iov;
	struct uio uio2;
	struct snoop *snp;
	int error, ilen;
	char *ibuf;

	lwkt_gettoken(&tty_token);
	error = 0;
	ibuf = NULL;
	snp = tp->t_sc;
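	/*
	 * Process the data in chunks of at most 512 bytes: record each chunk
	 * in the snoop buffer via snp_in() and then pass it on to the real
	 * tty with ttwrite().
	 */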
	while (uio->uio_resid > 0) {
		ilen = (int)szmin(512, uio->uio_resid);
		ibuf = kmalloc(ilen, M_SNP, M_WAITOK);
		error = uiomove(ibuf, (size_t)ilen, uio);
		if (error != 0)
			break;
		snp_in(snp, ibuf, ilen);
		/* Hackish, but probably the least of all evils. */
		iov.iov_base = ibuf;
		iov.iov_len = ilen;
		uio2.uio_iov = &iov;
		uio2.uio_iovcnt = 1;
		uio2.uio_offset = 0;
		uio2.uio_resid = ilen;
		uio2.uio_segflg = UIO_SYSSPACE;
		uio2.uio_rw = UIO_WRITE;
		uio2.uio_td = uio->uio_td;
		error = ttwrite(tp, &uio2, flag);
		if (error != 0)
			break;
		kfree(ibuf, M_SNP);
		ibuf = NULL;
	}
	if (ibuf != NULL)
		kfree(ibuf, M_SNP);
	lwkt_reltoken(&tty_token);
	return (error);
}
Example #30
int
tmpfs_getattr(struct vop_getattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	lwkt_gettoken(&vp->v_mount->mnt_token);
	tmpfs_update(vp);
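	/*
	 * Bring the node up to date, then copy its attributes into the
	 * caller's vattr.
	 */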

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime.tv_sec = node->tn_atime;
	vap->va_atime.tv_nsec = node->tn_atimensec;
	vap->va_mtime.tv_sec = node->tn_mtime;
	vap->va_mtime.tv_nsec = node->tn_mtimensec;
	vap->va_ctime.tv_sec = node->tn_ctime;
	vap->va_ctime.tv_nsec = node->tn_ctimensec;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
	{
		vap->va_rmajor = umajor(node->tn_rdev);
		vap->va_rminor = uminor(node->tn_rdev);
	}
	vap->va_bytes = round_page(node->tn_size);
	vap->va_filerev = 0;

	lwkt_reltoken(&vp->v_mount->mnt_token);

	return 0;
}