Example #1
/*
 * Block every signal except SIGKILL, saving the previous mask in
 * *oldset so the caller can restore it later.
 */
static void
fuse_block_sigs(sigset_t *oldset)
{
	sigset_t newset;

	SIGFILLSET(newset);
	SIGDELSET(newset, SIGKILL);
	if (kern_sigprocmask(curthread, SIG_BLOCK, &newset, oldset, 0))
		panic("%s: Invalid operation for kern_sigprocmask()",
		    __func__);
}
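For comparison, the same pattern in portable userland code; this is a
minimal sketch, not part of the fuse example, and block_all_sigs is an
invented name.  POSIX already ignores attempts to block SIGKILL and
SIGSTOP, so deleting them here is belt-and-suspenders.

#include <pthread.h>
#include <signal.h>

/* Block every maskable signal, saving the previous mask in *oldset;
 * restore later with pthread_sigmask(SIG_SETMASK, oldset, NULL). */
static int
block_all_sigs(sigset_t *oldset)
{
	sigset_t newset;

	sigfillset(&newset);
	sigdelset(&newset, SIGKILL);
	sigdelset(&newset, SIGSTOP);
	return (pthread_sigmask(SIG_BLOCK, &newset, oldset));
}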
Example #2
int
kproc_resume(struct proc *p)
{
	/*
	 * Make sure this is indeed a system process and we can safely
	 * use the p_siglist field.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_KTHREAD) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	SIGDELSET(p->p_siglist, SIGSTOP);
	PROC_UNLOCK(p);
	wakeup(&p->p_siglist);
	return (0);
}
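kproc_resume() undoes a stop request that the matching suspend path
posted.  For context, here is a sketch of what that suspend side looks
like, mirroring the guard above; the wait channel, the "suspkp" wmesg,
and the msleep() flags are assumptions, not verified source.

int
kproc_suspend_sketch(struct proc *p, int timo)
{
	/* Same guard as kproc_resume(): only system processes may
	 * borrow the p_siglist field this way. */
	PROC_LOCK(p);
	if ((p->p_flag & P_KTHREAD) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	/* Post the stop request, wake the kthread so it notices, and
	 * sleep until the suspension is acknowledged or timo expires;
	 * PDROP releases the proc lock when msleep() returns. */
	SIGADDSET(p->p_siglist, SIGSTOP);
	wakeup(p);
	return (msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP,
	    "suspkp", timo));
}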
Example #3
static int
lib_sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec *timeout)
{
	struct pthread	*curthread = _get_curthread();
	int		ret = 0;
	int		i;
	struct sigwait_data waitdata;
	sigset_t	waitset;
	kse_critical_t  crit;
	siginfo_t	siginfo;

	if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
		if (info == NULL)
			info = &siginfo;
		return (__sys_sigtimedwait(set, info, timeout));
	}

	/*
	 * Initialize the set of signals that will be waited on:
	 */
	waitset = *set;

	/* These signals can't be waited on. */
	SIGDELSET(waitset, SIGKILL);
	SIGDELSET(waitset, SIGSTOP);

	/*
	 * POSIX says that the _application_ must explicitly install
	 * a dummy handler for signals that are SIG_IGN in order
	 * to sigwait on them. Note that SIG_IGN signals are left in
	 * the mask because a subsequent sigaction could enable an
	 * ignored signal.
	 */

	crit = _kse_critical_enter();
	KSE_SCHED_LOCK(curthread->kse, curthread->kseg);
	for (i = 1; i <= _SIG_MAXSIG; ++i) {
		if (SIGISMEMBER(waitset, i) &&
		    SIGISMEMBER(curthread->sigpend, i)) {
			SIGDELSET(curthread->sigpend, i);
			siginfo = curthread->siginfo[i - 1];
			KSE_SCHED_UNLOCK(curthread->kse,
				curthread->kseg);
			_kse_critical_leave(crit);
			ret = i;
			goto OUT;
		}
	}
	curthread->timeout = 0;
	curthread->interrupted = 0;
	_thr_set_timeout(timeout);
	/* Wait for a signal: */
	siginfo.si_signo = 0;
	waitdata.waitset = &waitset;
	waitdata.siginfo = &siginfo;
	curthread->data.sigwait = &waitdata;
	THR_SET_STATE(curthread, PS_SIGWAIT);
	_thr_sched_switch_unlocked(curthread);
	/*
	 * Return the signal number to the caller:
	 */
	if (siginfo.si_signo > 0) {
		ret = siginfo.si_signo;
	} else {
		if (curthread->interrupted)
			errno = EINTR;
		else if (curthread->timeout)
			errno = EAGAIN;
		ret = -1;
	}
	curthread->timeout = 0;
	curthread->interrupted = 0;
	/*
	 * Probably unnecessary, but since it's in a union we don't
	 * know how it could be used in the future.
	 */
	curthread->data.sigwait = NULL;

OUT:
	if (ret > 0 && info != NULL)
		*info = siginfo;

	return (ret);
}
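From the caller's side this behaves like POSIX sigtimedwait().  A
hypothetical caller (wait_for_usr1 is an invented name) that blocks
SIGUSR1 and then waits up to two seconds for it:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

/* A signal must be blocked before it can be waited on, or its normal
 * disposition runs instead.  In a threaded program pthread_sigmask()
 * would replace sigprocmask() here. */
int
wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { 2, 0 };	/* two seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	if (sigprocmask(SIG_BLOCK, &set, NULL) != 0)
		return (-1);
	if (sigtimedwait(&set, &info, &timeout) == -1)
		return (errno == EAGAIN ? 0 : -1);	/* 0 == timed out */
	printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	return (1);
}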
Example #4
/* Strip the implementation-internal SIGCANCEL from a signal set. */
static inline void
remove_thr_signals(sigset_t *set)
{
	if (SIGISMEMBER(*set, SIGCANCEL))
		SIGDELSET(*set, SIGCANCEL);
}
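A natural consumer of a helper like this is a sigprocmask() wrapper
that sanitizes a caller-supplied mask so applications can never block
the implementation's internal signal.  A hypothetical sketch; the
wrapper name is invented, and __sys_sigprocmask follows the naming
convention seen in the other examples.

static int
thr_sigmask_sketch(int how, const sigset_t *set, sigset_t *oset)
{
	sigset_t newset;

	if (set != NULL) {
		/* Copy first so the caller's set is left untouched. */
		newset = *set;
		remove_thr_signals(&newset);
		set = &newset;
	}
	return (__sys_sigprocmask(how, set, oset));
}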
Example #5
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Check whether the thread is to be created suspended: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer early so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into the well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				 /* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
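Seen from the application, all of this machinery sits behind the
standard call.  A minimal, illustrative caller that creates a detached
worker thread (the names are invented):

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
	printf("hello from %s\n", (const char *)arg);
	return (NULL);
}

/* Returns 0 on success or an errno value; as the code above shows,
 * resource exhaustion (including EPROCLIM) surfaces as EAGAIN. */
int
spawn_detached_worker(void)
{
	pthread_t tid;
	pthread_attr_t attr;
	int ret;

	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	ret = pthread_create(&tid, &attr, worker, "worker");
	pthread_attr_destroy(&attr);
	return (ret);
}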
Example #6
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vnode *vp;
    struct vcache *vcp;
    char tmperr;
    if (!afs_initbiod) {
	/* XXX ###1 XXX */
	afs_initbiod = 1;
	/* pin lock, since we'll be using it in an interrupt. */
	lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
	simple_lock_init(&afs_asyncbuf_lock);
	pin(&afs_asyncbuf, sizeof(struct buf *));
	pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
	sigset_t sigbits, osigbits;
	/*
	 * add SIGHUP to the set of already masked signals
	 */
	SIGFILLSET(sigbits);	/* allow all signals    */
	SIGDELSET(sigbits, SIGHUP);	/*   except SIGHUP      */
	limit_sigs(&sigbits, &osigbits);	/*   and already masked */
    }
    /*
     * Main body starts here.  This was written as an intentional
     * infinite loop, but it will exit if afs_get_bioreq() returns NULL,
     * indicating that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq();
	if (!bp)
	    break;		/* we were interrupted */
	if ((code = setjmpx(&jmpbuf)) != 0) {
	    /* This should not have happened; maybe a lack of resources. */
	    AFS_GUNLOCK();
	    s = disable_lock(INTMAX, &afs_asyncbuf_lock);
	    /* Fail every request in the chain, fetching the next buffer
	     * through b_work before iodone() can recycle the current one. */
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *)bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    unlock_enable(s, &afs_asyncbuf_lock);
	    AFS_GLOCK();
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {	/* XXXX */
	    ObtainWriteLock(&vcp->lock, 404);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->f.m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
			       __LINE__, ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->f.m.Length),
			       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
		    vcp->f.m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    clrjmpx(&jmpbuf);
	    continue;
	}
	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 211);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns  a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {		/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}

	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_baddr);
	bp->b_baddr = tmpaddr;

	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also afs_gn_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *)tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *)tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode.  */
	clrjmpx(&jmpbuf);
    }				/* infinite loop (unless we're interrupted) */
    return (0);
}				/* end of afs_BioDaemon() */
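The setjmpx()/clrjmpx() pair is AIX-specific, but the per-request
recovery idiom is portable.  A sketch with standard setjmp()/longjmp();
struct req and the three helper functions are placeholders.

#include <setjmp.h>
#include <stddef.h>

struct req;				/* placeholder request type */
extern struct req *get_request(void);
extern void handle_request(struct req *);  /* may longjmp(jmpbuf, err) */
extern void fail_request(struct req *, int);

static jmp_buf jmpbuf;

static void
daemon_loop(void)
{
	struct req *volatile r;		/* volatile: survives longjmp */
	int code;

	while ((r = get_request()) != NULL) {
		if ((code = setjmp(jmpbuf)) != 0) {
			/* A handler bailed out mid-request: record the
			 * error and move on to the next request. */
			fail_request(r, code);
			continue;
		}
		handle_request(r);
	}
}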
Example #7
/*
 * Threaded process initialization.
 *
 * This is only called under two conditions:
 *
 *   1) Some thread routines have detected that the library hasn't yet
 *      been initialized (_thr_initial == NULL && curthread == NULL), or
 *
 *   2) An explicit call to reinitialize after a fork (indicated
 *      by curthread != NULL)
 */
void
_libpthread_init(struct pthread *curthread)
{
	int fd, first = 0;
	sigset_t sigset, oldset;

	/* Check if this function has already been called: */
	if ((_thr_initial != NULL) && (curthread == NULL))
		/* Only initialize the threaded application once. */
		return;

	/*
	 * Check for the special case of this process running as,
	 * or in place of, init (pid == 1):
	 */
	if ((_thr_pid = getpid()) == 1) {
		/*
		 * Setup a new session for this process which is
		 * assumed to be running as root.
		 */
		if (setsid() == -1)
			PANIC("Can't set session ID");
		if (revoke(_PATH_CONSOLE) != 0)
			PANIC("Can't revoke console");
		if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0)
			PANIC("Can't open console");
		if (setlogin("root") == -1)
			PANIC("Can't set login to root");
		if (__sys_ioctl(fd, TIOCSCTTY, NULL) == -1)
			PANIC("Can't set controlling terminal");
	}

	/* Initialize pthread private data. */
	init_private();

	/* Set the initial thread. */
	if (curthread == NULL) {
		first = 1;
		/* Create and initialize the initial thread. */
		curthread = _thr_alloc(NULL);
		if (curthread == NULL)
			PANIC("Can't allocate initial thread");
		init_main_thread(curthread);
	}
	/*
	 * Add the thread to the thread list queue.
	 */
	THR_LIST_ADD(curthread);
	_thread_active_threads = 1;

	/* Set up the thread-specific data. */
	tls_set_tcb(curthread->tcb);

	if (first) {
		SIGFILLSET(sigset);
		__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
		_thr_signal_init();
		_thr_initial = curthread;
		SIGDELSET(oldset, SIGCANCEL);
		__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
		if (td_eventismember(&_thread_event_mask, TD_CREATE))
			_thr_report_creation(curthread, curthread);
	}
}
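The first-time branch above masks every signal while handlers are wired
up, then restores the saved mask with SIGCANCEL stripped so the
implementation keeps control of that signal.  The same
protect-the-critical-section idiom in portable form, as a sketch
(init_fn is a placeholder):

#include <signal.h>
#include <stddef.h>

/* Run init_fn() with all maskable signals blocked so no handler can
 * observe a half-initialized state, then restore the caller's mask. */
static void
init_with_signals_blocked(void (*init_fn)(void))
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, &old);
	init_fn();
	sigprocmask(SIG_SETMASK, &old, NULL);
}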