Example #1
void
_cgo_sys_thread_start(ThreadStart *ts)
{
	pthread_attr_t attr;
	sigset_t ign, oset;
	pthread_t p;
	size_t size;
	int err;

	SIGFILLSET(ign);
	pthread_sigmask(SIG_SETMASK, &ign, &oset);

	// Not sure why the memset is necessary here,
	// but without it, we get a bogus stack size
	// out of pthread_attr_getstacksize.  C'est la Linux.
	memset(&attr, 0, sizeof attr);
	pthread_attr_init(&attr);
	size = 0;
	pthread_attr_getstacksize(&attr, &size);
	ts->g->stackguard = size;
	err = pthread_create(&p, &attr, threadentry, ts);

	pthread_sigmask(SIG_SETMASK, &oset, nil);

	if (err != 0) {
		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
		abort();
	}
}
Example #2
void
_cgo_sys_thread_start(ThreadStart *ts)
{
	pthread_attr_t attr;
	sigset_t ign, oset;
	pthread_t p;
	size_t size;
	int err;

	SIGFILLSET(ign);
	pthread_sigmask(SIG_SETMASK, &ign, &oset);

	pthread_attr_init(&attr);
	pthread_attr_getstacksize(&attr, &size);
	// Leave stacklo=0 and set stackhi=size; mstack will do the rest.
	ts->g->stackhi = size;
	err = pthread_create(&p, &attr, threadentry, ts);

	pthread_sigmask(SIG_SETMASK, &oset, nil);

	if (err != 0) {
		fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
		abort();
	}
}
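
Both cgo variants share the same discipline: mask every signal in the creating thread, call pthread_create() so the child starts life with that full mask, then restore the creator's original mask. Below is a minimal, self-contained sketch of the pattern in portable C; the names worker and main are illustrative, not from the Go runtime:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *
worker(void *arg)
{
	sigset_t none;

	/* The child starts with every signal blocked (inherited from the
	 * creator); unblock once thread-local state is ready. */
	sigemptyset(&none);
	pthread_sigmask(SIG_SETMASK, &none, NULL);
	return arg;
}

int
main(void)
{
	sigset_t all, old;
	pthread_t p;
	int err;

	sigfillset(&all);				/* like SIGFILLSET(ign) */
	pthread_sigmask(SIG_SETMASK, &all, &old);	/* block everything */

	err = pthread_create(&p, NULL, worker, NULL);	/* child inherits mask */

	pthread_sigmask(SIG_SETMASK, &old, NULL);	/* restore the creator */

	if (err != 0) {
		fprintf(stderr, "pthread_create failed: %s\n", strerror(err));
		abort();
	}
	pthread_join(p, NULL);
	return 0;
}

The point is the window between pthread_create() and the child installing its own handlers: a signal delivered there would otherwise run on a thread whose runtime state is not yet set up.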
Example #3
static void
fuse_block_sigs(sigset_t *oldset)
{
	sigset_t newset;

	SIGFILLSET(newset);
	SIGDELSET(newset, SIGKILL);
	if (kern_sigprocmask(curthread, SIG_BLOCK, &newset, oldset, 0))
		panic("%s: Invalid operation for kern_sigprocmask()",
		    __func__);
}
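
fuse_block_sigs() deletes SIGKILL explicitly before handing the set to kern_sigprocmask(). In userspace that precaution is unnecessary: POSIX requires sigprocmask() to silently ignore attempts to block SIGKILL and SIGSTOP. A hedged userspace analogue is therefore just:

#include <signal.h>

/* Userspace analogue of fuse_block_sigs() (a sketch, not FreeBSD code):
 * block everything we can.  No SIGDELSET(SIGKILL) is needed, because the
 * kernel refuses to block SIGKILL/SIGSTOP no matter what the set says. */
static void
block_all_sigs(sigset_t *oldset)
{
	sigset_t newset;

	sigfillset(&newset);
	sigprocmask(SIG_BLOCK, &newset, oldset);
}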
Example #4
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
	       void *(*start_routine) (void *), void *arg)
{
	struct pthread *curthread, *new_thread;
	struct thr_param param;
	struct sched_param sched_param;
	struct rtprio rtp;
	sigset_t set, oset;
	cpuset_t *cpusetp;
	int i, cpusetsize, create_suspended, locked, old_stack_prot, ret;

	cpusetp = NULL;
	ret = cpusetsize = 0;
	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect
	 * their data.
	 */
	if (_thr_isthreaded() == 0) {
		_malloc_first_thread();
		if (_thr_setthreaded(1))
			return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	memset(&param, 0, sizeof(param));

	if (attr == NULL || *attr == NULL)
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	else {
		new_thread->attr = *(*attr);
		cpusetp = new_thread->attr.cpuset;
		cpusetsize = new_thread->attr.cpusetsize;
		new_thread->attr.cpuset = NULL;
		new_thread->attr.cpusetsize = 0;
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;

		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
	}

	new_thread->tid = TID_TERMINATED;

	old_stack_prot = _rtld_get_stack_prot();
	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancel_enable = 1;
	new_thread->cancel_async = 0;
	/* Initialize the mutex queue: */
	for (i = 0; i < TMQ_NITEMS; i++)
		TAILQ_INIT(&new_thread->mq[i]);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;
		create_suspended = 1;
	} else {
		create_suspended = 0;
	}

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->flags |= THR_FLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);

	/*
	 * Handle the race between __pthread_map_stacks_exec and
	 * thread linkage.
	 */
	if (old_stack_prot != _rtld_get_stack_prot())
		_thr_stack_fix_protection(new_thread);

	/* Return the thread pointer earlier so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE) || cpusetp != NULL) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	param.start_func = (void (*)(void *)) thread_start;
	param.arg = new_thread;
	param.stack_base = new_thread->attr.stackaddr_attr;
	param.stack_size = new_thread->attr.stacksize_attr;
	param.tls_base = (char *)new_thread->tcb;
	param.tls_size = sizeof(struct tcb);
	param.child_tid = &new_thread->tid;
	param.parent_tid = &new_thread->tid;
	param.flags = 0;
	if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
		param.flags |= THR_SYSTEM_SCOPE;
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED)
		param.rtp = NULL;
	else {
		sched_param.sched_priority = new_thread->attr.prio;
		_schedparam_to_rtp(new_thread->attr.sched_policy,
			&sched_param, &rtp);
		param.rtp = &rtp;
	}

	/* Schedule the new thread. */
	if (create_suspended) {
		SIGFILLSET(set);
		SIGDELSET(set, SIGTRAP);
		__sys_sigprocmask(SIG_SETMASK, &set, &oset);
		new_thread->sigmask = oset;
		SIGDELSET(new_thread->sigmask, SIGCANCEL);
	}

	ret = thr_new(&param, sizeof(param));

	if (ret != 0) {
		ret = errno;
		/*
		 * Translate EPROCLIM into well-known POSIX code EAGAIN.
		 */
		if (ret == EPROCLIM)
			ret = EAGAIN;
	}

	if (create_suspended)
		__sys_sigprocmask(SIG_SETMASK, &oset, NULL);

	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->tid = TID_TERMINATED;
		new_thread->flags |= THR_FLAGS_DETACHED;
		new_thread->refcount--;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
		}
		_thr_try_gc(curthread, new_thread); /* thread lock released */
		atomic_add_int(&_thread_active_threads, -1);
	} else if (locked) {
		if (cpusetp != NULL) {
			if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
				TID(new_thread), cpusetsize, cpusetp)) {
				ret = errno;
				/* kill the new thread */
				new_thread->force_exit = 1;
				new_thread->flags |= THR_FLAGS_DETACHED;
				_thr_try_gc(curthread, new_thread);
				/* thread lock released */
				goto out;
			}
		}

		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
out:
	if (ret)
		(*thread) = 0;
	return (ret);
}
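
The create-suspended path above performs a small mask dance: fill the set, exempt SIGTRAP so debuggers keep working, install it, and record the previous mask as the child's intended sigmask with the library-internal SIGCANCEL removed, so cancellation can always be delivered. A sketch isolating just that arithmetic (the SIGCANCEL value is an illustrative stand-in for libthr's internal SIGTHR):

#include <signal.h>
#include <stdio.h>

#ifndef SIGCANCEL
#define SIGCANCEL 32	/* illustrative stand-in for libthr's SIGTHR */
#endif

int
main(void)
{
	sigset_t set, oset, child_mask;

	sigfillset(&set);
	sigdelset(&set, SIGTRAP);		/* keep debuggers functional */
	sigprocmask(SIG_SETMASK, &set, &oset);

	child_mask = oset;			/* child gets the old mask... */
	sigdelset(&child_mask, SIGCANCEL);	/* ...minus the internal signal */

	/* ... thr_new(&param, sizeof(param)) would run here ... */

	sigprocmask(SIG_SETMASK, &oset, NULL);	/* parent restores its mask */
	printf("SIGCANCEL blocked in child mask: %d\n",
	    sigismember(&child_mask, SIGCANCEL));
	return 0;
}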
Example #5
int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
    void *(*start_routine) (void *), void *arg)
{
	struct lwp_params create_params;
	void *stack;
	sigset_t sigmask, oldsigmask;
	struct pthread *curthread, *new_thread;
	int ret = 0, locked;

	_thr_check_init();

	/*
	 * Tell libc and others that they now need locks to protect
	 * their data.
	 */
	if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
		return (EAGAIN);

	curthread = tls_get_curthread();
	if ((new_thread = _thr_alloc(curthread)) == NULL)
		return (EAGAIN);

	if (attr == NULL || *attr == NULL) {
		/* Use the default thread attributes: */
		new_thread->attr = _pthread_attr_default;
	} else {
		new_thread->attr = *(*attr);
	}
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/* inherit scheduling contention scope */
		if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
			new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
		else
			new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
		/*
		 * Scheduling policy and parameters are inherited in
		 * the code below.
		 */
	}

	if (create_stack(&new_thread->attr) != 0) {
		/* Insufficient memory to create a stack: */
		new_thread->terminated = 1;
		_thr_free(curthread, new_thread);
		return (EAGAIN);
	}
	/*
	 * Write a magic value to the thread structure
	 * to help identify valid ones:
	 */
	new_thread->magic = THR_MAGIC;
	new_thread->start_routine = start_routine;
	new_thread->arg = arg;
	new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
	    PTHREAD_CANCEL_DEFERRED;
	/*
	 * Check if this thread is to inherit the scheduling
	 * attributes from its parent:
	 */
	if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		/*
		 * Copy the scheduling attributes. Lock the scheduling
		 * lock to get consistent scheduling parameters.
		 */
		THR_LOCK(curthread);
		new_thread->base_priority = curthread->base_priority;
		new_thread->attr.prio = curthread->attr.prio;
		new_thread->attr.sched_policy = curthread->attr.sched_policy;
		THR_UNLOCK(curthread);
	} else {
		/*
		 * Use just the thread priority, leaving the
		 * other scheduling attributes as their
		 * default values:
		 */
		new_thread->base_priority = new_thread->attr.prio;
	}
	new_thread->active_priority = new_thread->base_priority;

	/* Initialize the mutex queue: */
	TAILQ_INIT(&new_thread->mutexq);

	/* Initialise hooks in the thread structure: */
	if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
		new_thread->flags = THR_FLAGS_NEED_SUSPEND;

	new_thread->state = PS_RUNNING;

	if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
		new_thread->tlflags |= TLFLAGS_DETACHED;

	/* Add the new thread. */
	new_thread->refcount = 1;
	_thr_link(curthread, new_thread);
	/* Return the thread pointer earlier so that the new thread can use it. */
	(*thread) = new_thread;
	if (SHOULD_REPORT_EVENT(curthread, TD_CREATE)) {
		THR_THREAD_LOCK(curthread, new_thread);
		locked = 1;
	} else
		locked = 0;
	/* Schedule the new thread. */
	stack = (char *)new_thread->attr.stackaddr_attr +
			new_thread->attr.stacksize_attr;
	bzero(&create_params, sizeof(create_params));
	create_params.lwp_func = thread_start;
	create_params.lwp_arg = new_thread;
	create_params.lwp_stack = stack;
	create_params.lwp_tid1 = &new_thread->tid;
	/*
	 * A thread created by lwp_create() inherits the current thread's
	 * sigmask; however, until the new thread has set itself up, it
	 * cannot handle signals, so we mask all signals here.  We do this
	 * at the very last moment, so that we don't run into problems
	 * while we have all signals disabled.
	 */
	SIGFILLSET(sigmask);
	__sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
	new_thread->sigmask = oldsigmask;
	ret = lwp_create(&create_params);
	__sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	if (ret != 0) {
		if (!locked)
			THR_THREAD_LOCK(curthread, new_thread);
		new_thread->state = PS_DEAD;
		new_thread->terminated = 1;
		if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
			new_thread->cycle++;
			_thr_umtx_wake(&new_thread->cycle, INT_MAX);
		}
		THR_THREAD_UNLOCK(curthread, new_thread);
		THREAD_LIST_LOCK(curthread);
		_thread_active_threads--;
		new_thread->tlflags |= TLFLAGS_DETACHED;
		_thr_ref_delete_unlocked(curthread, new_thread);
		THREAD_LIST_UNLOCK(curthread);
		(*thread) = NULL;
		ret = EAGAIN;
	} else if (locked) {
		_thr_report_creation(curthread, new_thread);
		THR_THREAD_UNLOCK(curthread, new_thread);
	}
	return (ret);
}
Example #6
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vnode *vp;
    struct vcache *vcp;
    char tmperr;
    if (!afs_initbiod) {
	/* XXX ###1 XXX */
	afs_initbiod = 1;
	/* pin lock, since we'll be using it in an interrupt. */
	lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
	simple_lock_init(&afs_asyncbuf_lock);
	pin(&afs_asyncbuf, sizeof(struct buf *));
	pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
	sigset_t sigbits, osigbits;
	/*
	 * add SIGHUP to the set of already masked signals
	 */
	SIGFILLSET(sigbits);	/* allow all signals    */
	SIGDELSET(sigbits, SIGHUP);	/*   except SIGHUP      */
	limit_sigs(&sigbits, &osigbits);	/*   and already masked */
    }
    /* Main body starts here -- this is an intentional infinite loop and
     * should NEVER exit.
     *
     * In practice the loop exits only if afs_get_bioreq() returns NULL,
     * indicating that we've been interrupted.
     */
    while (1) {
	bp = afs_get_bioreq();
	if (!bp)
	    break;		/* we were interrupted */
	if ((code = setjmpx(&jmpbuf)) != 0) {
	    /* This should not have happened; maybe a lack of resources. */
	    AFS_GUNLOCK();
	    s = disable_lock(INTMAX, &afs_asyncbuf_lock);
	    for (bp1 = bp; bp; bp = bp1) {
		if (bp1)
		    bp1 = (struct buf *)bp1->b_work;
		bp->b_actf = 0;
		bp->b_error = code;
		bp->b_flags |= B_ERROR;
		iodone(bp);
	    }
	    unlock_enable(s, &afs_asyncbuf_lock);
	    AFS_GLOCK();
	    continue;
	}
	vcp = VTOAFS(bp->b_vp);
	if (bp->b_flags & B_PFSTORE) {	/* XXXX */
	    ObtainWriteLock(&vcp->lock, 404);
	    if (vcp->v.v_gnode->gn_mwrcnt) {
		afs_offs_t newlength =
		    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
		if (vcp->f.m.Length < newlength) {
		    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
			       ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
			       __LINE__, ICL_TYPE_OFFSET,
			       ICL_HANDLE_OFFSET(vcp->f.m.Length),
			       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
		    vcp->f.m.Length = newlength;
		}
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
	/* If the buffer represents a protection violation, rather than
	 * an actual request for I/O, no special action need be taken.
	 */
	if (bp->b_flags & B_PFPROT) {
	    iodone(bp);		/* Notify all users of the buffer that we're done */
	    clrjmpx(&jmpbuf);
	    continue;
	}
	if (DOvmlock)
	    ObtainWriteLock(&vcp->pvmlock, 211);
	/*
	 * First map its data area to a region in the current address space
	 * by calling vm_att with the subspace identifier, and a pointer to
	 * the data area.  vm_att returns  a new data area pointer, but we
	 * also want to hang onto the old one.
	 */
	tmpaddr = bp->b_baddr;
	bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
	tmperr = afs_ustrategy(bp);	/* temp variable saves offset calculation */
	if (tmperr) {		/* error case */
	    bp->b_flags |= B_ERROR;	/* should other flags remain set ??? */
	    bp->b_error = tmperr;
	}

	/* Unmap the buffer's data area by calling vm_det.  Reset data area
	 * to the value that we saved above.
	 */
	vm_det(bp->b_baddr);
	bp->b_baddr = tmpaddr;

	/*
	 * buffer may be linked with other buffers via the b_work field.
	 * See also afs_gn_strategy.  For each buffer in the chain (including
	 * bp) notify all users of the buffer that the daemon is finished
	 * using it by calling iodone.
	 * assumes iodone can modify the b_work field.
	 */
	for (tbp1 = bp;;) {
	    tbp2 = (struct buf *)tbp1->b_work;
	    iodone(tbp1);
	    if (!tbp2)
		break;

	    tbp1 = (struct buf *)tbp2->b_work;
	    iodone(tbp2);
	    if (!tbp1)
		break;
	}
	if (DOvmlock)
	    ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode.  */
	clrjmpx(&jmpbuf);
    }				/* infinite loop (unless we're interrupted) */
    return 0;
}				/* end of afs_BioDaemon() */
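
The b_work chain walk near the end is the subtle part: iodone() may hand a buffer back to its owner, so the link must be read before the call (the loop above unrolls two buffers per iteration). A plainer, hedged rendering of the same fetch-next-before-completing idiom, with illustrative struct and function names:

#include <stdlib.h>

struct req {
	struct req *next;	/* stands in for buf.b_work */
};

/* Stands in for iodone(): after this call the request may be freed or
 * recycled, so the caller must not touch it again. */
static void
complete(struct req *r)
{
	free(r);
}

static void
drain_chain(struct req *head)
{
	struct req *r, *next;

	for (r = head; r != NULL; r = next) {
		next = r->next;	/* read the link BEFORE completing */
		complete(r);	/* after this, r is gone */
	}
}

int
main(void)
{
	struct req *a = malloc(sizeof(*a));
	struct req *b = malloc(sizeof(*b));

	a->next = b;
	b->next = NULL;
	drain_chain(a);
	return 0;
}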
Example #7
/*
 * Threaded process initialization.
 *
 * This is only called under two conditions:
 *
 *   1) Some thread routines have detected that the library hasn't yet
 *      been initialized (_thr_initial == NULL && curthread == NULL), or
 *
 *   2) An explicit call to reinitialize after a fork (indicated
 *      by curthread != NULL)
 */
void
_libpthread_init(struct pthread *curthread)
{
	int fd, first = 0;
	sigset_t sigset, oldset;

	/* Check if this function has already been called: */
	if ((_thr_initial != NULL) && (curthread == NULL))
		/* Only initialize the threaded application once. */
		return;

	/*
	 * Check for the special case of this process running as
	 * or in place of init as pid = 1:
	 */
	if ((_thr_pid = getpid()) == 1) {
		/*
		 * Setup a new session for this process which is
		 * assumed to be running as root.
		 */
		if (setsid() == -1)
			PANIC("Can't set session ID");
		if (revoke(_PATH_CONSOLE) != 0)
			PANIC("Can't revoke console");
		if ((fd = __sys_open(_PATH_CONSOLE, O_RDWR)) < 0)
			PANIC("Can't open console");
		if (setlogin("root") == -1)
			PANIC("Can't set login to root");
		if (__sys_ioctl(fd, TIOCSCTTY, NULL) == -1)
			PANIC("Can't set controlling terminal");
	}

	/* Initialize pthread private data. */
	init_private();

	/* Set the initial thread. */
	if (curthread == NULL) {
		first = 1;
		/* Create and initialize the initial thread. */
		curthread = _thr_alloc(NULL);
		if (curthread == NULL)
			PANIC("Can't allocate initial thread");
		init_main_thread(curthread);
	}
	/*
	 * Add the thread to the thread list queue.
	 */
	THR_LIST_ADD(curthread);
	_thread_active_threads = 1;

	/* Setup the thread specific data */
	tls_set_tcb(curthread->tcb);

	if (first) {
		SIGFILLSET(sigset);
		__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
		_thr_signal_init();
		_thr_initial = curthread;
		SIGDELSET(oldset, SIGCANCEL);
		__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
		if (td_eventismember(&_thread_event_mask, TD_CREATE))
			_thr_report_creation(curthread, curthread);
	}
}
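
The first-time branch shows one more idiom: install the library's signal handlers while everything is blocked, then restore the pre-existing mask with SIGCANCEL forced out of it, so the internal signal can never end up blocked in the initial thread. A hedged sketch, with SIGUSR1 standing in for the internal signal and one sigaction() call standing in for _thr_signal_init():

#include <signal.h>
#include <string.h>

static void
handler(int sig)
{
	(void)sig;
}

static void
init_signals(void)
{
	sigset_t all, old;
	struct sigaction sa;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, &old);	/* no handler can fire yet */

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigfillset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);		/* ~ _thr_signal_init() */

	sigdelset(&old, SIGUSR1);		/* never leave it blocked */
	sigprocmask(SIG_SETMASK, &old, NULL);
}

int
main(void)
{
	init_signals();
	return 0;
}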