Example #1
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, unsigned int tqflags)
{
	taskq_ent_t *t;

	if (taskq_now) {
		func(arg);
		return (1);
	}

	mxlock(&tq->tq_lock);
	assert(tq->tq_flags & TASKQ_ACTIVE);
	if ((t = task_alloc(tq, tqflags)) == NULL) {
		mxunlock(&tq->tq_lock);
		return (0);
	}
	if (tqflags & TQ_FRONT) {
		/* insert at the head of the circular task list */
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		/* insert at the tail of the circular task list */
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	t->tqent_flags = 0;
	condsig(&tq->tq_dispatch_cv);
	mxunlock(&tq->tq_lock);
	return (1);
}
Example #2
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, unsigned int flags,
    taskq_ent_t *t)
{
	assert(func != NULL);
	assert(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * Mark it as a prealloc'd task.  This is important
	 * to ensure that we don't free it later.
	 */
	t->tqent_flags |= TQENT_FLAG_PREALLOC;
	/*
	 * Enqueue the task to the underlying queue.
	 */
	mxlock(&tq->tq_lock);

	if (flags & TQ_FRONT) {
		t->tqent_next = tq->tq_task.tqent_next;
		t->tqent_prev = &tq->tq_task;
	} else {
		t->tqent_next = &tq->tq_task;
		t->tqent_prev = tq->tq_task.tqent_prev;
	}
	t->tqent_next->tqent_prev = t;
	t->tqent_prev->tqent_next = t;
	t->tqent_func = func;
	t->tqent_arg = arg;
	condsig(&tq->tq_dispatch_cv);
	mxunlock(&tq->tq_lock);
}
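A minimal caller-side sketch for the prealloc'd path in Example #2 (struct my_job, my_job_func() and my_job_submit() are hypothetical names, not part of this project): the taskq_ent_t is embedded in a caller-owned job, umem_zalloc() leaves tqent_flags zeroed, and because TQENT_FLAG_PREALLOC keeps taskq_thread() from calling task_free() on the entry, the work function frees the job itself once it is done.
struct my_job {
	taskq_ent_t	mj_ent;		/* embedded, caller-owned entry */
	int		mj_id;
};

static void
my_job_func(void *arg)
{
	struct my_job *job = arg;

	/* ... do the work for job->mj_id ... */

	/* The taskq never frees a prealloc'd entry, so the job frees itself. */
	umem_free(job, sizeof (struct my_job));
}

static void
my_job_submit(taskq_t *tq, int id)
{
	struct my_job *job = umem_zalloc(sizeof (struct my_job), UMEM_NOFAIL);

	job->mj_id = id;
	taskq_dispatch_ent(tq, my_job_func, job, 0, &job->mj_ent);
}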
Example #3
File: vg.c Project: nurh/copterfs
struct objstore *objstore_vg_create(const char *name,
                                    enum objstore_vg_type type)
{
    struct objstore *vg;

    if (type != OS_VG_SIMPLE)
        return ERR_PTR(EINVAL);

    vg = umem_cache_alloc(vg_cache, 0);
    if (!vg)
        return ERR_PTR(ENOMEM);

    vg->name = strdup(name);
    if (!vg->name) {
        umem_cache_free(vg_cache, vg);
        return ERR_PTR(ENOMEM);
    }

    list_create(&vg->vols, sizeof(struct objstore_vol),
                offsetof(struct objstore_vol, vg_list));

    mxinit(&vg->lock);

    mxlock(&vgs_lock);
    list_insert_tail(&vgs, vg);
    mxunlock(&vgs_lock);

    return vg;
}
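A caller-side sketch for Example #3. This assumes the project pairs ERR_PTR() with the usual IS_ERR()/PTR_ERR() helpers; those helpers, and make_simple_vg() itself, are assumptions rather than code shown in these examples.
static struct objstore *make_simple_vg(const char *name)
{
    struct objstore *vg;

    vg = objstore_vg_create(name, OS_VG_SIMPLE);
    if (IS_ERR(vg)) {
        cmn_err(CE_DEBUG, "%s: objstore_vg_create('%s') failed: %d",
                __func__, name, (int)PTR_ERR(vg));
        return NULL;
    }

    return vg;
}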
Example #4
File: vg.c Project: nurh/copterfs
int objstore_create(struct objstore *vg, const struct nobjhndl *dir,
                    const char *name, uint16_t mode,
                    struct nobjhndl *child)
{
    struct objstore_vol *vol;
    int ret;

    cmn_err(CE_DEBUG, "%s(%p, %p, '%s', %#o, %p)", __func__, vg, dir,
            name, mode, child);

    if (!vg || !dir || !name || !child)
        return EINVAL;

    /*
     * TODO: we're assuming OS_VG_SIMPLE
     */
    mxlock(&vg->lock);
    vol = list_head(&vg->vols);
    if (vol)
        ret = vol_create(vol, dir, name, mode, child);
    else
        ret = ENXIO;
    mxunlock(&vg->lock);

    return ret;
}
Example #5
void
taskq_wait(taskq_t *tq)
{
	mxlock(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		condwait(&tq->tq_wait_cv, &tq->tq_lock);
	mxunlock(&tq->tq_lock);
}
Example #6
File: vg.c Project: nurh/copterfs
struct objstore *objstore_vg_lookup(const char *name)
{
    struct objstore *vg;

    mxlock(&vgs_lock);
    list_for_each(&vgs, vg)
        if (!strcmp(name, vg->name))
            break;
    mxunlock(&vgs_lock);

    return vg;
}
Example #7
/*ARGSUSED*/
taskq_t *
taskq_create(const char *name, int nthreads,
	int minalloc, int maxalloc, unsigned int flags)
{
	taskq_t *tq;
	int t;

	tq = umem_zalloc(sizeof(taskq_t), 0);
	if (!tq)
		return NULL;

	if (flags & TASKQ_THREADS_CPU_PCT) {
		int pct;
		assert(nthreads >= 0);
		assert(nthreads <= taskq_cpupct_max_percent);
		pct = MIN(nthreads, taskq_cpupct_max_percent);
		pct = MAX(pct, 0);

		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
	} else {
		assert(nthreads >= 1);
	}

	rwinit(&tq->tq_threadlock);
	mxinit(&tq->tq_lock);
	condinit(&tq->tq_dispatch_cv);
	condinit(&tq->tq_wait_cv);
	condinit(&tq->tq_maxalloc_cv);
	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;
	tq->tq_threadlist =
	    umem_alloc(nthreads * sizeof (pthread_t), UMEM_NOFAIL);

	if (flags & TASKQ_PREPOPULATE) {
		mxlock(&tq->tq_lock);
		while (minalloc-- > 0)
			task_free(tq, task_alloc(tq, UMEM_NOFAIL));
		mxunlock(&tq->tq_lock);
	}

	for (t = 0; t < nthreads; t++)
		pthread_create(&tq->tq_threadlist[t], NULL, taskq_thread, tq);

	return (tq);
}
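An end-to-end sketch tying Examples #1, #5, #7 and #12 together (run_batch() and mark_done() are hypothetical): create a queue, dispatch a batch of tasks, drain it with taskq_wait(), then tear it down with taskq_destroy(). UMEM_NOFAIL is passed so task_alloc() cannot return NULL; with flags of 0 the return value of taskq_dispatch() would have to be checked instead.
static void
mark_done(void *arg)
{
	*(int *)arg = 1;	/* each task writes only its own slot */
}

static int
run_batch(void)
{
	taskq_t *tq;
	int done[16] = { 0 };
	int i, ok = 1;

	tq = taskq_create("batch", 4, 2, 8, TASKQ_PREPOPULATE);
	if (tq == NULL)
		return (ENOMEM);

	for (i = 0; i < 16; i++)
		(void) taskq_dispatch(tq, mark_done, &done[i], UMEM_NOFAIL);

	taskq_wait(tq);		/* returns once the queue is empty and idle */
	taskq_destroy(tq);	/* joins the workers and frees the queue */

	/* Every slot is set because taskq_wait() drained the queue. */
	for (i = 0; i < 16; i++)
		ok &= done[i];
	return (ok ? 0 : EIO);
}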
Example #8
static void *
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *t;
	bool prealloc;

	mxlock(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((t = tq->tq_task.tqent_next) == &tq->tq_task) {
			/* queue is empty: go idle and wait for a dispatch */
			if (--tq->tq_active == 0)
				condbcast(&tq->tq_wait_cv);
			condwait(&tq->tq_dispatch_cv, &tq->tq_lock);
			tq->tq_active++;
			continue;
		}
		t->tqent_prev->tqent_next = t->tqent_next;
		t->tqent_next->tqent_prev = t->tqent_prev;
		t->tqent_next = NULL;
		t->tqent_prev = NULL;
		prealloc = t->tqent_flags & TQENT_FLAG_PREALLOC;
		mxunlock(&tq->tq_lock);

		rwlock(&tq->tq_threadlock, false);
		t->tqent_func(t->tqent_arg);
		rwunlock(&tq->tq_threadlock);

		mxlock(&tq->tq_lock);
		if (!prealloc)
			task_free(tq, t);	/* prealloc'd entries belong to the caller */
	}
	tq->tq_nthreads--;
	condbcast(&tq->tq_wait_cv);
	mxunlock(&tq->tq_lock);
	return (NULL);
}
Example #9
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
		/* drop tq_lock across the call into the allocator */
		mxunlock(&tq->tq_lock);
		umem_free(t, sizeof (taskq_ent_t));
		mxlock(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		condsig(&tq->tq_maxalloc_cv);
}
Example #10
static taskq_ent_t *
task_alloc(taskq_t *tq, int tqflags)
{
	taskq_ent_t *t;
	struct timespec ts;
	int err;

again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
		tq->tq_freelist = t->tqent_next;
	} else {
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (!(tqflags & UMEM_NOFAIL))
				return (NULL);

			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller.  So we just delay for one second to
			 * throttle the allocation rate.  If tasks complete
			 * before the one-second timeout expires, task_free()
			 * signals us and we retry the allocation immediately.
			 */
			tq->tq_maxalloc_wait++;

			ts.tv_sec = 1;
			ts.tv_nsec = 0;
			err = condreltimedwait(&tq->tq_maxalloc_cv,
			    &tq->tq_lock, &ts);

			tq->tq_maxalloc_wait--;
			if (err == 0)
				goto again;		/* signaled */
		}
		mxunlock(&tq->tq_lock);

		t = umem_alloc(sizeof (taskq_ent_t), tqflags);

		mxlock(&tq->tq_lock);
		if (t != NULL)
			tq->tq_nalloc++;
	}
	return (t);
}
Example #11
File: vg.c Project: nurh/copterfs
int objstore_getroot(struct objstore *vg, struct nobjhndl *hndl)
{
    struct objstore_vol *vol;
    int ret;

    if (!vg || !hndl)
        return EINVAL;

    /*
     * TODO: we're assuming OS_VG_SIMPLE
     */
    mxlock(&vg->lock);
    vol = list_head(&vg->vols);
    if (vol)
        ret = vol_getroot(vol, hndl);
    else
        ret = ENXIO;
    mxunlock(&vg->lock);

    return ret;
}
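A lookup-plus-getroot sketch combining Examples #6 and #11 (open_root() and the ENOENT fallback are hypothetical; whether objstore_vg_lookup() returns NULL on a miss depends on the project's list_for_each(), which is not shown in these examples).
static int open_root(const char *vgname, struct nobjhndl *root)
{
    struct objstore *vg;
    int ret;

    vg = objstore_vg_lookup(vgname);
    if (!vg)
        return ENOENT;

    ret = objstore_getroot(vg, root);   /* ENXIO if the vg has no volumes */
    if (ret)
        cmn_err(CE_DEBUG, "%s: getroot on '%s' failed: %d",
                __func__, vgname, ret);

    return ret;
}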
Example #12
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mxlock(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	condbcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		condwait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0) {
		assert(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, UMEM_NOFAIL));
	}

	mxunlock(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		pthread_join(tq->tq_threadlist[t], NULL);

	umem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));

	rwdestroy(&tq->tq_threadlock);
	mxdestroy(&tq->tq_lock);
	conddestroy(&tq->tq_dispatch_cv);
	conddestroy(&tq->tq_wait_cv);
	conddestroy(&tq->tq_maxalloc_cv);

	umem_free(tq, sizeof (taskq_t));
}
Example #13
File: vg.c Project: nurh/copterfs
int objstore_remove(struct objstore *vg, const struct nobjhndl *dir,
                    const char *name)
{
    struct objstore_vol *vol;
    int ret;

    cmn_err(CE_DEBUG, "%s(%p, %p, '%s')", __func__, vg, dir, name);

    if (!vg || !dir || !name)
        return EINVAL;

    /*
     * TODO: we're assuming OS_VG_SIMPLE
     */
    mxlock(&vg->lock);
    vol = list_head(&vg->vols);
    if (vol)
        ret = vol_remove(vol, dir, name);
    else
        ret = ENXIO;
    mxunlock(&vg->lock);

    return ret;
}
Example #14
File: vg.c Project: nurh/copterfs
void vg_add_vol(struct objstore *vg, struct objstore_vol *vol)
{
    mxlock(&vg->lock);
    list_insert_tail(&vg->vols, vol);
    mxunlock(&vg->lock);
}