Example #1
static void *task_trampoline(void *arg)
{
	struct wind_task *task = arg;
	struct wind_task_args *args = &task->args;
	struct service svc;
	int ret;

	ret = __bt(threadobj_prologue(&task->thobj, task->name));
	if (ret)
		goto done;

	COPPERPLATE_PROTECT(svc);

	ret = __bt(registry_add_file(&task->fsobj, O_RDONLY,
				     "/vxworks/tasks/%s", task->name));
	if (ret)
		warning("failed to export task %s to registry",
			task->name);

	COPPERPLATE_UNPROTECT(svc);

	/* Wait for someone to run taskActivate() upon us. */
	threadobj_wait_start(&task->thobj);

	args->entry(args->arg0, args->arg1, args->arg2, args->arg3,
		    args->arg4, args->arg5, args->arg6, args->arg7,
		    args->arg8, args->arg9);
done:
	threadobj_lock(&task->thobj);
	threadobj_set_magic(&task->thobj, ~task_magic);
	threadobj_unlock(&task->thobj);

	pthread_exit((void *)(long)ret);
}
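
The trampoline parks in threadobj_wait_start() until taskActivate() releases it, so the parent can finish setting the task up before user code runs. A minimal sketch of that start-gate pattern in plain POSIX terms (struct start_gate, gate_wait() and gate_open() are illustrative names, not the Copperplate implementation):

#include <pthread.h>

struct start_gate {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int started;
};

/* Trampoline side: block until the gate opens. */
static void gate_wait(struct start_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (!g->started)	/* re-check: wakeups may be spurious */
		pthread_cond_wait(&g->cond, &g->lock);
	pthread_mutex_unlock(&g->lock);
}

/* Activator side: release every thread parked on the gate. */
static void gate_open(struct start_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->started = 1;
	pthread_cond_broadcast(&g->cond);
	pthread_mutex_unlock(&g->lock);
}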
Example #2
int clockobj_init(struct clockobj *clkobj,
		  const char *name, unsigned int resolution_ns)
{
	pthread_mutexattr_t mattr;
	struct timespec now;
	int ret;

	if (resolution_ns == 0)
		return __bt(-EINVAL);

	memset(clkobj, 0, sizeof(*clkobj));
	ret = __clockobj_set_resolution(clkobj, resolution_ns);
	if (ret)
		return __bt(ret);

	/*
	 * FIXME: this lock is only used to protect the wallclock
	 * offset readings from updates. We should replace this by a
	 * confirmed reading loop.
	 */
	__RT(pthread_mutexattr_init(&mattr));
	__RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
	__RT(pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE));
	__RT(pthread_mutex_init(&clkobj->lock, &mattr));
	__RT(pthread_mutexattr_destroy(&mattr));
	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
	timespec_sub(&clkobj->offset, &clkobj->epoch, &now);
	clkobj->name = name;

	return 0;
}
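
The FIXME above suggests replacing the offset lock with a confirmed reading loop. A sketch of what that could look like with a sequence counter, assuming C11 atomics (offset_seq, offset_update() and offset_read() are hypothetical names, and the plain payload copy would need relaxed atomic accesses or carefully audited fencing to be fully portable):

#include <stdatomic.h>
#include <time.h>

static atomic_uint offset_seq;
static struct timespec wallclock_offset;

/* Updater: the sequence is odd while a write is in flight. */
static void offset_update(const struct timespec *ts)
{
	atomic_fetch_add_explicit(&offset_seq, 1, memory_order_acq_rel);
	wallclock_offset = *ts;
	atomic_fetch_add_explicit(&offset_seq, 1, memory_order_release);
}

/* Reader: retry until a stable, even sequence brackets the copy. */
static void offset_read(struct timespec *ts)
{
	unsigned int seq;

	do {
		seq = atomic_load_explicit(&offset_seq, memory_order_acquire);
		*ts = wallclock_offset;
		atomic_thread_fence(memory_order_acquire);
	} while ((seq & 1) != 0 ||
		 atomic_load_explicit(&offset_seq, memory_order_relaxed) != seq);
}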
Example #3
static int vxworks_init(int argc, char *const argv[])
{
	int ret, lindex, c;

	for (;;) {
		c = getopt_long_only(argc, argv, "", vxworks_options, &lindex);
		if (c == EOF)
			break;
		if (c > 0)
			continue;
		switch (lindex) {
		case clock_resolution_opt:
			clock_resolution = atoi(optarg);
			break;
		}
	}

	registry_add_dir("/vxworks");
	registry_add_dir("/vxworks/tasks");
	registry_add_dir("/vxworks/semaphores");
	registry_add_dir("/vxworks/queues");
	registry_add_dir("/vxworks/watchdogs");

	cluster_init(&wind_task_table, "vxworks.task");

	ret = clockobj_init(&wind_clock, "vxworks", clock_resolution);
	if (ret) {
		warning("%s: failed to initialize VxWorks clock (res=%u ns)",
			__FUNCTION__, clock_resolution);
		return __bt(ret);
	}

	return 0;
}
Example #4
static int psos_init(void)
{
	int ret;

	registry_add_dir("/psos");
	registry_add_dir("/psos/tasks");
	registry_add_dir("/psos/semaphores");
	registry_add_dir("/psos/queues");
	registry_add_dir("/psos/timers");
	registry_add_dir("/psos/partitions");
	registry_add_dir("/psos/regions");

	cluster_init(&psos_task_table, "psos.task");
	cluster_init(&psos_sem_table, "psos.sema4");
	cluster_init(&psos_queue_table, "psos.queue");
	pvcluster_init(&psos_pt_table, "psos.pt");
	pvcluster_init(&psos_rn_table, "psos.rn");

	ret = clockobj_init(&psos_clock, "psos", clock_resolution);
	if (ret) {
		warning("%s: failed to initialize pSOS clock (res=%u ns)",
			__FUNCTION__, clock_resolution);
		return __bt(ret);
	}

	/* Convert pSOS ticks to timespec. */
	clockobj_ticks_to_timespec(&psos_clock, time_slice_in_ticks, &psos_rrperiod);

	return 0;
}
Example #5
int heapobj_init_array_private(struct heapobj *hobj, const char *name,
			       size_t size, int elems)
{
	size += TLSF_BLOCK_OVERHEAD;
	if (size < 16)
		size = 16;

	return __bt(heapobj_init_private(hobj, name, size * elems, NULL));
}
Example #6
int __heapobj_extend(struct heapobj *hobj, size_t size, void *mem)
{
	__RT(pthread_mutex_lock(&hobj->lock));
	hobj->size = add_new_area(hobj->pool, size, mem);
	__RT(pthread_mutex_unlock(&hobj->lock));
	if (hobj->size == (size_t)-1)
		return __bt(-EINVAL);

	return 0;
}
Example #7
int rt_alarm_create(RT_ALARM *alarm, const char *name)
{
    struct trank_alarm_wait *aw;
    pthread_mutexattr_t mattr;
    pthread_condattr_t cattr;
    int ret;

    aw = xnmalloc(sizeof(*aw));
    if (aw == NULL)
        return -ENOMEM;

    aw->alarm_pulses = 0;

    pthread_mutexattr_init(&mattr);
    pthread_mutexattr_settype(&mattr, mutex_type_attribute);
    pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
    pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
    ret = __bt(-__RT(pthread_mutex_init(&aw->lock, &mattr)));
    pthread_mutexattr_destroy(&mattr);
    if (ret)
        goto fail_lock;

    pthread_condattr_init(&cattr);
    pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
    ret = __bt(-pthread_cond_init(&aw->event, &cattr));
    pthread_condattr_destroy(&cattr);
    if (ret)
        goto fail_cond;

    ret = __CURRENT(rt_alarm_create(alarm, name, trank_alarm_handler, aw));
    if (ret)
        goto fail_alarm;

    return 0;
fail_alarm:
    __RT(pthread_cond_destroy(&aw->event));
fail_cond:
    __RT(pthread_mutex_destroy(&aw->lock));
fail_lock:
    xnfree(aw);

    return ret;
}
Example #8
int traceobj_init(struct traceobj *trobj, const char *label, int nr_marks)
{
	pthread_mutexattr_t mattr;
	pthread_condattr_t cattr;
	int ret;

	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
	ret = __bt(-__RT(pthread_mutex_init(&trobj->lock, &mattr)));
	pthread_mutexattr_destroy(&mattr);
	if (ret)
		return ret;

	pthread_condattr_init(&cattr);
	pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
	ret = __bt(-__RT(pthread_cond_init(&trobj->join, &cattr)));
	pthread_condattr_destroy(&cattr);
	if (ret) {
		__RT(pthread_mutex_destroy(&trobj->lock));
		return ret;
	}

	/*
	 * We make sure not to unblock from threadobj_join() until at
	 * least one thread has called trace_enter() for this trace
	 * object.
	 */
	trobj->nr_threads = -1;

	trobj->label = label;
	trobj->nr_marks = nr_marks;
	trobj->cur_mark = 0;

	if (nr_marks > 0) {
		trobj->marks = pvmalloc(sizeof(struct tracemark) * nr_marks);
		if (trobj->marks == NULL)
			panic("cannot allocate mark table for tracing");
	}

	return 0;
}
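
Seeding nr_threads with -1 is what keeps a joiner from mistaking "no thread has entered yet" for "all threads are done". A sketch of the counting scheme this implies, using the lock and condvar initialized above (trace_enter(), trace_exit() and trace_join() are illustrative names, not the actual traceobj entry points):

static void trace_enter(struct traceobj *trobj)
{
	__RT(pthread_mutex_lock(&trobj->lock));
	if (trobj->nr_threads < 0)
		trobj->nr_threads = 0;	/* first entry arms the join gate */
	trobj->nr_threads++;
	__RT(pthread_mutex_unlock(&trobj->lock));
}

static void trace_exit(struct traceobj *trobj)
{
	__RT(pthread_mutex_lock(&trobj->lock));
	if (--trobj->nr_threads == 0)
		__RT(pthread_cond_broadcast(&trobj->join));
	__RT(pthread_mutex_unlock(&trobj->lock));
}

static void trace_join(struct traceobj *trobj)
{
	__RT(pthread_mutex_lock(&trobj->lock));
	while (trobj->nr_threads != 0)	/* still -1 until the first entry */
		__RT(pthread_cond_wait(&trobj->join, &trobj->lock));
	__RT(pthread_mutex_unlock(&trobj->lock));
}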
Example #9
static inline
int __clockobj_set_resolution(struct clockobj *clkobj,
			      unsigned int resolution_ns)
{
	if (resolution_ns > 1) {
		warning("low resolution clock disabled [--enable-lores-clock]");
		return __bt(-EINVAL);
	}

	return 0;
}
Example #10
int __check_cancel_type(const char *locktype)
{
	int oldtype;

	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
	if (oldtype == PTHREAD_CANCEL_DEFERRED)
		return 0;

	warning("%s() section is NOT cancel-safe", locktype);
	abort();

	return __bt(-EINVAL);
}
Example #11
int heapobj_init_private(struct heapobj *hobj, const char *name,
			 size_t size, void *mem)
{
	if (mem == NULL) {
		/*
		 * When the memory area is unspecified, obtain it from
		 * the main pool, accounting for the TLSF overhead.
		 */
		size += tlsf_pool_overhead;
		mem = tlsf_malloc(size);
		if (mem == NULL)
			return __bt(-ENOMEM);
	}

	if (name)
		snprintf(hobj->name, sizeof(hobj->name), "%s", name);
	else
		snprintf(hobj->name, sizeof(hobj->name), "%p", hobj);
#ifdef CONFIG_XENO_PSHARED
	hobj->ops = &tlsf_ops;
#endif
	hobj->pool = mem;
	/* Make sure to wipe out tlsf's signature. */
	memset(mem, 0, size);
	hobj->size = init_memory_pool(size, mem);
	if (hobj->size == (size_t)-1)
		return __bt(-EINVAL);

	/*
	 * TLSF does not lock around so-called extended calls aimed at
	 * specific pools, which is definitely braindamage. So DIY.
	 */
	__RT(pthread_mutex_init(&hobj->lock, NULL));

	return 0;
}
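
The comment above is the reason hobj->lock exists: stock TLSF does not serialize its pool-specific "extended" calls, so each heapobj wraps them itself. A sketch of the resulting allocation path, assuming the classic malloc_ex()/free_ex() TLSF entry points (the *_locked wrapper names are illustrative):

static void *heapobj_alloc_locked(struct heapobj *hobj, size_t size)
{
	void *p;

	__RT(pthread_mutex_lock(&hobj->lock));
	p = malloc_ex(size, hobj->pool);	/* TLSF does not lock this itself */
	__RT(pthread_mutex_unlock(&hobj->lock));

	return p;
}

static void heapobj_free_locked(struct heapobj *hobj, void *p)
{
	__RT(pthread_mutex_lock(&hobj->lock));
	free_ex(p, hobj->pool);
	__RT(pthread_mutex_unlock(&hobj->lock));
}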
Example #12
int rt_alarm_wait(RT_ALARM *alarm)
{
    struct threadobj *current = threadobj_current();
    struct sched_param_ex param_ex;
    struct trank_alarm_wait *aw;
    struct alchemy_alarm *acb;
    int ret, prio, pulses;

    acb = find_alarm(alarm);
    if (acb == NULL)
        return -EINVAL;

    threadobj_lock(current);
    prio = threadobj_get_priority(current);
    if (prio != threadobj_irq_prio) {
        param_ex.sched_priority = threadobj_irq_prio;
        /* Working on self, so -EIDRM can't happen. */
        threadobj_set_schedparam(current, SCHED_FIFO, &param_ex);
    }
    threadobj_unlock(current);

    aw = acb->arg;

    /*
     * Emulate the original behavior: wait for the next pulse (no
     * event buffering, broadcast to all waiters), while
     * preventing spurious wakeups.
     */
    __RT(pthread_mutex_lock(&aw->lock));

    pulses = aw->alarm_pulses;

    for (;;) {
        ret = -__RT(pthread_cond_wait(&aw->event, &aw->lock));
        if (ret || aw->alarm_pulses != pulses)
            break;
    }

    __RT(pthread_mutex_unlock(&aw->lock));

    return __bt(ret);
}
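
The pulse counter doubles as a generation count for the condition variable: a waiter snapshots it under the lock and only returns once it changes, which filters out spurious wakeups without buffering events. The pattern, distilled to plain POSIX (names are illustrative):

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t event = PTHREAD_COND_INITIALIZER;
static unsigned int generation;

static void wait_next_pulse(void)
{
	unsigned int gen;

	pthread_mutex_lock(&lock);
	gen = generation;
	do
		pthread_cond_wait(&event, &lock);
	while (generation == gen);	/* ignore spurious wakeups */
	pthread_mutex_unlock(&lock);
}

static void post_pulse(void)
{
	pthread_mutex_lock(&lock);
	generation++;			/* one more pulse delivered */
	pthread_cond_broadcast(&event);	/* wake every current waiter */
	pthread_mutex_unlock(&lock);
}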
Example #13
static int vxworks_init(void)
{
	int ret;

	registry_add_dir("/vxworks");
	registry_add_dir("/vxworks/tasks");
	registry_add_dir("/vxworks/semaphores");
	registry_add_dir("/vxworks/queues");
	registry_add_dir("/vxworks/watchdogs");

	cluster_init(&wind_task_table, "vxworks.task");

	ret = clockobj_init(&wind_clock, clock_resolution);
	if (ret) {
		warning("%s: failed to initialize VxWorks clock (res=%u ns)",
			__FUNCTION__, clock_resolution);
		return __bt(ret);
	}

	__RT(pthread_mutex_init(&wind_task_lock, NULL));

	return 0;
}
Example #14
static int alchemy_init(void)
{
	int ret;

	syncluster_init(&alchemy_task_table, "alchemy.task");
	syncluster_init(&alchemy_sem_table, "alchemy.sem");
	syncluster_init(&alchemy_event_table, "alchemy.event");
	syncluster_init(&alchemy_cond_table, "alchemy.cond");
	syncluster_init(&alchemy_mutex_table, "alchemy.mutex");
	syncluster_init(&alchemy_queue_table, "alchemy.queue");
	syncluster_init(&alchemy_buffer_table, "alchemy.buffer");
	syncluster_init(&alchemy_heap_table, "alchemy.heap");
	pvcluster_init(&alchemy_alarm_table, "alchemy.alarm");

	ret = clockobj_init(&alchemy_clock, clock_resolution);
	if (ret) {
		warning("%s: failed to initialize Alchemy clock (res=%u ns)",
			__FUNCTION__, clock_resolution);
		return __bt(ret);
	}

	registry_add_dir("/alchemy");
	registry_add_dir("/alchemy/tasks");
	registry_add_dir("/alchemy/semaphores");
	registry_add_dir("/alchemy/events");
	registry_add_dir("/alchemy/condvars");
	registry_add_dir("/alchemy/mutexes");
	registry_add_dir("/alchemy/queues");
	registry_add_dir("/alchemy/buffers");
	registry_add_dir("/alchemy/heaps");
	registry_add_dir("/alchemy/alarms");

	init_corespec();

	return 0;
}
Example #15
static STATUS __taskInit(struct wind_task *task,
			 struct WIND_TCB *tcb, const char *name,
			 int prio, int flags, FUNCPTR entry, int stacksize)
{
	struct threadobj_init_data idata;
	pthread_mutexattr_t mattr;
	struct sched_param param;
	pthread_attr_t thattr;
	int ret, cprio;

	ret = check_task_priority(prio, &cprio);
	if (ret) {
		errno = ret;
		return ERROR;
	}

	if (name == NULL || *name == '\0')
		sprintf(task->name, "t%lu", ++anon_tids);
	else {
		strncpy(task->name, name, sizeof(task->name));
		task->name[sizeof(task->name) - 1] = '\0';
	}

	__RT(pthread_mutexattr_init(&mattr));
	__RT(pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE));
	__RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
	__RT(pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute));
	__RT(pthread_mutex_init(&task->safelock, &mattr));
	__RT(pthread_mutexattr_destroy(&mattr));

	task->tcb = tcb;
	tcb->opaque = task;
	/*
	 * CAUTION: tcb->status is only modified by the owner task
	 * (see suspend/resume hooks), or when such task is guaranteed
	 * not to be running, e.g. in taskActivate(). So we do NOT
	 * take any lock specifically for updating it. However, we
	 * know that a memory barrier will be issued shortly after
	 * such updates because of other locking being in effect, so
	 * we don't explicitly have to provide for it.
	 */
	tcb->status = WIND_SUSPEND;
	tcb->safeCnt = 0;
	tcb->flags = flags;
	tcb->entry = entry;

	pthread_attr_init(&thattr);

	if (stacksize == 0)
		stacksize = PTHREAD_STACK_MIN * 4;
	else if (stacksize < PTHREAD_STACK_MIN)
		stacksize = PTHREAD_STACK_MIN;

	memset(&param, 0, sizeof(param));
	param.sched_priority = cprio;
	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&thattr, SCHED_RT);
	pthread_attr_setschedparam(&thattr, &param);
	pthread_attr_setstacksize(&thattr, stacksize);
	pthread_attr_setscope(&thattr, thread_scope_attribute);
	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);

	idata.magic = task_magic;
	idata.wait_hook = task_wait_hook;
	idata.suspend_hook = task_suspend_hook;
	idata.finalizer = task_finalizer;
	idata.priority = cprio;
	threadobj_init(&task->thobj, &idata);

	ret = __bt(cluster_addobj(&wind_task_table, task->name, &task->cobj));
	if (ret) {
		warning("duplicate task name: %s", task->name);
		threadobj_destroy(&task->thobj);
		__RT(pthread_mutex_destroy(&task->safelock));
		errno = S_objLib_OBJ_ID_ERROR;
		return ERROR;
	}

	registry_init_file(&task->fsobj, &registry_ops);

	ret = __bt(-__RT(pthread_create(&task->thobj.tid, &thattr,
					&task_trampoline, task)));
	pthread_attr_destroy(&thattr);
	if (ret) {
		registry_destroy_file(&task->fsobj);
		cluster_delobj(&wind_task_table, &task->cobj);
		threadobj_destroy(&task->thobj);
		__RT(pthread_mutex_destroy(&task->safelock));
		errno = ret == -EAGAIN ? S_memLib_NOT_ENOUGH_MEMORY : -ret;
		return ERROR;
	}

	return OK;
}
Example #16
/**
 * @fn int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
 * @brief Create an IPC buffer.
 *
 * This routine creates an IPC object that allows tasks to send and
 * receive data asynchronously via a memory buffer. Data may be of
 * arbitrary length, although this IPC is best suited for small to
 * medium-sized messages, since data always has to be copied to the
 * buffer during transit. Large messages may be more efficiently
 * handled by message queues (RT_QUEUE).
 *
 * @param bf The address of a buffer descriptor which can later be
 * used to uniquely identify the created object, upon success of
 * this call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * buffer. When non-NULL and non-empty, a copy of this string is used
 * for indexing the created buffer into the object registry.
 *
 * @param bufsz The size of the buffer space available to hold
 * data. The required memory is obtained from the main heap.
 *
 * @param mode The buffer creation mode. The following flags can be
 * OR'ed into this bitmask, each of them affecting the new buffer:
 *
 * - B_FIFO makes tasks pend in FIFO order for reading data from the
 *   buffer.
 *
 * - B_PRIO makes tasks pend in priority order for reading data from
 *   the buffer.
 *
 * This parameter also applies to tasks blocked on the buffer's write
 * side (see rt_buffer_write()).
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the buffer.
 *
 * - -EEXIST is returned if the @a name is conflicting with an already
 * registered buffer.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 *
 * @note Buffers can be shared by multiple processes which belong to
 * the same Xenomai session.
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name,
		     size_t bufsz, int mode)
{
	struct alchemy_buffer *bcb;
	struct service svc;
	int sobj_flags = 0;
	int ret;

	if (threadobj_irq_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	CANCEL_DEFER(svc);

	bcb = xnmalloc(sizeof(*bcb));
	if (bcb == NULL) {
		ret = __bt(-ENOMEM);
		goto fail;
	}

	bcb->buf = xnmalloc(bufsz);
	if (bcb->buf == NULL) {
		ret = __bt(-ENOMEM);
		goto fail_bufalloc;
	}

	generate_name(bcb->name, name, &buffer_namegen);
	bcb->magic = buffer_magic;
	bcb->mode = mode;
	bcb->bufsz = bufsz;
	bcb->rdoff = 0;
	bcb->wroff = 0;
	bcb->fillsz = 0;
	if (mode & B_PRIO)
		sobj_flags = SYNCOBJ_PRIO;

	syncobj_init(&bcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
		     fnref_put(libalchemy, buffer_finalize));

	if (syncluster_addobj(&alchemy_buffer_table, bcb->name, &bcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	bf->handle = mainheap_ref(bcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return 0;

fail_register:
	syncobj_uninit(&bcb->sobj);
	xnfree(bcb->buf);
fail_bufalloc:
	xnfree(bcb);
fail:
	CANCEL_RESTORE(svc);

	return ret;
}
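
A minimal usage sketch built from the documentation above; rt_buffer_write(), rt_buffer_read(), rt_buffer_delete() and TM_INFINITE are taken from the Alchemy API, and error handling is reduced to the bare minimum:

#include <alchemy/buffer.h>

static RT_BUFFER bf;

static int buffer_demo(void)
{
	char out[] = "hello", in[16];
	ssize_t n;
	int ret;

	ret = rt_buffer_create(&bf, "demo", 1024, B_PRIO);
	if (ret)
		return ret;

	/* Copy a message in, then drain it back out. */
	n = rt_buffer_write(&bf, out, sizeof(out), TM_INFINITE);
	if (n >= 0)
		n = rt_buffer_read(&bf, in, sizeof(in), TM_INFINITE);

	rt_buffer_delete(&bf);

	return n < 0 ? (int)n : 0;
}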