Example #1
void __traceobj_mark(struct traceobj *trobj,
		     const char *file, int line, int mark)
{
	struct tracemark *tmk;
	struct service svc;
	int cur_mark;

	CANCEL_DEFER(svc);

	pthread_testcancel();
	push_cleanup_lock(&trobj->lock);
	write_lock(&trobj->lock);

	cur_mark = trobj->cur_mark;
	if (cur_mark >= trobj->nr_marks) {
		dump_marks(trobj);
		panic("too many marks: [%d] at %s:%d", mark, file, line);
	}

	tmk = trobj->marks + cur_mark;
	tmk->file = file;
	tmk->line = line;
	tmk->mark = mark;
	trobj->cur_mark++;

	write_unlock(&trobj->lock);
	pop_cleanup_lock(&trobj->lock);

	CANCEL_RESTORE(svc);
}
Example #2
/**
 * @fn int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
 * @brief Query buffer status.
 *
 * This routine returns the status information about the specified
 * buffer.
 *
 * @param bf The descriptor address of the buffer to query.
 *
 * @param info The address of the structure the status information is
 * written to upon success.
 *
 * @return Zero is returned and status information is written to the
 * structure pointed at by @a info upon success. Otherwise:
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
 *
 * Valid calling context: any.
 */
int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
{
	struct alchemy_buffer *bcb;
	struct syncstate syns;
	struct service svc;
	int ret = 0;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	info->iwaiters = syncobj_count_grant(&bcb->sobj);
	info->owaiters = syncobj_count_drain(&bcb->sobj);
	info->totalmem = bcb->bufsz;
	info->availmem = bcb->bufsz - bcb->fillsz;
	strcpy(info->name, bcb->name);

	put_alchemy_buffer(bcb, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
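
A minimal caller-side sketch of rt_buffer_inquire(); the <alchemy/buffer.h> header, the dump_buffer_status() helper and the exact RT_BUFFER_INFO field types are assumptions, though the field names match the ones filled in above:

#include <stdio.h>
#include <alchemy/buffer.h>

/* Hypothetical helper: dump the current status of a buffer. */
static void dump_buffer_status(RT_BUFFER *bf)
{
	RT_BUFFER_INFO info;
	int ret = rt_buffer_inquire(bf, &info);

	if (ret) {
		/* -EINVAL: stale or invalid descriptor. */
		fprintf(stderr, "rt_buffer_inquire: %d\n", ret);
		return;
	}

	printf("%s: %lu of %lu bytes free, %d input / %d output waiter(s)\n",
	       info.name,
	       (unsigned long)info.availmem,
	       (unsigned long)info.totalmem,
	       info.iwaiters, info.owaiters);
}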
Example #3
/**
 * @fn int rt_pipe_delete(RT_PIPE *pipe)
 * @brief Delete a message pipe.
 *
 * This routine deletes a pipe object previously created by a call to
 * rt_pipe_create(). All resources attached to that pipe are
 * automatically released, all pending data is flushed.
 *
 * @param pipe The pipe descriptor.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
 *
 * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * @apitags{thread-unrestricted, switch-secondary}
 */
int rt_pipe_delete(RT_PIPE *pipe)
{
	struct alchemy_pipe *pcb;
	struct service svc;
	int ret = 0;

	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = find_alchemy_pipe(pipe, &ret);
	if (pcb == NULL)
		goto out;

	ret = __RT(close(pcb->sock));
	if (ret) {
		ret = -errno;
		if (ret == -EBADF)
			ret = -EIDRM;
		goto out;
	}

	syncluster_delobj(&alchemy_pipe_table, &pcb->cobj);
	pcb->magic = ~pipe_magic;
out:
	CANCEL_RESTORE(svc);

	return ret;
}
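
A hedged teardown sketch decoding the return codes documented above; destroy_pipe() is a hypothetical helper, and only <alchemy/pipe.h> plus standard headers are assumed:

#include <errno.h>
#include <stdio.h>
#include <alchemy/pipe.h>

/* Hypothetical helper: delete a pipe and report the documented
 * failure modes. */
static void destroy_pipe(RT_PIPE *pipe)
{
	switch (rt_pipe_delete(pipe)) {
	case 0:		/* Resources released, pending data flushed. */
		break;
	case -EINVAL:
		fprintf(stderr, "invalid pipe descriptor\n");
		break;
	case -EIDRM:
		fprintf(stderr, "pipe descriptor already closed\n");
		break;
	case -EPERM:
		fprintf(stderr, "called from an asynchronous context\n");
		break;
	}
}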
Example #4
static ssize_t do_write_pipe(RT_PIPE *pipe,
			     const void *buf, size_t size, int flags)
{
	struct alchemy_pipe *pcb;
	struct service svc;
	ssize_t ret;
	int err = 0;

	CANCEL_DEFER(svc);

	pcb = find_alchemy_pipe(pipe, &err);
	if (pcb == NULL) {
		ret = err;
		goto out;
	}

	ret = __RT(sendto(pcb->sock, buf, size, flags, NULL, 0));
	if (ret < 0) {
		ret = -errno;
		if (ret == -EBADF)
			ret = -EIDRM;
	}
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #5
int alchemy_bind_object(const char *name, struct syncluster *sc,
			RTIME timeout,
			int offset,
			uintptr_t *handle)
{
	struct clusterobj *cobj;
	struct service svc;
	struct timespec ts;
	void *p;
	int ret;

	CANCEL_DEFER(svc);
	ret = syncluster_findobj(sc, name,
				 alchemy_rel_timeout(timeout, &ts),
				 &cobj);
	CANCEL_RESTORE(svc);
	if (ret)
		return ret;

	p = cobj;
	p -= offset;
	*handle = mainheap_ref(p, uintptr_t);

	return 0;
}
Example #6
static STATUS xsem_give(struct wind_sem *sem)
{
	struct syncstate syns;
	struct service svc;
	STATUS ret = OK;

	CANCEL_DEFER(svc);

	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
		ret = S_objLib_OBJ_ID_ERROR;
		goto out;
	}

	if (sem->u.xsem.value >= sem->u.xsem.maxvalue) {
		if (sem->u.xsem.maxvalue == INT_MAX)
			/* No wrap around. */
			ret = S_semLib_INVALID_OPERATION;
	} else if (++sem->u.xsem.value <= 0)
		syncobj_grant_one(&sem->u.xsem.sobj);

	syncobj_unlock(&sem->u.xsem.sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #7
SEM_ID semCCreate(int options, int count)
{
	struct service svc;
	SEM_ID sem_id;

	CANCEL_DEFER(svc);
	sem_id = alloc_xsem(options, count, INT_MAX);
	CANCEL_RESTORE(svc);

	return sem_id;
}
Example #8
SEM_ID semBCreate(int options, SEM_B_STATE state)
{
	struct service svc;
	SEM_ID sem_id;

	CANCEL_DEFER(svc);
	sem_id = alloc_xsem(options, state, 1);
	CANCEL_RESTORE(svc);

	return sem_id;
}
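
Both wrappers funnel into alloc_xsem() with a different initial value and ceiling: semBCreate() caps the count at 1, semCCreate() at INT_MAX. A usage sketch, assuming semDelete() and the usual VxWorks declarations come from <vxworks/semLib.h>:

#include <vxworks/semLib.h>

/* Sketch: a binary semaphore created full, plus a counting
 * semaphore seeded with 4 tokens, both with FIFO queuing. */
static void make_sems(void)
{
	SEM_ID bsem = semBCreate(SEM_Q_FIFO, SEM_FULL);
	SEM_ID csem = semCCreate(SEM_Q_FIFO, 4);

	if (bsem == 0 || csem == 0) {
		/* errno is expected to carry the S_* status code. */
		return;
	}

	semDelete(csem);
	semDelete(bsem);
}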
Example #9
static STATUS xsem_take(struct wind_sem *sem, int timeout)
{
	struct timespec ts, *timespec;
	struct syncstate syns;
	struct service svc;
	STATUS ret = OK;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	CANCEL_DEFER(svc);

	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
		ret = S_objLib_OBJ_ID_ERROR;
		goto out;
	}

	if (--sem->u.xsem.value >= 0)
		goto done;

	if (timeout == NO_WAIT) {
		sem->u.xsem.value++;
		ret = S_objLib_OBJ_UNAVAILABLE;
		goto done;
	}

	if (timeout != WAIT_FOREVER) {
		timespec = &ts;
		clockobj_ticks_to_timeout(&wind_clock, timeout, timespec);
	} else
		timespec = NULL;

	ret = syncobj_wait_grant(&sem->u.xsem.sobj, timespec, &syns);
	if (ret == -EIDRM) {
		ret = S_objLib_OBJ_DELETED;
		goto out;
	}
	if (ret) {
		sem->u.xsem.value++;
		if (ret == -ETIMEDOUT)
			ret = S_objLib_OBJ_TIMEOUT;
		else if (ret == -EINTR)
			ret = OK;	/* Flushed. */
	}
done:
	syncobj_unlock(&sem->u.xsem.sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
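
The caller-side view, assuming semTake()/semGive() are the public entry points dispatching to this take path for the counting and binary classes: NO_WAIT polls, WAIT_FOREVER blocks, and any other value is converted from clock ticks by clockobj_ticks_to_timeout(). A sketch:

#include <vxworks/semLib.h>

/* Sketch: the three timeout flavors accepted by the take path. */
static void take_patterns(SEM_ID sem)
{
	if (semTake(sem, NO_WAIT) == OK)	/* Poll, never block. */
		semGive(sem);

	if (semTake(sem, 100) == OK)		/* Wait at most 100 ticks. */
		semGive(sem);

	if (semTake(sem, WAIT_FOREVER) == OK)	/* Block indefinitely. */
		semGive(sem);
}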
Example #10
static void dump_marks_on_error(struct traceobj *trobj)
{
	struct service svc;

	CANCEL_DEFER(svc);

	push_cleanup_lock(&trobj->lock);
	read_lock(&trobj->lock);
	dump_marks(trobj);
	read_unlock(&trobj->lock);
	pop_cleanup_lock(&trobj->lock);

	CANCEL_RESTORE(svc);
}
Example #11
/* May be directly called from finalizer. */
void traceobj_unwind(struct traceobj *trobj)
{
	struct service svc;
	int state;

	CANCEL_DEFER(svc);

	write_lock_safe(&trobj->lock, state);

	if (--trobj->nr_threads <= 0)
		threadobj_cond_signal(&trobj->join);

	write_unlock_safe(&trobj->lock, state);

	CANCEL_RESTORE(svc);
}
Example #12
TASK_ID taskNameToId(const char *name)
{
	struct clusterobj *cobj;
	struct wind_task *task;
	struct service svc;

	CANCEL_DEFER(svc);
	cobj = cluster_findobj(&wind_task_table, name);
	CANCEL_RESTORE(svc);
	if (cobj == NULL)
		return ERROR;

	task = container_of(cobj, struct wind_task, cobj);

	return (TASK_ID)task->tcb;
}
Example #13
void traceobj_join(struct traceobj *trobj)
{
	struct service svc;

	CANCEL_DEFER(svc);

	push_cleanup_lock(&trobj->lock);
	read_lock(&trobj->lock);

	while (trobj->nr_threads < 0 || trobj->nr_threads > 0)
		threadobj_cond_wait(&trobj->join, &trobj->lock);

	read_unlock(&trobj->lock);
	pop_cleanup_lock(&trobj->lock);

	CANCEL_RESTORE(svc);
}
Example #14
void traceobj_enter(struct traceobj *trobj)
{
	struct threadobj *current = threadobj_current();
	struct service svc;

	if (current)
		current->tracer = trobj;

	CANCEL_DEFER(svc);

	write_lock_nocancel(&trobj->lock);

	if (++trobj->nr_threads == 0)
		trobj->nr_threads = 1;

	write_unlock(&trobj->lock);

	CANCEL_RESTORE(svc);
}
Example #15
STATUS errnoOfTaskSet(TASK_ID task_id, int status)
{
	struct wind_task *task;
	struct service svc;
	STATUS ret = OK;

	CANCEL_DEFER(svc);

	task = get_wind_task_or_self(task_id);
	if (task == NULL) {
		ret = ERROR;
		goto out;
	}

	*task->thobj.errno_pointer = status;
	put_wind_task(task);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #16
STATUS errnoOfTaskGet(TASK_ID task_id)
{
	struct wind_task *task;
	struct service svc;
	STATUS status = OK;

	CANCEL_DEFER(svc);

	task = get_wind_task_or_self(task_id);
	if (task == NULL) {
		status = ERROR;
		goto out;
	}

	status = *task->thobj.errno_pointer;
	put_wind_task(task);
out:
	CANCEL_RESTORE(svc);

	return status;
}
Example #17
static STATUS xsem_flush(struct wind_sem *sem)
{
	struct syncstate syns;
	struct service svc;
	STATUS ret = OK;

	CANCEL_DEFER(svc);

	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
		ret = S_objLib_OBJ_ID_ERROR;
		goto out;
	}

	syncobj_flush(&sem->u.xsem.sobj);

	syncobj_unlock(&sem->u.xsem.sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #18
static STATUS xsem_delete(struct wind_sem *sem)
{
	struct syncstate syns;
	struct service svc;
	int ret = OK;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	CANCEL_DEFER(svc);

	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
		ret = S_objLib_OBJ_ID_ERROR;
		goto out;
	}

	sem->magic = ~sem_magic; /* Prevent further reference. */
	syncobj_destroy(&sem->u.xsem.sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #19
/**
 * @fn int rt_buffer_clear(RT_BUFFER *bf)
 * @brief Clear an IPC buffer.
 *
 * This routine discards any data held in the specified buffer.
 *
 * @param bf The descriptor address of the buffer to clear.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
 *
 * Valid calling context: any.
 */
int rt_buffer_clear(RT_BUFFER *bf)
{
	struct alchemy_buffer *bcb;
	struct syncstate syns;
	struct service svc;
	int ret = 0;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	bcb->wroff = 0;
	bcb->rdoff = 0;
	bcb->fillsz = 0;
	syncobj_drain(&bcb->sobj);

	put_alchemy_buffer(bcb, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
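
Note that rt_buffer_clear() also signals the drain side via syncobj_drain(), so writers blocked on a full buffer get a chance to post again. A minimal sketch, assuming the same header as above:

#include <alchemy/buffer.h>

/* Hypothetical helper: drop any stale contents before reusing a
 * buffer; on success the fill count and offsets are zero again. */
static int reset_buffer(RT_BUFFER *bf)
{
	/* -EINVAL is the only documented failure (bad descriptor). */
	return rt_buffer_clear(bf);
}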
Example #20
/**
 * @fn int rt_buffer_delete(RT_BUFFER *bf)
 * @brief Delete an IPC buffer.
 *
 * This routine deletes a buffer object previously created by a call
 * to rt_buffer_create().
 *
 * @param bf The descriptor address of the buffer to delete.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 */
int rt_buffer_delete(RT_BUFFER *bf)
{
	struct alchemy_buffer *bcb;
	struct syncstate syns;
	struct service svc;
	int ret = 0;

	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	syncluster_delobj(&alchemy_buffer_table, &bcb->cobj);
	bcb->magic = ~buffer_magic;
	syncobj_destroy(&bcb->sobj, &syns);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
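
A deletion sketch for a task that only knows the buffer by name; rt_buffer_bind() and TM_INFINITE are assumptions here, inferred from the registry indexing described for rt_buffer_create() in the next example:

#include <alchemy/buffer.h>

/* Sketch: look up a registered buffer by name, then delete it.
 * TM_INFINITE is assumed to block until the name appears. */
static int delete_by_name(const char *name)
{
	RT_BUFFER bf;
	int ret = rt_buffer_bind(&bf, name, TM_INFINITE);

	if (ret)
		return ret;

	return rt_buffer_delete(&bf);
}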
Example #21
/**
 * @fn int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
 * @brief Create an IPC buffer.
 *
 * This routine creates an IPC object that allows tasks to send and
 * receive data asynchronously via a memory buffer. Data may be of
 * arbitrary length, although this IPC is best suited for small to
 * medium-sized messages, since data always has to be copied to the
 * buffer during transit. Large messages may be more efficiently
 * handled by message queues (RT_QUEUE).
 *
 * @param bf The address of a buffer descriptor which can later be
 * used to uniquely identify the created object, upon success of this
 * call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * buffer. When non-NULL and non-empty, a copy of this string is used
 * for indexing the created buffer into the object registry.
 *
 * @param bufsz The size of the buffer space available to hold
 * data. The required memory is obtained from the main heap.
 *
 * @param mode The buffer creation mode. The following flags can be
 * OR'ed into this bitmask, each of them affecting the new buffer:
 *
 * - B_FIFO makes tasks pend in FIFO order for reading data from the
 *   buffer.
 *
 * - B_PRIO makes tasks pend in priority order for reading data from
 *   the buffer.
 *
 * This parameter also applies to tasks blocked on the buffer's write
 * side (see rt_buffer_write()).
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the buffer.
 *
 * - -EEXIST is returned if @a name conflicts with an already
 * registered buffer.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 *
 * @note Buffers can be shared by multiple processes which belong to
 * the same Xenomai session.
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name,
		     size_t bufsz, int mode)
{
	struct alchemy_buffer *bcb;
	struct service svc;
	int sobj_flags = 0;
	int ret;

	if (threadobj_irq_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	CANCEL_DEFER(svc);

	bcb = xnmalloc(sizeof(*bcb));
	if (bcb == NULL) {
		ret = __bt(-ENOMEM);
		goto fail;
	}

	bcb->buf = xnmalloc(bufsz);
	if (bcb->buf == NULL) {
		ret = __bt(-ENOMEM);
		goto fail_bufalloc;
	}

	generate_name(bcb->name, name, &buffer_namegen);
	bcb->magic = buffer_magic;
	bcb->mode = mode;
	bcb->bufsz = bufsz;
	bcb->rdoff = 0;
	bcb->wroff = 0;
	bcb->fillsz = 0;
	if (mode & B_PRIO)
		sobj_flags = SYNCOBJ_PRIO;

	syncobj_init(&bcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
		     fnref_put(libalchemy, buffer_finalize));

	if (syncluster_addobj(&alchemy_buffer_table, bcb->name, &bcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	bf->handle = mainheap_ref(bcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return 0;

fail_register:
	syncobj_uninit(&bcb->sobj);
	xnfree(bcb->buf);
fail_bufalloc:
	xnfree(bcb);
fail:
	CANCEL_RESTORE(svc);

	return ret;
}
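
A lifecycle sketch under stated assumptions: <alchemy/buffer.h> declares the services shown in these examples, the buffer name is arbitrary, and the caller is a regular Xenomai thread (both creation and deletion refuse asynchronous contexts):

#include <alchemy/buffer.h>

/* Sketch: create a 1 KiB priority-ordered buffer, then tear it
 * down; error codes follow the documentation above. */
static int buffer_lifecycle(void)
{
	RT_BUFFER bf;
	int ret = rt_buffer_create(&bf, "demo-buffer", 1024, B_PRIO);

	if (ret)	/* -ENOMEM, -EEXIST or -EPERM. */
		return ret;

	/* ... rt_buffer_write/rt_buffer_read traffic goes here ... */

	return rt_buffer_delete(&bf);
}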
Example #22
SEM_ID semMCreate(int options)
{
	pthread_mutexattr_t mattr;
	struct wind_sem *sem;
	struct service svc;

	if (options & ~(SEM_Q_PRIORITY|SEM_DELETE_SAFE|SEM_INVERSION_SAFE)) {
		errno = S_semLib_INVALID_OPTION;
		return (SEM_ID)0;
	}

	if ((options & SEM_Q_PRIORITY) == 0) {
		if (options & SEM_INVERSION_SAFE) {
			errno = S_semLib_INVALID_QUEUE_TYPE; /* C'mon... */
			return (SEM_ID)0;
		}
	}

	CANCEL_DEFER(svc);

	sem = alloc_sem(options, &msem_ops);
	if (sem == NULL) {
		errno = S_memLib_NOT_ENOUGH_MEMORY;
		CANCEL_RESTORE(svc);
		return (SEM_ID)0;
	}

	/*
	 * XXX: POSIX-wise, we have a few issues with emulating
	 * VxWorks semaphores of the mutex kind.
	 *
	 * VxWorks flushes any kind of semaphore upon deletion
	 * (however, explicit semFlush() is not allowed on the mutex
	 * kind though); but POSIX doesn't implement such mechanism on
	 * its mutex object. At the same time, we need priority
	 * inheritance when SEM_INVERSION_SAFE is passed, so we can't
	 * emulate VxWorks mutex semaphores using condvars. Since the
	 * only way to get priority inheritance is to use a POSIX
	 * mutex, we choose not to emulate flushing in semDelete(),
	 * but keep inversion-safe locking possible.
	 *
	 * The same way, we don't support FIFO ordering for mutexes,
	 * since this would require to handle them as recursive binary
	 * semaphores with ownership, for no obvious upside.
	 * Logically speaking, relying on recursion without any
	 * consideration for priority while serializing threads is
	 * just asking for trouble anyway.
	 */
	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
	/* pthread_mutexattr_setrobust_np() might not be implemented. */
	pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
	if (options & SEM_INVERSION_SAFE)
		pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);

	__RT(pthread_mutex_init(&sem->u.msem.lock, &mattr));
	pthread_mutexattr_destroy(&mattr);

	CANCEL_RESTORE(svc);

	return mainheap_ref(sem, SEM_ID);
}
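
Given the constraints spelled out in the comment block above (no flush on mutexes, priority queuing required for inversion-safe locking), a typical creation call looks like this sketch; passing SEM_INVERSION_SAFE without SEM_Q_PRIORITY fails with S_semLib_INVALID_QUEUE_TYPE, as enforced above:

#include <vxworks/semLib.h>

/* Sketch: a recursive, priority-inheriting mutex semaphore with
 * deletion safety for the owner. */
static SEM_ID make_mutex(void)
{
	return semMCreate(SEM_Q_PRIORITY |
			  SEM_INVERSION_SAFE |
			  SEM_DELETE_SAFE);
}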
Example #23
void traceobj_verify(struct traceobj *trobj, int tseq[], int nr_seq)
{
	int end_mark, mark, state;
	struct service svc;

	CANCEL_DEFER(svc);

	read_lock_safe(&trobj->lock, state);

	if (nr_seq > trobj->nr_marks)
		goto fail;

	end_mark = trobj->cur_mark;
	if (end_mark == 0) {
		read_unlock_safe(&trobj->lock, state);
		panic("no mark defined");
	}

	if (end_mark != nr_seq)
		goto fail;

	for (mark = 0; mark < end_mark; mark++) {
		if (trobj->marks[mark].mark != tseq[mark])
			goto fail;
	}
out:
	read_unlock_safe(&trobj->lock, state);

	CANCEL_RESTORE(svc);

	return;

fail:
	if (valgrind_detected()) {
		warning("valgrind detected: ignoring sequence mismatch");
		goto out;
	}

	warning("mismatching execution sequence detected");
	compare_marks(trobj, tseq, nr_seq);
	read_unlock_safe(&trobj->lock, state);

	CANCEL_RESTORE(svc);

#ifdef CONFIG_XENO_MERCURY
	/*
	 * The Mercury core does not force any affinity, which may
	 * lead to wrong results with some unit tests checking strict
	 * ordering of operations. Tell the user about this. Normally,
	 * such unit tests on Mercury should be pinned on a single CPU
	 * using --cpu-affinity.
	 */
	if (CPU_COUNT(&__base_setup_data.cpu_affinity) == 0)
		warning("NOTE: --cpu-affinity option was not given - this might explain?");
#endif
#ifndef CONFIG_XENO_ASYNC_CANCEL
	/*
	 * Lack of async cancellation support might also explain why
	 * some tests have failed.
	 */
	warning("NOTE: --disable-async-cancel option was given - this might explain?");
#endif
	exit(5);
}
Example #24
int rt_pipe_create(RT_PIPE *pipe,
		   const char *name, int minor, size_t poolsize)
{
	struct rtipc_port_label plabel;
	struct sockaddr_ipc saddr;
	struct alchemy_pipe *pcb;
	struct service svc;
	size_t streambufsz;
	socklen_t addrlen;
	int ret, sock;

	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = xnmalloc(sizeof(*pcb));
	if (pcb == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sock = __RT(socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP));
	if (sock < 0) {
		warning("RTIPC/XDDP protocol not supported by kernel");
		ret = -errno;
		xnfree(pcb);
		goto out;
	}

	if (name && *name) {
		namecpy(plabel.label, name);
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_LABEL,
				      &plabel, sizeof(plabel)));
		if (ret)
			goto fail_sockopt;
	}

	if (poolsize > 0) {
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_POOLSZ,
				      &poolsize, sizeof(poolsize)));
		if (ret)
			goto fail_sockopt;
	}

	streambufsz = ALCHEMY_PIPE_STREAMSZ;
	ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_BUFSZ,
			      &streambufsz, sizeof(streambufsz)));
	if (ret)
		goto fail_sockopt;

	memset(&saddr, 0, sizeof(saddr));
	saddr.sipc_family = AF_RTIPC;
	saddr.sipc_port = minor;
	ret = __RT(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)));
	if (ret)
		goto fail_sockopt;

	if (minor == P_MINOR_AUTO) {
		/* Fetch the assigned minor device. */
		addrlen = sizeof(saddr);
		ret = __RT(getsockname(sock, (struct sockaddr *)&saddr, &addrlen));
		if (ret)
			goto fail_sockopt;
		if (addrlen != sizeof(saddr)) {
			ret = -EINVAL;
			goto fail_register;
		}
		minor = saddr.sipc_port;
	}

	generate_name(pcb->name, name, &pipe_namegen);
	pcb->sock = sock;
	pcb->minor = minor;
	pcb->magic = pipe_magic;

	if (syncluster_addobj(&alchemy_pipe_table, pcb->name, &pcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	pipe->handle = mainheap_ref(pcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return minor;
fail_sockopt:
	ret = -errno;
	if (ret == -EADDRINUSE)
		ret = -EBUSY;
fail_register:
	__RT(close(sock));
	xnfree(pcb);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
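
A creation sketch: P_MINOR_AUTO lets the XDDP layer pick the minor device, and the assigned minor is returned on success. The header name and the /dev/rtp<minor> convention on the regular-Linux side are assumptions drawn from the XDDP protocol, not from this code:

#include <alchemy/pipe.h>

/* Sketch: create a pipe with an auto-assigned minor and a 16 KiB
 * local pool for outgoing messages. */
static int open_pipe(RT_PIPE *pipe)
{
	int minor = rt_pipe_create(pipe, "demo-pipe",
				   P_MINOR_AUTO, 16384);

	if (minor < 0)	/* -ENOMEM, -EBUSY, -EEXIST, -EPERM... */
		return minor;

	/* A regular process can now reach the pipe through the
	 * corresponding /dev/rtp<minor> device node. */
	return minor;
}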
Example #25
/**
 * @fn ssize_t rt_buffer_write_timed(RT_BUFFER *bf, const void *ptr, size_t len, const struct timespec *abs_timeout)
 * @brief Write to an IPC buffer.
 *
 * This routine writes a message to the specified buffer. If not
 * enough buffer space is available on entry to hold the message, the
 * caller is allowed to block until enough room is freed, or a timeout
 * elapses, whichever comes first.
 *
 * @param bf The descriptor address of the buffer to write to.
 *
 * @param ptr The address of the message data to be written to the
 * buffer.
 *
 * @param len The length in bytes of the message data. Zero is a valid
 * value, in which case the buffer is left untouched, and zero is
 * returned to the caller.
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for enough buffer space to be
 * available to hold the message (see note). Passing NULL causes the
 * caller to block indefinitely until enough buffer space is
 * available. Passing { .tv_sec = 0, .tv_nsec = 0 } causes the service
 * to return immediately without blocking in case of buffer space
 * shortage.
 *
 * @return The number of bytes written to the buffer is returned upon
 * success. Otherwise:
 *
 * - -ETIMEDOUT is returned if the absolute @a abs_timeout date is
 * reached before enough buffer space is available to hold the
 * message.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and no buffer space is immediately available on
 * entry to hold the message.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before enough buffer space became available to hold
 * the message.
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
 * @a len is greater than the actual buffer length.
 *
 * - -EIDRM is returned if @a bf is deleted while the caller was
 * waiting for buffer space. In such an event, @a bf is no longer
 * valid upon return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * Valid calling contexts:
 *
 * - Xenomai threads
 * - Any other context if @a abs_timeout is { .tv_sec = 0, .tv_nsec = 0 } .
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
			      const void *ptr, size_t size,
			      const struct timespec *abs_timeout)
{
	struct alchemy_buffer_wait *wait = NULL;
	struct alchemy_buffer *bcb;
	struct threadobj *thobj;
	size_t len, rbytes, n;
	struct syncstate syns;
	struct service svc;
	const void *p;
	size_t wroff;
	int ret = 0;

	len = size;
	if (len == 0)
		return 0;

	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	/*
	 * We may only send complete messages, so there is no point in
	 * accepting messages which are larger than what the buffer
	 * can hold.
	 */
	if (len > bcb->bufsz) {
		ret = -EINVAL;
		goto done;
	}

	for (;;) {
		/*
		 * We should be able to write the entire message at
		 * once, or block.
		 */
		if (bcb->fillsz + len > bcb->bufsz)
			goto wait;

		/* Write to the buffer in a circular way. */
		wroff = bcb->wroff;
		rbytes = len;
		p = ptr;

		do {
			if (wroff + rbytes > bcb->bufsz)
				n = bcb->bufsz - wroff;
			else
				n = rbytes;

			memcpy(bcb->buf + wroff, p, n);
			p += n;
			wroff = (wroff + n) % bcb->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bcb->fillsz += len;
		bcb->wroff = wroff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads waiting for input, if we
		 * accumulated enough data to feed the leading one.
		 */
		thobj = syncobj_peek_grant(&bcb->sobj);
		if (thobj == NULL)
			goto done;

		wait = threadobj_get_wait(thobj);
		if (wait->size <= bcb->fillsz)
			syncobj_grant_all(&bcb->sobj);

		goto done;
	wait:
		if (alchemy_poll_mode(abs_timeout)) {
			ret = -EWOULDBLOCK;
			goto done;
		}

		if (wait == NULL)
			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);

		wait->size = len;

		/*
		 * Check whether readers are already waiting for
		 * receiving data, while we are about to wait for
		 * sending some. In such a case, we have the converse
		 * pathological use of the buffer. We must kick
		 * readers to allow for a short read to prevent a
		 * deadlock.
		 *
		 * XXX: instead of broadcasting a general wake up
		 * event, we could be smarter and wake up only the
		 * number of waiters required to consume the amount of
		 * data we want to send, but this does not seem worth
		 * the burden: this is an error condition, we just
		 * have to mitigate its effect, avoiding a deadlock.
		 */
		if (bcb->fillsz > 0 && syncobj_count_grant(&bcb->sobj))
			syncobj_grant_all(&bcb->sobj);

		ret = syncobj_wait_drain(&bcb->sobj, abs_timeout, &syns);
		if (ret) {
			if (ret == -EIDRM)
				goto out;
			break;
		}
	}
done:
	put_alchemy_buffer(bcb, &syns);
out:
	if (wait)
		threadobj_finish_wait();

	CANCEL_RESTORE(svc);

	return ret;
}
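
A non-blocking write sketch using the poll-mode timeout described above; a zeroed timespec makes the service return -EWOULDBLOCK instead of sleeping:

#include <string.h>
#include <time.h>
#include <sys/types.h>
#include <alchemy/buffer.h>

/* Sketch: try to post a whole message without ever blocking. */
static ssize_t try_write(RT_BUFFER *bf, const char *msg)
{
	struct timespec poll = { .tv_sec = 0, .tv_nsec = 0 };

	/* Returns the byte count on success, or -EWOULDBLOCK when
	 * the buffer cannot hold the whole message right now. */
	return rt_buffer_write_timed(bf, msg, strlen(msg), &poll);
}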
Example #26
/**
 * @fn ssize_t rt_buffer_read_timed(RT_BUFFER *bf, void *ptr, size_t len, const struct timespec *abs_timeout)
 * @brief Read from an IPC buffer.
 *
 * This routine reads the next message from the specified buffer. If
 * no message is available on entry, the caller is allowed to block
 * until enough data is written to the buffer, or a timeout elapses.
 *
 * @param bf The descriptor address of the buffer to read from.
 *
 * @param ptr A pointer to a memory area which will be written upon
 * success with the received data.
 *
 * @param len The length in bytes of the memory area pointed to by @a
 * ptr. Under normal circumstances, rt_buffer_read_timed() only
 * returns entire messages as specified by the @a len argument, or an
 * error value. However, short reads are allowed when a potential
 * deadlock situation is detected (see note below).
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for a message to be available from
 * the buffer (see note). Passing NULL causes the caller to block
 * indefinitely until enough data is available. Passing { .tv_sec = 0,
 * .tv_nsec = 0 } causes the service to return immediately without
 * blocking in case not enough data is available.
 *
 * @return The number of bytes read from the buffer is returned upon
 * success. Otherwise:
 *
 * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
 * complete message arrives.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and not enough data is immediately available on
 * entry to form a complete message.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before enough data became available to form a complete
 * message.
 *
 * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
 * @a len is greater than the actual buffer length.
 *
 * - -EIDRM is returned if @a bf is deleted while the caller was
 * waiting for data. In such an event, @a bf is no longer valid upon
 * return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * @note A short read (i.e. fewer bytes returned than requested by @a
 * len) may happen whenever a pathological use of the buffer is
 * encountered. This condition only arises when the system detects
 * that one or more writers are waiting for sending data, while a
 * reader would have to wait for receiving a complete message at the
 * same time. For instance, consider the following sequence, involving
 * a 1024-byte buffer (bf) and two threads:
 *
 * writer thread > rt_buffer_write(&bf, ptr, 1, TM_INFINITE);
 *        (one byte to read, 1023 bytes available for sending)
 * writer thread > rt_buffer_write(&bf, ptr, 1024, TM_INFINITE);
 *        (writer blocks - no space for another 1024-byte message)
 * reader thread > rt_buffer_read(&bf, ptr, 1024, TM_INFINITE);
 *        (short read - a truncated (1-byte) message is returned)
 *
 * To prevent both threads from waiting for each other indefinitely,
 * a short read is allowed, which may be completed by a subsequent
 * call to rt_buffer_read() or rt_buffer_read_until(). If that case
 * arises, thread priorities, buffer and/or message lengths should
 * likely be adjusted to eliminate such a condition.
 *
 * Valid calling contexts:
 *
 * - Xenomai threads
 * - Any other context if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 }.
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_buffer_read_timed(RT_BUFFER *bf,
			     void *ptr, size_t size,
			     const struct timespec *abs_timeout)
{
	struct alchemy_buffer_wait *wait = NULL;
	struct alchemy_buffer *bcb;
	struct threadobj *thobj;
	size_t len, rbytes, n;
	struct syncstate syns;
	struct service svc;
	size_t rdoff;
	int ret = 0;
	void *p;

	len = size;
	if (len == 0)
		return 0;

	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
		return -EPERM;

	CANCEL_DEFER(svc);

	bcb = get_alchemy_buffer(bf, &syns, &ret);
	if (bcb == NULL)
		goto out;

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	if (len > bcb->bufsz) {
		ret = -EINVAL;
		goto done;
	}
redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bcb->fillsz < len)
			goto wait;

		/* Read from the buffer in a circular way. */
		rdoff = bcb->rdoff;
		rbytes = len;
		p = ptr;

		do {
			if (rdoff + rbytes > bcb->bufsz)
				n = bcb->bufsz - rdoff;
			else
				n = rbytes;
			memcpy(p, bcb->buf + rdoff, n);
			p += n;
			rdoff = (rdoff + n) % bcb->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bcb->fillsz -= len;
		bcb->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads waiting for the buffer to
		 * drain, if we freed enough room for the leading one
		 * to post its message.
		 */
		thobj = syncobj_peek_drain(&bcb->sobj);
		if (thobj == NULL)
			goto done;

		wait = threadobj_get_wait(thobj);
		if (wait->size + bcb->fillsz <= bcb->bufsz)
			syncobj_drain(&bcb->sobj);

		goto done;
	wait:
		if (alchemy_poll_mode(abs_timeout)) {
			ret = -EWOULDBLOCK;
			goto done;
		}

		/*
		 * Check whether writers are already waiting for
		 * sending data, while we are about to wait for
		 * receiving some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bcb->fillsz > 0 && syncobj_count_drain(&bcb->sobj)) {
			len = bcb->fillsz;
			goto redo;
		}

		if (wait == NULL)
			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);

		wait->size = len;

		ret = syncobj_wait_grant(&bcb->sobj, abs_timeout, &syns);
		if (ret) {
			if (ret == -EIDRM)
				goto out;
			break;
		}
	}
done:
	put_alchemy_buffer(bcb, &syns);
out:
	if (wait)
		threadobj_finish_wait();

	CANCEL_RESTORE(svc);

	return ret;
}
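
Finally, a reader sketch tolerating the short reads described in the note above: a blocking read may return fewer bytes than requested when the deadlock-avoidance path triggers, so the byte count must be checked and the read resumed rather than assumed complete:

#include <sys/types.h>
#include <alchemy/buffer.h>

/* Sketch: read a complete record, accepting documented short
 * reads and resuming until 'len' bytes have arrived. */
static ssize_t read_record(RT_BUFFER *bf, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = rt_buffer_read_timed(bf,
						 (char *)buf + done,
						 len - done, NULL);
		if (n < 0)
			return n;	/* -EINVAL, -EIDRM, -EINTR... */
		done += n;
	}

	return (ssize_t)done;
}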