Example #1
static SEM_ID alloc_xsem(int options, int initval, int maxval)
{
	int sobj_flags = 0, ret;
	struct wind_sem *sem;

	if (options & ~SEM_Q_PRIORITY) {
		errno = S_semLib_INVALID_OPTION;
		return (SEM_ID)0;
	}

	sem = alloc_sem(options, &xsem_ops);
	if (sem == NULL) {
		errno = S_memLib_NOT_ENOUGH_MEMORY;
		return (SEM_ID)0;
	}

	if (options & SEM_Q_PRIORITY)
		sobj_flags = SYNCOBJ_PRIO;

	sem->u.xsem.value = initval;
	sem->u.xsem.maxvalue = maxval;
	ret = syncobj_init(&sem->u.xsem.sobj, CLOCK_COPPERPLATE, sobj_flags,
			   fnref_put(libvxworks, sem_finalize));
	if (ret) {
		xnfree(sem);
		errno = S_memLib_NOT_ENOUGH_MEMORY;
		return (SEM_ID)0;
	}

	return mainheap_ref(sem, SEM_ID);
}
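
For context: this allocator backs the counting/binary semaphore kinds of the VxWorks emulation. A minimal usage sketch through the public semLib API, assuming the standard semCCreate()/semTake()/semGive() entry points route into this path:

	/* Counting semaphore, 4 initial units, priority-ordered waiters. */
	SEM_ID sem = semCCreate(SEM_Q_PRIORITY, 4);

	if (sem == (SEM_ID)0)
		return;	/* errno carries an S_semLib_ or S_memLib_ code */

	semTake(sem, WAIT_FOREVER);	/* consume one unit */
	semGive(sem);			/* release it */
	semDelete(sem);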
Example #2
int alchemy_bind_object(const char *name, struct syncluster *sc,
			RTIME timeout,
			int offset,
			uintptr_t *handle)
{
	struct clusterobj *cobj;
	struct service svc;
	struct timespec ts;
	void *p;
	int ret;

	CANCEL_DEFER(svc);
	ret = syncluster_findobj(sc, name,
				 alchemy_rel_timeout(timeout, &ts),
				 &cobj);
	CANCEL_RESTORE(svc);
	if (ret)
		return ret;

	/*
	 * The clusterobj is embedded @a offset bytes into the service
	 * descriptor; step back to the enclosing descriptor
	 * (arithmetic on void * is a GCC extension).
	 */
	p = cobj;
	p -= offset;
	*handle = mainheap_ref(p, uintptr_t);

	return 0;
}
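
The offset parameter lets callers recover the enclosing service descriptor from the embedded clusterobj, as the container-of step above shows. A hypothetical binder built on this helper; alchemy_queue_table, struct alchemy_queue and the RT_QUEUE handle field stand in for whatever the real service defines:

#include <stddef.h>	/* offsetof */

int rt_queue_bind(RT_QUEUE *q, const char *name, RTIME timeout)
{
	/* struct alchemy_queue embeds its clusterobj at ->cobj. */
	return alchemy_bind_object(name, &alchemy_queue_table, timeout,
				   offsetof(struct alchemy_queue, cobj),
				   &q->handle);
}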
Example #3
static u_long start_evtimer(u_long events,
			    struct itimerspec *it, u_long *tmid_r)
{
	void (*handler)(struct timerobj *tmobj);
	struct psos_task *current;
	struct psos_tm *tm;
	int ret;

	tm = pvmalloc(sizeof(*tm));
	if (tm == NULL)
		return ERR_NOTIMERS;

	pvholder_init(&tm->link);
	tm->events = events;
	tm->magic = tm_magic;

	current = get_psos_task_or_self(0, &ret);
	if (current == NULL) {
		pvfree(tm);
		return ret;
	}

	tm->tid = mainheap_ref(current, u_long);
	pvlist_append(&tm->link, &current->timer_list);
	put_psos_task(current);

	*tmid_r = (u_long)tm;

	ret = timerobj_init(&tm->tmobj);
	if (ret)
		goto fail;

	/* A zero reload interval means a one-shot timer. */
	handler = post_event_periodic;
	if (it->it_interval.tv_sec == 0 &&
	    it->it_interval.tv_nsec == 0)
		handler = post_event_once;

	ret = timerobj_start(&tm->tmobj, handler, it);
	if (ret)
		goto fail;

	return SUCCESS;
fail:
	pvlist_remove(&tm->link);
	pvfree(tm);
	return ERR_NOTIMERS;
}
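
Such an event timer is presumably armed through the public pSOS timer calls; a short sketch assuming the classic tm_evafter()/ev_receive() semantics:

	u_long tmid, caught;

	/* Post event bit 0x1 to the calling task after 100 ticks... */
	tm_evafter(100, 0x1, &tmid);
	/* ...then block until that event arrives. */
	ev_receive(0x1, EV_WAIT|EV_ANY, 0, &caught);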
Example #4
int rt_cond_create(RT_COND *cond, const char *name)
{
	pthread_mutexattr_t mattr;
	struct alchemy_cond *ccb;
	pthread_condattr_t cattr;
	struct service svc;

	if (threadobj_async_p())
		return -EPERM;

	COPPERPLATE_PROTECT(svc);

	ccb = xnmalloc(sizeof(*ccb));
	if (ccb == NULL) {
		COPPERPLATE_UNPROTECT(svc);
		return -ENOMEM;
	}

	strncpy(ccb->name, name, sizeof(ccb->name));
	ccb->name[sizeof(ccb->name) - 1] = '\0';
	ccb->nwaiters = 0;

	if (cluster_addobj(&alchemy_cond_table, ccb->name, &ccb->cobj)) {
		xnfree(ccb);
		COPPERPLATE_UNPROTECT(svc);
		return -EEXIST;
	}

	__RT(pthread_mutexattr_init(&mattr));
	__RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
	__RT(pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute));
	__RT(pthread_mutex_init(&ccb->safe, &mattr));
	__RT(pthread_mutexattr_destroy(&mattr));

	__RT(pthread_condattr_init(&cattr));
	__RT(pthread_condattr_setpshared(&cattr, mutex_scope_attribute));
	__RT(pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE));
	__RT(pthread_cond_init(&ccb->cond, &cattr));
	__RT(pthread_condattr_destroy(&cattr));
	ccb->magic = cond_magic;
	cond->handle = mainheap_ref(ccb, uintptr_t);

	COPPERPLATE_UNPROTECT(svc);

	return 0;
}
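
A typical pairing with an Alchemy mutex, per the standard RT_COND usage pattern (the predicate flag below is a placeholder):

	RT_COND cond;
	RT_MUTEX mutex;
	int predicate;	/* placeholder condition */

	rt_cond_create(&cond, "MyCond");
	rt_mutex_create(&mutex, "MyLock");

	rt_mutex_acquire(&mutex, TM_INFINITE);
	while (!predicate)	/* re-check after each wakeup */
		rt_cond_wait(&cond, &mutex, TM_INFINITE);
	rt_mutex_release(&mutex);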
Example #5
int rt_pipe_create(RT_PIPE *pipe,
		   const char *name, int minor, size_t poolsize)
{
	struct rtipc_port_label plabel;
	struct sockaddr_ipc saddr;
	struct alchemy_pipe *pcb;
	struct service svc;
	size_t streambufsz;
	socklen_t addrlen;
	int ret, sock;

	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = xnmalloc(sizeof(*pcb));
	if (pcb == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sock = __RT(socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP));
	if (sock < 0) {
		ret = -errno;	/* save errno before warning() may clobber it */
		warning("RTIPC/XDDP protocol not supported by kernel");
		xnfree(pcb);
		goto out;
	}

	if (name && *name) {
		namecpy(plabel.label, name);
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_LABEL,
				      &plabel, sizeof(plabel)));
		if (ret)
			goto fail_sockopt;
	}

	if (poolsize > 0) {
		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_POOLSZ,
				      &poolsize, sizeof(poolsize)));
		if (ret)
			goto fail_sockopt;
	}

	streambufsz = ALCHEMY_PIPE_STREAMSZ;
	ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_BUFSZ,
			      &streambufsz, sizeof(streambufsz)));
	if (ret)
		goto fail_sockopt;

	memset(&saddr, 0, sizeof(saddr));
	saddr.sipc_family = AF_RTIPC;
	saddr.sipc_port = minor;
	ret = __RT(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)));
	if (ret)
		goto fail_sockopt;

	if (minor == P_MINOR_AUTO) {
		/* Fetch the assigned minor device. */
		addrlen = sizeof(saddr);
		ret = __RT(getsockname(sock, (struct sockaddr *)&saddr, &addrlen));
		if (ret)
			goto fail_sockopt;
		if (addrlen != sizeof(saddr)) {
			ret = -EINVAL;
			goto fail_register;
		}
		minor = saddr.sipc_port;
	}

	generate_name(pcb->name, name, &pipe_namegen);
	pcb->sock = sock;
	pcb->minor = minor;
	pcb->magic = pipe_magic;

	if (syncluster_addobj(&alchemy_pipe_table, pcb->name, &pcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	pipe->handle = mainheap_ref(pcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return minor;
fail_sockopt:
	ret = -errno;
	if (ret == -EADDRINUSE)
		ret = -EBUSY;
fail_register:
	__RT(close(sock));
	xnfree(pcb);
out:
	CANCEL_RESTORE(svc);

	return ret;
}
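
On success the assigned minor is returned, which maps to the /dev/rtp<minor> endpoint visible to regular Linux processes. A minimal sketch of the real-time side, assuming the stock rt_pipe_write() semantics:

	RT_PIPE pipe;
	int minor;

	minor = rt_pipe_create(&pipe, "MyPipe", P_MINOR_AUTO, 0);
	if (minor >= 0)
		/* A regular process reads this from /dev/rtp<minor>. */
		rt_pipe_write(&pipe, "hello", 5, P_NORMAL);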
Example #6
/**
 * @fn int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
 * @brief Create an IPC buffer.
 *
 * This routine creates an IPC object that allows tasks to send and
 * receive data asynchronously via a memory buffer. Data may be of
 * arbitrary length, although this IPC is best suited for small to
 * medium-sized messages, since data always has to be copied to the
 * buffer in transit. Large messages may be handled more efficiently
 * by message queues (RT_QUEUE).
 *
 * @param bf The address of a buffer descriptor which can later be
 * used to uniquely identify the created object, upon success of
 * this call.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * buffer. When non-NULL and non-empty, a copy of this string is used
 * for indexing the created buffer into the object registry.
 *
 * @param bufsz The size of the buffer space available to hold
 * data. The required memory is obtained from the main heap.
 *
 * @param mode The buffer creation mode. The following flags can be
 * OR'ed into this bitmask, each of them affecting the new buffer:
 *
 * - B_FIFO makes tasks pend in FIFO order for reading data from the
 *   buffer.
 *
 * - B_PRIO makes tasks pend in priority order for reading data from
 *   the buffer.
 *
 * This parameter also applies to tasks blocked on the buffer's write
 * side (see rt_buffer_write()).
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -ENOMEM is returned if the system fails to get memory from the
 * main heap in order to create the buffer.
 *
 * - -EEXIST is returned if the @a name is conflicting with an already
 * registered buffer.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Valid calling context:
 *
 * - Regular POSIX threads
 * - Xenomai threads
 *
 * @note Buffers can be shared by multiple processes which belong to
 * the same Xenomai session.
 */
int rt_buffer_create(RT_BUFFER *bf, const char *name,
		     size_t bufsz, int mode)
{
	struct alchemy_buffer *bcb;
	struct service svc;
	int sobj_flags = 0;
	int ret;

	if (threadobj_irq_p())
		return -EPERM;

	if (bufsz == 0)
		return -EINVAL;

	CANCEL_DEFER(svc);

	bcb = xnmalloc(sizeof(*bcb));
	if (bcb == NULL) {
		ret = __bt(-ENOMEM);
		goto fail;
	}

	bcb->buf = xnmalloc(bufsz);
	if (bcb->buf == NULL) {
		ret = __bt(-ENOMEM);
		goto fail_bufalloc;
	}

	generate_name(bcb->name, name, &buffer_namegen);
	bcb->magic = buffer_magic;
	bcb->mode = mode;
	bcb->bufsz = bufsz;
	bcb->rdoff = 0;
	bcb->wroff = 0;
	bcb->fillsz = 0;
	if (mode & B_PRIO)
		sobj_flags = SYNCOBJ_PRIO;

	syncobj_init(&bcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
		     fnref_put(libalchemy, buffer_finalize));

	if (syncluster_addobj(&alchemy_buffer_table, bcb->name, &bcb->cobj)) {
		ret = -EEXIST;
		goto fail_register;
	}

	bf->handle = mainheap_ref(bcb, uintptr_t);

	CANCEL_RESTORE(svc);

	return 0;

fail_register:
	syncobj_uninit(&bcb->sobj);
	xnfree(bcb->buf);
fail_bufalloc:
	xnfree(bcb);
fail:
	CANCEL_RESTORE(svc);

	return ret;
}
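
A short usage sketch matching the documented API above, assuming the companion rt_buffer_write()/rt_buffer_read() calls take a timeout as their last argument, as in the stock Alchemy API:

	RT_BUFFER bf;
	char msg[16];
	ssize_t n;

	rt_buffer_create(&bf, "MyBuffer", 1024, B_PRIO);
	rt_buffer_write(&bf, "ping", 4, TM_INFINITE);	/* blocks while full */
	n = rt_buffer_read(&bf, msg, sizeof(msg), TM_INFINITE);
	rt_buffer_delete(&bf);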
Example #7
SEM_ID semMCreate(int options)
{
	pthread_mutexattr_t mattr;
	struct wind_sem *sem;
	struct service svc;

	if (options & ~(SEM_Q_PRIORITY|SEM_DELETE_SAFE|SEM_INVERSION_SAFE)) {
		errno = S_semLib_INVALID_OPTION;
		return (SEM_ID)0;
	}

	if ((options & SEM_Q_PRIORITY) == 0) {
		if (options & SEM_INVERSION_SAFE) {
			errno = S_semLib_INVALID_QUEUE_TYPE; /* C'mon... */
			return (SEM_ID)0;
		}
	}

	CANCEL_DEFER(svc);

	sem = alloc_sem(options, &msem_ops);
	if (sem == NULL) {
		errno = S_memLib_NOT_ENOUGH_MEMORY;
		CANCEL_RESTORE(svc);
		return (SEM_ID)0;
	}

	/*
	 * XXX: POSIX-wise, we have a few issues with emulating
	 * VxWorks semaphores of the mutex kind.
	 *
	 * VxWorks flushes any kind of semaphore upon deletion (although
	 * explicit semFlush() is not allowed on the mutex kind); POSIX
	 * implements no such mechanism on its mutex object. At the same
	 * time, we need priority
	 * inheritance when SEM_INVERSION_SAFE is passed, so we can't
	 * emulate VxWorks mutex semaphores using condvars. Since the
	 * only way to get priority inheritance is to use a POSIX
	 * mutex, we choose not to emulate flushing in semDelete(),
	 * but keep inversion-safe locking possible.
	 *
	 * In the same vein, we don't support FIFO ordering for mutexes,
	 * since this would require handling them as recursive binary
	 * semaphores with ownership, for no obvious upside. Logically
	 * speaking, relying on recursion without any consideration for
	 * priority while serializing threads is just asking for trouble
	 * anyway.
	 */
	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
	/* pthread_mutexattr_setrobust_np() might not be implemented. */
	pthread_mutexattr_setrobust_np(&mattr, PTHREAD_MUTEX_ROBUST_NP);
	if (options & SEM_INVERSION_SAFE)
		pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);

	__RT(pthread_mutex_init(&sem->u.msem.lock, &mattr));
	pthread_mutexattr_destroy(&mattr);

	CANCEL_RESTORE(svc);

	return mainheap_ref(sem, SEM_ID);
}
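
A usage sketch for the mutex kind, assuming the standard semLib take/give calls; recursion is allowed per the PTHREAD_MUTEX_RECURSIVE setting above:

	SEM_ID lock = semMCreate(SEM_Q_PRIORITY|SEM_INVERSION_SAFE|SEM_DELETE_SAFE);

	if (lock != (SEM_ID)0) {
		semTake(lock, WAIT_FOREVER);
		/* critical section; a nested semTake(lock, ...) won't deadlock */
		semGive(lock);
		semDelete(lock);
	}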