Example #1
static void *__map_umm(const char *name, uint32_t *size_r)
{
	struct cobalt_memdev_stat statbuf;
	int fd, ret;
	void *addr;

	fd = __RT(open(name, O_RDWR));
	if (fd < 0) {
		early_warning("cannot open RTDM device %s: %s", name,
			      strerror(errno));
		return MAP_FAILED;
	}

	ret = __RT(ioctl(fd, MEMDEV_RTIOC_STAT, &statbuf));
	if (ret) {
		early_warning("failed getting status of %s: %s",
			      name, strerror(errno));
		__RT(close(fd));
		return MAP_FAILED;
	}

	addr = __RT(mmap(NULL, statbuf.size, PROT_READ|PROT_WRITE,
			 MAP_SHARED, fd, 0));
	__RT(close(fd));

	*size_r = statbuf.size;

	return addr;
}
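A minimal caller sketch for the helper above; the device name is a placeholder, and the munmap()-based teardown is an assumption about how the mapping is meant to be released:

static uint32_t umm_size;
static void *umm_base;

static int attach_umm(void)
{
	/* Hypothetical device name, for illustration only. */
	umm_base = __map_umm("/dev/rtdm/umm", &umm_size);
	if (umm_base == MAP_FAILED)
		return -errno;

	return 0;
}

static void detach_umm(void)
{
	munmap(umm_base, umm_size);	/* assumed teardown path */
}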
Example #2
int clockobj_init(struct clockobj *clkobj,
		  unsigned int resolution_ns)
{
	pthread_mutexattr_t mattr;
	struct timespec now;
	int ret;

	if (resolution_ns == 0)
		return __bt(-EINVAL);

	memset(clkobj, 0, sizeof(*clkobj));
	ret = __clockobj_set_resolution(clkobj, resolution_ns);
	if (ret)
		return __bt(ret);

	/*
	 * FIXME: this lock is only used to protect the wallclock
	 * offset readings from updates. We should replace it with a
	 * confirmed reading loop.
	 */
	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
	ret = __bt(-__RT(pthread_mutex_init(&clkobj->lock, &mattr)));
	pthread_mutexattr_destroy(&mattr);
	if (ret)
		return ret;

	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
	timespec_sub(&clkobj->offset, &clkobj->epoch, &now);

	return 0;
}
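A usage sketch for clockobj_init(), assuming the caller wants a microsecond timebase; the error codes follow from the function above:

static struct clockobj test_clock;

static int init_timebase(void)
{
	/* 1000 ns per tick, i.e. a microsecond timebase. */
	int ret = clockobj_init(&test_clock, 1000);
	if (ret)	/* -EINVAL for a zero resolution */
		return ret;

	return 0;
}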
Example #3
void __boilerplate_init(void)
{
	__RT(clock_gettime(CLOCK_MONOTONIC, &init_date));
	__RT(pthread_mutex_init(&__printlock, NULL));
	debug_init();
	init_done = 1;
}
Example #4
int rt_cond_delete(RT_COND *cond)
{
	struct alchemy_cond *ccb;
	struct service svc;
	int ret = 0;

	if (threadobj_async_p())
		return -EPERM;

	COPPERPLATE_PROTECT(svc);

	ccb = get_alchemy_cond(cond, &ret);
	if (ccb == NULL)
		goto out;

	ret = -__RT(pthread_cond_destroy(&ccb->cond));
	if (ret) {
		if (ret == -EBUSY)
			put_alchemy_cond(ccb);
		goto out;
	}

	ccb->magic = ~cond_magic;
	put_alchemy_cond(ccb);
	cluster_delobj(&alchemy_cond_table, &ccb->cobj);
	__RT(pthread_mutex_destroy(&ccb->safe));
	xnfree(ccb);
out:
	COPPERPLATE_UNPROTECT(svc);

	return ret;
}
Example #5
static void trank_alarm_handler(void *arg)
{
    struct trank_alarm_wait *aw = arg;

    __RT(pthread_mutex_lock(&aw->lock));
    aw->alarm_pulses++;
    __RT(pthread_cond_broadcast(&aw->event));
    __RT(pthread_mutex_unlock(&aw->lock));
}
Example #6
int __heapobj_extend(struct heapobj *hobj, size_t size, void *mem)
{
	__RT(pthread_mutex_lock(&hobj->lock));
	hobj->size = add_new_area(hobj->pool, size, mem);
	__RT(pthread_mutex_unlock(&hobj->lock));
	if (hobj->size == (size_t)-1)
		return __bt(-EINVAL);

	return 0;
}
Example #7
void *__heapobj_alloc(struct heapobj *hobj, size_t size)
{
	void *p;

	__RT(pthread_mutex_lock(&hobj->lock));
	p = malloc_ex(size, hobj->pool);
	__RT(pthread_mutex_unlock(&hobj->lock));

	return p;
}
Example #8
size_t __heapobj_inquire(struct heapobj *hobj, void *ptr)
{
	size_t size;

	__RT(pthread_mutex_lock(&hobj->lock));
	size = malloc_usable_size_ex(ptr, hobj->pool);
	__RT(pthread_mutex_unlock(&hobj->lock));

	return size;
}
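The heapobj accessors combine as follows; a sketch, assuming the pool was initialized beforehand and that __heapobj_free() is the matching release call (not shown in this section):

	void *p;
	size_t n;

	p = __heapobj_alloc(&hobj, 64);
	if (p == NULL)
		return -ENOMEM;

	/* The allocator may round the request up to its chunk size. */
	n = __heapobj_inquire(&hobj, p);
	assert(n >= 64);

	__heapobj_free(&hobj, p);	/* assumed counterpart */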
Example #9
static STATUS msem_take(struct wind_sem *sem, int timeout)
{
	struct wind_task *current;
	struct timespec ts;
	int ret;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	/*
	 * We allow threads from other APIs to grab a VxWorks mutex,
	 * in which case the safe option is ignored.
	 */
	current = wind_task_current();
	if (current && (sem->options & SEM_DELETE_SAFE))
		__RT(pthread_mutex_lock(&current->safelock));

	if (timeout == NO_WAIT) {
		ret = __RT(pthread_mutex_trylock(&sem->u.msem.lock));
		goto check;
	}

	if (timeout == WAIT_FOREVER) {
		ret = __RT(pthread_mutex_lock(&sem->u.msem.lock));
		goto check;
	}

	__clockobj_ticks_to_timeout(&wind_clock, CLOCK_REALTIME, timeout, &ts);
	ret = __RT(pthread_mutex_timedlock(&sem->u.msem.lock, &ts));
check:
	switch (ret) {
	case 0:
		return OK;
	case EINVAL:
		ret = S_objLib_OBJ_ID_ERROR;
		break;
	case EBUSY:
		ret = S_objLib_OBJ_UNAVAILABLE;
		break;
	case ETIMEDOUT:
		ret = S_objLib_OBJ_TIMEOUT;
		break;
	case EOWNERDEAD:
	case ENOTRECOVERABLE:
		warning("owner of mutex-type semaphore %p died", sem);
		ret = S_objLib_OBJ_UNAVAILABLE;
		break;
	}

	if (current != NULL && (sem->options & SEM_DELETE_SAFE))
		__RT(pthread_mutex_unlock(&current->safelock));

	return ret;
}
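On the caller side, the VxWorks-style status codes are tested directly; a sketch (the semaphore setup is assumed):

	/* Try a non-blocking grab first, then wait up to 100 ticks. */
	if (msem_take(sem, NO_WAIT) == S_objLib_OBJ_UNAVAILABLE &&
	    msem_take(sem, 100) != OK)
		return ERROR;	/* timed out or stale semaphore */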
Example #10
/* Conversion from CLOCK_COPPERPLATE to clk_id. */
void clockobj_convert_clocks(struct clockobj *clkobj,
			     const struct timespec *in,
			     clockid_t clk_id,
			     struct timespec *out)
{
	struct timespec now, delta;

	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
	/* Delta from the current CLOCK_COPPERPLATE time. */
	timespec_sub(&delta, in, &now);
	/* Current time for clk_id. */
	__RT(clock_gettime(clk_id, &now));
	/* Absolute timeout again, clk_id-based this time. */
	timespec_add(out, &delta, &now);
}
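For instance, rebasing an absolute Copperplate deadline onto CLOCK_REALTIME before handing it to a POSIX service; a sketch, where copperplate_ts is assumed to hold a valid absolute date:

	struct timespec copperplate_ts, realtime_ts;

	/* copperplate_ts: absolute date on CLOCK_COPPERPLATE. */
	clockobj_convert_clocks(&alchemy_clock, &copperplate_ts,
				CLOCK_REALTIME, &realtime_ts);
	/* realtime_ts now expresses the same deadline on CLOCK_REALTIME. */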
Example #11
static ssize_t do_write_pipe(RT_PIPE *pipe,
			     const void *buf, size_t size, int flags)
{
	struct alchemy_pipe *pcb;
	struct service svc;
	ssize_t ret;
	int err = 0;

	CANCEL_DEFER(svc);

	pcb = find_alchemy_pipe(pipe, &err);
	if (pcb == NULL) {
		ret = err;
		goto out;
	}

	ret = __RT(sendto(pcb->sock, buf, size, flags, NULL, 0));
	if (ret < 0) {
		ret = -errno;
		if (ret == -EBADF)
			ret = -EIDRM;
	}
out:
	CANCEL_RESTORE(svc);

	return ret;
}
Example #12
void __printout(const char *name, const char *header,
		const char *fmt, va_list ap)
{
	struct timespec now, delta;
	unsigned long long ns;
	unsigned int ms, us;

	/*
	 * Catch early printouts issued before the init sequence has
	 * completed. In that event, we don't need to serialize the
	 * output, since we must be running uncontended over the main
	 * thread.
	 */
	if (!init_done) {
		__do_printout(name, header, 0, 0, fmt, ap);
		return;
	}

	__RT(clock_gettime(CLOCK_MONOTONIC, &now));
	timespec_sub(&delta, &now, &init_date);
	ns = delta.tv_sec * 1000000000ULL;
	ns += delta.tv_nsec;
	ms = ns / 1000000ULL;
	us = (ns % 1000000ULL) / 1000ULL;
	SIGSAFE_LOCK_ENTRY(&__printlock);
	__do_printout(name, header, ms, us, fmt, ap);
	SIGSAFE_LOCK_EXIT(&__printlock);
}
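Since __printout() takes a va_list, the public entry points are thin variadic wrappers; a sketch of one such front-end (the name and header strings are illustrative):

void warning_printout(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	__printout("main", "WARNING: ", fmt, ap);
	va_end(ap);
}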
Example #13
static struct alchemy_cond *
__get_alchemy_cond(struct alchemy_cond *ccb, int *err_r)
{
	int ret;

	if (ccb->magic == ~cond_magic)
		goto dead_handle;

	if (ccb->magic != cond_magic)
		goto bad_handle;

	ret = __RT(pthread_mutex_lock(&ccb->safe));
	if (ret)
		goto bad_handle;

	/* Recheck under lock. */
	if (ccb->magic == cond_magic)
		return ccb;

dead_handle:
	/* Removed under our feet. */
	*err_r = -EIDRM;
	return NULL;

bad_handle:
	*err_r = -EINVAL;
	return NULL;
}
Example #14
/**
 * @fn int rt_pipe_delete(RT_PIPE *pipe)
 * @brief Delete a message pipe.
 *
 * This routine deletes a pipe object previously created by a call to
 * rt_pipe_create(). All resources attached to that pipe are
 * automatically released, and any pending data is flushed.
 *
 * @param pipe The pipe descriptor.
 *
 * @return Zero is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
 *
 * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * @apitags{thread-unrestricted, switch-secondary}
 */
int rt_pipe_delete(RT_PIPE *pipe)
{
	struct alchemy_pipe *pcb;
	struct service svc;
	int ret = 0;

	if (threadobj_irq_p())
		return -EPERM;

	CANCEL_DEFER(svc);

	pcb = find_alchemy_pipe(pipe, &ret);
	if (pcb == NULL)
		goto out;

	ret = __RT(close(pcb->sock));
	if (ret) {
		ret = -errno;
		if (ret == -EBADF)
			ret = -EIDRM;
		goto out;
	}

	syncluster_delobj(&alchemy_pipe_table, &pcb->cobj);
	pcb->magic = ~pipe_magic;
out:
	CANCEL_RESTORE(svc);

	return ret;
}
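Typical life cycle, pairing this call with rt_pipe_create(); a sketch, assuming the usual create signature (name, minor, pool size):

	RT_PIPE pipe;
	int ret;

	ret = rt_pipe_create(&pipe, "mypipe", P_MINOR_AUTO, 0);
	if (ret)
		return ret;

	/* ... exchange messages ... */

	ret = rt_pipe_delete(&pipe);	/* -EINVAL/-EIDRM on a stale descriptor */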
Example #15
int rt_cond_wait_until(RT_COND *cond, RT_MUTEX *mutex,
		       RTIME timeout)
{
	struct alchemy_mutex *mcb;
	struct alchemy_cond *ccb;
	struct service svc;
	struct timespec ts;
	int ret = 0;

	if (timeout == TM_NONBLOCK)
		return -EWOULDBLOCK;

	COPPERPLATE_PROTECT(svc);

	ccb = get_alchemy_cond(cond, &ret);
	if (ccb == NULL)
		goto out;

	mcb = find_alchemy_mutex(mutex, &ret);
	if (mcb == NULL)
		goto unlock;

	ccb->nwaiters++;
	put_alchemy_cond(ccb);

	if (timeout != TM_INFINITE) {
		clockobj_ticks_to_timeout(&alchemy_clock, timeout, &ts);
		ret = -__RT(pthread_cond_timedwait(&ccb->cond, &mcb->lock, &ts));
	} else
		ret = -__RT(pthread_cond_wait(&ccb->cond, &mcb->lock));

	/*
	 * Be cautious, grab the internal safe lock again to update
	 * the control block.
	 */
	ccb = __get_alchemy_cond(ccb, &ret);
	if (ccb == NULL)
		goto out;

	ccb->nwaiters--;
unlock:
	put_alchemy_cond(ccb);
out:
	COPPERPLATE_UNPROTECT(svc);

	return ret;
}
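As with POSIX condvars, the mutex must be held across the wait and the predicate rechecked in a loop; a sketch, where rt_timer_read() as the tick source and the data_ready predicate are assumptions:

	RTIME deadline = rt_timer_read() + 1000000;	/* 1ms worth of ticks */
	int ret = 0;

	rt_mutex_acquire(&mutex, TM_INFINITE);
	while (!data_ready && ret == 0)
		ret = rt_cond_wait_until(&cond, &mutex, deadline);
	rt_mutex_release(&mutex);
	/* ret == -ETIMEDOUT if the deadline passed first. */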
Example #16
int rt_cond_create(RT_COND *cond, const char *name)
{
	pthread_mutexattr_t mattr;
	struct alchemy_cond *ccb;
	pthread_condattr_t cattr;
	struct service svc;

	if (threadobj_async_p())
		return -EPERM;

	COPPERPLATE_PROTECT(svc);

	ccb = xnmalloc(sizeof(*ccb));
	if (ccb == NULL) {
		COPPERPLATE_UNPROTECT(svc);
		return -ENOMEM;
	}

	strncpy(ccb->name, name, sizeof(ccb->name));
	ccb->name[sizeof(ccb->name) - 1] = '\0';
	ccb->nwaiters = 0;

	if (cluster_addobj(&alchemy_cond_table, ccb->name, &ccb->cobj)) {
		xnfree(ccb);
		COPPERPLATE_UNPROTECT(svc);
		return -EEXIST;
	}

	__RT(pthread_mutexattr_init(&mattr));
	__RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
	__RT(pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute));
	__RT(pthread_mutex_init(&ccb->safe, &mattr));
	__RT(pthread_mutexattr_destroy(&mattr));

	__RT(pthread_condattr_init(&cattr));
	__RT(pthread_condattr_setpshared(&cattr, mutex_scope_attribute));
	__RT(pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE));
	__RT(pthread_cond_init(&ccb->cond, &cattr));
	__RT(pthread_condattr_destroy(&cattr));
	ccb->magic = cond_magic;
	cond->handle = mainheap_ref(ccb, uintptr_t);

	COPPERPLATE_UNPROTECT(svc);

	return 0;
}
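A short usage sketch tying this to rt_cond_broadcast() and rt_cond_delete() from the other examples in this section:

	RT_COND cond;
	int ret;

	ret = rt_cond_create(&cond, "mycond");
	if (ret)	/* -ENOMEM, -EEXIST or -EPERM */
		return ret;

	/* ... waiters block on the condvar ... */

	rt_cond_broadcast(&cond);	/* wake up all waiters */
	rt_cond_delete(&cond);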
Example #17
void backtrace_init_context(struct backtrace_data *btd,
			    const char *name)
{
	__RT(pthread_mutex_init(&btd->lock, NULL));
	btd->inner = NULL;
	btd->name = name ?: "<anonymous>";
	pthread_setspecific(btkey, btd);
}
Example #18
void __clockobj_ticks_to_timeout(struct clockobj *clkobj,
				 clockid_t clk_id,
				 ticks_t ticks, struct timespec *ts)
{
	struct timespec now, delta;

	__RT(clock_gettime(clk_id, &now));
	__clockobj_ticks_to_timespec(clkobj, ticks, &delta);
	timespec_add(ts, &now, &delta);
}
Example #19
int rt_alarm_create(RT_ALARM *alarm, const char *name)
{
    struct trank_alarm_wait *aw;
    pthread_mutexattr_t mattr;
    pthread_condattr_t cattr;
    int ret;

    aw = xnmalloc(sizeof(*aw));
    if (aw == NULL)
        return -ENOMEM;

    aw->alarm_pulses = 0;

    pthread_mutexattr_init(&mattr);
    pthread_mutexattr_settype(&mattr, mutex_type_attribute);
    pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
    pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
    ret = __bt(-__RT(pthread_mutex_init(&aw->lock, &mattr)));
    pthread_mutexattr_destroy(&mattr);
    if (ret)
        goto fail_lock;

    pthread_condattr_init(&cattr);
    pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
    ret = __bt(-pthread_cond_init(&aw->event, &cattr));
    pthread_condattr_destroy(&cattr);
    if (ret)
        goto fail_cond;

    ret = __CURRENT(rt_alarm_create(alarm, name, trank_alarm_handler, aw));
    if (ret)
        goto fail_alarm;

    return 0;
fail_alarm:
    __RT(pthread_cond_destroy(&aw->event));
fail_cond:
    __RT(pthread_mutex_destroy(&aw->lock));
fail_lock:
    xnfree(aw);

    return ret;
}
Example #20
int traceobj_init(struct traceobj *trobj, const char *label, int nr_marks)
{
	pthread_mutexattr_t mattr;
	pthread_condattr_t cattr;
	int ret;

	pthread_mutexattr_init(&mattr);
	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
	ret = __bt(-__RT(pthread_mutex_init(&trobj->lock, &mattr)));
	pthread_mutexattr_destroy(&mattr);
	if (ret)
		return ret;

	pthread_condattr_init(&cattr);
	pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
	ret = __bt(-__RT(pthread_cond_init(&trobj->join, &cattr)));
	pthread_condattr_destroy(&cattr);
	if (ret) {
		__RT(pthread_mutex_destroy(&trobj->lock));
		return ret;
	}

	/*
	 * We make sure not to unblock from threadobj_join() until at
	 * least one thread has called traceobj_enter() for this
	 * trace object.
	 */
	trobj->nr_threads = -1;

	trobj->label = label;
	trobj->nr_marks = nr_marks;
	trobj->cur_mark = 0;

	if (nr_marks > 0) {
		trobj->marks = pvmalloc(sizeof(struct tracemark) * nr_marks);
		if (trobj->marks == NULL)
			panic("cannot allocate mark table for tracing");
	}

	return 0;
}
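A test built on this object then follows the usual pattern; a sketch, assuming the traceobj_enter()/traceobj_mark()/traceobj_exit()/traceobj_join() helpers from the same library:

	static struct traceobj trobj;

	traceobj_init(&trobj, "cond_test", 4);	/* room for 4 marks */

	/* In each test thread: */
	traceobj_enter(&trobj);
	traceobj_mark(&trobj, 1);
	traceobj_exit(&trobj);

	/* From the main thread, once all threads are done: */
	traceobj_join(&trobj);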
Example #21
int rt_alarm_wait(RT_ALARM *alarm)
{
    struct threadobj *current = threadobj_current();
    struct sched_param_ex param_ex;
    struct trank_alarm_wait *aw;
    struct alchemy_alarm *acb;
    int ret, prio, pulses;

    acb = find_alarm(alarm);
    if (acb == NULL)
        return -EINVAL;

    threadobj_lock(current);
    prio = threadobj_get_priority(current);
    if (prio != threadobj_irq_prio) {
        param_ex.sched_priority = threadobj_irq_prio;
        /* Working on self, so -EIDRM can't happen. */
        threadobj_set_schedparam(current, SCHED_FIFO, &param_ex);
    }
    threadobj_unlock(current);

    aw = acb->arg;

    /*
     * Emulate the original behavior: wait for the next pulse (no
     * event buffering, broadcast to all waiters), while
     * preventing spurious wakeups.
     */
    __RT(pthread_mutex_lock(&aw->lock));

    pulses = aw->alarm_pulses;

    for (;;) {
        ret = -__RT(pthread_cond_wait(&aw->event, &aw->lock));
        if (ret || aw->alarm_pulses != pulses)
            break;
    }

    __RT(pthread_mutex_unlock(&aw->lock));

    return __bt(ret);
}
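Putting the three trank pieces together (create, start, wait, delete); the rt_alarm_start() parameters are illustrative values in clock ticks:

    RT_ALARM alarm;
    int ret;

    ret = rt_alarm_create(&alarm, "tick");
    if (ret)
        return ret;

    /* Fire after 1ms, then every 1ms (assuming nanosecond ticks). */
    ret = rt_alarm_start(&alarm, 1000000, 1000000);
    if (ret == 0)
        ret = rt_alarm_wait(&alarm);    /* blocks until the next pulse */

    rt_alarm_delete(&alarm);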
Example #22
void smokey_note(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	if (!smokey_quiet_mode)
		__RT(vfprintf(stdout, fmt, ap));

	va_end(ap);
}
Example #23
static void task_finalizer(struct threadobj *thobj)
{
	struct wind_task *task = container_of(thobj, struct wind_task, thobj);

	task->tcb->status |= WIND_DEAD;
	cluster_delobj(&wind_task_table, &task->cobj);
	registry_destroy_file(&task->fsobj);
	__RT(pthread_mutex_destroy(&task->safelock));
	threadobj_destroy(&task->thobj);

	xnfree(task);
}
Example #24
int rt_alarm_delete(RT_ALARM *alarm)
{
    struct trank_alarm_wait *aw;
    struct alchemy_alarm *acb;
    int ret;

    acb = find_alarm(alarm);
    if (acb == NULL)
        return -EINVAL;

    aw = acb->arg;
    ret = __CURRENT(rt_alarm_delete(alarm));
    if (ret)
        return ret;

    __RT(pthread_cond_destroy(&aw->event));
    __RT(pthread_mutex_destroy(&aw->lock));
    xnfree(aw);

    return 0;
}
Example #25
static STATUS msem_give(struct wind_sem *sem)
{
	struct wind_task *current;
	int ret;

	if (threadobj_irq_p())
		return S_intLib_NOT_ISR_CALLABLE;

	ret = __RT(pthread_mutex_unlock(&sem->u.msem.lock));
	if (ret == EINVAL)
		return S_objLib_OBJ_ID_ERROR;
	if (ret == EPERM)
		return S_semLib_INVALID_OPERATION;

	if (sem->options & SEM_DELETE_SAFE) {
		current = wind_task_current();
		if (current)
			__RT(pthread_mutex_unlock(&current->safelock));
	}

	return OK;
}
Example #26
static void compare_marks(struct traceobj *trobj, int tseq[], int nr_seq) /* lock held */
{
	int mark;

	for (mark = 0; mark < trobj->cur_mark || mark < nr_seq; mark++) {
		if (mark >= trobj->cur_mark) {
			fprintf(stderr, " <missing mark> |  [%d] expected\n",
				tseq[mark]);
		} else if (mark < nr_seq)
			__RT(fprintf(stderr, "at %s:%d  |  [%d] should be [%d]\n",
				     trobj->marks[mark].file,
				     trobj->marks[mark].line,
				     trobj->marks[mark].mark,
				     tseq[mark]));
		else
			__RT(fprintf(stderr, "at %s:%d  |  unexpected [%d]\n",
				     trobj->marks[mark].file,
				     trobj->marks[mark].line,
				     trobj->marks[mark].mark));
	}

	fflush(stderr);
}
Example #27
/**
 * @fn ssize_t rt_pipe_read_timed(RT_PIPE *pipe, void *buf, size_t size, const struct timespec *abs_timeout)
 * @brief Read a message from a pipe.
 *
 * This service reads the next available message from a given pipe.
 *
 * @param pipe The pipe descriptor.
 *
 * @param buf A pointer to a memory area which will be written upon
 * success with the message received.
 *
 * @param size The maximum count of bytes to read from the received
 * message into @a buf. If @a size is lower than the actual message
 * size, -ENOBUFS is returned, since the incompletely received
 * message would be lost. If @a size is zero, this call returns
 * immediately with no other action.
 *
 * @param abs_timeout An absolute date expressed in clock ticks,
 * specifying a time limit to wait for a message to be available from
 * the pipe (see note). Passing NULL causes the caller to block
 * indefinitely until a message is available. Passing { .tv_sec = 0,
 * .tv_nsec = 0 } causes the service to return immediately without
 * blocking in case no message is available.
 *
 * @return The number of bytes available from the received message is
 * returned upon success. Otherwise:
 *
 * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
 * message arrives.
 *
 * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
 * .tv_nsec = 0 } and no message is immediately available on entry to
 * the call.
 *
 * - -EINTR is returned if rt_task_unblock() was called for the
 * current task before a message was available.
 *
 * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
 *
 * - -EIDRM is returned if @a pipe is deleted while the caller was
 * waiting for a message. In such event, @a pipe is no more valid upon
 * return of this service.
 *
 * - -EPERM is returned if this service should block, but was not
 * called from a Xenomai thread.
 *
 * @apitags{xthread-nowait, switch-primary}
 *
 * @note @a abs_timeout is interpreted as a multiple of the Alchemy
 * clock resolution (see --alchemy-clock-resolution option, defaults
 * to 1 nanosecond).
 */
ssize_t rt_pipe_read_timed(RT_PIPE *pipe,
			   void *buf, size_t size,
			   const struct timespec *abs_timeout)
{
	struct alchemy_pipe *pcb;
	int err = 0, flags;
	struct timeval tv;
	ssize_t ret;

	pcb = find_alchemy_pipe(pipe, &err);
	if (pcb == NULL)
		return err;

	if (alchemy_poll_mode(abs_timeout))
		flags = MSG_DONTWAIT;
	else {
		if (!threadobj_current_p())
			return -EPERM;
		if (abs_timeout) {
			tv.tv_sec = abs_timeout->tv_sec;
			tv.tv_usec = abs_timeout->tv_nsec / 1000;
		} else {
			tv.tv_sec = 0;
			tv.tv_usec = 0;
		}
		__RT(setsockopt(pcb->sock, SOL_SOCKET,
				SO_RCVTIMEO, &tv, sizeof(tv)));
		flags = 0;
	}

	ret = __RT(recvfrom(pcb->sock, buf, size, flags, NULL, 0));
	if (ret < 0)
		ret = -errno;

	return ret;
}
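Caller-side sketch exercising the two documented modes, the non-blocking poll with a zero timespec and the unbounded wait with NULL; pipe is assumed to be an open RT_PIPE descriptor:

	char buf[64];
	struct timespec poll = { .tv_sec = 0, .tv_nsec = 0 };
	ssize_t n;

	/* Non-blocking probe: -EWOULDBLOCK if no message is pending. */
	n = rt_pipe_read_timed(&pipe, buf, sizeof(buf), &poll);
	if (n == -EWOULDBLOCK)
		/* Block indefinitely until a message arrives. */
		n = rt_pipe_read_timed(&pipe, buf, sizeof(buf), NULL);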
Example #28
void clockobj_get_date(struct clockobj *clkobj, ticks_t *pticks)
{
	struct timespec now, date;

	read_lock_nocancel(&clkobj->lock);

	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));

	/* Add offset from epoch to current system time. */
	timespec_add(&date, &clkobj->offset, &now);

	/* Convert the time value to ticks. */
	*pticks = (ticks_t)date.tv_sec * clockobj_get_frequency(clkobj)
		+ (ticks_t)date.tv_nsec / clockobj_get_resolution(clkobj);

	read_unlock(&clkobj->lock);
}
Example #29
int rt_cond_broadcast(RT_COND *cond)
{
	struct alchemy_cond *ccb;
	struct service svc;
	int ret = 0;

	COPPERPLATE_PROTECT(svc);

	ccb = find_alchemy_cond(cond, &ret);
	if (ccb == NULL)
		goto out;

	ret = -__RT(pthread_cond_broadcast(&ccb->cond));
out:
	COPPERPLATE_UNPROTECT(svc);

	return ret;
}
Example #30
void clockobj_set_date(struct clockobj *clkobj, ticks_t ticks)
{
	struct timespec now;

	/*
	 * XXX: we grab the lock to exclude other threads from reading
	 * the clock offset while we update it, so that they either
	 * compute against the old value, or the new one, but always
	 * see a valid offset.
	 */
	read_lock_nocancel(&clkobj->lock);

	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));

	__clockobj_ticks_to_timespec(clkobj, ticks, &clkobj->epoch);
	timespec_sub(&clkobj->offset, &clkobj->epoch, &now);

	read_unlock(&clkobj->lock);
}
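The two date services pair up as follows; a sketch, rebasing the epoch then reading the date back in ticks on a previously initialized clock object:

	ticks_t now;

	/* Rebase the clock so that the date restarts at tick 1000. */
	clockobj_set_date(&clkobj, 1000);
	clockobj_get_date(&clkobj, &now);	/* now >= 1000 from here on */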