Example #1
int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	if (unlikely(status & XNOTHER))
		goto do_syscall;

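	/*
	 * Fast path: if the caller owns the mutex and the atomic
	 * release below succeeds (no waiter has registered on the
	 * fast lock word), the syscall is skipped entirely.
	 */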
	ownerp = get_ownerp(shadow);

	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Example #2
int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
			      const pthread_mutexattr_t *attr)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		goto checked;

	err = -XENOMAI_SKINCALL2(__pse51_muxid, __pse51_check_init,
				 shadow, attr);

	if (err) {
		cb_read_unlock(&shadow->lock, s);
		return err;
	}

  checked:
	cb_force_write_lock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	err = -XENOMAI_SKINCALL2(__pse51_muxid, __pse51_mutex_init,
				 shadow, attr);

#ifdef CONFIG_XENO_FASTSYNCH
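	/*
	 * For a process-private mutex, turn the owner offset into a
	 * pointer inside the local mapping of the semaphore heap, so
	 * the fast lock word can be reached without a syscall.
	 */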
	if (!shadow->attr.pshared)
		shadow->owner = (xnarch_atomic_t *)
			(xeno_sem_heap[0] + shadow->owner_offset);

	cb_write_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return err;
}
Example #3
int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out;
	}

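	/*
	 * A relaxed thread (secondary mode) must migrate back to the
	 * Xenomai domain before attempting the atomic acquisition,
	 * restarting the migration syscall if it is interrupted.
	 */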
	if (unlikely(status & XNRELAX)) {
		do {
			err = XENOMAI_SYSCALL1(__xn_sys_migrate,
					       XENOMAI_XENO_DOMAIN);
		} while (err == -EINTR);

		if (err < 0)
			goto out;
	}

	err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

	if (likely(!err)) {
		shadow->lockcnt = 1;
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

	if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
		if (shadow->lockcnt == UINT_MAX)
			err = -EAGAIN;
		else {
			++shadow->lockcnt;
			err = 0;
		}
	} else
		err = -EBUSY;

  out:
	cb_read_unlock(&shadow->lock, s);
	return -err;

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_trylock, shadow);
	} while (err == -EINTR);

	return -err;
}
Example #4
int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
				   const struct timespec *to)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/* See __wrap_pthread_mutex_lock() */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}

				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL2(__pse51_muxid,
					__pse51_mutex_timedlock, shadow, to);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Example #5
int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * We track resource ownership for non real-time shadows in
	 * order to handle the auto-relax feature, so we must always
	 * obtain them via a syscall.
	 */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}
				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_lock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Example #6
int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pse51_cond_cleanup_t c = {
		.cond = (union __xeno_cond *)cond,
		.mutex = (union __xeno_mutex *)mutex,
	};
	int err, oldtype;

	if (unlikely(c.cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

  start:
	if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
		return EINVAL;

	pthread_cleanup_push(&__pthread_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_cond_wait_prologue,
				 &c.cond->shadow_cond,
				 &c.mutex->shadow_mutex, &c.count, 0, NULL);

	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

	while (err == EINTR)
		err = -XENOMAI_SKINCALL3(__pse51_muxid,
					 __pse51_cond_wait_epilogue,
					 &c.cond->shadow_cond,
					 &c.mutex->shadow_mutex,
					 c.count);

	cb_read_unlock(&c.mutex->shadow_mutex.lock, s);

	pthread_testcancel();

	return err ?: c.err;

  autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
				  pthread_mutex_t * mutex,
				  const struct timespec *abstime)
{
	struct pse51_cond_cleanup_t c = {
		.cond = (union __xeno_cond *)cond,
		.mutex = (union __xeno_mutex *)mutex,
	};
	int err, oldtype;

	if (unlikely(c.cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

  start:
	if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
		return EINVAL;

	pthread_cleanup_push(&__pthread_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_cond_wait_prologue,
				 &c.cond->shadow_cond,
				 &c.mutex->shadow_mutex, &c.count, 1, abstime);
	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

	while (err == EINTR)
		err = -XENOMAI_SKINCALL3(__pse51_muxid,
					 __pse51_cond_wait_epilogue,
					 &c.cond->shadow_cond,
					 &c.mutex->shadow_mutex,
					 c.count);

	cb_read_unlock(&c.mutex->shadow_mutex.lock, s);

	pthread_testcancel();

	return err ?: c.err;

  autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

int __wrap_pthread_cond_signal(pthread_cond_t * cond)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	if (unlikely(_cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

  start:
	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_cond_signal, &_cond->shadow_cond);

  autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	if (unlikely(_cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

  start:
	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_cond_broadcast, &_cond->shadow_cond);

  autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

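/*
 * Cold out-of-line helper shared by the wrappers above: initialize a
 * statically allocated condition variable on first use, as detected
 * by the PSE51_COND_MAGIC checks.
 */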
static int __attribute__((cold)) cond_autoinit(pthread_cond_t *cond)
{
	return __wrap_pthread_cond_init(cond, NULL);
}
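
All six examples follow the same pattern: validate the shadow object behind the standard pthread type, attempt a lock-free fast path where CONFIG_XENO_FASTSYNCH allows it, and otherwise issue a skin syscall that is restarted on -EINTR. Application code never calls the __wrap_* symbols directly: when a program is linked with GNU ld's --wrap option for each pthread symbol, calls to pthread_mutex_lock() resolve to __wrap_pthread_mutex_lock(), and so on. The caller below is a hypothetical sketch added for illustration, not part of the examples above: plain POSIX code that, in such a wrapped build, would exercise the init, lock/unlock, signal, and wait paths, including the autoinit path taken for the statically initialized condition variable.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock;	/* set up below through the init wrapper */
static pthread_cond_t ready = PTHREAD_COND_INITIALIZER;	/* autoinit path */
static int posted;

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* fast path when uncontended */
	posted = 1;
	pthread_cond_signal(&ready);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int err;

	err = pthread_mutex_init(&lock, NULL);
	if (err) {
		fprintf(stderr, "mutex_init: %s\n", strerror(err));
		return 1;
	}

	pthread_mutex_lock(&lock);
	pthread_create(&tid, NULL, producer, NULL);

	/* The wait atomically releases the mutex and reacquires it
	   before returning, as implemented by the prologue/epilogue
	   syscalls in Example #6. */
	while (!posted)
		pthread_cond_wait(&ready, &lock);

	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	pthread_mutex_destroy(&lock);

	return 0;
}

As in the wrappers, failures surface as positive errno values returned by the calls themselves, which is why the sketch checks the return value of pthread_mutex_init() directly instead of consulting errno.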