Example #1
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
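A minimal caller-side sketch of the spin-mutex interface exercised above, assuming a FreeBSD kernel context (mtx_init, mtx_lock_spin, mtx_unlock_spin, and mtx_destroy are the mutex(9) API; the surrounding function is hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin_mtx;

static void
example_spin_usage(void)
{
	/* MTX_SPIN selects lock_class_mtx_spin, which the KASSERTs above verify. */
	mtx_init(&example_spin_mtx, "example spin", NULL, MTX_SPIN);
	mtx_lock_spin(&example_spin_mtx);
	/* ... short critical section; interrupts are disabled on this CPU ... */
	mtx_unlock_spin(&example_spin_mtx);
	mtx_destroy(&example_spin_mtx);
}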
Example #2
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
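A hedged sketch of how the trylock variant is typically consumed (same assumed mutex(9) API; note the KASSERT above rejects MTX_RECURSE, so the caller must not already hold the lock):

static int
example_try_spin(struct mtx *m)
{
	if (!mtx_trylock_spin(m))
		return (0);	/* Contended: caller takes a fallback path. */
	/* ... short critical section ... */
	mtx_unlock_spin(m);
	return (1);
}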
Example #3
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
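For contrast with the spin variants, a minimal sleep-mutex sketch under the same assumed mutex(9) API; MTX_DEF selects lock_class_mtx_sleep, matching the LOCK_CLASS assertion above:

static struct mtx example_sleep_mtx;

static void
example_sleep_usage(void)
{
	mtx_init(&example_sleep_mtx, "example sleep", NULL, MTX_DEF);
	mtx_lock(&example_sleep_mtx);	/* may block; not for interrupt context */
	/* ... critical section ... */
	mtx_unlock(&example_sleep_mtx);
	mtx_destroy(&example_sleep_mtx);
}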
Example #4
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
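The acquisition loop above retries the fetch-and-CAS only while it observes MTX_UNOWNED, and folds recursion into the same pass. A self-contained userland analogue of that structure in C11 atomics (all names hypothetical; a sketch of the technique, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_UNOWNED ((uintptr_t)0)

struct toy_mtx {
	_Atomic uintptr_t owner;	/* TOY_UNOWNED when free, else thread id */
	unsigned recurse;		/* only touched by the owner */
};

static bool
toy_trylock(struct toy_mtx *m, uintptr_t tid)
{
	uintptr_t v = TOY_UNOWNED;

	for (;;) {
		/* On failure, v is refreshed with the value actually seen. */
		if (atomic_compare_exchange_weak(&m->owner, &v, tid))
			return (true);
		if (v == TOY_UNOWNED)
			continue;	/* Lost a race or failed spuriously; retry. */
		if (v == tid) {		/* Already ours: recurse. */
			m->recurse++;
			return (true);
		}
		return (false);		/* Held by another thread. */
	}
}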
Example #5
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;

	/*
	 * Check to see if this thread is blocked on an sx lock.
	 * First, we check the lock class.  If that is ok, then we
	 * compare the lock name against the wait message.
	 */
	sx = td->td_wchan;
	if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
	    sx->lock_object.lo_name != td->td_wmesg)
		return (0);

	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	*ownerp = sx_xholder(sx);
	if (sx->sx_lock & SX_LOCK_SHARED)
		db_printf("SLOCK (count %ju)\n",
		    (uintmax_t)SX_SHARERS(sx->sx_lock));
	else
		db_printf("XLOCK\n");
	return (1);
}
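For context, the sx(9) calls that put a thread on such a sleep queue in the first place; a minimal sketch assuming a FreeBSD kernel context:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_sx;

static void
example_sx_usage(void)
{
	sx_init(&example_sx, "example sx");
	sx_slock(&example_sx);		/* shared; SX_SHARERS counts holders */
	sx_sunlock(&example_sx);
	sx_xlock(&example_sx);		/* exclusive; sx_xholder() reports us */
	sx_xunlock(&example_sx);
	sx_destroy(&example_sx);
}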
Example #6
static void
db_show_rm(const struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	const struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (const struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
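The per-CPU tracker queues walked above are filled in by read-side acquisitions; a minimal rmlock(9) usage sketch (FreeBSD kernel context assumed; the tracker lives on the reader's stack, which is what makes it reachable from the per-CPU queue):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock example_rm;

static void
example_rm_usage(void)
{
	struct rm_priotracker tracker;

	rm_init(&example_rm, "example rm");
	rm_rlock(&example_rm, &tracker);	/* enqueues tracker per-CPU */
	/* ... read-side critical section ... */
	rm_runlock(&example_rm, &tracker);
	rm_wlock(&example_rm);			/* takes the backing write lock */
	rm_wunlock(&example_rm);
	rm_destroy(&example_rm);
}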
Example #7
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
Example #8
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_compressBytesDirect
(JNIEnv *env, jobject thisj){
  // Get members of SnappyCompressor
  jobject clazz = (*env)->GetStaticObjectField(env, thisj, SnappyCompressor_clazz);
  jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf);
  jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
  jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
  jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
  size_t buf_len;

  // Get the input direct buffer
  LOCK_CLASS(env, clazz, "SnappyCompressor");
  const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "SnappyCompressor");

  if (uncompressed_bytes == 0) {
    return (jint)0;
  }

  // Get the output direct buffer
  LOCK_CLASS(env, clazz, "SnappyCompressor");
  char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "SnappyCompressor");

  if (compressed_bytes == 0) {
    return (jint)0;
  }

  /* size_t should always be 4 bytes or larger. */
  buf_len = (size_t)compressed_direct_buf_len;
  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
        uncompressed_direct_buf_len, compressed_bytes, &buf_len);
  if (ret != SNAPPY_OK){
    THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
    return 0;
  }
  if (buf_len > JINT_MAX) {
    THROW(env, "Ljava/lang/InternalError", "Invalid return buffer length.");
    return 0;
  }

  (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
  return (jint)buf_len;
}
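For reference, the same compression call outside JNI; a minimal standalone sketch assuming the C bindings in snappy-c.h (link with -lsnappy):

#include <snappy-c.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char input[] = "example example example example";
	size_t out_len = snappy_max_compressed_length(sizeof(input));
	char *out = malloc(out_len);

	if (out == NULL)
		return (1);
	/* out_len is in/out: capacity on entry, bytes written on success. */
	if (snappy_compress(input, sizeof(input), out, &out_len) != SNAPPY_OK) {
		free(out);
		return (1);
	}
	printf("compressed %zu -> %zu bytes\n", sizeof(input), out_len);
	free(out);
	return (0);
}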
Example #9
static int
owner_rm(const struct lock_object *lock, struct thread **owner)
{
	const struct rmlock *rm;
	struct lock_class *lc;

	rm = (const struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
Example #10
void test(void)
{
	LOCK_CLASS(lc);
	struct lock a;

	MXINIT(&a, &lc);

	test_set_panic_string("lockdep: Aborting - releasing unheld lock");

	MXUNLOCK(&a);
}
Example #11
void test(void)
{
	LOCK_CLASS(lc);
	struct lock a;

	MXINIT(&a, &lc);
	MXLOCK(&a);

	test_set_panic_string("lockdep: Aborting - deadlock");

	MXLOCK(&a);
}
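A hedged companion case under the same harness macros used above (LOCK_CLASS, MXINIT, MXLOCK, MXUNLOCK); the expectation that a balanced acquire/release passes without a panic string is an assumption about this test framework:

void test_balanced(void)
{
	LOCK_CLASS(lc);
	struct lock a;

	MXINIT(&a, &lc);
	MXLOCK(&a);
	/* No panic string is set: the release below balances the acquire. */
	MXUNLOCK(&a);
}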
Example #12
JNIEXPORT jint JNICALL Java_com_indeed_util_compress_snappy_SnappyDecompressor_decompressBytesDirect
(JNIEnv *env, jobject thisj){
  // Get members of SnappyDecompressor
  jobject clazz = (*env)->GetStaticObjectField(env,thisj, SnappyDecompressor_clazz);
  jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf);
  jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, SnappyDecompressor_compressedDirectBufLen);
  jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_uncompressedDirectBuf);
  size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyDecompressor_directBufferSize);

  // Get the input direct buffer
  LOCK_CLASS(env, clazz, "SnappyDecompressor");
  const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");

  if (compressed_bytes == 0) {
    return (jint)0;
  }

  // Get the output direct buffer
  LOCK_CLASS(env, clazz, "SnappyDecompressor");
  char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");

  if (uncompressed_bytes == 0) {
    return (jint)0;
  }

  snappy_status ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len, uncompressed_bytes, &uncompressed_direct_buf_len);
  if (ret == SNAPPY_BUFFER_TOO_SMALL){
    THROW(env, "java/lang/InternalError", "Could not decompress data. Buffer length is too small.");
  } else if (ret == SNAPPY_INVALID_INPUT){
    THROW(env, "java/lang/InternalError", "Could not decompress data. Input is invalid.");
  } else if (ret != SNAPPY_OK){
    THROW(env, "java/lang/InternalError", "Could not decompress data.");
  }

  (*env)->SetIntField(env, thisj, SnappyDecompressor_compressedDirectBufLen, 0);

  return (jint)uncompressed_direct_buf_len;
}
Example #13
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_lz4_Lz4Decompressor_decompressBytesDirect
(JNIEnv *env, jobject thisj){
  const char *compressed_bytes;
  char *uncompressed_bytes;

  // Get members of Lz4Decompressor
  jobject clazz = (*env)->GetStaticObjectField(env,thisj, Lz4Decompressor_clazz);
  jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_compressedDirectBuf);
  jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, Lz4Decompressor_compressedDirectBufLen);
  jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, Lz4Decompressor_uncompressedDirectBuf);
  // jint (signed) rather than size_t: the < 0 error check below must work.
  jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, Lz4Decompressor_directBufferSize);

  // Get the input direct buffer
  LOCK_CLASS(env, clazz, "Lz4Decompressor");
  compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "Lz4Decompressor");

  if (compressed_bytes == 0) {
    return (jint)0;
  }

  // Get the output direct buffer
  LOCK_CLASS(env, clazz, "Lz4Decompressor");
  uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
  UNLOCK_CLASS(env, clazz, "Lz4Decompressor");

  if (uncompressed_bytes == 0) {
    return (jint)0;
  }

  uncompressed_direct_buf_len = LZ4_uncompress_unknownOutputSize(compressed_bytes, uncompressed_bytes, compressed_direct_buf_len, uncompressed_direct_buf_len);
  if (uncompressed_direct_buf_len < 0) {
    THROW(env, "java/lang/InternalError", "LZ4_uncompress_unknownOutputSize failed.");
  }

  (*env)->SetIntField(env, thisj, Lz4Decompressor_compressedDirectBufLen, 0);

  return (jint)uncompressed_direct_buf_len;
}
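LZ4_uncompress_unknownOutputSize is the legacy spelling of what current liblz4 exposes as LZ4_decompress_safe; a standalone round-trip sketch against the modern API (assumes liblz4, link with -llz4):

#include <lz4.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char src[] = "example example example example";
	int bound = LZ4_compressBound(sizeof(src));
	char *comp = malloc(bound);
	char dec[sizeof(src)];
	int clen, dlen;

	if (comp == NULL)
		return (1);
	clen = LZ4_compress_default(src, comp, sizeof(src), bound);
	if (clen <= 0) {
		free(comp);
		return (1);
	}
	/* Returns the decompressed byte count, or a negative error code. */
	dlen = LZ4_decompress_safe(comp, dec, clen, sizeof(dec));
	printf("round trip: %zu -> %d -> %d bytes\n", sizeof(src), clen, dlen);
	free(comp);
	return (dlen == (int)sizeof(src) ? 0 : 1);
}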
Example #14
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
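The validation above backs the public thread_lock() entry point; a caller-side sketch (FreeBSD kernel context assumed; td_lock may migrate between scheduler locks, which is why the acquisition path elsewhere in this file revalidates in a loop):

static void
example_thread_state(struct thread *td)
{
	thread_lock(td);
	/* ... td's scheduler state is stable while held ... */
	thread_unlock(td);
}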
Example #15
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #16
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	_rel_spin_lock(m);
}
Example #17
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_get_sleep_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
Example #18
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	_rel_sleep_lock(m, curthread, opts, file, line);
}
Example #19
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
Example #20
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
Example #21
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
Example #22
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock(m, curthread, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
Example #23
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #24
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_rel_spin_lock(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}
Example #25
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED())
		return;

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_mtx_obtain_lock(m, tid)) {
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(thread__spin, m, spin_time);
}
Example #26
void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	int64_t spin_time = 0;
#endif

	tid = (uintptr_t)curthread;

	if (SCHEDULER_STOPPED()) {
		/*
		 * Ensure that spinlock sections are balanced even when the
		 * scheduler is stopped, since we may otherwise inadvertently
		 * re-enable interrupts while dumping core.
		 */
		spinlock_enter();
		return;
	}

	lock_delay_arg_init(&lda, &mtx_spin_delay);

#ifdef KDTRACE_HOOKS
	spin_time -= lockstat_nsecs(&td->td_lock->lock_object);
#endif
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		for (;;) {
			if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
				break;
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (lda.spin_cnt < 10000000) {
					lock_delay(&lda);
				} else {
					lda.spin_cnt++;
					if (lda.spin_cnt < 60000000 ||
					    kdb_active || panicstr != NULL)
						DELAY(1);
					else
						_mtx_lock_spin_failed(m);
					cpu_spinwait();
				}
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		__mtx_unlock_spin(m);	/* does spinlock_exit() */
	}
#ifdef KDTRACE_HOOKS
	spin_time += lockstat_nsecs(&m->lock_object);
#endif
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m,
		    contested, waittime, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
#ifdef KDTRACE_HOOKS
	if (spin_time != 0)
		LOCKSTAT_RECORD1(thread__spin, m, spin_time);
#endif
}
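The retry loop above spins on plain reads of m->mtx_lock (with the spinlock section exited so interrupts can run) and only re-attempts the atomic acquire once the lock looks free. A self-contained test-and-test-and-set sketch of that shape in C11 (hypothetical names; no interrupt handling, td_lock revalidation, or lock_delay backoff):

#include <stdatomic.h>
#include <stdint.h>

struct toy_spin {
	_Atomic uintptr_t owner;	/* 0 when free, else thread id */
};

static void
toy_spin_lock(struct toy_spin *s, uintptr_t tid)
{
	uintptr_t v;

	for (;;) {
		v = 0;
		/* Try the expensive atomic only when the lock looks free. */
		if (atomic_compare_exchange_weak(&s->owner, &v, tid))
			return;
		/* Spin on relaxed loads to keep the cache line shared. */
		while (atomic_load_explicit(&s->owner,
		    memory_order_relaxed) != 0)
			;	/* cpu_spinwait()/pause belongs here */
	}
}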