Example #1
void
mutex_destroy(mutex* lock)
{
	char* name = (lock->flags & MUTEX_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(gThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
		!= lock->holder) {
		panic("mutex_destroy(): there are blocking threads, but caller doesn't "
			"hold the lock (%p)", lock);
		if (_mutex_lock(lock, true) != B_OK)
			return;
	}
#endif

	while (mutex_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
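Since mutex_destroy() only frees the name when MUTEX_FLAG_CLONE_NAME is set, that flag is what hands ownership of the string to the lock. A minimal usage sketch, assuming Haiku's mutex_init_etc() initializer; the function and variable names are made up for illustration:

static mutex sExampleLock;

static void
init_example(const char* deviceName)
{
	char name[64];
	snprintf(name, sizeof(name), "%s queue lock", deviceName);

	// MUTEX_FLAG_CLONE_NAME makes the lock keep its own copy of the name,
	// so a stack buffer is fine here; mutex_destroy() will free() the copy.
	mutex_init_etc(&sExampleLock, name, MUTEX_FLAG_CLONE_NAME);
}

static void
uninit_example()
{
	// Unblocks any remaining waiters with B_ERROR and frees the cloned name.
	mutex_destroy(&sExampleLock);
}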
Example #2
/*!	You must call this function with interrupts disabled, and the semaphore's
	spinlock held. Note that it will unlock the spinlock itself.
	Since it cannot free() the semaphore's name with interrupts turned off, it
	returns the name in \a _name instead.
*/
static void
uninit_sem_locked(struct sem_entry& sem, char** _name)
{
	KTRACE("delete_sem(sem: %ld)", sem.u.used.id);

	notify_sem_select_events(&sem, B_EVENT_INVALID);
	sem.u.used.select_infos = NULL;

	// free any threads waiting for this semaphore
	SpinLocker schedulerLocker(gSchedulerLock);
	while (queued_thread* entry = sem.queue.RemoveHead()) {
		entry->queued = false;
		thread_unblock_locked(entry->thread, B_BAD_SEM_ID);
	}
	schedulerLocker.Unlock();

	int32 id = sem.id;
	sem.id = -1;
	*_name = sem.u.used.name;
	sem.u.used.name = NULL;

	RELEASE_SEM_LOCK(sem);

	// append slot to the free list
	GRAB_SEM_LIST_LOCK();
	free_sem_slot(id % sMaxSems, id + sMaxSems);
	atomic_add(&sUsedSems, -1);
	RELEASE_SEM_LIST_LOCK();
}
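The documented contract (interrupts disabled and the spinlock held on entry, spinlock released on return) implies a caller shaped roughly like the sketch below. GRAB_SEM_LOCK() is assumed to be the acquire counterpart of the RELEASE_SEM_LOCK() used above; delete_sem_example() is hypothetical:

static void
delete_sem_example(struct sem_entry& sem)
{
	char* name = NULL;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LOCK(sem);	// assumed counterpart of RELEASE_SEM_LOCK()

	// Unblocks all waiters and releases the semaphore's spinlock for us.
	uninit_sem_locked(sem, &name);

	restore_interrupts(state);

	// Only now, with interrupts enabled again, is free() safe.
	free(name);
}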
Example #3
static int32
rw_lock_unblock(rw_lock* lock)
{
    // Check whether there are any waiting threads at all and whether anyone
    // has the write lock.
    rw_lock_waiter* waiter = lock->waiters;
    if (waiter == NULL || lock->holder >= 0)
        return 0;

    // writer at head of queue?
    if (waiter->writer) {
        if (lock->active_readers > 0 || lock->pending_readers > 0)
            return 0;

        // dequeue writer
        lock->waiters = waiter->next;
        if (lock->waiters != NULL)
            lock->waiters->last = waiter->last;

        lock->holder = waiter->thread->id;

        // unblock thread
        thread_unblock_locked(waiter->thread, B_OK);
        waiter->thread = NULL;
        return RW_LOCK_WRITER_COUNT_BASE;
    }

    // wake up one or more readers
    uint32 readerCount = 0;
    do {
        // dequeue reader
        lock->waiters = waiter->next;
        if (lock->waiters != NULL)
            lock->waiters->last = waiter->last;

        readerCount++;

        // unblock thread
        thread_unblock_locked(waiter->thread, B_OK);
        waiter->thread = NULL;
    } while ((waiter = lock->waiters) != NULL && !waiter->writer);

    if (lock->count >= RW_LOCK_WRITER_COUNT_BASE)
        lock->active_readers += readerCount;

    return readerCount;
}
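The final lock->count check works because the count appears to pack both roles into one word: reader acquisitions occupy the low bits, while each writer contributes a full multiple of RW_LOCK_WRITER_COUNT_BASE. A hedged sketch, under that assumption, of a read-lock fast path that consumes the encoding; the slow-path helper name is hypothetical:

static inline status_t
example_rw_lock_read_lock(rw_lock* lock)
{
	// A single atomic add both registers us as a reader and tells us
	// whether a writer is active or queued (old count >= base).
	int32 oldCount = atomic_add(&lock->count, 1);
	if (oldCount < RW_LOCK_WRITER_COUNT_BASE)
		return B_OK;	// pure reader traffic: the read lock is ours

	// A writer is involved: fall back to the blocking path, which is
	// eventually woken by rw_lock_unblock() above.
	return example_read_lock_slow(lock);	// hypothetical helper
}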
Example #4
	void WakeUpThread(bool waitingForZero)
	{
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
		if (waitingForZero) {
			// Wake up all threads waiting on zero
			while (queued_thread *entry = fWaitingToBeZeroQueue.RemoveHead()) {
				entry->queued = false;
				fThreadsWaitingToBeZero--;
				thread_unblock_locked(entry->thread, 0);
			}
		} else {
			// Wake up all threads even though they might go back to sleep
			while (queued_thread *entry = fWaitingToIncreaseQueue.RemoveHead()) {
				entry->queued = false;
				fThreadsWaitingToIncrease--;
				thread_unblock_locked(entry->thread, 0);
			}
		}
	}
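For context, the queues drained here are filled by the blocking side. A simplified sketch of that counterpart, assuming a queued_thread (thread, count) constructor and Haiku's thread_prepare_to_block()/thread_block_locked() pairing; the method name is made up, and cleanup of a still-queued entry after an interrupted wait is omitted:

	status_t WaitForIncrease(Thread* thread, int32 count)
	{
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// Queue ourselves, then block; WakeUpThread() above dequeues the
		// entry and wakes us via thread_unblock_locked().
		queued_thread entry(thread, count);
		entry.queued = true;
		fWaitingToIncreaseQueue.Add(&entry);
		fThreadsWaitingToIncrease++;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, (void*)"xsi semaphore");
		return thread_block_locked(thread);
	}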
Example #5
	void Notify(status_t status = B_OK)
	{
		InterruptsSpinLocker _(fLock);
		TRACE("ReadRequest %p::Notify(), fNotified %d\n", this, fNotified);

		if (!fNotified) {
			SpinLocker schedulerLocker(gSchedulerLock);
			thread_unblock_locked(fThread, status);
			fNotified = true;
		}
	}
Example #6
	~XsiSemaphore()
	{
		// For some reason the semaphore is getting destroyed.
		// Wake up any remaining waiting threads
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		while (queued_thread *entry = fWaitingToIncreaseQueue.RemoveHead()) {
			entry->queued = false;
			thread_unblock_locked(entry->thread, EIDRM);
		}
		while (queued_thread *entry = fWaitingToBeZeroQueue.RemoveHead()) {
			entry->queued = false;
			thread_unblock_locked(entry->thread, EIDRM);
		}
		// No need to remove any sem_undo requests still
		// hanging. When the process exits and doesn't find
		// the semaphore set, it'll just ignore the sem_undo
		// request. That's better than iterating through the
		// whole sUndoList. Besides, we don't know our semaphore
		// number nor our semaphore set id.
	}
Example #7
/*!	Notifies the profiler thread when the profiling buffer is full enough.
	The caller must hold fLock.
*/
inline void
SystemProfiler::_MaybeNotifyProfilerThreadLocked()
{
	// If the buffer is full enough, notify the profiler.
	if (fWaitingProfilerThread != NULL && fBufferSize > fBufferCapacity / 2) {
		int cpu = smp_get_current_cpu();
		fReentered[cpu] = true;

		SpinLocker _(fWaitingProfilerThread->scheduler_lock);
		thread_unblock_locked(fWaitingProfilerThread, B_OK);

		fWaitingProfilerThread = NULL;
		fReentered[cpu] = false;
	}
}
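The notification only makes sense next to the blocking side that sets fWaitingProfilerThread in the first place. A hypothetical sketch of that side; everything except the fields used above and the thread_prepare_to_block()/thread_block() pair is made up:

status_t
SystemProfiler::_WaitForBufferData()
{
	InterruptsSpinLocker locker(fLock);

	// Register ourselves as the waiting profiler thread, then block until
	// _MaybeNotifyProfilerThreadLocked() unblocks us.
	Thread* thread = thread_get_current_thread();
	fWaitingProfilerThread = thread;

	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");
	locker.Unlock();

	return thread_block();
}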
Example #8
void
_mutex_unlock(mutex* lock, bool threadsLocked)
{
	// lock only if !threadsLocked
	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);

#if KDEBUG
	if (thread_get_current_thread_id() != lock->holder) {
		panic("_mutex_unlock() failure: thread %ld is trying to release "
			"mutex %p (current holder %ld)\n", thread_get_current_thread_id(),
			lock, lock->holder);
		return;
	}
#else
	if (lock->ignore_unlock_count > 0) {
		lock->ignore_unlock_count--;
		return;
	}
#endif

	mutex_waiter* waiter = lock->waiters;
	if (waiter != NULL) {
		// dequeue the first waiter
		lock->waiters = waiter->next;
		if (lock->waiters != NULL)
			lock->waiters->last = waiter->last;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_OK);

#if KDEBUG
		// Set the holder to the unblocked thread right away. Besides
		// reflecting the actual situation, this avoids a race condition:
		// setting it to -1 would let another locker think the lock is not
		// held by anyone.
		lock->holder = waiter->thread->id;
#endif
	} else {
		// We've acquired the spinlock before the locker that is going to wait.
		// Just mark the lock as released.
#if KDEBUG
		lock->holder = -1;
#else
		lock->flags |= MUTEX_FLAG_RELEASED;
#endif
	}
}
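The MUTEX_FLAG_RELEASED branch is matched on the lock side: a locker that reaches the spinlock after this unlock finds the flag, consumes it, and never blocks. A sketch of how that check would sit at the top of the non-KDEBUG _mutex_lock(), with gThreadSpinlock already held:

	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		// The unlocker beat us to the spinlock (the else branch above);
		// consume the release instead of queuing a waiter.
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}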
Example #9
/*!	Forcibly removes a thread from a semaphore's wait queue. May have to wake
	up other threads in the process.
	Must be called with the semaphore lock held. The thread lock must not be
	held.
*/
static void
remove_thread_from_sem(queued_thread *entry, struct sem_entry *sem)
{
	if (!entry->queued)
		return;

	sem->queue.Remove(entry);
	entry->queued = false;
	sem->u.used.count += entry->count;

	// We're done with this entry. We only have to check whether other
	// threads need unblocking, too.

	// Now see if more threads need to be woken up. We get the scheduler lock
	// for that time, so the blocking state of threads won't change (due to
	// interruption or timeout). We need that lock anyway when unblocking a
	// thread.
	SpinLocker schedulerLocker(gSchedulerLock);

	while ((entry = sem->queue.Head()) != NULL) {
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied, unblock
			// it. Otherwise we can't unblock any other thread.
			if (entry->count > sem->u.used.net_count)
				break;

			thread_unblock_locked(entry->thread, B_OK);
			sem->u.used.net_count -= entry->count;
		} else {
			// The thread is no longer waiting, but still queued, which means
			// acquisition failed and we can just remove it.
			sem->u.used.count += entry->count;
		}

		sem->queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	// select notification, if the semaphore is now acquirable
	if (sem->u.used.count > 0)
		notify_sem_select_events(sem, B_EVENT_ACQUIRE_SEMAPHORE);
}
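A typical call site, sketched: after thread_block_locked() returns a failure status in the acquire path (timeout or interruption), the thread takes its possibly still-queued entry off the semaphore before bailing out. blockStatus, queueEntry and slot are illustrative names; GRAB_SEM_LOCK() is the assumed counterpart of RELEASE_SEM_LOCK():

	if (blockStatus != B_OK) {
		GRAB_SEM_LOCK(sSems[slot]);
		// The entry may still be queued; removing it can also let later,
		// smaller requests through, which remove_thread_from_sem() checks.
		remove_thread_from_sem(&queueEntry, &sSems[slot]);
		RELEASE_SEM_LOCK(sSems[slot]);
		return blockStatus;
	}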
Example #10
void
rw_lock_destroy(rw_lock* lock)
{
	char* name = (lock->flags & RW_LOCK_FLAG_CLONE_NAME) != 0
		? (char*)lock->name : NULL;

	// unblock all waiters
	InterruptsSpinLocker locker(gThreadSpinlock);

#if KDEBUG
	if (lock->waiters != NULL && thread_get_current_thread_id()
			!= lock->holder) {
		panic("rw_lock_destroy(): there are blocking threads, but the caller "
			"doesn't hold the write lock (%p)", lock);

		locker.Unlock();
		if (rw_lock_write_lock(lock) != B_OK)
			return;
		locker.Lock();
	}
#endif

	while (rw_lock_waiter* waiter = lock->waiters) {
		// dequeue
		lock->waiters = waiter->next;

		// unblock thread
		thread_unblock_locked(waiter->thread, B_ERROR);
	}

	lock->name = NULL;

	locker.Unlock();

	free(name);
}
Example #11
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule unless we've explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();
		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
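A classic use of the flags, sketched: an interrupt handler releases a semaphore with B_DO_NOT_RESCHEDULE, since rescheduling must not happen inside the handler itself, and requests it on interrupt exit instead. The device structure and handler are illustrative:

struct example_device {
	sem_id	dataReadySem;
};

static int32
example_interrupt_handler(void* data)
{
	example_device* device = (example_device*)data;

	// Wake the thread waiting in acquire_sem(), but don't reschedule here;
	// returning B_INVOKE_SCHEDULER asks for that on the way out of the
	// interrupt.
	release_sem_etc(device->dataReadySem, 1, B_DO_NOT_RESCHEDULE);
	return B_INVOKE_SCHEDULER;
}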