Example #1
File: kernel.c Project: JoepDriesen/jOS
void kernel_early(multiboot_info_t* mbd, unsigned int magic)
{
	terminal_initialize();

	printf("Starting jOS Kernel\n");
	printf("===================\n\n");
	
	if (magic != MULTIBOOT_BOOTLOADER_MAGIC)
		panic("Bootloader is not Multiboot Compliant");
	
	memory_init(mbd);

	printf("[x] Memory Initialized\n");
	
	gdt_init();
	
	printf("GDT Initialized, entering Protected Mode\n");
	
	printf("Enabling IDT:\n");

	pic_init();

	printf("\t[x] PIC IRQs remapped\n");
	
	idt_init();

	printf("\t[x] IDT Initialized\n");
	if (are_interrupts_enabled())
		printf("\t[x] Hardware Interrupts enabled\n");
	else
		printf("\t[ ] Error enabling Hardware Interrupts\n");
	
	phys_mem_management_init();
	
}
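
Every example on this page gates on are_interrupts_enabled(). Its definition is project-specific and not shown here; on x86 it is commonly implemented by reading the FLAGS register and testing the Interrupt Flag (IF, bit 9). A minimal sketch, assuming GCC-style inline assembly (an illustration, not code from any of the projects above):

#include <stdbool.h>

static inline bool are_interrupts_enabled(void)
{
	unsigned long flags;

	/* Copy FLAGS into a C variable via the stack. */
	__asm__ volatile ("pushf\n\t"
	                  "pop %0"
	                  : "=rm" (flags)
	                  :
	                  : "memory");

	return (flags & (1 << 9)) != 0;	/* bit 9 = EFLAGS.IF */
}
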
Example #2
File: tracing.cpp Project: sahil9912/haiku
tracing_stack_trace*
capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
{
#if	ENABLE_TRACING
	// page_fault_exception() doesn't allow us to gracefully handle a bad
	// address in the stack trace, if interrupts are disabled, so we always
	// restrict the stack traces to the kernel only in this case. A bad address
	// in the kernel stack trace would still cause a panic(), but this is
	// probably even desired.
	if (!are_interrupts_enabled())
		kernelOnly = true;

	tracing_stack_trace* stackTrace
		= (tracing_stack_trace*)alloc_tracing_buffer(
			sizeof(tracing_stack_trace) + maxCount * sizeof(addr_t));

	if (stackTrace != NULL) {
		stackTrace->depth = arch_debug_get_stack_trace(
			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
	}

	return stackTrace;
#else
	return NULL;
#endif
}
Example #3
void
X86PagingStructuresPAE::Delete()
{
	if (are_interrupts_enabled())
		delete this;
	else
		deferred_delete(this);
}
Example #4
void irq_entry(regs *r)
{
   ASSERT(!are_interrupts_enabled());
   DEBUG_VALIDATE_STACK_PTR();
   ASSERT(get_curr_task() != NULL);
   DEBUG_check_not_same_interrupt_nested(regs_intnum(r));

   handle_irq(r);
}
Example #5
bool in_nested_irq_num(int irq_num)
{
   ASSERT(!are_interrupts_enabled());

   /* Start at count - 2: the top entry is the IRQ currently being handled. */
   for (int i = nested_interrupts_count - 2; i >= 0; i--)
      if (nested_interrupts[i] == irq_num)
         return true;

   return false;
}
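
Examples #4, #5, #11, #12, and #13 all walk a small stack of currently-nested interrupt numbers that is never defined on this page. A minimal sketch of the bookkeeping they assume, with disabled interrupts standing in for locking (the names push_nested_interrupt()/pop_nested_interrupt() appear in example #12; the array size and the bodies here are assumptions):

#define MAX_NESTED_INTERRUPTS 32

static int nested_interrupts[MAX_NESTED_INTERRUPTS];
static int nested_interrupts_count;

static inline void push_nested_interrupt(int int_num)
{
   ASSERT(!are_interrupts_enabled());
   ASSERT(nested_interrupts_count < MAX_NESTED_INTERRUPTS);
   nested_interrupts[nested_interrupts_count++] = int_num;
}

static inline void pop_nested_interrupt(void)
{
   ASSERT(!are_interrupts_enabled());
   ASSERT(nested_interrupts_count > 0);
   nested_interrupts_count--;
}
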
Example #6
void
release_spinlock(spinlock *lock)
{
#if DEBUG_SPINLOCK_LATENCIES
	test_latency(lock);
#endif

	if (sNumCPUs > 1) {
		if (are_interrupts_enabled())
			panic("release_spinlock: attempt to release lock %p with "
				"interrupts enabled\n", lock);
#if B_DEBUG_SPINLOCK_CONTENTION
		{
			int32 count = atomic_and(&lock->lock, 0) - 1;
			if (count < 0) {
				panic("release_spinlock: lock %p was already released\n", lock);
			} else {
				// add to the total count -- deal with carry manually
				if ((uint32)atomic_add(&lock->count_low, count) + count
						< (uint32)count) {
					atomic_add(&lock->count_high, 1);
				}
			}
		}
#else
		if (atomic_and((int32*)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
#endif
	} else {
#if DEBUG_SPINLOCKS
		if (are_interrupts_enabled()) {
			panic("release_spinlock: attempt to release lock %p with "
				"interrupts enabled\n", lock);
		}
		if (atomic_and((int32*)lock, 0) != 1)
			panic("release_spinlock: lock %p was already released\n", lock);
#endif
#if DEBUG_SPINLOCK_LATENCIES
		test_latency(lock);
#endif
	}
}
Example #7
File: lock.cpp Project: mariuz/haiku
status_t
_mutex_lock(mutex* lock, bool threadsLocked)
{
#if KDEBUG
	if (!gKernelStartup && !threadsLocked && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	// acquire gThreadSpinlock only if !threadsLocked
	InterruptsSpinLocker locker(gThreadSpinlock, false, !threadsLocked);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %ld", lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using unitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	status_t error = thread_block_locked(waiter.thread);

#if KDEBUG
	if (error == B_OK)
		lock->holder = waiter.thread->id;
#endif

	return error;
}
Example #8
void
acquire_spinlock(spinlock* lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock: attempt to acquire lock %p with interrupts "
			"enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
		int currentCPU = smp_get_current_cpu();
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0)
			process_all_pending_ici(currentCPU);
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT) {
					panic("acquire_spinlock(): Failed to acquire spinlock %p "
						"for a long time!", lock);
					count = 0;
				}

				process_all_pending_ici(currentCPU);
				PAUSE();
			}
			if (atomic_or((int32*)lock, 1) == 0)
				break;
		}

#	if DEBUG_SPINLOCKS
		push_lock_caller(arch_debug_get_caller(), lock);
#	endif
#endif
	} else {
#if DEBUG_SPINLOCKS
		int32 oldValue = atomic_or((int32*)lock, 1);
		if (oldValue != 0) {
			panic("acquire_spinlock: attempt to acquire lock %p twice on "
				"non-SMP system (last caller: %p, value %" B_PRId32 ")", lock,
				find_lock_caller(lock), oldValue);
		}

		push_lock_caller(arch_debug_get_caller(), lock);
#endif
	}
#if DEBUG_SPINLOCK_LATENCIES
	push_latency(lock);
#endif
}
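
The panics in examples #6 and #8 enforce the contract that interrupts stay disabled for the entire hold time of the spinlock. A hypothetical call site using Haiku's disable_interrupts()/restore_interrupts() pair (touch_protected_state() is made up for illustration):

static void
touch_protected_state(spinlock* lock)
{
	cpu_status state = disable_interrupts();
	acquire_spinlock(lock);

	// ... access the data the lock protects ...

	release_spinlock(lock);
	restore_interrupts(state);
}
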
Example #9
MBED_WEAK void hal_critical_section_exit(void)
{
#ifndef FEATURE_UVISOR
    // Interrupts must be disabled on invoking an exit from a critical section
    MBED_ASSERT(!are_interrupts_enabled());
#endif
    state_saved = false;

    // Restore the IRQs to their state prior to entering the critical section
    if (critical_interrupts_enabled == true) {
        __enable_irq();
    }
}
Example #10
MBED_WEAK void hal_critical_section_enter(void)
{
    const bool interrupt_state = are_interrupts_enabled();

    __disable_irq();

    if (state_saved == true) {
        return;
    }

    critical_interrupts_enabled = interrupt_state;
    state_saved = true;
}
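
Examples #9 and #10 are a matched pair: the outermost hal_critical_section_enter() records whether IRQs were on, and hal_critical_section_exit() restores exactly that state, so nested sections are safe. A hypothetical caller (update_shared_counter() is made up for illustration):

#include <stdint.h>

static void update_shared_counter(volatile uint32_t *counter)
{
    hal_critical_section_enter();   /* IRQs off; prior state saved on first entry */
    (*counter)++;                   /* no interrupt can interleave with this update */
    hal_critical_section_exit();    /* re-enables IRQs only if they were on before */
}
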
Example #11
bool in_syscall(void)
{
   ASSERT(!are_interrupts_enabled());
   bool res = false;

   for (int i = nested_interrupts_count - 1; i >= 0; i--) {
      if (nested_interrupts[i] == SYSCALL_SOFT_INTERRUPT) {
         res = true;
         break;
      }
   }

   return res;
}
Example #12
void soft_interrupt_entry(regs *r)
{
   const int int_num = regs_intnum(r);
   ASSERT(!are_interrupts_enabled());

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();

   push_nested_interrupt(int_num);
   disable_preemption();

   if (int_num == SYSCALL_SOFT_INTERRUPT) {

      enable_interrupts_forced();
      {
         handle_syscall(r);
      }
      disable_interrupts_forced(); /* restore IF = 0 */

   } else {

      /*
       * General rule: fault handlers get control with interrupts disabled but
       * they are supposed to call enable_interrupts_forced() ASAP.
       */
      handle_fault(r);

      /* Faults are expected to return with interrupts disabled. */
      ASSERT(!are_interrupts_enabled());
   }

   enable_preemption();
   pop_nested_interrupt();

   if (int_num == SYSCALL_SOFT_INTERRUPT)
      DEBUG_check_preemption_enabled_for_usermode();
}
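
Example #12 re-enables interrupts only around handle_syscall() and forces them back off before returning. On x86, such "forced" helpers typically map straight to the sti/cli instructions; a minimal sketch, assuming GCC-style inline assembly (the project's real implementations may differ):

static inline void enable_interrupts_forced(void)
{
   __asm__ volatile ("sti" : : : "memory");   /* set IF: allow hardware IRQs */
}

static inline void disable_interrupts_forced(void)
{
   __asm__ volatile ("cli" : : : "memory");   /* clear IF: mask hardware IRQs */
}
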
Example #13
static void DEBUG_check_not_same_interrupt_nested(int int_num)
{
   ASSERT(!are_interrupts_enabled());

   for (int i = nested_interrupts_count - 1; i >= 0; i--)
      if (nested_interrupts[i] == int_num) {

         if (int_num == 32) {
            /* tolerate nested IRQ 0 for debug purposes */
            return;
         }

         panic("Same interrupt (%i) twice in nested_interrupts[]", int_num);
      }
}
Example #14
void panic_dump_nested_interrupts(void)
{
   VERIFY(in_panic());
   ASSERT(!are_interrupts_enabled());

   char buf[128];
   int written = 0;

   written += snprintk(buf + written, sizeof(buf), "Interrupts: [ ");

   for (int i = nested_interrupts_count - 1; i >= 0; i--) {
      written += snprintk(buf + written, sizeof(buf) - (u32)written,
                          "%i ", nested_interrupts[i]);
   }

   /* written += */ snprintk(buf + written, sizeof(buf) - (u32) written, "]\n");
   printk("%s", buf);
}
Example #15
File: lock.cpp Project: mylegacy/haiku
status_t
recursive_lock_lock(recursive_lock *lock)
{
	thread_id thread = thread_get_current_thread_id();

	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("recursive_lock_lock: called with interrupts disabled for lock "
			"%p (\"%s\")\n", lock, lock->lock.name);
	}

	if (thread != RECURSIVE_LOCK_HOLDER(lock)) {
		mutex_lock(&lock->lock);
#if !KDEBUG
		lock->holder = thread;
#endif
	}

	lock->recursion++;
	return B_OK;
}
Example #16
void
X86PagingStructures32Bit::Delete()
{
	// remove from global list
	InterruptsSpinLocker locker(sPagingStructuresListLock);
	sPagingStructuresList.Remove(this);
	locker.Unlock();

#if 0
	// this sanity check can be enabled when corruption due to
	// overwriting an active page directory is suspected
	uint32 activePageDirectory = x86_read_cr3();
	if (activePageDirectory == pgdir_phys)
		panic("deleting a still active page directory\n");
#endif

	if (are_interrupts_enabled())
		delete this;
	else
		deferred_delete(this);
}
Example #17
static void
acquire_spinlock_nocheck(spinlock *lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("acquire_spinlock_nocheck: attempt to acquire lock %p with "
			"interrupts enabled", lock);
	}
#endif

	if (sNumCPUs > 1) {
#if B_DEBUG_SPINLOCK_CONTENTION
		while (atomic_add(&lock->lock, 1) != 0) {
		}
#else
		while (1) {
			uint32 count = 0;
			while (*lock != 0) {
				if (++count == SPINLOCK_DEADLOCK_COUNT_NO_CHECK) {
					panic("acquire_spinlock(): Failed to acquire spinlock %p "
						"for a long time!", lock);
					count = 0;
				}

				PAUSE();
			}

			if (atomic_or((int32*)lock, 1) == 0)
				break;
		}
#endif
	} else {
#if DEBUG_SPINLOCKS
		if (atomic_or((int32*)lock, 1) != 0) {
			panic("acquire_spinlock_nocheck: attempt to acquire lock %p twice "
				"on non-SMP system\n", lock);
		}
#endif
	}
}
Example #18
bool
try_acquire_spinlock(spinlock* lock)
{
#if DEBUG_SPINLOCKS
	if (are_interrupts_enabled()) {
		panic("try_acquire_spinlock: attempt to acquire lock %p with "
			"interrupts enabled", lock);
	}
#endif

#if B_DEBUG_SPINLOCK_CONTENTION
	if (atomic_add(&lock->lock, 1) != 0)
		return false;
#else
	if (atomic_or((int32*)lock, 1) != 0)
		return false;

#	if DEBUG_SPINLOCKS
	push_lock_caller(arch_debug_get_caller(), lock);
#	endif
#endif

	return true;
}
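
Unlike acquire_spinlock(), try_acquire_spinlock() in example #18 never spins: it either takes the lock immediately or reports contention. A hypothetical caller that backs off instead of spinning (try_update() is made up for illustration):

static bool
try_update(spinlock* lock)
{
	if (!try_acquire_spinlock(lock))
		return false;	// contended -- let the caller retry later

	// ... short critical section ...

	release_spinlock(lock);
	return true;
}
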
Example #19
File: sem.cpp Project: looncraz/haiku
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;
#if KDEBUG
	if ((flags & B_DO_NOT_RESCHEDULE) == 0 && !are_interrupts_enabled()) {
		panic("release_sem_etc(): called with interrupts disabled and "
			"rescheduling allowed for sem_id %" B_PRId32, id);
	}
#endif

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %" B_PRId32 " tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		SpinLocker schedulerLock(entry->thread->scheduler_lock);
		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but still queued, which
			// means the acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've explicitly
	// been told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		SpinLocker _(thread_get_current_thread()->scheduler_lock);
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
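
release_sem_etc() is the workhorse behind Haiku's public semaphore calls; plain release_sem() amounts to releasing a single count with default flags. A sketch of that relationship (the wrapper in the actual tree may differ in detail):

status_t
release_sem(sem_id id)
{
	return release_sem_etc(id, 1, 0);
}
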
Example #20
File: lock.cpp Project: mylegacy/haiku
status_t
_mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
	if (!gKernelStartup && !are_interrupts_enabled()) {
		panic("_mutex_lock(): called with interrupts disabled for lock %p",
			lock);
	}
#endif

	InterruptsSpinLocker locker(lock->lock);

	// Might have been released after we decremented the count, but before
	// we acquired the spinlock.
#if KDEBUG
	if (lock->holder < 0) {
		lock->holder = thread_get_current_thread_id();
		return B_OK;
	} else if (lock->holder == thread_get_current_thread_id()) {
		panic("_mutex_lock(): double lock of %p by thread %" B_PRId32, lock,
			lock->holder);
	} else if (lock->holder == 0)
		panic("_mutex_lock(): using unitialized lock %p", lock);
#else
	if ((lock->flags & MUTEX_FLAG_RELEASED) != 0) {
		lock->flags &= ~MUTEX_FLAG_RELEASED;
		return B_OK;
	}
#endif

	// enqueue in waiter list
	mutex_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;

	if (lock->waiters != NULL) {
		lock->waiters->last->next = &waiter;
	} else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_MUTEX, lock);
	locker.Unlock();

	status_t error = thread_block_with_timeout(timeoutFlags, timeout);

	if (error == B_OK) {
#if KDEBUG
		lock->holder = waiter.thread->id;
#endif
	} else {
		locker.Lock();

		// If the timeout occurred, we must remove our waiter structure from
		// the queue.
		mutex_waiter* previousWaiter = NULL;
		mutex_waiter* otherWaiter = lock->waiters;
		while (otherWaiter != NULL && otherWaiter != &waiter) {
			previousWaiter = otherWaiter;
			otherWaiter = otherWaiter->next;
		}
		if (otherWaiter == &waiter) {
			// the structure is still in the list -- dequeue
			if (&waiter == lock->waiters) {
				if (waiter.next != NULL)
					waiter.next->last = waiter.last;
				lock->waiters = waiter.next;
			} else {
				if (waiter.next == NULL)
					lock->waiters->last = previousWaiter;
				previousWaiter->next = waiter.next;
			}

#if !KDEBUG
			// we need to fix the lock count
			if (atomic_add(&lock->count, 1) == -1) {
				// This means we were the only thread waiting for the lock and
				// the lock owner has already called atomic_add() in
				// mutex_unlock(). That is we probably would get the lock very
				// soon (if the lock holder has a low priority, that might
				// actually take rather long, though), but the timeout already
				// occurred, so we don't try to wait. Just increment the ignore
				// unlock count.
				lock->ignore_unlock_count++;
			}
#endif
		}
	}

	return error;
}
Example #21
status_t
switch_sem_etc(sem_id semToBeReleased, sem_id id, int32 count,
	uint32 flags, bigtime_t timeout)
{
	int slot = id % sMaxSems;
	int state;
	status_t status = B_OK;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;

	if (!are_interrupts_enabled()) {
		panic("switch_sem_etc: called with interrupts disabled for sem %ld\n",
			id);
	}

	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 || (flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
			== (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) {
		return B_BAD_VALUE;
	}

	state = disable_interrupts();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		TRACE(("switch_sem_etc: bad sem %ld\n", id));
		status = B_BAD_SEM_ID;
		goto err;
	}

	// TODO: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to acquire kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		status = B_NOT_ALLOWED;
		goto err;
	}

	if (sSems[slot].u.used.count - count < 0) {
		if ((flags & B_RELATIVE_TIMEOUT) != 0 && timeout <= 0) {
			// immediate timeout
			status = B_WOULD_BLOCK;
			goto err;
		} else if ((flags & B_ABSOLUTE_TIMEOUT) != 0 && timeout < 0) {
			// absolute negative timeout
			status = B_TIMED_OUT;
			goto err;
		}
	}

	KTRACE("switch_sem_etc(semToBeReleased: %ld, sem: %ld, count: %ld, "
		"flags: 0x%lx, timeout: %lld)", semToBeReleased, id, count, flags,
		timeout);

	if ((sSems[slot].u.used.count -= count) < 0) {
		// we need to block
		Thread *thread = thread_get_current_thread();

		TRACE(("switch_sem_etc(id = %ld): block name = %s, thread = %p,"
			" name = %s\n", id, sSems[slot].u.used.name, thread, thread->name));

		// do a quick check to see if the thread has any pending signals
		// this should catch most of the cases where the thread had a signal
		SpinLocker schedulerLocker(gSchedulerLock);
		if (thread_is_interrupted(thread, flags)) {
			schedulerLocker.Unlock();
			sSems[slot].u.used.count += count;
			status = B_INTERRUPTED;
				// the other semaphore will be released later
			goto err;
		}

		if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) == 0)
			timeout = B_INFINITE_TIMEOUT;

		// enqueue in the semaphore queue and get ready to wait
		queued_thread queueEntry(thread, count);
		sSems[slot].queue.Add(&queueEntry);
		queueEntry.queued = true;

		thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SEMAPHORE,
			(void*)(addr_t)id);

		RELEASE_SEM_LOCK(sSems[slot]);

		// release the other semaphore, if any
		if (semToBeReleased >= 0) {
			release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
			semToBeReleased = -1;
		}

		schedulerLocker.Lock();

		status_t acquireStatus = timeout == B_INFINITE_TIMEOUT
			? thread_block_locked(thread)
			: thread_block_with_timeout_locked(flags, timeout);

		schedulerLocker.Unlock();
		GRAB_SEM_LOCK(sSems[slot]);

		// If we're still queued, this means the acquisition failed, and we
		// need to remove our entry and (potentially) wake up other threads.
		if (queueEntry.queued)
			remove_thread_from_sem(&queueEntry, &sSems[slot]);

		if (acquireStatus >= B_OK) {
			sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
			sSems[slot].u.used.last_acquire_count = count;
#endif
		}

		RELEASE_SEM_LOCK(sSems[slot]);
		restore_interrupts(state);

		TRACE(("switch_sem_etc(sem %ld): exit block name %s, "
			"thread %ld (%s)\n", id, sSems[slot].u.used.name, thread->id,
			thread->name));
		KTRACE("switch_sem_etc() done: 0x%lx", acquireStatus);
		return acquireStatus;
	} else {
		sSems[slot].u.used.net_count -= count;
		sSems[slot].u.used.last_acquirer = thread_get_current_thread_id();
#if DEBUG_SEM_LAST_ACQUIRER
		sSems[slot].u.used.last_acquire_count = count;
#endif
	}

err:
	RELEASE_SEM_LOCK(sSems[slot]);
	restore_interrupts(state);

	if (status == B_INTERRUPTED && semToBeReleased >= 0) {
		// depending on when we were interrupted, we need to still
		// release the semaphore to always leave in a consistent
		// state
		release_sem_etc(semToBeReleased, 1, B_DO_NOT_RESCHEDULE);
	}

#if 0
	if (status == B_NOT_ALLOWED)
		_user_debugger("Thread tried to acquire kernel semaphore.");
#endif

	KTRACE("switch_sem_etc() done: 0x%lx", status);

	return status;
}
Example #22
int get_nested_interrupts_count(void)
{
   ASSERT(!are_interrupts_enabled());
   return nested_interrupts_count;
}