static status_t
delete_sem_internal(sem_id id, bool checkPermission)
{
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;

	int32 slot = id % sMaxSems;

	cpu_status state = disable_interrupts();
	GRAB_SEM_LIST_LOCK();
	GRAB_SEM_LOCK(sSems[slot]);

	if (sSems[slot].id != id) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		TRACE(("delete_sem: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	if (checkPermission
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		RELEASE_SEM_LOCK(sSems[slot]);
		RELEASE_SEM_LIST_LOCK();
		restore_interrupts(state);
		dprintf("thread %ld tried to delete kernel semaphore %ld.\n",
			thread_get_current_thread_id(), id);
		return B_NOT_ALLOWED;
	}

	// unlink the semaphore from its owning team
	if (sSems[slot].u.used.owner >= 0) {
		list_remove_link(&sSems[slot].u.used.team_link);
		sSems[slot].u.used.owner = -1;
	} else
		panic("sem %ld has no owner", id);

	RELEASE_SEM_LIST_LOCK();

	char* name;
	uninit_sem_locked(sSems[slot], &name);
		// invalidates the slot, unblocks any waiters, and drops the sem
		// lock; the name is handed back so it can be freed without locks

	SpinLocker schedulerLocker(gSchedulerLock);
	scheduler_reschedule_if_necessary_locked();
	schedulerLocker.Unlock();

	restore_interrupts(state);

	free(name);
	return B_OK;
}
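// A minimal sketch (not part of the code above) of how delete_sem_internal()
// is typically reached: an in-kernel wrapper that skips the permission check,
// and a syscall entry point that enables it so userland cannot delete
// kernel-owned semaphores. The wrapper names follow Haiku's conventions but
// are shown here as assumptions.

status_t
delete_sem(sem_id id)
{
	// kernel callers may delete any semaphore, including kernel-owned ones
	return delete_sem_internal(id, false);
}

status_t
_user_delete_sem(sem_id id)
{
	// syscall path: refuse to delete semaphores owned by the kernel team
	return delete_sem_internal(id, true);
}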
status_t
release_sem_etc(sem_id id, int32 count, uint32 flags)
{
	int32 slot = id % sMaxSems;

	if (gKernelStartup)
		return B_OK;
	if (sSemsActive == false)
		return B_NO_MORE_SEMS;
	if (id < 0)
		return B_BAD_SEM_ID;
	if (count <= 0 && (flags & B_RELEASE_ALL) == 0)
		return B_BAD_VALUE;

	InterruptsLocker _;
	SpinLocker semLocker(sSems[slot].lock);

	if (sSems[slot].id != id) {
		TRACE(("sem_release_etc: invalid sem_id %ld\n", id));
		return B_BAD_SEM_ID;
	}

	// ToDo: the B_CHECK_PERMISSION flag should be made private, as it
	//	doesn't have any use outside the kernel
	if ((flags & B_CHECK_PERMISSION) != 0
		&& sSems[slot].u.used.owner == team_get_kernel_team_id()) {
		dprintf("thread %ld tried to release kernel semaphore.\n",
			thread_get_current_thread_id());
		return B_NOT_ALLOWED;
	}

	KTRACE("release_sem_etc(sem: %ld, count: %ld, flags: 0x%lx)", id, count,
		flags);

	sSems[slot].u.used.last_acquirer = -sSems[slot].u.used.last_acquirer;
#if DEBUG_SEM_LAST_ACQUIRER
	sSems[slot].u.used.last_releaser = thread_get_current_thread_id();
	sSems[slot].u.used.last_release_count = count;
#endif

	if (flags & B_RELEASE_ALL) {
		count = sSems[slot].u.used.net_count - sSems[slot].u.used.count;

		// is there anything to do for us at all?
		if (count == 0)
			return B_OK;

		// Don't release more than necessary -- there might be interrupted/
		// timed out threads in the queue.
		flags |= B_RELEASE_IF_WAITING_ONLY;
	}

	// Grab the scheduler lock, so thread_is_blocked() is reliable (due to
	// possible interruptions or timeouts, it wouldn't be otherwise).
	SpinLocker schedulerLocker(gSchedulerLock);

	while (count > 0) {
		queued_thread* entry = sSems[slot].queue.Head();
		if (entry == NULL) {
			if ((flags & B_RELEASE_IF_WAITING_ONLY) == 0) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
			}
			break;
		}

		if (thread_is_blocked(entry->thread)) {
			// The thread is still waiting. If its count is satisfied,
			// unblock it. Otherwise we can't unblock any other thread.
			if (entry->count > sSems[slot].u.used.net_count + count) {
				sSems[slot].u.used.count += count;
				sSems[slot].u.used.net_count += count;
				break;
			}

			thread_unblock_locked(entry->thread, B_OK);

			int delta = min_c(count, entry->count);
			sSems[slot].u.used.count += delta;
			sSems[slot].u.used.net_count += delta - entry->count;
			count -= delta;
		} else {
			// The thread is no longer waiting, but is still queued, which
			// means the acquisition failed and we can just remove it.
			sSems[slot].u.used.count += entry->count;
		}

		sSems[slot].queue.Remove(entry);
		entry->queued = false;
	}

	schedulerLocker.Unlock();

	if (sSems[slot].u.used.count > 0)
		notify_sem_select_events(&sSems[slot], B_EVENT_ACQUIRE_SEMAPHORE);

	// If we've unblocked another thread, reschedule, unless we've been
	// explicitly told not to.
	if ((flags & B_DO_NOT_RESCHEDULE) == 0) {
		semLocker.Unlock();

		schedulerLocker.Lock();
		scheduler_reschedule_if_necessary_locked();
	}

	return B_OK;
}
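// A hedged usage sketch for release_sem_etc(). The semaphore, its creation,
// and the caller contexts are assumptions for illustration; the flags
// (B_DO_NOT_RESCHEDULE, B_RELEASE_ALL) are the ones handled above.

static sem_id sRequestSem = -1;
	// assumed to be created elsewhere, e.g. create_sem(0, "pending requests")

static void
signal_requests_done(int32 numDone)
{
	// Wake up to numDone waiters, but defer the reschedule, as one would
	// from an interrupt handler or while holding other locks.
	release_sem_etc(sRequestSem, numDone, B_DO_NOT_RESCHEDULE);
}

static void
wake_all_waiters(void)
{
	// With B_RELEASE_ALL the count argument is ignored: as implemented
	// above, the count is recomputed so that exactly the currently
	// waiting threads are satisfied, and no surplus is left behind.
	release_sem_etc(sRequestSem, 1, B_RELEASE_ALL);
}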
int
send_signal_etc(pid_t id, uint signal, uint32 flags)
{
	status_t status = B_BAD_THREAD_ID;
	struct thread *thread;
	cpu_status state = 0;

	if (signal < 0 || signal > MAX_SIGNO)
		return B_BAD_VALUE;

	T(SendSignal(id, signal, flags));

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		state = disable_interrupts();

	if (id > 0) {
		// send a signal to the specified thread
		GRAB_THREAD_LOCK();

		thread = thread_get_thread_struct_locked(id);
		if (thread != NULL)
			status = deliver_signal(thread, signal, flags);
	} else {
		// send a signal to the specified process group
		// (the absolute value of the id)
		struct process_group *group;

		// TODO: handle -1 correctly
		if (id == 0 || id == -1) {
			// send a signal to the current team
			id = thread_get_current_thread()->team->id;
		} else
			id = -id;

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			GRAB_TEAM_LOCK();

		group = team_get_process_group_locked(NULL, id);
		if (group != NULL) {
			struct team *team, *next;

			// send a signal to all teams in this process group
			for (team = group->teams; team != NULL; team = next) {
				next = team->group_next;
				id = team->id;

				GRAB_THREAD_LOCK();

				thread = thread_get_thread_struct_locked(id);
				if (thread != NULL) {
					// we don't stop because of an error sending the signal;
					// we rather want to send as many signals as possible
					status = deliver_signal(thread, signal, flags);
				}

				RELEASE_THREAD_LOCK();
			}
		}

		if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
			RELEASE_TEAM_LOCK();

		GRAB_THREAD_LOCK();
	}

	if ((flags & (B_DO_NOT_RESCHEDULE | SIGNAL_FLAG_TEAMS_LOCKED)) == 0)
		scheduler_reschedule_if_necessary_locked();

	RELEASE_THREAD_LOCK();

	if ((flags & SIGNAL_FLAG_TEAMS_LOCKED) == 0)
		restore_interrupts(state);

	return status;
}
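// A hedged sketch of the two dispatch modes of send_signal_etc() as
// implemented above. The caller names and contexts are assumptions;
// SIGKILLTHR is Haiku's thread-directed kill signal.

static void
kill_single_thread(thread_id victim)
{
	// id > 0: the signal is delivered to exactly one thread
	send_signal_etc(victim, SIGKILLTHR, 0);
}

static void
interrupt_process_group(pid_t groupID)
{
	// id < 0: the signal is delivered to every team in the process group
	// (to each team's main thread, whose id equals the team id);
	// rescheduling is left to the caller.
	send_signal_etc(-groupID, SIGINT, B_DO_NOT_RESCHEDULE);
}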