static void
notify_loading_app(status_t result, bool suspend)
{
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = result;
		loadingInfo->done = true;

		// we're done with the team stuff, get the scheduler lock instead
		teamLocker.Unlock();

		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// wake up the waiting thread
		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(loadingInfo->thread);

		// suspend ourselves, if desired
		if (suspend) {
			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
			scheduler_reschedule();
		}
	}
}
status_t
user_timer_get_clock(clockid_t clockID, bigtime_t& _time)
{
	switch (clockID) {
		case CLOCK_MONOTONIC:
			_time = system_time();
			return B_OK;

		case CLOCK_REALTIME:
			_time = real_time_clock_usecs();
			return B_OK;

		case CLOCK_THREAD_CPUTIME_ID:
		{
			Thread* thread = thread_get_current_thread();
			InterruptsSpinLocker timeLocker(thread->time_lock);
			_time = thread->CPUTime(false);
			return B_OK;
		}

		case CLOCK_PROCESS_USER_CPUTIME_ID:
		{
			Team* team = thread_get_current_thread()->team;
			InterruptsSpinLocker timeLocker(team->time_lock);
			_time = team->UserCPUTime();
			return B_OK;
		}

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
		{
			// get the ID of the target team (or the respective placeholder)
			team_id teamID;
			if (clockID == CLOCK_PROCESS_CPUTIME_ID) {
				teamID = B_CURRENT_TEAM;
			} else {
				if (clockID < 0)
					return B_BAD_VALUE;
				if (clockID == team_get_kernel_team_id())
					return B_NOT_ALLOWED;

				teamID = clockID;
			}

			// get the team
			Team* team = Team::Get(teamID);
			if (team == NULL)
				return B_BAD_VALUE;
			BReference<Team> teamReference(team, true);

			// get the time
			InterruptsSpinLocker timeLocker(team->time_lock);
			_time = team->CPUTime(false);

			return B_OK;
		}
	}
}
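// For reference, the clockID values dispatched above are the same constants
// a userland caller passes to POSIX clock_gettime(). A minimal userland
// sketch (plain POSIX, not part of this kernel code):

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec monotonic, processTime;

	// CLOCK_MONOTONIC takes the system_time() branch above
	clock_gettime(CLOCK_MONOTONIC, &monotonic);

	// CLOCK_PROCESS_CPUTIME_ID takes the B_CURRENT_TEAM branch
	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &processTime);

	printf("monotonic: %lld.%09ld\n", (long long)monotonic.tv_sec,
		monotonic.tv_nsec);
	printf("process CPU time: %lld.%09ld\n", (long long)processTime.tv_sec,
		processTime.tv_nsec);
	return 0;
}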
semaphore_t *
semaphore_create(int value)
{
	interrupt_status_t intr_status;
	static int next = 0;
	int i;
	int sem_id;

	KERNEL_ASSERT(value >= 0);

	intr_status = _interrupt_disable();
	spinlock_acquire(&semaphore_table_slock);

	/* Find free semaphore from semaphore table */
	for (i = 0; i < CONFIG_MAX_SEMAPHORES; i++) {
		sem_id = next;
		next = (next + 1) % CONFIG_MAX_SEMAPHORES;
		if (semaphore_table[sem_id].creator == -1) {
			semaphore_table[sem_id].creator = thread_get_current_thread();
			break;
		}
	}

	spinlock_release(&semaphore_table_slock);
	_interrupt_set_state(intr_status);

	if (i == CONFIG_MAX_SEMAPHORES) {
		/* semaphore table does not have any free semaphores, creation
		   fails */
		return NULL;
	}

	semaphore_table[sem_id].value = value;
	spinlock_reset(&semaphore_table[sem_id].slock);

	return &semaphore_table[sem_id];
}
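/* A typical use of the returned semaphore, assuming the matching
   semaphore_P()/semaphore_V() primitives from the same kernel; this is an
   illustrative sketch, not code from the source tree. */

static semaphore_t *mutex;

void
subsystem_init(void)
{
	mutex = semaphore_create(1);	/* initial value 1 => binary semaphore */
	if (mutex == NULL)
		KERNEL_PANIC("Out of semaphores");
}

void
critical_work(void)
{
	semaphore_P(mutex);	/* decrement; blocks while the value is 0 */
	/* ... critical section ... */
	semaphore_V(mutex);	/* increment; wakes one waiter, if any */
}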
status_t
M68KVMTranslationMap040::ClearFlags(addr_t va, uint32 flags)
{
	return ENOSYS;
#if 0
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & M68K_PDE_PRESENT) == 0) {
		// no pagetable here
		return B_OK;
	}

	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? M68K_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? M68K_PTE_ACCESSED : 0);

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
#endif
}
status_t
M68KVMTranslationMap040::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;
	TRACE("040::Query(0x%lx,)\n", va);

	int index = VADDR_TO_PRENT(va);
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	if (PRE_TYPE(pr[index]) != DT_ROOT) {
		// no pagetable here
		return B_OK;
	}

	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_directory_entry* pd = (page_directory_entry*)MapperGetPageTableAt(
		pr[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[index]) != DT_DIR) {
		// no pagetable here
		return B_OK;
	}

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(va);
	if (PTE_TYPE(pt[index]) == DT_INDIRECT) {
		pt = (page_table_entry*)MapperGetPageTableAt(
			pt[index] & M68K_PIE_ADDRESS_MASK);
		index = 0;
	}

	page_table_entry entry = pt[index];

	*_physical = entry & M68K_PTE_ADDRESS_MASK;

	// read in the page state flags
	if ((entry & M68K_PTE_SUPERVISOR) == 0) {
		*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & M68K_PTE_READONLY) == 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & M68K_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & M68K_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((PTE_TYPE(entry) == DT_PAGE) ? PAGE_PRESENT : 0);

	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}
/*!	Wait for the specified signals, and return the signal retrieved in
	\a _signal.
*/
int
sigwait(const sigset_t *set, int *_signal)
{
	struct thread *thread = thread_get_current_thread();

	while (!has_signals_pending(thread)) {
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_SIGNAL, NULL);
		thread_block();
	}

	int signalsPending = atomic_get(&thread->sig_pending) & *set;

	update_current_thread_signals_flag();

	if (signalsPending) {
		// select the lowest pending signal to return in _signal
		for (int signal = 1; signal < NSIG; signal++) {
			if ((SIGNAL_TO_MASK(signal) & signalsPending) != 0) {
				*_signal = signal;
				return B_OK;
			}
		}
	}

	return B_INTERRUPTED;
}
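// From userland, sigwait() is the POSIX synchronous-delivery interface to
// the pending-signal machinery above. A minimal sketch (plain POSIX, not
// part of this kernel code): block a signal first so it stays pending
// instead of being delivered asynchronously, then retrieve it.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t set;
	int receivedSignal = 0;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	// returns 0 (B_OK in the kernel) once SIGUSR1 becomes pending
	if (sigwait(&set, &receivedSignal) == 0)
		printf("received signal %d\n", receivedSignal);

	return 0;
}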
/*!	Sets the alarm timer for the current thread. The timer fires at the
	specified time in the future, periodically or just once, as determined
	by \a mode.
	\return the time left until a previously set alarm would have fired.
*/
bigtime_t
set_alarm(bigtime_t time, uint32 mode)
{
	struct thread *thread = thread_get_current_thread();
	bigtime_t remainingTime = 0;

	ASSERT(B_ONE_SHOT_RELATIVE_ALARM == B_ONE_SHOT_RELATIVE_TIMER);
		// just to be sure no one changes the headers some day

	TRACE(("set_alarm: thread = %p\n", thread));

	if (thread->alarm.period)
		remainingTime = (bigtime_t)thread->alarm.schedule_time
			- system_time();

	cancel_timer(&thread->alarm);

	if (time != B_INFINITE_TIMEOUT)
		add_timer(&thread->alarm, &alarm_event, time, mode);
	else {
		// this marks the alarm as canceled (for returning the remaining
		// time)
		thread->alarm.period = 0;
	}

	return remainingTime;
}
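// From userland, the public BeOS/Haiku set_alarm() delivers SIGALRM to the
// calling thread when the timer fires. A minimal sketch (assuming the
// documented Kernel Kit API; illustrative only):

#include <signal.h>
#include <unistd.h>
#include <OS.h>

static void
alarm_handler(int signalNumber)
{
	// invoked when the alarm fires
}

int
main(void)
{
	signal(SIGALRM, alarm_handler);

	// one-shot alarm, one second (1000000 microseconds) from now
	set_alarm(1000000, B_ONE_SHOT_RELATIVE_ALARM);

	pause();	// wait until SIGALRM arrives
	return 0;
}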
int
sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
{
	struct thread *thread = thread_get_current_thread();
	sigset_t oldMask = atomic_get(&thread->sig_block_mask);

	if (set != NULL) {
		T(SigProcMask(how, *set));

		switch (how) {
			case SIG_BLOCK:
				atomic_or(&thread->sig_block_mask,
					*set & BLOCKABLE_SIGNALS);
				break;
			case SIG_UNBLOCK:
				atomic_and(&thread->sig_block_mask, ~*set);
				break;
			case SIG_SETMASK:
				atomic_set(&thread->sig_block_mask,
					*set & BLOCKABLE_SIGNALS);
				break;
			default:
				return B_BAD_VALUE;
		}

		update_current_thread_signals_flag();
	}

	if (oldSet != NULL)
		*oldSet = oldMask;

	return B_OK;
}
void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker locker(gThreadSpinlock);

	update_thread_signals_flag(thread_get_current_thread());
}
status_t
X86VMTranslationMap64Bit::ClearFlags(addr_t address, uint32 flags)
{
	TRACE("X86VMTranslationMap64Bit::ClearFlags(%#" B_PRIxADDR ", %#"
		B_PRIx32 ")\n", address, flags);

	ThreadCPUPinner pinner(thread_get_current_thread());

	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), address, fIsKernelMap, false,
		NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_OK;

	uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_64_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? X86_64_PTE_ACCESSED : 0);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
		flagsToClear);

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(address);

	return B_OK;
}
IFrameScope(struct iframe *iframe)
{
	fThread = thread_get_current_thread();
	if (fThread)
		arm_push_iframe(&fThread->arch_info.iframes, iframe);
	else
		arm_push_iframe(&gBootFrameStack, iframe);
}
static void
scheduler_reschedule_no_op(void)
{
	struct thread* thread = thread_get_current_thread();
	if (thread != NULL && thread->next_state != B_THREAD_READY)
		panic("scheduler_reschedule_no_op() called in non-ready thread");
}
status_t
_user_system_profiler_next_buffer(size_t bytesRead, uint64* _droppedEvents)
{
	if (_droppedEvents != NULL && !IS_USER_ADDRESS(_droppedEvents))
		return B_BAD_ADDRESS;

	team_id team = thread_get_current_thread()->team->id;

	InterruptsSpinLocker locker(sProfilerLock);
	if (sProfiler == NULL || sProfiler->TeamID() != team)
		return B_BAD_VALUE;

	// get a reference to the profiler
	SystemProfiler* profiler = sProfiler;
	BReference<SystemProfiler> reference(profiler);
	locker.Unlock();

	uint64 droppedEvents;
	status_t error = profiler->NextBuffer(bytesRead,
		_droppedEvents != NULL ? &droppedEvents : NULL);
	if (error == B_OK && _droppedEvents != NULL)
		user_memcpy(_droppedEvents, &droppedEvents, sizeof(droppedEvents));

	return error;
}
static void
notify_loading_app(status_t result, bool suspend)
{
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = result;

		// we're done with the team stuff, get the scheduler lock instead
		teamLocker.Unlock();

		thread_prepare_suspend();

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();

		// suspend ourselves, if desired
		if (suspend)
			thread_suspend(true);
	}
}
void
SystemProfiler::_DoSample()
{
	Thread* thread = thread_get_current_thread();
	int cpu = thread->cpu->cpu_num;
	CPUProfileData& cpuData = fCPUData[cpu];

	// get the samples
	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);

	InterruptsSpinLocker locker(fLock);

	system_profiler_samples* event = (system_profiler_samples*)
		_AllocateBuffer(sizeof(system_profiler_samples)
				+ count * sizeof(addr_t),
			B_SYSTEM_PROFILER_SAMPLES, cpu, count);
	if (event == NULL)
		return;

	event->thread = thread->id;
	memcpy(event->samples, cpuData.buffer, count * sizeof(addr_t));

	fHeader->size = fBufferSize;
}
static status_t
rw_lock_wait(rw_lock* lock, bool writer, InterruptsSpinLocker& locker)
{
	// enqueue in waiter list
	rw_lock_waiter waiter;
	waiter.thread = thread_get_current_thread();
	waiter.next = NULL;
	waiter.writer = writer;

	if (lock->waiters != NULL)
		lock->waiters->last->next = &waiter;
	else
		lock->waiters = &waiter;

	lock->waiters->last = &waiter;

	// block
	thread_prepare_to_block(waiter.thread, 0, THREAD_BLOCK_TYPE_RW_LOCK,
		lock);
	locker.Unlock();

	status_t result = thread_block();

	locker.Lock();
	return result;
}
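// The enqueue above uses an intrusive FIFO in which only the head node's
// 'last' pointer is meaningful, giving O(1) append without a separate queue
// object. A standalone sketch of the idiom (hypothetical names, not kernel
// code):

#include <cassert>
#include <cstddef>

struct Waiter {
	Waiter*	next = nullptr;
	Waiter*	last = nullptr;	// valid on the head node only
};

struct WaiterQueue {
	Waiter*	waiters = nullptr;

	void Enqueue(Waiter* waiter)
	{
		if (waiters != nullptr)
			waiters->last->next = waiter;	// append after the current tail
		else
			waiters = waiter;				// first waiter becomes the head
		waiters->last = waiter;				// head always tracks the tail
	}

	Waiter* Dequeue()
	{
		Waiter* head = waiters;
		if (head == nullptr)
			return nullptr;
		waiters = head->next;
		if (waiters != nullptr)
			waiters->last = head->last;		// hand the tail pointer onward
		return head;
	}
};

int
main()
{
	Waiter a, b, c;
	WaiterQueue queue;
	queue.Enqueue(&a);
	queue.Enqueue(&b);
	queue.Enqueue(&c);
	assert(queue.Dequeue() == &a);
	assert(queue.Dequeue() == &b);
	assert(queue.Dequeue() == &c);
	return 0;
}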
static int
dump_images_list(int argc, char **argv)
{
	struct image *image = NULL;
	Team *team;

	if (argc > 1) {
		team_id id = strtol(argv[1], NULL, 0);
		team = team_get_team_struct_locked(id);
		if (team == NULL) {
			kprintf("No team with ID %ld found\n", id);
			return 1;
		}
	} else
		team = thread_get_current_thread()->team;

	kprintf("Registered images of team %ld\n", team->id);
	kprintf("    ID text       size    data       size    name\n");

	while ((image = (struct image*)list_get_next_item(&team->image_list,
			image)) != NULL) {
		image_info *info = &image->info;

		kprintf("%6ld %p %-7ld %p %-7ld %s\n", info->id, info->text,
			info->text_size, info->data, info->data_size, info->name);
	}

	return 0;
}
status_t
IORequest::Init(off_t offset, size_t firstVecOffset, const iovec* vecs,
	size_t count, size_t length, bool write, uint32 flags)
{
	fBuffer = IOBuffer::Create(count, (flags & B_VIP_IO_REQUEST) != 0);
	if (fBuffer == NULL)
		return B_NO_MEMORY;

	fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);

	fOwner = NULL;
	fOffset = offset;
	fLength = length;
	fRelativeParentOffset = 0;
	fTransferSize = 0;
	fFlags = flags;

	struct thread* thread = thread_get_current_thread();
	fTeam = thread->team->id;
	fThread = thread->id;
	fIsWrite = write;
	fPartialTransfer = false;
	fSuppressChildNotifications = false;

	// these are for iteration
	fVecIndex = 0;
	fVecOffset = 0;
	fRemainingBytes = length;

	fPendingChildren = 0;

	fStatus = 1;

	return B_OK;
}
status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no pagetable here
		return B_OK;
	}
#if 0 //IRA
	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? ARM_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? ARM_PTE_ACCESSED : 0);
#else
	uint32 flagsToClear = 0;
#endif
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	//XXX IRA if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}
status_t
X86VMTranslationMap64Bit::Map(addr_t virtualAddress,
	phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
	vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMap64Bit::Map(%#" B_PRIxADDR ", %#"
		B_PRIxPHYSADDR ")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address, allocating new tables
	// if required. Shouldn't fail.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
		true, reservation, fPageMapper, fMapCount);
	ASSERT(entry != NULL);

	// The entry should not already exist.
	ASSERT_PRINT((*entry & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	// Fill in the table entry.
	X86PagingMethod64Bit::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	// Note: We don't need to invalidate the TLB for this address, as
	// previously the entry was not present and the TLB doesn't cache those
	// entries.

	fMapCount++;

	return 0;
}
static int
dump_images_list(int argc, char **argv)
{
	struct image *image = NULL;
	Team *team;

	if (argc > 1) {
		team_id id = strtol(argv[1], NULL, 0);
		team = team_get_team_struct_locked(id);
		if (team == NULL) {
			kprintf("No team with ID %" B_PRId32 " found\n", id);
			return 1;
		}
	} else
		team = thread_get_current_thread()->team;

	kprintf("Registered images of team %" B_PRId32 "\n", team->id);
	kprintf("    ID %-*s   size    %-*s   size    name\n",
		B_PRINTF_POINTER_WIDTH, "text", B_PRINTF_POINTER_WIDTH, "data");

	while ((image = (struct image*)list_get_next_item(&team->image_list,
			image)) != NULL) {
		image_info *info = &image->info.basic_info;

		kprintf("%6" B_PRId32 " %p %-7" B_PRId32 " %p %-7" B_PRId32 " %s\n",
			info->id, info->text, info->text_size, info->data,
			info->data_size, info->name);
	}

	return 0;
}
ReadRequest(file_cookie* cookie)
	:
	fThread(thread_get_current_thread()),
	fCookie(cookie),
	fNotified(true)
{
	B_INITIALIZE_SPINLOCK(&fLock);
}
SigProcMask(int how, sigset_t mask)
	:
	fHow(how),
	fMask(mask),
	fOldMask(thread_get_current_thread()->sig_block_mask)
{
	Initialized();
}
void
M68KVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	Thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask,
					SMP_MSG_USER_INVALIDATE_PAGES, 0, 0, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			CPUSet cpuMask = PagingStructures()->active_on_cpus;
			cpuMask.ClearBit(cpu);

			if (!cpuMask.IsEmpty()) {
				smp_send_multicast_ici(cpuMask,
					SMP_MSG_INVALIDATE_PAGE_LIST, (addr_t)fInvalidPages,
					fInvalidPagesCount, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	}

	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
void
x86_invalid_exception(iframe* frame)
{
	Thread* thread = thread_get_current_thread();
	char name[32];
	panic("unhandled trap 0x%lx (%s) at ip 0x%lx, thread %" B_PRId32 "!\n",
		frame->vector, exception_name(frame->vector, name, sizeof(name)),
		frame->ip, thread ? thread->id : -1);
}
status_t
SystemProfiler::NextBuffer(size_t bytesRead, uint64* _droppedEvents)
{
	InterruptsSpinLocker locker(fLock);

	if (fWaitingProfilerThread != NULL || !fProfilingActive
		|| bytesRead > fBufferSize) {
		return B_BAD_VALUE;
	}

	fBufferSize -= bytesRead;
	fBufferStart += bytesRead;
	if (fBufferStart > fBufferCapacity)
		fBufferStart -= fBufferCapacity;
	fHeader->size = fBufferSize;
	fHeader->start = fBufferStart;

	// already enough data in the buffer to return?
	if (fBufferSize > fBufferCapacity / 2)
		return B_OK;

	// Wait until the buffer gets too full or an error or a timeout occurs.
	while (true) {
		Thread* thread = thread_get_current_thread();
		fWaitingProfilerThread = thread;

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "system profiler buffer");

		locker.Unlock();

		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT,
			1000000);

		locker.Lock();

		if (error == B_OK) {
			// the caller has unset fWaitingProfilerThread for us
			break;
		}

		fWaitingProfilerThread = NULL;

		if (error != B_TIMED_OUT)
			return error;

		// just the timeout -- return, if the buffer is not empty
		if (fBufferSize > 0)
			break;
	}

	if (_droppedEvents != NULL) {
		*_droppedEvents = fDroppedEvents;
		fDroppedEvents = 0;
	}

	return B_OK;
}
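// The start/size bookkeeping above is plain ring-buffer arithmetic: the
// consumer advances the start offset by bytesRead and wraps at most once,
// which suffices because bytesRead can never exceed the capacity. A
// standalone sketch with hypothetical names (not kernel code):

#include <cassert>
#include <cstddef>

// mirrors fBufferStart/fBufferSize/fBufferCapacity in NextBuffer()
struct RingCursor {
	size_t start;		// offset of the oldest unread byte
	size_t size;		// number of unread bytes
	size_t capacity;	// total buffer size

	void Consume(size_t bytesRead)
	{
		assert(bytesRead <= size);
		size -= bytesRead;
		start += bytesRead;
		// one conditional subtraction suffices: start < 2 * capacity here,
		// since bytesRead <= size <= capacity
		if (start > capacity)
			start -= capacity;
	}
};

int
main()
{
	RingCursor cursor = { 900, 150, 1000 };
	cursor.Consume(150);	// the read wraps past the end of the buffer
	assert(cursor.start == 50 && cursor.size == 0);
	return 0;
}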
status_t
X86VMTranslationMap64Bit::UnmapPage(VMArea* area, addr_t address,
	bool updatePageQueue)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	TRACE("X86VMTranslationMap64Bit::UnmapPage(%#" B_PRIxADDR ")\n",
		address);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), address, fIsKernelMap, false,
		NULL, fPageMapper, fMapCount);
	if (entry == NULL)
		return B_ENTRY_NOT_FOUND;

	RecursiveLocker locker(fLock);

	uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);

	pinner.Unlock();

	if ((oldEntry & X86_64_PTE_PRESENT) == 0)
		return B_ENTRY_NOT_FOUND;

	fMapCount--;

	if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
		// Note, that we only need to invalidate the address, if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);

		Flush();

		// NOTE: Between clearing the page table entry and Flush() other
		// processors (actually even this processor with another thread of
		// the same team) could still access the page in question via their
		// cached entry. We can obviously lose a modified flag in this case,
		// with the effect that the page looks unmodified (and might thus be
		// recycled), but is actually modified.
		// In most cases this is harmless, but for
		// vm_remove_all_page_mappings() this is actually a problem.
		// Interestingly FreeBSD seems to ignore this problem as well
		// (cf. pmap_remove_all()), unless I've missed something.
	}

	locker.Detach();
		// PageUnmapped() will unlock for us

	PageUnmapped(area, (oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
		(oldEntry & X86_64_PTE_ACCESSED) != 0,
		(oldEntry & X86_64_PTE_DIRTY) != 0, updatePageQueue);

	return B_OK;
}
void
x86_64_push_iframe(struct iframe *frame)
{
	struct thread *t = thread_get_current_thread();
	ASSERT(t->arch_info.iframe_ptr < IFRAME_TRACE_DEPTH);
//	dprintf("x86_64_push_iframe: frame %p, depth %d\n", frame,
//		t->arch_info.iframe_ptr);
	t->arch_info.iframes[t->arch_info.iframe_ptr++] = frame;
}
int
has_signals_pending(void *_thread)
{
	struct thread *thread = (struct thread *)_thread;
	if (thread == NULL)
		thread = thread_get_current_thread();

	return atomic_get(&thread->sig_pending)
		& ~atomic_get(&thread->sig_block_mask);
}
AbstractTraceEntry::AbstractTraceEntry()
{
	struct thread* thread = thread_get_current_thread();
	if (thread != NULL) {
		fThread = thread->id;
		if (thread->team)
			fTeam = thread->team->id;
	}
	fTime = system_time();
}