/** Invalidate all TLB entries belonging to specified address space.
 *
 * @param asid Address space identifier.
 *
 */
void tlb_invalidate_asid(asid_t asid)
{
	ASSERT(asid != ASID_INVALID);

	/*
	 * EntryHi is clobbered by the read loop below; save it so the
	 * current ASID can be restored afterwards.
	 */
	entry_hi_t hi_save;
	hi_save.value = cp0_entry_hi_read();

	ipl_t ipl = interrupts_disable();

	for (unsigned int i = 0; i < TLB_ENTRY_COUNT; i++) {
		cp0_index_write(i);
		tlbr();

		entry_hi_t hi;
		hi.value = cp0_entry_hi_read();

		if (hi.asid == asid) {
			/*
			 * Invalidate the entry by clearing the valid bit of
			 * both the even and odd page mappings, then write the
			 * entry back at the same index.
			 */
			entry_lo_t lo0;
			lo0.value = cp0_entry_lo0_read();

			entry_lo_t lo1;
			lo1.value = cp0_entry_lo1_read();

			lo0.v = 0;
			lo1.v = 0;

			cp0_entry_lo0_write(lo0.value);
			cp0_entry_lo1_write(lo1.value);

			tlbwi();
		}
	}

	/*
	 * FIX: restore EntryHi before re-enabling interrupts. Once interrupts
	 * are enabled, an interrupt or preemption may switch the current
	 * address space (changing EntryHi's ASID); writing the saved value
	 * after that window would clobber EntryHi with a stale ASID.
	 */
	cp0_entry_hi_write(hi_save.value);
	interrupts_restore(ipl);
}
/** Invalidate all not wired TLB entries. */ void tlb_invalidate_all(void) { entry_hi_t hi_save; hi_save.value = cp0_entry_hi_read(); ipl_t ipl = interrupts_disable(); for (unsigned int i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) { cp0_index_write(i); tlbr(); entry_lo_t lo0; lo0.value = cp0_entry_lo0_read(); entry_lo_t lo1; lo1.value = cp0_entry_lo1_read(); lo0.v = 0; lo1.v = 0; cp0_entry_lo0_write(lo0.value); cp0_entry_lo1_write(lo1.value); tlbwi(); } interrupts_restore(ipl); cp0_entry_hi_write(hi_save.value); }
/*
 * NAME
 *
 *   pthread_spin_unlock - unlock a spin lock object
 *
 * SYNOPSIS
 *
 *   #include <pthread.h>
 *
 *   int pthread_spin_unlock(pthread_spinlock_t *lock);
 *
 * DESCRIPTION
 *
 *   The pthread_spin_unlock() function shall release the spin lock
 *   referenced by lock which was locked via the pthread_spin_lock() or
 *   pthread_spin_trylock() functions.
 *
 *   The results are undefined if the lock is not held by the calling
 *   thread, or if this function is called with an uninitialized thread
 *   spin lock.
 *
 *   If there are threads spinning on the lock when pthread_spin_unlock()
 *   is called, the lock becomes available and an unspecified spinning
 *   thread shall acquire the lock.
 *
 * RETURN VALUE
 *
 *   Upon successful completion, the pthread_spin_unlock() function shall
 *   return zero; otherwise, an error number shall be returned to indicate
 *   the error.
 *
 * ERRORS
 *
 *   This function shall not return an error code of [EINTR].
 */
int pthread_spin_unlock ( pthread_spinlock_t *lock )
{
	/*
	 * Capture the interrupt state saved at lock-acquisition time while
	 * we still hold the lock; after the unlock another acquirer may
	 * overwrite lock->flags.
	 */
	ipl_t saved_state = lock->flags;

	spinlock_unlock(&lock->lock);
	interrupts_restore(saved_state);

	return OK;
}
/** Invalidate TLB entries for specified page range belonging to specified
 * address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt  Number of entries to invalidate.
 *
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	if (asid == ASID_INVALID)
		return;

	/* Save EntryHi; the probe loop below overwrites it. */
	entry_hi_t hi_save;
	hi_save.value = cp0_entry_hi_read();

	ipl_t ipl = interrupts_disable();

	/*
	 * Each TLB entry maps an even/odd pair of pages, hence the stride of
	 * two; iterating to cnt + 1 covers a trailing odd page.
	 *
	 * FIX: the index is size_t (matching cnt) so that i * PAGE_SIZE
	 * cannot truncate for very large ranges, as it could with the
	 * previous unsigned int index.
	 */
	for (size_t i = 0; i < cnt + 1; i += 2) {
		entry_hi_t hi;
		hi.value = 0;
		tlb_prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
		cp0_entry_hi_write(hi.value);

		tlbp();

		tlb_index_t index;
		index.value = cp0_index_read();

		if (!index.p) {
			/*
			 * Entry was found, index register contains valid
			 * index. Invalidate both halves of the pair and
			 * write the entry back in place.
			 */
			tlbr();

			entry_lo_t lo0;
			lo0.value = cp0_entry_lo0_read();

			entry_lo_t lo1;
			lo1.value = cp0_entry_lo1_read();

			lo0.v = 0;
			lo1.v = 0;

			cp0_entry_lo0_write(lo0.value);
			cp0_entry_lo1_write(lo1.value);

			tlbwi();
		}
	}

	/*
	 * FIX: restore EntryHi before re-enabling interrupts; otherwise an
	 * interrupt or preemption in between could switch address spaces and
	 * the subsequent write would install a stale ASID in EntryHi.
	 */
	cp0_entry_hi_write(hi_save.value);
	interrupts_restore(ipl);
}
/** Free frames of physical memory.
 *
 * Find the respective frame structure for each supplied physical frame
 * and decrement its reference count. Frames whose count drops to zero
 * are marked as available again.
 *
 * @param start Physical address of the first frame to be freed.
 * @param count Number of frames to free.
 * @param flags Flags to control memory reservation.
 *
 */
void frame_free_generic(uintptr_t start, size_t count, frame_flags_t flags)
{
	size_t released = 0;

	irq_spinlock_lock(&zones.lock, true);

	for (size_t idx = 0; idx < count; idx++) {
		/*
		 * First, find the host frame zone for the address.
		 */
		pfn_t pfn = ADDR2PFN(start) + idx;
		size_t znum = find_zone(pfn, 1, 0);

		ASSERT(znum != (size_t) -1);

		released += zone_frame_free(&zones.info[znum],
		    pfn - zones.info[znum].base);
	}

	irq_spinlock_unlock(&zones.lock, true);

	/*
	 * Signal that some memory has been freed.
	 * Since mem_avail_mtx is an active mutex, interrupts must be
	 * disabled to prevent deadlock with TLB shootdown.
	 */
	ipl_t ipl = interrupts_disable();
	mutex_lock(&mem_avail_mtx);

	if (mem_avail_req > 0)
		mem_avail_req -= min(mem_avail_req, released);

	if (mem_avail_req == 0) {
		mem_avail_gen++;
		condvar_broadcast(&mem_avail_cv);
	}

	mutex_unlock(&mem_avail_mtx);
	interrupts_restore(ipl);

	if (!(flags & FRAME_NO_RESERVE))
		reserve_free(released);
}
/*
 * pthread_spin_trylock - lock a spin lock object without blocking
 *
 * SEE pthread_spin_lock() for more information.
 */
int pthread_spin_trylock ( pthread_spinlock_t *lock )
{
	ipl_t state = interrupts_disable();

	/*
	 * NOTE(review): this code treats a non-zero return from
	 * spinlock_trylock() as "lock NOT acquired" — confirm this matches
	 * the spinlock API's return-value convention.
	 */
	if (!spinlock_trylock(&lock->lock)) {
		/* Acquired: stash the interrupt state for the later unlock. */
		lock->flags = state;
		return OK;
	}

	/* Already held elsewhere: undo the interrupt disable and report. */
	interrupts_restore(state);
	return EBUSY;
}
/** Allocate frames of physical memory.
 *
 * @param count      Number of continuous frames to allocate.
 * @param flags      Flags for host zone selection and address processing.
 * @param constraint Indication of physical address bits that cannot be
 *                   set in the address of the first allocated frame.
 * @param pzone      Preferred zone.
 *
 * @return Physical address of the allocated frame.
 *
 */
uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
    uintptr_t constraint, size_t *pzone)
{
	ASSERT(count > 0);

	size_t hint = pzone ? (*pzone) : 0;
	pfn_t frame_constraint = ADDR2PFN(constraint);

	/*
	 * If not told otherwise, we must first reserve the memory.
	 */
	if (!(flags & FRAME_NO_RESERVE))
		reserve_force_alloc(count);

loop:
	irq_spinlock_lock(&zones.lock, true);

	/*
	 * First, find suitable frame zone.
	 */
	size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
	    frame_constraint, hint);

	/*
	 * If no memory, reclaim some slab memory,
	 * if it does not help, reclaim all.
	 * The zones lock is dropped around each reclaim call and the zone
	 * search is retried after any successful reclamation.
	 */
	if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
		irq_spinlock_unlock(&zones.lock, true);
		size_t freed = slab_reclaim(0);
		irq_spinlock_lock(&zones.lock, true);

		if (freed > 0)
			znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
			    frame_constraint, hint);

		if (znum == (size_t) -1) {
			irq_spinlock_unlock(&zones.lock, true);
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			irq_spinlock_lock(&zones.lock, true);

			if (freed > 0)
				znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
				    frame_constraint, hint);
		}
	}

	/* Still no suitable zone: either fail (atomic) or sleep and retry. */
	if (znum == (size_t) -1) {
		if (flags & FRAME_ATOMIC) {
			/* Caller cannot sleep; undo the reservation and fail. */
			irq_spinlock_unlock(&zones.lock, true);

			if (!(flags & FRAME_NO_RESERVE))
				reserve_free(count);

			return 0;
		}

		size_t avail = frame_total_free_get_internal();

		irq_spinlock_unlock(&zones.lock, true);

		if (!THREAD)
			panic("Cannot wait for %zu frames to become available "
			    "(%zu available).", count, avail);

		/*
		 * Sleep until some frames are available again.
		 */

#ifdef CONFIG_DEBUG
		log(LF_OTHER, LVL_DEBUG,
		    "Thread %" PRIu64 " waiting for %zu frames "
		    "%zu available.", THREAD->tid, count, avail);
#endif

		/*
		 * Since the mem_avail_mtx is an active mutex, we need to
		 * disable interrupts to prevent deadlock with TLB shootdown.
		 */
		ipl_t ipl = interrupts_disable();
		mutex_lock(&mem_avail_mtx);

		/* Record the smallest outstanding request size. */
		if (mem_avail_req > 0)
			mem_avail_req = min(mem_avail_req, count);
		else
			mem_avail_req = count;

		/* Wait for the generation counter to advance (frames freed). */
		size_t gen = mem_avail_gen;

		while (gen == mem_avail_gen)
			condvar_wait(&mem_avail_cv, &mem_avail_mtx);

		mutex_unlock(&mem_avail_mtx);
		interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
		log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
		    THREAD->tid);
#endif

		goto loop;
	}

	/* Allocate from the found zone and convert to an absolute PFN. */
	pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
	    frame_constraint) + zones.info[znum].base;

	irq_spinlock_unlock(&zones.lock, true);

	if (pzone)
		*pzone = znum;

	return PFN2ADDR(pfn);
}