/**
 * Helper to really allocate memory either with filling degree
 * hinting on or off. only used from the real pmem_alloc.
 *
 * @param[out] addr the allocated address if successfull
 * @param length the block length to allocate
 * @param align the minimum alignment for the block.
 * @param use_fdeg whether to take filling degree into
 *                 account to speed up free region search.
 * @return true on success, false on failure.
 */
static bool pmem_alloc_helper(phys_addr_t* addr, size_t length, off_t align, bool use_fdeg) {
    pmem_region_t* reg = pmem_region_head;

    /* walk the region list front-to-back until one region can satisfy
     * the request. */
    while(reg) {
        /* NOTE(review): the filling degree is sampled WITHOUT holding
         * pmem_lock. It is only used as a skip heuristic below, so a
         * stale value merely causes a region to be searched (or skipped)
         * unnecessarily -- confirm that this is the intent. */
        size_t fdeg = (use_fdeg ? bmap_fdeg(reg->bmap) : 0);

        /* make sure there is enough free room in the region to
         * fit the block to be allocated. fdeg == 0 means hinting is
         * disabled (or the region looks empty), so the region is always
         * searched in that case.
         * NOTE(review): the strict `>` rejects a region whose free
         * space exactly equals the request; fdeg only has percent
         * granularity, so this check is approximate anyway -- verify
         * the exact-fit case is acceptable to lose. */
        if(fdeg == 0 || ((reg->length * (100-fdeg))) > (length * 100)) {
            size_t idx;

            spl_lock(&pmem_lock);

            /* search for PMEM_PAGES(length) consecutive free page bits,
             * aligned to (align / PMEM_PAGESIZE) pages, using the
             * region's search hint. */
            if(bmap_search(reg->bmap, &idx, 0, PMEM_PAGES(length),
                    (align / PMEM_PAGESIZE), BMAP_SRCH_HINTED)) {
                /* mark the found run as used; on success translate the
                 * bit index back to a physical address. */
                if(bmap_fill(reg->bmap, 1, idx, idx + PMEM_PAGES(length))) {
                    *addr = PMEM_FROM_REGBIT(reg, idx);
                    spl_unlock(&pmem_lock);
                    return true;
                }
            }

            spl_unlock(&pmem_lock);
        }

        reg = reg->next;
    }

    /* no region could satisfy the request. */
    return false;
}
/**
 * Reserve a physical memory range so the allocator will never hand it out.
 *
 * Runs two passes over the same range: the first pass only verifies that
 * no page in [addr, addr + length) is already reserved; the second pass
 * actually sets the bitmap bits. This keeps the operation all-or-nothing:
 * on failure no bit has been modified. Pages that fall outside every
 * known region are silently skipped.
 *
 * @param addr   page-aligned physical start address (fatal() otherwise).
 * @param length length of the range in bytes; partial trailing pages are
 *               reserved whole.
 * @return true if the whole range was reserved, false if any page inside
 *         a region was already taken.
 */
bool pmem_reserve(phys_addr_t addr, size_t length) {
    phys_addr_t top, cur;
    bool checkPass = true;

    if(ALIGN_RST(addr, PMEM_PAGESIZE) != 0) {
        fatal("misaligned physical address!\n");
    }

    /* BUGFIX: length is a size_t; passing it to a printf-style function
     * with "%d" is undefined behavior on LP64 targets -- use "%zu". */
    debug("try to reserve physical memory at %p (length: %zu bytes)\n", addr, length);

    spl_lock(&pmem_lock);

next_pass:
    top = (addr + length);
    cur = addr;

    while(top > cur) {
        pmem_region_t* reg = pmem_region_head;

        while(reg) {
            /* handle every page of the range that falls into this region. */
            while(pmem_reg_contains(reg, cur)) {
                size_t idx = PMEM_TO_REGBIT(reg, cur);

                if(checkPass) {
                    /* pass 1: only probe; bail out before touching bits. */
                    if(bmap_get(reg->bmap, idx)) {
                        spl_unlock(&pmem_lock);
                        debug("failed to reserve region, %p already reserved\n", cur);
                        return false;
                    }
                } else {
                    /* pass 2: the whole range is known free; claim it. */
                    bmap_set(reg->bmap, idx, 1);
                }

                cur += PMEM_PAGESIZE;

                if(top <= cur)
                    goto pass_ok;
            }

            reg = reg->next;
        }

        /* cur lies outside every region: skip this page.
         * NOTE(review): this assumes the region list is sorted by
         * address -- if a later page belonged to an earlier list entry
         * it would be skipped unchecked. Confirm against region setup. */
        cur += PMEM_PAGESIZE;
    }

pass_ok:
    if(checkPass) {
        /* check pass succeeded -- rerun the same walk, setting bits. */
        checkPass = false;
        goto next_pass;
    }

    spl_unlock(&pmem_lock);
    return true;
}
void sched_schedule() { // find a thread to schedule. the scheduled thread is removed from the // queue, and the currently-run thread is re-added to the queue. spl_lock(&_sched_lock); thread_t* old = thr_current(); if(old) { switch(old->state) { case Runnable: // don't stop if the thread still has time! if(old->preempt_at > systime()) { goto done; } break; case Yielded: // timeslice is given up. old->state = Runnable; break; default: // not relevant here. break; } } // BUG: this algorithm will start to choose the idle thread when // only one other thread is remaining runnable. thread_t* thr = sched_choose(old); if(thr) { trace("switching: %d:%d (%d)\n", thr->parent->id, thr->id, thr->priority); thr->preempt_at = systime() + SCHED_TIMESLICE_US; thr_switch(thr); sched_remove_unlocked(thr); if(old) sched_add_unlocked(old); goto done; } // let things stay as they are if only one thread exists. if(old->state == Runnable) { goto done; } fatal("no thread left to schedule - this is bad!\n"); done: spl_unlock(&_sched_lock); }
/**
 * Take a thread off the scheduler's run queue.
 *
 * Thin locking wrapper: acquires the scheduler spin lock, delegates to
 * sched_remove_unlocked(), and releases the lock again.
 *
 * @param thread the thread to remove from the run queue.
 */
void sched_remove(thread_t* thread) {
    spl_lock(&_sched_lock);
    sched_remove_unlocked(thread);
    spl_unlock(&_sched_lock);
}
/**
 * Insert a thread into the scheduler's run queue.
 *
 * Thin locking wrapper: acquires the scheduler spin lock, delegates to
 * sched_add_unlocked(), and releases the lock again.
 *
 * @param thread the thread to add to the run queue.
 */
void sched_add(thread_t* thread) {
    spl_lock(&_sched_lock);
    sched_add_unlocked(thread);
    spl_unlock(&_sched_lock);
}