// Release register r entirely: clear its active entry and return it
// to the free pool.  r must currently hold a live instruction.
void RegAlloc::retire(Register r)
{
    NanoAssert(r != UnknownReg);
    NanoAssert(active[r] != NULL);
    free |= rmask(r);
    active[r] = NULL;
}
// Drop register r from the active set without putting it back in the
// free pool (callers that want both use retire()).
void RegAlloc::removeActive(Register r)
{
    NanoAssert(r != UnknownReg);
    NanoAssert(active[r] != NULL);

    // Forget the instruction currently assigned to r.
    active[r] = NULL;
}
// Slow path of allocation: refill the current chunk, then carve nbytes
// (which must be 8-byte aligned) from its top.  Returns NULL only when
// fallible is true and the chunk refill failed.
void* Allocator::allocSlow(size_t nbytes, bool fallible)
{
    NanoAssert((nbytes & 7) == 0);

    if (!fill(nbytes, fallible))
        return NULL;

    NanoAssert(current_top + nbytes <= current_limit);
    void* result = current_top;
    current_top += nbytes;
    return result;
}
// Record that instruction v now lives in register r, and stamp r with
// the current use priority.  r must be unoccupied.
void RegAlloc::addActive(Register r, LIns* v)
{
    NanoAssert(v && r != UnknownReg && active[r] == NULL);
    active[r] = v;
    useActive(r);
}
// Grow the free-page list by up to `count` NJ_PAGE_SIZE pages, allocated
// from the GC heap as one contiguous block.  The request is clamped so the
// overall total never exceeds _max_pages.  Must only be called when the
// free list is empty (see the first assert); on exit _stats.pages /
// _stats.freePages reflect the new pages.
void Fragmento::pagesGrow(int32_t count)
{
    NanoAssert(!_pageList);
    MMGC_MEM_TYPE("NanojitFragmentoMem");
    Page* memory = 0;
    if (_stats.pages < _max_pages)
    {
        // make sure we don't grow beyond _max_pages
        if (_stats.pages + count > _max_pages)
            count = _max_pages - _stats.pages;
        if (count < 0)
            count = 0;

        // @todo nastiness that needs a fix'n
        _gcHeap = _core->GetGC()->GetGCHeap();
        NanoAssert(int32_t(NJ_PAGE_SIZE)<=_gcHeap->kNativePageSize);

        // convert _max_pages to gc page count
        int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
        MMGC_MEM_TYPE("NanoJitMem");
        memory = (Page*)_gcHeap->Alloc(gcpages);
#ifdef MEMORY_INFO
        ChangeSizeExplicit("NanoJitMem", 1, _gcHeap->Size(memory));
#endif
        // The block must start on a page boundary.
        NanoAssert((int*)memory == pageTop(memory));
        //fprintf(stderr,"head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);

        // Remember the raw block so ~Fragmento can return it to the GC
        // heap, then thread the individual pages into the singly linked
        // free list and update the page statistics.
        _allocList.add(memory);

        Page* page = memory;
        _pageList = page;
        _stats.pages += count;
        _stats.freePages += count;
        trackFree(0);
        // Link page i to page i+1; the loop body runs count-1 times, and
        // the final page's next is set to 0 below.
        while(--count > 0)
        {
            Page *next = page + 1;
            //fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (intptr_t)page, count);
            page->next = next;
            page = next;
        }
        page->next = 0;
        NanoAssert(pageCount()==_stats.freePages);
        //fprintf(stderr,"Fragmento::pageGrow adding page %x ; %d\n", (intptr_t)page, count);
    }
}
// Pop one Page off the free list, replenishing the list first if it has
// run dry.  The growth step doubles on each refill until it would reach
// _max_pages.  Returns NULL when no memory could be obtained.
Page* Fragmento::pageAlloc()
{
    NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);

    if (_pageList == NULL)
    {
        // Try to obtain more memory, then double the step for next time.
        pagesGrow(_pageGrowth);
        if ((_pageGrowth << 1) < _max_pages)
            _pageGrowth <<= 1;
    }

    Page* result = _pageList;
    if (result != NULL)
    {
        _pageList = result->next;
        trackFree(-1);
    }
    NanoAssert(pageCount() == _stats.freePages);
    return result;
}
// Return a page to the allocator by pushing it onto the head of the
// free list and updating the free-page statistics.
void Fragmento::pageFree(Page* page)
{
    page->next = _pageList;
    _pageList = page;
    trackFree(+1);
    NanoAssert(pageCount() == _stats.freePages);
}
// scan table for instruction with the lowest priority, meaning it is used // furthest in the future. LIns* Assembler::findVictim(RegAlloc ®s, RegisterMask allow) { NanoAssert(allow != 0); LIns *i, *a=0; int allow_pri = 0x7fffffff; for (Register r=FirstReg; r <= LastReg; r = nextreg(r)) { if ((allow & rmask(r)) && (i = regs.getActive(r)) != 0) { int pri = canRemat(i) ? 0 : regs.getPriority(r); if (!a || pri < allow_pri) { a = i; allow_pri = pri; } } } NanoAssert(a != 0); return a; }
// Allocate a fresh chunk large enough for nbytes (rounded up to at
// least MIN_CHUNK_SZB) and make it the current chunk.  Returns false —
// which only happens when fallible is true — if the underlying chunk
// allocation failed.
bool Allocator::fill(size_t nbytes, bool fallible)
{
    if (nbytes < MIN_CHUNK_SZB)
        nbytes = MIN_CHUNK_SZB;

    // Chunk's layout already includes one int64_t of data space, so
    // only the remainder needs to be added to the request.
    size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
    void* mem = allocChunk(chunkbytes, fallible);
    if (!mem)
    {
        NanoAssert(fallible);
        return false;
    }

    // Link the new chunk at the head and reset the bump pointers.
    Chunk* chunk = (Chunk*) mem;
    chunk->prev = current_chunk;
    current_chunk = chunk;
    current_top = (char*)chunk->data;
    current_limit = (char*)mem + chunkbytes;
    return true;
}
// Destructor: discard all compiled fragments, return every raw block we
// obtained from the GC heap, and delete the owned sub-objects.  The final
// assert checks that every page handed out by pageAlloc() was returned.
Fragmento::~Fragmento()
{
    clearFrags();
    _frags->clear();
    // Free the large blocks recorded by pagesGrow(), newest first.
    while( _allocList.size() > 0 )
    {
        //fprintf(stderr,"dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
#ifdef MEMORY_INFO
        ChangeSizeExplicit("NanoJitMem", -1, _gcHeap->Size(_allocList.last()));
#endif
        _gcHeap->Free( _allocList.removeLast() );
    }
    delete _frags;
    delete _assm;
#if defined(NJ_VERBOSE)
    delete enterCounts;
    delete mergeCounts;
#endif
    NanoAssert(_stats.freePages == _stats.pages );
}
// Emit native code for a call instruction (LIR_pcall or LIR_fcall):
// reserve the result register, evict caller-saved scratch registers,
// then emit the call sequence (code is emitted backwards, so the
// result handling comes first).
// NOTE(review): this definition appears truncated in this chunk — the
// argument-marshalling code and the closing brace are not visible here.
void Assembler::asm_call(LInsp ins)
{
    // fcall results arrive in F0; other calls use the first return register.
    Register retReg = ( ins->isop(LIR_fcall) ? F0 : retRegs[0] );
    prepResultReg(ins, rmask(retReg));

    // Do this after we've handled the call result, so we don't
    // force the call result to be spilled unnecessarily.
    evictScratchRegs();

    const CallInfo* call = ins->callInfo();
    // Reserve space, then emit the branch-delay-slot NOP.
    underrunProtect(8);
    NOP();

    ArgSize sizes[MAXARGS];
    uint32_t argc = call->get_sizes(sizes);

    NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall));
    verbose_only(if (_logc->lcbits & LC_Assembly) outputf(" %p:", _nIns); )
// Debug invariant: the running count must equal the number of active
// registers plus the number of free registers.
void RegAlloc::checkCount()
{
    NanoAssert(count == (countActive() + countFree()));
}
// Mark register r as just used: stamp it with the next (highest so far)
// priority value.  r must currently hold an instruction.
void RegAlloc::useActive(Register r)
{
    NanoAssert(r != UnknownReg && active[r] != NULL);
    usepri[r] = priority++;
}
// Clear register r's bit in the free mask; r must currently be free.
void RegAlloc::removeFree(Register r)
{
    NanoAssert(isFree(r));
    free &= ~rmask(r);
}
// Set register r's bit in the free mask; r must not already be free.
void RegAlloc::addFree(Register r)
{
    NanoAssert(!isFree(r));
    free |= rmask(r);
}
// True iff register r's bit is set in the free mask.
bool RegAlloc::isFree(Register r)
{
    NanoAssert(r != UnknownReg);
    return (free & rmask(r)) != 0;
}
// Consistency check for register r against the expected instruction i:
// either r is free, has no active entry, and i is NULL; or r is busy
// and its active entry is exactly the (non-NULL) i.
bool RegAlloc::isConsistent(Register r, LIns* i)
{
    NanoAssert(r != UnknownReg);
    bool freeAndEmpty = isFree(r) && !getActive(r) && !i;
    bool busyAndMatch = !isFree(r) && getActive(r) == i && i;
    return freeAndEmpty || busyAndMatch;
}