/* ** Test du noyau preemptif. Lier ce fichier avec noyau.c et noyaufil.c */ int main(void) { serial_init(115200); printf("Kernel started !"); file_init(); ajoute(3); affic_file(); ajoute(5); ajoute(1); ajoute(0); ajoute(2); affic_file(); suivant(); affic_file(); retire(0); affic_file(); ajoute(6); affic_file(); while(1); return 0; }
/*--------------------------------------------------------------------------*
 * --- End of a task ---                                                    *
 * Input  : none                                                            *
 * Output : none                                                            *
 * Descr. : This procedure must be called at the end of every task.         *
 *          (Header translated from French.)                                *
 *--------------------------------------------------------------------------*/
void fin_tache(void)
{
    /* disable interrupts so the queue update below is not preempted */
    _irq_disable_();
    /* remove the current task (_tache_c) from the task queue */
    retire(_tache_c);
    /* hand the CPU over to the scheduler to pick the next task;
       NOTE(review): presumably never returns here — confirm in noyau.c */
    schedule();
}
// Advance the GPU SM model by one clock tick for flow `fid`.
// Returns true while the core should stay scheduled; false once it is
// inactive (removed from the running queue) or has no work in flight.
bool GPUSMProcessor::advance_clock(FlodID fid) {

  if (!active) {
    // time to remove from the running queue
    TaskHandler::removeFromRunning(cpu_id);
    return false;
  }

  fetch(fid);

  if (!busy)
    return false;

  clockTicks.inc();
  setWallClock();

  // Throttling: swallow cycles so the modeled frequency matches the
  // configured ratio; `skip - 1` ticks are consumed per productive tick.
  if (unlikely(throttlingRatio > 1)) {
    throttling_cntr++;

    uint32_t skip = ceil(throttlingRatio / getTurboRatioGPU());
    if (throttling_cntr < skip) {
      return true;
    }
    throttling_cntr = 1;
  }

  // ID Stage (insert to instQueue)
  if (spaceInInstQueue >= FetchWidth) {
    //MSG("\nFor CPU %d:",getId());
    IBucket *bucket = pipeQ.pipeLine.nextItem();
    if (bucket) {
      I(!bucket->empty());
      spaceInInstQueue -= bucket->size();
      pipeQ.instQueue.push(bucket);
    } else {
      noFetch2.inc();  // pipeline had no bucket ready this cycle
    }
  } else {
    noFetch.inc();     // instruction queue too full to accept a fetch group
  }

  // RENAME Stage
  if (!pipeQ.instQueue.empty()) {
    // FIXME: Clear the per PE counter
    spaceInInstQueue += issue(pipeQ);
  } else if (ROB.empty() && rROB.empty()) {
    //I(0);
    // Still busy if we have some in-flight requests
    busy = pipeQ.pipeLine.hasOutstandingItems();
    return true;
  }

  retire();

  return true;
}
// Give up the currently active allocation region and hand it back to the
// caller. Returns NULL when the active region was only the dummy sentinel,
// i.e. there was no real region to release.
HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  // Remember the region we are about to give up before retire() swaps
  // the dummy region in.
  HeapRegion* const released_region = _alloc_region;
  retire(false /* fill_up */);
  assert(_alloc_region == _dummy_region,
         ar_ext_msg(this, "post-condition of retire()"));
  _alloc_region = NULL;
  trace("released");
  // The dummy region is a sentinel, not a real heap region: report NULL.
  if (released_region == _dummy_region) {
    return NULL;
  }
  return released_region;
}
/*
 * Interactive insert/remove menu for the static list `l`.
 * op == 1 selects insert mode; any other value selects remove mode.
 * Repeats until the user answers something other than 1 at the
 * "continue?" prompt. (Comments translated from Portuguese; the
 * user-facing Portuguese strings are runtime output and left as-is.)
 */
void filter( int op, struct tlist *l)
{
    filter_pass = 1;
    int i, num;

    do {
        /* check which option the user selected */
        if(op == 1) {
            cabecalho("INSERIR");
            if(l->last == MAX) {
                /* list is full: report the error and break out of the
                   whole loop, skipping the "continue?" prompt */
                printf("\t\t# ERRO: Lista cheia! [ENTER]");
                getche();
                break;
            }
            else {
                printf("\t\tDIGITE UM NUMERO: ");
                geti(&num);
                i = search(num, l);
                if(i != -1) {
                    /* duplicate value: reject the insertion */
                    printf("\n\t\t# NUMERO JA ESTA NA LISTA! [ENTER]");
                    getche();
                }
                else
                    add(num, l);
            }
        }
        else {
            cabecalho("REMOVER");
            printf("\t\tDIGITE O NUMERO: ");
            geti(&num);
            i = search(num, l);
            if(i == -1) {
                printf("\n\t\t# NUMERO NAO ENCONTRADO! [ENTER]");
                getche();
            }
            else
                retire(i, l);  /* remove by the index search() returned */
        }
        printf("\n\n\t\t# DESEJA CONTINUAR? [1] SIM [9] NAO");
        printf("\n\t\t# OPCAO: ");
        geti(&op);
    } while(op == 1);
}
// Flush the stats supporting ergonomic sizing of PLAB's // and retire the current buffer. void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) { // We flush the stats first in order to get a reading of // unused space in the last buffer. if (ResizePLAB) { flush_stats(stats); // Since we have flushed the stats we need to clear // the _allocated and _wasted fields. Not doing so // will artifically inflate the values in the stats // to which we add them. // The next time we flush these values, we will add // what we have just flushed in addition to the size // of the buffers allocated between now and then. _allocated = 0; _wasted = 0; } // Retire the last allocation buffer. retire(end_of_gc, retain); }
// Slow-path allocation of `word_size` words while holding the appropriate
// lock. Returns the address of the allocated chunk, or NULL when even a
// brand-new region cannot satisfy the request.
inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
                                                          bool bot_updates) {
  // Retry the plain allocation now that the lock is held: another thread
  // may have installed a fresh region while we were waiting for the lock.
  HeapWord* const retry = attempt_allocation(word_size, bot_updates);
  if (retry != NULL) {
    return retry;
  }

  // The current region really is exhausted: retire it (filling up the
  // remainder) and allocate out of a newly obtained region.
  retire(true /* fill_up */);
  HeapWord* const fresh =
      new_alloc_region_and_allocate(word_size, false /* force */);
  if (fresh == NULL) {
    trace("alloc locked failed", word_size);
    return NULL;
  }
  trace("alloc locked (second attempt)", word_size, fresh);
  return fresh;
}
// Advance the modeled processor pipeline by one cycle:
// fetch -> instruction-queue insert -> rename/issue -> retire.
void Processor::advanceClock() {

#ifdef TS_STALL
  // Stalled processor: consume the cycle without doing any pipeline work.
  if (isStall())
    return;
#endif

  clockTicks++;

  // GMSG(!ROB.empty(),"robTop %d Ul %d Us %d Ub %d",ROB.getIdFromTop(0)
  //      ,unresolvedLoad, unresolvedStore, unresolvedBranch);

  // Fetch Stage
  if (IFID.hasWork() ) {
    IBucket *bucket = pipeQ.pipeLine.newItem();
    if( bucket ) {
      IFID.fetch(bucket);
    }
  }

  // ID Stage (insert to instQueue)
  if (spaceInInstQueue >= FetchWidth) {
    IBucket *bucket = pipeQ.pipeLine.nextItem();
    if( bucket ) {
      I(!bucket->empty());
      // I(bucket->top()->getInst()->getAddr());
      spaceInInstQueue -= bucket->size();
      pipeQ.instQueue.push(bucket);
    }else{
      noFetch2.inc();  // no bucket was ready in the pipeline this cycle
    }
  }else{
    noFetch.inc();     // instruction queue too full to accept a fetch group
  }

  // RENAME Stage
  if ( !pipeQ.instQueue.empty() ) {
    // issue() presumably returns the number of instruction-queue slots
    // freed this cycle — matches the bookkeeping above; confirm in issue()
    spaceInInstQueue += issue(pipeQ);
  }

  retire();
}
// Try to find a free cell of at least `bytes` bytes by sweeping blocks from
// the sweep cursor onward. Returns a pointer to the cell, or 0 when no
// swept block can satisfy the request (caller must then obtain a new block).
inline void* MarkedAllocator::tryAllocateHelper(size_t bytes)
{
    if (m_currentBlock) {
        ASSERT(m_currentBlock == m_nextBlockToSweep);
        // The current block's free list is spent: account for it and move
        // the sweep cursor past it.
        m_currentBlock->didConsumeFreeList();
        m_nextBlockToSweep = m_currentBlock->next();
    }

    MarkedBlock* next;
    for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
        next = block->next();

        MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);

        // Fraction of the block still occupied by live cells after sweep.
        double utilization = ((double)MarkedBlock::blockSize - (double)freeList.bytes) / (double)MarkedBlock::blockSize;
        if (utilization >= Options::minMarkedBlockUtilization()) {
            // Block is nearly full: retire it so future sweeps skip it.
            ASSERT(freeList.bytes || !freeList.head);
            retire(block, freeList);
            continue;
        }

        if (bytes > block->cellSize()) {
            // Cells in this block are too small for this request; return
            // the block to a non-allocating state and keep scanning.
            block->stopAllocating(freeList);
            continue;
        }

        // Found a usable block: adopt its free list and stop scanning.
        m_currentBlock = block;
        m_freeList = freeList;
        break;
    }

    if (!m_freeList.head) {
        m_currentBlock = 0;
        return 0;
    }

    ASSERT(m_freeList.head);
    void* head = tryPopFreeList(bytes);
    ASSERT(head);

    m_markedSpace->didAllocateInBlock(m_currentBlock);
    return head;
}
// Reset the allocator's per-cycle sweep state: clear the current block and
// free list, and rewind the sweep cursor to the head of the block list.
void MarkedAllocator::reset()
{
    m_lastActiveBlock = 0;
    m_currentBlock = 0;
    m_freeList = MarkedBlock::FreeList();

    // A full collection reconsiders previously retired blocks: splice them
    // back into the sweepable block list.
    if (m_heap->operationInProgress() == FullCollection)
        m_blockList.append(m_retiredBlocks);

    m_nextBlockToSweep = m_blockList.head();

    if (UNLIKELY(Options::useImmortalObjects())) {
        // Immortal-objects mode: eagerly sweep and retire every block.
        // NOTE(review): presumably this prevents any cell reuse so objects
        // live forever — confirm against retire()'s semantics.
        MarkedBlock* next;
        for (MarkedBlock*& block = m_nextBlockToSweep; block; block = next) {
            next = block->next();

            MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
            retire(block, freeList);
        }
    }
}