/* Flat-latency memory model: every real access completes after a fixed
 * configured delay; writebacks are handled identically. Updates the
 * requester's coherence state and returns the absolute completion cycle. */
uint64_t SimpleMemory::access(MemReq& req) {
    // Resolve the new coherence state implied by the request type.
    switch (req.type) {
        case PUTS:
        case PUTX:
            *req.state = I;
            break;
        case GETS:
            *req.state = req.is(MemReq::NOEXCL) ? S : E;
            break;
        case GETX:
            *req.state = M;
            break;
        default:
            panic("!?");
    }

    const uint64_t doneCycle = req.cycle + latency;
    assert(doneCycle > req.cycle);  // latency must be non-zero; also guards overflow

    /*
    if ((req.type == GETS || req.type == GETX) && eventRecorders[req.srcId]) {
        Address addr = req.lineAddr<<lineBits;
        MemAccReqEvent* memEv = new (eventRecorders[req.srcId]->alloc<MemAccReqEvent>()) MemAccReqEvent(nullptr, false, addr);
        TimingRecord tr = {addr, req.cycle, respCycle, req.type, memEv, memEv};
        eventRecorders[req.srcId]->pushRecord(tr);
    }
    */

    return doneCycle;
}
/* DDR timing model front-end: updates coherence state, then computes a
 * minimum-latency response and (when a recorder exists for the source core)
 * enqueues a DDRMemoryAccEvent so the timing core can model the real DRAM
 * latency. Clean writebacks (PUTS) complete instantly. */
uint64_t DDRMemory::access(MemReq& req) {
    // Coherence-state transition for the requester.
    switch (req.type) {
        case PUTS:
        case PUTX:
            *req.state = I;
            break;
        case GETS:
            *req.state = req.is(MemReq::NOEXCL) ? S : E;
            break;
        case GETX:
            *req.state = M;
            break;
        default:
            panic("!?");
    }

    // Clean writebacks never touch DRAM; respond immediately (absolute cycle,
    // i.e. zero added latency).
    if (req.type == PUTS) return req.cycle;

    const bool wrAccess = (req.type == PUTX);
    const uint64_t doneCycle = req.cycle + (wrAccess ? minWrLatency : minRdLatency);

    auto evRec = zinfo->eventRecorders[req.srcId];
    if (evRec) {
        // Placement-allocate the access event in the recorder's arena and
        // hand it off as both start and end event of the timing record.
        DDRMemoryAccEvent* accEv = new (evRec) DDRMemoryAccEvent(
                this, wrAccess, req.lineAddr, domain, preDelay,
                wrAccess ? postDelayWr : postDelayRd);
        accEv->setMinStartCycle(req.cycle);
        TimingRecord rec = {req.lineAddr, req.cycle, doneCycle, req.type, accEv, accEv};
        evRec->pushRecord(rec);
    }

    //info("Access to %lx at %ld, %ld latency", req.lineAddr, req.cycle, minLatency);
    return doneCycle;
}
/* DRAMSim2-backed memory: updates coherence state, returns a minimum-latency
 * response, and records a DRAMSimAccEvent for every request that reaches the
 * memory (clean writebacks are discarded). */
uint64_t DRAMSimMemory::access(MemReq& req) {
    // Coherence-state transition for the requester.
    switch (req.type) {
        case PUTS:
        case PUTX:
            *req.state = I;
            break;
        case GETS:
            *req.state = req.is(MemReq::NOEXCL) ? S : E;
            break;
        case GETX:
            *req.state = M;
            break;
        default:
            panic("!?");
    }

    const uint64_t doneCycle = req.cycle + minLatency;
    assert(doneCycle > req.cycle);

    const bool recordable = (req.type != PUTS);  // discard clean writebacks
    if (recordable && zinfo->eventRecorders[req.srcId]) {
        const Address byteAddr = req.lineAddr << lineBits;
        const bool wrAccess = (req.type == PUTX);
        DRAMSimAccEvent* accEv = new (zinfo->eventRecorders[req.srcId])
                DRAMSimAccEvent(this, wrAccess, byteAddr, domain);
        accEv->setMinStartCycle(req.cycle);
        TimingRecord rec = {byteAddr, req.cycle, doneCycle, req.type, accEv, accEv};
        zinfo->eventRecorders[req.srcId]->pushRecord(rec);
    }

    return doneCycle;
}
/* M/D/1 queueing memory model: latency is recomputed once per phase from the
 * observed access rate. Uses double-checked locking (cheap unlocked check,
 * then recheck under the lock) so only one thread per phase pays for
 * updateLatency(); profiling counters use atomics since this runs
 * concurrently from multiple cores. Returns the absolute response cycle. */
uint64_t MD1Memory::access(MemReq& req) {
    // Fast path: latency already updated for the current phase.
    if (zinfo->numPhases > lastPhase) {
        futex_lock(&updateLock);
        //Recheck, someone may have updated already
        if (zinfo->numPhases > lastPhase) {
            updateLatency();
        }
        futex_unlock(&updateLock);
    }
    switch (req.type) {
        case PUTX: //Dirty wback
            profWrites.atomicInc();
            profTotalWrLat.atomicInc(curLatency);
            // Count toward this phase's load for the M/D/1 latency update.
            __sync_fetch_and_add(&curPhaseAccesses, 1);
            //Note no break
            // Intentional fall-through: PUTX also invalidates like PUTS below.
        case PUTS:
            //Not a real access -- memory must treat clean wbacks as if they never happened.
            *req.state = I;
            break;
        case GETS:
            profReads.atomicInc();
            profTotalRdLat.atomicInc(curLatency);
            __sync_fetch_and_add(&curPhaseAccesses, 1);
            *req.state = req.is(MemReq::NOEXCL)? S : E;
            break;
        case GETX:
            // NOTE(review): GETX is profiled under the read counters
            // (profReads/profTotalRdLat) — confirm this is intentional.
            profReads.atomicInc();
            profTotalRdLat.atomicInc(curLatency);
            __sync_fetch_and_add(&curPhaseAccesses, 1);
            *req.state = M;
            break;
        default: panic("!?");
    }
    // PUTS contributes no latency; everything else pays the current M/D/1 latency.
    return req.cycle + ((req.type == PUTS)? 0 /*PUTS is not a real access*/ : curLatency);
}
uint64_t NVMainMemory::access(MemReq& req) { futex_lock(&access_lock); switch (req.type) { case PUTS: profPUTS.inc(); *req.state = I; break; case PUTX: profPUTX.inc(); *req.state = I; break; case GETS: *req.state = req.is(MemReq::NOEXCL)? S : E; break; case GETX: *req.state = M; break; default: panic("!?"); } uint64_t respCycle = req.cycle + minLatency; assert(respCycle > req.cycle); if ((zinfo->hasDRAMCache || (req.type != PUTS) /*discard clean writebacks going to mainMemory*/) && zinfo->eventRecorders[req.srcId]) { Address addr = req.lineAddr << lineBits; bool isWrite = ((req.type == PUTX) || (req.type == PUTS)); nvmain_access_count++; if(isWrite) nvmain_write_access_count++; else nvmain_read_access_count++; uint32_t core_id = req.srcId; //access PCM main memory && LLC miss NVMainAccEvent* memEv = new (zinfo->eventRecorders[req.srcId]) NVMainAccEvent(this, isWrite, addr, domain); //##########counter tlb ##############/ if( zinfo->counter_tlb && req.type!=PUTX) { bool is_itlb; ExtendTlbEntry* entry; TLBSearchResult over_thres; LookupTlb(core_id, addr,isWrite, entry, is_itlb , over_thres); if(!entry) { std::cout<<"core "<<req.srcId<<" error, no entry found, ppn:"<<std::hex<<(req.lineAddr)<<" type:"<<req.type<<std::endl; } else { if( over_thres == InDRAM) { Address dram_addr = (entry->get_counter()) << (zinfo->page_shift); Address offset = addr & (zinfo->page_size -1); dram_addr |= offset; //request type is write, get block id and set corresponding block dirty debug_printf("already in DRAM,reset to %llx ",dram_addr); memEv->setBufferAddr(); memEv->setAddr( dram_addr ); } else{ //in PCM,over threshold if( over_thres == OverThres ) { debug_printf("over thres"); MemReq request; //update page table and TLB Address vpn = entry->v_page_no; request.lineAddr = addr; request.cycle = req.cycle; Address dram_addr = dynamic_cast<PageTableWalker*>(zinfo->pg_walkers[core_id])->do_dram_page_fault(request,vpn ,core_id , DRAM_BUFFER_FAULT , entry , is_itlb , evict); 
memEv->setBufferAddr(); memEv->setAddr( dram_addr ); if( zinfo->prefetch_set && zinfo->dram_manager->get_memory_usage()<= 0.3 ) { Prefetch(core_id, vpn , request.cycle); } } } } } //###############counter tlb ended#################/ /**********************************************/ if( zinfo->counter_tlb) { //###########****Begin Dynamic***################# if( zinfo->dynamic_threshold ) { //####access information profiling#################### uint32_t proc_id = 0; if( zinfo->proc_fairness) { proc_id = zinfo->cores[req.srcId]->GetProcIdx(); } if( memEv->isBufferAddr() ) { period_access_vec[proc_id]++; Address ppn = memEv->getAddr()>>(zinfo->page_shift); if( period_touch_vec[proc_id].count(ppn)) period_touch_vec[proc_id][ppn]++; else period_touch_vec[proc_id][ppn] = 1; } else {