void MQMigrator::Translate( uint64_t address, uint64_t *row, uint64_t *col, uint64_t *bank, uint64_t *rank, uint64_t *channel, uint64_t *subarray ) { /* Use the default -- We will only change the channel if needed. */ AddressTranslator::Translate( address, row, col, bank, rank, channel, subarray ); /** std::cout << "\nMQMigrator::Translate address " << std::hex << address << " To:\n" \ "old channel " << *channel << "\n" \ "rank " << *channel << "\n" \ "bank " << *channel << "\n" \ "row " << *channel << "\n" \ "col " << *channel << "\n" \ "subarray " << *channel << "\n" \ << std::endl; */ /* This should be a unique key for this address. */ NVMAddress keyAddress; keyAddress.SetTranslatedAddress( *row, *col, *bank, *rank, *channel, *subarray ); keyAddress.SetPhysicalAddress( address ); uint64_t key = GetAddressKey( keyAddress ); ncounter_t r_channel = *channel; if( access_times.count( r_channel)==0) { access_times[ r_channel]= 1; } else access_times[r_channel]++; /* Check if the page was migrated and migration is complete. */ if( migrationMap.count( key ) != 0 ) { if( migrationState[key] == MQ_MIGRATION_DONE ) { *channel = migrationMap[key]; // std::cout << "\n------new channel " << std::hex << *channel <<std::endl; migratedAccesses++; } } ncounter_t m_channel = *channel; if( migrate_access_times.count( m_channel)==0) { migrate_access_times[ m_channel]= 1; } else migrate_access_times[m_channel]++; }
void Migrator::Translate( uint64_t address, uint64_t *row, uint64_t *col, uint64_t *bank, uint64_t *rank, uint64_t *channel, uint64_t *subarray ) { /* Use the default -- We will only change the channel if needed. */ AddressTranslator::Translate( address, row, col, bank, rank, channel, subarray ); /* This should be a unique key for this address. */ NVMAddress keyAddress; keyAddress.SetTranslatedAddress( *row, *col, *bank, *rank, *channel, *subarray ); keyAddress.SetPhysicalAddress( address ); uint64_t key = GetAddressKey( keyAddress ); /* Check if the page was migrated and migration is complete. */ if( migrationMap.count( key ) != 0 ) { if( migrationState[key] == MIGRATION_DONE ) { *channel = migrationMap[key]; migratedAccesses++; } } }
/*
 * This trace is printed from nvmain.cpp. The format is:
 *
 * CYCLE OP ADDRESS DATA THREADID
 *
 * Reads one trace line per call and fills in *nextAccess. Returns false
 * when no trace file is available or EOF is reached.
 * NOTE(review): this span ends mid-function -- the remaining fields
 * (THREADID) and the final return are handled past the visible chunk.
 */
bool NVMainTraceReader::GetNextAccess( TraceLine *nextAccess )
{
    /* If there is no trace file, we can't do anything. */
    if( traceFile == "" )
    {
        std::cerr << "No trace file specified!" << std::endl;
        return false;
    }

    /* If the trace file is not open, open it if possible (lazy open). */
    if( !trace.is_open( ) )
    {
        trace.open( traceFile.c_str( ) );
        if( !trace.is_open( ) )
        {
            std::cerr << "Could not open trace file: " << traceFile << "!" << std::endl;
            return false;
        }
    }

    std::string fullLine;

    /* We will read in a full line and fill in these values. */
    unsigned int cycle = 0;
    OpType operation = READ;
    uint64_t address;
    NVMDataBlock dataBlock;
    unsigned int threadId = 0;

    /* There are no more lines in the trace... Send back a "dummy" line:
       a NOP at a recognizable poison address. */
    getline( trace, fullLine );
    if( trace.eof( ) )
    {
        NVMAddress nAddress;
        nAddress.SetPhysicalAddress( 0xDEADC0DEDEADBEEFULL );
        nextAccess->SetLine( nAddress, NOP, 0, dataBlock, 0 );
        std::cout << "NVMainTraceReader: Reached EOF!" << std::endl;
        return false;
    }

    std::istringstream lineStream( fullLine );
    std::string field;
    unsigned char fieldId = 0;

    /*
     * Again, the format is : CYCLE OP ADDRESS DATA THREADID
     * So the field ids are :   0    1    2      3     4
     * Consecutive spaces produce empty tokens, which are skipped.
     */
    while( getline( lineStream, field, ' ' ) )
    {
        if( field != "" )
        {
            if( fieldId == 0 )
                cycle = atoi( field.c_str( ) );
            else if( fieldId == 1 )
            {
                if( field == "R" )
                    operation = READ;
                else if( field == "W" )
                    operation = WRITE;
                else
                    std::cout << "Warning: Unknown operation `" << field << "'" << std::endl;
            }
            else if( fieldId == 2 )
            {
                /* Address is printed in hexadecimal. */
                std::stringstream fmat;
                fmat << std::hex << field;
                fmat >> address;
            }
            else if( fieldId == 3 )
            {
                int byte;
                int start, end;

                /* Assumes 64-byte memory words.... */
                // TODO: Drop assumption and use field.length()/2 bytes
                assert(sizeof(uint64_t)*8 == 64);
                assert(field.length() == 128); // 1 char per 4 bits

                dataBlock.SetSize( 64 );

                uint64_t *rawData = reinterpret_cast<uint64_t*>(dataBlock.rawData);
                memset(rawData, 0, 64);

                /* Parse 16 hex chars (one 64-bit word) per iteration,
                   walking backwards from the end of the string:
                   rawData[0] receives the LAST 16 characters. */
                for( byte = 0; byte < 8; byte++ )
                {
                    std::stringstream fmat;

                    end = (int)field.length( ) - 16*byte;
                    start = (int)field.length( ) - 16*byte - 16;
                    fmat << std::hex << field.substr( start, end - start );
                    fmat >> rawData[byte];
                }
            }
/*
 * Parse the trace file and find the next access to main memory. May read
 * multiple lines before a memory access is returned.
 * NOTE(review): this span ends mid-function -- the loop exit path after
 * `break` and the function's final return are past the visible chunk.
 */
bool RubyTraceReader::GetNextAccess( TraceLine *nextAccess )
{
    /* If trace file is not specified, we can't know what to do. */
    if( traceFile == "" )
    {
        std::cerr << "No trace file specified!" << std::endl;
        return false;
    }

    /* If trace file is not opened, we can't read from it (lazy open). */
    if( !trace.is_open( ) )
    {
        trace.open( traceFile.c_str() );
        if( !trace.is_open( ) )
        {
            std::cerr << "Could not open trace file: " << traceFile << "!" << std::endl;
            return false;
        }
    }

    /*
     * Read the next few lines from the file, looking for transactions that end
     * and do not end at one of the caches. Once the first one is found, return it.
     */
    std::string fullLine;

    /* We will break at errors / finishing points in the loop. */
    while( 1 )
    {
        NVMDataBlock dataBlock;
        NVMDataBlock oldDataBlock;
        unsigned int threadId;

        threadId = 0;

        /*
         * Ensure we are not at the end of the file before reading. On EOF,
         * hand back a dummy NOP line at a recognizable poison address.
         */
        if( trace.eof( ) )
        {
            NVMAddress nAddress;
            nAddress.SetPhysicalAddress( 0xDEADC0DEDEADBEEFULL );
            nextAccess->SetLine( nAddress, NOP, 0, dataBlock, oldDataBlock, 0 );
            return false;
        }

        getline( trace, fullLine );

        /*
         * Insert the full line into a string stream. We will use the string
         * stream to separate the fields in the trace file into the data we need.
         */
        std::istringstream lineStream( fullLine );
        std::string field;
        unsigned char fieldId;

        /*
         * Interesting fields are the cycles, the unit issuing the trace command,
         * the command being executed on the memory, the address of the memory
         * operation, and the operation -- such as load/store/fetch.
         */
        std::string cycle, unit, command, address, memory, operation;
        uint64_t decAddress;
        unsigned int currentCycle = 0;
        unsigned int cycles = 0;
        OpType memOp;

        /*
         * In a Ruby Trace, most of the fields are not necessary for main
         * memory purposes. We will increment the field ID and use it as a
         * reference to determine what we are interested in. In this case,
         * the format is as follows:
         *
         * 207 1 -1 Seq Done > [0x7ba4ce80, line 0x7ba4ce80] 206 cycles NULL IFETCH No
         *
         *  0  1  2  3   4   5      6            7      8     9    10    11    12   13
         *
         * Here we are interested in fields 3, 4, 6, 11, and 12. Field 3 is
         * the unit generating the memory request. Field 4 is the unit's
         * command. Field 6 is the address. Field 11 is the memory region where
         * the result ends. Field 12 is the memory operation.
         */
        fieldId = 0;
        while( getline( lineStream, field, ' ' ) )
        {
            if( field != "" )
            {
                if( fieldId == 0 )
                    currentCycle = atoi( field.c_str( ) );
                else if( fieldId == 3 )
                    unit = field;
                else if( fieldId == 4 )
                    command = field;
                else if( fieldId == 6 )
                    /* Strip the surrounding '[' and trailing ',' punctuation. */
                    address = field.substr( 1, field.length( ) - 2 );
                else if( fieldId == 9 )
                    cycles = atoi( field.c_str( ) );
                else if( fieldId == 11 )
                    memory = field;
                else if( fieldId == 12 )
                    operation = field;

                /* Only non-empty tokens advance the field id. */
                fieldId++;
            }
        }

        /*
         * If the unit generating the result is "Seq," it is the GEMS sequencer
         * stepping through the instructions. We want to find the sequencer
         * executing the "Done" command.
         * If the memory is "NULL," this is main memory. Other possibilities are
         * "L1Cache" or "L2Cache" for example.
         *
         * If it is a main memory request, we need to convert to either a read
         * or write. Ruby uses LD for load, IFETCH for instruction fetch, and
         * ST for store. Both LD and IFETCH map to a read command for the
         * simulator. Stores (and ATOMICs) map to write commands.
         */
        if( unit == "Seq" )
        {
            if( command == "Done" && memory == "NULL" )
            {
                /* Address field is hexadecimal. */
                std::stringstream fmat;
                fmat << std::hex << address;
                fmat >> decAddress;

                if( operation == "IFETCH" || operation == "LD" )
                    memOp = READ;
                else if( operation == "ST" || operation == "ATOMIC" )
                    memOp = WRITE;
                else
                {
                    memOp = NOP;
                    std::cout << "RubyTraceReader: Unknown memory operation! "
                              << operation << std::endl;
                }

                NVMAddress nAddress;
                nAddress.SetPhysicalAddress( decAddress );
                /* Issue cycle = completion cycle minus the reported latency. */
                nextAccess->SetLine( nAddress, memOp, currentCycle - cycles,
                                     dataBlock, oldDataBlock, threadId );
                break;
            }
        }
    }
void CommonMigrator::ChooseVictim( Migrator *at, NVMAddress& /*promotee*/, NVMAddress& victim ) { /* * Since this is no method called after every module in the system is * initialized, we check here to see if we have queried the memory system * about the information we need. */ if( !queriedMemory ) { /* * Our naive replacement policy will simply circle through all the pages * in the fast memory. In order to count the pages we need to count the * number of rows in the fast memory channel. We do this by creating a * dummy request which would route to the fast memory channel. From this * we can grab it's config pointer and calculate the page count. */ NVMainRequest queryRequest; //set query request's channel to promotionChannel queryRequest.address.SetTranslatedAddress( 0, 0, 0, 0, promotionChannel, 0 ); queryRequest.address.SetPhysicalAddress( 0 ); queryRequest.type = READ; queryRequest.owner = this; NVMObject *curObject = NULL; //search all children of parent , only if find the child node that can cast to SubArray safely , //and assign it to curObject(find Subarray Object ) FindModuleChildType( &queryRequest, SubArray, curObject, parent->GetTrampoline( ) ); SubArray *promotionChannelSubarray = NULL; promotionChannelSubarray = dynamic_cast<SubArray *>( curObject ); assert( promotionChannelSubarray != NULL ); Params *p = promotionChannelSubarray->GetParams( ); promotionChannelParams = p; totalPromotionPages = p->RANKS * p->BANKS * p->ROWS; currentPromotionPage = totalPromotionPages; if( p->COLS != numCols ) { std::cout << "Warning: Page size of fast and slow memory differs." << std::endl; } queriedMemory = true; } /* * From the current promotion page, simply craft some translated address together * as the victim address. 
*/ uint64_t victimRank, victimBank, victimRow, victimSubarray, subarrayCount; ncounter_t promoPage = currentPromotionPage; victimRank = promoPage % promotionChannelParams->RANKS; promoPage >>= NVM::mlog2( promotionChannelParams->RANKS ); victimBank = promoPage % promotionChannelParams->BANKS; promoPage >>= NVM::mlog2( promotionChannelParams->BANKS ); subarrayCount = promotionChannelParams->ROWS / promotionChannelParams->MATHeight; victimSubarray = promoPage % subarrayCount; promoPage >>= NVM::mlog2( subarrayCount ); victimRow = promoPage; victim.SetTranslatedAddress( victimRow, 0, victimBank, victimRank, promotionChannel, victimSubarray ); uint64_t victimAddress = at->ReverseTranslate( victimRow, 0, victimBank, victimRank, promotionChannel, victimSubarray ); victim.SetPhysicalAddress( victimAddress ); if( currentPromotionPage>0) currentPromotionPage = (currentPromotionPage - 1) % totalPromotionPages; else currentPromotionPage = ( currentPromotionPage + totalPromotionPages)% totalPromotionPages; }