// Commit (Win32) or unprotect (POSIX) the host pages backing a newly
// allocated guest memory block, zero them, and register the guest page
// range with the memory manager. _addr is the guest address; _size is
// rounded up to a 4 KiB multiple by PAGE_4K in the MemInfo base, which
// stores it as `size`. On failure the emulator is paused.
MemBlockInfo::MemBlockInfo(u64 _addr, u32 _size)
    : MemInfo(_addr, PAGE_4K(_size))
{
    // Host-side address that mirrors guest address _addr.
    void* real_addr = (void*)((u64)Memory.GetBaseAddr() + _addr);
#ifdef _WIN32
    // NOTE(review): presumably the whole arena was reserved up front, so
    // this only commits the pages read/write — TODO confirm.
    mem = VirtualAlloc(real_addr, size, MEM_COMMIT, PAGE_READWRITE);
#else
    // POSIX path: flip the already-mapped pages to read/write; mprotect
    // returns non-zero on failure.
    if (::mprotect(real_addr, size, PROT_READ | PROT_WRITE))
    {
        mem = nullptr;
    }
    else
    {
        mem = real_addr;
    }
#endif
    if (mem != real_addr)
    {
        LOG_ERROR(MEMORY, "Memory allocation failed (addr=0x%llx, size=0x%llx)", addr, size);
        Emu.Pause();
    }
    else
    {
        // Mark the guest pages as allocated and start from zeroed memory.
        Memory.RegisterPages(_addr, PAGE_4K(_size));
        memset(mem, 0, size);
    }
}
// Construct one simulated core: wire up its private cache hierarchy (or a
// direct path to memory when core caches are disabled), prime the first
// trace request, and register per-core statistics.
//
// configs      - global simulator configuration
// coreid       - index of this core (used for stat names and page mapping)
// trace_fname  - instruction/memory trace file for this core
// send_next    - request sink used when the core has no private caches
// llc          - shared last-level cache, may be nullptr
// cachesys     - cache system shared by all caches
// memory       - memory backend (also provides the per-core page allocator)
Core::Core(const Config& configs, int coreid, const char* trace_fname,
           function<bool(Request)> send_next, Cache* llc,
           std::shared_ptr<CacheSystem> cachesys, MemoryBase& memory)
    : id(coreid),
      no_core_caches(!configs.has_core_caches()),
      no_shared_cache(!configs.has_l3_cache()),
      llc(llc), trace(trace_fname), memory(memory)
{
    // Build cache hierarchy.
    if (no_core_caches) {
        // No private caches: requests go straight to the next level.
        send = send_next;
    } else {
        // caches[0] is the L2, caches[1] the L1; requests enter at the L1.
        caches.emplace_back(new Cache(
            l2_size, l2_assoc, l2_blocksz, l2_mshr_num,
            Cache::Level::L2, cachesys));
        caches.emplace_back(new Cache(
            l1_size, l1_assoc, l1_blocksz, l1_mshr_num,
            Cache::Level::L1, cachesys));
        send = bind(&Cache::send, caches[1].get(), placeholders::_1);
        if (llc != nullptr) {
            caches[0]->concatlower(llc);
        }
        caches[1]->concatlower(caches[0].get());
    }

    // Fetch the first trace request. Filtered traces are used when the core
    // caches are disabled; the page-allocator remap is common to both paths
    // (the original duplicated it in each branch).
    if (no_core_caches) {
        more_reqs = trace.get_filtered_request(bubble_cnt, req_addr, req_type);
    } else {
        more_reqs = trace.get_unfiltered_request(bubble_cnt, req_addr, req_type);
    }
    req_addr = memory.page_allocator(req_addr, id);

    // Expected instruction limit used for weighted-speedup accounting.
    expected_limit_insts = configs.get_expected_limit_insts();

    // regStats
    record_cycs.name("record_cycs_core_" + to_string(id))
               .desc("Record cycle number for calculating weighted speedup. (Only valid when expected limit instruction number is non zero in config file.)")
               .precision(0)
               ;
    record_insts.name("record_insts_core_" + to_string(id))
                .desc("Retired instruction number when record cycle number. (Only valid when expected limit instruction number is non zero in config file.)")
                .precision(0)
                ;
    memory_access_cycles.name("memory_access_cycles_core_" + to_string(id))
                        .desc("memory access cycles in memory time domain")
                        .precision(0)
                        ;
    memory_access_cycles = 0;
    cpu_inst.name("cpu_instructions_core_" + to_string(id))
            .desc("cpu instruction number")
            .precision(0)
            ;
    cpu_inst = 0;
}
// Reset the block to an empty state: no guest range, and `mem` pointing
// at the host address for guest address zero.
void MemoryBlock::Init()
{
    range_start = 0;
    range_size  = 0;
    mem = Memory.GetMemFromAddr(0);
}
bool MemoryBlock::SetMemFromAddr(void* src, const u64 addr, const u32 size) { if(!IsMyAddress(addr) || FixAddr(addr) + size > GetSize()) return false; // mem cpy(GetMem(FixAddr(addr)), src, size); return Memory.CopyFromReal((u32)addr, src, size); }
bool MemoryBlock::GetMemFromAddr(void* dst, const u64 addr, const u32 size) { if(!IsMyAddress(addr) || FixAddr(addr) + size > GetSize()) return false; // mem cpy(dst, GetMem(FixAddr(addr)), size); return Memory.CopyToReal(dst, (u32)addr, size); }
bool VirtualMemoryBlock::Write128(const u64 addr, const u128 value) { u64 realAddr; if(!getRealAddr(addr, realAddr)) return false; Memory.Write128(realAddr, value); return true; }
bool VirtualMemoryBlock::Read128(const u64 addr, u128* value) { u64 realAddr; if(!getRealAddr(addr, realAddr)) return false; *value = Memory.Read128(realAddr); return true; }
bool VirtualMemoryBlock::Read32(const u64 addr, u32* value) { u64 realAddr = getRealAddr(addr); if (realAddr == 0) return false; *value = Memory.Read32(realAddr); return true; }
// (Re)initialize this block's host backing. A zero-sized range just points
// `mem` at the host address for range_start; otherwise any previous backing
// is freed and a fresh MemBlockInfo is committed for the range.
void MemoryBlock::InitMemory()
{
    if (!range_size)
    {
        mem = Memory.GetMemFromAddr(range_start);
        return;
    }

    // Release the old backing before committing the new one.
    Free();
    mem_inf = new MemBlockInfo(range_start, range_size);
    mem = (u8*)mem_inf->mem;
}
// Release the host backing of this block: unregister the guest page range,
// then decommit the pages on Win32 or drop all access (PROT_NONE) on POSIX,
// leaving the mapping itself in place. Pauses emulation on failure.
void MemBlockInfo::Free()
{
    if (mem)
    {
        Memory.UnregisterPages(addr, size);
#ifdef _WIN32
        // VirtualFree returns 0 on failure.
        if (!VirtualFree(mem, size, MEM_DECOMMIT))
#else
        // No munmap here — the pages are only made inaccessible, so the
        // region stays reserved for later reuse. mprotect returns non-zero
        // on failure.
        if (::mprotect(mem, size, PROT_NONE))
#endif
        {
            LOG_ERROR(MEMORY, "Memory deallocation failed (addr=0x%llx, size=0x%llx)", addr, size);
            Emu.Pause();
        }
    }
}
// Write a 128-bit value at virtual address `addr`. Returns false when the
// address does not translate to a real address in this block.
//
// FIX: the original performed the Memory.Write128 through the translated
// address *before* checking it, so a failed translation still wrote through
// address 0. Validate first, then write (matches the Read32 pattern).
bool VirtualMemoryBlock::Write128(const u64 addr, const u128 value)
{
    const u64 realAddr = getRealAddr(addr);
    if (realAddr == 0)
        return false; // unmapped — nothing written

    Memory.Write128(realAddr, value);
    return true;
}
// Read a 128-bit value from virtual address `addr` into *value. Returns
// false when the address does not translate to a real address in this block.
//
// FIX: the original called Memory.Read128 through the translated address
// *before* checking it, so a failed translation still read from address 0
// and clobbered *value. Validate first, then read.
bool VirtualMemoryBlock::Read128(const u64 addr, u128* value)
{
    const u64 realAddr = getRealAddr(addr);
    if (realAddr == 0)
        return false; // unmapped — *value left unchanged

    *value = Memory.Read128(realAddr);
    return true;
}
STDMETHODIMP CGfxAndroidISurfOverlay::Open(DWORD dwWidth, DWORD dwHeight, DWORD dwBuffers, const DWORD *pdwFourCC, IMcPreInit* pPrepareData, RECT* pDst) { UINT32 CamIFState = 0; int res = 0; if(dwWidth==0 || dwHeight==0) { DP("[CGfxAndroidISurfOverlay::Open] dwWidth==0 || dwHeight==0\n"); return E_FAIL; } if(m_bOpen) { DP("[CGfxAndroidISurfOverlay::Open] Invalid call \n"); return E_FAIL; } m_dwFourCC = 0; m_dpFormat = 0; m_bytesPerPixel = 0.0f; m_isurface = NULL; m_pOverlay = NULL; m_dwWidth = dwWidth; m_dwHeight = dwHeight; ZeroMemory(&m_rectSrc,sizeof(m_rectSrc)); ZeroMemory(&m_rectDst,sizeof(m_rectDst)); SetRect(&m_rectSrc, 0, 0 ,m_dwWidth, m_dwHeight); if(pDst && (pDst->right - pDst->left != 0) && (pDst->bottom - pDst->top != 0)) { m_rectDst = *pDst; } if(pdwFourCC) m_pdwFourCCList = pdwFourCC; else m_pdwFourCCList = const_dwFourCC; for(DWORD i=0;m_dwFourCC=m_pdwFourCCList[i];i++) { //D3DDISPLAYMODE dpMode; switch(m_dwFourCC) { case MAKEFOURCC('Y','V','1','2'): #ifndef RENESAS_ANDROID #if ANDROID_VER==23 m_dpFormat =HAL_PIXEL_FORMAT_YV12; #else m_dpFormat =HAL_PIXEL_FORMAT_YCbCr_420_P; #endif #else m_dpFormat = OVERLAY_FORMAT_YCbCr_420_P; #endif m_bytesPerPixel = 1.5f; break; case MAKEFOURCC('N','V','1','2'): #ifndef RENESAS_ANDROID m_dpFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; #else m_dpFormat = OVERLAY_FORMAT_YCbCr_420_SP; #endif m_bytesPerPixel = 1.5f; break; default: break; } if(m_dpFormat != 0) break; } DP("Open :%x ,w,h:%d,%d\n", m_dwFourCC, m_dwWidth, m_dwHeight); if(m_dpFormat == 0) { DP("[CGfxAndroidISurfOverlay::Open] m_dpFormat == 0 \n"); return E_FAIL; } /* m_heap = new MemoryHeapBase(m_dwWidth*m_dwHeight*m_bytesPerPixel* ISURFACEOVL_BACKBUFF_NUM); if(!m_heap) { LOGE("ERROR !!! 
null heap \n"); } */ #if defined(RENESAS_ANDROID) int ret; unsigned long phys_start; unsigned long phys_size = 6*1024*1024; int test_count; m_renesas_fd = open("/dev/siz", O_RDWR | O_NONBLOCK | O_NOCTTY); if(m_renesas_fd < 0) return E_FAIL; DP("========= mmap dynamic ========== \n"); ioctl(m_renesas_fd, EMXX_CIDANA_PHYSTART, &m_renesas_phymemory_addr); ioctl(m_renesas_fd, EMXX_CIDANA_PHYSIZE, &m_renesas_memory_size); m_renesas_memory_addr = (unsigned char *)mmap(0, m_renesas_memory_size, PROT_READ | PROT_WRITE, MAP_SHARED, m_renesas_fd, m_renesas_phymemory_addr); DP("mmap virtual addr: %p, phys_start:%p, phys_size=%x \n", m_renesas_memory_addr, m_renesas_phymemory_addr, m_renesas_memory_size); #endif for(int i=0;i<ISURFACEOVL_BACKBUFF_NUM;i++) { #ifdef INFOMAX m_heap = new MemoryHeapBase("/dev/pmem_adsp", m_dwWidth*m_dwHeight*m_bytesPerPixel); if(m_heap->heapID() >= 0) { m_PBuffer = new MemoryBase(m_heap, 0, m_dwWidth*m_dwHeight*m_bytesPerPixel); m_pBuff[i] = (unsigned char*)(m_PBuffer->getMemory(NULL, NULL)->base()); DP("malloc m_pBuff[%d]:%p \n",i,m_pBuff[i]); } else { DP("failed to new pmem_adsp"); return E_FAIL; } #elif defined(RENESAS_ANDROID) if(i* m_dwWidth*m_dwHeight*m_bytesPerPixel > m_renesas_memory_size) { DP("physical memory isn't enough i:%d eachBuf:%d \n",i,m_dwWidth*m_dwHeight*m_bytesPerPixel); return E_FAIL; } m_pBuff[i] = m_renesas_memory_addr + (LONG)(i* m_dwWidth*m_dwHeight*m_bytesPerPixel); #else m_pBuff[i] = (unsigned char*)malloc(m_dwWidth*m_dwHeight*m_bytesPerPixel); #endif LOGE("m_pBuff[%d] :%p",i,m_pBuff[i]); memset(m_pBuff[i],0, i*m_dwWidth*m_dwHeight*m_bytesPerPixel); if(!m_pBuff[i]) { DP("[<<<<<<<<<<<<<<CGfxAndroidISurfOverlay]malloc bufer %d failed \n",i); return E_FAIL; } } DP("[<<<<<<<<<<<<<<CGfxAndroidISurfOverlay]Open w:%d h:%d \n",m_dwWidth, m_dwHeight); m_bOpen = TRUE; return S_OK; }