// Resolve a miss on Ebb `id`: return this CPU's representative, constructing
// and registering one on first fault.  The id must be this Ebb's static id.
static T& HandleFault(EbbId id) {
  kassert(id == T::static_id);
  {
    // Fast path: read-only lookup of an already-constructed rep.
    LocalIdMap::const_accessor accessor;
    auto found = local_id_map->find(accessor, id);
    kassert(found);
    // Cast to a const reference — casting by value would copy the whole map.
    const auto& rep_map = boost::any_cast<const rep_map_t&>(accessor->second);
    auto it = rep_map.find(my_cpu());
    if (it != rep_map.end()) {
      cache_ref(id, *it->second);
      return *it->second;
    }
  }
  // Slow path: no rep for this CPU yet; take exclusive access and build one.
  //TODO: make the rep_map thread safe so we can acquire r/o access
  LocalIdMap::accessor accessor;
  auto found = local_id_map->find(accessor, id);
  kassert(found);
  // BUG FIX: the original cast `boost::any_cast<rep_map_t>` returned a *copy*
  // of the map, so the insertion below mutated a temporary that was thrown
  // away — the rep was never registered and a new T leaked on every fault.
  // Casting to rep_map_t& mutates the map stored inside the boost::any.
  auto& rep_map = boost::any_cast<rep_map_t&>(accessor->second);
  // Re-check under the exclusive accessor in case a rep was installed
  // between releasing the read lock and acquiring the write lock.
  auto it = rep_map.find(my_cpu());
  if (it != rep_map.end()) {
    cache_ref(id, *it->second);
    return *it->second;
  }
  auto rep = new T;
  rep_map[my_cpu()] = rep;
  cache_ref(id, *rep);
  return *rep;
}
static int tailcall_frame_sniffer (const struct frame_unwind *self, struct frame_info *this_frame, void **this_cache) { struct frame_info *next_frame; int next_levels; struct tailcall_cache *cache; /* Inner tail call element does not make sense for a sentinel frame. */ next_frame = get_next_frame (this_frame); if (next_frame == NULL) return 0; cache = cache_find (next_frame); if (cache == NULL) return 0; cache_ref (cache); next_levels = existing_next_levels (this_frame, cache); /* NEXT_LEVELS is -1 only in dwarf2_tailcall_sniffer_first. */ gdb_assert (next_levels >= 0); gdb_assert (next_levels <= cache->chain_levels); if (next_levels == cache->chain_levels) { cache_unref (cache); return 0; } *this_cache = cache; return 1; }
/* Store-event callback: run the access through the cache model and
   bump the store / store-miss counters.  */
void data_write(ev_data_write* e)
{
    int hit = cache_ref(e->addr, e->len);

    stores++;
    if (!hit)
        smisses++;
}
/* Load-event callback: run the access through the cache model and
   bump the load / load-miss counters.  */
void data_read(ev_data_read* e)
{
    int hit = cache_ref(e->addr, e->len);

    loads++;
    if (!hit)
        lmisses++;
}
void HELPER(dinero_access)(uint32_t addr,uint32_t rw, uint32_t size) { if(VPMU_enabled) { /* we first calculate IRQ into account int mode = env->uncached_cpsr & CPSR_M; if(mode == ARM_CPU_MODE_IRQ) return; */ #if 0 char *state = &(GlobalVPMU.state); char *timer_interrupt_exception = &(GlobalVPMU.timer_interrupt_exception); /* In timer interrupt state */ if(unlikely(*state == 1 && *timer_interrupt_exception == 0)) { return; } #endif if(vpmu_simulator_status(&GlobalVPMU, VPMU_DCACHE_SIM)) { if(rw == 0xff)/* PLD */ { //cache_ref(env->regs[rd]+shift, D4XREAD , 32); addr = env->regs[(addr>>16)&0xf] + (addr&0xfff); rw = D4XREAD; } //evo0209 if (GlobalVPMU.cpu_model == 1) { //uint32_t pa = cpu_get_phys_page_debug(env, addr); //uint32_t pt = pa & 0xFFFFE000; //cortex-a9 L1 cache has 256-entry, blocksize 32bytes, mask 13bits //addr = (addr & 0x00001FFF) | pt; addr = cpu_get_phys_page_debug(env, addr); } //evo0209 temp test if (GlobalVPMU.iomem_test == 1) { //printf("helper I/O addr: %x", addr); GlobalVPMU.iomem_test = 0; GlobalVPMU.iomem_qemu++; } else cache_ref(addr, rw, size); //chiaheng #if 0 /* Considering the performance impact of I/O memory accesses. */ if(cpu_get_phys_page_debug(env, addr) >= SYSTEM_RAM_START && cpu_get_phys_page_debug(env, addr) < SYSTEM_RAM_END){ /* The cache memory simulation. */ cache_ref(addr, rw, size); } else { /* Accounting for I/O memory accesses. */ GlobalVPMU.iomem_count++; #if 0 /* Debuggin. */ if (cpu_get_phys_page_debug(env, addr) >= VPMU_BASE_ADDR && cpu_get_phys_page_debug(env, addr) < (VPMU_BASE_ADDR+VPMU_IOMEM_SIZE)) { printf("%s: Address (virtual, physical) = (%x, %x).\n", __FUNCTION__, addr, cpu_get_phys_page_debug(env, addr)); //fflush(stdout); } #endif } #endif } }