/* Returns it_index >= 0 if any valid update is made, negative otherwise. */ static int prefetcher_update_tables(struct mod_stack_t *stack, struct mod_t *target_mod) { struct prefetcher_t *pref = target_mod->cache->prefetcher; int ghb_index; unsigned int addr = stack->addr; int it_index, prev; unsigned it_tag; assert(pref); /* Get the index table index */ get_it_index_tag(pref, stack, &it_index, &it_tag); if (it_index < 0) return -1; assert(it_index < pref->it_size); ghb_index = (++(pref->ghb_head)) % pref->ghb_size; /* Remove the current entry in ghb_index, if its valid */ if (pref->ghb[ghb_index].addr > 0) { prev = pref->ghb[ghb_index].prev; /* The prev field may point to either index table or ghb. */ if (pref->ghb[ghb_index].prev_it_ghb == prefetcher_ptr_ghb) { /* prev_it_gb == 0 implies the previous element is in the GHB */ assert(prev >= 0 && prev < pref->ghb_size); assert(pref->ghb[prev].next == ghb_index); pref->ghb[prev].next = -1; } else { assert(pref->ghb[ghb_index].prev_it_ghb == prefetcher_ptr_it); /* The element in index_table may have been replaced since this * entry was put into the ghb. */ if (prev >= 0) { assert(pref->index_table[prev].ptr == ghb_index); pref->index_table[prev].ptr = -1; } } } pref->ghb[ghb_index].addr = 0; pref->ghb[ghb_index].next = -1; pref->ghb[ghb_index].prev = -1; if (pref->index_table[it_index].tag > 0) { /* Replace entry in index_table if necessary. */ if (pref->index_table[it_index].tag != it_tag) { mem_debug(" %lld it_index = %d, old_tag = 0x%x, new_tag = 0x%x" "prefetcher: replace index_table entry\n", stack->id, it_index, pref->index_table[it_index].tag, it_tag); prev = pref->index_table[it_index].ptr; /* The element in the ghb may have gone out by now. */ if (prev >= 0) { /* The element that this is pointing to must be pointing back. 
*/ assert(pref->ghb[prev].prev_it_ghb == prefetcher_ptr_it && pref->ghb[prev].prev == it_index); pref->ghb[prev].prev = -1; } pref->index_table[it_index].tag = 0; pref->index_table[it_index].ptr = -1; } } else { /* Just an initialization. Tag == 0 implies the entry has never been used. */ pref->index_table[it_index].ptr = -1; } /* Add new element into ghb. */ pref->ghb[ghb_index].addr = addr; pref->ghb[ghb_index].next = pref->index_table[it_index].ptr; if (pref->index_table[it_index].ptr >= 0) { prev = pref->index_table[it_index].ptr; assert(pref->ghb[prev].prev_it_ghb == prefetcher_ptr_it && pref->ghb[prev].prev == it_index); pref->ghb[prev].prev_it_ghb = prefetcher_ptr_ghb; pref->ghb[prev].prev = ghb_index; } pref->ghb[ghb_index].prev_it_ghb = prefetcher_ptr_it; pref->ghb[ghb_index].prev = it_index; /* Make the index table entries point to current ghb_index. */ pref->index_table[it_index].tag = it_tag; pref->index_table[it_index].ptr = ghb_index; /* Update pref->ghb_head so that its in the range possible. */ pref->ghb_head = ghb_index; return it_index; }
int dir_entry_lock(struct dir_t *dir, int x, int y, int event, struct mod_stack_t *stack) { struct dir_lock_t *dir_lock; struct mod_stack_t *lock_queue_iter; /* Get lock */ assert(x < dir->xsize && y < dir->ysize); dir_lock = &dir->dir_lock[x * dir->ysize + y]; /* If the entry is already locked, enqueue a new waiter and * return failure to lock. */ if (dir_lock->lock) { /* Enqueue the stack to the end of the lock queue */ stack->dir_lock_next = NULL; stack->dir_lock_event = event; stack->ret_stack->way = stack->way; if (!dir_lock->lock_queue) { /* Special case: queue is empty */ dir_lock->lock_queue = stack; } else { lock_queue_iter = dir_lock->lock_queue; /* FIXME - Code below is the queue insertion algorithm based on stack id. * This causes a deadlock when, for example, A-10 keeps retrying an up-down access and * gets always priority over A-20, which is waiting to finish a down-up access. */ #if 0 while (stack->id > lock_queue_iter->id) { if (!lock_queue_iter->dir_lock_next) break; lock_queue_iter = lock_queue_iter->dir_lock_next; } #endif /* ------------------------------------------------------------------------ */ /* FIXME - Replaced with code below, just inserting at the end of the queue. * But this seems to be what this function was doing before, isn't it? Why * weren't we happy with this policy? 
*/ while (lock_queue_iter->dir_lock_next) lock_queue_iter = lock_queue_iter->dir_lock_next; /* ------------------------------------------------------------------------ */ if (!lock_queue_iter->dir_lock_next) { /* Stack goes at end of queue */ lock_queue_iter->dir_lock_next = stack; } else { /* Stack goes in front or middle of queue */ stack->dir_lock_next = lock_queue_iter->dir_lock_next; lock_queue_iter->dir_lock_next = stack; } } mem_debug(" 0x%x access suspended\n", stack->tag); return 0; } /* Trace */ mem_trace("mem.new_access_block cache=\"%s\" access=\"A-%lld\" set=%d way=%d\n", dir->name, stack->id, x, y); /* Lock entry */ dir_lock->lock = 1; dir_lock->stack_id = stack->id; return 1; }
/* Parse an [Entry <name>] section of the memory configuration file that
 * associates a Southern Islands compute unit with its vector and scalar
 * cache modules. The section must provide either a unified 'Module'
 * variable, or the pair 'DataModule' + 'ConstantDataModule' (but not both).
 * Calls fatal() on malformed configuration; silently ignores (with a
 * warning) entries that refer to a non-existent compute unit. */
void SIGpuMemConfigParseEntry(Timing *self, struct config_t *config, char *section)
{
	char *file_name;
	char *vector_module_name;
	char *scalar_module_name;

	int unified_present;
	int separate_present;

	int compute_unit_id;

	struct si_compute_unit_t *compute_unit;

	/* Get configuration file name */
	file_name = config_get_file_name(config);

	/* Allow these sections in case we quit before reading them. */
	config_var_allow(config, section, "DataModule");
	config_var_allow(config, section, "ConstantDataModule");
	config_var_allow(config, section, "Module");

	unified_present = config_var_exists(config, section, "Module");
	separate_present = config_var_exists(config, section, "DataModule") &&
		config_var_exists(config, section, "ConstantDataModule");

	if (!unified_present && !separate_present)
	{
		/* BUG FIX: message used to read "Please run use '--mem-help'". */
		fatal(
			"%s: section [%s]: variable 'Module' missing.\n"
			"\tPlease use '--mem-help' for more information on the\n"
			"\tconfiguration file format, or consult the Multi2Sim Guide.\n",
			file_name, section);
	}

	/* Both styles present at once is ambiguous - reject it. */
	if (!(unified_present ^ separate_present))
	{
		fatal(
			"%s: section [%s]: invalid combination of modules.\n"
			"\tA Southern Islands entry to the memory hierarchy needs to specify\n"
			"\teither a unified entry for vector and scalar caches (variable \n"
			"\t'Module'), or two separate entries for data and scalar (constant)\n"
			"\tdata (variables 'DataModule' and 'ConstantDataModule'), but not\n"
			"\tboth.\n",
			file_name, section);
	}

	/* Read compute unit */
	compute_unit_id = config_read_int(config, section, "ComputeUnit", -1);
	if (compute_unit_id < 0)
	{
		fatal("%s: section [%s]: invalid or missing value for "
			"'ComputeUnit'", file_name, section);
	}

	/* Check compute unit boundaries */
	if (compute_unit_id >= si_gpu_num_compute_units)
	{
		warning(
			"%s: section [%s] ignored, referring to Southern Islands \n"
			"\tcompute unit %d. This section refers to a compute unit that\n"
			"\tdoes not currently exist. Please review your Southern Islands\n"
			"\tconfiguration file if this is not the desired behavior.\n",
			file_name, section, compute_unit_id);
		return;
	}

	/* Check that entry has not been assigned before */
	compute_unit = si_gpu->compute_units[compute_unit_id];
	if (compute_unit->vector_cache)
	{
		/* BUG FIX: message used to read "your tconfiguration file". */
		fatal(
			"%s: section [%s]: entry from compute unit %d already assigned.\n"
			"\tA different [Entry <name>] section in the memory configuration\n"
			"\tfile has already assigned an entry for this particular compute \n"
			"\tunit. Please review your configuration file to avoid duplicates.\n",
			file_name, section, compute_unit_id);
	}

	/* Read modules */
	if (separate_present)
	{
		vector_module_name = config_read_string(config, section,
			"DataModule", NULL);
		scalar_module_name = config_read_string(config, section,
			"ConstantDataModule", NULL);
	}
	else
	{
		/* Unified entry: both caches share one module. */
		vector_module_name = scalar_module_name =
			config_read_string(config, section, "Module", NULL);
	}
	assert(vector_module_name);
	assert(scalar_module_name);

	/* Assign modules */
	compute_unit->vector_cache = mem_system_get_mod(vector_module_name);
	if (!compute_unit->vector_cache)
	{
		fatal(
			"%s: section [%s]: '%s' is not a valid module name.\n"
			"\tThe given module name must match a module declared in a section\n"
			"\t[Module <name>] in the memory configuration file.\n",
			file_name, section, vector_module_name);
	}
	compute_unit->scalar_cache = mem_system_get_mod(scalar_module_name);
	if (!compute_unit->scalar_cache)
	{
		fatal(
			"%s: section [%s]: '%s' is not a valid module name.\n"
			"\tThe given module name must match a module declared in a section\n"
			"\t[Module <name>] in the memory configuration file.\n",
			file_name, section, scalar_module_name);
	}

	/* Add modules to list of memory entries */
	linked_list_add(arch_southern_islands->mem_entry_mod_list,
		compute_unit->vector_cache);
	linked_list_add(arch_southern_islands->mem_entry_mod_list,
		compute_unit->scalar_cache);

	/* Debug */
	mem_debug("\tSouthern Islands compute unit %d\n", compute_unit_id);
	mem_debug("\t\tEntry for vector mem -> %s\n", compute_unit->vector_cache->name);
	mem_debug("\t\tEntry for scalar mem -> %s\n", compute_unit->scalar_cache->name);
	mem_debug("\n");
}
/* Event handler for a load access to a local memory module.
 *
 * State machine:
 *   LOAD        - record the access and try to coalesce with an in-flight load
 *   LOAD_LOCK   - wait for conflicting in-flight accesses, then launch the
 *                 find-and-lock sequence
 *   LOAD_FINISH - notify witness / event queue and complete the access
 *
 * 'data' is the struct mod_stack_t for this access. */
void mod_handler_local_mem_load(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;
	struct mod_t *mod = stack->mod;

	if (event == EV_MOD_LOCAL_MEM_LOAD)
	{
		struct mod_stack_t *master_stack;

		mem_debug(" %lld %lld 0x%x %s load\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"load\" "
			"state=\"%s:load\" addr=0x%x\n", stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_load);

		/* Coalesce access: if an in-flight load already covers this
		 * address, piggyback on it and wake up directly at LOAD_FINISH. */
		master_stack = mod_can_coalesce(mod, mod_access_load, stack->addr, stack);
		if (master_stack)
		{
			mod->reads++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_LOAD_FINISH);
			return;
		}

		esim_schedule_event(EV_MOD_LOCAL_MEM_LOAD_LOCK, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug(" %lld %lld 0x%x %s load lock\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_lock\"\n",
			stack->id, mod->name);

		/* If there is any older write, wait for it */
		older_stack = mod_in_flight_write(mod, stack);
		if (older_stack)
		{
			mem_debug(" %lld wait for write %lld\n",
				stack->id, older_stack->id);
			/* Retry this same LOCK event when the write completes. */
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* If there is any older access to the same address that this access could not
		 * be coalesced with, wait for it. */
		older_stack = mod_in_flight_address(mod, stack->addr, stack);
		if (older_stack)
		{
			mem_debug(" %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_LOAD_LOCK);
			return;
		}

		/* Call find and lock to lock the port; the child stack returns
		 * control at LOAD_FINISH. */
		new_stack = mod_stack_create_vishesh(stack->id, mod, stack->addr,
			stack->vtl_addr, EV_MOD_LOCAL_MEM_LOAD_FINISH, stack);
		new_stack->read = 1;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_LOAD_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s load finish\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:load_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n",
			stack->id);

		/* Increment witness variable */
		if (stack->witness_ptr)
		{
			(*stack->witness_ptr)++;
		}

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	/* Unknown event: must never happen. */
	abort();
}
void mod_handler_local_mem_find_and_lock(int event, void *data) { struct mod_stack_t *stack = data; struct mod_stack_t *ret = stack->ret_stack; struct mod_t *mod = stack->mod; if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK) { mem_debug(" %lld %lld 0x%x %s find and lock\n", esim_time, stack->id, stack->addr, mod->name); mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock\"\n", stack->id, mod->name); /* Get a port */ mod_lock_port(mod, stack, EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT); return; } if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_PORT) { mem_debug(" %lld %lld 0x%x %s find and lock port\n", esim_time, stack->id, stack->addr, mod->name); mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_port\"\n", stack->id, mod->name); /* Set parent stack flag expressing that port has already been locked. * This flag is checked by new writes to find out if it is already too * late to coalesce. */ ret->port_locked = 1; /* Statistics */ mod->accesses++; if (stack->read) { mod->reads++; mod->effective_reads++; } else { mod->writes++; mod->effective_writes++; /* Increment witness variable when port is locked */ if (stack->witness_ptr) { (*stack->witness_ptr)++; stack->witness_ptr = NULL; } } /* Access latency */ esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION, stack, mod->latency); return; } if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_ACTION) { struct mod_port_t *port = stack->port; assert(port); mem_debug(" %lld %lld 0x%x %s find and lock action\n", esim_time, stack->id, stack->tag, mod->name); mem_trace("mem.access name=\"A-%lld\" state=\"%s:find_and_lock_action\"\n", stack->id, mod->name); /* Release port */ mod_unlock_port(mod, port, stack); /* Continue */ esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH, stack, 0); return; } if (event == EV_MOD_LOCAL_MEM_FIND_AND_LOCK_FINISH) { mem_debug(" %lld %lld 0x%x %s find and lock finish (err=%d)\n", esim_time, stack->id, stack->tag, mod->name, stack->err); mem_trace("mem.access name=\"A-%lld\" 
state=\"%s:find_and_lock_finish\"\n", stack->id, mod->name); mod_stack_return(stack); return; } abort(); }
/* Event handler for a store access to a local memory module.
 *
 * State machine:
 *   STORE        - record the access and try to coalesce with an in-flight store
 *   STORE_LOCK   - wait for any older access, then launch find-and-lock
 *   STORE_FINISH - complete the access
 *
 * Note: unlike loads, the witness variable is incremented as soon as the
 * store is coalesced or its port is locked (via the child stack), not at
 * finish time. 'data' is the struct mod_stack_t for this access. */
void mod_handler_local_mem_store(int event, void *data)
{
	struct mod_stack_t *stack = data;
	struct mod_stack_t *new_stack;
	struct mod_t *mod = stack->mod;

	if (event == EV_MOD_LOCAL_MEM_STORE)
	{
		struct mod_stack_t *master_stack;

		mem_debug("%lld %lld 0x%x %s store\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.new_access name=\"A-%lld\" type=\"store\" "
			"state=\"%s:store\" addr=0x%x\n", stack->id, mod->name, stack->addr);

		/* Record access */
		mod_access_start(mod, stack, mod_access_store);

		/* Coalesce access */
		master_stack = mod_can_coalesce(mod, mod_access_store, stack->addr, stack);
		if (master_stack)
		{
			mod->writes++;
			mod_coalesce(mod, master_stack, stack);
			mod_stack_wait_in_stack(stack, master_stack, EV_MOD_LOCAL_MEM_STORE_FINISH);

			/* Increment witness variable */
			if (stack->witness_ptr)
				(*stack->witness_ptr)++;
			return;
		}

		/* Continue */
		esim_schedule_event(EV_MOD_LOCAL_MEM_STORE_LOCK, stack, 0);
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_STORE_LOCK)
	{
		struct mod_stack_t *older_stack;

		mem_debug(" %lld %lld 0x%x %s store lock\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_lock\"\n",
			stack->id, mod->name);

		/* If there is any older access, wait for it. Stores serialize
		 * behind the immediately preceding access in the list. */
		older_stack = stack->access_list_prev;
		if (older_stack)
		{
			mem_debug(" %lld wait for access %lld\n",
				stack->id, older_stack->id);
			mod_stack_wait_in_stack(stack, older_stack, EV_MOD_LOCAL_MEM_STORE_LOCK);
			return;
		}

		/* Call find and lock; the witness pointer is handed to the
		 * child stack, which increments it when the port is locked. */
		new_stack = mod_stack_create_vishesh(stack->id, mod, stack->addr,
			stack->vtl_addr, EV_MOD_LOCAL_MEM_STORE_FINISH, stack);
		new_stack->read = 0;
		new_stack->witness_ptr = stack->witness_ptr;
		esim_schedule_event(EV_MOD_LOCAL_MEM_FIND_AND_LOCK, new_stack, 0);

		/* Set witness variable to NULL so that retries from the same
		 * stack do not increment it multiple times */
		stack->witness_ptr = NULL;
		return;
	}

	if (event == EV_MOD_LOCAL_MEM_STORE_FINISH)
	{
		mem_debug("%lld %lld 0x%x %s store finish\n", esim_time, stack->id,
			stack->addr, mod->name);
		mem_trace("mem.access name=\"A-%lld\" state=\"%s:store_finish\"\n",
			stack->id, mod->name);
		mem_trace("mem.end_access name=\"A-%lld\"\n",
			stack->id);

		/* Return event queue element into event queue */
		if (stack->event_queue && stack->event_queue_item)
			linked_list_add(stack->event_queue, stack->event_queue_item);

		/* Finish access */
		mod_access_finish(mod, stack);

		/* Return */
		mod_stack_return(stack);
		return;
	}

	/* Unknown event: must never happen. */
	abort();
}
/* restund entry point: parse command-line options, load the configuration
 * file, bring up UDP/TCP transports, load modules, optionally daemonize,
 * open the database, and run the libre main loop until a signal stops it.
 * Returns 0 on success or an errno-style error code. */
int main(int argc, char *argv[])
{
	bool daemon = true;
	int err = 0;
	struct pl opt;

	/* Enable core dumps for post-mortem debugging (result ignored). */
	(void)sys_coredump_set(true);

#ifdef HAVE_GETOPT
	for (;;) {
		const int c = getopt(argc, argv, "dhnf:");
		if (0 > c)
			break;

		switch (c) {

		case 'd':
			force_debug = true;
			restund_log_enable_debug(true);
			break;

		case 'f':
			configfile = optarg;
			break;

		case 'n':
			daemon = false;
			break;

		case '?':
			err = EINVAL;
			/*@fallthrough@*/
		case 'h':
			/* 'h' exits with 0; '?' falls through and exits EINVAL. */
			usage();
			return err;
		}
	}
#else
	(void)argc;
	(void)argv;
#endif

	restund_cmd_subscribe(&cmd_reload);

	/* Raise the file-descriptor limit before opening any sockets. */
	err = fd_setsize(4096);
	if (err) {
		restund_warning("fd_setsize error: %m\n", err);
		goto out;
	}

	err = libre_init();
	if (err) {
		restund_error("re init failed: %m\n", err);
		goto out;
	}

	/* configuration file */
	err = conf_alloc(&conf, configfile);
	if (err) {
		restund_error("error loading configuration: %s: %m\n",
			      configfile, err);
		goto out;
	}

	/* debug config */
	if (!conf_get(conf, "debug", &opt) && !pl_strcasecmp(&opt, "yes"))
		restund_log_enable_debug(true);

	/* udp */
	err = restund_udp_init();
	if (err)
		goto out;

	/* tcp */
	err = restund_tcp_init();
	if (err)
		goto out;

	/* daemon config */
	if (!conf_get(conf, "daemon", &opt) && !pl_strcasecmp(&opt, "no"))
		daemon = false;

	/* module config: module_path defaults to the current directory */
	if (conf_get(conf, "module_path", &opt))
		pl_set_str(&opt, ".");

	err = conf_apply(conf, "module", module_handler, &opt);
	if (err)
		goto out;

	/* daemon */
	if (daemon) {
		err = sys_daemon();
		if (err) {
			restund_error("daemon error: %m\n", err);
			goto out;
		}

		/* Once detached, stderr is no longer useful for logging. */
		restund_log_enable_stderr(false);
	}

	/* database */
	err = restund_db_init();
	if (err) {
		restund_warning("database error: %m\n", err);
		goto out;
	}

	restund_info("stun server ready\n");

	/* main loop */
	err = re_main(signal_handler);

 out:
	/* Tear everything down; safe to call even for partially completed
	 * initialization (goto-cleanup pattern). */
	restund_db_close();
	mod_close();
	restund_udp_close();
	restund_tcp_close();
	conf = mem_deref(conf);
	libre_close();
	restund_cmd_unsubscribe(&cmd_reload);

	/* check for memory leaks */
	tmr_debug();
	mem_debug();

	return err;
}
/* baresip selftest entry point: initialize libre and a minimal UA stack
 * bound to localhost, run the registered test suite, then tear everything
 * down and report memory leaks. Returns 0 on success, a libre error code
 * on failure, or -2 for usage/help. */
int main(int argc, char *argv[])
{
	struct config *config;
	int err;

	err = libre_init();
	if (err)
		return err;

	/* Quiet by default; '-v' re-enables info logging below. */
	log_enable_info(false);

	for (;;) {
		const int c = getopt(argc, argv, "v");
		if (0 > c)
			break;

		switch (c) {

		case '?':
		case 'h':
			usage();
			return -2;

		case 'v':
			log_enable_info(true);
			break;

		default:
			break;
		}
	}

	re_printf("running baresip selftest version %s with %zu tests\n",
		BARESIP_VERSION, ARRAY_SIZE(tests));

	/* note: run SIP-traffic on localhost */
	config = conf_config();
	if (!config) {
		err = ENOENT;
		goto out;
	}
	/* Port 0 lets the OS pick an ephemeral port. */
	str_ncpy(config->sip.local, "127.0.0.1:0", sizeof(config->sip.local));

	/* XXX: needed for ua tests */
	err = ua_init("test", true, true, true, false);
	if (err)
		goto out;

	err = run_tests();
	if (err)
		goto out;

	/* NOTE(review): on success ua_stop_all(true) runs here AND again
	 * after 'out:' - presumably idempotent, but confirm. */
#if 1
	ua_stop_all(true);
#endif

	re_printf("\x1b[32mOK. %zu tests passed successfully\x1b[;m\n",
		ARRAY_SIZE(tests));

 out:
	if (err) {
		warning("test failed (%m)\n", err);
		re_printf("%H\n", re_debug, 0);
	}
	ua_stop_all(true);
	ua_close();

	libre_close();

	/* check for memory leaks */
	tmr_debug();
	mem_debug();

	return err;
}