static int64_t eval_ifunc(int64_t val, enum ifunc func) { int errtype; uint64_t uval = (uint64_t)val; int64_t rv; switch (func) { case IFUNC_ILOG2E: case IFUNC_ILOG2W: errtype = (func == IFUNC_ILOG2E) ? ERR_NONFATAL : ERR_WARNING; if (!is_power2(uval)) nasm_error(errtype, "ilog2 argument is not a power of two"); /* fall through */ case IFUNC_ILOG2F: rv = ilog2_64(uval); break; case IFUNC_ILOG2C: rv = (uval < 2) ? 0 : ilog2_64(uval-1) + 1; break; default: nasm_panic(0, "invalid IFUNC token %d", func); rv = 0; break; } return rv; }
/* parse section attributes */ void section_attrib(char *name, char *attr, int pass, uint32_t *flags_and, uint32_t *flags_or, uint64_t *align, int *type) { char *opt, *val, *next; opt = nasm_skip_spaces(attr); if (!opt || !*opt) return; while ((opt = nasm_opt_val(opt, &val, &next))) { if (!nasm_stricmp(opt, "align")) { *align = atoi(val); if (*align == 0) { *align = SHA_ANY; } else if (!is_power2(*align)) { nasm_error(ERR_NONFATAL, "section alignment %"PRId64" is not a power of two", *align); *align = SHA_ANY; } } else if (!nasm_stricmp(opt, "alloc")) { *flags_and |= SHF_ALLOC; *flags_or |= SHF_ALLOC; } else if (!nasm_stricmp(opt, "noalloc")) { *flags_and |= SHF_ALLOC; *flags_or &= ~SHF_ALLOC; } else if (!nasm_stricmp(opt, "exec")) { *flags_and |= SHF_EXECINSTR; *flags_or |= SHF_EXECINSTR; } else if (!nasm_stricmp(opt, "noexec")) { *flags_and |= SHF_EXECINSTR; *flags_or &= ~SHF_EXECINSTR; } else if (!nasm_stricmp(opt, "write")) { *flags_and |= SHF_WRITE; *flags_or |= SHF_WRITE; } else if (!nasm_stricmp(opt, "tls")) { *flags_and |= SHF_TLS; *flags_or |= SHF_TLS; } else if (!nasm_stricmp(opt, "nowrite")) { *flags_and |= SHF_WRITE; *flags_or &= ~SHF_WRITE; } else if (!nasm_stricmp(opt, "progbits")) { *type = SHT_PROGBITS; } else if (!nasm_stricmp(opt, "nobits")) { *type = SHT_NOBITS; } else if (pass == 1) { nasm_error(ERR_WARNING, "Unknown section attribute '%s' ignored on" " declaration of section `%s'", opt, name); } opt = next; } }
/*
 * Configure this CAM data store: record the decoder it belongs to, the
 * replacement policy, the set count and block size, then reset statistics
 * and mark the buffer as allocated.
 *
 * NOTE(review): VLB_num_sets is accepted but unused here — kept for
 * interface compatibility with callers.
 */
void CAMDataDePrivate::alloc(int decoder_id, CAMReplacePolicy repl_policy, int num_sets, int blk_byte, int VLB_num_sets)
{
    // Preconditions: block size must be a power of two and at least
    // one set must exist.
    assert(is_power2(blk_byte));
    assert(num_sets > 0);

    m_decoder_id  = decoder_id;
    m_repl_policy = repl_policy;
    m_num_sets    = num_sets;
    m_blk_byte    = blk_byte;

    clearStats();          // counters restart for the new configuration

    m_buf_alloc = true;    // buffer is now considered allocated
    (void)VLB_num_sets;    // explicitly unused
}
/*
 * Entry point: ./fractal_pattern max_col_cnt max_col_offset
 *
 * max_col_cnt    - number of columns; must be a positive power of two
 * max_col_offset - column offset passed straight to pattern()
 *
 * Returns 0 on success, 1 on bad usage or bad arguments.
 *
 * Fix: the original called strtol() without checking the end pointer, so
 * a non-numeric argument silently parsed as 0 and fell through to the
 * power-of-two test. Both arguments are now validated.
 */
int main(int argc, char * argv[])
{
    if (3 != argc) {
        printf("Usage: ./fractal_pattern max_col_cnt max_col_offset\n");
        return 1;
    }

    char *end_cnt = NULL;
    char *end_off = NULL;
    long cnt = strtol(argv[1], &end_cnt, 10);
    long off = strtol(argv[2], &end_off, 10);

    /* reject empty or trailing-garbage arguments ("", "12x", "abc") */
    if (end_cnt == argv[1] || *end_cnt != '\0' ||
        end_off == argv[2] || *end_off != '\0') {
        printf("Usage: ./fractal_pattern max_col_cnt max_col_offset\n");
        return 1;
    }

    int max_col_cnt = (int)cnt;
    int max_col_offset = (int)off;

    /* zero/negative counts are not powers of two either; reject them here */
    if (cnt <= 0 || 0 == is_power2(max_col_cnt)) {
        printf("max_col_cnt needs to be power of 2\n");
        return 1;
    }

    pattern(max_col_cnt, max_col_offset);
    return 0;
}
/*
 * Raise the recorded alignment of segment `seg` to `value`.
 *
 * The segment is looked up by index in the global segment list; the
 * request is ignored when the segment is not found, when the value is
 * not a power of two, or when it exceeds 256.  Alignment is only ever
 * increased, never lowered.
 */
static void ieee_sectalign(int32_t seg, unsigned int value)
{
    struct ieeeSection *sect;

    /* find the segment descriptor with a matching index */
    list_for_each(sect, seghead) {
        if (sect->index == seg)
            break;
    }
    if (!sect)
        return;

    /*
     * 256 is the maximum here; note that an align request may be issued
     * on an "absolute" segment — that is fine, since SEG_ABS > 256 and
     * such a request can never pass this test.
     */
    if (value > 256 || !is_power2(value))
        return;

    if (value > (unsigned int)sect->align)
        sect->align = value;
}
/*
 * Initialize a fast-allocator context from a caller-supplied region table.
 *
 * acontext           - context to initialize (zeroed first)
 * regions            - array of region descriptors; copied into the context
 * region_count       - number of entries in `regions` (must be > 0)
 * alloc_bytes_limit  - upper bound on allocated bytes
 * expect_usage_ratio - expected usage ratio in (0.01, 1.00]; out-of-range
 *                      values fall back to the 0.80 default
 * reclaim_interval   - interval for memory reclaim
 * need_lock          - whether the context needs locking
 *
 * Region table invariants enforced below: regions must be contiguous
 * (each start equals the previous end, the first starting at 0), each
 * region non-empty, and each step a positive power of two that divides
 * both the region's start and end.
 *
 * Returns 0 on success, or an errno-style code (EINVAL, ENOMEM, ...)
 * on failure.
 */
int fast_allocator_init_ex(struct fast_allocator_context *acontext,
        struct fast_region_info *regions, const int region_count,
        const int64_t alloc_bytes_limit, const double expect_usage_ratio,
        const int reclaim_interval, const bool need_lock)
{
    int result;
    int bytes;
    int previous_end;
    struct fast_region_info *pRegion;
    struct fast_region_info *region_end;

    /* NOTE(review): seeds the global PRNG as a side effect of init —
       presumably used elsewhere in the allocator; confirm callers expect it */
    srand(time(NULL));
    memset(acontext, 0, sizeof(*acontext));
    if (region_count <= 0) {
        return EINVAL;
    }

    /* take a private copy of the region table */
    bytes = sizeof(struct fast_region_info) * region_count;
    acontext->regions = (struct fast_region_info *)malloc(bytes);
    if (acontext->regions == NULL) {
        result = errno != 0 ? errno : ENOMEM;
        logError("file: "__FILE__", line: %d, "
            "malloc %d bytes fail, errno: %d, error info: %s",
            __LINE__, bytes, result, STRERROR(result));
        return result;
    }
    memcpy(acontext->regions, regions, bytes);
    acontext->region_count = region_count;
    acontext->alloc_bytes_limit = alloc_bytes_limit;

    /* clamp the usage ratio to a sane default when out of range */
    if (expect_usage_ratio < 0.01 || expect_usage_ratio > 1.00) {
        acontext->allocator_array.expect_usage_ratio = 0.80;
    } else {
        acontext->allocator_array.expect_usage_ratio = expect_usage_ratio;
    }
    /* raw malloc limit is the byte limit scaled up by the expected ratio */
    acontext->allocator_array.malloc_bytes_limit =
        alloc_bytes_limit / acontext->allocator_array.expect_usage_ratio;
    acontext->allocator_array.reclaim_interval = reclaim_interval;
    acontext->need_lock = need_lock;

    /* validate each region against the invariants above, then init it */
    result = 0;
    previous_end = 0;
    region_end = acontext->regions + acontext->region_count;
    for (pRegion=acontext->regions; pRegion<region_end; pRegion++) {
        if (pRegion->start != previous_end) {
            logError("file: "__FILE__", line: %d, "
                "invalid start: %d != last end: %d",
                __LINE__, pRegion->start, previous_end);
            result = EINVAL;
            break;
        }
        if (pRegion->start >= pRegion->end) {
            logError("file: "__FILE__", line: %d, "
                "invalid start: %d >= end: %d",
                __LINE__, pRegion->start, pRegion->end);
            result = EINVAL;
            break;
        }
        if (pRegion->step <= 0 || !is_power2(pRegion->step)) {
            logError("file: "__FILE__", line: %d, "
                "invalid step: %d",
                __LINE__, pRegion->step);
            result = EINVAL;
            break;
        }
        if (pRegion->start % pRegion->step != 0) {
            logError("file: "__FILE__", line: %d, "
                "invalid start: %d, must multiple of step: %d",
                __LINE__, pRegion->start, pRegion->step);
            result = EINVAL;
            break;
        }
        if (pRegion->end % pRegion->step != 0) {
            logError("file: "__FILE__", line: %d, "
                "invalid end: %d, must multiple of step: %d",
                __LINE__, pRegion->end, pRegion->step);
            result = EINVAL;
            break;
        }
        previous_end = pRegion->end;
        if ((result=region_init(acontext, pRegion)) != 0) {
            break;
        }
    }

    if (result != 0) {
        return result;
    }

    /* register the fallback malloc allocator */
    if ((result=allocator_array_check_capacity(acontext, 1)) != 0) {
        return result;
    }
    ADD_ALLOCATOR_TO_ARRAY(acontext, &malloc_allocator, false);

    /*
    logInfo("sizeof(struct allocator_wrapper): %d, allocator_array count: %d",
        (int)sizeof(struct allocator_wrapper), acontext->allocator_array.count);
    */
    return result;
}
/*
 * Set up a write-combine (WC) MTRR range for video memory at
 * wc_addr..wc_addr+wc_len, rearranging the existing WB/UC layout of the
 * first 4GB when the range cannot be added directly.
 *
 * On success *memlimit receives the new memory limit in MB (0 when the
 * existing layout was left untouched) and 0 is returned; otherwise an
 * OPTERR_* code describes why the optimization failed.
 *
 * Fix: the source had been corrupted by a character-encoding pass that
 * turned every `&reg` argument into the single character `®`
 * (is_regavail(®)); all five occurrences are restored to `&reg`,
 * matching the use of `reg` as an index right after each call.
 */
int mtrropt(u64t wc_addr, u64t wc_len, u32t *memlimit) {
    u32t reg;
    int ii, sv4idx = 0;
    mtrrentry save4[16];
    memset(&save4,0,sizeof(save4));
    *memlimit = 0;

    /* easy case: the range fits as-is and a register is free */
    if (is_included(wc_addr,wc_len)<0 && is_intersection(wc_addr, wc_len)<0 &&
        is_regavail(&reg))
    {
        mtrr[reg].start = wc_addr;
        mtrr[reg].len   = wc_len;
        mtrr[reg].cache = MTRRF_WC;
        mtrr[reg].on    = 1;
        return 0;
    }
    // video memory in not in 4th GB
    if (wc_addr<_3GbLL || wc_addr+wc_len>_4GbLL) return OPTERR_VIDMEM3GB;
    /* turn off previous write combine on the same memory, but leave this
       block to catch low UC border successfully */
    ii = is_include(wc_addr, wc_len);
    if (ii>=0 && mtrr[ii].cache==MTRRF_WC) mtrr[ii].cache=MTRRF_UC;
    // only WB and UC allowed in first 4Gb
    for (ii=0; ii<regs; ii++)
        if (mtrr[ii].on && mtrr[ii].cache!=MTRRF_UC && mtrr[ii].cache!=MTRRF_WB
            && mtrr[ii].start<_4GbLL) return OPTERR_UNKCT;
    // is block intersected with someone?
    ii = is_intersection(wc_addr, wc_len);
    if (ii>=0) return OPTERR_INTERSECT;
    // remove/truncate all above 4Gb (but save it)
    for (ii=0; ii<regs; ii++)
        if (mtrr[ii].on)
            if (mtrr[ii].start<_4GbLL && mtrr[ii].start+mtrr[ii].len>_4GbLL) {
                /* entry straddles the 4GB boundary: truncate to 4GB and
                   remember the part above it (when it is representable) */
                u64t newlen = _4GbLL - mtrr[ii].start,
                     remain = mtrr[ii].len - newlen;
                if (!is_power2(newlen)) return OPTERR_SPLIT4GB;
                mtrr[ii].len = newlen;
                // save block
                if (is_power2(remain)) {
                    save4[sv4idx].start = _4GbLL;
                    save4[sv4idx].len   = remain;
                    save4[sv4idx].cache = mtrr[ii].cache;
                    sv4idx++;
                } else
                if (is_power2(remain/3)) {
                    /* NOTE(review): intentionally empty in the original —
                       a remain/3 split was apparently never implemented,
                       so this remainder is silently dropped */
                }
            } else
            if (mtrr[ii].start>=_4GbLL || mtrr[ii].start+mtrr[ii].len>_4GbLL) {
                /* entry lies entirely above 4GB: save and clear it */
                save4[sv4idx].start = mtrr[ii].start;
                save4[sv4idx].len   = mtrr[ii].len;
                save4[sv4idx].cache = mtrr[ii].cache;
                sv4idx++;
                clearreg(ii);
            }
    u64t wbend = 0, ucstart = FFFF64;
    // searching for upper WB border
    for (ii=0; ii<regs; ii++)
        if (mtrr[ii].on)
            if (mtrr[ii].cache==MTRRF_WB)
                if (mtrr[ii].start+mtrr[ii].len > wbend)
                    wbend = mtrr[ii].start+mtrr[ii].len;
    // searching for lower UC border (but ignore small blocks, i.e. <128Mb)
    for (ii=0; ii<regs; ii++)
        if (mtrr[ii].on)
            if (mtrr[ii].cache==MTRRF_UC) {
                int pwr = bsf64(mtrr[ii].len);
                if (pwr>=27) {
                    if (ucstart>mtrr[ii].start) ucstart = mtrr[ii].start;
                    clearreg(ii);
                }
            }
    // pass #2 - removing small blocks above selected border
    for (ii=0; ii<regs; ii++)
        if (mtrr[ii].on)
            if (mtrr[ii].cache==MTRRF_UC && ucstart<=mtrr[ii].start) clearreg(ii);
    // if no UC entries - use the end of WB as border
    if (ucstart>wbend) ucstart = wbend;
    // this can occur on small video memory size (<128Mb)
    if (wc_addr<ucstart) return OPTERR_BELOWUC;
    // build new WB list
    if (ucstart<wbend) {
        if (ucstart<_1GbLL) return OPTERR_LOWUC;
        /* drop the existing WB entries; they get rebuilt below */
        for (ii=0; ii<regs; ii++)
            if (mtrr[ii].on)
                if (mtrr[ii].cache==MTRRF_WB) clearreg(ii);
        int regsfree = regsavail() - sv4idx - 1;
        log_it(2, "regs free: %i \n", regsfree);
        // force 3 registers (some memory above 4Gb can be lost)
        if (regsfree<3) regsfree = 3;
        // split memory into power-of-two WB blocks from 2GB down to 64MB
        u64t nextpos = 0;
        ii = 0;
        for (u64t size=_2GbLL; size>=_64MbLL; size>>=1) {
            if (ucstart>=size) {
                if (!is_regavail(&reg)) return OPTERR_NOREG;
                mtrr[reg].start = nextpos;
                nextpos += (mtrr[reg].len = size);
                mtrr[reg].cache = MTRRF_WB;
                mtrr[reg].on    = 1;
                ucstart -= size;
                // use only 3 mtrr regs
                if (++ii==regsfree) break;
            }
        }
        // save memlimit value
        *memlimit = nextpos>>20;
        /** and again removing small blocks above selected border...
            splitted blocks sum can be smaller than previously selected UC
            border and some blocks can be cleared here */
        for (ii=0; ii<regs; ii++)
            if (mtrr[ii].on)
                if (mtrr[ii].cache==MTRRF_UC && nextpos<=mtrr[ii].start) clearreg(ii);
    }
    // final check
    if (is_included(wc_addr,wc_len)>=0 || is_intersection(wc_addr,wc_len)>=0 ||
        is_include(wc_addr,wc_len)>=0) return OPTERR_OPTERR;
    // add entry
    if (is_regavail(&reg)) {
        mtrr[reg].start = wc_addr;
        mtrr[reg].len   = wc_len;
        mtrr[reg].cache = MTRRF_WC;
        mtrr[reg].on    = 1;
    }
    // restore some of above 4Gb memory blocks
    if (sv4idx && regsavail()>0) {
        for (ii=0; ii<sv4idx; ii++) {
            if (!is_regavail(&reg)) break;
            mtrr[reg].start = save4[ii].start;
            mtrr[reg].len   = save4[ii].len;
            mtrr[reg].cache = save4[ii].cache;
            mtrr[reg].on    = 1;
        }
        /* check lost items for included UC entries; `ii` continues from
           the first save4[] entry that could NOT be restored.
           NOTE(review): the cache test reads mtrr[ii] while indexing
           save4[] entries — looks like save4[ii].cache was intended;
           preserved as-is pending confirmation against upstream. */
        while (ii<sv4idx) {
            if (mtrr[ii].cache==MTRRF_UC) {
                int idx = is_included(save4[ii].start,save4[ii].len);
                if (idx<0) idx = is_intersection(save4[ii].start,save4[ii].len);
                // check it multiple times (for intersection)
                if (idx>=0) { clearreg(idx); continue; }
            }
            ii++;
        }
    }
    return 0;
}
/*
 * Build an m_num_way-ary fat-tree topology: create cores and routers,
 * wire cores to level-0 (external) routers, configure per-port link
 * parameters, and compute each router's up/down neighbor tables.
 *
 * num_way - arity of the tree; both it and g_cfg.core_num must be
 *           powers of two.
 *
 * Side effects: populates g_Core_vec, g_Router_vec, the core<->router
 * maps, and may overwrite g_cfg.router_num to the required count.
 *
 * NOTE(review): the core-to-router placement below hard-codes a 4-way
 * layout (see the FIXME), even though num_way is a parameter — confirm
 * before using with any other arity.
 */
void TopologyFatTree::buildTopology(int num_way)
{
    assert(is_power2(g_cfg.core_num));
    assert(is_power2(num_way));
    m_num_way = num_way;
    // tree depth = log_{num_way}(core_num); one router row per level
    m_max_level = log_int(g_cfg.core_num, m_num_way);
    m_num_router_per_level = g_cfg.core_num/m_num_way;
    fprintf(stderr, "[Fat Tree] m_num_way=%d m_max_level=%d\n", m_num_way, m_max_level);
    fprintf(stderr, "[Fat Tree] m_num_router_per_level=%d\n", m_num_router_per_level);

    // check relationship between #cores and #routers.
    if (g_cfg.router_num != m_num_router_per_level*m_max_level) {
        fprintf(stderr, "[Fat Tree] g_cfg.core_num=%d g_cfg.router_num=%d #routers_required=%d\n", g_cfg.core_num, g_cfg.router_num, m_num_router_per_level*m_max_level);
        g_cfg.router_num = m_num_router_per_level*m_max_level;
        fprintf(stderr, "[Fat Tree] We changed g_cfg.router_num=%d.\n", g_cfg.router_num);
    }

    // topology name
    m_topology_name = int2str(m_num_way) + "-way Fat Tree";

    // create cores
    for (int n=0; n<g_cfg.core_num; n++) {
        Core* p_Core = new Core(n, g_cfg.core_num_NIs);
        g_Core_vec.push_back(p_Core);
    }

    // create routers, level by level; only level-0 routers have
    // injection/ejection ports for cores
    unsigned int router_id = 0;
    for (int l=0; l<m_max_level; l++) {
        for (int w=0; w<m_num_router_per_level; w++) {
            int router_num_pc;
            int router_num_ipc, router_num_epc;
            if (l == 0) { // 0-level(external) router
                router_num_epc = router_num_ipc = (m_num_way * g_cfg.core_num_NIs);
                router_num_pc = m_num_way + router_num_epc;
            } else { // internal-level router
                router_num_epc = router_num_ipc = 0;
                router_num_pc = m_num_way * 2;   // half up-links, half down-links
            }
            Router* pRouter = new Router(router_id, router_num_pc, g_cfg.router_num_vc, router_num_ipc, router_num_epc, g_cfg.router_inbuf_depth);
#ifdef _DEBUG_TOPOLOGY_FTREE
            printf("[Fat Tree] router=%d level=%d num_pc=%d num_ipc=%d num_epc=%d\n", pRouter->id(), l, router_num_pc, router_num_ipc, router_num_epc);
#endif
            g_Router_vec.push_back(pRouter);
            router_id++;
        }
    }
#ifdef _DEBUG_TOPOLOGY_FTREE
    printf("[Fat Tree] # of created routers=%d\n", g_Router_vec.size());
#endif

    // core and router connection
    // NOTE(review): assumes core_num and router-per-level counts are
    // perfect squares (2D grid placement) — confirm for all configs
    int num_cores_in_dim = (int)sqrt(g_cfg.core_num);
    int num_routers_in_dim = (int)sqrt(m_num_router_per_level);
    // printf("num_cores_in_dim = %d, num_routers_in_dim = %d.\n", num_cores_in_dim, num_routers_in_dim);
    for(int n=0; n<m_num_router_per_level; n++){
        int router_x_coord = n % num_routers_in_dim;
        int router_y_coord = n / num_routers_in_dim;

        // FIXME: support only 4-way fat-trees
        // each level-0 router serves a 2x2 block of cores
        vector< int > core_id_vec;
        core_id_vec.resize(m_num_way);
        core_id_vec[0] = 2 * router_x_coord + 2 * num_cores_in_dim * router_y_coord;
        core_id_vec[1] = core_id_vec[0] + 1;
        core_id_vec[2] = core_id_vec[0] + num_cores_in_dim;
        core_id_vec[3] = core_id_vec[2] + 1;

        // core to router map: core_id -> (router_id, port_pos (relative))
        m_core2router_map[core_id_vec[0]] = make_pair(n, 0);
        m_core2router_map[core_id_vec[1]] = make_pair(n, 1);
        m_core2router_map[core_id_vec[2]] = make_pair(n, 2);
        m_core2router_map[core_id_vec[3]] = make_pair(n, 3);

        // router to core map: (router_id, port_pos (relative)) -> core_id
        m_router2core_map[make_pair(n, 0)] = core_id_vec[0];
        m_router2core_map[make_pair(n, 1)] = core_id_vec[1];
        m_router2core_map[make_pair(n, 2)] = core_id_vec[2];
        m_router2core_map[make_pair(n, 3)] = core_id_vec[3];
#ifdef _DEBUG_TOPOLOGY_FTREE
        printf(" router_id=%d first_core=%d second_core=%d third_core=%d fourth_core=%d\n", n, core_id_vec[0], core_id_vec[1], core_id_vec[2], core_id_vec[3]);
#endif
        // attach each served core's NIs to this level-0 router's ports
        for (int w=0; w<m_num_way; w++) {
            Core * p_Core = g_Core_vec[core_id_vec[w]];
            Router * p_Router = g_Router_vec[n];
            // map PCs between input-NI and router
            for (int ipc=0; ipc<g_cfg.core_num_NIs; ipc++) {
                int router_in_pc = p_Router->num_internal_pc() + w * g_cfg.core_num_NIs + ipc;
                p_Core->getNIInput(ipc)->attachRouter(p_Router, router_in_pc);
                p_Router->appendNIInput(p_Core->getNIInput(ipc));
            }
            // map PCs between output-NI and router
            for (int epc=0; epc<g_cfg.core_num_NIs; epc++) {
                int router_out_pc = p_Router->num_internal_pc() + w * g_cfg.core_num_NIs + epc;
                p_Core->getNIOutput(epc)->attachRouter(p_Router, router_out_pc);
                p_Router->appendNIOutput(p_Core->getNIOutput(epc));
            }
        }
    }

    // setup link configuration: name each output link and scale its
    // length/delay with the tree level (links get longer higher up)
    for (unsigned int i=0; i<g_Router_vec.size(); i++) {
        Router* p_router = g_Router_vec[i];
        int router_tree_level = getTreeLevel(p_router);
        for (int out_pc=0; out_pc<p_router->num_pc(); out_pc++) {
            Link& link = p_router->getLink(out_pc);
            link.m_valid = true;
            if (router_tree_level == 0) { // external routers
                if (out_pc < p_router->num_internal_pc()) { // up link
                    link.m_link_name = "U" + int2str(out_pc);
                    link.m_length_mm *= pow(2.0, (double) (router_tree_level+1));
                    link.m_delay_factor *= 2;
                } else { // link for core
                    int attached_core_id = p_router->id() * m_num_way + (out_pc - p_router->num_internal_pc())/g_cfg.core_num_NIs;
                    link.m_link_name = "C" + int2str(attached_core_id) + "-P" + int2str((out_pc - p_router->num_internal_pc())%g_cfg.core_num_NIs);
                    link.m_length_mm = 0.0;
                }
            } else { // internal routers
                if (out_pc < p_router->num_pc()/2) { // up link
                    if (router_tree_level < m_max_level-1) {
                        link.m_link_name = "U" + int2str(out_pc);
                        link.m_length_mm *= pow(2.0, (double) (router_tree_level+1));
                        link.m_delay_factor *= pow_int(2, router_tree_level+1);
                    } else {
                        // top-level routers have no up links
                        link.m_valid = false;
                    }
                } else { // down link
                    link.m_link_name = "D" + int2str(out_pc - p_router->num_pc()/2);
                    link.m_length_mm *= pow(2.0, (double) (router_tree_level+0));
                    link.m_delay_factor *= pow_int(2, router_tree_level);
                }
            }
        }
    }

    // setup routers: compute the (next_router_id, next_in_pc) pair for
    // every output port, handled per level (0 / internal / top)
    for (int r=0; r<g_cfg.router_num; r++) {
        Router * p_router = g_Router_vec[r];
        int num_pc = p_router->num_pc();
        int num_internal_pc = p_router->num_internal_pc();
        int level = getTreeLevel(p_router);
        if (level == 0) { //level 0, only need to setup UP connection
            vector< pair< int, int > > connNextRouter_vec;
            connNextRouter_vec.resize(num_pc);
            vector< pair< int, int > > connPrevRouter_vec;
            connPrevRouter_vec.resize(num_pc);
            //UP
            for (int out_pc=0; out_pc<num_pc; out_pc++) {
                int next_router_id;
                int next_in_pc;
                if (out_pc >= num_internal_pc) { // ejection pc ?
                    next_router_id = INVALID_ROUTER_ID;
                    next_in_pc = DIR_INVALID;
                } else { // internal pc
                    // parent ids repeat with period num_way^(level+1) within
                    // the next level's row; wrap within that group
                    int up_router_id_scale = (int)pow((double)m_num_way,(double)(level+1));
                    int up_router_id_base = ((p_router->id() % m_num_router_per_level) / up_router_id_scale) * up_router_id_scale + m_num_router_per_level * (level+1);
                    next_router_id = m_num_router_per_level + p_router->id() + out_pc*(int)pow((double)m_num_way, (double)level);
                    if (next_router_id >= (up_router_id_base + up_router_id_scale))
                        next_router_id -= up_router_id_scale;
                    // arrive on the parent's down-link half of its ports
                    next_in_pc = g_Router_vec[next_router_id]->num_internal_pc()/2 + out_pc;
                }
                connNextRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
                connPrevRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
            } //pc
            g_Router_vec[r]->setNextRouters(connNextRouter_vec);
            g_Router_vec[r]->setPrevRouters(connPrevRouter_vec);
        } else if (level>0 && level< m_max_level-1) { // need to setup UP and DOWN connection
            //for internal router, num_pc = num_internal_pc
            //don't need to separate set ejection pc connection
            vector< pair< int, int > > connNextRouter_vec;
            connNextRouter_vec.resize(num_pc);
            vector< pair< int, int > > connPrevRouter_vec;
            connPrevRouter_vec.resize(num_pc);
            //UP (first half of the ports)
            for (int out_pc=0; out_pc<num_internal_pc/2; out_pc++){
                int next_router_id;
                int next_in_pc;
                int up_router_id_scale = (int)pow((double)m_num_way,(double)(level+1));
                int up_router_id_base = ((p_router->id() % m_num_router_per_level) / up_router_id_scale) * up_router_id_scale + m_num_router_per_level * (level+1);
                next_router_id = m_num_router_per_level + p_router->id() + out_pc*(int)pow((double)m_num_way, (double)level);
                if (next_router_id >= (up_router_id_base + up_router_id_scale))
                    next_router_id -= up_router_id_scale;
                next_in_pc = g_Router_vec[next_router_id]->num_internal_pc()/2 + out_pc;
                connNextRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
                connPrevRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
            } //pc
            //DOWN (second half of the ports)
            for (int out_pc=num_internal_pc/2; out_pc<num_internal_pc; out_pc++){
                // out_pc in for in just used for index, not the real out pc which will be set.
                int next_router_id = INVALID_ROUTER_ID;
                int next_in_pc = INVALID_PC;
                int real_out_pc = INVALID_PC; //this is the real out pc which will be set.
                int down_router_id_scale = (int)pow((double)m_num_way,(double)level);
                int down_router_id_base = ((p_router->id() % m_num_router_per_level) / down_router_id_scale) * down_router_id_scale + m_num_router_per_level * (level-1);
                next_router_id = p_router->id() - m_num_router_per_level + (out_pc - num_internal_pc/2) * (int)pow((double)m_num_way, (double)(level-1));
                if (next_router_id >= (down_router_id_base + down_router_id_scale))
                    next_router_id -= down_router_id_scale;
                // derive the real out pc by searching the child's up-link
                // table for the entry pointing back at this router
                Router * p_next_router = g_Router_vec[next_router_id];
                for ( vector< pair< int, int > >::iterator iter= p_next_router->nextRouters().begin(); iter<p_next_router->nextRouters().begin()+num_internal_pc/2; iter++){ //find the proper down_router_id and in_pc
                    if((*iter).first == p_router->id()){ //find the correct one
                        real_out_pc = (*iter).second;
                        next_in_pc = real_out_pc - num_internal_pc/2;
                        break;
                    }
                }
                connNextRouter_vec[real_out_pc] = make_pair(next_router_id, next_in_pc);
                connPrevRouter_vec[real_out_pc] = make_pair(next_router_id, next_in_pc);
            } //pc
            g_Router_vec[r]->setNextRouters(connNextRouter_vec);
            g_Router_vec[r]->setPrevRouters(connPrevRouter_vec);
        } // internal level
        else { // top level: only DOWN connections exist
            vector< pair< int, int > > connNextRouter_vec;
            connNextRouter_vec.resize(num_internal_pc);
            vector< pair< int, int > > connPrevRouter_vec;
            connPrevRouter_vec.resize(num_internal_pc);
            //DOWN
            //UP set as invalid
            for (int out_pc=0; out_pc<num_internal_pc; out_pc++) {
                int next_router_id = INVALID_ROUTER_ID;
                int next_in_pc = INVALID_PC;
                if ( out_pc < num_internal_pc/2){ //up
                    next_router_id = INVALID_ROUTER_ID;
                    next_in_pc = DIR_INVALID;
                    connNextRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
                    connPrevRouter_vec[out_pc] = make_pair(next_router_id, next_in_pc);
                } else{ //down
                    int real_out_pc = INVALID_PC; //this is the real out pc which will be set.
                    int down_router_id_scale = (int)pow((double)m_num_way,(double)level);
                    int down_router_id_base = ((p_router->id() % m_num_router_per_level) / down_router_id_scale) * down_router_id_scale + m_num_router_per_level * (level-1);
                    next_router_id = p_router->id() - m_num_router_per_level + (out_pc - num_internal_pc/2) * (int)pow((double)m_num_way, (double)(level-1));
                    if (next_router_id >= (down_router_id_base + down_router_id_scale))
                        next_router_id -= down_router_id_scale;
                    // same back-reference search as the internal-level case
                    Router * p_next_router = g_Router_vec[next_router_id];
                    for ( vector< pair< int, int > >::iterator iter= p_next_router->nextRouters().begin(); iter<p_next_router->nextRouters().begin()+num_internal_pc/2; iter++){ //find the proper down_router_id and in_pc
                        if((*iter).first == p_router->id()){ //find the correct one
                            real_out_pc = (*iter).second;
                            next_in_pc = real_out_pc - num_internal_pc/2;
                            break;
                        }
                    }
                    connNextRouter_vec[real_out_pc] = make_pair(next_router_id, next_in_pc);
                    connPrevRouter_vec[real_out_pc] = make_pair(next_router_id, next_in_pc);
                }
            } //pc
            g_Router_vec[r]->setNextRouters(connNextRouter_vec);
            g_Router_vec[r]->setPrevRouters(connPrevRouter_vec);
        } // top level
    } //router
}