int update_elap_entry(const char *fpath, const struct stat *sb, int tflag, struct FTW *ftwbuf){
    //bj_ostream& os = bj_out;
    MARK_USED(sb);
    MARK_USED(ftwbuf);
    BRAIN_CK(glb_test_tak_mak != NULL_PT);
    switch (tflag) {
    case FTW_D:
    case FTW_DNR:
    case FTW_DP:
        break;
    default:{
        ch_string pth_str = fpath;
        ch_string cnn_nm = SKG_CANON_NAME;
        if(path_ends_with(pth_str, cnn_nm)){
            tak_mak& gg = *glb_test_tak_mak;
            long upd_it = gg.gen_rand_int32_ie(0, 2);
            if(upd_it > 0){
                ch_string full_pth = path_get_directory(pth_str, true);
                ch_string elp_nm = full_pth + SKG_ELAPSED_NAME;
                update_elapsed(elp_nm);
            }
        }
    }
    break;
    }
    return (0);
}
void test_row_reduc(){
    bj_ostream& os = bj_out;
    MARK_USED(os);

    row<long> rr1;
    row<long> rr2;

    rr1 << (long)14 << (long)34 << (long)54 << (long)15 << (long)67
        << (long)78 << (long)98 << (long)13 << (long)45 << (long)93;
    rr2 << (long)15 << (long)67 << (long)78 << (long)13 << (long)45 << (long)93;

    rr1.mix_sort(cmp_long);
    rr2.mix_sort(cmp_long);

    os << "rr1=" << rr1 << bj_eol;
    os << "rr2=" << rr2 << bj_eol;

    rr1.sorted_set_reduce(rr2, cmp_long);

    os << "AFTER" << bj_eol;
    os << "rr1=" << rr1 << bj_eol;
    os << "rr2=" << rr2 << bj_eol;
}
void config_reader::parse_debug_line(row<long>& dbg_line, ch_string& str_ln){
#ifdef FULL_DEBUG
    bj_ostream& os = bj_out;
    MARK_USED(os);

    const char* pt_in = str_ln.c_str();
    dbg_line.clear();

    long num_ln = 0;
    if(isalnum(*pt_in)){
        skip_whitespace(pt_in, num_ln);
        while(isdigit(*pt_in) || isspace(*pt_in)){
            if(isspace(*pt_in)){
                pt_in++;
                continue;
            }
            //os << pt_in << "$\n";
            long val = parse_long(pt_in, num_ln);
            //skip_whitespace(pt_in, num_ln);
            dbg_line.push(val);
        }
    } else {
        skip_line(pt_in, num_ln);
    }
#endif
}
void config_reader::add_config_line(debug_info& dbg_info, ch_string& str_ln){
#ifdef FULL_DEBUG
    bj_ostream& os = bj_out;
    MARK_USED(os);

    row<long>& dbg_ln = dbg_config_line;
    parse_debug_line(dbg_ln, str_ln);
    //os << " dbg_ln=" << dbg_ln << "\n";

    if(! dbg_ln.is_empty()){
        debug_entry& start_dbg = dbg_info.dbg_start_dbg_entries.inc_sz();
        long debug_id = dbg_ln[0];
        start_dbg.dbg_id = debug_id;
        if(dbg_ln.size() > 1){
            start_dbg.dbg_recoil = dbg_ln[1];
        }
        if(dbg_ln.size() > 2){
            debug_entry& stop_dbg = dbg_info.dbg_stop_dbg_entries.inc_sz();
            stop_dbg.dbg_id = debug_id;
            stop_dbg.dbg_recoil = dbg_ln[2];
        }
    }
#endif
}
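/*
 * Taken together, parse_debug_line and add_config_line imply a simple
 * three-field line format. A hypothetical sketch of config lines as this
 * code would interpret them (the values are illustrative, not from the
 * source):
 *
 *   "42"          -> start entry with dbg_id = 42 (default recoil)
 *   "42 100"      -> start entry: enable level 42 at recoil 100
 *   "42 100 900"  -> plus a stop entry: disable level 42 at recoil 900
 */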
void dbg_update_config_entries(debug_info& dbg_info, bj_big_int_t curr_reco){
#ifdef FULL_DEBUG
    row<bool>& dbg_arr = dbg_info.dbg_levs_arr;
    bj_ostream& os = bj_out;
    MARK_USED(os);

    long& start_idx = dbg_info.dbg_current_start_idx;
    long& stop_idx = dbg_info.dbg_current_stop_idx;
    row<debug_entry>& start_lst = dbg_info.dbg_start_dbg_entries;
    row<debug_entry>& stop_lst = dbg_info.dbg_stop_dbg_entries;

    while((start_idx < start_lst.size()) &&
        (start_lst[start_idx].dbg_recoil <= curr_reco))
    {
        long start_dbg_id = start_lst[start_idx].dbg_id;
        CONFIG_CK(dbg_arr.is_valid_idx(start_dbg_id));
        dbg_arr[start_dbg_id] = true;
        start_idx++;
    }
    while((stop_idx < stop_lst.size()) &&
        (stop_lst[stop_idx].dbg_recoil < curr_reco))
    {
        long stop_dbg_id = stop_lst[stop_idx].dbg_id;
        CONFIG_CK(dbg_arr.is_valid_idx(stop_dbg_id));
        dbg_arr[stop_dbg_id] = false;
        stop_idx++;
    }
#endif
}
bool neuron::ck_no_source_of_any(){
#ifdef FULL_DEBUG
    for(long ii = 0; ii < fib_sz(); ii++){
        quanton* qua = ne_fibres[ii];
        MARK_USED(qua);
        BRAIN_CK_0(qua->get_source() != this);
    }
#endif
    return true;
}
void test_num1(){
    bj_ostream& os = bj_out;
    MARK_USED(os);

    bj_big_int_t num_vnts;

    num_vnts = "-1";
    os << "num_vnts=" << num_vnts << bj_eol;

    num_vnts = "987654321";
    os << "num_vnts=" << num_vnts << bj_eol;
}
/*
 * Initialization stays the same as for first fit. From the block of free
 * memory we register a block of 'size' bytes on which allocation will be
 * performed. The size of the allocation subsystem is determined in the
 * kernel, and the kernel performs the subsystem initialization!
 */
void *bf_init ( void *mem_segm, size_t size )
{
    size_t start, end;
    bf_hdr_t *chunk, *border;
    bf_mpool_t *mpool;

    ASSERT ( mem_segm && size > sizeof (bf_hdr_t) * 2 );

    /* align all on 'size_t' (if already not aligned) */
    start = (size_t) mem_segm;
    end = start + size;
    ALIGN_FW ( start );
    mpool = (void *) start; /* place mm descriptor here */
    start += sizeof (bf_mpool_t);
    ALIGN ( end );

    mpool->first = NULL;

    if ( end - start < 2 * HEADER_SIZE )
        return NULL;

    border = (bf_hdr_t *) start;
    border->size = sizeof (size_t);
    MARK_USED ( border );

    chunk = GET_AFTER ( border );
    chunk->size = end - start - 2 * sizeof(size_t);
    MARK_FREE ( chunk );
    CLONE_SIZE_TO_TAIL ( chunk );

    border = GET_AFTER ( chunk );
    border->size = sizeof (size_t);
    MARK_USED ( border );

    bf_insert_chunk ( mpool, chunk ); /* first and only free chunk */

    /* return the pool on which allocations will be performed */
    return mpool;
}
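/*
 * Minimal usage sketch of the bf_* API above. It assumes only what this
 * excerpt shows; 'arena' and the request size are illustrative, and a
 * matching free routine is not part of this excerpt.
 */
static char arena[4096];

static void bf_example(void)
{
    bf_mpool_t *pool = bf_init(arena, sizeof(arena));
    if (pool == NULL)
        return; /* segment too small to hold the border chunks */

    void *p = bf_alloc(pool, 100); /* NULL when no adequate chunk exists */
    (void) p;
}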
bool ck_motives(brain& brn, row_quanton_t& mots){
#ifdef FULL_DEBUG
    for(long ii = 0; ii < mots.size(); ii++){
        quanton* mot = mots[ii];
        MARK_USED(mot);
        BRAIN_CK(mot != NULL_PT);
        BRAIN_CK(mot->qlevel() <= brn.level());
        BRAIN_CK(mot->get_charge() == cg_negative);
    }
#endif
    return true;
}
static struct page * rmqueue(zone_t *zone, unsigned int order)
{
    free_area_t * area = zone->free_area + order;
    unsigned int curr_order = order;
    struct list_head *head, *curr;
    unsigned long flags;
    struct page *page;
    int i;

    spin_lock_irqsave(&zone->lock, flags);
    do {
        head = &area->free_list;
        curr = head->next;

        if (curr != head) {
            unsigned int index;

            page = list_entry(curr, struct page, list);
            if (BAD_RANGE(zone,page))
                BUG();
            list_del(curr);
            index = page - zone->zone_mem_map;
            if (curr_order != MAX_ORDER-1)
                MARK_USED(index, curr_order, area);
            zone->free_pages -= 1UL << order;

            page = expand(zone, page, index, order, curr_order, area);
            spin_unlock_irqrestore(&zone->lock, flags);

            set_page_count(page, 1);
            if (BAD_RANGE(zone,page))
                BUG();
            if (PageLRU(page))
                BUG();
            if (PageActive(page))
                BUG();
            /*
             * we need to reference all the pages for this order,
             * otherwise if anyone accesses one of the pages with
             * (get/put) it will be freed :-(
             */
            for (i = 1; i < (1 << order); i++)
                set_page_count(&page[i], 1);
            return page;
        }
        curr_order++;
        area++;
    } while (curr_order < MAX_ORDER);

    /* assumed tail (the excerpt is truncated here): no block free at any order */
    spin_unlock_irqrestore(&zone->lock, flags);
    return NULL;
}
int load_entry(const char *fpath, const struct stat *sb, int tflag, struct FTW *ftwbuf){
    bj_ostream& os = bj_out;
    MARK_USED(sb);
    MARK_USED(ftwbuf);
    switch (tflag) {
    case FTW_D:
    case FTW_DNR:
    case FTW_DP:
        break;
    default:{
        ch_string full_pth = fpath;
        ch_string cnn_nm = SKG_CANON_NAME;
        if(path_ends_with(full_pth, cnn_nm)){
            canon_cnf the_cnf;
            os << full_pth;

            the_cnf.release_and_init(GSKE, true);
            bool all_ok = the_cnf.load_from(GSKE, full_pth);
            if(all_ok){
                os << "LOAD OK";
                //the_cnf.print_canon_cnf(os);
            } else {
                os << "LOAD OF " << full_pth << " FAILED !!";
            }
            //os << bj_eol;
            the_cnf.release_and_init(GSKE, true);
        }
    }
    break;
    }
    return (0);
}
void test_creat(int argc, char** argv){
    bj_ostream& os = bj_out;
    if(argc < 2){
        os << "Missing args" << bj_eol;
        return;
    }
    ch_string pth = argv[1];
    os << "pth=" << pth << bj_eol;

    bool pth_ok = path_create(pth);
    MARK_USED(pth_ok);
    BRAIN_CK(pth_ok);
}
bool test_pair_subsets(tak_mak& rnd_gen, long n_iter){
    bj_ostream& os = bj_out;
    MARK_USED(os);

    canon_cnf the_cnf;
    canon_cnf sub_cnf;

    long max_ccl_sz = 5;
    long max_num_ccls_cnf = 100;
    long max_num_vars_cnf = 10;

    gen_ccls_cnf(rnd_gen, the_cnf, max_ccl_sz, max_num_ccls_cnf, max_num_vars_cnf);
    gen_sub_cnf(rnd_gen, the_cnf, sub_cnf);

    row<canon_clause*>& all_ccls = the_cnf.cf_clauses;
    row<canon_clause*>& all_sub_ccls = sub_cnf.cf_clauses;

    //the_cnf.print_canon_cnf(os);
    //sub_cnf.print_canon_cnf(os);
    //os.flush();

    bool are_eq = false;
    cmp_is_sub cmp_resp = all_ccls.sorted_set_is_subset(all_sub_ccls, cmp_clauses, are_eq);
    bool is_sub = (are_eq || (cmp_resp == k_rgt_is_sub));
    MARK_USED(is_sub);
    BRAIN_CK(is_sub);

    the_cnf.release_and_init(GSKE, true);
    sub_cnf.release_and_init(GSKE, true);
    return true;
}
/*
 * The allocation logic is the same as for first fit:
 * a block of the requested size is allocated, and the search
 * starts from the beginning of the list.
 *
 * IMPORTANT:
 * The main difference from first fit is that the list is SORTED (ascending!).
 */
void *bf_alloc ( bf_mpool_t *mpool, size_t size )
{
    bf_hdr_t *iter, *chunk;

    ASSERT ( mpool );

    size += sizeof (size_t) * 2; /* add header and tail size */
    if ( size < HEADER_SIZE )
        size = HEADER_SIZE;

    /* align request size to higher 'size_t' boundary */
    ALIGN_FW ( size );

    /* search for a free block, starting from the first free chunk */
    iter = mpool->first;

    /* while free chunks remain and are smaller than required, keep looking */
    while ( iter != NULL && iter->size < size )
        iter = iter->next;

    if ( iter == NULL )
        return NULL; /* no adequate free chunk found */

    /* otherwise, if the found free chunk is large enough, split it */
    if ( iter->size >= size + HEADER_SIZE )
    {
        /* split chunk */
        /* first part remains in free list, just update size */
        iter->size -= size;
        CLONE_SIZE_TO_TAIL ( iter );

        chunk = GET_AFTER ( iter );
        chunk->size = size;
    }
    else { /* give whole chunk */
        chunk = iter;

        /* remove it from free list */
        bf_remove_chunk ( mpool, chunk );
    }

    /* mark it as used */
    MARK_USED ( chunk );
    CLONE_SIZE_TO_TAIL ( chunk );

    /* return the block (payload starts right after the header) */
    return ( (void *) chunk ) + sizeof (size_t);
}
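/*
 * bf_insert_chunk is not shown in this excerpt. The ascending order it
 * must maintain is what turns the first-fit scan in bf_alloc into best
 * fit: the first adequate chunk is also the smallest adequate one. A
 * sketch of such an insertion, assuming only the 'next' link used above
 * (the real header may carry more fields):
 */
static void bf_insert_sorted_sketch(bf_mpool_t *mpool, bf_hdr_t *chunk)
{
    bf_hdr_t *iter = mpool->first, *prev = NULL;

    /* advance past all chunks smaller than the one being inserted */
    while (iter != NULL && iter->size < chunk->size) {
        prev = iter;
        iter = iter->next;
    }
    chunk->next = iter;
    if (prev != NULL)
        prev->next = chunk;
    else
        mpool->first = chunk;
}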
bool dbg_run_satex_is_no_sat(ch_string f_nam){
    bool is_no_sat = true;
#ifdef FULL_DEBUG
    if(file_exists(f_nam)){
        ch_string o_str = "satex -s " + f_nam;

        system_exec(o_str);
        ch_string lg_nm = get_satex_log_name(f_nam, LOG_SATEX_NM_RESULTS);
        RSATX_CK(file_exists(lg_nm));

        is_no_sat = all_results_batch_instances(lg_nm, bjr_no_satisf);
        MARK_USED(is_no_sat);
    }
#endif
    return is_no_sat;
}
/*!
 * Get free chunk with required size (or slightly bigger)
 * \param mpool Memory pool to be used (must not be NULL; asserted below)
 * \param size Requested chunk size
 * \return Block address, NULL if can't find adequate free chunk
 */
void *ffs_alloc ( ffs_mpool_t *mpool, size_t size )
{
    ffs_hdr_t *iter, *chunk;

    ASSERT ( mpool );

    size += sizeof (size_t) * 2; /* add header and tail size */
    if ( size < HEADER_SIZE )
        size = HEADER_SIZE;

    /* align request size to higher 'size_t' boundary */
    ALIGN_FW ( size );

    iter = mpool->first;
    while ( iter != NULL && iter->size < size )
        iter = iter->next;

    if ( iter == NULL )
        return NULL; /* no adequate free chunk found */

    if ( iter->size >= size + HEADER_SIZE )
    {
        /* split chunk */
        /* first part remains in free list, just update size */
        iter->size -= size;
        CLONE_SIZE_TO_TAIL ( iter );

        chunk = GET_AFTER ( iter );
        chunk->size = size;
    }
    else { /* give whole chunk */
        chunk = iter;

        /* remove it from free list */
        ffs_remove_chunk ( mpool, chunk );
    }

    MARK_USED ( chunk );
    CLONE_SIZE_TO_TAIL ( chunk );

    return ( (void *) chunk ) + sizeof (size_t);
}
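/*
 * The padding arithmetic in ffs_alloc/bf_alloc implies the following
 * chunk layout (reconstructed from the size bookkeeping above, not from
 * the allocator's own documentation):
 *
 *   chunk ----> +----------------------+
 *               | size_t header        |  size + used/free mark
 *   returned -> +----------------------+
 *               | payload              |  chunk + sizeof(size_t)
 *               +----------------------+
 *               | size_t tail          |  CLONE_SIZE_TO_TAIL copies size here
 *               +----------------------+
 *
 * which is why every request grows by 2 * sizeof(size_t) before the search.
 */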
void gen_clause(tak_mak& gg, row_long_t& the_ccl, long max_ccl_sz, long num_vars_cnf)
{
    //bj_ostream& os = bj_out;
    unsigned long ccl_sz = gg.gen_rand_int32_ie(2, max_ccl_sz);
    long num_neg = gg.gen_rand_int32_ie(1, ccl_sz);
    long num_pos = ccl_sz - num_neg;
    MARK_USED(num_pos);
    BRAIN_CK(num_pos >= 0);

    typedef std::set<long> set_long_t;
    typedef set_long_t::iterator set_long_iter_t;

    //os << "ccl_sz=" << ccl_sz << bj_eol;
    //os << "num_vars_cnf=" << num_vars_cnf << bj_eol;

    std::set<long> varset;
    for(unsigned long aa = 0; aa < ccl_sz; aa++){
        // leave it like this even if ccl_sz is not reached,
        // to avoid an infinite loop, because (ccl_sz > num_vars_cnf) can happen
        long vv = gg.gen_rand_int32_ie(1, num_vars_cnf);
        varset.insert(vv);
    }

    the_ccl.clear(true);
    for(set_long_iter_t aa = varset.begin(); aa != varset.end(); aa++){
        long vv = (*aa);
        if(the_ccl.size() < num_neg){
            vv = -vv;
        }
        BRAIN_CK(vv != 0);
        the_ccl.push(vv);
    }

    //os << "start_sorting=" << the_ccl << bj_eol;
    the_ccl.mix_sort(cmp_canon_ids);
    BRAIN_CK(the_ccl.is_sorted(cmp_canon_ids));
}
static inline struct page * expand (zone_t *zone, struct page *page,
    unsigned long index, int low, int high, free_area_t * area)
{
    unsigned long size = 1 << high;

    while (high > low) {
        if (BAD_RANGE(zone,page))
            BUG();
        area--;
        high--;
        size >>= 1;
        list_add(&(page)->list, &(area)->free_list);
        MARK_USED(index, high, area);
        index += size;
        page += size;
    }
    if (BAD_RANGE(zone,page))
        BUG();
    return page;
}
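/*
 * Worked trace of the splitting loop above (numbers are illustrative):
 * an order-0 request (low = 0) served from an order-2 block (high = 2)
 * starting at page p, map index i.
 *
 *   start:  size = 4
 *   pass 1: high 2 -> 1, size 4 -> 2; pages [p, p+1] go back on the
 *           order-1 free list and MARK_USED toggles their pair bit;
 *           index and page advance by 2.
 *   pass 2: high 1 -> 0, size 2 -> 1; page p+2 goes back on the
 *           order-0 free list; index and page advance by 1.
 *   result: page p+3 is returned for the order-0 allocation.
 */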
void test_lk_name(){
    bj_ostream& os = bj_out;
    MARK_USED(os);

    ch_string str1 = "TEST STRING";
    uchar_t* arr_to_sha = (uchar_t*)(str1.c_str());
    long arr_to_sha_sz = str1.size();

    ch_string the_sha = sha_txt_of_arr(arr_to_sha, arr_to_sha_sz);

    dima_dims dim0;
    dim0.dd_tot_lits = 245;
    dim0.dd_tot_ccls = 25;
    dim0.dd_tot_vars = 70;
    dim0.dd_tot_twolits = 18;

    //ch_string lk_nm = canon_lock_name(dim0, the_sha);
    //os << "lk_nm=" << lk_nm << bj_eol;
    //os << "sz_lk_nm=" << lk_nm.size() << bj_eol;
}
static struct page * rmqueue(zone_t *zone, unsigned int order)
{
    free_area_t * area = zone->free_area + order;
    unsigned int curr_order = order;
    struct list_head *head, *curr;
    unsigned long flags;
    struct page *page;

    spin_lock_irqsave(&zone->lock, flags);
    do {
        head = &area->free_list;
        curr = head->next;

        if (curr != head) {
            unsigned int index;

            page = list_entry(curr, struct page, list);
            if (BAD_RANGE(zone,page))
                BUG();
            list_del(curr);
            index = page - zone->zone_mem_map;
            if (curr_order != MAX_ORDER-1)
                MARK_USED(index, curr_order, area);
            zone->free_pages -= 1UL << order;

            page = expand(zone, page, index, order, curr_order, area);
            spin_unlock_irqrestore(&zone->lock, flags);

            set_page_count(page, 1);
            if (BAD_RANGE(zone,page))
                BUG();
            if (PageLRU(page))
                BUG();
            if (PageActive(page))
                BUG();
            return page;
        }
        curr_order++;
        area++;
    } while (curr_order < MAX_ORDER);

    /* assumed tail (the excerpt is truncated here): no block free at any order */
    spin_unlock_irqrestore(&zone->lock, flags);
    return NULL;
}
void test_subsets(){
    bj_ostream& os = bj_out;
    MARK_USED(os);

    row<bool> dbg_arr;
    dbg_arr.fill(false, DBG_NUM_LEVS);
    dbg_arr[2] = true;

    GSKE.kg_root_path = TEST_ROOT_PATH;
    GSKE.init_paths();
    GSKE.print_paths(os);

    ch_string GSKE_ROOT = GSKE.kg_root_path + SKG_SKELETON_DIR;
    os << "WARNING !!! deleting '" << GSKE_ROOT << "'" << bj_eol;
    delete_directory(GSKE_ROOT);

    //dbg_arr[80] = true;

    long num_test_to_gen = 1000;

    //unsigned long init_val = (long)(run_time());
    unsigned long init_val = 10;
    tak_mak rnd_gen(init_val);

    for(long aa = 0; aa < num_test_to_gen; aa++){
        test_pair_subsets(rnd_gen, aa);
        os << aa << " ";
        os.flush();
    }
    os << bj_eol;

    GSKE.clear_all();

    os << "2. FINISHING ..." << bj_eol;
    os.flush();
}
bool dbg_run_satex_on(brain& brn, ch_string f_nam, neuromap* dbg_nmp){
#ifdef FULL_DEBUG
    bool is_no = dbg_run_satex_is_no_sat(f_nam);
    bool has_nmp = (dbg_nmp != NULL_PT);
    bool is_min_wrt = (has_nmp && (dbg_nmp->na_upd_to_write));
    MARK_USED(is_no);
    MARK_USED(is_min_wrt); /* assumed: the truncated tail presumably used it */

    DBG_COMM_WITH(70, brn,
        if(! is_no){
            ch_string msg_htm = "ABORTING_WITH";
            if(has_nmp){
                msg_htm = dbg_nmp->map_dbg_html_data_str(msg_htm);
            }
            if(has_nmp){
                dbg_nmp->map_dbg_set_cy_maps();
            }
            brn.dbg_update_html_cy_graph(CY_NMP_KIND, &(brn.br_tmp_ini_tauto_col), msg_htm);
            if(has_nmp){
                dbg_nmp->map_dbg_reset_cy_maps();
            }
        }
    );
    /* assumed tail: the excerpt is truncated here; the macro call is closed
       and the function returns like the other FULL_DEBUG helpers do */
#endif
    return true;
}
unsigned long mm_getFreePages(int gfp_mask, unsigned long order)
{
    unsigned long flags;
    unsigned long ret_address;
    unsigned long page_order;

    stat_allocs++;
    ret_address = 0;
    page_order = order;
    if (order >= NR_MEM_LISTS)
        return ret_address;

    spin_lock_irqsave(&free_area_lock, flags);
    do {
        struct free_mem_area_struct * area = free_mem_area+order;
        unsigned long new_order = order;
        do {
            struct page *prev = memory_head(area), *ret = prev->next;
            while (memory_head(area) != ret) {
                if ( CAN_DMA(ret)) {
                    unsigned long map_nr;
                    (prev->next = ret->next)->prev = prev;
                    map_nr = ret - g_mem_map;
                    MARK_USED(map_nr, new_order, area);
                    area->stat_count--;
                    g_nr_free_pages -= 1 << order;
                    EXPAND(ret, map_nr, order, new_order, area);
                    DEBUG(" Page alloc return address: %x mask:%x order:%d \n",
                        ADDRESS(map_nr), gfp_mask, order);
                    if (gfp_mask & MEM_CLEAR)
                        ut_memset(ADDRESS(map_nr), 0, PAGE_SIZE << order);
                    if (!(gfp_mask & MEM_FOR_CACHE))
                        memleakHook_alloc(ADDRESS(map_nr), PAGE_SIZE << order, 0, 0);
                    ret_address = ADDRESS(map_nr);
                    goto last;
                }
                prev = ret;
                ret = ret->next;
            }
            new_order++;
            area++;
        } while (new_order < NR_MEM_LISTS);
    } while (0);

last:
    if (ret_address > 0) {
        unsigned long i = (1 << page_order);
        struct page *page = virt_to_page(ret_address);
        while (i--) {
#ifdef MEMORY_DEBUG
            if (PageReferenced(page)){
                ut_log("Page Backtrace in Alloc page :\n");
                ut_printBackTrace(page->bt_addr_list, MAX_BACKTRACE_LENGTH);
            }
#endif
            assert(!PageReferenced(page));
            PageSetReferenced(page);
#ifdef MEMORY_DEBUG
            ut_storeBackTrace(page->bt_addr_list, MAX_BACKTRACE_LENGTH);
#endif
            page++;
        }
    }
    spin_unlock_irqrestore(&free_area_lock, flags);

    if (ret_address == 0)
        return ret_address;
    if ((ret_address >= (KADDRSPACE_START + g_phy_mem_size)) ||
        (ret_address < KADDRSPACE_START)){
        ut_log(" ERROR: frames exceeding the max frames :%x\n", ret_address);
        BUG();
    }
    return ret_address;
}
bj_ostream& neuron::print_neu_base(bj_ostream& os, bool from_pt, bool from_tee, bool sort_fib){
#ifdef FULL_DEBUG
    brain* pt_brn = get_dbg_brn();
    MARK_USED(pt_brn);

    bool tg0 = has_tag0();
    bool tg1 = has_tag1();
    bool tg2 = has_tag2();
    bool tg3 = has_tag3();
    bool tg4 = has_tag4();
    bool tg5 = has_tag5();

    if(from_pt){
        os << "ne={";
        //os << ((void*)(this));
        os << " ne_idx='" << ne_index << "' ";
        if(ne_original){ os << "o"; } else { os << "+"; }
        if(tg0){ os << ".g0"; }
        if(tg1){ os << ".g1"; }
        if(tg2){ os << ".g2"; }
        if(tg3){ os << ".g3"; }
        if(tg4){ os << ".g4"; }
        if(tg5){ os << ".g5"; }
        if(! from_tee){
            os << ne_fibres;
        } else {
            print_tees(os);
        }
        //os << " w_tk=" << ne_to_wrt_tk;
        os << " pf_tk=" << ne_proof_tk.get_str();

        bool in_abort_nmp = false;
        if((pt_brn != NULL) && (pt_brn->br_dbg_abort_nmp != NULL_PT)){
            ticket& nmp_tk = pt_brn->br_dbg_abort_nmp->na_candidate_tk;
            in_abort_nmp = ! ne_to_wrt_tk.is_older_than(nmp_tk);
        }
        if(in_abort_nmp){
            os << ".in";
        }

        os << "}";
        os.flush();
        return os;
    }

    os << "\n";
    os << "pt=" << ((void*)(this)) << bj_eol;
    os << "INDEX " << ne_index << " ";
    os << "orig=" << ((ne_original)?("yes"):("no")) << "\n";
    os << "fz=" << fib_sz() << " ";
    os << "fb[ ";
    for(long ii = 0; ii < fib_sz(); ii++){
        os << ne_fibres[ii] << " ";
    }
    os << "] ";
    ck_tunnels();
    os << "f0i=" << ne_fibre_0_idx << " ";
    os << "f1i=" << ne_fibre_1_idx << " ";
    os << "\n";
    os << "eg=" << ne_edge << " ";
    os << "egtk{" << ne_edge_tk << "} ";
    os << "src_of:";
    if(! ne_fibres.is_empty() && (ne_fibres.first()->get_source() == this)){
        os << ne_fibres.first();
    }
    os << "\n";
    os << "syns" << ne_fibres << "\n";
    os << "\n";
    os.flush();
#endif
    return os;
}
bj_ostream& neuromap::print_neuromap(bj_ostream& os, bool from_pt){
#ifdef FULL_DEBUG
    if(na_brn == NULL_PT){
        bool is_vgn = is_na_virgin();
        if(is_vgn){
            os << "VIRGIN !!! ";
        }
        os << "NO BRN for nmp=" << (void*)this << " !!!!!!!!";
        os.flush();
        return os;
    }

    brain& brn = get_brn();
    MARK_USED(brn);
    MARK_USED(from_pt);

    if(from_pt){
        row<prop_signal>& all_ps = brn.br_tmp_prt_ps;
        map_get_all_propag_ps(all_ps);

        if(na_dbg_cand_sys){
            os << "ca{";
        } else {
            os << "na{";
        }
        print_nmp_hd(os);
        os << " #qu=" << all_ps.size();

        row_quanton_t all_f_qu;
        map_get_all_quas(all_f_qu);
        os << "\n nmp_quas=" << all_f_qu << "\n";

        row_neuron_t all_f_ne;
        map_get_all_neus(all_f_ne);
        os << "\n nmp_neus_idxs=[";
        for(long aa = 0; aa < all_f_ne.size(); aa++){
            neuron* neu = all_f_ne[aa];
            BRAIN_CK(neu != NULL_PT);
            os << neu->ne_index << ".";
        }
        os << "]\n";

        os << "}";
        os.flush();
        return os;
    }

    os << "######################################################\n";
    os << "NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP_NMP\n";
    os << "######################################################\n";
    print_nmp_hd(os);
    print_all_subnmp(os);
    os << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++\n";
    os << "END_OF_NMP_END_OF_NMP_END_OF_NMP_END_OF_NMP_END_OF_NMP_\n";
    os << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++\n";
#endif
    return os;
}
void test_skl(){
    row<bool> dbg_arr;
    dbg_arr.fill(false, DBG_NUM_LEVS);
    dbg_arr[2] = true;
    //dbg_arr[95] = true;

    bj_ostream& os = bj_out;
    MARK_USED(os);

    GSKE.kg_root_path = TEST_ROOT_PATH;

    ch_string GSKE_ROOT = GSKE.kg_root_path + SKG_SKELETON_DIR;
    os << "WARNING !!! deleting '" << GSKE_ROOT << "'" << bj_eol;
    delete_directory(GSKE_ROOT);

    GSKE.init_paths();
    GSKE.print_paths(os);

    GSKE.kg_find_cnn_pth = true;
    GSKE.kg_dbg_verifying_skeleton_tree = false;
    GSKE.kg_dbg_only_save = false;

    os << "Type RETURN ..." << bj_eol;
    getchar();

    GSKE.kg_dbg_save_canon = true;

    //dbg_arr[107] = true;
    dbg_arr[73] = true;
    //dbg_arr[76] = true;
    //dbg_arr[78] = true;
    //dbg_arr[108] = true;
    //dbg_arr[109] = true;

    //ccl_skl_walker& skl_wlkr = GSKE.kg_ccl_wlkr;
    //clause_walker& wlkr = skl_wlkr.fw_walker;

    canon_cnf the_cnf;
    //wlkr.cw_cnf = &the_cnf;

    //unsigned long init_val = (long)(run_time());
    unsigned long init_val = 10;
    tak_mak rnd_gen(init_val);
    tak_mak rnd_gen2(init_val);
    glb_test_tak_mak = &rnd_gen2;

    long NUM_CNFS = 50;
    long max_ccl_sz = 5;
    long max_num_ccls_cnf = 200;
    long max_num_vars_cnf = 50;

    ch_string rr_pth = SKG_REF_DIR;
    ch_string r1_pth = rr_pth + "/test_ref1/";
    ch_string r2_pth = rr_pth + "/test_ref2/";
    ch_string r3_pth = rr_pth + "/test_ref3/";

    GSKE.ref_create(r1_pth);
    GSKE.ref_create(r2_pth);
    GSKE.ref_create(r3_pth);

    //GSKE.report_err("a_missing_path", GSKE.kg_missing_path);

    ch_string skl_nt_pth = GSKE.as_full_path(SKG_CNF_DIR);

    os << "1. SAVING TEST ..." << bj_eol;
    for(long aa = 0; aa < NUM_CNFS; aa++){
        the_cnf.release_and_init(GSKE, true);

        //gen_phases(rnd_gen, the_cnf.cf_phdat);
        gen_ccls_cnf(rnd_gen, the_cnf, max_ccl_sz, max_num_ccls_cnf, max_num_vars_cnf);
        the_cnf.cf_kind = fk_diff;

        ch_string cnn_base_name = the_cnf.get_cnf_path();
        if(! GSKE.ref_exists(cnn_base_name)){
            os << "num_test=" << aa << bj_eol;
            the_cnf.save_cnf(GSKE, cnn_base_name);

            row<neuron*> all_found;
            ch_string vpth = the_cnf.first_vnt_i_super_of(GSKE, all_found);
            MARK_USED(vpth);
            BRAIN_CK(vpth != SKG_INVALID_PTH);
        } else {
            os << bj_eol;
            os << bj_eol;
            os << "!!! SKIPPED num_test=" << aa;
        }
        the_cnf.release_and_init(GSKE, true);

        update_rnd_elap_in_dir(rnd_gen2, skl_nt_pth);
    }

    os << "3. LOADING TEST ..." << bj_eol;

    bool fnd1 = GSKE.find_skl_path(GSKE.as_full_path(rr_pth + "/test_ref1/"));
    bool fnd2 = GSKE.find_skl_path(GSKE.as_full_path(rr_pth + "/test_ref2/"));
    bool fnd3 = GSKE.find_skl_path(GSKE.as_full_path(rr_pth + "/test_ref3/"));
    MARK_USED(fnd1);
    MARK_USED(fnd2);
    MARK_USED(fnd3);
    BRAIN_CK(fnd1);
    BRAIN_CK(fnd2);
    BRAIN_CK(fnd3);

    load_all_in_dir(skl_nt_pth);

    the_cnf.release_and_init(GSKE, true);
    GSKE.clear_all();

    os << "4. FINISHING ..." << bj_eol;
    os.flush();
}
bool brain::ck_trail(){
#ifdef FULL_DEBUG
    bj_ostream& os = bj_dbg;
    brain& brn = *this;

    row_quanton_t& the_trl = br_tmp_trail;
    br_charge_trail.get_all_ordered_quantons(the_trl);

    long num_null_src = 0;
    quanton* last_choice = NULL_PT;
    quanton* prev_qua = NULL_PT;
    MARK_USED(prev_qua);

    ch_string ab_mm;

    long ch_idx = 0;
    long prev_tier = INVALID_TIER;

    for(long ii = 0; ii < the_trl.size(); ii++){
        quanton* qua = the_trl[ii];
        if(qua == NULL_PT){
            ab_mm = "NULL qua !!." + br_file_name;
            abort_func(-1, ab_mm.c_str());
        }

        if((prev_tier != INVALID_TIER) && (prev_tier > qua->qu_tier)){
            os << "qua= " << qua << bj_eol;
            print_trail(os);
            ab_mm = "case0." + br_file_name;
            abort_func(-1, ab_mm.c_str());
        }
        prev_tier = qua->qu_tier;

        //if((qua->qu_source == NULL) && (qua->qlevel() != ROOT_LEVEL)){
        if(qua->is_lv_choice(brn)){
            num_null_src++;
        }

        if(qua->get_charge() == cg_neutral){
            print_trail(os);
            ab_mm = "case2." + br_file_name;
            abort_func(-1, ab_mm.c_str());
        }
        if(qua->has_note0()){
            print_trail(os);
            ab_mm = "case3." + br_file_name;
            abort_func(-1, ab_mm.c_str());
        }

        qua->ck_charge(brn);

        bool cho = qua->is_choice();
        if(cho){
            last_choice = qua;
            if((ch_idx >= br_chosen.size()) || (br_chosen[ch_idx] != qua)){
                os << "qua= " << qua << bj_eol;
                if(ch_idx < br_chosen.size()){
                    os << "chosen= " << br_chosen[ch_idx] << bj_eol;
                }
                print_trail(os);
                os << "chosen" << bj_eol;
                os << br_chosen << bj_eol;
            }
            BRAIN_CK_0(ch_idx < br_chosen.size());
            BRAIN_CK_0(br_chosen[ch_idx] == qua);
            ch_idx++;
        }

        if(! cho && ! qua->has_source()){
            quanton* cls = qua;
            BRAIN_CK_0(cls->qlevel() == qua->qlevel());
            if((cls != last_choice) && (cls->qlevel() != 0)){
                print_trail(os);
                ab_mm = "case5." + br_file_name;
                abort_func(-1, ab_mm.c_str());
            }
        }

        prev_qua = qua;
        //prev_tk = qua->qu_charge_tk;
    }

    if((num_null_src != level()) && ((num_null_src + 1) != level())){
        os << "num_null_src=" << num_null_src << bj_eol;
        os << "lv=" << level() << bj_eol;
        print_trail(os);
        dbg_prt_lvs_cho(os);
        ab_mm = "case6." + br_file_name;
        abort_func(-1, ab_mm.c_str());
    }
#endif
    return true;
}
int main(int argc, char** argv){
    MARK_USED(argc);
    MARK_USED(argv);

    std::ostream& os = std::cout;
    MARK_USED(os);

    MEM_CTRL(mem_size tt_mem_in_u = MEM_STATS.num_bytes_in_use;)

    /* assumed tail: the excerpt is truncated here and only closed minimally */
    return 0;
}
void test_nfwt(int argc, char** argv)
{
    bj_ostream& os = bj_out;
    MARK_USED(os);

    row<bool> dbg_arr;
    dbg_arr.fill(false, DBG_NUM_LEVS);
    dbg_arr[2] = true;
    dbg_arr[78] = true;
    dbg_arr[81] = true;
    //dbg_arr[82] = true;
    dbg_arr[88] = true;
    dbg_arr[89] = true;
    //dbg_arr[92] = true;
    dbg_arr[93] = true;
    dbg_arr[94] = true;
    dbg_arr[95] = true;

    //ccl_skl_walker& skl_wlkr = GSKE.kg_ccl_wlkr;
    //clause_walker& wlkr = skl_wlkr.fw_walker;

    canon_cnf the_cnf;
    //wlkr.cw_cnf = &the_cnf;

    bool exe_wlk = false;
    bool exe_sub = false;
    bool exe_sup = false;
    bool gen_skl = false;
    bool del_skl = false;
    bool ver_skl = false;
    MARK_USED(exe_wlk);
    MARK_USED(exe_sub);
    MARK_USED(exe_sup);
    MARK_USED(ver_skl);

    row<ch_string> load_ops;
    row<ch_string> pth_ld_ops;
    row<ch_string> delete_ops;

    GSKE.kg_root_path = TEST_ROOT_PATH;

    for(long ii = 1; ii < argc; ii++){
        ch_string the_arg = argv[ii];
        if(strcmp(argv[ii], "-sub") == 0){
            exe_sub = true;
        } else if(strcmp(argv[ii], "-sup") == 0){
            exe_sup = true;
        } else if(strcmp(argv[ii], "-g") == 0){
            gen_skl = true;
        } else if(strcmp(argv[ii], "-D") == 0){
            del_skl = true;
        } else if(strcmp(argv[ii], "-w") == 0){
            exe_wlk = true;
        } else if(strcmp(argv[ii], "-v") == 0){
            ver_skl = true;
        } else if((strcmp(argv[ii], "-d") == 0) && ((ii + 1) < argc)){
            int kk_idx = ii + 1;
            ii++;
            ch_string& str_1 = delete_ops.inc_sz();
            str_1 = argv[kk_idx];
        } else if((strcmp(argv[ii], "-l") == 0) && ((ii + 1) < argc)){
            int kk_idx = ii + 1;
            ii++;
            ch_string& str_1 = load_ops.inc_sz();
            str_1 = argv[kk_idx];
        } else if((strcmp(argv[ii], "-L") == 0) && ((ii + 1) < argc)){
            int kk_idx = ii + 1;
            ii++;
            ch_string& str_1 = pth_ld_ops.inc_sz();
            str_1 = argv[kk_idx];
        } else if((strcmp(argv[ii], "-r") == 0) && ((ii + 1) < argc)){
            int kk_idx = ii + 1;
            ii++;
            GSKE.kg_root_path = argv[kk_idx];
        }
    }

    GSKE.init_paths();
    GSKE.print_paths(os);

    if(del_skl){
        ch_string GSKE_ROOT = GSKE.kg_root_path + SKG_SKELETON_DIR;
        os << "WARNING !!! deleting '" << GSKE_ROOT << "'" << bj_eol;
        delete_directory(GSKE_ROOT);
    }

    if(gen_skl){
        test_skl();
    }

    while(! load_ops.is_empty()){
        ch_string& to_load = load_ops.last();
        ch_string rel_pth = SKG_CNF_DIR + to_load;
        ch_string full_pth = GSKE.as_full_path(rel_pth);

        os << "to_load='" << full_pth << "'" << bj_eol;

        bool all_ok = the_cnf.load_from(GSKE, full_pth);
        if(all_ok){
            the_cnf.print_canon_cnf(os);
        } else {
            os << "LOAD OF " << full_pth << " FAILED !!" << bj_eol;
        }
        os << bj_eol;

        load_ops.dec_sz();
    }

    while(! pth_ld_ops.is_empty()){
        ch_string& to_load = pth_ld_ops.last();
        ch_string full_pth = to_load;

        os << "to_load='" << full_pth << "'" << bj_eol;

        bool all_ok = the_cnf.load_from(GSKE, full_pth);
        if(all_ok){
            the_cnf.print_canon_cnf(os);
        } else {
            os << "LOAD OF " << full_pth << " FAILED !!" << bj_eol;
        }
        os << bj_eol;

        pth_ld_ops.dec_sz();
    }

    while(! delete_ops.is_empty()){
        ch_string& to_del = delete_ops.last();
        os << "to_del='" << to_del << "'" << bj_eol;
        delete_ops.dec_sz();
    }
}
page_t * buddy_page_alloc(uint32_t order, pgflags_t flags)
{
    /*
     * TODO: a lock should be acquired here to protect the list
     * handling. Linux uses an interrupt-safe spinlock for this.
     * Ideally the access would be locked only when absolutely
     * necessary, which would allow multiple allocations to be
     * satisfied at the same time.
     */
    if (order >= MAX_ORDER)
        return NULL;

    uint32_t current_order = order;
    buddy_list_t *free_list = &buddy.free_lists[order];
    struct list_head *head;
    struct list_head *curr;
    page_t *page;

    do {
        /* free-page-block list of the current order */
        head = &free_list->head;
        /* the first free page block */
        curr = head->next;

        /* if the list is not empty, there are pages to take */
        if (!list_empty(head)) {
            /* index of the page within mem_map */
            unsigned int index;

            /* get the page structure ('head' here is the member, not the variable) */
            page = list_entry(curr, page_t, head);

            /* remove the current page block from the list */
            list_del(curr);

            /* compute the mem_map index of the block's first page */
            index = page - mem_map;

#define MARK_USED(index, order, free_list) \
    bit_complement((index) >> (1+(order)), (free_list)->page_bitmap)

            /* at the highest order there is no buddy to track */
            if (current_order != MAX_ORDER-1)
                MARK_USED(index, current_order, free_list);

            /* size of the current block */
            uint32_t size = 1 << current_order;

            /*
             * If current_order > order, there was no block of exactly
             * 2**order pages, so a larger order was found and must be split.
             */
            while (current_order > order) {
                /* get the free list of the order below */
                free_list--;
                /* decrement the current order */
                current_order--;
                /* size of a 2**current_order block */
                size >>= 1;
                /* add buddy1 to the free-block list */
                list_add(&(page)->head, &free_list->head);
                /*
                 * Here we mark that one of the two buddies is in use,
                 * without saying which of the two.
                 */
                MARK_USED(index, current_order, free_list);
                /* continue with the next one */
                index += size;
                /*
                 * Next page: 'size' is really the number of pages to
                 * skip, i.e. the size of buddy1.
                 */
                page += size;
            }
            /* current_order == order -> we have our block */
            //set_page_count(page, 1);
            return page;
        }
        current_order++;
        free_list++;
    } while (current_order < MAX_ORDER);

    /* assumed tail (the excerpt is truncated here): nothing free at any order */
    return NULL;
}
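/*
 * Illustrative sketch (not from the source) of which bitmap bit the
 * MARK_USED macro above toggles: each bit covers one buddy pair of
 * 2^(order+1) pages, so both buddies of a pair map to the same bit,
 * and toggling it on every alloc/free of either buddy means the bit
 * is set exactly when one buddy is in use and the other is free.
 */
#include <stdio.h>

static unsigned pair_bit(unsigned index, unsigned order)
{
    return index >> (1 + order); /* one bit per pair of order-sized buddies */
}

int main(void)
{
    /* pages 12..13 and 14..15 are order-1 buddies; both map to bit 3 */
    printf("%u %u\n", pair_bit(12, 1), pair_bit(14, 1)); /* prints: 3 3 */
    return 0;
}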