// Validate every loaded requirement definition: its tool, component and
// quality lists are each checked under the requirement's id string.
void requirement_data::check_consistency()
{
    for( const auto &entry : all() ) {
        // Compute the display name once instead of per list.
        const std::string name = entry.first.str();
        const auto &req = entry.second;
        check_consistency( req.tools, name );
        check_consistency( req.components, name );
        check_consistency( req.qualities, name );
    }
}
// Remove row k from the matrix entirely: detach its cells from every
// column, renumber the row indices cached in the columns, then erase
// the row itself.
template <typename T, typename X> void static_matrix<T, X>::cross_out_row(unsigned k) {
#ifdef LEAN_DEBUG
    check_consistency();
#endif
    // Drop row k's cells from the column-wise representation while
    // m_rows[k] still lists them.
    cross_out_row_from_columns(k, m_rows[k]);
    // Rows below k shift up by one after the erase, so the row indices
    // stored inside the columns must be fixed up first.
    fix_row_indices_in_each_column_for_crossed_row(k);
    m_rows.erase(m_rows.begin() + k);
#ifdef LEAN_DEBUG
    regen_domain();
    check_consistency();
#endif
}
void ZSolveAPI<T>::compute() { check_consistency(); Algorithm <T>* algorithm; DefaultController <T> * controller; std::ofstream* log_file = 0; if (options.loglevel () > 0) { std::string log_name = options.project () + ".log"; log_file = new std::ofstream (log_name.c_str(), options.resume () ? std::ios::out | std::ios::app : std::ios::out); } controller = new DefaultController <T> (&std::cout, log_file, options); std::string backup_name = options.project () + ".backup"; std::ifstream backup_file (backup_name.c_str()); if (backup_file.good () && !options.resume ()) { throw IOException ("Found backup file. Please restart with -r or delete backup file!\n", false); } if (options.resume()) { if (!backup_file.good()) { throw IOException ("Started in resume mode, but no backup file found!\n", false); } backup_file >> options; algorithm = new Algorithm <T> (backup_file, controller); } else if (mat) {
/*
 * Cluster three 4-base DNA strands into K groups and verify that each
 * strand lands in its own cluster (groups are -1 terminated index lists).
 */
int main() {
	char points[STRAND_COUNT * LEN] = {'A', 'A', 'A', 'A',
	    'T', 'T', 'T', 'T',
	    'C', 'C', 'C', 'C'};
	float epsilon = .001;
	int32_t max_iters = 20;
	int32_t group0[2] = {0, -1};
	int32_t group1[2] = {1, -1};
	int32_t group2[2] = {2, -1};
	int32_t *groups[3] = {group0, group1, group2};
	struct k_means_converge_t *converge = malloc(sizeof(*converge));

	/* BUGFIX: the malloc result was dereferenced without a check. */
	if (converge == NULL) {
		return 1;
	}
	converge->epsilon = epsilon;
	converge->max_iters = max_iters;
	struct k_means_t *km = dna_cluster_new(NULL, converge, K, points,
	    LEN, STRAND_COUNT);

	check_consistency(km);
	check_clustering(km, K, groups);
	k_means_free(km);
	free(converge);
}
/* Make the program stack executable (used for lazy enabling of PF_X,
   e.g. when a loaded DSO requires an executable stack).  Only the
   dynamic loader / libpthread may call this, and only with the real
   stack end pointer.  Returns 0 on success or an errno value.  */
int
internal_function
_dl_make_stack_executable (void **stack_endp)
{
  /* This gives us the highest/lowest page that needs to be changed.  */
  uintptr_t page = ((uintptr_t) *stack_endp
		    & -(intptr_t) GLRO(dl_pagesize));
  int result = 0;

  /* Challenge the caller: reject any return address outside ld.so or
     libpthread, and any stack-end value other than the canonical one.  */
  if (__builtin_expect (__check_caller (RETURN_ADDRESS (0),
					allow_ldso|allow_libpthread) != 0, 0)
      || __builtin_expect (*stack_endp != __libc_stack_end, 0))
    return EPERM;

  if (__builtin_expect (__mprotect ((void *) page, GLRO(dl_pagesize),
				    __stack_prot) == 0, 1))
    goto return_success;
  result = errno;
  goto out;

 return_success:
  /* Clear the address so the interface cannot be replayed.  */
  *stack_endp = NULL;

  /* Remember that we changed the permission.  */
  GL(dl_stack_flags) |= PF_X;

 out:
#ifdef check_consistency
  check_consistency ();
#endif

  return result;
}
int main(int argc, char **argv) { float points[POINT_COUNT * DIM] = {1., 1.1, 0.9, 1., 1.1, 1., 2., 1.1, 1.9, 1., 2.1, 1., 1., 2.1, 0.9, 2., 1.1, 2.}; float epsilon = 0.001; int32_t max_iters = 10; int32_t group0[4] = {0, 1, 2, -1}; int32_t group1[4] = {3, 4, 5, -1}; int32_t group2[4] = {6, 7, 8, -1}; int32_t *groups[3] = {group0, group1, group2}; struct k_means_converge_t *converge = malloc(sizeof(*converge)); converge->epsilon = epsilon; converge->max_iters = max_iters; // NNED TO PASS A PARAMETER TO SAY MPI OR NOT struct k_means_t *km = point_cluster_new(argc, argv, NULL, converge, K, points, DIM, POINT_COUNT, false); check_consistency(km); check_clustering(km, K, groups); k_means_free(km); free(converge); }
/* Release the global ncurses lock: raise the active window's panel, flush
 * pending screen updates, verify invariants, then drop the mutex.
 *
 * BUGFIX: top_panel() and pthread_mutex_unlock() used to be invoked
 * INSIDE assert(); when NDEBUG is defined those expressions are removed,
 * so the panel would never be raised and the mutex never released
 * (deadlock). Perform the calls unconditionally and assert on results. */
static inline void unlock_ncurses(void){
	if(active){
		int perr = top_panel(active->p);

		assert(perr != ERR);
		(void)perr;
	}
	screen_update();
	check_consistency();
	{
		int merr = pthread_mutex_unlock(&bfl);

		assert(merr == 0);
		(void)merr;
	}
}
// Chain continuation handlers onto the in-flight request identified by
// m_mid: once the response arrives it is matched against the given
// callables. Enabled only when every Fs is callable; returns another
// continue_helper carrying the same message id so chaining can continue.
typename std::enable_if<
    util::all_callable<Fs...>::value,
    continue_helper
>::type
then(Fs... fs) {
    check_consistency();
    // Put the actor into a waiting state whose behavior is synthesized
    // from the supplied handlers, keyed to this message id.
    self->become_waiting_for(fs2bhvr(std::move(fs)...), m_mid);
    return {m_mid};
}
/* Test driver: bring the harness up, run the consistency checks, and
   tear everything down before exiting with success.  */
int
main (void)
{
  tests_start ();

  check_consistency ();

  tests_end ();
  exit (0);
}
// usage: LIMIT soft hard ... -- executable arg1 arg2 ... int main(int c, char *v[]) { check_consistency(); traced_program p[1]; read_args(p, c, v); run_limited_program(p); return p->report_exit_fail ? p->report_exit_status : EXIT_SUCCESS; }
// Post-load fixup pass: resolve cross references between the loaded data
// sets. The call order matters (e.g. missions need overmap terrain).
void DynamicDataLoader::finalize_loaded_data() {
    g->init_missions(); // Needs overmap terrain.
    init_data_mappings();
    finalize_overmap_terrain();
    calculate_mapgen_weights();
    MonsterGenerator::generator().finalize_mtypes();
    MonsterGroupManager::FinalizeMonsterGroups();
    g->finalize_vehicles();
    item_controller->finialize_item_blacklist(); // (sic: name misspelled in the item API)
    finalize_recipes();
    // Final cross-check over everything finalized above.
    check_consistency();
}
// Build a namespace state snapshot from its config section. The function
// try-block rewraps any construction failure with the namespace name.
//
// BUGFIX: the handler previously read the member `name` after moving
// name_ into it. In a constructor's function-try-block the handler runs
// after fully-constructed members have been destroyed, so reading a
// member there is undefined behavior. Copy the parameter into the member
// instead and build the message from the parameter, which is still valid
// in the handler.
mastermind::namespace_state_t::data_t::data_t(std::string name_, const kora::config_t &config
		, const user_settings_factory_t &factory)
try
	: name(name_)
	, settings(name, config.at("settings"), factory)
	, couples(config.at("couples"))
	, weights(config.at("weights"), settings.groups_count, !settings.static_groups.empty())
	, statistics(config.at("statistics"))
{
	// All members parsed; verify they agree with each other.
	check_consistency();
} catch (const std::exception &ex) {
	throw std::runtime_error("cannot create ns-state " + name_ + ": " + ex.what());
}
void server_state::load(const char* chk_point) { FILE* fp = ::fopen(chk_point, "rb"); int32_t len; ::fread((void*)&len, sizeof(int32_t), 1, fp); std::shared_ptr<char> buffer(new char[len]); ::fread((void*)buffer.get(), len, 1, fp); blob bb(buffer, 0, len); binary_reader reader(bb); unmarshall(reader, _apps); ::fclose(fp); dassert(_apps.size() == 1, ""); auto& app = _apps[0]; for (int i = 0; i < app.partition_count; i++) { auto& ps = app.partitions[i]; if (ps.primary.is_invalid() == false) { _nodes[ps.primary].primaries.insert(ps.gpid); _nodes[ps.primary].partitions.insert(ps.gpid); } for (auto& ep : ps.secondaries) { dassert(ep.is_invalid() == false, ""); _nodes[ep].partitions.insert(ps.gpid); } } for (auto& node : _nodes) { node.second.address = node.first; node.second.is_alive = true; _node_live_count++; } for (auto& app : _apps) { for (auto& par : app.partitions) { check_consistency(par.gpid); } } }
void main(void)
{
	/* Program entry point: verify resources, then run the browser. */
	int key;
	if (!check_consistency())	/* Font file missing: show an error. */
	{
		locate_OS(1,1);
		Print_OS("Can't find font file.",0,0);
		/* Block forever polling keys; the user must reset the device. */
		while (1) GetKey(&key);
	}
	browse_main();	/* Otherwise enter the file browser. */
	return;
}
// Post-load fixup pass: resolve cross references between the loaded data
// sets. The call order matters (e.g. missions need overmap terrain, the
// id caches must exist before map-dependent steps).
void DynamicDataLoader::finalize_loaded_data() {
    mission_type::initialize(); // Needs overmap terrain.
    set_ter_ids();
    set_furn_ids();
    set_oter_ids();
    trap::finalize();
    finalize_overmap_terrain();
    g->finalize_vehicles();
    calculate_mapgen_weights();
    MonsterGenerator::generator().finalize_mtypes();
    MonsterGenerator::generator().finalize_monfactions();
    MonsterGroupManager::FinalizeMonsterGroups();
    item_controller->finialize_item_blacklist(); // (sic: name misspelled in the item API)
    finalize_recipes();
    // Final cross-check over everything finalized above.
    check_consistency();
}
// Parse an md5anim file into a data_anim structure.
// Returns {true, data} on success, {false, empty} when a section handler
// fails or the assembled data fails the consistency check.
std::pair<bool, file_md5_anim::data_anim> file_md5_anim::process( std::string file_path ){
    data_anim d {};
    std::fstream f( file_path, std::fstream::in );
    // Pull keyword tokens until END; each recognized token dispatches to a
    // section handler that fills in d.
    std::pair< file_md5_common::token, std::string> t = file_md5_common::get_token( _keyword_map, f );
    while( file_md5_common::token::END != t.first ){
        if( file_md5_common::token::INVALID == t.first ){
            // Debug builds abort here; release builds stop parsing and
            // fall through to the consistency check.
            assert( 0 && "invalid token encountered" );
            break;
        }
        if( !process_token( t, f, (void*)&d ) ){
            return { false, {} };
        }
        t = file_md5_common::get_token( _keyword_map, f );
    }
    // Cross-check the parsed sections before handing the data back.
    if( !check_consistency( d ) ){
        return { false, {} };
    }
    return std::pair<bool, data_anim>( true, std::move(d) );
}
/* Realize the CPU model chosen for this vCPU: fill in host-dependent
 * fields from the maximum supported model, verify internal consistency
 * and host compatibility, then apply the model. */
void s390_realize_cpu_model(CPUState *cs, Error **errp)
{
    S390CPUClass *xcc = S390_CPU_GET_CLASS(cs);
    S390CPU *cpu = S390_CPU(cs);
    const S390CPUModel *max_model;

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(errp, "CPU definition requires KVM");
        return;
    }

    if (!cpu->model) {
        /* no host model support -> perform compatibility stuff */
        apply_cpu_model(NULL, errp);
        return;
    }

    max_model = get_max_cpu_model(errp);
    if (*errp) {
        error_prepend(errp, "CPU models are not available: ");
        return;
    }

    /* copy over properties that can vary */
    cpu->model->lowest_ibc = max_model->lowest_ibc;
    cpu->model->cpu_id = max_model->cpu_id;
    cpu->model->cpu_id_format = max_model->cpu_id_format;
    cpu->model->cpu_ver = max_model->cpu_ver;

    check_consistency(cpu->model);
    check_compatibility(max_model, cpu->model, errp);
    if (*errp) {
        return;
    }

    apply_cpu_model(cpu->model, errp);

    cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model);
    if (tcg_enabled()) {
        /* basic mode, write the cpu address into the first 4 bit of the ID */
        cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.cpu_num);
    }
}
// Run every post-load finalization step, reporting progress through the
// given loading UI. May only be called once per load (asserted below).
// The table order matters: later steps depend on earlier ones (e.g.
// recipes need finalized items).
void DynamicDataLoader::finalize_loaded_data( loading_ui &ui )
{
    assert( !finalized && "Can't finalize the data twice." );
    ui.new_context( _( "Finalizing" ) );

    // Each entry pairs a translated UI label with the callback that
    // performs the step.
    using named_entry = std::pair<std::string, std::function<void()>>;
    const std::vector<named_entry> entries = {{
            { _( "Body parts" ), &body_part_struct::finalize_all },
            { _( "Items" ), []()
            {
                item_controller->finalize();
            }
            },
            { _( "Crafting requirements" ), []()
            {
                requirement_data::finalize();
            }
            },
            { _( "Vehicle parts" ), &vpart_info::finalize },
            { _( "Traps" ), &trap::finalize },
            { _( "Terrain" ), &set_ter_ids },
            { _( "Furniture" ), &set_furn_ids },
            { _( "Overmap terrain" ), &overmap_terrains::finalize },
            { _( "Overmap connections" ), &overmap_connections::finalize },
            { _( "Overmap specials" ), &overmap_specials::finalize },
            { _( "Vehicle prototypes" ), &vehicle_prototype::finalize },
            { _( "Mapgen weights" ), &calculate_mapgen_weights },
            { _( "Monster types" ), []()
            {
                MonsterGenerator::generator().finalize_mtypes();
            }
            },
            { _( "Monster groups" ), &MonsterGroupManager::FinalizeMonsterGroups },
            { _( "Monster factions" ), &monfactions::finalize },
            { _( "Crafting recipes" ), &recipe_dictionary::finalize },
            { _( "Martial arts" ), &finialize_martial_arts },
            { _( "Constructions" ), &finalize_constructions },
            { _( "NPC classes" ), &npc_class::finalize_all },
            { _( "Harvest lists" ), &harvest_list::finalize_all },
            { _( "Anatomies" ), &anatomy::finalize_all }
        }
    };

    // Register all labels first so the UI knows the total step count.
    for( const named_entry &e : entries ) {
        ui.add_entry( e.first );
    }

    ui.show();
    for( const named_entry &e : entries ) {
        e.second();
        ui.proceed();
    }

    // Cross-check the fully finalized data set.
    check_consistency( ui );
    finalized = true;
}
// Post-load fixup pass: resolve cross references between the loaded data
// sets. The call order matters (items before recipes, id caches before
// map-dependent steps, etc.).
void DynamicDataLoader::finalize_loaded_data() {
    item_controller->finalize();
    vpart_info::finalize();
    mission_type::initialize(); // Needs overmap terrain.
    set_ter_ids();
    set_furn_ids();
    set_oter_ids();
    trap::finalize();
    finalize_overmap_terrain();
    vehicle_prototype::finalize();
    calculate_mapgen_weights();
    MonsterGenerator::generator().finalize_mtypes();
    MonsterGroupManager::FinalizeMonsterGroups();
    monfactions::finalize();
    finalize_recipes();
    finialize_martial_arts(); // (sic: name misspelled in the martial arts API)
    finalize_constructions();
    // Final cross-check over everything finalized above.
    check_consistency();
}
// Parse command-line options into the quadrature parameters. The last
// argument is taken as the output file name; the remaining arguments are
// handled by getopt_long. Finishes with a consistency check of the
// resulting option set.
QuadParams::QuadParams(int argc, char** argv)
	: outname(NULL), vcovname(NULL), max_level(3), discard_weight(0.0)
{
	if (argc == 1) {
		// print the help and exit
		// NOTE(review): no usage text is actually printed before
		// exiting -- confirm whether help output was intended here.
		exit(1);
	}

	// The output file name is the last argument; strip it before getopt.
	outname = argv[argc-1];
	argc--;

	struct option const opts [] = {
		{"max-level", required_argument, NULL, opt_max_level},
		{"discard-weight", required_argument, NULL, opt_discard_weight},
		{"vcov", required_argument, NULL, opt_vcov},
		{NULL, 0, NULL, 0}
	};

	int ret;
	int index;
	while (-1 != (ret = getopt_long(argc, argv, "", opts, &index))) {
		switch (ret) {
		case opt_max_level:
			// Unparsable values are reported and ignored,
			// keeping the defaults set above.
			if (1 != sscanf(optarg, "%d", &max_level))
				fprintf(stderr, "Couldn't parse integer %s, ignored\n", optarg);
			break;
		case opt_discard_weight:
			if (1 != sscanf(optarg, "%lf", &discard_weight))
				fprintf(stderr, "Couldn't parse float %s, ignored\n", optarg);
			break;
		case opt_vcov:
			vcovname = optarg;
			break;
		}
	}

	check_consistency();
}
/* Realize the CPU model chosen for this vCPU: fill in host-dependent
 * fields from the maximum supported model, verify internal consistency
 * and host compatibility, then apply the model. */
void s390_realize_cpu_model(CPUState *cs, Error **errp)
{
    S390CPUClass *xcc = S390_CPU_GET_CLASS(cs);
    S390CPU *cpu = S390_CPU(cs);
    const S390CPUModel *max_model;

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(errp, "CPU definition requires KVM");
        return;
    }

    if (!cpu->model) {
        /* no host model support -> perform compatibility stuff */
        apply_cpu_model(NULL, errp);
        return;
    }

    max_model = get_max_cpu_model(errp);
    if (*errp) {
        error_prepend(errp, "CPU models are not available: ");
        return;
    }

    /* copy over properties that can vary */
    cpu->model->lowest_ibc = max_model->lowest_ibc;
    cpu->model->cpu_id = max_model->cpu_id;
    cpu->model->cpu_ver = max_model->cpu_ver;

    check_consistency(cpu->model);
    check_compatibility(max_model, cpu->model, errp);
    if (*errp) {
        return;
    }

    apply_cpu_model(cpu->model, errp);
}
// Compute the Graver basis for the configured input. A LinearSystem is
// built from <project>.mat (plus optional rhs/sign/rel/lb/ub files) or a
// Lattice from <project>.lat, then the ExtendedPottier algorithm is run.
// Throws IOException on malformed constraint values or missing input.
void GraverAPI<T>::compute()
{
    check_consistency();

    Algorithm <T>* algorithm;
    DefaultController <T> * controller;
    std::ofstream* log_file = 0;

    // Optional log file next to the project files; appended in resume mode.
    if (ZSolveAPI<T>::options.loglevel () > 0) {
        std::string log_name = ZSolveAPI<T>::options.project () + ".log";
        log_file = new std::ofstream (log_name.c_str(),
            ZSolveAPI<T>::options.resume () ? std::ios::out | std::ios::app : std::ios::out);
    }

    controller = new DefaultController <T> (&std::cout, log_file, ZSolveAPI<T>::options);

    if (ZSolveAPI<T>::mat) {
        /// @TODO: transfer rhs, ub, lb, sign and rel.
        // Build a linear system from the matrix plus (optional) rhs.
        T* rhs_vec = create_zero_vector <T> (ZSolveAPI<T>::mat->data.height());
        if (ZSolveAPI<T>::rhs) {
            for (size_t i = 0; i < ZSolveAPI<T>::rhs->data.width(); ++i) {
                rhs_vec[i] = ZSolveAPI<T>::rhs->data[0][i];
            }
        }
        LinearSystem <T> * system = new LinearSystem <T> (ZSolveAPI<T>::mat->data, rhs_vec,
            ZSolveAPI<T>::free_default, ZSolveAPI<T>::lower_default, ZSolveAPI<T>::upper_default);
        delete_vector(rhs_vec);
        // Per-variable sign codes: 0 = free, 1 = non-negative,
        // -1 = non-positive, 2 = bounded both ways.
        if (ZSolveAPI<T>::sign) {
            for (size_t i = 0; i < ZSolveAPI<T>::sign->data.width(); ++i) {
                switch (ZSolveAPI<T>::sign->data[0][i]) {
                case 0:
                    system->get_variable(i).set(true);
                    break;
                case 1:
                    system->get_variable(i).set(false, 0, -1);
                    break;
                case -1:
                    system->get_variable(i).set(false, 1, 0);
                    break;
                case 2:
                    system->get_variable(i).set(false);
                    break;
                default:
                    /// @TODO: The following error message should be more informative.
                    throw IOException("Unknown sign value.");
                }
            }
        }
        // Per-row relation codes: 0 = '=', 1 = '>=', -1 = '<='.
        if (ZSolveAPI<T>::rel) {
            for (size_t i = 0; i < ZSolveAPI<T>::rel->data.width(); ++i) {
                switch (ZSolveAPI<T>::rel->data[0][i]) {
                case 0:
                    system->get_relation(i).set(Relation<T> :: Equal);
                    break;
                case 1:
                    system->get_relation(i).set(Relation<T> :: GreaterEqual);
                    break;
                case -1:
                    system->get_relation(i).set(Relation<T> :: LesserEqual);
                    break;
                default:
                    /// @TODO: The following error message should be more informative.
                    throw IOException("Unknown relation value.");
                }
            }
        }
        // Optional explicit per-variable lower/upper bounds.
        if (ZSolveAPI<T>::lb) {
            for (size_t i = 0; i < ZSolveAPI<T>::lb->data.width(); ++i) {
                system->get_variable(i).set_bound(true, ZSolveAPI<T>::lb->data[0][i]);
            }
        }
        if (ZSolveAPI<T>::ub) {
            for (size_t i = 0; i < ZSolveAPI<T>::ub->data.width(); ++i) {
                system->get_variable(i).set_bound(false, ZSolveAPI<T>::ub->data[0][i]);
            }
        }
        system->cancel_down();
        // The algorithm takes what it needs from the system during init,
        // so the system itself can be freed immediately afterwards.
        algorithm = new ExtendedPottier <T>;
        algorithm->init(system, controller);
        delete system;
    }
    else if (ZSolveAPI<T>::lat) {
        /// @TODO: transfer ub, lb, and sign.
        // Same setup, but starting from an explicit lattice basis.
        Lattice <T> * lattice = new Lattice <T> (& ZSolveAPI<T>::lat->data,
            ZSolveAPI<T>::free_default, ZSolveAPI<T>::lower_default, ZSolveAPI<T>::upper_default);
        if (ZSolveAPI<T>::sign) {
            for (size_t i = 0; i < ZSolveAPI<T>::sign->data.width(); ++i) {
                switch (ZSolveAPI<T>::sign->data[0][i]) {
                case 0:
                    lattice->get_variable(i).set(true);
                    break;
                case 1:
                    lattice->get_variable(i).set(false, 0, -1);
                    break;
                case -1:
                    lattice->get_variable(i).set(false, 1, 0);
                    break;
                case 2:
                    lattice->get_variable(i).set(false);
                    break;
                default:
                    /// @TODO: The following error message should be more informative.
                    throw IOException("Unknown sign value.");
                }
            }
        }
        if (ZSolveAPI<T>::lb) {
            for (size_t i = 0; i < ZSolveAPI<T>::lb->data.width(); ++i) {
                lattice->get_variable(i).set_bound(true, ZSolveAPI<T>::lb->data[0][i]);
            }
        }
        if (ZSolveAPI<T>::ub) {
            for (size_t i = 0; i < ZSolveAPI<T>::ub->data.width(); ++i) {
                lattice->get_variable(i).set_bound(false, ZSolveAPI<T>::ub->data[0][i]);
            }
        }
        lattice->reduce_gaussian();
        algorithm = new ExtendedPottier <T>;
        algorithm->init(lattice, controller);
        delete lattice;
    }
    else {
        throw IOException ("Neither " + ZSolveAPI<T>::options.project () + ".mat, "
            + ZSolveAPI<T>::options.project () + ".lat, nor "
            + ZSolveAPI<T>::options.project () + ".backup found!");
    }

    // Actual computation starts here.
    algorithm->compute (ZSolveAPI<T>::options.backup_frequency ());
    algorithm->log_maxnorm ();

    extract_results(algorithm);

    delete algorithm;
    delete controller;
    if (log_file) {
        delete log_file;
    }
}
/*
 * Parse the next entry from the file.
 * Creates inode, file, path and archive info and
 * inserts them into the database.
 * Returns 0 on success, -1 on error.
 */
static int
load_entry(sam_db_context_t *con, char *entry)
{
	/* Running input line counter; persists across calls for messages. */
	static int line_num = 0;
	char *field[LOAD_MAX_FIELDS];
	sam_db_inode_t inode;
	sam_db_file_t file;
	sam_db_path_t path;
	sam_db_archive_t archive;
	sam_event_t check_event;
	char pid_path[MAXPATHLEN];
	int len;
	struct sam_stat pid_stat;

	memset(&inode, 0, sizeof (sam_db_inode_t));
	memset(&file, 0, sizeof (sam_db_file_t));
	memset(&path, 0, sizeof (sam_db_path_t));
	memset(&archive, 0, sizeof (sam_db_archive_t));
	memset(&pid_stat, 0, sizeof (struct sam_stat));
	memset(&check_event, 0, sizeof (sam_event_t));

	/* Load first line */
	line_num++;
	if ((entry = parse_fields(entry, field, INODE_NUM_FIELDS)) == NULL) {
		fprintf(stderr, "Error parsing inode line %d\n", line_num);
		return (-1);
	}
	inode.ino = atoi(field[0]);
	inode.gen = atoi(field[1]);
	inode.type = atoi(field[2]);
	/* All four record types key on the same ino/gen pair. */
	file.ino = inode.ino;
	file.gen = inode.gen;
	path.ino = inode.ino;
	path.gen = inode.gen;
	archive.ino = inode.ino;
	archive.gen = inode.gen;
	inode.size = atoll(field[3]);
	strncpy(inode.csum, field[4], sizeof (inode.csum));
	inode.create_time = atoi(field[5]);
	inode.modify_time = atoi(field[6]);
	inode.uid = atoi(field[7]);
	inode.gid = atoi(field[8]);
	inode.online = atoi(field[9]);

	/* Load path line */
	line_num++;
	if ((entry = parse_fields(entry, field, FILE_NUM_FIELDS)) == NULL) {
		fprintf(stderr, "Error parsing path line %d\n", line_num);
		return (-1);
	}
	/*
	 * NOTE(review): the check is '< -1', so a -1 return from
	 * normalize_path() would NOT be treated as an error -- confirm
	 * against normalize_path()'s contract.
	 */
	if (normalize_path(path.path, field[0], con->mount_point,
	    MAXPATHLEN) < -1) {
		fprintf(stderr, "Invalid path: %s line %d\n",
		    field[0], line_num);
		return (-1);
	}
	strncpy(file.name, field[1], MAXNAMELEN);
	file.name_hash = sam_dir_gennamehash(strlen(file.name), file.name);

	/* Get parent information */
	strcpy(pid_path, con->mount_point);
	len = strlcat(pid_path, path.path, MAXPATHLEN);
	/*
	 * NOTE(review): strlcat returns the length it TRIED to create; if
	 * the result was truncated (len >= MAXPATHLEN) this indexes past
	 * the buffer -- verify inputs can never exceed MAXPATHLEN.
	 */
	pid_path[len-1] = '\0'; /* Remove trailing / for stat */
	if (sam_stat(pid_path, &pid_stat, sizeof (pid_stat))) {
		fprintf(stderr, "Failed getting parent stat %s line %d\n",
		    pid_path, line_num);
		return (-1);
	}
	file.p_ino = pid_stat.st_ino;
	file.p_gen = pid_stat.gen;

	/* Load link line */
	if (inode.type == FTYPE_LINK) {
		line_num++;
		if ((entry = parse_fields(entry, field,
		    LINK_NUM_FIELDS)) == NULL) {
			fprintf(stderr, "Error parsing entry link line %d.\n",
			    line_num);
			return (-1);
		}
		/* Links store the link target as their path. */
		strncpy(path.path, field[0], MAXPATHLEN);
	} else if (inode.type == FTYPE_DIR) {
		/* Directories store their own full path with trailing '/'. */
		strcat(path.path, file.name);
		strcat(path.path, "/");
	}

	/* Insert into the database */
	if (sam_db_inode_insert(con, &inode) < 0) {
		goto check_error;
	}
	if (sam_db_file_insert(con, &file) < 0) {
		goto check_error;
	}
	if (inode.type == FTYPE_LINK || inode.type == FTYPE_DIR) {
		if (sam_db_path_insert(con, &path) < 0) {
			goto check_error;
		}
	}

	/* Process archive lines */
	while (*entry != '\0') {
		line_num++;
		if ((entry = parse_fields(entry, field,
		    ARCH_NUM_FIELDS)) == NULL) {
			fprintf(stderr, "Error parsing archive line %d.\n",
			    line_num);
			return (-1);
		}
		archive.copy = atoi(field[0]);
		archive.seq = atoi(field[1]);
		strncpy(archive.media_type, field[2],
		    sizeof (archive.media_type));
		strncpy(archive.vsn, field[3], sizeof (archive.vsn));
		archive.position = atoll(field[4]);
		archive.offset = atoi(field[5]);
		archive.size = atoll(field[6]);
		archive.create_time = atoi(field[7]);
		archive.stale = atoi(field[8]);

		/* Replace any existing archive entries */
		if (sam_db_archive_replace(con, &archive) < 0) {
			goto check_error;
		}
	}

	return (0);

check_error:
	/*
	 * If there was an error run check_consistency to see
	 * if we can recover from it.
	 */
	check_event.ev_num = ev_create;
	check_event.ev_id.ino = inode.ino;
	check_event.ev_id.gen = inode.gen;
	check_event.ev_pid.ino = file.p_ino;
	check_event.ev_pid.gen = file.p_gen;
	return (check_consistency(con, &check_event, TRUE));
}
// Apply one configuration-change request to the in-memory meta state.
// A request is accepted only when its ballot is exactly old.ballot + 1;
// otherwise the reply is ERR_INVALID_VERSION with the current config.
// On success the per-node membership sets are updated to match the
// request type and the partition config is replaced wholesale.
void server_state::update_configuration_internal(configuration_update_request& request, /*out*/ configuration_update_response& response)
{
    app_state& app = _apps[request.config.gpid.app_id - 1];
    partition_configuration& old = app.partitions[request.config.gpid.pidx];
    if (old.ballot + 1 == request.config.ballot)
    {
        response.err = ERR_OK;
        response.config = request.config;

        auto it = _nodes.find(request.node);
        dassert(it != _nodes.end(), "");
        node_state& node = it->second;

        // Keep node.partitions / node.primaries in sync with the
        // membership change being applied.
        switch (request.type)
        {
        case CT_ASSIGN_PRIMARY:
# ifdef _DEBUG
            dassert(old.primary != request.node, "");
            dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) == old.secondaries.end(), "");
# endif
            node.partitions.insert(old.gpid);
            node.primaries.insert(old.gpid);
            break;
        case CT_UPGRADE_TO_PRIMARY:
# ifdef _DEBUG
            dassert(old.primary != request.node, "");
            dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) != old.secondaries.end(), "");
# endif
            node.partitions.insert(old.gpid);
            node.primaries.insert(old.gpid);
            break;
        case CT_ADD_SECONDARY:
            // Secondaries are added through a different code path.
            dassert(false, "invalid execution flow");
            break;
        case CT_DOWNGRADE_TO_SECONDARY:
# ifdef _DEBUG
            dassert(old.primary == request.node, "");
            dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) == old.secondaries.end(), "");
# endif
            node.primaries.erase(old.gpid);
            break;
        case CT_DOWNGRADE_TO_INACTIVE:
        case CT_REMOVE:
# ifdef _DEBUG
            dassert(old.primary == request.node ||
                std::find(old.secondaries.begin(), old.secondaries.end(), request.node) != old.secondaries.end(), "");
# endif
            if (request.node == old.primary)
            {
                node.primaries.erase(old.gpid);
            }
            node.partitions.erase(old.gpid);
            break;
        case CT_UPGRADE_TO_SECONDARY:
# ifdef _DEBUG
            dassert(old.primary != request.node, "");
            dassert(std::find(old.secondaries.begin(), old.secondaries.end(), request.node) == old.secondaries.end(), "");
# endif
            node.partitions.insert(old.gpid);
            break;
        default:
            dassert(false, "invalid config type %x", static_cast<int>(request.type));
        }

        // update to new config
        old = request.config;

        // Render the accepted configuration for the debug log.
        std::stringstream cf;
        cf << "{primary:" << request.config.primary.name() << ":" << request.config.primary.port() << ", secondaries = [";
        for (auto& s : request.config.secondaries)
        {
            cf << s.name() << ":" << s.port() << ",";
        }
        cf << "]}";

        ddebug("%d.%d metaupdateok to ballot %lld, type = %s, config = %s",
            request.config.gpid.app_id,
            request.config.gpid.pidx,
            request.config.ballot,
            enum_to_string(request.type),
            cf.str().c_str()
            );
    }
    else
    {
        response.err = ERR_INVALID_VERSION;
        response.config = old;
    }

#ifdef _DEBUG
    check_consistency(request.config.gpid);
#endif
}
/* Acquire the global ncurses lock and verify invariants while held.
 *
 * BUGFIX: pthread_mutex_lock() used to be invoked INSIDE assert(); when
 * NDEBUG is defined the expression is removed entirely, so the lock
 * would never be taken. Perform the call unconditionally and assert
 * only on its result. */
static inline void lock_ncurses(void){
	int merr = pthread_mutex_lock(&bfl);

	assert(merr == 0);
	(void)merr;
	check_consistency();
}
/*
 * Database-update daemon main loop: connect to the database, then
 * consume filesystem events and apply them, committing per event.
 * Failures roll back and fall back to check_consistency(); repeated
 * failures re-enter the connect loop up to RETRY_MAX times.
 */
int main(
	int argc,	/* Argument count */
	char **argv)	/* Argument vector */
{
	sam_event_t event;
	event_handler_t ev_handler;

	if (dbupd_init(argc, argv) < 0) {
		return (EXIT_FAILURE);
	}

connect:
	/* Keep trying to connect to database until shutdown or retry limit */
	while (num_retry < RETRY_MAX && !is_shutdown && dbupd_connect() < 0) {
		num_retry++;
		sleep(RETRY_SLEEP);
	}

	/* Keep trying to process events until retry limit or shutdown */
	while (num_retry < RETRY_MAX && !is_shutdown) {
		int status = sam_fsa_read_event(&fsa_inv, &event);

		if (status < 0) {
			/* Error reading event for fs_name */
			SendCustMsg(HERE, 26003, fs_name);
			num_retry++;
			sleep(RETRY_SLEEP);
			goto connect;
		} else if (status == FSA_EOF) {
			/* Reached the end of events, goto sleep for a while */
			sleep(EOF_SLEEP);
		} else if (IS_DB_INODE(event.ev_id.ino) &&
		    IS_DB_INODE(event.ev_pid.ino)) {
			/* Got next event, get event handler and process */
			ev_handler = get_event_handler(event.ev_num);
			if (ev_handler == NULL) {
				/* Unrecognized event for fsname */
				SendCustMsg(HERE, 26006, event.ev_num, fs_name);
				continue;
			}

			/* Handle event */
			if (ev_handler(db_ctx, &event) < 0) {
				mysql_rollback(db_ctx->mysql);
				/* Event processing failed, running check */
				SendCustMsg(HERE, 26007,
				    get_event_name(event.ev_num),
				    event.ev_id.ino, event.ev_id.gen,
				    fs_name);
				if (check_consistency(db_ctx, &event,
				    TRUE) < 0) {
					/*
					 * Repair failed too: roll back both
					 * the database and the event stream
					 * so the event is retried.
					 */
					mysql_rollback(db_ctx->mysql);
					sam_fsa_rollback(&fsa_inv);
					/* Consistency check failed, retrying */
					SendCustMsg(HERE, 26008);
					num_retry++;
					goto connect;
				} else {
					mysql_commit(db_ctx->mysql);
				}
			} else {
				mysql_commit(db_ctx->mysql);
			}
			/* Successful event resets the retry budget. */
			num_retry = 0;
		} else {
			/*
			 * Event's inode doesn't belong in database,
			 * run consistency to be sure.
			 */
			(void) check_consistency(db_ctx, &event, TRUE);
		}
	}

	/* Close event log */
	sam_fsa_close_inv(&fsa_inv);

	/* Close database */
	sam_db_disconnect(db_ctx);
	sam_db_context_free(db_ctx);

	if (num_retry >= RETRY_MAX) {
		/* Retry max reached, exiting */
		SendCustMsg(HERE, 26005, RETRY_MAX);
	}

	/* Success only for a clean, requested shutdown. */
	return (is_shutdown && num_retry < RETRY_MAX ?
	    EXIT_SUCCESS : EXIT_FAILURE);
}
// Warm up the environment by running a full consistency check,
// propagating whatever return code it produces.
w_rc_t ShoreTPCCEnv::warmup()
{
    w_rc_t rc = check_consistency();
    return (rc);
}
/*
 * Checks the database against the current state of the filesystem.
 *
 * Algorithm description:
 * The information in the database should match precisely with the
 * information in the filesystem.
 *
 * 1. Execute a query against the database for all inode information
 *    in order by inode number.
 * 2. For each entry in .inodes compare against the database.  Since
 *    both lists are ordered by inode number, read and compare against
 *    each other.  If there is an mismatch then add the inode to a
 *    repair list.
 * 3. For each directory in .inodes compare its entries against the
 *    sam_file table.  This is done similiar to the overall loop by
 *    sorting the directory entries by inode number and comparing
 *    the result against the database.  This approach was taken to
 *    reduce the number of queries against the database.
 * 4. After all checks are made, run a consistency check for any inodes
 *    in the repair list.
 */
int
samdb_check(samdb_args_t *args)
{
	sam_db_context_t dir_con;
	inode_cb_arg_t cb_arg;

	memset(&cb_arg, 0, sizeof (inode_cb_arg_t));
	cb_arg.check_con = args->con;
	cb_arg.dir_con = &dir_con;

	/*
	 * Need a second connection for the directory checks. This is
	 * so we can execute multiple queries simulateously. Only one
	 * query per connection is allowed.
	 */
	memcpy(&dir_con, args->con, sizeof (sam_db_context_t));

	if (sam_db_connect(args->con) < 0) {
		fprintf(stderr, "Could not connect to %s database.\n",
		    args->fsname);
		goto out;
	}

	if (!fast_scan) {
		/* Second connection for directory checks */
		if (sam_db_connect(&dir_con) < 0) {
			fprintf(stderr, "Could not connect to %s database.\n",
			    args->fsname);
			goto out;
		}

		/* Prepare the directory check query (reused many times) */
		/* BUGFIX: message was missing its trailing newline. */
		if ((cb_arg.dir_stmt = prepare_dir_query(&dir_con)) == NULL) {
			fprintf(stderr, "Error preparing directory check.\n");
			goto out;
		}
	}

	/*
	 * Set read and write timeout because checking dirents may take longer
	 * than the default timeout value causing the connection to be
	 * dropped.
	 */
	if (mysql_query(args->con->mysql,
	    "SET net_read_timeout=3600") != 0) {
		fprintf(stderr, "Error setting read timeout.\n");
		goto out;
	}
	/* BUGFIX: this message said "read" for the write timeout. */
	if (mysql_query(args->con->mysql,
	    "SET net_write_timeout=3600") != 0) {
		fprintf(stderr, "Error setting write timeout.\n");
		goto out;
	}

	/* Execute the inode check query */
	if ((cb_arg.check_stmt = execute_check_query(args->con)) == NULL) {
		fprintf(stderr, "Error executing check query.\n");
		goto out;
	}

	/* Get the first result */
	if (next_check_ino(cb_arg.check_stmt) < 0) {
		fprintf(stderr, "Error getting check value.\n");
		goto out;
	}

	/* Read the inodes file, check database for each inode read */
	if (read_inodes(args->con->mount_point, inode_callback,
	    &cb_arg) < 0) {
		fprintf(stderr, "Error reading inodes file\n");
		goto out;
	}

	/* Close check statement so we can use connection for repairs */
	if (cb_arg.check_stmt != NULL) {
		mysql_stmt_close(cb_arg.check_stmt);
		cb_arg.check_stmt = NULL;
	}

	printf("%d problems found\n", repair_list_size);

	if (repair_list_size > 0 && !scan_only) {
		int i;
		sam_event_t check_event;
		memset(&check_event, 0, sizeof (check_event));

		printf("Running consistency repairs..");
		for (i = 0; i < repair_list_size; i++) {
			check_event.ev_id = repair_list[i].id;
			check_event.ev_pid = repair_list[i].pid;
			if (check_consistency(cb_arg.check_con, &check_event,
			    repair_list[i].repair_dir) < 0) {
				fprintf(stderr, "Error checking inode %d\n",
				    check_event.ev_id.ino);
				break;
			}
			/* Progress dot every 1000 repairs. */
			if (i%1000 == 0) {
				printf(".");
				fflush(stdout);
			}
		}
		printf(" done\n");
	}

out:
	if (cb_arg.check_stmt != NULL) {
		mysql_stmt_close(cb_arg.check_stmt);
	}
	if (!fast_scan && cb_arg.dir_stmt != NULL) {
		mysql_stmt_close(cb_arg.dir_stmt);
	}
	sam_db_disconnect(cb_arg.check_con);
	sam_db_disconnect(cb_arg.dir_con);
	/*
	 * NOTE(review): error paths jump to 'out' but the function still
	 * returns 0, so callers cannot distinguish failure from success --
	 * confirm whether a non-zero return is expected here.
	 */
	return (0);
}
// Validate this requirement's tool, component and quality lists,
// reporting any problems under the given display name.
void requirement_data::check_consistency( const std::string &display_name ) const
{
    check_consistency( tools, display_name );
    check_consistency( components, display_name );
    check_consistency( qualities, display_name );
}
/*
 * pmreorder driver over a persistent three_field struct:
 *   g - write a consistent state
 *   b - write an inconsistent state
 *   m - exercise start-state markers, then write a consistent state
 *   c - check consistency of a previously written pool (exit code is
 *       the check result, consumed by the pmreorder tool)
 */
int main(int argc, char *argv[]) {
	START(argc, argv, "pmreorder_simple");
	util_init();

	if ((argc != 3) || (strchr("gbcm", argv[1][0]) == NULL) ||
	    argv[1][1] != '\0')
		UT_FATAL("usage: %s g|b|c|m file", argv[0]);

	int fd = OPEN(argv[2], O_RDWR);
	size_t size;

	/* mmap and register in valgrind pmemcheck */
	void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
	UT_ASSERTne(map, NULL);

	struct three_field *structp = map;

	char opt = argv[1][0];

	/* clear the struct to get a consistent start state for writing */
	if (strchr("gb", opt))
		pmem_memset_persist(structp, 0, sizeof(*structp));
	else if (strchr("m", opt)) {
		/* set test values to log an inconsistent start state */
		pmem_memset_persist(&structp->flag, 1, sizeof(int));
		pmem_memset_persist(&structp->first_field, 0, sizeof(int) * 2);
		pmem_memset_persist(&structp->third_field, 1, sizeof(int));
		/* clear the struct to get back a consistent start state */
		pmem_memset_persist(structp, 0, sizeof(*structp));
	}

	/* verify that DEFAULT_REORDER restores default engine */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.BEGIN");
	switch (opt) {
		case 'g':
			write_consistent(structp);
			break;
		case 'b':
			write_inconsistent(structp);
			break;
		case 'm':
			write_consistent(structp);
			break;
		case 'c':
			/* returns before CLOSE/DONE: the exit code is the
			 * consistency result expected by pmreorder */
			return check_consistency(structp);
		default:
			UT_FATAL("Unrecognized option %c", opt);
	}
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_CHANGE.END");

	/* check if undefined marker will not cause an issue */
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.BEGIN");
	VALGRIND_EMIT_LOG("PMREORDER_MARKER_UNDEFINED.END");

	CLOSE(fd);
	DONE(NULL);
}