/*
 * Run the bash script and read its output, which consists of the variables
 * needed to configure a postgres-xc cluster in pgxc_ctl.
 *
 * Be careful that pgxc_ctl changes its working directory to the pgxc home
 * directory, typically $HOME/pgxc_ctl, which can be changed with pgxc_ctl
 * options.  See pgxc_ctl.c or the pgxc_ctl document for details.
 */
void
read_config_file(char *path, char *conf)
{
	FILE *vars;
	char cmd[1024];

	if (conf)
		sprintf(cmd, "bash %s/pgxc_ctl_bash --configure %s print_values", path, conf);
	else
		sprintf(cmd, "bash %s/pgxc_ctl_bash print_values", path);
	vars = popen(cmd, "r");
	read_vars(vars);
	pclose(vars);		/* a stream opened with popen() must be closed with pclose() */
}
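The helper above hands the pipe straight to read_vars(). As a point of reference, here is a minimal sketch of what such a reader could look like, assuming the bash script emits simple NAME=VALUE lines; the function name and the assumption about the output format are illustrative, not the actual pgxc_ctl implementation.

/* Hypothetical sketch only: assumes the script prints NAME=VALUE lines.
 * The real read_vars() in pgxc_ctl may parse a richer format. */
#include <stdio.h>
#include <string.h>

static void
read_vars_sketch(FILE *vars)
{
	char line[1024];

	while (fgets(line, sizeof(line), vars) != NULL)
	{
		char *eq = strchr(line, '=');
		char *val;

		if (eq == NULL)
			continue;			/* not an assignment line */
		*eq = '\0';
		val = eq + 1;
		val[strcspn(val, "\r\n")] = '\0';	/* strip trailing newline */
		printf("var %s = %s\n", line, val);	/* stand-in for storing the value */
	}
}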
ifstream* CovOptimData::read(const char* filename, CovOptimData& cov,
                             std::stack<unsigned int>& format_id,
                             std::stack<unsigned int>& format_version) {

	ifstream* f = CovList::read(filename, cov, format_id, format_version);

	if (format_id.empty() || format_id.top()!=subformat_number || format_version.top()!=FORMAT_VERSION) {
		cov.data->_optim_optimizer_status  = (unsigned int) Optimizer::SUCCESS;
		cov.data->_optim_is_extended_space = false;
		cov.data->_optim_uplo              = NEG_INFINITY;
		cov.data->_optim_loup              = POS_INFINITY;
		cov.data->_optim_loup_point.resize((int) cov.n);
		cov.data->_optim_loup_point        = IntervalVector::empty(cov.n);
		cov.data->_optim_time              = -1;
		cov.data->_optim_nb_cells          = 0;
	} else {
		format_id.pop();
		format_version.pop();

		read_vars(*f, cov.n, cov.data->_optim_var_names);

		unsigned int status = read_pos_int(*f);

		switch (status) {
		case 0: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::SUCCESS;           break;
		case 1: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::INFEASIBLE;        break;
		case 2: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::NO_FEASIBLE_FOUND; break;
		case 3: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::UNBOUNDED_OBJ;     break;
		case 4: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::TIME_OUT;          break;
		case 5: cov.data->_optim_optimizer_status = (unsigned int) Optimizer::UNREACHED_PREC;    break;
		default: ibex_error("[CovOptimData]: invalid optimizer status.");
		}

		cov.data->_optim_is_extended_space = (bool) read_pos_int(*f);
		cov.data->_optim_uplo              = read_double(*f);
		cov.data->_optim_uplo_of_epsboxes  = read_double(*f);
		cov.data->_optim_loup              = read_double(*f);

		unsigned int loup_found = read_pos_int(*f);
		unsigned int nb_var = cov.is_extended_space() ? cov.n-1 : cov.n;

		cov.data->_optim_loup_point.resize((int) nb_var);

		// TODO: we assume here that the goal var is n-1
		cov.data->_optim_loup_point = loup_found==1 ?
				cov[0].subvector(0,nb_var-1) : IntervalVector::empty(nb_var);

		cov.data->_optim_time     = read_double(*f);
		cov.data->_optim_nb_cells = read_pos_int(*f);
	}

	return f;
}
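The format_id/format_version stacks encode nested subformat levels: each reader in the chain inspects the top entry, pops it only if it matches its own subformat, and leaves the rest for the next level; otherwise it falls back to defaults. A minimal, hypothetical sketch of that convention (the helper name is illustrative and not part of the IBEX API):

#include <stack>

// Illustrative only: mirrors the pop-if-matching pattern above.
static bool consume_format_level(std::stack<unsigned int>& format_id,
                                 std::stack<unsigned int>& format_version,
                                 unsigned int expected_id,
                                 unsigned int expected_version) {
	if (format_id.empty()
			|| format_id.top() != expected_id
			|| format_version.top() != expected_version)
		return false;          // level absent/unknown: caller falls back to defaults
	format_id.pop();           // consume this level's entry...
	format_version.pop();      // ...so the next reader sees only deeper levels
	return true;               // level present: caller reads its own fields next
}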
static void
read_configuration(void)
{
	FILE *conf;
	char cmd[MAXPATH+1];

	if (pgxc_ctl_config_path[0])
		snprintf(cmd, MAXPATH, "%s --home %s --configuration %s",
				 pgxc_ctl_bash_path, pgxc_ctl_home, pgxc_ctl_config_path);
	else
		snprintf(cmd, MAXPATH, "%s --home %s",
				 pgxc_ctl_bash_path, pgxc_ctl_home);
	elog(NOTICE, "Reading configuration using %s\n", cmd);
	conf = popen(cmd, "r");
	if (conf == NULL)
	{
		elog(ERROR, "ERROR: Cannot execute %s, %s", cmd, strerror(errno));
		return;
	}
	read_vars(conf);
	pclose(conf);
	uninstall_pgxc_ctl_bash(pgxc_ctl_bash_path);
	elog(INFO, "Finished reading configuration.\n");
}
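Neither helper above inspects the value returned by pclose(), which carries the child command's exit status. A minimal sketch of how that status could be checked with the standard wait-status macros; the helper name is hypothetical:

/* Hypothetical sketch: validate the bash script's exit status via pclose(). */
#include <stdio.h>
#include <sys/wait.h>

static int
close_and_check(FILE *stream, const char *cmd)
{
	int status = pclose(stream);

	if (status == -1)
		return -1;				/* pclose itself failed */
	if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
		return 0;				/* command succeeded */
	fprintf(stderr, "command failed: %s (status %d)\n", cmd, status);
	return -1;
}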
template <typename T, typename INT>
void NemSpread<T,INT>::read_restart_data ()

/* Function which reads the restart variable data from the EXODUS II
 * database which contains the results information.  Then distribute
 * it to the processors, and write it to the parallel exodus files.
 *
 *----------------------------------------------------------------------------
 *
 * Functions called:
 *
 * read_vars -- function which reads the variable values from the restart
 *              file, and then distributes them to the processors
 *
 * write_var_timestep -- function which writes out the variables for a
 *                       time step to a parallel ExodusII file.
 *
 *----------------------------------------------------------------------------
 */
{
  const char *yo = "read_restart_data";

  /* need to get the element block ids and counts */
  std::vector<INT> eb_ids_global(globals.Num_Elem_Blk);
  std::vector<INT> eb_cnts_global(globals.Num_Elem_Blk);
  std::vector<INT> ss_ids_global(globals.Num_Side_Set);
  std::vector<INT> ss_cnts_global(globals.Num_Side_Set);
  std::vector<INT> ns_ids_global(globals.Num_Node_Set);
  std::vector<INT> ns_cnts_global(globals.Num_Node_Set);

  INT ***eb_map_ptr = NULL, **eb_cnts_local = NULL;
  int    exoid = 0, *par_exoid = NULL;

  float vers;
  char  cTemp[512];

  /* computing precision should be the same as the database precision
   *
   * EXCEPTION: if the io_ws is smaller than the machine precision,
   * ie - database with io_ws == 4 on a Cray (sizeof(float) == 8),
   * then the cpu_ws must be the machine precision.
   */
  int cpu_ws;
  if (io_ws < (int)sizeof(float))
    cpu_ws = sizeof(float);
  else
    cpu_ws = io_ws;

  /* Open the ExodusII file */
  {
    cpu_ws = io_ws;
    int mode = EX_READ | int64api;
    if ((exoid = ex_open(Exo_Res_File, mode, &cpu_ws, &io_ws, &vers)) < 0) {
      fprintf(stderr, "%s: Could not open file %s for restart info\n",
              yo, Exo_Res_File);
      exit(1);
    }
  }

  /* allocate space for the global variables */
  Restart_Info.Glob_Vals.resize(Restart_Info.NVar_Glob);

  if (Restart_Info.NVar_Elem > 0) {

    /* allocate storage space */
    Restart_Info.Elem_Vals.resize(Proc_Info[2]);

    /* now allocate storage for the values */
    for (int iproc = 0; iproc < Proc_Info[2]; iproc++) {
      size_t array_size = Restart_Info.NVar_Elem *
        (globals.Num_Internal_Elems[iproc] + globals.Num_Border_Elems[iproc]);
      Restart_Info.Elem_Vals[iproc].resize(array_size);
    }

    /*
     * at this point, I need to broadcast the global element block ids
     * and counts to the processors.  I know that this is redundant data
     * since they will all receive this information in read_mesh, but
     * the variables which contain that information are static in
     * el_exoII_io.c, and cannot be used here.  So, take a second and
     * broadcast all of this out.
     *
     * I want to do this here so that it is done only once no matter
     * how many time steps are retrieved
     */

    /* Get the Element Block IDs from the input file */
    if (ex_get_ids(exoid, EX_ELEM_BLOCK, TOPTR(eb_ids_global)) < 0) {
      fprintf(stderr, "%s: unable to get element block IDs", yo);
      exit(1);
    }

    /* Get the count of elements in each element block */
    for (int cnt = 0; cnt < globals.Num_Elem_Blk; cnt++) {
      if (ex_get_block(exoid, EX_ELEM_BLOCK, eb_ids_global[cnt], cTemp,
                       &(eb_cnts_global[cnt]), NULL, NULL, NULL, NULL) < 0) {
        fprintf(stderr, "%s: unable to get element count for block id " ST_ZU "",
                yo, (size_t)eb_ids_global[cnt]);
        exit(1);
      }
    }

    /*
     * in order to speed up finding matches in the global element
     * number map, set up an array of pointers to the start of
     * each element block's global element number map.
     * That way only entries for the current element block have to be
     * searched.
     */
    eb_map_ptr = (INT ***) array_alloc(__FILE__, __LINE__, 2, Proc_Info[2],
                                       globals.Num_Elem_Blk, sizeof(INT *));
    if (!eb_map_ptr) {
      fprintf(stderr, "[%s]: ERROR, insufficient memory!\n", yo);
      exit(1);
    }
    eb_cnts_local = (INT **) array_alloc(__FILE__, __LINE__, 2, Proc_Info[2],
                                         globals.Num_Elem_Blk, sizeof(INT));
    if (!eb_cnts_local) {
      fprintf(stderr, "[%s]: ERROR, insufficient memory!\n", yo);
      exit(1);
    }

    /*
     * for now, assume that element blocks have been
     * stored in the same order as the global blocks
     */
    for (int iproc = 0; iproc < Proc_Info[2]; iproc++) {
      int    ifound = 0;
      size_t offset = 0;
      int    ilocal;
      for (int cnt = 0; cnt < globals.Num_Elem_Blk; cnt++) {
        for (ilocal = ifound; ilocal < globals.Proc_Num_Elem_Blk[iproc]; ilocal++) {
          if (globals.Proc_Elem_Blk_Ids[iproc][ilocal] == eb_ids_global[cnt])
            break;
        }

        if (ilocal < globals.Proc_Num_Elem_Blk[iproc]) {
          eb_map_ptr[iproc][cnt]    = &globals.GElems[iproc][offset];
          eb_cnts_local[iproc][cnt] = globals.Proc_Num_Elem_In_Blk[iproc][ilocal];
          offset += globals.Proc_Num_Elem_In_Blk[iproc][ilocal];
          ifound  = ilocal; /* don't search the same part of the list over */
        }
        else {
          eb_map_ptr[iproc][cnt]    = NULL;
          eb_cnts_local[iproc][cnt] = 0;
        }
      }
    }
  } /* End: "if (Restart_Info.NVar_Elem > 0)" */

  if (Restart_Info.NVar_Node > 0) {
    /* allocate storage space */
    Restart_Info.Node_Vals.resize(Proc_Info[2]);

    /* now allocate storage for the values */
    for (int iproc = 0; iproc < Proc_Info[2]; iproc++) {
      size_t array_size = Restart_Info.NVar_Node *
        (globals.Num_Internal_Nodes[iproc] + globals.Num_Border_Nodes[iproc] +
         globals.Num_External_Nodes[iproc]);
      Restart_Info.Node_Vals[iproc].resize(array_size);
    }
  }

  if (Restart_Info.NVar_Sset > 0) {
    /* allocate storage space */
    Restart_Info.Sset_Vals.resize(Proc_Info[2]);

    /* now allocate storage for the values */
    for (int iproc = 0; iproc < Proc_Info[2]; iproc++) {
      size_t array_size = Restart_Info.NVar_Sset *
        globals.Proc_SS_Elem_List_Length[iproc];
      Restart_Info.Sset_Vals[iproc].resize(array_size);
    }

    /*
     * at this point, I need to broadcast the ids and counts to the
     * processors.  I know that this is redundant data since they will
     * all receive this information in read_mesh, but the variables
     * which contain that information are static in el_exoII_io.c, and
     * cannot be used here.  So, take a second and broadcast all of
     * this out.
     *
     * I want to do this here so that it is done only once no matter
     * how many time steps are retrieved
     */

    /* Get the Sideset IDs from the input file */
    if (ex_get_ids(exoid, EX_SIDE_SET, TOPTR(ss_ids_global)) < 0) {
      fprintf(stderr, "%s: unable to get sideset IDs", yo);
      exit(1);
    }

    /* Get the count of elements in each sideset */
    for (int cnt = 0; cnt < globals.Num_Side_Set; cnt++) {
      if (ex_get_set_param(exoid, EX_SIDE_SET, ss_ids_global[cnt],
                           &(ss_cnts_global[cnt]), NULL) < 0) {
        fprintf(stderr, "%s: unable to get element count for sideset id " ST_ZU "",
                yo, (size_t)ss_ids_global[cnt]);
        exit(1);
      }
    }
  } /* End: "if (Restart_Info.NVar_Sset > 0)" */

  if (Restart_Info.NVar_Nset > 0) {
    /* allocate storage space */
    Restart_Info.Nset_Vals.resize(Proc_Info[2]);

    /* now allocate storage for the values */
    for (int iproc = 0; iproc < Proc_Info[2]; iproc++) {
      size_t array_size = Restart_Info.NVar_Nset *
        globals.Proc_NS_List_Length[iproc];
      Restart_Info.Nset_Vals[iproc].resize(array_size);
    }

    /*
     * at this point, I need to broadcast the ids and counts to the
     * processors.
     * I know that this is redundant data since they will
     * all receive this information in read_mesh, but the variables
     * which contain that information are static in el_exoII_io.c, and
     * cannot be used here.  So, take a second and broadcast all of
     * this out.
     *
     * I want to do this here so that it is done only once no matter
     * how many time steps are retrieved
     */

    /* Get the Nodeset IDs from the input file */
    if (ex_get_ids(exoid, EX_NODE_SET, TOPTR(ns_ids_global)) < 0) {
      fprintf(stderr, "%s: unable to get nodeset IDs", yo);
      exit(1);
    }

    /* Get the count of elements in each nodeset */
    for (int cnt = 0; cnt < globals.Num_Node_Set; cnt++) {
      if (ex_get_set_param(exoid, EX_NODE_SET, ns_ids_global[cnt],
                           &(ns_cnts_global[cnt]), NULL) < 0) {
        fprintf(stderr, "%s: unable to get element count for nodeset id " ST_ZU "",
                yo, (size_t)ns_ids_global[cnt]);
        exit(1);
      }
    }
  } /* End: "if (Restart_Info.NVar_Nset > 0)" */

  /*
   * NOTE: A possible place to speed this up would be to
   * get the global node and element lists here, and broadcast
   * them out only once.
   */

  par_exoid = (int*)malloc(Proc_Info[2] * sizeof(int));
  if (!par_exoid) {
    fprintf(stderr, "[%s]: ERROR, insufficient memory!\n", yo);
    exit(1);
  }

  /* See if any '/' in the name.  If present, isolate the basename of the file */
  if (strrchr(PIO_Info.Scalar_LB_File_Name, '/') != NULL) {
    /* There is a path separator.  Get the portion after the separator */
    strcpy(cTemp, strrchr(PIO_Info.Scalar_LB_File_Name, '/') + 1);
  }
  else {
    /* No separator; this is already just the basename... */
    strcpy(cTemp, PIO_Info.Scalar_LB_File_Name);
  }

  if (strlen(PIO_Info.Exo_Extension) == 0)
    add_fname_ext(cTemp, ".par");
  else
    add_fname_ext(cTemp, PIO_Info.Exo_Extension);

  int open_file_count = get_free_descriptor_count();
  if (open_file_count > Proc_Info[5]) {
    printf("All output files opened simultaneously.\n");
    for (int iproc = Proc_Info[4]; iproc < Proc_Info[4] + Proc_Info[5]; iproc++) {

      gen_par_filename(cTemp, Par_Nem_File_Name, Proc_Ids[iproc], Proc_Info[0]);

      /* Open the parallel Exodus II file for writing */
      cpu_ws = io_ws;
      int mode = EX_WRITE | int64api | int64db;
      if ((par_exoid[iproc] = ex_open(Par_Nem_File_Name, mode, &cpu_ws,
                                      &io_ws, &vers)) < 0) {
        fprintf(stderr, "[%d] %s Could not open parallel Exodus II file: %s\n",
                iproc, yo, Par_Nem_File_Name);
        exit(1);
      }
    }
  }
  else {
    printf("All output files opened one-at-a-time.\n");
  }

  /* Now loop over the number of time steps */
  for (int time_idx = 0; time_idx < Restart_Info.Num_Times; time_idx++) {

    double start_t = second();

    /* read and distribute the variables for this time step */
    if (read_vars(exoid, Restart_Info.Time_Idx[time_idx],
                  TOPTR(eb_ids_global), TOPTR(eb_cnts_global),
                  eb_map_ptr, eb_cnts_local,
                  TOPTR(ss_ids_global), TOPTR(ss_cnts_global),
                  TOPTR(ns_ids_global), TOPTR(ns_cnts_global)) < 0) {
      fprintf(stderr, "%s: Error occurred while reading variables\n", yo);
      exit(1);
    }
    double end_t = second() - start_t;
    printf("\tTime to read vars for timestep %d: %f (sec.)\n",
           (time_idx + 1), end_t);

    start_t = second();
    for (int iproc = Proc_Info[4]; iproc < Proc_Info[4] + Proc_Info[5]; iproc++) {

      if (open_file_count < Proc_Info[5]) {

        gen_par_filename(cTemp, Par_Nem_File_Name, Proc_Ids[iproc], Proc_Info[0]);

        /* Open the parallel Exodus II file for writing */
        cpu_ws = io_ws;
        int mode = EX_WRITE | int64api | int64db;
        if ((par_exoid[iproc] = ex_open(Par_Nem_File_Name, mode, &cpu_ws,
                                        &io_ws, &vers)) < 0) {
          fprintf(stderr, "[%d] %s Could not open parallel Exodus II file: %s\n",
                  iproc, yo, Par_Nem_File_Name);
          exit(1);
        }
      }

      /*
       * Write out the variable data for the time steps in
       * this block to each parallel file.
       */
      write_var_timestep(par_exoid[iproc], iproc, (time_idx + 1),
                         TOPTR(eb_ids_global), TOPTR(ss_ids_global),
                         TOPTR(ns_ids_global));

      if (iproc % 10 == 0 || iproc == Proc_Info[2] - 1)
        printf("%d", iproc);
      else
        printf(".");

      if (open_file_count < Proc_Info[5]) {
        if (ex_close(par_exoid[iproc]) == -1) {
          fprintf(stderr, "[%d] %s Could not close the parallel Exodus II file.\n",
                  iproc, yo);
          exit(1);
        }
      }
    } /* End "for (iproc=Proc_Info[4]; iproc < Proc_Info[4]+Proc_Info[5]; iproc++)" */

    end_t = second() - start_t;
    printf("\n\tTime to write vars for timestep %d: %f (sec.)\n",
           (time_idx + 1), end_t);
  }

  if (Restart_Info.NVar_Elem > 0) {
    safe_free((void **) &eb_map_ptr);
    safe_free((void **) &eb_cnts_local);
  }

  /* Close the restart exodus II file */
  if (ex_close(exoid) == -1) {
    fprintf(stderr, "%s: Could not close the restart Exodus II file\n", yo);
    exit(1);
  }

  if (open_file_count > Proc_Info[5]) {
    for (int iproc = Proc_Info[4]; iproc < Proc_Info[4] + Proc_Info[5]; iproc++) {
      /* Close the parallel exodus II file */
      if (ex_close(par_exoid[iproc]) == -1) {
        fprintf(stderr, "[%d] %s Could not close the parallel Exodus II file.\n",
                iproc, yo);
        exit(1);
      }
    }
  }

  if (par_exoid != NULL) {
    free(par_exoid);
    par_exoid = NULL;
  }
}
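read_restart_data() switches between two file-handling strategies: if more descriptors are free than output files assigned to this processor group, every parallel file is opened once up front and reused for all time steps; otherwise each file is opened and closed inside the per-timestep loop. A minimal, self-contained sketch of that strategy with hypothetical names and file paths (error handling kept to a stub), not the nem_spread code itself:

#include <cstdio>
#include <string>
#include <vector>

void write_all_timesteps(int num_files, int num_timesteps, int free_descriptors)
{
  const bool keep_open = free_descriptors > num_files;
  std::vector<std::FILE *> handles(num_files, nullptr);

  if (keep_open) // open once, reuse for every time step
    for (int f = 0; f < num_files; f++)
      handles[f] = std::fopen(("out." + std::to_string(f)).c_str(), "w");

  for (int t = 0; t < num_timesteps; t++) {
    for (int f = 0; f < num_files; f++) {
      std::FILE *fp = keep_open ? handles[f]
                                : std::fopen(("out." + std::to_string(f)).c_str(), "a");
      if (fp == nullptr)
        continue;                             // error handling elided in this sketch
      std::fprintf(fp, "timestep %d\n", t);   // stand-in for write_var_timestep()
      if (!keep_open)
        std::fclose(fp);                      // pay the open/close cost every step
    }
  }

  if (keep_open)
    for (int f = 0; f < num_files; f++)
      if (handles[f] != nullptr)
        std::fclose(handles[f]);
}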
template <typename INT> void ExoII_Read<INT>::Get_Init_Data()
{
  SMART_ASSERT(Check_State());
  SMART_ASSERT(file_id >= 0);

  // Determine max size of entity and variable names on the database
  int name_length = ex_inquire_int(file_id, EX_INQ_DB_MAX_USED_NAME_LENGTH);
  ex_set_max_name_length(file_id, name_length);

  ex_init_params info;
  info.title[0] = '\0';

  int err = ex_get_init_ext(file_id, &info);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get init data!"
              << " Error number = " << err << ".  Aborting..." << '\n';
    exit(1);
  }

  dimension       = info.num_dim;
  num_nodes       = info.num_nodes;
  num_elmts       = info.num_elem;
  num_elmt_blocks = info.num_elem_blk;
  num_node_sets   = info.num_node_sets;
  num_side_sets   = info.num_side_sets;
  title           = info.title;

  if (err > 0 && !interface.quiet_flag)
    std::cout << "EXODIFF WARNING: a warning was issued, number = " << err << '\n';

  if (dimension < 1 || dimension > 3 || num_elmt_blocks < 0 || num_node_sets < 0 ||
      num_side_sets < 0) {
    std::cout << "EXODIFF ERROR: Init data appears corrupt:" << '\n'
              << "        dimension = " << dimension << '\n'
              << "        num_nodes = " << num_nodes << '\n'
              << "        num_elmts = " << num_elmts << '\n'
              << "  num_elmt_blocks = " << num_elmt_blocks << '\n'
              << "    num_node_sets = " << num_node_sets << '\n'
              << "    num_side_sets = " << num_side_sets << '\n'
              << " ... Aborting..." << '\n';
    exit(1);
  }

  int num_qa   = ex_inquire_int(file_id, EX_INQ_QA);
  int num_info = ex_inquire_int(file_id, EX_INQ_INFO);

  if (num_qa < 0 || num_info < 0) {
    std::cout << "EXODIFF ERROR: inquire data appears corrupt:" << '\n'
              << "  num_qa   = " << num_qa << '\n'
              << "  num_info = " << num_info << '\n'
              << " ... Aborting..." << '\n';
    exit(1);
  }

  // Coordinate Names...
  char **coords = get_name_array(3, name_length);
  err           = ex_get_coord_names(file_id, coords);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get coordinate"
              << " names!  Aborting..." << '\n';
    exit(1);
  }

  coord_names.clear();
  for (size_t i = 0; i < dimension; ++i) {
    coord_names.push_back(coords[i]);
  }
  free_name_array(coords, 3);

  // Element Block Data...
  if (eblocks)
    delete[] eblocks;
  eblocks = nullptr;
  if (num_elmt_blocks > 0) {
    eblocks = new Exo_Block<INT>[num_elmt_blocks];
    SMART_ASSERT(eblocks != nullptr);
    std::vector<INT> ids(num_elmt_blocks);

    err = ex_get_ids(file_id, EX_ELEM_BLOCK, TOPTR(ids));
    if (err < 0) {
      std::cout << "EXODIFF ERROR: Failed to get element"
                << " block ids!  Aborting..." << '\n';
      exit(1);
    }

    size_t e_count = 0;
    for (size_t b = 0; b < num_elmt_blocks; ++b) {
      if (ids[b] <= EX_INVALID_ID) {
        std::cout << "EXODIFF WARNING: Element block Id "
                  << "for block index " << b << " is " << ids[b]
                  << " which is negative. This was returned by call to ex_get_elem_blk_ids()."
                  << '\n';
      }
      eblocks[b].initialize(file_id, ids[b]);
      e_count += eblocks[b].Size();
    }

    if (e_count != num_elmts && !interface.quiet_flag) {
      std::cout << "EXODIFF WARNING: Total number of elements " << num_elmts
                << " does not equal the sum of the number of elements "
                << "in each block " << e_count << '\n';
    }

    // Gather the attribute names (even though not all attributes are on all blocks)
    std::set<std::string> names;
    for (size_t b = 0; b < num_elmt_blocks; ++b) {
      for (int a = 0; a < eblocks[b].attr_count(); a++) {
        names.insert(eblocks[b].Get_Attribute_Name(a));
      }
    }
    elmt_atts.resize(names.size());
    std::copy(names.begin(), names.end(), elmt_atts.begin());
  }

  // Node & Side sets...
  if (nsets)
    delete[] nsets;
  nsets = nullptr;
  if (num_node_sets > 0) {
    nsets = new Node_Set<INT>[num_node_sets];
    SMART_ASSERT(nsets != nullptr);
    std::vector<INT> ids(num_node_sets);

    err = ex_get_ids(file_id, EX_NODE_SET, TOPTR(ids));
    if (err < 0) {
      std::cout << "EXODIFF ERROR: Failed to get "
                << "nodeset ids!  Aborting..." << '\n';
      exit(1);
    }

    for (size_t nset = 0; nset < num_node_sets; ++nset) {
      if (ids[nset] <= EX_INVALID_ID) {
        std::cout << "EXODIFF WARNING: Nodeset Id "
                  << "for nodeset index " << nset << " is " << ids[nset]
                  << " which is negative. This was returned by call to ex_get_ids()." << '\n';
      }
      nsets[nset].initialize(file_id, ids[nset]);
    }
  }

  if (ssets)
    delete[] ssets;
  ssets = nullptr;
  if (num_side_sets) {
    ssets = new Side_Set<INT>[num_side_sets];
    SMART_ASSERT(ssets != nullptr);
    std::vector<INT> ids(num_side_sets);

    err = ex_get_ids(file_id, EX_SIDE_SET, TOPTR(ids));
    if (err < 0) {
      std::cout << "EXODIFF ERROR: Failed to get "
                << "sideset ids!  Aborting..." << '\n';
      exit(1);
    }

    for (size_t sset = 0; sset < num_side_sets; ++sset) {
      if (ids[sset] <= EX_INVALID_ID) {
        std::cout << "EXODIFF WARNING: Sideset Id "
                  << "for sideset index " << sset << " is " << ids[sset]
                  << " which is negative. This was returned by call to ex_get_ids()." << '\n';
      }
      ssets[sset].initialize(file_id, ids[sset]);
    }
  }

  // **************  RESULTS info  ***************  //
  int num_global_vars, num_nodal_vars, num_elmt_vars, num_ns_vars, num_ss_vars;

  err = ex_get_variable_param(file_id, EX_GLOBAL, &num_global_vars);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of"
              << " global variables!  Aborting..." << '\n';
    exit(1);
  }

  err = ex_get_variable_param(file_id, EX_NODAL, &num_nodal_vars);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of"
              << " nodal variables!  Aborting..." << '\n';
    exit(1);
  }

  err = ex_get_variable_param(file_id, EX_ELEM_BLOCK, &num_elmt_vars);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of"
              << " element variables!  Aborting..." << '\n';
    exit(1);
  }

  err = ex_get_variable_param(file_id, EX_NODE_SET, &num_ns_vars);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of"
              << " nodeset variables!  Aborting..." << '\n';
    exit(1);
  }

  err = ex_get_variable_param(file_id, EX_SIDE_SET, &num_ss_vars);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of"
              << " sideset variables!  Aborting..." << '\n';
    exit(1);
  }

  if (num_global_vars < 0 || num_nodal_vars < 0 || num_elmt_vars < 0 || num_ns_vars < 0 ||
      num_ss_vars < 0) {
    std::cout << "EXODIFF ERROR: Data appears corrupt for"
              << " number of variables !" << '\n'
              << "\tnum global vars  = " << num_global_vars << '\n'
              << "\tnum nodal vars   = " << num_nodal_vars << '\n'
              << "\tnum element vars = " << num_elmt_vars << '\n'
              << " ... Aborting..." << '\n';
    exit(1);
  }

  read_vars(file_id, EX_GLOBAL, "Global", num_global_vars, global_vars);
  read_vars(file_id, EX_NODAL, "Nodal", num_nodal_vars, nodal_vars);
  read_vars(file_id, EX_ELEM_BLOCK, "Element", num_elmt_vars, elmt_vars);
  read_vars(file_id, EX_NODE_SET, "Nodeset", num_ns_vars, ns_vars);
  read_vars(file_id, EX_SIDE_SET, "Sideset", num_ss_vars, ss_vars);

  // Times:
  num_times = ex_inquire_int(file_id, EX_INQ_TIME);
  if (num_times < 0) {
    std::cout << "EXODIFF ERROR: Number of time steps came"
              << " back negative (" << num_times << ")!  Aborting..."
              << '\n';
    exit(1);
  }

  if ((num_global_vars > 0 || num_nodal_vars > 0 || num_elmt_vars > 0 || num_ns_vars > 0 ||
       num_ss_vars > 0) &&
      num_times == 0) {
    std::cout << "EXODIFF Consistency error -- The database contains transient variables, but no "
                 "timesteps!"
              << '\n';
    exit(1);
  }

  if (num_times) {
    times = new double[num_times];
    SMART_ASSERT(times != nullptr);
    err = ex_get_all_times(file_id, times);
  }

  if (num_nodal_vars) {
    if (num_times == 0) {
      std::cout << "EXODIFF Consistency error -- The database contains " << num_nodal_vars
                << " nodal variables, but there are no time steps defined." << '\n';
    }
    if (num_times) {
      results = new double *[num_nodal_vars];
      for (int i = 0; i < num_nodal_vars; ++i)
        results[i] = nullptr;
    }
  }
} // End of EXODIFF
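The five ex_get_variable_param calls above repeat the same check-and-abort pattern. A minimal sketch, with a hypothetical helper name (not part of exodiff), of how that pattern could be expressed once; only the documented ex_get_variable_param signature is assumed:

#include <cstdlib>
#include <iostream>
#include <exodusII.h>

// Hypothetical helper: fetch the variable count for one entity type and abort
// with a message on failure, mirroring the pattern in Get_Init_Data() above.
static int get_var_count_or_abort(int file_id, ex_entity_type type, const char *label)
{
  int count = 0;
  int err   = ex_get_variable_param(file_id, type, &count);
  if (err < 0) {
    std::cout << "EXODIFF ERROR: Failed to get number of " << label
              << " variables!  Aborting..." << '\n';
    std::exit(1);
  }
  return count;
}

// Example use, assuming an already-open Exodus file handle `file_id`:
//   int num_global_vars = get_var_count_or_abort(file_id, EX_GLOBAL, "global");
//   int num_nodal_vars  = get_var_count_or_abort(file_id, EX_NODAL,  "nodal");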