Atom ScriptObject::getAtomPropertyFromProtoChain(Atom name, ScriptObject* o, Traits* origObjTraits) const
{
    // todo: will delegate always be non-null here?
    if (o != NULL)
    {
        Atom searchname = name;
        Stringp s = core()->atomToString(name);
        AvmAssert(s->isInterned());
        Atom ival = s->getIntAtom();
        if (ival)
        {
            searchname = ival;
        }
        do
        {
            // ensure prototype is dynamic
            if (!o->vtable->traits->getHashtableOffset())
                continue;

            Atom const value = o->getTable()->getNonEmpty(searchname);
            if (!InlineHashtable::isEmpty(value))
                return value;
        }
        while ((o = o->delegate) != NULL);
    }

    // NOTE: use the default public namespace, since the name's namespace is not used here
    Multiname multiname(core()->getAnyPublicNamespace(), AvmCore::atomToString(name));
    toplevel()->throwReferenceError(kReadSealedError, &multiname, origObjTraits);
    // unreached
    return undefinedAtom;
}
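/*
 * A note on the searchname substitution above (an explanatory aside, not
 * part of the original source): dynamic properties whose names are the
 * string form of a small integer (e.g. "42") are keyed in the hashtable by
 * their integer atom rather than by the interned string, so the probe must
 * use the same key form. getIntAtom() yields that integer atom when the
 * interned string has one, and 0 otherwise.
 */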
void get_eigen_outfile_name(char *in_name, int j, int k)
{
  char prefix[MAX_FNL];
  char suffix[MAX_FNL];
  char new_name[MAX_FNL];
  char mode_string[MAX_FNL];
  char wave_string[MAX_FNL];
  int i;
  int suffix_length;
  int prefix_length;

  /* Initialize string pieces */
  for (i = 0; i < MAX_FNL; i++) {
    prefix[i]      = '\0';
    suffix[i]      = '\0';
    new_name[i]    = '\0';
    mode_string[i] = '\0';
    wave_string[i] = '\0';
  }

  /* Save beginning and end of specified file name */
  suffix_length = get_suffix(suffix, in_name);
  prefix_length = get_prefix(prefix, in_name);

  /* Determine if doing 3D of 2D LSA; set k = -1 if not */
  if (!(Linear_Stability == LSA_3D_OF_2D) &&
      !(Linear_Stability == LSA_3D_OF_2D_SAVE))
    k = -1;

  /* Always construct mode string */
  sprintf(mode_string, "_mode%d", j);

  /* Construct wave string for 3D of 2D LSA only */
  if (k != -1)
    sprintf(wave_string, "_wn=%g", LSA_wave_numbers[k]);

  /* Assemble the pieces of the name string */
  if (prefix_length > 0)
    strcpy(new_name, prefix);
  strcat(new_name, mode_string);
  if (k != -1)
    strcat(new_name, wave_string);
  if (suffix_length > 0)
    strcat(new_name, suffix);

  /* Multiname for this processor if parallel */
  if (Num_Proc > 1)
    multiname(new_name, ProcID, Num_Proc);

  /* Return the name as in_name */
  strcpy(in_name, new_name);
  return;
}
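/*
 * Worked example (hypothetical values, assuming get_prefix()/get_suffix()
 * split the name at the file extension): for in_name = "eigen.exoII",
 * j = 2, and a 3D-of-2D LSA run with LSA_wave_numbers[k] = 0.5, the routine
 * above rewrites in_name to "eigen_mode2_wn=0.5.exoII"; on a multi-processor
 * run, multiname() then folds in the processor decoration as well.
 */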
void ObjectClass::_setPropertyIsEnumerable(Atom thisAtom, Stringp name, bool enumerable)
{
    AvmCore* core = this->core();
    name = name ? core->internString(name) : (Stringp)core->knull;
    if ((thisAtom & 7) == kObjectType)
    {
        ScriptObject* obj = AvmCore::atomToScriptObject(thisAtom);
        obj->setStringPropertyIsEnumerable(name, enumerable);
    }
    else
    {
        // cannot create properties on a sealed object.
        Multiname multiname(core->publicNamespace, name);
        toplevel()->throwReferenceError(kWriteSealedError, &multiname, traits());
    }
}
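/*
 * Explanatory aside: the expression (thisAtom & 7) masks off the low three
 * tag bits of the boxed atom, which encode its kind; comparing the result
 * against kObjectType is the same test the atomKind() helper performs.
 */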
ClassClosure* DomainObject::getClass(Stringp name)
{
    AvmCore* core = this->core();

    if (name == NULL) {
        toplevel()->throwArgumentError(kNullArgumentError, core->toErrorString("name"));
    }

    // Search for a dot from the end.
    int dot = name->lastIndexOf(core->cachedChars[(int)'.']);

    // If there is a '.', this is a fully-qualified class name in a
    // package. Must turn it into a namespace-qualified multiname.
    Namespace* ns;
    Stringp className;
    if (dot >= 0)
    {
        Stringp uri = core->internString(name->substring(0, dot));
        ns = core->internNamespace(core->newNamespace(uri));
        className = core->internString(name->substring(dot+1, name->length()));
    }
    else
    {
        ns = core->publicNamespace;
        className = core->internString(name);
    }

    Multiname multiname(ns, className);

    // OOO: In the distribution, we search core->codeContext()->domainEnv rather than
    // our own domainEnv, which is surely a bug?
    ScriptObject* container = finddef(multiname, domainEnv);
    if (!container)
    {
        toplevel()->throwTypeError(kClassNotFoundError, core->toErrorString(&multiname));
    }

    Atom atom = toplevel()->getproperty(container->atom(), &multiname, container->vtable);
    if (!AvmCore::istype(atom, core->traits.class_itraits))
    {
        toplevel()->throwTypeError(kClassNotFoundError, core->toErrorString(&multiname));
    }

    return (ClassClosure*)AvmCore::atomToScriptObject(atom);
}
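/*
 * Illustration (the class name here is hypothetical, not one the code
 * guarantees to resolve): getClass("flash.display.Sprite") splits at the
 * last '.', producing a namespace with uri "flash.display" and className
 * "Sprite", while getClass("Sprite") falls through to the public
 * namespace branch.
 */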
void ObjectClass::_setPropertyIsEnumerable(Atom thisAtom, Stringp name, bool enumerable)
{
    AvmCore* core = this->core();
    name = name ? core->internString(name) : (Stringp)core->knull;
    if (atomKind(thisAtom) == kObjectType)
    {
        ScriptObject* obj = AvmCore::atomToScriptObject(thisAtom);
        obj->setStringPropertyIsEnumerable(name, enumerable);
    }
    else
    {
        // cannot create properties on a sealed object.
        // NOTE: just use the unmarked version
        // NOTE: use the default public namespace
        Multiname multiname(core->getAnyPublicNamespace(), name);
        toplevel()->throwReferenceError(kWriteSealedError, &multiname, traits());
    }
}
int main(int argc, char **argv)
/*
 * Initial main driver for GOMA. Derived from a (1/93) release of
 * the rf_salsa program by
 *
 *    Original Authors: John Shadid (1421)
 *                      Scott Hutchinson (1421)
 *                      Harry Moffat (1421)
 *
 *    Date:             12/3/92
 *
 *    Updates and Changes by:
 *                      Randy Schunk (9111)
 *                      P. A. Sackinger (9111)
 *                      R. R. Rao (9111)
 *                      R. A. Cairncross (Univ. of Delaware)
 *    Dates:            2/93 - 6/96
 *
 *    Modified for continuation
 *                      Ian Gates
 *    Dates:            2/98 - 10/98
 *    Dates:            7/99 - 8/99
 *
 * Last modified: Wed June 26 14:21:35 MST 1994 [email protected]
 * Hello.
 *
 * Note: Many modifications from an early 2/93 pre-release
 *       version of rf_salsa were made by various persons
 *       in order to test ideas about moving/deforming meshes...
 */
{
  /* Local Declarations */
  double time_start, total_time;   /* timing variables */
#ifndef PARALLEL
  /* struct tm *tm_ptr;               additional serial timing variables */
  time_t now;
#endif

  int error;
  int i;
  int j;

  char **ptmp;
  char *yo;

  struct Command_line_command **clc = NULL; /* point to command line structure */
  int nclc = 0;                             /* number of command line commands */

/********************** BEGIN EXECUTION ***************************************/

#ifdef FP_EXCEPT
  feenableexcept((FE_OVERFLOW | FE_DIVBYZERO | FE_INVALID));
#endif

  /* assume number of commands is less than or equal to the number of
   * arguments in the command line minus 1 (1st is program name) */

  /*
   * Get the name of the executable, yo
   */
  yo = argv[0];

#ifdef PARALLEL
  MPI_Init(&argc, &argv);
  time_start = MPI_Wtime();
#endif /* PARALLEL */
#ifndef PARALLEL
  (void) time(&now);
  time_start = (double) now;
#endif /* PARALLEL */

  time_goma_started = time_start;

  Argv = argv;
  Argc = argc;

#ifdef PARALLEL
  /*
   * Determine the parallel processing status, if any. We need to know
   * pretty early if we're "one of many" or the only process.
   */
  error = MPI_Comm_size(MPI_COMM_WORLD, &Num_Proc);
  error = MPI_Comm_rank(MPI_COMM_WORLD, &ProcID);

  /*
   * Setup a default Proc_config so we can use utility routines
   * from Aztec
   */
  AZ_set_proc_config(Proc_Config, MPI_COMM_WORLD);

  /* set the output limit flag if need be */
  if (Num_Proc > DP_PROC_PRINT_LIMIT) Unlimited_Output = FALSE;

#ifdef HAVE_MPE_H
  error = MPE_Init_log();
#endif /* HAVE_MPE_H */

  Dim = 0;   /* for any hypercube legacy code... */
#endif /* PARALLEL */

#ifndef PARALLEL
  Dim      = 0;
  ProcID   = 0;
  Num_Proc = 1;
#endif /* PARALLEL */

  /*
   * HKM - Change the ieee exception handling based on the machine and
   *       the level of debugging/speed desired. This call currently causes
   *       core dumps for floating point exceptions.
   */
  handle_ieee();

  log_msg("--------------");
  log_msg("GOMA begins...");

  /*
   * Some initial stuff that only the master process does.
   */
  if (ProcID == 0) {
    if (argc > 1) {
      log_msg("Preprocessing command line options.");
      clc = (struct Command_line_command **)
            smalloc(argc * sizeof(struct Command_line_command *));
      for (i = 0; i < argc; i++) {
        clc[i] = (struct Command_line_command *)
                 smalloc(sizeof(struct Command_line_command));
        clc[i]->type   = 0;   /* initialize command line structure */
        clc[i]->i_val  = 0;
        clc[i]->r_val  = 0.;
        clc[i]->string = (char *) smalloc(MAX_COMMAND_LINE_LENGTH * sizeof(char));
        for (j = 0; j < MAX_COMMAND_LINE_LENGTH; j++) {
          clc[i]->string[j] = '\0';
        }
#ifdef DEBUG
        fprintf(stderr, "clc[%d]->string is at 0x%x\n", i, clc[i]->string);
        fprintf(stderr, "clc[%d] is at 0x%x\n", i, clc[i]);
#endif
      }
    }

    strcpy(Input_File, "input");
    strcpy(Echo_Input_File, "echo_input");

    if (argc > 1) translate_command_line(argc, argv, clc, &nclc);

    ECHO("OPEN", Echo_Input_File);
    echo_command_line(argc, argv, Echo_Input_File);

    print_code_version();
    ptmp = legal_notice;
    while (strcmp(*ptmp, LAST_LEGAL_STRING) != 0) {
      fprintf(stderr, "%s", *ptmp++);
    }
  }

  /*
   * Allocate the uniform problem description structure and
   * the problem description structures on all processors
   */
  error = pd_alloc();
  EH(error, "pd_alloc problem");

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier after pd_alloc\n", ProcID);
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif

  log_msg("Allocating mp, gn, ...");

  error = mp_alloc();
  EH(error, "mp_alloc problem");

  error = gn_alloc();
  EH(error, "gn_alloc problem");

  error = ve_alloc();
  EH(error, "ve_alloc problem");

  error = elc_alloc();
  EH(error, "elc_alloc problem");

  error = elc_rs_alloc();
  EH(error, "elc_rs_alloc problem");

  error = cr_alloc();
  EH(error, "cr_alloc problem");

  error = evp_alloc();
  EH(error, "evp_alloc problem");

  error = tran_alloc();
  EH(error, "tran_alloc problem");

  error = eigen_alloc();
  EH(error, "eigen_alloc problem");

  error = cont_alloc();
  EH(error, "cont_alloc problem");

  error = loca_alloc();
  EH(error, "loca_alloc problem");

  error = efv_alloc();
  EH(error, "efv_alloc problem");

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before read_input_file()\n", ProcID);
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif

  /*
   * Read ASCII input file, data files, related exodusII FEM databases.
   */
  if (ProcID == 0) {
    log_msg("Reading input file ...");
    read_input_file(clc, nclc);   /* Read ascii input file, get file names */

    /*
     * Update input data to account for command line arguments that
     * might override the input deck...
     */
    log_msg("Overriding any input file specs w/ any command line specs...");
    if (argc > 1) apply_command_line(clc, nclc);

#ifdef DEBUG
    DPRINTF(stderr, "apply_command_line() is done.\n");
#endif
  }

  /*
   * The user-defined material properties, etc. available to goma users
   * mean that some dynamically allocated data needs to be communicated.
   *
   * To handle this, sizing information from the input file scan is
   * broadcast in stages so that the other processors can allocate space
   * accordingly to hold the data.
   *
   * Note: instead of handpacking a data structure, use MPI derived datatypes
   * to gather and scatter. Pray this is done efficiently. Certainly it costs
   * less from a memory standpoint.
   */
#ifdef PARALLEL

  /*
   * Make sure the input file was successfully processed before moving on
   */
  check_parallel_error("Input file error");

  /*
   * This is some sizing information that helps fit a little bit more
   * onto the ark later on.
   */
#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before noahs_raven()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  noahs_raven();

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before MPI_Bcast of Noahs_Raven\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Raven->new_type, 0, MPI_COMM_WORLD);

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier after Bcast/before raven_landing()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  /*
   * Get the other processors ready to handle ark data.
   */
  raven_landing();

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before noahs_ark()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  /*
   * This is the main body of communicated information, including some
   * whose sizes were determined because of advanced legwork by the raven.
   */
  noahs_ark();
  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Ark->new_type, 0, MPI_COMM_WORLD);

  /*
   * Chemkin was initialized on processor zero during the input file
   * process. Now, distribute it to all processors
   */
#ifdef USE_CHEMKIN
  if (Chemkin_Needed) {
    chemkin_initialize_mp();
  }
#endif

  /*
   * Once the ark has landed, there are additional things that will need to
   * be sent by dove. Example: BC_Types[]->u-BC arrays.
   */
  ark_landing();

  noahs_dove();
  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Dove->new_type, 0, MPI_COMM_WORLD);

#endif /* End of ifdef PARALLEL */

  /*
   * We sent the packed line to all processors that contained geometry
   * creation commands. Now we need to step through it and create
   * geometry as we go (including possibly reading an ACIS .sat file).
   */

  /* Check to see if the BRK file option exists, and if so, check that the
   * file exists */
  if (Brk_Flag == 1) {
    check_for_brkfile(Brk_File);
  }
  check_parallel_error("Error encountered in check for brkfile");

  /* Now break the exodus files */
  if (Num_Proc > 1 && ProcID == 0 && Brk_Flag == 1) {
    call_brk();
  }
  check_parallel_error("Error in brking exodus files");
  MPI_Barrier(MPI_COMM_WORLD);

  /*
   * For parallel execution, assume the following variables will be changed
   * to reflect the multiple file aspect of the problem.
   *
   *   FEM file              = file.exoII --> file_3of15.exoII
   *   Output EXODUS II file = out.exoII  --> out_3of15.exoII
   */

  /*
   * Allocate space for structures holding the EXODUS II finite element
   * database information and for the Distributed Processing information.
   *
   * These are mostly skeletons with pointers that get allocated in the
   * rd_exoII and rd_dpi routines. Remember to free up those arrays first
   * before freeing the major pointers.
   */
  EXO_ptr = alloc_struct_1(Exo_DB, 1);
  init_exo_struct(EXO_ptr);

  DPI_ptr = alloc_struct_1(Dpi, 1);
  init_dpi_struct(DPI_ptr);

  log_msg("Reading mesh from EXODUS II file...");
  error = read_mesh_exoII(EXO_ptr, DPI_ptr);

  /*
   * Missing files on any processor are detected at a lower level,
   * forcing a return to the higher level:
   *     rd_exo --> rd_mesh --> main
   * Shut down now if any of the exodus files weren't found.
   */
  if (error < 0) {
#ifdef PARALLEL
    MPI_Finalize();
#endif
    return (-1);
  }

  /*
   * All of the MPI_Type_commit() calls made behind the scenes to build
   * the dove, ark and raven really allocated memory. Let's free it up now
   * that the initial information has been communicated.
   */
#ifdef PARALLEL
  MPI_Type_free(&(Noahs_Raven->new_type));
  MPI_Type_free(&(Noahs_Ark->new_type));
  MPI_Type_free(&(Noahs_Dove->new_type));
#endif

  /*
   * Setup the rest of the Problem Description structure that depends on
   * the mesh that was read in from the EXODUS II file...
   *
   * Note that memory allocation and some setup has already been performed
   * in mm_input()...
   */
  error = setup_pd();
  EH(error, "Problem setting up Problem_Description.");

  /*
   * Let's check to see if we need the large elasto-plastic global tensors
   * and allocate them if so
   */
  error = evp_tensor_alloc(EXO_ptr);
  EH(error, "Problems setting up evp tensors");

  /*
   * Now that we know what kind of problem we're solving and have the mesh
   * information, let's allocate space for elemental assembly structures
   */
#ifdef DEBUG
  DPRINTF(stderr, "About to assembly_alloc()...\n");
#endif
  log_msg("Assembly allocation...");

  error = assembly_alloc(EXO_ptr);
  EH(error, "Problem from assembly_alloc");

  if (Debug_Flag) {
    DPRINTF(stderr, "%s: setting up EXODUS II output files...\n", yo);
  }

  /*
   * These are not critical - just niceties. Also, they should not overburden
   * your db with too much of this - they're capped verbiage compliant routines.
   */
  add_qa_stamp(EXO_ptr);
  add_info_stamp(EXO_ptr);

#ifdef DEBUG
  fprintf(stderr, "added qa and info stamps\n");
#endif

  /*
   * If the output EXODUS II database file is different from the input
   * file, then we'll need to replicate all the basic mesh information.
   * But remember that if we're parallel, the output file names must
   * be multiplexed first...
   */
  if (Num_Proc > 1) {
    multiname(ExoFileOut, ProcID, Num_Proc);
    multiname(Init_GuessFile, ProcID, Num_Proc);

    if (strcmp(Soln_OutFile, "") != 0) {
      multiname(Soln_OutFile, ProcID, Num_Proc);
    }

    if (strcmp(ExoAuxFile, "") != 0) {
      multiname(ExoAuxFile, ProcID, Num_Proc);
    }

    if (efv->Num_external_field != 0) {
      for (i = 0; i < efv->Num_external_field; i++) {
        multiname(efv->file_nm[i], ProcID, Num_Proc);
      }
    }
  }

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Preprocess the exodus mesh
   *   -> Allocate pointers to structures containing element
   *      side bc info, First_Elem_Side_BC_Array, and
   *      element edge info, First_Elem_Edge_BC_Array.
   *   -> Determine Unique_Element_Types[] array
   */
#ifdef DEBUG
  fprintf(stderr, "pre_process()...\n");
#endif
  log_msg("Pre processing of mesh...");
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
  pre_process(EXO_ptr);

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Load up a few key indices in the bfd prototype basis function structures
   * and make sure that each active eqn/vbl has a bf[v] that points to the
   * right bfd[]... needs pre_process to find out the number of unique
   * element types in the problem.
   */
#ifdef DEBUG
  fprintf(stderr, "bf_init()...\n");
#endif
  log_msg("Basis function initialization...");
  error = bf_init(EXO_ptr);
  EH(error, "Problem from bf_init");

  /*
   * check for parallel errors before continuing
   */
  check_parallel_error("Error encountered in problem setup");

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Allocate space for each communication exchange description.
   */
#ifdef PARALLEL
#ifdef DEBUG
  fprintf(stderr, "P_%d: Parallel cx allocation\n", ProcID);
#endif
  if (DPI_ptr->num_neighbors > 0) {
    cx      = alloc_struct_1(Comm_Ex, DPI_ptr->num_neighbors);
    Request = alloc_struct_1(MPI_Request, Num_Requests * DPI_ptr->num_neighbors);
    Status  = alloc_struct_1(MPI_Status, Num_Requests * DPI_ptr->num_neighbors);
  }
#endif

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *                      SET UP THE PROBLEM
   *
   * Setup node-based structures
   * Finalise how boundary conditions are to be handled
   * Determine what unknowns are at each owned node, and then tell
   *   neighboring processors about your nodes
   * Set up communications pattern for fast unknown updates between
   *   processors.
   */
  (void) setup_problem(EXO_ptr, DPI_ptr);

  /*
   * check for parallel errors before continuing
   */
  check_parallel_error("Error encountered in problem setup");

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *            CREATE BRK_FILE IF ONE DOES NOT EXIST
   *
   * If no Brk_File exists but the option was configured in the input deck
   * or on the command line, create one now and exit from goma.
   */
  if (Brk_Flag == 2) {
    write_brk_file(Brk_File, EXO_ptr);
    exit(0);
  }

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *            WRITE OUT INITIAL INFO TO EXODUS FILE
   */

  /*
   * Only have to initialize the exodus file if we are using different
   * files for the output versus the input mesh
   */
  if (strcmp(ExoFile, ExoFileOut)) {
    /*
     * Temporarily we'll need to renumber the nodes and elements in the
     * mesh to be 1-based. After writing, return to the 0-based indexing
     * that is more convenient in C.
     */
#ifdef DEBUG
    fprintf(stderr, "1-base; wr_mesh; 0-base\n");
#endif
    one_base(EXO_ptr);
    wr_mesh_exo(EXO_ptr, ExoFileOut, 0);
    zero_base(EXO_ptr);

    /*
     * If running on a distributed computer, augment the plain finite
     * element information of EXODUS with the description of how this
     * piece fits into the global problem.
     */
    if (Num_Proc > 1) {
#ifdef PARALLEL
#ifdef DEBUG
      fprintf(stderr, "P_%d at barrier before wr_dpi()\n", ProcID);
      fprintf(stderr, "P_%d ExoFileOut = \"%s\"\n", ProcID, ExoFileOut);
      error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
      wr_dpi(DPI_ptr, ExoFileOut, 0);
    }
  }

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *                      SOLVE THE PROBLEM
   */
  if (Debug_Flag) {
    switch (Continuation) {
    case ALC_ZEROTH:
      P0PRINTF("%s: continue_problem (zeroth order) ...\n", yo);
      break;
    case ALC_FIRST:
      P0PRINTF("%s: continue_problem (first order) ...\n", yo);
      break;
    case HUN_ZEROTH:
      P0PRINTF("%s: hunt_problem (zeroth order) ...\n", yo);
      break;
    case HUN_FIRST:
      P0PRINTF("%s: hunt_problem (first order) ...\n", yo);
      break;
    case LOCA:
      P0PRINTF("%s: do_loca ...\n", yo);
      break;
    default:
      P0PRINTF("%s: solve_problem...\n", yo);
      break;
    }
  }
#ifdef DEBUG
  switch (Continuation) {
  case ALC_ZEROTH:
    DPRINTF(stderr, "%s: continue_problem (zeroth order) ...\n", yo);
    break;
  case ALC_FIRST:
    DPRINTF(stderr, "%s: continue_problem (first order) ...\n", yo);
    break;
  case HUN_ZEROTH:
    DPRINTF(stderr, "%s: hunt_problem (zeroth order) ...\n", yo);
    break;
  case HUN_FIRST:
    DPRINTF(stderr, "%s: hunt_problem (first order) ...\n", yo);
    break;
  case LOCA:
    DPRINTF(stderr, "%s: do_loca ...\n", yo);
    break;
  default:
    DPRINTF(stderr, "%s: solve_problem...\n", yo);
    break;
  }
#endif

  if (TimeIntegration == TRANSIENT) {
    Continuation = ALC_NONE;
    if (Debug_Flag) {
      P0PRINTF("%s: solve_problem...TRANSIENT superseded Continuation...\n", yo);
    }
#ifdef DEBUG
    DPRINTF(stderr, "%s: solve_problem...TRANSIENT superseded Continuation...\n", yo);
#endif
    solve_problem(EXO_ptr, DPI_ptr, NULL);
  }

  switch (Continuation) {
  case ALC_ZEROTH:
  case ALC_FIRST:
    log_msg("Solving continuation problem");
    continue_problem(cx, EXO_ptr, DPI_ptr);
    break;
  case HUN_ZEROTH:
  case HUN_FIRST:
    log_msg("Solving hunt problem");
    hunt_problem(cx, EXO_ptr, DPI_ptr);
    break;
  case LOCA:
    log_msg("Solving continuation problem with LOCA");
    error = do_loca(cx, EXO_ptr, DPI_ptr);
    break;
  default:
    log_msg("Solving problem");
    if (loca_in->Cont_Alg == LOCA_LSA_ONLY) {
      error = do_loca(cx, EXO_ptr, DPI_ptr);
    } else if (TimeIntegration != TRANSIENT) {
      solve_problem(EXO_ptr, DPI_ptr, NULL);
    }
    break;
  }

#ifdef PARALLEL
  MPI_Barrier(MPI_COMM_WORLD);
#endif

  if (ProcID == 0 && Brk_Flag == 1 && Num_Proc > 1) {
    fix_output();
  }

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *        PRINT A MESSAGE TO STDOUT SAYING WE ARE DONE
   */
  P0PRINTF("\n-done\n\n");

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *            FREE MEMORY ALLOCATED BY THE PROGRAM
   */

  /*
   * free the element block / element based structures
   */
  free_element_blocks(EXO_ptr);

  /*
   * free nodal based structures
   */
  free_nodes();
#ifdef FREE_PROBLEM
  free_problem(EXO_ptr, DPI_ptr);
#endif

  /*
   * Free command line stuff
   */
  if (ProcID == 0) {
    if (argc > 1) {
      for (i = 0; i < argc; i++) {
#ifdef DEBUG
        fprintf(stderr, "clc[%d]->string &= 0x%x\n", i, clc[i]->string);
        fprintf(stderr, "clc[%d] &= 0x%x\n", i, clc[i]);
#endif
        safer_free((void **) &(clc[i]->string));
        safer_free((void **) (clc + i));
      }
      safer_free((void **) &clc);
    }
  }

  /*
   * Free exodus database structures
   */
  free_exo(EXO_ptr);
  safer_free((void **) &EXO_ptr);

  if (Num_Proc > 1) {
    free_dpi(DPI_ptr);
  } else {
    free_dpi_uni(DPI_ptr);
  }
  safer_free((void **) &DPI_ptr);

  /*
   * Remove front scratch file [/tmp/lu.'pid'.0]
   */
  if (Linear_Solver == FRONT) {
    unlerr = unlink(front_scratch_directory);
    WH(unlerr, "Unlink problem with front scratch file");
  }

#ifdef PARALLEL
  total_time = (MPI_Wtime() - time_start) / 60.;
  DPRINTF(stderr, "\nProc 0 runtime: %10.2f Minutes.\n\n", total_time);
  MPI_Finalize();
#endif
#ifndef PARALLEL
  (void) time(&now);
  total_time = (double) now - time_start;
  fprintf(stderr, "\nProc 0 runtime: %10.2f Minutes.\n\n", total_time / 60.);
#endif
  fflush(stdout);
  fflush(stderr);

  log_msg("GOMA ends normally.");

  return (0);
}
void output_stability_matrices(double *mass_matrix, double *jacobian_matrix, int *ija,
                               int num_total_nodes, int NumUnknowns, int NZeros)
{
  static int fake = 0;
  int i, j, k, num_B_nonzeros, num_J_nonzeros;
  FILE *mass_file, *jacobian_file, *vars_file;
  char LSA_mass_output_filename[256], LSA_jacobian_output_filename[256],
       LSA_vars_output_filename[256];

  /* Get the filenames. These will be different if we're doing a
   * normal mode analysis for 3D stability of a 2D flow. */
  if (LSA_3D_of_2D_wave_number == -1.0) {
    sprintf(LSA_mass_output_filename, "LSA_mass_coo.out");
    sprintf(LSA_jacobian_output_filename, "LSA_jac_coo.out");
    sprintf(LSA_vars_output_filename, "LSA_vars.out");
  } else {
    sprintf(LSA_mass_output_filename, "LSA_mass_coo-%g.out", (double)fake);
    sprintf(LSA_jacobian_output_filename, "LSA_jac_coo-%g.out", (double)fake);
    sprintf(LSA_vars_output_filename, "LSA_vars-%g.out", (double)fake++);
  }

  /* Multiplex output names in parallel */
  if (Num_Proc > 1) {
    multiname(LSA_mass_output_filename, ProcID, Num_Proc);
    multiname(LSA_jacobian_output_filename, ProcID, Num_Proc);
    multiname(LSA_vars_output_filename, ProcID, Num_Proc);
  }

  printf("Writing matrix files (%s, %s, %s)...", LSA_mass_output_filename,
         LSA_jacobian_output_filename, LSA_vars_output_filename);
  fflush(stdout);

  /* First, count how many actual nonzeros there are. This will
   * reduce file sizes substantially (especially for the mass
   * matrix). */
  num_J_nonzeros = 0;
  num_B_nonzeros = 0;
  for (i = 0; i < NumUnknowns; i++) {
    if (fabs(jacobian_matrix[i]) > LSA_MATRIX_OUTPUT_TOLERANCE)
      num_J_nonzeros++;
    if (fabs(mass_matrix[i]) > LSA_MATRIX_OUTPUT_TOLERANCE)
      num_B_nonzeros++;
    for (j = ija[i]; j < ija[i+1]; j++) {
      if (fabs(jacobian_matrix[j]) > LSA_MATRIX_OUTPUT_TOLERANCE)
        num_J_nonzeros++;
      if (fabs(mass_matrix[j]) > LSA_MATRIX_OUTPUT_TOLERANCE)
        num_B_nonzeros++;
    }
  }

  mass_file = fopen(LSA_mass_output_filename, "w");
  if (!mass_file)
    EH(-1, "Could not open mass matrix output file.");
  jacobian_file = fopen(LSA_jacobian_output_filename, "w");
  if (!jacobian_file)
    EH(-1, "Could not open jacobian matrix output file.");

  /* Print out header lines containing #nonzeros, then #rows. */
  fprintf(jacobian_file, "%12d\n%12d\n", num_J_nonzeros, NumUnknowns);
  fprintf(mass_file, "%12d\n%12d\n", num_B_nonzeros, NumUnknowns);

  k = 0;
  for (i = 0; i < NumUnknowns; i++) {
    if (fabs(mass_matrix[i]) > LSA_MATRIX_OUTPUT_TOLERANCE)
      fprintf(mass_file, " %5d %5d % 17.14e\n", i, i, mass_matrix[i]);
    if (fabs(jacobian_matrix[i]) > LSA_MATRIX_OUTPUT_TOLERANCE)
      fprintf(jacobian_file, " %5d %5d % 17.14e\n", i, i, jacobian_matrix[i]);
    k++;
    for (j = ija[i]; j < ija[i+1]; j++) {
      if (fabs(mass_matrix[j]) > LSA_MATRIX_OUTPUT_TOLERANCE)
        fprintf(mass_file, " %5d %5d % 17.14e\n", i, ija[j], mass_matrix[j]);
      if (fabs(jacobian_matrix[j]) > LSA_MATRIX_OUTPUT_TOLERANCE)
        fprintf(jacobian_file, " %5d %5d % 17.14e\n", i, ija[j], jacobian_matrix[j]);
      k++;
    }
  }
  fclose(mass_file);
  fclose(jacobian_file);

  /* MMH: Find out which components of the solution vector correspond
   * to which unknowns. I know there's a better way to do this, but
   * I just don't know what it is. Oh well...
   */
  vars_file = fopen(LSA_vars_output_filename, "w");
  if (!vars_file)
    EH(-1, "Could not open variable listing output file.");
  for (i = 0; i < num_total_nodes; i++) {
    if ((j = Index_Solution(i, VELOCITY1, 0, 0, -1)) > -1)
      fprintf(vars_file, "u %d\n", j);
    if ((j = Index_Solution(i, VELOCITY2, 0, 0, -1)) > -1)
      fprintf(vars_file, "v %d\n", j);
    if ((j = Index_Solution(i, VELOCITY3, 0, 0, -1)) > -1)
      fprintf(vars_file, "w %d\n", j);
    if ((j = Index_Solution(i, MESH_DISPLACEMENT1, 0, 0, -1)) > -1)
      fprintf(vars_file, "x %d\n", j);
    if ((j = Index_Solution(i, MESH_DISPLACEMENT2, 0, 0, -1)) > -1)
      fprintf(vars_file, "y %d\n", j);
    if ((j = Index_Solution(i, PRESSURE, 0, 0, -1)) > -1)
      fprintf(vars_file, "p %d\n", j);
    if ((j = Index_Solution(i, PRESSURE, 0, 1, -1)) > -1)
      fprintf(vars_file, "p %d\n", j);
    if ((j = Index_Solution(i, PRESSURE, 0, 2, -1)) > -1)
      fprintf(vars_file, "p %d\n", j);
  }
  fclose(vars_file);

  puts("done.");
  printf("LSA  (in) nnz = %d\n    (out) nnz = %d\n    (out) J_nnz = %d\n    (out) B_nnz = %d\n\n",
         NZeros, k, num_J_nonzeros, num_B_nonzeros);
  fflush(stdout);
}
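/*
 * For reference (an explanatory aside, not from the original source): the
 * ija[]/matrix[] traversal above is Aztec's MSR (modified sparse row)
 * layout. For row i, matrix[i] holds the diagonal entry, the off-diagonal
 * entries of the row occupy positions ija[i] .. ija[i+1]-1, and ija[j]
 * gives the column index of matrix[j]. A 2x2 example,
 *
 *     [ 4  1 ]
 *     [ 2  3 ]
 *
 * stores as (with N = 2 and slot N unused in the value array):
 *
 *     ija    = { 3, 4, 5, 1, 0 }
 *     matrix = { 4, 3, -, 1, 2 }
 */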
int goma_init_(dbl *time1, int *nnodes, int *nelems,
               int *nnv_in, int *nev_in, int *i_soln, int *i_post)
/*
 * Initial main driver for GOMA. Derived from a (1/93) release of
 * the rf_salsa program by
 *
 *    Original Authors: John Shadid (1421)
 *                      Scott Hutchinson (1421)
 *                      Harry Moffat (1421)
 *
 *    Date:             12/3/92
 *
 *    Updates and Changes by:
 *                      Randy Schunk (9111)
 *                      P. A. Sackinger (9111)
 *                      R. R. Rao (9111)
 *                      R. A. Cairncross (Univ. of Delaware)
 *    Dates:            2/93 - 6/96
 *
 *    Modified for continuation
 *                      Ian Gates
 *    Dates:            2/98 - 10/98
 *    Dates:            7/99 - 8/99
 *
 * Last modified: Wed June 26 14:21:35 MST 1994 [email protected]
 * Hello.
 *
 * Note: Many modifications from an early 2/93 pre-release
 *       version of rf_salsa were made by various persons
 *       in order to test ideas about moving/deforming meshes...
 */
{
  /* Local Declarations */
  double time_start, total_time;   /* timing variables */
#ifndef PARALLEL
  struct tm *tm_ptr;               /* additional serial timing variables */
  time_t the_time;
#endif

  int error;
  int i;
  int j;

  static int first_goma_call = TRUE;

  char **ptmp;
  static const char *yo = "goma_init";

  struct Command_line_command **clc = NULL; /* point to command line structure */
  int nclc = 0;                             /* number of command line commands */

/********************** BEGIN EXECUTION ***************************************/

  /* assume number of commands is less than or equal to the number of
   * arguments in the command line minus 1 (1st is program name) */

  /*
   * Get the name of the executable, yo
   */

#ifdef PARALLEL
  if (first_goma_call) {
    Argc = 1;
    Argv = (char **) smalloc(Argc * sizeof(char *));
    Argv[0] = (char *) yo;
    MPI_Init(&Argc, &Argv);   /* PRS will have to fix this. Too late TAB already did. */
  }
  time_start = MPI_Wtime();
#else /* PARALLEL */
  (void) time(&the_time);
  tm_ptr = gmtime(&the_time);
  time_start = (double) (tm_ptr->tm_sec
                         + 60. * (60. * (tm_ptr->tm_yday * 24. + tm_ptr->tm_hour)
                                  + tm_ptr->tm_min));
#endif /* PARALLEL */

  *time1 = time_start;

  /*  Argv = argv; */
  /*  Argc = argc; */

  time_goma_started = time_start;

#ifdef PARALLEL
  /*
   * Determine the parallel processing status, if any. We need to know
   * pretty early if we're "one of many" or the only process.
   */
  error = MPI_Comm_size(MPI_COMM_WORLD, &Num_Proc);
  error = MPI_Comm_rank(MPI_COMM_WORLD, &ProcID);

  /*
   * Setup a default Proc_config so we can use utility routines
   * from Aztec
   */
  AZ_set_proc_config(Proc_Config, MPI_COMM_WORLD);

  /* set the output limit flag if need be */
  if (Num_Proc > DP_PROC_PRINT_LIMIT) Unlimited_Output = FALSE;

#ifdef HAVE_MPE_H
  error = MPE_Init_log();
#endif /* HAVE_MPE_H */

  Dim = 0;   /* for any hypercube legacy code... */
#endif /* PARALLEL */

#ifndef PARALLEL
  Dim      = 0;
  ProcID   = 0;
  Num_Proc = 1;
#endif /* PARALLEL */

  /*
   * HKM - Change the ieee exception handling based on the machine and
   *       the level of debugging/speed desired. This call currently causes
   *       core dumps for floating point exceptions.
   */
  handle_ieee();

  log_msg("--------------");
  log_msg("GOMA begins...");

#ifdef USE_CGM
  cgm_initialize();
#endif

  /*
   * Some initial stuff that only the master process does.
   */

  /* PRS: Disable this command line stuff for the jas coupled version */
  /*------------------------------------------------------------------*/
/*   if ( ProcID == 0 ) */
/*     { */
/*       if (argc > 1) */
/*         { */
/*           log_msg("Preprocessing command line options."); */
/*           clc = (struct Command_line_command **) */
/*             smalloc( argc * sizeof(struct Command_line_command *)); */
/*           for (i=0; i<argc; i++) */
/*             { */
/*               clc[i] = (struct Command_line_command *) */
/*                 smalloc(sizeof(struct Command_line_command)); */
/*               clc[i]->type   = 0;  /\* initialize command line structure *\/ */
/*               clc[i]->i_val  = 0; */
/*               clc[i]->r_val  = 0.; */
/*               clc[i]->string = (char *) */
/*                 smalloc(MAX_COMMAND_LINE_LENGTH*sizeof(char)); */
/*               for ( j=0; j<MAX_COMMAND_LINE_LENGTH; j++) */
/*                 { */
/*                   clc[i]->string[j] = '\0'; */
/*                 } */
/* #ifdef DEBUG */
/*               fprintf(stderr, "clc[%d]->string is at 0x%x\n", i, clc[i]->string); */
/*               fprintf(stderr, "clc[%d] is at 0x%x\n", i, clc[i]); */
/* #endif */
/*             } */
/*         } */

  /* PRS: For the JAS version we will use the default input file name "input" */
  strcpy(Input_File, "input");

/*   if (argc > 1) translate_command_line(argc, argv, clc, &nclc); */

/*       print_code_version(); */
/*       ptmp = legal_notice; */
/*       while ( strcmp(*ptmp, LAST_LEGAL_STRING) != 0 ) */
/*         { */
/*           fprintf(stderr, "%s", *ptmp++); */
/*         } */
/*     } */

  /*
   * Allocate the uniform problem description structure and
   * the problem description structures on all processors
   */
  error = pd_alloc();
  EH(error, "pd_alloc problem");

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier after pd_alloc\n", ProcID);
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif

  log_msg("Allocating mp, gn, ...");

  error = mp_alloc();
  EH(error, "mp_alloc problem");

  error = gn_alloc();
  EH(error, "gn_alloc problem");

  error = ve_alloc();
  EH(error, "ve_alloc problem");

  error = elc_alloc();
  EH(error, "elc_alloc problem");

  error = elc_rs_alloc();
  EH(error, "elc_rs_alloc problem");

  error = cr_alloc();
  EH(error, "cr_alloc problem");

  error = evp_alloc();
  EH(error, "evp_alloc problem");

  error = tran_alloc();
  EH(error, "tran_alloc problem");

  error = libio_alloc();
  EH(error, "libio_alloc problem");

  error = eigen_alloc();
  EH(error, "eigen_alloc problem");

  error = cont_alloc();
  EH(error, "cont_alloc problem");

  error = loca_alloc();
  EH(error, "loca_alloc problem");

  error = efv_alloc();
  EH(error, "efv_alloc problem");

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before read_input_file()\n", ProcID);
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif

  /* PRS: AGAIN, NO COMMAND LINE OVERRIDES IN THIS JAS3D VERSION */

  /*
   * Read ASCII input file, data files, related exodusII FEM databases.
   */
  if (ProcID == 0) {
    log_msg("Reading input file ...");
    read_input_file(clc, nclc);
  }

  /*
   * The user-defined material properties, etc. available to goma users
   * mean that some dynamically allocated data needs to be communicated.
   *
   * To handle this, sizing information from the input file scan is
   * broadcast in stages so that the other processors can allocate space
   * accordingly to hold the data.
   *
   * Note: instead of handpacking a data structure, use MPI derived datatypes
   * to gather and scatter. Pray this is done efficiently. Certainly it costs
   * less from a memory standpoint.
   */
#ifdef PARALLEL

  /*
   * Make sure the input file was successfully processed before moving on
   */
  check_parallel_error("Input file error");

  /*
   * This is some sizing information that helps fit a little bit more
   * onto the ark later on.
   */
#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before noahs_raven()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  noahs_raven();

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before MPI_Bcast of Noahs_Raven\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Raven->new_type, 0, MPI_COMM_WORLD);

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier after Bcast/before raven_landing()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  /*
   * Get the other processors ready to handle ark data.
   */
  raven_landing();

#ifdef DEBUG
  fprintf(stderr, "P_%d at barrier before noahs_ark()\n", ProcID);
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif

  /*
   * This is the main body of communicated information, including some
   * whose sizes were determined because of advanced legwork by the raven.
   */
  noahs_ark();
  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Ark->new_type, 0, MPI_COMM_WORLD);

  /*
   * Chemkin was initialized on processor zero during the input file
   * process. Now, distribute it to all processors
   */
#ifdef USE_CHEMKIN
  if (Chemkin_Needed) {
    chemkin_initialize_mp();
  }
#endif

  /*
   * Once the ark has landed, there are additional things that will need to
   * be sent by dove. Example: BC_Types[]->u-BC arrays.
   */
  ark_landing();

  noahs_dove();
  MPI_Bcast(MPI_BOTTOM, 1, Noahs_Dove->new_type, 0, MPI_COMM_WORLD);

#endif /* End of ifdef PARALLEL */

  /*
   * We sent the packed line to all processors that contained geometry
   * creation commands. Now we need to step through it and create
   * geometry as we go (including possibly reading an ACIS .sat file).
   */
#ifdef USE_CGM
  create_cgm_geometry();
#endif

  /*
   * For parallel execution, assume the following variables will be changed
   * to reflect the multiple file aspect of the problem.
   *
   *   FEM file              = file.exoII --> file_3of15.exoII
   *   Output EXODUS II file = out.exoII  --> out_3of15.exoII
   */

  /*
   * Allocate space for structures holding the EXODUS II finite element
   * database information and for the Distributed Processing information.
   *
   * These are mostly skeletons with pointers that get allocated in the
   * rd_exoII and rd_dpi routines. Remember to free up those arrays first
   * before freeing the major pointers.
   */
  EXO_ptr = alloc_struct_1(Exo_DB, 1);
  init_exo_struct(EXO_ptr);

  DPI_ptr = alloc_struct_1(Dpi, 1);
  init_dpi_struct(DPI_ptr);

  log_msg("Reading mesh from EXODUS II file...");
  error = read_mesh_exoII(EXO_ptr, DPI_ptr);

  /*
   * Missing files on any processor are detected at a lower level,
   * forcing a return to the higher level:
   *     rd_exo --> rd_mesh --> main
   * Shut down now if any of the exodus files weren't found.
   */
  if (error < 0) {
#ifdef PARALLEL
    MPI_Finalize();
#endif
    return (-1);
  }

  /*
   * All of the MPI_Type_commit() calls made behind the scenes to build
   * the dove, ark and raven really allocated memory. Let's free it up now
   * that the initial information has been communicated.
   */
#ifdef PARALLEL
  MPI_Type_free(&(Noahs_Raven->new_type));
  MPI_Type_free(&(Noahs_Ark->new_type));
  MPI_Type_free(&(Noahs_Dove->new_type));
#endif

  /*
   * Setup the rest of the Problem Description structure that depends on
   * the mesh that was read in from the EXODUS II file...
   *
   * Note that memory allocation and some setup has already been performed
   * in mm_input()...
   */
  error = setup_pd();
  EH(error, "Problem setting up Problem_Description.");

  /*
   * Let's check to see if we need the large elasto-plastic global tensors
   * and allocate them if so
   */
  error = evp_tensor_alloc(EXO_ptr);
  EH(error, "Problems setting up evp tensors");

  /*
   * Now that we know what kind of problem we're solving and have the mesh
   * information, let's allocate space for elemental assembly structures
   */
#ifdef DEBUG
  DPRINTF(stderr, "About to assembly_alloc()...\n");
#endif
  log_msg("Assembly allocation...");

  error = assembly_alloc(EXO_ptr);
  EH(error, "Problem from assembly_alloc");

  if (Debug_Flag) {
    DPRINTF(stderr, "%s: setting up EXODUS II output files...\n", yo);
  }

  /*
   * These are not critical - just niceties. Also, they should not overburden
   * your db with too much of this - they're capped verbiage compliant routines.
   */
  add_qa_stamp(EXO_ptr);
  add_info_stamp(EXO_ptr);

#ifdef DEBUG
  fprintf(stderr, "added qa and info stamps\n");
#endif

  /*
   * If the output EXODUS II database file is different from the input
   * file, then we'll need to replicate all the basic mesh information.
   * But remember that if we're parallel, the output file names must
   * be multiplexed first...
   */
  if (Num_Proc > 1) {
    multiname(ExoFileOut, ProcID, Num_Proc);
    multiname(Init_GuessFile, ProcID, Num_Proc);

    if (strcmp(Soln_OutFile, "") != 0) {
      multiname(Soln_OutFile, ProcID, Num_Proc);
    }

    if (strcmp(ExoAuxFile, "") != 0) {
      multiname(ExoAuxFile, ProcID, Num_Proc);
    }

    if (efv->Num_external_field != 0) {
      for (i = 0; i < efv->Num_external_field; i++) {
        multiname(efv->file_nm[i], ProcID, Num_Proc);
      }
    }
  }

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Preprocess the exodus mesh
   *   -> Allocate pointers to structures containing element
   *      side bc info, First_Elem_Side_BC_Array, and
   *      element edge info, First_Elem_Edge_BC_Array.
   *   -> Determine Unique_Element_Types[] array
   */
#ifdef DEBUG
  fprintf(stderr, "pre_process()...\n");
#endif
  log_msg("Pre processing of mesh...");
#ifdef PARALLEL
  error = MPI_Barrier(MPI_COMM_WORLD);
#endif
  pre_process(EXO_ptr);

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Load up a few key indices in the bfd prototype basis function structures
   * and make sure that each active eqn/vbl has a bf[v] that points to the
   * right bfd[]... needs pre_process to find out the number of unique
   * element types in the problem.
   */
#ifdef DEBUG
  fprintf(stderr, "bf_init()...\n");
#endif
  log_msg("Basis function initialization...");
  error = bf_init(EXO_ptr);
  EH(error, "Problem from bf_init");

  /*
   * check for parallel errors before continuing
   */
  check_parallel_error("Error encountered in problem setup");

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   * Allocate space for each communication exchange description.
   */
#ifdef PARALLEL
#ifdef DEBUG
  fprintf(stderr, "P_%d: Parallel cx allocation\n", ProcID);
#endif
  if (DPI_ptr->num_neighbors > 0) {
    cx      = alloc_struct_1(Comm_Ex, DPI_ptr->num_neighbors);
    Request = alloc_struct_1(MPI_Request, Num_Requests * DPI_ptr->num_neighbors);
    Status  = alloc_struct_1(MPI_Status, Num_Requests * DPI_ptr->num_neighbors);
  }
#endif

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *                      SET UP THE PROBLEM
   *
   * Setup node-based structures
   * Finalise how boundary conditions are to be handled
   * Determine what unknowns are at each owned node, and then tell
   *   neighboring processors about your nodes
   * Set up communications pattern for fast unknown updates between
   *   processors.
   */
  (void) setup_problem(EXO_ptr, DPI_ptr);

  /*
   * check for parallel errors before continuing
   */
  check_parallel_error("Error encountered in problem setup");

/***********************************************************************/
/***********************************************************************/
/***********************************************************************/
  /*
   *            WRITE OUT INITIAL INFO TO EXODUS FILE
   */

  /*
   * Only have to initialize the exodus file if we are using different
   * files for the output versus the input mesh
   */
  if (strcmp(ExoFile, ExoFileOut)) {
    /*
     * Temporarily we'll need to renumber the nodes and elements in the
     * mesh to be 1-based. After writing, return to the 0-based indexing
     * that is more convenient in C.
     */
#ifdef DEBUG
    fprintf(stderr, "1-base; wr_mesh; 0-base\n");
#endif
    one_base(EXO_ptr);
    wr_mesh_exo(EXO_ptr, ExoFileOut, 0);
    zero_base(EXO_ptr);

    /*
     * If running on a distributed computer, augment the plain finite
     * element information of EXODUS with the description of how this
     * piece fits into the global problem.
     */
    if (Num_Proc > 1) {
#ifdef PARALLEL
#ifdef DEBUG
      fprintf(stderr, "P_%d at barrier before wr_dpi()\n", ProcID);
      fprintf(stderr, "P_%d ExoFileOut = \"%s\"\n", ProcID, ExoFileOut);
      error = MPI_Barrier(MPI_COMM_WORLD);
#endif
#endif
      wr_dpi(DPI_ptr, ExoFileOut, 0);
    }
  }

  if (Num_Import_NV > 0 || Num_Import_EV > 0)
    printf(" Goma will import %d nodal and %d element variables.\n",
           Num_Import_NV, Num_Import_EV);
  if (Num_Export_XS > 0 || Num_Export_XP > 0)
    printf(" Goma will export %d solution and %d post-processing variables.\n",
           Num_Export_XS, Num_Export_XP);

  /* Return counts to the calling program */
  *nnodes = EXO_ptr->num_nodes;
  *nelems = EXO_ptr->num_elems;
  *nnv_in = Num_Import_NV;
  *nev_in = Num_Import_EV;
  *i_soln = Num_Export_XS;
  *i_post = Num_Export_XP;

  return (0);   /* Back to animas */
}
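/*
 * A note on the entry point above (an aside, not from the original source):
 * the trailing underscore in goma_init_ matches the usual Fortran-to-C name
 * mangling, so a Fortran host (the "animas"/JAS code referenced in the
 * comments) can call it as an ordinary subroutine; that is also why every
 * argument, including the scalar counts, is passed as a pointer.
 */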