/**
 * @brief Program entry point
 *
 * Compresses each file named on the command line; with no arguments it
 * reads from stdin instead.  For every input a heap buffer is allocated
 * for the derived output-file name, the stream is opened, compressed,
 * and a per-file summary is printed.
 *
 * @param argc The argument counter
 * @param argv The argument vector
 * @return EXIT_SUCCESS on success; exits with EXIT_FAILURE if memory
 *         for an output-file name cannot be allocated
 */
int main(int argc, char** argv) {
    /* Register a subroutine that runs when the program exits gracefully */
    (void) atexit (cleanup);

    /* Input / Output Character Count, filled in by compress() */
    int in_ccount, out_ccount;

    /* Save the name of the program for diagnostics */
    pgm_name = argv[0];

    /* Program is called with arguments */
    if (argc > 1) {
        for (int i = 1; i < argc; i++) {
            /* +5 leaves room for a ".txt" suffix plus the terminating NUL */
            char* out_name = malloc(strlen(argv[i]) + 5);
            if (out_name == NULL) {
                /* Unchecked malloc would make open_stream() write through NULL */
                fprintf(stderr, "%s: out of memory\n", pgm_name);
                exit(EXIT_FAILURE);
            }
            (void) open_stream (argv[i], out_name);
            (void) compress (&in_ccount, &out_ccount);
            (void) output_summary (argv[i], out_name, in_ccount, out_ccount);
            free(out_name);
        }
    } else {
        /* No file arguments: compress stdin */
        char* out_name = malloc(strlen("stdin.txt") + 5);
        if (out_name == NULL) {
            fprintf(stderr, "%s: out of memory\n", pgm_name);
            exit(EXIT_FAILURE);
        }
        (void) open_stream ("stdin", out_name);
        (void) compress (&in_ccount, &out_ccount);
        (void) output_summary ("stdin", out_name, in_ccount, out_ccount);
        free(out_name);
    }

    exit (EXIT_SUCCESS);
}
void video_export_destroy(struct video_export *s) { if(s) { // poison struct output_entry *entry = calloc(sizeof(struct output_entry), 1); pthread_mutex_lock(&s->lock); { if(s->head) { s->tail->next = entry; s->tail = entry; } else { s->head = s->tail = entry; } } pthread_mutex_unlock(&s->lock); platform_sem_post(&s->semaphore); pthread_join(s->thread_id, NULL); pthread_mutex_destroy(&s->lock); // write summary if(s->total > 0) { output_summary(s); } free(s); } }
/*
 * SIGUSR2 handler: print the transfer summary (client side only, since
 * the server's summary goes to the client), close every open file
 * descriptor, and terminate immediately.
 *
 * _exit() is used instead of exit() so no atexit()/stdio teardown runs
 * inside the signal handler.  Exit status is RERR_PARTIAL if an error
 * was logged earlier in the run, 0 otherwise.
 *
 * NOTE(review): output_summary()/close_all() are presumably not
 * async-signal-safe -- confirm this handler is only delivered at points
 * where that is acceptable.
 */
static RETSIGTYPE sigusr2_handler(UNUSED(int val))
{
	if (!am_server)
		output_summary();
	close_all();
	if (log_got_error)
		_exit(RERR_PARTIAL);
	_exit(0);
}
/*
 * Program entry point for the hpfem granular-flow simulator.
 *
 * Overall flow:
 *   1. Initialize MPI and PETSc; register custom MPI datatypes.
 *   2. Read simulation input (material/pile/flux/time properties) and
 *      either restore a saved run (loadrun) or read the preprocessed
 *      grid and initialize the piles.
 *   3. Write initial statistics and visualization output.
 *   4. Time-stepping loop: periodic mesh adaptation (refine/unrefine,
 *      repartition), one step() per iteration, periodic output.
 *   5. Final statistics, flow-outline reduction to rank 0, and
 *      MPI/PETSc shutdown.
 *
 * Returns 0 on completion.
 */
int main(int argc, char *argv[]) {
  int i;  //-- counters

  /* Hash tables holding the distributed mesh (nodes and elements) */
  HashTable* BT_Node_Ptr;
  HashTable* BT_Elem_Ptr;

  //-- MPI
  int myid, master, numprocs;
  int namelen;
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  MPI_Status status;

  MPI_Init(&argc,&argv);
  //PetscInitializeNoArguments();
  PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
  MPI_Get_processor_name(processor_name, &namelen);

  /* Per-rank debug file names (the files themselves are opened elsewhere,
     if at all) */
  char debugfilename[256];
  sprintf(debugfilename,"hpfem%04d.debug",myid);

  /* Wall-clock timing of the whole run (used by the PERFTEST report) */
  double start, end;
  start = MPI_Wtime();

  /* create new MPI datastructures for class objects */
  MPI_New_Datatype();

  char filename[50];
  sprintf(filename,"debug.main.%04d",myid);

  /* read original data from serial preprocessing code and then
     initialize element stiffness routines info */
  int material_count=0, output_flag;
  /* Default (placeholder) material parameters; Read_data() below fills in
     the real values through matprops */
  double epsilon = 1., intfrictang = 1, *bedfrictang = NULL, gamma = 1;
  double frict_tiny = 0.1, mu = 1.0e-03, rho = 1600, rhof = 1000, porosity = 1;
  char **matnames=NULL;
  int xdmerr;

  StatProps statprops;
  MatProps matprops(material_count, matnames, intfrictang, bedfrictang,
                    porosity, mu, rho, rhof, epsilon, gamma, frict_tiny,
                    1.0, 1.0, 1.0);
  TimeProps timeprops;
  timeprops.starttime=time(NULL);
  MapNames mapnames;
  PileProps pileprops;
  FluxProps fluxprops;
  OutLine outline;
  DISCHARGE discharge;
  int adaptflag;
  double end_time = 10000.0;

  /*
   * viz_flag is used to determine which viz output to use
   *   nonzero 1st bit of viz_flag means output tecplotxxxx.plt
   *   nonzero 2nd bit of viz_flag means output mshplotxxxx.plt
   *   nonzero 4th bit of viz_flag means output xdmfxxxxx.xmf
   *   nonzero 5th bit of viz_flag means output grass_sites files
   * order_flag == 1 means use first order method
   * order_flag == 2 means use second order method
   */
  int viz_flag = 0, order_flag, savefileflag=1;  //savefileflag will be flipped so first savefile will end in 0
  int Init_Node_Num, Init_Elem_Num, srctype;
  double v_star;   // v/v_slump
  double nz_star;  /* temporary... used for negligible velocity as stopping
                      criteria paper... plan to include in v_star implicitly
                      later */

  /* Read all run parameters from the preprocessed input files */
  Read_data(myid, &matprops, &pileprops, &statprops, &timeprops, &fluxprops,
            &adaptflag, &viz_flag, &order_flag, &mapnames, &discharge,
            &outline, &srctype );

  /* Try to restore a previously saved run; if that fails, read the grid
     fresh and set up the flow from scratch */
  if(!loadrun(myid, numprocs, &BT_Node_Ptr, &BT_Elem_Ptr, &matprops,
              &timeprops, &mapnames, &adaptflag, &order_flag, &statprops,
              &discharge, &outline)) {
    Read_grid(myid, numprocs, &BT_Node_Ptr, &BT_Elem_Ptr, &matprops, &outline);
    setup_geoflow(BT_Elem_Ptr, BT_Node_Ptr, myid, numprocs, &matprops,&timeprops);
    move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
    AssertMeshErrorFree(BT_Elem_Ptr,BT_Node_Ptr,numprocs,myid,-1.0);

    //initialize pile height and if appropriate perform initial adaptation
    init_piles(BT_Elem_Ptr, BT_Node_Ptr, myid, numprocs, adaptflag, &matprops,
               &timeprops, &mapnames, &pileprops, &fluxprops, &statprops);
  }

  /* Rank 0 echoes the effective material parameters */
  if (myid==0) {
    for(int imat=1; imat<=matprops.material_count; imat++)
      printf("bed friction angle for \"%s\" is %g\n",matprops.matnames[imat],
             matprops.bedfrict[imat]*180.0/PI);
    printf("internal friction angle is %g, epsilon is %g \n method order = %i\n",
           matprops.intfrict*180.0/PI, matprops.epsilon, order_flag );
    printf("REFINE_LEVEL=%d\n",REFINE_LEVEL);
  }
  //printdate(BT_Elem_Ptr, BT_Node_Ptr,&matprops, &fluxprops,&timeprops);

  MPI_Barrier(MPI_COMM_WORLD);

  /* Initial (t=0) statistics and ghost-cell exchange */
  calc_stats(BT_Elem_Ptr, BT_Node_Ptr, myid, &matprops, &timeprops,
             &statprops, &discharge, 0.0);
  output_discharge(&matprops, &timeprops, &discharge, myid);
  move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);

  /* --- retained commented-out experiment: implicit Laplacian smoothing of
     the phase field --- */
  // int jiter;
  // for( jiter=0; jiter <3; jiter++){
  //   LaplacianData Laplacian (BT_Elem_Ptr, BT_Node_Ptr ,1 ,.001);
  //   implicit_solver(&Laplacian);
  // }
  // HashEntryPtr *buck = BT_Elem_Ptr->getbucketptr();
  // for(jiter=0; jiter<BT_Elem_Ptr->get_no_of_buckets(); jiter++){
  //   if(*(buck+jiter))
  //   {
  //     HashEntryPtr currentPtr = *(buck+jiter);
  //     while(currentPtr)
  //     {
  //       Element* Curr_El=(Element*)(currentPtr->value);
  //       if(Curr_El->get_adapted_flag()>0)//||(timeprops_ptr->iter%5==2)))
  //       { //if this is a refined element don't involve!!!
  //         double phi = *(Curr_El->get_state_vars()+4);
  //         if(phi>1) phi=1;
  //         if(phi<0) phi=0;
  //         Curr_El->update_phase2(phi);
  //         Curr_El->update_phase1(phi);
  //       }
  //       currentPtr=currentPtr->next;
  //     }
  //   }
  // }
  //=============================================================================================================
  /* --- retained commented-out debug check on a specific element key --- */
  // int num_buck=BT_Elem_Ptr->get_no_of_buckets();
  // HashEntryPtr* buck = BT_Elem_Ptr->getbucketptr();
  // int num_nonzero_elem=0, *all_num_nonzero_elem;
  // for(i=0; i<num_buck; i++)
  //   if(*(buck+i)){
  //     HashEntryPtr currentPtr = *(buck+i);
  //     while(currentPtr){
  //       Element* Curr_El=(Element*)(currentPtr->value);
  //       if((Curr_El->get_adapted_flag()>0)&&
  //          (myid==Curr_El->get_myprocess()))
  //         if(*(Curr_El->pass_key())==3842346279 && *(Curr_El->pass_key()+1)==2368179492)
  //         {
  //           int xp=Curr_El->get_positive_x_side();
  //           int yp=(xp+1)%4, xm=(xp+2)%4, ym=(xp+3)%4;
  //           Node* nym = (Node*) BT_Node_Ptr->lookup(Curr_El->getNode()+(ym+4)*2);
  //           assert(nym->flux[1]);
  //         }
  //       currentPtr=currentPtr->next;
  //     }
  //   }
  //=====================================================================================================

  /* Initial summary and visualization output (iteration 0) */
  if(myid==0) output_summary(&timeprops, &statprops, savefileflag);

  if(viz_flag&1)
    tecplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops, &mapnames,
               statprops.vstar );
  if(viz_flag&2)
    meshplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops, &mapnames,
                statprops.vstar);
  //printdate(BT_Elem_Ptr, BT_Node_Ptr,&matprops, &fluxprops,&timeprops);
#ifdef HAVE_HDF5
  if(viz_flag&8)
    xdmerr=write_xdmf(BT_Elem_Ptr,BT_Node_Ptr,&timeprops,&matprops,&mapnames,XDMF_NEW);
#endif
  if(viz_flag&16){
    if(myid==0) grass_sites_header_output(&timeprops);
    grass_sites_proc_output(BT_Elem_Ptr, BT_Node_Ptr, myid, &matprops,
                            &timeprops);}

  /* cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
     cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
                       Time Stepping Loop
     cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
     cccccccccccccccccccccccccccccccccccccccccccccccccccccccccc */
  long element_counter=0;  // for performance count elements/timestep/proc
  int ifstop=0;
  double max_momentum=100;  //nondimensional

  /* ifend(0.5*statprops.vmean) is a hack, the original intent (when we were
     intending to use vstar as a stopping criteria) was to have the
     calculation stop when vstar dropped back down below 1, instead we're
     using the ifend() function to stop the simulation when the volume
     averaged velocity falls back down below 2 meters... this hack is only
     for the colima hazard map runs, otherwise pass ifend() a constant value */
  while(!(timeprops.ifend(0)) && !ifstop)  //(timeprops.ifend(0.5*statprops.vmean)) && !ifstop)
  {
    /*
     * mesh adaption routines
     */
    double TARGET = .05;
    //double UNREFINE_TARGET = .005;
    double UNREFINE_TARGET = .01;
    int h_count = 0;
    /* Use a larger friction floor for the first 50 iterations, then
       effectively drop it */
    if (timeprops.iter < 50) matprops.frict_tiny=0.1;
    else matprops.frict_tiny=0.000000001;

    //check for changes in topography and update if necessary
    //may want to put an "if(timeprops.iter %20==0)" (20 is arbitrary) here
    if(timeprops.iter==200){
      update_topo(BT_Elem_Ptr, BT_Node_Ptr, myid, numprocs, &matprops,
                  &timeprops,&mapnames);
    }

    /* Adapt the mesh every 5th iteration (when adaptation is enabled) */
    if((adaptflag!=0)&&(timeprops.iter%5==4))
    {
      AssertMeshErrorFree(BT_Elem_Ptr,BT_Node_Ptr,numprocs,myid,-2.0);

      H_adapt(BT_Elem_Ptr, BT_Node_Ptr, h_count, TARGET, &matprops,
              &fluxprops, &timeprops, 5);
      move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);

      unrefine(BT_Elem_Ptr, BT_Node_Ptr, UNREFINE_TARGET, myid, numprocs,
               &timeprops, &matprops);
      MPI_Barrier(MPI_COMM_WORLD);  //for debug
      move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
      //this move_data() here for debug... to make AssertMeshErrorFree() work

      /* Rebalance the element distribution every 10th iteration */
      if((numprocs>1)&&(timeprops.iter%10==9))
      {
        repartition2(BT_Elem_Ptr, BT_Node_Ptr, &timeprops);
        move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
        //this move_data() here for debug... to make AssertMeshErrorFree() work
      }
      move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
    }

    /* Advance the solution one time step */
    step(BT_Elem_Ptr, BT_Node_Ptr, myid, numprocs, &matprops, &timeprops,
         &pileprops, &fluxprops, &statprops, &order_flag, &outline,
         &discharge,adaptflag);

    /*
     * output results to file
     */
    if(timeprops.ifoutput())
    {
      // if (timeprops.iter%30==1){//(timeprops.iter<1000 && timeprops.iter%60==58)
      //   output_flag=1;
      //else if ( timeprops.iter%200==198)
      //   output_flag=1;
      // if(output_flag)
      // {
      move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
      output_discharge(&matprops, &timeprops, &discharge, myid);
      //output_flag=0;
      if(myid==0){
        output_summary(&timeprops, &statprops, savefileflag);
      }
      if(viz_flag&1)
        tecplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops,
                   &mapnames,statprops.vstar);
      if(viz_flag&2)
        meshplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops,
                    &mapnames,statprops.vstar);
#ifdef HAVE_HDF5
      if(viz_flag&8)
        xdmerr=write_xdmf(BT_Elem_Ptr,BT_Node_Ptr,&timeprops,&matprops,&mapnames,XDMF_OLD);
#endif
      if(viz_flag&16){
        if(myid==0) grass_sites_header_output(&timeprops);
        grass_sites_proc_output(BT_Elem_Ptr, BT_Node_Ptr, myid, &matprops,
                                &timeprops);
      }
    }

#ifdef PERFTEST
    /* Count elements owned by this processor this timestep; countedvalue
       alternates 1/2 so the previous iteration's marks are reused */
    int countedvalue=timeprops.iter%2+1;
    int e_buckets=BT_Elem_Ptr->get_no_of_buckets();
    HashEntry* entryp;
    for(i=0; i<e_buckets; i++)
    {
      entryp = *(BT_Elem_Ptr->getbucketptr() + i);
      while(entryp)
      {
        Element * EmTemp = (Element*)entryp->value;
        assert(EmTemp);
        assert(EmTemp->get_counted()!=countedvalue);
        if((EmTemp->get_adapted_flag()>=NOTRECADAPTED)&&
           (EmTemp->get_adapted_flag()<=BUFFER) )
        {
          //if this element doesn't belong on this processor don't involve
          element_counter++;
          EmTemp->put_counted(countedvalue);
        }
        entryp = entryp->next;
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);
#endif
  }

  /* --- end of time stepping: final exchange, statistics and output --- */
  MPI_Barrier(MPI_COMM_WORLD);
  move_data(numprocs, myid, BT_Elem_Ptr, BT_Node_Ptr,&timeprops);
  MPI_Barrier(MPI_COMM_WORLD);

  output_discharge(&matprops, &timeprops, &discharge, myid);
  MPI_Barrier(MPI_COMM_WORLD);

  if(myid==0) output_summary(&timeprops, &statprops, savefileflag);
  //printf("hpfem.C 1: xcen=%g\n",statprops.xcen);
  if(viz_flag&1)
    tecplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops, &mapnames,
               statprops.vstar);
  //printf("hpfem.C 2: xcen=%g\n",statprops.xcen);
  MPI_Barrier(MPI_COMM_WORLD);
  if(viz_flag&2)
    meshplotter(BT_Elem_Ptr, BT_Node_Ptr, &matprops, &timeprops, &mapnames,
                statprops.vstar);
  MPI_Barrier(MPI_COMM_WORLD);
#ifdef HAVE_HDF5
  if(viz_flag&8)
    xdmerr=write_xdmf(BT_Elem_Ptr,BT_Node_Ptr,&timeprops,&matprops,&mapnames,XDMF_CLOSE);
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  if(viz_flag&16){
    if(myid==0) grass_sites_header_output(&timeprops);
    grass_sites_proc_output(BT_Elem_Ptr, BT_Node_Ptr, myid, &matprops,
                            &timeprops);}
  MPI_Barrier(MPI_COMM_WORLD);

  // write out ending warning, maybe flow hasn't finished moving
  sim_end_warning(BT_Elem_Ptr, &matprops, &timeprops, statprops.vstar);
  MPI_Barrier(MPI_COMM_WORLD);

  //write out the final pile statistics (and run time)
  if(myid==0) out_final_stats(&timeprops, &statprops);
  MPI_Barrier(MPI_COMM_WORLD);

  //write out stochastic simulation statistics
  //if(statprops.lhs.runid>=0)
  if(myid==0) output_stoch_stats(&matprops, &statprops);
  MPI_Barrier(MPI_COMM_WORLD);

  //saverun(&BT_Node_Ptr, myid, numprocs, &BT_Elem_Ptr, &matprops, &timeprops, &mapnames,
  //        adaptflag, order_flag, &statprops, &discharge, &outline, &savefileflag);
  // (Saving here was not possible because the saverun function doesn't write
  // the information for the laplacian; moreover the element constructor
  // requires some modifications.)
  MPI_Barrier(MPI_COMM_WORLD);

  //output maximum flow depth a.k.a. flow outline:
  //sum the per-rank pileheight grids onto rank 0 and write the result
  OutLine outline2;
  double dxy[2];
  dxy[0]=outline.dx;
  dxy[1]=outline.dy;
  outline2.init2(dxy,outline.xminmax,outline.yminmax);
  int NxNyout=outline.Nx*outline.Ny;
  MPI_Reduce(*(outline.pileheight),*(outline2.pileheight),NxNyout,
             MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
  if(myid==0) outline2.output(&matprops,&statprops);

#ifdef PERFTEST
  /* Per-rank performance report: local vs. global element-update counts */
  long m = element_counter, ii;
  MPI_Allreduce ( &element_counter, &ii, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD );
  end=MPI_Wtime();
  char perffilename[256];
  sprintf(perffilename,"perform%04d.%04d",numprocs,myid);
  FILE *fpperf=fopen(perffilename,"w");
  fprintf(fpperf,"%d Finished -- used %ld elements of %ld total in %e seconds, %e\n",myid,m,ii,end-start, ii/(end-start));
  fclose(fpperf);
#endif

  PetscFinalize();
  MPI_Finalize();
  return(0);
}
/*
 * This is called once the connection has been negotiated.  It is used
 * for rsyncd, remote-shell, and local connections.
 *
 * f_in/f_out are the negotiated transport descriptors; pid is the child
 * process to reap (-1 if none); argc/argv are the remaining file args.
 * Returns the worse of the child's exit status and the receive status.
 * On the sender side this function does not return -- it calls
 * exit_cleanup() after the transfer.
 */
int client_run(int f_in, int f_out, pid_t pid, int argc, char *argv[])
{
	struct file_list *flist = NULL;
	int status = 0, status2 = 0;
	char *local_name = NULL;

	/* Remember the child so cleanup code can reap it on abnormal exit */
	cleanup_child_pid = pid;
	if (!read_batch) {
		set_nonblocking(f_in);
		set_nonblocking(f_out);
	}

	io_set_sock_fds(f_in, f_out);
	setup_protocol(f_out,f_in);

	if (protocol_version >= 23 && !read_batch)
		io_start_multiplex_in();

	/* We set our stderr file handle to blocking because ssh might have
	 * set it to non-blocking.  This can be particularly troublesome if
	 * stderr is a clone of stdout, because ssh would have set our stdout
	 * to non-blocking at the same time (which can easily cause us to lose
	 * output from our print statements).  This kluge shouldn't cause ssh
	 * any problems for how we use it.  Note also that we delayed setting
	 * this until after the above protocol setup so that we know for sure
	 * that ssh is done twiddling its file descriptors.  */
	set_blocking(STDERR_FILENO);

	if (am_sender) {
		keep_dirlinks = 0; /* Must be disabled on the sender. */
		io_start_buffering_out();
		if (!filesfrom_host)
			set_msg_fd_in(f_in);
		send_filter_list(f_out);
		if (filesfrom_host)
			filesfrom_fd = f_in;

		if (write_batch && !am_server)
			start_write_batch(f_out);
		/* Build and transmit the file list, then send file data */
		flist = send_file_list(f_out, argc, argv);
		set_msg_fd_in(-1);
		if (verbose > 3)
			rprintf(FINFO,"file list sent\n");
		the_file_list = flist;

		io_flush(NORMAL_FLUSH);
		send_files(flist,f_out,f_in);
		io_flush(FULL_FLUSH);
		handle_stats(-1);
		if (protocol_version >= 24)
			read_final_goodbye(f_in, f_out);
		if (pid != -1) {
			if (verbose > 3)
				rprintf(FINFO,"client_run waiting on %d\n", (int) pid);
			io_flush(FULL_FLUSH);
			wait_process(pid, &status);
		}
		output_summary();
		io_flush(FULL_FLUSH);
		/* Sender path never returns normally */
		exit_cleanup(status);
	}

	/* --- receiver side from here on --- */
	if (need_messages_from_generator && !read_batch)
		io_start_multiplex_out();

	if (argc == 0)
		list_only |= 1;

	/* In batch-read mode the filter list comes from the batch file,
	 * not the remote side */
	send_filter_list(read_batch ? -1 : f_out);

	if (filesfrom_fd >= 0) {
		io_set_filesfrom_fds(filesfrom_fd, f_out);
		filesfrom_fd = -1;
	}

	if (write_batch && !am_server)
		start_write_batch(f_in);
	flist = recv_file_list(f_in);
	the_file_list = flist;

	if (flist && flist->count > 0) {
		local_name = get_local_name(flist, argv[0]);

		status2 = do_recv(f_in, f_out, flist, local_name);
	} else {
		/* Nothing to receive: still report the (empty) stats */
		handle_stats(-1);
		output_summary();
	}

	if (pid != -1) {
		if (verbose > 3)
			rprintf(FINFO,"client_run2 waiting on %d\n", (int) pid);
		io_flush(FULL_FLUSH);
		wait_process(pid, &status);
	}

	return MAX(status, status2);
}