/*
** Boot the game: seed the RNG, load the tetrimino shapes, apply user/env
** configuration, prepare the screen and pick the first two pieces.
** av:     command-line arguments (forwarded to user_config)
** env:    process environment; env[0] == NULL is treated as fatal
** config: game configuration to initialise in place
** tetri:  immediately reset to NULL — the incoming value is ignored
** Returns the loaded tetrimino list on success, NULL on any setup error.
*/
t_tetrimino *init_main(char **av, char **env, t_config *config, t_tetrimino *tetri)
{
    /* No environment available — refuse to start (terminal setup
    ** presumably needs it; see init_sequences(config, env) below). */
    if (env[0] == NULL)
        return (NULL);
    srand(time(NULL));
    tetri = NULL;
    config->error = 0;
    /* A shape-loading failure only sets the error flag; initialisation
    ** continues so debug_part() can still report before we bail out. */
    if (load_tetriminos(&tetri) == -1)
        config->error = 1;
    if (init_config(config) == -1)
        return (NULL);
    if (user_config(av, config) == -1)
    {
        write(2, "Error: bad parameter\n", 21);
        return (NULL);
    }
    if (init_sequences(config, env) == -1 || get_highscore(config) == -1)
        return (NULL);
    debug_part(config, &tetri);
    /* Deferred failure from load_tetriminos (or debug_part) above. */
    if (config->error == 1)
        return (NULL);
    prep_screen();
    /* NOTE(review): clean_list presumably prunes/normalises the shape
    ** list before pieces are drawn from it — confirm in its definition. */
    clean_list(&tetri);
    config->form = choose_tetrimino(tetri);
    config->next = choose_tetrimino(tetri);
    /* Centre the first piece horizontally on the board. */
    config->pos[0] = (config->width / 2) - (config->form->width / 2);
    display_game(config, 1);
    return (tetri);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//      loader_thread
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
 * pthread start routine that loads the sequence data in the background.
 *
 * t: unused start-routine argument (required by the pthread_create
 *    signature).
 * Never returns to the caller; always terminates the calling thread
 * with a NULL exit status.
 */
void *loader_thread(void *t)
{
    (void)t;   /* unused — silence -Wunused-parameter */

    /* Load sequences */
    init_sequences();

    /* Exit the thread; for a start routine this is equivalent to
     * `return NULL;`, kept explicit to match the original intent. */
    pthread_exit(NULL);
}
/*
 * Read all EMBL-format sequences from the file `fname` into the global
 * `sequences` store, creating the store on first use.
 *
 * fname: path of the sequence file to read.
 * Returns 0 on success, -1 on open/init/parse failure.
 *
 * Fix: the original leaked the FILE handle on every path (success and
 * both failure paths after the fopen); it is now closed exactly once.
 */
int read_file_to_sequences(char *fname) {
    FILE *pr;
    int rc = -1;

    pr = fopen(fname, "r");
    if (pr == NULL) {
        printf("%s cannot be opened\n", fname);
        return -1;
    }
    /* Lazily create the global sequence store on the first call. */
    if (sequences == NULL) {
        if (NULL == (sequences = init_sequences()))
            goto out;
    }
    if (read_multiple_embl_format_sequence(pr) != 0)
        goto out;
    rc = 0;
out:
    fclose(pr);
    return rc;
}
/*
 * wcd clustering driver.  Parses options, loads the sequence data
 * (optionally merging/adding a second file), dispatches the many
 * "do one thing and exit" auxiliary modes, then runs the clustering
 * proper and emits the requested reports.  In an MPI build, rank 0 is
 * the master and ranks > 0 run the slave path and exit early.
 */
int main(int argc, char *argv[]) {
    struct tms usage;
    FILE *finp;
    int i,j, ticks;
    int numinfirst;
    char chkfile[255];
    i=0;
    dump_file=NULL;
    do_cluster=do_pairwise_cluster;
    /* Fixed seed: runs are deliberately reproducible. */
    srandom(563573);
    bzero(&prog_opts,sizeof(ProgOptionsType));
    outf=stdout;
    // set default distance function
    dist = d2;
    distpair= d2pair;
#ifdef MPI
    MPI_Init(&argc, &argv);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
#endif
    /* NOTE(review): in a non-MPI build myid/numprocs are presumably
     * globals defaulting to 0/1 — confirm where they are defined. */
    if(myid==0) { // Master
        process_options(argc, argv);
    } else {
        process_slave_options(argc, argv);
    }
    /* Version banner (also shown when run with no arguments), then quit. */
    if (prog_opts.show_version || (argc==1)) {
        if (myid==0) printf("Version \n%s\n",version);
#ifdef MPI
        MPI_Finalize();
#endif
        exit(0);
    }
    // Allocate space for the RC table for big words
    rc_big = calloc(BIG_WORD_TSIZE, sizeof(SeqElt));
    // work is an array of work blocks. If non-parallel, there'll only
    // be one. work[0] acts a template
    work = (WorkPtr) calloc(num_threads,sizeof(WorkBlock));
    work->filename = argv[optind];
    work->index = NULL;
    if(prog_opts.do_dump) dump_file = fopen(prog_opts.dname,"w");
#ifdef MPI
    /* MPI slave ranks: do their share of the clustering, send the
     * result back to the master, and exit without running the rest. */
    if (numprocs > 1)
        if (myid>0) { // slaves
            if (prog_opts.split) { MPI_Finalize(); return 0; }
            handleMPISlaveSetup(&num_seqs);
            initialise(work, prog_opts.edfile);
            internalTest();
            perform_clustering(work);
            transmitMPISlaveResponse(work);
            if (prog_opts.show_perf) show_performance(outf);
            MPI_Finalize();
            exit(0);
        }
#else
    /* Non-MPI build: refuse to run under multiple processes. */
    if (numprocs > 1) {
        printf("This version of wcd is not compiled with MPI\n");
        printf("You cannot run it with a multiple processes\n");
        printf("Either only run it with one process or do a \n");
        printf(" ./configure --enable-mpi\n");
        printf(" make clean\n");
        printf(" make \n");
        exit(5);
    }
#endif
    // work out number of sequences
    // if the user has specified a value for num_seqs then
    // use that, else use the number of sequences in the file
    num_seqs = count_seqs(argv[optind], &data_size)+reindex_value;
    seq = (SeqPtr *) calloc(num_seqs,sizeof(SeqPtr));
    seqInfo = (SeqInfoPtr) calloc(num_seqs,sizeof(SeqInfoStruct));
    tree= (UnionFindPtr) calloc(num_seqs,sizeof(UnionFindStruct));
    data= (SeqPtr) calloc(data_size,sizeof(SeqElt));
    init_dummy_sequences();
#ifndef AUXINFO
    seqID = (SeqIDPtr) calloc(num_seqs,sizeof(SeqIDStruct));
#endif
    if (seq == NULL) {
        perror("SeqStruct allocation");
        exit(50);
    }
    numinfirst = global_i_end = num_seqs;
    global_j_beg = 0;
    // if merging, need to check the other file too
    if (prog_opts.domerge || prog_opts.doadd ) {
        global_j_beg = global_i_end;
        num_seqs = handleMerge(argv[optind+2], num_seqs);
        if (prog_opts.doadd) global_i_end = num_seqs;
    }
    initialise(work, prog_opts.edfile);
    if (data == NULL) {
        sprintf(chkfile,"Main data store (%d bytes)",data_size);
        perror(chkfile);
        exit(51);
    }
    for(i=0; i<num_seqs; i++) seqInfo[i].flag=0;
    // reopen sequence file for reading
    finp = fopen(argv[optind],"r");
    if (finp == NULL) {
        perror(argv[optind]);
        exit(51);
    }
    // Some messy stuff to hande auxiliary options
    // Skip to next comment on first reading
    /* Auxiliary mode: compare a single pair of sequences, then quit. */
    if (prog_opts.pairwise==1) {
        sscanf(argv[optind+1], "%d", &i);
        sscanf(argv[optind+2], "%d", &j);
        show_pairwise(finp,i,j);
        return 0;
    }
    if (prog_opts.statgen) {
        compared2nummatches(finp,prog_opts.statgen);
        return 0;
    }
    /* Restrict clustering to a user-supplied index range. */
    if (prog_opts.range) {
        sscanf(argv[optind+1], "%d", &global_i_beg);
        sscanf(argv[optind+2], "%d", &global_i_end);
    }
    /* Auxiliary mode 41: compare sequence j against every index listed
     * in a check file, then quit.
     * NOTE(review): neither malloc(255) nor the fopen of `fname` is
     * checked for NULL before use — fscanf on a NULL checkfile would
     * crash; worth hardening. */
    if (prog_opts.show_comp==41) {
        char * fname;
        fname = malloc(255);
        sscanf(argv[optind+1], "%s", fname);
        read_sequences(finp,reindex_value,num_seqs);
        checkfile = fopen(fname,"r");
        sscanf(argv[optind+2], "%d", &j);
        while (fscanf(checkfile,"%d", &i) != -1) {
            do_compare(finp,i,j,1);
        }
        return 0;
    }
    if (prog_opts.show_comp) {
        sscanf(argv[optind+1], "%d", &i);
        sscanf(argv[optind+2], "%d", &j);
        //printf("Comparing %d and %d of %d flag %d\n",i,j,num_seqs,prog_opts.flag);
        read_sequences(finp,reindex_value,num_seqs);
        do_compare(finp,i,j,prog_opts.flag);
        return 0;
    }
    if (prog_opts.show_index) {
        show_sequence(finp, prog_opts.index,prog_opts.flag);
        return 0;
    }
    // Now read in the sequences
    if (do_cluster == do_pairwise_cluster||do_cluster==do_MPImaster_cluster||do_cluster == do_suffix_cluster)
        read_sequences(finp,reindex_value,numinfirst);
    else
        init_sequences(finp,reindex_value,numinfirst);
    fclose(finp);
    //printf("%d Allocated %d, start=%d, last=%d\n",num_seqs,data_size,data,seq[num_seqs-1].seq);
    if (prog_opts.split) {
        process_split(prog_opts.clfname1, prog_opts.split);
#ifdef MPI
        MPI_Finalize();
#endif
        return 0;
    }
    if (prog_opts.consfname1) process_constraints(prog_opts.consfname1,0);
    if (prog_opts.clustercomp) {
        cluster_compare(argv[optind+1]);
        return 0;
    }
    // If merging or adding need to open the second sequence file
    if (prog_opts.domerge || prog_opts.doadd) {
        finp = fopen(argv[optind+2], "r");
        /* NOTE(review): error message names argv[optind], not the file
         * that actually failed (argv[optind+2]). */
        if (finp == NULL) {
            perror(argv[optind]);
            exit(1);
        }
        if (do_cluster == do_pairwise_cluster)
            read_sequences(finp,numinfirst+reindex_value,num_seqs);
        else
            init_sequences(finp,numinfirst+reindex_value,num_seqs);
        get_clustering(argv[optind+1],0);
        if (prog_opts.domerge) get_clustering(argv[optind+3],numinfirst);
    }
    if (prog_opts.init_cluster) get_clustering(prog_opts.clfname1, 0);
    if (prog_opts.recluster)
        reclustering(work,prog_opts.clfname2);
    else {
        // This really assumes there is only one thread for suffix
        /* NOTE(review): unless the merge/add branch above reopened it,
         * finp was fclose()d earlier — matrix_compare(finp) may be
         * using a closed stream here; confirm. */
        if (prog_opts.pairwise==2) {
            matrix_compare(finp);
            return 0;
        }
        work->workflag = prog_opts.noninterleavednlc;//kludge for suffixarray
        global_j_end = num_seqs;
        perform_clustering(work);
#ifdef MPI
        if (myid>0) transmitMPISlaveResponse(work);
#endif
    }
    /* Reporting phase: each output is opt-in via prog_opts flags. */
    if (prog_opts.show_ext) show_EXT(outf);
    if (prog_opts.show_histo) show_histogram(work);
    if (prog_opts.show_clust&1) show_clusters(outf);
    if (prog_opts.show_clust&8) produce_clusters(prog_opts.clthresh,prog_opts.dirname);
    if (prog_opts.show_perf) show_performance(outf);
    /* Final dump: write CPU times (user, scaled system) to "<dname>-FIN". */
    if (prog_opts.do_dump) {
        strcpy(chkfile,prog_opts.dname);
        strcat(chkfile,"-FIN");
        fclose(dump_file);
        dump_file = fopen(chkfile,"w");
        times(&usage);
        ticks = sysconf(_SC_CLK_TCK);
        fprintf(dump_file,"Completed %ld %ld", usage.tms_utime/ticks, usage.tms_stime*1000/ticks);
        fclose(dump_file);
    }
    if (prog_opts.show_version) fprintf(outf,"\n%s\n",version);
    fclose(outf);
#ifdef MPI
    MPI_Finalize();
#endif
    exit(0);
}