GRAPH *create_graph(void)
{
    GRAPH *temp = malloc(sizeof(GRAPH));
    if (temp == NULL) {
        perror("MALLOC GRAPH");
        exit(1);
    }
    temp->size = get_size_of_vertex();
    temp->labels = create_points(temp);
    temp->edges = alloc_edges(temp);
    add_edges(temp);
    view_edges(temp);
    return temp;
}
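/* Hedged usage sketch (not from the original source): create_graph() already
   prompts for the vertex count, allocates storage, adds the edges, and prints
   them, so a driver only needs to call it. Cleanup is omitted because the
   internal layout of GRAPH's allocations is not shown here. */
int main(void)
{
    GRAPH *g = create_graph();
    /* ... traverse or query g here ... */
    return 0; /* memory reclaimed at process exit; a real driver would free g's members */
}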
// -----------------------------------------------------------------------------
// Draw all segments, ignoring connectivity.
// A segment is drawn if its segnum != -1.
void draw_mine_all(segment *sp, int automap_flag)
{
    int s;
    int i;

    // clear visited list
    for (i=0; i<=Highest_segment_index; i++)
        Been_visited[i] = 0;

    edge_list_size = min(Num_vertices*4, MAX_EDGES); // maybe smaller than max

    // clear edge list
    for (i=0; i<edge_list_size; i++) {
        edge_list[i].type = ET_EMPTY;
        edge_list[i].face_count = 0;
        edge_list[i].backface_count = 0;
    }

    n_used = 0;

    for (s=0; s<=Highest_segment_index; s++)
        if (sp[s].segnum != -1) {
            for (i=0; i<MAX_SIDES_PER_SEGMENT; i++)
                if (sp[s].sides[i].wall_num != -1)
                    draw_special_wall(&sp[s], i);
            if (Search_mode)
                check_segment(&sp[s]);
            else {
                add_edges(&sp[s]);
                draw_seg_objects(&sp[s]);
            }
        }

    draw_mine_edges(automap_flag);
}
InteractionGraph get_interaction_graph(ScoringFunctionAdaptor rsi,
                                       const ParticlesTemp& ps) {
  if (ps.empty()) return InteractionGraph();
  InteractionGraph ret(ps.size());
  Restraints rs = IMP::create_decomposition(rsi->create_restraints());
  //Model *m= ps[0]->get_model();
  IMP::base::map<ModelObject*, int> map;
  InteractionGraphVertexName pm = boost::get(boost::vertex_name, ret);
  DependencyGraph dg = get_dependency_graph(ps[0]->get_model());
  DependencyGraphVertexIndex index = IMP::get_vertex_index(dg);
  /*IMP_IF_LOG(VERBOSE) {
    IMP_LOG_VERBOSE("dependency graph is \n");
    IMP::internal::show_as_graphviz(dg, std::cout);
  }*/
  for (unsigned int i = 0; i < ps.size(); ++i) {
    ParticlesTemp t = get_dependent_particles(ps[i],
                                              ParticlesTemp(ps.begin(), ps.end()),
                                              dg, index);
    for (unsigned int j = 0; j < t.size(); ++j) {
      IMP_USAGE_CHECK(map.find(t[j]) == map.end(),
                      "Currently particles which depend on more "
                      << "than one particle "
                      << "from the input set are not supported."
                      << " Particle \"" << t[j]->get_name()
                      << "\" depends on \"" << ps[i]->get_name()
                      << "\" and \"" << ps[map.find(t[j])->second]->get_name()
                      << "\"");
      map[t[j]] = i;
    }
    IMP_IF_LOG(VERBOSE) {
      IMP_LOG_VERBOSE("Particle \"" << ps[i]->get_name() << "\" controls ");
      for (unsigned int j = 0; j < t.size(); ++j) {
        IMP_LOG_VERBOSE("\"" << t[j]->get_name() << "\" ");
      }
      IMP_LOG_VERBOSE(std::endl);
    }
    pm[i] = ps[i];
  }
  IMP::Restraints all_rs = IMP::get_restraints(rs);
  for (Restraints::const_iterator it = all_rs.begin();
       it != all_rs.end(); ++it) {
    ModelObjectsTemp pl = (*it)->get_inputs();
    add_edges(ps, pl, map, *it, ret);
  }
  /* Make sure that composite score states (eg the normalizer for
     rigid body rotations) don't induce interactions among unconnected
     particles. */
  ScoreStatesTemp ss = get_required_score_states(rs, dg, index);
  for (ScoreStatesTemp::const_iterator it = ss.begin();
       it != ss.end(); ++it) {
    ModelObjectsTemps interactions = (*it)->get_interactions();
    for (unsigned int i = 0; i < interactions.size(); ++i) {
      add_edges(ps, interactions[i], map, *it, ret);
    }
  }
  IMP_INTERNAL_CHECK(boost::num_vertices(ret) == ps.size(),
                     "Wrong number of vertices "
                     << boost::num_vertices(ret)
                     << " vs " << ps.size());
  return ret;
}
int maxcut_haplotyping(char* fragmentfile, char* variantfile, int snps, char* outputfile, int maxiter_hapcut)
{
    // IMP NOTE: all SNPs start from 1 instead of 0 and all offsets are 1+
    fprintf(stderr, "calling MAXCUT based haplotype assembly algorithm\n");
    int fragments = 0, iter = 0, components = 0;
    int i = 0, j = 0, k = 0, t = 0, component;
    int* slist;
    int flag = 0;
    float bestscore_mec = 0, calls = 0, miscalls = 0, ll = 0;
    char buffer[MAXBUF];

    /****************************** READ FRAGMENT MATRIX ******************************/
    struct fragment* Flist;
    FILE* ff = fopen(fragmentfile, "r");
    if (ff == NULL) {
        fprintf(stderr, "couldn't open fragment file %s\n", fragmentfile);
        exit(0);
    }
    fragments = 0;
    while (fgets(buffer, MAXBUF, ff) != NULL) fragments++;
    fclose(ff);
    Flist = (struct fragment*)malloc(sizeof(struct fragment)*fragments);
    flag = read_fragment_matrix(fragmentfile, Flist, fragments);
    if (flag < 0) {
        fprintf(stderr, "unable to read fragment matrix file %s \n", fragmentfile);
        return -1;
    }
    if (VCFformat == 0) snps = count_variants(variantfile);
    else snps = count_variants_vcf(variantfile);
    if (snps < 0) {
        fprintf(stderr, "unable to read variant file %s \n", variantfile);
        return -1;
    }
    fprintf(stderr, "processed fragment file and variant file: fragments %d variants %d\n", fragments, snps);

    /****************************** BUILD SNP-FRAGMENT GRAPH ******************************/
    struct SNPfrags* snpfrag = (struct SNPfrags*)malloc(sizeof(struct SNPfrags)*snps);
    update_snpfrags(Flist, fragments, snpfrag, snps, &components);
    double MEM_ALLOC = 0;
    for (i=0;i<snps;i++) MEM_ALLOC += snpfrag[i].edges*0.002;
    MEM_ALLOC *= 0.016;
    fprintf(stderr, "%f MB memory needs to be allocated for graph edges\n", MEM_ALLOC); // size of struct edge is 16/1000 bytes
    if (MEM_ALLOC >= MAX_MEMORY)
    {
        fprintf(stderr, "\nstoring the HAPCUT graph structure requires more than %d MB of memory:\n 1. increase the maximum memory available using option \"--maxmem 12000\" where the memory is specified in megabytes OR \n 2. run the program with the options \"--longreads 1 \" to reduce the number of edges stored \n\n", MAX_MEMORY);
        return -1;
    } // too much memory allocated here for fosmid based data...

    for (i=0;i<snps;i++) snpfrag[i].elist = (struct edge*)malloc(sizeof(struct edge)*snpfrag[i].edges);
    for (i=0;i<snps;i++) snpfrag[i].telist = (struct edge*)malloc(sizeof(struct edge)*snpfrag[i].edges);
    if (FOSMIDS == 0) add_edges(Flist, fragments, snpfrag, snps, &components);
    else if (FOSMIDS >= 1) add_edges_fosmids(Flist, fragments, snpfrag, snps, &components);

    // this considers only components with at least two nodes
    fprintf(stderr, "fragments %d snps %d component(blocks) %d\n", fragments, snps, components);
    struct BLOCK* clist = (struct BLOCK*)malloc(sizeof(struct BLOCK)*components);
    component = 0;
    generate_clist_structure(Flist, fragments, snpfrag, snps, components, clist);

    /*************************************************************************************/
    char* HAP1 = (char*)malloc(snps+1);
    char* besthap_mec = (char*)malloc(snps+1);
    char* HAP2 = (char*)malloc(snps+1);
    struct tm *ts1;
    char buf[80];
    time_t now;
    slist = (int*)malloc(sizeof(int)*snps);
    char fn[1000];
    if (VCFformat == 0) read_variantfile(variantfile, snpfrag, snps);
    else read_vcffile(variantfile, snpfrag, snps);

    /*************************************************************************************/
    if (RANDOM_START == 1)
    {
        fprintf(stdout, "starting from a completely random solution\n");
        for (i=0;i<snps;i++)
        {
            if (snpfrag[i].frags == 0) { HAP1[i] = '-'; HAP2[i] = '-'; }
            else
            {
                if (drand48() < 0.5) { HAP1[i] = '0'; HAP2[i] = '1'; }
                else { HAP1[i] = '1'; HAP2[i] = '0'; }
            }
        }
    }
    for (i=0;i<snps;i++) besthap_mec[i] = HAP1[i];

    // for each block, we maintain best haplotype solution under MFR criterion
    // compute the component-wise score for 'initHAP' haplotype
    miscalls = 0; bestscore_mec = 0;
    for (k=0;k<components;k++)
    {
        clist[k].MEC = 0; clist[k].bestMEC = 0; clist[k].calls = 0; clist[k].LL = 0;
        for (i=0;i<clist[k].frags;i++)
        {
            update_fragscore(Flist, clist[k].flist[i], HAP1);
            clist[k].MEC += Flist[clist[k].flist[i]].currscore;
            clist[k].LL += Flist[clist[k].flist[i]].ll;
            clist[k].calls += Flist[clist[k].flist[i]].calls;
        }
        clist[k].bestMEC = clist[k].MEC;
        bestscore_mec += clist[k].bestMEC;
        miscalls += clist[k].MEC;
        clist[k].bestLL = clist[k].LL;
    }
    //annealing_haplotyping(Flist,fragments,snpfrag,snps,maxiter,HAP1,HAP2,clist,components,slist); return 1;
    //annealing_haplotyping_full(Flist,fragments,snpfrag,snps,maxiter,HAP1,HAP2,0); return 1;

    /************** RUN THE MAX_CUT ALGORITHM ITERATIVELY TO IMPROVE MEC SCORE **************/
    for (iter=0;iter<maxiter_hapcut;iter++)
    {
        mecscore(Flist, fragments, HAP1, &ll, &calls, &miscalls);
        time(&now);
        ts1 = localtime(&now);
        strftime(buf, sizeof(buf), "%a %Y-%m-%d %H:%M:%S %Z", ts1);
        fprintf(stdout, "iter %d current haplotype MEC %f calls %d LL %f %s \n", iter, miscalls, (int)calls, ll, buf);
        fprintf(stderr, "iter %d current haplotype MEC %f calls %d LL %f %s \n", iter, miscalls, (int)calls, ll, buf);
        if (iter%10 == 0 && iter > 0) // new code added april 7 2012
        {
            for (k=0;k<components;k++) find_bestvariant_segment(Flist, fragments, snpfrag, clist, k, HAP1, HAP2);
            sprintf(fn, "%s", outputfile); // new file for every update to score....
            //sprintf(fn,"%s.%f",outputfile,miscalls); // new file for every update to score....
            fprintf(stdout, "OUTPUTTING HAPLOTYPE ASSEMBLY TO FILE %s\n", fn);
            fprintf(stderr, "OUTPUTTING HAPLOTYPE ASSEMBLY TO FILE %s\n", fn);
            //if (VCFformat ==1) print_haplotypes_vcf(clist,components,HAP1,Flist,fragments,snpfrag,snps,fn);
            print_hapfile(clist, components, HAP1, Flist, fragments, snpfrag, variantfile, miscalls, fn);
            // do this only if some option is specified
            if (PRINT_FRAGMENT_SCORES == 1)
            {
                print_fragmentmatrix_MEC(Flist, fragments, HAP1, outputfile);
                //print_matrix(clist,components,HAP1,Flist,outputfile);
            }
        }
        for (k=0;k<components;k++) // COMPUTATION OF TREE FOR EACH COMPONENT
        {
            //if ((k*50)%components ==0) fprintf(stderr,"#");
            if (iter == 0) fprintf(stdout, "\n component %d length %d phased %d %d...%d \n", k, clist[k].length, clist[k].phased, clist[k].offset, clist[k].lastvar);
            // call function for each component only if MEC > 0 april 17 2012
            if (clist[k].MEC > 0) evaluate_cut_component(Flist, snpfrag, clist, k, slist, HAP1, iter);
        }
        for (i=0;i<snps;i++)
        {
            // commented out on april 6 4pm 2012
            //if (HAP1[i] == '0') HAP2[i] = '1'; else if (HAP1[i] == '1') HAP2[i] = '0'; else HAP2[i] = HAP1[i];
        }
    }
    /************** RUN THE MAX_CUT ALGORITHM ITERATIVELY TO IMPROVE MEC SCORE **************/
    //annealing_haplotyping_full(Flist,fragments,snpfrag,snps,maxiter+100,HAP1,HAP2,0); return 1;
    return 1;
}
void edges(std::initializer_list<std::initializer_list<int>> pairs)
{
    clear();
    add_edges(pairs);
}
adjacency_matrix(std::initializer_list<std::initializer_list<int>> pairs,
                 std::size_t nodes_count, bool directed = false)
    : adjacency_matrix{nodes_count, directed}
{
    add_edges(pairs);
}
</adjacency_matrix>
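// Hedged usage sketch: assumes adjacency_matrix models an undirected graph by
// default and that edges()/add_edges() take {from, to} pairs, as the two
// members above suggest; the class internals are not shown here.
int main()
{
    adjacency_matrix g({{0, 1}, {1, 2}, {2, 0}}, 3); // 3 nodes, a triangle
    g.edges({{0, 2}});                               // replace the edge set: clear(), then add
    return 0;
}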
int maxcut_haplotyping(char* fragmentfile, char* variantfile, char* outputfile)
{
    // IMP NOTE: all SNPs start from 1 instead of 0 and all offsets are 1+
    fprintf_time(stderr, "Calling Max-Likelihood-Cut based haplotype assembly algorithm\n");
    int snps = 0;
    int fragments = 0, iter = 0, components = 0;
    int i = 0, k = 0;
    int* slist;
    int flag = 0;
    float bestscore = 0, miscalls = 0;
    int hic_iter = 0;
    struct SNPfrags* snpfrag = NULL;
    struct BLOCK* clist;
    char* HAP1;
    float HIC_LL_SCORE = -80;
    float OLD_HIC_LL_SCORE = -80;
    int converged_count = 0, split_count, new_components, component;
    int new_fragments = 0;
    struct fragment* new_Flist;

    // READ FRAGMENT MATRIX
    fragments = get_num_fragments(fragmentfile);
    struct fragment* Flist;
    Flist = (struct fragment*) malloc(sizeof(struct fragment)*fragments);
    flag = read_fragment_matrix(fragmentfile, Flist, fragments);

    if (MAX_IS != -1){
        // we are going to filter out some insert sizes
        new_fragments = 0;
        new_Flist = (struct fragment*) malloc(sizeof(struct fragment)*fragments);
        for (i = 0; i < fragments; i++){
            if (Flist[i].isize < MAX_IS) new_Flist[new_fragments++] = Flist[i];
        }
        Flist = new_Flist;
        fragments = new_fragments;
    }

    if (flag < 0) {
        fprintf_time(stderr, "unable to read fragment matrix file %s \n", fragmentfile);
        return -1;
    }

    // ADD EDGES BETWEEN SNPS
    snps = count_variants_vcf(variantfile);
    if (snps < 0) {
        fprintf_time(stderr, "unable to read variant file %s \n", variantfile);
        return -1;
    }
    snpfrag = (struct SNPfrags*) malloc(sizeof(struct SNPfrags)*snps);
    update_snpfrags(Flist, fragments, snpfrag, snps, &components);
    detect_long_reads(Flist, fragments);

    // 10/25/2014, edges are only added between adjacent nodes in each fragment
    // and used for determining connected components...
    for (i = 0; i < snps; i++) snpfrag[i].elist = (struct edge*) malloc(sizeof(struct edge)*(snpfrag[i].edges+1));
    if (LONG_READS == 0){
        add_edges(Flist, fragments, snpfrag, snps, &components);
    } else if (LONG_READS >= 1){
        add_edges_fosmids(Flist, fragments, snpfrag, snps, &components);
    }
    for (i = 0; i < snps; i++) snpfrag[i].telist = (struct edge*) malloc(sizeof(struct edge)*(snpfrag[i].edges+1));

    // this considers only components with at least two nodes
    fprintf_time(stderr, "fragments %d snps %d component(blocks) %d\n", fragments, snps, components);

    // BUILD COMPONENT LIST
    clist = (struct BLOCK*) malloc(sizeof(struct BLOCK)*components);
    generate_clist_structure(Flist, fragments, snpfrag, snps, components, clist);

    // READ VCF FILE
    read_vcffile(variantfile, snpfrag, snps);

    // INITIALIZE RANDOM HAPLOTYPES
    HAP1 = (char*) malloc(snps + 1);
    for (i = 0; i < snps; i++) {
        if (snpfrag[i].frags == 0 || (SNVS_BEFORE_INDELS && (strlen(snpfrag[i].allele0) != 1 || strlen(snpfrag[i].allele1) != 1))) {
            HAP1[i] = '-';
        } else if (drand48() < 0.5) {
            HAP1[i] = '0';
        } else {
            HAP1[i] = '1';
        }
    }

    // for each block, we maintain best haplotype solution under MFR criterion
    // compute the component-wise score for 'initHAP' haplotype
    miscalls = 0;
    bestscore = 0;
    for (k = 0; k < components; k++) {
        clist[k].SCORE = 0;
        clist[k].bestSCORE = 0;
        for (i = 0; i < clist[k].frags; i++) {
            update_fragscore(Flist, clist[k].flist[i], HAP1);
            clist[k].SCORE += Flist[clist[k].flist[i]].currscore;
        }
        clist[k].bestSCORE = clist[k].SCORE;
        bestscore += clist[k].bestSCORE;
        miscalls += clist[k].SCORE;
    }
    fprintf_time(stderr, "processed fragment file and variant file: fragments %d variants %d\n", fragments, snps);

    int MAXIS = -1;
    if (HIC){
        // determine the probability of an h-trans interaction for read
        for (i = 0; i < fragments; i++){
            Flist[i].htrans_prob = -80;
            if (Flist[i].isize > MAXIS) MAXIS = Flist[i].isize;
        }
        HTRANS_MAXBINS = MAXIS/HTRANS_BINSIZE + 1;
    } else {
        HTRANS_MAXBINS = 0;
    }

    // read in file with estimated probabilities of Hi-C h-trans interactions with distance
    if (strcmp(HTRANS_DATA_INFILE, "None") != 0){
        int num_bins = count_htrans_bins(HTRANS_DATA_INFILE);
        float* htrans_probs = (float*) malloc(sizeof(float) * num_bins);
        read_htrans_file(HTRANS_DATA_INFILE, htrans_probs, num_bins);
        for (i = 0; i < fragments; i++){
            Flist[i].htrans_prob = log10(htrans_probs[Flist[i].isize / HTRANS_BINSIZE]);
        }
        free(htrans_probs);
    }

    slist = (int*) malloc(sizeof(int)*snps);
    OLD_HIC_LL_SCORE = bestscore;
    for (hic_iter = 0; hic_iter < MAX_HIC_EM_ITER; hic_iter++){
        if (VERBOSE) fprintf_time(stdout, "HIC ITER %d\n", hic_iter);
        for (k = 0; k < components; k++){
            clist[k].iters_since_improvement = 0;
        }
        for (i = 0; i < snps; i++){
            snpfrag[i].post_hap = 0;
        }
        // RUN THE MAX_CUT ALGORITHM ITERATIVELY TO IMPROVE LIKELIHOOD
        for (iter = 0; iter < MAXITER; iter++) {
            if (VERBOSE) fprintf_time(stdout, "PHASING ITER %d\n", iter);
            converged_count = 0;
            for (k = 0; k < components; k++){
                if (VERBOSE && iter == 0)
                    fprintf_time(stdout, "component %d length %d phased %d %d...%d\n", k, clist[k].length, clist[k].phased, clist[k].offset, clist[k].lastvar);
                if (clist[k].SCORE > 0)
                    converged_count += evaluate_cut_component(Flist, snpfrag, clist, k, slist, HAP1);
                else converged_count++;
            }
            if (converged_count == components) {
                //fprintf(stdout, "Haplotype assembly terminated early because no improvement seen in blocks after %d iterations\n", CONVERGE);
                break;
            }
        }

        // H-TRANS ESTIMATION FOR HIC
        if (MAX_HIC_EM_ITER > 1){
            // Possibly break if we're done improving
            HIC_LL_SCORE = 0;
            for (k = 0; k < components; k++){
                HIC_LL_SCORE += clist[k].bestSCORE;
            }
            if (HIC_LL_SCORE >= OLD_HIC_LL_SCORE){
                break;
            }
            OLD_HIC_LL_SCORE = HIC_LL_SCORE;

            likelihood_pruning(snps, Flist, snpfrag, HAP1, 0); // prune for only very high confidence SNPs
            // estimate the h-trans probabilities for the next round
            estimate_htrans_probs(Flist, fragments, HAP1, snpfrag);
        }
    }

    // BLOCK SPLITTING
    new_components = components;
    if (SPLIT_BLOCKS){
        split_count = 0;
        for (k = 0; k < components; k++){
            // attempt to split block
            split_count += split_block(HAP1, clist, k, Flist, snpfrag, &new_components);
        }
        if (split_count > 0){
            // regenerate clist if necessary
            free(clist);
            clist = (struct BLOCK*) malloc(sizeof(struct BLOCK)*new_components);
            generate_clist_structure(Flist, fragments, snpfrag, snps, new_components, clist);
        }
        components = new_components;
    } else if (ERROR_ANALYSIS_MODE && !HIC){
        for (k = 0; k < components; k++){
            // run split_block but don't actually split, just get posterior probabilities
            split_block(HAP1, clist, k, Flist, snpfrag, &new_components);
        }
    }

    // PRUNE SNPS
    if (!SKIP_PRUNE){
        discrete_pruning(snps, fragments, Flist, snpfrag, HAP1);
        likelihood_pruning(snps, Flist, snpfrag, HAP1, CALL_HOMOZYGOUS);
    }

    // PRINT OUTPUT FILE
    fprintf_time(stderr, "OUTPUTTING PRUNED HAPLOTYPE ASSEMBLY TO FILE %s\n", outputfile);
    print_hapfile(clist, components, HAP1, Flist, fragments, snpfrag, variantfile, miscalls, outputfile);
    char assignfile[4096];
    sprintf(assignfile, "%s.fragments", outputfile);
    if (OUTPUT_RH_ASSIGNMENTS == 1) fragment_assignments(Flist, fragments, snpfrag, HAP1, assignfile); // added 03/10/2018 to output read-haplotype assignments
    char outvcffile[4096];
    sprintf(outvcffile, "%s.phased.VCF", outputfile);
    if (OUTPUT_VCF == 1) {
        fprintf_time(stderr, "OUTPUTTING PHASED VCF TO FILE %s\n", outvcffile);
        output_vcf(variantfile, snpfrag, snps, HAP1, Flist, fragments, outvcffile, 0);
    }

    // FREE UP MEMORY
    for (i = 0; i < snps; i++) free(snpfrag[i].elist);
    for (i = 0; i < snps; i++) free(snpfrag[i].telist);
    component = 0;
    for (i = 0; i < snps; i++) {
        free(snpfrag[i].flist);
        free(snpfrag[i].alist);
        free(snpfrag[i].jlist);
        free(snpfrag[i].klist);
        if (snpfrag[i].component == i && snpfrag[i].csize > 1) { // root node of component
            free(clist[component].slist);
            component++;
        }
    }
    for (i = 0; i < components; i++) free(clist[i].flist);
    free(snpfrag);
    free(clist);
    free(Flist);
    return 0;
}
bool RoutePlanner::solve(const AGeoPoint& origin, const AGeoPoint& destination,
                         const RoutePlannerConfig& config, const short h_ceiling)
{
  on_solve(origin, destination);
  rpolars_route.set_config(config, std::max(destination.altitude, origin.altitude), h_ceiling);
  rpolars_reach.set_config(config, std::max(destination.altitude, origin.altitude), h_ceiling);
  m_reach_polar_mode = config.reach_polar_mode;

  {
    const AFlatGeoPoint s_origin(task_projection.project(origin), origin.altitude);
    const AFlatGeoPoint s_destination(task_projection.project(destination), destination.altitude);

    if (!(s_origin == origin_last) || !(s_destination == destination_last))
      dirty = true;

    if (is_trivial())
      return false;

    dirty = false;
    origin_last = s_origin;
    destination_last = s_destination;

    h_min = std::min(s_origin.altitude, s_destination.altitude);
    h_max = rpolars_route.cruise_altitude;
  }

  solution_route.clear();
  solution_route.push_back(origin);
  solution_route.push_back(destination);

  if (!rpolars_route.terrain_enabled() && !rpolars_route.airspace_enabled())
    return false; // trivial

  m_search_hull.clear();
  m_search_hull.push_back(SearchPoint(origin_last, task_projection));

  RoutePoint start = origin_last;
  m_astar_goal = destination_last;

  RouteLink e_test(start, m_astar_goal, task_projection);
  if (e_test.is_short())
    return false;
  if (!rpolars_route.achievable(e_test))
    return false;

  count_dij = 0;
  count_airspace = 0;
  count_terrain = 0;
  count_supressed = 0;

  bool retval = false;
  m_planner.restart(start);

  unsigned best_d = UINT_MAX;

  if (verbose) {
    printf("# goal node (%d,%d,%d)\n", m_astar_goal.Longitude, m_astar_goal.Latitude, m_astar_goal.altitude);
    printf("# start node (%d,%d,%d)\n", start.Longitude, start.Latitude, start.altitude);
  }

  while (!m_planner.empty()) {
    const RoutePoint node = m_planner.pop();

    if (verbose > 1) {
      printf("# processing node (%d,%d,%d) %d,%d q size %d\n",
             node.Longitude, node.Latitude, node.altitude,
             m_planner.get_node_value(node).g,
             m_planner.get_node_value(node).h,
             m_planner.queue_size());
    }

    h_min = std::min(h_min, node.altitude);
    h_max = std::max(h_max, node.altitude);

    bool is_final = (node == m_astar_goal);
    if (is_final) {
      if (!retval)
        best_d = UINT_MAX;
      retval = true;
    }

    if (is_final) { // @todo: allow fallback if failed
      // copy improving solutions
      Route this_solution;
      unsigned d = find_solution(node, this_solution);
      if (d < best_d) {
        best_d = d;
        solution_route = this_solution;
      }
    }

    if (retval)
      break; // want top solution only

    // shoot for final
    RouteLink e(node, m_astar_goal, task_projection);
    if (set_unique(e))
      add_edges(e);

    while (!m_links.empty()) {
      add_edges(m_links.front());
      m_links.pop();
    }
  }
  count_unique = m_unique.size();

  if (retval && verbose) {
    printf("# solved with %d intermediate points\n", (int)(solution_route.size() - 2));
  }

  if (retval) {
    // correct solution for rounding
    assert(solution_route.size() >= 2);
    for (unsigned i = 0; i < solution_route.size(); ++i) {
      FlatGeoPoint p(task_projection.project(solution_route[i]));
      if (p == origin_last) {
        solution_route[i] = AGeoPoint(origin, solution_route[i].altitude);
      } else if (p == destination_last) {
        solution_route[i] = AGeoPoint(destination, solution_route[i].altitude);
      }
    }
  } else {
    solution_route.clear();
    solution_route.push_back(origin);
    solution_route.push_back(destination);
  }

  m_planner.clear();
  m_unique.clear();
  // m_search_hull.clear();

  return retval;
}