/*
 * Strip redundant parentheses from the token list: any WDB_TOK_TREE
 * operand directly enclosed by a WDB_TOK_LPAREN/WDB_TOK_RPAREN pair
 * has both paren tokens removed and freed.
 *
 * Returns:
 *   1  exactly one token remains (reduction is complete)
 *  -1  the token list is empty (empty tree)
 *   0  more reduction work remains
 */
HIDDEN int wdb_do_paren(struct bu_list *hp) {
    struct tokens *tok;

    for (BU_LIST_FOR(tok, tokens, hp)) {
	struct tokens *prev, *next;

	if (tok->type != WDB_TOK_TREE)
	    continue;

	/* NOTE(review): when tok is first/last, prev/next is the list
	 * head cast to a tokens pointer and its ->type read falls past
	 * the bu_list head -- long-standing pattern in this file,
	 * shared with wdb_do_inter()/wdb_do_union_subtr(). */
	prev = BU_LIST_PREV(tokens, &tok->l);
	next = BU_LIST_NEXT(tokens, &tok->l);

	if (prev->type !=WDB_TOK_LPAREN || next->type != WDB_TOK_RPAREN)
	    continue;

	/* this is an eligible operand surrounded by parens */
	BU_LIST_DEQUEUE(&next->l);
	bu_free((char *)next, "next");
	BU_LIST_DEQUEUE(&prev->l);
	bu_free((char *)prev, "prev");
    }

    if (hp->forw == hp->back && hp->forw != hp)
	return 1;	/* done */
    else if (BU_LIST_IS_EMPTY(hp))
	return -1;	/* empty tree!!!! */
    else
	return 0;	/* more to do */
}
/*
 * Tcl deletion callback for a command-history object: free every saved
 * command on the history list, then the object's own storage.
 *
 * clientData is the struct bu_cmdhist_obj registered with Tcl.
 */
HIDDEN void cho_deleteProc(ClientData clientData) {
    struct bu_cmdhist_obj *chop = (struct bu_cmdhist_obj *)clientData;
    struct bu_cmdhist *curr, *next;

    /* Free the list of commands.  Walk with a saved "next" pointer so
     * each node can be dequeued and freed safely.  (The previous code
     * re-read the list head at the top of every iteration, which made
     * its trailing "curr = next" a dead store and obscured the
     * traversal; behavior is unchanged here.) */
    curr = BU_LIST_NEXT(bu_cmdhist, &chop->cho_head.l);
    while (BU_LIST_NOT_HEAD(curr, &chop->cho_head.l)) {
	next = BU_LIST_PNEXT(bu_cmdhist, curr);
	bu_vls_free(&curr->h_command);
	BU_LIST_DEQUEUE(&curr->l);
	bu_free((genptr_t)curr, "cho_deleteProc: curr");
	curr = next;
    }

    bu_vls_free(&chop->cho_name);
    bu_vls_free(&chop->cho_head.h_command);
    BU_LIST_DEQUEUE(&chop->l);
    BU_PUT(chop, struct bu_cmdhist_obj);
}
/* * R T _ N U R B _ B E Z I E R * * Given a single snurb, if it is in Bezier form, * duplicate the snurb, and enqueue it on the bezier_hd list. * If the original snurb is NOT in Bezier form, * subdivide it a set of snurbs which are, * each of which are enqueued on the bezier_hd list. * * In either case, the original surface remains untouched. * * Returns - * 0 Surface splitting was done. * 1 Original surface was Bezier, only a copy was done. */ int rt_nurb_bezier(struct bu_list *bezier_hd, const struct face_g_snurb *orig_surf, struct resource *res) { struct face_g_snurb *s; int dir; struct bu_list todo; NMG_CK_SNURB(orig_surf); if ( (dir = rt_bez_check( orig_surf )) == -1) { s = rt_nurb_scopy( orig_surf, res ); BU_LIST_APPEND( bezier_hd, &s->l ); return 1; /* Was already Bezier, nothing done */ } BU_LIST_INIT( &todo ); rt_nurb_s_split( &todo, orig_surf, dir, res ); while ( BU_LIST_WHILE( s, face_g_snurb, &todo ) ) { if ( (dir = rt_bez_check(s)) == -1) { /* This snurb is now a Bezier */ BU_LIST_DEQUEUE( &s->l ); BU_LIST_APPEND( bezier_hd, &s->l ); } else { /* Split, and keep going */ BU_LIST_DEQUEUE( &s->l ); rt_nurb_s_split( &todo, s, dir, res ); rt_nurb_free_snurb(s, res); } } return 0; /* Bezier snurbs on bezier_hd list */ }
/*
 * Reduce every "TREE inter TREE" triple on the token list to a single
 * tree token whose tree is an OP_INTERSECT node.  Works in place:
 * the operator token and left operand token are consumed; the right
 * operand token carries the combined tree.
 */
HIDDEN void wdb_do_inter(struct bu_list *hp) {
    struct tokens *tok;

    for (BU_LIST_FOR(tok, tokens, hp)) {
	struct tokens *prev, *next;
	union tree *tp;

	if (tok->type != WDB_TOK_INTER)
	    continue;

	prev = BU_LIST_PREV(tokens, &tok->l);
	next = BU_LIST_NEXT(tokens, &tok->l);

	if (prev->type !=WDB_TOK_TREE || next->type != WDB_TOK_TREE)
	    continue;

	/* this is an eligible intersection operation */
	BU_ALLOC(tp, union tree);
	RT_TREE_INIT(tp);
	tp->tr_b.tb_op = OP_INTERSECT;
	tp->tr_b.tb_regionp = (struct region *)NULL;
	tp->tr_b.tb_left = prev->tp;
	tp->tr_b.tb_right = next->tp;

	/* consume the operator and left operand; the right operand
	 * token now owns the combined tree */
	BU_LIST_DEQUEUE(&tok->l);
	bu_free((char *)tok, "tok");
	BU_LIST_DEQUEUE(&prev->l);
	bu_free((char *)prev, "prev");
	next->tp = tp;
	tok = next;
    }
}
/*
 * Classify the point (u, v) against a trimming curve (case C of the
 * trimming algorithm).  The curve is decomposed into Bezier segments,
 * each segment is classified (recursing for nested case-C segments),
 * and a Jordan-style crossing count decides inside vs. outside.
 *
 * Returns TRIM_IN, TRIM_OUT, or TRIM_ON.
 */
int rt_process_casec(struct edge_g_cnurb *trim, fastf_t u, fastf_t v) {
    struct edge_g_cnurb * clip;
    int jordan_hit;
    struct bu_list plist;
    int trim_flag = 0;
    int caset;

    /* determine if the u, v values are on the curve */
    if ( rt_nurb_uv_dist(trim, u, v) == TRIM_ON) return TRIM_IN;

    jordan_hit = 0;

    BU_LIST_INIT(&plist);

    /* Bezier curves can be clipped directly; otherwise decompose the
     * trim curve into Bezier segments first. */
    if ( nurb_crv_is_bezier( trim ) )
	rt_clip_cnurb(&plist, trim, u, v);
    else
	nurb_c_to_bezier( &plist, trim );

    while ( BU_LIST_WHILE( clip, edge_g_cnurb, &plist ) ) {
	BU_LIST_DEQUEUE( &clip->l );

	caset = rt_trim_case(clip, u, v);
	trim_flag = 0;
	if ( caset == CASE_B)
	    trim_flag = rt_process_caseb(clip, u, v);
	if ( caset == CASE_C)
	    trim_flag = rt_process_casec(clip, u, v);	/* recurse */

	rt_nurb_free_cnurb( clip );

	if ( trim_flag == TRIM_IN) jordan_hit++;
	if ( trim_flag == TRIM_ON) break;
    }

    /* drain and free any segments left after an early TRIM_ON exit */
    while ( BU_LIST_WHILE( clip, edge_g_cnurb, &plist) ) {
	BU_LIST_DEQUEUE( &clip->l );
	rt_nurb_free_cnurb( clip );
    }

    /* an odd crossing count means the point is inside the curve */
    if ( trim_flag == TRIM_ON)
	return TRIM_ON;
    else if ( jordan_hit & 01 )
	return TRIM_IN;
    else
	return TRIM_OUT;
}
/*
 * Close, unlink, and free every temporary file tracked on the global
 * _bu_tf list, including the list head itself.  No-op if no temp
 * files were ever registered.
 */
static void _bu_close_files() {
    struct _bu_tf_list *popped;

    if (!_bu_tf) {
	return;
    }

    /* close all files, free their nodes, and unlink */
    while (BU_LIST_WHILE(popped, _bu_tf_list, &(_bu_tf->l))) {
	/* BU_LIST_WHILE guarantees popped is non-NULL here; the old
	 * code NULL-checked it only after BU_LIST_DEQUEUE had already
	 * dereferenced it, which was both redundant and misleading. */
	BU_LIST_DEQUEUE(&(popped->l));
	if (popped->fd != -1) {
	    close(popped->fd);
	    popped->fd = -1;
	}
	if (BU_VLS_IS_INITIALIZED(&popped->fn) && bu_vls_addr(&popped->fn)) {
	    unlink(bu_vls_addr(&popped->fn));
	    bu_vls_free(&popped->fn);
	}
	bu_free(popped, "free bu_temp_file node");
    }

    /* free the head */
    if (_bu_tf->fd != -1) {
	close(_bu_tf->fd);
	_bu_tf->fd = -1;
    }
    if (BU_VLS_IS_INITIALIZED(&_bu_tf->fn) && bu_vls_addr(&_bu_tf->fn)) {
	unlink(bu_vls_addr(&_bu_tf->fn));
	bu_vls_free(&_bu_tf->fn);
    }
    bu_free(_bu_tf, "free bu_temp_file head");
}
/**
 * Stub function which will "simulate" a call to a vector shot routine
 * by making one scalar ft_shot() call per ray/solid pair and copying
 * the first resulting segment into the output array.
 */
HIDDEN void
vshot_stub(struct soltab **stp, struct xray **rp, struct seg *segp, int n, struct application *ap)
/* An array of solid pointers */
/* An array of ray pointers */
/* array of segs (results returned) */
/* Number of ray/object pairs */
/* pointer to an application */
{
    register int i;
    register struct seg *tmp_seg;
    struct seg seghead;
    int ret;

    BU_LIST_INIT(&(seghead.l));

    /* go through each ray/solid pair and call a scalar function */
    for (i = 0; i < n; i++) {
	if (stp[i] != 0) {
	    /* skip call if solid table pointer is NULL */
	    /* do scalar call, place results in segp array */
	    ret = -1;
	    if (OBJ[stp[i]->st_id].ft_shot) {
		ret = OBJ[stp[i]->st_id].ft_shot(stp[i], rp[i], ap, &seghead);
	    }
	    if (ret <= 0) {
		/* miss: flag the slot with a NULL soltab */
		segp[i].seg_stp=(struct soltab *) 0;
	    } else {
		/* hit: keep only the first segment of the chain */
		tmp_seg = BU_LIST_FIRST(seg, &(seghead.l));
		BU_LIST_DEQUEUE(&(tmp_seg->l));
		segp[i] = *tmp_seg;	/* structure copy */
		RT_FREE_SEG(tmp_seg, ap->a_resource);
	    }
	}
    }
}
/*
 * Release the storage of every mapped file on the global
 * bu_mapped_file_list whose use count has dropped to zero.  Entries
 * still in use (uses > 0) are left alone.  The whole sweep runs under
 * the BU_SEM_MAPPEDFILE semaphore.
 */
void bu_free_mapped_files(int verbose) {
    struct bu_mapped_file *mp, *next;

    if (UNLIKELY(bu_debug&BU_DEBUG_MAPPED_FILE))
	bu_log("bu_free_mapped_files(verbose=%d)\n", verbose);

    bu_semaphore_acquire(BU_SEM_MAPPEDFILE);

    next = BU_LIST_FIRST(bu_mapped_file, &bu_mapped_file_list);
    while (BU_LIST_NOT_HEAD(next, &bu_mapped_file_list)) {
	BU_CK_MAPPED_FILE(next);
	mp = next;
	/* advance before mp may be dequeued and freed below */
	next = BU_LIST_NEXT(bu_mapped_file, &mp->l);

	if (mp->uses > 0) continue;

	/* Found one that needs to have storage released */
	if (UNLIKELY(verbose || (bu_debug&BU_DEBUG_MAPPED_FILE)))
	    bu_pr_mapped_file("freeing", mp);

	BU_LIST_DEQUEUE(&mp->l);

	/* If application pointed mp->apbuf at mp->buf, break that
	 * association so we don't double-free the buffer.
	 */
	if (mp->apbuf == mp->buf) mp->apbuf = (void *)NULL;

#ifdef HAVE_SYS_MMAN_H
	if (mp->is_mapped) {
	    int ret;
	    bu_semaphore_acquire(BU_SEM_SYSCALL);
	    ret = munmap(mp->buf, (size_t)mp->buflen);
	    bu_semaphore_release(BU_SEM_SYSCALL);
	    if (UNLIKELY(ret < 0))
		perror("munmap");
	    /* XXX How to get this chunk of address space back to malloc()? */
	} else
#endif
	{
	    bu_free(mp->buf, "bu_mapped_file.buf[]");
	}
	mp->buf = (void *)NULL;		/* sanity */
	bu_free((void *)mp->name, "bu_mapped_file.name");
	if (mp->appl) bu_free((void *)mp->appl, "bu_mapped_file.appl");
	bu_free((void *)mp, "struct bu_mapped_file");
    }
    bu_semaphore_release(BU_SEM_MAPPEDFILE);
}
/*
 * Drain a pc_pc_set: feed every parameter and constraint into the
 * variable/constraint set (vcset), parsing expression-typed entries
 * with the boost.spirit grammar, then free each node and its vls
 * storage.  The pcs lists are empty when this returns.
 */
void Parser::parse(struct pc_pc_set *pcs) {
    /*Iterate through the parameter set first*/
    struct pc_param *par;
    struct pc_constrnt *con;

    while (BU_LIST_WHILE(par, pc_param, &(pcs->ps->l))) {
	name.clear();
	//std::cout<<"Parameter expression Input: "<<(char *) bu_vls_addr(&(par->name))<<std::endl;
	if (par->ctype == PC_DB_BYEXPR) {
	    /* expression-typed parameter: run it through the grammar */
	    boost::spirit::classic::parse_info<> p_info = \
		boost::spirit::classic::parse(\
			(char *) bu_vls_addr(&(par->data.expression)), \
			*var_gram, boost::spirit::classic::space_p);
	    if (p_info.full) {
		//vcset.pushVar();
	    } else {
		std::cout << "Error during Variable expression parsing\n";
	    }
	    bu_vls_free(&(par->data.expression));
	} else {
	    /* plain typed parameter: register it directly */
	    vcset.addParameter((char *) bu_vls_addr(&(par->name)), \
		    par->dtype, par->data.ptr);
	}
	bu_vls_free(&(par->name));
	BU_LIST_DEQUEUE(&(par->l));
	bu_free(par, "free parameter");
    }
    while (BU_LIST_WHILE(con, pc_constrnt, &(pcs->cs->l))) {
	/* NOTE(review): expression-typed constraints are only freed
	 * here, never parsed -- the grammar call below is commented
	 * out; confirm this is intentional. */
	if (con->ctype == PC_DB_BYEXPR) {
	    bu_vls_free(&(con->data.expression));
	} else if (con->ctype == PC_DB_BYSTRUCT) {
	    //std::cout << "Constraint by Struct -> \n";
	    vcset.addConstraint(con);
	    bu_free(con->args, "free argument array");
	}
	/*boost::spirit::classic::parse((char *) bu_vls_addr(&(con->name)), *con_gram, boost::spirit::space_p);*/
	bu_vls_free(&(con->name));
	BU_LIST_DEQUEUE(&(con->l));
	bu_free(con, "free constraint");
    }
}
/*
 * Tcl deletion callback: shut down the framebuffer and release all
 * storage owned by the framebuffer object.
 */
HIDDEN void
fbo_deleteProc(ClientData clientData)
{
    struct fb_obj *fbop;

    fbop = (struct fb_obj *)clientData;

    /* close framebuffer */
    fb_close(fbop->fbo_fbs.fbs_fbp);

    bu_vls_free(&fbop->fbo_name);
    BU_LIST_DEQUEUE(&fbop->l);
    bu_free((genptr_t)fbop, "fbo_deleteProc: fbop");
}
/**
 * R T _ F R E E _ S O L T A B
 *
 * Decrement use count on soltab structure.  If no longer needed,
 * release associated storage, and free the structure.
 *
 * This routine semaphore protects against other copies of itself
 * running in parallel, and against other routines (such as
 * rt_find_identical_solid()) which might also be modifying the linked
 * list heads.
 *
 * Called by -
 *	db_free_tree()
 *	rt_clean()
 *	rt_gettrees()
 *	rt_kill_deal_solid_refs()
 */
void
rt_free_soltab(struct soltab *stp)
{
    int hash;

    RT_CK_SOLTAB(stp);
    if ( stp->st_id < 0 )
	bu_bomb("rt_free_soltab: bad st_id");
    hash = db_dirhash(stp->st_dp->d_namep);

    ACQUIRE_SEMAPHORE_TREE(hash);	/* start critical section */
    if ( --(stp->st_uses) > 0 )  {
	/* still referenced elsewhere; just drop the count */
	RELEASE_SEMAPHORE_TREE(hash);
	return;
    }
    /* both list removals must happen inside the critical section */
    BU_LIST_DEQUEUE( &(stp->l2) );	/* remove from st_dp->d_use_hd list */
    BU_LIST_DEQUEUE( &(stp->l) );	/* uses rti_solidheads[] */
    RELEASE_SEMAPHORE_TREE(hash);	/* end critical section */

    if ( stp->st_aradius > 0 )  {
	/* per-primitive private storage */
	stp->st_meth->ft_free( stp );
	stp->st_aradius = 0;
    }
    if ( stp->st_matp )  bu_free( (char *)stp->st_matp, "st_matp");
    stp->st_matp = (matp_t)0;	/* Sanity */

    bu_ptbl_free(&stp->st_regions);

    stp->st_dp = DIR_NULL;		/* Sanity */

    if ( stp->st_path.magic )  {
	RT_CK_FULL_PATH( &stp->st_path );
	db_free_full_path( &stp->st_path );
    }

    bu_free( (char *)stp, "struct soltab" );
}
/*
 * Release every token remaining on the token list: tree tokens have
 * their trees freed via db_free_tree(), and every token node itself
 * is freed.  The list is empty on return.
 */
HIDDEN void wdb_free_tokens(struct bu_list *hp) {
    struct tokens *tok;

    BU_CK_LIST_HEAD(hp);
    while (BU_LIST_WHILE(tok, tokens, hp)) {
	BU_LIST_DEQUEUE(&tok->l);
	if (tok->type == WDB_TOK_TREE) {
	    db_free_tree(tok->tp, &rt_uniresource);
	}
	/* fix: the token node itself was previously leaked --
	 * it was dequeued but never freed */
	bu_free((char *)tok, "tok");
    }
}
/*
 * Remove from the hook list every entry whose function AND client
 * data both match the given pair.  Entries are freed as they are
 * removed.
 */
void bu_delete_hook(struct bu_hook_list *hlp, bu_hook_t func, genptr_t clientdata) {
    struct bu_hook_list *node;

    for (BU_LIST_FOR(node, bu_hook_list, &hlp->l)) {
	if (node->hookfunc != func || node->clientdata != clientdata)
	    continue;

	{
	    /* back up one entry so the loop advance lands on the
	     * element that followed the one we are deleting */
	    struct bu_hook_list *prior = BU_LIST_PLAST(bu_hook_list, node);

	    BU_LIST_DEQUEUE(&(node->l));
	    bu_free((genptr_t)node, "bu_delete_hook");
	    node = prior;
	}
    }
}
/*
 * Reduce every "TREE union TREE" / "TREE subtr TREE" triple on the
 * token list to a single tree token (OP_UNION or OP_SUBTRACT node).
 * Works in place: the operator and left-operand tokens are consumed
 * and the right-operand token carries the combined tree.
 */
HIDDEN void do_union_subtr(struct bu_list *hp) {
    struct tokens *tok;

    for (BU_LIST_FOR(tok, tokens, hp)) {
	struct tokens *prev, *next;
	union tree *tp;

	if (tok->type != TOK_UNION && tok->type != TOK_SUBTR)
	    continue;

	prev = BU_LIST_PREV(tokens, &tok->l);
	next = BU_LIST_NEXT(tokens, &tok->l);

	if (prev->type !=TOK_TREE || next->type != TOK_TREE)
	    continue;

	/* this is an eligible operation */
	BU_ALLOC(tp, union tree);
	RT_TREE_INIT(tp);
	if (tok->type == TOK_UNION)
	    tp->tr_b.tb_op = OP_UNION;
	else
	    tp->tr_b.tb_op = OP_SUBTRACT;
	tp->tr_b.tb_regionp = (struct region *)NULL;
	tp->tr_b.tb_left = prev->tp;
	tp->tr_b.tb_right = next->tp;

	/* consume operator and left operand; right operand token
	 * now owns the combined tree */
	BU_LIST_DEQUEUE(&tok->l);
	bu_free((char *)tok, "tok");
	BU_LIST_DEQUEUE(&prev->l);
	bu_free((char *)prev, "prev");
	next->tp = tp;
	tok = next;
    }
}
HIDDEN void wdb_do_union_subtr(struct bu_list *hp) { struct tokens *tok; for (BU_LIST_FOR(tok, tokens, hp)) { struct tokens *prev, *next; union tree *tp; if (tok->type != WDB_TOK_UNION && tok->type != WDB_TOK_SUBTR) continue; prev = BU_LIST_PREV( tokens, &tok->l ); next = BU_LIST_NEXT( tokens, &tok->l ); if (prev->type !=WDB_TOK_TREE || next->type != WDB_TOK_TREE) continue; /* this is an eligible operation */ tp = (union tree *)bu_malloc( sizeof( union tree ), "tp" ); tp->magic = RT_TREE_MAGIC; if (tok->type == WDB_TOK_UNION) tp->tr_b.tb_op = OP_UNION; else tp->tr_b.tb_op = OP_SUBTRACT; tp->tr_b.tb_regionp = (struct region *)NULL; tp->tr_b.tb_left = prev->tp; tp->tr_b.tb_right = next->tp; BU_LIST_DEQUEUE(&tok->l); bu_free((char *)tok, "tok"); BU_LIST_DEQUEUE(&prev->l); bu_free((char *)prev, "prev"); next->tp = tp; tok = next; } }
/*
 * Shader teardown: drain the cached region database internals hanging
 * off the gauss_specific structure, then release the structure itself.
 */
HIDDEN void
gauss_free(void *cp)
{
    struct gauss_specific *gauss_sp = (struct gauss_specific *)cp;
    struct reg_db_internals *dbint;

    while (BU_LIST_WHILE(dbint, reg_db_internals, &gauss_sp->dbil)) {
	BU_LIST_DEQUEUE(&(dbint->l));
	bu_free(dbint->ip.idb_ptr, "internal ptr");
	bu_free((void *)dbint, "gauss reg_db_internals");
    }

    BU_PUT(cp, struct gauss_specific);
}
/**
 * Free the saved rays: for every grid cell, drain and free the
 * partition list hanging off the saved ray, free the ray head node,
 * and finally free the ray pointer array itself.
 */
void free_rays(struct fitness_state *fstate) {
    int i;
    struct part *p;

    for (i = 0; i < fstate->res[X] * fstate->res[Y]; i++) {
	if (fstate->ray[i] == NULL)
	    continue;
	while (BU_LIST_WHILE(p, part, &fstate->ray[i]->l)) {
	    BU_LIST_DEQUEUE(&p->l);
	    bu_free(p, "part");
	}
	/* fix: the head node was mislabeled "part" in the free call,
	 * which made memory-debug output misleading */
	bu_free(fstate->ray[i], "fstate->ray[i]");
    }
    bu_free(fstate->ray, "fstate->ray");
}
/*
 * Empty a vlist: free every bn_vlist chunk on the list.  If the list
 * head was never initialized, simply initialize it and return.
 */
void
bn_vlist_cleanup(struct bu_list *hd)
{
    struct bn_vlist *vlp;

    if (!BU_LIST_IS_INITIALIZED(hd)) {
	/* never used -- establish an empty list and leave */
	BU_LIST_INIT(hd);
	return;
    }

    while (BU_LIST_NON_EMPTY(hd)) {
	vlp = BU_LIST_FIRST(bn_vlist, hd);
	BN_CK_VLIST(vlp);
	BU_LIST_DEQUEUE(&(vlp->l));
	bu_free((char *)vlp, "bn_vlist");
    }
}
/*
 * Decompose a NURB curve into Bezier segments by repeatedly splitting
 * a working copy at each interior knot value.  Every resulting Bezier
 * segment is appended to clist.  The input curve is not modified.
 */
void nurb_c_to_bezier(struct bu_list *clist, struct edge_g_cnurb *crv) {
    fastf_t knot_min, knot_max;
    int i;
    struct edge_g_cnurb *crv1, *crv_copy;
    int done;

    /* make a copy of original curve */
    crv_copy = rt_nurb_crv_copy( crv );

    /* split curve at each knot value */
    done = 0;
    while ( !done ) {
	fastf_t split;

	knot_min = crv_copy->k.knots[0];
	knot_max = crv_copy->k.knots[crv_copy->k.k_size-1];

	/* find the first interior knot strictly between the end knots */
	split = MAX_FASTF;
	for ( i=1; i<crv_copy->k.k_size-1; i++ ) {
	    if ( crv_copy->k.knots[i] != knot_min && crv_copy->k.knots[i] != knot_max ) {
		split = crv_copy->k.knots[i];
		break;
	    }
	}
	if ( split == MAX_FASTF ) {
	    /* no interior knots left: the copy is a single Bezier
	     * segment, keep it and stop */
	    done = 1;
	    BU_LIST_APPEND( clist, &crv_copy->l );
	    break;
	}

	/* split at the knot; crv1 heads a two-element list of the two
	 * halves -- keep the first half, continue with the second */
	crv1 = rt_nurb_c_xsplit( crv_copy, split );

	rt_nurb_free_cnurb( crv_copy );
	crv_copy = BU_LIST_PNEXT( edge_g_cnurb, &crv1->l );
	BU_LIST_DEQUEUE( &crv_copy->l );

	BU_LIST_APPEND( clist, &crv1->l );
    }
}
/*
 * cell-fb entry point: parse arguments, then for each view either
 * report the recorded grid locations or render the cells to the
 * framebuffer (optionally logging the run).
 */
int main(int argc, char **argv) {
    static long ncells;

    /* NOTE(review): the memory-debug flags set here are immediately
     * overwritten on the next line -- a leftover toggle, off by
     * default. */
    bu_debug = BU_DEBUG_MEM_CHECK | BU_DEBUG_MEM_LOG;
    bu_debug = 0;

    BU_LIST_INIT(&(gp_locs.l));
    if (! pars_Argv(argc, argv)) {
	prnt_Usage();
	return 1;
    }
    grid = (Cell *) bu_malloc(sizeof(Cell) * maxcells, "grid");
    if (debug_flag & CFB_DBG_MEM)
	bu_log("grid = %p... %ld cells @ %lu bytes/cell\n",
	       (void *)grid, maxcells, sizeof(Cell));
    do {
	struct locrec *lrp;

	init_Globs();
	if ((ncells = read_Cell_Data()) == 0) {
	    bu_log("cell-fb: failed to read view\n");
	    return 1;
	}
	if (BU_LIST_NON_EMPTY(&(gp_locs.l))) {
	    /* report (and free) each recorded grid location instead
	     * of rendering */
	    while (BU_LIST_WHILE(lrp, locrec, (&(gp_locs.l)))) {
		BU_LIST_DEQUEUE(&(lrp->l));
		bu_log("%g %g %d %d\n", lrp->h, lrp->v,
		       (int) H2SCRX(lrp->h), (int) V2SCRY(lrp->v));
		bu_free((char *) lrp, "location record");
	    }
	} else {
	    bu_log("Displaying %ld cells\n", ncells);
	    if (! display_Cells(ncells)) {
		bu_log("cell-fb: failed to display %ld cells\n", ncells);
		return 1;
	    }
	    if (log_flag)
		log_Run();
	}
    } while ((view_flag == 0) && ! feof(filep) && get_OK());
    return 0;
}
/**
 * B U _ L O G _ D E L E T E _ H O O K
 *
 * Removes the hook matching the function and clientdata parameters from
 * the hook list.  Note that it is not necessarily the active (top) hook.
 *
 * Thin wrapper delegating to bu_delete_hook() on the global
 * bu_log_hook_list.  (A stale "#if 0" copy of the old inline
 * implementation has been removed -- it duplicated bu_delete_hook().)
 */
void
bu_log_delete_hook(bu_hook_t func, genptr_t clientdata)
{
    bu_delete_hook(&bu_log_hook_list, func, clientdata);
}
/*
 * Sort the global doubly-linked frame list ("head") into ascending
 * frame-number order.  An insertion-style bubble pass: whenever an
 * adjacent pair is out of order the later frame is moved before the
 * earlier one and scanning resumes one element back, so each frame
 * sinks to its proper position in a single traversal.
 */
void bubblesort(void) {
    struct frame *a, *b;

    a = (struct frame *)head.forw;
    while (a->l.forw != &head) {
	b = (struct frame *)a->l.forw;
	if (a->number > b->number) {
	    /* b belongs before a: move it, then back up so the
	     * relocated b is re-compared with its new predecessor */
	    BU_LIST_DEQUEUE(&b->l);
	    BU_LIST_INSERT(&a->l, &b->l);
	    if (b->l.back != &head) {
		a = (struct frame *)b->l.back;
	    };
	} else {
	    /* in order: advance */
	    a=(struct frame *)a->l.forw;
	}
    }
}
/** * Free the storage associated with the rt_db_internal version of this * solid. This only effects the in-memory copy. */ void rt_metaball_ifree(struct rt_db_internal *ip) { register struct rt_metaball_internal *metaball; register struct wdb_metaballpt *mbpt; RT_CK_DB_INTERNAL(ip); metaball = (struct rt_metaball_internal*)ip->idb_ptr; RT_METABALL_CK_MAGIC(metaball); if (metaball->metaball_ctrl_head.magic != 0) while (BU_LIST_WHILE(mbpt, wdb_metaballpt, &metaball->metaball_ctrl_head)) { BU_LIST_DEQUEUE(&(mbpt->l)); BU_PUT(mbpt, struct wdb_metaballpt); } bu_free(ip->idb_ptr, "metaball ifree"); ip->idb_ptr = ((void *)0); }
/*
 * Execute (and consume) every queued script record on list sl, in
 * order.  String scripts are fed to interact() directly; file scripts
 * are opened, run, and closed.  Each record is dequeued and freed
 * after it runs, so the list is empty on return.
 */
static void run_scripts(struct bu_list *sl, struct rt_i *rtip) {
    struct script_rec *srp;
    char *cp;
    FILE *fPtr;

    if (nirt_debug & DEBUG_SCRIPTS)
	show_scripts(sl, "before running them");

    while (BU_LIST_WHILE(srp, script_rec, sl)) {
	BU_LIST_DEQUEUE(&(srp->l));
	BU_CKMAG(srp, SCRIPT_REC_MAGIC, "script record");
	cp = bu_vls_addr(&(srp->sr_script));
	if (nirt_debug & DEBUG_SCRIPTS) {
	    bu_log(" Attempting to run %s '%s'\n",
		   (srp->sr_type == READING_STRING) ? "literal" :
		   (srp->sr_type == READING_FILE) ? "file" : "???",
		   cp);
	}
	switch (srp->sr_type) {
	    case READING_STRING:
		interact(READING_STRING, cp, rtip);
		break;
	    case READING_FILE:
		if ((fPtr = fopen(cp, "rb")) == NULL) {
		    bu_log("Cannot open script file '%s'\n", cp);
		} else {
		    interact(READING_FILE, fPtr, rtip);
		    fclose(fPtr);
		}
		break;
	    default:
		bu_exit (1, "%s:%d: script of type %d.  This shouldn't happen\n", __FILE__, __LINE__, srp->sr_type);
	}
	free_script(srp);
    }

    if (nirt_debug & DEBUG_SCRIPTS)
	show_scripts(sl, "after running them");
}
/*
 * Collapse the fully tokenized boolean expression on hp into a single
 * tree by repeatedly reducing intersections, unions/subtractions, and
 * redundant parentheses.  Returns the resulting tree; the final token
 * is dequeued and freed.
 */
HIDDEN union tree *
wdb_eval_bool(struct bu_list *hp) {
    int done=0;
    union tree *final_tree;
    struct tokens *tok;

    /* NOTE(review): wdb_do_paren() can also return -1 for an empty
     * token list, which this loop never exits on -- callers
     * presumably guarantee a non-empty, well-formed expression;
     * confirm. */
    while (done != 1) {
	wdb_do_inter(hp);
	wdb_do_union_subtr(hp);
	done = wdb_do_paren(hp);
    }

    /* exactly one tree token remains: detach and return its tree */
    tok = BU_LIST_NEXT(tokens, hp);
    final_tree = tok->tp;
    BU_LIST_DEQUEUE(&tok->l);
    bu_free((char *)tok, "tok");
    return final_tree;
}
/*
 * Merge adjacent frames on the global list that share the same frame
 * number: the later frame's text is appended to the earlier one, its
 * flags are OR-ed in, and the duplicate node is removed and freed.
 * Assumes the list is already sorted by frame number.
 */
void merge(void) {
    struct frame *cur, *next;

    for (BU_LIST_FOR(cur, frame, &head)) {
	next = BU_LIST_NEXT(frame, &cur->l);
	if (BU_LIST_IS_HEAD(next, &head)) break;
	if (cur->number == next->number) {
	    if (next->text) addtext(cur, next->text);
	    cur->flags |= next->flags;
	    BU_LIST_DEQUEUE(&next->l);
	    if (next->text) bu_free(next->text, "text area");
	    next->text = NULL;
	    /* poison the magic so stale references get caught */
	    next->l.magic = -1;
	    bu_free(next, "struct frame");
	    /* step back so the merged frame is compared against its
	     * new successor on the next iteration */
	    cur = BU_LIST_PREV(frame, &cur->l);
	}
    }
}
/**
 * Given a ray, shoot it at all the relevant parts of the model,
 * (building the HeadSeg chain), and then call rt_boolfinal() to build
 * and evaluate the partition chain.  If the ray actually hit
 * anything, call the application's a_hit() routine with a pointer to
 * the partition chain, otherwise, call the application's a_miss()
 * routine.
 *
 * This is the vectorized variant of rt_shootray(): solids are grouped
 * by primitive type and handed to the per-type ft_vshot() routine in
 * batches (or to vshot_stub() when no vector routine exists).
 *
 * It is important to note that rays extend infinitely only in the
 * positive direction.  The ray is composed of all points P, where
 *
 *	P = r_pt + K * r_dir
 *
 * for K ranging from 0 to +infinity.  There is no looking backwards.
 * The direction vector r_dir must have unit length; this is
 * mandatory, and is not ordinarily checked, in the name of
 * efficiency.
 *
 * Input:  Pointer to an application structure, with these mandatory fields:
 *	a_ray.r_pt	Starting point of ray to be fired
 *	a_ray.r_dir	UNIT VECTOR with direction to fire in (dir cosines)
 *	a_hit		Routine to call when something is hit
 *	a_miss		Routine to call when ray misses everything
 *
 * Returns: whatever the application function returns (an int).
 *
 * NOTE: The application functions may call rt_shootray() recursively.
 * Thus, none of the local variables may be static.
 *
 * An open issue for execution in a PARALLEL environment is locking of
 * the statistics variables.
 */
int rt_vshootray(struct application *ap) {
    struct seg *HeadSeg;
    int ret;
    vect_t inv_dir;	/* inverses of ap->a_ray.r_dir */
    struct bu_bitv *solidbits;	/* bits for all solids shot so far */
    struct bu_ptbl *regionbits;	/* bits for all involved regions */
    char *status;
    struct partition InitialPart;	/* Head of Initial Partitions */
    struct partition FinalPart;	/* Head of Final Partitions */
    int nrays = 1;	/* for now */
    int vlen;
    int id;
    int i;
    struct soltab **ary_stp;	/* array of pointers */
    struct xray **ary_rp;	/* array of pointers */
    struct seg *ary_seg;	/* array of structures */
    struct rt_i *rtip;
    int done;

#define BACKING_DIST (-2.0)	/* mm to look behind start point */
    rtip = ap->a_rt_i;
    RT_AP_CHECK(ap);
    if (!ap->a_resource) {
	ap->a_resource = &rt_uniresource;
    }
    RT_CK_RESOURCE(ap->a_resource);

    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("\n**********mshootray cpu=%d %d, %d lvl=%d (%s)\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?");
	VPRINT("Pnt", ap->a_ray.r_pt);
	VPRINT("Dir", ap->a_ray.r_dir);
    }

    rtip->rti_nrays++;
    if (rtip->needprep)
	rt_prep(rtip);

    /* Allocate dynamic memory */
    vlen = nrays * rtip->rti_maxsol_by_type;
    ary_stp = (struct soltab **)bu_calloc(vlen, sizeof(struct soltab *), "*ary_stp[]");
    ary_rp = (struct xray **)bu_calloc(vlen, sizeof(struct xray *), "*ary_rp[]");
    ary_seg = (struct seg *)bu_calloc(vlen, sizeof(struct seg), "ary_seg[]");

    /**** for each ray, do this ****/

    InitialPart.pt_forw = InitialPart.pt_back = &InitialPart;
    FinalPart.pt_forw = FinalPart.pt_back = &FinalPart;

    HeadSeg = RT_SEG_NULL;

    solidbits = rt_get_solidbitv(rtip->nsolids, ap->a_resource);

    /* reuse a region table from the resource if one is available */
    if (BU_LIST_IS_EMPTY(&ap->a_resource->re_region_ptbl)) {
	BU_ALLOC(regionbits, struct bu_ptbl);
	bu_ptbl_init(regionbits, 7, "rt_shootray() regionbits ptbl");
    } else {
	regionbits = BU_LIST_FIRST(bu_ptbl, &ap->a_resource->re_region_ptbl);
	BU_LIST_DEQUEUE(&regionbits->l);
	BU_CK_PTBL(regionbits);
    }

    /* Compute the inverse of the direction cosines */
    if (!ZERO(ap->a_ray.r_dir[X])) {
	inv_dir[X]=1.0/ap->a_ray.r_dir[X];
    } else {
	inv_dir[X] = INFINITY;
	ap->a_ray.r_dir[X] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Y])) {
	inv_dir[Y]=1.0/ap->a_ray.r_dir[Y];
    } else {
	inv_dir[Y] = INFINITY;
	ap->a_ray.r_dir[Y] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Z])) {
	inv_dir[Z]=1.0/ap->a_ray.r_dir[Z];
    } else {
	inv_dir[Z] = INFINITY;
	ap->a_ray.r_dir[Z] = 0.0;
    }

    /*
     * XXX handle infinite solids here, later.
     */

    /*
     * If ray does not enter the model RPP, skip on.
     * If ray ends exactly at the model RPP, trace it.
     */
    if (!rt_in_rpp(&ap->a_ray, inv_dir, rtip->mdl_min, rtip->mdl_max) || ap->a_ray.r_max < 0.0) {
	rtip->nmiss_model++;
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS model";
	goto out;
    }

    /* For each type of solid to be shot at, assemble the vectors */
    for (id = 1; id <= ID_MAX_SOLID; id++) {
	register int nsol;

	if ((nsol = rtip->rti_nsol_by_type[id]) <= 0) continue;

	/* For each instance of this solid type */
	for (i = nsol-1; i >= 0; i--) {
	    ary_stp[i] = rtip->rti_sol_by_type[id][i];
	    ary_rp[i] = &(ap->a_ray);	/* XXX, sb [ray] */
	    ary_seg[i].seg_stp = SOLTAB_NULL;
	    BU_LIST_INIT(&ary_seg[i].l);
	}
	/* bounding box check */
	/* bit vector per ray check */
	/* mark elements to be skipped with ary_stp[] = SOLTAB_NULL */
	ap->a_rt_i->nshots += nsol;	/* later: skipped ones */
	if (OBJ[id].ft_vshot) {
	    OBJ[id].ft_vshot(ary_stp, ary_rp, ary_seg, nsol, ap);
	} else {
	    vshot_stub(ary_stp, ary_rp, ary_seg, nsol, ap);
	}

	/* set bits for all solids shot at for each ray */

	/* append resulting seg list to input for boolweave */
	for (i = nsol-1; i >= 0; i--) {
	    register struct seg *seg2;

	    if (ary_seg[i].seg_stp == SOLTAB_NULL) {
		/* MISS */
		ap->a_rt_i->nmiss++;
		continue;
	    }
	    ap->a_rt_i->nhits++;

	    /* For now, do it the slow way.  sb [ray] */
	    /* MUST dup it -- all segs have to live till after a_hit() */
	    RT_GET_SEG(seg2, ap->a_resource);
	    *seg2 = ary_seg[i];	/* struct copy */
	    /* rt_boolweave(seg2, &InitialPart, ap); */
	    bu_bomb("FIXME: need to call boolweave here");

	    /* Add seg chain to list of used segs awaiting reclaim */
#if 0
	    /* FIXME: need to use waiting_segs/finished_segs here in
	     * conjunction with rt_boolweave()
	     {
	     register struct seg *seg3 = seg2;
	     while (seg3->seg_next != RT_SEG_NULL)
	     seg3 = seg3->seg_next;
	     seg3->seg_next = HeadSeg;
	     HeadSeg = seg2;
	     }
	    */
#endif
	}
    }

    /*
     * Ray has finally left known space.
     */
    if (InitialPart.pt_forw == &InitialPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISSed all primitives";
	goto freeup;
    }

    /*
     * All intersections of the ray with the model have been computed.
     * Evaluate the boolean trees over each partition.
     */
    done = rt_boolfinal(&InitialPart, &FinalPart, BACKING_DIST, INFINITY, regionbits, ap, solidbits);

    if (done > 0) goto hitit;

    if (FinalPart.pt_forw == &FinalPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS bool";
	goto freeup;
    }

    /*
     * Ray/model intersections exist.  Pass the list to the user's
     * a_hit() routine.  Note that only the hit_dist elements of
     * pt_inhit and pt_outhit have been computed yet.  To compute both
     * hit_point and hit_normal, use the
     *
     * RT_HIT_NORMAL(NULL, hitp, stp, rayp, 0);
     *
     * macro.  To compute just hit_point, use
     *
     * VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir);
     */
hitit:
    if (RT_G_DEBUG&DEBUG_SHOOT) rt_pr_partitions(rtip, &FinalPart, "a_hit()");

    if (ap->a_hit)
	ret = ap->a_hit(ap, &FinalPart, HeadSeg/* &finished_segs */);
    else
	ret = 0;
    status = "HIT";

    /*
     * Processing of this ray is complete.  Free dynamic resources.
     */
freeup:
    {
	register struct partition *pp;

	/* Free up initial partition list */
	for (pp = InitialPart.pt_forw; pp != &InitialPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
	/* Free up final partition list */
	for (pp = FinalPart.pt_forw; pp != &FinalPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
    }

    /* Segs can't be freed until after a_hit() has returned */
#if 0
    /* FIXME: depends on commented out code above */
    if (HeadSeg)
	RT_FREE_SEG_LIST(HeadSeg, ap->a_resource);
#endif

out:
    bu_free((char *)ary_stp, "*ary_stp[]");
    bu_free((char *)ary_rp, "*ary_rp[]");
    bu_free((char *)ary_seg, "ary_seg[]");

    if (solidbits != NULL) {
	bu_bitv_free(solidbits);
    }
    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("----------mshootray cpu=%d %d, %d lvl=%d (%s) %s ret=%d\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?",
	       status, ret);
    }
    return ret;
}
/*
 * rtsrv entry point: parse options, connect to the controlling
 * dispatcher over libpkg, optionally daemonize, then loop forever
 * servicing incoming work packages (matrix/lines/options/gettrees)
 * from the WorkHead queue.
 */
int main(int argc, char **argv) {
    int n;

    if (argc < 2) {
	fprintf(stderr, "%s", srv_usage);
	return 1;
    }
    while (argv[1][0] == '-') {
	if (BU_STR_EQUAL(argv[1], "-d")) {
	    debug++;
	} else if (BU_STR_EQUAL(argv[1], "-x")) {
	    sscanf(argv[2], "%x", (unsigned int *)&RTG.debug);
	    argc--;
	    argv++;
	} else if (BU_STR_EQUAL(argv[1], "-X")) {
	    sscanf(argv[2], "%x", (unsigned int *)&rdebug);
	    argc--;
	    argv++;
	} else {
	    fprintf(stderr, "%s", srv_usage);
	    return 3;
	}
	argc--;
	argv++;
    }
    if (argc != 3 && argc != 4) {
	fprintf(stderr, "%s", srv_usage);
	return 2;
    }

    control_host = argv[1];
    tcp_port = argv[2];

    /* Note that the LIBPKG error logger can not be
     * "bu_log", as that can cause bu_log to be entered recursively.
     * Given the special version of bu_log in use here,
     * that will result in a deadlock in bu_semaphore_acquire(res_syscall)!
     * libpkg will default to stderr via pkg_errlog(), which is fine.
     */
    pcsrv = pkg_open(control_host, tcp_port, "tcp", "", "", pkgswitch, NULL);
    if (pcsrv == PKC_ERROR) {
	fprintf(stderr, "rtsrv: unable to contact %s, port %s\n", control_host, tcp_port);
	return 1;
    }

    if (argc == 4) {
	/* Slip one command to dispatcher */
	(void)pkg_send(MSG_CMD, argv[3], strlen(argv[3])+1, pcsrv);

	/* Prevent chasing the package with an immediate TCP close */
	sleep(1);

	pkg_close(pcsrv);
	return 0;
    }

#ifdef SO_SNDBUF
    /* increase the default send buffer size to 32k since we're
     * sending pixels more than likely.
     */
    {
	int val = 32767;
	n = setsockopt(pcsrv->pkc_fd, SOL_SOCKET, SO_SNDBUF, (const void *)&val, sizeof(val));
	if (n < 0)
	    perror("setsockopt: SO_SNDBUF");
    }
#endif

    if (!debug) {
	/* A fresh process: daemonize and detach from the terminal */
	if (fork())
	    return 0;

	/* Go into our own process group */
	n = bu_process_id();
#ifdef HAVE_SETPGID
	if (setpgid(n, n) < 0)
	    perror("setpgid");
#else
	/* SysV uses setpgrp with no args and it can't fail,
	 * obsoleted by setpgid.
	 */
	setpgrp();
#endif

	/*
	 * Unless controller process has specifically said
	 * that this is an interactive session, e.g., for a demo,
	 * drop to the lowest sensible priority.
	 */
	if (!interactive) {
	    bu_nice_set(19);	/* lowest priority */
	}

	/* Close off the world */
	fclose(stdin);
	fclose(stdout);
	fclose(stderr);

	(void)close(0);
	(void)close(1);
	(void)close(2);

	/* For stdio & perror safety, reopen 0, 1, 2 */
	(void)open("/dev/null", 0);	/* to fd 0 */
	n = dup(0);			/* to fd 1 */
	if (n == -1)
	    perror("dup");
	n = dup(0);			/* to fd 2 */
	if (n == -1)
	    perror("dup");

#if defined(HAVE_SYS_IOCTL_H) && defined(TIOCNOTTY)
	n = open("/dev/tty", 2);
	if (n >= 0) {
	    (void)ioctl(n, TIOCNOTTY, 0);
	    (void)close(n);
	}
#endif
    }

    /* Send our version string */
    if (pkg_send(MSG_VERSION, PROTOCOL_VERSION, strlen(PROTOCOL_VERSION)+1, pcsrv) < 0) {
	fprintf(stderr, "pkg_send MSG_VERSION error\n");
	return 1;
    }
    if (debug)
	fprintf(stderr, "PROTOCOL_VERSION='%s'\n", PROTOCOL_VERSION);

    /*
     * Now that the fork() has been done, it is safe to initialize
     * the parallel processing support.
     */
    avail_cpus = bu_avail_cpus();

    /* Need to set rtg_parallel non_zero here for RES_INIT to work */
    npsw = avail_cpus;
    if (npsw > 1) {
	RTG.rtg_parallel = 1;
    } else
	RTG.rtg_parallel = 0;

    bu_semaphore_init(RT_SEM_LAST);

    bu_log("using %d of %d cpus\n", npsw, avail_cpus);

    /*
     * Initialize the non-parallel memory resource.
     * The parallel guys are initialized after the rt_dirbuild().
     */
    rt_init_resource(&rt_uniresource, MAX_PSW, NULL);
    bn_rand_init(rt_uniresource.re_randptr, MAX_PSW);

    BU_LIST_INIT(&WorkHead);

    for (;;) {
	struct pkg_queue *lp;
	fd_set ifds;
	struct timeval tv;

	/* First, process any packages in library buffers */
	if (pkg_process(pcsrv) < 0) {
	    bu_log("pkg_get error\n");
	    break;
	}

	/* Second, see if any input to read; poll (0s) when work is
	 * already queued, otherwise block for a long time */
	FD_ZERO(&ifds);
	FD_SET(pcsrv->pkc_fd, &ifds);
	tv.tv_sec = BU_LIST_NON_EMPTY(&WorkHead) ? 0L : 9999L;
	tv.tv_usec = 0L;

	if (select(pcsrv->pkc_fd+1, &ifds, (fd_set *)0, (fd_set *)0, &tv) != 0) {
	    n = pkg_suckin(pcsrv);
	    if (n < 0) {
		bu_log("pkg_suckin error\n");
		break;
	    } else if (n == 0) {
		/* EOF detected */
		break;
	    } else {
		/* All is well */
	    }
	}

	/* Third, process any new packages in library buffers */
	if (pkg_process(pcsrv) < 0) {
	    bu_log("pkg_get error\n");
	    break;
	}

	/* Finally, more work may have just arrived, check our list */
	if (BU_LIST_NON_EMPTY(&WorkHead)) {
	    lp = BU_LIST_FIRST(pkg_queue, &WorkHead);
	    BU_LIST_DEQUEUE(&lp->l);
	    switch (lp->type) {
		case MSG_MATRIX:
		    ph_matrix((struct pkg_conn *)0, lp->buf);
		    break;
		case MSG_LINES:
		    ph_lines((struct pkg_conn *)0, lp->buf);
		    break;
		case MSG_OPTIONS:
		    ph_options((struct pkg_conn *)0, lp->buf);
		    break;
		case MSG_GETTREES:
		    ph_gettrees((struct pkg_conn *)0, lp->buf);
		    break;
		default:
		    bu_log("bad list element, type=%d\n", lp->type);
		    return 33;
	    }
	    BU_PUT(lp, struct pkg_queue);
	}
    }

    return 0;	/* bu_exit(0, NULL) */
}
int ged_rmap(struct ged *gedp, int argc, const char *argv[]) { int i; struct directory *dp; struct rt_db_internal intern; struct rt_comb_internal *comb; struct _ged_id_to_names headIdName; struct _ged_id_to_names *itnp; struct _ged_id_names *inp; GED_CHECK_DATABASE_OPEN(gedp, GED_ERROR); GED_CHECK_ARGC_GT_0(gedp, argc, GED_ERROR); /* initialize result */ bu_vls_trunc(gedp->ged_result_str, 0); if (argc != 1) { bu_vls_printf(gedp->ged_result_str, "Usage: %s", argv[0]); return GED_ERROR; } if (db_version(gedp->ged_wdbp->dbip) < 5) { bu_vls_printf(gedp->ged_result_str, "%s is not available prior to version 5 of the .g file format\n", argv[0]); return GED_ERROR; } BU_LIST_INIT(&headIdName.l); /* For all regions not hidden */ for (i = 0; i < RT_DBNHASH; i++) { for (dp = gedp->ged_wdbp->dbip->dbi_Head[i]; dp != RT_DIR_NULL; dp = dp->d_forw) { int found = 0; if (!(dp->d_flags & RT_DIR_REGION) || (dp->d_flags & RT_DIR_HIDDEN)) continue; if (rt_db_get_internal(&intern, dp, gedp->ged_wdbp->dbip, (fastf_t *)NULL, &rt_uniresource) < 0) { bu_vls_printf(gedp->ged_result_str, "%s: Database read error, aborting", argv[0]); return GED_ERROR; } comb = (struct rt_comb_internal *)intern.idb_ptr; /* check to see if the region id or air code matches one in our list */ for (BU_LIST_FOR(itnp, _ged_id_to_names, &headIdName.l)) { if ((comb->region_id == itnp->id) || (comb->aircode != 0 && -comb->aircode == itnp->id)) { /* add region name to our name list for this region */ BU_GET(inp, struct _ged_id_names); bu_vls_init(&inp->name); bu_vls_strcpy(&inp->name, dp->d_namep); BU_LIST_INSERT(&itnp->headName.l, &inp->l); found = 1; break; } } if (!found) { /* create new id_to_names node */ BU_GET(itnp, struct _ged_id_to_names); if (0 < comb->region_id) itnp->id = comb->region_id; else itnp->id = -comb->aircode; BU_LIST_INSERT(&headIdName.l, &itnp->l); BU_LIST_INIT(&itnp->headName.l); /* add region name to our name list for this region */ BU_GET(inp, struct _ged_id_names); bu_vls_init(&inp->name); 
bu_vls_strcpy(&inp->name, dp->d_namep); BU_LIST_INSERT(&itnp->headName.l, &inp->l); } rt_db_free_internal(&intern); } } /* place data in the result string */ while (BU_LIST_WHILE(itnp, _ged_id_to_names, &headIdName.l)) { /* add this id to the list */ bu_vls_printf(gedp->ged_result_str, "%d {", itnp->id); /* start sublist of names associated with this id */ while (BU_LIST_WHILE(inp, _ged_id_names, &itnp->headName.l)) { /* add the this name to this sublist */ bu_vls_printf(gedp->ged_result_str, " %s", bu_vls_addr(&inp->name)); BU_LIST_DEQUEUE(&inp->l); bu_vls_free(&inp->name); BU_PUT(inp, struct _ged_id_names); } bu_vls_printf(gedp->ged_result_str, " } "); /* , itnp->id); */ BU_LIST_DEQUEUE(&itnp->l); BU_PUT(itnp, struct _ged_id_to_names); } return GED_OK; }
/*
 * Intersect a ray (given as the intersection of two planes, plane1 and
 * plane2) with a NURB surface, by recursive subdivision.  Candidate
 * sub-surfaces are kept on a work list; each is clipped against the
 * projected origin until it is either eliminated, split further, or
 * shrunk below uv_tol in both parameters, at which point its parametric
 * midpoint is recorded as a hit.
 *
 * srf     - surface to intersect (left untouched)
 * plane1,
 * plane2  - two planes whose intersection is the ray
 * uv_tol  - parametric size below which a sub-surface counts as a hit
 * res     - resource structure for snurb allocation/free
 * plist   - work list of pending sub-surfaces; pass NULL on the
 *           top-level call (an internal list is used), non-NULL on the
 *           internal recursive calls
 *
 * Returns a singly-linked (->next) list of rt_nurb_uv_hit structs
 * allocated with bu_malloc (caller frees), or NULL if no hits.
 */
struct rt_nurb_uv_hit *
rt_nurb_intersect(const struct face_g_snurb *srf, fastf_t *plane1, fastf_t *plane2, double uv_tol, struct resource *res, struct bu_list *plist)
{
    struct rt_nurb_uv_hit * h;
    struct face_g_snurb * psrf, * osrf;
    int dir, sub;

    point_t vmin, vmax;
    fastf_t u[2], v[2];
    struct bu_list rni_plist;

    NMG_CK_SNURB(srf);

    h = (struct rt_nurb_uv_hit *) 0;
    if (plist == NULL) {
	/* top-level call: use a local work list */
	plist = &rni_plist;
	BU_LIST_INIT(plist);
    }

    /* project the surface to a 2 dimensional problem */
    /* NOTE that this gives a single snurb back, NOT a list */
    psrf = rt_nurb_project_srf(srf, plane2, plane1, res);
    psrf->dir = 1;
    BU_LIST_APPEND(plist, &psrf->l);

    if (RT_G_DEBUG & DEBUG_SPLINE)
	rt_nurb_s_print("srf", psrf);

    /* This list starts out with only a single snurb, but more may be
     * added on as work progresses.
     */
    while (BU_LIST_WHILE(psrf, face_g_snurb, plist)) {
	int flat;

	BU_LIST_DEQUEUE(&psrf->l);
	NMG_CK_SNURB(psrf);
	sub = 0;
	flat = 0;
	dir = psrf->dir;

	/* refine this sub-surface until it is disposed of: flat==1
	 * means done with it (hit recorded or surface eliminated)
	 */
	while (!flat) {
	    fastf_t smin = 0.0, smax = 0.0;

	    sub++;
	    dir = (dir == 0)?1:0;	/* change direction */

	    if (RT_G_DEBUG & DEBUG_SPLINE)
		rt_nurb_s_print("psrf", psrf);

	    rt_nurb_pbound(psrf, vmin, vmax);

	    /* Check for origin to be included in the bounding box */
	    if (!(vmin[0] <= 0.0 && vmin[1] <= 0.0 &&
		  vmax[0] >= 0.0 && vmax[1] >= 0.0)) {
		/* projected ray misses this sub-surface: discard it */
		if (RT_G_DEBUG & DEBUG_SPLINE)
		    bu_log("this srf doesn't include the origin\n");
		flat = 1;
		rt_nurb_free_snurb(psrf, res);
		continue;
	    }

	    /* clip to the origin in the current direction; smin/smax
	     * bracket the surviving parametric interval
	     */
	    rt_nurb_clip_srf(psrf, dir, &smin, &smax);

	    if ((smax - smin) > .8) {
		struct rt_nurb_uv_hit *hp;

		/* Split surf, requeue both sub-surfs at head */
		/* New surfs will have same dir as arg, here */
		if (RT_G_DEBUG & DEBUG_SPLINE)
		    bu_log("splitting this surface\n");
		rt_nurb_s_split(plist, psrf, dir, res);
		rt_nurb_free_snurb(psrf, res);

		/* recurse to drain the augmented work list; hits found
		 * so far (h) were accumulated on earlier iterations of
		 * the outer loop, which only runs at the top level, so
		 * returning hp directly is the full result
		 */
		hp = rt_nurb_intersect(srf, plane1, plane2, uv_tol, res, plist);
		return hp;
	    }
	    if (smin > 1.0 || smax < 0.0) {
		/* clip interval entirely outside [0, 1]: no intersection */
		if (RT_G_DEBUG & DEBUG_SPLINE)
		    bu_log("eliminating this surface (smin=%g, smax=%g)\n", smin, smax);
		flat = 1;
		rt_nurb_free_snurb(psrf, res);
		continue;
	    }

	    /* map the normalized clip interval back into knot space */
	    if (dir == RT_NURB_SPLIT_ROW) {
		smin = (1.0 - smin) * psrf->u.knots[0] +
		    smin * psrf->u.knots[ psrf->u.k_size -1];
		smax = (1.0 - smax) * psrf->u.knots[0] +
		    smax * psrf->u.knots[ psrf->u.k_size -1];
	    } else {
		smin = (1.0 - smin) * psrf->v.knots[0] +
		    smin * psrf->v.knots[ psrf->v.k_size -1];
		smax = (1.0 - smax) * psrf->v.knots[0] +
		    smax * psrf->v.knots[ psrf->v.k_size -1];
	    }

	    /* extract the surviving region, then free the old surface */
	    osrf = psrf;
	    psrf = (struct face_g_snurb *) rt_nurb_region_from_srf(
		osrf, dir, smin, smax, res);

	    psrf->dir = dir;
	    rt_nurb_free_snurb(osrf, res);

	    if (RT_G_DEBUG & DEBUG_SPLINE) {
		bu_log("After call to rt_nurb_region_from_srf() (smin=%g, smax=%g)\n", smin, smax);
		rt_nurb_s_print("psrf", psrf);
	    }

	    u[0] = psrf->u.knots[0];
	    u[1] = psrf->u.knots[psrf->u.k_size -1];

	    v[0] = psrf->v.knots[0];
	    v[1] = psrf->v.knots[psrf->v.k_size -1];

	    if ((u[1] - u[0]) < uv_tol && (v[1] - v[0]) < uv_tol) {
		/* sub-surface shrunk below tolerance in both
		 * parameters: record its midpoint as a hit
		 */
		struct rt_nurb_uv_hit * hit;

		if (RT_G_DEBUG & DEBUG_SPLINE) {
		    fastf_t p1[4], p2[4];
		    int coords;
		    vect_t diff;

		    coords = RT_NURB_EXTRACT_COORDS(srf->pt_type);
		    rt_nurb_s_eval(srf, u[0], v[0], p1);
		    rt_nurb_s_eval(srf, u[1], v[1], p2);

		    if (RT_NURB_IS_PT_RATIONAL(srf->pt_type)) {
			fastf_t inv_w;

			inv_w = 1.0 / p1[coords-1];
			VSCALE(p1, p1, inv_w);

			inv_w = 1.0 / p2[coords-1];
			VSCALE(p2, p2, inv_w);
		    }

		    VSUB2(diff, p1, p2);
		    bu_log("Precision of hit point = %g (%f %f %f) <-> (%f %f %f)\n",
			   MAGNITUDE(diff), V3ARGS(p1), V3ARGS(p2));
		}

		hit = (struct rt_nurb_uv_hit *) bu_malloc(
		    sizeof(struct rt_nurb_uv_hit), "hit");
		hit->next = (struct rt_nurb_uv_hit *)0;
		hit->sub = sub;
		hit->u = (u[0] + u[1])/2.0;
		hit->v = (v[0] + v[1])/2.0;

		/* push onto the front of the hit list */
		if (h == (struct rt_nurb_uv_hit *)0)
		    h = hit;
		else {
		    hit->next = h;
		    h = hit;
		}
		flat = 1;
		rt_nurb_free_snurb(psrf, res);
	    }
	    /* next pass clips along the wider parametric extent
	     * (NOTE: dir is toggled again at the top of the loop)
	     */
	    if ((u[1] - u[0]) > (v[1] - v[0]))
		dir = 1;
	    else dir = 0;
	}
    }

    return (struct rt_nurb_uv_hit *)h;
}