/** * Intersect a ray with a xxx. If an intersection occurs, a struct * seg will be acquired and filled in. * * Returns - * 0 MISS * >0 HIT */ int rt_xxx_shot(struct soltab *stp, struct xray *rp, struct application *ap, struct seg *seghead) { struct xxx_specific *xxx; if (!stp) return -1; RT_CK_SOLTAB(stp); xxx = (struct xxx_specific *)stp->st_specific; if (!xxx) return -1; if (rp) RT_CK_RAY(rp); if (ap) RT_CK_APPLICATION(ap); if (!seghead) return -1; /* the EXAMPLE_NEW_SEGMENT block shows how one might add a new result * if the ray did hit the primitive. the segment values would need to * be adjusted accordingly to match real values instead of -1. */ #ifdef EXAMPLE_NEW_SEGMENT /* allocate a segment */ RT_GET_SEG(segp, ap->a_resource); segp->seg_stp = stp; /* stash a pointer to the primitive */ segp->seg_in.hit_dist = -1; /* XXX set to real distance to entry point */ segp->seg_out.hit_dist = -1; /* XXX set to real distance to exit point */ segp->seg_in.hit_surfno = -1; /* XXX set to a non-negative ID for entry surface */ segp->seg_out.hit_surfno = -1; /* XXX set to a non-negative ID for exit surface */ /* add segment to list of those encountered for this primitive */ BU_LIST_INSERT(&(seghead->l), &(segp->l)); return 2; /* num surface intersections == in + out == 2 */ #endif return 0; /* MISS */ }
/**
 * Dispatch an object's prep through the functab: validate the
 * arguments, look up the entry for stp->st_id, and forward to its
 * ft_prep handler.  Negative returns indicate bad arguments or a
 * missing handler; otherwise the handler's own status is returned.
 */
int
rt_obj_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
{
    int type;
    const struct rt_functab *functab;

    if (!stp || !ip)
	return -1;
    RT_CK_SOLTAB(stp);
    RT_CK_DB_INTERNAL(ip);
    if (rtip)
	RT_CK_RTI(rtip);

    type = stp->st_id;
    if (type < 0)
	return -2;

    functab = &OBJ[type];
    if (!functab)
	return -3;
    if (!functab->ft_prep)
	return -4;

    return functab->ft_prep(stp, ip, rtip);
}
/**
 * Dispatch an object's surface-normal computation through the
 * functab: validate arguments, look up the entry for stp->st_id, and
 * forward to its ft_norm handler.  Returns 0 on success, negative on
 * bad arguments or a missing handler.
 */
int
rt_obj_norm(struct hit *hitp, struct soltab *stp, struct xray *rp)
{
    int type;
    const struct rt_functab *functab;

    if (!hitp || !stp)
	return -1;
    RT_CK_SOLTAB(stp);
    RT_CK_HIT(hitp);
    if (rp)
	RT_CK_RAY(rp);

    type = stp->st_id;
    if (type < 0)
	return -2;

    functab = &OBJ[type];
    if (!functab)
	return -3;
    if (!functab->ft_norm)
	return -4;

    functab->ft_norm(hitp, stp, rp);
    return 0;
}
/** * For a hit on the surface of an METABALL, return the (u, v) * coordinates of the hit point, 0 <= u, v <= 1. * * u = azimuth * v = elevation */ void rt_metaball_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp) { struct rt_metaball_internal *metaball = (struct rt_metaball_internal *)stp->st_specific; vect_t work, pprime; fastf_t r; if (ap) RT_CK_APPLICATION(ap); if (stp) RT_CK_SOLTAB(stp); if (hitp) RT_CK_HIT(hitp); if (!uvp) return; if (!metaball) return; /* stuff stolen from sph */ VSUB2(work, hitp->hit_point, stp->st_center); VSCALE(pprime, work, 1.0/MAGNITUDE(work)); /* Assert that pprime has unit length */ /* U is azimuth, atan() range: -pi to +pi */ uvp->uv_u = bn_atan2(pprime[Y], pprime[X]) * M_1_2PI; if (uvp->uv_u < 0) uvp->uv_u += 1.0; /* * V is elevation, atan() range: -pi/2 to +pi/2, because sqrt() * ensures that X parameter is always >0 */ uvp->uv_v = bn_atan2(pprime[Z], sqrt(pprime[X] * pprime[X] + pprime[Y] * pprime[Y])) * M_1_2PI; /* approximation: r / (circumference, 2 * pi * aradius) */ r = ap->a_rbeam + ap->a_diverge * hitp->hit_dist; uvp->uv_du = uvp->uv_dv = M_1_2PI * r / stp->st_aradius; return; }
int rt_obj_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp) { int id; const struct rt_functab *ft; if (!stp || !hitp || !uvp) return -1; RT_CK_SOLTAB(stp); RT_CK_HIT(hitp); if (ap) RT_CK_APPLICATION(ap); id = stp->st_id; if (id < 0) return -2; ft = &rt_functab[id]; if (!ft) return -3; if (!ft->ft_uv) return -4; ft->ft_uv(ap, stp, hitp, uvp); return 0; }
/**
 * Dispatch an object's ray intersection through the functab: validate
 * arguments, look up the entry for stp->st_id, and forward to its
 * ft_shot handler.  Negative returns indicate bad arguments or a
 * missing handler; otherwise the handler's own status is returned.
 */
int
rt_obj_shot(struct soltab *stp, struct xray *rp, struct application *ap, struct seg *seghead)
{
    int type;
    const struct rt_functab *functab;

    if (!stp || !rp)
	return -1;
    RT_CK_SOLTAB(stp);
    RT_CK_RAY(rp);
    if (ap)
	RT_CK_APPLICATION(ap);

    type = stp->st_id;
    if (type < 0)
	return -2;

    functab = &OBJ[type];
    if (!functab)
	return -3;
    if (!functab->ft_shot)
	return -4;

    return functab->ft_shot(stp, rp, ap, seghead);
}
/** * JRA's tree pretty-printer. Formats the tree compactly into a * dynamically allocated string. Uses recursion and lots of * malloc/free activity. */ char * rt_pr_tree_str(const union tree *tree) { char *left, *right; char *return_str; char op = OP_GUARD; size_t return_length; if (tree == NULL) return bu_strdup("NULL_ptr"); RT_CK_TREE(tree); if (tree->tr_op == OP_UNION || tree->tr_op == OP_SUBTRACT || tree->tr_op == OP_INTERSECT) { char *blankl, *blankr; left = rt_pr_tree_str(tree->tr_b.tb_left); right = rt_pr_tree_str(tree->tr_b.tb_right); switch (tree->tr_op) { case OP_UNION: op = 'u'; break; case OP_SUBTRACT: op = '-'; break; case OP_INTERSECT: op = '+'; break; } return_length = strlen(left) + strlen(right) + 8; return_str = (char *)bu_malloc(return_length, "rt_pr_tree_str: return string"); blankl = strchr(left, ' '); blankr = strchr(right, ' '); if (blankl && blankr) snprintf(return_str, return_length, "(%s) %c (%s)", left, op, right); else if (blankl && !blankr) snprintf(return_str, return_length, "(%s) %c %s", left, op, right); else if (!blankl && blankr) snprintf(return_str, return_length, "%s %c (%s)", left, op, right); else snprintf(return_str, return_length, "%s %c %s", left, op, right); if (tree->tr_b.tb_left->tr_op != OP_DB_LEAF) bu_free((void *)left, "rt_pr_tree_str: left string"); if (tree->tr_b.tb_right->tr_op != OP_DB_LEAF) bu_free((void *)right, "rt_pr_tree_str: right string"); return return_str; } else if (tree->tr_op == OP_DB_LEAF) return bu_strdup(tree->tr_l.tl_name); else if (tree->tr_op == OP_REGION) return db_path_to_string(&tree->tr_c.tc_ctsp->cts_p); else if (tree->tr_op == OP_SOLID) { RT_CK_SOLTAB(tree->tr_a.tu_stp); return bu_strdup(tree->tr_a.tu_stp->st_dp->d_namep); } return bu_strdup("Unknown:tr_op"); }
/**
 * Pretty-print one ray partition into the given vls: the in/out
 * solids, hit distances, flip flags, the individual hit points, the
 * list of primitives contributing segments, and the untrimmed
 * segments spanning the interval, followed by the owning region name
 * when one is set.
 */
void
rt_pr_pt_vls(struct bu_vls *v, const struct rt_i *rtip, register const struct partition *pp)
{
    register const struct soltab *stp;
    register struct seg **segpp;

    RT_CHECK_RTI(rtip);
    RT_CHECK_PT(pp);
    BU_CK_VLS(v);

    bu_log_indent_vls(v);
    bu_vls_printf(v, "%p: PT ", (void *)pp);

    /* entry solid: name, primitive type (ft_name+3 skips a 3-char
     * prefix of the type string -- presumably "ID_"; confirm against
     * the functab), and solid bit number */
    stp = pp->pt_inseg->seg_stp;
    bu_vls_printf(v, "%s (%s#%ld) ", stp->st_dp->d_namep, OBJ[stp->st_id].ft_name+3, stp->st_bit);

    /* exit solid, same format */
    stp = pp->pt_outseg->seg_stp;
    bu_vls_printf(v, "%s (%s#%ld) ", stp->st_dp->d_namep, OBJ[stp->st_id].ft_name+3, stp->st_bit);

    /* distance interval and normal-flip flags */
    bu_vls_printf(v, "(%g, %g)", pp->pt_inhit->hit_dist, pp->pt_outhit->hit_dist);
    if (pp->pt_inflip) bu_vls_strcat(v, " Iflip");
    if (pp->pt_outflip) bu_vls_strcat(v, " Oflip");
    bu_vls_strcat(v, "\n");

    rt_pr_hit_vls(v, " In", pp->pt_inhit);
    rt_pr_hit_vls(v, " Out", pp->pt_outhit);

    /* every primitive that contributed a segment to this partition */
    bu_log_indent_vls(v);
    bu_vls_strcat(v, " Primitives: ");
    for (BU_PTBL_FOR(segpp, (struct seg **), &pp->pt_seglist)) {
	stp = (*segpp)->seg_stp;
	RT_CK_SOLTAB(stp);
	bu_vls_strcat(v, stp->st_dp->d_namep);
	bu_vls_strcat(v, ", ");
    }
    bu_vls_strcat(v, "\n");

    bu_log_indent_vls(v);
    bu_vls_strcat(v, " Untrimmed Segments spanning this interval:\n");

    /* indent the per-segment dump under the heading */
    bu_log_indent_delta(4);
    for (BU_PTBL_FOR(segpp, (struct seg **), &pp->pt_seglist)) {
	RT_CK_SEG(*segpp);
	rt_pr_seg_vls(v, *segpp);
    }
    bu_log_indent_delta(-4);

    if (pp->pt_regionp) {
	RT_CK_REGION(pp->pt_regionp);
	bu_log_indent_vls(v);
	bu_vls_printf(v, " Region: %s\n", pp->pt_regionp->reg_name);
    }
}
/**
 * Template print routine: validates the soltab and its specific data
 * but has nothing of substance to report yet.
 */
void
rt_xxx_print(const struct soltab *stp)
{
    const struct xxx_specific *specific;

    if (!stp)
	return;
    specific = (struct xxx_specific *)stp->st_specific;
    if (!specific)
	return;
    RT_CK_SOLTAB(stp);
}
/**
 * Classify the metaball against a bounding volume.  Not implemented:
 * arguments are validated when present, and 0 is always returned.
 */
int
rt_metaball_class(const struct soltab *stp, const fastf_t *min, const fastf_t *max, const struct bn_tol *tol)
{
    if (stp)
	RT_CK_SOLTAB(stp);
    if (tol)
	BN_CK_TOL(tol);

    /* classification not implemented; every path yields 0 */
    if (!min || !max)
	return 0;
    return 0;
}
/**
 * R T _ P G _ N O R M
 *
 * Nothing to compute here: rt_pg_shot already filled in the hit
 * normal.  This routine only sanity-checks its arguments.
 */
void
rt_pg_norm(struct hit *hitp, struct soltab *stp, struct xray *rp)
{
    if (!hitp)
	return;
    if (!stp)
	return;
    if (!rp)
	return;
    RT_CK_HIT(hitp);
    RT_CK_SOLTAB(stp);
    RT_CK_RAY(rp);
}
/**
 * R T _ P G _ C U R V E
 *
 * Report zero curvature with an arbitrary principal direction
 * orthogonal to the hit normal.
 */
void
rt_pg_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
{
    if (!cvp || !hitp)
	return;
    RT_CK_HIT(hitp);
    if (stp)
	RT_CK_SOLTAB(stp);

    /* both principal curvatures are zero; any tangent works */
    cvp->crv_c1 = 0;
    cvp->crv_c2 = 0;
    bn_vec_ortho(cvp->crv_pdir, hitp->hit_normal);
}
/**
 * Return the curvature of the superellipsoid.
 *
 * Not implemented: arguments are validated and the call is merely
 * announced via bu_log(); *cvp is left untouched.
 */
void
rt_superell_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
{
    if (!cvp || !hitp || !stp)
	return;
    RT_CK_HIT(hitp);
    RT_CK_SOLTAB(stp);

    bu_log("called rt_superell_curve()\n");
}
/**
 * Given ONE ray distance, return the normal and entry/exit point.
 */
void
rt_xxx_norm(struct hit *hitp, struct soltab *stp, struct xray *rp)
{
    struct xxx_specific *xxx;

    /* hitp and rp are dereferenced by VJOIN1 below, so they must be
     * validated along with stp (previously only stp was guarded). */
    if (!stp || !hitp || !rp)
	return;
    xxx = (struct xxx_specific *)stp->st_specific;
    if (!xxx)
	return;
    RT_CK_SOLTAB(stp);

    /* hit point = ray origin + dist * ray direction */
    VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir);
}
/**
 * For a hit on the surface of an SUPERELL, return the (u, v) coordinates
 * of the hit point, 0 <= u, v <= 1.
 * u = azimuth
 * v = elevation
 *
 * Not implemented: arguments are validated and the call is merely
 * announced via bu_log(); *uvp is left untouched.
 */
void
rt_superell_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp)
{
    if (ap)
	RT_CK_APPLICATION(ap);
    if (!stp || !hitp || !uvp)
	return;
    RT_CK_SOLTAB(stp);
    RT_CK_HIT(hitp);

    bu_log("called rt_superell_uv()\n");
}
/**
 * Release the primitive-specific storage hung off the soltab.
 */
void
rt_xxx_free(struct soltab *stp)
{
    struct xxx_specific *specific;

    if (!stp)
	return;
    RT_CK_SOLTAB(stp);

    specific = (struct xxx_specific *)stp->st_specific;
    if (specific)
	bu_free((char *)specific, "xxx_specific");
}
/**
 * For a hit on the surface of an xxx, return the (u, v) coordinates
 * of the hit point, 0 <= u, v <= 1.
 * u = azimuth, v = elevation
 *
 * Placeholder: the mapping is not implemented yet; arguments are
 * only validated and *uvp is left untouched.
 */
void
rt_xxx_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp)
{
    struct xxx_specific *specific;

    if (ap)
	RT_CK_APPLICATION(ap);
    if (!stp || !uvp)
	return;
    RT_CK_SOLTAB(stp);
    if (hitp)
	RT_CK_HIT(hitp);

    specific = (struct xxx_specific *)stp->st_specific;
    if (!specific)
	return;
}
/**
 * R T _ P G _ U V
 *
 * Stub uv mapping: every coordinate and derivative is set to zero.
 */
void
rt_pg_uv(struct application *ap, struct soltab *stp, struct hit *hitp, struct uvcoord *uvp)
{
    if (ap)
	RT_CK_APPLICATION(ap);
    if (stp)
	RT_CK_SOLTAB(stp);
    if (hitp)
	RT_CK_HIT(hitp);
    if (!uvp)
	return;

    /* Do nothing.  Really, should do what ARB does. */
    uvp->uv_u = 0;
    uvp->uv_v = 0;
    uvp->uv_du = 0;
    uvp->uv_dv = 0;
}
/**
 * Return the curvature of the revolve.
 *
 * Reports zero curvature with an arbitrary principal direction
 * orthogonal to the hit normal.
 */
void
rt_revolve_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
{
    if (!cvp || !hitp)
	return;
    RT_CK_HIT(hitp);
    if (stp)
	RT_CK_SOLTAB(stp);

    /* zero curvature; any tangent direction will do */
    cvp->crv_c1 = 0;
    cvp->crv_c2 = 0;
    bn_vec_ortho(cvp->crv_pdir, hitp->hit_normal);
}
/**
 * Given a pointer to a GED database record, and a transformation
 * matrix, determine if this is a valid XXX, and if so, precompute
 * various terms of the formula.
 *
 * Returns -
 * 0 XXX is OK
 * !0 Error in description
 *
 * Implicit return -
 * A struct xxx_specific is created, and its address is stored in
 * stp->st_specific for use by xxx_shot().
 */
int
rt_xxx_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
{
    struct rt_xxx_internal *xxx_ip;

    /* ip is dereferenced below, so reject NULL explicitly (matching
     * rt_obj_prep/rt_rec_prep); stp and rtip remain optional here. */
    if (!ip)
	return -1;
    if (stp)
	RT_CK_SOLTAB(stp);
    RT_CK_DB_INTERNAL(ip);
    if (rtip)
	RT_CK_RTI(rtip);

    xxx_ip = (struct rt_xxx_internal *)ip->idb_ptr;
    RT_XXX_CK_MAGIC(xxx_ip);

    return 0;
}
/**
 * Return the curvature of the xxx.
 *
 * Reports zero curvature with an arbitrary principal direction
 * orthogonal to the hit normal.
 */
void
rt_xxx_curve(struct curvature *cvp, struct hit *hitp, struct soltab *stp)
{
    struct xxx_specific *xxx;

    /* cvp and hitp are both dereferenced below; guard them like the
     * sibling curvature routines (rt_pg_curve, rt_revolve_curve)
     * instead of checking only stp. */
    if (!cvp || !hitp || !stp)
	return;
    xxx = (struct xxx_specific *)stp->st_specific;
    if (!xxx)
	return;
    RT_CK_SOLTAB(stp);

    cvp->crv_c1 = cvp->crv_c2 = 0; /* any tangent direction */
    bn_vec_ortho(cvp->crv_pdir, hitp->hit_normal);
}
/**
 * R T _ F R E E _ S O L T A B
 *
 * Decrement use count on soltab structure.  If no longer needed,
 * release associated storage, and free the structure.
 *
 * This routine semaphore protects against other copies of itself
 * running in parallel, and against other routines (such as
 * rt_find_identical_solid()) which might also be modifying the linked
 * list heads.
 *
 * Called by -
 * db_free_tree()
 * rt_clean()
 * rt_gettrees()
 * rt_kill_deal_solid_refs()
 */
void
rt_free_soltab(struct soltab *stp)
{
    int hash;

    RT_CK_SOLTAB(stp);
    if ( stp->st_id < 0 )
	bu_bomb("rt_free_soltab: bad st_id");
    /* same hash used by rt_find_identical_solid(), so both contend on
     * the same TREE semaphore for this solid's name */
    hash = db_dirhash(stp->st_dp->d_namep);

    ACQUIRE_SEMAPHORE_TREE(hash); /* start critical section */
    if ( --(stp->st_uses) > 0 ) {
	/* still referenced elsewhere; nothing more to do */
	RELEASE_SEMAPHORE_TREE(hash);
	return;
    }
    BU_LIST_DEQUEUE( &(stp->l2) ); /* remove from st_dp->d_use_hd list */
    BU_LIST_DEQUEUE( &(stp->l) ); /* uses rti_solidheads[] */
    RELEASE_SEMAPHORE_TREE(hash); /* end critical section */

    /* a non-positive st_aradius marks a "dead" solid (see the dead
     * solid cleanup in the gettrees path), whose per-primitive free
     * hook must not run again */
    if ( stp->st_aradius > 0 ) {
	stp->st_meth->ft_free( stp );
	stp->st_aradius = 0;
    }
    if ( stp->st_matp )
	bu_free( (char *)stp->st_matp, "st_matp");
    stp->st_matp = (matp_t)0; /* Sanity */

    bu_ptbl_free(&stp->st_regions);

    stp->st_dp = DIR_NULL; /* Sanity */

    if ( stp->st_path.magic ) {
	RT_CK_FULL_PATH( &stp->st_path );
	db_free_full_path( &stp->st_path );
    }

    bu_free( (char *)stp, "struct soltab" );
}
/* * This routine is called (at prep time) * once for each region which uses this shader. * Any shader-specific initialization should be done here. * * Returns: * 1 success * 0 success, but delete region * -1 failure */ HIDDEN int bbd_setup(struct region *rp, struct bu_vls *matparm, void **dpp, const struct mfuncs *mfp, struct rt_i *rtip) { register struct bbd_specific *bbd_sp; struct rt_db_internal intern; struct rt_tgc_internal *tgc; int s; mat_t mat; struct bbd_img *bi; double angle; vect_t vtmp; int img_num; vect_t vv; /* check the arguments */ RT_CHECK_RTI(rtip); BU_CK_VLS(matparm); RT_CK_REGION(rp); if (rdebug&RDEBUG_SHADE) bu_log("bbd_setup(%s)\n", rp->reg_name); RT_CK_TREE(rp->reg_treetop); if (rp->reg_treetop->tr_a.tu_op != OP_SOLID) { bu_log("--- Warning: Region %s shader %s", rp->reg_name, mfp->mf_name); bu_bomb("Shader should be used on region of single (rec/rcc) primitive\n"); } RT_CK_SOLTAB(rp->reg_treetop->tr_a.tu_stp); if (rp->reg_treetop->tr_a.tu_stp->st_id != ID_REC) { bu_log("--- Warning: Region %s shader %s", rp->reg_name, mfp->mf_name); bu_log("Shader should be used on region of single REC/RCC primitive %d\n", rp->reg_treetop->tr_a.tu_stp->st_id); bu_bomb("oops\n"); } /* Get memory for the shader parameters and shader-specific data */ BU_GET(bbd_sp, struct bbd_specific); *dpp = bbd_sp; /* initialize the default values for the shader */ memcpy(bbd_sp, &bbd_defaults, sizeof(struct bbd_specific)); bu_vls_init(&bbd_sp->img_filename); BU_LIST_INIT(&bbd_sp->imgs); bbd_sp->rtip = rtip; /* because new_image() needs this */ bbd_sp->img_count = 0; /* parse the user's arguments for this use of the shader. 
*/ if (bu_struct_parse(matparm, bbd_parse_tab, (char *)bbd_sp, NULL) < 0) return -1; if (bbd_sp->img_count > MAX_IMAGES) { bu_log("too many images (%zu) in shader for %s sb < %d\n", bbd_sp->img_count, rp->reg_name, MAX_IMAGES); bu_bomb("excessive image count\n"); } MAT_IDN(mat); RT_DB_INTERNAL_INIT(&intern); s = rt_db_get_internal(&intern, rp->reg_treetop->tr_a.tu_stp->st_dp, rtip->rti_dbip, mat, &rt_uniresource); if (intern.idb_minor_type != ID_TGC && intern.idb_minor_type != ID_REC) { bu_log("What did I get? %d\n", intern.idb_minor_type); } if (s < 0) { bu_log("%s:%d didn't get internal", __FILE__, __LINE__); bu_bomb(""); } tgc = (struct rt_tgc_internal *)intern.idb_ptr; RT_TGC_CK_MAGIC(tgc); angle = M_PI / (double)bbd_sp->img_count; img_num = 0; VMOVE(vv, tgc->h); VUNITIZE(vv); for (BU_LIST_FOR(bi, bbd_img, &bbd_sp->imgs)) { static const point_t o = VINIT_ZERO; bn_mat_arb_rot(mat, o, vv, angle*img_num); /* compute plane equation */ MAT4X3VEC(bi->img_plane, mat, tgc->a); VUNITIZE(bi->img_plane); bi->img_plane[H] = VDOT(tgc->v, bi->img_plane); MAT4X3VEC(vtmp, mat, tgc->b); VADD2(bi->img_origin, tgc->v, vtmp); /* image origin in 3d space */ /* calculate image u vector */ VREVERSE(bi->img_x, vtmp); VUNITIZE(bi->img_x); bi->img_xlen = MAGNITUDE(vtmp) * 2; /* calculate image v vector */ VMOVE(bi->img_y, tgc->h); VUNITIZE(bi->img_y); bi->img_ylen = MAGNITUDE(tgc->h); if (rdebug&RDEBUG_SHADE) { HPRINT("\nimg_plane", bi->img_plane); VPRINT("vtmp", vtmp); VPRINT("img_origin", bi->img_origin); bu_log("img_xlen:%g ", bi->img_xlen); VPRINT("img_x", bi->img_x); bu_log("img_ylen:%g ", bi->img_ylen); VPRINT("img_y", bi->img_y); } img_num++; } rt_db_free_internal(&intern); if (rdebug&RDEBUG_SHADE) { bu_struct_print(" Parameters:", bbd_print_tab, (char *)bbd_sp); } return 1; }
/** * R T _ G E T T R E E S _ M U V E S * * User-called function to add a set of tree hierarchies to the active * set. Includes getting the indicated list of attributes and a * Tcl_HashTable for use with the ORCA man regions. (stashed in the * rt_i structure). * * This function may run in parallel, but is not multiply re-entrant * itself, because db_walk_tree() isn't multiply re-entrant. * * Semaphores used for critical sections in parallel mode: * RT_SEM_TREE* protects rti_solidheads[] lists, d_uses(solids) * RT_SEM_RESULTS protects HeadRegion, mdl_min/max, d_uses(reg), nregions * RT_SEM_WORKER (db_walk_dispatcher, from db_walk_tree) * RT_SEM_STATS nsolids * * INPUTS * rtip - RT instance pointer * attrs - array of pointers (NULL terminated) to strings (attribute names). A corresponding * array of "bu_mro" objects containing the attribute values will be attached to region * structures ("attr_values") * argc - number of trees to get * argv - array of char pointers to the names of the tree tops * ncpus - number of cpus to use * * Returns - * 0 Ordinarily * -1 On major error */ int rt_gettrees_muves(struct rt_i *rtip, const char **attrs, int argc, const char **argv, int ncpus) { register struct soltab *stp; register struct region *regp; Tcl_HashTable *tbl; int prev_sol_count; int i; int num_attrs=0; point_t region_min, region_max; RT_CHECK_RTI(rtip); RT_CK_DBI(rtip->rti_dbip); if (!rtip->needprep) { bu_log("ERROR: rt_gettree() called again after rt_prep!\n"); return(-1); /* FAIL */ } if ( argc <= 0 ) return(-1); /* FAIL */ tbl = (Tcl_HashTable *)bu_malloc( sizeof( Tcl_HashTable ), "rtip->Orca_hash_tbl" ); Tcl_InitHashTable( tbl, TCL_ONE_WORD_KEYS ); rtip->Orca_hash_tbl = (genptr_t)tbl; prev_sol_count = rtip->nsolids; { struct db_tree_state tree_state; tree_state = rt_initial_tree_state; /* struct copy */ tree_state.ts_dbip = rtip->rti_dbip; tree_state.ts_rtip = rtip; tree_state.ts_resp = NULL; /* sanity. 
Needs to be updated */ if ( attrs ) { if ( rtip->rti_dbip->dbi_version < 5 ) { bu_log( "WARNING: requesting attributes from an old database version (ignored)\n" ); bu_avs_init_empty( &tree_state.ts_attrs ); } else { while ( attrs[num_attrs] ) { num_attrs++; } if ( num_attrs ) { bu_avs_init( &tree_state.ts_attrs, num_attrs, "avs in tree_state" ); num_attrs = 0; while ( attrs[num_attrs] ) { bu_avs_add( &tree_state.ts_attrs, attrs[num_attrs], NULL ); num_attrs++; } } else { bu_avs_init_empty( &tree_state.ts_attrs ); } } } else { bu_avs_init_empty( &tree_state.ts_attrs ); } /* ifdef this out for now, it is only using memory. perhaps a * better way of initiating ORCA stuff can be found. */ #if 0 bu_avs_add( &tree_state.ts_attrs, "ORCA_Comp", (char *)NULL ); #endif i = db_walk_tree( rtip->rti_dbip, argc, argv, ncpus, &tree_state, rt_gettree_region_start, rt_gettree_region_end, rt_gettree_leaf, (genptr_t)tbl ); bu_avs_free( &tree_state.ts_attrs ); } /* DEBUG: Ensure that all region trees are valid */ for ( BU_LIST_FOR( regp, region, &(rtip->HeadRegion) ) ) { RT_CK_REGION(regp); db_ck_tree(regp->reg_treetop); } /* * Eliminate any "dead" solids that parallel code couldn't change. * First remove any references from the region tree, then remove * actual soltab structs from the soltab list. */ for ( BU_LIST_FOR( regp, region, &(rtip->HeadRegion) ) ) { RT_CK_REGION(regp); rt_tree_kill_dead_solid_refs( regp->reg_treetop ); (void)rt_tree_elim_nops( regp->reg_treetop, &rt_uniresource ); } again: RT_VISIT_ALL_SOLTABS_START( stp, rtip ) { RT_CK_SOLTAB(stp); if ( stp->st_aradius <= 0 ) { bu_log("rt_gettrees() cleaning up dead solid '%s'\n", stp->st_dp->d_namep ); rt_free_soltab(stp); /* Can't do rtip->nsolids--, that doubles as max bit number! */ /* The macro makes it hard to regain place, punt */ goto again; } } RT_VISIT_ALL_SOLTABS_END
/**
 * R E C _ P R E P
 *
 * Given a pointer to a GED database record, and a transformation matrix,
 * determine if this is a valid REC,
 * and if so, precompute various terms of the formulas.
 *
 * Returns -
 * 0 REC is OK
 * !0 Error in description
 *
 * Implicit return - A struct rec_specific is created, and its
 * address is stored in stp->st_specific for use by rt_rec_shot(). If
 * the TGC is really an REC, stp->st_id is modified to ID_REC.
 */
int
rt_rec_prep(struct soltab *stp, struct rt_db_internal *ip, struct rt_i *rtip)
{
    struct rt_tgc_internal *tip;
    struct rec_specific *rec;
    double magsq_h, magsq_a, magsq_b;
    double mag_h, mag_a, mag_b;
    mat_t R;
    mat_t Rinv;
    mat_t S;
    vect_t invsq; /* [ 1/(|A|**2), 1/(|B|**2), 1/(|Hv|**2) ] */
    vect_t work;
    fastf_t f;

    if (!stp || !ip)
	return -1;
    RT_CK_SOLTAB(stp);
    RT_CK_DB_INTERNAL(ip);
    /* NOTE(review): rtip is validated only when non-NULL, yet
     * rtip->rti_tol is dereferenced unconditionally in the bounding
     * box computation below -- confirm callers always supply rtip. */
    if (rtip)
	RT_CK_RTI(rtip);

    tip = (struct rt_tgc_internal *)ip->idb_ptr;
    RT_TGC_CK_MAGIC(tip);

    /* Validate that |H| > 0, compute |A| |B| |C| |D| */
    mag_h = sqrt(magsq_h = MAGSQ(tip->h));
    mag_a = sqrt(magsq_a = MAGSQ(tip->a));
    mag_b = sqrt(magsq_b = MAGSQ(tip->b));

    /* Check for |H| > 0, |A| > 0, |B| > 0 */
    if (NEAR_ZERO(mag_h, RT_LEN_TOL)
	|| NEAR_ZERO(mag_a, RT_LEN_TOL)
	|| NEAR_ZERO(mag_b, RT_LEN_TOL)) {
	return 1; /* BAD, too small */
    }

    /* Make sure that A == C, B == D */
    VSUB2(work, tip->a, tip->c);
    f = MAGNITUDE(work);
    if (! NEAR_ZERO(f, RT_LEN_TOL)) {
	return 1; /* BAD, !cylinder */
    }
    VSUB2(work, tip->b, tip->d);
    f = MAGNITUDE(work);
    if (! NEAR_ZERO(f, RT_LEN_TOL)) {
	return 1; /* BAD, !cylinder */
    }

    /* Check for A.B == 0, H.A == 0 and H.B == 0 */
    f = VDOT(tip->a, tip->b) / (mag_a * mag_b);
    if (! NEAR_ZERO(f, RT_DOT_TOL)) {
	return 1; /* BAD */
    }
    f = VDOT(tip->h, tip->a) / (mag_h * mag_a);
    if (! NEAR_ZERO(f, RT_DOT_TOL)) {
	return 1; /* BAD */
    }
    f = VDOT(tip->h, tip->b) / (mag_h * mag_b);
    if (! NEAR_ZERO(f, RT_DOT_TOL)) {
	return 1; /* BAD */
    }

    /*
     * This TGC is really an REC
     */
    stp->st_id = ID_REC; /* "fix" soltab ID */
    stp->st_meth = &rt_functab[ID_REC];

    BU_GET(rec, struct rec_specific);
    stp->st_specific = (genptr_t)rec;

    VMOVE(rec->rec_Hunit, tip->h);
    VUNITIZE(rec->rec_Hunit);

    VMOVE(rec->rec_V, tip->v);
    VMOVE(rec->rec_A, tip->a);
    VMOVE(rec->rec_B, tip->b);
    rec->rec_iAsq = 1.0/magsq_a;
    rec->rec_iBsq = 1.0/magsq_b;

    VSET(invsq, 1.0/magsq_a, 1.0/magsq_b, 1.0/magsq_h);

    /* Compute R and Rinv matrices */
    MAT_IDN(R);
    f = 1.0/mag_a;
    VSCALE(&R[0], tip->a, f);
    f = 1.0/mag_b;
    VSCALE(&R[4], tip->b, f);
    f = 1.0/mag_h;
    VSCALE(&R[8], tip->h, f);
    bn_mat_trn(Rinv, R); /* inv of rot mat is trn */

    /* Compute S */
    MAT_IDN(S);
    S[ 0] = sqrt(invsq[0]);
    S[ 5] = sqrt(invsq[1]);
    S[10] = sqrt(invsq[2]);

    /* Compute SoR and invRoS */
    bn_mat_mul(rec->rec_SoR, S, R);
    bn_mat_mul(rec->rec_invRoS, Rinv, S);

    /* Compute bounding sphere and RPP */
    {
	fastf_t dx, dy, dz; /* For bounding sphere */

	/* NOTE(review): if ft_bbox fails here, the freshly BU_GET'ed
	 * rec remains attached to stp->st_specific while 1 (error) is
	 * returned -- verify the caller frees it on failure. */
	if (stp->st_meth->ft_bbox(ip, &(stp->st_min), &(stp->st_max), &(rtip->rti_tol)))
	    return 1;

	VSET(stp->st_center,
	     (stp->st_max[X] + stp->st_min[X])/2,
	     (stp->st_max[Y] + stp->st_min[Y])/2,
	     (stp->st_max[Z] + stp->st_min[Z])/2);

	/* aradius = largest half-extent, bradius = half-diagonal */
	dx = (stp->st_max[X] - stp->st_min[X])/2;
	f = dx;
	dy = (stp->st_max[Y] - stp->st_min[Y])/2;
	if (dy > f) f = dy;
	dz = (stp->st_max[Z] - stp->st_min[Z])/2;
	if (dz > f) f = dz;
	stp->st_aradius = f;
	stp->st_bradius = sqrt(dx*dx + dy*dy + dz*dz);
    }
    return 0; /* OK */
}
/* * This is called (from viewshade() in shade.c) once for each hit point * to be shaded. The purpose here is to fill in values in the shadework * structure. */ int gauss_render(struct application *ap, const struct partition *pp, struct shadework *swp, void *dp) /* defined in material.h */ /* ptr to the shader-specific struct */ { register struct gauss_specific *gauss_sp = (struct gauss_specific *)dp; struct seg *seg_p; struct reg_db_internals *dbint_p; double optical_density = 0.0; /* check the validity of the arguments we got */ RT_AP_CHECK(ap); RT_CHECK_PT(pp); CK_gauss_SP(gauss_sp); if (rdebug&RDEBUG_SHADE) { bu_struct_print("gauss_render Parameters:", gauss_print_tab, (char *)gauss_sp); bu_log("r_pt(%g %g %g) r_dir(%g %g %g)\n", V3ARGS(ap->a_ray.r_pt), V3ARGS(ap->a_ray.r_dir)); } BU_CK_LIST_HEAD(&swp->sw_segs->l); BU_CK_LIST_HEAD(&gauss_sp->dbil); /* look at each segment that participated in the ray partition(s) */ for (BU_LIST_FOR(seg_p, seg, &swp->sw_segs->l)) { if (rdebug&RDEBUG_SHADE) { bu_log("seg %g -> %g\n", seg_p->seg_in.hit_dist, seg_p->seg_out.hit_dist); } RT_CK_SEG(seg_p); RT_CK_SOLTAB(seg_p->seg_stp); /* check to see if the seg/solid is in this partition */ if (bu_ptbl_locate(&pp->pt_seglist, (long *)seg_p) != -1) { /* XXX You might use a bu_ptbl list of the solid pointers... */ /* check to see if the solid is from this region */ for (BU_LIST_FOR(dbint_p, reg_db_internals, &gauss_sp->dbil)) { CK_DBINT(dbint_p); if (dbint_p->st_p == seg_p->seg_stp) { /* The solid from the region is * the solid from the segment * from the partition */ optical_density += eval_seg(ap, dbint_p, seg_p); break; } } } else { if (rdebug&RDEBUG_SHADE) bu_log("gauss_render() bittest failed\n"); } }
/**
 * R T _ F I N D _ I D E N T I C A L _ S O L I D
 *
 * See if solid "dp" as transformed by "mat" already exists in the
 * soltab list.  If it does, return the matching stp, otherwise,
 * create a new soltab structure, enrole it in the list, and return a
 * pointer to that.
 *
 * "mat" will be a null pointer when an identity matrix is signified.
 * This greatly speeds the comparison process.
 *
 * The two cases can be distinguished by the fact that stp->st_id will
 * be 0 for a new soltab structure, and non-zero for an existing one.
 *
 * This routine will run in parallel.
 *
 * In order to avoid a race between searching the soltab list and
 * adding new solids to it, the new solid to be added *must* be
 * enrolled in the list before exiting the critical section.
 *
 * To limit the size of the list to be searched, there are many lists.
 * The selection of which list is determined by the hash value
 * computed from the solid's name.  This is the same optimization used
 * in searching the directory lists.
 *
 * This subroutine is the critical bottleneck in parallel tree walking.
 *
 * It is safe, and much faster, to use several different critical
 * sections when searching different lists.
 *
 * There are only 4 dedicated semaphores defined, TREE0 through TREE3.
 * This unfortunately limits the code to having only 4 CPUs doing list
 * searching at any one time.  Hopefully, this is enough parallelism
 * to keep the rest of the CPUs doing I/O and actual solid prepping.
 *
 * Since the algorithm has been reduced from an O((nsolid/128)**2)
 * search on the entire rti_solidheads[hash] list to an O(ninstance)
 * search on the dp->d_use_head list for this one solid, the critical
 * section should be relatively short-lived.  Having the 3-way split
 * should provide ample opportunity for parallelism through here,
 * while still ensuring that the necessary variables are protected.
 *
 * There are two critical variables which *both* need to be protected:
 * the specific rti_solidhead[hash] list head, and the specific
 * dp->d_use_hd list head.  Fortunately, since the selection of
 * critical section is based upon db_dirhash(dp->d_namep), any other
 * processor that wants to search this same 'dp' will get the same
 * hash as the current thread, and will thus wait for the appropriate
 * semaphore to be released.  Similarly, any other thread that wants
 * to search the same rti_solidhead[hash] list as the current thread
 * will be using the same hash, and will thus wait for the proper
 * semaphore.
 */
HIDDEN struct soltab *
rt_find_identical_solid(register const matp_t mat, register struct directory *dp, struct rt_i *rtip)
{
    register struct soltab *stp = RT_SOLTAB_NULL;
    int hash;

    RT_CK_DIR(dp);
    RT_CK_RTI(rtip);

    hash = db_dirhash( dp->d_namep );

    /* Enter the appropriate dual critical-section */
    ACQUIRE_SEMAPHORE_TREE(hash);

    /*
     * If solid has not been referenced yet, the search can be
     * skipped.  If solid is being referenced a _lot_, it certainly
     * isn't all going to be in the same place, so don't bother
     * searching.  Consider the case of a million instances of the
     * same tree submodel solid.
     */
    if ( dp->d_uses > 0 && dp->d_uses < 100 &&
	 rtip->rti_dont_instance == 0 ) {
	struct bu_list *mid;

	/* Search dp->d_use_hd list for other instances */
	for ( BU_LIST_FOR( mid, bu_list, &dp->d_use_hd ) ) {
	    /* d_use_hd links through the soltab's secondary link l2 */
	    stp = BU_LIST_MAIN_PTR( soltab, mid, l2 );
	    RT_CK_SOLTAB(stp);

	    if ( stp->st_matp == (matp_t)0 ) {
		if ( mat == (matp_t)0 ) {
		    /* Both have identity matrix */
		    goto more_checks;
		}
		continue;
	    }
	    if ( mat == (matp_t)0 )
		continue; /* doesn't match */

	    if ( !bn_mat_is_equal(mat, stp->st_matp, &rtip->rti_tol))
		continue;

	more_checks:
	    /* Don't instance this solid from some other model
	     * instance.  As this is nearly always equal, check it
	     * last
	     */
	    if ( stp->st_rtip != rtip ) continue;

	    /*
	     * stp now points to re-referenced solid.  stp->st_id is
	     * non-zero, indicating pre-existing solid.
	     */
	    RT_CK_SOLTAB(stp); /* sanity */

	    /* Only increment use counter for non-dead solids.
	     * (presumably a dead solid is flagged by a negative
	     * aradius here -- compare the <= 0 test used by the
	     * gettrees cleanup; confirm the sentinel value) */
	    if ( !(stp->st_aradius <= -1) )
		stp->st_uses++;
	    /* dp->d_uses is NOT incremented, because number of
	     * soltab's using it has not gone up.
	     */
	    if ( RT_G_DEBUG & DEBUG_SOLIDS ) {
		bu_log( mat ?
			"rt_find_identical_solid: %s re-referenced %d\n" :
			"rt_find_identical_solid: %s re-referenced %d (identity mat)\n",
			dp->d_namep, stp->st_uses );
	    }

	    /* Leave the appropriate dual critical-section */
	    RELEASE_SEMAPHORE_TREE(hash);
	    return stp;
	}
    }

    /*
     * Create and link a new solid into the list.
     *
     * Ensure the search keys "dp", "st_mat" and "st_rtip" are stored
     * now, while still inside the critical section, because they are
     * searched on, above.
     */
    BU_GETSTRUCT(stp, soltab);
    stp->l.magic = RT_SOLTAB_MAGIC;
    stp->l2.magic = RT_SOLTAB2_MAGIC;
    stp->st_rtip = rtip;
    stp->st_dp = dp;
    dp->d_uses++;
    stp->st_uses = 1;
    /* stp->st_id is intentionally left zero here, as a flag */

    if ( mat ) {
	stp->st_matp = (matp_t)bu_malloc( sizeof(mat_t), "st_matp" );
	MAT_COPY( stp->st_matp, mat );
    } else {
	stp->st_matp = (matp_t)0;
    }

    /* Add to the appropriate soltab list head */
    /* PARALLEL NOTE: Uses critical section on rt_solidheads element */
    BU_LIST_INSERT( &(rtip->rti_solidheads[hash]), &(stp->l) );

    /* Also add to the directory structure list head */
    /* PARALLEL NOTE: Uses critical section on this 'dp' */
    BU_LIST_INSERT( &dp->d_use_hd, &(stp->l2) );

    /*
     * Leave the 4-way critical-section protecting dp and [hash]
     */
    RELEASE_SEMAPHORE_TREE(hash);

    /* Enter an exclusive critical section to protect nsolids.
     * nsolids++ needs to be locked to a SINGLE thread
     */
    bu_semaphore_acquire(RT_SEM_STATS);
    stp->st_bit = rtip->nsolids++;
    bu_semaphore_release(RT_SEM_STATS);

    /*
     * Fill in the last little bit of the structure in full parallel
     * mode, outside of any critical section.
     */

    /* Init tables of regions using this solid.  Usually small. */
    bu_ptbl_init( &stp->st_regions, 7, "st_regions ptbl" );

    return stp;
}