int ged_put_comb(struct ged *gedp, int argc, const char *argv[]) { struct directory *dp; struct rt_db_internal intern; struct rt_comb_internal *comb; char new_name_v4[NAMESIZE+1]; char *new_name; int offset; int save_comb_flag = 0; static const char *usage = "comb_name is_Region id air material los color shader inherit boolean_expr"; static const char *noregionusage = "comb_name n color shader inherit boolean_expr"; static const char *regionusage = "comb_name y id air material los color shader inherit boolean_expr"; const char *saved_name = NULL; GED_CHECK_DATABASE_OPEN(gedp, GED_ERROR); GED_CHECK_READ_ONLY(gedp, GED_ERROR); GED_CHECK_ARGC_GT_0(gedp, argc, GED_ERROR); /* initialize result */ bu_vls_trunc(gedp->ged_result_str, 0); /* must be wanting help */ if (argc == 1) { bu_vls_printf(gedp->ged_result_str, "Usage: %s %s", argv[0], usage); return GED_HELP; } if (argc < 7 || 11 < argc) { bu_vls_printf(gedp->ged_result_str, "Usage: %s %s", argv[0], usage); return GED_ERROR; } comb = (struct rt_comb_internal *)NULL; dp = db_lookup(gedp->ged_wdbp->dbip, argv[1], LOOKUP_QUIET); if (dp != RT_DIR_NULL) { if (!(dp->d_flags & RT_DIR_COMB)) { bu_vls_printf(gedp->ged_result_str, "%s: %s is not a combination, so cannot be edited this way\n", argv[0], argv[1]); return GED_ERROR; } if (rt_db_get_internal(&intern, dp, gedp->ged_wdbp->dbip, (fastf_t *)NULL, &rt_uniresource) < 0) { bu_vls_printf(gedp->ged_result_str, "%s: Database read error, aborting\n", argv[0]); return GED_ERROR; } comb = (struct rt_comb_internal *)intern.idb_ptr; saved_name = save_comb(gedp, dp); /* Save combination to a temp name */ save_comb_flag = 1; } /* empty the existing combination */ if (comb) { db_free_tree(comb->tree, &rt_uniresource); comb->tree = NULL; } else { /* make an empty combination structure */ BU_ALLOC(comb, struct rt_comb_internal); if (comb == NULL) bu_bomb("Unable to allocate comb memory"); RT_COMB_INTERNAL_INIT(comb); } if (db_version(gedp->ged_wdbp->dbip) < 5) { new_name = new_name_v4; if (dp == RT_DIR_NULL) NAMEMOVE(argv[1], new_name_v4); else NAMEMOVE(dp->d_namep, new_name_v4); } else { if (dp == RT_DIR_NULL) new_name = (char *)argv[1]; else new_name = dp->d_namep; } if (*argv[2] == 'y' || *argv[2] == 'Y') comb->region_flag = 1; else comb->region_flag = 0; if (comb->region_flag) { if (argc != 11) { bu_vls_printf(gedp->ged_result_str, "region_flag is set, incorrect number of arguments supplied.\n"); bu_vls_printf(gedp->ged_result_str, "Usage: %s %s", argv[0], regionusage); return GED_ERROR; } comb->region_id = atoi(argv[3]); comb->aircode = atoi(argv[4]); comb->GIFTmater = atoi(argv[5]); comb->los = atoi(argv[6]); offset = 6; } else { if (argc != 7) { bu_vls_printf(gedp->ged_result_str, "region_flag is not set, incorrect number of arguments supplied.\n"); bu_vls_printf(gedp->ged_result_str, "Usage: %s %s", argv[0], noregionusage); return GED_ERROR; } offset = 2; } put_rgb_into_comb(comb, argv[offset + 1]); bu_vls_strcpy(&comb->shader, argv[offset +2]); if (*argv[offset + 3] == 'y' || *argv[offset + 3] == 'Y') comb->inherit = 1; else comb->inherit = 0; if (put_tree_into_comb(gedp, comb, dp, argv[1], new_name, argv[offset + 4]) == GED_ERROR) { if (comb && dp) { restore_comb(gedp, dp, saved_name); bu_vls_printf(gedp->ged_result_str, "%s: \toriginal restored\n", argv[0]); } bu_file_delete(_ged_tmpfil); return GED_ERROR; } else if (save_comb_flag) { /* eliminate the temporary combination */ const char *av[3]; av[0] = "kill"; av[1] = saved_name; av[2] = NULL; (void)ged_kill(gedp, 2, (const char **)av); } 
bu_file_delete(_ged_tmpfil); return GED_OK; }
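/*
 * Usage sketch (hypothetical, not part of this file): driving ged_put_comb()
 * with the region form of the argument list documented in the usage strings
 * above.  Assumes "gedp" is an already-open database handle and that the
 * primitives named in the boolean expression exist; all object names here
 * are made up for illustration.
 */
static int
example_put_comb(struct ged *gedp)
{
    /* comb_name is_Region id air material los color shader inherit boolean_expr */
    const char *av[] = {
	"put_comb",		/* argv[0], command name */
	"demo.r",		/* combination to create or overwrite */
	"y",			/* is_Region */
	"1000",			/* region id */
	"0",			/* air code */
	"1",			/* material (GIFT) code */
	"100",			/* line-of-sight equivalence */
	"255 0 0",		/* color */
	"plastic",		/* shader */
	"n",			/* inherit */
	"u box.s - sph.s"	/* boolean expression */
    };

    /* 11 arguments matches the argc == 11 region branch above */
    return ged_put_comb(gedp, 11, av);
}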
/** * Allows caller control over zero-suppression feature. */ HIDDEN void hist_pr_suppress(register const struct bu_hist *histp, const char *title, int zero_suppress) { long maxcount; static const char marks[] = "################################################################"; #define NMARKS 50 char buf[256]; int percent; unsigned int mark_count; double val; size_t i; size_t nbins; BU_CK_HIST(histp); /* Find entry with highest count */ maxcount = 0L; for (i=0; i<=histp->hg_nbins; i++) { if (histp->hg_bins[i] > maxcount) maxcount = histp->hg_bins[i]; } if (maxcount < 1) maxcount = 1; nbins = histp->hg_nbins; if (zero_suppress) { /* Suppress trailing bins with zero counts. nbins s/b >= 1 */ for (; nbins >= 1; nbins--) { if (histp->hg_bins[nbins] > 0) break; } } /* 12345678 12345678 123 .... */ bu_log("\nHistogram of %s\nmin=%g, max=%g, nbins=%zd, clumpsize=%g\n%ld samples collected, highest count was %ld\n\n Value Count Rel%%| Bar Graph\n", title, histp->hg_min, histp->hg_max, histp->hg_nbins, histp->hg_clumpsize, histp->hg_nsamples, maxcount); /* Print each bin. */ i = 0; if (zero_suppress) { /* Leading bins with zero counts are suppressed. */ for (; i <= nbins; i++) { if (histp->hg_bins[i] > 0) break; } } for (; i <= nbins; i++) { percent = (int)(((double)histp->hg_bins[i])*100.0/maxcount); mark_count = percent*NMARKS/100; if (mark_count <= 0 && histp->hg_bins[i] > 0) mark_count = 1; if (mark_count > NMARKS) { bu_log("mark_count=%d, NMARKS=%d, hg_bins[%d]=%ld, maxcount=%ld\n", mark_count, NMARKS, i, histp->hg_bins[i], maxcount); bu_bomb("bu_hist_pr() bogus mark_count\n"); } if (mark_count <= 0) { buf[0] = '\0'; } else { memcpy(buf, marks, mark_count); buf[mark_count] = '\0'; } val = histp->hg_min + i*histp->hg_clumpsize; bu_log("%8g %8ld %3d |%s\n", val, histp->hg_bins[i], percent, buf); } }
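/*
 * Usage sketch (hypothetical, not part of this file): filling a histogram and
 * printing it through the public libbu interface, which is assumed to route
 * through the zero-suppressing printer above.  The bu_hist_init(),
 * BU_HIST_TALLY(), bu_hist_pr() and bu_hist_free() signatures are assumed
 * from bu.h.
 */
static void
example_hist(void)
{
    struct bu_hist h;
    size_t i;

    bu_hist_init(&h, 0.0, 100.0, 20);	/* 20 bins spanning [0, 100) */

    for (i = 0; i < 1000; i++) {
	double sample = (double)(i % 37);	/* synthetic data */
	BU_HIST_TALLY(&h, sample);
    }

    /* prints min/max, per-bin counts and the '#' bar graph shown above */
    bu_hist_pr(&h, "synthetic samples");

    bu_hist_free(&h);
}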
int split_face_single(struct soup_s *s, unsigned long int fid, point_t isectpt[2], struct face_s *opp_face, const struct bn_tol *tol) { struct face_s *f = s->faces+fid; int a, i, j, isv[2] = {0, 0}; #define VERT_INT 0x10 #define LINE_INT 0x20 #define FACE_INT 0x40 #define ALL_INT (VERT_INT|LINE_INT|FACE_INT) /****** START hoistable ******/ for (i=0;i<2;i++) for (j=0;j<3;j++) { if (isv[i] == 0) { fastf_t dist; switch ( bn_isect_pt_lseg( &dist, (fastf_t *)&f->vert[j], (fastf_t *)&f->vert[j==2?0:j+1], (fastf_t *)&isectpt[i], tol) ) { case -2: case -1: continue; case 1: isv[i] = VERT_INT|j; break; case 2: isv[i] = VERT_INT|(j==2?0:j+1); break; case 3: isv[i] = LINE_INT|j; break; default: bu_log("Whu?\n"); break; } } } /*** test if intersect is middle of face ***/ for (i=0;i<2;i++) /* test for face in plane */ if (isv[i] == 0) /* assume that the isectpt is necessarily on the vert, line or face... if it's not seen on the vert or line, it must be face. This should probably be a real check. */ isv[i] = FACE_INT; if (isv[0] == 0 || isv[1] == 0) { bu_log("Something real bad %x %x\n", isv[0], isv[1]); return -1; } if ((isv[0]&ALL_INT) > (isv[1]&ALL_INT)) { int tmpi; point_t tmpp; VMOVE(tmpp, isectpt[0]); VMOVE(isectpt[0], isectpt[1]); VMOVE(isectpt[1], tmpp); tmpi = isv[0]; isv[0]=isv[1]; isv[1]=tmpi; } /****** END hoistable ******/ /* test if both ends of the intersect line are on vertices */ /* if VERT+VERT, abort */ if ((isv[0]&VERT_INT) && (isv[1]&VERT_INT)) { return 1; } a = isv[0]&~ALL_INT; if ( a != 0 && a != 1 && a != 2) { bu_log("Bad a value: %d\n", a); bu_bomb("Exploding\n"); } /* if VERT+LINE, break into 2 */ if (isv[0]&VERT_INT && isv[1]&LINE_INT) { vect_t muh; int meh; VSUB2(muh, isectpt[1], f->vert[a==2?0:a+1]); meh = VDOT(opp_face->plane, muh) > 0; soup_add_face_precomputed(s, f->vert[a], isectpt[1], f->vert[a==2?0:a+1], f->plane, meh == 1 ? OUTSIDE : INSIDE); soup_add_face_precomputed(s, f->vert[a], f->vert[a==0?2:a-1], isectpt[1], f->plane, meh == 1 ? INSIDE : OUTSIDE); soup_rm_face(s, fid); return 2; } /* if LINE+LINE, break into 3, figure out which side has two verts and cut * that */ if (isv[0]&LINE_INT && isv[1]&LINE_INT) { return 1; } /* if VERT+FACE, break into 3, intersect is one line, other two to the * opposing verts */ if (isv[0]&VERT_INT ) { soup_add_face_precomputed(s, f->vert[0], f->vert[1], isectpt[1], f->plane, 0); soup_add_face_precomputed(s, f->vert[1], f->vert[2], isectpt[1], f->plane, 0); soup_add_face_precomputed(s, f->vert[2], f->vert[0], isectpt[1], f->plane, 0); soup_rm_face(s, fid); return 3; } /* if LINE+FACE, break into 4 */ if (isv[0]&LINE_INT ) { soup_add_face_precomputed(s, f->vert[a], isectpt[0], isectpt[1], f->plane, 0); soup_add_face_precomputed(s, f->vert[a==2?0:a+1], isectpt[1], isectpt[0], f->plane, 0); soup_add_face_precomputed(s, f->vert[a==2?0:a+1], f->vert[a==0?2:a-1], isectpt[1], f->plane, 0); soup_add_face_precomputed(s, f->vert[a], isectpt[1], f->vert[a==0?2:a-1], f->plane, 0); soup_rm_face(s, fid); return 4; } /* if FACE+FACE, break into 3 */ if (isv[0]&FACE_INT ) { /* extend intersect line to triangle edges, could be 2 or 3? 
*/ /* make sure isectpt[0] is closest to vert[0] */ if (DIST_PT_PT_SQ(f->vert[0], isectpt[0]) > DIST_PT_PT_SQ(f->vert[0], isectpt[1])) { point_t tmp; VMOVE(tmp, isectpt[1]); VMOVE(isectpt[1], isectpt[0]); VMOVE(isectpt[0], tmp); } soup_add_face_precomputed(s, f->vert[0], isectpt[0], f->vert[2], f->plane, 0); soup_add_face_precomputed(s, f->vert[0], f->vert[1], isectpt[0], f->plane, 0); soup_add_face_precomputed(s, f->vert[1], isectpt[1], isectpt[0], f->plane, 0); soup_add_face_precomputed(s, f->vert[2], isectpt[0], isectpt[1], f->plane, 0); soup_add_face_precomputed(s, f->vert[1], f->vert[2], isectpt[1], f->plane, 0); return 5; } #undef VERT_INT #undef LINE_INT #undef ALL_INT #undef FACE_INT /* this should never be reached */ bu_log("derp?\n"); return 0; }
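/*
 * Minimal standalone illustration (hypothetical, not BRL-CAD API) of the
 * flag|index encoding used for isv[] above: the high bits record whether the
 * intersection landed on a vertex, an edge, or the face interior, and the
 * low bits carry which vertex/edge (0..2) it was.
 */
#define EX_VERT_INT 0x10
#define EX_LINE_INT 0x20
#define EX_FACE_INT 0x40
#define EX_ALL_INT (EX_VERT_INT|EX_LINE_INT|EX_FACE_INT)

static void
example_decode_isect(int isv)
{
    int kind = isv & EX_ALL_INT;	/* which class of hit */
    int idx = isv & ~EX_ALL_INT;	/* which vertex/edge, 0..2 */

    if (kind == EX_VERT_INT)
	bu_log("intersection on vertex %d\n", idx);
    else if (kind == EX_LINE_INT)
	bu_log("intersection on edge %d\n", idx);
    else
	bu_log("intersection in face interior\n");
}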
union tree * evaluate(union tree *tr, const struct rt_tess_tol *ttol, const struct bn_tol *tol) { RT_CK_TREE(tr); switch (tr->tr_op) { case OP_NOP: return tr; case OP_NMG_TESS: /* ugh, keep it as nmg_tess and just shove the rt_bot_internal ptr * in as nmgregion. :/ Also, only doing the first shell of the first * model. Primitives should only provide a single shell, right? */ { struct rt_db_internal ip; struct nmgregion *nmgr = BU_LIST_FIRST(nmgregion, &tr->tr_d.td_r->m_p->r_hd); /* the bot temporary format may be unnecessary if we can walk * the nmg shells and generate soup from them directly. */ struct rt_bot_internal *bot = nmg_bot(BU_LIST_FIRST(shell, &nmgr->s_hd), tol); /* causes a crash. nmg_kr(nmgr); free(nmgr); */ tr->tr_d.td_r->m_p = (struct model *)bot2soup(bot, tol); SOUP_CKMAG((struct soup_s *)tr->tr_d.td_r->m_p); /* fill in a db_internal with our new bot so we can free it */ RT_DB_INTERNAL_INIT(&ip); ip.idb_major_type = DB5_MAJORTYPE_BRLCAD; ip.idb_minor_type = ID_BOT; ip.idb_meth = &OBJ[ID_BOT]; ip.idb_ptr = bot; ip.idb_meth->ft_ifree(&ip); } return tr; case OP_UNION: case OP_INTERSECT: case OP_SUBTRACT: RT_CK_TREE(tr->tr_b.tb_left); RT_CK_TREE(tr->tr_b.tb_right); tr->tr_b.tb_left = evaluate(tr->tr_b.tb_left, ttol, tol); tr->tr_b.tb_right = evaluate(tr->tr_b.tb_right, ttol, tol); RT_CK_TREE(tr->tr_b.tb_left); RT_CK_TREE(tr->tr_b.tb_right); SOUP_CKMAG(tr->tr_b.tb_left->tr_d.td_r->m_p); SOUP_CKMAG(tr->tr_b.tb_right->tr_d.td_r->m_p); split_faces(tr->tr_b.tb_left, tr->tr_b.tb_right, tol); RT_CK_TREE(tr->tr_b.tb_left); RT_CK_TREE(tr->tr_b.tb_right); SOUP_CKMAG(tr->tr_b.tb_left->tr_d.td_r->m_p); SOUP_CKMAG(tr->tr_b.tb_right->tr_d.td_r->m_p); break; default: bu_bomb("bottess evaluate(): bad op (first pass)\n"); } switch (tr->tr_op) { case OP_UNION: return compose(tr->tr_b.tb_left, tr->tr_b.tb_right, OUTSIDE, SAME, OUTSIDE); case OP_INTERSECT: return compose(tr->tr_b.tb_left, tr->tr_b.tb_right, INSIDE, SAME, INSIDE); case OP_SUBTRACT: return invert(compose(tr->tr_b.tb_left, invert(tr->tr_b.tb_right), OUTSIDE, OPPOSITE, INSIDE)); default: bu_bomb("bottess evaluate(): bad op (second pass, CSG)\n"); } bu_bomb("Got somewhere I shouldn't have\n"); return NULL; }
/** * Evaluate a boolean operation on the two shells "A" and "B", of the * form "answer = A op B". As input, each element (loop-in-face, wire * loop, wire edge, vertex) in both A and B has been classified as * being "in", "on", or "out" of the other shell. Using these * classifications, operate on the input shells. At the end, shell A * contains the resultant object, and shell B is destroyed. * */ void nmg_evaluate_boolean(struct shell *sA, struct shell *sB, int op, char **classlist, const struct bn_tol *tol) { int const *actions; struct nmg_bool_state bool_state; NMG_CK_SHELL(sA); NMG_CK_SHELL(sB); BN_CK_TOL(tol); if (RTG.NMG_debug & DEBUG_BOOLEVAL) { bu_log("nmg_evaluate_boolean(sA=%p, sB=%p, op=%d) START\n", (void *)sA, (void *)sB, op); } switch (op) { case NMG_BOOL_SUB: actions = subtraction_actions; nmg_invert_shell(sB); /* FLIP all faceuse normals */ break; case NMG_BOOL_ADD: actions = union_actions; break; case NMG_BOOL_ISECT: actions = intersect_actions; break; default: actions = union_actions; /* shut up lint */ bu_log("ERROR nmg_evaluate_boolean() op=%d.\n", op); bu_bomb("bad boolean\n"); } bool_state.bs_dest = sA; bool_state.bs_src = sB; bool_state.bs_classtab = classlist; bool_state.bs_actions = actions; bool_state.bs_tol = tol; bool_state.bs_isA = 1; nmg_eval_shell(sA, &bool_state); bool_state.bs_isA = 0; nmg_eval_shell(sB, &bool_state); if (RTG.NMG_debug & DEBUG_BOOLEVAL) { bu_log("nmg_evaluate_boolean(sA=%p, sB=%p, op=%d), evaluations done\n", (void *)sA, (void *)sB, op); } /* Write sA and sB into separate files, if wanted? */ /* Move everything left in sB into sA. sB is killed. */ nmg_js(sA, sB, tol); /* Plot the result */ if (RTG.NMG_debug & DEBUG_BOOLEVAL && RTG.NMG_debug & DEBUG_PLOTEM) { FILE *fp; if ((fp=fopen("bool_ans.plot3", "wb")) == (FILE *)NULL) { (void)perror("bool_ans.plot3"); bu_bomb("unable to open bool_ans.plot3 for writing"); } bu_log("plotting bool_ans.plot3\n"); nmg_pl_s(fp, sA); (void)fclose(fp); } /* Remove loops/edges/vertices that appear more than once in result */ nmg_rm_redundancies(sA, tol); }
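/*
 * Calling sketch (assumptions flagged): nmg_evaluate_boolean() expects every
 * element of both shells to have been classified first.  A caller along the
 * lines of nmg_boolean() is assumed to allocate the per-element class tables,
 * run the classifier, and only then evaluate.  The classlist sizing and the
 * nmg_class_shells() signature below are assumptions, not taken from this
 * file.
 */
static void
example_shell_subtract(struct shell *sA, struct shell *sB,
		       struct model *m, const struct bn_tol *tol)
{
    char *classlist[8];
    size_t nelem = (size_t)m->maxindex;	/* one slot per NMG index (assumed) */
    int i;

    for (i = 0; i < 8; i++)
	classlist[i] = (char *)bu_calloc(nelem, sizeof(char), "classlist");

    /* classify every element of A w.r.t. B and vice versa (assumed API) */
    nmg_class_shells(sA, sB, classlist, tol);

    /* A = A - B; on return sA holds the result and sB has been consumed */
    nmg_evaluate_boolean(sA, sB, NMG_BOOL_SUB, classlist, tol);

    for (i = 0; i < 8; i++)
	bu_free(classlist[i], "classlist");
}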
/** * R T _ S P E C T _ M A K E _ C I E _ X Y Z * * Given as input a spectral sampling distribution, generate the 3 * curves to match the human eye's response in CIE color parameters X, * Y, and Z. XYZ space can be readily converted to RGB with a 3x3 * matrix. * * The tabulated data is linearly interpolated. * * Pointers to the three spectral weighting functions are "returned", * storage for the X, Y, and Z curves is allocated by this routine and * must be freed by the caller. */ void rt_spect_make_CIE_XYZ(struct bn_tabdata **x, struct bn_tabdata **y, struct bn_tabdata **z, const struct bn_table *tabp) { struct bn_tabdata *a, *b, *c; fastf_t xyz_scale; int i; int j; BN_CK_TABLE(tabp); i = bn_table_interval_num_samples( tabp, 430., 650. ); if ( i <= 4 ) bu_log("rt_spect_make_CIE_XYZ: insufficient samples (%d) in visible band\n", i); BN_GET_TABDATA( a, tabp ); BN_GET_TABDATA( b, tabp ); BN_GET_TABDATA( c, tabp ); *x = a; *y = b; *z = c; /* No CIE data below 380 nm */ for ( j=0; tabp->x[j] < 380 && j < tabp->nx; j++ ) { a->y[j] = b->y[j] = c->y[j] = 0; } /* Traverse the CIE table. Produce as many output values as * possible before advancing to next CIE table entry. */ for ( i = 0; i < 81-1; i++ ) { fastf_t fract; /* fraction from [i] to [i+1] */ again: if ( j >= tabp->nx ) break; if ( tabp->x[j] < rt_CIE_XYZ[i][0] ) bu_bomb("rt_spect_make_CIE_XYZ assertion1 failed\n"); if ( tabp->x[j] >= rt_CIE_XYZ[i+1][0] ) continue; /* The CIE table has 5nm spacing */ fract = (tabp->x[j] - rt_CIE_XYZ[i][0] ) / 5; if ( fract < 0 || fract > 1 ) bu_bomb("rt_spect_make_CIE_XYZ assertion2 failed\n"); a->y[j] = (1-fract) * rt_CIE_XYZ[i][1] + fract * rt_CIE_XYZ[i+1][1]; b->y[j] = (1-fract) * rt_CIE_XYZ[i][2] + fract * rt_CIE_XYZ[i+1][2]; c->y[j] = (1-fract) * rt_CIE_XYZ[i][3] + fract * rt_CIE_XYZ[i+1][3]; j++; goto again; } /* No CIE data above 780 nm */ for (; j < tabp->nx; j++ ) { a->y[j] = b->y[j] = c->y[j] = 0; } /* Normalize the curves so that area under Y curve is 1.0 */ xyz_scale = bn_tabdata_area2( b ); if ( fabs(xyz_scale) < VDIVIDE_TOL ) { bu_log("rt_spect_make_CIE_XYZ(): Area = 0 (no luminance) in this part of the spectrum, skipping normalization step\n"); return; } xyz_scale = 1 / xyz_scale; bn_tabdata_scale( a, a, xyz_scale ); bn_tabdata_scale( b, b, xyz_scale ); bn_tabdata_scale( c, c, xyz_scale ); }
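/*
 * Usage sketch (hypothetical): build a uniform wavelength sampling across the
 * visible band, generate the three CIE weighting curves, and release them.
 * bn_table_make_uniform() and bn_tabdata_free() are assumed from libbn's
 * tabdata interface; storage for the x/y/z curves is allocated by the routine
 * above, as its comment states.
 */
static void
example_cie_curves(void)
{
    struct bn_table *waves;
    struct bn_tabdata *cie_x, *cie_y, *cie_z;

    /* 100 samples from 380nm to 780nm */
    waves = bn_table_make_uniform(100, 380.0, 780.0);

    rt_spect_make_CIE_XYZ(&cie_x, &cie_y, &cie_z, waves);

    /* ... weight measured spectra by cie_x/y/z to obtain XYZ tristimulus
     * values, then convert to RGB with a 3x3 matrix ... */

    bn_tabdata_free(cie_x);
    bn_tabdata_free(cie_y);
    bn_tabdata_free(cie_z);
    /* the wavelength table itself would also be released when done */
}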
/* * T R E E T H E R M _ R E N D E R * * This is called (from viewshade() in shade.c) once for each hit point * to be shaded. The purpose here is to fill in values in the shadework * structure. */ int tthrm_render(struct application *ap, const struct partition *pp, struct shadework *swp, genptr_t dp) /* defined in material.h */ /* ptr to the shader-specific struct */ { register struct tthrm_specific *tthrm_sp = (struct tthrm_specific *)dp; struct rt_part_internal *part_p; point_t pt; vect_t pt_v; vect_t v; int solid_number; struct thrm_seg *thrm_seg; int best_idx; double best_val; double Vdot; int node; /* check the validity of the arguments we got */ RT_AP_CHECK(ap); RT_CHECK_PT(pp); CK_tthrm_SP(tthrm_sp); /* We are performing the shading in "region" space. We must * transform the hit point from "model" space to "region" space. * See the call to db_region_mat in tthrm_setup(). */ MAT4X3PNT(pt, tthrm_sp->tthrm_m_to_sh, swp->sw_hit.hit_point); if (rdebug&RDEBUG_SHADE) bu_log("tthrm_render(%s, %g %g %g)\n", tthrm_sp->tt_name, V3ARGS(pt)); solid_number = get_solid_number(pp); if (solid_number > tthrm_sp->tt_max_seg) { bu_log("%s:%d solid name %s has solid number higher than %ld\n", __FILE__, __LINE__, pp->pt_inseg->seg_stp->st_dp->d_namep, tthrm_sp->tt_max_seg); bu_bomb("Choke! ack! gasp! wheeeeeeze.\n"); } thrm_seg = &tthrm_sp->tt_segs[solid_number]; CK_THRM_SEG(thrm_seg); /* Extract the solid parameters for the particle we hit, * Compare them to the values for the node extracted. If they * don't match, then we probably have a mis-match between the * geometry and the treetherm output files. */ if (pp->pt_inseg->seg_stp->st_id != ID_PARTICLE) { bu_log("%d != ID_PART\n", pp->pt_inseg->seg_stp->st_id); bu_bomb(""); } part_p = (struct rt_part_internal *)pp->pt_inseg->seg_stp->st_specific; RT_PART_CK_MAGIC(part_p); VSUB2(v, part_p->part_V, thrm_seg->pt); if (MAGSQ(v) > 100.0) { double dist; dist = MAGNITUDE(v); /* Distance between particle origin and centroid of thermal * segment nodes is > 10.0mm (1cm). This suggests that * they aren't related. */ bu_log( "----------------------------- W A R N I N G -----------------------------\n\ %s:%d distance %g between origin of particle and thermal node centroid is\n\ too large. Probable mis-match between geometry and thermal data\n", __FILE__, __LINE__, dist); bu_bomb(""); }

/* * This is called (from viewshade() in shade.c) once for each hit point * to be shaded. The purpose here is to fill in values in the shadework * structure. * * dp is a pointer to the shader-specific struct */ int bbd_render(struct application *ap, const struct partition *pp, struct shadework *swp, void *dp) { register struct bbd_specific *bbd_sp = (struct bbd_specific *)dp; union tree *tp; struct bbd_img *bi; struct imgdist id[MAX_IMAGES]; size_t i; /* check the validity of the arguments we got */ RT_AP_CHECK(ap); RT_CHECK_PT(pp); CK_bbd_SP(bbd_sp); if (rdebug&RDEBUG_SHADE) { bu_struct_print("bbd_render Parameters:", bbd_print_tab, (char *)bbd_sp); bu_log("pixel %d %d\n", ap->a_x, ap->a_y); bu_log("bbd region: %s\n", pp->pt_regionp->reg_name); } tp = pp->pt_regionp->reg_treetop; if (tp->tr_a.tu_op != OP_SOLID) { bu_log("%s:%d region %s rendering bbd should have found OP_SOLID, not %d\n", __FILE__, __LINE__, pp->pt_regionp->reg_name, tp->tr_a.tu_op); bu_bomb("\n"); } swp->sw_transmit = 1.0; VSETALL(swp->sw_color, 0.0); VSETALL(swp->sw_basecolor, 1.0); i = 0; for (BU_LIST_FOR(bi, bbd_img, &bbd_sp->imgs)) { /* find out if the ray hits the plane */ id[i].index = i; id[i].bi = bi; id[i].status = bn_isect_line3_plane(&id[i].dist, ap->a_ray.r_pt, ap->a_ray.r_dir, bi->img_plane, &ap->a_rt_i->rti_tol); i++; } bu_sort(id, bbd_sp->img_count, sizeof(id[0]), &imgdist_compare, NULL); for (i = 0; i < bbd_sp->img_count && swp->sw_transmit > 0.0; i++) { if (id[i].status > 0) do_ray_image(ap, pp, swp, bbd_sp, id[i].bi, id[i].dist); } if (rdebug&RDEBUG_SHADE) { bu_log("color %g %g %g\n", V3ARGS(swp->sw_color)); } /* shader must perform transmission/reflection calculations * * 0 < swp->sw_transmit <= 1 causes transmission computations * 0 < swp->sw_reflect <= 1 causes reflection computations */ if (swp->sw_reflect > 0 || swp->sw_transmit > 0) { int level = ap->a_level; ap->a_level = 0; /* Bogus hack to keep rr_render from giving up */ (void)rr_render(ap, pp, swp); ap->a_level = level; } if (rdebug&RDEBUG_SHADE) { bu_log("color %g %g %g\n", V3ARGS(swp->sw_color)); } return 1; }
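/*
 * Sketch of a distance comparator of the kind bu_sort() is handed above, so
 * that billboard images are composited nearest-first.  This is a hypothetical
 * stand-in for imgdist_compare(); only the struct imgdist fields referenced
 * above (dist, status) are assumed to exist.
 */
static int
example_imgdist_compare(const void *a, const void *b, void *UNUSED(context))
{
    const struct imgdist *da = (const struct imgdist *)a;
    const struct imgdist *db = (const struct imgdist *)b;

    /* push images the ray missed (status <= 0) to the end */
    if (da->status <= 0 && db->status > 0) return 1;
    if (db->status <= 0 && da->status > 0) return -1;

    /* otherwise nearest intersection first */
    if (da->dist < db->dist) return -1;
    if (da->dist > db->dist) return 1;
    return 0;
}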
/** * If there is no ve_dist structure for this edge, compute one and * add it to the list. * * Sort an edge_info structure into the loops list of edgeuse status */ static struct edge_info * nmg_class_pt_eu(struct fpi *fpi, struct edgeuse *eu, struct edge_info *edge_list, const int in_or_out_only) { struct bn_tol tmp_tol; struct edgeuse *next_eu; struct ve_dist *ved, *ed; struct edge_info *ei_p; struct edge_info *ei; pointp_t eu_pt; vect_t left; vect_t v_to_pt; int found_data = 0; NMG_CK_FPI(fpi); BN_CK_TOL(fpi->tol); if (RTG.NMG_debug & DEBUG_PT_FU) { bu_log("pt (%g %g %g) vs_edge (%g %g %g) (%g %g %g) (eu=%p)\n", V3ARGS(fpi->pt), V3ARGS(eu->vu_p->v_p->vg_p->coord), V3ARGS(eu->eumate_p->vu_p->v_p->vg_p->coord), (void *)eu); } /* we didn't find a ve_dist structure for this edge, so we'll * have to do the calculations. */ tmp_tol = (*fpi->tol); if (in_or_out_only) { tmp_tol.dist = 0.0; tmp_tol.dist_sq = 0.0; } BU_ALLOC(ved, struct ve_dist); ved->magic_p = &eu->e_p->magic; ved->status = bn_distsq_pt3_lseg3(&ved->dist, eu->vu_p->v_p->vg_p->coord, eu->eumate_p->vu_p->v_p->vg_p->coord, fpi->pt, &tmp_tol); ved->v1 = eu->vu_p->v_p; ved->v2 = eu->eumate_p->vu_p->v_p; BU_LIST_MAGIC_SET(&ved->l, NMG_VE_DIST_MAGIC); BU_LIST_APPEND(&fpi->ve_dh, &ved->l); eu_pt = ved->v1->vg_p->coord; if (RTG.NMG_debug & DEBUG_PT_FU) { bu_log("nmg_class_pt_eu: status for eu %p (%g %g %g)<->(%g %g %g) vs pt (%g %g %g) is %d\n", (void *)eu, V3ARGS(eu->vu_p->v_p->vg_p->coord), V3ARGS(eu->eumate_p->vu_p->v_p->vg_p->coord), V3ARGS(fpi->pt), ved->status); bu_log("\tdist = %g\n", ved->dist); } /* Add a struct for this edgeuse to the loop's list of dist-sorted * edgeuses. */ BU_ALLOC(ei, struct edge_info); ei->ved_p = ved; ei->eu_p = eu; BU_LIST_MAGIC_SET(&ei->l, NMG_EDGE_INFO_MAGIC); /* compute the status (ei->status) of the point WRT this edge */ switch (ved->status) { case 0: /* pt is on the edge(use) */ ei->nmg_class = NMG_CLASS_AonBshared; if (fpi->eu_func && (fpi->hits == NMG_FPI_PERUSE || (fpi->hits == NMG_FPI_PERGEOM && !found_data))) { /* need to cast eu_func pointer for actual use as a function */ void (*cfp)(struct edgeuse *, point_t, const char*); cfp = (void (*)(struct edgeuse *, point_t, const char *))fpi->eu_func; cfp(eu, fpi->pt, fpi->priv); } break; case 1: /* within tolerance of endpoint at ved->v1 */ ei->nmg_class = NMG_CLASS_AonBshared; /* add an entry for the vertex in the edge list so that * other uses of this vertex will claim the point is within * tolerance without re-computing */ BU_ALLOC(ed, struct ve_dist); ed->magic_p = &ved->v1->magic; ed->status = ved->status; ed->v1 = ed->v2 = ved->v1; BU_LIST_MAGIC_SET(&ed->l, NMG_VE_DIST_MAGIC); BU_LIST_APPEND(&fpi->ve_dh, &ed->l); if (fpi->vu_func && (fpi->hits == NMG_FPI_PERUSE || (fpi->hits == NMG_FPI_PERGEOM && !found_data))) { /* need to cast vu_func pointer for actual use as a function */ void (*cfp)(struct vertexuse *, point_t, const char*); cfp = (void (*)(struct vertexuse *, point_t, const char *))fpi->vu_func; cfp(eu->vu_p, fpi->pt, fpi->priv); } break; case 2: /* within tolerance of endpoint at ved->v2 */ ei->nmg_class = NMG_CLASS_AonBshared; /* add an entry for the vertex in the edge list so that * other uses of this vertex will claim the point is within * tolerance without re-computing */ BU_ALLOC(ed, struct ve_dist); ed->magic_p = &ved->v2->magic; ed->status = ved->status; ed->v1 = ed->v2 = ved->v2; BU_LIST_MAGIC_SET(&ed->l, NMG_VE_DIST_MAGIC); BU_LIST_APPEND(&fpi->ve_dh, &ed->l); if (fpi->vu_func && (fpi->hits == NMG_FPI_PERUSE || (fpi->hits == 
NMG_FPI_PERGEOM && !found_data))) { /* need to cast vu_func pointer for actual use as a function */ void (*cfp)(struct vertexuse *, point_t, const char*); cfp = (void (*)(struct vertexuse *, point_t, const char *))fpi->vu_func; cfp(eu->eumate_p->vu_p, fpi->pt, fpi->priv); } break; case 3: /* PCA of pt on line is within tolerance of ved->v1 of segment */ ei->nmg_class = nmg_class_pt_euvu(fpi->pt, eu, fpi->tol); if (ei->nmg_class == NMG_CLASS_Unknown) ei->ved_p->dist = MAX_FASTF; break; case 4: /* PCA of pt on line is within tolerance of ved->v2 of segment */ next_eu = BU_LIST_PNEXT_CIRC(edgeuse, &eu->l); ei->nmg_class = nmg_class_pt_euvu(fpi->pt, next_eu, fpi->tol); if (ei->nmg_class == NMG_CLASS_Unknown) ei->ved_p->dist = MAX_FASTF; break; case 5: /* PCA is along length of edge, but point is NOT on edge. */ if (nmg_find_eu_left_non_unit(left, eu)) bu_bomb("can't find left vector\n"); /* take dot product of v->pt vector with left to determine * if pt is inside/left of edge */ VSUB2(v_to_pt, fpi->pt, eu_pt); if (VDOT(v_to_pt, left) > -SMALL_FASTF) ei->nmg_class = NMG_CLASS_AinB; else ei->nmg_class = NMG_CLASS_AoutB; break; default: bu_log("%s:%d status = %d\n", __FILE__, __LINE__, ved->status); bu_bomb("Why did this happen?"); break; } if (RTG.NMG_debug & DEBUG_PT_FU) { bu_log("pt @ dist %g from edge classed %s vs edge\n", ei->ved_p->dist, nmg_class_name(ei->nmg_class)); /* pl_pt_e(fpi, ei); */ } /* now that it's complete, add ei to the edge list */ for (BU_LIST_FOR(ei_p, edge_info, &edge_list->l)) { /* if the distance to this edge is smaller, or * if the distance is the same & the edge is the same * Insert edge_info struct here in list */ if (ved->dist < ei_p->ved_p->dist || (ZERO(ved->dist - ei_p->ved_p->dist) && ei_p->ved_p->magic_p == ved->magic_p)) { break; } } BU_LIST_INSERT(&ei_p->l, &ei->l); return ei; }
/* T R E E T H E R M _ S E T U P * * This routine is called (at prep time) * once for each region which uses this shader. * Any shader-specific initialization should be done here. */ HIDDEN int tthrm_setup(register struct region *rp, struct bu_vls *matparm, genptr_t *dpp, const struct mfuncs *UNUSED(mfp), struct rt_i *rtip) /* pointer to reg_udata in *rp */ /* New since 4.4 release */ { register struct tthrm_specific *tthrm_sp; struct bu_mapped_file *tt_file; char *tt_data; long cyl_tot = 0; long tseg; float *fp; float fv[4]; double min_temp; double max_temp; point_t center; point_t pt; vect_t dir; static const double inv_nodes = 1.0/8.0; int node; int i; int long_size = 0; size_t file_size_long; size_t file_size_int; /* check the arguments */ RT_CHECK_RTI(rtip); BU_CK_VLS(matparm); RT_CK_REGION(rp); if (rdebug&RDEBUG_SHADE) bu_log("tthrm_setup(Region:\"%s\", tthrm(%s))\n", rp->reg_name, bu_vls_addr(matparm)); /* Get memory for the shader parameters and shader-specific data */ BU_GET(tthrm_sp, struct tthrm_specific); *dpp = tthrm_sp; tthrm_sp->magic = tthrm_MAGIC; tthrm_sp->tt_name[0] = '\0'; tthrm_sp->tt_min_temp = tthrm_sp->tt_max_temp = 0.0; if (rdebug&RDEBUG_SHADE) bu_log("Parsing: (%s)\n", bu_vls_addr(matparm)); if (bu_struct_parse(matparm, tthrm_parse, (char *)tthrm_sp) < 0) { bu_bomb(__FILE__); } if (tthrm_sp->tt_name[0] == '\0') { bu_log("Must specify file for tthrm shader on %s (got \"%s\"\n", rp->reg_name, bu_vls_addr(matparm)); bu_bomb(__FILE__); } tt_file = bu_open_mapped_file(tthrm_sp->tt_name, (char *)NULL); if (!tt_file) { bu_log("Error mapping \"%s\"\n", tthrm_sp->tt_name); bu_bomb("shader tthrm: can't get thermal data"); } tt_data = tt_file->buf; if (rdebug&RDEBUG_SHADE) bu_log("tthrm_setup() data: %p total\n", (void *)tt_data); /* Compute how big the file should be, so that we can guess * at the size of the integer at the front of the file */ file_size_int = sizeof(int) + *((int *)tt_data) * (sizeof(short) + sizeof(float) * 4 * NUM_NODES); file_size_long = sizeof(long) + *((long *)tt_data) * (sizeof(short) + sizeof(float) * 4 * NUM_NODES); switch (sizeof(long)) { case 8: if (tt_file->buflen == file_size_long) { /* 64bit data on 64bit host */ long_size = sizeof(long); tthrm_sp->tt_max_seg = cyl_tot = *((long *)tt_data); } else if (tt_file->buflen == file_size_int) { /* 32bit data on 32bit host */ long_size = sizeof(int); tthrm_sp->tt_max_seg = cyl_tot = *((int *)tt_data); } break; case 4: if (tt_file->buflen == file_size_long) { /* 32bit data on 32bit host */ long_size = sizeof(long); tthrm_sp->tt_max_seg = cyl_tot = *((long *)tt_data); } else if (tt_file->buflen == (file_size_long+4)) { /* 64bit data on 32bit host */ cyl_tot = *((int *)tt_data); if (cyl_tot != 0) { bu_log("%s:%d thermal data written on 64bit machine with more that 2^32 segs\n", __FILE__, __LINE__); bu_bomb(""); } long_size = sizeof(long) + 4; tthrm_sp->tt_max_seg = cyl_tot = ((int *)tt_data)[1]; } break; default: bu_log("a long int is %d bytes on this machine\n", sizeof(long)); bu_bomb("I can only handle 4 or 8 byte longs\n"); break; } if (rdebug&RDEBUG_SHADE) bu_log("cyl_tot = %ld\n", cyl_tot); tthrm_sp->tt_segs = (struct thrm_seg *) bu_calloc(cyl_tot, sizeof(struct thrm_seg), "thermal segs"); min_temp = MAX_FASTF; max_temp = -MAX_FASTF; #define CYL_DATA(_n) ((float *) (&tt_data[ \ long_size + \ (_n) * (sizeof(short) + sizeof(float) * 4 * NUM_NODES) + \ sizeof(short) \ ])) for (tseg = 0; tseg < cyl_tot; tseg++) { /* compute centerpoint, min/max temperature values */ fp = CYL_DATA(tseg); VSETALL(center, 
0.0); for (node=0; node < NUM_NODES; node++, fp+=4) { /* this is necessary to assure that all float * values are aligned on 4-byte boundaries */ memcpy(fv, fp, sizeof(float)*4); if (rdebug&RDEBUG_SHADE) bu_log("tthrm_setup() node %d (%g %g %g) %g\n", node, fv[0], fv[1], fv[2], fv[3]); /* make sure we don't have any "infinity" values */ for (i=0; i < 4; i++) { if (fv[i] > MAX_FASTF || fv[i] < -MAX_FASTF) { bu_log("%s:%d seg %ld node %d coord %d out of bounds: %g\n", __FILE__, __LINE__, tseg, node, i, fv[i]); bu_bomb("choke, gasp, *croak*\n"); } } /* copy the values to the segment list, converting * from Meters to Millimeters in the process */ VSCALE(tthrm_sp->tt_segs[tseg].node[node], fv, 1000.0); tthrm_sp->tt_segs[tseg].temperature[node] = fv[3]; VADD2(center, center, fv); if (fv[3] > max_temp) max_temp = fv[3]; if (fv[3] < min_temp) min_temp = fv[3]; } VSCALE(center, center, 1000.0); VSCALE(tthrm_sp->tt_segs[tseg].pt, center, inv_nodes); if (rdebug&RDEBUG_SHADE) { bu_log("Center: (%g %g %g) (now in mm, not m)\n", V3ARGS(tthrm_sp->tt_segs[tseg].pt)); } /* compute vectors from center pt for each node */ fp = CYL_DATA(tseg); for (node=0; node < NUM_NODES; node++, fp+=4) { /* this is necessary to assure that all float * values are aligned on 4-byte boundaries */ memcpy(fv, fp, sizeof(float)*4); VSCALE(pt, fv, 1000.0); VSUB2(tthrm_sp->tt_segs[tseg].vect[node], pt, tthrm_sp->tt_segs[tseg].pt ); } /* compute a direction vector for the thermal segment */ VCROSS(dir, tthrm_sp->tt_segs[tseg].vect[0], tthrm_sp->tt_segs[tseg].vect[2]); VUNITIZE(dir); VMOVE(tthrm_sp->tt_segs[tseg].dir, dir); tthrm_sp->tt_segs[tseg].magic = THRM_SEG_MAGIC; } bu_close_mapped_file(tt_file); if (ZERO(tthrm_sp->tt_min_temp) && EQUAL(tthrm_sp->tt_max_temp, SMALL_FASTF)) { tthrm_sp->tt_min_temp = min_temp; tthrm_sp->tt_max_temp = max_temp; bu_log("computed temp min/max on %s: %g/%g\n", rp->reg_name, min_temp, max_temp); } else { min_temp =tthrm_sp->tt_min_temp; max_temp = tthrm_sp->tt_max_temp; bu_log("taking user specified on %s: min/max %g/%g\n", rp->reg_name, min_temp, max_temp); } if (!EQUAL(max_temp, min_temp)) { tthrm_sp->tt_temp_scale = 1.0 / (max_temp - min_temp); } else { /* min and max are equal, maybe zero */ if (ZERO(max_temp)) tthrm_sp->tt_temp_scale = 0.0; else tthrm_sp->tt_temp_scale = 255.0/max_temp; } /* The shader needs to operate in a coordinate system which stays * fixed on the region when the region is moved (as in animation) * we need to get a matrix to perform the appropriate transform(s). * * Shading is done in "region coordinates": */ db_region_mat(tthrm_sp->tthrm_m_to_sh, rtip->rti_dbip, rp->reg_name, &rt_uniresource); if (rdebug&RDEBUG_SHADE) { bu_log("min_temp: %17.14e max_temp %17.14e temp_scale: %17.14e\n", tthrm_sp->tt_min_temp, tthrm_sp->tt_max_temp, tthrm_sp->tt_temp_scale); bu_log("tthrm_setup(%s, %s)done\n", rp->reg_name, bu_vls_addr(matparm)); tthrm_print(rp, *dpp); } return 1; }
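/*
 * Standalone illustration of the alignment trick used in the node loop above:
 * the thermal file packs a short followed by floats, so a float at an
 * arbitrary byte offset is read by memcpy into an aligned local rather than
 * by dereferencing a possibly misaligned float pointer.  Uses only standard C
 * (memcpy from string.h, already pulled in by this file).
 */
static float
example_read_packed_float(const unsigned char *buf, size_t byte_offset)
{
    float value;

    /* safe on any platform; *(float *)(buf + byte_offset) is not */
    memcpy(&value, buf + byte_offset, sizeof(float));
    return value;
}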
int nmg_class_pt_euvu(const fastf_t *pt, struct edgeuse *eu_in, const struct bn_tol *tol) { struct loopuse *lu; struct edgeuse *prev_eu; struct edgeuse *eu; struct vertex *v0, *v1, *v2; vect_t left; vect_t eu_dir; vect_t other_eudir; vect_t pt_dir; fastf_t xo, yo; fastf_t xpt, ypt; fastf_t len; int quado, quadpt; int nmg_class = NMG_CLASS_Unknown; int eu_is_crack = 0; int prev_eu_is_crack = 0; NMG_CK_EDGEUSE(eu_in); BN_CK_TOL(tol); eu = eu_in; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("nmg_class_pt_euvu((%g %g %g), eu=%p)\n", V3ARGS(pt), (void *)eu); if (UNLIKELY(*eu->up.magic_p != NMG_LOOPUSE_MAGIC)) { bu_log("nmg_class_pt_euvu() called with eu (%p) that isn't part of a loop\n", (void *)eu); bu_bomb("nmg_class_pt_euvu() called with eu that isn't part of a loop"); } lu = eu->up.lu_p; NMG_CK_LOOPUSE(lu); eu_is_crack = nmg_eu_is_part_of_crack(eu); prev_eu = BU_LIST_PPREV_CIRC(edgeuse, &eu->l); prev_eu_is_crack = nmg_eu_is_part_of_crack(prev_eu); /* if both EU's are cracks, we cannot classify */ if (eu_is_crack && prev_eu_is_crack) return NMG_CLASS_Unknown; if (eu_is_crack) { struct edgeuse *eu_test; int done = 0; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("nmg_class_pt_euvu: eu %p is a crack\n", (void *)eu); /* find next eu from this vertex that is not a crack */ eu_test = BU_LIST_PNEXT_CIRC(edgeuse, &eu->l); while (!done) { while (eu_test->vu_p->v_p != eu->vu_p->v_p && eu_test != eu) eu_test = BU_LIST_PNEXT_CIRC(edgeuse, &eu_test->l); if (eu_test == eu) done = 1; if (!nmg_eu_is_part_of_crack(eu_test)) done = 1; if (!done) eu_test = BU_LIST_PNEXT_CIRC(edgeuse, &eu_test->l); } if (eu_test == eu) /* can't get away from crack */ return NMG_CLASS_Unknown; else eu = eu_test; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tUsing eu %p instead\n", (void *)eu); } if (prev_eu_is_crack) { struct edgeuse *eu_test; int done = 0; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("nmg_class_pt_euvu: prev_eu (%p) is a crack\n", (void *)prev_eu); /* find previous eu ending at this vertex that is not a crack */ eu_test = BU_LIST_PPREV_CIRC(edgeuse, &prev_eu->l); while (!done) { while (eu_test->eumate_p->vu_p->v_p != eu->vu_p->v_p && eu_test != prev_eu) eu_test = BU_LIST_PPREV_CIRC(edgeuse, &eu_test->l); if (eu_test == prev_eu) done = 1; if (!nmg_eu_is_part_of_crack(eu_test)) done = 1; if (!done) eu_test = BU_LIST_PPREV_CIRC(edgeuse, &eu_test->l); } if (eu_test == prev_eu) /* can't get away from crack */ return NMG_CLASS_Unknown; else prev_eu = eu_test; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tUsing prev_eu %p instead\n", (void *)prev_eu); } /* left is the Y-axis of our XY-coordinate system */ if (UNLIKELY(nmg_find_eu_leftvec(left, eu))) { bu_log("nmg_class_pt_euvu: nmg_find_eu_leftvec() for eu=%p failed!\n", (void *)eu); bu_bomb("nmg_class_pt_euvu: nmg_find_eu_leftvec() failed!"); } if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tprev_eu = %p, left = (%g %g %g)\n", (void *)prev_eu, V3ARGS(left)); /* v0 is the origin of the XY-coordinate system */ v0 = eu->vu_p->v_p; NMG_CK_VERTEX(v0); /* v1 is on the X-axis */ v1 = eu->eumate_p->vu_p->v_p; NMG_CK_VERTEX(v1); /* v2 determines angle prev_eu makes with X-axis */ v2 = prev_eu->vu_p->v_p; NMG_CK_VERTEX(v2); if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tv0=%p, v1=%p, v2=%p\n", (void *)v0, (void *)v1, (void *)v2); /* eu_dir is our X-direction */ VSUB2(eu_dir, v1->vg_p->coord, v0->vg_p->coord); /* other_eudir is direction along the previous EU (from origin) */ VSUB2(other_eudir, v2->vg_p->coord, v0->vg_p->coord); if 
(UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\teu_dir=(%g %g %g), other_eudir=(%g %g %g)\n", V3ARGS(eu_dir), V3ARGS(other_eudir)); /* get X and Y components for other_eu */ xo = VDOT(eu_dir, other_eudir); yo = VDOT(left, other_eudir); /* which quadrant does this XY point lie in */ quado = Quadrant(xo, yo); if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\txo=%g, yo=%g, quadrant=%d\n", xo, yo, quado); /* get direction to PT from origin */ VSUB2(pt_dir, pt, v0->vg_p->coord); if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tpt_dir=(%g %g %g)\n", V3ARGS(pt_dir)); /* get X and Y components for PT */ xpt = VDOT(eu_dir, pt_dir); ypt = VDOT(left, pt_dir); /* which quadrant does this XY point lie in */ quadpt = Quadrant(xpt, ypt); if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\txpt=%g, ypt=%g, quadrant=%d\n", xpt, ypt, quadpt); /* do a quadrant comparison first (cheap!!!) */ if (quadpt < quado) return NMG_CLASS_AinB; if (quadpt > quado) return NMG_CLASS_AoutB; /* both are in the same quadrant, need to normalize the coordinates */ len = sqrt(xo*xo + yo*yo); xo = xo/len; yo = yo/len; len = sqrt(xpt*xpt + ypt*ypt); xpt = xpt/len; ypt = ypt/len; if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("\tNormalized xo, yo=(%g %g), xpt, ypt=(%g %g)\n", xo, yo, xpt, ypt); switch (quadpt) { case 1: if (xpt >= xo && ypt <= yo) nmg_class = NMG_CLASS_AinB; else nmg_class = NMG_CLASS_AoutB; break; case 2: if (xpt >= xo && ypt >= yo) nmg_class = NMG_CLASS_AinB; else nmg_class = NMG_CLASS_AoutB; break; case 3: if (xpt <= xo && ypt >= yo) nmg_class = NMG_CLASS_AinB; else nmg_class = NMG_CLASS_AoutB; break; case 4: if (xpt <= xo && ypt <= yo) nmg_class = NMG_CLASS_AinB; else nmg_class = NMG_CLASS_AoutB; break; default: bu_log("This can't happen (illegal quadrant %d)\n", quadpt); bu_bomb("This can't happen (illegal quadrant)\n"); break; } if (UNLIKELY(RTG.NMG_debug & DEBUG_PT_FU)) bu_log("returning %s\n", nmg_class_name(nmg_class)); return nmg_class; }
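/*
 * Standalone sketch (hypothetical helper, not the Quadrant() used above) of
 * the 2D quadrant trick: both the reference edge direction and the query
 * point are projected onto the local (x = eu_dir, y = left) frame, binned
 * into quadrants numbered counter-clockwise, and compared coarsely before
 * the normalized comparison is attempted.
 */
static int
example_quadrant(double x, double y)
{
    if (x >= 0.0)
	return (y >= 0.0) ? 1 : 4;	/* +x half-plane */
    return (y >= 0.0) ? 2 : 3;		/* -x half-plane */
}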
size_t bu_avail_cpus(void) { int ncpu = -1; #ifdef PARALLEL # if defined(__sp3__) if (ncpu < 0) { int status; int cmd; int parmlen; struct var p; cmd = SYS_GETPARMS; parmlen = sizeof(struct var); if (sysconfig(cmd, &p, parmlen) != 0) { bu_bomb("bu_parallel(): sysconfig error for sp3"); } ncpu = p.v_ncpus; } # endif /* __sp3__ */ # ifdef __FreeBSD__ if (ncpu < 0) { int maxproc; size_t len; len = 4; if (sysctlbyname("hw.ncpu", &maxproc, &len, NULL, 0) == -1) { perror("sysctlbyname"); } else { ncpu = maxproc; } } # endif # if defined(__APPLE__) if (ncpu < 0) { size_t len; int maxproc; int mib[] = {CTL_HW, HW_AVAILCPU}; len = sizeof(maxproc); if (sysctl(mib, 2, &maxproc, &len, NULL, 0) == -1) { perror("sysctl"); } else { ncpu = maxproc; /* should be able to get sysctl to return maxproc */ } } # endif /* __ppc__ */ # if defined(HAVE_GET_NPROCS) if (ncpu < 0) { ncpu = get_nprocs(); /* GNU extension from sys/sysinfo.h */ } # endif /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(_SC_NPROCESSORS_ONLN) /* SUNOS and linux (and now Mac 10.6+) */ if (ncpu < 0) { ncpu = sysconf(_SC_NPROCESSORS_ONLN); if (ncpu < 0) { perror("Unable to get the number of available CPUs"); } } #endif #if defined(_SC_NPROC_ONLN) if (ncpu < 0) { ncpu = sysconf(_SC_NPROC_ONLN); if (ncpu < 0) { perror("Unable to get the number of available CPUs"); } } #endif # if defined(linux) if (ncpu < 0) { /* old linux method */ /* * Ultra-kludgey way to determine the number of cpus in a * linux box--count the number of processor entries in * /proc/cpuinfo! */ # define CPUINFO_FILE "/proc/cpuinfo" FILE *fp; char buf[128]; fp = fopen (CPUINFO_FILE, "r"); if (fp == NULL) { perror (CPUINFO_FILE); } else { ncpu = 0; while (bu_fgets(buf, 80, fp) != NULL) { if (bu_strncmp (buf, "processor", 9) == 0) { ncpu++; } } fclose (fp); } } # endif # if defined(_WIN32) /* Windows */ if (ncpu < 0) { SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); ncpu = (int)sysinfo.dwNumberOfProcessors; } # endif #endif /* PARALLEL */ if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { /* do not use bu_log() here, this can get called before semaphores are initialized */ fprintf(stderr, "bu_avail_cpus: counted %d cpus.\n", ncpu); } if (LIKELY(ncpu > 0)) { return ncpu; } /* non-PARALLEL */ return 1; }
void bu_parallel(void (*func)(int, void *), int ncpu, void *arg) { /* avoid using the 'register' keyword in here "just in case" */ #ifndef PARALLEL bu_log("bu_parallel(%d., %p): Not compiled for PARALLEL machine, running single-threaded\n", ncpu, arg); /* do the work anyways */ (*func)(0, arg); #else struct thread_data *user_thread_data_bu; int avail_cpus = 1; int x; char *libbu_affinity = NULL; /* OFF by default until linux issue is debugged */ int affinity = 0; /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(SUNOS) && SUNOS >= 52 static int concurrency = 0; /* Max concurrency we have set */ # endif # if (defined(SUNOS) && SUNOS >= 52) || defined(HAVE_PTHREAD_H) int nthreadc; int nthreade; rt_thread_t thread; rt_thread_t thread_tbl[MAX_PSW]; int i; # endif /* SUNOS */ # ifdef WIN32 int nthreadc = ncpu; HANDLE hThreadArray[MAX_PSW] = {0}; int i; DWORD returnCode; # endif /* WIN32 */ if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%d, %p)\n", ncpu, arg); if (UNLIKELY(pid_of_initiating_thread)) bu_bomb("bu_parallel() called from within parallel section\n"); pid_of_initiating_thread = bu_process_id(); if (ncpu > MAX_PSW) { bu_log("WARNING: bu_parallel() ncpu(%d) > MAX_PSW(%d), adjusting ncpu\n", ncpu, MAX_PSW); ncpu = MAX_PSW; } parallel_nthreads_started = 0; parallel_nthreads_finished = 0; libbu_affinity = getenv("LIBBU_AFFINITY"); if (libbu_affinity) affinity = (int)strtol(libbu_affinity, NULL, 0x10); if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { if (affinity) bu_log("CPU affinity enabled. (LIBBU_AFFINITY=%d)\n", affinity); else bu_log("CPU affinity disabled.\n", affinity); } user_thread_data_bu = (struct thread_data *)bu_calloc(ncpu, sizeof(*user_thread_data_bu), "struct thread_data *user_thread_data_bu"); /* Fill in the data of user_thread_data_bu structures of all threads */ for (x = 0; x < ncpu; x++) { user_thread_data_bu[x].user_func = func; user_thread_data_bu[x].user_arg = arg; user_thread_data_bu[x].cpu_id = x; user_thread_data_bu[x].counted = 0; user_thread_data_bu[x].affinity = affinity; } /* if we're in debug mode, allow additional cpus */ if (!(bu_debug & BU_DEBUG_PARALLEL)) { avail_cpus = bu_avail_cpus(); if (ncpu > avail_cpus) { bu_log("%d cpus requested, but only %d available\n", ncpu, avail_cpus); ncpu = avail_cpus; } } /* * multithreading support for SunOS 5.X / Solaris 2.x */ # if defined(SUNOS) && SUNOS >= 52 thread = 0; nthreadc = 0; /* Give the thread system a hint... */ if (ncpu > concurrency) { if (thr_setconcurrency(ncpu)) { bu_log("ERROR parallel.c/bu_parallel(): thr_setconcurrency(%d) failed\n", ncpu); /* Not much to do, lump it */ } else { concurrency = ncpu; } } /* Create the threads */ for (x = 0; x < ncpu; x++) { if (thr_create(0, 0, (void *(*)(void *))parallel_interface_arg, &user_thread_data_bu[x], 0, &thread)) { bu_log("ERROR: bu_parallel: thr_create(0x0, 0x0, 0x%x, 0x0, 0, 0x%x) failed for processor thread # %d\n", parallel_interface_arg, &thread, x); /* Not much to do, lump it */ } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): created thread: (thread: 0x%x) (loop:%d) (nthreadc:%d)\n", thread, x, nthreadc); thread_tbl[nthreadc] = thread; nthreadc++; } } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) for (i = 0; i < nthreadc; i++) bu_log("bu_parallel(): thread_tbl[%d] = 0x%x\n", i, thread_tbl[i]); /* * Wait for completion of all threads. We don't wait for threads * in order. 
We wait for any old thread but we keep track of how * many have returned and whether it is one that we started */ thread = 0; nthreade = 0; for (x = 0; x < nthreadc; x++) { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread to complete:\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", x, nthreadc, nthreade); if (thr_join((rt_thread_t)0, &thread, NULL)) { /* badness happened */ perror("thr_join"); bu_log("thr_join() failed"); } /* Check to see if this is one the threads we created */ for (i = 0; i < nthreadc; i++) { if (thread_tbl[i] == thread) { thread_tbl[i] = (rt_thread_t)-1; nthreade++; break; } } if ((thread_tbl[i] != (rt_thread_t)-1) && i < nthreadc) { bu_log("bu_parallel(): unknown thread %d completed.\n", thread); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %d)\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %d threads created. %d threads exited.\n", nthreadc, nthreade); # endif /* SUNOS */ # if defined(HAVE_PTHREAD_H) thread = 0; nthreadc = 0; /* Create the posix threads. * * Start at 1 so we can treat the parent as thread 0. */ for (x = 0; x < ncpu; x++) { pthread_attr_t attrs; pthread_attr_init(&attrs); pthread_attr_setstacksize(&attrs, 10*1024*1024); if (pthread_create(&thread, &attrs, (void *(*)(void *))parallel_interface_arg, &user_thread_data_bu[x])) { bu_log("ERROR: bu_parallel: pthread_create(0x0, 0x0, 0x%lx, 0x0, 0, %p) failed for processor thread # %d\n", (unsigned long int)parallel_interface_arg, (void *)&thread, x); /* Not much to do, lump it */ } else { if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { bu_log("bu_parallel(): created thread: (thread: %p) (loop: %d) (nthreadc: %d)\n", (void*)thread, x, nthreadc); } thread_tbl[nthreadc] = thread; nthreadc++; } /* done with the attributes after create */ pthread_attr_destroy(&attrs); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) { for (i = 0; i < nthreadc; i++) { bu_log("bu_parallel(): thread_tbl[%d] = %p\n", i, (void *)thread_tbl[i]); } # ifdef SIGINFO /* may be BSD-only (calls _thread_dump_info()) */ raise(SIGINFO); # endif } /* * Wait for completion of all threads. * Wait for them in order. */ thread = 0; nthreade = 0; for (x = 0; x < nthreadc; x++) { int ret; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): waiting for thread %p to complete:\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", (void *)thread_tbl[x], x, nthreadc, nthreade); if ((ret = pthread_join(thread_tbl[x], NULL)) != 0) { /* badness happened */ bu_log("pthread_join(thread_tbl[%d]=%p) ret=%d\n", x, (void *)thread_tbl[x], ret); } nthreade++; thread_tbl[x] = (rt_thread_t)-1; if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): thread completed: (thread: %p)\t(loop:%d) (nthreadc:%d) (nthreade:%d)\n", (void *)thread, x, nthreadc, nthreade); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(): %d threads created. 
%d threads exited.\n", nthreadc, nthreade); # endif /* end if posix threads */ # ifdef WIN32 /* Create the Win32 threads */ for (i = 0; i < nthreadc; i++) { hThreadArray[i] = CreateThread( NULL, 0, (LPTHREAD_START_ROUTINE)parallel_interface_arg_stub, &user_thread_data_bu[i], 0, NULL); /* Ensure that all successfully created threads are in sequential order.*/ if (hThreadArray[i] == NULL) { bu_log("bu_parallel(): Error in CreateThread, Win32 error code %d.\n", GetLastError()); --nthreadc; } } /* Wait for other threads in the array */ returnCode = WaitForMultipleObjects(nthreadc, hThreadArray, TRUE, INFINITE); if (returnCode == WAIT_FAILED) { bu_log("bu_parallel(): Error in WaitForMultipleObjects, Win32 error code %d.\n", GetLastError()); } for (x = 0; x < nthreadc; x++) { int ret; if ((ret = CloseHandle(hThreadArray[x]) == 0)) { /* Thread didn't close properly if return value is zero; don't retry and potentially loop forever. */ bu_log("bu_parallel(): Error closing thread %d of %d, Win32 error code %d.\n", x, nthreadc, GetLastError()); } } # endif /* end if Win32 threads */ /* * Ensure that all the threads are REALLY finished. On some * systems, if threads core dump, the rest of the gang keeps * going, so this can actually happen (sigh). */ if (UNLIKELY(parallel_nthreads_finished != parallel_nthreads_started)) { bu_log("*** ERROR bu_parallel(%d): %d workers did not finish!\n\n", ncpu, ncpu - parallel_nthreads_finished); } if (UNLIKELY(parallel_nthreads_started != ncpu)) { bu_log("bu_parallel() NOTICE: only %d workers started, expected %d\n", parallel_nthreads_started, ncpu); } if (UNLIKELY(bu_debug & BU_DEBUG_PARALLEL)) bu_log("bu_parallel(%d) complete, now serial\n", ncpu); # if defined(unix) || defined(__unix) /* Cray is known to wander among various pids, perhaps others. * * At this point, all multi-tasking activity should have ceased, * and we should be just a single UNIX process with our original * PID and open file table (kernel struct u). If not, then any * output may be written into the wrong file. */ x = bu_process_id(); if (UNLIKELY(pid_of_initiating_thread != x)) { bu_log("WARNING: bu_parallel(): PID of initiating thread changed from %d to %d, open file table may be botched!\n", pid_of_initiating_thread, x); } # endif pid_of_initiating_thread = 0; /* No threads any more */ bu_free(user_thread_data_bu, "struct thread_data *user_thread_data_bu"); #endif /* PARALLEL */ return; }
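/*
 * Minimal sketch of driving bu_parallel(): a worker receives its cpu index
 * and the shared argument pointer, and each worker claims a slice of the job.
 * The interleaved slicing scheme and struct example_job are hypothetical;
 * bu_avail_cpus(), bu_parallel() and the BU_SEM_SYSCALL semaphore are the
 * interfaces shown in this file.
 */
struct example_job {
    int nitems;
    double *results;
};

static void
example_worker(int cpu, void *arg)
{
    struct example_job *job = (struct example_job *)arg;
    size_t ncpu = bu_avail_cpus();
    int i;

    /* static interleave: worker 'cpu' handles items cpu, cpu+ncpu, ... */
    for (i = cpu; i < job->nitems; i += (int)ncpu) {
	job->results[i] = (double)i * (double)i;

	bu_semaphore_acquire(BU_SEM_SYSCALL);
	bu_log("cpu %d did item %d\n", cpu, i);
	bu_semaphore_release(BU_SEM_SYSCALL);
    }
}

static void
example_run_parallel(struct example_job *job)
{
    /* blocks until every worker has returned */
    bu_parallel(example_worker, (int)bu_avail_cpus(), (void *)job);
}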
/* * R R _ R E N D E R */ int rr_render(register struct application *ap, const struct partition *pp, struct shadework *swp) { struct application sub_ap; vect_t work; vect_t incident_dir; fastf_t shader_fract; fastf_t reflect; fastf_t transmit; #ifdef RT_MULTISPECTRAL struct bn_tabdata *ms_filter_color = BN_TABDATA_NULL; struct bn_tabdata *ms_shader_color = BN_TABDATA_NULL; struct bn_tabdata *ms_reflect_color = BN_TABDATA_NULL; struct bn_tabdata *ms_transmit_color = BN_TABDATA_NULL; #else vect_t filter_color; vect_t shader_color; vect_t reflect_color; vect_t transmit_color; #endif fastf_t attenuation; vect_t to_eye; int code; RT_AP_CHECK(ap); RT_APPLICATION_INIT(&sub_ap); #ifdef RT_MULTISPECTRAL sub_ap.a_spectrum = BN_TABDATA_NULL; ms_reflect_color = bn_tabdata_get_constval(0.0, spectrum); #endif /* * sw_xmitonly is set primarily for light visibility rays. * Need to compute (partial) transmission through to the light, * or procedural shaders won't be able to cast shadows * and light won't be able to get through glass * (including "stained glass" and "filter glass"). * * On the other hand, light visibility rays shouldn't be refracted, * it is pointless to shoot at where the light isn't. */ if (swp->sw_xmitonly) { /* Caller wants transmission term only, don't fire reflected rays */ transmit = swp->sw_transmit + swp->sw_reflect; /* Don't loose energy */ reflect = 0; } else { reflect = swp->sw_reflect; transmit = swp->sw_transmit; } if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render(%s) START: lvl=%d reflect=%g, transmit=%g, xmitonly=%d\n", pp->pt_regionp->reg_name, ap->a_level, reflect, transmit, swp->sw_xmitonly); } if (reflect <= 0 && transmit <= 0) goto out; if (ap->a_level > max_bounces) { /* Nothing more to do for this ray */ static long count = 0; /* Not PARALLEL, should be OK */ if ((R_DEBUG&(RDEBUG_SHOWERR|RDEBUG_REFRACT)) && ( count++ < MSG_PROLOGUE || (count%MSG_INTERVAL) == 3 )) { bu_log("rr_render: %d, %d MAX BOUNCES=%d: %s\n", ap->a_x, ap->a_y, ap->a_level, pp->pt_regionp->reg_name); } /* * Return the basic color of the object, ignoring the * the fact that it is supposed to be * filtering or reflecting light here. * This is much better than returning just black, * but something better might be done. */ #ifdef RT_MULTISPECTRAL BN_CK_TABDATA(swp->msw_color); BN_CK_TABDATA(swp->msw_basecolor); bn_tabdata_copy(swp->msw_color, swp->msw_basecolor); #else VMOVE(swp->sw_color, swp->sw_basecolor); #endif ap->a_cumlen += pp->pt_inhit->hit_dist; goto out; } #ifdef RT_MULTISPECTRAL BN_CK_TABDATA(swp->msw_basecolor); ms_filter_color = bn_tabdata_dup(swp->msw_basecolor); #else VMOVE(filter_color, swp->sw_basecolor); #endif if ((swp->sw_inputs & (MFI_HIT|MFI_NORMAL)) != (MFI_HIT|MFI_NORMAL)) shade_inputs(ap, pp, swp, MFI_HIT|MFI_NORMAL); /* * If this ray is being fired from the exit point of * an object, and is directly entering another object, * (i.e., there is no intervening air-gap), and * the two refractive indices match, then do not fire a * reflected ray -- just take the transmission contribution. * This is important, e.g., for glass gun tubes projecting * through a glass armor plate. 
:-) */ if (NEAR_ZERO(pp->pt_inhit->hit_dist, AIR_GAP_TOL) && ZERO(ap->a_refrac_index - swp->sw_refrac_index)) { transmit += reflect; reflect = 0; } /* * Diminish base color appropriately, and add in * contributions from mirror reflection & transparency */ shader_fract = 1 - (reflect + transmit); if (shader_fract < 0) { shader_fract = 0; } else if (shader_fract >= 1) { goto out; } if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: lvl=%d start shader=%g, reflect=%g, transmit=%g %s\n", ap->a_level, shader_fract, reflect, transmit, pp->pt_regionp->reg_name); } #ifdef RT_MULTISPECTRAL BN_GET_TABDATA(ms_shader_color, swp->msw_color->table); bn_tabdata_scale(ms_shader_color, swp->msw_color, shader_fract); #else VSCALE(shader_color, swp->sw_color, shader_fract); #endif /* * Compute transmission through an object. * There may be a mirror reflection, which will be handled * by the reflection code later */ if (transmit > 0) { if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: lvl=%d transmit=%g. Calculate refraction at entrance to %s.\n", ap->a_level, transmit, pp->pt_regionp->reg_name); } /* * Calculate refraction at entrance. */ sub_ap = *ap; /* struct copy */ #ifdef RT_MULTISPECTRAL sub_ap.a_spectrum = bn_tabdata_dup((struct bn_tabdata *)ap->a_spectrum); #endif sub_ap.a_level = 0; /* # of internal reflections */ sub_ap.a_cumlen = 0; /* distance through the glass */ sub_ap.a_user = -1; /* sanity */ sub_ap.a_rbeam = ap->a_rbeam + swp->sw_hit.hit_dist * ap->a_diverge; sub_ap.a_diverge = 0.0; sub_ap.a_uptr = (genptr_t)(pp->pt_regionp); VMOVE(sub_ap.a_ray.r_pt, swp->sw_hit.hit_point); VMOVE(incident_dir, ap->a_ray.r_dir); /* If there is an air gap, reset ray's RI to air */ if (pp->pt_inhit->hit_dist > AIR_GAP_TOL) sub_ap.a_refrac_index = RI_AIR; if (!ZERO(sub_ap.a_refrac_index - swp->sw_refrac_index) && !rr_refract(incident_dir, /* input direction */ swp->sw_hit.hit_normal, /* exit normal */ sub_ap.a_refrac_index, /* current RI */ swp->sw_refrac_index, /* next RI */ sub_ap.a_ray.r_dir /* output direction */ )) { /* * Ray was mirror reflected back outside solid. * Just add contribution to reflection, * and quit. */ reflect += transmit; transmit = 0; #ifdef RT_MULTISPECTRAL ms_transmit_color = bn_tabdata_get_constval(0.0, spectrum); #else VSETALL(transmit_color, 0); #endif if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: lvl=%d change xmit into reflection %s\n", ap->a_level, pp->pt_regionp->reg_name); } goto do_reflection; } if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: lvl=%d begin transmission through %s.\n", ap->a_level, pp->pt_regionp->reg_name); } /* * Find new exit point from the inside. * We will iterate, but not recurse, due to the special * (non-recursing) hit and miss routines used here for * internal reflection. * * a_onehit is set to 3, so that where possible, * rr_hit() will be given three accurate hit points: * the entry and exit points of this glass region, * and the entry point into the next region. * This permits calculation of the departing * refraction angle based on the RI of the current and * *next* regions along the ray. */ sub_ap.a_purpose = "rr first glass transmission ray"; sub_ap.a_flag = 0; do_inside: sub_ap.a_hit = rr_hit; sub_ap.a_miss = rr_miss; sub_ap.a_logoverlap = ap->a_logoverlap; sub_ap.a_onehit = 3; sub_ap.a_rbeam = ap->a_rbeam + swp->sw_hit.hit_dist * ap->a_diverge; sub_ap.a_diverge = 0.0; switch (code = rt_shootray(&sub_ap)) { case 3: /* More glass to come. * uvec=exit_pt, vvec=N, a_refrac_index = next RI. */ break; case 2: /* No more glass to come. 
* uvec=exit_pt, vvec=N, a_refrac_index = next RI. */ break; case 1: /* Treat as escaping ray */ if (R_DEBUG&RDEBUG_REFRACT) bu_log("rr_refract: Treating as escaping ray\n"); goto do_exit; case 0: default: /* Dreadful error */ #ifdef RT_MULTISPECTRAL bu_bomb("rr_refract: Stuck in glass. Very green pixel, unsupported in multi-spectral mode\n"); #else VSET(swp->sw_color, 0, 99, 0); /* very green */ #endif goto out; /* abandon hope */ } if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: calculating refraction @ exit from %s (green)\n", pp->pt_regionp->reg_name); bu_log("Start point to exit point:\n\ vdraw open rr;vdraw params c 00ff00; vdraw write n 0 %g %g %g; vdraw wwrite n 1 %g %g %g; vdraw send\n", V3ARGS(sub_ap.a_ray.r_pt), V3ARGS(sub_ap.a_uvec)); } /* NOTE: rr_hit returns EXIT Point in sub_ap.a_uvec, * and returns EXIT Normal in sub_ap.a_vvec, * and returns next RI in sub_ap.a_refrac_index */ if (R_DEBUG&RDEBUG_RAYWRITE) { wraypts(sub_ap.a_ray.r_pt, sub_ap.a_ray.r_dir, sub_ap.a_uvec, 2, ap, stdout); /* 2 = ?? */ } if (R_DEBUG&RDEBUG_RAYPLOT) { /* plotfp */ bu_semaphore_acquire(BU_SEM_SYSCALL); pl_color(stdout, 0, 255, 0); pdv_3line(stdout, sub_ap.a_ray.r_pt, sub_ap.a_uvec); bu_semaphore_release(BU_SEM_SYSCALL); } /* Advance. Exit point becomes new start point */ VMOVE(sub_ap.a_ray.r_pt, sub_ap.a_uvec); VMOVE(incident_dir, sub_ap.a_ray.r_dir); /* * Calculate refraction at exit point. * Use "look ahead" RI value from rr_hit. */ if (!ZERO(sub_ap.a_refrac_index - swp->sw_refrac_index) && !rr_refract(incident_dir, /* input direction */ sub_ap.a_vvec, /* exit normal */ swp->sw_refrac_index, /* current RI */ sub_ap.a_refrac_index, /* next RI */ sub_ap.a_ray.r_dir /* output direction */ )) { static long count = 0; /* not PARALLEL, should be OK */ /* Reflected internally -- keep going */ if ((++sub_ap.a_level) <= max_ireflect) { sub_ap.a_purpose = "rr reflected internal ray, probing for glass exit point"; sub_ap.a_flag = 0; goto do_inside; } /* * Internal Reflection limit exceeded -- just let * the ray escape, continuing on current course. * This will cause some energy from somewhere in the * scene to be received through this glass, * which is much better than just returning * grey or black, as before. */ if ((R_DEBUG&(RDEBUG_SHOWERR|RDEBUG_REFRACT)) && ( count++ < MSG_PROLOGUE || (count%MSG_INTERVAL) == 3 )) { bu_log("rr_render: %d, %d Int.reflect=%d: %s lvl=%d\n", sub_ap.a_x, sub_ap.a_y, sub_ap.a_level, pp->pt_regionp->reg_name, ap->a_level); } VMOVE(sub_ap.a_ray.r_dir, incident_dir); goto do_exit; } do_exit: /* * Compute internal spectral transmittance. * Bouger's law. pg 30 of "color science" * * Apply attenuation factor due to thickness of the glass. * sw_extinction is in terms of fraction of light absorbed * per linear meter of glass. a_cumlen is in mm. */ /* XXX extinction should be a spectral curve, not scalor */ if (swp->sw_extinction > 0 && sub_ap.a_cumlen > 0) { attenuation = pow(10.0, -1.0e-3 * sub_ap.a_cumlen * swp->sw_extinction); } else { attenuation = 1; } /* * Process the escaping refracted ray. * This is the only place we might recurse dangerously, * so we are careful to use our caller's recursion level+1. 
* NOTE: point & direction already filled in */ sub_ap.a_hit = ap->a_hit; sub_ap.a_miss = ap->a_miss; sub_ap.a_logoverlap = ap->a_logoverlap; sub_ap.a_onehit = ap->a_onehit; sub_ap.a_level = ap->a_level+1; sub_ap.a_uptr = ap->a_uptr; sub_ap.a_rbeam = ap->a_rbeam + swp->sw_hit.hit_dist * ap->a_diverge; sub_ap.a_diverge = 0.0; if (code == 3) { sub_ap.a_purpose = "rr recurse on next glass"; sub_ap.a_flag = 0; } else { sub_ap.a_purpose = "rr recurse on escaping internal ray"; sub_ap.a_flag = 1; sub_ap.a_onehit = sub_ap.a_onehit > -3 ? -3 : sub_ap.a_onehit; } /* sub_ap.a_refrac_index was set to RI of next material by rr_hit(). */ sub_ap.a_cumlen = 0; (void) rt_shootray(&sub_ap); /* a_user has hit/miss flag! */ if (sub_ap.a_user == 0) { #ifdef RT_MULTISPECTRAL ms_transmit_color = bn_tabdata_dup(background); #else VMOVE(transmit_color, background); #endif sub_ap.a_cumlen = 0; } else { #ifdef RT_MULTISPECTRAL ms_transmit_color = bn_tabdata_dup(sub_ap.a_spectrum); #else VMOVE(transmit_color, sub_ap.a_color); #endif } transmit *= attenuation; #ifdef RT_MULTISPECTRAL bn_tabdata_mul(ms_transmit_color, ms_filter_color, ms_transmit_color); #else VELMUL(transmit_color, filter_color, transmit_color); #endif if (R_DEBUG&RDEBUG_REFRACT) { bu_log("rr_render: lvl=%d end of xmit through %s\n", ap->a_level, pp->pt_regionp->reg_name); } } else {
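/* Illustrative sketch (not part of the original source): the transmission
 * code above leans on rr_refract() to bend the ray by Snell's law and on
 * Bouger's law to attenuate by the distance traveled through the glass.
 * The helpers below, refract_dir() and bouger_attenuation(), are assumed
 * names written in plain C with no librt types, shown only to make the
 * math explicit; the normal's sign convention may differ from rr_refract().
 */
#include <math.h>

/* Refract the unit direction i about the unit normal n (n pointing back
 * toward the incoming ray).  n1 is the refractive index on the incident
 * side, n2 on the far side.  Returns 1 and fills t on success, 0 on total
 * internal reflection (caller should reflect instead).
 */
static int
refract_dir(const double i[3], const double n[3], double n1, double n2, double t[3])
{
    double eta = n1 / n2;
    double cosi = -(n[0]*i[0] + n[1]*i[1] + n[2]*i[2]);
    double sin2t = eta * eta * (1.0 - cosi * cosi);
    double cost, k;

    if (sin2t > 1.0)
	return 0;	/* total internal reflection */

    cost = sqrt(1.0 - sin2t);
    k = eta * cosi - cost;
    t[0] = eta * i[0] + k * n[0];
    t[1] = eta * i[1] + k * n[1];
    t[2] = eta * i[2] + k * n[2];
    return 1;
}

/* Bouger's-law attenuation matching the formula used above: extinction is
 * the fraction of light absorbed per meter of glass, path_mm the cumulative
 * distance through the glass in millimeters (a_cumlen).
 */
static double
bouger_attenuation(double extinction_per_m, double path_mm)
{
    if (extinction_per_m <= 0.0 || path_mm <= 0.0)
	return 1.0;
    return pow(10.0, -1.0e-3 * path_mm * extinction_per_m);
}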
/** * R T _ B I N U N I F _ I M P O R T 5 * * Import a uniform-array binary object from the database format to * the internal structure. */ int rt_binunif_import5( struct rt_db_internal *ip, const struct bu_external *ep, const mat_t mat, const struct db_i *dbip, struct resource *resp, const int minor_type) { struct rt_binunif_internal *bip; int i; unsigned char *srcp; unsigned long *ldestp; int in_cookie, out_cookie; int gotten; BU_CK_EXTERNAL( ep ); /* * There's no particular size to expect * * BU_ASSERT_LONG( ep->ext_nbytes, ==, SIZEOF_NETWORK_DOUBLE * 3*4 ); */ RT_CK_DB_INTERNAL( ip ); ip->idb_major_type = DB5_MAJORTYPE_BINARY_UNIF; ip->idb_minor_type = minor_type; ip->idb_meth = &rt_functab[ID_BINUNIF]; ip->idb_ptr = bu_malloc( sizeof(struct rt_binunif_internal), "rt_binunif_internal"); bip = (struct rt_binunif_internal *)ip->idb_ptr; bip->magic = RT_BINUNIF_INTERNAL_MAGIC; bip->type = minor_type; /* * Convert from database (network) to internal (host) format */ switch (bip->type) { case DB5_MINORTYPE_BINU_FLOAT: bip->count = ep->ext_nbytes/SIZEOF_NETWORK_FLOAT; bip->u.uint8 = (unsigned char *) bu_malloc( bip->count * sizeof(float), "rt_binunif_internal" ); ntohf( (unsigned char *) bip->u.uint8, ep->ext_buf, bip->count ); break; case DB5_MINORTYPE_BINU_DOUBLE: bip->count = ep->ext_nbytes/SIZEOF_NETWORK_DOUBLE; bip->u.uint8 = (unsigned char *) bu_malloc( bip->count * sizeof(double), "rt_binunif_internal" ); ntohd( (unsigned char *) bip->u.uint8, ep->ext_buf, bip->count ); break; case DB5_MINORTYPE_BINU_8BITINT: case DB5_MINORTYPE_BINU_8BITINT_U: bip->count = ep->ext_nbytes; bip->u.uint8 = (unsigned char *) bu_malloc( ep->ext_nbytes, "rt_binunif_internal" ); memcpy((char *) bip->u.uint8, (char *) ep->ext_buf, ep->ext_nbytes); break; case DB5_MINORTYPE_BINU_16BITINT: case DB5_MINORTYPE_BINU_16BITINT_U: bip->count = ep->ext_nbytes/2; bip->u.uint8 = (unsigned char *) bu_malloc( ep->ext_nbytes, "rt_binunif_internal" ); #if 0 srcp = (unsigned char *) ep->ext_buf; sdestp = (unsigned short *) bip->u.uint8; for (i = 0; i < bip->count; ++i, ++sdestp, srcp += 2) { *sdestp = bu_gshort( srcp ); bu_log("Just got %d", *sdestp); } #endif in_cookie = bu_cv_cookie("nus"); out_cookie = bu_cv_cookie("hus"); if (bu_cv_optimize(in_cookie) != bu_cv_optimize(out_cookie)) { gotten = bu_cv_w_cookie((genptr_t)bip->u.uint8, out_cookie, ep->ext_nbytes, ep->ext_buf, in_cookie, bip->count); if (gotten != bip->count) { bu_log("%s:%d: Tried to convert %d, did %d", __FILE__, __LINE__, bip->count, gotten); bu_bomb("\n"); } } else memcpy((char *) bip->u.uint8, (char *) ep->ext_buf, ep->ext_nbytes ); break; case DB5_MINORTYPE_BINU_32BITINT: case DB5_MINORTYPE_BINU_32BITINT_U: bip->count = ep->ext_nbytes/4; bip->u.uint8 = (unsigned char *) bu_malloc( ep->ext_nbytes, "rt_binunif_internal" ); srcp = (unsigned char *) ep->ext_buf; ldestp = (unsigned long *) bip->u.uint8; for (i = 0; i < bip->count; ++i, ++ldestp, srcp += 4) { *ldestp = bu_glong( srcp ); } break; case DB5_MINORTYPE_BINU_64BITINT: case DB5_MINORTYPE_BINU_64BITINT_U: bu_log("rt_binunif_import5() Can't handle 64-bit integers yet\n"); return -1; } return 0; /* OK */ }
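/* Illustrative sketch (not part of the original source): the import code
 * above converts the on-disk, network-order (big-endian) arrays to host
 * order, either through the libbu conversion cookies or, for the 32-bit
 * case, by assembling each value a byte at a time via bu_glong().  The
 * assumed helpers get_be32() and be32_array_to_host() show that
 * byte-assembly step in portable, standalone C.
 */
#include <stddef.h>
#include <stdint.h>

/* Assemble one 32-bit value from 4 big-endian (network order) bytes,
 * independent of the host's endianness.
 */
static uint32_t
get_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) |
	   ((uint32_t)p[1] << 16) |
	   ((uint32_t)p[2] << 8)  |
	   (uint32_t)p[3];
}

/* Convert a whole network-order array to host values, the way the
 * 32BITINT case above walks ext_buf with srcp/ldestp.
 */
static void
be32_array_to_host(const unsigned char *src, uint32_t *dest, size_t count)
{
    size_t i;
    for (i = 0; i < count; i++, src += 4)
	dest[i] = get_be32(src);
}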
/* * This routine is called (at prep time) * once for each region which uses this shader. * Any shader-specific initialization should be done here. * * Returns: * 1 success * 0 success, but delete region * -1 failure */ HIDDEN int bbd_setup(struct region *rp, struct bu_vls *matparm, void **dpp, const struct mfuncs *mfp, struct rt_i *rtip) { register struct bbd_specific *bbd_sp; struct rt_db_internal intern; struct rt_tgc_internal *tgc; int s; mat_t mat; struct bbd_img *bi; double angle; vect_t vtmp; int img_num; vect_t vv; /* check the arguments */ RT_CHECK_RTI(rtip); BU_CK_VLS(matparm); RT_CK_REGION(rp); if (rdebug&RDEBUG_SHADE) bu_log("bbd_setup(%s)\n", rp->reg_name); RT_CK_TREE(rp->reg_treetop); if (rp->reg_treetop->tr_a.tu_op != OP_SOLID) { bu_log("--- Warning: Region %s shader %s", rp->reg_name, mfp->mf_name); bu_bomb("Shader should be used on region of single (rec/rcc) primitive\n"); } RT_CK_SOLTAB(rp->reg_treetop->tr_a.tu_stp); if (rp->reg_treetop->tr_a.tu_stp->st_id != ID_REC) { bu_log("--- Warning: Region %s shader %s", rp->reg_name, mfp->mf_name); bu_log("Shader should be used on region of single REC/RCC primitive %d\n", rp->reg_treetop->tr_a.tu_stp->st_id); bu_bomb("oops\n"); } /* Get memory for the shader parameters and shader-specific data */ BU_GET(bbd_sp, struct bbd_specific); *dpp = bbd_sp; /* initialize the default values for the shader */ memcpy(bbd_sp, &bbd_defaults, sizeof(struct bbd_specific)); bu_vls_init(&bbd_sp->img_filename); BU_LIST_INIT(&bbd_sp->imgs); bbd_sp->rtip = rtip; /* because new_image() needs this */ bbd_sp->img_count = 0; /* parse the user's arguments for this use of the shader. */ if (bu_struct_parse(matparm, bbd_parse_tab, (char *)bbd_sp, NULL) < 0) return -1; if (bbd_sp->img_count > MAX_IMAGES) { bu_log("too many images (%zu) in shader for %s sb < %d\n", bbd_sp->img_count, rp->reg_name, MAX_IMAGES); bu_bomb("excessive image count\n"); } MAT_IDN(mat); RT_DB_INTERNAL_INIT(&intern); s = rt_db_get_internal(&intern, rp->reg_treetop->tr_a.tu_stp->st_dp, rtip->rti_dbip, mat, &rt_uniresource); if (intern.idb_minor_type != ID_TGC && intern.idb_minor_type != ID_REC) { bu_log("What did I get? %d\n", intern.idb_minor_type); } if (s < 0) { bu_log("%s:%d didn't get internal", __FILE__, __LINE__); bu_bomb(""); } tgc = (struct rt_tgc_internal *)intern.idb_ptr; RT_TGC_CK_MAGIC(tgc); angle = M_PI / (double)bbd_sp->img_count; img_num = 0; VMOVE(vv, tgc->h); VUNITIZE(vv); for (BU_LIST_FOR(bi, bbd_img, &bbd_sp->imgs)) { static const point_t o = VINIT_ZERO; bn_mat_arb_rot(mat, o, vv, angle*img_num); /* compute plane equation */ MAT4X3VEC(bi->img_plane, mat, tgc->a); VUNITIZE(bi->img_plane); bi->img_plane[H] = VDOT(tgc->v, bi->img_plane); MAT4X3VEC(vtmp, mat, tgc->b); VADD2(bi->img_origin, tgc->v, vtmp); /* image origin in 3d space */ /* calculate image u vector */ VREVERSE(bi->img_x, vtmp); VUNITIZE(bi->img_x); bi->img_xlen = MAGNITUDE(vtmp) * 2; /* calculate image v vector */ VMOVE(bi->img_y, tgc->h); VUNITIZE(bi->img_y); bi->img_ylen = MAGNITUDE(tgc->h); if (rdebug&RDEBUG_SHADE) { HPRINT("\nimg_plane", bi->img_plane); VPRINT("vtmp", vtmp); VPRINT("img_origin", bi->img_origin); bu_log("img_xlen:%g ", bi->img_xlen); VPRINT("img_x", bi->img_x); bu_log("img_ylen:%g ", bi->img_ylen); VPRINT("img_y", bi->img_y); } img_num++; } rt_db_free_internal(&intern); if (rdebug&RDEBUG_SHADE) { bu_struct_print(" Parameters:", bbd_print_tab, (char *)bbd_sp); } return 1; }
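/* Illustrative sketch (not part of the original source): for each billboard
 * image the setup above rotates the cylinder's A vector about the unit axis
 * vv by angle*img_num (bn_mat_arb_rot() + MAT4X3VEC), unitizes the result as
 * the image plane normal, and takes V . N as the plane's H term.  The
 * assumed helper rotate_about_axis() below performs the equivalent rotation
 * directly with Rodrigues' formula, in plain C.
 */
#include <math.h>

/* Rotate v about the unit-length axis by 'angle' radians (Rodrigues'
 * rotation formula); result goes in out.
 */
static void
rotate_about_axis(const double v[3], const double axis[3], double angle, double out[3])
{
    double c = cos(angle), s = sin(angle);
    double d = axis[0]*v[0] + axis[1]*v[1] + axis[2]*v[2];	/* axis . v */
    double cx[3];						/* axis x v */

    cx[0] = axis[1]*v[2] - axis[2]*v[1];
    cx[1] = axis[2]*v[0] - axis[0]*v[2];
    cx[2] = axis[0]*v[1] - axis[1]*v[0];

    out[0] = v[0]*c + cx[0]*s + axis[0]*d*(1.0 - c);
    out[1] = v[1]*c + cx[1]*s + axis[1]*d*(1.0 - c);
    out[2] = v[2]*c + cx[2]*s + axis[2]*d*(1.0 - c);
}
/* Normalizing the rotated vector gives the plane normal; dotting it with a
 * point known to lie on the plane (tgc->v above) gives the plane distance.
 */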
/** * R T _ B I N U N I F _ E X P O R T 5 * * Create the "body" portion of external form */ int rt_binunif_export5( struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, /* we ignore */ const struct db_i *dbip, struct resource *resp, const int minor_type ) { struct rt_binunif_internal *bip; int i; unsigned char *destp; unsigned long *lsrcp; int in_cookie, out_cookie; int gotten; RT_CK_DB_INTERNAL(ip); if ( ip->idb_minor_type != minor_type ) { bu_log("ip->idb_minor_type(%d) != minor_type(%d)\n", ip->idb_minor_type, minor_type ); return -1; } bip = (struct rt_binunif_internal *)ip->idb_ptr; RT_CK_BINUNIF(bip); if ( bip->type != minor_type ) { bu_log("bip->type(%d) != minor_type(%d)\n", bip->type, minor_type ); return -1; } BU_INIT_EXTERNAL(ep); /* * Convert from internal (host) to database (network) format */ switch (bip->type) { case DB5_MINORTYPE_BINU_FLOAT: ep->ext_nbytes = bip->count * SIZEOF_NETWORK_FLOAT; ep->ext_buf = (genptr_t)bu_malloc( ep->ext_nbytes, "binunif external"); htonf( ep->ext_buf, (unsigned char *) bip->u.uint8, bip->count ); break; case DB5_MINORTYPE_BINU_DOUBLE: ep->ext_nbytes = bip->count * SIZEOF_NETWORK_DOUBLE; ep->ext_buf = (genptr_t)bu_malloc( ep->ext_nbytes, "binunif external"); htond( ep->ext_buf, (unsigned char *) bip->u.uint8, bip->count ); break; case DB5_MINORTYPE_BINU_8BITINT: case DB5_MINORTYPE_BINU_8BITINT_U: ep->ext_nbytes = bip->count; ep->ext_buf = (genptr_t)bu_malloc( ep->ext_nbytes, "binunif external"); memcpy((char *) ep->ext_buf, (char *) bip->u.uint8, bip->count); break; case DB5_MINORTYPE_BINU_16BITINT: case DB5_MINORTYPE_BINU_16BITINT_U: ep->ext_nbytes = bip->count * 2; ep->ext_buf = (genptr_t)bu_malloc( ep->ext_nbytes, "binunif external"); in_cookie = bu_cv_cookie("hus"); out_cookie = bu_cv_cookie("nus"); if (bu_cv_optimize(in_cookie) != bu_cv_optimize(out_cookie)) { gotten = bu_cv_w_cookie(ep->ext_buf, out_cookie, ep->ext_nbytes, (genptr_t) bip->u.uint8, in_cookie, bip->count); if (gotten != bip->count) { bu_log("%s:%d: Tried to convert %d, did %d", __FILE__, __LINE__, bip->count, gotten); bu_bomb("\n"); } } else { memcpy((char *) ep->ext_buf, (char *) bip->u.uint8, ep->ext_nbytes ); } break; case DB5_MINORTYPE_BINU_32BITINT: case DB5_MINORTYPE_BINU_32BITINT_U: ep->ext_nbytes = bip->count * 4; ep->ext_buf = (genptr_t)bu_malloc( ep->ext_nbytes, "binunif external"); lsrcp = (unsigned long *) bip->u.uint8; destp = (unsigned char *) ep->ext_buf; for (i = 0; i < bip->count; ++i, ++destp, ++lsrcp) { (void) bu_plong( destp, *lsrcp ); } break; case DB5_MINORTYPE_BINU_64BITINT: case DB5_MINORTYPE_BINU_64BITINT_U: bu_log("rt_binunif_export5() Can't handle 64-bit integers yet\n"); return -1; } return 0; }
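/* Illustrative sketch (not part of the original source): export is the
 * mirror image of import -- host values are written most-significant byte
 * first, which is what bu_plong() does for the 32-bit case above.  The
 * assumed helper put_be32() shows that serialization step; a value run
 * through put_be32() and then the import-side byte assembly comes back
 * unchanged on any host.
 */
#include <stdint.h>

/* Store one 32-bit host value as 4 big-endian (network order) bytes. */
static void
put_be32(unsigned char *p, uint32_t v)
{
    p[0] = (unsigned char)(v >> 24);
    p[1] = (unsigned char)(v >> 16);
    p[2] = (unsigned char)(v >> 8);
    p[3] = (unsigned char)v;
}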
/* * Scale a file of pixels to a different size. * * To scale down we make a square pixel assumption. * We will preserve the amount of light energy per unit area. * To scale up we use bilinear interpolation. */ int scale(FILE *ofp, int ix, int iy, int ox, int oy) { int i, j, k, l; double pxlen, pylen; /* # old pixels per new pixel */ double xstart, xend, ystart, yend; /* edges of new pixel in old coordinates */ double xdist, ydist; /* length of new pixel sides in old coord */ double sum; unsigned char *op; pxlen = (double)ix / (double)ox; pylen = (double)iy / (double)oy; if ((pxlen < 1.0 && pylen > 1.0) || (pxlen > 1.0 && pylen < 1.0)) { fprintf(stderr, "bwscale: can't stretch one way and compress another!\n"); return -1; } if (pxlen < 1.0 || pylen < 1.0) { /* scale up */ if (rflag) { /* nearest neighbor interpolate */ ninterp(ofp, ix, iy, ox, oy); } else { /* bilinear interpolate */ binterp(ofp, ix, iy, ox, oy); } return 0; } /* for each output pixel */ for (j = 0; j < oy; j++) { size_t ret; ystart = j * pylen; yend = ystart + pylen; op = outbuf; for (i = 0; i < ox; i++) { xstart = i * pxlen; xend = xstart + pxlen; sum = 0.0; /* * For each pixel of the original falling * inside this new pixel. */ for (l = FLOOR(ystart); l < CEILING(yend); l++) { /* Make sure we have this row in the buffer */ bufy = l - buf_start; if (bufy < 0 || bufy >= buflines) { fill_buffer(l); bufy = l - buf_start; } /* Compute height of this row */ if ((double)l < ystart) ydist = CEILING(ystart) - ystart; else ydist = MIN(1.0, yend - (double)l); for (k = FLOOR(xstart); k < CEILING(xend); k++) { /* Compute width of column */ if ((double)k < xstart) xdist = CEILING(xstart) - xstart; else xdist = MIN(1.0, xend - (double)k); /* Add this pixels contribution */ /* sum += old[l][k] * xdist * ydist; */ sum += buffer[bufy * scanlen + k] * xdist * ydist; } } *op++ = (int)(sum / (pxlen * pylen)); if (op > (outbuf+scanlen)) bu_bomb("unexpected buffer overrun"); } ret = fwrite(outbuf, 1, ox, ofp); if (ret != (size_t)ox) perror("fwrite"); } return 1; }
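/* Illustrative sketch (not part of the original source): when scaling down,
 * the loop above weights every source pixel by the fraction of its area that
 * falls inside the destination pixel (xdist * ydist) and divides the sum by
 * the destination pixel's area in source units (pxlen * pylen).  The assumed
 * helper box_downscale_1d() shows the same coverage-weighted average in one
 * dimension, where the bookkeeping is easier to follow.
 */
#include <math.h>
#include <stddef.h>

/* 1-D box-filter downscale: each output sample averages the input samples
 * it spans, weighted by fractional coverage, preserving energy per unit
 * length.  Assumes nout <= nin.
 */
static void
box_downscale_1d(const unsigned char *in, size_t nin, unsigned char *out, size_t nout)
{
    double plen = (double)nin / (double)nout;	/* input samples per output sample */
    size_t i, k;

    for (i = 0; i < nout; i++) {
	double start = i * plen, end = start + plen;
	double sum = 0.0;

	for (k = (size_t)floor(start); k < nin && (double)k < end; k++) {
	    double left = (double)k > start ? (double)k : start;
	    double right = (double)(k + 1) < end ? (double)(k + 1) : end;
	    sum += in[k] * (right - left);	/* coverage of source sample k */
	}
	out[i] = (unsigned char)(sum / plen + 0.5);
    }
}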
/** * Given a ray, shoot it at all the relevant parts of the model, * (building the HeadSeg chain), and then call rt_boolregions() to * build and evaluate the partition chain. If the ray actually hit * anything, call the application's a_hit() routine with a pointer to * the partition chain, otherwise, call the application's a_miss() * routine. * * It is important to note that rays extend infinitely only in the * positive direction. The ray is composed of all points P, where * * P = r_pt + K * r_dir * * for K ranging from 0 to +infinity. There is no looking backwards. * * It is also important to note that the direction vector r_dir must * have unit length; this is mandatory, and is not ordinarily checked, * in the name of efficiency. * * Input: Pointer to an application structure, with these mandatory fields: * a_ray.r_pt Starting point of ray to be fired * a_ray.r_dir UNIT VECTOR with direction to fire in (dir cosines) * a_hit Routine to call when something is hit * a_miss Routine to call when ray misses everything * * Calls user's a_miss() or a_hit() routine as appropriate. Passes * a_hit() routine list of partitions, with only hit_dist fields * valid. Normal computation deferred to user code, to avoid needless * computation here. * * Returns: whatever the application function returns (an int). * * NOTE: The application functions may call rt_shootray() recursively. * Thus, none of the local variables may be static. * * An open issue for execution in a PARALLEL environment is locking of * the statistics variables. */ int rt_vshootray(struct application *ap) { struct seg *HeadSeg; int ret; auto vect_t inv_dir; /* inverses of ap->a_ray.r_dir */ struct bu_bitv *solidbits; /* bits for all solids shot so far */ struct bu_ptbl *regionbits; /* bits for all involved regions */ char *status; auto struct partition InitialPart; /* Head of Initial Partitions */ auto struct partition FinalPart; /* Head of Final Partitions */ int nrays = 1; /* for now */ int vlen; int id; int i; struct soltab **ary_stp; /* array of pointers */ struct xray **ary_rp; /* array of pointers */ struct seg *ary_seg; /* array of structures */ struct rt_i *rtip; int done; #define BACKING_DIST (-2.0) /* mm to look behind start point */ rtip = ap->a_rt_i; RT_AP_CHECK(ap); if (!ap->a_resource) { ap->a_resource = &rt_uniresource; } RT_CK_RESOURCE(ap->a_resource); if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) { bu_log("\n**********mshootray cpu=%d %d, %d lvl=%d (%s)\n", ap->a_resource->re_cpu, ap->a_x, ap->a_y, ap->a_level, ap->a_purpose != (char *)0 ? 
ap->a_purpose : "?"); VPRINT("Pnt", ap->a_ray.r_pt); VPRINT("Dir", ap->a_ray.r_dir); } rtip->rti_nrays++; if (rtip->needprep) rt_prep(rtip); /* Allocate dynamic memory */ vlen = nrays * rtip->rti_maxsol_by_type; ary_stp = (struct soltab **)bu_calloc(vlen, sizeof(struct soltab *), "*ary_stp[]"); ary_rp = (struct xray **)bu_calloc(vlen, sizeof(struct xray *), "*ary_rp[]"); ary_seg = (struct seg *)bu_calloc(vlen, sizeof(struct seg), "ary_seg[]"); /**** for each ray, do this ****/ InitialPart.pt_forw = InitialPart.pt_back = &InitialPart; FinalPart.pt_forw = FinalPart.pt_back = &FinalPart; HeadSeg = RT_SEG_NULL; solidbits = rt_get_solidbitv(rtip->nsolids, ap->a_resource); if (BU_LIST_IS_EMPTY(&ap->a_resource->re_region_ptbl)) { BU_ALLOC(regionbits, struct bu_ptbl); bu_ptbl_init(regionbits, 7, "rt_shootray() regionbits ptbl"); } else { regionbits = BU_LIST_FIRST(bu_ptbl, &ap->a_resource->re_region_ptbl); BU_LIST_DEQUEUE(®ionbits->l); BU_CK_PTBL(regionbits); } /* Compute the inverse of the direction cosines */ if (!ZERO(ap->a_ray.r_dir[X])) { inv_dir[X]=1.0/ap->a_ray.r_dir[X]; } else { inv_dir[X] = INFINITY; ap->a_ray.r_dir[X] = 0.0; } if (!ZERO(ap->a_ray.r_dir[Y])) { inv_dir[Y]=1.0/ap->a_ray.r_dir[Y]; } else { inv_dir[Y] = INFINITY; ap->a_ray.r_dir[Y] = 0.0; } if (!ZERO(ap->a_ray.r_dir[Z])) { inv_dir[Z]=1.0/ap->a_ray.r_dir[Z]; } else { inv_dir[Z] = INFINITY; ap->a_ray.r_dir[Z] = 0.0; } /* * XXX handle infinite solids here, later. */ /* * If ray does not enter the model RPP, skip on. * If ray ends exactly at the model RPP, trace it. */ if (!rt_in_rpp(&ap->a_ray, inv_dir, rtip->mdl_min, rtip->mdl_max) || ap->a_ray.r_max < 0.0) { rtip->nmiss_model++; if (ap->a_miss) ret = ap->a_miss(ap); else ret = 0; status = "MISS model"; goto out; } /* For each type of solid to be shot at, assemble the vectors */ for (id = 1; id <= ID_MAX_SOLID; id++) { register int nsol; if ((nsol = rtip->rti_nsol_by_type[id]) <= 0) continue; /* For each instance of this solid type */ for (i = nsol-1; i >= 0; i--) { ary_stp[i] = rtip->rti_sol_by_type[id][i]; ary_rp[i] = &(ap->a_ray); /* XXX, sb [ray] */ ary_seg[i].seg_stp = SOLTAB_NULL; BU_LIST_INIT(&ary_seg[i].l); } /* bounding box check */ /* bit vector per ray check */ /* mark elements to be skipped with ary_stp[] = SOLTAB_NULL */ ap->a_rt_i->nshots += nsol; /* later: skipped ones */ if (rt_functab[id].ft_vshot) { rt_functab[id].ft_vshot(ary_stp, ary_rp, ary_seg, nsol, ap); } else { vshot_stub(ary_stp, ary_rp, ary_seg, nsol, ap); } /* set bits for all solids shot at for each ray */ /* append resulting seg list to input for boolweave */ for (i = nsol-1; i >= 0; i--) { register struct seg *seg2; if (ary_seg[i].seg_stp == SOLTAB_NULL) { /* MISS */ ap->a_rt_i->nmiss++; continue; } ap->a_rt_i->nhits++; /* For now, do it the slow way. sb [ray] */ /* MUST dup it -- all segs have to live till after a_hit() */ RT_GET_SEG(seg2, ap->a_resource); *seg2 = ary_seg[i]; /* struct copy */ /* rt_boolweave(seg2, &InitialPart, ap); */ bu_bomb("FIXME: need to call boolweave here"); /* Add seg chain to list of used segs awaiting reclaim */ #if 0 /* FIXME: need to use waiting_segs/finished_segs here in * conjunction with rt_boolweave() { register struct seg *seg3 = seg2; while (seg3->seg_next != RT_SEG_NULL) seg3 = seg3->seg_next; seg3->seg_next = HeadSeg; HeadSeg = seg2; } */ #endif } } /* * Ray has finally left known space. 
*/ if (InitialPart.pt_forw == &InitialPart) { if (ap->a_miss) ret = ap->a_miss(ap); else ret = 0; status = "MISSed all primitives"; goto freeup; } /* * All intersections of the ray with the model have been computed. * Evaluate the boolean trees over each partition. */ done = rt_boolfinal(&InitialPart, &FinalPart, BACKING_DIST, INFINITY, regionbits, ap, solidbits); if (done > 0) goto hitit; if (FinalPart.pt_forw == &FinalPart) { if (ap->a_miss) ret = ap->a_miss(ap); else ret = 0; status = "MISS bool"; goto freeup; } /* * Ray/model intersections exist. Pass the list to the user's * a_hit() routine. Note that only the hit_dist elements of * pt_inhit and pt_outhit have been computed yet. To compute both * hit_point and hit_normal, use the * * RT_HIT_NORMAL(NULL, hitp, stp, rayp, 0); * * macro. To compute just hit_point, use * * VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir); */ hitit: if (RT_G_DEBUG&DEBUG_SHOOT) rt_pr_partitions(rtip, &FinalPart, "a_hit()"); if (ap->a_hit) ret = ap->a_hit(ap, &FinalPart, HeadSeg/* &finished_segs */); else ret = 0; status = "HIT"; /* * Processing of this ray is complete. Free dynamic resources. */ freeup: { register struct partition *pp; /* Free up initial partition list */ for (pp = InitialPart.pt_forw; pp != &InitialPart;) { register struct partition *newpp; newpp = pp; pp = pp->pt_forw; FREE_PT(newpp, ap->a_resource); } /* Free up final partition list */ for (pp = FinalPart.pt_forw; pp != &FinalPart;) { register struct partition *newpp; newpp = pp; pp = pp->pt_forw; FREE_PT(newpp, ap->a_resource); } } /* Segs can't be freed until after a_hit() has returned */ #if 0 /* FIXME: depends on commented out code above */ if (HeadSeg) RT_FREE_SEG_LIST(HeadSeg, ap->a_resource); #endif out: bu_free((char *)ary_stp, "*ary_stp[]"); bu_free((char *)ary_rp, "*ary_rp[]"); bu_free((char *)ary_seg, "ary_seg[]"); if (solidbits != NULL) { bu_bitv_free(solidbits); } if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) { bu_log("----------mshootray cpu=%d %d, %d lvl=%d (%s) %s ret=%d\n", ap->a_resource->re_cpu, ap->a_x, ap->a_y, ap->a_level, ap->a_purpose != (char *)0 ? ap->a_purpose : "?", status, ret); } return ret; }
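/* Illustrative sketch (not part of the original source): rt_vshootray()
 * precomputes 1/r_dir (INFINITY where a component is zero) so rt_in_rpp()
 * can clip the ray against the model RPP with the usual slab method.  The
 * assumed helper ray_in_rpp() below is a compact, standalone version of
 * that test, not librt's actual implementation.
 */
#include <math.h>

/* Slab-method ray vs. axis-aligned box test.  inv_dir[] holds 1/dir with
 * INFINITY where dir is zero, as precomputed above.  Returns 1 and fills
 * *tmin/*tmax with the in-box parameter range (forward direction only),
 * 0 on a miss.
 */
static int
ray_in_rpp(const double pt[3], const double inv_dir[3],
	   const double lo[3], const double hi[3],
	   double *tmin, double *tmax)
{
    double t0 = 0.0, t1 = INFINITY;
    int i;

    for (i = 0; i < 3; i++) {
	double ta, tb, nearv, farv;

	if (isinf(inv_dir[i])) {
	    /* ray parallel to this slab: must start inside it */
	    if (pt[i] < lo[i] || pt[i] > hi[i])
		return 0;
	    continue;
	}
	ta = (lo[i] - pt[i]) * inv_dir[i];
	tb = (hi[i] - pt[i]) * inv_dir[i];
	nearv = ta < tb ? ta : tb;
	farv = ta < tb ? tb : ta;
	if (nearv > t0) t0 = nearv;
	if (farv < t1) t1 = farv;
	if (t0 > t1)
	    return 0;		/* slab intervals do not overlap: miss */
    }
    *tmin = t0;
    *tmax = t1;
    return 1;
}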
/** * Make a life-and-death decision on every element of a shell. * Descend the "great chain of being" from the face to loop to edge to * vertex, saving or demoting along the way. * * Note that there is no moving of items from one shell to another. */ HIDDEN void nmg_eval_shell(register struct shell *s, struct nmg_bool_state *bs) { struct faceuse *fu; struct faceuse *nextfu; struct loopuse *lu; struct loopuse *nextlu; struct edgeuse *eu; struct edgeuse *nexteu; struct vertexuse *vu; int loops_retained; NMG_CK_SHELL(s); BN_CK_TOL(bs->bs_tol); if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); /* * For each face in the shell, process all the loops in the face, * and then handle the face and all loops as a unit. */ nmg_eval_plot(bs, nmg_eval_count++); /* debug */ fu = BU_LIST_FIRST(faceuse, &s->fu_hd); while (BU_LIST_NOT_HEAD(fu, &s->fu_hd)) { NMG_CK_FACEUSE(fu); nextfu = BU_LIST_PNEXT(faceuse, fu); /* Faceuse mates will be handled at same time as OT_SAME fu */ if (fu->orientation != OT_SAME) { fu = nextfu; continue; } if (fu->fumate_p == nextfu) nextfu = BU_LIST_PNEXT(faceuse, nextfu); /* Consider this face */ NMG_CK_FACE(fu->f_p); loops_retained = 0; lu = BU_LIST_FIRST(loopuse, &fu->lu_hd); while (BU_LIST_NOT_HEAD(lu, &fu->lu_hd)) { NMG_CK_LOOPUSE(lu); nextlu = BU_LIST_PNEXT(loopuse, lu); if (lu->lumate_p == nextlu) nextlu = BU_LIST_PNEXT(loopuse, nextlu); NMG_CK_LOOP(lu->l_p); nmg_ck_lu_orientation(lu, bs->bs_tol); switch (nmg_eval_action(&lu->l_p->magic, bs)) { case BACTION_KILL: /* Kill by demoting loop to edges */ if (BU_LIST_FIRST_MAGIC(&lu->down_hd) == NMG_VERTEXUSE_MAGIC) { /* loop of single vertex */ (void)nmg_klu(lu); } else if (nmg_demote_lu(lu) == 0) { nmg_eval_plot(bs, nmg_eval_count++); /* debug */ } lu = nextlu; continue; case BACTION_RETAIN: loops_retained++; break; default: bu_bomb("nmg_eval_shell() bad BACTION\n"); } lu = nextlu; } if (RTG.NMG_debug & DEBUG_BOOLEVAL) bu_log("faceuse %p loops retained=%d\n", (void *)fu, loops_retained); if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); /* * Here, faceuse will have 0 or more loopuses still in it. * Decide the fate of the face; if the face dies, * then any remaining loops, edges, etc., will die too. */ if (BU_LIST_IS_EMPTY(&fu->lu_hd)) { if (loops_retained) bu_bomb("nmg_eval_shell() empty faceuse with retained loops?\n"); /* faceuse is empty, face & mate die */ if (RTG.NMG_debug & DEBUG_BOOLEVAL) bu_log("faceuse %p empty, kill\n", (void *)fu); nmg_kfu(fu); /* kill face & mate, dequeue from shell */ if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); nmg_eval_plot(bs, nmg_eval_count++); /* debug */ fu = nextfu; continue; } if (loops_retained <= 0) { nmg_pr_fu(fu, (char *)NULL); bu_bomb("nmg_eval_shell() non-empty faceuse, no loops retained?\n"); } fu = nextfu; } if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); /* * For each loop in the shell, process. * Each loop is either a wire-loop, or a vertex-with-self-loop. * Only consider wire loops here. 
*/ nmg_eval_plot(bs, nmg_eval_count++); /* debug */ lu = BU_LIST_FIRST(loopuse, &s->lu_hd); while (BU_LIST_NOT_HEAD(lu, &s->lu_hd)) { NMG_CK_LOOPUSE(lu); nextlu = BU_LIST_PNEXT(loopuse, lu); if (lu->lumate_p == nextlu) nextlu = BU_LIST_PNEXT(loopuse, nextlu); if (BU_LIST_FIRST_MAGIC(&lu->down_hd) == NMG_VERTEXUSE_MAGIC) { /* ignore vertex-with-self-loop */ lu = nextlu; continue; } NMG_CK_LOOP(lu->l_p); switch (nmg_eval_action(&lu->l_p->magic, bs)) { case BACTION_KILL: /* Demote the loopuse into wire edges */ /* kill loop & mate */ if (nmg_demote_lu(lu) == 0) nmg_eval_plot(bs, nmg_eval_count++); /* debug */ lu = nextlu; continue; case BACTION_RETAIN: break; default: bu_bomb("nmg_eval_shell() bad BACTION\n"); } lu = nextlu; } if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); /* * For each wire-edge in the shell, ... */ nmg_eval_plot(bs, nmg_eval_count++); /* debug */ eu = BU_LIST_FIRST(edgeuse, &s->eu_hd); while (BU_LIST_NOT_HEAD(eu, &s->eu_hd)) { NMG_CK_EDGEUSE(eu); nexteu = BU_LIST_PNEXT(edgeuse, eu); /* may be head */ if (eu->eumate_p == nexteu) nexteu = BU_LIST_PNEXT(edgeuse, nexteu); /* Consider this edge */ NMG_CK_EDGE(eu->e_p); switch (nmg_eval_action(&eu->e_p->magic, bs)) { case BACTION_KILL: /* Demote the edgeuse (and mate) into vertices */ if (nmg_demote_eu(eu) == 0) nmg_eval_plot(bs, nmg_eval_count++); /* debug */ eu = nexteu; continue; case BACTION_RETAIN: break; default: bu_bomb("nmg_eval_shell() bad BACTION\n"); } eu = nexteu; } /* * For each lone vertex-with-self-loop, process. * Note that these are intermixed in the loop list. * Each loop is either a wire-loop, or a vertex-with-self-loop. * Only consider cases of vertex-with-self-loop here. * * This case has to be handled separately, because a wire-loop * may be demoted to a set of wire-edges above, some of which * may be retained. The non-retained wire-edges may in turn * be demoted into vertex-with-self-loop objects above, * which will be processed here. */ nmg_eval_plot(bs, nmg_eval_count++); /* debug */ lu = BU_LIST_FIRST(loopuse, &s->lu_hd); while (BU_LIST_NOT_HEAD(lu, &s->lu_hd)) { NMG_CK_LOOPUSE(lu); nextlu = BU_LIST_PNEXT(loopuse, lu); if (BU_LIST_FIRST_MAGIC(&lu->down_hd) != NMG_VERTEXUSE_MAGIC) { /* ignore any remaining wire-loops */ lu = nextlu; continue; } if (nextlu == lu->lumate_p) nextlu = BU_LIST_PNEXT(loopuse, nextlu); vu = BU_LIST_PNEXT(vertexuse, &lu->down_hd); NMG_CK_VERTEXUSE(vu); NMG_CK_VERTEX(vu->v_p); switch (nmg_eval_action(&vu->v_p->magic, bs)) { case BACTION_KILL: /* Eliminate the loopuse, and mate */ nmg_klu(lu); lu = nextlu; continue; case BACTION_RETAIN: break; default: bu_bomb("nmg_eval_shell() bad BACTION\n"); } lu = nextlu; } if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); /* * Final case: shell of a single vertexuse */ vu = s->vu_p; if (vu) { NMG_CK_VERTEXUSE(vu); NMG_CK_VERTEX(vu->v_p); switch (nmg_eval_action(&vu->v_p->magic, bs)) { case BACTION_KILL: nmg_kvu(vu); nmg_eval_plot(bs, nmg_eval_count++); /* debug */ s->vu_p = (struct vertexuse *)0; /* sanity */ break; case BACTION_RETAIN: break; default: bu_bomb("nmg_eval_shell() bad BACTION\n"); } } if (RTG.NMG_debug & DEBUG_VERIFY) nmg_vshell(&s->r_p->s_hd, s->r_p); nmg_eval_plot(bs, nmg_eval_count++); /* debug */ }
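/* Illustrative sketch (not part of the original source): every pass in
 * nmg_eval_shell() deletes or demotes elements while walking their list,
 * so it always fetches the successor first (and skips the mate when the
 * mate happens to be next).  The assumed prune_list() below shows the same
 * save-the-next-pointer pattern on an ordinary circular doubly-linked list
 * with a sentinel head; struct node is not an NMG type.
 */
#include <stdlib.h>

struct node {
    struct node *next, *prev;
    int keep;			/* stand-in for the KILL/RETAIN decision */
};

/* Remove every node not marked 'keep'.  The successor is captured before
 * the current node can be freed, so deletion never invalidates the cursor.
 */
static void
prune_list(struct node *head)
{
    struct node *cur = head->next;

    while (cur != head) {
	struct node *next = cur->next;	/* grab successor first */

	if (!cur->keep) {
	    cur->prev->next = cur->next;
	    cur->next->prev = cur->prev;
	    free(cur);
	}
	cur = next;
    }
}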
int nmg_uv_in_lu(const fastf_t u, const fastf_t v, const struct loopuse *lu) { struct edgeuse *eu; int crossings=0; NMG_CK_LOOPUSE( lu ); if ( BU_LIST_FIRST_MAGIC( &lu->down_hd ) != NMG_EDGEUSE_MAGIC ) return( 0 ); for ( BU_LIST_FOR( eu, edgeuse, &lu->down_hd ) ) { struct edge_g_cnurb *eg; if ( !eu->g.magic_p ) { bu_log( "nmg_uv_in_lu: eu (x%x) has no geometry!!!\n", eu ); bu_bomb( "nmg_uv_in_lu: eu has no geometry!!!\n" ); } if ( *eu->g.magic_p != NMG_EDGE_G_CNURB_MAGIC ) { bu_log( "nmg_uv_in_lu: Called with lu (x%x) containing eu (x%x) that is not CNURB!!!!\n", lu, eu ); bu_bomb( "nmg_uv_in_lu: Called with lu containing eu that is not CNURB!!!\n" ); } eg = eu->g.cnurb_p; if ( eg->order <= 0 ) { struct vertexuse *vu1, *vu2; struct vertexuse_a_cnurb *vua1, *vua2; point_t uv1, uv2; fastf_t slope, intersept; fastf_t u_on_curve; vu1 = eu->vu_p; vu2 = eu->eumate_p->vu_p; if ( !vu1->a.magic_p || !vu2->a.magic_p ) { bu_log( "nmg_uv_in_lu: Called with lu (x%x) containing vu with no attribute!!!!\n", lu ); bu_bomb( "nmg_uv_in_lu: Called with lu containing vu with no attribute!!!\n" ); } if ( *vu1->a.magic_p != NMG_VERTEXUSE_A_CNURB_MAGIC || *vu2->a.magic_p != NMG_VERTEXUSE_A_CNURB_MAGIC ) { bu_log( "nmg_uv_in_lu: Called with lu (x%x) containing vu that is not CNURB!!!!\n", lu ); bu_bomb( "nmg_uv_in_lu: Called with lu containing vu that is not CNURB!!!\n" ); } vua1 = vu1->a.cnurb_p; vua2 = vu2->a.cnurb_p; VMOVE( uv1, vua1->param ); VMOVE( uv2, vua2->param ); if ( RT_NURB_IS_PT_RATIONAL( eg->pt_type ) ) { uv1[0] /= uv1[2]; uv1[1] /= uv1[2]; uv2[0] /= uv2[2]; uv2[1] /= uv2[2]; } if ( uv1[1] < v && uv2[1] < v ) continue; if ( uv1[1] > v && uv2[1] > v ) continue; if ( uv1[0] <= u && uv2[0] <= u ) continue; if ( uv1[0] == uv2[0] ) { if ( (uv1[1] <= v && uv2[1] >= v) || (uv2[1] <= v && uv1[1] >= v) ) crossings++; continue; } /* need to calculate intersection */ slope = (uv1[1] - uv2[1])/(uv1[0] - uv2[0]); intersept = uv1[1] - slope * uv1[0]; u_on_curve = (v - intersept)/slope; if ( u_on_curve > u ) crossings++; } else crossings += rt_uv_in_trim( eg, u, v ); } if ( crossings & 01 ) return( 1 ); else return( 0 ); }
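/* Illustrative sketch (not part of the original source): nmg_uv_in_lu()
 * classifies (u, v) against a trimming loop with the even-odd rule, casting
 * a ray toward +u and counting crossings; an odd count means inside.  The
 * assumed helper uv_point_in_polygon() below is the same test for a plain
 * polygonal loop (straight edges only, no CNURB trim curves).
 */

/* Even-odd (crossing number) point-in-polygon test in the UV plane.
 * pts holds n (u, v) vertex pairs forming a closed loop.
 */
static int
uv_point_in_polygon(double u, double v, const double (*pts)[2], int n)
{
    int i, j, crossings = 0;

    for (i = 0, j = n - 1; i < n; j = i++) {
	double u1 = pts[j][0], v1 = pts[j][1];
	double u2 = pts[i][0], v2 = pts[i][1];

	/* does edge j->i straddle the horizontal line through v? */
	if ((v1 > v) == (v2 > v))
	    continue;

	/* u where the edge crosses that line; count crossings to the right */
	if (u1 + (v - v1) / (v2 - v1) * (u2 - u1) > u)
	    crossings++;
    }
    return crossings & 1;	/* odd => inside */
}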