/**
 * Create a placeholder for a NURB surface.
 */
struct face_g_snurb *
rt_nurb_new_snurb(int u_order, int v_order, int n_u, int n_v, int n_rows, int n_cols, int pt_type, struct resource *res)
{
    register struct face_g_snurb * srf;
    int pnum;

    if (res) RT_CK_RESOURCE(res);

    GET_SNURB(srf);
    srf->order[0] = u_order;
    srf->order[1] = v_order;
    srf->dir = RT_NURB_SPLIT_ROW;

    srf->u.k_size = n_u;
    srf->v.k_size = n_v;
    srf->s_size[0] = n_rows;
    srf->s_size[1] = n_cols;
    srf->pt_type = pt_type;

    pnum = sizeof (fastf_t) * n_rows * n_cols * RT_NURB_EXTRACT_COORDS(pt_type);

    srf->u.knots = (fastf_t *) bu_malloc (
	n_u * sizeof (fastf_t), "rt_nurb_new_snurb: u kv knot values");
    srf->v.knots = (fastf_t *) bu_malloc (
	n_v * sizeof (fastf_t), "rt_nurb_new_snurb: v kv knot values");
    srf->ctl_points = (fastf_t *) bu_malloc(
	pnum, "rt_nurb_new_snurb: control mesh points");

    return srf;
}
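A usage sketch (not part of the original listing; it assumes the usual raytrace.h declarations, the RT_NURB_MAKE_PT_TYPE() macro, and a valid resource pointer): allocate a placeholder for a bicubic surface over a 4x4 control mesh, which the caller must then fill in.

static struct face_g_snurb *
example_bicubic_placeholder(struct resource *res)
{
    /* non-rational XYZ control points, 3 coordinates each */
    int pt_type = RT_NURB_MAKE_PT_TYPE(3, RT_NURB_PT_XYZ, RT_NURB_PT_NONRAT);

    /* order 4 in u and v; knot vector size = order + mesh size = 8 */
    struct face_g_snurb *srf = rt_nurb_new_snurb(4, 4, 8, 8, 4, 4, pt_type, res);

    /* u.knots, v.knots, and ctl_points are allocated but NOT
     * initialized; the caller must populate them, e.g. with
     * rt_nurb_kvknot(), before using the surface. */
    return srf;
}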
int
rt_obj_export(struct bu_external *ep, const struct rt_db_internal *ip, double local2mm, const struct db_i *dbip, struct resource *resp)
{
    int id;
    const struct rt_functab *ft;
    int (*export_func)(struct bu_external *, const struct rt_db_internal *, double, const struct db_i *, struct resource *);

    if (!ep || !ip || !dbip || local2mm < 0.0)
	return -1;

    BU_CK_EXTERNAL(ep);
    RT_CK_DB_INTERNAL(ip);
    RT_CK_DBI(dbip);
    if (resp) RT_CK_RESOURCE(resp);

    id = ip->idb_minor_type;
    if (id < 0)
	return -2;

    ft = &OBJ[id];
    if (!ft)
	return -3;

    if (dbip->dbi_version < 5) {
	export_func = ft->ft_export4;
    } else {
	export_func = ft->ft_export5;
    }

    if (!export_func)
	return -4;

    return export_func(ep, ip, local2mm, dbip, resp);
}
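A hedged caller sketch for the dispatcher above (ip, dbip, and resp are assumed to come from elsewhere; a local2mm of 1.0 means no unit scaling):

static int
example_export(const struct rt_db_internal *ip, const struct db_i *dbip, struct resource *resp)
{
    struct bu_external ext;

    BU_EXTERNAL_INIT(&ext);
    if (rt_obj_export(&ext, ip, 1.0, dbip, resp) < 0) {
	bu_log("export failed\n");
	return -1;
    }
    /* ... write ext.ext_buf / ext.ext_nbytes somewhere ... */
    bu_free_external(&ext);
    return 0;
}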
Example #3
/**
 * rt_nurb_kvknot()
 *
 * Generate an open knot vector with n=order knots at the beginning of
 * the sequence, n knots at the end of the sequence, and num evenly
 * spaced knots in between, running from the lower to the upper value.
 */
void
rt_nurb_kvknot(register struct knot_vector *new_knots, int order, fastf_t lower, fastf_t upper, int num, struct resource *res)
{
    register int i;
    int total;
    fastf_t knot_step;

    if (res) RT_CK_RESOURCE(res);

    total = order * 2 + num;

    knot_step = (upper - lower) / (num + 1);

    new_knots->k_size = total;

    new_knots->knots = (fastf_t *) bu_malloc (sizeof(fastf_t) * total,
					      "rt_nurb_kvknot: new knots values");

    for (i = 0; i < order; i++)
	new_knots->knots[i] = lower;

    for (i = order; i <= (num + order - 1); i++)
	new_knots->knots[i] = new_knots->knots[i-1] + knot_step;

    for (i = (num + order); i < total; i++)
	new_knots->knots[i] = upper;
}
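A worked example (illustrative values, not from the source): with order = 3, lower = 0.0, upper = 1.0, and num = 2, total = 2*3 + 2 = 8 and knot_step = 1/3, so the routine builds the open knot vector {0, 0, 0, 1/3, 2/3, 1, 1, 1}.

struct knot_vector kv;

rt_nurb_kvknot(&kv, 3, 0.0, 1.0, 2, NULL);
/* kv.k_size == 8; kv.knots == {0, 0, 0, 1/3, 2/3, 1, 1, 1} */
bu_free((char *)kv.knots, "example: kvknot knots");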
Example #4
/**
 * rt_nurb_kvmerge()
 *
 * Merge two knot vectors together and return the new resulting knot
 * vector.
 */
void
rt_nurb_kvmerge(struct knot_vector *new_knots, const struct knot_vector *kv1, const struct knot_vector *kv2, struct resource *res)
{
    int kv1_ptr = 0;
    int kv2_ptr = 0;
    int new_ptr;

    if (res) RT_CK_RESOURCE(res);

    new_knots->k_size = kv1->k_size + kv2->k_size;

    new_knots->knots = (fastf_t *) bu_malloc(
	sizeof (fastf_t) * new_knots->k_size,
	"rt_nurb_kvmerge: new knot values");

    for (new_ptr = 0; new_ptr < new_knots->k_size; new_ptr++) {
	if (kv1_ptr >= kv1->k_size)
	    new_knots->knots[new_ptr] = kv2->knots[kv2_ptr++];
	else if (kv2_ptr >= kv2->k_size)
	    new_knots->knots[new_ptr] = kv1->knots[kv1_ptr++];
	else if (kv1->knots[kv1_ptr] < kv2->knots[kv2_ptr])
	    new_knots->knots[new_ptr] = kv1->knots[kv1_ptr++];
	else
	    new_knots->knots[new_ptr] = kv2->knots[kv2_ptr++];
    }
}
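Both inputs are assumed to be sorted, so the loop above is a standard two-pointer merge. An illustrative call (values hypothetical), using rt_nurb_kvgen() from later in this listing:

struct knot_vector a, b, merged;

rt_nurb_kvgen(&a, 0.0, 1.0, 3, NULL);	/* a.knots == {0.25, 0.5, 0.75} */
rt_nurb_kvgen(&b, 0.0, 1.0, 1, NULL);	/* b.knots == {0.5} */
rt_nurb_kvmerge(&merged, &a, &b, NULL);
/* merged.knots == {0.25, 0.5, 0.5, 0.75}, merged.k_size == 4 */

bu_free((char *)a.knots, "example: a knots");
bu_free((char *)b.knots, "example: b knots");
bu_free((char *)merged.knots, "example: merged knots");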
Example #5
void
db_alloc_directory_block(struct resource *resp)
{
    struct directory *dp;
    size_t bytes;

    RT_CK_RESOURCE(resp);
    BU_CK_PTBL(&resp->re_directory_blocks);

    BU_ASSERT_PTR(resp->re_directory_hd, ==, NULL);

    /* Get a BIG block */
    bytes = (size_t)bu_malloc_len_roundup(1024*sizeof(struct directory));
    dp = (struct directory *)bu_calloc(1, bytes, "re_directory_blocks from db_alloc_directory_block() " BU_FLSTR);

    /* Record storage for later */
    bu_ptbl_ins(&resp->re_directory_blocks, (long *)dp);

    while (bytes >= sizeof(struct directory)) {
	dp->d_magic = RT_DIR_MAGIC;
	dp->d_forw = resp->re_directory_hd;
	resp->re_directory_hd = dp;
	dp++;
	bytes -= sizeof(struct directory);
    }
}
Example #6
/**
 * rt_nurb_kvmult()
 *
 * Construct a new knot vector which is the same as the passed-in knot
 * vector except that the knot val has multiplicity num.  It checks
 * whether val is already a multiple knot.
 */
void
rt_nurb_kvmult(struct knot_vector *new_kv, const struct knot_vector *kv, int num, register fastf_t val, struct resource *res)
{
    int n;
    register int i;
    struct knot_vector check;

    if (res) RT_CK_RESOURCE(res);

    n = rt_nurb_kvcheck(val, kv);

    check.k_size = num - n;
    if (check.k_size <= 0) {
	bu_log("rt_nurb_kvmult(new_kv=%p, kv=%p, num=%d, val=%g)\n",
	       (void *)new_kv, (void *)kv, num, val);
	rt_nurb_pr_kv(kv);
	bu_bomb("rt_nurb_kvmult\n");
    }

    check.knots = (fastf_t *) bu_malloc(sizeof(fastf_t) * check.k_size,
					"rt_nurb_kvmult: check knots");

    for (i = 0; i < num - n; i++)
	check.knots[i] = val;

    rt_nurb_kvmerge(new_kv, &check, kv, res);

    /* free up old knot values */
    bu_free((char *)check.knots, "rt_nurb_kvmult:check knots");
}
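For illustration (hypothetical values): raising val = 0.5 to multiplicity num = 3 in a Bezier-style vector that contains no copies of it merges three new knots into the result. This uses rt_nurb_gen_knot_vector() from later in this listing.

struct knot_vector bezier_kv, mult_kv;

rt_nurb_gen_knot_vector(&bezier_kv, 3, 0.0, 1.0, NULL);	/* {0, 0, 0, 1, 1, 1} */
rt_nurb_kvmult(&mult_kv, &bezier_kv, 3, 0.5, NULL);
/* mult_kv.knots == {0, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1} */

bu_free((char *)bezier_kv.knots, "example: bezier knots");
bu_free((char *)mult_kv.knots, "example: mult knots");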
int
rt_obj_import(struct rt_db_internal *ip, const struct bu_external *ep, const mat_t mat, const struct db_i *dbip, struct resource *resp)
{
    int id;
    const struct rt_functab *ft;
    int (*import)(struct rt_db_internal *, const struct bu_external *, const mat_t, const struct db_i *, struct resource *);

    if (!ip || !ep || !dbip)
	return -1;

    RT_CK_DB_INTERNAL(ip);
    BU_CK_EXTERNAL(ep);
    RT_CK_DBI(dbip);
    if (resp) RT_CK_RESOURCE(resp);

    id = ip->idb_minor_type;
    if (id < 0)
	return -2;

    ft = &OBJ[id];
    if (!ft)
	return -3;

    if (dbip->dbi_version < 5) {
	import = ft->ft_import4;
    } else {
	import = ft->ft_import5;
    }

    if (!import)
	return -4;

    return import(ip, ep, mat, dbip, resp);
}
int
rt_obj_describe(struct bu_vls *logstr, const struct rt_db_internal *ip, int verbose, double mm2local, struct resource *resp, struct db_i *dbip)
{
    int id;
    const struct rt_functab *ft;

    if (!logstr || !ip)
	return -1;

    BU_CK_VLS(logstr);
    RT_CK_DB_INTERNAL(ip);
    if (resp) RT_CK_RESOURCE(resp);
    if (dbip) RT_CK_DBI(dbip);

    id = ip->idb_minor_type;
    if (id < 0)
	return -2;

    ft = &OBJ[id];
    if (!ft)
	return -3;
    if (!ft->ft_describe)
	return -4;

    return ft->ft_describe(logstr, ip, verbose, mm2local, resp, dbip);
}
Example #9
/**
 * Free up the structures and links for the oslo matrix.
 */
void
rt_nurb_free_oslo(struct oslo_mat *om, struct resource *res)
{
    register struct oslo_mat * omp;

    if (res) RT_CK_RESOURCE(res);

    while (om != (struct oslo_mat *) 0) {
	omp = om;
	om = om->next;
	bu_free((char *)omp->o_vec, "rt_nurb_free_oslo: ovec");
	bu_free((char *)omp, "rt_nurb_free_oslo: struct oslo");
    }
}
Example #10
/**
 * rt_nurb_kvcopy()
 *
 * Generic routine to copy a knot vector into a newly allocated one.
 */
void
rt_nurb_kvcopy(struct knot_vector *new_kv, register const struct knot_vector *old_kv, struct resource *res)
{
    register int i;

    if (res) RT_CK_RESOURCE(res);

    new_kv->k_size = old_kv->k_size;

    new_kv->knots = (fastf_t *) bu_malloc(sizeof(fastf_t) *
					  new_kv->k_size, "spl_kvcopy: new knot values");

    for (i = 0; i < new_kv->k_size; i++)
	new_kv->knots[i] = old_kv->knots[i];
}
Example #11
/* ARGSUSED */
HIDDEN int
rt_gettree_region_start(struct db_tree_state *tsp, struct db_full_path *pathp, const struct rt_comb_internal *combp, genptr_t client_data)
{
    RT_CK_RTI(tsp->ts_rtip);
    RT_CK_RESOURCE(tsp->ts_resp);

    /* Ignore "air" regions unless wanted */
    if ( tsp->ts_rtip->useair == 0 &&  tsp->ts_aircode != 0 )  {
	tsp->ts_rtip->rti_air_discards++;
	return(-1);	/* drop this region */
    }
    return(0);
}
Example #12
/**
 * This routine must be prepared to run in parallel.
 */
HIDDEN int
_rt_gettree_region_start(struct db_tree_state *tsp, const struct db_full_path *pathp, const struct rt_comb_internal *combp, void *UNUSED(client_data))
{
  if (tsp) {
    RT_CK_RTI(tsp->ts_rtip);
    RT_CK_RESOURCE(tsp->ts_resp);
    if (pathp) RT_CK_FULL_PATH(pathp);
    if (combp) RT_CHECK_COMB(combp);

    /* Ignore "air" regions unless wanted */
    if (tsp->ts_rtip->useair == 0 &&  tsp->ts_aircode != 0) {
      tsp->ts_rtip->rti_air_discards++;
      return -1;	/* drop this region */
    }
  }
  return 0;
}
Example #13
int
rt_comb_export4(
    struct bu_external *ep,
    const struct rt_db_internal *ip,
    double UNUSED(local2mm),
    const struct db_i *dbip,
    struct resource *resp)
{
    struct rt_comb_internal *comb;
    size_t node_count;
    size_t actual_count;
    struct rt_tree_array *rt_tree_array;
    union tree *tp;
    union record *rp;
    size_t j;
    char *endp;
    struct bu_vls tmp_vls = BU_VLS_INIT_ZERO;

    RT_CK_DB_INTERNAL(ip);
    if (dbip) RT_CK_DBI(dbip);
    RT_CK_RESOURCE(resp);
    if (ip->idb_type != ID_COMBINATION) bu_bomb("rt_comb_export4() type not ID_COMBINATION");
    comb = (struct rt_comb_internal *)ip->idb_ptr;
    RT_CK_COMB(comb);

    if (comb->tree && db_ck_v4gift_tree(comb->tree) < 0) {
	db_non_union_push(comb->tree, resp);
	if (db_ck_v4gift_tree(comb->tree) < 0) {
	    /* Need to further modify tree */
	    bu_log("rt_comb_export4() Unable to V4-ify tree, aborting.\n");
	    rt_pr_tree(comb->tree, 0);
	    return -1;
	}
    }

    /* Count # leaves in tree -- that's how many Member records needed. */
    node_count = db_tree_nleaves(comb->tree);
    if (node_count > 0) {
	rt_tree_array = (struct rt_tree_array *)bu_calloc(node_count, sizeof(struct rt_tree_array), "rt_tree_array");

	/* Convert tree into array form */
	actual_count = db_flatten_tree(rt_tree_array, comb->tree,
				       OP_UNION, 1, resp) - rt_tree_array;
	BU_ASSERT_SIZE_T(actual_count, ==, node_count);
	comb->tree = TREE_NULL;
    } else {
Example #14
/**
 * This routine will be called by db_walk_tree() once all the solids
 * in this region have been visited.
 *
 * This routine must be prepared to run in parallel.  As a result,
 * note that the details of the solids pointed to by the soltab
 * pointers in the tree may not be filled in when this routine is
 * called (due to the way multiple instances of solids are handled).
 * Therefore, everything which referred to the tree has been moved out
 * into the serial section.  (_rt_tree_region_assign, rt_bound_tree)
 */
HIDDEN union tree *
_rt_gettree_region_end(struct db_tree_state *tsp, const struct db_full_path *pathp, union tree *curtree, void *client_data)
{
    struct region *rp;
    struct directory *dp = NULL;
    size_t shader_len=0;
    struct rt_i *rtip;
    Tcl_HashTable *tbl = (Tcl_HashTable *)client_data;
    Tcl_HashEntry *entry;
    matp_t inv_mat;
    struct bu_attribute_value_set avs;
    struct bu_attribute_value_pair *avpp;

    RT_CK_DBI(tsp->ts_dbip);
    RT_CK_FULL_PATH(pathp);
    RT_CK_TREE(curtree);
    rtip =  tsp->ts_rtip;
    RT_CK_RTI(rtip);
    RT_CK_RESOURCE(tsp->ts_resp);

    if (curtree->tr_op == OP_NOP) {
	/* Ignore empty regions */
	return curtree;
    }

    BU_ALLOC(rp, struct region);
    rp->l.magic = RT_REGION_MAGIC;
    rp->reg_regionid = tsp->ts_regionid;
    rp->reg_is_fastgen = tsp->ts_is_fastgen;
    rp->reg_aircode = tsp->ts_aircode;
    rp->reg_gmater = tsp->ts_gmater;
    rp->reg_los = tsp->ts_los;

    dp = (struct directory *)DB_FULL_PATH_CUR_DIR(pathp);
    if (!dp)
	return TREE_NULL;

    bu_avs_init_empty(&avs);
    if (db5_get_attributes(tsp->ts_dbip, &avs, dp) == 0) {
	/* copy avs */
	bu_avs_init_empty(&(rp->attr_values));
	for (BU_AVS_FOR(avpp, &(tsp->ts_attrs))) {
	    bu_avs_add(&(rp->attr_values), avpp->name, bu_avs_get(&avs, avpp->name));
	}
    }
Example #15
/**
 * rt_nurb_kvgen()
 *
 * Generate a knot vector with num knots from lower value to the upper
 * value.
 */
void
rt_nurb_kvgen(register struct knot_vector *kv, fastf_t lower, fastf_t upper, int num, struct resource *res)
{
    register int i;
    register fastf_t inc;

    if (res) RT_CK_RESOURCE(res);

    inc = (upper - lower) / (num + 1);

    kv->k_size = num;

    kv->knots = (fastf_t *) bu_malloc (sizeof(fastf_t) * num,
				       "rt_nurb_kvgen: kv knots");

    for (i = 1; i <= num; i++)
	kv->knots[i-1] = lower + i * inc;
}
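Note that unlike rt_nurb_kvknot() this produces only the num interior knots, with no end-knot padding. Illustrative values:

struct knot_vector kv;

rt_nurb_kvgen(&kv, 0.0, 1.0, 3, NULL);
/* inc == 0.25; kv.knots == {0.25, 0.5, 0.75}, kv.k_size == 3 */
bu_free((char *)kv.knots, "example: kvgen knots");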
Example #16
/**
 * rt_nurb_kvextract()
 *
 * Extract the portion of the knot vector from kv->knots[lower] to
 * kv->knots[upper]
 */
void
rt_nurb_kvextract(struct knot_vector *new_kv, register const struct knot_vector *kv, int lower, int upper, struct resource *res)
{
    register int i;
    register fastf_t *ptr;

    if (res) RT_CK_RESOURCE(res);

    new_kv->knots = (fastf_t *) bu_malloc (
	sizeof (fastf_t) * (upper - lower),
	"spl_kvextract: nkw kv values");

    new_kv->k_size = upper - lower;
    ptr = new_kv->knots;

    for (i = lower; i < upper; i++)
	*ptr++ = kv->knots[i];
}
/**
 * Clean up the storage used by an snurb, but don't release the
 * pointer.  Often used by routines that allocate an array of nurb
 * pointers, or use automatic variables to hold one.
 */
void
rt_nurb_clean_snurb(struct face_g_snurb *srf, struct resource *res)
{
    NMG_CK_SNURB(srf);

    if (res) RT_CK_RESOURCE(res);

    bu_free((char *)srf->u.knots, "rt_nurb_clean_snurb() u.knots");
    bu_free((char *)srf->v.knots, "rt_nurb_clean_snurb() v.knots");
    bu_free((char *)srf->ctl_points, "rt_nurb_clean_snurb() ctl_points");

    /* Invalidate the structure */
    srf->u.knots = (fastf_t *)NULL;
    srf->v.knots = (fastf_t *)NULL;
    srf->ctl_points = (fastf_t *)NULL;
    srf->order[0] = srf->order[1] = -1;
    srf->l.magic = 0;
}
void
rt_nurb_free_snurb(struct face_g_snurb *srf, struct resource *res)
{
    NMG_CK_SNURB(srf);

    if (res) RT_CK_RESOURCE(res);

    /* assume that links to other surfaces and curves have already
     * been deleted.
     */

    bu_free((char *)srf->u.knots, "rt_nurb_free_snurb: u kv knots");
    bu_free((char *)srf->v.knots, "rt_nurb_free_snurb: v kv knots");
    bu_free((char *)srf->ctl_points, "rt_nurb_free_snurb: mesh points");

    srf->l.magic = 0;
    bu_free((char *)srf, "rt_nurb_free_snurb: snurb struct");
}
Example #19
/**
 * rt_nurb_gen_knot_vector()
 *
 * Generate an open knot vector with n=order knots at the beginning of
 * the sequence and n knots at the end of the sequence.
 */
void
rt_nurb_gen_knot_vector(register struct knot_vector *new_knots, int order, fastf_t lower, fastf_t upper, struct resource *res)
{
    register int i;
    int total;

    if (res) RT_CK_RESOURCE(res);

    total = order * 2;

    new_knots->k_size = total;

    new_knots->knots = (fastf_t *) bu_malloc (sizeof(fastf_t) * total,
					      "rt_nurb_gen_knot_vector: new knots values");

    for (i = 0; i < order; i++)
	new_knots->knots[i] = lower;

    for (i = order; i < total; i++)
	new_knots->knots[i] = upper;
}
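This is the degenerate case of rt_nurb_kvknot() with num = 0; for example (illustrative), order = 4 yields a Bezier knot vector:

struct knot_vector kv;

rt_nurb_gen_knot_vector(&kv, 4, 0.0, 1.0, NULL);
/* kv.k_size == 8; kv.knots == {0, 0, 0, 0, 1, 1, 1, 1} */
bu_free((char *)kv.knots, "example: bezier kv");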
Example #20
struct rt_tree_array *
db_flatten_tree(
    struct rt_tree_array *rt_tree_array,
    union tree *tp,
    int op,
    int freeflag,
    struct resource *resp)
{

    RT_CK_TREE(tp);
    RT_CK_RESOURCE(resp);

    switch (tp->tr_op) {
	case OP_DB_LEAF:
	    rt_tree_array->tl_op = op;
	    rt_tree_array->tl_tree = tp;
	    return rt_tree_array+1;

	case OP_UNION:
	case OP_INTERSECT:
	case OP_SUBTRACT:
	    /* This node is known to be a binary op */
	    rt_tree_array = db_flatten_tree(rt_tree_array, tp->tr_b.tb_left, op, freeflag, resp);
	    rt_tree_array = db_flatten_tree(rt_tree_array, tp->tr_b.tb_right, tp->tr_op, freeflag, resp);
	    if (freeflag) {
		/* The leaves have been stolen, free the binary op */
		tp->tr_b.tb_left = TREE_NULL;
		tp->tr_b.tb_right = TREE_NULL;
		RT_FREE_TREE(tp, resp);
	    }
	    return rt_tree_array;

	default:
	    bu_log("db_flatten_tree: bad op %d\n", tp->tr_op);
	    bu_bomb("db_flatten_tree\n");
    }

    return (struct rt_tree_array *)NULL; /* for the compiler */
}
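A sketch of the count-then-flatten pattern this routine supports, the same pattern visible in rt_comb_export4() earlier in this listing (comb and resp are assumed to exist; names hypothetical):

size_t n = db_tree_nleaves(comb->tree);
struct rt_tree_array *arr = (struct rt_tree_array *)
    bu_calloc(n, sizeof(struct rt_tree_array), "example: tree array");

/* freeflag = 1: binary op nodes are freed as their leaves are stolen */
size_t actual = db_flatten_tree(arr, comb->tree, OP_UNION, 1, resp) - arr;
/* actual should equal n; arr[0..n-1] now holds the leaves in order */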
Example #21
/*
 * The only reason for this to be broken out is that
 * 2 separate locations in db_functree() call it.
 */
void
db_functree_subtree(struct db_i *dbip,
		    union tree *tp,
		    void (*comb_func) (struct db_i *, struct directory *, void *),
		    void (*leaf_func) (struct db_i *, struct directory *, void *),
		    struct resource *resp,
		    void *client_data)
{
    struct directory *dp;

    if (!tp)
	return;

    RT_CHECK_DBI(dbip);
    RT_CK_TREE(tp);
    if (resp) {
	RT_CK_RESOURCE(resp);
    }

    switch (tp->tr_op) {

	case OP_DB_LEAF:
	    if ((dp=db_lookup(dbip, tp->tr_l.tl_name, LOOKUP_NOISY)) == RT_DIR_NULL)
		return;
	    db_functree(dbip, dp, comb_func, leaf_func, resp, client_data);
	    break;

	case OP_UNION:
	case OP_INTERSECT:
	case OP_SUBTRACT:
	case OP_XOR:
	    db_functree_subtree(dbip, tp->tr_b.tb_left, comb_func, leaf_func, resp, client_data);
	    db_functree_subtree(dbip, tp->tr_b.tb_right, comb_func, leaf_func, resp, client_data);
	    break;
	default:
	    bu_log("db_functree_subtree: unrecognized operator %d\n", tp->tr_op);
	    bu_bomb("db_functree_subtree: unrecognized operator\n");
    }
}
Example #22
void
rt_alloc_seg_block(register struct resource *res)
{
    register struct seg *sp;
    size_t bytes;

    RT_CK_RESOURCE(res);

    if (!BU_LIST_IS_INITIALIZED(&res->re_seg)) {
	BU_LIST_INIT(&(res->re_seg));
	bu_ptbl_init(&res->re_seg_blocks, 64, "re_seg_blocks ptbl");
    }
    bytes = bu_malloc_len_roundup(64*sizeof(struct seg));
    sp = (struct seg *)bu_malloc(bytes, "rt_alloc_seg_block()");
    bu_ptbl_ins(&res->re_seg_blocks, (long *)sp);
    while (bytes >= sizeof(struct seg)) {
	sp->l.magic = RT_SEG_MAGIC;
	BU_LIST_INSERT(&(res->re_seg), &(sp->l));
	res->re_seglen++;
	sp++;
	bytes -= sizeof(struct seg);
    }
}
Example #23
/**
 * A generic traversal function.
 */
void
db_traverse_subtree(union tree *tp,
		    void (*traverse_func) (struct directory *, struct db_traverse *),
		    struct db_traverse *dtp)
{
    struct directory *dp;

    if (!tp)
	return;

    RT_CK_DB_TRAVERSE(dtp);
    RT_CHECK_DBI(dtp->dbip);
    RT_CK_TREE(tp);
    if (dtp->resp) {
	RT_CK_RESOURCE(dtp->resp);
    }

    switch (tp->tr_op) {

	case OP_DB_LEAF:
	    if ((dp=db_lookup(dtp->dbip, tp->tr_l.tl_name, LOOKUP_NOISY)) == RT_DIR_NULL)
		return;
	    traverse_func(dp, dtp);
	    break;

	case OP_UNION:
	case OP_INTERSECT:
	case OP_SUBTRACT:
	case OP_XOR:
	    db_traverse_subtree(tp->tr_b.tb_left, traverse_func, dtp);
	    db_traverse_subtree(tp->tr_b.tb_right, traverse_func, dtp);
	    break;
	default:
	    bu_log("db_functree_subtree: unrecognized operator %d\n", tp->tr_op);
	    bu_bomb("db_functree_subtree: unrecognized operator\n");
    }
}
Example #24
/**
 * Given a ray, shoot it at all the relevant parts of the model,
 * (building the HeadSeg chain), and then call rt_boolregions() to
 * build and evaluate the partition chain.  If the ray actually hit
 * anything, call the application's a_hit() routine with a pointer to
 * the partition chain, otherwise, call the application's a_miss()
 * routine.
 *
 * It is important to note that rays extend infinitely only in the
 * positive direction.  The ray is composed of all points P, where
 *
 * P = r_pt + K * r_dir
 *
 * for K ranging from 0 to +infinity.  There is no looking backwards.
 *
 * It is also important to note that the direction vector r_dir must
 * have unit length; this is mandatory, and is not ordinarily checked,
 * in the name of efficiency.
 *
 * Input:  Pointer to an application structure, with these mandatory fields:
 * a_ray.r_pt Starting point of ray to be fired
 * a_ray.r_dir UNIT VECTOR with direction to fire in (dir cosines)
 * a_hit Routine to call when something is hit
 * a_miss Routine to call when ray misses everything
 *
 * Calls user's a_miss() or a_hit() routine as appropriate.  Passes
 * a_hit() routine list of partitions, with only hit_dist fields
 * valid.  Normal computation deferred to user code, to avoid needless
 * computation here.
 *
 * Returns: whatever the application function returns (an int).
 *
 * NOTE: The application functions may call rt_shootray() recursively.
 * Thus, none of the local variables may be static.
 *
 * An open issue for execution in a PARALLEL environment is locking of
 * the statistics variables.
 */
int
rt_vshootray(struct application *ap)
{
    struct seg *HeadSeg;
    int ret;
    vect_t inv_dir;	/* inverses of ap->a_ray.r_dir */
    struct bu_bitv *solidbits;	/* bits for all solids shot so far */
    struct bu_ptbl *regionbits;	/* bits for all involved regions */
    char *status;
    struct partition InitialPart;	/* Head of Initial Partitions */
    struct partition FinalPart;	/* Head of Final Partitions */
    int nrays = 1;			/* for now */
    int vlen;
    int id;
    int i;
    struct soltab **ary_stp;	/* array of pointers */
    struct xray **ary_rp;	/* array of pointers */
    struct seg *ary_seg;	/* array of structures */
    struct rt_i *rtip;
    int done;

#define BACKING_DIST (-2.0)		/* mm to look behind start point */
    rtip = ap->a_rt_i;
    RT_AP_CHECK(ap);
    if (!ap->a_resource) {
	ap->a_resource = &rt_uniresource;
    }
    RT_CK_RESOURCE(ap->a_resource);

    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("\n**********mshootray cpu=%d  %d, %d lvl=%d (%s)\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?");
	VPRINT("Pnt", ap->a_ray.r_pt);
	VPRINT("Dir", ap->a_ray.r_dir);
    }

    rtip->rti_nrays++;
    if (rtip->needprep)
	rt_prep(rtip);

    /* Allocate dynamic memory */
    vlen = nrays * rtip->rti_maxsol_by_type;
    ary_stp = (struct soltab **)bu_calloc(vlen, sizeof(struct soltab *),
					  "*ary_stp[]");
    ary_rp = (struct xray **)bu_calloc(vlen, sizeof(struct xray *),
				       "*ary_rp[]");
    ary_seg = (struct seg *)bu_calloc(vlen, sizeof(struct seg),
				      "ary_seg[]");

    /**** for each ray, do this ****/

    InitialPart.pt_forw = InitialPart.pt_back = &InitialPart;
    FinalPart.pt_forw = FinalPart.pt_back = &FinalPart;

    HeadSeg = RT_SEG_NULL;

    solidbits = rt_get_solidbitv(rtip->nsolids, ap->a_resource);

    if (BU_LIST_IS_EMPTY(&ap->a_resource->re_region_ptbl)) {
	BU_ALLOC(regionbits, struct bu_ptbl);
	bu_ptbl_init(regionbits, 7, "rt_shootray() regionbits ptbl");
    } else {
	regionbits = BU_LIST_FIRST(bu_ptbl, &ap->a_resource->re_region_ptbl);
	BU_LIST_DEQUEUE(&regionbits->l);
	BU_CK_PTBL(regionbits);
    }

    /* Compute the inverse of the direction cosines */
    if (!ZERO(ap->a_ray.r_dir[X])) {
	inv_dir[X]=1.0/ap->a_ray.r_dir[X];
    } else {
	inv_dir[X] = INFINITY;
	ap->a_ray.r_dir[X] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Y])) {
	inv_dir[Y]=1.0/ap->a_ray.r_dir[Y];
    } else {
	inv_dir[Y] = INFINITY;
	ap->a_ray.r_dir[Y] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Z])) {
	inv_dir[Z]=1.0/ap->a_ray.r_dir[Z];
    } else {
	inv_dir[Z] = INFINITY;
	ap->a_ray.r_dir[Z] = 0.0;
    }

    /*
     * XXX handle infinite solids here, later.
     */

    /*
     * If ray does not enter the model RPP, skip on.
     * If ray ends exactly at the model RPP, trace it.
     */
    if (!rt_in_rpp(&ap->a_ray, inv_dir, rtip->mdl_min, rtip->mdl_max)  ||
	ap->a_ray.r_max < 0.0) {
	rtip->nmiss_model++;
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS model";
	goto out;
    }

    /* For each type of solid to be shot at, assemble the vectors */
    for (id = 1; id <= ID_MAX_SOLID; id++) {
	register int nsol;

	if ((nsol = rtip->rti_nsol_by_type[id]) <= 0) continue;

	/* For each instance of this solid type */
	for (i = nsol-1; i >= 0; i--) {
	    ary_stp[i] = rtip->rti_sol_by_type[id][i];
	    ary_rp[i] = &(ap->a_ray);	/* XXX, sb [ray] */
	    ary_seg[i].seg_stp = SOLTAB_NULL;
	    BU_LIST_INIT(&ary_seg[i].l);
	}
	/* bounding box check */
	/* bit vector per ray check */
	/* mark elements to be skipped with ary_stp[] = SOLTAB_NULL */
	ap->a_rt_i->nshots += nsol;	/* later: skipped ones */
	if (OBJ[id].ft_vshot) {
	    OBJ[id].ft_vshot(ary_stp, ary_rp, ary_seg, nsol, ap);
	} else {
	    vshot_stub(ary_stp, ary_rp, ary_seg, nsol, ap);
	}


	/* set bits for all solids shot at for each ray */

	/* append resulting seg list to input for boolweave */
	for (i = nsol-1; i >= 0; i--) {
	    register struct seg *seg2;

	    if (ary_seg[i].seg_stp == SOLTAB_NULL) {
		/* MISS */
		ap->a_rt_i->nmiss++;
		continue;
	    }
	    ap->a_rt_i->nhits++;

	    /* For now, do it the slow way.  sb [ray] */
	    /* MUST dup it -- all segs have to live till after a_hit() */
	    RT_GET_SEG(seg2, ap->a_resource);
	    *seg2 = ary_seg[i];	/* struct copy */
	    /* rt_boolweave(seg2, &InitialPart, ap); */
	    bu_bomb("FIXME: need to call boolweave here");

	    /* Add seg chain to list of used segs awaiting reclaim */

#if 0
	    /* FIXME: need to use waiting_segs/finished_segs here in
	     * conjunction with rt_boolweave()
	     {
	     register struct seg *seg3 = seg2;
	     while (seg3->seg_next != RT_SEG_NULL)
	     seg3 = seg3->seg_next;
	     seg3->seg_next = HeadSeg;
	     HeadSeg = seg2;
	     }
	    */
#endif
	}
    }

    /*
     * Ray has finally left known space.
     */
    if (InitialPart.pt_forw == &InitialPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISSed all primitives";
	goto freeup;
    }

    /*
     * All intersections of the ray with the model have been computed.
     * Evaluate the boolean trees over each partition.
     */
    done = rt_boolfinal(&InitialPart, &FinalPart, BACKING_DIST, INFINITY, regionbits, ap, solidbits);

    if (done > 0) goto hitit;

    if (FinalPart.pt_forw == &FinalPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS bool";
	goto freeup;
    }

    /*
     * Ray/model intersections exist.  Pass the list to the user's
     * a_hit() routine.  Note that only the hit_dist elements of
     * pt_inhit and pt_outhit have been computed yet.  To compute both
     * hit_point and hit_normal, use the
     *
     * RT_HIT_NORMAL(NULL, hitp, stp, rayp, 0);
     *
     * macro.  To compute just hit_point, use
     *
     * VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir);
     */
hitit:
    if (RT_G_DEBUG&DEBUG_SHOOT) rt_pr_partitions(rtip, &FinalPart, "a_hit()");

    if (ap->a_hit)
	ret = ap->a_hit(ap, &FinalPart, HeadSeg/* &finished_segs */);
    else
	ret = 0;
    status = "HIT";

    /*
     * Processing of this ray is complete.  Free dynamic resources.
     */
freeup:
    {
	register struct partition *pp;

	/* Free up initial partition list */
	for (pp = InitialPart.pt_forw; pp != &InitialPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
	/* Free up final partition list */
	for (pp = FinalPart.pt_forw; pp != &FinalPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
    }

    /* Segs can't be freed until after a_hit() has returned */
#if 0
    /* FIXME: depends on commented out code above */
    if (HeadSeg)
	RT_FREE_SEG_LIST(HeadSeg, ap->a_resource);
#endif

out:
    bu_free((char *)ary_stp, "*ary_stp[]");
    bu_free((char *)ary_rp, "*ary_rp[]");
    bu_free((char *)ary_seg, "ary_seg[]");

    if (solidbits != NULL) {
	bu_bitv_free(solidbits);
    }
    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("----------mshootray cpu=%d  %d, %d lvl=%d (%s) %s ret=%d\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?",
	       status, ret);
    }
    return ret;
}
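The long doc comment above applies equally to the scalar rt_shootray(); a minimal caller sketch under those stated constraints (a prepped rt_i, unit-length direction; the helper name, start point, and callbacks are hypothetical):

static int
example_fire_one_ray(struct rt_i *rtip,
		     int (*hit_fn)(struct application *, struct partition *, struct seg *),
		     int (*miss_fn)(struct application *))
{
    struct application ap;

    RT_APPLICATION_INIT(&ap);
    ap.a_rt_i = rtip;
    ap.a_hit = hit_fn;
    ap.a_miss = miss_fn;

    VSET(ap.a_ray.r_pt, 0.0, 0.0, 10000.0);	/* fire from above the model */
    VSET(ap.a_ray.r_dir, 0.0, 0.0, -1.0);
    VUNITIZE(ap.a_ray.r_dir);	/* direction MUST be unit length */

    return rt_shootray(&ap);	/* returns whatever a_hit()/a_miss() returned */
}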
Example #25
/**
 * Apply a 4x4 transformation matrix to the internal form of a solid.
 *
 * If "free" flag is non-zero, storage for the original solid is
 * released.  If "os" is same as "is", storage for the original solid
 * is overwritten with the new, transformed solid.
 *
 * Returns -
 * -1 FAIL
 *  0 OK
 */
int
rt_generic_xform(
    struct rt_db_internal *op,
    const mat_t mat,
    struct rt_db_internal *ip,
    int release,
    struct db_i *dbip,
    struct resource *resp)
{
    struct bu_external ext;
    int id;
    struct bu_attribute_value_set avs;

    RT_CK_DB_INTERNAL(ip);
    RT_CK_DBI(dbip);
    RT_CK_RESOURCE(resp);

    memset(&avs, 0, sizeof(struct bu_attribute_value_set));

    id = ip->idb_type;
    BU_EXTERNAL_INIT(&ext);
    /* Scale change on export is 1.0 -- no change */
    switch (db_version(dbip)) {
	case 4:
	    if (OBJ[id].ft_export4(&ext, ip, 1.0, dbip, resp) < 0) {
		bu_log("rt_generic_xform():  %s export failure\n",
		       OBJ[id].ft_name);
		return -1;			/* FAIL */
	    }
	    if ((release || op == ip)) rt_db_free_internal(ip);

	    RT_DB_INTERNAL_INIT(op);
	    if (OBJ[id].ft_import4(op, &ext, mat, dbip, resp) < 0) {
		bu_log("rt_generic_xform():  solid import failure\n");
		return -1;			/* FAIL */
	    }
	    break;
	case 5:
	    if (OBJ[id].ft_export5(&ext, ip, 1.0, dbip, resp) < 0) {
		bu_log("rt_generic_xform():  %s export failure\n",
		       OBJ[id].ft_name);
		return -1;			/* FAIL */
	    }

	    if ((release || op == ip)) {
		if (ip->idb_avs.magic == BU_AVS_MAGIC) {
		    /* grab the attributes before they are lost
		     * by rt_db_free_internal or RT_DB_INTERNAL_INIT
		     */
		    bu_avs_init(&avs, ip->idb_avs.count, "avs");
		    bu_avs_merge(&avs, &ip->idb_avs);
		}
		rt_db_free_internal(ip);
	    }

	    RT_DB_INTERNAL_INIT(op);

	    if (!release && op != ip) {
		/* just copy the attributes from ip to op */
		if (ip->idb_avs.magic == BU_AVS_MAGIC) {
		    bu_avs_init(&op->idb_avs, ip->idb_avs.count, "avs");
		    bu_avs_merge(&op->idb_avs, &ip->idb_avs);
		}
	    } else if (avs.magic == BU_AVS_MAGIC) {
		/* put the saved attributes in the output */
		bu_avs_init(&op->idb_avs, avs.count, "avs");
		bu_avs_merge(&op->idb_avs, &avs);
		bu_avs_free(&avs);
	    }

	    if (OBJ[id].ft_import5(op, &ext, mat, dbip, resp) < 0) {
		bu_log("rt_generic_xform():  solid import failure\n");
		return -1;			/* FAIL */
	    }
	    break;
    }

    bu_free_external(&ext);

    RT_CK_DB_INTERNAL(op);
    return 0;				/* OK */
}
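A hedged usage sketch: duplicating a solid by round-tripping it through this export/import path with the identity matrix (release = 0 keeps the original intact; bn_mat_identity is libbn's identity matrix, and ip, dbip, resp are assumed to exist):

struct rt_db_internal dup;

if (rt_generic_xform(&dup, bn_mat_identity, ip, 0, dbip, resp) < 0) {
    bu_log("rt_generic_xform() failed\n");
} else {
    /* ... use dup, then release it with rt_db_free_internal(&dup) ... */
}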
Example #26
void
db_update_nref( struct db_i *dbip, struct resource *resp )
{
    register int			i;
    register struct directory      *dp;
    struct rt_db_internal		intern;
    struct rt_comb_internal	       *comb;

    RT_CK_DBI( dbip );
    RT_CK_RESOURCE(resp);

    /* First, clear any existing counts */
    for ( i = 0; i < RT_DBNHASH; i++ )
	for ( dp = dbip->dbi_Head[i]; dp != DIR_NULL; dp = dp->d_forw )
	    dp->d_nref = 0;

    /* Examine all COMB nodes */
    for ( i = 0; i < RT_DBNHASH; i++ )  {
	for ( dp = dbip->dbi_Head[i]; dp != DIR_NULL; dp = dp->d_forw ) {

	    /* handle non-combination objects that reference other objects */
	    if ( dp->d_major_type == DB5_MAJORTYPE_BRLCAD ) {
		struct directory *dp2;

		if ( dp->d_minor_type == DB5_MINORTYPE_BRLCAD_EXTRUDE ) {
		    struct rt_extrude_internal *extr;

		    if ( rt_db_get_internal(&intern, dp, dbip, (fastf_t *)NULL, resp) < 0 )
			continue;
		    extr = (struct rt_extrude_internal *)intern.idb_ptr;
		    RT_EXTRUDE_CK_MAGIC( extr );
		    if ( extr->sketch_name ) {
			dp2 = db_lookup( dbip, extr->sketch_name, LOOKUP_QUIET );
			if ( dp2 != DIR_NULL ) {
			    dp2->d_nref++;
			}
		    }
		    rt_db_free_internal( &intern, resp );
		} else if ( dp->d_minor_type ==  DB5_MINORTYPE_BRLCAD_DSP ) {
		    struct rt_dsp_internal *dsp;

		    if ( rt_db_get_internal(&intern, dp, dbip, (fastf_t *)NULL, resp) < 0 )
			continue;
		    dsp = (struct rt_dsp_internal *)intern.idb_ptr;
		    RT_DSP_CK_MAGIC( dsp );
		    if ( dsp->dsp_datasrc == RT_DSP_SRC_OBJ && bu_vls_strlen( &dsp->dsp_name) > 0 ) {
			dp2 = db_lookup( dbip, bu_vls_addr( &dsp->dsp_name ), LOOKUP_QUIET );
			if ( dp2 != DIR_NULL ) {
			    dp2->d_nref++;
			}
		    }
		    rt_db_free_internal( &intern, resp );
		}
	    }
	    if ( !(dp->d_flags & DIR_COMB) )
		continue;
	    if ( rt_db_get_internal(&intern, dp, dbip, (fastf_t *)NULL, resp) < 0 )
		continue;
	    if ( intern.idb_type != ID_COMBINATION )  {
		bu_log("NOTICE: %s was marked a combination, but isn't one?  Clearing flag\n",
		       dp->d_namep);
		dp->d_flags &= ~DIR_COMB;
		rt_db_free_internal( &intern, resp );
		continue;
	    }
	    comb = (struct rt_comb_internal *)intern.idb_ptr;
	    db_tree_funcleaf( dbip, comb, comb->tree,
			      db_count_refs, (genptr_t)NULL,
			      (genptr_t)NULL, (genptr_t)NULL );
	    rt_db_free_internal( &intern, resp );
	}
    }
}
Example #27
/**
 * R T _ P G _ T O _ B O T
 *
 * Convert in-memory form of a polysolid (pg) to a bag of triangles (BoT)
 * There is no record in the V5 database for a polysolid.
 *
 * Depends on the "max_npts" parameter having been set.
 *
 * Returns -
 * -1 FAIL
 * 0 OK
 */
int
rt_pg_to_bot(struct rt_db_internal *ip, const struct bn_tol *tol, struct resource *resp)
{
    struct rt_pg_internal *ip_pg;
    struct rt_bot_internal *ip_bot;
    size_t max_pts;
    size_t max_tri;
    size_t p;
    size_t i;

    RT_CK_DB_INTERNAL(ip);
    BN_CK_TOL(tol);
    RT_CK_RESOURCE(resp);

    if (ip->idb_type != ID_POLY) {
	bu_log("ERROR: rt_pt_to_bot() called with a non-polysolid!!!\n");
	return -1;
    }
    ip_pg = (struct rt_pg_internal *)ip->idb_ptr;

    RT_PG_CK_MAGIC(ip_pg);

    BU_ALLOC(ip_bot, struct rt_bot_internal);
    ip_bot->magic = RT_BOT_INTERNAL_MAGIC;
    ip_bot->mode = RT_BOT_SOLID;
    ip_bot->orientation = RT_BOT_CCW;
    ip_bot->bot_flags = 0;

    /* maximum possible vertices */
    max_pts = ip_pg->npoly * ip_pg->max_npts;
    BU_ASSERT_SIZE_T(max_pts, >, 0);

    /* maximum possible triangular faces */
    max_tri = ip_pg->npoly * 3;
    BU_ASSERT_SIZE_T(max_tri, >, 0);

    ip_bot->num_vertices = 0;
    ip_bot->num_faces = 0;
    ip_bot->thickness = (fastf_t *)NULL;
    ip_bot->face_mode = (struct bu_bitv *)NULL;

    ip_bot->vertices = (fastf_t *)bu_calloc(max_pts * 3, sizeof(fastf_t), "BOT vertices");
    ip_bot->faces = (int *)bu_calloc(max_tri * 3, sizeof(int), "BOT faces");

    for (p=0; p<ip_pg->npoly; p++) {
	vect_t work[3], tmp;
	struct tri_specific trip;
	fastf_t m1, m2, m3, m4;
	size_t v0=0, v2=0;
	int first;

	first = 1;
	VMOVE(work[0], &ip_pg->poly[p].verts[0*3]);
	VMOVE(work[1], &ip_pg->poly[p].verts[1*3]);

	for (i=2; i < ip_pg->poly[p].npts; i++) {
	    VMOVE(work[2], &ip_pg->poly[p].verts[i*3]);

	    VSUB2(trip.tri_BA, work[1], work[0]);
	    VSUB2(trip.tri_CA, work[2], work[0]);
	    VCROSS(trip.tri_wn, trip.tri_BA, trip.tri_CA);

	    /* Check to see if this plane is a line or pnt */
	    m1 = MAGNITUDE(trip.tri_BA);
	    m2 = MAGNITUDE(trip.tri_CA);
	    VSUB2(tmp, work[1], work[2]);
	    m3 = MAGNITUDE(tmp);
	    m4 = MAGNITUDE(trip.tri_wn);
	    if (m1 >= tol->dist && m2 >= tol->dist &&
		m3 >= tol->dist && m4 >= tol->dist) {

		/* add this triangle to the BOT */
		if (first) {
		    ip_bot->faces[ip_bot->num_faces * 3] = ip_bot->num_vertices;
		    VMOVE(&ip_bot->vertices[ip_bot->num_vertices * 3], work[0]);
		    v0 = ip_bot->num_vertices;
		    ip_bot->num_vertices++;

		    ip_bot->faces[ip_bot->num_faces * 3 + 1] = ip_bot->num_vertices;
		    VMOVE(&ip_bot->vertices[ip_bot->num_vertices * 3], work[1]);
		    ip_bot->num_vertices++;
		    first = 0;
		} else {
		    ip_bot->faces[ip_bot->num_faces * 3] = v0;
		    ip_bot->faces[ip_bot->num_faces * 3 + 1] = v2;
		}
		VMOVE(&ip_bot->vertices[ip_bot->num_vertices * 3], work[2]);
		ip_bot->faces[ip_bot->num_faces * 3 + 2] = ip_bot->num_vertices;
		v2 = ip_bot->num_vertices;
		ip_bot->num_vertices++;

		ip_bot->num_faces++;
	    }

	    /* Chop off a triangle, and continue */
	    VMOVE(work[1], work[2]);
	}
    }

    rt_bot_vertex_fuse(ip_bot, tol);
    rt_bot_face_fuse(ip_bot);

    rt_db_free_internal(ip);

    ip->idb_major_type = DB5_MAJORTYPE_BRLCAD;
    ip->idb_type = ID_BOT;
    ip->idb_meth = &rt_functab[ID_BOT];
    ip->idb_ptr = ip_bot;

    return 0;
}
Example #28
/**
 * Apply a transformation matrix to the specified 'ip' input revolve
 * object, storing the results in the specified 'op' out pointer or
 * creating a copy if NULL.
 */
int
rt_revolve_xform(
    struct rt_db_internal *op,
    const mat_t mat,
    struct rt_db_internal *ip,
    int release,
    struct db_i *dbip,
    struct resource *resp)
{
    struct rt_revolve_internal *rip, *rop;
    point_t tmp_vec;

    if (dbip) RT_CK_DBI(dbip);
    RT_CK_DB_INTERNAL(ip);
    RT_CK_RESOURCE(resp);
    rip = (struct rt_revolve_internal *)ip->idb_ptr;
    RT_REVOLVE_CK_MAGIC(rip);

    if (bu_debug&BU_DEBUG_MEM_CHECK) {
	bu_log("Barrier check at start of revolve_xform():\n");
	bu_mem_barriercheck();
    }

    if (op != ip) {
	RT_DB_INTERNAL_INIT(op);
	BU_ALLOC(rop, struct rt_revolve_internal);
	rop->magic = RT_REVOLVE_INTERNAL_MAGIC;
	bu_vls_init(&rop->sketch_name);
	bu_vls_vlscat(&rop->sketch_name, &rip->sketch_name);
	op->idb_ptr = (void *)rop;
	op->idb_meth = &OBJ[ID_REVOLVE];
	op->idb_major_type = DB5_MAJORTYPE_BRLCAD;
	op->idb_type = ID_REVOLVE;
	if (ip->idb_avs.magic == BU_AVS_MAGIC) {
	    bu_avs_init(&op->idb_avs, ip->idb_avs.count, "avs");
	    bu_avs_merge(&op->idb_avs, &ip->idb_avs);
	}
    } else {
	rop = (struct rt_revolve_internal *)ip->idb_ptr;
    }
    MAT4X3PNT(tmp_vec, mat, rip->v3d);
    VMOVE(rop->v3d, tmp_vec);
    MAT4X3VEC(tmp_vec, mat, rip->axis3d);
    VMOVE(rop->axis3d, tmp_vec);
    V2MOVE(rop->v2d, rip->v2d);
    V2MOVE(rop->axis2d, rip->axis2d);

    if (release && ip != op) {
	rop->skt = rip->skt;
	rip->skt = (struct rt_sketch_internal *)NULL;
	rt_db_free_internal(ip);
    } else if (rip->skt) {
	rop->skt = rt_copy_sketch(rip->skt);
    } else {
	rop->skt = (struct rt_sketch_internal *)NULL;
    }

    if (bu_debug&BU_DEBUG_MEM_CHECK) {
	bu_log("Barrier check at end of revolve_xform():\n");
	bu_mem_barriercheck();
    }

    return 0;
}
Example #29
/**
 * Compute some pixels, and store them.
 *
 * This uses a "self-dispatching" parallel algorithm.  Executes until
 * there is no more work to be done, or is told to stop.
 *
 * In order to reduce the traffic through the res_worker critical
 * section, a multiple pixel block may be removed from the work queue
 * at once.
 *
 * For a general-purpose version, see LIBRT rt_shoot_many_rays()
 */
void
worker(int cpu, void *UNUSED(arg))
{
    int pixel_start;
    int pixelnum;
    int pat_num = -1;

    /* The more CPUs at work, the bigger the bites we take */
    if (per_processor_chunk <= 0) per_processor_chunk = npsw;

    if (cpu >= MAX_PSW) {
	bu_log("rt/worker() cpu %d > MAX_PSW %d, array overrun\n", cpu, MAX_PSW);
	bu_exit(EXIT_FAILURE, "rt/worker() cpu > MAX_PSW, array overrun\n");
    }
    RT_CK_RESOURCE(&resource[cpu]);

    pat_num = -1;
    if (hypersample) {
	int i, ray_samples;

	ray_samples = hypersample + 1;
	for (i=0; pt_pats[i].num_samples != 0; i++) {
	    if (pt_pats[i].num_samples == ray_samples) {
		pat_num = i;
		goto pat_found;
	    }
	}
    }
pat_found:

    if (transpose_grid) {
	int tmp;

	/* switch cur_pixel and last_pixel */
	tmp = cur_pixel;
	cur_pixel = last_pixel;
	last_pixel = tmp;

	while (1) {
	    if (stop_worker)
		return;

	    bu_semaphore_acquire(RT_SEM_WORKER);
	    pixel_start = cur_pixel;
	    cur_pixel -= per_processor_chunk;
	    bu_semaphore_release(RT_SEM_WORKER);

	    for (pixelnum = pixel_start; pixelnum > pixel_start-per_processor_chunk; pixelnum--) {
		if (pixelnum < last_pixel)
		    return;

		do_pixel(cpu, pat_num, pixelnum);
	    }
	}
    } else if (random_mode) {

	while (1) {
	    /* Generate a random pixel id between 0 and last_pixel
	       inclusive - TODO: check whether there are any issues
	       related to multi-threaded RNG use */
	    pixelnum = rand()*1.0/RAND_MAX*(last_pixel + 1);
	    if (pixelnum >= last_pixel) pixelnum = last_pixel;
	    do_pixel(cpu, pat_num, pixelnum);
	}

    } else {
	while (1) {
	    if (stop_worker)
		return;

	    bu_semaphore_acquire(RT_SEM_WORKER);
	    pixel_start = cur_pixel;
	    cur_pixel += per_processor_chunk;
	    bu_semaphore_release(RT_SEM_WORKER);

	    for (pixelnum = pixel_start; pixelnum < pixel_start+per_processor_chunk; pixelnum++) {

		if (pixelnum > last_pixel)
		    return;

		do_pixel(cpu, pat_num, pixelnum);
	    }
	}
    }
}
Example #30
int
rt_nul_tcladjust(Tcl_Interp *interp, struct rt_db_internal *intern, int argc, char **argv, struct resource *resp)
{
    RT_CK_RESOURCE(resp);
    Tcl_AppendResult(interp, "rt_nul_tcladjust", (char *)NULL);
    return TCL_ERROR;
}