int
bu_ptbl_ins_unique(struct bu_ptbl *b, long int *p)
{
    register int k;
    register long **pp;

    BU_CK_PTBL(b);

    pp = b->buffer;

    /* search for existing */
    for (k = b->end-1; k >= 0; k--) {
	if (pp[k] == p) {
	    return k;
	}
    }

    if (UNLIKELY(bu_debug & BU_DEBUG_PTBL))
	bu_log("bu_ptbl_ins_unique(%p, %p)\n", (void *)b, (void *)p);

    if (b->blen <= 0 || (size_t)b->end >= b->blen) {
	/* Table needs to grow */
	bu_ptbl_ins(b, p);
	return -1;	/* To signal that it was added */
    }

    b->buffer[k=b->end++] = p;
    return -1;		/* To signal that it was added */
}
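/*
 * Usage sketch (illustrative, not from the original source): the
 * return value distinguishes "already present" (index >= 0) from
 * "newly appended" (-1).
 */
{
    struct bu_ptbl tbl;
    long datum = 42;

    bu_ptbl_init(&tbl, 8, "ins_unique example");

    if (bu_ptbl_ins_unique(&tbl, &datum) < 0)
	bu_log("appended\n");
    if (bu_ptbl_ins_unique(&tbl, &datum) >= 0)
	bu_log("duplicate suppressed, still %lu entries\n", (unsigned long)BU_PTBL_LEN(&tbl));

    bu_ptbl_free(&tbl);
}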
int
bu_ptbl_rm(struct bu_ptbl *b, const long int *p)
{
    register int end, j, k, l;
    register long **pp;
    int ndel = 0;

    BU_CK_PTBL(b);

    end = b->end;
    pp = b->buffer;

    for (l = b->end-1; l >= 0; --l) {
	if (pp[l] == p) {
	    /* delete consecutive occurrence(s) of p */
	    ndel++;

	    j=l+1;
	    while (l >= 1 && pp[l-1] == p) --l, ndel++;
	    /* pp[l] through pp[j-1] match p */

	    end -= j - l;
	    for (k=l; j < b->end;)
		b->buffer[k++] = b->buffer[j++];
	    b->end = end;
	}
    }
    if (UNLIKELY(bu_debug & BU_DEBUG_PTBL))
	bu_log("bu_ptbl_rm(%p, %p) ndel=%d\n", (void *)b, (void *)p, ndel);
    return ndel;
}
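/*
 * Usage sketch (illustrative): bu_ptbl_rm() deletes every occurrence
 * of the pointer, compacts the table, and reports how many entries
 * were removed.
 */
{
    struct bu_ptbl tbl;
    long a = 1, b = 2;
    int ndel;

    bu_ptbl_init(&tbl, 8, "rm example");
    bu_ptbl_ins(&tbl, &a);
    bu_ptbl_ins(&tbl, &b);
    bu_ptbl_ins(&tbl, &a);

    ndel = bu_ptbl_rm(&tbl, &a);	/* ndel == 2, one entry (&b) remains */

    bu_ptbl_free(&tbl);
}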
Example #3
void
db_alloc_directory_block(struct resource *resp)
{
    struct directory *dp;
    size_t bytes;

    RT_CK_RESOURCE(resp);
    BU_CK_PTBL(&resp->re_directory_blocks);

    BU_ASSERT_PTR(resp->re_directory_hd, ==, NULL);

    /* Get a BIG block */
    bytes = (size_t)bu_malloc_len_roundup(1024*sizeof(struct directory));
    dp = (struct directory *)bu_calloc(1, bytes, "re_directory_blocks from db_alloc_directory_block() " BU_FLSTR);

    /* Record storage for later */
    bu_ptbl_ins(&resp->re_directory_blocks, (long *)dp);

    while (bytes >= sizeof(struct directory)) {
	dp->d_magic = RT_DIR_MAGIC;
	dp->d_forw = resp->re_directory_hd;
	resp->re_directory_hd = dp;
	dp++;
	bytes -= sizeof(struct directory);
    }
}
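/*
 * Usage sketch (illustrative, assuming the freelist discipline shown
 * above): callers pop directory entries off resp->re_directory_hd one
 * at a time, refilling with db_alloc_directory_block() when the list
 * runs dry.
 */
{
    struct directory *dp;

    if (resp->re_directory_hd == NULL)
	db_alloc_directory_block(resp);
    dp = resp->re_directory_hd;
    resp->re_directory_hd = dp->d_forw;
    dp->d_forw = RT_DIR_NULL;
    /* dp is now ready to be filled in and linked into the database */
}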
Example #4
/**
 *			B U _ P T B L
 *
 *  This version is maintained for source compatibility with existing NMG code.
 */
int
bu_ptbl(struct bu_ptbl *b, int func, long int *p)
{
    if (func == BU_PTBL_INIT) {
	bu_ptbl_init(b, 64, "bu_ptbl() buffer[]");
	return 0;
    } else if (func == BU_PTBL_RST) {
	bu_ptbl_reset(b);
	return 0;
    } else if (func == BU_PTBL_INS) {
	return bu_ptbl_ins(b, p);
    } else if (func == BU_PTBL_LOC) {
	return bu_ptbl_locate(b, p);
    } else if (func == BU_PTBL_ZERO) {
	bu_ptbl_zero(b, p);
	return 0;
    } else if (func == BU_PTBL_INS_UNIQUE) {
	return bu_ptbl_ins_unique(b, p);
    } else if (func == BU_PTBL_RM) {
	return bu_ptbl_rm(b, p);
    } else if (func == BU_PTBL_CAT) {
	bu_ptbl_cat(b, (const struct bu_ptbl *)p);
	return 0;
    } else if (func == BU_PTBL_FREE) {
	bu_ptbl_free(b);
	return 0;
    } else {
	BU_CK_PTBL(b);
	bu_log("bu_ptbl(%p) Unknown table function %d\n", (void *)b, func);
	bu_bomb("bu_ptbl");
    }
    return -1;	/* this is here to keep lint happy */
}
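/*
 * Usage sketch (illustrative): the dispatcher form and the direct
 * bu_ptbl_*() calls are interchangeable; new code would normally call
 * the specific functions directly.
 */
{
    struct bu_ptbl tbl;
    long item = 7;

    bu_ptbl(&tbl, BU_PTBL_INIT, NULL);	/* same as bu_ptbl_init(&tbl, 64, "...") */
    bu_ptbl(&tbl, BU_PTBL_INS, &item);	/* same as bu_ptbl_ins(&tbl, &item) */
    bu_ptbl(&tbl, BU_PTBL_FREE, NULL);	/* same as bu_ptbl_free(&tbl) */
}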
Example #5
/**
 *			B U _ P T B L _ C A T
 *
 *  Catenate one table onto end of another.
 *  There is no checking for duplication.
 */
void
bu_ptbl_cat(struct bu_ptbl *dest, const struct bu_ptbl *src)
{
    BU_CK_PTBL(dest);
    BU_CK_PTBL(src);
    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_cat(%p, %p)\n", (void *)dest, (void *)src);

    if ((dest->blen - dest->end) < src->end) {
	dest->blen = (dest->blen + src->end) * 2 + 8;
	dest->buffer = (long **)bu_realloc( (char *)dest->buffer,
					    dest->blen * sizeof(long *),
					    "bu_ptbl.buffer[] (cat)");
    }
    memcpy((char *)&dest->buffer[dest->end], (char *)src->buffer, src->end*sizeof(long *));
    dest->end += src->end;
}
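/*
 * Usage sketch (illustrative): after bu_ptbl_cat() the destination
 * holds its own entries followed by a copy of every source entry,
 * duplicates included; bu_ptbl_cat_uniq() below suppresses them.
 */
{
    struct bu_ptbl dst, src;
    long x = 1;

    bu_ptbl_init(&dst, 8, "cat dst");
    bu_ptbl_init(&src, 8, "cat src");
    bu_ptbl_ins(&dst, &x);
    bu_ptbl_ins(&src, &x);

    bu_ptbl_cat(&dst, &src);	/* &x now appears twice in dst */

    bu_ptbl_free(&src);
    bu_ptbl_free(&dst);
}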
Example #6
/**
 *			B U _ P T B L _ R E S E T
 *
 *  Reset the table to have no elements, but retain any existing storage.
 */
void
bu_ptbl_reset(struct bu_ptbl *b)
{
    BU_CK_PTBL(b);
    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_reset(%p)\n", (void *)b);
    b->end = 0;
    memset((char *)b->buffer, 0, b->blen*sizeof(long *));	/* no peeking */
}
Example #7
/**
 *			B U _ P T B L _ Z E R O
 *
 *  Set all occurrences of "p" in the table to zero.
 *  This is different from deleting them; the table length is unchanged.
 */
void
bu_ptbl_zero(struct bu_ptbl *b, const long int *p)
{
    register int		k;
    register const long	**pp;

    BU_CK_PTBL(b);
    pp = (const long **)b->buffer;
    for ( k = b->end-1; k >= 0; k-- )
	if (pp[k] == p) pp[k] = (long *)0;
}
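/*
 * Usage sketch (illustrative): bu_ptbl_zero() leaves NULL holes where
 * the pointer used to be, while bu_ptbl_rm() would compact the table.
 */
{
    struct bu_ptbl tbl;
    long v = 5;

    bu_ptbl_init(&tbl, 8, "zero example");
    bu_ptbl_ins(&tbl, &v);

    bu_ptbl_zero(&tbl, &v);
    /* BU_PTBL_LEN(&tbl) is still 1, but BU_PTBL_GET(&tbl, 0) is now NULL */

    bu_ptbl_free(&tbl);
}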
Example #8
/**
 *			B U _ P T B L _ F R E E
 *
 *  Deallocate dynamic buffer associated with a table,
 *  and render this table unusable without a subsequent bu_ptbl_init().
 */
void
bu_ptbl_free(struct bu_ptbl *b)
{
    BU_CK_PTBL(b);

    bu_free((genptr_t)b->buffer, "bu_ptbl.buffer[]");
    memset((char *)b, 0, sizeof(struct bu_ptbl));	/* sanity */

    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_free(%p)\n", (void *)b);
}
Example #9
/**
 *			B U _ P T B L _ C A T _ U N I Q
 *
 *  Catenate one table onto end of another,
 *  ensuring that no entry is duplicated.
 *  Duplications between multiple items in 'src' are not caught.
 *  The search is a nasty n**2 one.  The tables are expected to be short.
 */
void
bu_ptbl_cat_uniq(struct bu_ptbl *dest, const struct bu_ptbl *src)
{
    register long	**p;

    BU_CK_PTBL(dest);
    BU_CK_PTBL(src);
    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_cat_uniq(%p, %p)\n", (void *)dest, (void *)src);

    /* Assume the worst, ensure sufficient space to add all 'src' items */
    if ((dest->blen - dest->end) < src->end) {
	dest->buffer = (long **)bu_realloc( (char *)dest->buffer,
					    sizeof(long *)*(dest->blen += src->blen + 8),
					    "bu_ptbl.buffer[] (cat_uniq)");
    }
    for ( BU_PTBL_FOR( p, (long **), src ) )  {
	bu_ptbl_ins_unique( dest, *p );
    }
}
Example #10
void
bu_ptbl_trunc(struct bu_ptbl *tbl, int end)
{
    BU_CK_PTBL(tbl);

    if (tbl->end <= end)
	return;

    tbl->end = end;
}
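/*
 * Usage sketch (illustrative): truncation only moves the end marker;
 * the buffer keeps its allocated size so the slots can be reused.
 */
{
    struct bu_ptbl tbl;
    long a = 1, b = 2, c = 3;

    bu_ptbl_init(&tbl, 8, "trunc example");
    bu_ptbl_ins(&tbl, &a);
    bu_ptbl_ins(&tbl, &b);
    bu_ptbl_ins(&tbl, &c);

    bu_ptbl_trunc(&tbl, 1);	/* only &a remains visible */

    bu_ptbl_free(&tbl);
}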
Example #11
/**
 *			B U _ P T B L _ L O C A T E
 *
 *  Locate a (long *) in an existing table.
 *
 * @return	index of the matching element in the array, if found
 *		(the scan runs backwards, so the highest-index
 *		occurrence is reported)
 * @return	-1	if not found
 *
 * We do this a great deal, so make it go as fast as possible.
 * This is the biggest argument I can make for changing to an
 * ordered list.  Someday....
 */
int
bu_ptbl_locate(const struct bu_ptbl *b, const long int *p)
{
    register int		k;
    register const long	**pp;

    BU_CK_PTBL(b);
    pp = (const long **)b->buffer;
    for ( k = b->end-1; k >= 0; k-- )
	if (pp[k] == p) return(k);

    return(-1);
}
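/*
 * Usage sketch (illustrative): bu_ptbl_locate() pairs with
 * BU_PTBL_GET() for read-only membership tests.
 */
{
    struct bu_ptbl tbl;
    long item = 9;
    int idx;

    bu_ptbl_init(&tbl, 8, "locate example");
    bu_ptbl_ins(&tbl, &item);

    idx = bu_ptbl_locate(&tbl, &item);
    if (idx >= 0 && BU_PTBL_GET(&tbl, idx) == &item)
	bu_log("found at index %d\n", idx);

    bu_ptbl_free(&tbl);
}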
Example #12
void
bu_ptbl_free(struct bu_ptbl *b)
{
    BU_CK_PTBL(b);

    if (b->buffer) {
	bu_free((void *)b->buffer, "bu_ptbl.buffer[]");
    }
    memset((char *)b, 0, sizeof(struct bu_ptbl));	/* sanity */

    if (UNLIKELY(bu_debug & BU_DEBUG_PTBL))
	bu_log("bu_ptbl_free(%p)\n", (void *)b);
}
Example #13
/**
 *			B U _ P T B L _ I N S
 *
 *  Append/Insert a (long *) item to/into the table.
 */
int
bu_ptbl_ins(struct bu_ptbl *b, long int *p)
{
    register int i;

    BU_CK_PTBL(b);

    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_ins(%p, %p)\n", (void *)b, (void *)p);

    if (b->blen == 0) bu_ptbl_init(b, 64, "bu_ptbl_ins() buffer");
    if (b->end >= b->blen)  {
	b->buffer = (long **)bu_realloc( (char *)b->buffer,
					 sizeof(p)*(b->blen *= 4),
					 "bu_ptbl.buffer[] (ins)" );
    }

    i=b->end++;
    b->buffer[i] = p;
    return(i);
}
Example #14
/**
 *			B U _ P R _ P T B L
 *
 *  Print a bu_ptbl array for inspection.
 */
void
bu_pr_ptbl(const char *title, const struct bu_ptbl *tbl, int verbose)
{
    register long	**lp;

    BU_CK_PTBL(tbl);
    bu_log("%s: bu_ptbl array with %d entries\n",
	   title, tbl->end );

    if ( !verbose )  return;

    /* Go in ascending order */
    for ( lp = (long **)BU_PTBL_BASEADDR(tbl);
	  lp <= (long **)BU_PTBL_LASTADDR(tbl); lp++
	)  {
	if (*lp == 0) {
	    bu_log("  %p NULL entry\n", (void *)*lp);
	    continue;
	}
	bu_log("  %p %s\n", (void *)*lp, bu_identify_magic(**lp));
    }
}
Example #15
int
bu_ptbl_ins(struct bu_ptbl *b, long int *p)
{
    register int i;

    BU_CK_PTBL(b);

    if (UNLIKELY(bu_debug & BU_DEBUG_PTBL))
	bu_log("bu_ptbl_ins(%p, %p)\n", (void *)b, (void *)p);

    if (b->blen == 0)
	bu_ptbl_init(b, 64, "bu_ptbl_ins() buffer");

    if ((size_t)b->end >= b->blen) {
	b->buffer = (long **)bu_realloc((char *)b->buffer,
					sizeof(long *)*(b->blen *= 4),
					"bu_ptbl.buffer[] (ins)");
    }

    i = b->end++;
    b->buffer[i] = p;
    return i;
}
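/*
 * Usage note (illustrative): the buffer grows geometrically
 * (blen *= 4), so a long run of inserts costs amortized O(1) each.
 */
{
    struct bu_ptbl tbl;
    long vals[1000];
    int i;

    bu_ptbl_init(&tbl, 64, "growth example");
    for (i = 0; i < 1000; i++)
	bu_ptbl_ins(&tbl, &vals[i]);	/* reallocates when end hits 64, then 256 */
    bu_ptbl_free(&tbl);
}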
Example #16
/**
 *			B U _ P T B L _ I N S _ U N I Q U E
 *
 *  Append item to table, if not already present.  Unique insert.
 *
 *
 *  @return	index of first matching element in array, if found.  (table unchanged)
 *  @return	-1	if table extended to hold new element
 *
 * We do this a great deal, so make it go as fast as possible.
 * This is the biggest argument I can make for changing to an
 * ordered list.  Someday....
 */
int
bu_ptbl_ins_unique(struct bu_ptbl *b, long int *p)
{
    register int	k;
    register long	**pp;

    BU_CK_PTBL(b);
    pp = b->buffer;

    /* search for existing */
    for ( k = b->end-1; k >= 0; k-- )
	if (pp[k] == p) return(k);

    if (bu_debug & BU_DEBUG_PTBL)
	bu_log("bu_ptbl_ins_unique(%p, %p)\n", (void *)b, (void *)p);

    if (b->blen <= 0 || b->end >= b->blen)  {
	/* Table needs to grow */
	bu_ptbl_ins( b, p );
	return -1;	/* To signal that it was added */
    }

    b->buffer[k=b->end++] = p;
    return(-1);		/* To signal that it was added */
}
Example #17
/**
 * Given a ray, shoot it at all the relevant parts of the model,
 * (building the HeadSeg chain), and then call rt_boolregions() to
 * build and evaluate the partition chain.  If the ray actually hit
 * anything, call the application's a_hit() routine with a pointer to
 * the partition chain, otherwise, call the application's a_miss()
 * routine.
 *
 * It is important to note that rays extend infinitely only in the
 * positive direction.  The ray is composed of all points P, where
 *
 * P = r_pt + K * r_dir
 *
 * for K ranging from 0 to +infinity.  There is no looking backwards.
 *
 * It is also important to note that the direction vector r_dir must
 * have unit length; this is mandatory, and is not ordinarily checked,
 * in the name of efficiency.
 *
 * Input:  Pointer to an application structure, with these mandatory fields:
 * a_ray.r_pt Starting point of ray to be fired
 * a_ray.r_dir UNIT VECTOR with direction to fire in (dir cosines)
 * a_hit Routine to call when something is hit
 * a_miss Routine to call when ray misses everything
 *
 * Calls user's a_miss() or a_hit() routine as appropriate.  Passes
 * a_hit() routine list of partitions, with only hit_dist fields
 * valid.  Normal computation deferred to user code, to avoid needless
 * computation here.
 *
 * Returns: whatever the application function returns (an int).
 *
 * NOTE: The application functions may call rt_shootray() recursively.
 * Thus, none of the local variables may be static.
 *
 * An open issue for execution in a PARALLEL environment is locking of
 * the statistics variables.
 */
int
rt_vshootray(struct application *ap)
{
    struct seg *HeadSeg;
    int ret;
    vect_t inv_dir;	/* inverses of ap->a_ray.r_dir */
    struct bu_bitv *solidbits;	/* bits for all solids shot so far */
    struct bu_ptbl *regionbits;	/* bits for all involved regions */
    char *status;
    struct partition InitialPart;	/* Head of Initial Partitions */
    struct partition FinalPart;	/* Head of Final Partitions */
    int nrays = 1;			/* for now */
    int vlen;
    int id;
    int i;
    struct soltab **ary_stp;	/* array of pointers */
    struct xray **ary_rp;	/* array of pointers */
    struct seg *ary_seg;	/* array of structures */
    struct rt_i *rtip;
    int done;

#define BACKING_DIST (-2.0)		/* mm to look behind start point */
    rtip = ap->a_rt_i;
    RT_AP_CHECK(ap);
    if (!ap->a_resource) {
	ap->a_resource = &rt_uniresource;
    }
    RT_CK_RESOURCE(ap->a_resource);

    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("\n**********mshootray cpu=%d  %d, %d lvl=%d (%s)\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?");
	VPRINT("Pnt", ap->a_ray.r_pt);
	VPRINT("Dir", ap->a_ray.r_dir);
    }

    rtip->rti_nrays++;
    if (rtip->needprep)
	rt_prep(rtip);

    /* Allocate dynamic memory */
    vlen = nrays * rtip->rti_maxsol_by_type;
    ary_stp = (struct soltab **)bu_calloc(vlen, sizeof(struct soltab *),
					  "*ary_stp[]");
    ary_rp = (struct xray **)bu_calloc(vlen, sizeof(struct xray *),
				       "*ary_rp[]");
    ary_seg = (struct seg *)bu_calloc(vlen, sizeof(struct seg),
				      "ary_seg[]");

    /**** for each ray, do this ****/

    InitialPart.pt_forw = InitialPart.pt_back = &InitialPart;
    FinalPart.pt_forw = FinalPart.pt_back = &FinalPart;

    HeadSeg = RT_SEG_NULL;

    solidbits = rt_get_solidbitv(rtip->nsolids, ap->a_resource);

    if (BU_LIST_IS_EMPTY(&ap->a_resource->re_region_ptbl)) {
	BU_ALLOC(regionbits, struct bu_ptbl);
	bu_ptbl_init(regionbits, 7, "rt_shootray() regionbits ptbl");
    } else {
	regionbits = BU_LIST_FIRST(bu_ptbl, &ap->a_resource->re_region_ptbl);
	BU_LIST_DEQUEUE(&regionbits->l);
	BU_CK_PTBL(regionbits);
    }

    /* Compute the inverse of the direction cosines */
    if (!ZERO(ap->a_ray.r_dir[X])) {
	inv_dir[X]=1.0/ap->a_ray.r_dir[X];
    } else {
	inv_dir[X] = INFINITY;
	ap->a_ray.r_dir[X] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Y])) {
	inv_dir[Y]=1.0/ap->a_ray.r_dir[Y];
    } else {
	inv_dir[Y] = INFINITY;
	ap->a_ray.r_dir[Y] = 0.0;
    }
    if (!ZERO(ap->a_ray.r_dir[Z])) {
	inv_dir[Z]=1.0/ap->a_ray.r_dir[Z];
    } else {
	inv_dir[Z] = INFINITY;
	ap->a_ray.r_dir[Z] = 0.0;
    }

    /*
     * XXX handle infinite solids here, later.
     */

    /*
     * If ray does not enter the model RPP, skip on.
     * If ray ends exactly at the model RPP, trace it.
     */
    if (!rt_in_rpp(&ap->a_ray, inv_dir, rtip->mdl_min, rtip->mdl_max)  ||
	ap->a_ray.r_max < 0.0) {
	rtip->nmiss_model++;
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS model";
	goto out;
    }

    /* For each type of solid to be shot at, assemble the vectors */
    for (id = 1; id <= ID_MAX_SOLID; id++) {
	register int nsol;

	if ((nsol = rtip->rti_nsol_by_type[id]) <= 0) continue;

	/* For each instance of this solid type */
	for (i = nsol-1; i >= 0; i--) {
	    ary_stp[i] = rtip->rti_sol_by_type[id][i];
	    ary_rp[i] = &(ap->a_ray);	/* XXX, sb [ray] */
	    ary_seg[i].seg_stp = SOLTAB_NULL;
	    BU_LIST_INIT(&ary_seg[i].l);
	}
	/* bounding box check */
	/* bit vector per ray check */
	/* mark elements to be skipped with ary_stp[] = SOLTAB_NULL */
	ap->a_rt_i->nshots += nsol;	/* later: skipped ones */
	if (OBJ[id].ft_vshot) {
	    OBJ[id].ft_vshot(ary_stp, ary_rp, ary_seg, nsol, ap);
	} else {
	    vshot_stub(ary_stp, ary_rp, ary_seg, nsol, ap);
	}


	/* set bits for all solids shot at for each ray */

	/* append resulting seg list to input for boolweave */
	for (i = nsol-1; i >= 0; i--) {
	    register struct seg *seg2;

	    if (ary_seg[i].seg_stp == SOLTAB_NULL) {
		/* MISS */
		ap->a_rt_i->nmiss++;
		continue;
	    }
	    ap->a_rt_i->nhits++;

	    /* For now, do it the slow way.  sb [ray] */
	    /* MUST dup it -- all segs have to live till after a_hit() */
	    RT_GET_SEG(seg2, ap->a_resource);
	    *seg2 = ary_seg[i];	/* struct copy */
	    /* rt_boolweave(seg2, &InitialPart, ap); */
	    bu_bomb("FIXME: need to call boolweave here");

	    /* Add seg chain to list of used segs awaiting reclaim */

#if 0
	    /* FIXME: need to use waiting_segs/finished_segs here in
	     * conjunction with rt_boolweave()
	     {
	     register struct seg *seg3 = seg2;
	     while (seg3->seg_next != RT_SEG_NULL)
	     seg3 = seg3->seg_next;
	     seg3->seg_next = HeadSeg;
	     HeadSeg = seg2;
	     }
	    */
#endif
	}
    }

    /*
     * Ray has finally left known space.
     */
    if (InitialPart.pt_forw == &InitialPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISSed all primitives";
	goto freeup;
    }

    /*
     * All intersections of the ray with the model have been computed.
     * Evaluate the boolean trees over each partition.
     */
    done = rt_boolfinal(&InitialPart, &FinalPart, BACKING_DIST, INFINITY, regionbits, ap, solidbits);

    if (done > 0) goto hitit;

    if (FinalPart.pt_forw == &FinalPart) {
	if (ap->a_miss)
	    ret = ap->a_miss(ap);
	else
	    ret = 0;
	status = "MISS bool";
	goto freeup;
    }

    /*
     * Ray/model intersections exist.  Pass the list to the user's
     * a_hit() routine.  Note that only the hit_dist elements of
     * pt_inhit and pt_outhit have been computed yet.  To compute both
     * hit_point and hit_normal, use the
     *
     * RT_HIT_NORMAL(NULL, hitp, stp, rayp, 0);
     *
     * macro.  To compute just hit_point, use
     *
     * VJOIN1(hitp->hit_point, rp->r_pt, hitp->hit_dist, rp->r_dir);
     */
hitit:
    if (RT_G_DEBUG&DEBUG_SHOOT) rt_pr_partitions(rtip, &FinalPart, "a_hit()");

    if (ap->a_hit)
	ret = ap->a_hit(ap, &FinalPart, HeadSeg/* &finished_segs */);
    else
	ret = 0;
    status = "HIT";

    /*
     * Processing of this ray is complete.  Free dynamic resources.
     */
freeup:
    {
	register struct partition *pp;

	/* Free up initial partition list */
	for (pp = InitialPart.pt_forw; pp != &InitialPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
	/* Free up final partition list */
	for (pp = FinalPart.pt_forw; pp != &FinalPart;) {
	    register struct partition *newpp;
	    newpp = pp;
	    pp = pp->pt_forw;
	    FREE_PT(newpp, ap->a_resource);
	}
    }

    /* Segs can't be freed until after a_hit() has returned */
#if 0
    /* FIXME: depends on commented out code above */
    if (HeadSeg)
	RT_FREE_SEG_LIST(HeadSeg, ap->a_resource);
#endif

out:
    bu_free((char *)ary_stp, "*ary_stp[]");
    bu_free((char *)ary_rp, "*ary_rp[]");
    bu_free((char *)ary_seg, "ary_seg[]");

    if (solidbits != NULL) {
	bu_bitv_free(solidbits);
    }
    if (RT_G_DEBUG&(DEBUG_ALLRAYS|DEBUG_SHOOT|DEBUG_PARTITION)) {
	bu_log("----------mshootray cpu=%d  %d, %d lvl=%d (%s) %s ret=%d\n",
	       ap->a_resource->re_cpu,
	       ap->a_x, ap->a_y,
	       ap->a_level,
	       ap->a_purpose != (char *)0 ? ap->a_purpose : "?",
	       status, ret);
    }
    return ret;
}
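/*
 * Usage sketch (illustrative; "rtip" is assumed to be a struct rt_i
 * prepared elsewhere, e.g. via rt_dirbuild() and rt_gettree()): only
 * the fields named in the comment block above are mandatory before
 * firing the ray.
 */
static int
example_hit(struct application *ap, struct partition *PartHeadp, struct seg *segs)
{
    /* only the hit_dist fields of pt_inhit/pt_outhit are valid here */
    return 1;
}

static int
example_miss(struct application *ap)
{
    return 0;
}

void
example_fire(struct rt_i *rtip)
{
    struct application ap;

    RT_APPLICATION_INIT(&ap);		/* zeroes the struct, sets a_magic */
    ap.a_rt_i = rtip;
    VSET(ap.a_ray.r_pt, 0.0, 0.0, 10000.0);	/* starting point of the ray */
    VSET(ap.a_ray.r_dir, 0.0, 0.0, -1.0);	/* must be unit length */
    ap.a_hit = example_hit;
    ap.a_miss = example_miss;

    (void)rt_vshootray(&ap);
}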