Example No. 1
static int
test_bn_mat_is_equal(int argc, char *argv[])
{
    mat_t m1, m2;
    int expected;
    struct bn_tol tol = BN_TOL_INIT_ZERO;
    tol.dist = BN_TOL_DIST;
    tol.dist_sq = BN_TOL_DIST * BN_TOL_DIST;
    tol.perp = BN_TOL_DIST;

    if (argc != 5) {
	bu_exit(1, "<args> format: M1 M2 <expected_result> [%s]\n", argv[0]);
    }

    scan_mat_args(argv, 2, &m1);
    scan_mat_args(argv, 3, &m2);
    sscanf(argv[4], "%d", &expected);

    return !(bn_mat_is_equal(m1, m2, &tol) == expected);
}
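
The scan_mat_args() helper called above is not shown in this example. Below is a minimal sketch of what it might look like, assuming each matrix is passed as a single argument of 16 comma-separated values; the name matches the call sites above, but the signature and format string here are assumptions, not the actual test-suite code.

#include <stdio.h>
#include "vmath.h"	/* mat_t: 16 fastf_t values, row-major */

/* Hypothetical parser: read 16 comma-separated numbers from one
 * argv entry into a 4x4 matrix (assumes fastf_t is double, the
 * BRL-CAD default, so %lg is the right conversion).
 */
static void
scan_mat_args(char *argv[], int argnum, mat_t *mat)
{
    sscanf(argv[argnum],
	   "%lg,%lg,%lg,%lg,%lg,%lg,%lg,%lg,"
	   "%lg,%lg,%lg,%lg,%lg,%lg,%lg,%lg",
	   &(*mat)[0],  &(*mat)[1],  &(*mat)[2],  &(*mat)[3],
	   &(*mat)[4],  &(*mat)[5],  &(*mat)[6],  &(*mat)[7],
	   &(*mat)[8],  &(*mat)[9],  &(*mat)[10], &(*mat)[11],
	   &(*mat)[12], &(*mat)[13], &(*mat)[14], &(*mat)[15]);
}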
Example No. 2
/**
 * R T _ G E T T R E E _ L E A F
 *
 * This routine must be prepared to run in parallel.
 */
HIDDEN union tree *rt_gettree_leaf(struct db_tree_state *tsp, struct db_full_path *pathp, struct rt_db_internal *ip, genptr_t client_data)
{
    register struct soltab	*stp;
    union tree		*curtree;
    struct directory	*dp;
    register matp_t		mat;
    int			i;
    struct rt_i		*rtip;

    RT_CK_DBTS(tsp);
    RT_CK_DBI(tsp->ts_dbip);
    RT_CK_FULL_PATH(pathp);
    RT_CK_DB_INTERNAL(ip);
    rtip = tsp->ts_rtip;
    RT_CK_RTI(rtip);
    RT_CK_RESOURCE(tsp->ts_resp);
    dp = DB_FULL_PATH_CUR_DIR(pathp);

    /* Determine if this matrix is an identity matrix */

    if ( !bn_mat_is_equal(tsp->ts_mat, bn_mat_identity, &rtip->rti_tol)) {
	/* Not identity matrix */
	mat = (matp_t)tsp->ts_mat;
    } else {
	/* Identity matrix */
	mat = (matp_t)0;
    }

    /*
     * Check to see if this exact solid has already been processed.
     * Match on leaf name and matrix.  Note that there is a race here
     * between having st_id filled in a few lines below (which is
     * necessary for calling ft_prep), and ft_prep filling in
     * st_aradius.  Fortunately, st_aradius starts out as zero, and
     * will never go down to -1 unless this soltab structure has
     * become a dead solid, so by testing against -1 (instead of <= 0,
     * like before, oops), it isn't a problem.
     */
    stp = rt_find_identical_solid( mat, dp, rtip );
    if ( stp->st_id != 0 )  {
	/* stp is an instance of a pre-existing solid */
	if ( stp->st_aradius <= -1 )  {
	    /* It's dead, Jim.  st_uses was not incremented. */
	    return( TREE_NULL );	/* BAD: instance of dead solid */
	}
	goto found_it;
    }

    if ( rtip->rti_add_to_new_solids_list ) {
	bu_ptbl_ins( &rtip->rti_new_solids, (long *)stp );
    }

    stp->st_id = ip->idb_type;
    stp->st_meth = &rt_functab[ip->idb_type];
    if ( mat )  {
	/* rt_find_identical_solid() stored a private copy of the
	 * matrix in st_matp; point "mat" at that copy from here on.
	 */
	mat = stp->st_matp;
    } else {
	mat = (matp_t)bn_mat_identity;
    }

    RT_CK_DB_INTERNAL( ip );

    /* init solid's maxima and minima */
    VSETALL( stp->st_max, -INFINITY );
    VSETALL( stp->st_min,  INFINITY );

    /*
     * If the ft_prep routine wants to keep the internal structure,
     * that is OK, as long as idb_ptr is set to null.  Note that the
     * prep routine may have changed st_id.
     */
    if ( stp->st_meth->ft_prep( stp, ip, rtip ) )  {
	int	hash;
	/* Error, solid no good */
	bu_log("rt_gettree_leaf(%s):  prep failure\n", dp->d_namep );
	/* Too late to delete soltab entry; mark it as "dead" */
	hash = db_dirhash( dp->d_namep );
	ACQUIRE_SEMAPHORE_TREE(hash);
	stp->st_aradius = -1;
	stp->st_uses--;
	RELEASE_SEMAPHORE_TREE(hash);
	return( TREE_NULL );		/* BAD */
    }

    if ( rtip->rti_dont_instance )  {
	/*
	 * If instanced solid refs are not being compressed, then
	 * memory isn't an issue, and the application (such as
	 * solids_on_ray) probably cares about the full path of this
	 * solid, from root to leaf.  So make it available here.
	 * (stp->st_dp->d_uses could be the way to discriminate
	 * references uniquely, if the path isn't enough.  To locate
	 * given dp and d_uses, search dp->d_use_hd list.  Question
	 * is, where to stash current val of d_uses?)
	 */
	db_full_path_init( &stp->st_path );
	db_dup_full_path( &stp->st_path, pathp );
    } else {
	/*
	 * If there is more than just a direct reference to this leaf
	 * from its containing region, copy that below-region path
	 * into st_path.  Otherwise, leave st_path's magic number 0.
	 *
	 * XXX nothing depends on this behavior yet, and this whole
	 * XXX 'else' clause might well be deleted. -Mike
	 */
	i = pathp->fp_len-1;
	if ( i > 0 && !(pathp->fp_names[i-1]->d_flags & DIR_REGION) )  {
	    /* Search backwards for region.  If no region, use whole path */
	    for ( --i; i > 0; i-- )  {
		if ( pathp->fp_names[i-1]->d_flags & DIR_REGION ) break;
	    }
	    if ( i < 0 )  i = 0;
	    db_full_path_init( &stp->st_path );
	    db_dup_path_tail( &stp->st_path, pathp, i );
	}
    }
    if (RT_G_DEBUG&DEBUG_TREEWALK && stp->st_path.magic == DB_FULL_PATH_MAGIC)  {
	char	*sofar = db_path_to_string(&stp->st_path);
	bu_log("rt_gettree_leaf() st_path=%s\n", sofar );
	bu_free(sofar, "path string");
    }

    if (RT_G_DEBUG&DEBUG_SOLIDS)  {
	struct bu_vls	str;
	bu_log("\n---Primitive %d: %s\n", stp->st_bit, dp->d_namep);
	bu_vls_init( &str );
	/* verbose=1, mm2local=1.0 */
	if ( stp->st_meth->ft_describe( &str, ip, 1, 1.0, tsp->ts_resp, tsp->ts_dbip ) < 0 )  {
	    bu_log("rt_gettree_leaf(%s):  solid describe failure\n",
		   dp->d_namep );
	}
	bu_log( "%s:  %s", dp->d_namep, bu_vls_addr( &str ) );
	bu_vls_free( &str );
    }

 found_it:
    RT_GET_TREE( curtree, tsp->ts_resp );
    curtree->magic = RT_TREE_MAGIC;
    curtree->tr_op = OP_SOLID;
    curtree->tr_a.tu_stp = stp;
    /* regionp will be filled in later by rt_tree_region_assign() */
    curtree->tr_a.tu_regionp = (struct region *)0;

    if (RT_G_DEBUG&DEBUG_TREEWALK)  {
	char	*sofar = db_path_to_string(pathp);
	bu_log("rt_gettree_leaf() %s\n", sofar );
	bu_free(sofar, "path string");
    }

    return(curtree);
}
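
The NULL-means-identity convention established at the top of rt_gettree_leaf() (and relied on by rt_find_identical_solid() in the next example) can be isolated into a small helper. A minimal sketch, assuming bn_mat_is_equal() and bn_mat_identity from libbn; identity_or_matrix() is a hypothetical name, not BRL-CAD API.

#include "vmath.h"
#include "bn.h"

/* Hypothetical helper: return a null pointer for a transform that is
 * the identity within tolerance, otherwise the matrix itself.  This
 * mirrors how rt_gettree_leaf() prepares the "mat" argument for
 * rt_find_identical_solid().
 */
static matp_t
identity_or_matrix(mat_t m, const struct bn_tol *tol)
{
    if (bn_mat_is_equal(m, bn_mat_identity, tol))
	return (matp_t)0;	/* identity: signaled by a null pointer */
    return (matp_t)m;		/* non-identity: pass the matrix itself */
}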
Example No. 3
/**
 * R T _ F I N D _ I D E N T I C A L _ S O L I D
 *
 * See if solid "dp" as transformed by "mat" already exists in the
 * soltab list.  If it does, return the matching stp, otherwise,
 * create a new soltab structure, enroll it in the list, and return a
 * pointer to that.
 *
 * "mat" will be a null pointer when an identity matrix is signified.
 * This greatly speeds the comparison process.
 *
 * The two cases can be distinguished by the fact that stp->st_id will
 * be 0 for a new soltab structure, and non-zero for an existing one.
 *
 * This routine will run in parallel.
 *
 * In order to avoid a race between searching the soltab list and
 * adding new solids to it, the new solid to be added *must* be
 * enrolled in the list before exiting the critical section.
 *
 * To limit the size of the list to be searched, there are many lists.
 * The selection of which list is determined by the hash value
 * computed from the solid's name.  This is the same optimization used
 * in searching the directory lists.
 *
 * This subroutine is the critical bottleneck in parallel tree walking.
 *
 * It is safe, and much faster, to use several different critical
 * sections when searching different lists.
 *
 * There are only 4 dedicated semaphores defined, TREE0 through TREE3.
 * This unfortunately limits the code to having only 4 CPUs doing list
 * searching at any one time.  Hopefully, this is enough parallelism
 * to keep the rest of the CPUs doing I/O and actual solid prepping.
 *
 * Since the algorithm has been reduced from an O((nsolid/128)**2)
 * search on the entire rti_solidheads[hash] list to an O(ninstance)
 * search on the dp->d_use_hd list for this one solid, the critical
 * section should be relatively short-lived.  Having the 4-way split
 * should provide ample opportunity for parallelism through here,
 * while still ensuring that the necessary variables are protected.
 *
 * There are two critical variables which *both* need to be protected:
 * the specific rti_solidhead[hash] list head, and the specific
 * dp->d_use_hd list head.  Fortunately, since the selection of
 * critical section is based upon db_dirhash(dp->d_namep), any other
 * processor that wants to search this same 'dp' will get the same
 * hash as the current thread, and will thus wait for the appropriate
 * semaphore to be released.  Similarly, any other thread that wants
 * to search the same rti_solidhead[hash] list as the current thread
 * will be using the same hash, and will thus wait for the proper
 * semaphore.
 */
HIDDEN struct soltab *rt_find_identical_solid(register const matp_t mat, register struct directory *dp, struct rt_i *rtip)
{
    register struct soltab	*stp = RT_SOLTAB_NULL;
    int			hash;

    RT_CK_DIR(dp);
    RT_CK_RTI(rtip);

    hash = db_dirhash( dp->d_namep );

    /* Enter the appropriate dual critical-section */
    ACQUIRE_SEMAPHORE_TREE(hash);

    /*
     * If solid has not been referenced yet, the search can be
     * skipped.  If solid is being referenced a _lot_, it certainly
     * isn't all going to be in the same place, so don't bother
     * searching.  Consider the case of a million instances of the
     * same tree submodel solid.
     */
    if ( dp->d_uses > 0 && dp->d_uses < 100 &&
	 rtip->rti_dont_instance == 0
	)  {
	struct bu_list	*mid;

	/* Search dp->d_use_hd list for other instances */
	for ( BU_LIST_FOR( mid, bu_list, &dp->d_use_hd ) )  {

	    stp = BU_LIST_MAIN_PTR( soltab, mid, l2 );
	    RT_CK_SOLTAB(stp);

	    if ( stp->st_matp == (matp_t)0 )  {
		if ( mat == (matp_t)0 )  {
		    /* Both have identity matrix */
		    goto more_checks;
		}
		continue;
	    }
	    if ( mat == (matp_t)0 )  continue;	/* doesn't match */

	    if ( !bn_mat_is_equal(mat, stp->st_matp, &rtip->rti_tol))
		continue;

	more_checks:
	    /* Don't instance this solid from some other model
	     * instance.  As this is nearly always equal, check it
	     * last
	     */
	    if ( stp->st_rtip != rtip )  continue;

	    /*
	     * stp now points to re-referenced solid.  stp->st_id is
	     * non-zero, indicating pre-existing solid.
	     */
	    RT_CK_SOLTAB(stp);		/* sanity */

	    /* Only increment use counter for non-dead solids. */
	    if ( !(stp->st_aradius <= -1) )
		stp->st_uses++;
	    /* dp->d_uses is NOT incremented, because number of
	     * soltab's using it has not gone up.
	     */
	    if ( RT_G_DEBUG & DEBUG_SOLIDS )  {
		bu_log( mat ?
			"rt_find_identical_solid:  %s re-referenced %d\n" :
			"rt_find_identical_solid:  %s re-referenced %d (identity mat)\n",
			dp->d_namep, stp->st_uses );
	    }

	    /* Leave the appropriate dual critical-section */
	    RELEASE_SEMAPHORE_TREE(hash);
	    return stp;
	}
    }

    /*
     * Create and link a new solid into the list.
     *
     * Ensure the search keys "dp", "st_mat" and "st_rtip" are stored
     * now, while still inside the critical section, because they are
     * searched on, above.
     */
    BU_GETSTRUCT(stp, soltab);
    stp->l.magic = RT_SOLTAB_MAGIC;
    stp->l2.magic = RT_SOLTAB2_MAGIC;
    stp->st_rtip = rtip;
    stp->st_dp = dp;
    dp->d_uses++;
    stp->st_uses = 1;
    /* stp->st_id is intentionally left zero here, as a flag */

    if ( mat )  {
	stp->st_matp = (matp_t)bu_malloc( sizeof(mat_t), "st_matp" );
	MAT_COPY( stp->st_matp, mat );
    } else {
	stp->st_matp = (matp_t)0;
    }

    /* Add to the appropriate soltab list head */
    /* PARALLEL NOTE:  Uses critical section on rt_solidheads element */
    BU_LIST_INSERT( &(rtip->rti_solidheads[hash]), &(stp->l) );

    /* Also add to the directory structure list head */
    /* PARALLEL NOTE:  Uses critical section on this 'dp' */
    BU_LIST_INSERT( &dp->d_use_hd, &(stp->l2) );

    /*
     * Leave the 4-way critical-section protecting dp and [hash]
     */
    RELEASE_SEMAPHORE_TREE(hash);

    /* Enter an exclusive critical section to protect nsolids.
     * nsolids++ needs to be locked to a SINGLE thread
     */
    bu_semaphore_acquire(RT_SEM_STATS);
    stp->st_bit = rtip->nsolids++;
    bu_semaphore_release(RT_SEM_STATS);

    /*
     * Fill in the last little bit of the structure in full parallel
     * mode, outside of any critical section.
     */

    /* Init tables of regions using this solid.  Usually small. */
    bu_ptbl_init( &stp->st_regions, 7, "st_regions ptbl" );

    return stp;
}
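
The ACQUIRE_SEMAPHORE_TREE()/RELEASE_SEMAPHORE_TREE() macros used in both functions above are not shown in these examples. Below is a plausible sketch of the 4-way selection described in the header comment, where the low bits of the directory-name hash pick one of the dedicated TREE0..TREE3 semaphores; the actual macro bodies in librt may differ.

/* Assumes RT_SEM_TREE0..RT_SEM_TREE3 and bu_semaphore_acquire()/
 * bu_semaphore_release() from the BRL-CAD headers.  Threads whose
 * solid names hash to the same low two bits serialize on the same
 * semaphore; all others proceed in parallel.
 */
#define ACQUIRE_SEMAPHORE_TREE(_hash) switch ((_hash)&03) { \
	case 0: bu_semaphore_acquire(RT_SEM_TREE0); break; \
	case 1: bu_semaphore_acquire(RT_SEM_TREE1); break; \
	case 2: bu_semaphore_acquire(RT_SEM_TREE2); break; \
	default: bu_semaphore_acquire(RT_SEM_TREE3); break; \
}

#define RELEASE_SEMAPHORE_TREE(_hash) switch ((_hash)&03) { \
	case 0: bu_semaphore_release(RT_SEM_TREE0); break; \
	case 1: bu_semaphore_release(RT_SEM_TREE1); break; \
	case 2: bu_semaphore_release(RT_SEM_TREE2); break; \
	default: bu_semaphore_release(RT_SEM_TREE3); break; \
}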