struct directory *
db_lookup(const struct db_i *dbip, const char *name, int noisy)
{
    struct directory *dp;
    char n0;
    char n1;

    if (!name || name[0] == '\0') {
	if (noisy || RT_G_DEBUG&DEBUG_DB)
	    bu_log("db_lookup received NULL or empty name\n");
	return RT_DIR_NULL;
    }

    n0 = name[0];
    n1 = name[1];

    RT_CK_DBI(dbip);

    dp = dbip->dbi_Head[db_dirhash(name)];
    for (; dp != RT_DIR_NULL; dp=dp->d_forw) {
	char *this_obj;

	/* first two checks are for speed */
	if ((n0 == *(this_obj=dp->d_namep)) && (n1 == this_obj[1]) && (BU_STR_EQUAL(name, this_obj))) {
	    if (RT_G_DEBUG&DEBUG_DB)
		bu_log("db_lookup(%s) %p\n", name, (void *)dp);
	    return dp;
	}
    }

    if (noisy || RT_G_DEBUG&DEBUG_DB)
	bu_log("db_lookup(%s) failed: %s does not exist\n", name, name);

    return RT_DIR_NULL;
}
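/*
 * Usage sketch (not from the original listing): look up an object by
 * name, quietly first, then noisily so a failure gets logged.
 * Assumes a valid dbip from db_open()/db_dirbuild() and <raytrace.h>.
 */
static struct directory *
example_lookup(struct db_i *dbip, const char *name)
{
    struct directory *dp;

    /* LOOKUP_QUIET suppresses the "does not exist" message */
    dp = db_lookup(dbip, name, LOOKUP_QUIET);
    if (dp == RT_DIR_NULL) {
	/* retry noisily so the failure is reported via bu_log() */
	dp = db_lookup(dbip, name, LOOKUP_NOISY);
    }
    return dp;
}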
int
db_dircheck(struct db_i *dbip,
	    struct bu_vls *ret_name,
	    int noisy,
	    struct directory ***headp)
{
    struct directory *dp;
    char *cp = bu_vls_addr(ret_name);
    char n0 = cp[0];
    char n1 = cp[1];

    /* Compute the hash only once; it is recomputed below only if a
     * duplicate forces a rename
     */
    *headp = &(dbip->dbi_Head[db_dirhash(cp)]);

    for (dp = **headp; dp != RT_DIR_NULL; dp=dp->d_forw) {
	char *this_obj;
	if (n0 == *(this_obj=dp->d_namep)  &&	/* speed */
	    n1 == this_obj[1]  &&			/* speed */
	    BU_STR_EQUAL(cp, this_obj)) {
	    /* Name exists in directory already */
	    int c;

	    bu_vls_strcpy(ret_name, "A_");
	    bu_vls_strcat(ret_name, this_obj);
	    cp = bu_vls_addr(ret_name);

	    for (c = 'A'; c <= 'Z'; c++) {
		*cp = c;
		if (db_lookup(dbip, cp, noisy) == RT_DIR_NULL)
		    break;
	    }
	    if (c > 'Z') {
		bu_log("db_dircheck: Duplicate of name '%s', ignored\n",
		       cp);
		return -1;	/* fail */
	    }
	    bu_log("db_dircheck: Duplicate of '%s', given temporary name '%s'\n",
		   cp+2, cp);

	    /* no need to recurse, simply recompute the hash and break */
	    *headp = &(dbip->dbi_Head[db_dirhash(cp)]);
	    break;
	}
    }

    return 0;	/* success */
}
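/*
 * Usage sketch (an assumption, not from the original listing): vet a
 * candidate name while scanning a database.  On a duplicate,
 * db_dircheck() rewrites ret_name to "A_<name>" .. "Z_<name>"; a -1
 * return means no temporary name was free.
 */
static int
example_dircheck(struct db_i *dbip, const char *candidate)
{
    struct bu_vls name;
    struct directory **headp;
    int ret;

    bu_vls_init(&name);
    bu_vls_strcpy(&name, candidate);
    ret = db_dircheck(dbip, &name, LOOKUP_QUIET, &headp);
    if (ret < 0)
	bu_log("no unique name available for %s\n", candidate);
    else
	bu_log("usable name: %s\n", bu_vls_addr(&name));
    /* headp now addresses the hash bucket a new entry would go on */
    bu_vls_free(&name);
    return ret;
}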
int
db_rename(struct db_i *dbip, struct directory *dp, const char *newname)
{
    struct directory *findp;
    struct directory **headp;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);

    /* Remove from linked list */
    headp = &(dbip->dbi_Head[db_dirhash(dp->d_namep)]);
    if (*headp == dp) {
	/* Was first on list, dequeue */
	*headp = dp->d_forw;
    } else {
	for (findp = *headp; findp != RT_DIR_NULL; findp = findp->d_forw) {
	    if (findp->d_forw != dp)
		continue;
	    /* Dequeue */
	    findp->d_forw = dp->d_forw;
	    goto out;
	}
	return -1;		/* ERROR: can't find */
    }

out:
    /* Effect new name */
    RT_DIR_FREE_NAMEP(dp);			/* frees d_namep */
    RT_DIR_SET_NAMEP(dp, newname);	/* sets d_namep */

    /* Add to new linked list */
    headp = &(dbip->dbi_Head[db_dirhash(newname)]);
    dp->d_forw = *headp;
    *headp = dp;
    return 0;
}
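/*
 * Usage sketch (an assumption): rename an in-memory directory entry.
 * db_rename() neither checks for collisions nor rewrites the on-disk
 * object, so the caller guards with db_lookup() first.
 */
static int
example_rename(struct db_i *dbip, const char *from, const char *to)
{
    struct directory *dp = db_lookup(dbip, from, LOOKUP_NOISY);

    if (dp == RT_DIR_NULL)
	return -1;
    if (db_lookup(dbip, to, LOOKUP_QUIET) != RT_DIR_NULL) {
	bu_log("db_rename target %s already exists\n", to);
	return -1;
    }
    return db_rename(dbip, dp, to);
}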
/**
 * R T _ F R E E _ S O L T A B
 *
 * Decrement use count on soltab structure.  If no longer needed,
 * release associated storage, and free the structure.
 *
 * This routine uses semaphores to protect against other copies of
 * itself running in parallel, and against other routines (such as
 * rt_find_identical_solid()) which might also be modifying the linked
 * list heads.
 *
 * Called by -
 *	db_free_tree()
 *	rt_clean()
 *	rt_gettrees()
 *	rt_kill_deal_solid_refs()
 */
void
rt_free_soltab(struct soltab *stp)
{
    int	hash;

    RT_CK_SOLTAB(stp);
    if ( stp->st_id < 0 )
	bu_bomb("rt_free_soltab:  bad st_id");

    hash = db_dirhash(stp->st_dp->d_namep);

    ACQUIRE_SEMAPHORE_TREE(hash);		/* start critical section */
    if ( --(stp->st_uses) > 0 )  {
	RELEASE_SEMAPHORE_TREE(hash);
	return;
    }
    BU_LIST_DEQUEUE( &(stp->l2) );		/* remove from st_dp->d_use_hd list */
    BU_LIST_DEQUEUE( &(stp->l) );		/* uses rti_solidheads[] */

    RELEASE_SEMAPHORE_TREE(hash);		/* end critical section */

    if ( stp->st_aradius > 0 )  {
	stp->st_meth->ft_free( stp );
	stp->st_aradius = 0;
    }
    if ( stp->st_matp )  bu_free( (char *)stp->st_matp, "st_matp");
    stp->st_matp = (matp_t)0;	/* Sanity */

    bu_ptbl_free(&stp->st_regions);

    stp->st_dp = DIR_NULL;		/* Sanity */

    if ( stp->st_path.magic )  {
	RT_CK_FULL_PATH( &stp->st_path );
	db_free_full_path( &stp->st_path );
    }

    bu_free( (char *)stp, "struct soltab" );
}
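/*
 * Sketch (an assumption): the "dead solid" convention that ties
 * rt_free_soltab(), rt_gettree_leaf() and rt_find_identical_solid()
 * together.  A soltab whose prep failed gets st_aradius = -1 instead
 * of being unlinked, so concurrent list walkers stay safe.
 */
static int
example_solid_is_dead(const struct soltab *stp)
{
    /* a live solid starts with st_aradius == 0 and only grows, so
     * testing against -1 cannot misfire during prep (see the race
     * note in rt_gettree_leaf() below) */
    return stp->st_aradius <= -1;
}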
int
db_dirdelete(struct db_i *dbip, struct directory *dp)
{
    struct directory *findp;
    struct directory **headp;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);

    headp = &(dbip->dbi_Head[db_dirhash(dp->d_namep)]);

    if (dp->d_flags & RT_DIR_INMEM) {
	if (dp->d_un.ptr != NULL)
	    bu_free(dp->d_un.ptr, "db_dirdelete() inmem ptr");
    }

    if (*headp == dp) {
	RT_DIR_FREE_NAMEP(dp);	/* frees d_namep */
	*headp = dp->d_forw;

	/* Put 'dp' back on the freelist */
	dp->d_forw = rt_uniresource.re_directory_hd;
	rt_uniresource.re_directory_hd = dp;
	return 0;
    }
    for (findp = *headp; findp != RT_DIR_NULL; findp = findp->d_forw) {
	if (findp->d_forw != dp)
	    continue;
	RT_DIR_FREE_NAMEP(dp);	/* frees d_namep */
	findp->d_forw = dp->d_forw;

	/* Put 'dp' back on the freelist */
	dp->d_forw = rt_uniresource.re_directory_hd;
	rt_uniresource.re_directory_hd = dp;
	return 0;
    }
    return -1;
}
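/*
 * Usage sketch (an assumption): drop an object's in-memory directory
 * entry.  Removing the on-disk record is a separate step (e.g. via
 * db_delete()), which db_dirdelete() deliberately does not perform.
 */
static int
example_kill_entry(struct db_i *dbip, const char *name)
{
    struct directory *dp = db_lookup(dbip, name, LOOKUP_NOISY);

    if (dp == RT_DIR_NULL)
	return -1;
    return db_dirdelete(dbip, dp);
}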
/**
 * R T _ G E T T R E E _ L E A F
 *
 * This routine must be prepared to run in parallel.
 */
HIDDEN union tree *
rt_gettree_leaf(struct db_tree_state *tsp, struct db_full_path *pathp /*const*/, struct rt_db_internal *ip /*const*/, genptr_t client_data)
{
    register struct soltab	*stp;
    union tree		*curtree;
    struct directory	*dp;
    register matp_t		mat;
    int			i;
    struct rt_i		*rtip;

    RT_CK_DBTS(tsp);
    RT_CK_DBI(tsp->ts_dbip);
    RT_CK_FULL_PATH(pathp);
    RT_CK_DB_INTERNAL(ip);
    rtip = tsp->ts_rtip;
    RT_CK_RTI(rtip);
    RT_CK_RESOURCE(tsp->ts_resp);
    dp = DB_FULL_PATH_CUR_DIR(pathp);

    /* Determine if this matrix is an identity matrix */

    if ( !bn_mat_is_equal(tsp->ts_mat, bn_mat_identity, &rtip->rti_tol)) {
	/* Not identity matrix */
	mat = (matp_t)tsp->ts_mat;
    } else {
	/* Identity matrix */
	mat = (matp_t)0;
    }

    /*
     * Check to see if this exact solid has already been processed.
     * Match on leaf name and matrix.  Note that there is a race here
     * between having st_id filled in a few lines below (which is
     * necessary for calling ft_prep), and ft_prep filling in
     * st_aradius.  Fortunately, st_aradius starts out as zero, and
     * will never go down to -1 unless this soltab structure has
     * become a dead solid, so by testing against -1 (instead of <= 0,
     * like before, oops), it isn't a problem.
     */
    stp = rt_find_identical_solid( mat, dp, rtip );
    if ( stp->st_id != 0 )  {
	/* stp is an instance of a pre-existing solid */
	if ( stp->st_aradius <= -1 )  {
	    /* It's dead, Jim.  st_uses was not incremented. */
	    return( TREE_NULL );	/* BAD: instance of dead solid */
	}
	goto found_it;
    }

    if ( rtip->rti_add_to_new_solids_list ) {
	bu_ptbl_ins( &rtip->rti_new_solids, (long *)stp );
    }

    stp->st_id = ip->idb_type;
    stp->st_meth = &rt_functab[ip->idb_type];
    if ( mat )  {
	mat = stp->st_matp;
    } else {
	mat = (matp_t)bn_mat_identity;
    }

    RT_CK_DB_INTERNAL( ip );

    /* init solid's maxima and minima */
    VSETALL( stp->st_max, -INFINITY );
    VSETALL( stp->st_min,  INFINITY );

    /*
     * If the ft_prep routine wants to keep the internal structure,
     * that is OK, as long as idb_ptr is set to null.  Note that the
     * prep routine may have changed st_id.
     */
    if ( stp->st_meth->ft_prep( stp, ip, rtip ) )  {
	int	hash;
	/* Error, solid no good */
	bu_log("rt_gettree_leaf(%s):  prep failure\n", dp->d_namep );
	/* Too late to delete soltab entry; mark it as "dead" */
	hash = db_dirhash( dp->d_namep );
	ACQUIRE_SEMAPHORE_TREE(hash);
	stp->st_aradius = -1;
	stp->st_uses--;
	RELEASE_SEMAPHORE_TREE(hash);
	return( TREE_NULL );		/* BAD */
    }

    if ( rtip->rti_dont_instance )  {
	/*
	 * If instanced solid refs are not being compressed, then
	 * memory isn't an issue, and the application (such as
	 * solids_on_ray) probably cares about the full path of this
	 * solid, from root to leaf.  So make it available here.
	 * (stp->st_dp->d_uses could be the way to discriminate
	 * references uniquely, if the path isn't enough.  To locate
	 * given dp and d_uses, search dp->d_use_hd list.  Question
	 * is, where to stash current val of d_uses?)
	 */
	db_full_path_init( &stp->st_path );
	db_dup_full_path( &stp->st_path, pathp );
    } else {
	/*
	 * If there is more than just a direct reference to this leaf
	 * from its containing region, copy that below-region path
	 * into st_path.  Otherwise, leave st_path's magic number 0.
	 *
	 * XXX nothing depends on this behavior yet, and this whole
	 * XXX 'else' clause might well be deleted. -Mike
	 */
	i = pathp->fp_len-1;
	if ( i > 0 && !(pathp->fp_names[i-1]->d_flags & DIR_REGION) )  {
	    /* Search backwards for region.  If no region, use whole path */
	    for ( --i; i > 0; i-- )  {
		if ( pathp->fp_names[i-1]->d_flags & DIR_REGION ) break;
	    }
	    if ( i < 0 )  i = 0;
	    db_full_path_init( &stp->st_path );
	    db_dup_path_tail( &stp->st_path, pathp, i );
	}
    }
    if (RT_G_DEBUG&DEBUG_TREEWALK && stp->st_path.magic == DB_FULL_PATH_MAGIC)  {
	char	*sofar = db_path_to_string(&stp->st_path);
	bu_log("rt_gettree_leaf() st_path=%s\n", sofar );
	bu_free(sofar, "path string");
    }

    if (RT_G_DEBUG&DEBUG_SOLIDS)  {
	struct bu_vls	str;
	bu_log("\n---Primitive %d: %s\n", stp->st_bit, dp->d_namep);
	bu_vls_init( &str );
	/* verbose=1, mm2local=1.0 */
	if ( stp->st_meth->ft_describe( &str, ip, 1, 1.0, tsp->ts_resp, tsp->ts_dbip ) < 0 )  {
	    bu_log("rt_gettree_leaf(%s):  solid describe failure\n",
		   dp->d_namep );
	}
	bu_log( "%s:  %s", dp->d_namep, bu_vls_addr( &str ) );
	bu_vls_free( &str );
    }

 found_it:
    RT_GET_TREE( curtree, tsp->ts_resp );
    curtree->magic = RT_TREE_MAGIC;
    curtree->tr_op = OP_SOLID;
    curtree->tr_a.tu_stp = stp;
    /* regionp will be filled in later by rt_tree_region_assign() */
    curtree->tr_a.tu_regionp = (struct region *)0;

    if (RT_G_DEBUG&DEBUG_TREEWALK)  {
	char	*sofar = db_path_to_string(pathp);
	bu_log("rt_gettree_leaf() %s\n", sofar );
	bu_free(sofar, "path string");
    }

    return(curtree);
}
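/*
 * Usage sketch (an assumption): rt_gettree_leaf() is never called
 * directly; it is the leaf-visit callback librt installs during
 * db_walk_tree() under the public rt_gettree()/rt_gettrees() entry
 * points.  A typical load therefore looks like this:
 */
static struct rt_i *
example_load(const char *file, const char *top)
{
    struct rt_i *rtip = rt_dirbuild(file, NULL, 0);

    if (rtip == RTI_NULL)
	return RTI_NULL;
    if (rt_gettree(rtip, top) < 0) {	/* walks the tree; each leaf
					 * lands in rt_gettree_leaf() */
	rt_free_rti(rtip);
	return RTI_NULL;
    }
    rt_prep(rtip);	/* prep all loaded solids */
    return rtip;
}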
/**
 * R T _ F I N D _ I D E N T I C A L _ S O L I D
 *
 * See if solid "dp" as transformed by "mat" already exists in the
 * soltab list.  If it does, return the matching stp, otherwise,
 * create a new soltab structure, enroll it in the list, and return a
 * pointer to that.
 *
 * "mat" will be a null pointer when an identity matrix is signified.
 * This greatly speeds the comparison process.
 *
 * The two cases can be distinguished by the fact that stp->st_id will
 * be 0 for a new soltab structure, and non-zero for an existing one.
 *
 * This routine will run in parallel.
 *
 * In order to avoid a race between searching the soltab list and
 * adding new solids to it, the new solid to be added *must* be
 * enrolled in the list before exiting the critical section.
 *
 * To limit the size of the list to be searched, there are many lists.
 * The selection of which list is determined by the hash value
 * computed from the solid's name.  This is the same optimization used
 * in searching the directory lists.
 *
 * This subroutine is the critical bottleneck in parallel tree walking.
 *
 * It is safe, and much faster, to use several different critical
 * sections when searching different lists.
 *
 * There are only 4 dedicated semaphores defined, TREE0 through TREE3.
 * This unfortunately limits the code to having only 4 CPUs doing list
 * searching at any one time.  Hopefully, this is enough parallelism
 * to keep the rest of the CPUs doing I/O and actual solid prepping.
 *
 * Since the algorithm has been reduced from an O((nsolid/128)**2)
 * search on the entire rti_solidheads[hash] list to an O(ninstance)
 * search on the dp->d_use_hd list for this one solid, the critical
 * section should be relatively short-lived.  Having the 4-way split
 * should provide ample opportunity for parallelism through here,
 * while still ensuring that the necessary variables are protected.
 *
 * There are two critical variables which *both* need to be protected:
 * the specific rti_solidhead[hash] list head, and the specific
 * dp->d_use_hd list head.  Fortunately, since the selection of
 * critical section is based upon db_dirhash(dp->d_namep), any other
 * processor that wants to search this same 'dp' will get the same
 * hash as the current thread, and will thus wait for the appropriate
 * semaphore to be released.  Similarly, any other thread that wants
 * to search the same rti_solidhead[hash] list as the current thread
 * will be using the same hash, and will thus wait for the proper
 * semaphore.
 */
HIDDEN struct soltab *rt_find_identical_solid(register const matp_t mat, register struct directory *dp, struct rt_i *rtip)
{
    register struct soltab	*stp = RT_SOLTAB_NULL;
    int			hash;

    RT_CK_DIR(dp);
    RT_CK_RTI(rtip);

    hash = db_dirhash( dp->d_namep );

    /* Enter the appropriate dual critical-section */
    ACQUIRE_SEMAPHORE_TREE(hash);

    /*
     * If solid has not been referenced yet, the search can be
     * skipped.  If solid is being referenced a _lot_, it certainly
     * isn't all going to be in the same place, so don't bother
     * searching.  Consider the case of a million instances of the
     * same tree submodel solid.
     */
    if ( dp->d_uses > 0 && dp->d_uses < 100 &&
	 rtip->rti_dont_instance == 0
	)  {
	struct bu_list	*mid;

	/* Search dp->d_use_hd list for other instances */
	for ( BU_LIST_FOR( mid, bu_list, &dp->d_use_hd ) )  {

	    stp = BU_LIST_MAIN_PTR( soltab, mid, l2 );
	    RT_CK_SOLTAB(stp);

	    if ( stp->st_matp == (matp_t)0 )  {
		if ( mat == (matp_t)0 )  {
		    /* Both have identity matrix */
		    goto more_checks;
		}
		continue;
	    }
	    if ( mat == (matp_t)0 )  continue;	/* doesn't match */

	    if ( !bn_mat_is_equal(mat, stp->st_matp, &rtip->rti_tol))
		continue;

	more_checks:
	    /* Don't instance this solid from some other model
	     * instance.  As this is nearly always equal, check it
	     * last
	     */
	    if ( stp->st_rtip != rtip )  continue;

	    /*
	     * stp now points to re-referenced solid.  stp->st_id is
	     * non-zero, indicating pre-existing solid.
	     */
	    RT_CK_SOLTAB(stp);		/* sanity */

	    /* Only increment use counter for non-dead solids. */
	    if ( !(stp->st_aradius <= -1) )
		stp->st_uses++;
	    /* dp->d_uses is NOT incremented, because number of
	     * soltab's using it has not gone up.
	     */
	    if ( RT_G_DEBUG & DEBUG_SOLIDS )  {
		bu_log( mat ?
			"rt_find_identical_solid:  %s re-referenced %d\n" :
			"rt_find_identical_solid:  %s re-referenced %d (identity mat)\n",
			dp->d_namep, stp->st_uses );
	    }

	    /* Leave the appropriate dual critical-section */
	    RELEASE_SEMAPHORE_TREE(hash);
	    return stp;
	}
    }

    /*
     * Create and link a new solid into the list.
     *
     * Ensure the search keys "dp", "st_mat" and "st_rtip" are stored
     * now, while still inside the critical section, because they are
     * searched on, above.
     */
    BU_GETSTRUCT(stp, soltab);
    stp->l.magic = RT_SOLTAB_MAGIC;
    stp->l2.magic = RT_SOLTAB2_MAGIC;
    stp->st_rtip = rtip;
    stp->st_dp = dp;
    dp->d_uses++;
    stp->st_uses = 1;
    /* stp->st_id is intentionally left zero here, as a flag */

    if ( mat )  {
	stp->st_matp = (matp_t)bu_malloc( sizeof(mat_t), "st_matp" );
	MAT_COPY( stp->st_matp, mat );
    } else {
	stp->st_matp = (matp_t)0;
    }

    /* Add to the appropriate soltab list head */
    /* PARALLEL NOTE:  Uses critical section on rt_solidheads element */
    BU_LIST_INSERT( &(rtip->rti_solidheads[hash]), &(stp->l) );

    /* Also add to the directory structure list head */
    /* PARALLEL NOTE:  Uses critical section on this 'dp' */
    BU_LIST_INSERT( &dp->d_use_hd, &(stp->l2) );

    /*
     * Leave the 4-way critical-section protecting dp and [hash]
     */
    RELEASE_SEMAPHORE_TREE(hash);

    /* Enter an exclusive critical section to protect nsolids.
     * nsolids++ needs to be locked to a SINGLE thread
     */
    bu_semaphore_acquire(RT_SEM_STATS);
    stp->st_bit = rtip->nsolids++;
    bu_semaphore_release(RT_SEM_STATS);

    /*
     * Fill in the last little bit of the structure in full parallel
     * mode, outside of any critical section.
     */

    /* Init tables of regions using this solid.  Usually small. */
    bu_ptbl_init( &stp->st_regions, 7, "st_regions ptbl" );

    return stp;
}
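/*
 * Sketch (an assumption): the hash -> semaphore mapping behind the
 * 4-way TREE split described above.  The real ACQUIRE_SEMAPHORE_TREE
 * macro lives in librt; this only illustrates the selection scheme.
 */
static void
example_acquire_tree_sem(int hash)
{
    switch (hash & 03) {	/* low two bits pick one of four locks */
	case 0:
	    bu_semaphore_acquire(RT_SEM_TREE0);
	    break;
	case 1:
	    bu_semaphore_acquire(RT_SEM_TREE1);
	    break;
	case 2:
	    bu_semaphore_acquire(RT_SEM_TREE2);
	    break;
	default:
	    bu_semaphore_acquire(RT_SEM_TREE3);
	    break;
    }
}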