/**
 * Using a single call to db_put(), write multiple zeroed records out,
 * all with the u_id field set to ID_FREE.  This will zap all records
 * from "start" to the end of this entry.
 *
 * Returns:
 * 0 on success (from db_put())
 * non-zero on failure
 */
int
db_zapper(struct db_i *dbip, struct directory *dp, size_t start)
{
    union record *rp;
    size_t i;
    size_t todo;
    int ret;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);
    if (RT_G_DEBUG&DEBUG_DB)
	bu_log("db_zapper(%s) %p, %p, start=%zu\n",
	       dp->d_namep, (void *)dbip, (void *)dp, start);

    if (dp->d_flags & RT_DIR_INMEM)
	bu_bomb("db_zapper() called on RT_DIR_INMEM object\n");

    if (dbip->dbi_read_only)
	return -1;

    BU_ASSERT_LONG(dbip->dbi_version, ==, 4);

    if (dp->d_len < start)
	return -1;

    if ((todo = dp->d_len - start) == 0)
	return 0;	/* OK -- trivial */

    rp = (union record *)bu_malloc(todo * sizeof(union record), "db_zapper buf");
    memset((char *)rp, 0, todo * sizeof(union record));

    for (i = 0; i < todo; i++)
	rp[i].u_id = ID_FREE;

    ret = db_put(dbip, dp, rp, (off_t)start, todo);
    bu_free((char *)rp, "db_zapper buf");
    return ret;
}
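/*
 * Illustrative sketch, not part of the original source: a caller that
 * wants to shrink a v4 entry in place could keep the first "keep"
 * granules and zap only the tail.  The function name is hypothetical.
 */
static int
example_truncate_entry(struct db_i *dbip, struct directory *dp, size_t keep)
{
    if (dp->d_len <= keep)
	return 0;	/* nothing past "keep" to mark as free */

    /* overwrite granules keep..d_len-1 with ID_FREE markers */
    return db_zapper(dbip, dp, keep);
}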
/**
 * Delete the indicated database record(s).  Arrange to write "free
 * storage" database markers in their place, positively erasing what
 * had been there before.
 *
 * Returns:
 * 0 on success
 * non-zero on failure
 */
int
db_delete(struct db_i *dbip, struct directory *dp)
{
    int i = 0;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);
    if (RT_G_DEBUG&DEBUG_DB)
	bu_log("db_delete(%s) %p, %p\n", dp->d_namep, (void *)dbip, (void *)dp);

    if (dp->d_flags & RT_DIR_INMEM) {
	bu_free(dp->d_un.ptr, "db_delete d_un.ptr");
	dp->d_un.ptr = NULL;
	dp->d_len = 0;
	return 0;
    }

    if (db_version(dbip) == 4) {
	i = db_zapper(dbip, dp, 0);
	rt_memfree(&(dbip->dbi_freep), (unsigned)dp->d_len,
		   dp->d_addr/(sizeof(union record)));
    } else if (db_version(dbip) == 5) {
	i = db5_write_free(dbip, dp, dp->d_len);
	rt_memfree(&(dbip->dbi_freep), dp->d_len, dp->d_addr);
    } else {
	bu_bomb("db_delete() unsupported database version\n");
    }

    dp->d_len = 0;
    dp->d_addr = RT_DIR_PHONY_ADDR;
    return i;
}
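/*
 * Illustrative sketch, not part of the original source: completely
 * removing an object is a two-step dance -- erase its storage with
 * db_delete(), then drop its directory entry with db_dirdelete().
 * The function name is hypothetical.
 */
static int
example_kill_object(struct db_i *dbip, const char *name)
{
    struct directory *dp;

    if ((dp = db_lookup(dbip, name, LOOKUP_NOISY)) == RT_DIR_NULL)
	return -1;

    if (db_delete(dbip, dp))		/* write free-storage markers */
	return -1;

    return db_dirdelete(dbip, dp);	/* unlink from the hash table */
}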
/**
 * Find a block of database storage of "count" granules.
 *
 * Returns:
 * 0 OK
 * non-0 failure
 */
int
db_alloc(register struct db_i *dbip, register struct directory *dp, size_t count)
{
    size_t len;
    union record rec;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);
    if (RT_G_DEBUG&DEBUG_DB)
	bu_log("db_alloc(%s) %p, %p, count=%zu\n",
	       dp->d_namep, (void *)dbip, (void *)dp, count);
    if (count == 0) {
	bu_log("db_alloc(0)\n");
	return -1;
    }

    if (dp->d_flags & RT_DIR_INMEM) {
	if (dp->d_un.ptr) {
	    dp->d_un.ptr = bu_realloc(dp->d_un.ptr,
				      count * sizeof(union record),
				      "db_alloc() d_un.ptr");
	} else {
	    dp->d_un.ptr = bu_malloc(count * sizeof(union record),
				     "db_alloc() d_un.ptr");
	}
	dp->d_len = count;
	return 0;
    }

    if (dbip->dbi_read_only) {
	bu_log("db_alloc on READ-ONLY file\n");
	return -1;
    }

    while (1) {
	len = rt_memalloc(&(dbip->dbi_freep), (unsigned)count);
	if (len == 0L) {
	    /* No contiguous free block, append to file */
	    if ((dp->d_addr = dbip->dbi_eof) == RT_DIR_PHONY_ADDR) {
		bu_log("db_alloc: bad EOF\n");
		return -1;
	    }
	    dp->d_len = count;
	    dbip->dbi_eof += (off_t)(count * sizeof(union record));
	    dbip->dbi_nrec += count;
	    break;
	}
	dp->d_addr = (off_t)(len * sizeof(union record));
	dp->d_len = count;
	if (db_get(dbip, dp, &rec, 0, 1) < 0)
	    return -1;
	if (rec.u_id != ID_FREE) {
	    bu_log("db_alloc(): len %zu non-FREE (id %d), skipping\n",
		   len, rec.u_id);
	    continue;
	}
	/* Verified FREE block found; stop searching */
	break;
    }

    /* Clear out ALL the granules, for safety */
    return db_zapper(dbip, dp, 0);
}
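/*
 * Illustrative sketch, not part of the original source: once
 * db_alloc() has reserved "count" granules (and db_zapper() has
 * cleared them), the caller fills them in with db_put().  The record
 * contents and function name here are placeholders.
 */
static int
example_write_granules(struct db_i *dbip, struct directory *dp,
		       union record *recs, size_t count)
{
    if (db_alloc(dbip, dp, count) < 0)
	return -1;

    /* overwrite the freshly cleared granules with real records */
    return db_put(dbip, dp, recs, 0, count);
}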
/**
 * Delete a specific record from a database entry.
 * No longer supported.
 */
int
db_delrec(struct db_i *dbip, register struct directory *dp, int recnum)
{
    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);
    if (RT_G_DEBUG&DEBUG_DB)
	bu_log("db_delrec(%s) %p, %p, recnum=%d\n",
	       dp->d_namep, (void *)dbip, (void *)dp, recnum);

    bu_log("ERROR db_delrec() is no longer supported.  Use combination import/export routines.\n");
    return -1;
}
/**
 * For debugging, walk every hash chain in the directory and verify
 * the integrity of each directory entry.
 */
void
db_ck_directory(const struct db_i *dbip)
{
    struct directory *dp;
    int i;

    RT_CK_DBI(dbip);

    for (i = 0; i < RT_DBNHASH; i++) {
	for (dp = dbip->dbi_Head[i]; dp != RT_DIR_NULL; dp = dp->d_forw)
	    RT_CK_DIR(dp);
    }
}
int
get_de_pointers(union tree *tp, struct directory *dp, int de_len, int *de_pointers)
{
    RT_CK_TREE(tp);
    RT_CK_DIR(dp);

    switch (tp->tr_op) {
	case OP_UNION:
	case OP_SUBTRACT:
	case OP_INTERSECT:
	    get_de_pointers(tp->tr_b.tb_left, dp, de_len, de_pointers);
	    get_de_pointers(tp->tr_b.tb_right, dp, de_len, de_pointers);
	    break;
	case OP_DB_LEAF: {
	    struct directory *dp_M;

	    dp_M = db_lookup(DBIP, tp->tr_l.tl_name, LOOKUP_NOISY);
	    if (dp_M == RT_DIR_NULL)
		return 1;

	    if (dp_M->d_uses >= 0) {
		bu_log("g-iges: member (%s) in combination (%s) has not been written to iges file\n",
		       dp_M->d_namep, dp->d_namep);
		de_pointers[de_pointer_number++] = 0;
		return 1;
	    }

	    if (tp->tr_l.tl_mat && !bn_mat_is_identity(tp->tr_l.tl_mat)) {
		/* write a solid instance entity for this member with
		 * a pointer to the new matrix
		 */
		if (!NEAR_ZERO(tp->tr_l.tl_mat[15] - 1.0, tol.dist)) {
		    /* scale factor is not 1.0, IGES can't handle it.
		     * go ahead and write the solid instance anyway,
		     * but warn the user twice
		     */
		    bu_log("g-iges WARNING: member (%s) of combination (%s) is scaled, IGES cannot handle this\n",
			   dp_M->d_namep, dp->d_namep);
		    scale_error++;
		}
		de_pointers[de_pointer_number++] = write_solid_instance(-dp_M->d_uses, tp->tr_l.tl_mat, fp_dir, fp_param);
	    } else
		de_pointers[de_pointer_number++] = (-dp_M->d_uses);

	    if (dp_M->d_nref)
		comb_form = 1;
	}
	    break;
	default:
	    bu_log("Unrecognized operator in combination!\n");
	    return 1;
    }

    return 0;
}
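/*
 * Illustrative companion, not part of the original source: before
 * calling get_de_pointers() the caller needs the de_pointers array
 * sized to the number of leaves in the tree.  A count can be made
 * with the same recursion pattern; the function name is hypothetical.
 */
static int
example_count_leaves(union tree *tp)
{
    if (!tp)
	return 0;

    switch (tp->tr_op) {
	case OP_UNION:
	case OP_SUBTRACT:
	case OP_INTERSECT:
	    /* interior node: sum the leaves of both subtrees */
	    return example_count_leaves(tp->tr_b.tb_left)
		+ example_count_leaves(tp->tr_b.tb_right);
	case OP_DB_LEAF:
	    return 1;
	default:
	    return 0;
    }
}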
/**
 * Returns 1 if the directory entry "dp" occurs anywhere on the full
 * path "a", 0 otherwise.
 */
int
db_full_path_search(const struct db_full_path *a, const struct directory *dp)
{
    long i;

    RT_CK_FULL_PATH(a);
    RT_CK_DIR(dp);
    BU_ASSERT_SIZE_T(a->fp_len, <, LONG_MAX);

    for (i = a->fp_len-1; i >= 0; i--) {
	if (a->fp_names[i] == dp)
	    return 1;
    }
    return 0;
}
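/*
 * Illustrative usage, not part of the original source (names are
 * hypothetical): guard an operation depending on whether "dp" already
 * appears in the path being walked, e.g. to detect cyclic references.
 */
static void
example_report_membership(const struct db_full_path *pathp, const struct directory *dp)
{
    if (db_full_path_search(pathp, dp))
	bu_log("%s already appears on this path\n", dp->d_namep);
}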
/**
 * Remove "dp" from its directory hash chain and place the struct back
 * on the free list.
 *
 * Returns:
 * 0 on success
 * -1 if the entry could not be found
 */
int
db_dirdelete(struct db_i *dbip, struct directory *dp)
{
    struct directory *findp;
    struct directory **headp;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);

    headp = &(dbip->dbi_Head[db_dirhash(dp->d_namep)]);

    if (dp->d_flags & RT_DIR_INMEM) {
	if (dp->d_un.ptr != NULL)
	    bu_free(dp->d_un.ptr, "db_dirdelete() inmem ptr");
    }

    if (*headp == dp) {
	RT_DIR_FREE_NAMEP(dp);	/* frees d_namep */
	*headp = dp->d_forw;

	/* Put 'dp' back on the freelist */
	dp->d_forw = rt_uniresource.re_directory_hd;
	rt_uniresource.re_directory_hd = dp;
	return 0;
    }

    for (findp = *headp; findp != RT_DIR_NULL; findp = findp->d_forw) {
	if (findp->d_forw != dp)
	    continue;
	RT_DIR_FREE_NAMEP(dp);	/* frees d_namep */
	findp->d_forw = dp->d_forw;

	/* Put 'dp' back on the freelist */
	dp->d_forw = rt_uniresource.re_directory_hd;
	rt_uniresource.re_directory_hd = dp;
	return 0;
    }
    return -1;
}
/**
 * Copy every object in the input database "dbip" into the database
 * opened for writing as "wdbp".
 *
 * Returns:
 * 0 on success
 * -1 on failure
 */
int
db_dump(struct rt_wdb *wdbp,	/* output */
	struct db_i *dbip)	/* input */
{
    register int i;
    register struct directory *dp;
    struct bu_external ext;

    RT_CK_DBI(dbip);
    RT_CK_WDB(wdbp);

    /* just in case since we don't actually handle it below */
    if (db_version(dbip) != db_version(wdbp->dbip)) {
	bu_log("Internal Error: dumping a v%d database into a v%d database is untested\n",
	       db_version(dbip), db_version(wdbp->dbip));
	return -1;
    }

    /* Output all directory entries */
    for (i = 0; i < RT_DBNHASH; i++) {
	for (dp = dbip->dbi_Head[i]; dp != RT_DIR_NULL; dp = dp->d_forw) {
	    RT_CK_DIR(dp);
	    /* XXX Need to go to internal form, if database versions don't match */
	    if (db_get_external(&ext, dp, dbip) < 0) {
		bu_log("db_dump() read failed on %s, skipping\n", dp->d_namep);
		continue;
	    }
	    if (wdb_export_external(wdbp, &ext, dp->d_namep,
				    dp->d_flags & ~(RT_DIR_INMEM),
				    dp->d_minor_type) < 0) {
		bu_log("db_dump() write failed on %s, aborting\n", dp->d_namep);
		bu_free_external(&ext);
		return -1;
	    }
	    bu_free_external(&ext);
	}
    }
    return 0;
}
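/*
 * Illustrative sketch, not part of the original source: copy an open
 * database into a new on-disk file via db_dump().  The output file
 * name is hypothetical.
 */
static int
example_backup(struct db_i *dbip)
{
    struct rt_wdb *wdbp;

    if ((wdbp = wdb_fopen("backup.g")) == RT_WDB_NULL)
	return -1;

    if (db_dump(wdbp, dbip) < 0) {
	wdb_close(wdbp);
	return -1;
    }

    wdb_close(wdbp);
    return 0;
}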
/**
 * Change the name of a directory entry, re-hashing it onto the
 * correct linked list.  Only the in-core directory is updated;
 * references to the old name inside combinations are not rewritten.
 *
 * Returns:
 * 0 on success
 * -1 if the entry could not be found on its hash chain
 */
int
db_rename(struct db_i *dbip, struct directory *dp, const char *newname)
{
    struct directory *findp;
    struct directory **headp;

    RT_CK_DBI(dbip);
    RT_CK_DIR(dp);

    /* Remove from linked list */
    headp = &(dbip->dbi_Head[db_dirhash(dp->d_namep)]);
    if (*headp == dp) {
	/* Was first on list, dequeue */
	*headp = dp->d_forw;
    } else {
	for (findp = *headp; findp != RT_DIR_NULL; findp = findp->d_forw) {
	    if (findp->d_forw != dp)
		continue;
	    /* Dequeue */
	    findp->d_forw = dp->d_forw;
	    goto out;
	}
	return -1;	/* ERROR: can't find */
    }

out:
    /* Effect new name */
    RT_DIR_FREE_NAMEP(dp);		/* frees d_namep */
    RT_DIR_SET_NAMEP(dp, newname);	/* sets d_namep */

    /* Add to new linked list */
    headp = &(dbip->dbi_Head[db_dirhash(newname)]);
    dp->d_forw = *headp;
    *headp = dp;
    return 0;
}
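/*
 * Illustrative usage, not part of the original source (names are
 * hypothetical): look the object up first, then rename it.
 */
static int
example_rename(struct db_i *dbip, const char *old_name, const char *new_name)
{
    struct directory *dp;

    if ((dp = db_lookup(dbip, old_name, LOOKUP_NOISY)) == RT_DIR_NULL)
	return -1;

    return db_rename(dbip, dp, new_name);
}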
/*
 * This is a helper routine used in txt_setup() to load a texture
 * either from a file or from a db object.  The resources are released
 * in txt_free() (there is no specific unload_datasource function).
 */
HIDDEN int
txt_load_datasource(struct txt_specific *texture, struct db_i *dbInstance, const unsigned long int size)
{
    struct directory *dirEntry;

    RT_CK_DBI(dbInstance);

    if (texture == (struct txt_specific *)NULL) {
	bu_bomb("ERROR: txt_load_datasource() received NULL arg (struct txt_specific *)\n");
    }

    bu_log("Loading texture %s [%s]...",
	   texture->tx_datasrc==TXT_SRC_AUTO ? "from auto-determined datasource" :
	   texture->tx_datasrc==TXT_SRC_OBJECT ? "from a database object" :
	   texture->tx_datasrc==TXT_SRC_FILE ? "from a file" :
	   "from an unknown source (ERROR)",
	   bu_vls_addr(&texture->tx_name));

    /* if the source is auto or object, we try to load the object */
    if ((texture->tx_datasrc==TXT_SRC_AUTO) || (texture->tx_datasrc==TXT_SRC_OBJECT)) {

	/* see if the object exists */
	if ((dirEntry=db_lookup(dbInstance, bu_vls_addr(&texture->tx_name), LOOKUP_QUIET)) == RT_DIR_NULL) {

	    /* unable to find the texture object */
	    if (texture->tx_datasrc!=TXT_SRC_AUTO) {
		return -1;
	    }
	} else {
	    struct rt_db_internal *dbip;	/* NB: an rt_db_internal, not a struct db_i */

	    BU_ALLOC(dbip, struct rt_db_internal);

	    RT_DB_INTERNAL_INIT(dbip);
	    RT_CK_DB_INTERNAL(dbip);
	    RT_CK_DIR(dirEntry);

	    /* the object was in the directory, so go get it */
	    if (rt_db_get_internal(dbip, dirEntry, dbInstance, NULL, NULL) <= 0) {
		/* unable to load/create the texture database record object */
		return -1;
	    }

	    RT_CK_DB_INTERNAL(dbip);
	    RT_CK_BINUNIF(dbip->idb_ptr);

	    /* keep the binary object pointer */
	    texture->tx_binunifp=(struct rt_binunif_internal *)dbip->idb_ptr;	/* make it so */

	    /* release the database instance pointer struct we created */
	    RT_DB_INTERNAL_INIT(dbip);
	    bu_free(dbip, "txt_load_datasource");

	    /* check size of object */
	    if (texture->tx_binunifp->count < size) {
		bu_log("\nWARNING: %s needs %lu bytes, binary object only has %zu\n",
		       bu_vls_addr(&texture->tx_name), size, texture->tx_binunifp->count);
	    } else if (texture->tx_binunifp->count > size) {
		bu_log("\nWARNING: Binary object is larger than specified texture size\n\tBinary Object: %zu pixels\n\tSpecified Texture Size: %lu pixels\n...continuing to load using image subsection...",
		       texture->tx_binunifp->count, size);
	    }
	}
    }

    /* if we are auto and we couldn't find a database object match, or
     * if source is explicitly a file then we load the file.
     */
    if (((texture->tx_datasrc==TXT_SRC_AUTO) && (texture->tx_binunifp==NULL)) || (texture->tx_datasrc==TXT_SRC_FILE)) {

	texture->tx_mp = bu_open_mapped_file_with_path(dbInstance->dbi_filepath, bu_vls_addr(&texture->tx_name), NULL);

	if (texture->tx_mp==NULL)
	    return -1;	/* FAIL */

	if (texture->tx_mp->buflen < size) {
	    bu_log("\nWARNING: %s needs %lu bytes, file only has %zu\n",
		   bu_vls_addr(&texture->tx_name), size, texture->tx_mp->buflen);
	} else if (texture->tx_mp->buflen > size) {
	    bu_log("\nWARNING: Texture file size is larger than specified texture size\n\tInput File: %zu pixels\n\tSpecified Texture Size: %lu pixels\n...continuing to load using image subsection...",
		   texture->tx_mp->buflen, size);
	}
    }

    bu_log("done.\n");

    return 0;
}
int
ged_unhide(struct ged *gedp, int argc, const char *argv[])
{
    struct directory *dp;
    struct db_i *dbip;
    struct bu_external ext;
    struct bu_external tmp;
    struct db5_raw_internal raw;
    int i;
    static const char *usage = "object(s)";

    GED_CHECK_DATABASE_OPEN(gedp, GED_ERROR);
    GED_CHECK_READ_ONLY(gedp, GED_ERROR);
    GED_CHECK_ARGC_GT_0(gedp, argc, GED_ERROR);

    /* initialize result */
    bu_vls_trunc(gedp->ged_result_str, 0);

    /* must be wanting help */
    if (argc == 1) {
	bu_vls_printf(gedp->ged_result_str, "Usage: %s %s", argv[0], usage);
	return GED_HELP;
    }

    dbip = gedp->ged_wdbp->dbip;

    if (db_version(dbip) < 5) {
	bu_vls_printf(gedp->ged_result_str, "Database was created with a previous release of BRL-CAD.\nSelect \"Tools->Upgrade Database...\" to enable support for this feature.");
	return GED_ERROR;
    }

    for (i = 1; i < argc; i++) {
	if ((dp = db_lookup(dbip, argv[i], LOOKUP_NOISY)) == RT_DIR_NULL)
	    continue;

	RT_CK_DIR(dp);

	BU_EXTERNAL_INIT(&ext);

	if (db_get_external(&ext, dp, dbip) < 0) {
	    bu_vls_printf(gedp->ged_result_str, "db_get_external failed for %s\n", dp->d_namep);
	    continue;
	}

	if (db5_get_raw_internal_ptr(&raw, ext.ext_buf) == NULL) {
	    bu_vls_printf(gedp->ged_result_str, "db5_get_raw_internal_ptr() failed for %s\n", dp->d_namep);
	    bu_free_external(&ext);
	    continue;
	}

	raw.h_name_hidden = (unsigned char)(0x0);

	BU_EXTERNAL_INIT(&tmp);
	db5_export_object3(&tmp, DB5HDR_HFLAGS_DLI_APPLICATION_DATA_OBJECT,
			   dp->d_namep,
			   raw.h_name_hidden,
			   &raw.attributes,
			   &raw.body,
			   raw.major_type, raw.minor_type,
			   raw.a_zzz, raw.b_zzz);
	bu_free_external(&ext);

	if (db_put_external(&tmp, dp, dbip)) {
	    bu_vls_printf(gedp->ged_result_str, "db_put_external() failed for %s\n", dp->d_namep);
	    bu_free_external(&tmp);
	    continue;
	}
	bu_free_external(&tmp);

	dp->d_flags &= ~RT_DIR_HIDDEN;
    }

    return GED_OK;
}
int
make_hole(struct rt_wdb *wdbp,		/* database to be modified */
	  point_t hole_start,		/* center of start of hole */
	  vect_t hole_depth,		/* depth and direction of hole */
	  fastf_t hole_radius,		/* radius of hole */
	  int num_objs,			/* number of objects that this hole affects */
	  struct directory **dp)	/* array of directory pointers
					 * [num_objs] of objects to
					 * get this hole applied
					 */
{
    struct bu_vls tmp_name = BU_VLS_INIT_ZERO;
    int i, base_len, count=0;

    RT_CHECK_WDB(wdbp);

    /* make sure we are only making holes in combinations, they do not
     * have to be regions
     */
    for (i=0; i<num_objs; i++) {
	RT_CK_DIR(dp[i]);
	if (!(dp[i]->d_flags & RT_DIR_COMB)) {
	    bu_log("make_hole(): can only make holes in combinations\n");
	    bu_log("\t%s is not a combination\n", dp[i]->d_namep);
	    return 4;
	}
    }

    /* make a unique name for the RCC we will use (of the form
     * "make_hole_%d")
     */
    bu_vls_strcat(&tmp_name, "make_hole_");
    base_len = bu_vls_strlen(&tmp_name);
    bu_vls_strcat(&tmp_name, "0");
    while ((db_lookup(wdbp->dbip, bu_vls_addr(&tmp_name), LOOKUP_QUIET)) != RT_DIR_NULL) {
	count++;
	bu_vls_trunc(&tmp_name, base_len);
	bu_vls_printf(&tmp_name, "%d", count);
    }

    /* build the RCC based on parameters passed in */
    if (mk_rcc(wdbp, bu_vls_addr(&tmp_name), hole_start, hole_depth, hole_radius)) {
	bu_log("Failed to create hole cylinder!!!\n");
	bu_vls_free(&tmp_name);
	return 2;
    }

    /* subtract this RCC from each combination in the list passed in */
    for (i=0; i<num_objs; i++) {
	struct rt_db_internal intern;
	struct rt_comb_internal *comb;
	union tree *tree;

	/* get the internal form of the combination */
	if (rt_db_get_internal(&intern, dp[i], wdbp->dbip, NULL, wdbp->wdb_resp) < 0) {
	    bu_log("Failed to get %s\n", dp[i]->d_namep);
	    bu_vls_free(&tmp_name);
	    return 3;
	}

	comb = (struct rt_comb_internal *)intern.idb_ptr;

	/* Build a new "subtract" node (will be the root of the new tree) */
	BU_ALLOC(tree, union tree);
	RT_TREE_INIT(tree);
	tree->tr_b.tb_op = OP_SUBTRACT;
	tree->tr_b.tb_left = comb->tree;	/* subtract from the original tree */
	comb->tree = tree;

	/* Build a node for the RCC to be subtracted */
	BU_ALLOC(tree, union tree);
	RT_TREE_INIT(tree);
	tree->tr_l.tl_op = OP_DB_LEAF;
	tree->tr_l.tl_mat = NULL;
	tree->tr_l.tl_name = bu_strdup(bu_vls_addr(&tmp_name));	/* copy name of RCC */

	/* Put the RCC node to the right of the root */
	comb->tree->tr_b.tb_right = tree;

	/* Save the modified combination.  This will overwrite the
	 * original combination if wdbp was opened with the
	 * RT_WDB_TYPE_DB_DISK flag.  If wdbp was opened with the
	 * RT_WDB_TYPE_DB_INMEM flag, then the combination will be
	 * temporarily over-written in memory only and the disk file
	 * will not be modified.
	 */
	wdb_put_internal(wdbp, dp[i]->d_namep, &intern, 1.0);
    }

    bu_vls_free(&tmp_name);
    return 0;
}
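/*
 * Illustrative usage, not part of the original source (values and
 * names are hypothetical): drill a 5mm radius hole straight down
 * through one combination.
 */
static int
example_drill(struct rt_wdb *wdbp, struct directory *comb_dp)
{
    point_t start;
    vect_t depth;

    VSET(start, 0.0, 0.0, 10.0);	/* center of the hole mouth */
    VSET(depth, 0.0, 0.0, -20.0);	/* direction and depth */

    return make_hole(wdbp, start, depth, 5.0, 1, &comb_dp);
}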
void
db_close(register struct db_i *dbip)
{
    register int i;
    register struct directory *dp, *nextdp;

    if (!dbip)
	return;

    RT_CK_DBI(dbip);
    if (RT_G_DEBUG&DEBUG_DB)
	bu_log("db_close(%s) %p uses=%d\n",
	       dbip->dbi_filename, (void *)dbip, dbip->dbi_uses);

    bu_semaphore_acquire(BU_SEM_LISTS);
    if ((--dbip->dbi_uses) > 0) {
	bu_semaphore_release(BU_SEM_LISTS);
	/* others are still using this database */
	return;
    }
    bu_semaphore_release(BU_SEM_LISTS);

    /* ready to free the database -- use count is now zero */

    /* free up any mapped files */
    if (dbip->dbi_mf) {
	/*
	 * We're using an instance of a memory mapped file.
	 * We have two choices:
	 * Either dissociate from the memory mapped file
	 * by clearing dbi_mf->apbuf, or
	 * keeping our already-scanned dbip ready for
	 * further use, with our dbi_uses counter at 0.
	 * For speed of re-open, at the price of some address space,
	 * the second choice is taken.
	 */
	bu_close_mapped_file(dbip->dbi_mf);
	bu_free_mapped_files(0);
	dbip->dbi_mf = (struct bu_mapped_file *)NULL;
    }

    /* try to ensure/encourage that the file is written out */
    db_sync(dbip);

    if (dbip->dbi_fp) {
	fclose(dbip->dbi_fp);
    }

    if (dbip->dbi_title)
	bu_free(dbip->dbi_title, "dbi_title");
    if (dbip->dbi_filename)
	bu_free(dbip->dbi_filename, "dbi_filename");

    db_free_anim(dbip);
    rt_color_free();		/* Free MaterHead list */

    /* Release map of database holes */
    rt_mempurge(&(dbip->dbi_freep));
    rt_memclose();

    dbip->dbi_inmem = NULL;	/* sanity */

    bu_ptbl_free(&dbip->dbi_clients);

    /* Free all directory entries */
    for (i = 0; i < RT_DBNHASH; i++) {
	for (dp = dbip->dbi_Head[i]; dp != RT_DIR_NULL;) {
	    RT_CK_DIR(dp);
	    nextdp = dp->d_forw;
	    RT_DIR_FREE_NAMEP(dp);	/* frees d_namep */

	    if ((dp->d_flags & RT_DIR_INMEM) && (dp->d_un.ptr != NULL)) {
		bu_free(dp->d_un.ptr, "db_close d_un.ptr");
		dp->d_un.ptr = NULL;
		dp->d_len = 0;
	    }

	    /* Put 'dp' back on the freelist */
	    dp->d_forw = rt_uniresource.re_directory_hd;
	    rt_uniresource.re_directory_hd = dp;

	    /* null'ing the forward pointer here is a huge memory leak
	     * as it causes the loss of all nodes on the freelist
	     * except the first.  (so don't do it)
	     */

	    dp = nextdp;
	}
	dbip->dbi_Head[i] = RT_DIR_NULL;	/* sanity */
    }

    if (dbip->dbi_filepath != NULL) {
	bu_free_argv(2, dbip->dbi_filepath);
	dbip->dbi_filepath = NULL;	/* sanity */
    }

    bu_free((char *)dbip, "struct db_i");
}
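/*
 * Illustrative sketch, not part of the original source: db_close()
 * balances db_open(), and the directory must be built before lookups.
 * The path argument and "r" (read-only) mode here are placeholders.
 */
static struct db_i *
example_open(const char *path)
{
    struct db_i *dbip;

    if ((dbip = db_open(path, "r")) == DBI_NULL)
	return DBI_NULL;

    /* scan the file and populate the in-core directory */
    if (db_dirbuild(dbip) < 0) {
	db_close(dbip);
	return DBI_NULL;
    }

    return dbip;
}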
struct directory *
db_diradd(struct db_i *dbip, const char *name, off_t laddr, size_t len, int flags, void *ptr)
{
    struct directory **headp;
    struct directory *dp;
    const char *tmp_ptr;
    struct bu_vls local = BU_VLS_INIT_ZERO;

    RT_CK_DBI(dbip);

    if (RT_G_DEBUG&DEBUG_DB) {
	bu_log("db_diradd(dbip=%p, name='%s', addr=%ld, len=%zu, flags=0x%x, ptr=%p)\n",
	       (void *)dbip, name, (long)laddr, len, flags, ptr);
    }

    if ((tmp_ptr = strchr(name, '/')) != NULL) {
	/* if this is a version 4 database and the offending char is
	 * beyond NAMESIZE, then it is not really a problem
	 */
	if (db_version(dbip) < 5 && (tmp_ptr - name) < NAMESIZE) {
	    bu_log("db_diradd() object named '%s' is illegal, ignored\n", name);
	    return RT_DIR_NULL;
	}
    }

    if (db_version(dbip) < 5) {
	bu_vls_strncpy(&local, name, NAMESIZE);
    } else {
	/* must provide a valid minor type */
	if (!ptr) {
	    bu_log("WARNING: db_diradd() called with a null minor type pointer for object %s\nIgnoring %s\n", name, name);
	    bu_vls_free(&local);
	    return RT_DIR_NULL;
	}
	bu_vls_strcpy(&local, name);
    }

    if (db_dircheck(dbip, &local, 0, &headp) < 0) {
	bu_vls_free(&local);
	return RT_DIR_NULL;
    }

    /* 'name' not found in directory, add it */
    RT_GET_DIRECTORY(dp, &rt_uniresource);
    RT_CK_DIR(dp);
    RT_DIR_SET_NAMEP(dp, bu_vls_addr(&local));	/* sets d_namep */
    dp->d_addr = laddr;
    dp->d_flags = flags & ~(RT_DIR_INMEM);
    dp->d_len = len;
    dp->d_forw = *headp;
    BU_LIST_INIT(&dp->d_use_hd);
    *headp = dp;
    dp->d_animate = NULL;
    dp->d_nref = 0;
    dp->d_uses = 0;

    /* v4 geometry databases do not use d_major/minor_type */
    if (db_version(dbip) > 4) {
	dp->d_major_type = DB5_MAJORTYPE_BRLCAD;
	if (ptr)
	    dp->d_minor_type = *(unsigned char *)ptr;
	else
	    dp->d_minor_type = 0;
    } else {
	dp->d_major_type = 0;
	dp->d_minor_type = 0;
    }

    bu_vls_free(&local);
    return dp;
}
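/*
 * Illustrative sketch, not part of the original source: for a v5
 * database the minor type must be passed through "ptr".  The name,
 * address, and sphere minor type below are hypothetical.
 */
static struct directory *
example_register(struct db_i *dbip, const char *name, off_t addr, size_t len)
{
    unsigned char minor_type = DB5_MINORTYPE_BRLCAD_SPH;

    return db_diradd(dbip, name, addr, len, RT_DIR_SOLID, (void *)&minor_type);
}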
/**
 * R T _ F I N D _ I D E N T I C A L _ S O L I D
 *
 * See if solid "dp" as transformed by "mat" already exists in the
 * soltab list.  If it does, return the matching stp, otherwise,
 * create a new soltab structure, enroll it in the list, and return a
 * pointer to that.
 *
 * "mat" will be a null pointer when an identity matrix is signified.
 * This greatly speeds the comparison process.
 *
 * The two cases can be distinguished by the fact that stp->st_id will
 * be 0 for a new soltab structure, and non-zero for an existing one.
 *
 * This routine will run in parallel.
 *
 * In order to avoid a race between searching the soltab list and
 * adding new solids to it, the new solid to be added *must* be
 * enrolled in the list before exiting the critical section.
 *
 * To limit the size of the list to be searched, there are many lists.
 * The selection of which list is determined by the hash value
 * computed from the solid's name.  This is the same optimization used
 * in searching the directory lists.
 *
 * This subroutine is the critical bottleneck in parallel tree walking.
 *
 * It is safe, and much faster, to use several different critical
 * sections when searching different lists.
 *
 * There are only 4 dedicated semaphores defined, TREE0 through TREE3.
 * This unfortunately limits the code to having only 4 CPUs doing list
 * searching at any one time.  Hopefully, this is enough parallelism
 * to keep the rest of the CPUs doing I/O and actual solid prepping.
 *
 * Since the algorithm has been reduced from an O((nsolid/128)**2)
 * search on the entire rti_solidheads[hash] list to an O(ninstance)
 * search on the dp->d_use_hd list for this one solid, the critical
 * section should be relatively short-lived.  Having the 4-way split
 * should provide ample opportunity for parallelism through here,
 * while still ensuring that the necessary variables are protected.
 *
 * There are two critical variables which *both* need to be protected:
 * the specific rti_solidhead[hash] list head, and the specific
 * dp->d_use_hd list head.  Fortunately, since the selection of
 * critical section is based upon db_dirhash(dp->d_namep), any other
 * processor that wants to search this same 'dp' will get the same
 * hash as the current thread, and will thus wait for the appropriate
 * semaphore to be released.  Similarly, any other thread that wants
 * to search the same rti_solidhead[hash] list as the current thread
 * will be using the same hash, and will thus wait for the proper
 * semaphore.
 */
HIDDEN struct soltab *
rt_find_identical_solid(register const matp_t mat, register struct directory *dp, struct rt_i *rtip)
{
    register struct soltab *stp = RT_SOLTAB_NULL;
    int hash;

    RT_CK_DIR(dp);
    RT_CK_RTI(rtip);

    hash = db_dirhash(dp->d_namep);

    /* Enter the appropriate dual critical-section */
    ACQUIRE_SEMAPHORE_TREE(hash);

    /*
     * If solid has not been referenced yet, the search can be
     * skipped.  If solid is being referenced a _lot_, it certainly
     * isn't all going to be in the same place, so don't bother
     * searching.  Consider the case of a million instances of the
     * same tree submodel solid.
     */
    if (dp->d_uses > 0 && dp->d_uses < 100 &&
	rtip->rti_dont_instance == 0) {
	struct bu_list *mid;

	/* Search dp->d_use_hd list for other instances */
	for (BU_LIST_FOR(mid, bu_list, &dp->d_use_hd)) {
	    stp = BU_LIST_MAIN_PTR(soltab, mid, l2);
	    RT_CK_SOLTAB(stp);

	    if (stp->st_matp == (matp_t)0) {
		if (mat == (matp_t)0) {
		    /* Both have identity matrix */
		    goto more_checks;
		}
		continue;
	    }
	    if (mat == (matp_t)0)
		continue;	/* doesn't match */

	    if (!bn_mat_is_equal(mat, stp->st_matp, &rtip->rti_tol))
		continue;

	more_checks:
	    /* Don't instance this solid from some other model
	     * instance.  As this is nearly always equal, check it
	     * last.
	     */
	    if (stp->st_rtip != rtip)
		continue;

	    /*
	     * stp now points to re-referenced solid.  stp->st_id is
	     * non-zero, indicating pre-existing solid.
	     */
	    RT_CK_SOLTAB(stp);	/* sanity */

	    /* Only increment use counter for non-dead solids. */
	    if (!(stp->st_aradius <= -1))
		stp->st_uses++;

	    /* dp->d_uses is NOT incremented, because number of
	     * soltab's using it has not gone up.
	     */
	    if (RT_G_DEBUG & DEBUG_SOLIDS) {
		bu_log(mat ?
		       "rt_find_identical_solid: %s re-referenced %d\n" :
		       "rt_find_identical_solid: %s re-referenced %d (identity mat)\n",
		       dp->d_namep, stp->st_uses);
	    }

	    /* Leave the appropriate dual critical-section */
	    RELEASE_SEMAPHORE_TREE(hash);
	    return stp;
	}
    }

    /*
     * Create and link a new solid into the list.
     *
     * Ensure the search keys "dp", "st_mat" and "st_rtip" are stored
     * now, while still inside the critical section, because they are
     * searched on, above.
     */
    BU_GETSTRUCT(stp, soltab);
    stp->l.magic = RT_SOLTAB_MAGIC;
    stp->l2.magic = RT_SOLTAB2_MAGIC;
    stp->st_rtip = rtip;
    stp->st_dp = dp;
    dp->d_uses++;
    stp->st_uses = 1;
    /* stp->st_id is intentionally left zero here, as a flag */

    if (mat) {
	stp->st_matp = (matp_t)bu_malloc(sizeof(mat_t), "st_matp");
	MAT_COPY(stp->st_matp, mat);
    } else {
	stp->st_matp = (matp_t)0;
    }

    /* Add to the appropriate soltab list head */
    /* PARALLEL NOTE: Uses critical section on rt_solidheads element */
    BU_LIST_INSERT(&(rtip->rti_solidheads[hash]), &(stp->l));

    /* Also add to the directory structure list head */
    /* PARALLEL NOTE: Uses critical section on this 'dp' */
    BU_LIST_INSERT(&dp->d_use_hd, &(stp->l2));

    /*
     * Leave the 4-way critical-section protecting dp and [hash]
     */
    RELEASE_SEMAPHORE_TREE(hash);

    /* Enter an exclusive critical section to protect nsolids.
     * nsolids++ needs to be locked to a SINGLE thread.
     */
    bu_semaphore_acquire(RT_SEM_STATS);
    stp->st_bit = rtip->nsolids++;
    bu_semaphore_release(RT_SEM_STATS);

    /*
     * Fill in the last little bit of the structure in full parallel
     * mode, outside of any critical section.
     */

    /* Init tables of regions using this solid.  Usually small. */
    bu_ptbl_init(&stp->st_regions, 7, "st_regions ptbl");

    return stp;
}