template<>
void bvh_done<SVBVHTree>(SVBVHTree *obj)
{
	rtbuild_done(obj->builder, &obj->rayobj.control);

	//TODO find a way to exactly calculate the needed memory
	MemArena *arena1 = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE);
	BLI_memarena_use_malloc(arena1);

	MemArena *arena2 = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE);
	BLI_memarena_use_malloc(arena2);
	BLI_memarena_use_align(arena2, 16);

	//Build and optimize the tree
	if (0) {
		VBVHNode *root = BuildBinaryVBVH<VBVHNode>(arena1, &obj->rayobj.control).transform(obj->builder);
		if (RE_rayobjectcontrol_test_break(&obj->rayobj.control)) {
			BLI_memarena_free(arena1);
			BLI_memarena_free(arena2);
			return;
		}

		reorganize(root);
		remove_useless(root, &root);
		bvh_refit(root);
		pushup(root);
		pushdown(root);
		pushup_simd<VBVHNode, 4>(root);

		obj->root = Reorganize_SVBVH<VBVHNode>(arena2).transform(root);
	}
	else {
		//Finds the optimal packing of this tree using a given cost model
		//TODO this uses quite a lot of memory, find ways to reduce memory usage during building
		OVBVHNode *root = BuildBinaryVBVH<OVBVHNode>(arena1, &obj->rayobj.control).transform(obj->builder);
		if (RE_rayobjectcontrol_test_break(&obj->rayobj.control)) {
			BLI_memarena_free(arena1);
			BLI_memarena_free(arena2);
			return;
		}

		VBVH_optimalPackSIMD<OVBVHNode, PackCost>(PackCost()).transform(root);
		obj->root = Reorganize_SVBVH<OVBVHNode>(arena2).transform(root);
	}

	//Free data
	BLI_memarena_free(arena1);

	obj->node_arena = arena2;
	obj->cost = 1.0;

	rtbuild_free(obj->builder);
	obj->builder = NULL;
}
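/* A minimal sketch (not from the original sources) of the two-arena pattern used by
 * bvh_done() above: a throw-away arena for the intermediate binary tree and a 16-byte
 * aligned arena that stays alive with the packed result. The PackedNode type and the
 * build/pack steps are placeholders; the BLI_memarena_* calls follow BLI_memarena.h,
 * where older revisions (as in the function above) take no name argument. */

#include "BLI_memarena.h"

typedef struct PackedNode { float bb[4][4]; struct PackedNode *child; } PackedNode;

static PackedNode *build_packed_tree_example(MemArena **r_node_arena)
{
	/* temporary allocations: dropped in a single call once packing is done */
	MemArena *build_arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE);
	/* persistent, SIMD-aligned allocations: owned by the caller afterwards */
	MemArena *node_arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE);

	BLI_memarena_use_malloc(build_arena);    /* no zero-fill needed for scratch data */
	BLI_memarena_use_malloc(node_arena);
	BLI_memarena_use_align(node_arena, 16);  /* nodes hold SSE-friendly bounds */

	PackedNode *root = BLI_memarena_alloc(node_arena, sizeof(PackedNode));
	/* ... build an intermediate tree into build_arena, pack it into node_arena ... */

	BLI_memarena_free(build_arena);          /* all intermediate nodes released at once */
	*r_node_arena = node_arena;              /* caller frees this when the tree dies */
	return root;
}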
void BKE_mesh_loop_islands_init(MeshIslandStore *island_store,
                                const short item_type, const int items_num,
                                const short island_type, const short innercut_type)
{
	MemArena *mem = island_store->mem;

	if (mem == NULL) {
		mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
		island_store->mem = mem;
	}
	/* else memarena should be cleared */

	BLI_assert(ELEM(item_type, MISLAND_TYPE_VERT, MISLAND_TYPE_EDGE, MISLAND_TYPE_POLY, MISLAND_TYPE_LOOP));
	BLI_assert(ELEM(island_type, MISLAND_TYPE_VERT, MISLAND_TYPE_EDGE, MISLAND_TYPE_POLY, MISLAND_TYPE_LOOP));

	island_store->item_type = item_type;
	island_store->items_to_islands_num = items_num;
	island_store->items_to_islands = BLI_memarena_alloc(
	        mem, sizeof(*island_store->items_to_islands) * (size_t)items_num);

	island_store->island_type = island_type;
	island_store->islands_num_alloc = MISLAND_DEFAULT_BUFSIZE;
	island_store->islands = BLI_memarena_alloc(
	        mem, sizeof(*island_store->islands) * island_store->islands_num_alloc);

	island_store->innercut_type = innercut_type;
	island_store->innercuts = BLI_memarena_alloc(
	        mem, sizeof(*island_store->innercuts) * island_store->islands_num_alloc);
}
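/* Sketch (illustrative, not part of BKE_mesh) of the "store owns its arena" idea above:
 * the arena is created lazily, every array is carved out of it, and tearing the store down
 * is a single BLI_memarena_free() (or BLI_memarena_clear() when the store will be refilled).
 * The ItemStore struct and its fields are made up for the example. */

#include "BLI_memarena.h"

typedef struct ItemStore {
	MemArena *mem;
	int *item_to_group;
	int items_num;
} ItemStore;

static void item_store_init(ItemStore *store, const int items_num)
{
	if (store->mem == NULL) {
		store->mem = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
	}

	store->items_num = items_num;
	store->item_to_group = BLI_memarena_alloc(
	        store->mem, sizeof(*store->item_to_group) * (size_t)items_num);
}

static void item_store_free(ItemStore *store)
{
	BLI_memarena_free(store->mem);  /* releases every array allocated above */
	store->mem = NULL;
}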
/* presumed to be called when no threads are running */
void IMB_tile_cache_params(int totthread, int maxmem)
{
	int a;

	/* always one cache for non-threaded access */
	totthread++;

	/* lazy initialize cache */
	if(GLOBAL_CACHE.totthread == totthread && GLOBAL_CACHE.maxmem == maxmem)
		return;

	imb_tile_cache_exit();

	memset(&GLOBAL_CACHE, 0, sizeof(ImGlobalTileCache));

	GLOBAL_CACHE.tilehash= BLI_ghash_new(imb_global_tile_hash, imb_global_tile_cmp, "tile_cache_params gh");

	GLOBAL_CACHE.memarena= BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "ImTileCache arena");
	BLI_memarena_use_calloc(GLOBAL_CACHE.memarena);

	GLOBAL_CACHE.maxmem= maxmem*1024*1024;

	GLOBAL_CACHE.totthread= totthread;
	for(a=0; a<totthread; a++)
		imb_thread_cache_init(&GLOBAL_CACHE.thread_cache[a]);

	BLI_mutex_init(&GLOBAL_CACHE.mutex);
}
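/* Sketch (assumption, not IMB code) of the calloc-mode arena set up above: once
 * BLI_memarena_use_calloc() has been called, every BLI_memarena_alloc() hands back
 * zero-initialized memory, so per-tile structs need no explicit memset. The Tile
 * struct here is hypothetical. */

#include "BLI_memarena.h"

typedef struct Tile { void *rect; int refcount; } Tile;

static MemArena *tile_arena_create_example(void)
{
	MemArena *arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "tile arena (example)");
	BLI_memarena_use_calloc(arena);
	return arena;
}

static Tile *tile_new_example(MemArena *arena)
{
	/* rect and refcount arrive already zeroed because the arena is in calloc mode */
	return BLI_memarena_alloc(arena, sizeof(Tile));
}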
Heap *BLI_heap_new()
{
	Heap *heap = (Heap*)MEM_callocN(sizeof(Heap), "BLIHeap");
	heap->bufsize = 1;
	heap->tree = (HeapNode**)MEM_mallocN(sizeof(HeapNode*), "BLIHeapTree");
	heap->arena = BLI_memarena_new(1<<16);

	return heap;
}
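/* Sketch of the matching teardown (assumed; the real BLI_heap_free() also takes an
 * optional per-value free callback): since every HeapNode is allocated from heap->arena,
 * destroying the heap is one arena free plus the pointer buffer and the header. */

static void heap_free_example(Heap *heap)
{
	BLI_memarena_free(heap->arena);  /* drops all HeapNodes in one call */
	MEM_freeN(heap->tree);
	MEM_freeN(heap);
}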
BME_TransData_Head *BME_init_transdata(int bufsize)
{
	BME_TransData_Head *td;

	td = MEM_callocN(sizeof(BME_TransData_Head), "BMesh transdata header");
	td->gh = BLI_ghash_new(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "BME_init_transdata gh");
	td->ma = BLI_memarena_new(bufsize, "BME_TransData arena");
	BLI_memarena_use_calloc(td->ma);

	return td;
}
StrandShadeCache *strand_shade_cache_create(void)
{
	StrandShadeCache *cache;

	cache= MEM_callocN(sizeof(StrandShadeCache), "StrandShadeCache");
	cache->resulthash= BLI_ghash_pair_new("strand_shade_cache_create1 gh");
	cache->refcounthash= BLI_ghash_pair_new("strand_shade_cache_create2 gh");
	cache->memarena= BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "strand shade cache arena");

	return cache;
}
template<>
void bvh_done<QBVHTree>(QBVHTree *obj)
{
	rtbuild_done(obj->builder, &obj->rayobj.control);

	//TODO find a way to exactly calculate the needed memory
	MemArena *arena1 = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "qbvh arena");
	BLI_memarena_use_malloc(arena1);

	MemArena *arena2 = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "qbvh arena 2");
	BLI_memarena_use_malloc(arena2);
	BLI_memarena_use_align(arena2, 16);

	//Build and optimize the tree
	//TODO do this in 1 pass (half memory usage during building)
	VBVHNode *root = BuildBinaryVBVH<VBVHNode>(arena1, &obj->rayobj.control).transform(obj->builder);

	if (RE_rayobjectcontrol_test_break(&obj->rayobj.control)) {
		BLI_memarena_free(arena1);
		BLI_memarena_free(arena2);
		return;
	}

	if (root) {
		pushup_simd<VBVHNode, 4>(root);
		obj->root = Reorganize_SVBVH<VBVHNode>(arena2).transform(root);
	}
	else
		obj->root = NULL;

	//Free data
	BLI_memarena_free(arena1);

	obj->node_arena = arena2;
	obj->cost = 1.0;

	rtbuild_free(obj->builder);
	obj->builder = NULL;
}
static void push_propagate_stack(SmoothEdge *edge, SmoothVert *vert, SmoothMesh *mesh)
{
	PropagateEdge *pedge = mesh->reusestack.first;

	if(pedge) {
		BLI_remlink(&mesh->reusestack, pedge);
	}
	else {
		if(!mesh->arena) {
			mesh->arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "edgesplit arena");
			BLI_memarena_use_calloc(mesh->arena);
		}

		pedge = BLI_memarena_alloc(mesh->arena, sizeof(PropagateEdge));
	}

	pedge->edge = edge;
	pedge->vert = vert;
	BLI_addhead(&mesh->propagatestack, pedge);
}
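/* Complementary sketch (assumed, mirroring the pop side in the edgesplit modifier):
 * arena nodes are never freed individually - a popped PropagateEdge is parked on the
 * reuse stack so the next push_propagate_stack() call can recycle it, and the arena
 * itself is only freed when the SmoothMesh is destroyed. */

static void pop_propagate_stack_example(SmoothEdge **edge, SmoothVert **vert, SmoothMesh *mesh)
{
	PropagateEdge *pedge = mesh->propagatestack.first;

	if(pedge) {
		*edge = pedge->edge;
		*vert = pedge->vert;
		BLI_remlink(&mesh->propagatestack, pedge);
		BLI_addhead(&mesh->reusestack, pedge);
	}
	else {
		*edge = NULL;
		*vert = NULL;
	}
}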
void scatter_tree_build(ScatterTree *tree)
{
	ScatterPoint *newpoints, **tmppoints;
	float mid[3], size[3];
	int totpoint= tree->totpoint;

	newpoints = MEM_callocN(sizeof(ScatterPoint) * totpoint, "ScatterPoints");
	tmppoints = MEM_callocN(sizeof(ScatterPoint *) * totpoint, "ScatterTmpPoints");
	tree->tmppoints= tmppoints;

	tree->arena= BLI_memarena_new(0x8000 * sizeof(ScatterNode), "sss tree arena");
	BLI_memarena_use_calloc(tree->arena);

	/* build tree */
	tree->root= BLI_memarena_alloc(tree->arena, sizeof(ScatterNode));
	tree->root->points= newpoints;
	tree->root->totpoint= totpoint;

	mid[0]= (tree->min[0]+tree->max[0])*0.5f;
	mid[1]= (tree->min[1]+tree->max[1])*0.5f;
	mid[2]= (tree->min[2]+tree->max[2])*0.5f;

	size[0]= (tree->max[0]-tree->min[0])*0.5f;
	size[1]= (tree->max[1]-tree->min[1])*0.5f;
	size[2]= (tree->max[2]-tree->min[2])*0.5f;

	create_octree_node(tree, tree->root, mid, size, tree->refpoints, 0);

	MEM_freeN(tree->points);
	MEM_freeN(tree->refpoints);
	MEM_freeN(tree->tmppoints);
	tree->refpoints= NULL;
	tree->tmppoints= NULL;
	tree->points= newpoints;

	/* sum radiance at nodes */
	sum_radiance(tree, tree->root);
}
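/* Sketch (illustrative) of the sizing choice above: the arena buffer size is expressed as
 * an expected node count times sizeof(ScatterNode), so each chunk holds a whole number of
 * octree nodes, and calloc mode means freshly allocated nodes start zeroed.
 * EXPECTED_NODE_COUNT is a made-up constant. */

#define EXPECTED_NODE_COUNT 0x8000

static MemArena *scatter_arena_create_example(void)
{
	MemArena *arena = BLI_memarena_new(EXPECTED_NODE_COUNT * sizeof(ScatterNode), "sss tree arena (example)");
	BLI_memarena_use_calloc(arena);
	return arena;
}

static ScatterNode *scatter_node_new_example(MemArena *arena)
{
	/* zero-initialized thanks to BLI_memarena_use_calloc() on the arena */
	return BLI_memarena_alloc(arena, sizeof(ScatterNode));
}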
static void harmonic_coordinates_bind(Scene *UNUSED(scene), MeshDeformModifierData *mmd, MeshDeformBind *mdb) { MDefBindInfluence *inf; MDefInfluence *mdinf; MDefCell *cell; float center[3], vec[3], maxwidth, totweight; int a, b, x, y, z, totinside, offset; /* compute bounding box of the cage mesh */ INIT_MINMAX(mdb->min, mdb->max); for (a = 0; a < mdb->totcagevert; a++) minmax_v3v3_v3(mdb->min, mdb->max, mdb->cagecos[a]); /* allocate memory */ mdb->size = (2 << (mmd->gridsize - 1)) + 2; mdb->size3 = mdb->size * mdb->size * mdb->size; mdb->tag = MEM_callocN(sizeof(int) * mdb->size3, "MeshDeformBindTag"); mdb->phi = MEM_callocN(sizeof(float) * mdb->size3, "MeshDeformBindPhi"); mdb->totalphi = MEM_callocN(sizeof(float) * mdb->size3, "MeshDeformBindTotalPhi"); mdb->boundisect = MEM_callocN(sizeof(*mdb->boundisect) * mdb->size3, "MDefBoundIsect"); mdb->semibound = MEM_callocN(sizeof(int) * mdb->size3, "MDefSemiBound"); mdb->bvhtree = bvhtree_from_mesh_looptri(&mdb->bvhdata, mdb->cagedm, FLT_EPSILON * 100, 4, 6); mdb->inside = MEM_callocN(sizeof(int) * mdb->totvert, "MDefInside"); if (mmd->flag & MOD_MDEF_DYNAMIC_BIND) mdb->dyngrid = MEM_callocN(sizeof(MDefBindInfluence *) * mdb->size3, "MDefDynGrid"); else mdb->weights = MEM_callocN(sizeof(float) * mdb->totvert * mdb->totcagevert, "MDefWeights"); mdb->memarena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "harmonic coords arena"); BLI_memarena_use_calloc(mdb->memarena); /* initialize data from 'cagedm' for reuse */ { DerivedMesh *dm = mdb->cagedm; mdb->cagedm_cache.mpoly = dm->getPolyArray(dm); mdb->cagedm_cache.mloop = dm->getLoopArray(dm); mdb->cagedm_cache.looptri = dm->getLoopTriArray(dm); mdb->cagedm_cache.poly_nors = dm->getPolyDataArray(dm, CD_NORMAL); /* can be NULL */ } /* make bounding box equal size in all directions, add padding, and compute * width of the cells */ maxwidth = -1.0f; for (a = 0; a < 3; a++) if (mdb->max[a] - mdb->min[a] > maxwidth) maxwidth = mdb->max[a] - mdb->min[a]; for (a = 0; a < 3; a++) { center[a] = (mdb->min[a] + mdb->max[a]) * 0.5f; mdb->min[a] = center[a] - maxwidth * 0.5f; mdb->max[a] = center[a] + maxwidth * 0.5f; mdb->width[a] = (mdb->max[a] - mdb->min[a]) / (mdb->size - 4); mdb->min[a] -= 2.1f * mdb->width[a]; mdb->max[a] += 2.1f * mdb->width[a]; mdb->width[a] = (mdb->max[a] - mdb->min[a]) / mdb->size; mdb->halfwidth[a] = mdb->width[a] * 0.5f; } progress_bar(0, "Setting up mesh deform system"); totinside = 0; for (a = 0; a < mdb->totvert; a++) { copy_v3_v3(vec, mdb->vertexcos[a]); mdb->inside[a] = meshdeform_inside_cage(mdb, vec); if (mdb->inside[a]) totinside++; } /* free temporary MDefBoundIsects */ BLI_memarena_free(mdb->memarena); mdb->memarena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "harmonic coords arena"); /* start with all cells untyped */ for (a = 0; a < mdb->size3; a++) mdb->tag[a] = MESHDEFORM_TAG_UNTYPED; /* detect intersections and tag boundary cells */ for (z = 0; z < mdb->size; z++) for (y = 0; y < mdb->size; y++) for (x = 0; x < mdb->size; x++) meshdeform_add_intersections(mdb, x, y, z); /* compute exterior and interior tags */ meshdeform_bind_floodfill(mdb); for (z = 0; z < mdb->size; z++) for (y = 0; y < mdb->size; y++) for (x = 0; x < mdb->size; x++) meshdeform_check_semibound(mdb, x, y, z); /* solve */ meshdeform_matrix_solve(mmd, mdb); /* assign results */ if (mmd->flag & MOD_MDEF_DYNAMIC_BIND) { mmd->totinfluence = 0; for (a = 0; a < mdb->size3; a++) for (inf = mdb->dyngrid[a]; inf; inf = inf->next) mmd->totinfluence++; /* convert MDefBindInfluences to smaller MDefInfluences 
*/ mmd->dyngrid = MEM_callocN(sizeof(MDefCell) * mdb->size3, "MDefDynGrid"); mmd->dyninfluences = MEM_callocN(sizeof(MDefInfluence) * mmd->totinfluence, "MDefInfluence"); offset = 0; for (a = 0; a < mdb->size3; a++) { cell = &mmd->dyngrid[a]; cell->offset = offset; totweight = 0.0f; mdinf = mmd->dyninfluences + cell->offset; for (inf = mdb->dyngrid[a]; inf; inf = inf->next, mdinf++) { mdinf->weight = inf->weight; mdinf->vertex = inf->vertex; totweight += mdinf->weight; cell->totinfluence++; } if (totweight > 0.0f) { mdinf = mmd->dyninfluences + cell->offset; for (b = 0; b < cell->totinfluence; b++, mdinf++) mdinf->weight /= totweight; } offset += cell->totinfluence; } mmd->dynverts = mdb->inside; mmd->dyngridsize = mdb->size; copy_v3_v3(mmd->dyncellmin, mdb->min); mmd->dyncellwidth = mdb->width[0]; MEM_freeN(mdb->dyngrid); } else { mmd->bindweights = mdb->weights; MEM_freeN(mdb->inside); } MEM_freeN(mdb->tag); MEM_freeN(mdb->phi); MEM_freeN(mdb->totalphi); MEM_freeN(mdb->boundisect); MEM_freeN(mdb->semibound); BLI_memarena_free(mdb->memarena); free_bvhtree_from_mesh(&mdb->bvhdata); }
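/* Sketch (assumption) of the free-and-recreate step in the middle of the function above:
 * once the temporary MDefBoundIsect data is no longer needed, dropping the whole arena and
 * creating a fresh one discards every allocation at once, without tracking individual
 * pointers. BLI_memarena_clear() would do the same while keeping the arena object itself. */

static void arena_reset_example(MemArena **arena, const char *name)
{
	BLI_memarena_free(*arena);
	*arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, name);
}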
void BLI_scanfill_begin(ScanFillContext *sf_ctx)
{
	memset(sf_ctx, 0, sizeof(*sf_ctx));
	sf_ctx->poly_nr = SF_POLY_UNSET;
	sf_ctx->arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__);
}
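/* Sketch (assumed) of the caller-owned arena variant used by the mask rasterizer and
 * BKE_displist_fill() further on: BLI_scanfill_begin_arena()/BLI_scanfill_end_arena() let
 * many fill passes share a single arena, which is created once and freed once at the end. */

static void scanfill_many_example(MemArena *sf_arena)
{
	ScanFillContext sf_ctx;

	/* ... repeated for every polygon to be filled ... */
	BLI_scanfill_begin_arena(&sf_ctx, sf_arena);
	/* add verts with BLI_scanfill_vert_add(), edges with BLI_scanfill_edge_add(),
	 * then triangulate with BLI_scanfill_calc_ex() */
	BLI_scanfill_end_arena(&sf_ctx, sf_arena);
}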
void BKE_maskrasterize_handle_init(MaskRasterHandle *mr_handle, struct Mask *mask, const int width, const int height, const bool do_aspect_correct, const bool do_mask_aa, const bool do_feather) { const rctf default_bounds = {0.0f, 1.0f, 0.0f, 1.0f}; const float pixel_size = 1.0f / (float)min_ii(width, height); const float asp_xy[2] = {(do_aspect_correct && width > height) ? (float)height / (float)width : 1.0f, (do_aspect_correct && width < height) ? (float)width / (float)height : 1.0f}; const float zvec[3] = {0.0f, 0.0f, 1.0f}; MaskLayer *masklay; unsigned int masklay_index; MemArena *sf_arena; mr_handle->layers_tot = (unsigned int)BLI_countlist(&mask->masklayers); mr_handle->layers = MEM_mallocN(sizeof(MaskRasterLayer) * mr_handle->layers_tot, "MaskRasterLayer"); BLI_rctf_init_minmax(&mr_handle->bounds); sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__); for (masklay = mask->masklayers.first, masklay_index = 0; masklay; masklay = masklay->next, masklay_index++) { /* we need to store vertex ranges for open splines for filling */ unsigned int tot_splines; MaskRasterSplineInfo *open_spline_ranges; unsigned int open_spline_index = 0; MaskSpline *spline; /* scanfill */ ScanFillContext sf_ctx; ScanFillVert *sf_vert = NULL; ScanFillVert *sf_vert_next = NULL; ScanFillFace *sf_tri; unsigned int sf_vert_tot = 0; unsigned int tot_feather_quads = 0; #ifdef USE_SCANFILL_EDGE_WORKAROUND unsigned int tot_boundary_used = 0; unsigned int tot_boundary_found = 0; #endif if (masklay->restrictflag & MASK_RESTRICT_RENDER) { /* skip the layer */ mr_handle->layers_tot--; masklay_index--; continue; } tot_splines = (unsigned int)BLI_countlist(&masklay->splines); open_spline_ranges = MEM_callocN(sizeof(*open_spline_ranges) * tot_splines, __func__); BLI_scanfill_begin_arena(&sf_ctx, sf_arena); for (spline = masklay->splines.first; spline; spline = spline->next) { const bool is_cyclic = (spline->flag & MASK_SPLINE_CYCLIC) != 0; const bool is_fill = (spline->flag & MASK_SPLINE_NOFILL) == 0; float (*diff_points)[2]; unsigned int tot_diff_point; float (*diff_feather_points)[2]; float (*diff_feather_points_flip)[2]; unsigned int tot_diff_feather_points; const unsigned int resol_a = BKE_mask_spline_resolution(spline, width, height) / 4; const unsigned int resol_b = BKE_mask_spline_feather_resolution(spline, width, height) / 4; const unsigned int resol = CLAMPIS(MAX2(resol_a, resol_b), 4, 512); diff_points = BKE_mask_spline_differentiate_with_resolution( spline, &tot_diff_point, resol); if (do_feather) { diff_feather_points = BKE_mask_spline_feather_differentiated_points_with_resolution( spline, &tot_diff_feather_points, resol, FALSE); BLI_assert(diff_feather_points); } else { tot_diff_feather_points = 0; diff_feather_points = NULL; } if (tot_diff_point > 3) { ScanFillVert *sf_vert_prev; unsigned int j; float co[3]; co[2] = 0.0f; sf_ctx.poly_nr++; if (do_aspect_correct) { if (width != height) { float *fp; float *ffp; unsigned int i; float asp; if (width < height) { fp = &diff_points[0][0]; ffp = tot_diff_feather_points ? &diff_feather_points[0][0] : NULL; asp = (float)width / (float)height; } else { fp = &diff_points[0][1]; ffp = tot_diff_feather_points ? 
&diff_feather_points[0][1] : NULL; asp = (float)height / (float)width; } for (i = 0; i < tot_diff_point; i++, fp += 2) { (*fp) = (((*fp) - 0.5f) / asp) + 0.5f; } if (tot_diff_feather_points) { for (i = 0; i < tot_diff_feather_points; i++, ffp += 2) { (*ffp) = (((*ffp) - 0.5f) / asp) + 0.5f; } } } } /* fake aa, using small feather */ if (do_mask_aa == TRUE) { if (do_feather == FALSE) { tot_diff_feather_points = tot_diff_point; diff_feather_points = MEM_mallocN(sizeof(*diff_feather_points) * (size_t)tot_diff_feather_points, __func__); /* add single pixel feather */ maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points, tot_diff_point, pixel_size, FALSE); } else { /* ensure single pixel feather, on any zero feather areas */ maskrasterize_spline_differentiate_point_outset(diff_feather_points, diff_points, tot_diff_point, pixel_size, TRUE); } } if (is_fill) { /* applt intersections depending on fill settings */ if (spline->flag & MASK_SPLINE_NOINTERSECT) { BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points, tot_diff_feather_points); } copy_v2_v2(co, diff_points[0]); sf_vert_prev = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert_prev->tmp.u = sf_vert_tot; sf_vert_prev->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ sf_vert_tot++; /* TODO, an alternate functions so we can avoid double vector copy! */ for (j = 1; j < tot_diff_point; j++) { copy_v2_v2(co, diff_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ sf_vert_tot++; } sf_vert = sf_vert_prev; sf_vert_prev = sf_ctx.fillvertbase.last; for (j = 0; j < tot_diff_point; j++) { ScanFillEdge *sf_edge = BLI_scanfill_edge_add(&sf_ctx, sf_vert_prev, sf_vert); #ifdef USE_SCANFILL_EDGE_WORKAROUND if (diff_feather_points) { sf_edge->tmp.c = SF_EDGE_IS_BOUNDARY; tot_boundary_used++; } #else (void)sf_edge; #endif sf_vert_prev = sf_vert; sf_vert = sf_vert->next; } if (diff_feather_points) { float co_feather[3]; co_feather[2] = 1.0f; BLI_assert(tot_diff_feather_points == tot_diff_point); /* note: only added for convenience, we don't infact use these to scanfill, * only to create feather faces after scanfill */ for (j = 0; j < tot_diff_feather_points; j++) { copy_v2_v2(co_feather, diff_feather_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); /* no need for these attrs */ #if 0 sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = sf_vert_tot + tot_diff_point; /* absolute index of feather vert */ #endif sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += tot_diff_point; } } else { /* unfilled spline */ if (diff_feather_points) { float co_diff[2]; float co_feather[3]; co_feather[2] = 1.0f; if (spline->flag & MASK_SPLINE_NOINTERSECT) { diff_feather_points_flip = MEM_mallocN(sizeof(float) * 2 * tot_diff_feather_points, "diff_feather_points_flip"); for (j = 0; j < tot_diff_point; j++) { sub_v2_v2v2(co_diff, diff_points[j], diff_feather_points[j]); add_v2_v2v2(diff_feather_points_flip[j], diff_points[j], co_diff); } BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points, tot_diff_feather_points); BKE_mask_spline_feather_collapse_inner_loops(spline, diff_feather_points_flip, tot_diff_feather_points); } else { diff_feather_points_flip = NULL; } open_spline_ranges[open_spline_index].vertex_offset = sf_vert_tot; open_spline_ranges[open_spline_index].vertex_total = tot_diff_point; /* TODO, an alternate functions so 
we can avoid double vector copy! */ for (j = 0; j < tot_diff_point; j++) { /* center vert */ copy_v2_v2(co, diff_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; /* feather vert A */ copy_v2_v2(co_feather, diff_feather_points[j]); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; /* feather vert B */ if (diff_feather_points_flip) { copy_v2_v2(co_feather, diff_feather_points_flip[j]); } else { sub_v2_v2v2(co_diff, co, co_feather); add_v2_v2v2(co_feather, co, co_diff); } sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; tot_feather_quads += 2; } if (!is_cyclic) { tot_feather_quads -= 2; } if (diff_feather_points_flip) { MEM_freeN(diff_feather_points_flip); diff_feather_points_flip = NULL; } /* cap ends */ /* dummy init value */ open_spline_ranges[open_spline_index].vertex_total_cap_head = 0; open_spline_ranges[open_spline_index].vertex_total_cap_tail = 0; if (!is_cyclic) { float *fp_cent; float *fp_turn; unsigned int k; fp_cent = diff_points[0]; fp_turn = diff_feather_points[0]; #define CALC_CAP_RESOL \ clampis_uint((unsigned int )(len_v2v2(fp_cent, fp_turn) / \ (pixel_size * SPLINE_RESOL_CAP_PER_PIXEL)), \ SPLINE_RESOL_CAP_MIN, SPLINE_RESOL_CAP_MAX) { const unsigned int vertex_total_cap = CALC_CAP_RESOL; for (k = 1; k < vertex_total_cap; k++) { const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI; rotate_point_v2(co_feather, fp_turn, fp_cent, angle, asp_xy); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += vertex_total_cap; open_spline_ranges[open_spline_index].vertex_total_cap_head = vertex_total_cap; } fp_cent = diff_points[tot_diff_point - 1]; fp_turn = diff_feather_points[tot_diff_point - 1]; { const unsigned int vertex_total_cap = CALC_CAP_RESOL; for (k = 1; k < vertex_total_cap; k++) { const float angle = (float)k * (1.0f / (float)vertex_total_cap) * (float)M_PI; rotate_point_v2(co_feather, fp_turn, fp_cent, -angle, asp_xy); sf_vert = BLI_scanfill_vert_add(&sf_ctx, co_feather); sf_vert->tmp.u = sf_vert_tot; sf_vert->keyindex = SF_KEYINDEX_TEMP_ID; sf_vert_tot++; } tot_feather_quads += vertex_total_cap; open_spline_ranges[open_spline_index].vertex_total_cap_tail = vertex_total_cap; } } open_spline_ranges[open_spline_index].is_cyclic = is_cyclic; open_spline_index++; #undef CALC_CAP_RESOL /* end capping */ } } } if (diff_points) { MEM_freeN(diff_points); } if (diff_feather_points) { MEM_freeN(diff_feather_points); } } { unsigned int (*face_array)[4], *face; /* access coords */ float (*face_coords)[3], *cos; /* xy, z 0-1 (1.0 == filled) */ unsigned int sf_tri_tot; rctf bounds; unsigned int face_index; int scanfill_flag = 0; bool is_isect = false; ListBase isect_remvertbase = {NULL, NULL}; ListBase isect_remedgebase = {NULL, NULL}; /* now we have all the splines */ face_coords = MEM_mallocN((sizeof(float) * 3) * sf_vert_tot, "maskrast_face_coords"); /* init bounds */ BLI_rctf_init_minmax(&bounds); /* coords */ cos = (float *)face_coords; for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert_next) { sf_vert_next = sf_vert->next; copy_v3_v3(cos, sf_vert->co); /* remove so as not to interfere with fill (called after) */ if (sf_vert->keyindex == SF_KEYINDEX_TEMP_ID) { 
BLI_remlink(&sf_ctx.fillvertbase, sf_vert); } /* bounds */ BLI_rctf_do_minmax_v(&bounds, cos); cos += 3; } /* --- inefficient self-intersect case --- */ /* if self intersections are found, its too trickty to attempt to map vertices * so just realloc and add entirely new vertices - the result of the self-intersect check */ if ((masklay->flag & MASK_LAYERFLAG_FILL_OVERLAP) && (is_isect = BLI_scanfill_calc_self_isect(&sf_ctx, &isect_remvertbase, &isect_remedgebase))) { unsigned int sf_vert_tot_isect = (unsigned int)BLI_countlist(&sf_ctx.fillvertbase); unsigned int i = sf_vert_tot; face_coords = MEM_reallocN(face_coords, sizeof(float[3]) * (sf_vert_tot + sf_vert_tot_isect)); cos = (float *)&face_coords[sf_vert_tot][0]; for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert->next) { copy_v3_v3(cos, sf_vert->co); sf_vert->tmp.u = i++; cos += 3; } sf_vert_tot += sf_vert_tot_isect; /* we need to calc polys after self intersect */ scanfill_flag |= BLI_SCANFILL_CALC_POLYS; } /* --- end inefficient code --- */ /* main scan-fill */ if ((masklay->flag & MASK_LAYERFLAG_FILL_DISCRETE) == 0) scanfill_flag |= BLI_SCANFILL_CALC_HOLES; sf_tri_tot = (unsigned int)BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, zvec); if (is_isect) { /* add removed data back, we only need edges for feather, * but add verts back so they get freed along with others */ BLI_movelisttolist(&sf_ctx.fillvertbase, &isect_remvertbase); BLI_movelisttolist(&sf_ctx.filledgebase, &isect_remedgebase); } face_array = MEM_mallocN(sizeof(*face_array) * ((size_t)sf_tri_tot + (size_t)tot_feather_quads), "maskrast_face_index"); face_index = 0; /* faces */ face = (unsigned int *)face_array; for (sf_tri = sf_ctx.fillfacebase.first; sf_tri; sf_tri = sf_tri->next) { *(face++) = sf_tri->v3->tmp.u; *(face++) = sf_tri->v2->tmp.u; *(face++) = sf_tri->v1->tmp.u; *(face++) = TRI_VERT; face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } /* start of feather faces... 
if we have this set, * 'face_index' is kept from loop above */ BLI_assert(face_index == sf_tri_tot); if (tot_feather_quads) { ScanFillEdge *sf_edge; for (sf_edge = sf_ctx.filledgebase.first; sf_edge; sf_edge = sf_edge->next) { if (sf_edge->tmp.c == SF_EDGE_IS_BOUNDARY) { *(face++) = sf_edge->v1->tmp.u; *(face++) = sf_edge->v2->tmp.u; *(face++) = sf_edge->v2->keyindex; *(face++) = sf_edge->v1->keyindex; face_index++; FACE_ASSERT(face - 4, sf_vert_tot); #ifdef USE_SCANFILL_EDGE_WORKAROUND tot_boundary_found++; #endif } } } #ifdef USE_SCANFILL_EDGE_WORKAROUND if (tot_boundary_found != tot_boundary_used) { BLI_assert(tot_boundary_found < tot_boundary_used); } #endif /* feather only splines */ while (open_spline_index > 0) { const unsigned int vertex_offset = open_spline_ranges[--open_spline_index].vertex_offset; unsigned int vertex_total = open_spline_ranges[ open_spline_index].vertex_total; unsigned int vertex_total_cap_head = open_spline_ranges[ open_spline_index].vertex_total_cap_head; unsigned int vertex_total_cap_tail = open_spline_ranges[ open_spline_index].vertex_total_cap_tail; unsigned int k, j; j = vertex_offset; /* subtract one since we reference next vertex triple */ for (k = 0; k < vertex_total - 1; k++, j += 3) { BLI_assert(j == vertex_offset + (k * 3)); *(face++) = j + 3; /* next span */ /* z 1 */ *(face++) = j + 0; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = j + 4; /* next span */ /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = j + 0; /* z 1 */ *(face++) = j + 3; /* next span */ /* z 1 */ *(face++) = j + 5; /* next span */ /* z 0 */ *(face++) = j + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } if (open_spline_ranges[open_spline_index].is_cyclic) { *(face++) = vertex_offset + 0; /* next span */ /* z 1 */ *(face++) = j + 0; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = vertex_offset + 1; /* next span */ /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = j + 0; /* z 1 */ *(face++) = vertex_offset + 0; /* next span */ /* z 1 */ *(face++) = vertex_offset + 2; /* next span */ /* z 0 */ *(face++) = j + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } else { unsigned int midvidx = vertex_offset; /*************** * cap end 'a' */ j = midvidx + (vertex_total * 3); for (k = 0; k < vertex_total_cap_head - 2; k++, j++) { *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + 0; /* z 0 */ *(face++) = j + 1; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } j = vertex_offset + (vertex_total * 3); /* 2 tris that join the original */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 1; /* z 0 */ *(face++) = j + 0; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + vertex_total_cap_head - 2; /* z 0 */ *(face++) = midvidx + 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); /*************** * cap end 'b' */ /* ... 
same as previous but v 2-3 flipped, and different initial offsets */ j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1); midvidx = vertex_offset + (vertex_total * 3) - 3; for (k = 0; k < vertex_total_cap_tail - 2; k++, j++) { *(face++) = midvidx; /* z 1 */ *(face++) = midvidx; /* z 1 */ *(face++) = j + 1; /* z 0 */ *(face++) = j + 0; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } j = vertex_offset + (vertex_total * 3) + (vertex_total_cap_head - 1); /* 2 tris that join the original */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = j + 0; /* z 0 */ *(face++) = midvidx + 1; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 0; /* z 1 */ *(face++) = midvidx + 2; /* z 0 */ *(face++) = j + vertex_total_cap_tail - 2; /* z 0 */ face_index++; FACE_ASSERT(face - 4, sf_vert_tot); } } MEM_freeN(open_spline_ranges); // fprintf(stderr, "%u %u (%u %u), %u\n", face_index, sf_tri_tot + tot_feather_quads, sf_tri_tot, tot_feather_quads, tot_boundary_used - tot_boundary_found); #ifdef USE_SCANFILL_EDGE_WORKAROUND BLI_assert(face_index + (tot_boundary_used - tot_boundary_found) == sf_tri_tot + tot_feather_quads); #else BLI_assert(face_index == sf_tri_tot + tot_feather_quads); #endif { MaskRasterLayer *layer = &mr_handle->layers[masklay_index]; if (BLI_rctf_isect(&default_bounds, &bounds, &bounds)) { #ifdef USE_SCANFILL_EDGE_WORKAROUND layer->face_tot = (sf_tri_tot + tot_feather_quads) - (tot_boundary_used - tot_boundary_found); #else layer->face_tot = (sf_tri_tot + tot_feather_quads); #endif layer->face_coords = face_coords; layer->face_array = face_array; layer->bounds = bounds; layer_bucket_init(layer, pixel_size); BLI_rctf_union(&mr_handle->bounds, &bounds); } else { MEM_freeN(face_coords); MEM_freeN(face_array); layer_bucket_init_dummy(layer); } /* copy as-is */ layer->alpha = masklay->alpha; layer->blend = masklay->blend; layer->blend_flag = masklay->blend_flag; layer->falloff = masklay->falloff; } /* printf("tris %d, feather tris %d\n", sf_tri_tot, tot_feather_quads); */ } /* add trianges */ BLI_scanfill_end_arena(&sf_ctx, sf_arena); } BLI_memarena_free(sf_arena); }
static void layer_bucket_init(MaskRasterLayer *layer, const float pixel_size) { MemArena *arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), __func__); const float bucket_dim_x = BLI_rctf_size_x(&layer->bounds); const float bucket_dim_y = BLI_rctf_size_y(&layer->bounds); layer->buckets_x = (unsigned int)((bucket_dim_x / pixel_size) / (float)BUCKET_PIXELS_PER_CELL); layer->buckets_y = (unsigned int)((bucket_dim_y / pixel_size) / (float)BUCKET_PIXELS_PER_CELL); // printf("bucket size %ux%u\n", layer->buckets_x, layer->buckets_y); CLAMP(layer->buckets_x, 8, 512); CLAMP(layer->buckets_y, 8, 512); layer->buckets_xy_scalar[0] = (1.0f / (bucket_dim_x + FLT_EPSILON)) * (float)layer->buckets_x; layer->buckets_xy_scalar[1] = (1.0f / (bucket_dim_y + FLT_EPSILON)) * (float)layer->buckets_y; { /* width and height of each bucket */ const float bucket_size_x = (bucket_dim_x + FLT_EPSILON) / (float)layer->buckets_x; const float bucket_size_y = (bucket_dim_y + FLT_EPSILON) / (float)layer->buckets_y; const float bucket_max_rad = (max_ff(bucket_size_x, bucket_size_y) * (float)M_SQRT2) + FLT_EPSILON; const float bucket_max_rad_squared = bucket_max_rad * bucket_max_rad; unsigned int *face = &layer->face_array[0][0]; float (*cos)[3] = layer->face_coords; const unsigned int bucket_tot = layer->buckets_x * layer->buckets_y; LinkNode **bucketstore = MEM_callocN(bucket_tot * sizeof(LinkNode *), __func__); unsigned int *bucketstore_tot = MEM_callocN(bucket_tot * sizeof(unsigned int), __func__); unsigned int face_index; for (face_index = 0; face_index < layer->face_tot; face_index++, face += 4) { float xmin; float xmax; float ymin; float ymax; if (face[3] == TRI_VERT) { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; xmin = min_ff(v1[0], min_ff(v2[0], v3[0])); xmax = max_ff(v1[0], max_ff(v2[0], v3[0])); ymin = min_ff(v1[1], min_ff(v2[1], v3[1])); ymax = max_ff(v1[1], max_ff(v2[1], v3[1])); } else { const float *v1 = cos[face[0]]; const float *v2 = cos[face[1]]; const float *v3 = cos[face[2]]; const float *v4 = cos[face[3]]; xmin = min_ff(v1[0], min_ff(v2[0], min_ff(v3[0], v4[0]))); xmax = max_ff(v1[0], max_ff(v2[0], max_ff(v3[0], v4[0]))); ymin = min_ff(v1[1], min_ff(v2[1], min_ff(v3[1], v4[1]))); ymax = max_ff(v1[1], max_ff(v2[1], max_ff(v3[1], v4[1]))); } /* not essential but may as will skip any faces outside the view */ if (!((xmax < 0.0f) || (ymax < 0.0f) || (xmin > 1.0f) || (ymin > 1.0f))) { CLAMP(xmin, 0.0f, 1.0f); CLAMP(ymin, 0.0f, 1.0f); CLAMP(xmax, 0.0f, 1.0f); CLAMP(ymax, 0.0f, 1.0f); { unsigned int xi_min = (unsigned int) ((xmin - layer->bounds.xmin) * layer->buckets_xy_scalar[0]); unsigned int xi_max = (unsigned int) ((xmax - layer->bounds.xmin) * layer->buckets_xy_scalar[0]); unsigned int yi_min = (unsigned int) ((ymin - layer->bounds.ymin) * layer->buckets_xy_scalar[1]); unsigned int yi_max = (unsigned int) ((ymax - layer->bounds.ymin) * layer->buckets_xy_scalar[1]); void *face_index_void = SET_UINT_IN_POINTER(face_index); unsigned int xi, yi; /* this should _almost_ never happen but since it can in extreme cases, * we have to clamp the values or we overrun the buffer and crash */ if (xi_min >= layer->buckets_x) xi_min = layer->buckets_x - 1; if (xi_max >= layer->buckets_x) xi_max = layer->buckets_x - 1; if (yi_min >= layer->buckets_y) yi_min = layer->buckets_y - 1; if (yi_max >= layer->buckets_y) yi_max = layer->buckets_y - 1; for (yi = yi_min; yi <= yi_max; yi++) { unsigned int bucket_index = (layer->buckets_x * yi) + xi_min; for (xi = xi_min; xi <= 
xi_max; xi++, bucket_index++) { // unsigned int bucket_index = (layer->buckets_x * yi) + xi; /* correct but do in outer loop */ BLI_assert(xi < layer->buckets_x); BLI_assert(yi < layer->buckets_y); BLI_assert(bucket_index < bucket_tot); /* check if the bucket intersects with the face */ /* note: there is a trade off here since checking box/tri intersections isn't * as optimal as it could be, but checking pixels against faces they will never intersect * with is likely the greater slowdown here - so check if the cell intersects the face */ if (layer_bucket_isect_test(layer, face_index, xi, yi, bucket_size_x, bucket_size_y, bucket_max_rad_squared)) { BLI_linklist_prepend_arena(&bucketstore[bucket_index], face_index_void, arena); bucketstore_tot[bucket_index]++; } } } } } } if (1) { /* now convert linknodes into arrays for faster per pixel access */ unsigned int **buckets_face = MEM_mallocN(bucket_tot * sizeof(*buckets_face), __func__); unsigned int bucket_index; for (bucket_index = 0; bucket_index < bucket_tot; bucket_index++) { if (bucketstore_tot[bucket_index]) { unsigned int *bucket = MEM_mallocN((bucketstore_tot[bucket_index] + 1) * sizeof(unsigned int), __func__); LinkNode *bucket_node; buckets_face[bucket_index] = bucket; for (bucket_node = bucketstore[bucket_index]; bucket_node; bucket_node = bucket_node->next) { *bucket = GET_UINT_FROM_POINTER(bucket_node->link); bucket++; } *bucket = TRI_TERMINATOR_ID; } else { buckets_face[bucket_index] = NULL; } } layer->buckets_face = buckets_face; } MEM_freeN(bucketstore); MEM_freeN(bucketstore_tot); } BLI_memarena_free(arena); }
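/* Sketch (illustrative) of the arena-backed LinkNode step above: bucket lists are built
 * with BLI_linklist_prepend_arena() so no individual node ever needs freeing; after the
 * lists are flattened into plain arrays, one BLI_memarena_free() discards all of them.
 * TRI_TERMINATOR_ID is the sentinel the rasterizer scans for. */

static unsigned int *bucket_to_array_example(LinkNode *bucket_list, const unsigned int count)
{
	unsigned int *bucket = MEM_mallocN((count + 1) * sizeof(unsigned int), __func__);
	unsigned int *p = bucket;
	LinkNode *node;

	for (node = bucket_list; node; node = node->next) {
		*p++ = GET_UINT_FROM_POINTER(node->link);
	}
	*p = TRI_TERMINATOR_ID;

	return bucket;  /* the LinkNodes themselves die with the arena */
}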
/** * \param normal_proj Optional normal thats used to project the scanfill verts into 2d coords. * Pass this along if known since it saves time calculating the normal. * \param flipnormal Flip the normal (same as passing \a normal_proj negated) */ void BKE_displist_fill(ListBase *dispbase, ListBase *to, const float normal_proj[3], const bool flipnormal) { ScanFillContext sf_ctx; ScanFillVert *sf_vert, *sf_vert_new, *sf_vert_last; ScanFillFace *sf_tri; MemArena *sf_arena; DispList *dlnew = NULL, *dl; float *f1; int colnr = 0, charidx = 0, cont = 1, tot, a, *index, nextcol = 0; int totvert; const int scanfill_flag = BLI_SCANFILL_CALC_REMOVE_DOUBLES | BLI_SCANFILL_CALC_POLYS | BLI_SCANFILL_CALC_HOLES; if (dispbase == NULL) return; if (BLI_listbase_is_empty(dispbase)) return; sf_arena = BLI_memarena_new(BLI_SCANFILL_ARENA_SIZE, __func__); while (cont) { int dl_flag_accum = 0; cont = 0; totvert = 0; nextcol = 0; BLI_scanfill_begin_arena(&sf_ctx, sf_arena); dl = dispbase->first; while (dl) { if (dl->type == DL_POLY) { if (charidx < dl->charidx) cont = 1; else if (charidx == dl->charidx) { /* character with needed index */ if (colnr == dl->col) { sf_ctx.poly_nr++; /* make editverts and edges */ f1 = dl->verts; a = dl->nr; sf_vert = sf_vert_new = NULL; while (a--) { sf_vert_last = sf_vert; sf_vert = BLI_scanfill_vert_add(&sf_ctx, f1); totvert++; if (sf_vert_last == NULL) sf_vert_new = sf_vert; else { BLI_scanfill_edge_add(&sf_ctx, sf_vert_last, sf_vert); } f1 += 3; } if (sf_vert != NULL && sf_vert_new != NULL) { BLI_scanfill_edge_add(&sf_ctx, sf_vert, sf_vert_new); } } else if (colnr < dl->col) { /* got poly with next material at current char */ cont = 1; nextcol = 1; } } dl_flag_accum |= dl->flag; } dl = dl->next; } /* XXX (obedit && obedit->actcol) ? (obedit->actcol - 1) : 0)) { */ if (totvert && (tot = BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, normal_proj))) { if (tot) { dlnew = MEM_callocN(sizeof(DispList), "filldisplist"); dlnew->type = DL_INDEX3; dlnew->flag = (dl_flag_accum & (DL_BACK_CURVE | DL_FRONT_CURVE)); dlnew->col = colnr; dlnew->nr = totvert; dlnew->parts = tot; dlnew->index = MEM_mallocN(tot * 3 * sizeof(int), "dlindex"); dlnew->verts = MEM_mallocN(totvert * 3 * sizeof(float), "dlverts"); /* vert data */ f1 = dlnew->verts; totvert = 0; for (sf_vert = sf_ctx.fillvertbase.first; sf_vert; sf_vert = sf_vert->next) { copy_v3_v3(f1, sf_vert->co); f1 += 3; /* index number */ sf_vert->tmp.i = totvert; totvert++; } /* index data */ index = dlnew->index; for (sf_tri = sf_ctx.fillfacebase.first; sf_tri; sf_tri = sf_tri->next) { index[0] = sf_tri->v1->tmp.i; index[1] = sf_tri->v2->tmp.i; index[2] = sf_tri->v3->tmp.i; if (flipnormal) SWAP(int, index[0], index[2]); index += 3; } } BLI_addhead(to, dlnew); } BLI_scanfill_end_arena(&sf_ctx, sf_arena); if (nextcol) { /* stay at current char but fill polys with next material */ colnr++; } else { /* switch to next char and start filling from first material */ charidx++; colnr = 0; } } BLI_memarena_free(sf_arena); /* do not free polys, needed for wireframe display */ }
/* render call to fill in strands */ int zbuffer_strands_abuf(Render *re, RenderPart *pa, APixstrand *apixbuf, ListBase *apsmbase, unsigned int lay, int UNUSED(negzmask), float winmat[][4], int winx, int winy, int UNUSED(sample), float (*jit)[2], float clipcrop, int shadow, StrandShadeCache *cache) { ObjectRen *obr; ObjectInstanceRen *obi; ZSpan zspan; StrandRen *strand=0; StrandVert *svert; StrandBound *sbound; StrandPart spart; StrandSegment sseg; StrandSortSegment *sortsegments = NULL, *sortseg, *firstseg; MemArena *memarena; float z[4], bounds[4], obwinmat[4][4]; int a, b, c, i, totsegment, clip[4]; if (re->test_break(re->tbh)) return 0; if (re->totstrand == 0) return 0; /* setup StrandPart */ memset(&spart, 0, sizeof(spart)); spart.re= re; spart.rectx= pa->rectx; spart.recty= pa->recty; spart.apixbuf= apixbuf; spart.zspan= &zspan; spart.rectdaps= pa->rectdaps; spart.rectz= pa->rectz; spart.rectmask= pa->rectmask; spart.cache= cache; spart.shadow= shadow; spart.jit= jit; zbuf_alloc_span(&zspan, pa->rectx, pa->recty, clipcrop); /* needed for transform from hoco to zbuffer co */ zspan.zmulx= ((float)winx)/2.0f; zspan.zmuly= ((float)winy)/2.0f; zspan.zofsx= -pa->disprect.xmin; zspan.zofsy= -pa->disprect.ymin; /* to center the sample position */ if (!shadow) { zspan.zofsx -= 0.5f; zspan.zofsy -= 0.5f; } zspan.apsmbase= apsmbase; /* clipping setup */ bounds[0]= (2*pa->disprect.xmin - winx-1)/(float)winx; bounds[1]= (2*pa->disprect.xmax - winx+1)/(float)winx; bounds[2]= (2*pa->disprect.ymin - winy-1)/(float)winy; bounds[3]= (2*pa->disprect.ymax - winy+1)/(float)winy; memarena= BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "strand sort arena"); firstseg= NULL; totsegment= 0; /* for all object instances */ for (obi=re->instancetable.first, i=0; obi; obi=obi->next, i++) { Material *ma; float widthx, widthy; obr= obi->obr; if (!obr->strandbuf || !(obr->strandbuf->lay & lay)) continue; /* compute matrix and try clipping whole object */ if (obi->flag & R_TRANSFORMED) mult_m4_m4m4(obwinmat, winmat, obi->mat); else copy_m4_m4(obwinmat, winmat); /* test if we should skip it */ ma = obr->strandbuf->ma; if (shadow && !(ma->mode & MA_SHADBUF)) continue; else if (!shadow && (ma->mode & MA_ONLYCAST)) continue; if (clip_render_object(obi->obr->boundbox, bounds, obwinmat)) continue; widthx= obr->strandbuf->maxwidth*obwinmat[0][0]; widthy= obr->strandbuf->maxwidth*obwinmat[1][1]; /* for each bounding box containing a number of strands */ sbound= obr->strandbuf->bound; for (c=0; c<obr->strandbuf->totbound; c++, sbound++) { if (clip_render_object(sbound->boundbox, bounds, obwinmat)) continue; /* for each strand in this bounding box */ for (a=sbound->start; a<sbound->end; a++) { strand= RE_findOrAddStrand(obr, a); svert= strand->vert; /* keep clipping and z depth for 4 control points */ clip[1]= strand_test_clip(obwinmat, &zspan, bounds, svert->co, &z[1], widthx, widthy); clip[2]= strand_test_clip(obwinmat, &zspan, bounds, (svert+1)->co, &z[2], widthx, widthy); clip[0]= clip[1]; z[0]= z[1]; for (b=0; b<strand->totvert-1; b++, svert++) { /* compute 4th point clipping and z depth */ if (b < strand->totvert-2) { clip[3]= strand_test_clip(obwinmat, &zspan, bounds, (svert+2)->co, &z[3], widthx, widthy); } else { clip[3]= clip[2]; z[3]= z[2]; } /* check clipping and add to sortsegments buffer */ if (!(clip[0] & clip[1] & clip[2] & clip[3])) { sortseg= BLI_memarena_alloc(memarena, sizeof(StrandSortSegment)); sortseg->obi= i; sortseg->strand= strand->index; sortseg->segment= b; sortseg->z= 0.5f*(z[1] + z[2]); 
sortseg->next= firstseg; firstseg= sortseg; totsegment++; } /* shift clipping and z depth */ clip[0]= clip[1]; z[0]= z[1]; clip[1]= clip[2]; z[1]= z[2]; clip[2]= clip[3]; z[2]= z[3]; } } } } if (!re->test_break(re->tbh)) { /* convert list to array and sort */ sortsegments= MEM_mallocN(sizeof(StrandSortSegment)*totsegment, "StrandSortSegment"); for (a=0, sortseg=firstseg; a<totsegment; a++, sortseg=sortseg->next) sortsegments[a]= *sortseg; qsort(sortsegments, totsegment, sizeof(StrandSortSegment), compare_strand_segment); } BLI_memarena_free(memarena); spart.totapixbuf= MEM_callocN(sizeof(int)*pa->rectx*pa->recty, "totapixbuf"); if (!re->test_break(re->tbh)) { /* render segments in sorted order */ sortseg= sortsegments; for (a=0; a<totsegment; a++, sortseg++) { if (re->test_break(re->tbh)) break; obi= &re->objectinstance[sortseg->obi]; obr= obi->obr; sseg.obi= obi; sseg.strand= RE_findOrAddStrand(obr, sortseg->strand); sseg.buffer= sseg.strand->buffer; sseg.sqadaptcos= sseg.buffer->adaptcos; sseg.sqadaptcos *= sseg.sqadaptcos; svert= sseg.strand->vert + sortseg->segment; sseg.v[0]= (sortseg->segment > 0)? (svert-1): svert; sseg.v[1]= svert; sseg.v[2]= svert+1; sseg.v[3]= (sortseg->segment < sseg.strand->totvert-2)? svert+2: svert+1; sseg.shaded= 0; spart.segment= &sseg; render_strand_segment(re, winmat, &spart, &zspan, 1, &sseg); } } if (sortsegments) MEM_freeN(sortsegments); MEM_freeN(spart.totapixbuf); zbuf_free_span(&zspan); return totsegment; }
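/* Sketch (assumption) of the list-then-array step above: segments are pushed onto a singly
 * linked list with cheap arena allocations, copied into a contiguous MEM_mallocN() array so
 * qsort() can work on them, and then the whole arena is released in one call. */

static StrandSortSegment *segments_to_sorted_array_example(MemArena *arena,
                                                           StrandSortSegment *firstseg,
                                                           int totsegment)
{
	StrandSortSegment *array, *sortseg;
	int a;

	array = MEM_mallocN(sizeof(StrandSortSegment) * totsegment, "StrandSortSegment");
	for (a = 0, sortseg = firstseg; a < totsegment; a++, sortseg = sortseg->next)
		array[a] = *sortseg;

	qsort(array, totsegment, sizeof(StrandSortSegment), compare_strand_segment);

	BLI_memarena_free(arena);  /* every list node released at once */
	return array;
}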
void BKE_mball_polygonize(EvaluationContext *eval_ctx, Scene *scene, Object *ob, ListBase *dispbase)
{
	MetaBall *mb;
	DispList *dl;
	unsigned int a;
	PROCESS process = {0};

	mb = ob->data;

	process.thresh = mb->thresh;

	if      (process.thresh < 0.001f) process.converge_res = 16;
	else if (process.thresh < 0.01f)  process.converge_res = 8;
	else if (process.thresh < 0.1f)   process.converge_res = 4;
	else                              process.converge_res = 2;

	if ((eval_ctx->mode != DAG_EVAL_RENDER) && (mb->flag == MB_UPDATE_NEVER))
		return;

	if ((G.moving & (G_TRANSFORM_OBJ | G_TRANSFORM_EDIT)) && mb->flag == MB_UPDATE_FAST)
		return;

	if (eval_ctx->mode == DAG_EVAL_RENDER) {
		process.size = mb->rendersize;
	}
	else {
		process.size = mb->wiresize;
		if ((G.moving & (G_TRANSFORM_OBJ | G_TRANSFORM_EDIT)) && mb->flag == MB_UPDATE_HALFRES) {
			process.size *= 2.0f;
		}
	}

	process.delta = process.size * 0.001f;

	process.pgn_elements = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "Metaball memarena");

	/* initialize all mainb (MetaElems) */
	init_meta(eval_ctx, &process, scene, ob);

	if (process.totelem > 0) {
		build_bvh_spatial(&process, &process.metaball_bvh, 0, process.totelem, &process.allbb);

		/* don't polygonize metaballs with too high resolution (base mball too small)
		 * note: Eps was 0.0001f but this was giving problems for blood animation for durian, using 0.00001f */
		if (ob->size[0] > 0.00001f * (process.allbb.max[0] - process.allbb.min[0]) ||
		    ob->size[1] > 0.00001f * (process.allbb.max[1] - process.allbb.min[1]) ||
		    ob->size[2] > 0.00001f * (process.allbb.max[2] - process.allbb.min[2]))
		{
			polygonize(&process);

			/* add resulting surface to displist */
			if (process.curindex) {
				dl = MEM_callocN(sizeof(DispList), "mballdisp");
				BLI_addtail(dispbase, dl);
				dl->type = DL_INDEX4;
				dl->nr = (int)process.curvertex;
				dl->parts = (int)process.curindex;

				dl->index = (int *)process.indices;

				for (a = 0; a < process.curvertex; a++) {
					normalize_v3(process.no[a]);
				}

				dl->verts = (float *)process.co;
				dl->nors = (float *)process.no;
			}
		}
	}

	freepolygonize(&process);
}
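/* Sketch (assumed, loosely matching the cleanup done by freepolygonize()) of the teardown
 * for the arena created above: whatever element data the polygonizer carved out of
 * process.pgn_elements is released with a single arena free, while the vertex/normal/index
 * buffers handed over to the DispList stay owned by the display list. */

static void polygonize_cleanup_example(PROCESS *process)
{
	if (process->pgn_elements) {
		BLI_memarena_free(process->pgn_elements);
		process->pgn_elements = NULL;
	}
}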