void COM_execute(RenderData *rd,
                 Scene *scene,
                 bNodeTree *editingtree,
                 int rendering,
                 const ColorManagedViewSettings *viewSettings,
                 const ColorManagedDisplaySettings *displaySettings,
                 const char *viewName)
{
  /* Initialize mutex. TODO: this mutex init is actually not thread safe and
   * should be done somewhere as part of Blender startup; all the other
   * initializations can be done lazily. */
  if (is_compositorMutex_init == false) {
    BLI_mutex_init(&s_compositorMutex);
    is_compositorMutex_init = true;
  }

  BLI_mutex_lock(&s_compositorMutex);
  if (editingtree->test_break(editingtree->tbh)) {
    /* During editing multiple calls to this method can be triggered.
     * Make sure only the last one will be doing the work. */
    BLI_mutex_unlock(&s_compositorMutex);
    return;
  }

  /* Make sure the node tree has previews.
   * Don't create previews in advance, this is done when adding preview operations.
   * Reserved preview size is determined by render output for now. */
  float aspect = rd->xsch > 0 ? (float)rd->ysch / (float)rd->xsch : 1.0f;
  BKE_node_preview_init_tree(editingtree, COM_PREVIEW_SIZE, (int)(COM_PREVIEW_SIZE * aspect), false);

  /* Initialize workscheduler; will check if already done. TODO: deinitialize somewhere. */
  bool use_opencl = (editingtree->flag & NTREE_COM_OPENCL) != 0;
  WorkScheduler::initialize(use_opencl, BKE_render_num_threads(rd));

  /* Set progress bar to 0% and status to "Compositing". */
  editingtree->progress(editingtree->prh, 0.0);
  editingtree->stats_draw(editingtree->sdh, IFACE_("Compositing"));

  bool twopass = (editingtree->flag & NTREE_TWO_PASS) > 0 && !rendering;
  /* Initialize execution system. */
  if (twopass) {
    ExecutionSystem *system = new ExecutionSystem(
        rd, scene, editingtree, rendering, twopass, viewSettings, displaySettings, viewName);
    system->execute();
    delete system;

    if (editingtree->test_break(editingtree->tbh)) {
      /* During editing multiple calls to this method can be triggered.
       * Make sure only the last one will be doing the work. */
      BLI_mutex_unlock(&s_compositorMutex);
      return;
    }
  }

  ExecutionSystem *system = new ExecutionSystem(
      rd, scene, editingtree, rendering, false, viewSettings, displaySettings, viewName);
  system->execute();
  delete system;

  BLI_mutex_unlock(&s_compositorMutex);
}
void BLI_task_pool_work_and_wait(TaskPool *pool)
{
  TaskScheduler *scheduler = pool->scheduler;

  BLI_mutex_lock(&pool->num_mutex);

  while (pool->num != 0) {
    Task *task, *work_task = NULL;
    bool found_task = false;

    BLI_mutex_unlock(&pool->num_mutex);

    BLI_mutex_lock(&scheduler->queue_mutex);

    /* Find a task from this pool. If we get a task from another pool,
     * we can get into deadlock. */
    if (pool->num_threads == 0 || pool->currently_running_tasks < pool->num_threads) {
      for (task = scheduler->queue.first; task; task = task->next) {
        if (task->pool == pool) {
          work_task = task;
          found_task = true;
          BLI_remlink(&scheduler->queue, task);
          break;
        }
      }
    }

    BLI_mutex_unlock(&scheduler->queue_mutex);

    /* If we found a task, do it; otherwise wait until other tasks are done. */
    if (found_task) {
      /* Run task. */
      atomic_add_z(&pool->currently_running_tasks, 1);
      work_task->run(pool, work_task->taskdata, 0);

      /* Delete task. */
      if (work_task->free_taskdata)
        MEM_freeN(work_task->taskdata);
      MEM_freeN(work_task);

      /* Notify pool that the task was done. */
      task_pool_num_decrease(pool, 1);
    }

    BLI_mutex_lock(&pool->num_mutex);
    if (pool->num == 0)
      break;

    if (!found_task)
      BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
  }

  BLI_mutex_unlock(&pool->num_mutex);
}
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
  bool found_task = false;
  BLI_mutex_lock(&scheduler->queue_mutex);

  while (!scheduler->queue.first && !scheduler->do_exit)
    BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

  do {
    Task *current_task;

    /* Assuming we can only have an empty queue in the 'exit' case here seems
     * logical (we should only be here after our worker thread has been woken
     * up from a condition_wait(), which only happens after a new task was
     * added to the queue), but it is wrong. Waiting on a condition may wake
     * up the thread even if the condition is not signaled (spurious
     * wake-ups), and a race condition may also empty the queue **after** the
     * condition has been signaled, but **before** the awoken thread reaches
     * this point...
     * See http://stackoverflow.com/questions/8594591
     *
     * So we only abort here if do_exit is set. */
    if (scheduler->do_exit) {
      BLI_mutex_unlock(&scheduler->queue_mutex);
      return false;
    }

    for (current_task = scheduler->queue.first; current_task != NULL;
         current_task = current_task->next)
    {
      TaskPool *pool = current_task->pool;

      if (scheduler->background_thread_only && !pool->run_in_background) {
        continue;
      }

      if (atomic_add_and_fetch_z(&pool->currently_running_tasks, 1) <= pool->num_threads ||
          pool->num_threads == 0)
      {
        *task = current_task;
        found_task = true;
        BLI_remlink(&scheduler->queue, *task);
        break;
      }
      else {
        atomic_sub_and_fetch_z(&pool->currently_running_tasks, 1);
      }
    }

    if (!found_task)
      BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
  } while (!found_task);

  BLI_mutex_unlock(&scheduler->queue_mutex);

  return true;
}
/* Only this runs inside the thread. */
static void preview_startjob(void *data, short *stop, short *do_update, float *progress)
{
  PreviewJob *pj = data;
  PreviewJobAudio *previewjb;

  BLI_mutex_lock(pj->mutex);
  previewjb = pj->previews.first;
  BLI_mutex_unlock(pj->mutex);

  while (previewjb) {
    PreviewJobAudio *preview_next;
    bSound *sound = previewjb->sound;

    BKE_sound_read_waveform(sound, stop);

    if (*stop || G.is_break) {
      BLI_mutex_lock(pj->mutex);
      previewjb = previewjb->next;
      BLI_mutex_unlock(pj->mutex);
      while (previewjb) {
        sound = previewjb->sound;

        /* Make sure we clean up the loading flag! */
        BLI_spin_lock(sound->spinlock);
        sound->tags &= ~SOUND_TAGS_WAVEFORM_LOADING;
        BLI_spin_unlock(sound->spinlock);

        BLI_mutex_lock(pj->mutex);
        previewjb = previewjb->next;
        BLI_mutex_unlock(pj->mutex);
      }

      BLI_mutex_lock(pj->mutex);
      BLI_freelistN(&pj->previews);
      pj->total = 0;
      pj->processed = 0;
      BLI_mutex_unlock(pj->mutex);
      break;
    }

    BLI_mutex_lock(pj->mutex);
    preview_next = previewjb->next;
    BLI_freelinkN(&pj->previews, previewjb);
    previewjb = preview_next;
    pj->processed++;
    *progress = (pj->total > 0) ? (float)pj->processed / (float)pj->total : 1.0f;
    *do_update = true;
    BLI_mutex_unlock(pj->mutex);
  }
}
static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
{
  Task *task, *nexttask;
  size_t done = 0;

  BLI_mutex_lock(&scheduler->queue_mutex);

  /* Free all tasks from this pool from the queue. */
  for (task = scheduler->queue.first; task; task = nexttask) {
    nexttask = task->next;

    if (task->pool == pool) {
      if (task->free_taskdata)
        MEM_freeN(task->taskdata);
      BLI_freelinkN(&scheduler->queue, task);

      done++;
    }
  }

  BLI_mutex_unlock(&scheduler->queue_mutex);

  /* Notify done. */
  task_pool_num_decrease(pool, done);
}
void KX_BlenderSceneConverter::MergeAsyncLoads()
{
  vector<KX_Scene *> *merge_scenes;
  vector<KX_LibLoadStatus *>::iterator mit;
  vector<KX_Scene *>::iterator sit;

  BLI_mutex_lock(&m_threadinfo->m_mutex);

  for (mit = m_mergequeue.begin(); mit != m_mergequeue.end(); ++mit) {
    merge_scenes = (vector<KX_Scene *> *)(*mit)->GetData();

    for (sit = merge_scenes->begin(); sit != merge_scenes->end(); ++sit) {
      (*mit)->GetMergeScene()->MergeScene(*sit);
      delete (*sit);
    }

    delete merge_scenes;
    (*mit)->SetData(NULL);

    (*mit)->Finish();
  }

  m_mergequeue.clear();

  BLI_mutex_unlock(&m_threadinfo->m_mutex);
}
static void UNUSED_FUNCTION(COM_freeCaches)()
{
  if (is_compositorMutex_init) {
    BLI_mutex_lock(&s_compositorMutex);
    intern_freeCompositorCaches();
    BLI_mutex_unlock(&s_compositorMutex);
  }
}
static void task_pool_num_increase(TaskPool *pool)
{
  BLI_mutex_lock(&pool->num_mutex);

  pool->num++;
  BLI_condition_notify_all(&pool->num_cond);

  BLI_mutex_unlock(&pool->num_mutex);
}
void COM_deinitialize()
{
  if (is_compositorMutex_init) {
    BLI_mutex_lock(&s_compositorMutex);
    WorkScheduler::deinitialize();
    is_compositorMutex_init = false;
    BLI_mutex_unlock(&s_compositorMutex);
    BLI_mutex_end(&s_compositorMutex);
  }
}
static void do_moviecache_put(MovieCache *cache, void *userkey, ImBuf *ibuf, int need_lock)
{
  MovieCacheKey *key;
  MovieCacheItem *item;

  if (!limitor)
    IMB_moviecache_init();

  IMB_refImBuf(ibuf);

  key = BLI_mempool_alloc(cache->keys_pool);
  key->cache_owner = cache;
  key->userkey = BLI_mempool_alloc(cache->userkeys_pool);
  memcpy(key->userkey, userkey, cache->keysize);

  item = BLI_mempool_alloc(cache->items_pool);

  PRINT("%s: cache '%s' put %p, item %p\n", __func__, cache->name, ibuf, item);

  item->ibuf = ibuf;
  item->cache_owner = cache;
  item->c_handle = NULL;
  item->priority_data = NULL;

  if (cache->getprioritydatafp) {
    item->priority_data = cache->getprioritydatafp(userkey);
  }

  BLI_ghash_remove(cache->hash, key, moviecache_keyfree, moviecache_valfree);
  BLI_ghash_insert(cache->hash, key, item);

  if (cache->last_userkey) {
    memcpy(cache->last_userkey, userkey, cache->keysize);
  }

  if (need_lock)
    BLI_mutex_lock(&limitor_lock);

  item->c_handle = MEM_CacheLimiter_insert(limitor, item);

  MEM_CacheLimiter_ref(item->c_handle);
  MEM_CacheLimiter_enforce_limits(limitor);
  MEM_CacheLimiter_unref(item->c_handle);

  if (need_lock)
    BLI_mutex_unlock(&limitor_lock);

  /* The cache limiter can't remove unused keys which point to destroyed values. */
  check_unused_keys(cache);

  if (cache->points) {
    MEM_freeN(cache->points);
    cache->points = NULL;
  }
}
void COM_deinitialize()
{
  if (is_compositorMutex_init) {
    BLI_mutex_lock(&s_compositorMutex);
    intern_freeCompositorCaches();
    WorkScheduler::deinitialize();
    is_compositorMutex_init = FALSE;
    BLI_mutex_unlock(&s_compositorMutex);
    BLI_mutex_end(&s_compositorMutex);
  }
}
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
  BLI_mutex_lock(&scheduler->queue_mutex);

  while (!scheduler->queue.first && !scheduler->do_exit)
    BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

  if (!scheduler->queue.first) {
    BLI_mutex_unlock(&scheduler->queue_mutex);
    BLI_assert(scheduler->do_exit);
    return false;
  }

  *task = scheduler->queue.first;
  BLI_remlink(&scheduler->queue, *task);

  BLI_mutex_unlock(&scheduler->queue_mutex);

  return true;
}
static void eevee_lightbake_delete_resources(EEVEE_LightBake *lbake)
{
  if (!lbake->resource_only) {
    BLI_mutex_lock(lbake->mutex);
  }

  if (lbake->gl_context) {
    DRW_opengl_render_context_enable(lbake->gl_context);
    DRW_gawain_render_context_enable(lbake->gpu_context);
  }
  else if (!lbake->resource_only) {
    DRW_opengl_context_enable();
  }

  /* XXX Free the resources contained in the viewlayer data
   * to be able to free the context before deleting the depsgraph. */
  if (lbake->sldata) {
    EEVEE_view_layer_data_free(lbake->sldata);
  }

  DRW_TEXTURE_FREE_SAFE(lbake->rt_depth);
  DRW_TEXTURE_FREE_SAFE(lbake->rt_color);
  DRW_TEXTURE_FREE_SAFE(lbake->grid_prev);
  GPU_FRAMEBUFFER_FREE_SAFE(lbake->store_fb);
  for (int i = 0; i < 6; ++i) {
    GPU_FRAMEBUFFER_FREE_SAFE(lbake->rt_fb[i]);
  }

  if (lbake->gpu_context) {
    DRW_gawain_render_context_disable(lbake->gpu_context);
    DRW_gawain_render_context_enable(lbake->gpu_context);
    GPU_context_discard(lbake->gpu_context);
  }

  if (lbake->gl_context && lbake->own_resources) {
    /* Delete the baking context. */
    DRW_opengl_render_context_disable(lbake->gl_context);
    WM_opengl_context_dispose(lbake->gl_context);
    lbake->gpu_context = NULL;
    lbake->gl_context = NULL;
  }
  else if (lbake->gl_context) {
    DRW_opengl_render_context_disable(lbake->gl_context);
  }
  else if (!lbake->resource_only) {
    DRW_opengl_context_disable();
  }

  if (!lbake->resource_only) {
    BLI_mutex_unlock(lbake->mutex);
  }
}
static void task_pool_num_decrease(TaskPool *pool, size_t done)
{
  BLI_mutex_lock(&pool->num_mutex);

  BLI_assert(pool->num >= done);

  pool->num -= done;
  if (pool->num == 0)
    BLI_condition_notify_all(&pool->num_cond);

  BLI_mutex_unlock(&pool->num_mutex);
}
static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
  bool found_task = false;
  BLI_mutex_lock(&scheduler->queue_mutex);

  while (!scheduler->queue.first && !scheduler->do_exit)
    BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);

  do {
    Task *current_task;

    if (!scheduler->queue.first) {
      BLI_mutex_unlock(&scheduler->queue_mutex);
      BLI_assert(scheduler->do_exit);
      return false;
    }

    for (current_task = scheduler->queue.first; current_task != NULL;
         current_task = current_task->next)
    {
      TaskPool *pool = current_task->pool;
      if (pool->num_threads == 0 || pool->currently_running_tasks < pool->num_threads) {
        *task = current_task;
        found_task = true;
        atomic_add_z(&pool->currently_running_tasks, 1);
        BLI_remlink(&scheduler->queue, *task);
        break;
      }
    }

    if (!found_task)
      BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
  } while (!found_task);

  BLI_mutex_unlock(&scheduler->queue_mutex);

  return true;
}
void BLI_task_pool_cancel(TaskPool *pool)
{
  pool->do_cancel = true;

  task_scheduler_clear(pool->scheduler, pool);

  /* Wait until all entries are cleared. */
  BLI_mutex_lock(&pool->num_mutex);
  while (pool->num)
    BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
  BLI_mutex_unlock(&pool->num_mutex);

  pool->do_cancel = false;
}
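The task-pool helpers above and below (task_pool_num_increase, task_pool_num_decrease, and the wait loops in BLI_task_pool_work_and_wait and BLI_task_pool_cancel) all build on one idiom: a counter guarded by a mutex plus a condition variable, with waiters sleeping until the counter reaches zero. The following is a minimal self-contained sketch of that idiom in plain pthreads, not Blender code; the Latch type and function names are illustrative, and it is simplified in that Blender's pool also notifies on increment so BLI_task_pool_work_and_wait can pick up newly queued tasks.

#include <pthread.h>
#include <stddef.h>

typedef struct Latch {
  pthread_mutex_t mutex;
  pthread_cond_t cond;
  size_t num; /* number of outstanding tasks */
} Latch;

/* Static initialization, analogous to setting up pool->num_mutex/num_cond. */
static Latch g_latch = {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0};

void latch_add(Latch *l)
{
  pthread_mutex_lock(&l->mutex);
  l->num++;
  pthread_mutex_unlock(&l->mutex);
}

void latch_done(Latch *l)
{
  pthread_mutex_lock(&l->mutex);
  if (--l->num == 0) {
    /* Wake every waiter, as BLI_condition_notify_all() does. */
    pthread_cond_broadcast(&l->cond);
  }
  pthread_mutex_unlock(&l->mutex);
}

void latch_wait(Latch *l)
{
  pthread_mutex_lock(&l->mutex);
  /* Loop, don't 'if': condition waits are subject to spurious wake-ups,
   * the same pitfall documented in task_scheduler_thread_wait_pop(). */
  while (l->num != 0) {
    pthread_cond_wait(&l->cond, &l->mutex);
  }
  pthread_mutex_unlock(&l->mutex);
}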
void BLI_task_scheduler_free(TaskScheduler *scheduler)
{
  Task *task;

  /* Stop all waiting threads. */
  BLI_mutex_lock(&scheduler->queue_mutex);
  scheduler->do_exit = true;
  BLI_condition_notify_all(&scheduler->queue_cond);
  BLI_mutex_unlock(&scheduler->queue_mutex);

  /* Delete threads. */
  if (scheduler->threads) {
    int i;

    for (i = 0; i < scheduler->num_threads; i++) {
      if (pthread_join(scheduler->threads[i], NULL) != 0)
        fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
    }

    MEM_freeN(scheduler->threads);
  }

  /* Delete task thread data. */
  if (scheduler->task_threads) {
    MEM_freeN(scheduler->task_threads);
  }

  /* Delete task memory pool. */
  if (scheduler->task_mempool) {
    for (int i = 0; i <= scheduler->num_threads; ++i) {
      for (int j = 0; j < scheduler->task_mempool[i].num_tasks; ++j) {
        MEM_freeN(scheduler->task_mempool[i].tasks[j]);
      }
    }
    MEM_freeN(scheduler->task_mempool);
  }

  /* Delete leftover tasks. */
  for (task = scheduler->queue.first; task; task = task->next) {
    task_data_free(task, 0);
  }
  BLI_freelistN(&scheduler->queue);

  /* Delete mutex/condition. */
  BLI_mutex_end(&scheduler->queue_mutex);
  BLI_condition_end(&scheduler->queue_cond);

  MEM_freeN(scheduler);
}
ListBase BKE_collection_object_cache_get(Collection *collection)
{
  if (!(collection->flag & COLLECTION_HAS_OBJECT_CACHE)) {
    static ThreadMutex cache_lock = BLI_MUTEX_INITIALIZER;

    BLI_mutex_lock(&cache_lock);
    if (!(collection->flag & COLLECTION_HAS_OBJECT_CACHE)) {
      collection_object_cache_fill(&collection->object_cache, collection, 0);
      collection->flag |= COLLECTION_HAS_OBJECT_CACHE;
    }
    BLI_mutex_unlock(&cache_lock);
  }

  return collection->object_cache;
}
static void task_pool_num_decrease(TaskPool *pool, size_t done)
{
  BLI_mutex_lock(&pool->num_mutex);

  BLI_assert(pool->num >= done);

  pool->num -= done;
  atomic_sub_z(&pool->currently_running_tasks, done);
  pool->done += done;

  if (pool->num == 0)
    BLI_condition_notify_all(&pool->num_cond);

  BLI_mutex_unlock(&pool->num_mutex);
}
static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
{
  task_pool_num_increase(task->pool);

  /* Add task to queue. */
  BLI_mutex_lock(&scheduler->queue_mutex);

  if (priority == TASK_PRIORITY_HIGH)
    BLI_addhead(&scheduler->queue, task);
  else
    BLI_addtail(&scheduler->queue, task);

  BLI_condition_notify_one(&scheduler->queue_cond);
  BLI_mutex_unlock(&scheduler->queue_mutex);
}
void IMB_tiles_to_rect(ImBuf *ibuf)
{
  ImBuf *mipbuf;
  ImGlobalTile *gtile;
  unsigned int *to, *from;
  int a, tx, ty, y, w, h;

  for (a = 0; a < ibuf->miptot; a++) {
    mipbuf = IMB_getmipmap(ibuf, a);

    /* Don't call imb_addrectImBuf, it frees all mipmaps. */
    if (!mipbuf->rect) {
      if ((mipbuf->rect = MEM_mapallocN(ibuf->x * ibuf->y * sizeof(unsigned int),
                                        "imb_addrectImBuf"))) {
        mipbuf->mall |= IB_rect;
        mipbuf->flags |= IB_rect;
      }
      else
        break;
    }

    for (ty = 0; ty < mipbuf->ytiles; ty++) {
      for (tx = 0; tx < mipbuf->xtiles; tx++) {
        /* Acquire tile through the cache. This assumes the cache is
         * initialized, which it always is now, but it's a weak assumption... */
        gtile = imb_global_cache_get_tile(mipbuf, tx, ty, NULL);

        /* Setup pointers. */
        from = mipbuf->tiles[mipbuf->xtiles * ty + tx];
        to = mipbuf->rect + mipbuf->x * ty * mipbuf->tiley + tx * mipbuf->tilex;

        /* Exception in tile width/height for tiles at the end of the image. */
        w = (tx == mipbuf->xtiles - 1) ? mipbuf->x - tx * mipbuf->tilex : mipbuf->tilex;
        h = (ty == mipbuf->ytiles - 1) ? mipbuf->y - ty * mipbuf->tiley : mipbuf->tiley;

        for (y = 0; y < h; y++) {
          memcpy(to, from, sizeof(unsigned int) * w);
          from += mipbuf->tilex;
          to += mipbuf->x;
        }

        /* Decrease refcount for tile again. */
        BLI_mutex_lock(&GLOBAL_CACHE.mutex);
        gtile->refcount--;
        BLI_mutex_unlock(&GLOBAL_CACHE.mutex);
      }
    }
  }
}
void BKE_cachefile_ensure_handle(const Main *bmain, CacheFile *cache_file)
{
  BLI_spin_lock(&spin);
  if (cache_file->handle_mutex == NULL) {
    cache_file->handle_mutex = BLI_mutex_alloc();
  }
  BLI_spin_unlock(&spin);

  BLI_mutex_lock(cache_file->handle_mutex);

  if (cache_file->handle == NULL) {
    BKE_cachefile_reload(bmain, cache_file);
  }

  BLI_mutex_unlock(cache_file->handle_mutex);
}
bool IMB_moviecache_put_if_possible(MovieCache *cache, void *userkey, ImBuf *ibuf)
{
  size_t mem_in_use, mem_limit, elem_size;
  bool result = false;

  elem_size = IMB_get_size_in_memory(ibuf);
  mem_limit = MEM_CacheLimiter_get_maximum();

  BLI_mutex_lock(&limitor_lock);
  mem_in_use = MEM_CacheLimiter_get_memory_in_use(limitor);

  if (mem_in_use + elem_size <= mem_limit) {
    do_moviecache_put(cache, userkey, ibuf, false);
    result = true;
  }

  BLI_mutex_unlock(&limitor_lock);

  return result;
}
static void view_layer_bases_hash_create(ViewLayer *view_layer)
{
  static ThreadMutex hash_lock = BLI_MUTEX_INITIALIZER;

  if (view_layer->object_bases_hash == NULL) {
    BLI_mutex_lock(&hash_lock);

    if (view_layer->object_bases_hash == NULL) {
      view_layer->object_bases_hash = BLI_ghash_new(
          BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, __func__);

      for (Base *base = view_layer->object_bases.first; base; base = base->next) {
        if (base->object) {
          BLI_ghash_insert(view_layer->object_bases_hash, base->object, base);
        }
      }
    }

    BLI_mutex_unlock(&hash_lock);
  }
}
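Both BKE_collection_object_cache_get and view_layer_bases_hash_create above use the same double-checked locking idiom: test the "already built" state without the lock for the common fast path, then re-test under a function-local static mutex so only one thread fills the cache. The following is a minimal pthreads sketch of that idiom, not Blender code; ensure_cache and build_cache are illustrative names. Note the unlocked first read is formally a data race unless the flag is atomic, a simplification these call sites accept because the flag only ever transitions once.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cache_built = false;

static void build_cache(void)
{
  /* Stand-in for the expensive one-time fill,
   * e.g. collection_object_cache_fill() above. */
}

void ensure_cache(void)
{
  /* Fast path: skip the lock entirely once the cache exists. */
  if (!cache_built) {
    pthread_mutex_lock(&cache_lock);
    /* Re-check under the lock: another thread may have built it meanwhile. */
    if (!cache_built) {
      build_cache();
      cache_built = true;
    }
    pthread_mutex_unlock(&cache_lock);
  }
}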
void sequencer_preview_add_sound(const bContext *C, Sequence *seq)
{
  /* First, get the preview job, if it exists. */
  wmJob *wm_job;
  PreviewJob *pj;
  ScrArea *sa = CTX_wm_area(C);
  PreviewJobAudio *audiojob = MEM_callocN(sizeof(PreviewJobAudio), "preview_audio");

  wm_job = WM_jobs_get(CTX_wm_manager(C),
                       CTX_wm_window(C),
                       sa,
                       "Strip Previews",
                       WM_JOB_PROGRESS,
                       WM_JOB_TYPE_SEQ_BUILD_PREVIEW);

  pj = WM_jobs_customdata_get(wm_job);

  if (!pj) {
    pj = MEM_callocN(sizeof(PreviewJob), "preview rebuild job");

    pj->mutex = BLI_mutex_alloc();
    pj->scene = CTX_data_scene(C);

    WM_jobs_customdata_set(wm_job, pj, free_preview_job);
    WM_jobs_timer(wm_job, 0.1, NC_SCENE | ND_SEQUENCER, NC_SCENE | ND_SEQUENCER);
    WM_jobs_callbacks(wm_job, preview_startjob, NULL, NULL, preview_endjob);
  }

  /* Attempt to lock the mutex of the job here. */
  audiojob->sound = seq->sound;

  BLI_mutex_lock(pj->mutex);
  BLI_addtail(&pj->previews, audiojob);
  pj->total++;
  BLI_mutex_unlock(pj->mutex);

  if (!WM_jobs_is_running(wm_job)) {
    G.is_break = false;
    WM_jobs_start(CTX_wm_manager(C), wm_job);
  }

  ED_area_tag_redraw(sa);
}
ImBuf *IMB_moviecache_get(MovieCache *cache, void *userkey)
{
  MovieCacheKey key;
  MovieCacheItem *item;

  key.cache_owner = cache;
  key.userkey = userkey;
  item = (MovieCacheItem *)BLI_ghash_lookup(cache->hash, &key);

  if (item) {
    if (item->ibuf) {
      BLI_mutex_lock(&limitor_lock);
      MEM_CacheLimiter_touch(item->c_handle);
      BLI_mutex_unlock(&limitor_lock);

      IMB_refImBuf(item->ibuf);

      return item->ibuf;
    }
  }

  return NULL;
}
/* External free. */
void imb_tile_cache_tile_free(ImBuf *ibuf, int tx, int ty)
{
  ImGlobalTile *gtile, lookuptile;

  BLI_mutex_lock(&GLOBAL_CACHE.mutex);

  lookuptile.ibuf = ibuf;
  lookuptile.tx = tx;
  lookuptile.ty = ty;
  gtile = BLI_ghash_lookup(GLOBAL_CACHE.tilehash, &lookuptile);

  if (gtile) {
    /* In case another thread is loading this tile, busy-wait for it. */
    while (gtile->loading)
      ;

    BLI_ghash_remove(GLOBAL_CACHE.tilehash, gtile, NULL, NULL);
    BLI_remlink(&GLOBAL_CACHE.tiles, gtile);
    BLI_addtail(&GLOBAL_CACHE.unused, gtile);
  }

  BLI_mutex_unlock(&GLOBAL_CACHE.mutex);
}
void BKE_object_handle_data_update(EvaluationContext *eval_ctx, Scene *scene, Object *ob)
{
  ID *data_id = (ID *)ob->data;
  AnimData *adt = BKE_animdata_from_id(data_id);
  Key *key;
  float ctime = BKE_scene_frame_get(scene);

  if (G.debug & G_DEBUG_DEPSGRAPH)
    printf("recalcdata %s\n", ob->id.name + 2);

  /* TODO(sergey): Only used by legacy depsgraph. */
  if (adt) {
    /* Evaluate drivers - datalevel. */
    /* XXX: for mesh types, should we push this to derivedmesh instead? */
    BKE_animsys_evaluate_animdata(scene, data_id, adt, ctime, ADT_RECALC_DRIVERS);
  }

  /* TODO(sergey): Only used by legacy depsgraph. */
  key = BKE_key_from_object(ob);
  if (key && key->block.first) {
    if (!(ob->shapeflag & OB_SHAPE_LOCK))
      BKE_animsys_evaluate_animdata(scene, &key->id, key->adt, ctime, ADT_RECALC_DRIVERS);
  }

  /* Includes all keys and modifiers. */
  switch (ob->type) {
    case OB_MESH:
    {
      BMEditMesh *em = (ob == scene->obedit) ? BKE_editmesh_from_object(ob) : NULL;
      uint64_t data_mask = scene->customdata_mask | CD_MASK_BAREMESH;
#ifdef WITH_FREESTYLE
      /* Make sure Freestyle edge/face marks appear in DM for render (see T40315). */
      if (eval_ctx->mode != DAG_EVAL_VIEWPORT) {
        data_mask |= CD_MASK_FREESTYLE_EDGE | CD_MASK_FREESTYLE_FACE;
      }
#endif
      if (em) {
        makeDerivedMesh(scene, ob, em, data_mask, false); /* was CD_MASK_BAREMESH */
      }
      else {
        makeDerivedMesh(scene, ob, NULL, data_mask, false);
      }
      break;
    }
    case OB_ARMATURE:
      if (ob->id.lib && ob->proxy_from) {
        if (BKE_pose_copy_result(ob->pose, ob->proxy_from->pose) == false) {
          printf("Proxy copy error, lib Object: %s proxy Object: %s\n",
                 ob->id.name + 2, ob->proxy_from->id.name + 2);
        }
      }
      else {
        BKE_pose_where_is(scene, ob);
      }
      break;
    case OB_MBALL:
      BKE_displist_make_mball(eval_ctx, scene, ob);
      break;
    case OB_CURVE:
    case OB_SURF:
    case OB_FONT:
      BKE_displist_make_curveTypes(scene, ob, 0);
      break;
    case OB_LATTICE:
      BKE_lattice_modifiers_calc(scene, ob);
      break;
    case OB_EMPTY:
      if (ob->empty_drawtype == OB_EMPTY_IMAGE && ob->data)
        if (BKE_image_is_animated(ob->data))
          BKE_image_user_check_frame_calc(ob->iuser, (int)ctime, 0);
      break;
  }

  /* Related materials. */
  /* XXX: without depsgraph tagging, this will always need to be run, which will be slow!
   * However, not doing anything (or trying to hack around this lack) is not an option
   * anymore, especially due to Cycles [#31834]. */
  if (ob->totcol) {
    int a;
    if (ob->totcol != 0) {
      BLI_mutex_lock(&material_lock);
      for (a = 1; a <= ob->totcol; a++) {
        Material *ma = give_current_material(ob, a);
        if (ma) {
          /* Recursively update drivers for this material. */
          material_drivers_update(scene, ma, ctime);
        }
      }
      BLI_mutex_unlock(&material_lock);
    }
  }
  else if (ob->type == OB_LAMP)
    lamp_drivers_update(scene, ob->data, ctime);

  /* Particles. */
  if (ob != scene->obedit && ob->particlesystem.first) {
    ParticleSystem *tpsys, *psys;
    DerivedMesh *dm;
    ob->transflag &= ~OB_DUPLIPARTS;

    psys = ob->particlesystem.first;
    while (psys) {
      /* Ensure this update always happens even if psys is disabled. */
      if (psys->recalc & PSYS_RECALC_TYPE) {
        psys_changed_type(ob, psys);
      }

      if (psys_check_enabled(ob, psys)) {
        /* Check use of dupli objects here. */
        if (psys->part && (psys->part->draw_as == PART_DRAW_REND || eval_ctx->mode == DAG_EVAL_RENDER) &&
            ((psys->part->ren_as == PART_DRAW_OB && psys->part->dup_ob) ||
             (psys->part->ren_as == PART_DRAW_GR && psys->part->dup_group)))
        {
          ob->transflag |= OB_DUPLIPARTS;
        }

        particle_system_update(scene, ob, psys);
        psys = psys->next;
      }
      else if (psys->flag & PSYS_DELETE) {
        tpsys = psys->next;
        BLI_remlink(&ob->particlesystem, psys);
        psys_free(ob, psys);
        psys = tpsys;
      }
      else
        psys = psys->next;
    }

    if (eval_ctx->mode == DAG_EVAL_RENDER && ob->transflag & OB_DUPLIPARTS) {
      /* This is to make sure we get render level duplis in groups:
       * the derivedmesh must be created before init_render_mesh,
       * since object_duplilist does dupliparticles before that. */
      CustomDataMask data_mask = CD_MASK_BAREMESH | CD_MASK_MFACE | CD_MASK_MTFACE | CD_MASK_MCOL;
      dm = mesh_create_derived_render(scene, ob, data_mask);
      dm->release(dm);

      for (psys = ob->particlesystem.first; psys; psys = psys->next)
        psys_get_modifier(ob, psys)->flag &= ~eParticleSystemFlag_psys_updated;
    }
  }

  /* Quick cache removed. */
}
void NodeOperation::unlockMutex()
{
  BLI_mutex_unlock(&this->m_mutex);
}
static ImGlobalTile *imb_global_cache_get_tile(ImBuf *ibuf, int tx, int ty,
                                               ImGlobalTile *replacetile)
{
  ImGlobalTile *gtile, lookuptile;

  BLI_mutex_lock(&GLOBAL_CACHE.mutex);

  if (replacetile)
    replacetile->refcount--;

  /* Find tile in global cache. */
  lookuptile.ibuf = ibuf;
  lookuptile.tx = tx;
  lookuptile.ty = ty;
  gtile = BLI_ghash_lookup(GLOBAL_CACHE.tilehash, &lookuptile);

  if (gtile) {
    /* Found tile. However it may be in the process of being loaded by another
     * thread; in that case we do a stupid busy loop, waiting for the other
     * thread to load the tile. */
    gtile->refcount++;

    BLI_mutex_unlock(&GLOBAL_CACHE.mutex);

    while (gtile->loading)
      ;
  }
  else {
    /* Not found, let's load it from disk. */

    /* First check if we hit the memory limit. */
    if (GLOBAL_CACHE.maxmem && GLOBAL_CACHE.totmem > GLOBAL_CACHE.maxmem) {
      /* Find an existing tile to unload. */
      for (gtile = GLOBAL_CACHE.tiles.last; gtile; gtile = gtile->prev)
        if (gtile->refcount == 0 && gtile->loading == 0)
          break;
    }

    if (gtile) {
      /* Found a tile to unload. */
      imb_global_cache_tile_unload(gtile);
      BLI_ghash_remove(GLOBAL_CACHE.tilehash, gtile, NULL, NULL);
      BLI_remlink(&GLOBAL_CACHE.tiles, gtile);
    }
    else {
      /* Allocate a new tile or reuse an unused one. */
      if (GLOBAL_CACHE.unused.first) {
        gtile = GLOBAL_CACHE.unused.first;
        BLI_remlink(&GLOBAL_CACHE.unused, gtile);
      }
      else
        gtile = BLI_memarena_alloc(GLOBAL_CACHE.memarena, sizeof(ImGlobalTile));
    }

    /* Setup new tile. */
    gtile->ibuf = ibuf;
    gtile->tx = tx;
    gtile->ty = ty;
    gtile->refcount = 1;
    gtile->loading = 1;

    BLI_ghash_insert(GLOBAL_CACHE.tilehash, gtile, gtile);
    BLI_addhead(&GLOBAL_CACHE.tiles, gtile);

    /* Mark as being loaded and unlock to allow other threads to load too. */
    GLOBAL_CACHE.totmem += sizeof(unsigned int) * ibuf->tilex * ibuf->tiley;

    BLI_mutex_unlock(&GLOBAL_CACHE.mutex);

    /* Load from disk. */
    imb_global_cache_tile_load(gtile);

    /* Mark as done loading. */
    gtile->loading = 0;
  }

  return gtile;
}
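imb_global_cache_get_tile illustrates a third pattern: publish a half-initialized entry with a loading flag set, drop the cache mutex while doing slow disk I/O, and let other readers busy-wait on the flag. Below is a stripped-down single-entry sketch in plain pthreads, not Blender code; Entry, entry_get, and slow_load are illustrative names. The busy-wait deliberately mirrors the original's self-described "stupid busy loop" (and the volatile flag is not a proper synchronization primitive); a production design would use a condition variable instead.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

typedef struct Entry {
  bool present;          /* entry is published in the cache */
  volatile bool loading; /* spun on without the lock, like gtile->loading */
  int payload;
} Entry;

static int slow_load(void)
{
  /* Stand-in for imb_global_cache_tile_load(): slow disk I/O. */
  return 42;
}

int entry_get(Entry *e)
{
  pthread_mutex_lock(&cache_mutex);
  if (e->present) {
    pthread_mutex_unlock(&cache_mutex);
    /* Another thread published the entry; it may still be loading. */
    while (e->loading)
      ;
  }
  else {
    /* Publish a half-initialized entry, then drop the lock so other
     * threads can keep using the cache during the slow load. */
    e->present = true;
    e->loading = true;
    pthread_mutex_unlock(&cache_mutex);

    e->payload = slow_load();
    e->loading = false;
  }
  return e->payload;
}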