/* Print per-thread object-update timing statistics to the console.
 * Only emits output when Blender was started with debug output enabled
 * (G.debug & G_DEBUG); frees the per-thread statistics lists as it goes. */
static void print_threads_statistics(ThreadedObjectUpdateState *state)
{
	int i, tot_thread;

	/* Skip entirely unless debug output was requested. */
	if ((G.debug & G_DEBUG) == 0) {
		return;
	}

#ifdef DETAILED_ANALYSIS_OUTPUT
	/* Detailed mode: one line per updated object with its start time and
	 * duration (times relative to state->base_time). */
	if (state->has_updated_objects) {
		tot_thread = BLI_system_thread_count();

		fprintf(stderr, "objects update base time %f\n", state->base_time);

		for (i = 0; i < tot_thread; i++) {
			StatisicsEntry *entry;
			for (entry = state->statistics[i].first;
			     entry;
			     entry = entry->next)
			{
				/* id.name + 2 skips the 2-character ID code prefix ("OB"). */
				fprintf(stderr, "thread %d object %s start_time %f duration %f\n",
				        i, entry->object->id.name + 2,
				        entry->start_time, entry->duration);
			}
			BLI_freelistN(&state->statistics[i]);
		}
	}
#else
	/* Summary mode: per thread, an object count and total time, followed by
	 * the individual per-object timings. */
	tot_thread = BLI_system_thread_count();

	for (i = 0; i < tot_thread; i++) {
		int total_objects = 0;
		double total_time = 0.0;
		StatisicsEntry *entry;

		if (state->has_updated_objects) {
			/* Don't pollute output if no objects were updated. */
			for (entry = state->statistics[i].first;
			     entry;
			     entry = entry->next)
			{
				total_objects++;
				total_time += entry->duration;
			}

			printf("Thread %d: total %d objects in %f sec.\n", i, total_objects, total_time);

			for (entry = state->statistics[i].first;
			     entry;
			     entry = entry->next)
			{
				printf(" %s in %f sec\n", entry->object->id.name + 2, entry->duration);
			}
		}

		/* Lists are freed even when nothing was printed, so entries from this
		 * update round do not accumulate. */
		BLI_freelistN(&state->statistics[i]);
	}
#endif
}
// open video file void VideoFFmpeg::openFile (char *filename) { if (openStream(filename, NULL, NULL) != 0) return; if (m_codecCtx->gop_size) m_preseek = (m_codecCtx->gop_size < 25) ? m_codecCtx->gop_size+1 : 25; else if (m_codecCtx->has_b_frames) m_preseek = 25; // should determine gopsize else m_preseek = 0; // get video time range m_range[0] = 0.0; m_range[1] = (double)m_formatCtx->duration / AV_TIME_BASE; // open base class VideoBase::openFile(filename); if ( // ffmpeg reports that http source are actually non stream // but it is really not desirable to seek on http file, so force streaming. // It would be good to find this information from the context but there are no simple indication !strncmp(filename, "http://", 7) || !strncmp(filename, "rtsp://", 7) || (m_formatCtx->pb && !m_formatCtx->pb->seekable) ) { // the file is in fact a streaming source, treat as cam to prevent seeking m_isFile = false; // but it's not handled exactly like a camera. m_isStreaming = true; // for streaming it is important to do non blocking read m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK; } if (m_isImage) { // the file is to be treated as an image, i.e. load the first frame only m_isFile = false; // in case of reload, the filename is taken from m_imageName, no need to change it if (m_imageName.Ptr() != filename) m_imageName = filename; m_preseek = 0; m_avail = false; play(); } // check if we should do multi-threading? if (!m_isImage && BLI_system_thread_count() > 1) { // never thread image: there are no frame to read ahead // no need to thread if the system has a single core m_isThreaded = true; } }
/* Spawn worker threads that prefetch movie clip frames around current_frame.
 *
 * One thread is reserved for the interface when more than one is available;
 * with a single thread the prefetch runs synchronously in the caller.
 * stop/do_update/progress are shared with the job system for cancellation
 * and progress reporting.
 *
 * Fix: the queue spinlock was initialized with BLI_spin_init() but never
 * released; BLI_spin_end() is now called once all workers are done. */
static void start_prefetch_threads(MovieClip *clip, int start_frame, int current_frame, int end_frame,
                                   short render_size, short render_flag, short *stop, short *do_update,
                                   float *progress)
{
	ListBase threads;
	PrefetchQueue queue;
	PrefetchThread *handles;
	int tot_thread = BLI_system_thread_count();
	int i;

	/* reserve one thread for the interface */
	if (tot_thread > 1)
		tot_thread--;

	/* initialize queue */
	BLI_spin_init(&queue.spin);

	queue.current_frame = current_frame;
	queue.initial_frame = current_frame;
	queue.start_frame = start_frame;
	queue.end_frame = end_frame;
	queue.render_size = render_size;
	queue.render_flag = render_flag;
	queue.direction = 1;

	queue.stop = stop;
	queue.do_update = do_update;
	queue.progress = progress;

	/* fill in thread handles */
	handles = MEM_callocN(sizeof(PrefetchThread) * tot_thread, "prefetch threaded handles");

	if (tot_thread > 1)
		BLI_init_threads(&threads, do_prefetch_thread, tot_thread);

	for (i = 0; i < tot_thread; i++) {
		PrefetchThread *handle = &handles[i];

		handle->clip = clip;
		handle->queue = &queue;

		if (tot_thread > 1)
			BLI_insert_thread(&threads, handle);
	}

	/* run the threads (BLI_end_threads joins them; single-threaded case
	 * simply runs the worker inline) */
	if (tot_thread > 1)
		BLI_end_threads(&threads);
	else
		do_prefetch_thread(handles);

	/* pairs with BLI_spin_init() above -- was previously leaked */
	BLI_spin_end(&queue.spin);

	MEM_freeN(handles);
}
/* Return the global task scheduler, creating it on first use.
 *
 * Creation is deferred so it happens after command line arguments
 * have been parsed (thread count overrides etc.). */
TaskScheduler *BLI_task_scheduler_get(void)
{
	if (task_scheduler != NULL) {
		return task_scheduler;
	}

	task_scheduler = BLI_task_scheduler_create(BLI_system_thread_count());
	return task_scheduler;
}
/* simple case for movies -- handle frame-by-frame, do threading within single frame */ static void do_movie_proxy(void *pjv, int *UNUSED(build_sizes), int UNUSED(build_count), int *build_undistort_sizes, int build_undistort_count, short *stop, short *do_update, float *progress) { ProxyJob *pj = pjv; Scene *scene = pj->scene; MovieClip *clip = pj->clip; struct MovieDistortion *distortion = NULL; int cfra, sfra = SFRA, efra = EFRA; if (pj->index_context) IMB_anim_index_rebuild(pj->index_context, stop, do_update, progress); if (!build_undistort_count) { if (*stop) pj->stop = 1; return; } else { sfra = 1; efra = clip->len; } if (build_undistort_count) { int threads = BLI_system_thread_count(); int width, height; BKE_movieclip_get_size(clip, NULL, &width, &height); distortion = BKE_tracking_distortion_new(&clip->tracking, width, height); BKE_tracking_distortion_set_threads(distortion, threads); } for (cfra = sfra; cfra <= efra; cfra++) { BKE_movieclip_build_proxy_frame(clip, pj->clip_flag, distortion, cfra, build_undistort_sizes, build_undistort_count, 1); if (*stop || G.is_break) break; *do_update = true; *progress = ((float) cfra - sfra) / (efra - sfra); } if (distortion) BKE_tracking_distortion_free(distortion); if (*stop) pj->stop = 1; }
/* Create a task scheduler running `num_threads` worker threads.
 *
 * Passing num_threads == 0 auto-detects the thread count via
 * BLI_system_thread_count(). The main thread participates in the work,
 * so one worker fewer than the requested total is launched; if that
 * leaves zero workers, a single background-only thread is created so
 * background tasks can still run. */
TaskScheduler *BLI_task_scheduler_create(int num_threads)
{
	TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");

	/* multiple places can use this task scheduler, sharing the same
	 * threads, so we keep track of the number of users. */
	scheduler->do_exit = false;

	BLI_listbase_clear(&scheduler->queue);
	BLI_mutex_init(&scheduler->queue_mutex);
	BLI_condition_init(&scheduler->queue_cond);

	if (num_threads == 0) {
		/* automatic number of threads will be main thread + num cores */
		num_threads = BLI_system_thread_count();
	}

	/* main thread will also work, so we count it too */
	num_threads -= 1;

	/* Add background-only thread if needed. */
	if (num_threads == 0) {
		scheduler->background_thread_only = true;
		num_threads = 1;
	}

	/* launch threads that will be waiting for work */
	if (num_threads > 0) {
		int i;

		scheduler->num_threads = num_threads;
		scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
		scheduler->task_threads = MEM_callocN(sizeof(TaskThread) * num_threads, "TaskScheduler task threads");

		for (i = 0; i < num_threads; i++) {
			TaskThread *thread = &scheduler->task_threads[i];
			thread->scheduler = scheduler;
			/* worker thread ids start at 1; presumably id 0 identifies the
			 * main thread -- confirm against thread-local id usage */
			thread->id = i + 1;

			/* NOTE(review): a failed pthread_create leaves this slot's
			 * pthread_t zeroed but still counted in num_threads. */
			if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
				fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
			}
		}

		/* one pool per worker plus one extra slot (worker ids are 1-based) */
		scheduler->task_mempool = MEM_callocN(sizeof(*scheduler->task_mempool) * (num_threads + 1),
		                                      "TaskScheduler task_mempool");
	}

	return scheduler;
}
/* Prepare a texture node tree for execution.
 *
 * Performs the shared exec-tree initialization, allocates one stack
 * ListBase per system thread, and flags every node for execution. */
bNodeTreeExec *ntreeTexBeginExecTree_internal(bNodeExecContext *context, bNodeTree *ntree, bNodeInstanceKey parent_key)
{
	bNodeTreeExec *exec;
	bNode *node;

	/* common base initialization */
	exec = ntree_exec_begin(context, ntree, parent_key);

	/* allocate the thread stack listbase array (one entry per thread) */
	exec->tot_thread = BLI_system_thread_count();
	exec->threadstack = MEM_callocN(exec->tot_thread * sizeof(ListBase), "thread stack array");

	/* mark all nodes as needing execution */
	node = exec->nodetree->nodes.first;
	while (node != NULL) {
		node->need_exec = 1;
		node = node->next;
	}

	return exec;
}
/* Determine the number of render threads to use.
 *
 * Priority order:
 *   1. command line override (-t), when positive;
 *   2. the scene's fixed thread count when R_FIXED_THREADS is set;
 *   3. the detected number of system threads.
 * Always returns at least 1. */
int BKE_render_num_threads(const RenderData *rd)
{
	/* override set from command line? */
	const int override_threads = BLI_system_num_threads_override_get();
	if (override_threads > 0) {
		return override_threads;
	}

	/* fixed number of threads specified in scene? */
	if (rd->mode & R_FIXED_THREADS) {
		return max_ii(rd->threads, 1);
	}

	return max_ii(BLI_system_thread_count(), 1);
}
/**
 * Execute this group's chunks: called for top execution groups (those
 * containing the compositor, preview or viewer node).
 *
 * Builds a chunk evaluation order (random, center-out, rule-of-thirds or
 * plain top-down depending on the viewer settings), then repeatedly
 * schedules chunks onto the WorkScheduler until all chunks are executed
 * or the node tree requests a break.
 *
 * Fix: the COM_TO_CENTER_OUT branch sorted only the first
 * m_numberOfChunks - 1 entries (std::sort end iterator was one element
 * short); it now sorts the full range, matching COM_TO_RULE_OF_THIRDS.
 */
void ExecutionGroup::execute(ExecutionSystem *graph)
{
	const CompositorContext &context = graph->getContext();
	const bNodeTree *bTree = context.getbNodeTree();
	if (this->m_width == 0 || this->m_height == 0) { return; }  /// @note: break out... no pixels to calculate.
	if (bTree->test_break && bTree->test_break(bTree->tbh)) { return; }  /// @note: early break out for blur and preview nodes
	if (this->m_numberOfChunks == 0) { return; }  /// @note: early break out

	unsigned int chunkNumber;

	this->m_executionStartTime = PIL_check_seconds_timer();

	this->m_chunksFinished = 0;
	this->m_bTree = bTree;
	unsigned int index;
	unsigned int *chunkOrder = (unsigned int *)MEM_mallocN(sizeof(unsigned int) * this->m_numberOfChunks, __func__);

	/* identity order by default (== COM_TO_TOP_DOWN) */
	for (chunkNumber = 0; chunkNumber < this->m_numberOfChunks; chunkNumber++) {
		chunkOrder[chunkNumber] = chunkNumber;
	}
	NodeOperation *operation = this->getOutputOperation();
	float centerX = 0.5;
	float centerY = 0.5;
	OrderOfChunks chunkorder = COM_ORDER_OF_CHUNKS_DEFAULT;

	if (operation->isViewerOperation()) {
		ViewerOperation *viewer = (ViewerOperation *)operation;
		centerX = viewer->getCenterX();
		centerY = viewer->getCenterY();
		chunkorder = viewer->getChunkOrder();
	}

	const int border_width = BLI_rcti_size_x(&this->m_viewerBorder);
	const int border_height = BLI_rcti_size_y(&this->m_viewerBorder);

	switch (chunkorder) {
		case COM_TO_RANDOM:
			/* shuffle by performing 2*n random swaps */
			for (index = 0; index < 2 * this->m_numberOfChunks; index++) {
				int index1 = rand() % this->m_numberOfChunks;
				int index2 = rand() % this->m_numberOfChunks;
				int s = chunkOrder[index1];
				chunkOrder[index1] = chunkOrder[index2];
				chunkOrder[index2] = s;
			}
			break;
		case COM_TO_CENTER_OUT:
		{
			/* order chunks by distance from the viewer center */
			ChunkOrderHotspot *hotspots[1];
			hotspots[0] = new ChunkOrderHotspot(border_width * centerX, border_height * centerY, 0.0f);
			rcti rect;
			ChunkOrder *chunkOrders = (ChunkOrder *)MEM_mallocN(sizeof(ChunkOrder) * this->m_numberOfChunks, __func__);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				determineChunkRect(&rect, index);
				chunkOrders[index].setChunkNumber(index);
				chunkOrders[index].setX(rect.xmin - this->m_viewerBorder.xmin);
				chunkOrders[index].setY(rect.ymin - this->m_viewerBorder.ymin);
				chunkOrders[index].determineDistance(hotspots, 1);
			}

			/* BUGFIX: end iterator must be one-past-last; previous code used
			 * m_numberOfChunks - 1, leaving the final chunk unsorted. */
			std::sort(&chunkOrders[0], &chunkOrders[this->m_numberOfChunks]);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				chunkOrder[index] = chunkOrders[index].getChunkNumber();
			}

			delete hotspots[0];
			MEM_freeN(chunkOrders);
			break;
		}
		case COM_TO_RULE_OF_THIRDS:
		{
			/* order chunks by distance to the nine rule-of-thirds hotspots */
			ChunkOrderHotspot *hotspots[9];
			unsigned int tx = border_width / 6;
			unsigned int ty = border_height / 6;
			unsigned int mx = border_width / 2;
			unsigned int my = border_height / 2;
			unsigned int bx = mx + 2 * tx;
			unsigned int by = my + 2 * ty;

			float addition = this->m_numberOfChunks / COM_RULE_OF_THIRDS_DIVIDER;
			hotspots[0] = new ChunkOrderHotspot(mx, my, addition * 0);
			hotspots[1] = new ChunkOrderHotspot(tx, my, addition * 1);
			hotspots[2] = new ChunkOrderHotspot(bx, my, addition * 2);
			hotspots[3] = new ChunkOrderHotspot(bx, by, addition * 3);
			hotspots[4] = new ChunkOrderHotspot(tx, ty, addition * 4);
			hotspots[5] = new ChunkOrderHotspot(bx, ty, addition * 5);
			hotspots[6] = new ChunkOrderHotspot(tx, by, addition * 6);
			hotspots[7] = new ChunkOrderHotspot(mx, ty, addition * 7);
			hotspots[8] = new ChunkOrderHotspot(mx, by, addition * 8);
			rcti rect;
			ChunkOrder *chunkOrders = (ChunkOrder *)MEM_mallocN(sizeof(ChunkOrder) * this->m_numberOfChunks, __func__);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				determineChunkRect(&rect, index);
				chunkOrders[index].setChunkNumber(index);
				chunkOrders[index].setX(rect.xmin - this->m_viewerBorder.xmin);
				chunkOrders[index].setY(rect.ymin - this->m_viewerBorder.ymin);
				chunkOrders[index].determineDistance(hotspots, 9);
			}

			std::sort(&chunkOrders[0], &chunkOrders[this->m_numberOfChunks]);

			for (index = 0; index < this->m_numberOfChunks; index++) {
				chunkOrder[index] = chunkOrders[index].getChunkNumber();
			}

			delete hotspots[0];
			delete hotspots[1];
			delete hotspots[2];
			delete hotspots[3];
			delete hotspots[4];
			delete hotspots[5];
			delete hotspots[6];
			delete hotspots[7];
			delete hotspots[8];
			MEM_freeN(chunkOrders);
			break;
		}
		case COM_TO_TOP_DOWN:
		default:
			break;
	}

	DebugInfo::execution_group_started(this);
	DebugInfo::graphviz(graph);

	bool breaked = false;
	bool finished = false;
	unsigned int startIndex = 0;
	/* don't flood the scheduler: at most 2 chunks in flight per thread */
	const int maxNumberEvaluated = BLI_system_thread_count() * 2;

	while (!finished && !breaked) {
		bool startEvaluated = false;
		finished = true;
		int numberEvaluated = 0;

		for (index = startIndex; index < this->m_numberOfChunks && numberEvaluated < maxNumberEvaluated; index++) {
			chunkNumber = chunkOrder[index];
			int yChunk = chunkNumber / this->m_numberOfXChunks;
			int xChunk = chunkNumber - (yChunk * this->m_numberOfXChunks);
			const ChunkExecutionState state = this->m_chunkExecutionStates[chunkNumber];
			if (state == COM_ES_NOT_SCHEDULED) {
				scheduleChunkWhenPossible(graph, xChunk, yChunk);
				finished = false;
				startEvaluated = true;
				numberEvaluated++;

				if (bTree->update_draw)
					bTree->update_draw(bTree->udh);
			}
			else if (state == COM_ES_SCHEDULED) {
				finished = false;
				startEvaluated = true;
				numberEvaluated++;
			}
			else if (state == COM_ES_EXECUTED && !startEvaluated) {
				/* skip the leading run of already-executed chunks next pass */
				startIndex = index + 1;
			}
		}

		WorkScheduler::finish();

		if (bTree->test_break && bTree->test_break(bTree->tbh)) {
			breaked = true;
		}
	}
	DebugInfo::execution_group_finished(this);
	DebugInfo::graphviz(graph);

	MEM_freeN(chunkOrder);
}
static void do_multires_bake(MultiresBakeRender *bkr, Image *ima, int require_tangent, MPassKnownData passKnownData, MInitBakeData initBakeData, MApplyBakeData applyBakeData, MFreeBakeData freeBakeData) { DerivedMesh *dm = bkr->lores_dm; const int lvl = bkr->lvl; const int tot_face = dm->getNumTessFaces(dm); if (tot_face > 0) { MultiresBakeThread *handles; MultiresBakeQueue queue; ImBuf *ibuf = BKE_image_acquire_ibuf(ima, NULL, NULL); MVert *mvert = dm->getVertArray(dm); MFace *mface = dm->getTessFaceArray(dm); MTFace *mtface = dm->getTessFaceDataArray(dm, CD_MTFACE); float *precomputed_normals = dm->getTessFaceDataArray(dm, CD_NORMAL); float *pvtangent = NULL; ListBase threads; int i, tot_thread = bkr->threads > 0 ? bkr->threads : BLI_system_thread_count(); void *bake_data = NULL; if (require_tangent) { if (CustomData_get_layer_index(&dm->faceData, CD_TANGENT) == -1) DM_add_tangent_layer(dm); pvtangent = DM_get_tessface_data_layer(dm, CD_TANGENT); } /* all threads shares the same custom bake data */ if (initBakeData) bake_data = initBakeData(bkr, ima); if (tot_thread > 1) BLI_init_threads(&threads, do_multires_bake_thread, tot_thread); handles = MEM_callocN(tot_thread * sizeof(MultiresBakeThread), "do_multires_bake handles"); /* faces queue */ queue.cur_face = 0; queue.tot_face = tot_face; BLI_spin_init(&queue.spin); /* fill in threads handles */ for (i = 0; i < tot_thread; i++) { MultiresBakeThread *handle = &handles[i]; handle->bkr = bkr; handle->image = ima; handle->queue = &queue; handle->data.mface = mface; handle->data.mvert = mvert; handle->data.mtface = mtface; handle->data.pvtangent = pvtangent; handle->data.precomputed_normals = precomputed_normals; /* don't strictly need this */ handle->data.w = ibuf->x; handle->data.h = ibuf->y; handle->data.lores_dm = dm; handle->data.hires_dm = bkr->hires_dm; handle->data.lvl = lvl; handle->data.pass_data = passKnownData; handle->data.bake_data = bake_data; handle->data.ibuf = ibuf; init_bake_rast(&handle->bake_rast, 
ibuf, &handle->data, flush_pixel); if (tot_thread > 1) BLI_insert_thread(&threads, handle); } /* run threads */ if (tot_thread > 1) BLI_end_threads(&threads); else do_multires_bake_thread(&handles[0]); BLI_spin_end(&queue.spin); /* finalize baking */ if (applyBakeData) applyBakeData(bake_data); if (freeBakeData) freeBakeData(bake_data); BKE_image_release_ibuf(ima, ibuf, NULL); } }
// open video capture device void VideoFFmpeg::openCam (char *file, short camIdx) { // open camera source AVInputFormat *inputFormat; AVDictionary *formatParams = NULL; char filename[28], rateStr[20]; #ifdef WIN32 // video capture on windows only through Video For Windows driver inputFormat = av_find_input_format("vfwcap"); if (!inputFormat) // Video For Windows not supported?? return; sprintf(filename, "%d", camIdx); #else // In Linux we support two types of devices: VideoForLinux and DV1394. // the user specify it with the filename: // [<device_type>][:<standard>] // <device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394. By default 'v4l' // <standard> : 'pal', 'secam' or 'ntsc'. By default 'ntsc' // The driver name is constructed automatically from the device type: // v4l : /dev/video<camIdx> // dv1394: /dev/dv1394/<camIdx> // If you have different driver name, you can specify the driver name explicitly // instead of device type. Examples of valid filename: // /dev/v4l/video0:pal // /dev/ieee1394/1:ntsc // dv1394:secam // v4l:pal char *p; if (file && strstr(file, "1394") != NULL) { // the user specifies a driver, check if it is v4l or d41394 inputFormat = av_find_input_format("dv1394"); sprintf(filename, "/dev/dv1394/%d", camIdx); } else { const char *formats[] = {"video4linux2,v4l2", "video4linux2", "video4linux"}; int i, formatsCount = sizeof(formats) / sizeof(char*); for (i = 0; i < formatsCount; i++) { inputFormat = av_find_input_format(formats[i]); if (inputFormat) break; } sprintf(filename, "/dev/video%d", camIdx); } if (!inputFormat) // these format should be supported, check ffmpeg compilation return; if (file && strncmp(file, "/dev", 4) == 0) { // user does not specify a driver strncpy(filename, file, sizeof(filename)); filename[sizeof(filename)-1] = 0; if ((p = strchr(filename, ':')) != 0) *p = 0; } if (file && (p = strchr(file, ':')) != NULL) { av_dict_set(&formatParams, "standard", p+1, 0); } #endif //frame rate if (m_captRate <= 0.f) 
m_captRate = defFrameRate; sprintf(rateStr, "%f", m_captRate); av_dict_set(&formatParams, "framerate", rateStr, 0); if (m_captWidth > 0 && m_captHeight > 0) { char video_size[64]; BLI_snprintf(video_size, sizeof(video_size), "%dx%d", m_captWidth, m_captHeight); av_dict_set(&formatParams, "video_size", video_size, 0); } if (openStream(filename, inputFormat, &formatParams) != 0) return; // for video capture it is important to do non blocking read m_formatCtx->flags |= AVFMT_FLAG_NONBLOCK; // open base class VideoBase::openCam(file, camIdx); // check if we should do multi-threading? if (BLI_system_thread_count() > 1) { // no need to thread if the system has a single core m_isThreaded = true; } av_dict_free(&formatParams); }
static void do_sequence_proxy(void *pjv, int *build_sizes, int build_count, int *build_undistort_sizes, int build_undistort_count, short *stop, short *do_update, float *progress) { ProxyJob *pj = pjv; MovieClip *clip = pj->clip; Scene *scene = pj->scene; int sfra = SFRA, efra = EFRA; ProxyThread *handles; ListBase threads; int i, tot_thread = BLI_system_thread_count(); ProxyQueue queue; BLI_spin_init(&queue.spin); queue.cfra = sfra; queue.sfra = sfra; queue.efra = efra; queue.stop = stop; queue.do_update = do_update; queue.progress = progress; handles = MEM_callocN(sizeof(ProxyThread) * tot_thread, "proxy threaded handles"); if (tot_thread > 1) BLI_init_threads(&threads, do_proxy_thread, tot_thread); for (i = 0; i < tot_thread; i++) { ProxyThread *handle = &handles[i]; handle->clip = clip; handle->queue = &queue; handle->build_count = build_count; handle->build_sizes = build_sizes; handle->build_undistort_count = build_undistort_count; handle->build_undistort_sizes = build_undistort_sizes; if (build_undistort_count) handle->distortion = BKE_tracking_distortion_new(); if (tot_thread > 1) BLI_insert_thread(&threads, handle); } if (tot_thread > 1) BLI_end_threads(&threads); else do_proxy_thread(handles); MEM_freeN(handles); if (build_undistort_count) { for (i = 0; i < tot_thread; i++) { ProxyThread *handle = &handles[i]; BKE_tracking_distortion_free(handle->distortion); } } }