int UnsharpMain::process_buffer(VFrame *frame,
	int64_t start_position,
	double frame_rate)
{
// One worker thread per CPU.
	const int total_cpus = get_project_smp() + 1;

// Refresh parameters from the EDL before rendering.
	load_configuration();

// Lazily construct the unsharp engine on first use.
	if(engine == 0)
		engine = new UnsharpEngine(this, total_cpus, total_cpus);

// NOTE(review): reads at get_source_position()/get_framerate() instead of
// the start_position/frame_rate arguments — presumably intentional; confirm.
	read_frame(frame, 0, get_source_position(), get_framerate());

// Sharpen the frame in place.
	engine->do_unsharp(frame);
	return 0;
}
BlurMain::~BlurMain()
{
// Tear down the per-CPU worker pool.  The count must match the allocation
// in process_buffer(), which also sized the array as get_project_smp() + 1.
	if(engine)
	{
		int total_engines = get_project_smp() + 1;
		for(int i = 0; i < total_engines; i++)
			delete engine[i];
		delete [] engine;
	}

// delete on a null pointer is a no-op, so the previous if(overlayer)
// guard was redundant and has been removed.
	delete overlayer;
}
void ThresholdMain::calculate_histogram(VFrame *frame)
{
// Build the histogram engine on first use, one worker per CPU.
	const int cpus = get_project_smp() + 1;
	if(engine == 0)
		engine = new HistogramEngine(cpus, cpus);

// Accumulate the histogram for this frame.
	engine->process_packages(frame);
}
// Render entry point for the TimeFront effect.  Reads the source frame,
// (re)builds the A8 gradient map when the configuration changed, and — for
// the ALPHA shape — derives the gradient from the frame's own alpha channel.
// NOTE(review): this chunk is truncated in the visible source; the body of
// the final "else if (OTHERTRACK)" branch (and the rest of the function)
// lies outside this view.
int TimeFrontMain::process_buffer(VFrame **frame, int64_t start_position, double frame_rate)
//int TimeFrontMain::process_realtime(VFrame *input_ptr, VFrame *output_ptr)
{
	VFrame **outframes = frame;
// Scratch list of input frames; only slot 0 is used in the visible code.
	VFrame *(framelist[1024]);
	framelist[0] = new VFrame (
		outframes[0]->get_w(),
		outframes[0]->get_h(),
		outframes[0]->get_color_model());
	read_frame(framelist[0], 0, start_position, frame_rate);
	this->input = framelist[0];
	this->output = outframes[0];
	need_reconfigure |= load_configuration();

	if (config.shape == TimeFrontConfig::OTHERTRACK)
	{
//		this->output = frame[1];
// OTHERTRACK requires exactly two shared buffers: the master track and
// the track supplying the time-front map.
		if (get_total_buffers() != 2)
		{
			// FIXME, maybe this should go to some other notification area?
			printf("ERROR: TimeFront plugin - If you are using another track for timefront, you have to have it under shared effects\n");
			return 0;
		}
// Both tracks must agree in size for a per-pixel lookup to make sense.
		if (outframes[0]->get_w() != outframes[1]->get_w() || outframes[0]->get_h() != outframes[1]->get_h())
		{
			printf("Sizes of master track and timefront track do not match\n");
			return 0;
		}
	}

// Generate new gradient
	if(need_reconfigure)
	{
		need_reconfigure = 0;
// The gradient is a single-channel (BC_A8) map the size of the output.
		if(!gradient)
			gradient = new VFrame(
				outframes[0]->get_w(),
				outframes[0]->get_h(),
				BC_A8);
// OTHERTRACK and ALPHA derive the map per-frame below, so only the
// geometric shapes are rendered by the threaded server here.
		if (config.shape != TimeFrontConfig::OTHERTRACK && config.shape != TimeFrontConfig::ALPHA)
		{
			if(!engine)
				engine = new TimeFrontServer(this, get_project_smp() + 1, get_project_smp() + 1);
			engine->process_packages();
		}
	}

	if (config.shape == TimeFrontConfig::ALPHA)
	{
		if(!gradient)
			gradient = new VFrame(
				outframes[0]->get_w(),
				outframes[0]->get_h(),
				BC_A8);
		VFrame *tfframe = framelist[0];
// Extract channel 3 (alpha) of a 4-component pixel into the gradient;
// the macro arguments are (type, components, max value, channel index).
		switch (tfframe->get_color_model())
		{
			case BC_YUVA8888:
			case BC_RGBA8888:
				GRADIENTFROMCHANNEL(unsigned char, 4, 255, 3);
				break;
			case BC_RGBA_FLOAT:
				GRADIENTFROMCHANNEL(float, 4, 1.0f, 3);
				break;
			default:
			{
// ALPHA shape is meaningless without an alpha channel in the project.
				printf("TimeFront plugin error: ALPHA used, but project color model does not have alpha\n");
				return 1;
				break;
			}
		}
	}
	else if (config.shape == TimeFrontConfig::OTHERTRACK)
// Render entry point for the Blur effect.  Optionally 2x-oversamples the
// frame for alpha keying, (re)builds a per-CPU pool of BlurEngine threads
// when the configuration changed, then blurs in two discrete passes
// (vertical, then horizontal) and downsamples back if oversampled.
int BlurMain::process_buffer(VFrame *frame, int64_t start_position, double frame_rate)
{
	int i;
	need_reconfigure |= load_configuration();
	read_frame(frame, 0, start_position, frame_rate, 0);

// Create temp based on alpha keying.
// Alpha keying needs 2x oversampling.
	if(config.a_key)
	{
// Work on a double-resolution copy so alpha edges survive the blur.
		PluginVClient::new_temp(frame->get_w() * 2,
			frame->get_h() * 2,
			frame->get_color_model());
		if(!overlayer)
		{
			overlayer = new OverlayFrame(PluginClient::get_project_smp() + 1);
		}
// Upsample the source frame into the temp buffer.
		overlayer->overlay(PluginVClient::get_temp(),
			frame,
			0,
			0,
			frame->get_w(),
			frame->get_h(),
			0,
			0,
			PluginVClient::get_temp()->get_w(),
			PluginVClient::get_temp()->get_h(),
			1,        // 0 - 1
			TRANSFER_REPLACE,
			NEAREST_NEIGHBOR);
		input_frame = PluginVClient::get_temp();
	}
	else
	{
		PluginVClient::new_temp(frame->get_w(),
			frame->get_h(),
			frame->get_color_model());
// No oversampling: blur the caller's frame in place.
		input_frame = frame;
	}

//printf("BlurMain::process_realtime 1 %d %d\n", need_reconfigure, config.radius);
	if(need_reconfigure)
	{
// Lazily start one BlurEngine thread per CPU.
		if(!engine)
		{
			engine = new BlurEngine*[(get_project_smp() + 1)];
			for(i = 0; i < (get_project_smp() + 1); i++)
			{
				engine[i] = new BlurEngine(this);
				engine[i]->start();
			}
		}
// Push the new radius into both filter direction tables of every engine.
		for(i = 0; i < (get_project_smp() + 1); i++)
		{
			engine[i]->reconfigure(&engine[i]->forward_constants, config.radius);
			engine[i]->reconfigure(&engine[i]->reverse_constants, config.radius);
		}
		need_reconfigure = 0;
	}

	if(config.radius < MIN_RADIUS ||
		(!config.vertical && !config.horizontal))
	{
// Data never processed
	}
	else
	{
// Process blur
// Need to blur vertically to a temp and
// horizontally to the output in 2 discrete passes.
// Each engine gets an equal horizontal and vertical strip of the frame.
		for(i = 0; i < get_project_smp() + 1; i++)
		{
			engine[i]->set_range(
				input_frame->get_h() * i / (get_project_smp() + 1),
				input_frame->get_h() * (i + 1) / (get_project_smp() + 1),
				input_frame->get_w() * i / (get_project_smp() + 1),
				input_frame->get_w() * (i + 1) / (get_project_smp() + 1));
		}

// Pass 1: vertical blur on all engines, then barrier.
		for(i = 0; i < (get_project_smp() + 1); i++)
		{
			engine[i]->do_horizontal = 0;
			engine[i]->start_process_frame(input_frame);
		}
		for(i = 0; i < (get_project_smp() + 1); i++)
		{
			engine[i]->wait_process_frame();
		}

// Pass 2: horizontal blur on all engines, then barrier.
		for(i = 0; i < (get_project_smp() + 1); i++)
		{
			engine[i]->do_horizontal = 1;
			engine[i]->start_process_frame(input_frame);
		}
		for(i = 0; i < (get_project_smp() + 1); i++)
		{
			engine[i]->wait_process_frame();
		}
	}

// Downsample
	if(config.a_key)
	{
// Scale the oversampled temp back down into the caller's frame.
		overlayer->overlay(frame,
			PluginVClient::get_temp(),
			0,
			0,
			PluginVClient::get_temp()->get_w(),
			PluginVClient::get_temp()->get_h(),
			0,
			0,
			frame->get_w(),
			frame->get_h(),
			1,        // 0 - 1
			TRANSFER_REPLACE,
			NEAREST_NEIGHBOR);
	}

	return 0;
}
int DissolveMain::process_realtime(VFrame *incoming, VFrame *outgoing) { fade = (float)PluginClient::get_source_position() / PluginClient::get_total_len(); // Use hardware if(get_use_opengl()) { run_opengl(); return 0; } // Use software if(!overlayer) overlayer = new OverlayFrame(get_project_smp() + 1); // There is a problem when dissolving from a big picture to a small picture. // In order to make it dissolve correctly, we have to manually decrese alpha of big picture. switch (outgoing->get_color_model()) { case BC_RGBA8888: case BC_YUVA8888: { uint8_t** data_rows = (uint8_t **)outgoing->get_rows(); int w = outgoing->get_w(); int h = outgoing->get_h(); for(int i = 0; i < h; i++) { uint8_t* alpha_chan = data_rows[i] + 3; for(int j = 0; j < w; j++) { *alpha_chan = (uint8_t) (*alpha_chan * (1-fade)); alpha_chan+=4; } } break; } case BC_YUVA16161616: { uint16_t** data_rows = (uint16_t **)outgoing->get_rows(); int w = outgoing->get_w(); int h = outgoing->get_h(); for(int i = 0; i < h; i++) { uint16_t* alpha_chan = data_rows[i] + 3; // 3 since this is uint16_t for(int j = 0; j < w; j++) { *alpha_chan = (uint16_t)(*alpha_chan * (1-fade)); alpha_chan += 8; } } break; } case BC_RGBA_FLOAT: { float** data_rows = (float **)outgoing->get_rows(); int w = outgoing->get_w(); int h = outgoing->get_h(); for(int i = 0; i < h; i++) { float* alpha_chan = data_rows[i] + 3; // 3 since this is floats for(int j = 0; j < w; j++) { *alpha_chan = *alpha_chan * (1-fade); alpha_chan += sizeof(float); } } break; } default: break; } overlayer->overlay(outgoing, incoming, 0, 0, incoming->get_w(), incoming->get_h(), 0, 0, incoming->get_w(), incoming->get_h(), fade, TRANSFER_NORMAL, NEAREST_NEIGHBOR); return 0; }
// Composites all input layers into one output layer, honoring the
// configured stacking direction and output layer, using OpenGL when
// available or the software OverlayFrame engine otherwise.
// Returns 0 on success.
int Overlay::process_buffer(VFrame **frame, int64_t start_position, double frame_rate)
{
	load_configuration();
// BUGFIX: removed leftover debug printf that wrote the current mode to
// stdout on every rendered frame.

// Scratch frame for reading each non-base layer.
	if(!temp)
		temp = new VFrame(0,
			-1,
			frame[0]->get_w(),
			frame[0]->get_h(),
			frame[0]->get_color_model(),
			-1);

	if(!overlayer)
		overlayer = new OverlayFrame(get_project_smp() + 1);

	int step;
	VFrame *output;

// Choose iteration order over the stacked tracks.
	if(config.direction == OverlayConfig::BOTTOM_FIRST)
	{
		input_layer1 = get_total_buffers() - 1;
		input_layer2 = -1;
		step = -1;
	}
	else
	{
		input_layer1 = 0;
		input_layer2 = get_total_buffers();
		step = 1;
	}

// Choose which buffer receives the composite.
	if(config.output_layer == OverlayConfig::TOP)
	{
		output_layer = 0;
	}
	else
	{
		output_layer = get_total_buffers() - 1;
	}

// Direct copy the first layer
	output = frame[output_layer];
	read_frame(output,
		input_layer1,
		start_position,
		frame_rate,
		get_use_opengl());

// Single track: nothing to composite.
	if(get_total_buffers() == 1)
		return 0;

	current_layer = input_layer1;
	if(get_use_opengl())
		run_opengl();

// Composite the remaining layers onto the output, one at a time.
	for(int i = input_layer1 + step; i != input_layer2; i += step)
	{
		read_frame(temp,
			i,
			start_position,
			frame_rate,
			get_use_opengl());

// Call the opengl handler once for each layer
		if(get_use_opengl())
		{
			current_layer = i;
			run_opengl();
		}
		else
		{
			overlayer->overlay(output,
				temp,
				0,
				0,
				output->get_w(),
				output->get_h(),
				0,
				0,
				output->get_w(),
				output->get_h(),
				1,
				config.mode,
				NEAREST_NEIGHBOR);
		}
	}

	return 0;
}