/**
 * transform_filter_video: performs the transformation of frames
 * See tcmodule-data.h for function details.
 */
static int transform_filter_video(TCModuleInstance *self, vframe_list_t *frame)
{
    FilterData *fd = NULL;

    TC_MODULE_SELF_CHECK(self, "filter_video");
    TC_MODULE_SELF_CHECK(frame, "filter_video");

    fd = self->userdata;

    VSFrame vsFrame;
    vsFrameFillFromBuffer(&vsFrame, frame->video_buf,
                          vsTransformGetSrcFrameInfo(&fd->td));

    vsTransformPrepare(&fd->td, &vsFrame, &vsFrame);
    VSTransform t = vsGetNextTransform(&fd->td, &fd->trans);
    vsDoTransform(&fd->td, t);
    vsTransformFinish(&fd->td);

    return TC_OK;
}
static int apply_results( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
{
    int error = 0;
    mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
    vs_data* data = (vs_data*)filter->child;

    if ( check_apply_config( filter, frame ) || mlt_properties_get_int( properties, "reload" ) )
    {
        mlt_properties_set_int( properties, "reload", 0 );
        destory_apply_data( data->apply_data );
        data->apply_data = NULL;
    }

    // Init transform data if necessary (first time)
    if ( !data->apply_data )
    {
        init_apply_data( filter, frame, vs_format, width, height );
    }

    if ( data->apply_data )
    {
        // Apply transformations to this image
        VSTransformData* td = &data->apply_data->td;
        VSTransformations* trans = &data->apply_data->trans;
        VSFrame vsFrame;
        vsFrameFillFromBuffer( &vsFrame, vs_image, vsTransformGetSrcFrameInfo( td ) );

        trans->current = mlt_filter_get_position( filter, frame );
        vsTransformPrepare( td, &vsFrame, &vsFrame );
        VSTransform t = vsGetNextTransform( td, trans );
        vsDoTransform( td, t );
        vsTransformFinish( td );
    }

    return error;
}
void PipelineStabTransform::onInput(InputImageInfo info, Magick::Image image) {
    try {
        if (!initialized) {
            init(image);
        }

        if (image.rows() != height || image.columns() != width) {
            throw runtime_error(QString("Non-uniform image size: %1").arg(info.file.fileName()).toStdString());
        }
        Q_ASSERT(image.baseColumns() == width && image.baseRows() == height);

        // set raw RGB output format & convert it into a Blob
        Magick::Blob blob;
        if (image.depth() > 8)
            *err << "Warning: some information is lost by converting to 8 bit depth (was " << image.depth() << " bit)" << endl;
        image.depth(8);
        image.magick("RGB");
        image.write(&blob);
        Q_ASSERT(blob.length() == image.baseColumns() * image.baseRows() * 3);

        // inframe
        VSFrame inframe;
        size_t dataLen = blob.length();
        inframe.data[0] = (uint8_t*) blob.data();
        inframe.linesize[0] = image.baseColumns() * 3; // TODO: is this correct?

        // outframe
        uint8_t* data = new uint8_t[dataLen];
        //memcpy(data, blob.data(), dataLen);
        VSFrame outframe;
        outframe.data[0] = data;
        outframe.linesize[0] = image.baseColumns() * 3; // TODO: is this correct?

        if (vsTransformPrepare(&td, &inframe, &outframe) != VS_OK) {
            throw runtime_error("Failed to prepare transform");
        }
        Q_ASSERT(vsTransformGetSrcFrameInfo(&td)->planes == 1);
        vsDoTransform(&td, vsGetNextTransform(&td, &trans));
        vsTransformFinish(&td);

        Magick::Geometry g(width, height);
        Magick::Blob oblob(data, dataLen);
        Magick::Image oimage;
        oimage.size(g);
        oimage.depth(8);
        oimage.magick("RGB");
        oimage.read(oblob);
        delete[] data;

        info.luminance = -1;
        emit input(info, oimage);
    } catch (exception &e) {
        emit error(e.what());
    }
}
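/* The three functions above all follow the same libvidstab apply sequence:
 * fill a VSFrame from a raw buffer, vsTransformPrepare(), vsGetNextTransform(),
 * vsDoTransform(), vsTransformFinish(). The following is a minimal standalone
 * sketch of that sequence for in-memory YUV420P buffers, assuming the
 * detection pass has already written a local-motions file. apply_stabilization()
 * and the frame-buffer layout are illustrative assumptions, not code from any
 * of the projects quoted here. */
#include <stdio.h>
#include <stdint.h>
#include <vid.stab/libvidstab.h>

static int apply_stabilization(const char *motions_path,
                               uint8_t **frames, int num_frames,
                               int width, int height)
{
    VSFrameInfo fi;
    vsFrameInfoInit(&fi, width, height, PF_YUV420P);

    VSTransformConfig conf = vsTransformGetDefaultConfig("apply-sketch");
    VSTransformData td;
    if (vsTransformDataInit(&td, &conf, &fi, &fi) != VS_OK)
        return -1;

    VSTransformations trans;
    vsTransformationsInit(&trans);

    /* Load the per-frame local motions and turn them into smoothed transforms. */
    FILE *f = fopen(motions_path, "r");
    if (!f)
        return -1;
    VSManyLocalMotions mlms;
    if (vsReadLocalMotionsFile(f, &mlms) != VS_OK ||
        vsLocalmotions2Transforms(&td, &mlms, &trans) != VS_OK ||
        vsPreprocessTransforms(&td, &trans) != VS_OK) {
        fclose(f);
        return -1;
    }
    fclose(f);

    /* Per-frame sequence: fill, prepare, fetch next transform, apply, finish. */
    for (int i = 0; i < num_frames; i++) {
        VSFrame vsFrame;
        vsFrameFillFromBuffer(&vsFrame, frames[i], vsTransformGetSrcFrameInfo(&td));
        vsTransformPrepare(&td, &vsFrame, &vsFrame);
        VSTransform t = vsGetNextTransform(&td, &trans);
        vsDoTransform(&td, t);
        vsTransformFinish(&td);
    }

    vsTransformationsCleanup(&trans);
    vsTransformDataCleanup(&td);
    return 0;
}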
static int get_image(mlt_frame frame, uint8_t **image, mlt_image_format *format,
                     int *width, int *height, int writable)
{
    mlt_filter filter = (mlt_filter) mlt_frame_pop_service(frame);
    mlt_properties properties = MLT_FILTER_PROPERTIES(filter);
    *format = mlt_image_yuv420p;
    DeshakeData *data = static_cast<DeshakeData*>(filter->child);

    int error = mlt_frame_get_image(frame, image, format, width, height, 1);
    if (!error)
    {
        // Service locks are for concurrency control
        mlt_service_lock(MLT_FILTER_SERVICE(filter));

        // Handle signal from app to re-init data
        if (mlt_properties_get_int(properties, "refresh"))
        {
            mlt_properties_set(properties, "refresh", NULL);
            clear_deshake(data);
            data->initialized = false;
        }

        // Clear deshake data when seeking or dropping frames
        mlt_position pos = mlt_filter_get_position(filter, frame);
        if (pos != data->lastFrame + 1)
        {
            clear_deshake(data);
            data->initialized = false;
        }
        data->lastFrame = pos;

        if (!data->initialized)
        {
            char *interps = mlt_properties_get(MLT_FRAME_PROPERTIES(frame), "rescale.interp");
            init_deshake(data, properties, format, width, height, interps);
            data->initialized = true;
        }

        VSMotionDetect* md = &data->md;
        VSTransformData* td = &data->td;
        LocalMotions localmotions;
        VSTransform motion;
        VSFrame vsFrame;

        // Detect motion on the current frame and reduce it to a single transform
        vsFrameFillFromBuffer(&vsFrame, *image, &md->fi);
        vsMotionDetection(md, &localmotions, &vsFrame);
        motion = vsSimpleMotionsToTransform(md->fi, FILTER_NAME, &localmotions);
        vs_vector_del(&localmotions);

        // Low-pass the detected motion and apply the compensating transform in place
        vsTransformPrepare(td, &vsFrame, &vsFrame);
        VSTransform t = vsLowPassTransforms(td, &data->avg, &motion);
        // mlt_log_warning(filter, "Trans: det: %f %f %f \n\t\t act: %f %f %f %f",
        //                 motion.x, motion.y, motion.alpha,
        //                 t.x, t.y, t.alpha, t.zoom);
        vsDoTransform(td, t);
        vsTransformFinish(td);

        mlt_service_unlock(MLT_FILTER_SERVICE(filter));
    }

    return error;
}
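/* Unlike the apply-only filters, get_image() above runs detection and
 * compensation in a single pass on each frame. The sketch below isolates that
 * pattern outside of MLT. It is a sketch under assumptions: deshake_init(),
 * deshake_frame(), the DeshakeState struct, the PF_YUV420P buffer layout, and
 * zero-initializing the VSSlidingAvgTrans state are illustrative choices, not
 * taken from the project above. */
#include <string.h>
#include <stdint.h>
#include <vid.stab/libvidstab.h>

typedef struct {
    VSMotionDetect md;
    VSTransformData td;
    VSSlidingAvgTrans avg;   /* sliding-average state consumed by vsLowPassTransforms() */
} DeshakeState;

static int deshake_init(DeshakeState *s, int width, int height)
{
    VSFrameInfo fi;
    vsFrameInfoInit(&fi, width, height, PF_YUV420P);

    VSMotionDetectConfig mconf = vsMotionDetectGetDefaultConfig("deshake-sketch");
    VSTransformConfig tconf = vsTransformGetDefaultConfig("deshake-sketch");
    if (vsMotionDetectInit(&s->md, &mconf, &fi) != VS_OK ||
        vsTransformDataInit(&s->td, &tconf, &fi, &fi) != VS_OK)
        return -1;

    memset(&s->avg, 0, sizeof(s->avg));  /* assumption: zeroed state is a valid start */
    return 0;
}

/* Detect motion on one frame, low-pass it against the running average, and
 * apply the compensating transform in place on the same buffer. */
static int deshake_frame(DeshakeState *s, uint8_t *buffer)
{
    VSFrame vsFrame;
    LocalMotions localmotions;

    vsFrameFillFromBuffer(&vsFrame, buffer, &s->md.fi);
    vsMotionDetection(&s->md, &localmotions, &vsFrame);

    VSTransform motion = vsSimpleMotionsToTransform(s->md.fi, "deshake-sketch", &localmotions);
    vs_vector_del(&localmotions);

    vsTransformPrepare(&s->td, &vsFrame, &vsFrame);
    VSTransform t = vsLowPassTransforms(&s->td, &s->avg, &motion);
    vsDoTransform(&s->td, t);
    return vsTransformFinish(&s->td);
}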