/**
 * deshake_init: Initialize this instance of the module.
 *
 * Allocates the zeroed private data block, looks up the global vob and
 * attaches the block to self->userdata.
 *
 * Returns TC_OK on success, TC_ERROR on allocation or vob lookup failure.
 */
static int deshake_init(TCModuleInstance *self, uint32_t features)
{
    DeshakeData* sd = NULL;

    TC_MODULE_SELF_CHECK(self, "init");
    TC_MODULE_INIT_CHECK(self, MOD_FEATURES, features);

    sd = tc_zalloc(sizeof(DeshakeData)); // allocation with zero values
    if (!sd) {
        if (verbose > TC_INFO)
            tc_log_error(MOD_NAME, "init: out of memory!");
        return TC_ERROR;
    }

    sd->vob = tc_get_vob();
    if (!sd->vob) {
        tc_free(sd);  /* FIX: don't leak the private data block on failure */
        return TC_ERROR;
    }

    /**** Initialise private data structure */

    self->userdata = sd;

    if (verbose & TC_INFO){
        tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);
    }

    return TC_OK;
}
/*
 * dispatch_list_item: TCList visitor that appends one file entry to the
 * pvm_config_filelist chain carried in userdata (struct dispatch_data).
 *
 * Returns 0 on success, 1 (nonzero) on allocation failure, so the list
 * walk can be aborted.
 */
static int dispatch_list_item(TCListItem *item, void *userdata)
{
    struct dispatch_data *DD = userdata;
    int ret = 0;
    pvm_config_filelist *cur = tc_zalloc(sizeof(pvm_config_filelist));

    if (!cur) {
        ret = 1;
    } else {
        cur->s_type        = DD->type;
        cur->p_codec       = DD->codec;       // softref
        cur->p_destination = DD->destination; // softref
        cur->p_filename    = item->data;      // hardref

        if (!DD->head) {
            DD->head = cur;
            DD->tail = cur;
        } else {
            DD->tail->p_next = cur;
            /* FIX: advance the tail to the new node.  The original did
             * `cur = DD->tail->p_next;` (a no-op), so DD->tail was never
             * updated and every later append overwrote tail->p_next,
             * leaking all but the last appended node.  Compare the
             * correct pattern in dispatch_list(). */
            DD->tail = cur;
        }
    }
    return ret;
}
/*
 * dispatch_list: build a pvm_config_filelist chain from a TCConfigList,
 * tagging every node with the given type/codec/destination.
 *
 * src:         source configuration list (may be NULL -> empty result).
 * ret:         if non-NULL, receives the tail of the built chain.
 *
 * Returns the head of the new chain, or NULL if src was empty or an
 * allocation failed (in which case any partially built chain is freed
 * and *ret is set to NULL).
 */
static pvm_config_filelist *dispatch_list(TCConfigList *src, int type,
                                          char *codec, char *destination,
                                          pvm_config_filelist **ret)
{
    pvm_config_filelist *head = NULL, *tail = NULL, *item = NULL;

    for (; src != NULL; src = src->next) {
        item = tc_zalloc(sizeof(pvm_config_filelist));
        if (!item) {
            /* FIX: was unchecked (marked XXX) and dereferenced NULL on
             * OOM.  Free the nodes built so far (the referenced strings
             * are owned elsewhere) and report failure. */
            while (head != NULL) {
                pvm_config_filelist *next = head->p_next;
                tc_free(head);
                head = next;
            }
            if (ret) {
                *ret = NULL;
            }
            return NULL;
        }
        item->s_type        = type;
        item->p_codec       = codec;       // softref
        item->p_destination = destination; // softref
        item->p_filename    = src->value;  // hardref

        if (!head) {
            head = item;
            tail = item;
        } else {
            tail->p_next = item;
            tail = tail->p_next;
        }
    }
    if (ret) {
        *ret = tail;
    }
    return head;
}
/* Allocate a zeroed AVIData wrapper for AVI and initialise it.
 * Returns NULL when the allocation fails. */
static AVIData *new_avi_data(avi_t *AVI)
{
    AVIData *data = tc_zalloc(sizeof(AVIData));
    if (!data)
        return NULL;
    init_avi_data(data, AVI);
    return data;
}
/**
 * lowpass_configure: configure the FIR low/high-pass audio filter.
 *
 * Only 16-bit samples are supported.  The "taps" option sets the number
 * of filter taps; a negative value selects high-pass mode (with |taps|
 * taps).  Two tap-history arrays (left/right) are allocated here.
 *
 * Returns TC_OK on success, TC_ERROR on unsupported sample format or
 * allocation failure.
 */
static int lowpass_configure(TCModuleInstance *self,
                             const char *options,
                             TCJob *vob, TCModuleExtraData *xdata[])
{
    LowPassPrivateData *pd = NULL;

    TC_MODULE_SELF_CHECK(self, "configure");

    pd = self->userdata;

    if (vob->a_bits != 16) {
        tc_log_error(MOD_NAME, "This filter only supports 16 bit samples");
        return TC_ERROR;
    }

    pd->taps     = 30;
    pd->highpass = 0;
    pd->p        = 0;
    pd->is_mono  = (vob->a_chan == 1);

    if (options != NULL) {
        optstr_get(options, "taps", "%i", &pd->taps);
    }
    if (pd->taps < 0) {
        /* negative taps means "high-pass with that many taps" */
        pd->taps = -pd->taps;
        pd->highpass = 1;
    }

    pd->array_r = tc_zalloc(pd->taps * sizeof(int16_t));
    pd->array_l = tc_zalloc(pd->taps * sizeof(int16_t));
    if (!pd->array_r || !pd->array_l) {
        /* FIX: if only one allocation succeeded it was leaked, and a
         * stale pointer was left behind in pd.  Release both and reset
         * the pointers so later cleanup cannot double-free. */
        if (pd->array_r) {
            tc_free(pd->array_r);
            pd->array_r = NULL;
        }
        if (pd->array_l) {
            tc_free(pd->array_l);
            pd->array_l = NULL;
        }
        tc_log_error(MOD_NAME, "configure: out of memory!");
        return TC_ERROR;
    }

    if (verbose) {
        tc_log_info(MOD_NAME, "taps = %i (%spass)",
                    pd->taps, (pd->highpass) ?"high" :"low");
    }
    return TC_OK;
}
/**
 * fields_configure: set up the field-manipulation filter for this job.
 *
 * Parses the "flip", "shift" and "flip_first" options into pd->field_ops,
 * allocates the blank scratch frame buffer and records whether the input
 * is RGB.
 *
 * Returns TC_OK on success, TC_ERROR on allocation failure or when no
 * operation was requested.
 */
static int fields_configure(TCModuleInstance *self,
                            const char *options,
                            TCJob *vob, TCModuleExtraData *xdata[])
{
    FieldsPrivateData *pd = NULL;

    TC_MODULE_SELF_CHECK(self, "configure");

    pd = self->userdata;

    // Some of the data in buffer may get used for half of the first frame (when
    // shifting) so make sure it's blank to start with.
    pd->buffer = tc_zalloc(SIZE_RGB_FRAME);
    if (!pd->buffer) {
        tc_log_error(MOD_NAME, "Unable to allocate memory. Aborting.");
        return TC_ERROR;
    }

    if (options != NULL) {
        if (optstr_lookup(options, "flip"))
            pd->field_ops |= FIELD_OP_FLIP;
        if (optstr_lookup(options, "shift"))
            pd->field_ops |= FIELD_OP_SHIFT;
        if (optstr_lookup(options, "flip_first"))
            pd->field_ops |= FIELD_OP_REVERSE;
    }

    // FIELD_OP_REVERSE (aka flip_first) only makes sense if we're doing
    // both operations. If we're not, unset it.
    if (pd->field_ops != FIELD_OP_FLIPSHIFT)
        pd->field_ops &= ~FIELD_OP_REVERSE;

    if (verbose) {
        if (pd->field_ops & FIELD_OP_SHIFT)
            tc_log_info(MOD_NAME, "Adjusting frame positions (shift)");
        if (pd->field_ops & FIELD_OP_FLIP)
            tc_log_info(MOD_NAME, "Transposing input fields (flip)");
        if (pd->field_ops & FIELD_OP_REVERSE)
            tc_log_info(MOD_NAME, "Flipping will occur before shifting (flip_first)");
    }

    if (!pd->field_ops) {
        tc_log_warn(MOD_NAME, "No operations specified to perform.");
        return TC_ERROR;
    }

    /* FIX: the original read `if (vob->im_v_codec == TC_CODEC_RGB24);`
     * — the stray semicolon made the assignment unconditional, so
     * rgb_mode was TC_TRUE even for YUV input. */
    if (vob->im_v_codec == TC_CODEC_RGB24)
        pd->rgb_mode = TC_TRUE;

    if (verbose)
        tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

    return TC_OK;
}
/* Obtain a list item for L: pop one from the free-item cache when the
 * cache is enabled and non-empty, otherwise allocate a zeroed one.
 * Returns NULL only when allocation fails. */
static TCListItem *new_item(TCList *L)
{
    TCListItem *it;

    if (L->use_cache && L->cache != NULL) {
        it = L->cache;
        L->cache = it->next;
    } else {
        it = tc_zalloc(sizeof(TCListItem));
    }
    return it;
}
static struct clist *gen_contrib(int oldsize, int newsize, int stride, double (*filter)(double), double fwidth) { struct clist *contrib; double scale = (double)newsize / (double)oldsize; double new_fwidth, fscale; int i, j; contrib = tc_zalloc(newsize * sizeof(struct clist)); if (scale < 1.0) { fscale = 1.0 / scale; } else { fscale = 1.0; } new_fwidth = fwidth * fscale; for (i = 0; i < newsize; ++i) { double center = (double) i / scale; int left = ceil(center - new_fwidth); int right = floor(center + new_fwidth); contrib[i].n = 0; contrib[i].list = tc_zalloc((right-left+1) * sizeof(struct contrib)); for (j = left; j <= right; ++j) { int k, n; double weight = center - (double) j; weight = (*filter)(weight / fscale) / fscale; if (j < 0) { n = -j; } else if (j >= oldsize) { n = (oldsize - j) + oldsize - 1; } else { n = j; } k = contrib[i].n++; contrib[i].list[k].pixel = n*stride; contrib[i].list[k].weight = weight; } } return contrib; }
/* Set up the "adjust" A/V synchronizer on sy.  Only an audio master
 * source is supported.  Allocates the context and a spare video frame,
 * then wires the method callbacks into sy.
 * Returns TC_OK on success, TC_ERROR on failure. */
static int tc_sync_adjust_init(TCSynchronizer *sy, vob_t *vob, int master)
{
    AdjustContext *ctx;

    if (master != TC_AUDIO) {
        /* can't yet use method_name */
        tc_log_error(__FILE__, "(adjust) only audio master source supported yet");
        return TC_ERROR;
    }

    ctx = tc_zalloc(sizeof(AdjustContext));
    if (ctx == NULL)
        return TC_ERROR;

    ctx->saved = tc_new_video_frame(vob->im_v_width, vob->im_v_height,
                                    vob->im_v_codec, 0);
    if (ctx->saved == NULL) {
        tc_free(ctx);
        return TC_ERROR;
    }

    ctx->method_name     = "adjust";
    ctx->op              = AdjustNone;
    ctx->frames_margin   = vob->resync_frame_margin;
    ctx->frames_interval = vob->resync_frame_interval;

    sy->method_name = ctx->method_name; /* let's recycle some bytes */
    sy->privdata    = ctx;
    sy->audio_shift = vob->sync;
    sy->verbose     = vob->verbose;
    sy->get_video   = tc_sync_adjust_get_video;
    sy->get_audio   = tc_sync_adjust_get_audio;
    sy->fini        = tc_sync_adjust_fini;

    tc_log_info(__FILE__, "(%s) resync frames: interval=%i/margin=%i",
                sy->method_name, ctx->frames_interval, ctx->frames_margin);
    return TC_OK;
}
/**
 * transform_init: Initialize this instance of the module.  See
 * tcmodule-data.h for function details.
 *
 * Allocates the zeroed FilterData block and stores it in self->userdata.
 * Returns TC_OK on success, TC_ERROR when out of memory.
 */
static int transform_init(TCModuleInstance *self, uint32_t features)
{
    FilterData *fd;

    TC_MODULE_SELF_CHECK(self, "init");
    TC_MODULE_INIT_CHECK(self, MOD_FEATURES, features);

    setLogFunctions();

    fd = tc_zalloc(sizeof(FilterData));
    if (fd == NULL) {
        tc_log_error(MOD_NAME, "init: out of memory!");
        return TC_ERROR;
    }
    self->userdata = fd;

    if (verbose)
        tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

    return TC_OK;
}
/* vag_init: allocate the module's private data, preset the block size
 * to the stereo default, and attach it to self->userdata.
 * Returns TC_OK on success, TC_ERROR when out of memory. */
static int vag_init(TCModuleInstance *self, uint32_t features)
{
    PrivateData *pd;

    TC_MODULE_SELF_CHECK(self, "init");
    TC_MODULE_INIT_CHECK(self, MOD_FEATURES, features);

    pd = tc_zalloc(sizeof(PrivateData));
    if (pd == NULL) {
        tc_log_error(MOD_NAME, "init: out of memory!");
        return TC_ERROR;
    }
    pd->blocksize = DEF_STEREO_BLOCK;
    self->userdata = pd;

    if (verbose)
        tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

    return TC_OK;
}
/*
 * probe_nuv: probe a NuppelVideo stream by reading its file header and
 * filling ipipe->probe_info (dimensions, fps, fixed 44.1kHz/16bit/stereo
 * audio track, NUV magic/codec).  Sets ipipe->error = 1 on failure.
 */
void probe_nuv(info_t *ipipe)
{
    int bytes;
    struct rtfileheader *rtf;

    if((rtf = tc_zalloc(sizeof(rtfileheader)))==NULL) {
        tc_log_error(__FILE__, "out of memory");
        ipipe->error=1;
        return;
    }

    // read min frame (NTSC)
    if((bytes=tc_pread(ipipe->fd_in, (uint8_t*) rtf,
                       sizeof(rtfileheader))) != sizeof(rtfileheader)) {
        tc_log_error(__FILE__, "end of stream");
        ipipe->error=1;
        free(rtf);  /* FIX: header buffer was leaked on this error path */
        return;
    }

    ipipe->probe_info->width  = rtf->width;
    ipipe->probe_info->height = rtf->height;
    ipipe->probe_info->fps    = rtf->fps;

    /* NUV audio parameters are fixed: 44.1 kHz, stereo, 16 bit PCM */
    ipipe->probe_info->track[0].samplerate = 44100;
    ipipe->probe_info->track[0].chan = 2;
    ipipe->probe_info->track[0].bits = 16;
    ipipe->probe_info->track[0].format = 0x1;

    ipipe->probe_info->magic = TC_MAGIC_NUV;
    ipipe->probe_info->codec = TC_CODEC_NUV;

    if(ipipe->probe_info->track[0].chan>0)
        ipipe->probe_info->num_tracks=1;

    free(rtf);
    return;
}
/*
 * dispatch_node: prepend one enabled PVM node to env->p_pvm_hosts.
 *
 * This inserts nodes in reverse order, so the node defined last in the
 * configuration file ends up first on the list, but nobody really cares
 * about that.
 *
 * Returns 1 when a node was linked in, 0 otherwise (disabled node,
 * missing arguments, or allocation failure).
 */
static int dispatch_node(int id, PVMNodeData *data, pvm_config_env *env)
{
    pvm_config_hosts *host;

    if (env == NULL || data == NULL || !data->enabled)
        return 0;

    host = tc_zalloc(sizeof(pvm_config_hosts));
    if (host == NULL)
        return 0;

    /* fill */
    host->p_hostname = data->hostname;
    host->s_nproc    = data->maxprocs;
    /* link */
    host->p_next = env->p_pvm_hosts;
    env->p_pvm_hosts = host;
    return 1;
}
// main filter routine int tc_filter(frame_list_t *ptr_, char *options) { vframe_list_t *ptr = (vframe_list_t *)ptr_; static vob_t *vob=NULL; /* FIXME: these use the filter ID as an index--the ID can grow * arbitrarily large, so this needs to be fixed */ static MyFilterData *mfd[100]; static char *buffer[100]; int instance = ptr->filter_id; if(ptr->tag & TC_AUDIO) return 0; if(ptr->tag & TC_FILTER_GET_CONFIG) { char buf[128]; optstr_filter_desc (options, MOD_NAME, MOD_CAP, MOD_VERSION, MOD_AUTHOR, "VYMOE", "2"); tc_snprintf(buf, 128, "%f", PARAM1_DEFAULT); optstr_param (options, "luma", "spatial luma strength", "%f", buf, "0.0", "100.0" ); tc_snprintf(buf, 128, "%f", PARAM2_DEFAULT); optstr_param (options, "chroma", "spatial chroma strength", "%f", buf, "0.0", "100.0" ); tc_snprintf(buf, 128, "%f", PARAM3_DEFAULT); optstr_param (options, "luma_strength", "temporal luma strength", "%f", buf, "0.0", "100.0" ); tc_snprintf(buf, 128, "%f", PARAM3_DEFAULT*PARAM2_DEFAULT/PARAM1_DEFAULT); optstr_param (options, "chroma_strength", "temporal chroma strength", "%f", buf, "0.0", "100.0" ); tc_snprintf(buf, 128, "%d", mfd[instance]->pre); optstr_param (options, "pre", "run as a pre filter", "%d", buf, "0", "1" ); return 0; } if(ptr->tag & TC_FILTER_INIT) { double LumSpac, LumTmp, ChromSpac, ChromTmp; double Param1=0.0, Param2=0.0, Param3=0.0, Param4=0.0; if((vob = tc_get_vob())==NULL) return(-1); if (vob->im_v_codec != TC_CODEC_YUV420P) { tc_log_error(MOD_NAME, "This filter is only capable of YUV 4:2:0 mode"); return -1; } mfd[instance] = tc_zalloc(sizeof(MyFilterData)); if (mfd[instance]) { mfd[instance]->Line = tc_zalloc(TC_MAX_V_FRAME_WIDTH*sizeof(int)); } buffer[instance] = tc_zalloc(SIZE_RGB_FRAME); if (!mfd[instance] || !mfd[instance]->Line || !buffer[instance]) { tc_log_error(MOD_NAME, "Malloc failed"); return -1; } // defaults LumSpac = PARAM1_DEFAULT; LumTmp = PARAM3_DEFAULT; ChromSpac = PARAM2_DEFAULT; ChromTmp = LumTmp * ChromSpac / LumSpac; if (options) { if 
(optstr_lookup (options, "help")) { help_optstr(); } optstr_get (options, "luma", "%lf", &Param1); optstr_get (options, "luma_strength", "%lf", &Param3); optstr_get (options, "chroma", "%lf", &Param2); optstr_get (options, "chroma_strength","%lf", &Param4); optstr_get (options, "pre", "%d", &mfd[instance]->pre); // recalculate only the needed params if (Param1!=0.0) { LumSpac = Param1; LumTmp = PARAM3_DEFAULT * Param1 / PARAM1_DEFAULT; ChromSpac = PARAM2_DEFAULT * Param1 / PARAM1_DEFAULT; ChromTmp = LumTmp * ChromSpac / LumSpac; } if (Param2!=0.0) { ChromSpac = Param2; ChromTmp = LumTmp * ChromSpac / LumSpac; } if (Param3!=0.0) { LumTmp = Param3; ChromTmp = LumTmp * ChromSpac / LumSpac; } if (Param4!=0.0) { ChromTmp = Param4; } } PrecalcCoefs(mfd[instance]->Coefs[0], LumSpac); PrecalcCoefs(mfd[instance]->Coefs[1], LumTmp); PrecalcCoefs(mfd[instance]->Coefs[2], ChromSpac); PrecalcCoefs(mfd[instance]->Coefs[3], ChromTmp); if(verbose) { tc_log_info(MOD_NAME, "%s %s #%d", MOD_VERSION, MOD_CAP, instance); tc_log_info(MOD_NAME, "Settings luma=%.2f chroma=%.2f luma_strength=%.2f chroma_strength=%.2f", LumSpac, ChromSpac, LumTmp, ChromTmp); } return 0; } //---------------------------------- // // filter close // //---------------------------------- if(ptr->tag & TC_FILTER_CLOSE) { if (buffer[instance]) {free(buffer[instance]); buffer[instance]=NULL;} if (mfd[instance]) { if(mfd[instance]->Line){free(mfd[instance]->Line);mfd[instance]->Line=NULL;} if(mfd[instance]->Frame[0]){free(mfd[instance]->Frame[0]);mfd[instance]->Frame[0]=NULL;} if(mfd[instance]->Frame[1]){free(mfd[instance]->Frame[1]);mfd[instance]->Frame[1]=NULL;} if(mfd[instance]->Frame[2]){free(mfd[instance]->Frame[2]);mfd[instance]->Frame[2]=NULL;} free(mfd[instance]); } mfd[instance]=NULL; return(0); } /* filter close */ //actually do the filter if(((ptr->tag & TC_PRE_M_PROCESS && mfd[instance]->pre) || (ptr->tag & TC_POST_M_PROCESS && !mfd[instance]->pre)) && !(ptr->attributes & TC_FRAME_IS_SKIPPED)) { 
ac_memcpy (buffer[instance], ptr->video_buf, ptr->video_size); deNoise(buffer[instance], ptr->video_buf, mfd[instance]->Line, &mfd[instance]->Frame[0], ptr->v_width, ptr->v_height, ptr->v_width, ptr->v_width, mfd[instance]->Coefs[0], mfd[instance]->Coefs[0], mfd[instance]->Coefs[1]); deNoise(buffer[instance] + ptr->v_width*ptr->v_height, ptr->video_buf + ptr->v_width*ptr->v_height, mfd[instance]->Line, &mfd[instance]->Frame[1], ptr->v_width>>1, ptr->v_height>>1, ptr->v_width>>1, ptr->v_width>>1, mfd[instance]->Coefs[2], mfd[instance]->Coefs[2], mfd[instance]->Coefs[3]); deNoise(buffer[instance] + 5*ptr->v_width*ptr->v_height/4, ptr->video_buf + 5*ptr->v_width*ptr->v_height/4, mfd[instance]->Line, &mfd[instance]->Frame[2], ptr->v_width>>1, ptr->v_height>>1, ptr->v_width>>1, ptr->v_width>>1, mfd[instance]->Coefs[2], mfd[instance]->Coefs[2], mfd[instance]->Coefs[3]); }
/*
 * 3D denoise filter entry point (classic transcode filter API): the
 * single function is multiplexed on vframe->tag for GET_CONFIG, INIT,
 * per-frame processing and CLOSE.  Each pixel is smoothed spatially
 * against its neighbours and temporally against the previous frame,
 * per plane, via deNoise().
 */
int tc_filter(frame_list_t *vframe_, char * options)
{
    vframe_list_t *vframe = (vframe_list_t *)vframe_;
    int instance;
    int tag = vframe->tag;
    dn3d_private_data_t * pd;  /* per-instance private state */

    if(tag & TC_AUDIO)
        return(0);  /* video-only filter */

    instance = vframe->filter_id;
    pd = &dn3d_private_data[instance];

    /* Describe the supported options for the option inspector. */
    if(tag & TC_FILTER_GET_CONFIG)
    {
        char buf[128];
        optstr_filter_desc(options, MOD_NAME, MOD_CAP, MOD_VERSION, MOD_AUTHOR, "VYMOE", "2");
        tc_snprintf(buf, 128, "%f", DEFAULT_LUMA_SPATIAL);
        optstr_param(options, "luma", "spatial luma strength", "%f", buf, "0.0", "100.0" );
        tc_snprintf(buf, 128, "%f", DEFAULT_CHROMA_SPATIAL);
        optstr_param(options, "chroma", "spatial chroma strength", "%f", buf, "0.0", "100.0" );
        tc_snprintf(buf, 128, "%f", DEFAULT_LUMA_TEMPORAL);
        optstr_param(options, "luma_strength", "temporal luma strength", "%f", buf, "0.0", "100.0" );
        tc_snprintf(buf, 128, "%f", DEFAULT_CHROMA_TEMPORAL);
        optstr_param(options, "chroma_strength", "temporal chroma strength", "%f", buf, "0.0", "100.0" );
        tc_snprintf(buf, 128, "%d", dn3d_private_data[instance].prefilter);
        optstr_param(options, "pre", "run as a pre filter", "%d", buf, "0", "1" );
    }

    if(tag & TC_FILTER_INIT)
    {
        int format_index, plane_index, found;
        const dn3d_layout_t * lp;
        size_t size;

        if(!(pd->vob = tc_get_vob()))
            return(TC_IMPORT_ERROR);

        pd->parameter.luma_spatial = 0;
        pd->parameter.luma_temporal = 0;
        pd->parameter.chroma_spatial = 0;
        pd->parameter.chroma_temporal = 0;

        if(!options)
        {
            tc_log_error(MOD_NAME, "options not set!");
            return(TC_IMPORT_ERROR);
        }

        if(optstr_lookup(options, "help"))
        {
            help_optstr();
            return(TC_IMPORT_ERROR);
        }

        optstr_get(options, "luma", "%lf", &pd->parameter.luma_spatial);
        optstr_get(options, "luma_strength", "%lf", &pd->parameter.luma_temporal);
        optstr_get(options, "chroma", "%lf", &pd->parameter.chroma_spatial);
        optstr_get(options, "chroma_strength", "%lf", &pd->parameter.chroma_temporal);
        optstr_get(options, "pre", "%d", &dn3d_private_data[instance].prefilter);

        /* A negative value for either luma parameter disables luma
         * filtering entirely; a 0 value is derived from the other
         * parameter (or the defaults when both are 0). */
        if((pd->parameter.luma_spatial < 0) || (pd->parameter.luma_temporal < 0))
            pd->enable_luma = 0;
        else
        {
            pd->enable_luma = 1;
            if(pd->parameter.luma_spatial == 0)
            {
                if(pd->parameter.luma_temporal == 0)
                {
                    pd->parameter.luma_spatial = DEFAULT_LUMA_SPATIAL;
                    pd->parameter.luma_temporal = DEFAULT_LUMA_TEMPORAL;
                }
                else
                {
                    pd->parameter.luma_spatial = pd->parameter.luma_temporal * 3 / 2;
                }
            }
            else
            {
                if(pd->parameter.luma_temporal == 0)
                {
                    pd->parameter.luma_temporal = pd->parameter.luma_spatial * 2 / 3;
                }
            }
        }

        /* Same derivation scheme for the chroma pair. */
        if((pd->parameter.chroma_spatial < 0) || (pd->parameter.chroma_temporal < 0))
            pd->enable_chroma = 0;
        else
        {
            pd->enable_chroma = 1;
            if(pd->parameter.chroma_spatial == 0)
            {
                if(pd->parameter.chroma_temporal == 0)
                {
                    pd->parameter.chroma_spatial = DEFAULT_CHROMA_SPATIAL;
                    pd->parameter.chroma_temporal = DEFAULT_CHROMA_TEMPORAL;
                }
                else
                {
                    pd->parameter.chroma_spatial = pd->parameter.chroma_temporal * 3 / 2;
                }
            }
            else
            {
                if(pd->parameter.chroma_temporal == 0)
                {
                    pd->parameter.chroma_temporal = pd->parameter.chroma_spatial * 2 / 3;
                }
            }
        }

        /* Find the plane layout matching the input pixel format. */
        for(format_index = 0, found = 0; format_index < (sizeof(dn3d_layout) / sizeof(*dn3d_layout)); format_index++)
        {
            if(pd->vob->im_v_codec == dn3d_layout[format_index].tc_fmt)
            {
                found = 1;
                break;
            }
        }

        if(!found)
        {
            tc_log_error(MOD_NAME, "This filter is only capable of YUV, YUV422 and RGB mode");
            return(TC_IMPORT_ERROR);
        }

        lp = &dn3d_layout[format_index];
        pd->layout_data = *lp;

        /* Mark every plane of a disabled channel type as skipped. */
        for(plane_index = 0; plane_index < MAX_PLANES; plane_index++)
        {
            if((pd->layout_data.layout[plane_index].plane_type == dn3d_luma) && !pd->enable_luma)
                pd->layout_data.layout[plane_index].plane_type = dn3d_disabled;
            if((pd->layout_data.layout[plane_index].plane_type == dn3d_chroma) && !pd->enable_chroma)
                pd->layout_data.layout[plane_index].plane_type = dn3d_disabled;
        }

        /* NOTE(review): an allocation failure below only logs and falls
         * through with a NULL buffer — TODO confirm this is intended
         * (processing would then dereference NULL). */
        size = pd->vob->im_v_width * MAX_PLANES * sizeof(char) * 2;
        pd->lineant = tc_zalloc(size);
        if(pd->lineant == NULL)
            tc_log_error(MOD_NAME, "Malloc failed");

        size *= pd->vob->im_v_height * 2;
        pd->previous = tc_zalloc(size);
        if(pd->previous == NULL)
            tc_log_error(MOD_NAME, "Malloc failed");

        /* Coefficient tables: [0]/[1] luma spatial/temporal,
         * [2]/[3] chroma spatial/temporal. */
        PrecalcCoefs(pd->coefficients[0], pd->parameter.luma_spatial);
        PrecalcCoefs(pd->coefficients[1], pd->parameter.luma_temporal);
        PrecalcCoefs(pd->coefficients[2], pd->parameter.chroma_spatial);
        PrecalcCoefs(pd->coefficients[3], pd->parameter.chroma_temporal);

        if(verbose)
        {
            tc_log_info(MOD_NAME, "%s %s #%d", MOD_VERSION, MOD_CAP, instance);
            tc_log_info(MOD_NAME,
                        "Settings luma (spatial): %.2f "
                        "luma_strength (temporal): %.2f "
                        "chroma (spatial): %.2f "
                        "chroma_strength (temporal): %.2f",
                        pd->parameter.luma_spatial,
                        pd->parameter.luma_temporal,
                        pd->parameter.chroma_spatial,
                        pd->parameter.chroma_temporal);
            tc_log_info(MOD_NAME, "luma enabled: %s, chroma enabled: %s",
                        pd->enable_luma ? "yes" : "no",
                        pd->enable_chroma ? "yes" : "no");
        }
    }

    /* Per-frame processing: run as pre- or post-filter depending on the
     * "pre" option; skipped frames are left untouched. */
    if(((tag & TC_PRE_M_PROCESS && pd->prefilter) ||
        (tag & TC_POST_M_PROCESS && !pd->prefilter)) &&
       !(vframe->attributes & TC_FRAME_IS_SKIPPED))
    {
        int plane_index, coef[2];
        int offset = 0;
        const dn3d_single_layout_t * lp;

        for(plane_index = 0; plane_index < MAX_PLANES; plane_index++)
        {
            lp = &pd->layout_data.layout[plane_index];

            if(lp->plane_type != dn3d_disabled)
            {
                // if(plane_index != 2) // debug
                // continue;

                /* coefficient pair: 0/1 for luma planes, 2/3 for chroma */
                coef[0] = (lp->plane_type == dn3d_luma) ? 0 : 2;
                coef[1] = coef[0] + 1;

                /* byte offset of this plane within the frame buffer */
                switch(lp->offset)
                {
                    case(dn3d_off_r): offset = 0; break;
                    case(dn3d_off_g): offset = 1; break;
                    case(dn3d_off_b): offset = 2; break;
                    case(dn3d_off_y420): offset = vframe->v_width * vframe->v_height * 0 / 4; break;
                    case(dn3d_off_u420): offset = vframe->v_width * vframe->v_height * 4 / 4; break;
                    case(dn3d_off_v420): offset = vframe->v_width * vframe->v_height * 5 / 4; break;
                    case(dn3d_off_y422): offset = vframe->v_width * vframe->v_height * 0 / 2; break;
                    case(dn3d_off_u422): offset = vframe->v_width * vframe->v_height * 2 / 2; break;
                    case(dn3d_off_v422): offset = vframe->v_width * vframe->v_height * 3 / 2; break;
                }

                deNoise(vframe->video_buf,              // frame
                        pd->previous,                   // previous (saved) frame
                        pd->lineant,                    // line buffer
                        vframe->v_width / lp->scale_x,  // width (pixels)
                        vframe->v_height / lp->scale_y, // height (pixels)
                        // debug
                        pd->coefficients[coef[0]],      // horizontal (spatial) strength
                        pd->coefficients[coef[0]],      // vertical (spatial) strength
                        pd->coefficients[coef[1]],      // temporal strength
                        offset,                         // offset in bytes of first relevant pixel in frame
                        lp->skip                        // skip this amount of bytes between two pixels
                );
            }
        }
    }

    if(tag & TC_FILTER_CLOSE)
    {
        /* release the per-instance scratch buffers */
        if(pd->previous)
        {
            free(pd->previous);
            pd->previous = 0;
        }
        if(pd->lineant)
        {
            free(pd->lineant);
            pd->lineant = 0;
        }
    }

    return(0);
}
/*
 * Logo/watermark renderer entry point (classic transcode filter API):
 * multiplexed on ptr->tag for GET_CONFIG, INIT, CLOSE and post-process
 * frame rendering.  Loads an image (optionally an animated GIF/MNG)
 * via ImageMagick and alpha-blends it onto every frame in the chosen
 * range, with optional fade-in/out, in RGB24 or YUV420P mode.
 */
int tc_filter(frame_list_t *ptr_, char *options)
{
    vframe_list_t *ptr = (vframe_list_t *)ptr_;
    vob_t *vob = NULL;

    int instance = ptr->filter_id;
    MyFilterData *mfd = mfd_all[instance];

    if (mfd != NULL) {
        vob = mfd->vob;
    }

    //----------------------------------
    //
    // filter init
    //
    //----------------------------------

    if (ptr->tag & TC_FILTER_GET_CONFIG) {
        optstr_filter_desc(options, MOD_NAME, MOD_CAP, MOD_VERSION,
                           MOD_AUTHOR, "VRYO", "1");
        // buf, name, comment, format, val, from, to
        optstr_param(options, "file", "Image filename", "%s", "logo.png");
        optstr_param(options, "posdef", "Position (0=None, 1=TopL, 2=TopR, 3=BotL, 4=BotR, 5=Center)", "%d", "0", "0", "5");
        optstr_param(options, "pos", "Position (0-width x 0-height)", "%dx%d", "0x0", "0", "width", "0", "height");
        optstr_param(options, "range", "Restrict rendering to framerange", "%u-%u", "0-0", "0", "oo", "0", "oo");
        optstr_param(options, "fade", "Fade image in/out (# of frames)", "%u-%u", "0-0", "0", "oo", "0", "oo");
        // bools
        optstr_param(options, "ignoredelay", "Ignore delay specified in animations", "", "0");
        optstr_param(options, "rgbswap", "Swap red/blue colors", "", "0");
        optstr_param(options, "grayout", "YUV only: don't write Cb and Cr, makes a nice effect", "", "0");
        optstr_param(options, "hqconv", "YUV only: do high quality rgb->yuv img conversion", "", "0");
        optstr_param(options, "flip", "Mirror image", "", "0");

        return 0;
    }

    if (ptr->tag & TC_FILTER_INIT) {
        Image *timg;
        Image *nimg;
        ImageInfo *image_info;
        ExceptionInfo exception_info;
        int rgb_off = 0;  /* RGB origin is bottom-left; vertical correction */
        vob_t *tmpvob;

        tmpvob = tc_get_vob();
        if (tmpvob == NULL)
            return -1;
        mfd_all[instance] = tc_zalloc(sizeof(MyFilterData));
        if (mfd_all[instance] == NULL)
            return -1;

        mfd = mfd_all[instance];

        /* defaults: whole stream, logo.png */
        strlcpy(mfd->file, "logo.png", PATH_MAX);
        mfd->end = (unsigned int)-1;
        mfd->vob = tmpvob;
        vob = mfd->vob;

        if (options != NULL) {
            if (verbose)
                tc_log_info(MOD_NAME, "options=%s", options);

            optstr_get(options, "file", "%[^:]", mfd->file);
            optstr_get(options, "posdef", "%d", (int *)&mfd->pos);
            optstr_get(options, "pos", "%dx%d", &mfd->posx, &mfd->posy);
            optstr_get(options, "range", "%u-%u", &mfd->start, &mfd->end);
            optstr_get(options, "fade", "%u-%u", &mfd->fadein, &mfd->fadeout);

            /* boolean options toggle their (zero-initialised) flags */
            if (optstr_lookup(options, "ignoredelay") != NULL)
                mfd->ignoredelay = !mfd->ignoredelay;
            if (optstr_lookup(options, "flip") != NULL)
                mfd->flip = !mfd->flip;
            if (optstr_lookup(options, "rgbswap") != NULL)
                mfd->rgbswap = !mfd->rgbswap;
            if (optstr_lookup(options, "grayout") != NULL)
                mfd->grayout = !mfd->grayout;
            if (optstr_lookup(options, "hqconv") != NULL)
                mfd->hqconv = !mfd->hqconv;

            if (optstr_lookup (options, "help") != NULL)
                flogo_help_optstr();
        }

        if (verbose > 1) {
            tc_log_info(MOD_NAME, " Logo renderer Settings:");
            tc_log_info(MOD_NAME, " file = %s", mfd->file);
            tc_log_info(MOD_NAME, " posdef = %d", mfd->pos);
            tc_log_info(MOD_NAME, " pos = %dx%d", mfd->posx, mfd->posy);
            tc_log_info(MOD_NAME, " range = %u-%u", mfd->start, mfd->end);
            tc_log_info(MOD_NAME, " fade = %u-%u", mfd->fadein, mfd->fadeout);
            tc_log_info(MOD_NAME, " flip = %d", mfd->flip);
            tc_log_info(MOD_NAME, " ignoredelay = %d", mfd->ignoredelay);
            tc_log_info(MOD_NAME, " rgbswap = %d", mfd->rgbswap);
            tc_log_info(MOD_NAME, " grayout = %d", mfd->grayout);
            tc_log_info(MOD_NAME, " hqconv = %d", mfd->hqconv);
        }

        /* Transcode serializes module execution, so this does not need a
         * semaphore.
         */
        magick_usecount++;
        if (!IsMagickInstantiated()) {
            InitializeMagick("");
        }

        GetExceptionInfo(&exception_info);
        image_info = CloneImageInfo((ImageInfo *) NULL);
        strlcpy(image_info->filename, mfd->file, MaxTextExtent);

        mfd->image = ReadImage(image_info, &exception_info);
        if (mfd->image == (Image *) NULL) {
            /* unreadable image: warn and disable rendering by pointing
             * the filename at /dev/null (checked in the frame routine) */
            MagickWarning(exception_info.severity,
                          exception_info.reason,
                          exception_info.description);
            strlcpy(mfd->file, "/dev/null", PATH_MAX);
            return 0;
        }
        DestroyImageInfo(image_info);

        if (mfd->image->columns > vob->ex_v_width
         || mfd->image->rows > vob->ex_v_height
        ) {
            tc_log_error(MOD_NAME, "\"%s\" is too large", mfd->file);
            return -1;
        }

        if (vob->im_v_codec == TC_CODEC_YUV420P) {
            /* 4:2:0 subsampling needs even dimensions */
            if ((mfd->image->columns & 1) || (mfd->image->rows & 1)) {
                tc_log_error(MOD_NAME, "\"%s\" has odd sizes", mfd->file);
                return -1;
            }
        }

        /* walk the (possibly animated) image list, counting frames and
         * building a flipped copy when requested */
        mfd->images = (Image *)GetFirstImageInList(mfd->image);
        nimg = NewImageList();

        while (mfd->images != (Image *)NULL) {
            if (mfd->flip || flip) {
                timg = FlipImage(mfd->images, &exception_info);
                if (timg == (Image *) NULL) {
                    MagickError(exception_info.severity,
                                exception_info.reason,
                                exception_info.description);
                    return -1;
                }
                AppendImageToList(&nimg, timg);
            }

            mfd->images = GetNextImageInList(mfd->images);
            mfd->nr_of_images++;
        }

        // check for memleaks;
        //DestroyImageList(image);
        if (mfd->flip || flip) {
            mfd->image = nimg;
        }

        /* initial delay. real delay = 1/100 sec * delay */
        mfd->cur_delay = mfd->image->delay*vob->fps/100;

        if (verbose & TC_DEBUG)
            tc_log_info(MOD_NAME, "Nr: %d Delay: %d mfd->image->del %lu|",
                        mfd->nr_of_images, mfd->cur_delay, mfd->image->delay);

        if (vob->im_v_codec == TC_CODEC_YUV420P) {
            /* convert Magick RGB image format to YUV */
            /* todo: convert the magick image if it's not rgb! (e.g. cmyk) */
            Image *image;
            uint8_t *yuv_hqbuf = NULL;

            /* Round up for odd-size images */
            unsigned long width = mfd->image->columns;
            unsigned long height = mfd->image->rows;
            int do_rgbswap = (rgbswap || mfd->rgbswap);
            int i;

            /* Allocate buffers for the YUV420P frames. mfd->nr_of_images
             * will be 1 unless this is an animated GIF or MNG.
             * This buffer needs to be large enough to store a temporary
             * 24-bit RGB image (extracted from the ImageMagick handle).
             */
            mfd->yuv = flogo_yuvbuf_alloc(width*height * 3, mfd->nr_of_images);
            if (mfd->yuv == NULL) {
                tc_log_error(MOD_NAME, "(%d) out of memory\n", __LINE__);
                return -1;
            }

            if (mfd->hqconv) {
                /* One temporary buffer, to hold full Y, U, and V planes. */
                yuv_hqbuf = tc_malloc(width*height * 3);
                if (yuv_hqbuf == NULL) {
                    tc_log_error(MOD_NAME, "(%d) out of memory\n", __LINE__);
                    return -1;
                }
            }

            mfd->tcvhandle = tcv_init();
            if (mfd->tcvhandle == NULL) {
                tc_log_error(MOD_NAME, "image conversion init failed");
                return -1;
            }

            image = GetFirstImageInList(mfd->image);

            for (i = 0; i < mfd->nr_of_images; i++) {
                if (!mfd->hqconv) {
                    flogo_convert_image(mfd->tcvhandle, image, mfd->yuv[i],
                                        IMG_YUV420P, do_rgbswap);
                } else {
                    /* hq path: convert to 4:4:4, then downscale U/V with
                     * a Lanczos zoom instead of simple decimation */
                    flogo_convert_image(mfd->tcvhandle, image, yuv_hqbuf,
                                        IMG_YUV444P, do_rgbswap);
                    // Copy over Y data from the 444 image
                    ac_memcpy(mfd->yuv[i], yuv_hqbuf, width * height);

                    // Resize U plane by 1/2 in each dimension, into the
                    // mfd YUV buffer
                    tcv_zoom(mfd->tcvhandle,
                             yuv_hqbuf + (width * height),
                             mfd->yuv[i] + (width * height),
                             width, height, 1,
                             width / 2, height / 2,
                             TCV_ZOOM_LANCZOS3
                    );

                    // Do the same with the V plane
                    tcv_zoom(mfd->tcvhandle,
                             yuv_hqbuf + 2*width*height,
                             mfd->yuv[i] + width*height + (width/2)*(height/2),
                             width, height, 1,
                             width / 2, height / 2,
                             TCV_ZOOM_LANCZOS3
                    );
                }
                image = GetNextImageInList(image);
            }

            if (mfd->hqconv)
                tc_free(yuv_hqbuf);
            tcv_free(mfd->tcvhandle);
        } else {
            /* for RGB format is origin bottom left */
            /* for RGB, rgbswap is done in the frame routine */
            rgb_off = vob->ex_v_height - mfd->image->rows;
            mfd->posy = rgb_off - mfd->posy;
        }

        /* resolve symbolic position presets into posx/posy */
        switch (mfd->pos) {
          case NONE: /* 0 */
            break;
          case TOP_LEFT:
            mfd->posx = 0;
            mfd->posy = rgb_off;
            break;
          case TOP_RIGHT:
            mfd->posx = vob->ex_v_width - mfd->image->columns;
            break;
          case BOT_LEFT:
            mfd->posy = vob->ex_v_height - mfd->image->rows - rgb_off;
            break;
          case BOT_RIGHT:
            mfd->posx = vob->ex_v_width - mfd->image->columns;
            mfd->posy = vob->ex_v_height - mfd->image->rows - rgb_off;
            break;
          case CENTER:
            mfd->posx = (vob->ex_v_width - mfd->image->columns)/2;
            mfd->posy = (vob->ex_v_height- mfd->image->rows)/2;
            /* align to not cause color disruption */
            if (mfd->posx & 1)
                mfd->posx++;
            if (mfd->posy & 1)
                mfd->posy++;
            break;
        }

        if (mfd->posy < 0 || mfd->posx < 0
         || (mfd->posx + mfd->image->columns) > vob->ex_v_width
         || (mfd->posy + mfd->image->rows) > vob->ex_v_height) {
            tc_log_error(MOD_NAME, "invalid position");
            return -1;
        }

        /* for running through image sequence */
        mfd->images = mfd->image;

        /* Set up image/video coefficient lookup tables */
        if (img_coeff_lookup[0] < 0) {
            int i;
            float maxrgbval = (float)MaxRGB; // from ImageMagick

            for (i = 0; i <= MAX_UINT8_VAL; i++) {
                float x = (float)ScaleCharToQuantum(i);
                /* Alternatively:
                 * img_coeff = (maxrgbval - x) / maxrgbval;
                 * vid_coeff = x / maxrgbval;
                 */
                img_coeff_lookup[i] = 1.0 - (x / maxrgbval);
                vid_coeff_lookup[i] = 1.0 - img_coeff_lookup[i];
            }
        }

        // filter init ok.
        if (verbose)
            tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

        return 0;
    }

    //----------------------------------
    //
    // filter close
    //
    //----------------------------------

    if (ptr->tag & TC_FILTER_CLOSE) {
        if (mfd) {
            flogo_yuvbuf_free(mfd->yuv, mfd->nr_of_images);
            mfd->yuv = NULL;

            if (mfd->image) {
                DestroyImage(mfd->image);
            }

            tc_free(mfd);
            mfd = NULL;
            mfd_all[instance] = NULL;
        }

        /* tear down ImageMagick once the last instance is gone */
        magick_usecount--;
        if (magick_usecount == 0 && IsMagickInstantiated()) {
            DestroyMagick();
        }

        return 0;
    } /* filter close */

    //----------------------------------
    //
    // filter frame routine
    //
    //----------------------------------

    // tag variable indicates, if we are called before
    // transcodes internal video/audo frame processing routines
    // or after and determines video/audio context
    if ((ptr->tag & TC_POST_M_PROCESS)
        && (ptr->tag & TC_VIDEO)
        && !(ptr->attributes & TC_FRAME_IS_SKIPPED)) {
        PixelPacket *pixel_packet;
        uint8_t *video_buf;

        int do_fade = 0;
        float fade_coeff = 0.0;
        float img_coeff, vid_coeff;

        /* Note: ImageMagick defines opacity = 0 as fully visible, and
         * opacity = MaxRGB as fully transparent.
         */
        Quantum opacity;

        int row, col;

        /* outside the requested frame range, or load failed earlier */
        if (ptr->id < mfd->start || ptr->id > mfd->end)
            return 0;

        if (strcmp(mfd->file, "/dev/null") == 0)
            return 0;

        if (ptr->id - mfd->start < mfd->fadein) {
            // fading-in
            fade_coeff = (float)(mfd->start - ptr->id + mfd->fadein)
                         / (float)(mfd->fadein);
            do_fade = 1;
        } else if (mfd->end - ptr->id < mfd->fadeout) {
            // fading-out
            fade_coeff = (float)(ptr->id - mfd->end + mfd->fadeout)
                         / (float)(mfd->fadeout);
            do_fade = 1;
        }

        /* advance through an animated image's frame sequence */
        mfd->cur_delay--;

        if (mfd->cur_delay < 0 || mfd->ignoredelay) {
            int seq;

            mfd->cur_seq = (mfd->cur_seq + 1) % mfd->nr_of_images;

            mfd->images = mfd->image;
            for (seq=0; seq<mfd->cur_seq; seq++)
                mfd->images = mfd->images->next;

            mfd->cur_delay = mfd->images->delay * vob->fps/100;
        }

        pixel_packet = GetImagePixels(mfd->images, 0, 0,
                                      mfd->images->columns,
                                      mfd->images->rows);

        if (vob->im_v_codec == TC_CODEC_RGB24) {
            unsigned long r_off, g_off, b_off;

            if (!(rgbswap || mfd->rgbswap)) {
                r_off = 0;
                b_off = 2;
            } else {
                r_off = 2;
                b_off = 0;
            }
            g_off = 1;

            for (row = 0; row < mfd->image->rows; row++) {
                video_buf = ptr->video_buf
                    + 3 * ((row + mfd->posy) * vob->ex_v_width + mfd->posx);

                for (col = 0; col < mfd->image->columns; col++) {
                    opacity = pixel_packet->opacity;

                    if (do_fade)
                        opacity += (Quantum)((MaxRGB - opacity) * fade_coeff);

                    if (opacity == 0) {
                        /* fully visible: copy the logo pixel */
                        *(video_buf + r_off) = ScaleQuantumToChar(pixel_packet->red);
                        *(video_buf + g_off) = ScaleQuantumToChar(pixel_packet->green);
                        *(video_buf + b_off) = ScaleQuantumToChar(pixel_packet->blue);
                    } else if (opacity < MaxRGB) {
                        /* partially transparent: blend via lookup tables */
                        unsigned char opacity_uchar = ScaleQuantumToChar(opacity);
                        img_coeff = img_coeff_lookup[opacity_uchar];
                        vid_coeff = vid_coeff_lookup[opacity_uchar];

                        *(video_buf + r_off) = (uint8_t)((*(video_buf + r_off)) * vid_coeff)
                                             + (uint8_t)(ScaleQuantumToChar(pixel_packet->red) * img_coeff);
                        *(video_buf + g_off) = (uint8_t)((*(video_buf + g_off)) * vid_coeff)
                                             + (uint8_t)(ScaleQuantumToChar(pixel_packet->green) * img_coeff);
                        *(video_buf + b_off) = (uint8_t)((*(video_buf + b_off)) * vid_coeff)
                                             + (uint8_t)(ScaleQuantumToChar(pixel_packet->blue) * img_coeff);
                    }

                    video_buf += 3;
                    pixel_packet++;
                }
            }
        } else { /* !RGB */
            unsigned long vid_size = vob->ex_v_width * vob->ex_v_height;
            unsigned long img_size = mfd->images->columns * mfd->images->rows;
            uint8_t *img_pixel_Y, *img_pixel_U, *img_pixel_V;
            uint8_t *vid_pixel_Y, *vid_pixel_U, *vid_pixel_V;

            /* planar YUV420P layout of the pre-converted logo buffer */
            img_pixel_Y = mfd->yuv[mfd->cur_seq];
            img_pixel_U = img_pixel_Y + img_size;
            img_pixel_V = img_pixel_U + img_size/4;

            for (row = 0; row < mfd->images->rows; row++) {
                vid_pixel_Y = ptr->video_buf
                    + (row + mfd->posy)*mfd->vob->ex_v_width + mfd->posx;
                vid_pixel_U = ptr->video_buf + vid_size
                    + (row/2 + mfd->posy/2)*(mfd->vob->ex_v_width/2) + mfd->posx/2;
                vid_pixel_V = vid_pixel_U + vid_size/4;

                for (col = 0; col < mfd->images->columns; col++) {
                    /* U/V are subsampled 2x2: touch them only on even
                     * rows/cols, and not at all in grayout mode */
                    int do_UV_pixels =
                        (mfd->grayout == 0 && !(row % 2) && !(col % 2)) ? 1 : 0;

                    opacity = pixel_packet->opacity;

                    if (do_fade)
                        opacity += (Quantum)((MaxRGB - opacity) * fade_coeff);

                    if (opacity == 0) {
                        *vid_pixel_Y = *img_pixel_Y;
                        if (do_UV_pixels) {
                            *vid_pixel_U = *img_pixel_U;
                            *vid_pixel_V = *img_pixel_V;
                        }
                    } else if (opacity < MaxRGB) {
                        unsigned char opacity_uchar = ScaleQuantumToChar(opacity);
                        img_coeff = img_coeff_lookup[opacity_uchar];
                        vid_coeff = vid_coeff_lookup[opacity_uchar];

                        *vid_pixel_Y = (uint8_t)(*vid_pixel_Y * vid_coeff)
                                     + (uint8_t)(*img_pixel_Y * img_coeff);

                        if (do_UV_pixels) {
                            *vid_pixel_U = (uint8_t)(*vid_pixel_U * vid_coeff)
                                         + (uint8_t)(*img_pixel_U * img_coeff);
                            *vid_pixel_V = (uint8_t)(*vid_pixel_V * vid_coeff)
                                         + (uint8_t)(*img_pixel_V * img_coeff);
                        }
                    }

                    vid_pixel_Y++;
                    img_pixel_Y++;
                    if (do_UV_pixels) {
                        vid_pixel_U++;
                        img_pixel_U++;
                        vid_pixel_V++;
                        img_pixel_V++;
                    }
                    pixel_packet++;
                }
            }
        }
    }

    return 0;
}
int f_parse_tree(xmlNodePtr p_node,audiovideo_t *p_audiovideo) { static audiovideo_t *p_temp; audiovideo_limit_t s_limit; static int s_type,s_param; int s_rc; static int s_video_codec=TC_CODEC_UNKNOWN,s_audio_codec=TC_CODEC_UNKNOWN; s_rc=0; if (p_node != NULL) { if (xmlStrcmp(p_node->name, (const xmlChar*)"smil") == 0) { if(f_parse_tree(p_node->xmlChildrenNode,p_audiovideo)) s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"seq") == 0) { s_type=AUDIO_VIDEO_UNKNOWN; p_temp=tc_zalloc(sizeof(audiovideo_t)); p_temp->s_end_a_time=-1; p_temp->s_end_v_time=-1; p_temp->s_start_a_time=-1; p_temp->s_start_v_time=-1; p_temp->s_end_audio=-1; p_temp->s_end_video=-1; p_temp->s_start_audio=-1; p_temp->s_start_video=-1; p_temp->s_video_smpte=npt; //force npt p_temp->s_audio_smpte=npt; //force npt p_temp->s_a_real_codec=TC_CODEC_UNKNOWN; p_temp->s_v_real_codec=TC_CODEC_UNKNOWN; p_temp->s_a_codec=TC_CODEC_UNKNOWN; p_temp->s_v_codec=TC_CODEC_UNKNOWN; p_temp->s_a_magic=TC_MAGIC_UNKNOWN; p_temp->s_v_magic=TC_MAGIC_UNKNOWN; p_temp->s_a_rate=0; p_temp->s_a_bits=0; p_temp->s_a_chan=0; p_temp->s_v_width=0; p_temp->s_v_height=0; p_temp->s_v_tg_width=0; p_temp->s_v_tg_height=0; p_temp->s_v_tg_width=0; //target width p_temp->s_v_tg_height=0; //target height if(p_audiovideo == NULL) p_audiovideo=p_temp; else p_audiovideo->p_next=p_temp; if(f_parse_tree(p_node->xmlChildrenNode,p_temp)) //visit the branch s_rc=1; if(f_parse_tree(p_node->next,p_temp)) //eventually go to the next seq item s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"video") == 0) { if (s_type!=AUDIO_VIDEO_UNKNOWN) { p_temp=tc_zalloc(sizeof(audiovideo_t)); p_temp->s_end_a_time=-1; p_temp->s_end_v_time=-1; p_temp->s_start_a_time=-1; p_temp->s_start_v_time=-1; p_temp->s_end_audio=-1; p_temp->s_end_video=-1; p_temp->s_start_audio=-1; p_temp->s_start_video=-1; p_temp->s_video_smpte=npt; //force npt p_temp->s_audio_smpte=npt; //force npt p_temp->s_a_codec=TC_CODEC_UNKNOWN; p_temp->s_v_codec=TC_CODEC_UNKNOWN; 
p_temp->s_a_real_codec=TC_CODEC_UNKNOWN; p_temp->s_v_real_codec=TC_CODEC_UNKNOWN; p_temp->s_a_magic=TC_MAGIC_UNKNOWN; p_temp->s_v_magic=TC_MAGIC_UNKNOWN; p_temp->s_a_rate=0; p_temp->s_a_bits=0; p_temp->s_a_chan=0; p_temp->s_v_width=0; p_temp->s_v_height=0; p_temp->s_v_tg_width=0; //target width p_temp->s_v_tg_height=0; //target height if(p_audiovideo != NULL) p_audiovideo->p_next=p_temp; p_audiovideo=p_temp; } s_type=VIDEO_ITEM; //set origin to video if(f_parse_tree((xmlNodePtr)p_node->properties,p_audiovideo)) //visit the properties s_rc=1; if(f_parse_tree(p_node->xmlChildrenNode,p_audiovideo)) //visit the branch s_rc=1; if(f_parse_tree(p_node->next,p_audiovideo)) //eventually go to the next audio/video item s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"audio") == 0) { if (s_type!=AUDIO_VIDEO_UNKNOWN) { p_temp=tc_zalloc(sizeof(audiovideo_t)); p_temp->s_end_a_time=-1; p_temp->s_end_v_time=-1; p_temp->s_start_a_time=-1; p_temp->s_start_v_time=-1; p_temp->s_end_audio=-1; p_temp->s_end_video=-1; p_temp->s_start_audio=-1; p_temp->s_start_video=-1; p_temp->s_video_smpte=npt; //force npt p_temp->s_audio_smpte=npt; //force npt p_temp->s_a_codec=TC_CODEC_UNKNOWN; p_temp->s_v_codec=TC_CODEC_UNKNOWN; p_temp->s_a_real_codec=TC_CODEC_UNKNOWN; p_temp->s_v_real_codec=TC_CODEC_UNKNOWN; p_temp->s_a_magic=TC_MAGIC_UNKNOWN; p_temp->s_v_magic=TC_MAGIC_UNKNOWN; p_temp->s_a_rate=0; p_temp->s_a_bits=0; p_temp->s_a_chan=0; p_temp->s_v_width=0; p_temp->s_v_height=0; p_temp->s_v_tg_width=0; //target width p_temp->s_v_tg_height=0; //target height if(p_audiovideo != NULL) p_audiovideo->p_next=p_temp; p_audiovideo=p_temp; } s_type=AUDIO_ITEM; //set origin to audio if(f_parse_tree((xmlNodePtr)p_node->properties,p_audiovideo)) //visit the properties s_rc=1; if(f_parse_tree(p_node->xmlChildrenNode,p_audiovideo)) //visit the branch s_rc=1; if(f_parse_tree(p_node->next,p_audiovideo)) //eventually go to the next audio/video item s_rc=1; } else if (xmlStrcmp(p_node->name, (const 
xmlChar*)"param") == 0) { if(f_parse_tree((xmlNodePtr)p_node->properties,p_audiovideo)) //visit the properties s_rc=1; if(f_parse_tree(p_node->xmlChildrenNode,p_audiovideo)) //visit the branch s_rc=1; if(f_parse_tree(p_node->next,p_audiovideo)) //eventually go to the next audio/video item s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"src") == 0) { if (s_type==AUDIO_ITEM) { p_audiovideo->p_nome_audio=p_node->xmlChildrenNode->content; //set the audio file name } else { p_audiovideo->p_nome_video=p_node->xmlChildrenNode->content; //set the video file name } if(f_parse_tree(p_node->next,p_audiovideo)) //goto to begin and end of clip s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"clipBegin") == 0) { s_limit=f_det_time((char *)p_node->xmlChildrenNode->content); if (s_type==AUDIO_ITEM) { p_audiovideo->s_audio_smpte=s_limit.s_smpte; p_audiovideo->s_start_a_time=s_limit.s_time; p_audiovideo->s_start_audio=s_limit.s_frame; } else { p_audiovideo->s_video_smpte=s_limit.s_smpte; p_audiovideo->s_start_v_time=s_limit.s_time; p_audiovideo->s_start_video=s_limit.s_frame; } if(f_parse_tree(p_node->next,p_audiovideo)) //goto the next param. s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"clipEnd") == 0) { s_limit=f_det_time((char *)p_node->xmlChildrenNode->content); if (s_type==AUDIO_ITEM) { p_audiovideo->s_audio_smpte=s_limit.s_smpte; p_audiovideo->s_end_a_time=s_limit.s_time; p_audiovideo->s_end_audio=s_limit.s_frame+1; } else { p_audiovideo->s_video_smpte=s_limit.s_smpte; p_audiovideo->s_end_v_time=s_limit.s_time; p_audiovideo->s_end_video=s_limit.s_frame+1; } if(f_parse_tree(p_node->next,p_audiovideo)) //goto the next param. 
s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"name") == 0) { if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"in-video-module") == 0) s_param=IN_VIDEO_MAGIC; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"in-audio-module") == 0) s_param=IN_AUDIO_MAGIC; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"in-video-codec") == 0) s_param=IN_VIDEO_CODEC; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"in-audio-codec") == 0) s_param=IN_AUDIO_CODEC; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"target-height") == 0) s_param=OUT_VIDEO_HEIGHT; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"target-width") == 0) s_param=OUT_VIDEO_WIDTH; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"resize-filter") == 0) s_param=OUT_VIDEO_RES_FILTER; else s_param=UNSUPPORTED_PARAM; if(f_parse_tree(p_node->next,p_audiovideo)) //goto the next param. 
s_rc=1; } else if (xmlStrcmp(p_node->name, (const xmlChar*)"value") == 0) { if ((s_type==AUDIO_ITEM) && ((s_param==IN_VIDEO_CODEC)||(s_param==IN_VIDEO_MAGIC)||(s_param==OUT_VIDEO_HEIGHT)||(s_param==OUT_VIDEO_WIDTH))) { if (s_param==OUT_VIDEO_HEIGHT) tc_log_warn(__FILE__,"The target-height parameter cannot be used in audio item, %s skipped.",(char *)p_node->xmlChildrenNode->content); else if (s_param==OUT_VIDEO_WIDTH) tc_log_warn(__FILE__,"The target-width parameter cannot be used in audio item, %s skipped.",(char *)p_node->xmlChildrenNode->content); else if (s_param==IN_VIDEO_MAGIC) tc_log_warn(__FILE__,"The in-video-module parameter cannot be used in audio item, %s skipped.",(char *)p_node->xmlChildrenNode->content); else if (s_param==IN_VIDEO_CODEC) tc_log_warn(__FILE__,"The in-video-codec parameter cannot be used in audio item, %s skipped.",(char *)p_node->xmlChildrenNode->content); s_rc=1; } else { switch(s_param) { case OUT_VIDEO_RES_FILTER: p_audiovideo->p_v_resize_filter=p_node->xmlChildrenNode->content; break; case OUT_VIDEO_HEIGHT: p_audiovideo->s_v_tg_height=atoi((char *)p_node->xmlChildrenNode->content); break; case OUT_VIDEO_WIDTH: p_audiovideo->s_v_tg_width=atoi((char *)p_node->xmlChildrenNode->content); break; case IN_VIDEO_MAGIC: if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"dv") == 0) p_audiovideo->s_v_magic=TC_MAGIC_DV_PAL; //the same for PAL and NTSC else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"avi") == 0) p_audiovideo->s_v_magic=TC_MAGIC_AVI; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"mov") == 0) p_audiovideo->s_v_magic=TC_MAGIC_AVI; else { tc_log_warn(__FILE__,"The in-video-magic %s parameter isn't yet supported.",(char *)p_node->xmlChildrenNode->content); s_rc=1; } break; case IN_AUDIO_MAGIC: if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"dv") == 0) p_audiovideo->s_a_magic=TC_MAGIC_DV_PAL; //the same for PAL and NTSC else if 
(xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"avi") == 0) p_audiovideo->s_a_magic=TC_MAGIC_AVI; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"mov") == 0) p_audiovideo->s_a_magic=TC_MAGIC_AVI; else { tc_log_warn(__FILE__,"The in-audio-magic %s parameter isn't yet supported.",(char *)p_node->xmlChildrenNode->content); s_rc=1; } break; case IN_VIDEO_CODEC: if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"rgb") == 0) p_audiovideo->s_v_codec=TC_CODEC_RGB24; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"yuv2") == 0) p_audiovideo->s_v_codec=TC_CODEC_YUV420P; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"yuv420p") == 0) p_audiovideo->s_v_codec=TC_CODEC_YUV420P; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"yv12") == 0) p_audiovideo->s_v_codec=TC_CODEC_YUV420P; // What?!? else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"yuy2") == 0) p_audiovideo->s_v_codec=TC_CODEC_YUY2; else if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"raw") == 0) p_audiovideo->s_v_codec=TC_CODEC_RAW; else { tc_log_warn(__FILE__,"The in-video-codec %s parameter isn't yet supported.",(char *)p_node->xmlChildrenNode->content); s_rc=1; } if (s_video_codec == TC_CODEC_UNKNOWN) s_video_codec=p_audiovideo->s_v_codec; else if (s_video_codec != p_audiovideo->s_v_codec) { tc_log_warn(__FILE__,"The XML file must contain the same video codec."); s_rc=1; } break; case IN_AUDIO_CODEC: if (xmlStrcmp((char *)p_node->xmlChildrenNode->content, (const xmlChar*)"pcm") == 0) p_audiovideo->s_a_codec=TC_CODEC_PCM; else { tc_log_warn(__FILE__,"The in-audio-codec %s parameter isn't yet supported.",(char *)p_node->xmlChildrenNode->content); s_rc=1; } if (s_audio_codec == TC_CODEC_UNKNOWN) s_audio_codec=p_audiovideo->s_a_codec; else if (s_audio_codec != p_audiovideo->s_a_codec) { 
tc_log_warn(__FILE__,"The XML file must contain the same audio codec."); s_rc=1; } break; case UNSUPPORTED_PARAM: tc_log_warn(__FILE__,"The %s parameter isn't yet supported.",(char *)p_node->xmlChildrenNode->content); s_rc=1; break; } } if(f_parse_tree(p_node->next,p_audiovideo)) //goto the next param. s_rc=1; } } return(s_rc); }
/*
 * transcode API
 *
 * Single entry point of the fieldanalysis filter.  The requested
 * operation is selected by bits in ptr->tag:
 *   TC_FILTER_INIT        allocate filter state, parse options
 *   TC_FILTER_CLOSE       print statistics and a conclusion, free buffers
 *   TC_FILTER_GET_CONFIG  describe the supported options
 *   TC_PRE_S_PROCESS      analyze one video frame (frames must arrive in order)
 */
int tc_filter(frame_list_t *ptr_, char *options)
{
    vframe_list_t *ptr = (vframe_list_t *)ptr_;
    vob_t *vob = NULL;
    myfilter_t *myf = myf_global;

    /*
     * filter init
     */
    if (ptr->tag & TC_FILTER_INIT) {

        if (!(vob = tc_get_vob()))
            return -1;
        if (!(myf = myf_global = tc_zalloc(sizeof(myfilter_t)))) {
            return -1;
        }
        if (!(myf->tcvhandle = tcv_init())) {
            tc_log_error(MOD_NAME, "tcv_init() failed");
            free(myf);
            myf = myf_global = NULL;
            return -1;
        }

        if (verbose) /* global verbose */
            tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

        /* default values */
        myf->interlaceDiff       = 1.1;
        myf->unknownDiff         = 1.5;
        myf->progressiveDiff     = 8;
        myf->progressiveChange   = 0.2;
        myf->changedIfMore       = 10;
        myf->forceTelecineDetect = 0;
        myf->verbose             = 0;
        myf->outDiff             = 0;

        /* video parameters */
        switch (vob->im_v_codec) {
        case TC_CODEC_YUY2:
        case TC_CODEC_YUV420P:
        case TC_CODEC_YUV422P:
        case TC_CODEC_RGB24:
            break;
        default:
            tc_log_error(MOD_NAME, "Unsupported codec - need one of RGB24 YUV420P YUY2 YUV422P");
            /* fix: this failure path used to leak myf and the tcv handle */
            tcv_free(myf->tcvhandle);
            free(myf);
            myf = myf_global = NULL;
            return -1;
        }
        myf->codec  = vob->im_v_codec;
        myf->width  = vob->im_v_width;
        myf->height = vob->im_v_height;
        myf->fps    = vob->fps;
        myf->size   = myf->width * myf->height;

        if (options) {
            optstr_get(options, "interlacediff", "%lf", &myf->interlaceDiff);
            optstr_get(options, "unknowndiff", "%lf", &myf->unknownDiff);
            optstr_get(options, "progressivediff", "%lf", &myf->progressiveDiff);
            optstr_get(options, "progressivechange", "%lf", &myf->progressiveChange);
            optstr_get(options, "changedifmore", "%lf", &myf->changedIfMore);
            optstr_get(options, "forcetelecinedetect", "%d", &myf->forceTelecineDetect);
            optstr_get(options, "verbose", "%d", &myf->verbose);
            optstr_get(options, "outdiff", "%d", &myf->outDiff);

            if (optstr_lookup(options, "help") != NULL) {
                tc_log_info(MOD_NAME, "(%s) help\n"
"* Overview:\n"
" 'fieldanalysis' scans video for interlacing artifacts and\n"
" detects progressive / interlaced / telecined video.\n"
" It also determines the major field for interlaced video.\n"
"* Verbose Output: [PtPb c t stsb]\n"
" Pt, Pb: progressivediff succeeded, per field.\n"
" pt, pb: unknowndiff succeeded, progressivediff failed.\n"
" c: progressivechange succeeded.\n"
" t: topFieldFirst / b: bottomFieldFirst detected.\n"
" st, sb: changedifmore failed (fields are similar to last frame).\n"
                            , MOD_CAP);
            }
        }

        /* frame memory: six full-frame luminance planes (current/previous
         * frame, plus bobbed top/bottom fields of each) */
        if (!(myf->lumIn    = calloc(1, myf->size))
         || !(myf->lumPrev  = calloc(1, myf->size))
         || !(myf->lumInT   = calloc(1, myf->size))
         || !(myf->lumInB   = calloc(1, myf->size))
         || !(myf->lumPrevT = calloc(1, myf->size))
         || !(myf->lumPrevB = calloc(1, myf->size))) {
            tc_log_error(MOD_NAME, "calloc() failed");
            /* fix: release whatever was allocated before the failure;
             * free(NULL) is a no-op, so no individual checks needed */
            free(myf->lumIn);
            free(myf->lumPrev);
            free(myf->lumInT);
            free(myf->lumInB);
            free(myf->lumPrevT);
            free(myf->lumPrevB);
            tcv_free(myf->tcvhandle);
            free(myf);
            myf = myf_global = NULL;
            return -1;
        }

        if (verbose) { /* global verbose */
            tc_log_info(MOD_NAME, "interlacediff %.2f,  unknowndiff %.2f,  progressivediff %.2f",
                        myf->interlaceDiff, myf->unknownDiff, myf->progressiveDiff);
            tc_log_info(MOD_NAME, "progressivechange %.2f,  changedifmore %.2f",
                        myf->progressiveChange, myf->changedIfMore);
            tc_log_info(MOD_NAME, "forcetelecinedetect %s,  verbose %d,  outdiff %d",
                        myf->forceTelecineDetect ? "True" : "False",
                        myf->verbose, myf->outDiff);
        }
        return 0;
    }

    /*
     * filter close
     */
    if (ptr->tag & TC_FILTER_CLOSE) {
        int total = myf->numFrames - myf->unknownFrames;
        int totalfields = myf->topFirstFrames + myf->bottomFirstFrames;
        /* fix: guard the percentage denominator like totalfields below;
         * a run with zero frames would otherwise print inf/nan */
        double numFrames = (myf->numFrames > 0) ? (double)myf->numFrames : 1.0;

        /* Cleanup */
        free(myf->lumIn);
        free(myf->lumPrev);
        free(myf->lumInT);
        free(myf->lumInB);
        free(myf->lumPrevT);
        free(myf->lumPrevB);
        myf->lumIn = myf->lumPrev = myf->lumInT = myf->lumInB =
            myf->lumPrevT = myf->lumPrevB = NULL;

        /* Output results */
        if (totalfields < 1)
            totalfields = 1;
        tc_log_info(MOD_NAME, "RESULTS: Frames: %d (100%%)  Unknown: %d (%.3g%%)",
                    myf->numFrames, myf->unknownFrames,
                    100.0 * myf->unknownFrames / numFrames);
        tc_log_info(MOD_NAME, "RESULTS: Progressive: %d (%.3g%%)  Interlaced: %d (%.3g%%)",
                    myf->progressiveFrames, 100.0 * myf->progressiveFrames / numFrames,
                    myf->interlacedFrames, 100.0 * myf->interlacedFrames / numFrames);
        tc_log_info(MOD_NAME, "RESULTS: FieldShift: %d (%.3g%%)  Telecined: %d (%.3g%%)",
                    myf->fieldShiftFrames, 100.0 * myf->fieldShiftFrames / numFrames,
                    myf->telecineFrames, 100.0 * myf->telecineFrames / numFrames);
        tc_log_info(MOD_NAME, "RESULTS: MajorField: TopFirst %d (%.3g%%)  BottomFirst %d (%.3g%%)",
                    myf->topFirstFrames, 100.0 * myf->topFirstFrames / (double)totalfields,
                    myf->bottomFirstFrames, 100.0 * myf->bottomFirstFrames / (double)totalfields);

        /* decision tree: each test gates the next, most specific first */
        if (total < 50)
            tc_log_warn(MOD_NAME, "less than 50 frames analyzed correctly, no conclusion.");
        else if (myf->unknownFrames * 10 > myf->numFrames * 9)
            tc_log_warn(MOD_NAME, "less than 10%% frames analyzed correctly, no conclusion.");
        else if (myf->progressiveFrames * 8 > total * 7)
            tc_log_info(MOD_NAME, "CONCLUSION: progressive video.");
        else if (myf->topFirstFrames * 8 > myf->bottomFirstFrames &&
                 myf->bottomFirstFrames * 8 > myf->topFirstFrames)
            tc_log_info(MOD_NAME, "major field unsure, no conclusion. Use deinterlacer for processing.");
        else if (myf->telecineFrames * 4 > total * 3)
            tc_log_info(MOD_NAME, "CONCLUSION: telecined video, %s field first.",
                        myf->topFirstFrames > myf->bottomFirstFrames ? "top" : "bottom");
        else if (myf->fieldShiftFrames * 4 > total * 3)
            tc_log_info(MOD_NAME, "CONCLUSION: field shifted progressive video, %s field first.",
                        myf->topFirstFrames > myf->bottomFirstFrames ? "top" : "bottom");
        else if (myf->interlacedFrames > myf->fieldShiftFrames &&
                 (myf->interlacedFrames + myf->fieldShiftFrames) * 8 > total * 7)
            tc_log_info(MOD_NAME, "CONCLUSION: interlaced video, %s field first.",
                        myf->topFirstFrames > myf->bottomFirstFrames ? "top" : "bottom");
        else
            tc_log_info(MOD_NAME, "mixed video, no conclusion. Use deinterlacer for processing.");

        tcv_free(myf->tcvhandle);
        myf->tcvhandle = 0;

        return 0;
    }

    /*
     * filter description
     */
    if (ptr->tag & TC_FILTER_GET_CONFIG) {
        char buf[255];
        optstr_filter_desc(options, MOD_NAME, MOD_CAP, MOD_VERSION, MOD_AUTHOR, "VRY4E", "2");

        tc_snprintf(buf, sizeof(buf), "%g", myf->interlaceDiff);
        optstr_param(options, "interlacediff", "Minimum temporal inter-field difference for detecting interlaced video", "%f", buf, "1.0", "inf");
        tc_snprintf(buf, sizeof(buf), "%g", myf->unknownDiff);
        optstr_param(options, "unknowndiff", "Maximum inter-frame change vs. detail differences for neglecting interlaced video", "%f", buf, "1.0", "inf");
        tc_snprintf(buf, sizeof(buf), "%g", myf->progressiveDiff);
        optstr_param(options, "progressivediff", "Minimum inter-frame change vs. detail differences for detecting progressive video", "%f", buf, "unknowndiff", "inf");
        tc_snprintf(buf, sizeof(buf), "%g", myf->progressiveChange);
        optstr_param(options, "progressivechange", "Minimum temporal change needed for detecting progressive video", "%f", buf, "0", "inf");
        tc_snprintf(buf, sizeof(buf), "%g", myf->changedIfMore);
        optstr_param(options, "changedifmore", "Minimum temporal change for detecting truly changed frames", "%f", buf, "0", "65025");
        tc_snprintf(buf, sizeof(buf), "%d", myf->forceTelecineDetect);
        optstr_param(options, "forcetelecinedetect", "Detect telecine even on non-NTSC (29.97fps) video", "%d", buf, "0", "1");
        tc_snprintf(buf, sizeof(buf), "%d", myf->verbose);
        optstr_param(options, "verbose", "Output analysis for every frame", "%d", buf, "0", "2");
        tc_snprintf(buf, sizeof(buf), "%d", myf->outDiff);
        optstr_param(options, "outdiff", "Output internal debug frames as luminance of YUV video (see source)", "%d", buf, "0", "11");
    }

    /*
     * filter frame routine
     */
    /* need to process frames in-order */
    if ((ptr->tag & TC_PRE_S_PROCESS) && (ptr->tag & TC_VIDEO)) {
        uint8_t *tmp;
        int i, j;

        assert(ptr->free == 0 || ptr->free == 1);
        assert(ptr->video_buf_Y[!ptr->free] == ptr->video_buf);

        /* Convert / Copy to luminance only */
        switch (myf->codec) {
        case TC_CODEC_RGB24:
            tcv_convert(myf->tcvhandle, ptr->video_buf, myf->lumIn,
                        myf->width, myf->height, IMG_RGB_DEFAULT, IMG_Y8);
            break;
        case TC_CODEC_YUY2:
            tcv_convert(myf->tcvhandle, ptr->video_buf, myf->lumIn,
                        myf->width, myf->height, IMG_YUY2, IMG_Y8);
            break;
        case TC_CODEC_YUV420P:
            tcv_convert(myf->tcvhandle, ptr->video_buf, myf->lumIn,
                        myf->width, myf->height, IMG_YUV_DEFAULT, IMG_Y8);
            break;
        case TC_CODEC_YUV422P:
            tcv_convert(myf->tcvhandle, ptr->video_buf, myf->lumIn,
                        myf->width, myf->height, IMG_YUV422P, IMG_Y8);
            break;
        default:
            assert(0);  /* init rejects every other codec */
        }

        /* Bob Top field */
        bob_field(myf->lumIn, myf->lumInT, myf->width, myf->height/2 - 1);
        /* Bob Bottom field */
        ac_memcpy(myf->lumInB, myf->lumIn + myf->width, myf->width);
        bob_field(myf->lumIn + myf->width, myf->lumInB + myf->width,
                  myf->width, myf->height/2 - 1);
        /* last copied line is ignored, buffer is large enough */

        if (myf->numFrames == 0)
            myf->numFrames++;   /* first frame: nothing to compare against yet */
        else if (!(ptr->tag & TC_FRAME_IS_SKIPPED)) {
            /* check_it */
            check_interlace(myf, ptr->id);
        }

        /* only works with YUV data correctly */
        switch (myf->outDiff) {
        case 1:   /* lumIn */
            ac_memcpy(ptr->video_buf, myf->lumIn, myf->size);
            break;
        case 2:   /* field shift */
            for (i = 0; i < myf->height - 2; i += 2)
                for (j = 0; j < myf->width; j++) {
                    ptr->video_buf[myf->width*i + j]     = myf->lumIn[myf->width*i + j];
                    ptr->video_buf[myf->width*(i+1) + j] = myf->lumPrev[myf->width*(i+1) + j];
                }
            break;
        case 3:   /* lumInT */
            ac_memcpy(ptr->video_buf, myf->lumInT, myf->size);
            break;
        case 4:   /* lumInB */
            ac_memcpy(ptr->video_buf, myf->lumInB, myf->size);
            break;
        case 5:   /* lumPrevT */
            ac_memcpy(ptr->video_buf, myf->lumPrevT, myf->size);
            break;
        case 6:   /* lumPrevB */
            ac_memcpy(ptr->video_buf, myf->lumPrevB, myf->size);
            break;
        case 7:   /* pixDiff */
            pic_diff(myf->lumInT, myf->lumInB, ptr->video_buf, myf->size, 4);
            break;
        case 8:   /* pixShiftChangedT */
            pic_diff(myf->lumInT, myf->lumPrevB, ptr->video_buf, myf->size, 4);
            break;
        case 9:   /* pixShiftChangedB */
            pic_diff(myf->lumInB, myf->lumPrevT, ptr->video_buf, myf->size, 4);
            break;
        case 10:  /* pixLastT */
            pic_diff(myf->lumInT, myf->lumPrevT, ptr->video_buf, myf->size, 4);
            break;
        case 11:  /* pixLastB */
            pic_diff(myf->lumInB, myf->lumPrevB, ptr->video_buf, myf->size, 4);
            break;
        }

        /* The current frame gets the next previous frame :-P */
        tmp = myf->lumPrev;  myf->lumPrev  = myf->lumIn;  myf->lumIn  = tmp;
        tmp = myf->lumPrevT; myf->lumPrevT = myf->lumInT; myf->lumInT = tmp;
        tmp = myf->lumPrevB; myf->lumPrevB = myf->lumInB; myf->lumInB = tmp;
    }

    return 0;
}
/*
 * Single entry point of the tomsmocomp deinterlacer filter.  The
 * requested operation is selected by bits in ptr->tag:
 *   TC_FILTER_INIT        allocate state, parse options, set up DScaler info
 *   TC_FILTER_CLOSE       free frame buffers and the tcv handle
 *   TC_FILTER_GET_CONFIG  describe the supported options
 *   TC_PRE_S_PROCESS      deinterlace one video frame (frames must arrive in order)
 */
int tc_filter(frame_list_t *ptr_, char *options)
{
    vframe_list_t *ptr = (vframe_list_t *)ptr_;
    vob_t *vob = NULL;
    tomsmocomp_t *tmc = tmc_global;

    //----------------------------------
    // filter init
    //----------------------------------

    if (ptr->tag & TC_FILTER_INIT) {

        if (!(vob = tc_get_vob()))
            return -1;
        if (!(tmc = tmc_global = tc_zalloc(sizeof(tomsmocomp_t)))) {
            return -1;
        }
        if (!(tmc->tcvhandle = tcv_init())) {
            tc_log_error(MOD_NAME, "tcv_init() failed");
            /* fix: this failure path used to leak the state block */
            free(tmc);
            tmc = tmc_global = NULL;
            return -1;
        }

        if (verbose)
            tc_log_info(MOD_NAME, "%s %s", MOD_VERSION, MOD_CAP);

        /* default values */
        tmc->SearchEffort  = 11;
        tmc->UseStrangeBob = 0;
        tmc->TopFirst      = 1;

        /* video parameters */
        switch (vob->im_v_codec) {
        case CODEC_YUY2:
        case CODEC_YUV:
        case CODEC_YUV422:
            break;
        default:
            tc_log_error(MOD_NAME, "only working with YUV (4:2:2 and 4:2:0) and YUY2 frame data...");
            /* fix: this failure path used to leak tmc and the tcv handle */
            tcv_free(tmc->tcvhandle);
            free(tmc);
            tmc = tmc_global = NULL;
            return -1;
        }
        tmc->codec    = vob->im_v_codec;
        tmc->width    = vob->im_v_width;
        tmc->height   = vob->im_v_height;
        tmc->size     = vob->im_v_width * vob->im_v_height * 2;  /* YUY2: 2 bytes per pixel */
        tmc->cpuflags = tc_accel;
        tmc->rowsize  = vob->im_v_width * 2;

        if (options) {
            optstr_get(options, "topfirst", "%d", &tmc->TopFirst);
            optstr_get(options, "searcheffort", "%d", &tmc->SearchEffort);
            optstr_get(options, "usestrangebob", "%d", &tmc->UseStrangeBob);
            optstr_get(options, "cpuflags", "%x", &tmc->cpuflags);
            if (optstr_lookup(options, "help")) {
                help_optstr();
            }
        }

        /* frame memory: previous, input and output YUY2 frames */
        if (!(tmc->framePrev = calloc(1, tmc->size))
         || !(tmc->frameIn   = calloc(1, tmc->size))
         || !(tmc->frameOut  = calloc(1, tmc->size))) {
            /* fix: was tc_log_msg, inconsistent with the other error paths */
            tc_log_error(MOD_NAME, "calloc() failed");
            /* fix: release partial allocations; free(NULL) is a no-op */
            free(tmc->framePrev);
            free(tmc->frameIn);
            free(tmc->frameOut);
            tcv_free(tmc->tcvhandle);
            free(tmc);
            tmc = tmc_global = NULL;
            return -1;
        }

        /* DScaler parameter block used by do_deinterlace() */
        tmc->DSinfo.Overlay      = tmc->frameOut;
        tmc->DSinfo.OverlayPitch = tmc->rowsize;
        tmc->DSinfo.LineLength   = tmc->rowsize;
        tmc->DSinfo.FrameWidth   = tmc->width;
        tmc->DSinfo.FrameHeight  = tmc->height;
        tmc->DSinfo.FieldHeight  = tmc->height / 2;
        tmc->DSinfo.InputPitch   = 2 * tmc->rowsize;
        tmc->DSinfo.pMemcpy      = ac_memcpy;

        if (verbose) {
            tc_log_info(MOD_NAME, "topfirst %s,  searcheffort %d,  usestrangebob %s",
                        tmc->TopFirst ? "True" : "False",
                        tmc->SearchEffort,
                        tmc->UseStrangeBob ? "True" : "False");
            tc_log_info(MOD_NAME, "cpuflags%s%s%s%s",
                        tmc->cpuflags & AC_SSE ? " SSE" : "",
                        tmc->cpuflags & AC_3DNOW ? " 3DNOW" : "",
                        tmc->cpuflags & AC_MMX ? " MMX" : "",
                        !(tmc->cpuflags & (AC_SSE|AC_3DNOW|AC_MMX)) ? " None" : "");
        }
        return 0;
    }

    //----------------------------------
    // filter close
    //----------------------------------

    if (ptr->tag & TC_FILTER_CLOSE) {
        free(tmc->framePrev);
        free(tmc->frameIn);
        free(tmc->frameOut);
        tmc->framePrev = tmc->frameIn = tmc->frameOut = NULL;
        tcv_free(tmc->tcvhandle);
        tmc->tcvhandle = 0;
        return 0;
    }

    //----------------------------------
    // filter description
    //----------------------------------

    if (ptr->tag & TC_FILTER_GET_CONFIG) {
        char buf[255];
        optstr_filter_desc(options, MOD_NAME, MOD_CAP, MOD_VERSION, MOD_AUTHOR, "VY4E", "1");

        tc_snprintf(buf, sizeof(buf), "%d", tmc->TopFirst);
        optstr_param(options, "topfirst", "Assume the top field should be displayed first", "%d", buf, "0", "1");
        tc_snprintf(buf, sizeof(buf), "%d", tmc->SearchEffort);
        optstr_param(options, "searcheffort", "CPU time used to find moved pixels", "%d", buf, "0", "30");
        tc_snprintf(buf, sizeof(buf), "%d", tmc->UseStrangeBob);
        optstr_param(options, "usestrangebob", "?Unknown?", "%d", buf, "0", "1");
        tc_snprintf(buf, sizeof(buf), "%02x", tmc->cpuflags);
        optstr_param(options, "cpuflags", "Manual specification of CPU capabilities", "%x", buf, "00", "ff");
    }

    //----------------------------------
    // filter frame routine
    //----------------------------------

    // need to process frames in-order
    if ((ptr->tag & TC_PRE_S_PROCESS) && (ptr->tag & TC_VIDEO)) {
        uint8_t *tmp;
        uint8_t *planes[3];

        YUV_INIT_PLANES(planes, ptr->video_buf, IMG_YUV_DEFAULT,
                        tmc->width, tmc->height);

        /* Convert / Copy to yuy2 */
        switch (tmc->codec) {
        case CODEC_YUY2:
            ac_memcpy(tmc->frameIn, ptr->video_buf, tmc->size);
            break;
        case CODEC_YUV:
            tcv_convert(tmc->tcvhandle, ptr->video_buf, tmc->frameIn,
                        tmc->width, tmc->height, IMG_YUV_DEFAULT, IMG_YUY2);
            break;
        case CODEC_YUV422:
            tcv_convert(tmc->tcvhandle, ptr->video_buf, tmc->frameIn,
                        tmc->width, tmc->height, IMG_YUV422P, IMG_YUY2);
            break;
        }

        if (!(ptr->tag & TC_FRAME_IS_SKIPPED)) {
            /* Do the deinterlacing */
            do_deinterlace(tmc);

            /* Now convert back */
            switch (tmc->codec) {
            case CODEC_YUY2:
                ac_memcpy(ptr->video_buf, tmc->frameOut, tmc->size);
                break;
            case CODEC_YUV:
                tcv_convert(tmc->tcvhandle, tmc->frameOut, ptr->video_buf,
                            tmc->width, tmc->height, IMG_YUY2, IMG_YUV_DEFAULT);
                break;
            case CODEC_YUV422:
                tcv_convert(tmc->tcvhandle, tmc->frameOut, ptr->video_buf,
                            tmc->width, tmc->height, IMG_YUY2, IMG_YUV422P);
                break;
            default:
                tc_log_error(MOD_NAME, "codec: %x\n", tmc->codec);
                assert(0);  /* init rejects every other codec */
            }
        }

        // The current frame gets the next previous frame
        tmp = tmc->framePrev;
        tmc->framePrev = tmc->frameIn;
        tmc->frameIn = tmp;
    }

    return 0;
}