int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w,
                            uint8_t dst_color[4],
                            enum AVPixelFormat pix_fmt, uint8_t rgba_color[4],
                            int *is_packed_rgba, uint8_t rgba_map_ptr[4])
{
    uint8_t rgba_map[4] = {0};
    int i;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
    int hsub;

    av_assert0(pix_desc);

    hsub = pix_desc->log2_chroma_w;

    *is_packed_rgba = ff_fill_rgba_map(rgba_map, pix_fmt) >= 0;

    if (*is_packed_rgba) {
        pixel_step[0] = (av_get_bits_per_pixel(pix_desc)) >> 3;
        for (i = 0; i < 4; i++)
            dst_color[rgba_map[i]] = rgba_color[i];

        line[0] = av_malloc_array(w, pixel_step[0]);
        if (!line[0])
            return AVERROR(ENOMEM);
        for (i = 0; i < w; i++)
            memcpy(line[0] + i * pixel_step[0], dst_color, pixel_step[0]);
        if (rgba_map_ptr)
            memcpy(rgba_map_ptr, rgba_map, sizeof(rgba_map[0]) * 4);
    } else {
void show_pix_fmts(void)
{
    enum PixelFormat pix_fmt;

    printf("Pixel formats:\n"
           "I.... = Supported Input format for conversion\n"
           ".O... = Supported Output format for conversion\n"
           "..H.. = Hardware accelerated format\n"
           "...P. = Paletted format\n"
           "....B = Bitstream format\n"
           "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
           "-----\n");

#if !CONFIG_SWSCALE
#   define sws_isSupportedInput(x)  0
#   define sws_isSupportedOutput(x) 0
#endif

    for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) {
        const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
        printf("%c%c%c%c%c %-16s %d %2d\n",
               sws_isSupportedInput (pix_fmt)      ? 'I' : '.',
               sws_isSupportedOutput(pix_fmt)      ? 'O' : '.',
               pix_desc->flags & PIX_FMT_HWACCEL   ? 'H' : '.',
               pix_desc->flags & PIX_FMT_PAL       ? 'P' : '.',
               pix_desc->flags & PIX_FMT_BITSTREAM ? 'B' : '.',
               pix_desc->name,
               pix_desc->nb_components,
               av_get_bits_per_pixel(pix_desc));
    }
}
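The example above uses the legacy PixelFormat enum and the av_pix_fmt_descriptors[] array. A minimal sketch of the same listing against current libavutil follows; the function name list_pix_fmts is made up for illustration, and it assumes av_pix_fmt_desc_next() and the AV_PIX_FMT_FLAG_* names are available.

#include <stdio.h>
#include <libavutil/pixdesc.h>

/* Walk every known pixel format descriptor and print its flags,
 * component count and total bits per pixel (illustrative sketch). */
static void list_pix_fmts(void)
{
    const AVPixFmtDescriptor *desc = NULL;

    printf("FLAGS NAME             NB_COMPONENTS BITS_PER_PIXEL\n");
    while ((desc = av_pix_fmt_desc_next(desc))) {
        printf("%c%c%c   %-16s %d %2d\n",
               desc->flags & AV_PIX_FMT_FLAG_HWACCEL   ? 'H' : '.',
               desc->flags & AV_PIX_FMT_FLAG_PAL       ? 'P' : '.',
               desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ? 'B' : '.',
               desc->name,
               desc->nb_components,
               av_get_bits_per_pixel(desc));
    }
}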
static int fill_line_with_color(uint8_t *line[4], int line_step[4], int w,
                                uint8_t color[4],
                                enum PixelFormat pix_fmt, uint8_t rgba_color[4],
                                int *is_packed_rgba)
{
    uint8_t rgba_map[4] = {0};
    int i;
    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
    int hsub = pix_desc->log2_chroma_w;

    *is_packed_rgba = 1;
    switch (pix_fmt) {
    case PIX_FMT_ARGB:  rgba_map[ALPHA] = 0; rgba_map[RED  ] = 1; rgba_map[GREEN] = 2; rgba_map[BLUE ] = 3; break;
    case PIX_FMT_ABGR:  rgba_map[ALPHA] = 0; rgba_map[BLUE ] = 1; rgba_map[GREEN] = 2; rgba_map[RED  ] = 3; break;
    case PIX_FMT_RGBA:
    case PIX_FMT_RGB24: rgba_map[RED  ] = 0; rgba_map[GREEN] = 1; rgba_map[BLUE ] = 2; rgba_map[ALPHA] = 3; break;
    case PIX_FMT_BGRA:
    case PIX_FMT_BGR24: rgba_map[BLUE ] = 0; rgba_map[GREEN] = 1; rgba_map[RED  ] = 2; rgba_map[ALPHA] = 3; break;
    default:
        *is_packed_rgba = 0;
    }

    if (*is_packed_rgba) {
        line_step[0] = (av_get_bits_per_pixel(pix_desc)) >> 3;
        for (i = 0; i < 4; i++)
            color[rgba_map[i]] = rgba_color[i];

        line[0] = av_malloc(w * line_step[0]);
        for (i = 0; i < w; i++)
            memcpy(line[0] + i * line_step[0], color, line_step[0]);
    } else {
static av_cold int raw_init_encoder(AVCodecContext *avctx)
{
    avctx->coded_frame = avctx->priv_data;
    avcodec_get_frame_defaults(avctx->coded_frame);
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->bits_per_coded_sample =
        av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
    if (!avctx->codec_tag)
        avctx->codec_tag = avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
    return 0;
}
static av_cold int raw_init_encoder(AVCodecContext *avctx)
{
    avctx->coded_frame = (AVFrame *)avctx->priv_data;
    avctx->coded_frame->pict_type = FF_I_TYPE;
    avctx->coded_frame->key_frame = 1;
    avctx->bits_per_coded_sample =
        av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
    if (!avctx->codec_tag)
        avctx->codec_tag = avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
    return 0;
}
static av_cold int raw_encode_init(AVCodecContext *avctx)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->bits_per_coded_sample = av_get_bits_per_pixel(desc);
    if (!avctx->codec_tag)
        avctx->codec_tag = avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
    return 0;
}
static av_cold int raw_encode_init(AVCodecContext *avctx)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    avctx->bits_per_coded_sample = av_get_bits_per_pixel(desc);
    if (!avctx->codec_tag)
        avctx->codec_tag = avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
    return 0;
}
int yuv420P2rgb24(unsigned char *yuv_data, unsigned char *rgb24_data, int width, int height)
{
    // Parameters
    // FILE *src_file = fopen("pre.yuv", "rb");
    // if (src_file == NULL) {
    //     perror("open pre.yuv error\n");
    // }
    const int src_w = 1328, src_h = height; // 1328*720
    enum AVPixelFormat src_pixfmt = AV_PIX_FMT_YUV420P;
    int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(src_pixfmt));

    // FILE *dst_file = fopen("sintel_1280x720_rgb24.rgb", "wb");
    // if (dst_file == NULL) {
    //     perror("open sintel_1280x720_rgb24.rgb error\n");
    // }
    const int dst_w = width, dst_h = height;
    enum AVPixelFormat dst_pixfmt = AV_PIX_FMT_RGB24;
    int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));

    // Structures
    uint8_t *src_data[4];
    int      src_linesize[4];
    uint8_t *dst_data[4];
    int      dst_linesize[4];
    int rescale_method = SWS_BICUBIC;
    struct SwsContext *img_convert_ctx;

    uint8_t *temp_buffer = (uint8_t *)malloc(src_w * src_h * src_bpp / 8);

    int frame_idx = 0;
    int ret = 0;

    ret = av_image_alloc(src_data, src_linesize, src_w, src_h, src_pixfmt, 1);
    if (ret < 0) {
        printf("Could not allocate source image\n");
        return -1;
    }
    ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pixfmt, 1);
    if (ret < 0) {
        printf("Could not allocate destination image\n");
        return -1;
    }

    //-----------------------------
    // Init Method 1
    img_convert_ctx = sws_alloc_context();
    // Show AVOption
    // av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);

    // Set Value
    av_opt_set_int(img_convert_ctx, "sws_flags",  SWS_BICUBIC | SWS_PRINT_INFO, 0);
    av_opt_set_int(img_convert_ctx, "srcw",       src_w,      0);
    av_opt_set_int(img_convert_ctx, "srch",       src_h,      0);
    av_opt_set_int(img_convert_ctx, "src_format", src_pixfmt, 0);
    // '0' for MPEG (Y: 0-235); '1' for JPEG (Y: 0-255)
    av_opt_set_int(img_convert_ctx, "src_range",  1,          0);
    av_opt_set_int(img_convert_ctx, "dstw",       dst_w,      0);
    av_opt_set_int(img_convert_ctx, "dsth",       dst_h,      0);
    av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
    av_opt_set_int(img_convert_ctx, "dst_range",  1,          0);
    sws_init_context(img_convert_ctx, NULL, NULL);

    // Init Method 2
    // img_convert_ctx = sws_getContext(src_w, src_h, src_pixfmt, dst_w, dst_h, dst_pixfmt,
    //                                  rescale_method, NULL, NULL, NULL);
    //-----------------------------

    /*
    // Colorspace
    ret = sws_setColorspaceDetails(img_convert_ctx, sws_getCoefficients(SWS_CS_ITU601), 0,
                                   sws_getCoefficients(SWS_CS_ITU709), 0,
                                   0, 1 << 16, 1 << 16);
    if (ret == -1) {
        printf("Colorspace not support.\n");
        return -1;
    }
    */

    // while (1) {
    memcpy(temp_buffer, yuv_data, src_w * src_h * src_bpp / 8);
    // if (fread(temp_buffer, 1, src_w*src_h*src_bpp/8, src_file) != src_w*src_h*src_bpp/8) {
    //     break;
    // }

    switch (src_pixfmt) {
    case AV_PIX_FMT_GRAY8: {
        memcpy(src_data[0], temp_buffer, src_w * src_h);
        break;
    }
    case AV_PIX_FMT_YUV420P: {
        memcpy(src_data[0], temp_buffer,                   src_w * src_h);     // Y
        memcpy(src_data[1], temp_buffer + src_w*src_h,     src_w * src_h / 4); // U
        memcpy(src_data[2], temp_buffer + src_w*src_h*5/4, src_w * src_h / 4); // V
        break;
    }
    case AV_PIX_FMT_YUV422P: {
        memcpy(src_data[0], temp_buffer,                   src_w * src_h);     // Y
        memcpy(src_data[1], temp_buffer + src_w*src_h,     src_w * src_h / 2); // U
        memcpy(src_data[2], temp_buffer + src_w*src_h*3/2, src_w * src_h / 2); // V
        break;
    }
    case AV_PIX_FMT_YUV444P: {
        memcpy(src_data[0], temp_buffer,                   src_w * src_h);     // Y
        memcpy(src_data[1], temp_buffer + src_w*src_h,     src_w * src_h);     // U
        memcpy(src_data[2], temp_buffer + src_w*src_h*2,   src_w * src_h);     // V
        break;
    }
    case AV_PIX_FMT_YUYV422: {
        memcpy(src_data[0], temp_buffer, src_w * src_h * 2); // Packed
        break;
    }
    case AV_PIX_FMT_RGB24: {
        memcpy(src_data[0], temp_buffer, src_w * src_h * 3); // Packed
        break;
    }
    default: {
        printf("Not Support Input Pixel Format.\n");
        break;
    }
    }

    sws_scale(img_convert_ctx, src_data, src_linesize, 0, src_h,
              dst_data, dst_linesize);
    frame_idx++;

    switch (dst_pixfmt) {
    case AV_PIX_FMT_GRAY8: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h);
        break;
    }
    case AV_PIX_FMT_YUV420P: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h);     rgb24_data += dst_w * dst_h;
        memcpy(rgb24_data, dst_data[1], dst_w * dst_h / 4); rgb24_data += dst_w * dst_h / 4;
        memcpy(rgb24_data, dst_data[2], dst_w * dst_h / 4); rgb24_data += dst_w * dst_h / 4;
        break;
    }
    case AV_PIX_FMT_YUV422P: {
        // fwrite(dst_data[0], 1, dst_w*dst_h,   dst_file); // Y
        // fwrite(dst_data[1], 1, dst_w*dst_h/2, dst_file); // U
        // fwrite(dst_data[2], 1, dst_w*dst_h/2, dst_file); // V
        break;
    }
    case AV_PIX_FMT_YUV444P: {
        // fwrite(dst_data[0], 1, dst_w*dst_h, dst_file); // Y
        // fwrite(dst_data[1], 1, dst_w*dst_h, dst_file); // U
        // fwrite(dst_data[2], 1, dst_w*dst_h, dst_file); // V
        break;
    }
    case AV_PIX_FMT_YUYV422: {
        // fwrite(dst_data[0], 1, dst_w*dst_h*2, dst_file); // Packed
        break;
    }
    case AV_PIX_FMT_RGB24: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h * 3);
        // fwrite(dst_data[0], 1, dst_w*dst_h*3, dst_file); // Packed
        break;
    }
    default: {
        printf("Not Support Output Pixel Format.\n");
        break;
    }
    }
    // }

    sws_freeContext(img_convert_ctx);
    free(temp_buffer);
    // fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    return 0;
}
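The example above configures the scaler by hand and copies every plane through a temporary buffer. For the common case of planar YUV420P input already laid out back to back in memory, a much shorter sketch can let sws_getContext() and av_image_fill_arrays() do the bookkeeping. The helper name yuv420p_to_rgb24_simple is made up for illustration, and the sketch assumes the input planes are contiguous with no padding.

#include <stdint.h>
#include <libavutil/imgutils.h>
#include <libavutil/error.h>
#include <libswscale/swscale.h>

/* Minimal YUV420P -> RGB24 conversion sketch. Assumes yuv_data holds the
 * Y, U and V planes back to back with no padding and rgb24_data can hold
 * w*h*3 bytes. Error handling is reduced to the bare minimum. */
static int yuv420p_to_rgb24_simple(const uint8_t *yuv_data, uint8_t *rgb24_data,
                                   int w, int h)
{
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];
    struct SwsContext *sws;
    int ret;

    /* Describe the tightly packed source and destination buffers
     * without copying them. */
    ret = av_image_fill_arrays(src_data, src_linesize, yuv_data,
                               AV_PIX_FMT_YUV420P, w, h, 1);
    if (ret < 0)
        return ret;
    ret = av_image_fill_arrays(dst_data, dst_linesize, rgb24_data,
                               AV_PIX_FMT_RGB24, w, h, 1);
    if (ret < 0)
        return ret;

    sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                         w, h, AV_PIX_FMT_RGB24,
                         SWS_BICUBIC, NULL, NULL, NULL);
    if (!sws)
        return AVERROR(EINVAL);

    sws_scale(sws, (const uint8_t * const *)src_data, src_linesize,
              0, h, dst_data, dst_linesize);
    sws_freeContext(sws);
    return 0;
}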
static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *p, int *got_packet)
{
    int bpp, picsize, datasize = -1, ret, i;
    uint8_t *out;

    if (avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if ((ret = ff_alloc_packet2(avctx, pkt, picsize + 45)) < 0)
        return ret;

    /* zero out the header and only set applicable fields */
    memset(pkt->data, 0, 12);
    AV_WL16(pkt->data + 12, avctx->width);
    AV_WL16(pkt->data + 14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    pkt->data[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    out = pkt->data + 18;  /* skip past the header we write */

    avctx->bits_per_coded_sample =
        av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
    switch (avctx->pix_fmt) {
    case PIX_FMT_PAL8:
        pkt->data[1]  = 1;          /* palette present */
        pkt->data[2]  = TGA_PAL;    /* uncompressed palettised image */
        pkt->data[6]  = 1;          /* palette contains 256 entries */
        pkt->data[7]  = 24;         /* palette contains 24 bit entries */
        pkt->data[16] = 8;          /* bpp */
        for (i = 0; i < 256; i++)
            AV_WL24(pkt->data + 18 + 3 * i, *(uint32_t *)(p->data[1] + i * 4));
        out += 256 * 3;             /* skip past the palette we just output */
        break;
    case PIX_FMT_GRAY8:
        pkt->data[2]  = TGA_BW;     /* uncompressed grayscale image */
        avctx->bits_per_coded_sample = 0x28;
        pkt->data[16] = 8;          /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        avctx->bits_per_coded_sample =
        pkt->data[16] = 16;         /* bpp */
        break;
    case PIX_FMT_BGR24:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 24;         /* bpp */
        break;
    case PIX_FMT_BGRA:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 32;         /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = pkt->data[16] >> 3;

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if (datasize >= 0)
        pkt->data[2] |= 8;
    /* if RLE didn't make it smaller, go back to no compression */
    else
        datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    pkt->size   = out + 26 - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
VIDEOINFOHEADER *CLAVFVideoHelper::CreateVIH(const AVStream *avstream, ULONG *size, std::string container)
{
  VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *)CoTaskMemAlloc(ULONG(sizeof(VIDEOINFOHEADER) + avstream->codec->extradata_size));
  if (!pvi)
    return NULL;
  memset(pvi, 0, sizeof(VIDEOINFOHEADER));

  // Get the frame rate
  REFERENCE_TIME r_avg = 0, avg_avg = 0, tb_avg = 0;
  if (avstream->r_frame_rate.den > 0 && avstream->r_frame_rate.num > 0) {
    r_avg = av_rescale(DSHOW_TIME_BASE, avstream->r_frame_rate.den, avstream->r_frame_rate.num);
  }
  if (avstream->avg_frame_rate.den > 0 && avstream->avg_frame_rate.num > 0) {
    avg_avg = av_rescale(DSHOW_TIME_BASE, avstream->avg_frame_rate.den, avstream->avg_frame_rate.num);
  }
  if (avstream->codec->time_base.den > 0 && avstream->codec->time_base.num > 0 && avstream->codec->ticks_per_frame > 0) {
    tb_avg = av_rescale(DSHOW_TIME_BASE, avstream->codec->time_base.num * avstream->codec->ticks_per_frame, avstream->codec->time_base.den);
  }

  DbgLog((LOG_TRACE, 10, L"CreateVIH: r_avg: %I64d, avg_avg: %I64d, tb_avg: %I64d", r_avg, avg_avg, tb_avg));

  if (r_avg >= MIN_TIME_PER_FRAME && r_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = r_avg;
  else if (avg_avg >= MIN_TIME_PER_FRAME && avg_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = avg_avg;
  else if (tb_avg >= MIN_TIME_PER_FRAME && tb_avg <= MAX_TIME_PER_FRAME)
    pvi->AvgTimePerFrame = tb_avg;

  if (container == "matroska" && r_avg && tb_avg
      && (avstream->codec->codec_id == AV_CODEC_ID_H264 || avstream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)) {
    float factor = (float)r_avg / (float)tb_avg;
    if ((factor > 0.4 && factor < 0.6) || (factor > 1.9 && factor < 2.1)) {
      pvi->AvgTimePerFrame = tb_avg;
    }
  }

  pvi->dwBitErrorRate = 0;
  pvi->dwBitRate = avstream->codec->bit_rate;
  RECT empty_tagrect = {0, 0, 0, 0};
  pvi->rcSource = empty_tagrect; // Some codecs like wmv set that value to the video's current value
  pvi->rcTarget = empty_tagrect;
  pvi->rcTarget.right  = pvi->rcSource.right  = avstream->codec->width;
  pvi->rcTarget.bottom = pvi->rcSource.bottom = avstream->codec->height;

  memcpy((BYTE *)&pvi->bmiHeader + sizeof(BITMAPINFOHEADER), avstream->codec->extradata, avstream->codec->extradata_size);
  pvi->bmiHeader.biSize = ULONG(sizeof(BITMAPINFOHEADER) + avstream->codec->extradata_size);

  pvi->bmiHeader.biWidth    = avstream->codec->width;
  pvi->bmiHeader.biHeight   = avstream->codec->height;
  pvi->bmiHeader.biBitCount = avstream->codec->bits_per_coded_sample;
  // Validate biBitCount is set to something useful
  if ((pvi->bmiHeader.biBitCount == 0 || avstream->codec->codec_id == AV_CODEC_ID_RAWVIDEO) && avstream->codec->pix_fmt != AV_PIX_FMT_NONE) {
    const AVPixFmtDescriptor *pixdecs = av_pix_fmt_desc_get(avstream->codec->pix_fmt);
    if (pixdecs)
      pvi->bmiHeader.biBitCount = av_get_bits_per_pixel(pixdecs);
  }
  pvi->bmiHeader.biSizeImage = DIBSIZE(pvi->bmiHeader); // Calculating this value doesn't make a lot of sense, but apparently some decoders freak out if it's 0
  pvi->bmiHeader.biCompression = avstream->codec->codec_tag;
  // TOFIX: the number of bit planes depends on the subtype
  pvi->bmiHeader.biPlanes = 1;
  pvi->bmiHeader.biClrUsed = 0;
  pvi->bmiHeader.biClrImportant = 0;
  pvi->bmiHeader.biYPelsPerMeter = 0;
  pvi->bmiHeader.biXPelsPerMeter = 0;

  *size = sizeof(VIDEOINFOHEADER) + avstream->codec->extradata_size;
  return pvi;
}
size_t PixelProperties::getBitsPerPixel() const
{
    if (!_pixelDesc)
        throw std::runtime_error("unable to find pixel description.");
    return av_get_bits_per_pixel(_pixelDesc);
}
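For callers that only have an AVPixelFormat value rather than a wrapper class like the one above, a minimal sketch looks as follows; the helper name bits_per_pixel_for is made up for illustration.

#include <libavutil/pixdesc.h>

/* Illustrative helper: total bits per pixel for a format, or 0 if the
 * format has no descriptor. av_get_bits_per_pixel() sums the bit depths
 * of all components, so planar formats report their combined size
 * (e.g. 12 for AV_PIX_FMT_YUV420P, 24 for AV_PIX_FMT_RGB24). */
static int bits_per_pixel_for(enum AVPixelFormat fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    return desc ? av_get_bits_per_pixel(desc) : 0;
}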
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FilterData *fd = ctx->priv;
    FILE *f;
    // char *filenamecopy, *filebasename;

    const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];

    TransformData *td = &(fd->td);

    VSFrameInfo fi_src;
    VSFrameInfo fi_dest;
    if (!initFrameInfo(&fi_src,  inlink->w, inlink->h, AV2OurPixelFormat(ctx, inlink->format)) ||
        !initFrameInfo(&fi_dest, inlink->w, inlink->h, AV2OurPixelFormat(ctx, inlink->format))) {
        av_log(ctx, AV_LOG_ERROR, "unknown pixel format: %i (%s)",
               inlink->format, desc->name);
        return AVERROR(EINVAL);
    }

    // check
    if (fi_src.bytesPerPixel != av_get_bits_per_pixel(desc) / 8 ||
        fi_src.log2ChromaW != desc->log2_chroma_w ||
        fi_src.log2ChromaH != desc->log2_chroma_h) {
        av_log(ctx, AV_LOG_ERROR, "pixel-format error: bpp %i<>%i ",
               fi_src.bytesPerPixel, av_get_bits_per_pixel(desc) / 8);
        av_log(ctx, AV_LOG_ERROR, "chroma_subsampl: w: %i<>%i h: %i<>%i\n",
               fi_src.log2ChromaW, desc->log2_chroma_w,
               fi_src.log2ChromaH, desc->log2_chroma_h);
        return AVERROR(EINVAL);
    }

    if (initTransformData(td, &fi_src, &fi_dest, "transform") != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "initialization of TransformData failed\n");
        return AVERROR(EINVAL);
    }
    td->verbose = 1; // TODO: get from somewhere

    /// TODO: find out input name
    // fd->input = (char*)av_malloc(VS_INPUT_MAXLEN);
    // filenamecopy = strndup(fd->vob->video_in_file);
    // filebasename = basename(filenamecopy);
    // if (strlen(filebasename) < VS_INPUT_MAXLEN - 4) {
    //     snprintf(fd->result, VS_INPUT_MAXLEN, "%s.trf", filebasename);
    // } else {
    //     av_log(ctx, AV_LOG_WARN, "input name too long, using default `%s'",
    //            DEFAULT_TRANS_FILE_NAME);
    snprintf(fd->input, VS_INPUT_MAXLEN, DEFAULT_TRANS_FILE_NAME);
    // }

    if (fd->options != NULL) {
        if (optstr_lookup(fd->options, "help")) {
            av_log(ctx, AV_LOG_INFO, transform_help);
            return AVERROR(EINVAL);
        }
        optstr_get(fd->options, "input",     "%[^:]", fd->input);
        optstr_get(fd->options, "maxshift",  "%d",    &td->maxShift);
        optstr_get(fd->options, "maxangle",  "%lf",   &td->maxAngle);
        optstr_get(fd->options, "smoothing", "%d",    &td->smoothing);
        optstr_get(fd->options, "crop",      "%d",    &td->crop);
        optstr_get(fd->options, "invert",    "%d",    &td->invert);
        optstr_get(fd->options, "relative",  "%d",    &td->relative);
        optstr_get(fd->options, "zoom",      "%lf",   &td->zoom);
        optstr_get(fd->options, "optzoom",   "%d",    &td->optZoom);
        optstr_get(fd->options, "interpol",  "%d",    (int *)(&td->interpolType));
        optstr_get(fd->options, "sharpen",   "%lf",   &td->sharpen);
        if (optstr_lookup(fd->options, "tripod")) {
            av_log(ctx, AV_LOG_INFO, "Virtual tripod mode: relative=False, smoothing=0");
            td->relative  = 0;
            td->smoothing = 0;
        }
    }

    if (configureTransformData(td) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "configuration of Transform failed\n");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_INFO, "Image Transformation/Stabilization Settings:\n");
    av_log(ctx, AV_LOG_INFO, "    input     = %s\n", fd->input);
    av_log(ctx, AV_LOG_INFO, "    smoothing = %d\n", td->smoothing);
    av_log(ctx, AV_LOG_INFO, "    maxshift  = %d\n", td->maxShift);
    av_log(ctx, AV_LOG_INFO, "    maxangle  = %f\n", td->maxAngle);
    av_log(ctx, AV_LOG_INFO, "    crop      = %s\n", td->crop ? "Black" : "Keep");
    av_log(ctx, AV_LOG_INFO, "    relative  = %s\n", td->relative ? "True" : "False");
    av_log(ctx, AV_LOG_INFO, "    invert    = %s\n", td->invert ? "True" : "False");
    av_log(ctx, AV_LOG_INFO, "    zoom      = %f\n", td->zoom);
    av_log(ctx, AV_LOG_INFO, "    optzoom   = %s\n", td->optZoom ? "On" : "Off");
    av_log(ctx, AV_LOG_INFO, "    interpol  = %s\n", interpolTypes[td->interpolType]);
    av_log(ctx, AV_LOG_INFO, "    sharpen   = %f\n", td->sharpen);

    f = fopen(fd->input, "r");
    if (f == NULL) {
        av_log(ctx, AV_LOG_ERROR, "cannot open input file %s!\n", fd->input);
    } else {
        ManyLocalMotions mlms;
        if (readLocalMotionsFile(f, &mlms) == VS_OK) {
            // calculate the actual transforms from the local motions
            if (localmotions2TransformsSimple(td, &mlms, &fd->trans) != VS_OK)
                av_log(ctx, AV_LOG_ERROR, "calculating transformations failed!\n");
        } else {
            // try to read old format
            if (!readOldTransforms(td, f, &fd->trans)) { /* read input file */
                av_log(ctx, AV_LOG_ERROR, "error parsing input file %s!\n", fd->input);
            }
        }
        fclose(f); // only close the file when it was actually opened
    }

    if (preprocessTransforms(td, &fd->trans) != VS_OK) {
        av_log(ctx, AV_LOG_ERROR, "error while preprocessing transforms\n");
        return AVERROR(EINVAL);
    }

    // TODO: add sharpening

    return 0;
}