/*
 * Init lcl encoder
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    LclEncContext *c = avctx->priv_data;
    int zret; // Zlib return code

    c->avctx = avctx;

    assert(avctx->width && avctx->height);

    avctx->extradata = av_mallocz(8);
    if (!avctx->extradata)
        return -1;
    avctx->coded_frame = &c->pic;

    // Will be user settable someday
    c->compression = 6;
    c->flags = 0;

    switch (avctx->pix_fmt) {
    case PIX_FMT_BGR24:
        c->imgtype = IMGTYPE_RGB24;
        avctx->bits_per_coded_sample = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Input pixel format %s not supported\n",
               avcodec_get_pix_fmt_name(avctx->pix_fmt));
        return -1;
    }

    avctx->extradata[0] = 4;
    avctx->extradata[1] = 0;
    avctx->extradata[2] = 0;
    avctx->extradata[3] = 0;
    avctx->extradata[4] = c->imgtype;
    avctx->extradata[5] = c->compression;
    avctx->extradata[6] = c->flags;
    avctx->extradata[7] = CODEC_ZLIB;
    c->avctx->extradata_size = 8;

    c->zstream.zalloc = Z_NULL;
    c->zstream.zfree  = Z_NULL;
    c->zstream.opaque = Z_NULL;
    zret = deflateInit(&c->zstream, c->compression);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Deflate init error: %d\n", zret);
        return -1;
    }

    return 0;
}
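/* A minimal companion sketch, not code from this file: the teardown that pairs
 * with encode_init() above, assuming the conventional FFmpeg encode_end hook.
 * It releases the extradata buffer and the zlib deflate state created above. */
static av_cold int encode_end(AVCodecContext *avctx)
{
    LclEncContext *c = avctx->priv_data;

    av_freep(&avctx->extradata);  /* free the 8-byte extradata header */
    deflateEnd(&c->zstream);      /* release zlib's internal deflate state */

    return 0;
}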
static int check_swscale( lavf_hnd_t *h, AVCodecContext *c, int i_frame )
{
    if( h->scaler && (h->cur_width == c->width) && (h->cur_height == c->height) && (h->cur_pix_fmt == c->pix_fmt) )
        return 0;

    if( h->scaler )
    {
        sws_freeContext( h->scaler );
        fprintf( stderr, "lavf [warning]: stream properties changed to %dx%d, %s at frame %d\n",
                 c->width, c->height, avcodec_get_pix_fmt_name( c->pix_fmt ), i_frame );
        h->cur_width   = c->width;
        h->cur_height  = c->height;
        h->cur_pix_fmt = c->pix_fmt;
    }

    h->scaler = sws_getContext( h->cur_width, h->cur_height, h->cur_pix_fmt,
                                h->init_width, h->init_height, PIX_FMT_YUV420P,
                                SWS_BICUBIC, NULL, NULL, NULL );
    if( !h->scaler )
    {
        fprintf( stderr, "lavf [error]: could not open swscale context\n" );
        return -1;
    }
    return 0;
}
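/* A hedged usage sketch, not code from this file: how a per-frame read path
 * might call check_swscale() before converting a decoded frame to YV12.
 * convert_frame(), frame_in and pic_out are illustrative names only. */
static int convert_frame( lavf_hnd_t *h, AVCodecContext *c, AVFrame *frame_in,
                          AVPicture *pic_out, int i_frame )
{
    if( check_swscale( h, c, i_frame ) < 0 )
        return -1;
    /* convert the full frame (rows 0..height) into the YV12 output picture */
    sws_scale( h->scaler, frame_in->data, frame_in->linesize, 0, c->height,
               pic_out->data, pic_out->linesize );
    return 0;
}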
static int targa_encode_frame(AVCodecContext *avctx, unsigned char *outbuf,
                              int buf_size, void *data)
{
    AVFrame *p = data;
    int bpp, picsize, datasize = -1;
    uint8_t *out;

    if (avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if (buf_size < picsize + 45) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return AVERROR(EINVAL);
    }

    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;

    /* zero out the header and only set applicable fields */
    memset(outbuf, 0, 12);
    AV_WL16(outbuf + 12, avctx->width);
    AV_WL16(outbuf + 14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    outbuf[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    switch (avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        outbuf[2]  = TGA_BW;   /* uncompressed grayscale image */
        outbuf[16] = 8;        /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        outbuf[2]  = TGA_RGB;  /* uncompressed true-color image */
        outbuf[16] = 16;       /* bpp */
        break;
    case PIX_FMT_BGR24:
        outbuf[2]  = TGA_RGB;  /* uncompressed true-color image */
        outbuf[16] = 24;       /* bpp */
        break;
    case PIX_FMT_BGRA:
        outbuf[2]  = TGA_RGB;  /* uncompressed true-color image */
        outbuf[16] = 32;       /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               avcodec_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = outbuf[16] >> 3;

    out = outbuf + 18;  /* skip past the header we just output */

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if (datasize >= 0)
        outbuf[2] |= 8;
    /* if RLE didn't make it smaller, go back to no compression */
    else
        datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    return out + 26 - outbuf;
}
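/* For reference, a plausible sketch of the uncompressed fallback named above;
 * the real targa_encode_normal() lives elsewhere in the file, so treat this
 * body as an assumption. It copies one row per iteration, honouring the
 * frame's linesize so stride padding never reaches the output. */
static int targa_encode_normal_sketch(uint8_t *outbuf, AVFrame *pic, int bpp, int w, int h)
{
    int i, n = bpp * w;          /* bytes of payload per row */
    uint8_t *out = outbuf;
    uint8_t *ptr = pic->data[0];

    for (i = 0; i < h; i++) {
        memcpy(out, ptr, n);     /* copy the visible pixels only */
        out += n;
        ptr += pic->linesize[0]; /* advance by stride, which may exceed n */
    }
    return out - outbuf;
}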
static mlt_properties find_default_streams( mlt_properties meta_media, AVFormatContext *context, int *audio_index, int *video_index )
{
    int i;
    char key[200];

    mlt_properties_set_int( meta_media, "meta.media.nb_streams", context->nb_streams );

    // Allow for multiple audio and video streams in the file and select first of each (if available)
    for( i = 0; i < context->nb_streams; i++ )
    {
        // Get the codec context
        AVStream *stream = context->streams[ i ];
        if ( ! stream ) continue;
        AVCodecContext *codec_context = stream->codec;
        if ( ! codec_context ) continue;
        AVCodec *codec = avcodec_find_decoder( codec_context->codec_id );
        if ( ! codec ) continue;

        snprintf( key, sizeof(key), "meta.media.%d.stream.type", i );

        // Determine the type and obtain the first index of each type
        switch( codec_context->codec_type )
        {
            case CODEC_TYPE_VIDEO:
                if ( *video_index < 0 )
                    *video_index = i;
                mlt_properties_set( meta_media, key, "video" );
                snprintf( key, sizeof(key), "meta.media.%d.stream.frame_rate", i );
                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->r_frame_rate ) );
#if LIBAVFORMAT_VERSION_INT >= ((52<<16)+(21<<8)+0)
                snprintf( key, sizeof(key), "meta.media.%d.stream.sample_aspect_ratio", i );
                mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->sample_aspect_ratio ) );
#endif
                snprintf( key, sizeof(key), "meta.media.%d.codec.pix_fmt", i );
                mlt_properties_set( meta_media, key, avcodec_get_pix_fmt_name( codec_context->pix_fmt ) );
                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_aspect_ratio", i );
                mlt_properties_set_double( meta_media, key, av_q2d( codec_context->sample_aspect_ratio ) );
                break;
            case CODEC_TYPE_AUDIO:
                if ( *audio_index < 0 )
                    *audio_index = i;
                mlt_properties_set( meta_media, key, "audio" );
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(71<<8)+0))
                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_fmt", i );
                mlt_properties_set( meta_media, key, avcodec_get_sample_fmt_name( codec_context->sample_fmt ) );
#endif
                snprintf( key, sizeof(key), "meta.media.%d.codec.sample_rate", i );
                mlt_properties_set_int( meta_media, key, codec_context->sample_rate );
                snprintf( key, sizeof(key), "meta.media.%d.codec.channels", i );
                mlt_properties_set_int( meta_media, key, codec_context->channels );
                break;
            default:
                break;
        }

//      snprintf( key, sizeof(key), "meta.media.%d.stream.time_base", i );
//      mlt_properties_set_double( meta_media, key, av_q2d( context->streams[ i ]->time_base ) );
        snprintf( key, sizeof(key), "meta.media.%d.codec.name", i );
        mlt_properties_set( meta_media, key, codec->name );
#if (LIBAVCODEC_VERSION_INT >= ((51<<16)+(55<<8)+0))
        snprintf( key, sizeof(key), "meta.media.%d.codec.long_name", i );
        mlt_properties_set( meta_media, key, codec->long_name );
#endif
        snprintf( key, sizeof(key), "meta.media.%d.codec.bit_rate", i );
        mlt_properties_set_int( meta_media, key, codec_context->bit_rate );
//      snprintf( key, sizeof(key), "meta.media.%d.codec.time_base", i );
//      mlt_properties_set_double( meta_media, key, av_q2d( codec_context->time_base ) );
        snprintf( key, sizeof(key), "meta.media.%d.codec.profile", i );
        mlt_properties_set_int( meta_media, key, codec_context->profile );
        snprintf( key, sizeof(key), "meta.media.%d.codec.level", i );
        mlt_properties_set_int( meta_media, key, codec_context->level );
    }

    return meta_media;
}
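/* A hedged usage sketch (the call site is an assumption, not code from this
 * file): probe a context, then read the selected video stream's codec name
 * back out of the returned properties bag. */
static void dump_video_codec( AVFormatContext *context )
{
    int audio_index = -1, video_index = -1;
    mlt_properties meta = find_default_streams( mlt_properties_new(), context, &audio_index, &video_index );
    if ( video_index >= 0 )
    {
        char key[200];
        snprintf( key, sizeof(key), "meta.media.%d.codec.name", video_index );
        printf( "video codec: %s\n", mlt_properties_get( meta, key ) );
    }
    mlt_properties_close( meta );
}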
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    lavf_hnd_t *h = malloc( sizeof(lavf_hnd_t) );
    if( !h )
        return -1;
    av_register_all();
    h->scaler = NULL;
    if( !strcmp( psz_filename, "-" ) )
        psz_filename = "pipe:";

    if( av_open_input_file( &h->lavf, psz_filename, NULL, 0, NULL ) )
    {
        fprintf( stderr, "lavf [error]: could not open input file\n" );
        return -1;
    }

    if( av_find_stream_info( h->lavf ) < 0 )
    {
        fprintf( stderr, "lavf [error]: could not find input stream info\n" );
        return -1;
    }

    int i = 0;
    while( i < h->lavf->nb_streams && h->lavf->streams[i]->codec->codec_type != CODEC_TYPE_VIDEO )
        i++;
    if( i == h->lavf->nb_streams )
    {
        fprintf( stderr, "lavf [error]: could not find video stream\n" );
        return -1;
    }
    h->stream_id       = i;
    h->next_frame      = 0;
    h->pts_offset_flag = 0;
    h->pts_offset      = 0;
    AVCodecContext *c  = h->lavf->streams[i]->codec;
    h->init_width  = h->cur_width  = info->width  = c->width;
    h->init_height = h->cur_height = info->height = c->height;
    h->cur_pix_fmt = c->pix_fmt;
    info->fps_num      = h->lavf->streams[i]->r_frame_rate.num;
    info->fps_den      = h->lavf->streams[i]->r_frame_rate.den;
    info->timebase_num = h->lavf->streams[i]->time_base.num;
    info->timebase_den = h->lavf->streams[i]->time_base.den;
    h->vfr_input       = info->vfr;
    h->vertical_flip   = 0;

    /* avisynth stores rgb data vertically flipped. */
    if( !strcasecmp( get_filename_extension( psz_filename ), "avs" ) &&
        (h->cur_pix_fmt == PIX_FMT_BGRA || h->cur_pix_fmt == PIX_FMT_BGR24) )
        info->csp |= X264_CSP_VFLIP;

    if( h->cur_pix_fmt != PIX_FMT_YUV420P )
        fprintf( stderr, "lavf [warning]: converting from %s to YV12\n",
                 avcodec_get_pix_fmt_name( h->cur_pix_fmt ) );

    if( avcodec_open( c, avcodec_find_decoder( c->codec_id ) ) )
    {
        fprintf( stderr, "lavf [error]: could not find decoder for video stream\n" );
        return -1;
    }

    /* prefetch the first frame and set/confirm flags */
    h->first_pic = malloc( sizeof(x264_picture_t) );
    if( !h->first_pic || lavf_input.picture_alloc( h->first_pic, info->csp, info->width, info->height ) )
    {
        fprintf( stderr, "lavf [error]: malloc failed\n" );
        return -1;
    }
    else if( read_frame_internal( h->first_pic, h, 0, info ) )
        return -1;

    info->sar_height = c->sample_aspect_ratio.den;
    info->sar_width  = c->sample_aspect_ratio.num;

    *p_handle = h;
    return 0;
}
static inline int dc1394_read_common(AVFormatContext *c, AVFormatParameters *ap,
                                     struct dc1394_frame_format **select_fmt,
                                     struct dc1394_frame_rate **select_fps)
{
    dc1394_data *dc1394 = c->priv_data;
    AVStream *vst;
    struct dc1394_frame_format *fmt;
    struct dc1394_frame_rate *fps;
    /* defaults */
    enum PixelFormat pix_fmt = ap->pix_fmt == PIX_FMT_NONE ? PIX_FMT_UYVY422 : ap->pix_fmt;
    int width                = !ap->width ? 320 : ap->width;
    int height               = !ap->height ? 240 : ap->height;
    int frame_rate           = !ap->time_base.num ? 30000 : av_rescale(1000, ap->time_base.den, ap->time_base.num);

    for (fmt = dc1394_frame_formats; fmt->width; fmt++)
        if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
            break;

    for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
        if (fps->frame_rate == frame_rate)
            break;

    if (!fps->frame_rate || !fmt->width) {
        av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n",
               avcodec_get_pix_fmt_name(pix_fmt), width, height, frame_rate);
        goto out;
    }

    /* create a video stream */
    vst = av_new_stream(c, 0);
    if (!vst)
        goto out;
    av_set_pts_info(vst, 64, 1, 1000);
    vst->codec->codec_type    = CODEC_TYPE_VIDEO;
    vst->codec->codec_id      = CODEC_ID_RAWVIDEO;
    vst->codec->time_base.den = fps->frame_rate;
    vst->codec->time_base.num = 1000;
    vst->codec->width         = fmt->width;
    vst->codec->height        = fmt->height;
    vst->codec->pix_fmt       = fmt->pix_fmt;

    /* packet init */
    av_init_packet(&dc1394->packet);
    dc1394->packet.size         = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
    dc1394->packet.stream_index = vst->index;
    dc1394->packet.flags       |= PKT_FLAG_KEY;

    dc1394->current_frame = 0;
    dc1394->fps           = fps->frame_rate;

    vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);

    *select_fps = fps;
    *select_fmt = fmt;
    return 0;
out:
    return -1;
}
uint8_t externalEncoder::configure(AVDMGenericVideoStream *instream, int useExistingLogFile)
{
    ADV_Info *info;

    info = instream->getInfo();
    _w = info->width;
    _h = info->height;
    _vbuffer = new ADMImage(_w, _h);
    ADM_assert(_vbuffer);
    _in = instream;
    _useExistingLogFile = useExistingLogFile;

    vidEncVideoProperties properties;

    memset(&properties, 0, sizeof(vidEncVideoProperties));
    properties.structSize = sizeof(vidEncVideoProperties);
    properties.width = _w;
    properties.height = _h;
    properties.parWidth = instream->getPARWidth();
    properties.parHeight = instream->getPARHeight();
    properties.frameCount = info->nb_frames;
    properties.fpsNum = info->fps1000;
    properties.fpsDen = 1000;

    if (_globalHeader)
        properties.flags |= ADM_VIDENC_FLAG_GLOBAL_HEADER;

    if (_plugin->open(_plugin->encoderId, &properties))
    {
        int64_t pixFmtMask = 0;

        for (int i = 0; i < properties.supportedCspsCount; i++)
            pixFmtMask |= (1 << getAvCodecColourspace(properties.supportedCsps[i]));

        _pixFmt = avcodec_find_best_pix_fmt(pixFmtMask, PIX_FMT_YUV420P, 0, NULL);

        if (_pixFmt != PIX_FMT_YUV420P)
        {
            AVPicture resamplePicture;

            _swsContext = sws_getContext(
                properties.width, properties.height, PIX_FMT_YUV420P,
                properties.width, properties.height, _pixFmt,
                SWS_SPLINE, NULL, NULL, NULL);

            _resampleSize = avpicture_fill(&resamplePicture, NULL, _pixFmt,
                                           properties.width, properties.height);
            _resampleBuffer = new uint8_t[_resampleSize];
        }

        printf("[externalEncoder] Target colourspace: %s\n",
               _pixFmt == PIX_FMT_YUV420P ? "yv12" : avcodec_get_pix_fmt_name(_pixFmt));

        return (startPass() == ADM_VIDENC_ERR_SUCCESS);
    }
    else
        return 0;
}
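/* A hedged sketch of how the conversion state built in configure() might be
 * used per frame; convertFrame() and srcYV12 are illustrative names, not the
 * class's actual API. Wrap source and destination in AVPictures and scale. */
uint8_t *externalEncoder::convertFrame(uint8_t *srcYV12)
{
    if (!_swsContext)
        return srcYV12;  /* plugin accepts YV12 directly; nothing to convert */

    AVPicture src, dst;

    avpicture_fill(&src, srcYV12, PIX_FMT_YUV420P, _w, _h);
    avpicture_fill(&dst, _resampleBuffer, _pixFmt, _w, _h);
    sws_scale(_swsContext, src.data, src.linesize, 0, _h, dst.data, dst.linesize);

    return _resampleBuffer;
}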
const char* FFMpegCodecDecoder::getFmtStr()
{
    PixelFormat fmt = ((RealFFMpegCodecDecoder*) _delegate)->videoPixFormat;
    return avcodec_get_pix_fmt_name(fmt);
}