/* Decode a JPEG image from memory and convert/scale it into a freshly
 * allocated YUV420P mblk_t of dimensions reqsize.
 * @param jpgbuf   compressed JPEG bytes
 * @param bufsize  length of jpgbuf in bytes
 * @param reqsize  requested output width/height
 * @return newly allocated mblk_t (caller owns it), or NULL on failure. */
static mblk_t *jpeg2yuv(uint8_t *jpgbuf, int bufsize, MSVideoSize *reqsize){
	AVCodecContext av_context;
	int got_picture=0;
	AVFrame orig;
	AVPicture dest;
	mblk_t *ret;
	struct SwsContext *sws_ctx;

	avcodec_get_context_defaults(&av_context);
	if (avcodec_open(&av_context,avcodec_find_decoder(CODEC_ID_MJPEG))<0){
		ms_error("jpeg2yuv: avcodec_open failed");
		return NULL;
	}
	if (avcodec_decode_video(&av_context,&orig,&got_picture,jpgbuf,bufsize)<0){
		ms_error("jpeg2yuv: avcodec_decode_video failed");
		avcodec_close(&av_context);
		return NULL;
	}
	/* FIX: a non-negative decode return does not guarantee a full frame was
	 * produced; without got_picture, 'orig' holds uninitialized data. */
	if (!got_picture){
		ms_error("jpeg2yuv: avcodec_decode_video failed");
		avcodec_close(&av_context);
		return NULL;
	}
	/* FIX: the scaler's source format must be the decoder's actual output
	 * format (MJPEG typically decodes to YUVJ420P/YUVJ422P), not a
	 * hard-coded PIX_FMT_YUV420P; also check sws_getContext for failure
	 * before allocating the output block. */
	sws_ctx=sws_getContext(av_context.width,av_context.height,av_context.pix_fmt,
		reqsize->width,reqsize->height,PIX_FMT_YUV420P,SWS_FAST_BILINEAR,
		NULL, NULL, NULL);
	if (sws_ctx==NULL){
		ms_error("jpeg2yuv: sws_getContext() failed.");
		avcodec_close(&av_context);
		return NULL;
	}
	ret=allocb(avpicture_get_size(PIX_FMT_YUV420P,reqsize->width,reqsize->height),0);
	ret->b_wptr=ret->b_datap->db_lim;
	avpicture_fill(&dest,ret->b_rptr,PIX_FMT_YUV420P,reqsize->width,reqsize->height);
	if (sws_scale(sws_ctx,orig.data,orig.linesize,0,av_context.height,dest.data,dest.linesize)<0){
		ms_error("jpeg2yuv: sws_scale() failed.");
	}
	sws_freeContext(sws_ctx);
	avcodec_close(&av_context);
	return ret;
}
/* Initialize the H.264 decoder context stored inside DecData.
 * Aborts via ms_fatal() when the decoder is missing or cannot be opened. */
static void dec_open(DecData *d){
	AVCodec *h264_dec = avcodec_find_decoder(CODEC_ID_H264);
	if (h264_dec == NULL)
		ms_fatal("Could not find H264 decoder in ffmpeg.");
	avcodec_get_context_defaults(&d->av_context);
	if (avcodec_open(&d->av_context, h264_dec) != 0)
		ms_fatal("avcodec_open() failed.");
}
/**
 * Allocates an AVCodecContext and sets it to defaults.
 * NOTE(review): the memory comes from av_malloc(), so it should be released
 * with av_free() rather than plain free() — av_malloc may return an
 * over-aligned/offset pointer on some platforms. Returns NULL on OOM.
 */
AVCodecContext *
avcodec_alloc_context (void)
{
    AVCodecContext *avctx = av_malloc (sizeof (AVCodecContext));

    if (avctx == NULL)
        return NULL;

    /* initialize every field before handing the context out */
    avcodec_get_context_defaults (avctx);

    return avctx;
}
// Construct a decoder for the given codec id: registers FFmpeg, allocates
// the codec context and frame, and opens the decoder if it exists.
Decoder::Decoder(CodecID type)
{
	// Ensure ffmpeg is initialized
	avcodec_init();
	avcodec_register_all();

	AVCodec* codec = avcodec_find_decoder(type);

	// avcodec_alloc_context() already applies context defaults, so the
	// original's extra avcodec_get_context_defaults() call was redundant.
	m_context = avcodec_alloc_context();
	m_avframe = avcodec_alloc_frame();

	// FIX: avcodec_open() with a NULL codec (unknown id) or NULL context
	// (OOM) would crash; only open when both are valid.
	if (codec != NULL && m_context != NULL)
		avcodec_open(m_context, codec);
}
/* Open the MP4V-ES (MPEG-4 part 2) decoder: find the codec, allocate and
 * configure the context, allocate the output picture and the reassembly
 * accumulator, then open the decoder.
 * Returns 0 on success, negative on failure. */
int tdav_codec_mp4ves_open_decoder(tdav_codec_mp4ves_t* self)
{
	int ret, size;

	if(!self->decoder.codec && !(self->decoder.codec = avcodec_find_decoder(CODEC_ID_MPEG4))){
		TSK_DEBUG_ERROR("Failed to find MP4V-ES decoder");
		return -1;
	}
	if(self->decoder.context){
		TSK_DEBUG_ERROR("Decoder already opened");
		return -1;
	}

	self->decoder.context = avcodec_alloc_context();
	/* FIX: guard against allocation failure before dereferencing */
	if(!self->decoder.context){
		TSK_DEBUG_ERROR("Failed to allocate decoder context");
		return -2;
	}
	avcodec_get_context_defaults(self->decoder.context);

	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->out.width;
	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->out.height;

	// Picture (YUV 420)
	if(!(self->decoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create decoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->decoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->decoder.context->width, self->decoder.context->height);
	/* FIX: the original allocated the accumulator twice with identical code,
	 * leaking the first buffer; allocate it exactly once. */
	if(!(self->decoder.accumulator = tsk_calloc((size + FF_INPUT_BUFFER_PADDING_SIZE), sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate decoder buffer");
		return -2;
	}

	// Open decoder
	if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open MP4V-ES decoder");
		return ret;
	}
	self->decoder.last_seq = 0;
	return ret;
}
static bool ffemu_init_audio(struct ff_audio_info *audio, struct ffemu_params *param) { AVCodec *codec = avcodec_find_encoder_by_name("flac"); if (!codec) return false; audio->encoder = codec; // FFmpeg just loves to deprecate stuff :) #ifdef HAVE_FFMPEG_ALLOC_CONTEXT3 audio->codec = avcodec_alloc_context3(codec); #else audio->codec = avcodec_alloc_context(); avcodec_get_context_defaults(audio->codec); #endif audio->codec->sample_rate = (int)roundf(param->samplerate); audio->codec->time_base = av_d2q(1.0 / param->samplerate, 1000000); audio->codec->channels = param->channels; audio->codec->sample_fmt = AV_SAMPLE_FMT_S16; #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 if (avcodec_open2(audio->codec, codec, NULL) != 0) #else if (avcodec_open(audio->codec, codec) != 0) #endif { return false; } audio->buffer = (int16_t*)av_malloc( audio->codec->frame_size * audio->codec->channels * sizeof(int16_t)); if (!audio->buffer) return false; audio->outbuf_size = FF_MIN_BUFFER_SIZE; audio->outbuf = (uint8_t*)av_malloc(audio->outbuf_size); if (!audio->outbuf) return false; return true; }
/* Open the H.264 decoder (FFmpeg build) or no-op in passthrough builds.
 * Allocates and configures the codec context and output picture, then opens
 * the decoder. Returns 0 on success, negative on failure. */
int tdav_codec_h264_open_decoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;

	if(self->decoder.context){
		TSK_DEBUG_ERROR("Decoder already opened");
		return -1;
	}

	self->decoder.context = avcodec_alloc_context();
	/* FIX: guard against allocation failure before dereferencing */
	if(!self->decoder.context){
		TSK_DEBUG_ERROR("Failed to allocate decoder context");
		return -2;
	}
	avcodec_get_context_defaults(self->decoder.context);

	self->decoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->decoder.context->flags2 |= CODEC_FLAG2_FAST;
	self->decoder.context->width = TMEDIA_CODEC_VIDEO(self)->in.width;
	self->decoder.context->height = TMEDIA_CODEC_VIDEO(self)->in.height;

	// Picture (YUV 420)
	if(!(self->decoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create decoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->decoder.picture);

	// Open decoder
	if((ret = avcodec_open(self->decoder.context, self->decoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}
	self->decoder.last_seq = 0;
	return ret;
#elif HAVE_H264_PASSTHROUGH
	return 0;
#endif

	/* reachable only when neither FFmpeg nor passthrough was enabled */
	TSK_DEBUG_ERROR("Unexpected code called");
	return -1;
}
/*!	Adds a new stream to fContext and configures its codec context from the
	given media_format.  Raw video sets frame rate, frame size and pixel
	aspect ratio; raw audio sets sample rate/format and channel layout.
	\return B_OK on success, B_ERROR when the stream cannot be added,
		B_MEDIA_BAD_FORMAT for unsupported raw-audio sample formats.
*/
status_t
AVFormatWriter::StreamCookie::Init(const media_format* format,
	const media_codec_info* codecInfo)
{
	TRACE("AVFormatWriter::StreamCookie::Init()\n");

	BAutolock _(fStreamLock);

	fPacket.stream_index = fContext->nb_streams;
	fStream = av_new_stream(fContext, fPacket.stream_index);

	if (fStream == NULL) {
		TRACE(" failed to add new stream\n");
		return B_ERROR;
	}

//	TRACE(" fStream->codec: %p\n", fStream->codec);

	// TODO: This is a hack for now! Use avcodec_find_encoder_by_name()
	// or something similar...
	fStream->codec->codec_id = (CodecID)codecInfo->sub_id;

	// Setup the stream according to the media format...
	if (format->type == B_MEDIA_RAW_VIDEO) {
		fStream->codec->codec_type = CODEC_TYPE_VIDEO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(fStream->codec);
#endif
		// frame rate
		fStream->codec->time_base.den = (int)format->u.raw_video.field_rate;
		fStream->codec->time_base.num = 1;
		// NOTE: API example does not do this:
//		fStream->r_frame_rate.den = (int)format->u.raw_video.field_rate;
//		fStream->r_frame_rate.num = 1;
//		fStream->time_base.den = (int)format->u.raw_video.field_rate;
//		fStream->time_base.num = 1;
		// video size
		fStream->codec->width = format->u.raw_video.display.line_width;
		fStream->codec->height = format->u.raw_video.display.line_count;
		// pixel aspect ratio
		fStream->sample_aspect_ratio.num
			= format->u.raw_video.pixel_width_aspect;
		fStream->sample_aspect_ratio.den
			= format->u.raw_video.pixel_height_aspect;
		if (fStream->sample_aspect_ratio.num == 0
			|| fStream->sample_aspect_ratio.den == 0) {
			// no aspect given; derive it from the frame dimensions
			av_reduce(&fStream->sample_aspect_ratio.num,
				&fStream->sample_aspect_ratio.den, fStream->codec->width,
				fStream->codec->height, 255);
		}
		fStream->codec->sample_aspect_ratio = fStream->sample_aspect_ratio;
		// TODO: Don't hard code this...
		fStream->codec->pix_fmt = PIX_FMT_YUV420P;

		// Some formats want stream headers to be separate
		if ((fContext->oformat->flags & AVFMT_GLOBALHEADER) != 0)
			fStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

		fCalculatePTS = true;
	} else if (format->type == B_MEDIA_RAW_AUDIO) {
		fStream->codec->codec_type = CODEC_TYPE_AUDIO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(fStream->codec);
#endif
		// frame rate
		fStream->codec->sample_rate = (int)format->u.raw_audio.frame_rate;
		// NOTE: API example does not do this:
//		fStream->codec->time_base.den = (int)format->u.raw_audio.frame_rate;
//		fStream->codec->time_base.num = 1;
//		fStream->time_base.den = (int)format->u.raw_audio.frame_rate;
//		fStream->time_base.num = 1;

		// channels
		fStream->codec->channels = format->u.raw_audio.channel_count;
		// map the Media Kit raw sample format onto FFmpeg sample formats
		switch (format->u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fStream->codec->sample_fmt = SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fStream->codec->sample_fmt = SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fStream->codec->sample_fmt = SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fStream->codec->sample_fmt = SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fStream->codec->sample_fmt = SAMPLE_FMT_U8;
				break;

			case media_raw_audio_format::B_AUDIO_CHAR:
			default:
				// signed 8 bit has no FFmpeg equivalent here
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (format->u.raw_audio.channel_mask == 0) {
			// guess the channel mask...
			switch (format->u.raw_audio.channel_count) {
				default:
				case 2:
					fStream->codec->channel_layout = CH_LAYOUT_STEREO;
					break;
				case 1:
					fStream->codec->channel_layout = CH_LAYOUT_MONO;
					break;
				case 3:
					fStream->codec->channel_layout = CH_LAYOUT_SURROUND;
					break;
				case 4:
					fStream->codec->channel_layout = CH_LAYOUT_QUAD;
					break;
				case 5:
					fStream->codec->channel_layout = CH_LAYOUT_5POINT0;
					break;
				case 6:
					fStream->codec->channel_layout = CH_LAYOUT_5POINT1;
					break;
				case 8:
					fStream->codec->channel_layout = CH_LAYOUT_7POINT1;
					break;
				case 10:
					fStream->codec->channel_layout = CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fStream->codec->channel_layout = format->u.raw_audio.channel_mask;
		}

		fCalculatePTS = false;
	}

	TRACE(" stream->time_base: (%d/%d), codec->time_base: (%d/%d))\n",
		fStream->time_base.num, fStream->time_base.den,
		fStream->codec->time_base.num, fStream->codec->time_base.den);

	return B_OK;
}
/* Open the H.264 encoder (x264 via FFmpeg): allocate and configure the codec
 * context (bitrate from negotiated bandwidth, profile/level, RTP payload
 * sizing for packetization-mode 0), then open the encoder.
 * Returns 0 on success, negative on failure. */
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;
	tsk_size_t size;

	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}

#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
	if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
		avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
	}
#else
	if((self->encoder.context = avcodec_alloc_context())){
		avcodec_get_context_defaults(self->encoder.context);
	}
#endif

	if(!self->encoder.context){
		TSK_DEBUG_ERROR("Failed to allocate context");
		return -1;
	}

#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
#endif

	self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->encoder.context->time_base.num = 1;
	self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
	/* swap width/height when the frame is rotated by 90/270 degrees */
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.height
		: TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.width
		: TMEDIA_CODEC_VIDEO(self)->out.height;

	/* clamp the computed bandwidth to the negotiated upload maximum */
	self->encoder.max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
		TMEDIA_CODEC(self)->bandwidth_max_upload
	);
	self->encoder.context->bit_rate = (self->encoder.max_bw_kpbs * 1024);// bps
	self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
	self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;

#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->rc_lookahead = 0;
#endif
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;

#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
#endif
	self->encoder.context->me_method = ME_UMH;
	self->encoder.context->me_range = 16;
	self->encoder.context->qmin = 10;
	self->encoder.context->qmax = 51;
#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->mb_qmin = self->encoder.context->qmin;
	self->encoder.context->mb_qmax = self->encoder.context->qmax;
#endif
	/* METROPOLIS = G2J.COM TelePresence client.
	   Check Issue 378: No video when calling "TANDBERG/4129 (X8.1.1)" */
#if !METROPOLIS && 0
	self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
#endif
	self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;

	switch(TDAV_CODEC_H264_COMMON(self)->profile){
		case profile_idc_baseline:
		default:
			self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
		case profile_idc_main:
			self->encoder.context->profile = FF_PROFILE_H264_MAIN;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
	}

	/* FIX: moved after the profile switch — the original tested
	 * context->profile BEFORE it was assigned, so the baseline
	 * "no B-frames" constraint never applied deterministically. */
	if (self->encoder.context->profile == FF_PROFILE_H264_BASELINE) {
		self->encoder.context->max_b_frames = 0;
	}

	/* Comment from libavcodec/libx264.c:
	 * Allow x264 to be instructed through AVCodecContext about the maximum
	 * size of the RTP payload. For example, this enables the production of
	 * payload suitable for the H.264 RTP packetization-mode 0 i.e. single
	 * NAL unit per RTP packet.
	 */
	self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);

#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
	if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
		TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
		TSK_DEBUG_ERROR("Failed to set x264 profile");
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
		TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
	}
	/* the option was renamed "rc-lookahead" -> "rc_lookahead" across x264
	 * wrapper versions; try both spellings */
	if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
		TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
		TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
	}
#endif

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
		return -2;
	}

	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}
	self->encoder.frame_count = 0;

	TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
#elif HAVE_H264_PASSTHROUGH
	self->encoder.frame_count = 0;
	return 0;
#endif

	/* reachable only when neither FFmpeg nor passthrough was enabled */
	TSK_DEBUG_ERROR("Not expected code called");
	return -1;
}
/* Negotiate sink caps for the FFmpeg encoder element: (re)configure the
 * AVCodecContext from element properties and the incoming caps, handle the
 * rate-control pass (QSCALE / PASS1 stats writing / PASS2 stats reading),
 * open the codec, then pick and set compatible source caps downstream.
 * Returns TRUE on success, FALSE on any negotiation or open failure. */
static gboolean
gst_ffmpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstCaps *other_caps;
  GstCaps *allowed_caps;
  GstCaps *icaps;
  enum PixelFormat pix_fmt;
  GstFFMpegEnc *ffmpegenc = (GstFFMpegEnc *) GST_PAD_PARENT (pad);
  GstFFMpegEncClass *oclass =
      (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

  /* close old session */
  if (ffmpegenc->opened) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    ffmpegenc->opened = FALSE;
  }

  /* set defaults */
  avcodec_get_context_defaults (ffmpegenc->context);

  /* if we set it in _getcaps we should set it also in _link */
  ffmpegenc->context->strict_std_compliance = -1;

  /* user defined properties */
  ffmpegenc->context->bit_rate = ffmpegenc->bitrate;
  ffmpegenc->context->bit_rate_tolerance = ffmpegenc->bitrate;
  ffmpegenc->context->gop_size = ffmpegenc->gop_size;
  ffmpegenc->context->me_method = ffmpegenc->me_method;
  GST_DEBUG_OBJECT (ffmpegenc, "Setting avcontext to bitrate %lu, gop_size %d",
      ffmpegenc->bitrate, ffmpegenc->gop_size);

  /* RTP payload used for GOB production (for Asterisk) */
  if (ffmpegenc->rtp_payload_size) {
    ffmpegenc->context->rtp_payload_size = ffmpegenc->rtp_payload_size;
  }

  /* additional avcodec settings */
  /* first fill in the majority by copying over */
  gst_ffmpeg_cfg_fill_context (ffmpegenc, ffmpegenc->context);

  /* then handle some special cases */
  /* lmin/lmax are lambda values: QP scaled by FF_QP2LAMBDA, rounded */
  ffmpegenc->context->lmin = (ffmpegenc->lmin * FF_QP2LAMBDA + 0.5);
  ffmpegenc->context->lmax = (ffmpegenc->lmax * FF_QP2LAMBDA + 0.5);

  if (ffmpegenc->interlaced) {
    ffmpegenc->context->flags |=
        CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME;
    ffmpegenc->picture->interlaced_frame = TRUE;
    /* if this is not the case, a filter element should be used to swap fields */
    ffmpegenc->picture->top_field_first = TRUE;
  }

  /* some other defaults */
  ffmpegenc->context->rc_strategy = 2;
  ffmpegenc->context->b_frame_strategy = 0;
  ffmpegenc->context->coder_type = 0;
  ffmpegenc->context->context_model = 0;
  ffmpegenc->context->scenechange_threshold = 0;
  ffmpegenc->context->inter_threshold = 0;

  /* and last but not least the pass; CBR, 2-pass, etc */
  ffmpegenc->context->flags |= ffmpegenc->pass;
  switch (ffmpegenc->pass) {
      /* some additional action depends on type of pass */
    case CODEC_FLAG_QSCALE:
      ffmpegenc->context->global_quality
          = ffmpegenc->picture->quality = FF_QP2LAMBDA * ffmpegenc->quantizer;
      break;
    case CODEC_FLAG_PASS1:     /* need to prepare a stats file */
      /* we don't close when changing caps, fingers crossed */
      if (!ffmpegenc->file)
        ffmpegenc->file = g_fopen (ffmpegenc->filename, "w");
      if (!ffmpegenc->file) {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, OPEN_WRITE,
            (("Could not open file \"%s\" for writing."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);
        return FALSE;
      }
      break;
    case CODEC_FLAG_PASS2:
    {                           /* need to read the whole stats file ! */
      gsize size;

      if (!g_file_get_contents (ffmpegenc->filename,
              &ffmpegenc->context->stats_in, &size, NULL)) {
        GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, READ,
            (("Could not get contents of file \"%s\"."), ffmpegenc->filename),
            GST_ERROR_SYSTEM);
        return FALSE;
      }
      break;
    }
    default:
      break;
  }

  /* fetch pix_fmt and so on */
  gst_ffmpeg_caps_with_codectype (oclass->in_plugin->type, caps,
      ffmpegenc->context);
  if (!ffmpegenc->context->time_base.den) {
    /* no framerate in caps: fall back to 25 fps */
    ffmpegenc->context->time_base.den = 25;
    ffmpegenc->context->time_base.num = 1;
    ffmpegenc->context->ticks_per_frame = 1;
  } else if ((oclass->in_plugin->id == CODEC_ID_MPEG4)
      && (ffmpegenc->context->time_base.den > 65535)) {
    /* MPEG4 Standards do not support time_base denominator greater than
     * (1<<16) - 1 . We therefore scale them down.
     * Agreed, it will not be the exact framerate... but the difference
     * shouldn't be that noticeable */
    ffmpegenc->context->time_base.num =
        (gint) gst_util_uint64_scale_int (ffmpegenc->context->time_base.num,
        65535, ffmpegenc->context->time_base.den);
    ffmpegenc->context->time_base.den = 65535;
    GST_LOG_OBJECT (ffmpegenc, "MPEG4 : scaled down framerate to %d / %d",
        ffmpegenc->context->time_base.den, ffmpegenc->context->time_base.num);
  }

  /* remember the requested input format; the codec may change it on open */
  pix_fmt = ffmpegenc->context->pix_fmt;

  /* max-key-interval may need the framerate set above */
  if (ffmpegenc->max_key_interval) {
    AVCodecContext *ctx;

    /* override gop-size */
    ctx = ffmpegenc->context;
    ctx->gop_size = (ffmpegenc->max_key_interval < 0) ?
        (-ffmpegenc->max_key_interval
        * (ctx->time_base.den * ctx->ticks_per_frame / ctx->time_base.num))
        : ffmpegenc->max_key_interval;
  }

  /* open codec */
  if (gst_ffmpeg_avcodec_open (ffmpegenc->context, oclass->in_plugin) < 0) {
    if (ffmpegenc->context->priv_data)
      gst_ffmpeg_avcodec_close (ffmpegenc->context);
    if (ffmpegenc->context->stats_in)
      g_free (ffmpegenc->context->stats_in);
    GST_DEBUG_OBJECT (ffmpegenc, "ffenc_%s: Failed to open FFMPEG codec",
        oclass->in_plugin->name);
    return FALSE;
  }

  /* second pass stats buffer no longer needed */
  if (ffmpegenc->context->stats_in)
    g_free (ffmpegenc->context->stats_in);

  /* is the colourspace correct? */
  if (pix_fmt != ffmpegenc->context->pix_fmt) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    GST_DEBUG_OBJECT (ffmpegenc,
        "ffenc_%s: AV wants different colourspace (%d given, %d wanted)",
        oclass->in_plugin->name, pix_fmt, ffmpegenc->context->pix_fmt);
    return FALSE;
  }
  /* we may have failed mapping caps to a pixfmt,
   * and quite some codecs do not make up their own mind about that
   * in any case, _NONE can never work out later on */
  if (oclass->in_plugin->type == CODEC_TYPE_VIDEO && pix_fmt == PIX_FMT_NONE) {
    GST_DEBUG_OBJECT (ffmpegenc,
        "ffenc_%s: Failed to determine input format", oclass->in_plugin->name);
    return FALSE;
  }

  /* some codecs support more than one format, first auto-choose one */
  GST_DEBUG_OBJECT (ffmpegenc, "picking an output format ...");
  allowed_caps = gst_pad_get_allowed_caps (ffmpegenc->srcpad);
  if (!allowed_caps) {
    GST_DEBUG_OBJECT (ffmpegenc, "... but no peer, using template caps");
    /* we need to copy because get_allowed_caps returns a ref, and
     * get_pad_template_caps doesn't */
    allowed_caps =
        gst_caps_copy (gst_pad_get_pad_template_caps (ffmpegenc->srcpad));
  }
  GST_DEBUG_OBJECT (ffmpegenc, "chose caps %" GST_PTR_FORMAT, allowed_caps);
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, allowed_caps, ffmpegenc->context);

  /* try to set this caps on the other side */
  other_caps = gst_ffmpeg_codecid_to_caps (oclass->in_plugin->id,
      ffmpegenc->context, TRUE);

  if (!other_caps) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    GST_DEBUG ("Unsupported codec - no caps found");
    return FALSE;
  }

  icaps = gst_caps_intersect (allowed_caps, other_caps);
  gst_caps_unref (allowed_caps);
  gst_caps_unref (other_caps);
  if (gst_caps_is_empty (icaps)) {
    gst_caps_unref (icaps);
    return FALSE;
  }

  /* more than one candidate: keep only the first structure */
  if (gst_caps_get_size (icaps) > 1) {
    GstCaps *newcaps;

    newcaps =
        gst_caps_new_full (gst_structure_copy (gst_caps_get_structure (icaps,
                0)), NULL);
    gst_caps_unref (icaps);
    icaps = newcaps;
  }

  if (!gst_pad_set_caps (ffmpegenc->srcpad, icaps)) {
    gst_ffmpeg_avcodec_close (ffmpegenc->context);
    gst_caps_unref (icaps);
    return FALSE;
  }
  gst_caps_unref (icaps);

  /* success! */
  ffmpegenc->opened = TRUE;

  return TRUE;
}
static bool ffemu_init_video(struct ff_video_info *video, const struct ffemu_params *param) { #ifdef HAVE_X264RGB AVCodec *codec = NULL; if (g_settings.video.h264_record) { codec = avcodec_find_encoder_by_name("libx264rgb"); // Older versions of FFmpeg have RGB encoding in libx264. if (!codec) codec = avcodec_find_encoder_by_name("libx264"); } else codec = avcodec_find_encoder_by_name("ffv1"); #else AVCodec *codec = avcodec_find_encoder_by_name("ffv1"); #endif if (!codec) return false; video->encoder = codec; #if AV_HAVE_BIGENDIAN video->fmt = PIX_FMT_RGB555BE; #else video->fmt = PIX_FMT_RGB555LE; #endif video->pix_size = sizeof(uint16_t); if (param->rgb32) { video->fmt = PIX_FMT_RGB32; video->pix_size = sizeof(uint32_t); } #ifdef HAVE_X264RGB video->pix_fmt = g_settings.video.h264_record ? PIX_FMT_BGR24 : PIX_FMT_RGB32; #else video->pix_fmt = PIX_FMT_RGB32; #endif #ifdef HAVE_FFMPEG_ALLOC_CONTEXT3 video->codec = avcodec_alloc_context3(codec); #else video->codec = avcodec_alloc_context(); avcodec_get_context_defaults(video->codec); #endif video->codec->width = param->out_width; video->codec->height = param->out_height; video->codec->time_base = av_d2q(1.0 / param->fps, 1000000); // Arbitrary big number. video->codec->sample_aspect_ratio = av_d2q(param->aspect_ratio * param->out_height / param->out_width, 255); video->codec->pix_fmt = video->pix_fmt; #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 AVDictionary *opts = NULL; #endif #ifdef HAVE_X264RGB if (g_settings.video.h264_record) { video->codec->thread_count = 3; av_dict_set(&opts, "qp", "0", 0); } else video->codec->thread_count = 2; #else video->codec->thread_count = 2; #endif #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 if (avcodec_open2(video->codec, codec, &opts) != 0) #else if (avcodec_open(video->codec, codec) != 0) #endif return false; #ifdef HAVE_FFMPEG_AVCODEC_OPEN2 if (opts) av_dict_free(&opts); #endif // Allocate a big buffer :p ffmpeg API doesn't seem to give us some clues how big this buffer should be. 
video->outbuf_size = 1 << 23; video->outbuf = (uint8_t*)av_malloc(video->outbuf_size); size_t size = avpicture_get_size(video->pix_fmt, param->out_width, param->out_height); video->conv_frame_buf = (uint8_t*)av_malloc(size); video->conv_frame = avcodec_alloc_frame(); avpicture_fill((AVPicture*)video->conv_frame, video->conv_frame_buf, video->pix_fmt, param->out_width, param->out_height); return true; }
/* ============ Internal functions ================= */

/* Open the MP4V-ES (MPEG-4 part 2) encoder: find the codec, allocate and
 * configure the context (bitrate from negotiated bandwidth, profile/level
 * decoded from self->profile), allocate the picture/buffer, then open it.
 * Returns 0 on success, negative on failure. */
int tdav_codec_mp4ves_open_encoder(tdav_codec_mp4ves_t* self)
{
	int ret, size;
	int32_t max_bw_kpbs;

	if(!self->encoder.codec && !(self->encoder.codec = avcodec_find_encoder(CODEC_ID_MPEG4))){
		TSK_DEBUG_ERROR("Failed to find mp4v encoder");
		return -1;
	}
	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}

	self->encoder.context = avcodec_alloc_context();
	/* FIX: guard against allocation failure before dereferencing */
	if(!self->encoder.context){
		TSK_DEBUG_ERROR("Failed to allocate encoder context");
		return -2;
	}
	avcodec_get_context_defaults(self->encoder.context);

	self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->encoder.context->time_base.num = 1;
	self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->in.fps;
	/* swap width/height when the frame is rotated by 90/270 degrees */
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.height
		: TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.width
		: TMEDIA_CODEC_VIDEO(self)->out.height;

	self->encoder.context->mb_decision = FF_MB_DECISION_RD;
	self->encoder.context->noise_reduction = 250;
	self->encoder.context->flags |= CODEC_FLAG_QSCALE;
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;

	/* clamp the computed bandwidth to the configured maximum */
	max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
		self->encoder.max_bw_kpbs
	);
	self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps
	self->encoder.context->rtp_payload_size = MP4V_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	/* self->profile packs profile (high nibble) and level (low nibble) */
	self->encoder.context->profile = self->profile>>4;
	self->encoder.context->level = self->profile & 0x0F;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->in.fps * MP4V_GOP_SIZE_IN_SECONDS);
	self->encoder.context->max_b_frames = 0;
	self->encoder.context->b_frame_strategy = 1;
	self->encoder.context->flags |= CODEC_FLAG_AC_PRED;

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create MP4V-ES encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate MP4V-ES encoder buffer");
		return -2;
	}

	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open MP4V-ES encoder");
		return ret;
	}

	TSK_DEBUG_INFO("[MP4V-ES] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
}
/*! Opens the given video file, locates its first video stream and prepares
    decoder state plus the RGB conversion frame/buffer.
    Returns false on any failure (open, stream info, codec lookup/open,
    frame allocation); true on success (also sets ok=true).
    NOTE(review): the AVCodecContext obtained from the demuxed stream is
    immediately replaced by a freshly allocated one, discarding the
    demuxer-provided parameters (extradata, real dimensions) — and
    pix_fmt/width/height are then hard-coded; confirm this is intentional. */
bool QVideoDecoder::openFile(QString filename)
{
   // Close last video..
   close();

   LastLastFrameTime=INT_MIN;       // Last last must be small to handle the seek well
   LastFrameTime=0;
   LastLastFrameNumber=INT_MIN;
   LastFrameNumber=0;
   DesiredFrameTime=DesiredFrameNumber=0;
   LastFrameOk=false;

   // Open video file
   if(av_open_input_file(&pFormatCtx, filename.toStdString().c_str(), NULL, 0, NULL)!=0)
       return false; // Couldn't open file

   // Retrieve stream information
   if(av_find_stream_info(pFormatCtx)<0)
       return false; // Couldn't find stream information

   // Dump information about file onto standard error
   dump_format(pFormatCtx, 0, filename.toStdString().c_str(), false);

   // Find the first video stream
   videoStream=-1;
   for(unsigned i=0; i<pFormatCtx->nb_streams; i++)
       if(pFormatCtx->streams[i]->codec->codec_type==ffmpeg::AVMEDIA_TYPE_VIDEO)
       {
           videoStream=i;
           break;
       }
   if(videoStream==-1)
       return false; // Didn't find a video stream

   // Get a pointer to the codec context for the video stream
   pCodecCtx=pFormatCtx->streams[videoStream]->codec;

   // Find the decoder for the video stream
   pCodec= ffmpeg::avcodec_find_decoder(pCodecCtx->codec_id);//avcodec_find_decoder(ffmpeg::CODEC_ID_H264);//
   if(pCodec==NULL)
       return false; // Codec not found

   // Replace the stream context with a fresh one and force fast decoding
   pCodecCtx= ffmpeg::avcodec_alloc_context();
   avcodec_get_context_defaults(pCodecCtx);
   pCodecCtx->flags2 |= CODEC_FLAG2_FAST;
   pCodecCtx->skip_frame = ffmpeg::AVDISCARD_DEFAULT;
   pCodecCtx->pix_fmt = ffmpeg::PIX_FMT_RGB24;
   pCodecCtx->width = 640;
   pCodecCtx->height = 480;

   // Open codec
   if(avcodec_open(pCodecCtx, pCodec)<0)
       return false; // Could not open codec

   // Hack to correct wrong frame rates that seem to be generated by some
   // codecs
   //if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
   //   pCodecCtx->time_base.den=1000;

   // Allocate video frame
   pFrame=ffmpeg::avcodec_alloc_frame();

   // Allocate an AVFrame structure
   pFrameRGB=ffmpeg::avcodec_alloc_frame();
   if(pFrameRGB==NULL)
       return false;

   // Determine required buffer size and allocate buffer
   numBytes=ffmpeg::avpicture_get_size(ffmpeg::PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height);
   buffer = new uint8_t[numBytes];

   // Assign appropriate parts of buffer to image planes in pFrameRGB
   avpicture_fill((ffmpeg::AVPicture *)pFrameRGB, buffer, ffmpeg::PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

   ok=true;

   return true;
}
/*! Adds a new AVStream to fContext and configures its codec context from the
 * given media_format.
 * \param format     raw video or raw audio description to encode.
 * \param codecInfo  carries the target CodecID in sub_id.
 * \return B_OK on success, B_ERROR if the stream could not be added,
 *         B_MEDIA_BAD_FORMAT for unsupported raw-audio sample formats.
 * NOTE(review): locks fStreamLock for the whole call via BAutolock.
 */
status_t
AVFormatWriter::StreamCookie::Init(media_format* format,
	const media_codec_info* codecInfo)
{
	TRACE("AVFormatWriter::StreamCookie::Init()\n");

	BAutolock _(fStreamLock);

	// The new stream's index is the current stream count.
	fPacket.stream_index = fContext->nb_streams;
	fStream = av_new_stream(fContext, fPacket.stream_index);

	if (fStream == NULL) {
		TRACE(" failed to add new stream\n");
		return B_ERROR;
	}

//	TRACE(" fStream->codec: %p\n", fStream->codec);
	// TODO: This is a hack for now! Use avcodec_find_encoder_by_name()
	// or something similar...
	fStream->codec->codec_id = (CodecID)codecInfo->sub_id;
	if (fStream->codec->codec_id == CODEC_ID_NONE)
		fStream->codec->codec_id = raw_audio_codec_id_for(*format);

	// Setup the stream according to the media format...
	if (format->type == B_MEDIA_RAW_VIDEO) {
		fStream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(fStream->codec);
#endif
		// frame rate: time_base is 1/field_rate
		fStream->codec->time_base.den = (int)format->u.raw_video.field_rate;
		fStream->codec->time_base.num = 1;
		// video size
		fStream->codec->width = format->u.raw_video.display.line_width;
		fStream->codec->height = format->u.raw_video.display.line_count;
		// pixel aspect ratio
		fStream->sample_aspect_ratio.num
			= format->u.raw_video.pixel_width_aspect;
		fStream->sample_aspect_ratio.den
			= format->u.raw_video.pixel_height_aspect;
		// If the format carries no aspect ratio, derive one from the
		// frame dimensions (reduced to fit num/den <= 255).
		if (fStream->sample_aspect_ratio.num == 0
			|| fStream->sample_aspect_ratio.den == 0) {
			av_reduce(&fStream->sample_aspect_ratio.num,
				&fStream->sample_aspect_ratio.den, fStream->codec->width,
				fStream->codec->height, 255);
		}
		fStream->codec->gop_size = 12;
		fStream->codec->sample_aspect_ratio = fStream->sample_aspect_ratio;
		// Use the last supported pixel format of the AVCodec, which we hope
		// is the one with the best quality (true for all currently supported
		// encoders).
//		AVCodec* codec = fStream->codec->codec;
//		for (int i = 0; codec->pix_fmts[i] != PIX_FMT_NONE; i++)
//			fStream->codec->pix_fmt = codec->pix_fmts[i];
		fStream->codec->pix_fmt = PIX_FMT_YUV420P;
	} else if (format->type == B_MEDIA_RAW_AUDIO) {
		fStream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
#if GET_CONTEXT_DEFAULTS
// NOTE: API example does not do this:
		avcodec_get_context_defaults(fStream->codec);
#endif
		// frame rate
		fStream->codec->sample_rate = (int)format->u.raw_audio.frame_rate;
		// channels
		fStream->codec->channels = format->u.raw_audio.channel_count;
		// Map the Media Kit sample format onto FFmpeg's sample_fmt.
		switch (format->u.raw_audio.format) {
			case media_raw_audio_format::B_AUDIO_FLOAT:
				fStream->codec->sample_fmt = SAMPLE_FMT_FLT;
				break;
			case media_raw_audio_format::B_AUDIO_DOUBLE:
				fStream->codec->sample_fmt = SAMPLE_FMT_DBL;
				break;
			case media_raw_audio_format::B_AUDIO_INT:
				fStream->codec->sample_fmt = SAMPLE_FMT_S32;
				break;
			case media_raw_audio_format::B_AUDIO_SHORT:
				fStream->codec->sample_fmt = SAMPLE_FMT_S16;
				break;
			case media_raw_audio_format::B_AUDIO_UCHAR:
				fStream->codec->sample_fmt = SAMPLE_FMT_U8;
				break;
			case media_raw_audio_format::B_AUDIO_CHAR:
				// signed 8-bit has no FFmpeg counterpart here
			default:
				return B_MEDIA_BAD_FORMAT;
				break;
		}
		if (format->u.raw_audio.channel_mask == 0) {
			// guess the channel mask from the channel count...
			switch (format->u.raw_audio.channel_count) {
				default:
				case 2:
					fStream->codec->channel_layout = CH_LAYOUT_STEREO;
					break;
				case 1:
					fStream->codec->channel_layout = CH_LAYOUT_MONO;
					break;
				case 3:
					fStream->codec->channel_layout = CH_LAYOUT_SURROUND;
					break;
				case 4:
					fStream->codec->channel_layout = CH_LAYOUT_QUAD;
					break;
				case 5:
					fStream->codec->channel_layout = CH_LAYOUT_5POINT0;
					break;
				case 6:
					fStream->codec->channel_layout = CH_LAYOUT_5POINT1;
					break;
				case 8:
					fStream->codec->channel_layout = CH_LAYOUT_7POINT1;
					break;
				case 10:
					fStream->codec->channel_layout = CH_LAYOUT_7POINT1_WIDE;
					break;
			}
		} else {
			// The bits match 1:1 for media_multi_channels and FFmpeg defines.
			fStream->codec->channel_layout = format->u.raw_audio.channel_mask;
		}
	}

	// Some formats want stream headers to be separate
	if ((fContext->oformat->flags & AVFMT_GLOBALHEADER) != 0)
		fStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	TRACE(" stream->time_base: (%d/%d), codec->time_base: (%d/%d))\n",
		fStream->time_base.num, fStream->time_base.den,
		fStream->codec->time_base.num, fStream->codec->time_base.den);

#if 0
	// Write the AVCodecContext pointer to the user data section of the
	// media_format. For some encoders, it seems to be necessary to use
	// the AVCodecContext of the AVStream in order to successfully encode
	// anything and write valid media files. For example some codecs need
	// to store meta data or global data in the container.
	app_info appInfo;
	if (be_app->GetAppInfo(&appInfo) == B_OK) {
		uchar* userData = format->user_data;
		*(uint32*)userData = 'ffmp';
		userData += sizeof(uint32);
		*(team_id*)userData = appInfo.team;
		userData += sizeof(team_id);
		*(AVCodecContext**)userData = fStream->codec;
	}
#endif

	return B_OK;
}
/* Allocates and configures the FFmpeg/x264 H.264 encoder context for this
 * codec instance, then opens it.
 * Returns 0 (or avcodec_open()'s code) on success, negative on failure.
 * NOTE(review): on the -2 error paths the partially built context/picture is
 * left for the caller/close path to release — confirm against the close()
 * counterpart, which is not visible here. */
int tdav_codec_h264_open_encoder(tdav_codec_h264_t* self)
{
#if HAVE_FFMPEG
	int ret;
	tsk_size_t size;
	int32_t max_bw_kpbs;

	if(self->encoder.context){
		TSK_DEBUG_ERROR("Encoder already opened");
		return -1;
	}

	/* Allocate the context; the "defaults" call differs per libav version. */
#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
	if((self->encoder.context = avcodec_alloc_context3(self->encoder.codec))){
		avcodec_get_context_defaults3(self->encoder.context, self->encoder.codec);
	}
#else
	if((self->encoder.context = avcodec_alloc_context())){
		avcodec_get_context_defaults(self->encoder.context);
	}
#endif

	if(!self->encoder.context){
		TSK_DEBUG_ERROR("Failed to allocate context");
		return -1;
	}

#if TDAV_UNDER_X86 && LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE);
#endif

	self->encoder.context->pix_fmt = PIX_FMT_YUV420P;
	self->encoder.context->time_base.num = 1;
	self->encoder.context->time_base.den = TMEDIA_CODEC_VIDEO(self)->out.fps;
	/* Swap width/height when the frame will be rotated by 90 or 270 degrees. */
	self->encoder.context->width = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.height
		: TMEDIA_CODEC_VIDEO(self)->out.width;
	self->encoder.context->height = (self->encoder.rotation == 90 || self->encoder.rotation == 270)
		? TMEDIA_CODEC_VIDEO(self)->out.width
		: TMEDIA_CODEC_VIDEO(self)->out.height;

	/* Clamp the computed bandwidth for this resolution/fps to the
	 * configured maximum. */
	max_bw_kpbs = TSK_CLAMP(
		0,
		tmedia_get_video_bandwidth_kbps_2(TMEDIA_CODEC_VIDEO(self)->out.width, TMEDIA_CODEC_VIDEO(self)->out.height, TMEDIA_CODEC_VIDEO(self)->out.fps),
		self->encoder.max_bw_kpbs
	);
	self->encoder.context->bit_rate = (max_bw_kpbs * 1024);// bps

	self->encoder.context->rc_min_rate = (self->encoder.context->bit_rate >> 3);
	self->encoder.context->rc_max_rate = self->encoder.context->bit_rate;

#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->rc_lookahead = 0;
#endif
	self->encoder.context->global_quality = FF_QP2LAMBDA * self->encoder.quality;

	/* Low-latency tuning: disable scene-change detection, subpel ME,
	 * trellis and B-frames. */
	self->encoder.context->scenechange_threshold = 0;
	self->encoder.context->me_subpel_quality = 0;
#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_B8X8;
#endif
	self->encoder.context->me_method = ME_EPZS;
	self->encoder.context->trellis = 0;

	self->encoder.context->me_range = 16;
	self->encoder.context->qmin = 10;
	self->encoder.context->qmax = 51;
#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->mb_qmin = self->encoder.context->qmin;
	self->encoder.context->mb_qmax = self->encoder.context->qmax;
#endif
	self->encoder.context->qcompress = 0.6f;
	self->encoder.context->mb_decision = FF_MB_DECISION_SIMPLE;
#if LIBAVCODEC_VERSION_MAJOR <= 53
	self->encoder.context->flags2 |= CODEC_FLAG2_FASTPSKIP;
#else
	self->encoder.context->flags2 |= CODEC_FLAG2_FAST;
#endif
	self->encoder.context->flags |= CODEC_FLAG_LOOP_FILTER;
	self->encoder.context->flags |= CODEC_FLAG_GLOBAL_HEADER;
	self->encoder.context->flags |= CODEC_FLAG_LOW_DELAY;
	self->encoder.context->max_b_frames = 0;
	self->encoder.context->b_frame_strategy = 1;
	self->encoder.context->chromaoffset = 0;

	/* Select H.264 profile; unknown profiles fall back to baseline. */
	switch(TDAV_CODEC_H264_COMMON(self)->profile){
		case profile_idc_baseline:
		default:
			self->encoder.context->profile = FF_PROFILE_H264_BASELINE;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
		case profile_idc_main:
			self->encoder.context->profile = FF_PROFILE_H264_MAIN;
			self->encoder.context->level = TDAV_CODEC_H264_COMMON(self)->level;
			break;
	}

	self->encoder.context->rtp_payload_size = H264_RTP_PAYLOAD_SIZE;
	self->encoder.context->opaque = tsk_null;
	self->encoder.context->gop_size = (TMEDIA_CODEC_VIDEO(self)->out.fps * TDAV_H264_GOP_SIZE_IN_SECONDS);

#if (LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 35, 0))
	/* Newer libav: push x264-specific options through the AVOption API.
	 * Failures are logged but not fatal. */
	if((ret = av_opt_set_int(self->encoder.context->priv_data, "slice-max-size", H264_RTP_PAYLOAD_SIZE, 0))){
		TSK_DEBUG_ERROR("Failed to set x264 slice-max-size to %d", H264_RTP_PAYLOAD_SIZE);
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "profile", (self->encoder.context->profile == FF_PROFILE_H264_BASELINE ? "baseline" : "main"), 0))){
		TSK_DEBUG_ERROR("Failed to set x264 profile");
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "preset", "veryfast", 0))){
		TSK_DEBUG_ERROR("Failed to set x264 preset to veryfast");
	}
	/* Option was renamed between versions; try both spellings. */
	if((ret = av_opt_set_int(self->encoder.context->priv_data, "rc-lookahead", 0, 0)) && (ret = av_opt_set_int(self->encoder.context->priv_data, "rc_lookahead", 0, 0))){
		TSK_DEBUG_ERROR("Failed to set x264 rc_lookahead=0");
	}
	if((ret = av_opt_set(self->encoder.context->priv_data, "tune", "animation+zerolatency", 0))){
		TSK_DEBUG_ERROR("Failed to set x264 tune to zerolatency");
	}
#endif

	// Picture (YUV 420)
	if(!(self->encoder.picture = avcodec_alloc_frame())){
		TSK_DEBUG_ERROR("Failed to create encoder picture");
		return -2;
	}
	avcodec_get_frame_defaults(self->encoder.picture);

	size = avpicture_get_size(PIX_FMT_YUV420P, self->encoder.context->width, self->encoder.context->height);
	if(!(self->encoder.buffer = tsk_calloc(size, sizeof(uint8_t)))){
		TSK_DEBUG_ERROR("Failed to allocate encoder buffer");
		return -2;
	}

	// Open encoder
	if((ret = avcodec_open(self->encoder.context, self->encoder.codec)) < 0){
		TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(self)->plugin->desc);
		return ret;
	}

	TSK_DEBUG_INFO("[H.264] bitrate=%d bps", self->encoder.context->bit_rate);

	return ret;
#elif HAVE_H264_PASSTHROUGH
	/* Pass-through build: nothing to open. */
	return 0;
#endif

	/* Reached only when neither backend is compiled in. */
	TSK_DEBUG_ERROR("Not expected code called");
	return -1;
}
/* * init h264 decoder context * args: * width - image width * height - image height * * asserts: * none * * returns: error code (0 - E_OK) */ int h264_init_decoder(int width, int height) { #if !LIBAVCODEC_VER_AT_LEAST(53,34) avcodec_init(); #endif /* * register all the codecs (we can also register only the codec * we wish to have smaller code) */ avcodec_register_all(); if(h264_ctx != NULL) h264_close_decoder(); h264_ctx = calloc(1, sizeof(h264_decoder_context_t)); if(h264_ctx == NULL) { fprintf(stderr, "V4L2_CORE: FATAL memory allocation failure (h264_init_decoder): %s\n", strerror(errno)); exit(-1); } h264_ctx->codec = avcodec_find_decoder(AV_CODEC_ID_H264); if(!h264_ctx->codec) { fprintf(stderr, "V4L2_CORE: (H264 decoder) codec not found (please install libavcodec-extra for H264 support)\n"); free(h264_ctx); h264_ctx = NULL; return E_NO_CODEC; } #if LIBAVCODEC_VER_AT_LEAST(53,6) h264_ctx->context = avcodec_alloc_context3(h264_ctx->codec); avcodec_get_context_defaults3 (h264_ctx->context, h264_ctx->codec); #else h264_ctx->context = avcodec_alloc_context(); avcodec_get_context_defaults(h264_ctx->context); #endif if(h264_ctx->context == NULL) { fprintf(stderr, "V4L2_CORE: FATAL memory allocation failure (h264_init_decoder): %s\n", strerror(errno)); exit(-1); } h264_ctx->context->flags2 |= CODEC_FLAG2_FAST; h264_ctx->context->pix_fmt = PIX_FMT_YUV420P; h264_ctx->context->width = width; h264_ctx->context->height = height; //h264_ctx->context->dsp_mask = (FF_MM_MMX | FF_MM_MMXEXT | FF_MM_SSE); #if LIBAVCODEC_VER_AT_LEAST(53,6) if (avcodec_open2(h264_ctx->context, h264_ctx->codec, NULL) < 0) #else if (avcodec_open(h264_ctx->context, h264_ctx->codec) < 0) #endif { fprintf(stderr, "V4L2_CORE: (H264 decoder) couldn't open codec\n"); avcodec_close(h264_ctx->context); free(h264_ctx->context); free(h264_ctx); h264_ctx = NULL; return E_NO_CODEC; } #if LIBAVCODEC_VER_AT_LEAST(55,28) h264_ctx->picture = av_frame_alloc(); av_frame_unref(h264_ctx->picture); #else 
h264_ctx->picture = avcodec_alloc_frame(); avcodec_get_frame_defaults(h264_ctx->picture); #endif h264_ctx->pic_size = avpicture_get_size(h264_ctx->context->pix_fmt, width, height); h264_ctx->width = width; h264_ctx->height = height; return E_OK; }
/*
mediastreamer2 library - modular sound and video processing and streaming
Copyright (C) 2006  Simon MORLAT ([email protected])

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
*/

#include "ffmpeg-priv.h"

#ifndef HAVE_FUN_avcodec_encode_video2
/* Compatibility shim: emulate the modern avcodec_encode_video2() API on
 * top of the legacy avcodec_encode_video().
 * The legacy call returns the number of bytes written (>0), 0 when no
 * packet was produced, or a negative error; map that onto
 * *got_packet_ptr / avpkt->size and a 0/negative return. */
int avcodec_encode_video2 (AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) {
	int error=avcodec_encode_video(avctx, avpkt->data, avpkt->size,frame);
	if (error<0){
		return error;
	}else{
		if (error>0) {
			*got_packet_ptr=1;
			avpkt->size=error;
		}else *got_packet_ptr=0;
	}
	return 0;
}
#endif
#ifndef HAVE_FUN_avcodec_get_context_defaults3
/* Compatibility shim: emulate avcodec_get_context_defaults3() with the
 * legacy avcodec_get_context_defaults(); the codec argument is ignored,
 * so no codec-specific defaults are applied. Always returns 0. */
int avcodec_get_context_defaults3 (AVCodecContext *s, const AVCodec *codec) {
	avcodec_get_context_defaults(s);
	return 0;
}
/* Decodes an in-memory JPEG and converts/scales it to an I420 (YUV420P)
 * mblk_t of reqsize dimensions.
 * Three build variants: FFmpeg (default), CoreGraphics (iPhone), or a stub
 * returning NULL. On the iPhone path reqsize is overwritten with the
 * image's own dimensions. Returns NULL on any failure. */
static mblk_t *jpeg2yuv(uint8_t *jpgbuf, int bufsize, MSVideoSize *reqsize){
#ifndef NO_FFMPEG
	AVCodecContext av_context;
	int got_picture=0;
	AVFrame orig;
	mblk_t *ret;
	struct SwsContext *sws_ctx;
	AVPacket pkt;
	MSPicture dest;
	AVCodec *codec=avcodec_find_decoder(CODEC_ID_MJPEG);

	if (codec==NULL){
		ms_error("Could not find MJPEG decoder in ffmpeg.");
		return NULL;
	}

	avcodec_get_context_defaults(&av_context);
	if (avcodec_open(&av_context,codec)<0){
		ms_error("jpeg2yuv: avcodec_open failed");
		return NULL;
	}
	av_init_packet(&pkt);
	pkt.data=jpgbuf;
	pkt.size=bufsize;

	if (avcodec_decode_video2(&av_context,&orig,&got_picture,&pkt) < 0) {
		ms_error("jpeg2yuv: avcodec_decode_video failed");
		avcodec_close(&av_context);
		return NULL;
	}
	ret=ms_yuv_buf_alloc(&dest, reqsize->width,reqsize->height);
	/* not using SWS_FAST_BILINEAR because it doesn't play well with
	 * av_context.pix_fmt set to PIX_FMT_YUVJ420P by jpeg decoder */
	sws_ctx=sws_getContext(av_context.width,av_context.height,av_context.pix_fmt,
		reqsize->width,reqsize->height,PIX_FMT_YUV420P,SWS_BILINEAR,
		NULL, NULL, NULL);
	if (sws_ctx==NULL) {
		ms_error("jpeg2yuv: ms_sws_getContext() failed.");
		avcodec_close(&av_context);
		freemsg(ret);
		return NULL;
	}
	/* sws_scale() signature changed at libswscale 0.9: source plane
	 * pointers became const. */
#if LIBSWSCALE_VERSION_INT >= AV_VERSION_INT(0,9,0)
	if (sws_scale(sws_ctx,(const uint8_t* const *)orig.data,orig.linesize,0,av_context.height,dest.planes,dest.strides)<0){
#else
	if (sws_scale(sws_ctx,(uint8_t**)orig.data,orig.linesize,0,av_context.height,dest.planes,dest.strides)<0){
#endif
		ms_error("jpeg2yuv: ms_sws_scale() failed.");
		sws_freeContext(sws_ctx);
		avcodec_close(&av_context);
		freemsg(ret);
		return NULL;
	}
	sws_freeContext(sws_ctx);
	avcodec_close(&av_context);
	return ret;
#elif TARGET_OS_IPHONE
	MSPicture dest;
	CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, jpgbuf, bufsize, NULL);
	// use the data provider to get a CGImage; release the data provider
	CGImageRef image = CGImageCreateWithJPEGDataProvider(dataProvider, NULL, FALSE, kCGRenderingIntentDefault);
	CGDataProviderRelease(dataProvider);
	reqsize->width = CGImageGetWidth(image);
	reqsize->height = CGImageGetHeight(image);

	// render the JPEG into a temporary RGBA buffer
	uint8_t* tmp = (uint8_t*) malloc(reqsize->width * reqsize->height * 4);
	mblk_t* ret=ms_yuv_buf_alloc(&dest, reqsize->width, reqsize->height);

	CGColorSpaceRef colourSpace = CGColorSpaceCreateDeviceRGB();
	CGContextRef imageContext =
		CGBitmapContextCreate(tmp, reqsize->width, reqsize->height, 8, reqsize->width*4, colourSpace, kCGImageAlphaNoneSkipLast);
	CGColorSpaceRelease(colourSpace);

	// draw the image to the context, release it
	CGContextDrawImage(imageContext, CGRectMake(0, 0, reqsize->width, reqsize->height), image);
	CGImageRelease(image);

	/* convert tmp/RGB -> ret/YUV */
	for(int y=0; y<reqsize->height; y++) {
		for(int x=0; x<reqsize->width; x++) {
			uint8_t r = tmp[y * reqsize->width * 4 + x * 4 + 0];
			uint8_t g = tmp[y * reqsize->width * 4 + x * 4 + 1];
			uint8_t b = tmp[y * reqsize->width * 4 + x * 4 + 2];

			// Y (BT.601-style coefficients)
			*dest.planes[0]++ = (uint8_t)((0.257 * r) + (0.504 * g) + (0.098 * b) + 16);

			// U/V subsampling: average each 2x2 block once per block
			if ((y % 2==0) && (x%2==0)) {
				uint32_t r32=0, g32=0, b32=0;
				for(int i=0; i<2; i++) {
					for(int j=0; j<2; j++) {
						r32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 0];
						g32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 1];
						b32 += tmp[(y+i) * reqsize->width * 4 + (x+j) * 4 + 2];
					}
				}
				r32 = (uint32_t)(r32 * 0.25f);
				g32 = (uint32_t)(g32 * 0.25f);
				b32 = (uint32_t) (b32 * 0.25f);

				// U
				*dest.planes[1]++ = (uint8_t)(-(0.148 * r32) - (0.291 * g32) + (0.439 * b32) + 128);
				// V
				*dest.planes[2]++ = (uint8_t)((0.439 * r32) - (0.368 * g32) - (0.071 * b32) + 128);
			}
		}
	}
	free(tmp);
	return ret;
#else
	return NULL;
#endif
}

/* Reads the JPEG file at jpgpath into memory and returns it decoded as a
 * YUV420P mblk_t of reqsize dimensions (via jpeg2yuv()).
 * Win32 path uses CreateFile/ReadFile; POSIX path uses open/read. Files
 * larger than 4 GiB are rejected on Win32 (high size dword must be 0).
 * Returns NULL on any failure. Caller owns the returned mblk_t. */
mblk_t *ms_load_jpeg_as_yuv(const char *jpgpath, MSVideoSize *reqsize){
#if defined(WIN32)
	mblk_t *m=NULL;
	DWORD st_sizel;
	DWORD st_sizeh;
	uint8_t *jpgbuf;
	DWORD err;
	HANDLE fd;
#ifdef UNICODE
	WCHAR wUnicode[1024];
	MultiByteToWideChar(CP_UTF8, 0, jpgpath, -1, wUnicode, 1024);
	fd = CreateFile(wUnicode, GENERIC_READ, FILE_SHARE_READ, NULL,
		OPEN_EXISTING, 0, NULL);
#else
	fd = CreateFile(jpgpath, GENERIC_READ, FILE_SHARE_READ, NULL,
		OPEN_EXISTING, 0, NULL);
#endif
	if (fd==INVALID_HANDLE_VALUE){
		ms_error("Failed to open %s",jpgpath);
		return NULL;
	}
	st_sizel=0;
	st_sizeh=0;
	st_sizel = GetFileSize(fd, &st_sizeh);
	if (st_sizeh>0 || st_sizel<=0)
	{
		CloseHandle(fd);
		ms_error("Can't load file %s",jpgpath);
		return NULL;
	}
	jpgbuf=(uint8_t*)ms_malloc0(st_sizel);
	if (jpgbuf==NULL)
	{
		CloseHandle(fd);
		ms_error("Cannot allocate buffer for %s",jpgpath);
		return NULL;
	}
	err=0;
	ReadFile(fd, jpgbuf, st_sizel, &err, NULL) ;
	if (err!=st_sizel){
		ms_error("Could not read as much as wanted !");
	}
	m=jpeg2yuv(jpgbuf,st_sizel,reqsize);
	ms_free(jpgbuf);
	if (m==NULL)
	{
		CloseHandle(fd);
		ms_error("Cannot load image from buffer for %s",jpgpath);
		return NULL;
	}
	CloseHandle(fd);
	return m;
#else
	mblk_t *m=NULL;
	struct stat statbuf;
	uint8_t *jpgbuf;
	int err;
	int fd=open(jpgpath,O_RDONLY);
	if (fd!=-1){
		fstat(fd,&statbuf);
		if (statbuf.st_size<=0)
		{
			close(fd);
			ms_error("Cannot load %s",jpgpath);
			return NULL;
		}
		/* extra padding is required by ffmpeg's bitstream readers */
		jpgbuf=(uint8_t*)ms_malloc0(statbuf.st_size + FF_INPUT_BUFFER_PADDING_SIZE);
		if (jpgbuf==NULL)
		{
			close(fd);
			ms_error("Cannot allocate buffer for %s",jpgpath);
			return NULL;
		}
		err=read(fd,jpgbuf,statbuf.st_size);
		if (err!=statbuf.st_size){
			ms_error("Could not read as much as wanted: %i<>%li !",err,(long)statbuf.st_size);
		}
		m=jpeg2yuv(jpgbuf,statbuf.st_size,reqsize);
		ms_free(jpgbuf);
		if (m==NULL)
		{
			close(fd);
			ms_error("Cannot load image from buffer for %s",jpgpath);
			return NULL;
		}
	}else{
		ms_error("Cannot load %s",jpgpath);
		return NULL;
	}
	close(fd);
	return m;
#endif
}
/* Parses an FFmpeg option assignment of the form "name[:value]" or
 * "name value" and registers it as an ID property on rd under the given
 * type group.
 * Returns 1 on success, 0 when the option (or a const option's value) is
 * unknown or the property could not be created. */
int ffmpeg_property_add_string(RenderData *rd, const char * type, const char * str)
{
	AVCodecContext c;
	const AVOption * o = 0;
	const AVOption * p = 0;
	char name_[128];
	char * name;
	char * param;
	IDProperty * prop;

	avcodec_get_context_defaults(&c);

	strncpy(name_, str, sizeof(name_));
	/* BUGFIX: strncpy does not NUL-terminate when str is >= 128 chars,
	 * which made the strchr() calls below read past the buffer. */
	name_[sizeof(name_) - 1] = '\0';

	/* skip leading spaces */
	name = name_;
	while (*name == ' ') name++;

	/* split "name:value" or "name value" */
	param = strchr(name, ':');
	if (!param) {
		param = strchr(name, ' ');
	}
	if (param) {
		*param++ = 0;
		while (*param == ' ') param++;
	}

	o = my_av_find_opt(&c, name, NULL, 0, 0);
	if (!o) {
		return 0;
	}
	if (param && o->type == FF_OPT_TYPE_CONST) {
		return 0;
	}
	if (param && o->type != FF_OPT_TYPE_CONST && o->unit) {
		p = my_av_find_opt(&c, param, o->unit, 0, 0);
		if (p) {
			prop = ffmpeg_property_add(rd,
				(char*) type, p - c.av_class->option,
				o - c.av_class->option);
		} else {
			/* BUGFIX: the unit value lookup failed; previously the code
			 * computed "p - c.av_class->option" with p == NULL (undefined
			 * behavior / garbage index). Fall back to a plain value
			 * property; the literal param is applied below. */
			prop = ffmpeg_property_add(rd,
				(char*) type, o - c.av_class->option, 0);
		}
	} else {
		prop = ffmpeg_property_add(rd,
			(char*) type, o - c.av_class->option, 0);
	}

	if (!prop) {
		return 0;
	}

	/* literal value (no const found): convert per property type */
	if (param && !p) {
		switch (prop->type) {
		case IDP_INT:
			IDP_Int(prop) = atoi(param);
			break;
		case IDP_FLOAT:
			IDP_Float(prop) = atof(param);
			break;
		case IDP_STRING:
			strncpy(IDP_String(prop), param, prop->len);
			break;
		}
	}
	return 1;
}
/* Creates (or returns the existing) ID property for the AVOption at
 * opt_index in the default AVCodecContext's option table, stored in the
 * group named by type under rd->ffcodecdata.properties.
 * parent_index != 0 denotes a unit/const pair; the property is then named
 * "parent:option". Returns NULL for unsupported option types. */
IDProperty *ffmpeg_property_add(RenderData *rd, char * type, int opt_index, int parent_index)
{
	AVCodecContext c;
	const AVOption * o;
	const AVOption * parent;
	IDProperty * group;
	IDProperty * prop;
	IDPropertyTemplate val;
	int idp_type;
	char name[256];

	val.i = 0;

	avcodec_get_context_defaults(&c);

	o = c.av_class->option + opt_index;
	parent = c.av_class->option + parent_index;

	/* lazily create the root "ffmpeg" group */
	if (!rd->ffcodecdata.properties) {
		rd->ffcodecdata.properties = IDP_New(IDP_GROUP, val, "ffmpeg");
	}

	group = IDP_GetPropertyFromGroup(
		rd->ffcodecdata.properties, (char*) type);
	if (!group) {
		group = IDP_New(IDP_GROUP, val, (char*) type);
		IDP_AddToGroup(rd->ffcodecdata.properties, group);
	}

	/* BUGFIX: option names come from FFmpeg's tables and are not length
	 * checked; use snprintf instead of sprintf/strcpy so a long name
	 * cannot overflow name[256]. */
	if (parent_index) {
		snprintf(name, sizeof(name), "%s:%s", parent->name, o->name);
	} else {
		snprintf(name, sizeof(name), "%s", o->name);
	}

	fprintf(stderr, "ffmpeg_property_add: %s %d %d %s\n",
		type, parent_index, opt_index, name);

	prop = IDP_GetPropertyFromGroup(group, name);
	if (prop) {
		return prop;
	}

	/* map the AVOption type onto an ID property type with its default */
	switch (o->type) {
	case FF_OPT_TYPE_INT:
	case FF_OPT_TYPE_INT64:
		val.i = FFMPEG_DEF_OPT_VAL_INT(o);
		idp_type = IDP_INT;
		break;
	case FF_OPT_TYPE_DOUBLE:
	case FF_OPT_TYPE_FLOAT:
		val.f = FFMPEG_DEF_OPT_VAL_DOUBLE(o);
		idp_type = IDP_FLOAT;
		break;
	case FF_OPT_TYPE_STRING:
		val.str = " ";
		idp_type = IDP_STRING;
		break;
	case FF_OPT_TYPE_CONST:
		val.i = 1;
		idp_type = IDP_INT;
		break;
	default:
		return NULL;
	}
	prop = IDP_New(idp_type, val, name);
	IDP_AddToGroup(group, prop);
	return prop;
}
// ######################################################################
// Constructs an FFmpeg-based video encoder: resolves the output format
// named by codecname (or lists all formats and aborts when "List"),
// derives the output filename extension, allocates/configures the codec
// context, opens the codec, and opens either an AVFormatContext-driven
// muxer or a raw output file depending on useFormatContext.
// Fatal errors abort via LFATAL (this class's error policy).
FfmpegEncoder::FfmpegEncoder(const std::string& fname,
                             const std::string& codecname,
                             const int bitrate,
                             const int framerate,
                             const int frameratebase,
                             const Dims& dims,
                             const int bufsz,
                             const bool useFormatContext)
  :
  itsFile(0),
  itsContext(),
  itsFormatContext(0),
  itsFrameNumber(0),
  itsOutbufSize(bufsz),
  itsFrameSizeRange(),
  itsUseFormatContext(useFormatContext)
{
GVX_TRACE(__PRETTY_FUNCTION__);

  // no need to guard these functions for being called multiple times;
  // they all have internal guards
  av_register_all();
  avcodec_init();
  avcodec_register_all();

  AVOutputFormat* oformat = NULL;

#if LIBAVCODEC_VERSION_MAJOR >= 53 && LIBAVCODEC_VERSION_MINOR >= 21
  // newer libavcodec: iterate output formats via av_oformat_next()
  if (codecname.compare("List") == 0) { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f) {
        LINFO("%s: %s %d", f->name, f->long_name, f->flags);
        f = av_oformat_next(f);
      }
      LFATAL("Please select a codec from this list");
  } else { // format is given
      // no av_find_output_format()?? let's do it by hand...
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f) {
        if (codecname.compare(f->name) == 0) { oformat = f; break; }
        f = av_oformat_next(f);
      }
  }
#else
  // older libavcodec: walk the first_oformat linked list directly
  if (codecname.compare("List") == 0) { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        LINFO("%s: %s %d", f->name, f->long_name, f->flags);
      LFATAL("Please select a codec from this list");
  } else { // format is given
      // no av_find_output_format()?? let's do it by hand...
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        if (codecname.compare(f->name) == 0) { oformat = f; break; }
  }
#endif

  if (oformat == 0)
    LFATAL("No such video codec '%s';\n"
           "try re-running with --output-codec=List to see a list\n"
           "of available codecs", codecname.c_str());

  // Build ".<ext>" from the first comma-separated entry of the format's
  // extension list.
  // NOTE(review): assumes that first extension fits in ext[100] — confirm.
  char ext[100]; ext[0] = '.';
  uint i;
  for (i = 0; i < strlen(oformat->extensions); i ++)
    if (oformat->extensions[i] == ',') break;
    else ext[i+1] = oformat->extensions[i];
  ext[i+1] = '\0';
  LINFO("Using output format '%s' (%s), extension %s", oformat->name,
        oformat->long_name, ext);

  // Replace fname's extension (if any, after the last '/') with ext.
  std::string oname(fname);
  std::string::size_type idx1 = oname.rfind('/', oname.npos);
  std::string::size_type idx2 = oname.rfind('.', oname.npos);
  // must check that idx2 is valid; otherwise if we do
  // oname.erase(idx2) with e.g. idx2==npos then we will get a
  // std::out_of_range exception
  if (idx2 < oname.size() && idx2 > idx1)
    oname.erase(idx2, oname.npos);
  oname.append(ext);
  LINFO("Output file: %s", oname.c_str());

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      LINFO("Using FormatContext to output data");
#ifdef AVMEDIA_TYPE_VIDEO
      itsFormatContext = avformat_alloc_context();
#else
      itsFormatContext = av_alloc_format_context();
#endif
      if (!itsFormatContext)
        LFATAL("Cannot allocate format context");
      itsFormatContext->oformat = oformat;

      itsAVStream = av_new_stream(itsFormatContext, 0);
      if (!itsAVStream)
        LFATAL("Can not allocate AVStream");
#else
      LFATAL("Need a new version of ffmpeg libs for this option");
      itsFormatContext = NULL;
#endif
    }

  AVCodec* const codec = avcodec_find_encoder(oformat->video_codec);
  if (codec == NULL) LFATAL("codec not found");

  // Initialize itsContext with codec-context defaults; without the
  // defaults function, copy a freshly allocated default context instead.
#if defined(INVT_FFMPEG_HAS_DEFAULTS_FUNCTIONS)
  avcodec_get_context_defaults(&itsContext);
#else
  {
    AVCodecContext* const tmp = avcodec_alloc_context();
    memcpy(&itsContext, tmp, sizeof(AVCodecContext));
    free(tmp);
  }
#endif

  itsContext.bit_rate = bitrate;

  // Be sure to set itsContext.pix_fmt -- it may occasionally appear to
  // work to leave pix_fmt unset, because the value we want,
  // PIX_FMT_YUV420P, has the enum value of 0, so if the uninitialized
  // memory for pix_fmt happens to have the value 0, then we'll slip
  // through without setting it explicitly.
  itsContext.pix_fmt = PIX_FMT_YUV420P;

  /* resolution must be a multiple of two */
  itsContext.width = dims.w();
  itsContext.height = dims.h();

  // Frame rate representation changed across libavcodec versions.
#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
  AVRational time_base = { frameratebase, framerate };
  itsContext.time_base = time_base;
  const int frb = frameratebase;
#elif LIBAVCODEC_VERSION_INT >= 0x000406 && LIBAVCODEC_BUILD > 4665
  itsContext.frame_rate = framerate;
  const int frb = frameratebase;
  itsContext.frame_rate_base = frb;
#else
  itsContext.frame_rate = framerate;
  const int frb = FRAME_RATE_BASE;
#endif

  itsContext.gop_size = 10; /* emit one intra frame every ten frames */

  // B-frames only for the MPEG family codecs.
  if(codec->id != CODEC_ID_MPEG4 &&
     codec->id != CODEC_ID_MPEG1VIDEO &&
     codec->id != CODEC_ID_MPEG2VIDEO)
    itsContext.max_b_frames = 0;
  else
    itsContext.max_b_frames = 1;

  itsFrameNumber = 0;

  LINFO("using max_b_frames=%i bitrate=%u width=%u height=%u framerate=%u frameratebase=%u",
        itsContext.max_b_frames, itsContext.bit_rate,
        itsContext.width, itsContext.height, framerate, frb);

  if (avcodec_open(&itsContext, codec) < 0)
    LFATAL("could not open codec\n");

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      // Mirror the opened codec's parameters into the muxer stream.
      AVCodecContext *c = itsAVStream->codec;
      c->codec_id = itsContext.codec_id;
#ifdef CODEC_TYPE_VIDEO
      c->codec_type = CODEC_TYPE_VIDEO;
#else
#ifdef AVMEDIA_TYPE_VIDEO
      c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
#endif

      /* put sample parameters */
      c->bit_rate = itsContext.bit_rate;
      /* resolution must be a multiple of two */
      c->width = itsContext.width;
      c->height = itsContext.height;
      /* time base: this is the fundamental unit of time (in seconds) in
         terms of which frame timestamps are represented. for fixed-fps
         content, timebase should be 1/framerate and timestamp increments
         should be identically 1. */
#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
      c->time_base.den = itsContext.time_base.den;
      c->time_base.num = itsContext.time_base.num;
#endif
      c->gop_size = 12; /* emit one intra frame every twelve frames at most */
      c->pix_fmt = itsContext.pix_fmt;

      /* set the output parameters (must be done even if no
         parameters). */
      if (av_set_parameters(itsFormatContext, NULL) < 0)
        LFATAL("Invalid output format parameters");

      // url_fopen()'s signature varied across ffmpeg versions; pick the
      // matching call at compile time.
#if defined(INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#endif
#else
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      LFATAL("Could not open '%s' ffmpeg version mismatch", oname.c_str());
#endif
#endif //INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)

      /* write the stream header, if any */
      av_write_header(itsFormatContext);
#else
      LFATAL("Need a new version of FFMPEG for this option");
#endif
    }
  else
    {
      // Raw mode: encoded packets will be written directly to this file.
      itsFile = fopen(oname.c_str(), "w");
      if (itsFile==NULL)
        LFATAL("could not open file! %s", oname.c_str());
    }

  LINFO("EnCoder Inited");
}
/* Prepares the FFmpeg video decoder for a streaming session: validates the
 * media types, duplicates the in/out bitmap headers, resolves the FourCC to
 * an FFmpeg codec, optionally allocates an intermediate output buffer, and
 * opens the codec context under This->m_cs.
 * Returns NOERROR on success or an appropriate COM error code. */
static HRESULT FFMVWrapper_BeginTransform( CTransformBaseImpl* pImpl, const AM_MEDIA_TYPE* pmtIn, const AM_MEDIA_TYPE* pmtOut, BOOL bReuseSample )
{
	CFFMVWrapperImpl* This = pImpl->m_pUserData;
	BITMAPINFO* pbiIn = NULL;
	BITMAPINFO* pbiOut = NULL;
	LONG width, height;
	DWORD dwFourCC;
	AVCodec* codec;
	HRESULT hr;
	int i;

	TRACE("(%p,%p,%p,%d)\n",This,pmtIn,pmtOut,bReuseSample);

	/* already streaming? */
	if ( This == NULL || This->ctx.codec )
		return E_UNEXPECTED;

	hr = FFMVWrapper_CheckMediaType( pImpl, pmtIn, pmtOut );
	if ( FAILED(hr) )
		return hr;

	FFMVWrapper_ReleaseDIBBuffers(This);

	/* Determine the source FourCC from the input format type. */
	if ( IsEqualGUID( &pmtIn->formattype, &FORMAT_VideoInfo ) )
	{
		pbiIn = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtIn->pbFormat)->bmiHeader);
		dwFourCC = pbiIn->bmiHeader.biCompression;
	}
	else if ( IsEqualGUID( &pmtIn->formattype, &FORMAT_MPEGVideo ) )
	{
		pbiIn = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtIn->pbFormat)->bmiHeader);
		dwFourCC = mmioFOURCC('P','I','M','1');
	}
	else return E_FAIL;

	width = pbiIn->bmiHeader.biWidth;
	/* negative biHeight means top-down DIB; decode size is always positive */
	height = (pbiIn->bmiHeader.biHeight < 0) ? -pbiIn->bmiHeader.biHeight : pbiIn->bmiHeader.biHeight;
	pbiOut = (BITMAPINFO*)(&((VIDEOINFOHEADER*)pmtOut->pbFormat)->bmiHeader);

	This->m_pbiIn = FFMVWrapper_DuplicateBitmapInfo(pbiIn);
	This->m_pbiOut = FFMVWrapper_DuplicateBitmapInfo(pbiOut);
	if ( This->m_pbiIn == NULL || This->m_pbiOut == NULL )
		return E_OUTOFMEMORY;
	/* BI_RGB (0) / BI_BITFIELDS (3) may carry biSizeImage == 0; compute it */
	if ( This->m_pbiOut->bmiHeader.biCompression == 0 || This->m_pbiOut->bmiHeader.biCompression == 3 )
		This->m_pbiOut->bmiHeader.biSizeImage = DIBSIZE(This->m_pbiOut->bmiHeader);

	/* look up the FourCC in our codec table (table ends with dwFourCC==0) */
	for (i=0; ff_codecs[i].dwFourCC && ff_codecs[i].dwFourCC != dwFourCC; i++);
	if (!ff_codecs[i].dwFourCC)
	{
		TRACE("couldn't find codec format\n");
		return E_FAIL;
	}

	codec = avcodec_find_decoder(ff_codecs[i].codec);
	if (!codec)
	{
		TRACE("couldn't open codec\n");
		return E_FAIL;
	}

	if ( !bReuseSample )
	{
		This->m_pOutBuf = QUARTZ_AllocMem(This->m_pbiOut->bmiHeader.biSizeImage);
		if ( This->m_pOutBuf == NULL )
			return E_OUTOFMEMORY;
		ZeroMemory( This->m_pOutBuf, This->m_pbiOut->bmiHeader.biSizeImage );
	}

	This->rtCur = 0;
	This->rtInternal = 0;

	EnterCriticalSection( &This->m_cs );
	avcodec_get_context_defaults( &This->ctx );
	This->ctx.width = width;
	This->ctx.height = height;
	/* these decoders accept truncated (non-frame-aligned) input */
	if (codec->id == CODEC_ID_MPEG1VIDEO || codec->id == CODEC_ID_H264)
		This->ctx.flags |= CODEC_FLAG_TRUNCATED;

	TRACE("opening codec for %dx%d video\n", This->ctx.width, This->ctx.height);

	if (avcodec_open( &This->ctx, codec) < 0)
	{
		/* BUGFIX: the previous code returned here while still holding
		 * m_cs, leaving the critical section locked forever. */
		LeaveCriticalSection( &This->m_cs );
		TRACE("couldn't open codec\n");
		return E_FAIL;
	}
	LeaveCriticalSection( &This->m_cs );

	return NOERROR;
}