/*
 * Initialize a VP8/VP9 codec instance for FreeSWITCH.
 *
 * Allocates the per-codec context from the codec's memory pool, selects the
 * VP8 or VP9 encoder/decoder interfaces based on the IANA name, echoes any
 * inbound fmtp, and loads libvpx's default encoder configuration.
 *
 * Returns SWITCH_STATUS_SUCCESS on success, SWITCH_STATUS_FALSE if neither
 * encode nor decode was requested, allocation fails, or the default encoder
 * config cannot be obtained.
 */
static switch_status_t switch_vpx_init(switch_codec_t *codec, switch_codec_flag_t flags,
									   const switch_codec_settings_t *codec_settings)
{
	vpx_context_t *ctx;
	int want_encode = (flags & SWITCH_CODEC_FLAG_ENCODE);
	int want_decode = (flags & SWITCH_CODEC_FLAG_DECODE);

	/* At least one direction must be requested. */
	if (!want_encode && !want_decode) {
		return SWITCH_STATUS_FALSE;
	}

	ctx = switch_core_alloc(codec->memory_pool, sizeof(*ctx));
	if (!ctx) {
		return SWITCH_STATUS_FALSE;
	}

	memset(ctx, 0, sizeof(*ctx));
	ctx->flags = flags;
	ctx->pool = codec->memory_pool;
	codec->private_info = ctx;

	if (codec_settings) {
		ctx->codec_settings = *codec_settings;
	}

	/* Pick the codec family from the negotiated IANA name. */
	if (!strcmp(codec->implementation->iananame, "VP9")) {
		ctx->is_vp9 = 1;
		ctx->encoder_interface = vpx_codec_vp9_cx();
		ctx->decoder_interface = vpx_codec_vp9_dx();
	} else {
		ctx->encoder_interface = vpx_codec_vp8_cx();
		ctx->decoder_interface = vpx_codec_vp8_dx();
	}

	/* Mirror the inbound fmtp back out. */
	if (codec->fmtp_in) {
		codec->fmtp_out = switch_core_strdup(codec->memory_pool, codec->fmtp_in);
	}

	if (vpx_codec_enc_config_default(ctx->encoder_interface, &ctx->config, 0) != VPX_CODEC_OK) {
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Encoder config Error\n");
		return SWITCH_STATUS_FALSE;
	}

	/* Initial frame size; presumably renegotiated before encoding starts. */
	ctx->codec_settings.video.width = 320;
	ctx->codec_settings.video.height = 240;

	switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG,
					  "VPX VER:%s VPX_IMAGE_ABI_VERSION:%d VPX_CODEC_ABI_VERSION:%d\n",
					  vpx_codec_version_str(), VPX_IMAGE_ABI_VERSION, VPX_CODEC_ABI_VERSION);

	return SWITCH_STATUS_SUCCESS;
}
krad_vpx_decoder_t *krad_vpx_decoder_create () { krad_vpx_decoder_t *vpx; vpx = calloc (1, sizeof(krad_vpx_decoder_t)); vpx->stream_info.sz = sizeof (vpx->stream_info); vpx->dec_flags = 0; vpx->cfg.threads = 3; vpx_codec_dec_init (&vpx->decoder, vpx_codec_vp8_dx(), &vpx->cfg, vpx->dec_flags); //vpx->ppcfg.post_proc_flag = VP8_DEBLOCK; //vpx->ppcfg.deblocking_level = 1; //vpx->ppcfg.noise_level = 0; vpx->ppcfg.post_proc_flag = VP8_DEMACROBLOCK | VP8_DEBLOCK | VP8_ADDNOISE; vpx->ppcfg.deblocking_level = 5; vpx->ppcfg.noise_level = 1; vpx_codec_control (&vpx->decoder, VP8_SET_POSTPROC, &vpx->ppcfg); vpx->img = NULL; return vpx; }
// Initialize the libvpx decoder for the track's codec (VP8 or VP9),
// scaling the thread count with frame width for VP9 but never exceeding
// the number of system cores. Returns NS_ERROR_FAILURE for an unknown
// codec or if decoder initialization fails.
nsresult SoftwareWebMVideoDecoder::Init(unsigned int aWidth, unsigned int aHeight)
{
  vpx_codec_iface_t* iface = nullptr;
  int threads = 2; // Default to 2 threads for small sizes or VP8.

  switch (mReader->GetVideoCodec()) {
    case NESTEGG_CODEC_VP8:
      iface = vpx_codec_vp8_dx();
      break;
    case NESTEGG_CODEC_VP9:
      iface = vpx_codec_vp9_dx();
      // Wider VP9 frames get more decode threads.
      if (aWidth >= 2048) {
        threads = 8;
      } else if (aWidth >= 1024) {
        threads = 4;
      }
      break;
  }

  // Never exceed the number of system cores!
  threads = std::min(threads, PR_GetNumberOfProcessors());

  vpx_codec_dec_cfg_t config;
  config.threads = threads;
  config.w = aWidth;
  config.h = aHeight;

  if (!iface || vpx_codec_dec_init(&mVPX, iface, &config, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
/*
 * Initialize a VP8 decoder context.
 *
 * On newer libvpx builds, enables error concealment and postprocessing
 * (and input partitions when compiled in), then disables the postproc
 * filter via VP8_SET_POSTPROC.
 *
 * Note: `numcores` is currently unused; the decoder is single-threaded.
 *
 * Returns 0 on success, -ENOMEM if decoder initialization fails.
 */
int x_vpx_decoder_init(vpx_codec_ctx_t *_decoder, int numcores) {
    vpx_codec_dec_cfg_t cfg;
    vpx_codec_flags_t flags = 0;
    /* Fix: removed unused local `err`. */

    cfg.threads = 1;
    cfg.h = cfg.w = 0; // set after decode

#if WEBRTC_LIBVPX_VERSION >= 971
    flags = VPX_CODEC_USE_ERROR_CONCEALMENT | VPX_CODEC_USE_POSTPROC;
#ifdef INDEPENDENT_PARTITIONS
    flags |= VPX_CODEC_USE_INPUT_PARTITION;
#endif
#endif

    if (vpx_codec_dec_init(_decoder, vpx_codec_vp8_dx(), &cfg, flags)) {
        return -ENOMEM;
    }

#if WEBRTC_LIBVPX_VERSION >= 971
    /* Fix: zero-initialize so no uninitialized fields reach the codec. */
    vp8_postproc_cfg_t ppcfg = {0};
    // Disable deblocking for now due to uninitialized memory being returned.
    ppcfg.post_proc_flag = 0;
    // Strength of deblocking filter. Valid range:[0,16]
    //ppcfg.deblocking_level = 3;
    vpx_codec_control(_decoder, VP8_SET_POSTPROC, &ppcfg);
#endif
    return 0;
}
/* Map a container FOURCC to its libvpx decoder interface.
 * Returns NULL for an unrecognized FOURCC. */
static vpx_codec_iface_t *get_codec_interface(unsigned int fourcc) {
  if (fourcc == VP8_FOURCC)
    return vpx_codec_vp8_dx();
  if (fourcc == VP9_FOURCC)
    return vpx_codec_vp9_dx();
  return NULL;
}
// Allocate and initialize the internal VP8 decoder.
// Returns 0 on success, -1 if libvpx initialization fails.
// NOTE(review): _vpxDecoder is a raw owning pointer; presumably freed in a
// matching teardown method elsewhere — confirm.
int VPXEncoder::InitDecoder() {
    _vpxDecoder = new vpx_codec_ctx_t();

    /* Initialize decoder */
    if (vpx_codec_dec_init(_vpxDecoder, (vpx_codec_vp8_dx()), NULL, 0)) {
        printf("Failed to initialize libvpx decoder.\n");
        return -1;
    }

    // Fix: the original fell off the end of this non-void function on
    // success, which is undefined behavior in C++.
    return 0;
}
/* MSFilter init callback: allocate and zero the VP8 decoder state,
 * select the libvpx VP8 decoder interface, and set default runtime
 * options (AVPF off, freeze-on-error on). */
static void dec_init(MSFilter *f) {
	DecState *state = (DecState *)ms_new0(DecState, 1);

	state->iface = vpx_codec_vp8_dx();
	ms_message("Using %s", vpx_codec_iface_name(state->iface));

	/* No frame decoded yet; dimensions and cached YUV message unset. */
	state->last_error_reported_time = 0;
	state->yuv_width = 0;
	state->yuv_height = 0;
	state->yuv_msg = 0;
	state->first_image_decoded = FALSE;

	ms_queue_init(&state->q);

	/* Default runtime behavior. */
	state->avpf_enabled = FALSE;
	state->freeze_on_error = TRUE;

	ms_average_fps_init(&state->fps, "VP8 decoder: FPS: %f");

	f->data = state;
}
/* Open the VP8 decoder for a pjsip vpx codec instance.
 * Dimensions start at 0 (libvpx determines them from the stream) and a
 * single decode thread is used. Returns PJ_SUCCESS, or PJ_ENOMEM if
 * libvpx refuses to initialize. */
static pj_status_t pj_vpx_decoder_open(vpx_private *vpx) {
    vpx_codec_dec_cfg_t dec_cfg;
    vpx_codec_flags_t dec_flags = 0;
    int status;

    dec_cfg.w = 0;
    dec_cfg.h = 0;
    dec_cfg.threads = 1;

    status = vpx_codec_dec_init(&vpx->decoder, vpx_codec_vp8_dx(), &dec_cfg, dec_flags);
    if (status == VPX_CODEC_OK) {
        return PJ_SUCCESS;
    }

    PJ_LOG(1, (THIS_FILE, "Failed to init vpx decoder : %s", vpx_codec_err_to_string(status)));
    return PJ_ENOMEM;
}
// Initialize the libvpx decoder for the track's codec with default
// decoder configuration. The frame dimensions are accepted for interface
// compatibility but not passed to libvpx here. Returns NS_ERROR_FAILURE
// for an unknown codec or a failed initialization.
nsresult SoftwareWebMVideoDecoder::Init(unsigned int aWidth, unsigned int aHeight)
{
  vpx_codec_iface_t* iface = nullptr;

  switch (mReader->GetVideoCodec()) {
    case NESTEGG_CODEC_VP8:
      iface = vpx_codec_vp8_dx();
      break;
    case NESTEGG_CODEC_VP9:
      iface = vpx_codec_vp9_dx();
      break;
  }

  if (!iface) {
    return NS_ERROR_FAILURE;
  }
  if (vpx_codec_dec_init(&mVPX, iface, nullptr, 0)) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
/* Allocate a vpx_context and initialize its VP8 decoder.
 * `colorspace` is accepted for interface compatibility but unused here.
 * Returns NULL on allocation or decoder-initialization failure (the
 * context is freed on the latter). */
struct vpx_context *init_decoder(int width, int height, const char *colorspace)
{
	struct vpx_context *ctx;
	int err;

	ctx = malloc(sizeof(struct vpx_context));
	if (ctx == NULL)
		return NULL;
	memset(ctx, 0, sizeof(struct vpx_context));

	err = vpx_codec_dec_init(&ctx->codec, vpx_codec_vp8_dx(), NULL, 0);
	if (err) {
		codec_error(&ctx->codec, "vpx_codec_dec_init");
		printf("vpx_codec_dec_init(..) failed with error %d\n", err);
		free(ctx);
		return NULL;
	}

	ctx->width = width;
	ctx->height = height;
	return ctx;
}
// returns 0 on success, -1 on error static int init_video(nestegg *nestegg_ctx, int track, video_context *video_ctx) { nestegg_video_params video_params; nestegg_track_video_params(nestegg_ctx, track, &video_params); assert(video_params.stereo_mode == NESTEGG_VIDEO_MONO); if (vpx_codec_dec_init(&(video_ctx->vpx_ctx), vpx_codec_vp8_dx(), NULL, 0)) { printf("Error: failed to initialize libvpx\n"); return -1; } video_ctx->width = video_params.width; video_ctx->height = video_params.height; video_ctx->display_width = video_params.display_width; video_ctx->display_height = video_params.display_height; nestegg_track_default_duration(nestegg_ctx, track, &(video_ctx->frame_delay)); printf("Video track: resolution=%i*%i, display resolution=%i*%i, %.2f frames/second\n", video_params.width, video_params.height, video_params.display_width, video_params.display_height, 1000000000.0 / video_ctx->frame_delay); video_ctx->packet_queue = queue_init(PACKET_QUEUE_SIZE); video_ctx->frame_queue = queue_init(FRAME_QUEUE_SIZE); return 0; }
// Pull the next video packet from the WebM demuxer, decode every chunk in
// it with libvpx, and push the resulting frames onto the reader's video
// queue. Supports keyframe-skipping (seek) via aKeyframeSkip and drops
// frames whose timestamp precedes aTimeThreshold (microseconds).
// Returns false on demuxer or decoder error (end of data included), true
// after the packet has been processed.
bool SoftwareWebMVideoDecoder::DecodeVideoFrame(bool &aKeyframeSkip,
                                                int64_t aTimeThreshold)
{
  MOZ_ASSERT(mReader->OnTaskQueue());

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  AbstractMediaDecoder::AutoNotifyDecoded a(mReader->GetDecoder());

  nsAutoRef<NesteggPacketHolder> holder(mReader->NextPacket(WebMReader::VIDEO));
  if (!holder) {
    return false;
  }

  nestegg_packet* packet = holder->mPacket;
  unsigned int track = 0;
  int r = nestegg_packet_track(packet, &track);
  if (r == -1) {
    return false;
  }

  // Number of data chunks in this packet.
  unsigned int count = 0;
  r = nestegg_packet_count(packet, &count);
  if (r == -1) {
    return false;
  }

  // Packet timestamp, in nanoseconds.
  uint64_t tstamp = 0;
  r = nestegg_packet_tstamp(packet, &tstamp);
  if (r == -1) {
    return false;
  }

  // The end time of this frame is the start time of the next frame. Fetch
  // the timestamp of the next packet for this track. If we've reached the
  // end of the resource, use the file's duration as the end time of this
  // video frame.
  uint64_t next_tstamp = 0;
  nsAutoRef<NesteggPacketHolder> next_holder(mReader->NextPacket(WebMReader::VIDEO));
  if (next_holder) {
    r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp);
    if (r == -1) {
      return false;
    }
    // Peeked ahead only for its timestamp; give the packet back to the
    // reader so it is decoded on the next call.
    mReader->PushVideoPacket(next_holder.disown());
  } else {
    // No next packet: extrapolate the end time from the previous
    // inter-frame duration.
    next_tstamp = tstamp;
    next_tstamp += tstamp - mReader->GetLastVideoFrameTime();
  }
  mReader->SetLastVideoFrameTime(tstamp);

  int64_t tstamp_usecs = tstamp / NS_PER_USEC;
  for (uint32_t i = 0; i < count; ++i) {
    unsigned char* data;
    size_t length;
    r = nestegg_packet_data(packet, i, &data, &length);
    if (r == -1) {
      return false;
    }

    // Peek at the bitstream header (without decoding) to learn whether
    // this chunk is a keyframe, for the skip logic below.
    vpx_codec_stream_info_t si;
    memset(&si, 0, sizeof(si));
    si.sz = sizeof(si);
    if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) {
      vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
    } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) {
      vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
    }
    if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
      // Skipping to next keyframe...
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

    if (aKeyframeSkip && si.is_kf) {
      aKeyframeSkip = false;
    }

    if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) {
      return false;
    }

    // If the timestamp of the video frame is less than
    // the time threshold required then it is not added
    // to the video queue and won't be displayed.
    if (tstamp_usecs < aTimeThreshold) {
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

    vpx_codec_iter_t iter = nullptr;
    vpx_image_t *img;
    while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
      NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");

      // Chroma shifts are rounded down as per the decoding examples in the SDK
      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = img->planes[0];
      b.mPlanes[0].mStride = img->stride[0];
      b.mPlanes[0].mHeight = img->d_h;
      b.mPlanes[0].mWidth = img->d_w;
      b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

      b.mPlanes[1].mData = img->planes[1];
      b.mPlanes[1].mStride = img->stride[1];
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

      b.mPlanes[2].mData = img->planes[2];
      b.mPlanes[2].mStride = img->stride[2];
      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

      nsIntRect pictureRect = mReader->GetPicture();
      IntRect picture = pictureRect;
      nsIntSize initFrame = mReader->GetInitialFrame();
      if (img->d_w != static_cast<uint32_t>(initFrame.width) ||
          img->d_h != static_cast<uint32_t>(initFrame.height)) {
        // Frame size is different from what the container reports. This is
        // legal in WebM, and we will preserve the ratio of the crop rectangle
        // as it was reported relative to the picture size reported by the
        // container.
        picture.x = (pictureRect.x * img->d_w) / initFrame.width;
        picture.y = (pictureRect.y * img->d_h) / initFrame.height;
        picture.width = (img->d_w * pictureRect.width) / initFrame.width;
        picture.height = (img->d_h * pictureRect.height) / initFrame.height;
      }

      VideoInfo videoInfo = mReader->GetMediaInfo().mVideo;
      nsRefPtr<VideoData> v = VideoData::Create(videoInfo,
                                                mReader->GetDecoder()->GetImageContainer(),
                                                holder->mOffset,
                                                tstamp_usecs,
                                                (next_tstamp / NS_PER_USEC) - tstamp_usecs,
                                                b,
                                                si.is_kf,
                                                -1,
                                                picture);
      if (!v) {
        return false;
      }
      a.mParsed++;
      a.mDecoded++;
      NS_ASSERTION(a.mDecoded <= a.mParsed,
                   "Expect only 1 frame per chunk per packet in WebM...");
      mReader->VideoQueue().Push(v);
    }
  }

  return true;
}
/* Return the libvpx VP8 decoder interface used by this module. */
static vpx_codec_iface_t *video_codec_decoder_interface(void)
{
	vpx_codec_iface_t *iface = vpx_codec_vp8_dx();
	return iface;
}