Code Example #1
File: lavfvideo.cpp Project: 1974kpkpkp/ffms2
FFMS_Frame *FFLAVFVideo::GetFrame(int n) {
	GetFrameCheck(n);
	n = Frames.RealFrameNumber(n);

	if (LastFrameNum == n)
		return &LocalFrame;

	int SeekOffset = 0;
	bool Seek = true;

	do {
		bool HasSeeked = false;
		if (Seek) {
			HasSeeked = SeekTo(n, SeekOffset);
			Seek = false;
		}

		if (CurrentFrame + FFMS_CALCULATE_DELAY >= n || HasSeeked)
			CodecContext->skip_frame = AVDISCARD_DEFAULT;
		else
			CodecContext->skip_frame = AVDISCARD_NONREF;

		int64_t StartTime = ffms_av_nopts_value, FilePos = -1;
		DecodeNextFrame(&StartTime, &FilePos);

		if (!HasSeeked)
			continue;

		if (StartTime == ffms_av_nopts_value && !Frames.HasTS) {
			if (FilePos >= 0) {
				CurrentFrame = Frames.FrameFromPos(FilePos);
				if (CurrentFrame >= 0)
					continue;
			}
			// If the track doesn't have timestamps or file positions then
			// just trust that we got to the right place, since we have no
			// way to tell where we are
			else {
				CurrentFrame = n;
				continue;
			}
		}

		CurrentFrame = Frames.FrameFromPTS(StartTime);

		// Is the seek destination time known? Does it belong to a frame?
		if (CurrentFrame < 0) {
			if (SeekMode == 1 || StartTime < 0) {
				// No idea where we are so go back a bit further
				SeekOffset -= 10;
				Seek = true;
			}
			else
				CurrentFrame = Frames.ClosestFrameFromPTS(StartTime);
		}
	} while (++CurrentFrame <= n);

	LastFrameNum = n;
	return OutputFrame(DecodeFrame);
}
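
For context, the seek/decode loop above sits behind FFMS2's public C API rather than being called directly. A minimal caller sketch, mirroring the FFMS_GetFrame usage visible in Code Example #4 below (FetchFrame is a hypothetical helper; source creation and indexing are omitted):

#include <cstdio>
#include <ffms.h>

// Hypothetical helper: fetch one frame through the public API, which
// ends up in FFLAVFVideo::GetFrame above. Assumes V is an
// already-created FFMS_VideoSource* (indexing omitted for brevity).
const FFMS_Frame *FetchFrame(FFMS_VideoSource *V, int n) {
	char ErrorMsg[1024];
	FFMS_ErrorInfo E;
	E.Buffer = ErrorMsg;
	E.BufferSize = sizeof(ErrorMsg);

	const FFMS_Frame *Frame = FFMS_GetFrame(V, n, &E);
	if (!Frame)
		std::fprintf(stderr, "FFMS2: %s\n", E.Buffer);
	return Frame;
}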
Code Example #2
void FFMS_VideoSource::SetPP(const char *PP) {

#ifdef FFMS_USE_POSTPROC
	if (PPMode)
		pp_free_mode(PPMode);
	PPMode = NULL;

	if (PP != NULL && strcmp(PP, "")) {
		// due to a parsing bug in libpostproc it can read beyond the end of a string
		// adding a ',' prevents the bug from manifesting
		// libav head 2011-08-26
		std::string s = PP;
		s.append(",");
		PPMode = pp_get_mode_by_name_and_quality(s.c_str(), PP_QUALITY_MAX);
		if (!PPMode) {
			ResetPP();
			throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_INVALID_ARGUMENT,
				"Invalid postprocesing settings");
		}
		
	}

	ReAdjustPP(CodecContext->pix_fmt, CodecContext->width, CodecContext->height);
	OutputFrame(DecodeFrame);
#else
	throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_UNSUPPORTED,
		"FFMS2 was not compiled with postprocessing support");
#endif /* FFMS_USE_POSTPROC */
}
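
A hedged usage sketch: in ffms2 builds of this era that were compiled with postprocessing, SetPP was reachable through a C-level wrapper, assumed here to be FFMS_SetPP; "de" selects libpostproc's default filter chain, and ApplyDefaultPP is a hypothetical helper:

#include <ffms.h>

// Sketch only: FFMS_SetPP is assumed to be the public wrapper around
// FFMS_VideoSource::SetPP in postproc-enabled builds of this era.
int ApplyDefaultPP(FFMS_VideoSource *V, FFMS_ErrorInfo *E) {
	return FFMS_SetPP(V, "de", E); // non-zero return indicates failure
}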
Code Example #3
File: matroskavideo.cpp Project: 1974kpkpkp/ffms2
FFMS_Frame *FFMatroskaVideo::GetFrame(int n) {
	GetFrameCheck(n);
	n = Frames.RealFrameNumber(n);

	if (LastFrameNum == n)
		return &LocalFrame;

	bool HasSeeked = false;
	int ClosestKF = Frames.FindClosestVideoKeyFrame(n);
	if (CurrentFrame > n || ClosestKF > CurrentFrame + 10) {
		DelayCounter = 0;
		InitialDecode = 1;
		PacketNumber = ClosestKF;
		CurrentFrame = ClosestKF;
		FlushBuffers(CodecContext);
		HasSeeked = true;
	}

	do {
		if (CurrentFrame + FFMS_CALCULATE_DELAY >= n || HasSeeked)
			CodecContext->skip_frame = AVDISCARD_DEFAULT;
		else
			CodecContext->skip_frame = AVDISCARD_NONREF;
		DecodeNextFrame();
		CurrentFrame++;
		HasSeeked = false;
	} while (CurrentFrame <= n);

	LastFrameNum = n;
	return OutputFrame(DecodeFrame);
}
Code Example #4
File: avssources.cpp Project: qyot27/ffms2-old
PVideoFrame AvisynthVideoSource::GetFrame(int n, IScriptEnvironment *Env) {
	n = FFMIN(FFMAX(n,0), VI.num_frames - 1);

	char ErrorMsg[1024];
	FFMS_ErrorInfo E;
	E.Buffer = ErrorMsg;
	E.BufferSize = sizeof(ErrorMsg);

	PVideoFrame Dst = Env->NewVideoFrame(VI);

	if (RFFMode > 0) {
		const FFMS_Frame *Frame = FFMS_GetFrame(V, FFMIN(FieldList[n].Top, FieldList[n].Bottom), &E);
		if (Frame == NULL)
			Env->ThrowError("FFVideoSource: %s", E.Buffer);
		if (FieldList[n].Top == FieldList[n].Bottom) {
			OutputFrame(Frame, Dst, Env);
		} else {
			int FirstField = FFMIN(FieldList[n].Top, FieldList[n].Bottom) == FieldList[n].Bottom;
			OutputField(Frame, Dst, FirstField, Env);
			Frame = FFMS_GetFrame(V, FFMAX(FieldList[n].Top, FieldList[n].Bottom), &E);
			if (Frame == NULL)
				Env->ThrowError("FFVideoSource: %s", E.Buffer);
			OutputField(Frame, Dst, !FirstField, Env);
		}
	} else {
		const FFMS_Frame *Frame;

		if (FPSNum > 0 && FPSDen > 0) {
			Frame = FFMS_GetFrameByTime(V, FFMS_GetVideoProperties(V)->FirstTime +
				(double)(n * (int64_t)FPSDen) / FPSNum, &E);
		} else {
			Frame = FFMS_GetFrame(V, n, &E);
			FFMS_Track *T = FFMS_GetTrackFromVideo(V);
			const FFMS_TrackTimeBase *TB = FFMS_GetTimeBase(T);
			Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFVFR_TIME"), static_cast<int>(FFMS_GetFrameInfo(T, n)->PTS * static_cast<double>(TB->Num) / TB->Den));
		}

		if (Frame == NULL)
			Env->ThrowError("FFVideoSource: %s", E.Buffer);

		Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFPICT_TYPE"), static_cast<int>(Frame->PictType));
		OutputFrame(Frame, Dst, Env);
	}

	return Dst;
}
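
The constant-frame-rate branch above requests frames by time rather than by number. The request time it builds can be factored out as below (a sketch; CFRFrameTime is a hypothetical helper, and FirstTime corresponds to FFMS_GetVideoProperties(V)->FirstTime):

#include <cstdint>

// Hypothetical helper for the CFR request time passed to
// FFMS_GetFrameByTime above. The int64_t cast keeps n * FPSDen from
// overflowing a 32-bit int before the division.
static double CFRFrameTime(double FirstTime, int n, int FPSNum, int FPSDen) {
	return FirstTime + (double)(n * (int64_t)FPSDen) / FPSNum;
}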
Code Example #5
void FFMS_VideoSource::ResetInputFormat() {
	InputFormatOverridden = false;
	InputFormat = FFMS_PIX_FMT(NONE);
	InputColorSpace = AVCOL_SPC_UNSPECIFIED;
	InputColorRange = AVCOL_RANGE_UNSPECIFIED;

	ReAdjustOutputFormat();
	OutputFrame(DecodeFrame);
}
Code Example #6
  nsresult Input(MediaRawData* aSample) override
  {
    RefPtr<MediaData> data =
      mCreator->Create(media::TimeUnit::FromMicroseconds(aSample->mTime),
                       media::TimeUnit::FromMicroseconds(aSample->mDuration),
                       aSample->mOffset);

    OutputFrame(data);

    return NS_OK;
  }
Code Example #7
void FFMS_VideoSource::SetOutputFormat(const AVPixelFormat *TargetFormats, int Width, int Height, int Resizer) {
	TargetWidth = Width;
	TargetHeight = Height;
	TargetResizer = Resizer;
	TargetPixelFormats.clear();
	while (*TargetFormats != FFMS_PIX_FMT(NONE))
		TargetPixelFormats.push_back(*TargetFormats++);
	OutputFormat = FFMS_PIX_FMT(NONE);

	ReAdjustOutputFormat();
	OutputFrame(DecodeFrame);
}
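
A hedged caller sketch, assuming FFMS_SetOutputFormatV2 is the matching public wrapper of this API generation; the target list is terminated by -1 (AV_PIX_FMT_NONE), matching the sentinel the loop above checks for, and ForceYV12 is a hypothetical helper:

#include <ffms.h>

// Sketch: force YV12 output at a given size via the assumed public
// wrapper; FFMS_GetPixFmt resolves a pixel format by name.
int ForceYV12(FFMS_VideoSource *V, int W, int H, FFMS_ErrorInfo *E) {
	int TargetFormats[] = { FFMS_GetPixFmt("yuv420p"), -1 };
	return FFMS_SetOutputFormatV2(V, TargetFormats, W, H,
		FFMS_RESIZER_BICUBIC, E);
}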
Code Example #8
void FFMS_VideoSource::ResetPP() {
#ifdef FFMS_USE_POSTPROC
	if (PPContext)
		pp_free_context(PPContext);
	PPContext = NULL;

	if (PPMode)
		pp_free_mode(PPMode);
	PPMode = NULL;

#endif /* FFMS_USE_POSTPROC */
	OutputFrame(DecodeFrame);
}
Code Example #9
void FFMS_VideoSource::SetInputFormat(int ColorSpace, int ColorRange, AVPixelFormat Format) {
	InputFormatOverridden = true;

	if (Format != FFMS_PIX_FMT(NONE))
		InputFormat = Format;
	if (ColorRange != AVCOL_RANGE_UNSPECIFIED)
		InputColorRange = (AVColorRange)ColorRange;
	if (ColorSpace != AVCOL_SPC_UNSPECIFIED)
		InputColorSpace = (AVColorSpace)ColorSpace;

	if (TargetPixelFormats.size()) {
		ReAdjustOutputFormat();
		OutputFrame(DecodeFrame);
	}
}
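
A hedged caller sketch, assuming FFMS_SetInputFormatV is the matching public entry point; unspecified values leave the corresponding property untouched, mirroring the guards above (TagInputFullRange is a hypothetical helper):

#include <ffms.h>

// Sketch: tag the input as full range without overriding the
// colorspace or pixel format (-1 == AV_PIX_FMT_NONE leaves it alone).
int TagInputFullRange(FFMS_VideoSource *V, FFMS_ErrorInfo *E) {
	return FFMS_SetInputFormatV(V, FFMS_CS_UNSPECIFIED, FFMS_CR_JPEG, -1, E);
}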
Code Example #10
void FFMS_VideoSource::ResetOutputFormat() {
	if (SWS) {
		sws_freeContext(SWS);
		SWS = nullptr;
	}

	TargetWidth = -1;
	TargetHeight = -1;
	TargetPixelFormats.clear();

	OutputFormat = FFMS_PIX_FMT(NONE);
	OutputColorSpace = AVCOL_SPC_UNSPECIFIED;
	OutputColorRange = AVCOL_RANGE_UNSPECIFIED;

	OutputFrame(DecodeFrame);
}
Code Example #11
File: haalivideo.cpp Project: 1974kpkpkp/ffms2
FFMS_Frame *FFHaaliVideo::GetFrame(int n) {
	GetFrameCheck(n);
	n = Frames.RealFrameNumber(n);

	if (LastFrameNum == n)
		return &LocalFrame;

	bool HasSeeked = false;
	int SeekOffset = 0;

	if (n < CurrentFrame || Frames.FindClosestVideoKeyFrame(n) > CurrentFrame + 10) {
ReSeek:
		pMMC->Seek(Frames[n + SeekOffset].PTS, MMSF_PREV_KF);
		FlushBuffers(CodecContext);
		DelayCounter = 0;
		InitialDecode = 1;
		HasSeeked = true;
	}

	do {
		int64_t StartTime = -1;
		if (CurrentFrame + FFMS_CALCULATE_DELAY >= n || HasSeeked)
			CodecContext->skip_frame = AVDISCARD_DEFAULT;
		else
			CodecContext->skip_frame = AVDISCARD_NONREF;
		DecodeNextFrame(&StartTime);

		if (HasSeeked) {
			HasSeeked = false;

			if (StartTime < 0 || (CurrentFrame = Frames.FrameFromPTS(StartTime)) < 0) {
				// No idea where we are so go back a bit further
				if (n + SeekOffset == 0)
					throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_UNKNOWN,
						"Frame accurate seeking is not possible in this file");

				SeekOffset -= FFMIN(20, n + SeekOffset);
				goto ReSeek;
			}
		}

		CurrentFrame++;
	} while (CurrentFrame <= n);

	LastFrameNum = n;
	return OutputFrame(DecodeFrame);
}
Code Example #12
File: lavfvideo.cpp Project: 1974kpkpkp/ffms2
FFLAVFVideo::FFLAVFVideo(const char *SourceFile, int Track, FFMS_Index &Index,
	int Threads, int SeekMode)
: FFMS_VideoSource(SourceFile, Index, Track, Threads)
, FormatContext(NULL)
, SeekMode(SeekMode)
, Res(FFSourceResources<FFMS_VideoSource>(this))
{
	AVCodec *Codec = NULL;

	LAVFOpenFile(SourceFile, FormatContext);

	if (SeekMode >= 0 && Frames.size() > 1 && av_seek_frame(FormatContext, VideoTrack, Frames[0].PTS, AVSEEK_FLAG_BACKWARD) < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Video track is unseekable");

	CodecContext = FormatContext->streams[VideoTrack]->codec;
	CodecContext->thread_count = DecodingThreads;

	Codec = avcodec_find_decoder(CodecContext->codec_id);
	if (Codec == NULL)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Video codec not found");

	if (avcodec_open2(CodecContext, Codec, NULL) < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Could not open video codec");

	Res.CloseCodec(true);

	// Always try to decode a frame to make sure all required parameters are known
	int64_t DummyPTS = 0, DummyPos = 0;
	DecodeNextFrame(&DummyPTS, &DummyPos);

	//VP.image_type = VideoInfo::IT_TFF;
	VP.FPSDenominator = FormatContext->streams[VideoTrack]->time_base.num;
	VP.FPSNumerator = FormatContext->streams[VideoTrack]->time_base.den;

	// sanity check framerate
	if (VP.FPSDenominator > VP.FPSNumerator || VP.FPSDenominator <= 0 || VP.FPSNumerator <= 0) {
		VP.FPSDenominator = 1;
		VP.FPSNumerator = 30;
	}

	// Calculate the average framerate
	if (Frames.size() >= 2) {
		double PTSDiff = (double)(Frames.back().PTS - Frames.front().PTS);
		double TD = (double)(Frames.TB.Den);
		double TN = (double)(Frames.TB.Num);
		VP.FPSDenominator = (unsigned int)(((double)1000000) / (double)((Frames.size() - 1) / ((PTSDiff * TN/TD) / (double)1000)));
		VP.FPSNumerator = 1000000;
	}

	// Set the video properties from the codec context
	SetVideoProperties();

	// Set the SAR from the container if the codec SAR is invalid
	if (VP.SARNum <= 0 || VP.SARDen <= 0) {
		VP.SARNum = FormatContext->streams[VideoTrack]->sample_aspect_ratio.num;
		VP.SARDen = FormatContext->streams[VideoTrack]->sample_aspect_ratio.den;
	}

	// Cannot "output" to PPFrame without doing all other initialization
	// This is the additional mess required for seekmode=-1 to work in a reasonable way
	OutputFrame(DecodeFrame);
}
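
The nested average-framerate expression in the constructor is hard to read. Flattened, it computes the mean frame duration in microseconds against a fixed numerator of 1000000; a sketch of the equivalent computation, assuming (as ffms2 does internally) that PTS * TB.Num / TB.Den yields milliseconds (AverageFPS is a hypothetical helper):

#include <cstddef>
#include <cstdint>

// Sketch: flattened form of the FPS calculation above.
static void AverageFPS(int64_t PTSDiff, int64_t TBNum, int64_t TBDen,
	size_t NumFrames, unsigned &FPSNum, unsigned &FPSDen) {
	double TotalSeconds = (double)PTSDiff * TBNum / TBDen / 1000.0;
	// Mean frame duration in microseconds, against a 1e6 numerator.
	FPSDen = (unsigned)(TotalSeconds * 1000000.0 / (double)(NumFrames - 1));
	FPSNum = 1000000;
}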
Code Example #13
File: vapoursource.cpp Project: slajar/ffms2
const VSFrameRef *VS_CC VSVideoSource::GetFrame(int n, int activationReason, void **instanceData, void **, VSFrameContext *frameCtx, VSCore *core, const VSAPI *vsapi) {
	VSVideoSource *vs = static_cast<VSVideoSource *>(*instanceData);
	if (activationReason == arInitial) {

		char ErrorMsg[1024];
		FFMS_ErrorInfo E;
		E.Buffer = ErrorMsg;
		E.BufferSize = sizeof(ErrorMsg);
		std::string buf = "Source: ";

		int OutputIndex = vs->OutputAlpha ? vsapi->getOutputIndex(frameCtx) : 0;	

		VSFrameRef *Dst = vsapi->newVideoFrame(vs->VI[OutputIndex].format, vs->VI[OutputIndex].width, vs->VI[OutputIndex].height, nullptr, core);
		VSMap *Props = vsapi->getFramePropsRW(Dst);

		const FFMS_Frame *Frame;

		if (vs->FPSNum > 0 && vs->FPSDen > 0) {
			double currentTime = FFMS_GetVideoProperties(vs->V)->FirstTime +
				(double)(n * (int64_t)vs->FPSDen) / vs->FPSNum;
			Frame = FFMS_GetFrameByTime(vs->V, currentTime, &E);
			vsapi->propSetInt(Props, "_DurationNum", vs->FPSDen, paReplace);
			vsapi->propSetInt(Props, "_DurationDen", vs->FPSNum, paReplace);
			vsapi->propSetFloat(Props, "_AbsoluteTime", currentTime, paReplace);
		} else {
			Frame = FFMS_GetFrame(vs->V, n, &E);
			FFMS_Track *T = FFMS_GetTrackFromVideo(vs->V);
			const FFMS_TrackTimeBase *TB = FFMS_GetTimeBase(T);
			int64_t num;
			if (n + 1 < vs->VI[0].numFrames)
				num = FFMS_GetFrameInfo(T, n + 1)->PTS - FFMS_GetFrameInfo(T, n)->PTS;
			else if (n > 0) // simply use the second to last frame's duration for the last one, should be good enough
				num = FFMS_GetFrameInfo(T, n)->PTS - FFMS_GetFrameInfo(T, n - 1)->PTS;
			else // just make it one timebase if it's a single frame clip
				num = 1;
			int64_t DurNum = TB->Num * num;
			int64_t DurDen = TB->Den * 1000;
			muldivRational(&DurNum, &DurDen, 1, 1);
			vsapi->propSetInt(Props, "_DurationNum", DurNum, paReplace);
			vsapi->propSetInt(Props, "_DurationDen", DurDen, paReplace);
			vsapi->propSetFloat(Props, "_AbsoluteTime", ((static_cast<double>(TB->Num) / 1000) *  FFMS_GetFrameInfo(T, n)->PTS) / TB->Den, paReplace);
		}

		if (Frame == nullptr) {
			buf += E.Buffer;
			vsapi->setFilterError(buf.c_str(), frameCtx);
			return nullptr;
		}

		// Set AR variables
		if (vs->SARNum > 0 && vs->SARDen > 0) {
			vsapi->propSetInt(Props, "_SARNum", vs->SARNum, paReplace);
			vsapi->propSetInt(Props, "_SARDen", vs->SARDen, paReplace);
		}

		vsapi->propSetInt(Props, "_Matrix", Frame->ColorSpace, paReplace);
		vsapi->propSetInt(Props, "_Primaries", Frame->ColorPrimaries, paReplace);
		vsapi->propSetInt(Props, "_Transfer", Frame->TransferCharateristics, paReplace);
		if (Frame->ChromaLocation > 0)
			vsapi->propSetInt(Props, "_ChromaLocation", Frame->ChromaLocation - 1, paReplace);

		if (Frame->ColorRange == FFMS_CR_MPEG)
			vsapi->propSetInt(Props, "_ColorRange", 1, paReplace);
		else if (Frame->ColorRange == FFMS_CR_JPEG)
			vsapi->propSetInt(Props, "_ColorRange", 0, paReplace);
		vsapi->propSetData(Props, "_PictType", &Frame->PictType, 1, paReplace);

		// Set field information
		int FieldBased = 0;
		if (Frame->InterlacedFrame)
			FieldBased = (Frame->TopFieldFirst ? 2 : 1);
		vsapi->propSetInt(Props, "_FieldBased", FieldBased, paReplace);

		if (OutputIndex == 0)
			OutputFrame(Frame, Dst, vsapi);
		else
			OutputAlphaFrame(Frame, vs->VI[0].format->numPlanes, Dst, vsapi);

		return Dst;
	}

	return nullptr;
}
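
The VFR branch above derives each frame's duration from neighbouring PTS values and stores it as a reduced rational. A sketch of the same computation (FrameDuration is a hypothetical helper; the *1000 follows the code above, since FFMS2's track timebase yields milliseconds, and std::gcd stands in for the muldivRational(..., 1, 1) reduction):

#include <cstdint>
#include <numeric>

// Sketch: duration of one frame as a reduced rational in seconds.
static void FrameDuration(int64_t PTSDelta, int64_t TBNum, int64_t TBDen,
	int64_t &DurNum, int64_t &DurDen) {
	DurNum = TBNum * PTSDelta;
	DurDen = TBDen * 1000;
	int64_t G = std::gcd(DurNum, DurDen);
	DurNum /= G;
	DurDen /= G;
}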
Code Example #14
File: haalivideo.cpp Project: 1974kpkpkp/ffms2
FFHaaliVideo::FFHaaliVideo(const char *SourceFile, int Track,
	FFMS_Index &Index, int Threads, FFMS_Sources SourceMode)
: Res(FFSourceResources<FFMS_VideoSource>(this)), FFMS_VideoSource(SourceFile, Index, Track, Threads) {
	BitStreamFilter = NULL;

	pMMC = HaaliOpenFile(SourceFile, SourceMode);

	CComPtr<IEnumUnknown> pEU;
	if (!SUCCEEDED(pMMC->EnumTracks(&pEU)))
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Failed to enumerate tracks");

	CComPtr<IUnknown> pU;
	int CurrentTrack = -1;
	while (pEU->Next(1, &pU, NULL) == S_OK && ++CurrentTrack != Track) pU = NULL;
	CComQIPtr<IPropertyBag> pBag = pU;

	if (CurrentTrack != Track || !pBag)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Failed to find track");

	HCodecContext = InitializeCodecContextFromHaaliInfo(pBag);
	CodecContext = HCodecContext;

	const AVCodec *Codec = NULL;
	std::swap(Codec, CodecContext->codec);
	if (avcodec_open2(CodecContext, Codec, NULL) < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Could not open video codec");

	CodecContext->thread_count = DecodingThreads;

	if (CodecContext->codec->id == FFMS_ID(H264) && SourceMode == FFMS_SOURCE_HAALIMPEG)
		BitStreamFilter = av_bitstream_filter_init("h264_mp4toannexb");

	Res.CloseCodec(true);

	// Always try to decode a frame to make sure all required parameters are known
	int64_t Dummy;
	DecodeNextFrame(&Dummy);

	VP.FPSDenominator = 1;
	VP.FPSNumerator = 30;

	// Calculate the average framerate
	if (Frames.size() >= 2) {
		double PTSDiff = (double)(Frames.back().PTS - Frames.front().PTS);
		VP.FPSDenominator = (unsigned int)(PTSDiff  / (double)1000 / (double)(Frames.size() - 1) + 0.5);
		VP.FPSNumerator = 1000000;
	}

	// Set the video properties from the codec context
	SetVideoProperties();

	// Output the already decoded frame so it isn't wasted
	OutputFrame(DecodeFrame);

	// Set AR variables
	CComVariant pV;

	USHORT Num = 0, Den = 0;

	pV.Clear();
	if (SUCCEEDED(pBag->Read(L"Video.DisplayWidth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
		Num = pV.uiVal;
	pV.Clear();
	if (SUCCEEDED(pBag->Read(L"Video.DisplayHeight", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
		Den = pV.uiVal;

	if (Num && Den) {
		VP.SARNum = LocalFrame.EncodedHeight * Num;
		VP.SARDen = LocalFrame.EncodedWidth * Den;
	}
}
Code Example #15
File: video.c Project: AsamQi/vlc
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_t *id,
                                    block_t *in, block_t **out )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    bool b_need_duplicate = false;
    picture_t *p_pic;
    *out = NULL;

    if( unlikely( in == NULL ) )
    {
        if( p_sys->i_threads == 0 )
        {
            block_t *p_block;
            do {
                p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
                block_ChainAppend( out, p_block );
            } while( p_block );
        }
        else
        {
            /*
             * FIXME: we need EncoderThread() to flush buffers and signal us
             * when it's done so we can send the last frames to the chain
             */
        }
        return VLC_SUCCESS;
    }


    while( (p_pic = id->p_decoder->pf_decode_video( id->p_decoder, &in )) )
    {

        if( p_stream->p_sout->i_out_pace_nocontrol && p_sys->b_hurry_up )
        {
            mtime_t current_date = mdate();
            if( unlikely( current_date + 50000 > p_pic->date ) )
            {
                msg_Dbg( p_stream, "late picture skipped (%"PRId64")",
                         current_date + 50000 - p_pic->date );
                picture_Release( p_pic );
                continue;
            }
        }

        if( p_sys->b_master_sync )
        {
            mtime_t i_master_drift = p_sys->i_master_drift;
            mtime_t i_pts = date_Get( &id->interpolated_pts ) + 1;
            mtime_t i_video_drift = p_pic->date - i_pts;

            if ( unlikely( i_video_drift > MASTER_SYNC_MAX_DRIFT
                  || i_video_drift < -MASTER_SYNC_MAX_DRIFT ) )
            {
                msg_Dbg( p_stream,
                    "drift is too high (%"PRId64", resetting master sync",
                    i_video_drift );
                date_Set( &id->interpolated_pts, p_pic->date );
                i_pts = p_pic->date + 1;
            }
            i_video_drift = p_pic->date - i_pts;
            b_need_duplicate = false;

            /* Set the pts of the frame being encoded */
            p_pic->date = i_pts;

            if( unlikely( i_video_drift < (i_master_drift - 50000) ) )
            {
#if 0
                msg_Dbg( p_stream, "dropping frame (%i)",
                         (int)(i_video_drift - i_master_drift) );
#endif
                picture_Release( p_pic );
                continue;
            }
            else if( unlikely( i_video_drift > (i_master_drift + 50000) ) )
            {
#if 0
                msg_Dbg( p_stream, "adding frame (%i)",
                         (int)(i_video_drift - i_master_drift) );
#endif
                b_need_duplicate = true;
            }
        }
        if( unlikely (
             id->p_encoder->p_module &&
             !video_format_IsSimilar( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video )
            )
          )
        {
            msg_Info( p_stream, "aspect-ratio changed, reiniting. %i -> %i : %i -> %i.",
                        p_sys->fmt_input_video.i_sar_num, id->p_decoder->fmt_out.video.i_sar_num,
                        p_sys->fmt_input_video.i_sar_den, id->p_decoder->fmt_out.video.i_sar_den
                    );
            /* Close filters */
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            id->p_f_chain = NULL;
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_uf_chain = NULL;

            /* Reinitialize filters */
            id->p_encoder->fmt_out.video.i_width  = p_sys->i_width & ~1;
            id->p_encoder->fmt_out.video.i_height = p_sys->i_height & ~1;
            id->p_encoder->fmt_out.video.i_sar_num = id->p_encoder->fmt_out.video.i_sar_den = 0;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
        }


        if( unlikely( !id->p_encoder->p_module ) )
        {
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_f_chain = id->p_uf_chain = NULL;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));

            if( transcode_video_encoder_open( p_stream, id ) != VLC_SUCCESS )
            {
                picture_Release( p_pic );
                transcode_video_close( p_stream, id );
                id->b_transcode = false;
                return VLC_EGENERIC;
            }
        }

        /* Run the filter and output chains; first with the picture,
         * and then with NULL as many times as we need until they
         * stop outputting frames.
         */
        for ( ;; ) {
            picture_t *p_filtered_pic = p_pic;

            /* Run filter chain */
            if( id->p_f_chain )
                p_filtered_pic = filter_chain_VideoFilter( id->p_f_chain, p_filtered_pic );
            if( !p_filtered_pic )
                break;

            for ( ;; ) {
                picture_t *p_user_filtered_pic = p_filtered_pic;

                /* Run user specified filter chain */
                if( id->p_uf_chain )
                    p_user_filtered_pic = filter_chain_VideoFilter( id->p_uf_chain, p_user_filtered_pic );
                if( !p_user_filtered_pic )
                    break;

                OutputFrame( p_sys, p_user_filtered_pic, b_need_duplicate, p_stream, id, out );
                b_need_duplicate = false;

                p_filtered_pic = NULL;
            }

            p_pic = NULL;
        }
    }

    if( p_sys->i_threads >= 1 )
    {
        /* Pick up any return data the encoder thread wants to output. */
        vlc_mutex_lock( &p_sys->lock_out );
        *out = p_sys->p_buffers;
        p_sys->p_buffers = NULL;
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    return VLC_SUCCESS;
}
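
Distilled, the master-sync branch above makes a three-way decision from the two drift values. A standalone sketch of that logic (MasterSyncAction is a hypothetical helper; the 50000 µs threshold is the constant used in the code, and mtime_t is VLC's microsecond timestamp type):

#include <cstdint>

typedef int64_t mtime_t; // VLC's microsecond timestamp type

enum SyncAction { SYNC_KEEP, SYNC_DROP, SYNC_DUPLICATE };

// Sketch: drop a picture that lags the master clock by more than
// 50 ms, duplicate one that leads it by more than 50 ms, otherwise
// encode it as-is.
static SyncAction MasterSyncAction(mtime_t video_drift, mtime_t master_drift) {
	if (video_drift < master_drift - 50000) return SYNC_DROP;
	if (video_drift > master_drift + 50000) return SYNC_DUPLICATE;
	return SYNC_KEEP;
}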
Code Example #16
File: video.c Project: chucolin/vlc
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_t *id,
                                    block_t *in, block_t **out )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    picture_t *p_pic = NULL;
    *out = NULL;

    if( unlikely( in == NULL ) )
    {
        if( p_sys->i_threads == 0 )
        {
            block_t *p_block;
            do {
                p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
                block_ChainAppend( out, p_block );
            } while( p_block );
        }
        else
        {
            msg_Dbg( p_stream, "Flushing thread and waiting that");
            vlc_mutex_lock( &p_stream->p_sys->lock_out );
            p_stream->p_sys->b_abort = true;
            vlc_cond_signal( &p_stream->p_sys->cond );
            vlc_mutex_unlock( &p_stream->p_sys->lock_out );

            vlc_join( p_stream->p_sys->thread, NULL );
            vlc_mutex_lock( &p_sys->lock_out );
            *out = p_sys->p_buffers;
            p_sys->p_buffers = NULL;
            vlc_mutex_unlock( &p_sys->lock_out );

            msg_Dbg( p_stream, "Flushing done");
        }
        return VLC_SUCCESS;
    }


    while( (p_pic = id->p_decoder->pf_decode_video( id->p_decoder, &in )) )
    {

        if( p_stream->p_sout->i_out_pace_nocontrol && p_sys->b_hurry_up )
        {
            mtime_t current_date = mdate();
            if( unlikely( (current_date - 50000) > p_pic->date ) )
            {
                msg_Dbg( p_stream, "late picture skipped (%"PRId64")",
                         current_date - 50000 - p_pic->date );
                picture_Release( p_pic );
                continue;
            }
        }

        if( unlikely (
             id->p_encoder->p_module &&
             !video_format_IsSimilar( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video )
            )
          )
        {
            msg_Info( p_stream, "aspect-ratio changed, reiniting. %i -> %i : %i -> %i.",
                        p_sys->fmt_input_video.i_sar_num, id->p_decoder->fmt_out.video.i_sar_num,
                        p_sys->fmt_input_video.i_sar_den, id->p_decoder->fmt_out.video.i_sar_den
                    );
            /* Close filters */
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            id->p_f_chain = NULL;
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_uf_chain = NULL;

            /* Reinitialize filters */
            id->p_encoder->fmt_out.video.i_visible_width  = p_sys->i_width & ~1;
            id->p_encoder->fmt_out.video.i_visible_height = p_sys->i_height & ~1;
            id->p_encoder->fmt_out.video.i_sar_num = id->p_encoder->fmt_out.video.i_sar_den = 0;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
        }


        if( unlikely( !id->p_encoder->p_module ) )
        {
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_f_chain = id->p_uf_chain = NULL;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));

            if( transcode_video_encoder_open( p_stream, id ) != VLC_SUCCESS )
            {
                picture_Release( p_pic );
                transcode_video_close( p_stream, id );
                id->b_transcode = false;
                return VLC_EGENERIC;
            }
        }

        /* Input lipsync and drop check */
        if( p_sys->b_master_sync )
        {
            /* How much audio has drifted */
            mtime_t i_master_drift = p_sys->i_master_drift;

            /* This is the pts input should have now with constant frame rate */
            mtime_t i_pts = date_Get( &id->interpolated_pts );

            /* How much video pts is ahead of calculated pts */
            mtime_t i_video_drift = p_pic->date - i_pts;

            /* Check that we are having lipsync with input here */
            if( unlikely ( ( (i_video_drift - i_master_drift ) > MASTER_SYNC_MAX_DRIFT
                          || (i_video_drift + i_master_drift ) < -MASTER_SYNC_MAX_DRIFT ) ) )
            {
                msg_Warn( p_stream,
                    "video drift too big, resetting sync %"PRId64" to %"PRId64,
                    (i_video_drift + i_master_drift),
                    p_pic->date
                    );
                date_Set( &id->interpolated_pts, p_pic->date );
                date_Set( &id->next_output_pts, p_pic->date );
                i_pts = date_Get( &id->interpolated_pts );
            }

            /* Set the pts of the frame being encoded */
            p_pic->date = i_pts;

            /* now take next input pts, pts dates are only enabled if p_module is set*/
            date_Increment( &id->interpolated_pts, id->p_decoder->fmt_out.video.i_frame_rate_base );


            /* If input pts + input_frame_interval is lower than next_output_pts - output_frame_interval
             * Then the future input frame should fit better and we can drop this one 
             *
             * Duplication need is checked in OutputFrame */
            if( ( p_pic->date + (mtime_t)id->i_input_frame_interval ) <
                ( date_Get( &id->next_output_pts ) ) )
            {
#if 0
                msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
                         p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
#endif
                picture_Release( p_pic );
                continue;
            }
#if 0
            msg_Dbg( p_stream, "not dropping frame");
#endif

            /* input calculated pts isn't necessary what pts output should be, so use output pts*/
            p_pic->date = date_Get( &id->next_output_pts );


        }

        /* Run the filter and output chains; first with the picture,
         * and then with NULL as many times as we need until they
         * stop outputting frames.
         */
        for ( ;; ) {
            picture_t *p_filtered_pic = p_pic;

            /* Run filter chain */
            if( id->p_f_chain )
                p_filtered_pic = filter_chain_VideoFilter( id->p_f_chain, p_filtered_pic );
            if( !p_filtered_pic )
                break;

            for ( ;; ) {
                picture_t *p_user_filtered_pic = p_filtered_pic;

                /* Run user specified filter chain */
                if( id->p_uf_chain )
                    p_user_filtered_pic = filter_chain_VideoFilter( id->p_uf_chain, p_user_filtered_pic );
                if( !p_user_filtered_pic )
                    break;

                OutputFrame( p_sys, p_user_filtered_pic, p_stream, id, out );

                p_filtered_pic = NULL;
            }

            p_pic = NULL;
        }
    }

    if( p_sys->i_threads >= 1 )
    {
        /* Pick up any return data the encoder thread wants to output. */
        vlc_mutex_lock( &p_sys->lock_out );
        *out = p_sys->p_buffers;
        p_sys->p_buffers = NULL;
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    return VLC_SUCCESS;
}
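
This revision replaces the symmetric drift window with a pts-interval test: a frame is dropped when even the next input frame would still arrive before the next scheduled output pts. Distilled as a sketch (ShouldDropPicture is a hypothetical helper using the same quantities as the code above; duplication is still handled later in OutputFrame):

#include <cstdint>

typedef int64_t mtime_t; // VLC's microsecond timestamp type

// Sketch of the drop test from the loop above.
static bool ShouldDropPicture(mtime_t pic_date, mtime_t input_frame_interval,
	mtime_t next_output_pts) {
	return pic_date + input_frame_interval < next_output_pts;
}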
Code Example #17
File: matroskavideo.cpp Project: 1974kpkpkp/ffms2
FFMatroskaVideo::FFMatroskaVideo(const char *SourceFile, int Track,
	FFMS_Index &Index, int Threads)
: FFMS_VideoSource(SourceFile, Index, Track, Threads)
, MF(0)
, Res(FFSourceResources<FFMS_VideoSource>(this))
, PacketNumber(0)
{
	AVCodec *Codec = NULL;
	TrackInfo *TI = NULL;

	MC.ST.fp = ffms_fopen(SourceFile, "rb");
	if (MC.ST.fp == NULL) {
		std::ostringstream buf;
		buf << "Can't open '" << SourceFile << "': " << strerror(errno);
		throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
	}

	setvbuf(MC.ST.fp, NULL, _IOFBF, CACHESIZE);

	MF = mkv_OpenEx(&MC.ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
	if (MF == NULL) {
		std::ostringstream buf;
		buf << "Can't parse Matroska file: " << ErrorMessage;
		throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
	}

	TI = mkv_GetTrackInfo(MF, VideoTrack);

	if (TI->CompEnabled)
		TCC.reset(new TrackCompressionContext(MF, TI, VideoTrack));

	CodecContext = avcodec_alloc_context3(NULL);
	CodecContext->thread_count = DecodingThreads;

	Codec = avcodec_find_decoder(MatroskaToFFCodecID(TI->CodecID, TI->CodecPrivate));
	if (Codec == NULL)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Video codec not found");

	InitializeCodecContextFromMatroskaTrackInfo(TI, CodecContext);

	if (avcodec_open2(CodecContext, Codec, NULL) < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
			"Could not open video codec");

	Res.CloseCodec(true);

	// Always try to decode a frame to make sure all required parameters are known
	DecodeNextFrame();

	VP.FPSDenominator = 1;
	VP.FPSNumerator = 30;

	// Calculate the average framerate
	if (Frames.size() >= 2) {
		double PTSDiff = (double)(Frames.back().PTS - Frames.front().PTS);
		// Dividing by 1000 caused too much information to be lost; when CorrectNTSCRationalFramerate
		// ran, it could output the wrong framerate. We still divide by 100 to protect against overflows.
		VP.FPSDenominator = (unsigned int)(PTSDiff * mkv_TruncFloat(TI->TimecodeScale) / (double)100 / (double)(Frames.size() - 1) + 0.5);
		VP.FPSNumerator = 10000000;
	}

	// Set the video properties from the codec context
	SetVideoProperties();

	// Output the already decoded frame so it isn't wasted
	OutputFrame(DecodeFrame);

	// Set AR variables
	VP.SARNum = TI->AV.Video.DisplayWidth * TI->AV.Video.PixelHeight;
	VP.SARDen = TI->AV.Video.DisplayHeight * TI->AV.Video.PixelWidth;

	// Set crop variables
	VP.CropLeft = TI->AV.Video.CropL;
	VP.CropRight = TI->AV.Video.CropR;
	VP.CropTop = TI->AV.Video.CropT;
	VP.CropBottom = TI->AV.Video.CropB;
}
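
As a worked check on the 100ns-tick framerate above (under the common assumption of a Matroska TimecodeScale of 1000000 ns): NTSC 30000/1001 material has a mean frame duration of about 33366666.7 ns, which rounds to 333667 ticks of 100 ns, giving 10000000/333667 ≈ 29.9700 fps; that is close enough for CorrectNTSCRationalFramerate to snap it to 30000/1001, whereas dividing by 1000 quantizes to 1 µs ticks and leaves an order of magnitude more rounding error.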
Code Example #18
File: video.c Project: 5UN5H1N3/vlc
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_sys_t *id,
                                    block_t *in, block_t **out )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    picture_t *p_pic = NULL;
    *out = NULL;

    if( unlikely( in == NULL ) )
    {
        if( p_sys->i_threads == 0 )
        {
            block_t *p_block;
            do {
                p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
                block_ChainAppend( out, p_block );
            } while( p_block );
        }
        else
        {
            msg_Dbg( p_stream, "Flushing thread and waiting that");
            vlc_mutex_lock( &p_stream->p_sys->lock_out );
            p_stream->p_sys->b_abort = true;
            vlc_cond_signal( &p_stream->p_sys->cond );
            vlc_mutex_unlock( &p_stream->p_sys->lock_out );

            vlc_join( p_stream->p_sys->thread, NULL );
            vlc_mutex_lock( &p_sys->lock_out );
            *out = p_sys->p_buffers;
            p_sys->p_buffers = NULL;
            vlc_mutex_unlock( &p_sys->lock_out );

            msg_Dbg( p_stream, "Flushing done");
        }
        return VLC_SUCCESS;
    }


    while( (p_pic = id->p_decoder->pf_decode_video( id->p_decoder, &in )) )
    {

        if( unlikely (
             id->p_encoder->p_module &&
             !video_format_IsSimilar( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video )
            )
          )
        {
            msg_Info( p_stream, "aspect-ratio changed, reiniting. %i -> %i : %i -> %i.",
                        p_sys->fmt_input_video.i_sar_num, id->p_decoder->fmt_out.video.i_sar_num,
                        p_sys->fmt_input_video.i_sar_den, id->p_decoder->fmt_out.video.i_sar_den
                    );
            /* Close filters */
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            id->p_f_chain = NULL;
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_uf_chain = NULL;

            /* Reinitialize filters */
            id->p_encoder->fmt_out.video.i_visible_width  = p_sys->i_width & ~1;
            id->p_encoder->fmt_out.video.i_visible_height = p_sys->i_height & ~1;
            id->p_encoder->fmt_out.video.i_sar_num = id->p_encoder->fmt_out.video.i_sar_den = 0;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
        }


        if( unlikely( !id->p_encoder->p_module ) )
        {
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_f_chain = id->p_uf_chain = NULL;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));

            if( transcode_video_encoder_open( p_stream, id ) != VLC_SUCCESS )
            {
                picture_Release( p_pic );
                transcode_video_close( p_stream, id );
                id->b_transcode = false;
                return VLC_EGENERIC;
            }
            date_Set( &id->next_output_pts, p_pic->date );
        }

        /* Input lipsync and drop check */
        if( p_sys->b_master_sync )
        {
            /* If input pts lower than next_output_pts - output_frame_interval
             * Then the future input frame should fit better and we can drop this one 
             *
             * We check this here as we don't need to run video filter at all for pictures
             * we are going to drop anyway
             *
             * Duplication need is checked in OutputFrame */
            if( ( p_pic->date ) <
                ( date_Get( &id->next_output_pts ) - (mtime_t)id->i_output_frame_interval ) )
            {
#if 0
                msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
                         p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
#endif
                picture_Release( p_pic );
                continue;
            }
#if 0
            msg_Dbg( p_stream, "not dropping frame");
#endif

        }

        /* Run the filter and output chains; first with the picture,
         * and then with NULL as many times as we need until they
         * stop outputting frames.
         */
        for ( ;; ) {
            picture_t *p_filtered_pic = p_pic;

            /* Run filter chain */
            if( id->p_f_chain )
                p_filtered_pic = filter_chain_VideoFilter( id->p_f_chain, p_filtered_pic );
            if( !p_filtered_pic )
                break;

            for ( ;; ) {
                picture_t *p_user_filtered_pic = p_filtered_pic;

                /* Run user specified filter chain */
                if( id->p_uf_chain )
                    p_user_filtered_pic = filter_chain_VideoFilter( id->p_uf_chain, p_user_filtered_pic );
                if( !p_user_filtered_pic )
                    break;

                OutputFrame( p_stream, p_user_filtered_pic, id, out );

                p_filtered_pic = NULL;
            }

            p_pic = NULL;
        }
    }

    if( p_sys->i_threads >= 1 )
    {
        /* Pick up any return data the encoder thread wants to output. */
        vlc_mutex_lock( &p_sys->lock_out );
        *out = p_sys->p_buffers;
        p_sys->p_buffers = NULL;
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    return VLC_SUCCESS;
}