コード例 #1
0
ファイル: vapoursource.cpp プロジェクト: slajar/ffms2
void VSVideoSource::InitOutputFormat(int ResizeToWidth, int ResizeToHeight,
		const char *ResizerName, int ConvertToFormat, const VSAPI *vsapi, VSCore *core) {

	char ErrorMsg[1024];
	FFMS_ErrorInfo E;
	E.Buffer = ErrorMsg;
	E.BufferSize = sizeof(ErrorMsg);

	// Decode the first frame to learn the source dimensions and pixel format.
	const FFMS_Frame *F = FFMS_GetFrame(V, 0, &E);
	if (!F)
		throw std::runtime_error(std::string("Source: ") + E.Buffer);

	// Default output candidates: every real, native-endian planar format,
	// terminated by PIX_FMT_NONE (-1) as FFMS_SetOutputFormatV2 expects.
	std::vector<int> TargetFormats;
	int npixfmt = GetNumPixFmts();
	for (int i = 0; i < npixfmt; i++)
		if (IsRealNativeEndianPlanar(*av_pix_fmt_desc_get((AVPixelFormat)i)))
			TargetFormats.push_back(i);
	TargetFormats.push_back(PIX_FMT_NONE);

	// An explicit conversion request replaces the candidate list entirely.
	if (ConvertToFormat != pfNone) {
		int TargetPixelFormat = FormatConversionToPixelFormat(ConvertToFormat, OutputAlpha, core, vsapi);
		if (TargetPixelFormat == PIX_FMT_NONE)
			throw std::runtime_error(std::string("Source: Invalid output colorspace specified"));

		TargetFormats.clear();
		TargetFormats.push_back(TargetPixelFormat);
		TargetFormats.push_back(PIX_FMT_NONE);
	}

	// Non-positive target dimensions mean "keep the encoded size".
	if (ResizeToWidth <= 0)
		ResizeToWidth = F->EncodedWidth;

	if (ResizeToHeight <= 0)
		ResizeToHeight = F->EncodedHeight;

	int Resizer = ResizerNameToSWSResizer(ResizerName);
	if (Resizer == 0)
		throw std::runtime_error(std::string("Source: Invalid resizer name specified"));

	if (FFMS_SetOutputFormatV2(V, TargetFormats.data(),
		ResizeToWidth, ResizeToHeight, Resizer, &E))
		throw std::runtime_error(std::string("Source: No suitable output format found"));

	// This trick is required to first get the "best" default format and then
	// set only that format as the output.
	F = FFMS_GetFrame(V, 0, &E);
	if (!F)
		throw std::runtime_error(std::string("Source: ") + E.Buffer);
	TargetFormats.clear();
	TargetFormats.push_back(F->ConvertedPixelFormat);
	TargetFormats.push_back(PIX_FMT_NONE);

	if (FFMS_SetOutputFormatV2(V, TargetFormats.data(), ResizeToWidth, ResizeToHeight, Resizer, &E))
		throw std::runtime_error(std::string("Source: No suitable output format found"));

	F = FFMS_GetFrame(V, 0, &E);
	if (!F)
		throw std::runtime_error(std::string("Source: ") + E.Buffer);

	// Don't output alpha if the clip doesn't have it
	if (!HasAlpha(*av_pix_fmt_desc_get((AVPixelFormat)F->ConvertedPixelFormat)))
		OutputAlpha = false;

	VI[0].format = FormatConversionToVS(F->ConvertedPixelFormat, core, vsapi);
	if (!VI[0].format)
		throw std::runtime_error(std::string("Source: No suitable output format found"));

	VI[0].width = F->ScaledWidth;
	VI[0].height = F->ScaledHeight;

	// fixme? Crop to obey sane even width/height requirements
}
コード例 #2
0
ファイル: avssources.cpp プロジェクト: jeeb/ffms2
void AvisynthVideoSource::InitOutputFormat(
	int ResizeToWidth, int ResizeToHeight, const char *ResizerName,
	const char *ConvertToFormatName, IScriptEnvironment *Env) {

	ErrorInfo E;
	const FFMS_VideoProperties *VP = FFMS_GetVideoProperties(V);
	const FFMS_Frame *F = FFMS_GetFrame(V, 0, &E);
	if (!F)
		Env->ThrowError("FFVideoSource: %s", E.Buffer);

    std::vector<int> TargetFormats;
    if (HighBitDepth) {
        TargetFormats.push_back(FFMS_GetPixFmt("yuv420p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva420p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuv422p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva422p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuv444p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva444p16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuv420p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva420p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuv422p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva422p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuv444p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva444p10"));
        TargetFormats.push_back(FFMS_GetPixFmt("gbrp16"));
        TargetFormats.push_back(FFMS_GetPixFmt("gbrap16"));
        TargetFormats.push_back(FFMS_GetPixFmt("gray16"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva420p"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva422p"));
        TargetFormats.push_back(FFMS_GetPixFmt("yuva444p"));
    }
    TargetFormats.push_back(FFMS_GetPixFmt("yuv410p"));
    TargetFormats.push_back(FFMS_GetPixFmt("yuv411p"));
    TargetFormats.push_back(FFMS_GetPixFmt("yuv420p"));
    TargetFormats.push_back(FFMS_GetPixFmt("yuv422p"));
    TargetFormats.push_back(FFMS_GetPixFmt("yuv444p"));
    TargetFormats.push_back(FFMS_GetPixFmt("gray8"));
    TargetFormats.push_back(FFMS_GetPixFmt("yuyv422"));
    TargetFormats.push_back(FFMS_GetPixFmt("bgra"));

    // Remove unsupported formats from list so they don't appear as an early termination
    TargetFormats.erase(std::remove(TargetFormats.begin(), TargetFormats.end(), -1), TargetFormats.end());

    TargetFormats.push_back(-1);

	// PIX_FMT_NV21 is misused as a return value different to the defined ones in the function
	AVPixelFormat TargetPixelFormat = CSNameToPIXFMT(ConvertToFormatName, FFMS_PIX_FMT(NV21), HighBitDepth);
	if (TargetPixelFormat == FFMS_PIX_FMT(NONE))
		Env->ThrowError("FFVideoSource: Invalid colorspace name specified");

	if (TargetPixelFormat != FFMS_PIX_FMT(NV21)) {
        TargetFormats.clear();
        TargetFormats.push_back(TargetPixelFormat);
        TargetFormats.push_back(-1);
	}

	if (ResizeToWidth <= 0)
		ResizeToWidth = F->EncodedWidth;

	if (ResizeToHeight <= 0)
		ResizeToHeight = F->EncodedHeight;

	int Resizer = ResizerNameToSWSResizer(ResizerName);
	if (Resizer == 0)
		Env->ThrowError("FFVideoSource: Invalid resizer name specified");

	if (FFMS_SetOutputFormatV2(V, TargetFormats.data(),
		ResizeToWidth, ResizeToHeight, Resizer, &E))
		Env->ThrowError("FFVideoSource: No suitable output format found");

	F = FFMS_GetFrame(V, 0, &E);
    TargetFormats.clear();
    TargetFormats.push_back(F->ConvertedPixelFormat);
    TargetFormats.push_back(-1);

		// This trick is required to first get the "best" default format and then set only that format as the output
	if (FFMS_SetOutputFormatV2(V, TargetFormats.data(),
		ResizeToWidth, ResizeToHeight, Resizer, &E))
		Env->ThrowError("FFVideoSource: No suitable output format found");

	F = FFMS_GetFrame(V, 0, &E);

	if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuvj420p") || F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv420p"))
		VI.pixel_type = VideoInfo::CS_I420;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva420p"))
        VI.pixel_type = VideoInfo::CS_YUVA420;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuvj422p") || F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv422p"))
        VI.pixel_type = VideoInfo::CS_YV16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva422p"))
        VI.pixel_type = VideoInfo::CS_YUVA422;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuvj444p") || F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv444p"))
        VI.pixel_type = VideoInfo::CS_YV24;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva444p"))
        VI.pixel_type = VideoInfo::CS_YUVA444;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv411p"))
        VI.pixel_type = VideoInfo::CS_YV411;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv410p"))
        VI.pixel_type = VideoInfo::CS_YUV9;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("gray8"))
        VI.pixel_type = VideoInfo::CS_Y8;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuyv422"))
		VI.pixel_type = VideoInfo::CS_YUY2;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("rgb32"))
		VI.pixel_type = VideoInfo::CS_BGR32;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("bgr24"))
		VI.pixel_type = VideoInfo::CS_BGR24;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv420p16"))
        VI.pixel_type = VideoInfo::CS_YUV420P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva420p16"))
        VI.pixel_type = VideoInfo::CS_YUVA420P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv422p16"))
        VI.pixel_type = VideoInfo::CS_YUV422P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva422p16"))
        VI.pixel_type = VideoInfo::CS_YUVA422P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv444p16"))
        VI.pixel_type = VideoInfo::CS_YUV444P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva444p16"))
        VI.pixel_type = VideoInfo::CS_YUVA444P16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv420p10"))
        VI.pixel_type = VideoInfo::CS_YUV420P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva420p10"))
        VI.pixel_type = VideoInfo::CS_YUVA420P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv422p10"))
        VI.pixel_type = VideoInfo::CS_YUV422P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva422p10"))
        VI.pixel_type = VideoInfo::CS_YUVA422P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv444p10"))
        VI.pixel_type = VideoInfo::CS_YUV444P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuva444p10"))
        VI.pixel_type = VideoInfo::CS_YUVA444P10;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("gbrp16"))
        VI.pixel_type = VideoInfo::CS_RGBP16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("gbrap16"))
        VI.pixel_type = VideoInfo::CS_RGBAP16;
    else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("gray16"))
        VI.pixel_type = VideoInfo::CS_Y16;
	else
		Env->ThrowError("FFVideoSource: No suitable output format found");

	if (RFFMode > 0 && ResizeToHeight != F->EncodedHeight)
		Env->ThrowError("FFVideoSource: Vertical scaling not allowed in RFF mode");

	if (RFFMode > 0 && TargetPixelFormat != FFMS_PIX_FMT(NV21))
		Env->ThrowError("FFVideoSource: Only the default output colorspace can be used in RFF mode");

	// set color information variables
	Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFCOLOR_SPACE"), F->ColorSpace);
	Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFCOLOR_RANGE"), F->ColorRange);

	if (VP->TopFieldFirst)
		VI.image_type = VideoInfo::IT_TFF;
	else
		VI.image_type = VideoInfo::IT_BFF;

	VI.width = F->ScaledWidth;
	VI.height = F->ScaledHeight;

	// Crop to obey subsampling width/height requirements
    VI.width -= VI.width % (1 << GetSubSamplingW(VI));
    VI.height -= VI.height % (1 << (GetSubSamplingH(VI) + (RFFMode > 0 ? 1 : 0)));
}
コード例 #3
0
/// @brief Opens video
/// @param filename The filename to open
/// @throws agi::FileNotFoundError, agi::UserCancelException,
///         VideoNotSupported, VideoOpenError
void FFmpegSourceVideoProvider::LoadVideo(wxString filename) {
	wxString FileNameShort = wxFileName(filename).GetShortPath();

	FFMS_Indexer *Indexer = FFMS_CreateIndexer(FileNameShort.utf8_str(), &ErrInfo);
	if (!Indexer)
		throw agi::FileNotFoundError(ErrInfo.Buffer);

	std::map<int,wxString> TrackList = GetTracksOfType(Indexer, FFMS_TYPE_VIDEO);
	if (TrackList.empty()) {
		// Cancel the indexer before bailing so it isn't leaked
		FFMS_CancelIndexing(Indexer);
		throw VideoNotSupported("no video tracks found");
	}

	// initialize the track number to an invalid value so we can detect later on
	// whether the user actually had to choose a track or not
	int TrackNumber = -1;
	if (TrackList.size() > 1) {
		TrackNumber = AskForTrackSelection(TrackList, FFMS_TYPE_VIDEO);
		// if it's still -1 here, user pressed cancel
		if (TrackNumber == -1) {
			// Cancel the indexer before bailing so it isn't leaked
			FFMS_CancelIndexing(Indexer);
			throw agi::UserCancelException("video loading cancelled by user");
		}
	}

	// generate a name for the cache file
	wxString CacheName = GetCacheFilename(filename);

	// try to read index
	agi::scoped_holder<FFMS_Index*, void (FFMS_CC*)(FFMS_Index*)>
		Index(FFMS_ReadIndex(CacheName.utf8_str(), &ErrInfo), FFMS_DestroyIndex);

	// FFMS_IndexBelongsToFile returns non-zero when the cached index does
	// NOT match this file, in which case the stale index is discarded
	if (Index && FFMS_IndexBelongsToFile(Index, FileNameShort.utf8_str(), &ErrInfo))
		Index = NULL;

	// time to examine the index and check if the track we want is indexed
	// technically this isn't really needed since all video tracks should always be indexed,
	// but a bit of sanity checking never hurt anyone
	if (Index && TrackNumber >= 0) {
		FFMS_Track *TempTrackData = FFMS_GetTrackFromIndex(Index, TrackNumber);
		if (FFMS_GetNumFrames(TempTrackData) <= 0)
			Index = NULL;
	}

	// moment of truth: (re)index if no usable cached index was found
	if (!Index) {
		int TrackMask = FFMS_TRACKMASK_NONE;
		if (OPT_GET("Provider/FFmpegSource/Index All Tracks")->GetBool() || OPT_GET("Video/Open Audio")->GetBool())
			TrackMask = FFMS_TRACKMASK_ALL;
		Index = DoIndexing(Indexer, CacheName, TrackMask, GetErrorHandlingMode());
	}
	else {
		FFMS_CancelIndexing(Indexer);
	}

	// update access time of index file so it won't get cleaned away
	wxFileName(CacheName).Touch();

	// we have now read the index and may proceed with cleaning the index cache
	CleanCache();

	// track number still not set?
	if (TrackNumber < 0) {
		// just grab the first track
		TrackNumber = FFMS_GetFirstIndexedTrackOfType(Index, FFMS_TYPE_VIDEO, &ErrInfo);
		if (TrackNumber < 0)
			throw VideoNotSupported(std::string("Couldn't find any video tracks: ") + ErrInfo.Buffer);
	}

	// set thread count; older ffms2 versions misbehave with multiple
	// decoding threads on lavf sources
	int Threads = OPT_GET("Provider/Video/FFmpegSource/Decoding Threads")->GetInt();
	if (FFMS_GetVersion() < ((2 << 24) | (17 << 16) | (2 << 8) | 1) && FFMS_GetSourceType(Index) == FFMS_SOURCE_LAVF)
		Threads = 1;

	// set seekmode
	// TODO: give this its own option?
	int SeekMode;
	if (OPT_GET("Provider/Video/FFmpegSource/Unsafe Seeking")->GetBool())
		SeekMode = FFMS_SEEK_UNSAFE;
	else
		SeekMode = FFMS_SEEK_NORMAL;

	VideoSource = FFMS_CreateVideoSource(FileNameShort.utf8_str(), TrackNumber, Index, Threads, SeekMode, &ErrInfo);
	if (!VideoSource)
		throw VideoOpenError(std::string("Failed to open video track: ") + ErrInfo.Buffer);

	// load video properties
	VideoInfo = FFMS_GetVideoProperties(VideoSource);

	const FFMS_Frame *TempFrame = FFMS_GetFrame(VideoSource, 0, &ErrInfo);
	if (!TempFrame)
		throw VideoOpenError(std::string("Failed to decode first frame: ") + ErrInfo.Buffer);

	Width  = TempFrame->EncodedWidth;
	Height = TempFrame->EncodedHeight;
	// display aspect ratio, corrected for sample aspect ratio when known
	if (VideoInfo->SARDen > 0 && VideoInfo->SARNum > 0)
		DAR = double(Width) * VideoInfo->SARNum / ((double)Height * VideoInfo->SARDen);
	else
		DAR = double(Width) / Height;

	// Assuming TV for unspecified
	wxString ColorRange = TempFrame->ColorRange == FFMS_CR_JPEG ? "PC" : "TV";

	int CS = TempFrame->ColorSpace;
#if FFMS_VERSION >= ((2 << 24) | (17 << 16) | (1 << 8) | 0)
	if (CS != FFMS_CS_RGB && CS != FFMS_CS_BT470BG && OPT_GET("Video/Force BT.601")->GetBool()) {
		if (FFMS_SetInputFormatV(VideoSource, FFMS_CS_BT470BG, TempFrame->ColorRange, FFMS_GetPixFmt(""), &ErrInfo))
			throw VideoOpenError(std::string("Failed to set input format: ") + ErrInfo.Buffer);

		CS = FFMS_CS_BT470BG;
	}
#endif

	switch (CS) {
		case FFMS_CS_RGB:
			ColorSpace = "None";
			break;
		case FFMS_CS_BT709:
			ColorSpace = wxString::Format("%s.709", ColorRange);
			break;
		case FFMS_CS_UNSPECIFIED:
			// guess BT.709 for HD-sized material, BT.601 otherwise
			ColorSpace = wxString::Format("%s.%s", ColorRange, Width > 1024 || Height >= 600 ? "709" : "601");
			break;
		case FFMS_CS_FCC:
			ColorSpace = wxString::Format("%s.FCC", ColorRange);
			break;
		case FFMS_CS_BT470BG:
		case FFMS_CS_SMPTE170M:
			ColorSpace = wxString::Format("%s.601", ColorRange);
			break;
		case FFMS_CS_SMPTE240M:
			ColorSpace = wxString::Format("%s.240M", ColorRange);
			break;
		default:
			throw VideoOpenError("Unknown video color space");
			break;
	}

	// all frames are converted to BGRA at the encoded size for display
	const int TargetFormat[] = { FFMS_GetPixFmt("bgra"), -1 };
	if (FFMS_SetOutputFormatV2(VideoSource, TargetFormat, Width, Height, FFMS_RESIZER_BICUBIC, &ErrInfo)) {
		throw VideoOpenError(std::string("Failed to set output format: ") + ErrInfo.Buffer);
	}

	// get frame info data
	FFMS_Track *FrameData = FFMS_GetTrackFromVideo(VideoSource);
	if (FrameData == NULL)
		throw VideoOpenError("failed to get frame data");
	const FFMS_TrackTimeBase *TimeBase = FFMS_GetTimeBase(FrameData);
	if (TimeBase == NULL)
		throw VideoOpenError("failed to get track time base");

	const FFMS_FrameInfo *CurFrameData;

	// build list of keyframes and timecodes
	std::vector<int> TimecodesVector;
	for (int CurFrameNum = 0; CurFrameNum < VideoInfo->NumFrames; CurFrameNum++) {
		CurFrameData = FFMS_GetFrameInfo(FrameData, CurFrameNum);
		if (CurFrameData == NULL) {
			throw VideoOpenError(STD_STR(wxString::Format("Couldn't get info about frame %d", CurFrameNum)));
		}

		// keyframe?
		if (CurFrameData->KeyFrame)
			KeyFramesList.push_back(CurFrameNum);

		// calculate timestamp in milliseconds and add to timecodes vector
		int Timestamp = (int)((CurFrameData->PTS * TimeBase->Num) / TimeBase->Den);
		TimecodesVector.push_back(Timestamp);
	}
	// fewer than two timestamps can't define a frame rate; assume 25fps
	if (TimecodesVector.size() < 2)
		Timecodes = 25.0;
	else
		Timecodes = agi::vfr::Framerate(TimecodesVector);

	FrameNumber = 0;
}
コード例 #4
0
ファイル: avssources.cpp プロジェクト: qyot27/ffms2-old
void AvisynthVideoSource::InitOutputFormat(
	int ResizeToWidth, int ResizeToHeight, const char *ResizerName,
	const char *ConvertToFormatName, IScriptEnvironment *Env) {

	char ErrorMsg[1024];
	FFMS_ErrorInfo E;
	E.Buffer = ErrorMsg;
	E.BufferSize = sizeof(ErrorMsg);

	const FFMS_VideoProperties *VP = FFMS_GetVideoProperties(V);
	const FFMS_Frame *F = FFMS_GetFrame(V, 0, &E);
	if (!F)
		Env->ThrowError("FFVideoSource: %s", E.Buffer);

	int TargetFormats[4];
	TargetFormats[0] = FFMS_GetPixFmt("yuv420p");
	TargetFormats[1] = FFMS_GetPixFmt("yuyv422");
	TargetFormats[2] = FFMS_GetPixFmt("bgra");
	TargetFormats[3] = -1;

	// PIX_FMT_NV21 is misused as a return value different to the defined ones in the function
	PixelFormat TargetPixelFormat = CSNameToPIXFMT(ConvertToFormatName, PIX_FMT_NV21);
	if (TargetPixelFormat == PIX_FMT_NONE)
		Env->ThrowError("FFVideoSource: Invalid colorspace name specified");

	if (TargetPixelFormat != PIX_FMT_NV21) {
		TargetFormats[0] = TargetPixelFormat;
		TargetFormats[1] = -1;
	}

	if (ResizeToWidth <= 0)
		ResizeToWidth = F->EncodedWidth;

	if (ResizeToHeight <= 0)
		ResizeToHeight = F->EncodedHeight;

	int Resizer = ResizerNameToSWSResizer(ResizerName);
	if (Resizer == 0)
		Env->ThrowError("FFVideoSource: Invalid resizer name specified");

	if (FFMS_SetOutputFormatV2(V, TargetFormats,
		ResizeToWidth, ResizeToHeight, Resizer, &E))
		Env->ThrowError("FFVideoSource: No suitable output format found");

	F = FFMS_GetFrame(V, 0, &E);
	TargetFormats[0] = F->ConvertedPixelFormat;
	TargetFormats[1] = -1;
	
		// This trick is required to first get the "best" default format and then set only that format as the output
	if (FFMS_SetOutputFormatV2(V, TargetFormats,
		ResizeToWidth, ResizeToHeight, Resizer, &E))
		Env->ThrowError("FFVideoSource: No suitable output format found");

	F = FFMS_GetFrame(V, 0, &E);

	if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuvj420p") || F->ConvertedPixelFormat == FFMS_GetPixFmt("yuv420p"))
		VI.pixel_type = VideoInfo::CS_I420;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("yuyv422"))
		VI.pixel_type = VideoInfo::CS_YUY2;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("rgb32"))
		VI.pixel_type = VideoInfo::CS_BGR32;
	else if (F->ConvertedPixelFormat == FFMS_GetPixFmt("bgr24"))
		VI.pixel_type = VideoInfo::CS_BGR24;
	else
		Env->ThrowError("FFVideoSource: No suitable output format found");

	if (RFFMode > 0 && ResizeToHeight != F->EncodedHeight)
		Env->ThrowError("FFVideoSource: Vertical scaling not allowed in RFF mode");

	if (RFFMode > 0 && TargetPixelFormat != PIX_FMT_NV21)
		Env->ThrowError("FFVideoSource: Only the default output colorspace can be used in RFF mode");
	
	// set color information variables
	Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFCOLOR_SPACE"), F->ColorSpace);
	Env->SetVar(Env->Sprintf("%s%s", this->VarPrefix, "FFCOLOR_RANGE"), F->ColorRange);

	if (VP->TopFieldFirst)
		VI.image_type = VideoInfo::IT_TFF;
	else
		VI.image_type = VideoInfo::IT_BFF;

	VI.width = F->ScaledWidth;
	VI.height = F->ScaledHeight;

	// Crop to obey avisynth's even width/height requirements
	if (VI.pixel_type == VideoInfo::CS_I420) {
		VI.height -= VI.height & 1;
		VI.width -= VI.width & 1;
	}

	if (VI.pixel_type == VideoInfo::CS_YUY2) {
		VI.width -= VI.width & 1;
	}

	if (RFFMode > 0) {
		VI.height -= VI.height & 1;
	}
}
コード例 #5
0
ファイル: file_ffms.cpp プロジェクト: sh0/madj2
// Constructor and destructor
c_media_file_ffms::c_media_file_ffms(boost::filesystem::path path) :
    // Path
    m_path(path),
    // File
    m_source(nullptr),
    // Info
    m_frames(0), m_rate(0), m_aspect(1), m_width(0), m_height(0)
{
    // Debug
    //std::cout << boost::format("FFMS: Opening file! path = %1%") % path << std::endl;

    // FFMS error
    m_fferr.Buffer = m_ffmsg.data();
    m_fferr.BufferSize = m_ffmsg.size();
    m_fferr.ErrorType = FFMS_ERROR_SUCCESS;
    m_fferr.SubType = FFMS_ERROR_SUCCESS;

    // Library
    FFMS_Init(0, 1);

    // Index
    FFMS_Index* index = nullptr;

    // Cached index
    auto path_index = m_path;
    path_index.replace_extension(".ffindex");
    if (boost::filesystem::is_regular_file(path_index)) {
        // Read index
        index = FFMS_ReadIndex(path_index.c_str(), &m_fferr);
        if (index) {
            // Check validity
            int result = FFMS_IndexBelongsToFile(index, m_path.c_str(), &m_fferr);
            if (result) {
                // Invalid index
                FFMS_DestroyIndex(index);
                index = nullptr;

                // Delete index file too
                boost::filesystem::remove(path_index);
            }
        }
    }

    // Create index
    if (!index) {
        // Indexer
        FFMS_Indexer* indexer = FFMS_CreateIndexer(m_path.c_str(), &m_fferr);
        if (!indexer)
            throw c_exception("FFMS: Could not create indexer!", { throw_format("path", m_path) });

        //index = FFMS_DoIndexing2(indexer, FFMS_IEH_ABORT, &m_fferr);
        index = FFMS_DoIndexing(indexer, 0, 0, nullptr, nullptr, FFMS_IEH_ABORT, nullptr, nullptr, &m_fferr);
        if (!index)
            throw c_exception("FFMS: Failed to index media!", { throw_format("path", m_path) });

        // Write index to file
        FFMS_WriteIndex(path_index.c_str(), index, &m_fferr);
    }

    // Track
    int track_id = FFMS_GetFirstTrackOfType(index, FFMS_TYPE_VIDEO, &m_fferr);
    if (track_id < 0) {
        FFMS_DestroyIndex(index);
        throw c_exception("FFMS: Failed to find any video tracks!", { throw_format("path", m_path) });
    }

    // Source
    m_source = FFMS_CreateVideoSource(m_path.c_str(), track_id, index, 1, FFMS_SEEK_NORMAL, &m_fferr);
    if (!m_source) {
        FFMS_DestroyIndex(index);
        throw c_exception("FFMS: Failed to create video source!", { throw_format("path", m_path) });
    }

    // Destroy index
    FFMS_DestroyIndex(index);
    index = nullptr;

    // Video properties
    const FFMS_VideoProperties* props = FFMS_GetVideoProperties(m_source);
    m_frames = props->NumFrames;
    if (props->FirstTime < props->LastTime && props->LastTime > 0.0)
        m_rate = (props->LastTime - props->FirstTime) / static_cast<double>(m_frames);
    else if (props->FPSNumerator != 0)
        m_rate = static_cast<double>(props->FPSNumerator) / static_cast<double>(props->FPSDenominator);
    if (props->SARNum != 0)
        m_aspect = static_cast<double>(props->SARNum) / static_cast<double>(props->SARDen);

    // First frame
    const FFMS_Frame* frame = FFMS_GetFrame(m_source, 0, &m_fferr);
    if (!frame)
        throw c_exception("FFMS: Failed to get first video frame!", { throw_format("path", m_path) });
    if (frame->ScaledWidth > 0)
        m_width = frame->ScaledWidth;
    else
        m_width = frame->EncodedWidth;
    if (frame->ScaledHeight > 0)
        m_height = frame->ScaledHeight;
    else
        m_height = frame->EncodedHeight;

    // Conversion
    int pixfmts[2];
    pixfmts[0] = FFMS_GetPixFmt("rgb24");
    pixfmts[1] = -1;
    if (FFMS_SetOutputFormatV2(m_source, pixfmts, frame->EncodedWidth, frame->EncodedHeight, FFMS_RESIZER_POINT, &m_fferr))
        throw c_exception("FFMS: Failed to set output format!", { throw_format("path", m_path) });

    // Info
    std::cout << boost::format("FFMS: width = %d, height = %d, frames = %d, rate = %.3f, aspect = %.3f") %
        m_width % m_height % m_frames % m_rate % m_aspect << std::endl;
}