Пример #1
0
// Streams the clip's audio either to stdout or to an mplayer process started
// via popen(). Returns 0 on success, -1 on failure. SIGPIPE is ignored for
// the duration so a dying consumer surfaces as a stream error instead of
// killing the process; the previous handler is restored on every exit path.
int AvxContext::OutputAudio() {
	FILE *sink;
	void *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandAudio(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);

		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	// Audio is pushed in fixed-size chunks to bound the buffer allocation.
	#define AUDIO_SAMPLES 1000
	try {
		writeBuffer = malloc(vi.BytesPerAudioSample() * AUDIO_SAMPLES);
		if (!writeBuffer) {
			AVXLOG_ERROR("%s", "Unable to allocate memory");
			goto fail;
		}
		for (__int64 i = 0; i < vi.num_audio_samples; i += AUDIO_SAMPLES) {
			// Stop early if the mplayer end of the pipe went away.
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			int read_samples;
			// The final chunk may be shorter than AUDIO_SAMPLES.
			if (vi.num_audio_samples - AUDIO_SAMPLES < i)
				read_samples = vi.num_audio_samples - i;
			else
				read_samples = AUDIO_SAMPLES;
			clip->GetAudio(writeBuffer, i, read_samples, avx_library.env);
			fwrite(writeBuffer, vi.BytesPerAudioSample(), read_samples, sink);
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}
	#undef AUDIO_SAMPLES

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	// Shared error path: release resources and restore the SIGPIPE handler.
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
}
Пример #2
0
// Loads the given video into the embedded browser control: instantiates the
// HTML template with the video's name/address, writes it to a temp file and
// navigates to that file. The previously used temp file (if any) is deleted
// and the new filename is remembered in m_cszHtmlFilename.
void YouTubeWebPageView::LoadVideo(const VideoInfo& info)
{
   m_bLoaded = false;

   ATLTRACE(_T("%08x: LoadVideo: \"%s\" (%s)\n"),
      m_hWnd,
      info.Name(),
      info.Address());

   m_iLastError = 0;

   // Fill in the template placeholders and normalize line endings.
   CString cszHtml = GetHtmlTemplate();

   cszHtml.Replace(_T("{..$address..}"), info.Address());
   cszHtml.Replace(_T("{..$name..}"), info.Name());
   cszHtml.Replace(_T("\r\n"), _T("\n"));

   // generate temp name
   CString cszTempFolder;
   GetTempPath(MAX_PATH, cszTempFolder.GetBuffer(MAX_PATH));
   cszTempFolder.ReleaseBuffer();

   CString cszFilename;
   GetTempFileName(cszTempFolder, _T("YTP"), 0, cszFilename.GetBuffer(MAX_PATH));
   cszFilename.ReleaseBuffer();

   // write to temporary file
   {
      FILE* fd = NULL;
      errno_t err = _tfopen_s(&fd, cszFilename, _T("wt"));
      ATLVERIFY(err == 0 && fd != NULL);

      // Guard the write: ATLVERIFY only asserts in debug builds, so in a
      // release build a failed open would previously have dereferenced NULL.
      if (fd != NULL)
      {
         USES_CONVERSION;
         // fputs, not fprintf(fd, html): the page content may contain '%'
         // characters, which fprintf would interpret as format specifiers
         // (undefined behavior / corrupted output).
         fputs(T2CA(cszHtml), fd);
         fclose(fd);
      }
   }

   // navigate to page
   CComPtr<IWebBrowser2> spWebBrowser2;
   HRESULT hr = GetWebBrowser2(spWebBrowser2);
   if (SUCCEEDED(hr))
   {
      // Build a file:/// URL; the browser expects forward slashes.
      CString cszURL = _T("file:///") + cszFilename;
      cszURL.Replace(_T("\\"), _T("/"));

      CComBSTR bstrURL = cszURL;

      CComVariant varFlags(static_cast<int>(navNoHistory | navNoWriteToCache));

      hr = spWebBrowser2->Navigate(bstrURL, &varFlags, NULL, NULL, NULL);
   }

   // Delete the previously used temp file and remember the new one.
   if (!m_cszHtmlFilename.IsEmpty())
      DeleteFile(m_cszHtmlFilename);
   m_cszHtmlFilename = cszFilename;
}
Пример #3
0
// Releases any previously held planes and recomputes plane geometry for the
// clip format described by viInfo. Returns false when the clip has no usable
// dimensions. NOTE(review): this snippet is truncated here - only the 4:2:0
// branch of the format handling is visible.
bool PlanarFrame::allocSpace(VideoInfo &viInfo)
{
	// Free in reverse order of allocation; myalignedfree presumably
	// tolerates NULL pointers - confirm against its definition.
	myalignedfree(planar_4);
	myalignedfree(planar_3);
	myalignedfree(planar_2);
	myalignedfree(planar_1);
	alloc_ok=false;

	// Cache format properties (high-bit-depth aware).
	grey = viInfo.IsY();
	isRGBPfamily = viInfo.IsPlanarRGB() || viInfo.IsPlanarRGBA();
	isAlphaChannel = viInfo.IsYUVA() || viInfo.IsPlanarRGBA();
	pixelsize = (uint8_t)viInfo.ComponentSize(); // AVS16
	bits_per_pixel = (uint8_t)viInfo.BitsPerComponent();
	
	// Reset geometry before recomputing it per-format.
	ypitch = uvpitch = 0;
	ywidth = uvwidth = 0;
	yheight = uvheight = 0;

	int height = viInfo.height;
	int width = viInfo.width;
	if ((height==0) || (width==0)) return(false);
	if (viInfo.Is420())
	{
		// Pitch is padded and rounded up to the platform alignment.
		ypitch = modnpf((int)pixelsize*(width+MIN_PAD),MIN_ALIGNMENT);
		ywidth = width;
		yheight = height;
		// 4:2:0 chroma planes are half size in both dimensions.
		width >>= 1;
		height >>= 1;
		uvpitch = modnpf((int)pixelsize*(width+MIN_PAD),MIN_ALIGNMENT);
		uvwidth = width;
		uvheight = height;
	}
Пример #4
0
// Frees any existing Y/U/V planes and (re)allocates them for the clip format.
// Returns false on allocation failure. NOTE(review): this snippet is
// truncated here - only the YV12 branch is visible, and earlier-freed planes
// are not re-nulled on a later failure path within this excerpt.
bool PlanarFrame::allocSpace(VideoInfo &viInfo)
{
	if (y != NULL) { _aligned_free(y); y = NULL; }
	if (u != NULL) { _aligned_free(u); u = NULL; }
	if (v != NULL) { _aligned_free(v); v = NULL; }
	int height = viInfo.height;
	int width = viInfo.width;
	if (viInfo.IsYV12())
	{
		// Pitch is padded and rounded up to the platform alignment.
		ypitch = modnpf(width+MIN_PAD,MIN_ALIGNMENT);
		ywidth = width;
		yheight = height;
		// YV12 chroma planes are half size in both dimensions.
		width >>= 1;
		height >>= 1;
		uvpitch = modnpf(width+MIN_PAD,MIN_ALIGNMENT);
		uvwidth = width;
		uvheight = height;
		y = (unsigned char*)_aligned_malloc(ypitch*yheight,MIN_ALIGNMENT);
		if (y == NULL) return false;
		u = (unsigned char*)_aligned_malloc(uvpitch*uvheight,MIN_ALIGNMENT);
		if (u == NULL) return false;
		v = (unsigned char*)_aligned_malloc(uvpitch*uvheight,MIN_ALIGNMENT);
		if (v == NULL) return false;
		return true;
	}
Пример #5
0
void VideoSink::Redraw(const VideoInfo& aInfo) {
  AssertOwnerThread();

  // No video track, nothing to draw.
  if (!aInfo.IsValid() || !mContainer) {
    return;
  }

  auto now = TimeStamp::Now();

  RefPtr<VideoData> video = VideoQueue().PeekFront();
  if (video) {
    if (mBlankImage) {
      video->mImage = mBlankImage;
    }
    video->MarkSentToCompositor();
    mContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
    if (mSecondaryContainer) {
      mSecondaryContainer->SetCurrentFrame(video->mDisplay, video->mImage, now);
    }
    return;
  }

  // When we reach here, it means there are no frames in this video track.
  // Draw a blank frame to ensure there is something in the image container
  // to fire 'loadeddata'.

  RefPtr<Image> blank =
      mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
  mContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);

  if (mSecondaryContainer) {
    mSecondaryContainer->SetCurrentFrame(aInfo.mDisplay, blank, now);
  }
}
Пример #6
0
// Builds the mplayer invocation for piping the clip's audio as raw PCM into
// `command` (caller-supplied buffer). Returns 0 on success, -1 when the
// sample format cannot be expressed with the rawaudio demuxer options.
int AvxContext::MPlayerCommandAudio(char *command) { // This doesn't seem to work on my MPlayer
	if (vi.sample_type != SAMPLE_FLOAT) {
		sprintf(command,
				MPLAYER " -demuxer rawaudio -rawaudio channels=%d:rate=%d:samplesize=%d:format=0 - 1> /dev/null",
				vi.nchannels, vi.audio_samples_per_second, vi.BytesPerChannelSample());
		return 0;
	}
	AVXLOG_ERROR("%s", "Cannot pipe float audio to mplayer");
	return -1;
}
Пример #7
0
void EMU_copyMasterBuffer()
{
	video.srcBuffer = (u8*)GPU_screen;
	
	//convert pixel format to 32bpp for compositing
	//why do we do this over and over? well, we are compositing to
	//filteredbuffer32bpp, and it needs to get refreshed each frame..
	const int size = video.size();
	u16* src = (u16*)video.srcBuffer;
    u32* dest = video.buffer;
    for(int i=0;i<size;++i)
        *dest++ = 0xFF000000 | RGB15TO32_NOALPHA(src[i]);
	
}
Пример #8
0
// Builds the mplayer command line for piping the clip's video as raw frames
// into `command` (caller-supplied buffer). Returns 0 on success, -1 for
// colorspaces that cannot be piped raw.
int AvxContext::MPlayerCommandVideo(char *command) {
	const char *format;
	bool flipVertical;

	if (vi.IsRGB24()) {
		format = "bgr24";
		flipVertical = true;  // RGB frames are stored bottom-up.
	} else if (vi.IsRGB32()) {
		format = "bgr32";
		flipVertical = true;
	} else if (vi.IsYUY2()) {
		format = "yuy2";
		flipVertical = false;
	} else if (vi.IsYV12()) {
		format = "yv12";
		flipVertical = false;
	} else {
		AVXLOG_ERROR("%s", "Unsupported colorspace");
		return -1;
	}

	sprintf(command, MPLAYER " %s -demuxer rawvideo -rawvideo w=%d:h=%d:format=%s - 1> /dev/null",
		flipVertical ? "-flip" : "", vi.width, vi.height, format);
	return 0;
}
Пример #9
0
/////////////
// Get frame
// Fetches frame n from the source clip and, when the PRS file has subtitle
// data for that frame, renders it into a writable copy of the frame.
PVideoFrame __stdcall DrawPRS::GetFrame(int n, IScriptEnvironment* env) {
    // Avisynth frame
    PVideoFrame avsFrame = child->GetFrame(n,env);

    try {
        // Check if there is anything to be drawn
        if (file.HasDataAtFrame(n)) {
            // Create the PRSFrame structure
            env->MakeWritable(&avsFrame);
            PRSVideoFrame frame;
            frame.data[0] = (char*) avsFrame->GetWritePtr();
            // NOTE(review): /4 assumes 4 bytes per pixel (RGB32); the other
            // colorspaces selected below would need different math - confirm.
            frame.w = avsFrame->GetRowSize()/4;
            frame.h = avsFrame->GetHeight();
            frame.pitch = avsFrame->GetPitch();
            frame.flipColors = true;
            frame.flipVertical = true;

            // Set colorspace
            VideoInfo vi = child->GetVideoInfo();
            if (vi.IsYV12()) frame.colorSpace = ColorSpace_YV12;
            else if (vi.IsYUY2()) frame.colorSpace = ColorSpace_YUY2;
            else if (vi.IsRGB32()) frame.colorSpace = ColorSpace_RGB32;
            else if (vi.IsRGB24()) frame.colorSpace = ColorSpace_RGB24;

            // Draw into the frame
            file.DrawFrame(n,&frame);
        }
    }

    // Catch exception
    catch (const std::exception &e) {
        // ThrowError treats its first argument as a printf-style format
        // string, so e.what() must be passed through "%s" - forwarding it
        // directly would misbehave on any '%' in the message.
        env->ThrowError("%s", e.what());
    }

    // Return frame
    return avsFrame;
}
Пример #10
0
// Creates a software VPX decoder for the given config. Returns null (empty
// already_AddRefed) when the MIME type is not a VPX codec.
already_AddRefed<MediaDataDecoder>
AgnosticDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
                                          layers::LayersBackend aLayersBackend,
                                          layers::ImageContainer* aImageContainer,
                                          FlushableTaskQueue* aVideoTaskQueue,
                                          MediaDataDecoderCallback* aCallback)
{
  RefPtr<MediaDataDecoder> decoder;

  if (VPXDecoder::IsVPX(aConfig.mMimeType)) {
    decoder = new VPXDecoder(*aConfig.GetAsVideoInfo(),
                             aImageContainer,
                             aVideoTaskQueue,
                             aCallback);
  }

  return decoder.forget();
}
Пример #11
0
void
VideoSink::Redraw(const VideoInfo& aInfo)
{
  AssertOwnerThread();

  // No video track, nothing to draw.
  if (!aInfo.IsValid() || !mContainer) {
    return;
  }

  if (VideoQueue().GetSize() > 0) {
    RenderVideoFrames(1);
    return;
  }

  // When we reach here, it means there are no frames in this video track.
  // Draw a blank frame to ensure there is something in the image container
  // to fire 'loadeddata'.
  RefPtr<Image> blank =
    mContainer->GetImageContainer()->CreatePlanarYCbCrImage();
  mContainer->SetCurrentFrame(aInfo.mDisplay, blank, TimeStamp::Now());
}
Пример #12
0
/// @brief Load audio from an Avisynth clip: downmix to one channel, convert
///        to 16-bit samples, optionally resample, then cache the clip and
///        its audio properties on this provider.
/// @param _clip AVSValue expected to hold a clip with an audio track.
/// @throws agi::AudioDataNotFoundError when the clip has no audio.
void AvisynthAudioProvider::LoadFromClip(AVSValue _clip) {
	AVSValue script;

	// Check if it has audio
	VideoInfo vi = _clip.AsClip()->GetVideoInfo();
	if (!vi.HasAudio()) throw agi::AudioDataNotFoundError("No audio found.", 0);

	IScriptEnvironment *env = avs_wrapper.GetEnv();

	// Convert to one channel using the downmixer function named in options.
	char buffer[1024];
	strcpy(buffer,lagi_wxString(OPT_GET("Audio/Downmixer")->GetString()).mb_str(csConvLocal));
	script = env->Invoke(buffer, _clip);

	// Convert to 16 bits per sample
	script = env->Invoke("ConvertAudioTo16bit", script);
	vi = script.AsClip()->GetVideoInfo();

	// Convert sample rate; low-rate audio is forced up to 44.1 kHz.
	int setsample = OPT_GET("Provider/Audio/AVS/Sample Rate")->GetInt();
	if (vi.SamplesPerSecond() < 32000) setsample = 44100;
	if (setsample != 0) {
		AVSValue args[2] = { script, setsample };
		script = env->Invoke("ResampleAudio", AVSValue(args,2));
	}

	// Set clip
	PClip tempclip = script.AsClip();
	vi = tempclip->GetVideoInfo();

	// Read properties of the fully converted clip into this provider.
	channels = vi.AudioChannels();
	num_samples = vi.num_audio_samples;
	sample_rate = vi.SamplesPerSecond();
	bytes_per_sample = vi.BytesPerAudioSample();
	float_samples = false;

	clip = tempclip;
}
Пример #13
0
// Dispatch to the video or audio writer; an Avisynth clip is guaranteed to
// carry at least one of the two streams.
int AvxContext::Output() {
	return vi.HasVideo() ? OutputVideo() : OutputAudio();
}
Пример #14
0
// Initializes (or reuses) an AviSynth environment wrapped in a SafeStruct,
// then optionally invokes `func` (typically "Import") with the string `arg`:
// configures MT before/after import as requested, converts video/audio to
// the formats requested in *vi, and fills *vi with the clip's properties.
// Returns 0 on success; positive codes / AVS_GERROR on failure, with the
// error text stored in pstr->err.
int __stdcall dimzon_avs_init(SafeStruct** ppstr, char *func, char *arg, AVSDLLVideoInfo *vi)
{
	SafeStruct* pstr = NULL;

	if(!*ppstr)
	{
		// First call: allocate the wrapper and load avisynth.dll dynamically.
		pstr = ((SafeStruct*)malloc(sizeof(SafeStruct)));
		*ppstr = pstr;
		memset(pstr, 0, sizeof(SafeStruct));

		pstr->dll = LoadLibrary("avisynth.dll");
		if(!pstr->dll)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Cannot load avisynth.dll", _TRUNCATE);
			return 1;
		}

		IScriptEnvironment* (*CreateScriptEnvironment)(int version) = (IScriptEnvironment*(*)(int)) GetProcAddress(pstr->dll, "CreateScriptEnvironment");
		if(!CreateScriptEnvironment)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Cannot load CreateScriptEnvironment", _TRUNCATE);
			return 2;
		}

		pstr->env = CreateScriptEnvironment(AVISYNTH_INTERFACE_VERSION);

		if (pstr->env == NULL)
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "Required Avisynth 2.5", _TRUNCATE);
			return 3;
		}
	}
	else
	{
		pstr = *ppstr;
	}

	pstr->err[0] = 0;

	// Called only to obtain ppstr
	if (!func || strlen(func) == 0 || !arg)
		return 0;

	try
	{
		AVSValue arg(arg); // shadows the char* parameter, wrapping it as an AVSValue
		AVSValue res;

		if (vi != NULL && vi->mt_import == MT_DISABLED)
		{
			// If requested, disable MT - before the import
			try { pstr->env->Invoke("SetMTMode", 0); }
			catch (IScriptEnvironment::NotFound) { /*AviSynth without MT*/ }
		}

		res = pstr->env->Invoke(func, AVSValue(&arg, 1));
		// NOTE(review): *ppstr was assigned above; unclear how it could be
		// NULL here - possibly guards against concurrent teardown. Confirm.
		if(!*ppstr) return 1;

		if (!res.IsClip())
		{
			strncpy_s(pstr->err, ERRMSG_LEN, "The script's return was not a video clip.", _TRUNCATE);
			return 4;
		}

		if (vi != NULL && (vi->mt_import == MT_ADDDISTR || vi->mt_import == MT_ADDM1DISTR))
		{
			try
			{
				// If requested, add Distributor() - after the import
				AVSValue mt_test = pstr->env->Invoke("GetMTMode", false);
				const int mt_mode = mt_test.IsInt() ? mt_test.AsInt() : 0;
				if (mt_mode > 0 && mt_mode < 5)
				{
					if (mt_mode != 1 && vi->mt_import == MT_ADDM1DISTR)
						pstr->env->Invoke("SetMTMode", 1);

					res = pstr->env->Invoke("Distributor", res);
				}
			}
			catch (IScriptEnvironment::NotFound) { /*AviSynth without MT*/ }

			if (!res.IsClip())
			{
				strncpy_s(pstr->err, ERRMSG_LEN, "After adding \"Distributor()\" the script's return was not a video clip.", _TRUNCATE);
				return 4;
			}
		}

		pstr->clp = res.AsClip();
		VideoInfo inf = pstr->clp->GetVideoInfo();

		if (inf.HasVideo())
		{
			string filter = "";
			string err_string = "";

			// Original and Requested PixelType
			if (vi != NULL) vi->pixel_type_orig = inf.pixel_type;
			int pixel_type_req = (vi != NULL) ? vi->pixel_type : 0;

			// Pick the conversion filter needed to reach the requested format.
			if (pixel_type_req == 0) { /*Output video as is, no checks or conversions*/ }
			else if (pixel_type_req == inf.CS_BGR32) { if (!inf.IsRGB32()) { filter = "ConvertToRGB32"; err_string = "AviSynthWrapper: Cannot convert video to RGB32!"; }}
			else if (pixel_type_req == inf.CS_BGR24) { if (!inf.IsRGB24()) { filter = "ConvertToRGB24"; err_string = "AviSynthWrapper: Cannot convert video to RGB24!"; }}
			else if (pixel_type_req == inf.CS_YUY2) { if (!inf.IsYUY2()) { filter = "ConvertToYUY2"; err_string = "AviSynthWrapper: Cannot convert video to YUY2!"; }}
			else if (pixel_type_req == inf.CS_YV12) { if (!inf.IsYV12()) { filter = "ConvertToYV12"; err_string = "AviSynthWrapper: Cannot convert video to YV12!"; }}
			else if (pixel_type_req == inf.CS_I420) { if (!inf.IsYV12()) { filter = "ConvertToYV12"; err_string = "AviSynthWrapper: Cannot convert video to YV12!"; }}
			else
			{
				//"2.5 Baked API will see all new planar as YV12"
				// YV411, YV24, YV16 and Y8 are reported as YV12 by IsYV12()
				strncpy_s(pstr->err, ERRMSG_LEN, "AviSynthWrapper: Requested PixelType isn't valid or such conversion is not yet implemented!", _TRUNCATE);
				return 5;
			}

			if (filter.length() > 0)
			{
				res = pstr->env->Invoke(filter.c_str(), AVSValue(&res, 1));

				pstr->clp = res.AsClip();
				VideoInfo infh = pstr->clp->GetVideoInfo();

				// Verify the conversion actually produced the requested format.
				if (pixel_type_req == inf.CS_BGR32 && !infh.IsRGB32() ||
					pixel_type_req == inf.CS_BGR24 && !infh.IsRGB24() ||
					pixel_type_req == inf.CS_YUY2 && !infh.IsYUY2() ||
					pixel_type_req == inf.CS_YV12 && !infh.IsYV12() ||
					pixel_type_req == inf.CS_I420 && !infh.IsYV12())
				{
					strncpy_s(pstr->err, ERRMSG_LEN, err_string.c_str(), _TRUNCATE);
					return 5;
				}
			}
		}

		if (inf.HasAudio())
		{
			string filter = "";
			string err_string = "";

			// Original and Requested SampleType
			if (vi != NULL) vi->sample_type_orig = inf.sample_type;
			int sample_type_req = (vi != NULL) ? vi->sample_type : 0;

			if (sample_type_req == 0) { /*Output audio as is, no checks or conversions*/ }
			else if (sample_type_req == SAMPLE_FLOAT) { if (inf.sample_type != SAMPLE_FLOAT) { filter = "ConvertAudioToFloat"; err_string = "AviSynthWrapper: Cannot convert audio to FLOAT!"; }}
			else if (sample_type_req == SAMPLE_INT32) { if (inf.sample_type != SAMPLE_INT32) { filter = "ConvertAudioTo32bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT32!"; }}
			else if (sample_type_req == SAMPLE_INT24) { if (inf.sample_type != SAMPLE_INT24) { filter = "ConvertAudioTo24bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT24!"; }}
			else if (sample_type_req == SAMPLE_INT16) { if (inf.sample_type != SAMPLE_INT16) { filter = "ConvertAudioTo16bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT16!"; }}
			else if (sample_type_req == SAMPLE_INT8) { if (inf.sample_type != SAMPLE_INT8) { filter = "ConvertAudioTo8bit"; err_string = "AviSynthWrapper: Cannot convert audio to INT8!"; }}
			else
			{
				strncpy_s(pstr->err, ERRMSG_LEN, "AviSynthWrapper: Requested SampleType isn't valid or such conversion is not yet implemented!", _TRUNCATE);
				return 6;
			}

			if (filter.length() > 0)
			{
				res = pstr->env->Invoke(filter.c_str(), res);

				pstr->clp = res.AsClip();
				VideoInfo infh = pstr->clp->GetVideoInfo();

				// Verify the conversion actually produced the requested format.
				if (sample_type_req == SAMPLE_FLOAT && infh.sample_type != SAMPLE_FLOAT ||
					sample_type_req == SAMPLE_INT32 && infh.sample_type != SAMPLE_INT32 ||
					sample_type_req == SAMPLE_INT24 && infh.sample_type != SAMPLE_INT24 ||
					sample_type_req == SAMPLE_INT16 && infh.sample_type != SAMPLE_INT16 ||
					sample_type_req == SAMPLE_INT8 && infh.sample_type != SAMPLE_INT8)
				{
					strncpy_s(pstr->err, ERRMSG_LEN, err_string.c_str(), _TRUNCATE);
					return 6;
				}
			}
		}

		// Publish the final (possibly converted) clip's properties to the caller.
		inf = pstr->clp->GetVideoInfo();
		if (vi != NULL) {
			vi->width   = inf.width;
			vi->height  = inf.height;
			vi->raten   = inf.fps_numerator;
			vi->rated   = inf.fps_denominator;
			vi->field_based = (inf.IsFieldBased()) ? 1 : 0;
			vi->first_field = (inf.IsTFF()) ? 1 : (inf.IsBFF()) ? 2 : 0;
			vi->num_frames = inf.num_frames;

			//if (vi->pixel_type == 0) vi->pixel_type = inf.pixel_type;
			//if (vi->sample_type == 0) vi->sample_type = inf.sample_type;
			vi->pixel_type = inf.pixel_type;
			vi->sample_type = inf.sample_type;

			vi->audio_samples_per_second = inf.audio_samples_per_second;
			vi->num_audio_samples        = inf.num_audio_samples;
			vi->nchannels                = inf.nchannels;
		}

		// Do we even need this res?!
		if(pstr->res) delete pstr->res;
		pstr->res = new AVSValue(res);

		pstr->err[0] = 0;
		return 0;
	}
	catch(AvisynthError err)
	{
		strncpy_s(pstr->err, ERRMSG_LEN, err.msg, _TRUNCATE);
		return AVS_GERROR;
	}
}
Пример #15
0
// filmtester entry point: loads an AVS script, steps through every frame and
// reports intervals of >= duplicates_maxlength consecutive identical frames.
// Relies on file-scope globals declared elsewhere in this file: clip,
// prev_frame, dst, cs, CtrlHandler and the SPACES macro. Returns the number
// of duplication errors found (or -1 on usage errors).
int main(int argc, TCHAR* argv[]) {
	// Keep the machine awake for the duration of the (long) scan.
	SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED);

	printf("Usage: filmtester <avs filename> [duplicates_maxlength=2]\n");
	printf("The program plays the AVS file and tests for frame duplicates\n\n");

	int duplicates_maxlength = 2;
	if (argc < 2) {
		printf("No filename specified.\n\n");
		return -1;
	}
	if (argc > 2) {
		duplicates_maxlength = _ttoi(argv[2]);
		printf("INFO: duplicates_maxlength set to %d\n", duplicates_maxlength);
	}

	IScriptEnvironment *env = CreateScriptEnvironment();
	_tprintf(_T("Loading \"%s\" ...\n"), argv[1]);

	// Import the script; the returned value must be a clip.
	LPCSTR arg_names[1] = { nullptr };
	AVSValue arg_vals[1] = { (LPCSTR)argv[1] };
	clip = env->Invoke("import", AVSValue(arg_vals,1), arg_names).AsClip();

	printf("AVS file loaded successfully.\n\n");

	// Dump the clip's properties for the user before scanning.
	VideoInfo vi = clip->GetVideoInfo();
	printf("VideoInfo:\n");
	printf("-----------\n");
	if (vi.HasVideo()) {
		printf("width x height: %dx%d\n", vi.width, vi.height);
		printf("num_frames: %d\n", vi.num_frames);
		printf("fps: %d/%d\n", vi.fps_numerator, vi.fps_denominator);

		std::string colorspace;
		if (vi.pixel_type & VideoInfo::CS_BGR) colorspace += "BGR, ";
		if (vi.pixel_type & VideoInfo::CS_YUV) colorspace += "YUV, ";
		if (vi.pixel_type & VideoInfo::CS_INTERLEAVED) colorspace += "INTERLEAVED, ";
		if (vi.pixel_type & VideoInfo::CS_PLANAR) colorspace += "PLANAR, ";
		if (colorspace.length() > 0) colorspace.erase(colorspace.length()-2);
		printf("colorspace: %s\n", colorspace.c_str());

		std::string colorformat;
		if (vi.pixel_type & VideoInfo::CS_BGR24) colorformat += "BGR24, ";
		if (vi.pixel_type & VideoInfo::CS_BGR32) colorformat += "BGR32, ";
		if (vi.pixel_type & VideoInfo::CS_YUY2)  colorformat += "YUY2, ";
		if (vi.pixel_type & VideoInfo::CS_YV12)  colorformat += "YV12, ";
		if (vi.pixel_type & VideoInfo::CS_I420)  colorformat += "I420 (IYUV), ";
		if (colorformat.length() > 0)
			colorformat.erase(colorformat.length()-2);
		else
			colorformat = "UNKNOWN";
		printf("colorformat: %s\n", colorformat.c_str());

		std::string imagetype;
		if (vi.image_type & VideoInfo::IT_BFF) imagetype += "BFF, ";
		if (vi.image_type & VideoInfo::IT_TFF) imagetype += "TFF, ";
		if (vi.image_type & VideoInfo::IT_FIELDBASED)  imagetype += "FIELDBASED, ";
		if (imagetype.length() > 0)
			imagetype.erase(imagetype.length()-2);
		else
			imagetype = "UNKNOWN";
		printf("image_type: %s\n", imagetype.c_str());
		printf("bits per pixel: %d\n", vi.BitsPerPixel());
	}
	else
		printf("NO VIDEO\n");

	if (vi.HasAudio()) {
		printf("audio channels: %d\n", vi.nchannels);
		printf("sample_type: %x\n", vi.sample_type);
		printf("samples per second: %d\n", vi.audio_samples_per_second);
		printf("bytes per channel sample: %d\n", vi.BytesPerChannelSample());
		printf("bytes per audio sample: %d\n", vi.BytesPerAudioSample());
		printf("num_audio_samples: %lld\n", vi.num_audio_samples);
	}
	else
		printf("NO AUDIO\n");
	printf("-----------\n\n");

	if (!vi.HasVideo()) {
		printf("Can't start video playback for the sequence without video.\n\n");
		return -1;
	}

	printf("Starting playback ...\n");
	// Frame 0 seeds the comparison; its buffer size is assumed constant
	// across all frames of the clip.
	prev_frame = clip->GetFrame(0, env);
	int framesize = prev_frame->GetFrameBuffer()->GetDataSize();
	printf("INFO: framesize = %d bytes.\n\n", framesize);

	// The critical section guards the frame references against the console
	// Ctrl handler installed below.
	InitializeCriticalSection(&cs);
	SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);

	int error_count = 0;
	int dup_start_frame = 0;
	bool flag_dup = false;
	std::vector<std::pair<int, int>> duplicates;
	for(int i=1; i<vi.num_frames; ++i) {
		EnterCriticalSection(&cs);
		dst = clip->GetFrame(i, env);
		// A byte-identical frame buffer counts as a duplicate frame.
		const BYTE *src_ptr = prev_frame->GetFrameBuffer()->GetReadPtr();
		const BYTE *dst_ptr = dst->GetFrameBuffer()->GetReadPtr();
		if (!memcmp(src_ptr, dst_ptr, framesize)) {
			if (!flag_dup) {
				flag_dup = true;
				dup_start_frame = i-1;
			}
		}
		else if (flag_dup) {
			// Run of duplicates just ended; report it if long enough.
			int length = (i-1) - dup_start_frame;
			if (length >= duplicates_maxlength) {
				printf("\rfilmtester: duplication interval: %d..%d" SPACES "\n", dup_start_frame, i-1);
				duplicates.push_back(std::make_pair(dup_start_frame, i-1));
				error_count++;
			}
			flag_dup = false;
		}
		prev_frame = dst;
		LeaveCriticalSection(&cs);
		printf("\r[%5.1f%%] [%d errors] %d/%d frame processing", (float)((i+1)*100)/vi.num_frames, error_count, i+1, vi.num_frames);
	}

	EnterCriticalSection(&cs);
	// Flush a duplicate run that extends to the last frame.
	if (flag_dup) {
		int i = vi.num_frames;
		int length = (i-1) - dup_start_frame;
		if (length >= duplicates_maxlength) {
			printf("\rfilmtester: duplication interval: %d..%d" SPACES "\n", dup_start_frame, i-1);
			duplicates.push_back(std::make_pair(dup_start_frame, i-1));
			error_count++;
		}
		flag_dup = false;
	}
	printf("\rProcessing completed." SPACES "\n\n");
	printf("%d errors\n", error_count);
	printf("\n");
	if (error_count > 0) {
		printf("Erroneous intervals (%d):\n", duplicates.size());
		for(auto it = duplicates.begin(); it != duplicates.end(); ++it)
			printf("%5d .. %d\n", it->first, it->second);
		printf("\n");
	}
	// Drop frame/clip references before tearing down the critical section.
	dst = nullptr;
	prev_frame = nullptr;
	clip = nullptr;

	LeaveCriticalSection(&cs);
	DeleteCriticalSection(&cs);
	return error_count;
}
Пример #16
0
// Streams the clip's video frames either to stdout or to an mplayer process
// started via popen(). Frames are de-padded plane by plane (planar) or as a
// whole (packed) into writeBuffer before writing. Returns 0 on success, -1
// on failure; SIGPIPE is ignored for the duration and restored on exit.
int AvxContext::OutputVideo() {
	FILE *sink;
	unsigned char *writeBuffer = NULL;
	sighandler_t old_sigpipe = signal(SIGPIPE, SIG_IGN);

	if (launchMPlayer) {
		char command[1024];
		if (MPlayerCommandVideo(command))
			return -1;
		AVXLOG_INFO("MPlayer command line: %s", command);

		sink = popen(command, "w");
		if (!sink) {
			AVXLOG_ERROR("%s", "Error starting mplayer");
			return -1;
		}
	} else {
		sink = stdout;
	}

	// One full packed frame; large enough for any single plane as well.
	writeBuffer = (unsigned char *)malloc(vi.RowSize() * vi.height);
	if (!writeBuffer) {
		AVXLOG_ERROR("%s", "Unable to allocate memory");
		goto fail;
	}

	try {
		for (int i = 0; i < vi.num_frames; ++i) {
			// Stop early if the mplayer end of the pipe went away.
			if (launchMPlayer && (feof(sink) || ferror(sink))) {
				AVXLOG_ERROR("%s", "mplayer process exited");
				break;
			}
			PVideoFrame frame = clip->GetFrame(i, avx_library.env);
			if (vi.IsPlanar()) { // Check plane count in 2.6.
				// Emit Y then V then U; BitBlt packs each plane tightly
				// (pitch becomes row_size) before writing.
				int planes[] = {PLANAR_Y, PLANAR_V, PLANAR_U};
				for (int j = 0; j < 3; ++j) {
					int plane = planes[j];
					int src_pitch = frame->GetPitch(plane);
					int row_size = frame->GetRowSize(plane);
					int height = frame->GetHeight(plane);
					const unsigned char *srcp = frame->GetReadPtr(plane);

					avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
					fwrite(writeBuffer, 1, row_size * height, sink);
				}
			} else {
				int src_pitch = frame->GetPitch();
				int row_size = frame->GetRowSize();
				int height = frame->GetHeight();
				const unsigned char *srcp = frame->GetReadPtr();

				avx_library.env->BitBlt(writeBuffer, row_size, srcp, src_pitch, row_size, height);
				fwrite(writeBuffer, 1, row_size * height, sink);
			}
		}
	} catch (AvisynthError &e) {
		AVXLOG_ERROR("AvisynthError: %s", e.msg);
		goto fail;
	}

	free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return 0;

fail:
	// Shared error path: release resources and restore the SIGPIPE handler.
	if (writeBuffer)
		free(writeBuffer);
	if (launchMPlayer)
		pclose(sink);
	signal(SIGPIPE, old_sigpipe);
	return -1;
};
Пример #17
0
// Horizontal chroma subsampling (as reported for the U plane) of the clip;
// packed and non-YUV formats have none and report 0.
static int GetSubSamplingW(const VideoInfo &vi) {
    const bool planarYuv = vi.IsYUV() && vi.IsPlanar();
    return planarYuv ? vi.GetPlaneWidthSubsampling(PLANAR_U) : 0;
}
Пример #18
0
// Invokes an AviSynth function with up to 10 arguments supplied as strings.
// Each argument is heuristically classified and converted: "last" -> the
// current clip, "true"/"false" -> bool, digits with '.' -> float, digits ->
// int, anything else -> string. Non-clip results are reported through
// *func_out (strings additionally via pstr->err); a clip result updates
// pstr->clp / pstr->res and fills *vi with the clip's properties.
// Returns 0 on success, AVS_GERROR / AVS_VARNFOUND on failure.
int __stdcall dimzon_avs_invoke(SafeStruct* pstr, char *func, char **arg, int len, AVSDLLVideoInfo *vi, float* func_out)
{
	try
	{
		*func_out = -FLT_MAX;
		pstr->err[0] = 0;

		const int N = 10;
		int actual_len = 0;

		AVSValue args[N] = { };
		if (len == 0) args[0] = 0;
		else if (len > N) len = N;

		for(int i = 0; i < len; i++)
		{
			if (strlen(arg[i]) > 0)
			{
				string lower = arg[i];
				bool was_letters = false;
				bool was_digits = false;
				bool was_spaces = false;

				// Overly long values are definitely text: the classification
				// loop is skipped entirely for strings longer than 10 chars.
				for (unsigned int n = 0; n < lower.size() && lower.size() <= 10; n++)
				{
					lower[n] = tolower(lower[n]);
					if (!was_letters && isalpha(lower[n])) was_letters = true;
					if (!was_digits && isdigit(lower[n])) was_digits = true;
					if (!was_spaces && isspace(lower[n])) was_spaces = true;
				}

				if (i == 0 && was_letters && !was_digits && !was_spaces && lower.compare("last") == 0)
				{
					//Clip (last)
					if(!pstr->clp) throw AvisynthError("AviSynthWrapper: The \"last\" clip was requested, but it doesn't exist!");
					args[actual_len] = pstr->clp; //pstr->res->AsClip();
					actual_len += 1;

					//pstr->clp; pstr->res->AsClip(); // with processing from previous Invoke calls
					//pstr->env->GetVar("last").AsClip(); // the "raw" script output
				}
				else if (was_letters && !was_digits && !was_spaces && lower.compare("true") == 0)
				{
					//Bool (true)
					args[actual_len] = true;
					actual_len += 1;
				}
				else if (was_letters && !was_digits && !was_spaces && lower.compare("false") == 0)
				{
					//Bool (false)
					args[actual_len] = false;
					actual_len += 1;
				}
				else if (!was_letters && was_digits && !was_spaces && lower.find(".") != string::npos)
				{
					//Float (double..)
					args[actual_len] = atof(arg[i]);
					actual_len += 1;
				}
				else if (!was_letters && was_digits && !was_spaces)
				{
					//Integer
					args[actual_len] = atoi(arg[i]);
					actual_len += 1;
				}
				else
				{
					//String
					args[actual_len] = arg[i];
					actual_len += 1;
				}
			}
		}

		AVSValue res = pstr->env->Invoke(func, AVSValue(args, actual_len));

		if (!res.IsClip())
		{
			// Report the (scalar) result through *func_out.
			if (res.IsBool())
			{ 
				if(!res.AsBool()) *func_out = 0;
				else *func_out = FLT_MAX;
			}
			else if (res.IsInt()) *func_out = (float)res.AsInt();
			else if (res.IsFloat()) *func_out = (float)res.AsFloat();
			else if (res.IsString()) { *func_out = FLT_MAX; strncpy_s(pstr->err, ERRMSG_LEN, res.AsString(), _TRUNCATE); }
		}
		else
		{
			pstr->clp = res.AsClip();
			VideoInfo inf = pstr->clp->GetVideoInfo();

			if (vi != NULL)
			{
				vi->width   = inf.width;
				vi->height  = inf.height;
				vi->raten   = inf.fps_numerator;
				vi->rated   = inf.fps_denominator;
				vi->field_based = (inf.IsFieldBased()) ? 1 : 0;
				vi->first_field = (inf.IsTFF()) ? 1 : (inf.IsBFF()) ? 2 : 0;
				vi->num_frames = inf.num_frames;

				// Or leave them unchanged?
				if (vi->pixel_type_orig == 0) vi->pixel_type_orig = inf.pixel_type;
				if (vi->sample_type_orig == 0) vi->sample_type_orig = inf.sample_type;

				vi->pixel_type = inf.pixel_type;
				vi->sample_type = inf.sample_type;

				vi->num_audio_samples        = inf.num_audio_samples;
				vi->audio_samples_per_second = inf.audio_samples_per_second;
				vi->nchannels                = inf.nchannels;
			}

			// Do we even need this res?!
			if(pstr->res) delete pstr->res;
			pstr->res = new AVSValue(res);

			pstr->err[0] = 0;
		}

		return 0;
	}
	catch(const AvisynthError &err) // catch by const reference, not by value
	{
		strncpy_s(pstr->err, ERRMSG_LEN, err.msg, _TRUNCATE);
		return AVS_GERROR;
	}
	catch(IScriptEnvironment::NotFound)
	{
		strncpy_s(pstr->err, ERRMSG_LEN, "AviSynthWrapper: Wrong function name or invalid parameters was passed to Invoke!", _TRUNCATE);
		return AVS_VARNFOUND; // was missing the ';' - did not compile
	}
}
Пример #19
0
// Exposes the shared video buffer; when outSize is non-NULL it receives the
// buffer's size as reported by video.size().
void* EMU_getVideoBuffer(size_t *outSize)
{
    if (outSize != NULL)
        *outSize = video.size();
    return video.buffer;
}
Пример #20
0
Файл: emu.cpp Проект: DD4ve/iNDS
// One-time emulator initialization for iNDS: path/settings load, add-on pak
// selection, NDS core bring-up, 3D and sound cores, firmware defaults
// (nickname/message/language), video filter and the speed-throttle state.
// `lang` selects the firmware language; negative values fall back to English.
void EMU_init(int lang)
{
	//oglrender_init = android_opengl_init;
	
	path.ReadPathSettings();
	// Sanitize an out-of-range persisted screen layout.
	if (video.layout > 2)
	{
		video.layout = video.layout_old = 0;
	}
	
	EMU_loadSettings();
    
	Desmume_InitOnce();
	//gpu_SetRotateScreen(video.rotation);
	NDS_FillDefaultFirmwareConfigData(&fw_config);
	//Hud.reset();
	
	INFO("Init NDS");
	/*
	switch (slot1_device_type)
	{
		case NDS_SLOT1_NONE:
		case NDS_SLOT1_RETAIL:
		case NDS_SLOT1_R4:
		case NDS_SLOT1_RETAIL_NAND:
			break;
		default:
			slot1_device_type = NDS_SLOT1_RETAIL;
			break;
	}
	*/
	// Validate the configured add-on pak; unknown values fall back to NONE.
	switch (addon_type)
	{
        case NDS_ADDON_NONE:
            break;
        case NDS_ADDON_CFLASH:
            break;
        case NDS_ADDON_RUMBLEPAK:
            break;
        case NDS_ADDON_GBAGAME:
            if (!strlen(GBAgameName))
            {
                addon_type = NDS_ADDON_NONE;
                break;
            }
            // TODO: check for file exist
            break;
        case NDS_ADDON_GUITARGRIP:
            break;
        case NDS_ADDON_EXPMEMORY:
            break;
        case NDS_ADDON_PIANO:
            break;
        case NDS_ADDON_PADDLE:
            break;
        default:
            addon_type = NDS_ADDON_NONE;
            break;
	}
    
	//!slot1Change((NDS_SLOT1_TYPE)slot1_device_type);
	addonsChangePak(addon_type);
    
	NDS_Init();
	
	//osd->singleScreen = true;
	cur3DCore = 1;
	NDS_3D_ChangeCore(cur3DCore); //OpenGL
	
	LOG("Init sound core\n");
	// Buffer size DESMUME_SAMPLE_RATE*8/60 - presumably ~8 frames of audio
	// at 60 fps; confirm against the SPU implementation.
	SPU_ChangeSoundCore(SNDCORE_COREAUDIO, DESMUME_SAMPLE_RATE*8/60);
	
	static const char* nickname = "iNDS"; //TODO: Add firmware cfg in settings
	fw_config.nickname_len = strlen(nickname);
	for(int i = 0 ; i < fw_config.nickname_len ; ++i)
		fw_config.nickname[i] = nickname[i];
    
	static const char* message = "iNDS is the best!";
	fw_config.message_len = strlen(message);
	for(int i = 0 ; i < fw_config.message_len ; ++i)
		fw_config.message[i] = message[i];
	
	fw_config.language = lang < 0 ? NDS_FW_LANG_ENG : lang;
	fw_config.fav_colour = 15;
	fw_config.birth_month = 2;
	fw_config.birth_day = 17;
	fw_config.ds_type = NDS_CONSOLE_TYPE_LITE;
    
	video.setfilter(video.NONE); //figure out why this doesn't seem to work (also add to cfg)
	
	NDS_CreateDummyFirmware(&fw_config);
	
	InitSpeedThrottle();
	
	mainLoopData.freq = 1000;
	mainLoopData.lastticks = GetTickCount();
}
Пример #21
0
// One-time emulator initialization (nds4ios variant): path/settings load,
// add-on pak selection, NDS core bring-up, 3D and sound cores, hard-coded
// firmware defaults (nickname/message, English language), video filter and
// the speed-throttle state.
void EMU_init()
{
	//oglrender_init = android_opengl_init;
	
	path.ReadPathSettings();
	// Sanitize an out-of-range persisted screen layout.
	if (video.layout > 2)
	{
		video.layout = video.layout_old = 0;
	}
	
	EMU_loadSettings();
    
	Desmume_InitOnce();
	//gpu_SetRotateScreen(video.rotation);
	NDS_FillDefaultFirmwareConfigData(&fw_config);
	//Hud.reset();
	
	INFO("Init NDS");
	/*
	switch (slot1_device_type)
	{
		case NDS_SLOT1_NONE:
		case NDS_SLOT1_RETAIL:
		case NDS_SLOT1_R4:
		case NDS_SLOT1_RETAIL_NAND:
			break;
		default:
			slot1_device_type = NDS_SLOT1_RETAIL;
			break;
	}
	*/
	// Validate the configured add-on pak; unknown values fall back to NONE.
	switch (addon_type)
	{
        case NDS_ADDON_NONE:
            break;
        case NDS_ADDON_CFLASH:
            break;
        case NDS_ADDON_RUMBLEPAK:
            break;
        case NDS_ADDON_GBAGAME:
            if (!strlen(GBAgameName))
            {
                addon_type = NDS_ADDON_NONE;
                break;
            }
            // TODO: check for file exist
            break;
        case NDS_ADDON_GUITARGRIP:
            break;
        case NDS_ADDON_EXPMEMORY:
            break;
        case NDS_ADDON_PIANO:
            break;
        case NDS_ADDON_PADDLE:
            break;
        default:
            addon_type = NDS_ADDON_NONE;
            break;
	}
    
	//!slot1Change((NDS_SLOT1_TYPE)slot1_device_type);
	addonsChangePak(addon_type);
    
	NDS_Init();
	
	//osd->singleScreen = true;
	cur3DCore = 1;
	NDS_3D_ChangeCore(cur3DCore); //OpenGL
	
	LOG("Init sound core\n");
	// Buffer size DESMUME_SAMPLE_RATE*8/60 - presumably ~8 frames of audio
	// at 60 fps; confirm against the SPU implementation.
	SPU_ChangeSoundCore(SNDCORE_COREAUDIO, DESMUME_SAMPLE_RATE*8/60);
	
	static const char* nickname = "emozilla";
	fw_config.nickname_len = strlen(nickname);
	for(int i = 0 ; i < fw_config.nickname_len ; ++i)
		fw_config.nickname[i] = nickname[i];
    
	static const char* message = "nds4ios makes you happy!";
	fw_config.message_len = strlen(message);
	for(int i = 0 ; i < fw_config.message_len ; ++i)
		fw_config.message[i] = message[i];
	
	// 1 = English (NDS firmware language index) - confirm against enum.
	fw_config.language = 1;
    
	video.setfilter(video.NONE);
	
	NDS_CreateDummyFirmware(&fw_config);
	
	InitSpeedThrottle();
	
	mainLoopData.freq = 1000;
	mainLoopData.lastticks = GetTickCount();
}
Пример #22
0
// VFAPI entry point: fills a VF_StreamInfo_Video or VF_StreamInfo_Audio
// struct for the opened file. `in` is really a vfMI*; D2V-type sources use
// the cached VideoInfo pointer, otherwise the wrapped clip is queried.
// Returns VF_OK on success, VF_ERROR on bad arguments / missing streams.
extern "C" HRESULT __stdcall get_stream_info(VF_FileHandle in, DWORD stream, void *out)
{
	if (stream == VF_STREAM_VIDEO)
	{
		LPVF_StreamInfo_Video info = (LPVF_StreamInfo_Video)out;

		if (info == NULL)
			return VF_ERROR;

		// Reject callers compiled against a different struct layout.
		if (info->dwSize != sizeof(VF_StreamInfo_Video))
			return VF_ERROR;

		vfMI *i = (vfMI*)in;

		if (i->type == D2V_TYPE)
		{
			info->dwLengthL = i->vi->num_frames;
			// Guard against a zero denominator before reporting the rate.
			if (i->vi->fps_denominator) info->dwRate = i->vi->fps_numerator;
			else info->dwRate = 0;
			info->dwScale = i->vi->fps_denominator;
			info->dwWidth = i->vi->width;
			info->dwHeight = i->vi->height;
		}
		else
		{
			const VideoInfo vit = (*i->clip)->GetVideoInfo();
			info->dwLengthL = vit.num_frames;
			if (vit.fps_denominator) info->dwRate = vit.fps_numerator;
			else info->dwRate = 0;
			info->dwScale = vit.fps_denominator;
			info->dwWidth = vit.width;
			info->dwHeight = vit.height;
		}
		info->dwBitCount = 24;
	}
	else if (stream == VF_STREAM_AUDIO)
	{
		LPVF_StreamInfo_Audio info = (LPVF_StreamInfo_Audio)out;

		if (info == NULL)
			return VF_ERROR;

		if (info->dwSize != sizeof(VF_StreamInfo_Audio))
			return VF_ERROR;

		vfMI *i = (vfMI*)in;

		// D2V sources carry no audio track.
		if (i->type == D2V_TYPE)
			return VF_ERROR;
		else
		{
			const VideoInfo vit = (*i->clip)->GetVideoInfo();
			if (!vit.HasAudio()) return VF_ERROR;
			// NOTE(review): num_audio_samples is 64-bit; the cast truncates
			// for very long clips - the VFAPI field is only 32 bits wide.
			info->dwLengthL = (unsigned long)vit.num_audio_samples;
			info->dwChannels = vit.nchannels;
			info->dwRate = vit.audio_samples_per_second * vit.BytesPerAudioSample();
			info->dwScale = vit.BytesPerAudioSample();
			info->dwBitsPerSample = vit.BytesPerChannelSample()*8;
			info->dwBlockAlign = vit.BytesPerAudioSample();
		}
	}
	else return VF_ERROR;

	return VF_OK;
}