Example #1
crude_time X(get_crude_time)(void)
{
     crude_time tv;
     QueryPerformanceCounter(&tv);
     return tv;
}
Example #2
	void Impl_Timer::start(void)
	{
		QueryPerformanceCounter(&_tstart);
	}
Example #3
// Creation
void VoxGame::Create()
{
	m_pRenderer = NULL;
	m_pGameCamera = NULL;
	m_pQubicleBinaryManager = NULL;
	m_pPlayer = NULL;

	m_pVoxApplication = new VoxApplication();
	m_pVoxWindow = new VoxWindow();

	// Create application and window
	m_pVoxApplication->Create();
	m_pVoxWindow->Create();

	/* Setup the FPS and deltatime counters */
	QueryPerformanceCounter(&m_fpsPreviousTicks);
	QueryPerformanceCounter(&m_fpsCurrentTicks);
	QueryPerformanceFrequency(&m_fpsTicksPerSecond);
	m_deltaTime = 0.0f;
	m_fps = 0.0f;

	/* Create the renderer */
	m_windowWidth = m_pVoxWindow->GetWindowWidth();
	m_windowHeight = m_pVoxWindow->GetWindowHeight();
	m_pRenderer = new Renderer(m_windowWidth, m_windowHeight, 32, 8);

	/* Create the GUI */
	m_pGUI = new OpenGLGUI(m_pRenderer);

	/* Create cameras */
	m_pGameCamera = new Camera(m_pRenderer);
	m_pGameCamera->SetPosition(vec3(0.0f, 1.25f, 3.0f));
	m_pGameCamera->SetFacing(vec3(0.0f, 0.0f, -1.0f));
	m_pGameCamera->SetUp(vec3(0.0f, 1.0f, 0.0f));
	m_pGameCamera->SetRight(vec3(1.0f, 0.0f, 0.0f));

	/* Create viewports */
	m_pRenderer->CreateViewport(0, 0, m_windowWidth, m_windowHeight, 60.0f, &m_defaultViewport);

	/* Create fonts */
	m_pRenderer->CreateFreeTypeFont("media/fonts/arial.ttf", 12, &m_defaultFont);

	/* Create lights */
	m_defaultLightPosition = vec3(3.0f, 3.0f, 3.0f);
	m_defaultLightView = vec3(0.0f, 0.0f, 0.0f);
	m_pRenderer->CreateLight(Colour(1.0f, 1.0f, 1.0f, 1.0f), Colour(1.0f, 1.0f, 1.0f, 1.0f), Colour(0.0f, 0.0f, 0.0f, 1.0f),
							 m_defaultLightPosition, m_defaultLightView - m_defaultLightPosition, 0.0f, 0.0f, 0.5f, 0.35f, 0.05f, true, false, &m_defaultLight);

	/* Create materials */
	m_pRenderer->CreateMaterial(Colour(1.0f, 1.0f, 1.0f, 1.0f), Colour(1.0f, 1.0f, 1.0f, 1.0f), Colour(1.0f, 1.0f, 1.0f, 1.0f), Colour(0.0f, 0.0f, 0.0f, 1.0f), 64, &m_defaultMaterial);

	/* Create the frame buffers */
	bool frameBufferCreated = false;
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "SSAO", &m_SSAOFrameBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 5.0f, "Shadow", &m_shadowFrameBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "Deferred Lighting", &m_lightingFrameBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "Transparency", &m_transparencyFrameBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "FXAA", &m_FXAAFrameBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "FullScreen 1st Pass", &m_firstPassFullscreenBuffer);
	frameBufferCreated = m_pRenderer->CreateFrameBuffer(-1, true, true, true, true, m_windowWidth, m_windowHeight, 1.0f, "FullScreen 2nd Pass", &m_secondPassFullscreenBuffer);

	/* Create the shaders */
	m_defaultShader = -1;
	m_phongShader = -1;
	m_SSAOShader = -1;
	m_shadowShader = -1;
	m_lightingShader = -1;
	m_textureShader = -1;
	m_fxaaShader = -1;
	m_blurVerticalShader = -1;
	m_blurHorizontalShader = -1;
	m_pRenderer->LoadGLSLShader("media/shaders/default.vertex", "media/shaders/default.pixel", &m_defaultShader);
	m_pRenderer->LoadGLSLShader("media/shaders/phong.vertex", "media/shaders/phong.pixel", &m_phongShader);
	m_pRenderer->LoadGLSLShader("media/shaders/shadow.vertex", "media/shaders/shadow.pixel", &m_shadowShader);
	m_pRenderer->LoadGLSLShader("media/shaders/texture.vertex", "media/shaders/texture.pixel", &m_textureShader);
	m_pRenderer->LoadGLSLShader("media/shaders/fullscreen/SSAO.vertex", "media/shaders/fullscreen/SSAO.pixel", &m_SSAOShader);
	m_pRenderer->LoadGLSLShader("media/shaders/fullscreen/fxaa.vertex", "media/shaders/fullscreen/fxaa.pixel", &m_fxaaShader);
	m_pRenderer->LoadGLSLShader("media/shaders/fullscreen/lighting.vertex", "media/shaders/fullscreen/lighting.pixel", &m_lightingShader);
	m_pRenderer->LoadGLSLShader("media/shaders/fullscreen/blur_vertical.vertex", "media/shaders/fullscreen/blur_vertical.pixel", &m_blurVerticalShader);
	m_pRenderer->LoadGLSLShader("media/shaders/fullscreen/blur_horizontal.vertex", "media/shaders/fullscreen/blur_horizontal.pixel", &m_blurHorizontalShader);

	/* Create the qubicle binary file manager */
	m_pQubicleBinaryManager = new QubicleBinaryManager(m_pRenderer);

	/* Create the lighting manager */
	m_pLightingManager = new LightingManager(m_pRenderer);

	/* Create the block particle manager */
	m_pBlockParticleManager = new BlockParticleManager(m_pRenderer);

	/* Create the player */
	m_pPlayer = new Player(m_pRenderer, m_pQubicleBinaryManager, m_pLightingManager, m_pBlockParticleManager);

	/* Create the GUI components */
	CreateGUI();

	// Keyboard movement
	m_bKeyboardForward = false;
	m_bKeyboardBackward = false;
	m_bKeyboardStrafeLeft = false;
	m_bKeyboardStrafeRight = false;
	m_bKeyboardLeft = false;
	m_bKeyboardRight = false;
	m_bKeyboardUp = false;
	m_bKeyboardDown = false;
	m_bKeyboardSpace = false;

	// Camera movement
	m_bCameraRotate = false;
	m_bCameraZoom = false;
	m_pressedX = 0;
	m_pressedY = 0;	
	m_currentX = 0;
	m_currentY = 0;

	// Toggle flags
	m_deferredRendering = true;
	m_modelWireframe = false;
	m_modelAnimationIndex = 0;
	m_multiSampling = true;
	m_ssao = true;
	m_blur = false;
	m_shadows = true;
	m_dynamicLighting = true;
	m_animationUpdate = true;
	m_fullscreen = false;
}
Example #4
pfloat toc(timer* t)
{
	QueryPerformanceCounter(&t->toc);
	return ((t->toc.QuadPart - t->tic.QuadPart) / (pfloat)t->freq.QuadPart);
}
Example #5
int gettimeofday(struct timeval* tp, int* /*tz*/) {
  static LARGE_INTEGER tickFrequency, epochOffset;

  static Boolean isInitialized = False;

  LARGE_INTEGER tickNow;

#if !defined(_WIN32_WCE)
  QueryPerformanceCounter(&tickNow);
#else
  tickNow.QuadPart = GetTickCount();
#endif
 
  if (!isInitialized) {
    if(1 == InterlockedIncrement(&initializeLock_gettimeofday)) {
#if !defined(_WIN32_WCE)
      // For our first call, use "ftime()", so that we get a time with a proper epoch.
      // For subsequent calls, use "QueryPerformanceCounter()", because it's more fine-grained.
      struct timeb tb;
      ftime(&tb);
      tp->tv_sec = tb.time;
      tp->tv_usec = 1000*tb.millitm;

      // Also get our counter frequency:
      QueryPerformanceFrequency(&tickFrequency);
#else
      /* FILETIME of Jan 1 1970 00:00:00. */
      const LONGLONG epoch = 116444736000000000LL;
      FILETIME fileTime;
      LARGE_INTEGER time;
      GetSystemTimeAsFileTime(&fileTime);

      time.HighPart = fileTime.dwHighDateTime;
      time.LowPart = fileTime.dwLowDateTime;

      // convert from 100ns units to a unix timestamp in seconds (1000*1000*10)
      tp->tv_sec = (long)((time.QuadPart - epoch) / 10000000L);

      /*
        GetSystemTimeAsFileTime has just a seconds resolution,
        that's why the WinCE version of gettimeofday is not 100% accurate; usec accuracy would be calculated like this:
        // convert 100 nanoseconds to usec
        tp->tv_usec= (long)((time.QuadPart - epoch)%10000000L) / 10L;
      */
      tp->tv_usec = 0;

      // resolution of GetTickCount() is always milliseconds
      tickFrequency.QuadPart = 1000;
#endif     
      // compute an offset to add to subsequent counter times, so we get a proper epoch:
      epochOffset.QuadPart
          = tp->tv_sec * tickFrequency.QuadPart + (tp->tv_usec * tickFrequency.QuadPart) / 1000000L - tickNow.QuadPart;
      
      // next caller can use ticks for time calculation
      isInitialized = True; 
      return 0;
    } else {
        InterlockedDecrement(&initializeLock_gettimeofday);
        // wait until first caller has initialized static values
        while(!isInitialized){
          Sleep(1);
        }
    }
  }

  // adjust our tick count so that we get a proper epoch:
  tickNow.QuadPart += epochOffset.QuadPart;

  tp->tv_sec =  (long)(tickNow.QuadPart / tickFrequency.QuadPart);
  tp->tv_usec = (long)(((tickNow.QuadPart % tickFrequency.QuadPart) * 1000000L) / tickFrequency.QuadPart);

  return 0;
}
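/* Usage sketch (not part of the original source): the emulated gettimeofday() above is
   typically called twice around a piece of work and the two timevals subtracted; do_work()
   below is a hypothetical placeholder workload. */
static void do_work(void)
{
  volatile long i;
  for (i = 0; i < 1000000; i++)
    ;
}

static long elapsed_microseconds(void)
{
  struct timeval before, after;

  gettimeofday(&before, NULL);   /* the first call also initializes the epoch offset */
  do_work();
  gettimeofday(&after, NULL);

  return (after.tv_sec - before.tv_sec) * 1000000L
       + (after.tv_usec - before.tv_usec);
}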
Example #6
void Timer::stop()
{
	QueryPerformanceCounter(&m_end);
	m_running = false;
}
Example #7
void make_session_key(char *key, char *seed, int mode)
{
   int j, k;
   struct MD5Context md5c;
   unsigned char md5key[16], md5key1[16];
   char s[1024];

#define ss sizeof(s)

   s[0] = 0;
   if (seed != NULL) {
     bstrncat(s, seed, sizeof(s));
   }

   /* The following creates a seed for the session key generator
     based on a collection of volatile and environment-specific
     information unlikely to be vulnerable (as a whole) to an
     exhaustive search attack.  If one of these items isn't
     available on your machine, replace it with something
     equivalent or, if you like, just delete it. */

#if defined(HAVE_WIN32)
   {
      LARGE_INTEGER     li;
      DWORD             length;
      FILETIME          ft;

      bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)GetCurrentProcessId());
      (void)getcwd(s + strlen(s), 256);
      bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)GetTickCount());
      QueryPerformanceCounter(&li);
      bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)li.LowPart);
      GetSystemTimeAsFileTime(&ft);
      bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)ft.dwLowDateTime);
      bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)ft.dwHighDateTime);
      length = 256;
      GetComputerName(s + strlen(s), &length);
      length = 256;
      GetUserName(s + strlen(s), &length);
   }
#else
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getpid());
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getppid());
   (void)getcwd(s + strlen(s), 256);
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)clock());
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)time(NULL));
#if defined(Solaris)
   sysinfo(SI_HW_SERIAL,s + strlen(s), 12);
#endif
#if defined(HAVE_GETHOSTID)
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t) gethostid());
#endif
   gethostname(s + strlen(s), 256);
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getuid());
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)getgid());
#endif
   MD5Init(&md5c);
   MD5Update(&md5c, (uint8_t *)s, strlen(s));
   MD5Final(md5key, &md5c);
   bsnprintf(s + strlen(s), ss, "%lu", (uint32_t)((time(NULL) + 65121) ^ 0x375F));
   MD5Init(&md5c);
   MD5Update(&md5c, (uint8_t *)s, strlen(s));
   MD5Final(md5key1, &md5c);
#define nextrand    (md5key[j] ^ md5key1[j])
   if (mode) {
     for (j = k = 0; j < 16; j++) {
        unsigned char rb = nextrand;

#define Rad16(x) ((x) + 'A')
        key[k++] = Rad16((rb >> 4) & 0xF);
        key[k++] = Rad16(rb & 0xF);
#undef Rad16
        if (j & 1) {
           key[k++] = '-';
        }
     }
     key[--k] = 0;
   } else {
     for (j = 0; j < 16; j++) {
Example #8
int main2(int argc, char **arg){
	char* error;
	int n = 3;

	/*start OpenGL*/
	initOpenGL();

	/*testing system compatibility*/
	if ((error = test()) != 0){
		printf("Error: %s\n", error);
		return -1;
	}

	/*initializing system.*/
	if (!init()){
		printf("Init not successful...");
		return -1;
	}

	/*create layers using the sigmoid_sum fragment program.*/
	A = generateLayer("sigmoid_sum_masked.fp", 4, 40, 0);
	B = generateLayer("sigmoid_sum_masked.fp", 40, 16, 0);
	C = generateLayer("sigmoid_sum_masked.fp", 40, 22, 16);
	D = generateLayer("sigmoid_sum_masked.fp", 38, 5, 0);
	E = generateLayer(0, 5, 0, 0);

	setOutput(A, B);
	setInput(C, A);
	setOutput(B, D);
	setOutput(C, D);
	setOutput(D, E);

	/*dummy values.*/
	/*fill vectors with values.*/

	fillWeights(A);
	copyWeightsToTexture(weight_matrix, A);
	copyMaskToTexture(mask_matrix, A);
	free(weight_matrix);
	free(mask_matrix);

	fillWeights(B);
	copyWeightsToTexture(weight_matrix, B);
	copyMaskToTexture(mask_matrix, B);
	free(weight_matrix);
	free(mask_matrix);

	fillWeights(C);
	copyWeightsToTexture(weight_matrix, C);
	copyMaskToTexture(mask_matrix, C);
	free(weight_matrix);
	free(mask_matrix);

	fillWeights(D);
	copyWeightsToTexture(weight_matrix, D);
	copyMaskToTexture(mask_matrix, D);
	free(mask_matrix);
	free(weight_matrix);

	/*Execute the network N times.*/
	while (n-->0){
		fillVector(A);
		/*glFinish(); //finish all operations before starting the clock*/
#ifdef WIN32
		QueryPerformanceCounter(&start);
#else
		start = clock();
#endif
		copyVectorToTexture(input, A);
		run(A);
		run(B);
		run(C);
		run(D);
		printLayer(E);
		/*glFinish(); //finish all operations before stopping the clock*/
#ifdef WIN32
		QueryPerformanceCounter(&end);
		QueryPerformanceFrequency( &freq );
		printf("Time in s:%f\n", ((double)(end.QuadPart - start.QuadPart))/(double)freq.QuadPart);
#else
		end = clock();
		run_time = (end-start)/CLOCKS_PER_SEC*1000;
		printf("Time in ms: %d\n", (int)run_time);
#endif
		free(input);
	}

	/*clean up*/
	destroyLayer(A);
	destroyLayer(B);
	destroyLayer(C);
	destroyLayer(D);
	destroyLayer(E);

	return 0;
}
Example #9
	void end()
	{
		QueryPerformanceCounter(&m_end);
		QueryPerformanceFrequency(&m_freq);
	}
Example #10
File: main.cpp Project: Jadela/GT1
//Find out the current tick count
unsigned long long GetPerformanceTicks()
{
    LARGE_INTEGER lValue;
    QueryPerformanceCounter(&lValue);
    return lValue.QuadPart;
}
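// Conversion sketch (not part of the GT1 project): GetPerformanceTicks() returns raw ticks,
// so turning a tick delta into seconds needs the counter frequency. TicksToSeconds is a
// hypothetical helper added here for illustration only.
double TicksToSeconds(unsigned long long startTicks, unsigned long long endTicks)
{
    LARGE_INTEGER frequency;
    QueryPerformanceFrequency(&frequency);   // ticks per second; constant while the system runs
    return (double)(endTicks - startTicks) / (double)frequency.QuadPart;
}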
Example #11
	void Update()
	{
		Start = Stop;
		QueryPerformanceCounter(&Stop);
	}
Example #12
	CallTimeRecorder (char* name)
	{
		m_name = name;
		QueryPerformanceCounter(&m_start);
	}
Example #13
TimeVal getPerfTime()
{
	__int64 count;
	QueryPerformanceCounter((LARGE_INTEGER*)&count);
	return count;
}
Example #14
static unsigned int
osip_fallback_random_number ()
#endif
{
  if (!random_seed_set)
    {
      unsigned int ticks;

#ifdef __PALMOS__
#	if __PALMOS__ < 0x06000000
      SysRandom ((Int32) TimGetTicks ());
#	else
      struct timeval tv;

      gettimeofday (&tv, NULL);
      srand (tv.tv_usec);
      ticks = tv.tv_sec + tv.tv_usec;
#	endif
#elif defined(WIN32)
      LARGE_INTEGER lCount;

      QueryPerformanceCounter (&lCount);
      ticks = lCount.LowPart + lCount.HighPart;
#elif defined(_WIN32_WCE)
      ticks = GetTickCount ();
#elif defined(__PSOS__)
#elif defined(__VXWORKS_OS__)
      struct timespec tp;

      clock_gettime (CLOCK_REALTIME, &tp);
      ticks = tp.tv_sec + tp.tv_nsec;
#else
      struct timeval tv;
      int fd;

      gettimeofday (&tv, NULL);
      ticks = tv.tv_sec + tv.tv_usec;
      fd = open ("/dev/urandom", O_RDONLY);
      if (fd > 0)
        {
          unsigned int r;
          int i;

          for (i = 0; i < 512; i++)
            {
              read (fd, &r, sizeof (r));
              ticks += r;
            }
          close (fd);
        }
#endif

#ifdef HAVE_LRAND48
      srand48 (ticks);
#else
      srand (ticks);
#endif
      random_seed_set = 1;
    }
#ifdef HAVE_LRAND48
  return lrand48 ();
#else
  return rand ();
#endif
}
Example #15
/// <summary>
/// Handle new body data
/// <param name="nTime">timestamp of frame</param>
/// <param name="nBodyCount">body data count</param>
/// <param name="ppBodies">body data in frame</param>
/// </summary>
void CBodyBasics::ProcessBody(INT64 nTime, int nBodyCount, IBody** ppBodies)
{
    if (m_hWnd)
    {
        HRESULT hr = EnsureDirect2DResources();

        if (SUCCEEDED(hr) && m_pRenderTarget && m_pCoordinateMapper)
        {
            m_pRenderTarget->BeginDraw();
            m_pRenderTarget->Clear();

            RECT rct;
            GetClientRect(GetDlgItem(m_hWnd, IDC_VIDEOVIEW), &rct);
            int width = rct.right;
            int height = rct.bottom;

            for (int i = 0; i < nBodyCount; ++i)
            {
                IBody* pBody = ppBodies[i];
                if (pBody)
                {
                    BOOLEAN bTracked = false;
                    hr = pBody->get_IsTracked(&bTracked);

                    if (SUCCEEDED(hr) && bTracked)
                    {
                        Joint joints[JointType_Count]; 
                        D2D1_POINT_2F jointPoints[JointType_Count];
                        HandState leftHandState = HandState_Unknown;
                        HandState rightHandState = HandState_Unknown;

                        pBody->get_HandLeftState(&leftHandState);
                        pBody->get_HandRightState(&rightHandState);

                        hr = pBody->GetJoints(_countof(joints), joints);
                        if (SUCCEEDED(hr))
                        {
                            for (int j = 0; j < _countof(joints); ++j)
                            {
                                jointPoints[j] = BodyToScreen(joints[j].Position, width, height);
                            }

                            DrawBody(joints, jointPoints);

                            DrawHand(leftHandState, jointPoints[JointType_HandLeft]);
                            DrawHand(rightHandState, jointPoints[JointType_HandRight]);
                        }
						
                    }
                }
            }

            hr = m_pRenderTarget->EndDraw();

            // Device lost, need to recreate the render target
            // We'll dispose it now and retry drawing
            if (D2DERR_RECREATE_TARGET == hr)
            {
                hr = S_OK;
                DiscardDirect2DResources();
            }
        }
		FILE *fp;
		// The fopen calls below are only existence checks, so close the handles to avoid leaking them every frame
		if ((fp = fopen("C:\\Users\\tuchiyama\\Desktop\\SpeechBasics-D2D\\speech.txt", "r")) != NULL) {
			fclose(fp);
			if ((fp = fopen("body.txt", "r")) != NULL) {
				fclose(fp);
				std::ifstream ifsa("C:\\Users\\tuchiyama\\Desktop\\SpeechBasics-D2D\\speech.txt");
				std::string linea;
				int si = 0;
				while (std::getline(ifsa, linea)) {
					si++;
				}
				std::ifstream ifsb("body.txt");
				std::string lineb;
				int bi = 0;
				while (std::getline(ifsb, lineb)) {
					bi++;
				}
				if (si > bi) {
					isWrite = true;
					std::ofstream ofs("body.txt", std::ios::app);
					for (int j = 0; j < si - bi; j++) {
						ofs << "start" << std::endl;
					}
				}
			}
		}
		if ((fp = fopen("C:\\Users\\tuchiyama\\Desktop\\SpeechBasics-D2D\\delete.txt", "r")) != NULL) {
			fclose(fp);
			if ((fp = fopen("delete.txt", "r")) != NULL) {
				fclose(fp);
				std::ifstream ifsa("C:\\Users\\tuchiyama\\Desktop\\SpeechBasics-D2D\\delete.txt");
				std::string linea;
				int si = 0;
				while (std::getline(ifsa, linea)) {
					si++;
				}
				std::ifstream ifsb("delete.txt");
				std::string lineb;
				int bi = 0;
				while (std::getline(ifsb, lineb)) {
					bi++;
				}
				if (si > bi) {
					system("ruby C:\\Users\\tuchiyama\\Documents\\odorimming\\make_html.rb undo");
					std::ofstream ofs("delete.txt", std::ios::app);
					for (int j = 0; j < si - bi; j++) {
						ofs << "delete" << std::endl;
					}
				}
			}
		}

        if (!m_nStartTime)
        {
            m_nStartTime = nTime;
        }

        double fps = 0.0;

        LARGE_INTEGER qpcNow = {0};
        if (m_fFreq)
        {
            if (QueryPerformanceCounter(&qpcNow))
            {
                if (m_nLastCounter)
                {
                    m_nFramesSinceUpdate+=1;
                    fps = m_fFreq * m_nFramesSinceUpdate / double(qpcNow.QuadPart - m_nLastCounter);
                }
            }
        }

        WCHAR szStatusMessage[64];
        StringCchPrintf(szStatusMessage, _countof(szStatusMessage), L" FPS = %0.2f    Time = %I64d", fps, (nTime - m_nStartTime));

        if (SetStatusMessage(szStatusMessage, 1000, false))
        {
            m_nLastCounter = qpcNow.QuadPart ;
            m_nFramesSinceUpdate = 0;
        }
    }
}
Example #16
static int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}
Example #17
PxU64 Time::getCurrentCounterValue()
{
	LARGE_INTEGER ticks;
	QueryPerformanceCounter (&ticks);
	return (PxU64)ticks.QuadPart;
}
Example #18
/**
* \brief         This will classify the messages, which can be one of Rx, Tx or
*                Error messages. In case of Err messages this identifies under
*                what broader category (Rx / Tx) this occurs.
* \param[in]     msgInf LIN message information polled from the bus
* \param[out]    sLinData Application specific data format
* \return        TRUE (always)
* \authors       [email protected]
* \date          05.29.2015 Created
*/
static INT bClassifyMsgType(LinMessageInfo* msgInf, STLINDATA& sLinData,unsigned char* msg,unsigned int flags,unsigned int len,unsigned int id,int chnindex)
{
    INT nHresult = S_OK;
    sLinData.m_lTickCount.QuadPart = ((LONGLONG)msgInf->timestamp / 100000);

    if (CREATE_MAP_TIMESTAMP == sg_byCurrState)
    {
        LARGE_INTEGER g_QueryTickCount;
        QueryPerformanceCounter(&g_QueryTickCount);
        UINT64 unConnectionTime;
        unConnectionTime = ((g_QueryTickCount.QuadPart * 10000) / sg_lnFrequency.QuadPart) - sg_TimeStamp;
        //Time difference should be +ve value
        if(sLinData.m_lTickCount.QuadPart >= unConnectionTime)
        {
            sg_TimeStamp  = (LONGLONG)(sLinData.m_lTickCount.QuadPart - unConnectionTime);
        }
        else
        {
            sg_TimeStamp  = (LONGLONG)(unConnectionTime - sLinData.m_lTickCount.QuadPart);
        }

        sg_byCurrState = CALC_TIMESTAMP_READY;
    }
    sLinData.m_lTickCount.QuadPart = (LONGLONG)(msgInf->timestamp * 10);
    sLinData.m_uDataInfo.m_sLINMsg.m_ucChannel = (UCHAR)chnindex +1;
    sLinData.m_uDataInfo.m_sErrInfo.m_ucChannel = (UCHAR)chnindex +1;
    sLinData.m_uDataInfo.m_sLINMsg.m_ulTimeStamp = sLinData.m_lTickCount.QuadPart;
    sLinData.m_uDataInfo.m_sErrInfo.m_ulTimeStamp = sLinData.m_lTickCount.QuadPart;
    sLinData.m_ucDataType   = (UCHAR)RX_FLAG;

    switch (flags)
    {
            // LIN events
        case LIN_NODATA:
        {
            sLinData.m_eLinMsgType = LIN_EVENT;
            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_ERRNOANS;
            // [email protected]
            // sLinData.m_uDataInfo.m_sErrInfo.m_ucId = xlEvent.tagData.linMsgApi.linNoAns.id;
        }
        break;
        case LIN_WAKEUP_FRAME:
        {
            sLinData.m_uDataInfo.m_sErrInfo.m_ucId = -1;
            sLinData.m_eLinMsgType = LIN_EVENT;
            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_WAKEUP;
            // [email protected]
            // sLinData.m_uDataInfo.m_sErrInfo.m_ucId = xlEvent.tagData.linMsgApi.linNoAns.id;
        }
        break;
        case LIN_RX:
        case LIN_TX:
        {
            sLinData.m_eLinMsgType = LIN_MSG;
            sLinData.m_uDataInfo.m_sLINMsg.m_ucMsgID   = id;
            if(flags == LIN_RX)
            {
                sLinData.m_ucDataType   = (UCHAR)RX_FLAG;
            }
            else
            {
                sLinData.m_ucDataType   = (UCHAR)TX_FLAG;
            }
            /* Copy the message data */
            memcpy(sLinData.m_uDataInfo.m_sLINMsg.m_ucData,
                   msg, len);
            sLinData.m_uDataInfo.m_sLINMsg.m_ucDataLen = len;
            break;
        }

        case LIN_PARITY_ERROR:
            sLinData.m_uDataInfo.m_sErrInfo.m_ucId = -1;
            sLinData.m_eLinMsgType = LIN_EVENT;

            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_ERRMSG;
            break;

        case LIN_SYNCH_ERROR:
            sLinData.m_uDataInfo.m_sErrInfo.m_ucId = -1;
            sLinData.m_eLinMsgType = LIN_EVENT;
            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_ERRSYNC;
            break;

        case LIN_CSUM_ERROR:
            sLinData.m_uDataInfo.m_sErrInfo.m_ucId = -1;
            sLinData.m_eLinMsgType = LIN_EVENT;

            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_ERRCRC;
            break;

        case LIN_BIT_ERROR:
            sLinData.m_uDataInfo.m_sErrInfo.m_ucId = -1;
            sLinData.m_eLinMsgType = LIN_EVENT;

            sLinData.m_uDataInfo.m_sErrInfo.m_eEventType = EVENT_LIN_ERRBIT;
            break;
        default:
            int a = 0;
            break;
    }

    return nHresult;
}
Example #19
static int APIENTRY WinMain( HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow ) {
	AllocConsole(); //create a console
	freopen( "conin$","r",stdin );
	freopen( "conout$","w",stdout );
	freopen( "conout$","w",stderr );
	printf( "Program Started, console initialized\n" );

	const char WindowName[] = "Wet Clay";

	int64 mSecsPerFrame = 16; //60FPS
	System system = { };
	system.windowHeight = 680;
	system.windowWidth = 1080;
	system.ReadWholeFile = &ReadWholeFile;
	system.GetMostRecentMatchingFile = &GetMostRecentMatchingFile;
	system.TrackFileUpdates = &TrackFileUpdates;
	system.DidFileUpdate = &DidFileUpdate;
	system.WriteFile = &WriteFile;
	system.HasFocus = &HasFocus;

	//Center position of window
	uint16 fullScreenWidth = GetSystemMetrics( SM_CXSCREEN );
	uint16 fullScreenHeight = GetSystemMetrics( SM_CYSCREEN );
	uint16 windowPosX = ( fullScreenWidth / 2 ) - (system.windowWidth / 2 );
	uint16 windowPosY = ( fullScreenHeight / 2 ) - (system.windowHeight / 2 );

	WNDCLASSEX wc = { };
	wc.cbSize = sizeof(WNDCLASSEX);
	wc.style = CS_OWNDC;
	wc.lpfnWndProc = WndProc;
	wc.cbClsExtra = 0;
	wc.cbWndExtra = 0;
	wc.hInstance = hInstance;
	wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
	wc.hCursor = LoadCursor(NULL, IDC_ARROW);
	wc.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
	wc.lpszMenuName = NULL;
	wc.lpszClassName = WindowName;
	wc.hIconSm = LoadIcon(NULL, IDI_APPLICATION);

	if( !RegisterClassEx( &wc ) ) {
		printf( "Failed to register class\n" );
		return -1;
	}

	EnableCrashingOnCrashes();
 
	// at this point WM_CREATE message is sent/received
	// the WM_CREATE branch inside WinProc function will execute here
	HWND hwnd = CreateWindowEx(
		0, 
		WindowName, 
		"Wet Clay App", 
		WS_BORDER, 
		windowPosX, 
		windowPosY, 
		system.windowWidth, 
		system.windowHeight,
		NULL, 
		NULL, 
		hInstance, 
		NULL
	);

	RECT windowRect = { };
	GetClientRect( hwnd, &windowRect );

	SetWindowLong( hwnd, GWL_STYLE, 0 );
	ShowWindow ( hwnd, SW_SHOWNORMAL );
	UpdateWindow( hwnd );

	LARGE_INTEGER timerResolution;
	BOOL canSupportHiResTimer = QueryPerformanceFrequency( &timerResolution );
	assert( canSupportHiResTimer );

	size_t systemsMemorySize = MEGABYTES( 8 );
	Stack gameSlab;
	gameSlab.size = SIZEOF_GLOBAL_HEAP + systemsMemorySize;
	gameSlab.start = VirtualAlloc( 
		NULL, 
		gameSlab.size, 
		MEM_COMMIT | MEM_RESERVE, 
		PAGE_EXECUTE_READWRITE 
	);
	gameSlab.current = gameSlab.start;
	assert( gameSlab.start != NULL );

	Stack systemsMemory = AllocateNewStackFromStack( &gameSlab, systemsMemorySize );

	InitWin32WorkerThread( &systemsMemory );

	GLRendererGlobalState glRendererStorage = { };
	globalGLRendererStorageRef = &glRendererStorage;
	GLRenderDriver glDriver = Win32InitGLRenderer( hwnd, &system, globalGLRendererStorageRef );
	Win32Sound win32Sound = Win32InitSound( hwnd, 60, &systemsMemory );

	void* imguistate = InitImGui_LimeStone(	&systemsMemory,	(RenderDriver*)&glDriver, system.windowWidth, system.windowHeight );

	printf( "Remaining System Memory: %Id\n", SPACE_IN_STACK( (&gameSlab) ) );

	LoadGameCode( &gameapp );
	assert( gameapp.GameInit != NULL );

	fileTracking = { };
	fileTracking.writeHead = &fileTracking.stringBuffer[0];

	int dllTrackingIndex = TrackFileUpdates( GAME_CODE_FILEPATH );
	assert( dllTrackingIndex != -1 );

	void* gameMemoryPtr = gameapp.GameInit( 
		&gameSlab, 
		(RenderDriver*)&glDriver,
		&win32Sound.driver,
		&system
	);

	appIsRunning = true;

	InputState inputSnapshot1 = { };
	InputState inputSnapshot2 = { };
	inputSnapshot1.prevState = &inputSnapshot2;
	inputSnapshot2.prevState = &inputSnapshot1;
	InputState* currentSnapshotStorage = &inputSnapshot1;

	MSG Msg;
	while( appIsRunning ) {
		if( DidFileUpdate( dllTrackingIndex ) ) {
			LoadGameCode( &gameapp );
		}

		static LARGE_INTEGER startTime;
		LARGE_INTEGER elapsedTime;
		LARGE_INTEGER lastTime = startTime;
		QueryPerformanceCounter( &startTime );

		elapsedTime.QuadPart = startTime.QuadPart - lastTime.QuadPart;
		elapsedTime.QuadPart *= 1000;
		elapsedTime.QuadPart /= timerResolution.QuadPart;

		//GAME LOOP
		if(	gameapp.UpdateAndRender != NULL && gameapp.MixSound != NULL ) {

			currentSnapshotStorage = currentSnapshotStorage->prevState;
		    QueryInput( 
		    	system.windowWidth, 
		    	system.windowHeight,
		    	windowPosX,
		    	windowPosY,
		    	currentSnapshotStorage
		    );
		    keypressHistoryIndex = 0;

		    UpdateImgui( currentSnapshotStorage, imguistate, system.windowWidth, system.windowHeight );

			appIsRunning = gameapp.UpdateAndRender( 
				gameMemoryPtr, 
				(float)elapsedTime.QuadPart, 
				currentSnapshotStorage,
				&win32Sound.driver,
				(RenderDriver*)&glDriver,
				&system,
				imguistate
			);

		    PushAudioToSoundCard( &gameapp, &win32Sound );

		    ImGui::Render();

			BOOL swapBufferSuccess = SwapBuffers( deviceContext );
			glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
		} else {
			printf( "Game code was not loaded...\n" );
		}

		//The Windows message pump is here, so if there is a system-level Close or
		//Destroy command, the UpdateAndRender method does not reset the appIsRunning
		//flag to true
		while( PeekMessage( &Msg, NULL, 0, 0, PM_REMOVE ) ) {
			TranslateMessage( &Msg );
			DispatchMessage( &Msg );
		}

		//End loop timer query
		LARGE_INTEGER endTime, computeTime;
		{
			QueryPerformanceCounter( &endTime );
			computeTime.QuadPart = endTime.QuadPart - startTime.QuadPart;
			computeTime.QuadPart *= 1000;
			computeTime.QuadPart /= timerResolution.QuadPart;
		}

		if( computeTime.QuadPart <= mSecsPerFrame ) {
			Sleep(mSecsPerFrame - computeTime.QuadPart );
		} else {
			printf("Didn't sleep, compute was %ld\n", computeTime.QuadPart );
		}
	};

	FreeConsole();

	return Msg.wParam;
}
Example #20
/**
* \brief         This function will connect the tool with hardware. This will
*                establish the data link between the application and hardware.
* \param[in]     bConnect TRUE to Connect, FALSE to Disconnect
* \return        Returns S_OK if successful otherwise corresponding Error code.
* \authors       [email protected]
* \date          05.29.2015 Created
*/
static int nConnect(BOOL bConnect)
{
    int nReturn = -1;
    LinStatus xlStatus;
    LinHandle  hnd;

    if (!sg_bIsConnected && bConnect) // Disconnected and to be connected
    {
        /* Set the permission mask for all channel access */
        for( UINT i = 0; i < sg_nNoOfChannels; i++)
        {
            //open LIN channel
            hnd =  linOpenChannel(sg_aodChannels[i].m_nChannel,sg_aodChannels[i].m_unLINMode);

            if (hnd >= 0)
            {
                sg_aodChannels[i].m_hnd = hnd;
            }
            else
            {
                return S_FALSE;
            }
        }
        nReturn = nSetBaudRate();
        if(nReturn == S_OK)
        {
            for( UINT i = 0; i < sg_nNoOfChannels; i++)
            {
                xlStatus = linBusOn(sg_aodChannels[i].m_hnd);
                if (xlStatus < 0)
                {
                    return S_FALSE;
                }
                else
                {
                    sg_aodChannels[i].m_nIsOnBus = 1;
                }
            }
            // Update configuration to restore the settings
            /* Transit into 'CREATE TIME MAP' state */
            sg_byCurrState = CREATE_MAP_TIMESTAMP;
            sg_bIsConnected = bConnect;
        }
    }
    else if (sg_bIsConnected && !bConnect) // Connected & to be disconnected
    {
        sg_bIsConnected = bConnect;
        Sleep(0); // Let other threads run for once
        nReturn = nDisconnectFromDriver();
    }
    else
    {
        nReturn = S_OK;
    }
    if ( sg_bIsConnected )
    {
        InitializeCriticalSection(&sg_CritSectForWrite);
        //Calculate connected Timestamp
        QueryPerformanceCounter(&sg_QueryTickCount);
        // Get frequency of the performance counter
        QueryPerformanceFrequency(&sg_lnFrequency);
        // Convert it to a time stamp with a granularity of hundreds of microseconds
        //if (sg_QueryTickCount.QuadPart * 10000 > sg_QueryTickCount.QuadPart)
        if ((sg_QueryTickCount.QuadPart * 10000) > sg_lnFrequency.QuadPart)
        {
            sg_TimeStamp = (sg_QueryTickCount.QuadPart * 10000) / sg_lnFrequency.QuadPart;
        }
        else
        {
            sg_TimeStamp = (sg_QueryTickCount.QuadPart / sg_lnFrequency.QuadPart) * 10000;
        }
    }
    else
    {
        DeleteCriticalSection(&sg_CritSectForWrite);
    }

    return nReturn;
}
Example #21
	void start()
	{
		QueryPerformanceCounter(&m_begin);
	}
Example #22
double CSystem::GetCPUSpeedGHz()
{
#ifdef _WIN64
	return 0;
#elif _WIN32
	// First holds the number of counter ticks, later the fixed time-interval value
	unsigned long int       ticks;
	// CPU time-stamp counter values at two fixed moments (raw counts)
	unsigned long int       stock0, stock1;
	// Difference between the two counter values, i.e. cycles elapsed over the fixed interval
	unsigned long int       cycles;
	// Measured frequencies; the average of 5 consecutive measurements is used for better precision
	unsigned long int       freq[5] = { 0, 0, 0, 0, 0 };
	// Loop count
	unsigned long int       nums = 0;
	// Sum of the measured frequencies
	unsigned long int       total = 0;
	LARGE_INTEGER       t0, t1;
	LARGE_INTEGER       countfreq;
	// Get the high-resolution counter frequency, i.e. counts per second
	if (!QueryPerformanceFrequency(&countfreq))
	{
		return 0.0f;
	}
	// Save the current process priority class
	DWORD priority_class = GetPriorityClass(GetCurrentProcess());
	// Save the current thread priority
	int   thread_priority = GetThreadPriority(GetCurrentThread());
	// Make the current process real-time
	SetPriorityClass(GetCurrentProcess(), REALTIME_PRIORITY_CLASS);
	// Raise the thread priority
	SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_TIME_CRITICAL);
	do
	{
		nums++;
		freq[4] = freq[3];
		freq[3] = freq[2];
		freq[2] = freq[1];
		freq[1] = freq[0];
		// Read the high-resolution counter value
		QueryPerformanceCounter(&t0);
		t1.LowPart = t0.LowPart;
		t1.HighPart = t0.HighPart;
		// The constant here and the 1000 in the later, matching loop are empirical values that control
		// the measurement interval; both can be tuned to get the best interval.
		while ((unsigned long int)t1.LowPart - (unsigned long int)t0.LowPart < 10)
		{
			QueryPerformanceCounter(&t1);
		}
		_asm
		{
			// RDTSC reads the CPU's internal time-stamp counter: a 64-bit value, high 32 bits in EDX, low 32 bits in EAX
			rdtsc
				// The high 32 bits do not change over such a short interval, so only EAX needs to be read
				mov stock0, EAX
		}
		// Reset the start moment
		t0.LowPart = t1.LowPart;
		t0.HighPart = t1.HighPart;
		while ((unsigned long int)t1.LowPart - (unsigned long int)t0.LowPart < 1000)
		{
			QueryPerformanceCounter(&t1);
		}
		_asm
		{
			rdtsc
				mov  stock1, EAX
		}
		cycles = stock1 - stock0;
		ticks = (unsigned long int) t1.LowPart - (unsigned long int) t0.LowPart;
		ticks = ticks * 1000000;
		ticks = ticks / countfreq.LowPart;
		if (ticks % countfreq.LowPart > countfreq.LowPart / 2)
		{
			// Round up so the value converges
			ticks++;
		}
		// Compute the frequency, in MHz
		freq[0] = cycles / ticks;
		if (cycles%ticks > ticks / 2)
		{
			// Round up so the value converges
			freq[0]++;
		}
		total = (freq[0] + freq[1] + freq[2] + freq[3] + freq[4]);
	} while ((nums < 5) || (nums < 100) && ((abs(5 * (long)freq[0] - (long)total) < 5)
		|| (abs(5 * (long)freq[1] - (long)total) < 5) || (abs(5 * (long)freq[2] - (long)total) < 5)
		|| (abs(5 * (long)freq[3] - (long)total) < 5) || (abs(5 * (long)freq[4] - (long)total) < 5)
		));
	// Conditional loop: run at least 5 times; beyond that (up to 100 times), exit once the required precision is reached
	if (total / 5 != (total + 1) / 5)
	{
		// Round up so the value converges
		total++;
	}
	// Restore the original process and thread priorities
	SetPriorityClass(GetCurrentProcess(), priority_class);
	SetThreadPriority(GetCurrentThread(), thread_priority);
	return double(total) / 5.0 / 1000.0;
#endif
}
Example #23
void Timer::start()
{
	m_running = true;
	QueryPerformanceCounter(&m_start);
}
Example #24
//
// GetCurrentGameTime
// Purpose: Retrieves the current game time in high resolution seconds
//
float GetCurrentGameTime( void )
{
  LARGE_INTEGER currentTime;
  QueryPerformanceCounter( &currentTime );
  return (float)(currentTime.QuadPart) / (float)(FREQUENCY.QuadPart);
}
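// Note: GetCurrentGameTime() divides by FREQUENCY, which is not shown in this snippet.
// Presumably it is a global LARGE_INTEGER filled once at startup; a minimal sketch of that
// assumed initialization (InitGameTimer is hypothetical, not from the original source):
LARGE_INTEGER FREQUENCY;

void InitGameTimer( void )
{
  QueryPerformanceFrequency( &FREQUENCY );   // counts per second
}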
Example #25
// Entry point
int WINAPI _tWinMain( HINSTANCE hInst, HINSTANCE, LPTSTR, int )
{
	LARGE_INTEGER			nNowTime, nLastTime;		// Current and previous frame times
	LARGE_INTEGER			nTimeFreq;					// Timer frequency

    // Screen size
    g_nClientWidth  = VIEW_WIDTH;						// Width
    g_nClientHeight = VIEW_HEIGHT;						// Height

	// Register the window class
    WNDCLASSEX wc = { sizeof( WNDCLASSEX ), CS_CLASSDC, MsgProc, 0L, 0L,
                      GetModuleHandle( NULL ), NULL, NULL, NULL, NULL,
                      _T( "D3D Sample" ), NULL };
    RegisterClassEx( &wc );

	RECT rcRect;
	SetRect( &rcRect, 0, 0, g_nClientWidth, g_nClientHeight );
	AdjustWindowRect( &rcRect, WS_OVERLAPPEDWINDOW, FALSE );
    g_hWnd = CreateWindow( _T( "D3D Sample" ), _T( "Wipe_3_1" ),
						   WS_OVERLAPPEDWINDOW, 100, 20, rcRect.right - rcRect.left, rcRect.bottom - rcRect.top,
						   GetDesktopWindow(), NULL, wc.hInstance, NULL );

    // Initialize Direct3D
    if( SUCCEEDED( InitD3D() ) && SUCCEEDED( MakeShaders() ) )
    {
        // Create the shaders
        if( SUCCEEDED( InitDrawModes() ) )
        {
			if ( SUCCEEDED( InitGeometry() ) ) {					// Create the geometry

				// Show the window
				ShowWindow( g_hWnd, SW_SHOWDEFAULT );
				UpdateWindow( g_hWnd );

				InitChangingPictures();									// Initialize the characters
				
				QueryPerformanceFrequency( &nTimeFreq );			// Timer frequency
				QueryPerformanceCounter( &nLastTime );				// Initialize the previous-frame time

				// Enter the message loop
				MSG msg;
				ZeroMemory( &msg, sizeof( msg ) );
				while( msg.message != WM_QUIT )
				{
					Render();
//					DrawChangingPictures();
					do {
						if( PeekMessage( &msg, NULL, 0U, 0U, PM_REMOVE ) )
						{
							TranslateMessage( &msg );
							DispatchMessage( &msg );
						}
						QueryPerformanceCounter( &nNowTime );
					} while( ( ( nNowTime.QuadPart - nLastTime.QuadPart ) < ( nTimeFreq.QuadPart / 90 ) ) &&
							 ( msg.message != WM_QUIT ) );
					while( ( ( nNowTime.QuadPart - nLastTime.QuadPart ) < ( nTimeFreq.QuadPart / 60 ) ) &&
						   ( msg.message != WM_QUIT ) )
					{
						QueryPerformanceCounter( &nNowTime );
					}
					nLastTime = nNowTime;
					g_pSwapChain->Present( 0, 0 );					// Present
				}
			}
        }
    }

    // Clean up everything and exit the app
    Cleanup();
    UnregisterClass( _T( "D3D Sample" ), wc.hInstance );
    return 0;
}
Example #26
void GStopWatch::stopTimer( ) {
    QueryPerformanceCounter(&timer.stop);
}
Example #27
void tic(timer* t)
{
	QueryPerformanceFrequency(&t->freq);
	QueryPerformanceCounter(&t->tic);
}
Example #28
VOID  SA2_AnimRender(VOID)
{
  INT i;
  LARGE_INTEGER t;
  POINT pt;

  SA2_FrameCounter++;

  /* Update timer */
  QueryPerformanceCounter(&t);
  SA2_Anim.GlobalTime = (DBL)(t.QuadPart - SA2_StartTime) / SA2_TimePerSec;
  SA2_Anim.GlobalDeltaTime = (DBL)(t.QuadPart - SA2_OldTime) / SA2_TimePerSec;
  if(SA2_Anim.IsPause)
  {
    SA2_Anim.DeltaTime = 0;
    SA2_PauseTime += t.QuadPart - SA2_OldTime;
  }
  else
  {
     SA2_Anim.DeltaTime = SA2_Anim.GlobalDeltaTime;
     SA2_Anim.Time = (DBL)(t.QuadPart - SA2_PauseTime - SA2_StartTime) / SA2_TimePerSec;
  }
  if(t.QuadPart - SA2_OldTimeFPS > SA2_TimePerSec )
  {
    CHAR str[100];
    SA2_Anim.FPS = SA2_FrameCounter * SA2_TimePerSec / (DBL)(t.QuadPart - SA2_OldTimeFPS);
    SA2_OldTimeFPS = t.QuadPart;
    sprintf(str, "FPS: %.5F", SA2_Anim.FPS);
    SetWindowText(SA2_Anim.hWnd, str);
    SA2_FrameCounter = 0;
  }
  SA2_OldTime = t.QuadPart;
                                                      
  /* Update keyboard */
  GetKeyboardState(SA2_Anim.Keys);
  for (i = 0; i < 256; i++)
  {
    SA2_Anim.Keys[i] >>= 7;
    if(!SA2_Anim.OldKeys[i] && SA2_Anim.Keys[i])
      SA2_Anim.KeysClick[i] = TRUE;
    else
      SA2_Anim.KeysClick[i] = FALSE;
  }
  memcpy(SA2_Anim.OldKeys, SA2_Anim.Keys, 256);

  /* Update mouse */
  GetCursorPos(&pt);
  ScreenToClient(SA2_Anim.hWnd, &pt);
  SA2_Anim.Mdx = pt.x - SA2_Anim.Mx;
  SA2_Anim.Mdy = pt.y - SA2_Anim.My;
  SA2_Anim.Mx = pt.x;
  SA2_Anim.My = pt.y;

  /* Update joystick */
  if (joyGetNumDevs() > 0)
  {
    JOYCAPS jc;

    if (joyGetDevCaps(JOYSTICKID1, &jc, sizeof(jc)) == JOYERR_NOERROR)
    {
      
      JOYINFOEX ji;
      ji.dwSize = sizeof(JOYINFOEX);                                         
      ji.dwFlags = JOY_RETURNALL;
      if (joyGetPosEx(JOYSTICKID1, &ji) == JOYERR_NOERROR)
      {
         /*buttons*/
        for (i = 0; i < 32; i++)
          SA2_Anim.JBut[i] = (ji.dwButtons >> i) & 1;
          

        /*axes*/                                                  
         SA2_Anim.JX = SA2_GET_JOYSTICK_AXIS(X);
         SA2_Anim.JY = SA2_GET_JOYSTICK_AXIS(Y);    
         SA2_Anim.JZ = SA2_GET_JOYSTICK_AXIS(Z);
         SA2_Anim.JR = SA2_GET_JOYSTICK_AXIS(R);
         SA2_Anim.JPov = ji.dwPOV == 0xFFFF ? 0 : ji.dwPOV / 4500 + 1;        
      }
    }
Example #29
int
nanosleep (const struct timespec *requested_delay,
           struct timespec *remaining_delay)
{
  static bool initialized;
  /* Number of performance counter increments per nanosecond,
     or zero if it could not be determined.  */
  static double ticks_per_nanosecond;

  if (requested_delay->tv_nsec < 0 || BILLION <= requested_delay->tv_nsec)
    {
      errno = EINVAL;
      return -1;
    }

  /* For requested delays of one second or more, 15ms resolution is
     sufficient.  */
  if (requested_delay->tv_sec == 0)
    {
      if (!initialized)
        {
          /* Initialize ticks_per_nanosecond.  */
          LARGE_INTEGER ticks_per_second;

          if (QueryPerformanceFrequency (&ticks_per_second))
            ticks_per_nanosecond =
              (double) ticks_per_second.QuadPart / 1000000000.0;

          initialized = true;
        }
      if (ticks_per_nanosecond)
        {
          /* QueryPerformanceFrequency worked.  We can use
             QueryPerformanceCounter.  Use a combination of Sleep and
             busy-looping.  */
          /* Number of milliseconds to pass to the Sleep function.
             Since Sleep can take up to 8 ms less or 8 ms more than requested
             (or maybe more if the system is loaded), we subtract 10 ms.  */
          int sleep_millis = (int) requested_delay->tv_nsec / 1000000 - 10;
          /* Determine how many ticks to delay.  */
          LONGLONG wait_ticks = requested_delay->tv_nsec * ticks_per_nanosecond;
          /* Start.  */
          LARGE_INTEGER counter_before;
          if (QueryPerformanceCounter (&counter_before))
            {
              /* Wait until the performance counter has reached this value.
                 We don't need to worry about overflow, because the performance
                 counter is reset at reboot, and with a frequency of 3.6E6
                 ticks per second 63 bits suffice for over 80000 years.  */
              LONGLONG wait_until = counter_before.QuadPart + wait_ticks;
              /* Use Sleep for the longest part.  */
              if (sleep_millis > 0)
                Sleep (sleep_millis);
              /* Busy-loop for the rest.  */
              for (;;)
                {
                  LARGE_INTEGER counter_after;
                  if (!QueryPerformanceCounter (&counter_after))
                    /* QueryPerformanceCounter failed, but succeeded earlier.
                       Should not happen.  */
                    break;
                  if (counter_after.QuadPart >= wait_until)
                    /* The requested time has elapsed.  */
                    break;
                }
              goto done;
            }
        }
    }
  /* Implementation for long delays and as fallback.  */
  Sleep (requested_delay->tv_sec * 1000 + requested_delay->tv_nsec / 1000000);

 done:
  /* Sleep is not interruptible.  So there is no remaining delay.  */
  if (remaining_delay != NULL)
    {
      remaining_delay->tv_sec = 0;
      remaining_delay->tv_nsec = 0;
    }
  return 0;
}
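/* Usage sketch (independent of the gnulib source above): sleeping for 5 ms with the
   nanosleep() replacement. */
static void sleep_five_milliseconds(void)
{
  struct timespec delay;

  delay.tv_sec = 0;
  delay.tv_nsec = 5 * 1000 * 1000;   /* 5 ms in nanoseconds, well below BILLION */

  if (nanosleep(&delay, NULL) != 0)
    perror("nanosleep");             /* needs <stdio.h> */
}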
Example #30
int main()
{
    mfxStatus sts = MFX_ERR_NONE;

    // =====================================================================
    // Intel Media SDK decode pipeline setup
    // - In this example we are decoding an AVC (H.264) stream
    // - For simplistic memory management, system memory surfaces are used to store the decoded frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    //
    //  - VPP used to post process (resize) the frame
    //

    // Open input H.264 elementary stream (ES) file
    FILE* fSource;
    fopen_s(&fSource, "bbb1920x1080.264", "rb");
    MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);

    // Create output YUV file
    FILE* fSink;
    fopen_s(&fSink, "dectest_960x540.yuv", "wb");
    MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);

    // Initialize Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    // - Version 1.0 is selected for greatest backwards compatibility.
    //   If more recent API features are needed, change the version accordingly
    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
    mfxVersion ver = {0, 1};
    MFXVideoSession mfxSession;
    sts = mfxSession.Init(impl, &ver);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Create Media SDK decoder
    MFXVideoDECODE mfxDEC(mfxSession);
    // Create Media SDK VPP component
    MFXVideoVPP mfxVPP(mfxSession);

    // Set required video parameters for decode
    // - In this example we are decoding an AVC (H.264) stream
    // - For simplistic memory management, system memory surfaces are used to store the decoded frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    mfxVideoParam mfxVideoParams;
    memset(&mfxVideoParams, 0, sizeof(mfxVideoParams));
    mfxVideoParams.mfx.CodecId = MFX_CODEC_AVC;
    mfxVideoParams.IOPattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    // Prepare Media SDK bit stream buffer
    // - Arbitrary buffer size for this example
    mfxBitstream mfxBS;
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.MaxLength = 1024 * 1024;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);

    // Read a chunk of data from stream file into bit stream buffer
    // - Parse bit stream, searching for header and fill video parameters structure
    // - Abort if bit stream header is not found in the first bit stream buffer chunk
    sts = ReadBitStreamData(&mfxBS, fSource);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = mfxDEC.DecodeHeader(&mfxBS, &mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Initialize VPP parameters
    // - For simplistic memory management, system memory surfaces are used to store the raw frames
    //   (Note that when using HW acceleration D3D surfaces are preferred, for better performance)
    mfxVideoParam VPPParams;
    memset(&VPPParams, 0, sizeof(VPPParams));
    // Input data
    VPPParams.vpp.In.FourCC         = MFX_FOURCC_NV12;
    VPPParams.vpp.In.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;
    VPPParams.vpp.In.CropX          = 0;
    VPPParams.vpp.In.CropY          = 0;
    VPPParams.vpp.In.CropW          = mfxVideoParams.mfx.FrameInfo.CropW;
    VPPParams.vpp.In.CropH          = mfxVideoParams.mfx.FrameInfo.CropH;
    VPPParams.vpp.In.PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.In.FrameRateExtN  = 30;
    VPPParams.vpp.In.FrameRateExtD  = 1;
    // width must be a multiple of 16
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    VPPParams.vpp.In.Width  = MSDK_ALIGN16(VPPParams.vpp.In.CropW);
    VPPParams.vpp.In.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.In.PicStruct)?
                              MSDK_ALIGN16(VPPParams.vpp.In.CropH) : MSDK_ALIGN32(VPPParams.vpp.In.CropH);
    // Output data
    VPPParams.vpp.Out.FourCC        = MFX_FOURCC_NV12;
    VPPParams.vpp.Out.ChromaFormat  = MFX_CHROMAFORMAT_YUV420;
    VPPParams.vpp.Out.CropX         = 0;
    VPPParams.vpp.Out.CropY         = 0;
    VPPParams.vpp.Out.CropW         = VPPParams.vpp.In.CropW/2;  // Resize to half size resolution
    VPPParams.vpp.Out.CropH         = VPPParams.vpp.In.CropH/2;
    VPPParams.vpp.Out.PicStruct     = MFX_PICSTRUCT_PROGRESSIVE;
    VPPParams.vpp.Out.FrameRateExtN = 30;
    VPPParams.vpp.Out.FrameRateExtD = 1;
    // width must be a multiple of 16
    // height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    VPPParams.vpp.Out.Width  = MSDK_ALIGN16(VPPParams.vpp.Out.CropW);
    VPPParams.vpp.Out.Height = (MFX_PICSTRUCT_PROGRESSIVE == VPPParams.vpp.Out.PicStruct)?
                               MSDK_ALIGN16(VPPParams.vpp.Out.CropH) : MSDK_ALIGN32(VPPParams.vpp.Out.CropH);

    VPPParams.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY | MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    // Query number of required surfaces for decoder
    mfxFrameAllocRequest DecRequest;
    memset(&DecRequest, 0, sizeof(DecRequest));
    sts = mfxDEC.QueryIOSurf(&mfxVideoParams, &DecRequest);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query number of required surfaces for VPP
    mfxFrameAllocRequest VPPRequest[2];// [0] - in, [1] - out
    memset(&VPPRequest, 0, sizeof(mfxFrameAllocRequest)*2);
    sts = mfxVPP.QueryIOSurf(&VPPParams, VPPRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // Determine the required number of surfaces for decoder output (VPP input) and for VPP output
    mfxU16 nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested;
    mfxU16 nSurfNumVPPOut = VPPRequest[1].NumFrameSuggested;


    // Allocate surfaces for decoder and VPP In
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    mfxU16 width = (mfxU16)MSDK_ALIGN32(DecRequest.Info.Width);
    mfxU16 height = (mfxU16)MSDK_ALIGN32(DecRequest.Info.Height);
    mfxU8  bitsPerPixel = 12;  // NV12 format is a 12 bits per pixel format
    mfxU32 surfaceSize = width * height * bitsPerPixel / 8;
    mfxU8* surfaceBuffers = (mfxU8 *)new mfxU8[surfaceSize * nSurfNumDecVPP];

    mfxFrameSurface1** pmfxSurfaces = new mfxFrameSurface1*[nSurfNumDecVPP];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumDecVPP; i++)
    {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxVideoParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.Y = &surfaceBuffers[surfaceSize * i];
        pmfxSurfaces[i]->Data.U = pmfxSurfaces[i]->Data.Y + width * height;
        pmfxSurfaces[i]->Data.V = pmfxSurfaces[i]->Data.U + 1;
        pmfxSurfaces[i]->Data.Pitch = width;
    }

    // Allocate surfaces for VPP Out
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    width = (mfxU16)MSDK_ALIGN32(VPPRequest[1].Info.Width);
    height = (mfxU16)MSDK_ALIGN32(VPPRequest[1].Info.Height);
    bitsPerPixel = 12;  // NV12 format is a 12 bits per pixel format
    surfaceSize = width * height * bitsPerPixel / 8;
    mfxU8* surfaceBuffers2 = (mfxU8 *)new mfxU8[surfaceSize * nSurfNumVPPOut];

    mfxFrameSurface1** pmfxSurfaces2 = new mfxFrameSurface1*[nSurfNumVPPOut];
    MSDK_CHECK_POINTER(pmfxSurfaces2, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nSurfNumVPPOut; i++)
    {
        pmfxSurfaces2[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces2[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces2[i]->Info), &(VPPParams.vpp.Out), sizeof(mfxFrameInfo));
        pmfxSurfaces2[i]->Data.Y = &surfaceBuffers2[surfaceSize * i];
        pmfxSurfaces2[i]->Data.U = pmfxSurfaces2[i]->Data.Y + width * height;
        pmfxSurfaces2[i]->Data.V = pmfxSurfaces2[i]->Data.U + 1;
        pmfxSurfaces2[i]->Data.Pitch = width;
    }

    // Initialize the Media SDK decoder
    sts = mfxDEC.Init(&mfxVideoParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Initialize Media SDK VPP
    sts = mfxVPP.Init(&VPPParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);


    // ===============================================================
    // Start decoding the frames from the stream
    //

#ifdef ENABLE_BENCHMARK
    LARGE_INTEGER tStart, tEnd;
    QueryPerformanceFrequency(&tStart);
    double freq = (double)tStart.QuadPart;
    QueryPerformanceCounter(&tStart);
#endif

    mfxSyncPoint syncpD;
    mfxSyncPoint syncpV;
    mfxFrameSurface1* pmfxOutSurface = NULL;
    int nIndex = 0;
    int nIndex2 = 0;
    mfxU32 nFrame = 0;

    //
    // Stage 1: Main decoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts || MFX_ERR_MORE_SURFACE == sts)
    {
        if (MFX_WRN_DEVICE_BUSY == sts)
            Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        if (MFX_ERR_MORE_DATA == sts)
        {
            sts = ReadBitStreamData(&mfxBS, fSource); // Read more data into input bit stream
            MSDK_BREAK_ON_ERROR(sts);
        }

        if (MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE == sts)
        {
            nIndex = GetFreeSurfaceIndex(pmfxSurfaces, nSurfNumDecVPP); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex)
                return MFX_ERR_MEMORY_ALLOC;
        }

        // Decode a frame asynchronously (returns immediately)
        sts = mfxDEC.DecodeFrameAsync(&mfxBS, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);

        // Ignore warnings if output is available,
        // if no output and no action required just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncpD)
            sts = MFX_ERR_NONE;


        if (MFX_ERR_NONE == sts)
        {
            nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex2)
                return MFX_ERR_MEMORY_ALLOC;

            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = mfxVPP.RunFrameVPPAsync(pmfxOutSurface, pmfxSurfaces2[nIndex2], NULL, &syncpV);

                if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        Sleep(1); // wait if device is busy
                }
                else if (MFX_ERR_NONE < sts && syncpV)
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                    break; // not a warning
            }

            // VPP needs more data, let decoder decode another frame as input
            if (MFX_ERR_MORE_DATA == sts)
            {
                continue;
            }
            else if (MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload! Therefore not handled.
                // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                break;
            }
            else
                MSDK_BREAK_ON_ERROR(sts);
        }


        if (MFX_ERR_NONE == sts)
            sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Wait until decoded frame is ready

        if (MFX_ERR_NONE == sts)
        {
            ++nFrame;
#ifdef ENABLE_OUTPUT
            sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
            MSDK_BREAK_ON_ERROR(sts);

            printf("Frame number: %d\r", nFrame);
#endif
        }
    }

    // MFX_ERR_MORE_DATA means that the file has ended; go to the buffering loop, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 2: Retrieve the buffered decoded frames
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts)
    {
        if (MFX_WRN_DEVICE_BUSY == sts)
            Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync

        nIndex = GetFreeSurfaceIndex(pmfxSurfaces, nSurfNumDecVPP); // Find free frame surface
        if (MFX_ERR_NOT_FOUND == nIndex)
            return MFX_ERR_MEMORY_ALLOC;

        // Decode a frame asynchronously (returns immediately)
        sts = mfxDEC.DecodeFrameAsync(NULL, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);

        // Ignore warnings if output is available,
        // if no output and no action required just repeat the DecodeFrameAsync call
        if (MFX_ERR_NONE < sts && syncpD)
            sts = MFX_ERR_NONE;


        if (MFX_ERR_NONE == sts)
        {
            nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
            if (MFX_ERR_NOT_FOUND == nIndex2)
                return MFX_ERR_MEMORY_ALLOC;

            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = mfxVPP.RunFrameVPPAsync(pmfxOutSurface, pmfxSurfaces2[nIndex2], NULL, &syncpV);

                if (MFX_ERR_NONE < sts && !syncpV) // repeat the call if warning and no output
                {
                    if (MFX_WRN_DEVICE_BUSY == sts)
                        Sleep(1); // wait if device is busy
                }
                else if (MFX_ERR_NONE < sts && syncpV)
                {
                    sts = MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                    break; // not a warning
            }

            // VPP needs more data, let decoder decode another frame as input
            if (MFX_ERR_MORE_DATA == sts)
            {
                continue;
            }
            else if (MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload! Therefore not handled.
                // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps
                break;
            }
            else
                MSDK_BREAK_ON_ERROR(sts);
        }


        if (MFX_ERR_NONE == sts)
            sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Waits until decoded frame is ready

        if (MFX_ERR_NONE == sts)
        {
            ++nFrame;
#ifdef ENABLE_OUTPUT
            sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
            MSDK_BREAK_ON_ERROR(sts);

            printf("Frame number: %d\r", nFrame);
#endif
        }
    }

    // MFX_ERR_MORE_DATA means that decoder is done with buffered frames, need to go to VPP buffering loop, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 3: Retrieve the buffered VPP frames
    //
    while (MFX_ERR_NONE <= sts)
    {
        nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
        if (MFX_ERR_NOT_FOUND == nIndex2)
            return MFX_ERR_MEMORY_ALLOC;

        // Process a frame asynchronously (returns immediately)
        sts = mfxVPP.RunFrameVPPAsync(NULL, pmfxSurfaces2[nIndex2], NULL, &syncpV);
        MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_SURFACE);
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxSession.SyncOperation(syncpV, 60000); // Synchronize. Wait until frame processing is ready
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        ++nFrame;
#ifdef ENABLE_OUTPUT
        sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
        MSDK_BREAK_ON_ERROR(sts);

        printf("Frame number: %d\r", nFrame);
#endif
    }

    // MFX_ERR_MORE_DATA indicates that all buffered frames have been fetched, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

#ifdef ENABLE_BENCHMARK
    QueryPerformanceCounter(&tEnd);
    double duration = ((double)tEnd.QuadPart - (double)tStart.QuadPart)  / freq;
    printf("\nExecution time: %3.2fs (%3.2ffps)\n", duration, nFrame/duration);
#endif

    // ===================================================================
    // Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.

    mfxDEC.Close();
    mfxVPP.Close();
    // mfxSession closed automatically on destruction

    for (int i = 0; i < nSurfNumDecVPP; i++)
        delete pmfxSurfaces[i];
    for (int i = 0; i < nSurfNumVPPOut; i++)
        delete pmfxSurfaces2[i];
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces);
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces2);
    MSDK_SAFE_DELETE_ARRAY(surfaceBuffers);
    MSDK_SAFE_DELETE_ARRAY(surfaceBuffers2);
    MSDK_SAFE_DELETE_ARRAY(mfxBS.Data);

    fclose(fSource);
    fclose(fSink);

    return 0;
}