Example #1
void gs_vertex_buffer::MakeBufferList(gs_vertex_shader *shader,
		vector<ID3D11Buffer*> &buffers, vector<uint32_t> &strides)
{
	PushBuffer(buffers, strides, vertexBuffer, sizeof(vec3), "point");

	if (shader->hasNormals)
		PushBuffer(buffers, strides, normalBuffer, sizeof(vec3),
				"normal");
	if (shader->hasColors)
		PushBuffer(buffers, strides, colorBuffer, sizeof(uint32_t),
				"color");
	if (shader->hasTangents)
		PushBuffer(buffers, strides, tangentBuffer, sizeof(vec3),
				"tangent");
	if (shader->nTexUnits <= uvBuffers.size()) {
		for (size_t i = 0; i < shader->nTexUnits; i++) {
			buffers.push_back(uvBuffers[i]);
			strides.push_back((uint32_t)uvSizes[i]);
		}
	} else {
		blog(LOG_ERROR, "This vertex shader requires at least %u "
		                "texture buffers.",
		                (uint32_t)shader->nTexUnits);
	}
}
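The lists built above are presumably handed straight to the D3D11 input assembler by the caller; a minimal sketch of such a caller (the function name BindVertexBuffers, the zero offsets, and the device-context parameter are assumptions, not taken from the source) might look like:

// Hypothetical caller: bind every buffer the shader expects in one call.
void BindVertexBuffers(ID3D11DeviceContext *context,
		gs_vertex_buffer *vb, gs_vertex_shader *shader)
{
	std::vector<ID3D11Buffer*> buffers;
	std::vector<uint32_t> strides;
	vb->MakeBufferList(shader, buffers, strides);

	// D3D11 expects one offset per bound buffer; assume all data starts at byte 0.
	std::vector<uint32_t> offsets(buffers.size(), 0);

	context->IASetVertexBuffers(0, (UINT)buffers.size(),
			buffers.data(), strides.data(), offsets.data());
}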
Example #2
void RA_Wiznet5100::ProcessEthernet()
{
	bIncoming = true;
	timeout = millis();
	while (bIncoming)
	{
		// Give up on the connection if no data has arrived for 100 ms.
		if (millis() - timeout > 100)
		{
			bIncoming = false;
			NetClient.stop();
		}
		if (NetClient.available() > 0)
		{
			PushBuffer(NetClient.read());
			timeout = millis();
			wdt_reset();
			// Once a request type has been recognized while buffering,
			// stop collecting and drain the rest of the incoming data.
			if (reqtype > 0 && reqtype < 128)
			{
				bIncoming = false;
				while (NetClient.available())
				{
					wdt_reset();
					NetClient.read();
				}
			}
		}
	}
	wdt_reset();
	ProcessHTTP();

	NetClient.stop();
	m_pushbackindex = 0;
}
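The PushBuffer used above is the class's own byte accumulator and is not part of this excerpt; a heavily hedged sketch of its likely shape (the m_pushbackbuffer name and its size are assumptions) would be:

// Hypothetical sketch only: append one received byte to a fixed request buffer.
// m_pushbackindex is the write position that ProcessEthernet() resets to 0 above;
// a real implementation would also set reqtype once the request line is recognized.
void RA_Wiznet5100::PushBuffer(char c)
{
	if (m_pushbackindex < (int)sizeof(m_pushbackbuffer) - 1)
	{
		m_pushbackbuffer[m_pushbackindex++] = c;
		m_pushbackbuffer[m_pushbackindex] = 0;	// keep the buffer NUL-terminated
	}
}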
Example #3
//-------------------------------------------------
int CheckBack(WPARAM wParam)
{
	if (wParam == VK_PAUSE && PuttingBacks) {
		PuttingBacks = 0;
		//ReleaseShift(0);
		ReleaseControlKeys(0);
		PushBuffer(GetFocus());
		return 1;
	}
	return 0;
}
Example #4
/*  CXAudio2::ProcessSound
The mixing function called by the sound core when new samples are available.
SoundBuffer is divided into blockCount blocks. If there are enough available samples and a free block,
the block is filled and queued to the source voice. bufferCount is increased by PushBuffer and decreased by
the OnBufferComplete callback.
*/
void CXAudio2::ProcessSound()
{
	S9xFinalizeSamples();

	if(!initDone)
		return;

	BYTE * curBuffer;

	UINT32 availableSamples;
	UINT32 availableBytes;

	availableSamples = S9xGetSampleCount();
	availableBytes = availableSamples * (Settings.SixteenBitSound ? 2 : 1);

	while (availableSamples > singleBufferSamples && bufferCount < blockCount) {
		curBuffer = soundBuffer + writeOffset;
		S9xMixSamples(curBuffer, singleBufferSamples);
		PushBuffer(singleBufferBytes, curBuffer, NULL);
		writeOffset += singleBufferBytes;
		writeOffset %= sum_bufferSize;
	}
}
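The OnBufferComplete callback mentioned in the comment is not shown here; a minimal sketch of that counterpart (assuming CXAudio2 implements IXAudio2VoiceCallback and that bufferCount is a volatile LONG shared with ProcessSound) could be:

// Hypothetical sketch: XAudio2 invokes this when a submitted block finishes playing,
// freeing one slot so the while loop in ProcessSound() can queue another block.
void STDMETHODCALLTYPE CXAudio2::OnBufferEnd(void *pBufferContext)
{
	InterlockedDecrement(&bufferCount);
}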
Example #5
//-------------------------------------------------
void PushBacks()
{
	if (VnKbd.backs == 0) {
		BackTracks = 0;
		PushBuffer(GetFocus());
	}
	else {
		PuttingBacks = 1;
/*
		if (KeyState[VK_SHIFT] & 0x80)
			ReleaseShift(1);
*/
		if (KeyState[VK_CONTROL] & 0x80 || KeyState[VK_SHIFT] & 0x80 ||	KeyState[VK_MENU] & 0x80)
			ReleaseControlKeys(1);

		for (int i=0; i<VnKbd.backs; i++) {
			keybd_event(VK_BACK, VK_BACK_SCAN,0,0);
			keybd_event(VK_BACK, VK_BACK_SCAN, KEYEVENTF_KEYUP, 0);
		}
//		keybd_event(VK_PAUSE, VK_PAUSE_SCAN, 0, 0);
		keybd_event(VK_PAUSE, VK_PAUSE_SCAN, KEYEVENTF_KEYUP, 0);
	}
}
Example #6
	//----------------------------------------------------------------------------------------------------
	bool EEMusic::AsyncLoadMusic(const char* _fileName)
	{
		AVFormatContext *formatContext = NULL;
		int streamIndex = -1;
		AVCodecContext *codecContext = NULL;
		AVCodec *codec = NULL;

		// open file
		if (avformat_open_input(&formatContext, _fileName, NULL, NULL) < 0)
		{
			return false;
		}

		// find stream info
		if (avformat_find_stream_info(formatContext, NULL) < 0)
		{
			//unable to find stream info
			avformat_close_input(&formatContext);
			return false;
		}
		// find the stream
		if ((streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		// find decoder
		codecContext = formatContext->streams[streamIndex]->codec;
		codec = avcodec_find_decoder(codecContext->codec_id);
		if (!codec)
		{
			avformat_close_input(&formatContext);
			return false;
		}
		// open codec
		if (avcodec_open2(codecContext, codec, NULL) < 0)
		{
			avformat_close_input(&formatContext);
			return false;
		}

		int channels = codecContext->channels;
		int bitsPerSample = av_get_bits_per_sample(av_get_pcm_codec(codecContext->sample_fmt, -1));
		int bytesPerSample = bitsPerSample / 8;
		int samplesPerSec = codecContext->sample_rate;
		int blockAlign = bytesPerSample * channels;
		int avgBytesPerSec = samplesPerSec * blockAlign;
		// m_totalBytes = (int)((double)formatContext->duration / AV_TIME_BASE * avgBytesPerSec);
		// m_totalSamples = (int)((double)formatContext->duration / AV_TIME_BASE * samplesPerSec);
		// m_totalTime = formatContext->duration / (double)AV_TIME_BASE;

		if (bitsPerSample == 32)
			m_format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
		else
			m_format.wFormatTag = WAVE_FORMAT_PCM;
		m_format.nChannels = channels;
		m_format.nSamplesPerSec = samplesPerSec;
		m_format.nAvgBytesPerSec = avgBytesPerSec;
		m_format.nBlockAlign = blockAlign;
		m_format.wBitsPerSample = bitsPerSample;
		m_format.cbSize = 0;
		if (FAILED(s_XAudio2->CreateSourceVoice(&m_sourceVoice, &m_format, 0, XAUDIO2_DEFAULT_FREQ_RATIO, &m_musicCallBack)))
			return false;

		// todo: keep the handle of the loader thread
		m_loader = new boost::thread([&, bytesPerSample, formatContext, streamIndex, codecContext, codec] () mutable ->bool {
			AVPacket *packet = new AVPacket;
			av_init_packet(packet);
			AVFrame	*frame = av_frame_alloc();
			uint32_t len = 0;
			int got;
			while (av_read_frame(formatContext, packet) >= 0)
			{
				if (packet->stream_index == streamIndex)
				{
					if (avcodec_decode_audio4(codecContext, frame, &got, packet) < 0)
					{
						printf("Error in decoding audio frame.\n");
						av_free_packet(packet);
						continue;
					}
					if (got > 0)
					{
						//int size = *frame->linesize;
						int size = frame->nb_samples * bytesPerSample;
						if (m_data.empty())
							m_data.push_back(std::pair<int, std::string>(0, std::string()));
						else
							m_data.push_back(std::pair<int, std::string>(m_data.back().first + m_data.back().second.size(), std::string()));
						std::string& data = m_data.back().second;
						if (av_sample_fmt_is_planar(codecContext->sample_fmt))
						{
							data.resize(size * 2);
							int index = 0;
							for (int i = 0; i < size; i += bytesPerSample)
							{
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[0][i + j];
								}
								for (int j = 0; j < bytesPerSample; ++j)
								{
									data[index++] = (char)frame->data[1][i + j];
								}
							}
							len += size * 2;
						}
						else
						{
							data.resize(size);
							memcpy((&data[0]), frame->data[0], size);
							len += size;
						}
						try
						{
							PushBuffer(EEMusicCell(&m_data.back()));
							if (EE_MUSIC_NO_BUFFER == m_state)
							{
								SubmitBuffer();
								SubmitBuffer();
								SubmitBuffer();
							}
							EEThreadSleep(1);
						}
						catch (boost::thread_interrupted&)
						{
							avcodec_close(codecContext);
							avformat_close_input(&formatContext);
							return false;
						}
					}
				}
				av_free_packet(packet);
			}
			m_totalBytes = len;
			m_totalSamples = len / m_format.nBlockAlign;
			m_totalTime = (double)m_totalSamples / m_format.nSamplesPerSec;

			av_frame_free(&frame);
			avcodec_close(codecContext);
			avformat_close_input(&formatContext);

			return true;
		});

		return true;
	}
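The catch of boost::thread_interrupted above implies the loader thread is meant to be interruptible; a hedged teardown sketch (the method name StopLoading is an assumption, and it presumes EEThreadSleep wraps a boost interruption point) might be:

	// Hypothetical teardown: request interruption, wait for the decode loop to
	// unwind through its catch block, then release the thread object.
	void EEMusic::StopLoading()
	{
		if (m_loader)
		{
			m_loader->interrupt();
			m_loader->join();
			delete m_loader;
			m_loader = NULL;
		}
	}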
Example #7
BOOL
ParseAndPrintString( HANDLE hDev,
		     LPCVOID lpBuffer,
		     DWORD nNumberOfBytesToWrite,
		     LPDWORD lpNumberOfBytesWritten
		     )
{
  DWORD   i;
  LPCTSTR s;

  if (hDev != hConOut)	// reinit if device has changed
  {
    hConOut = hDev;
    state = 1;
    shifted = FALSE;
  }
  for (i = nNumberOfBytesToWrite, s = (LPCTSTR)lpBuffer; i > 0; i--, s++)
  {
    if (state == 1)
    {
      if (*s == ESC) state = 2;
      else if (*s == SO) shifted = TRUE;
      else if (*s == SI) shifted = FALSE;
      else PushBuffer( *s );
    }
    else if (state == 2)
    {
      if (*s == ESC) ;	// \e\e...\e == \e
      else if ((*s == '[') || (*s == ']'))
      {
	FlushBuffer();
	prefix = *s;
	prefix2 = 0;
	state = 3;
	Pt_len = 0;
	*Pt_arg = '\0';
      }
      else if (*s == ')' || *s == '(') state = 6;
      else state = 1;
    }
    else if (state == 3)
    {
      if (isdigit( *s ))
      {
        es_argc = 0;
	es_argv[0] = *s - '0';
        state = 4;
      }
      else if (*s == ';')
      {
        es_argc = 1;
        es_argv[0] = 0;
	es_argv[1] = 0;
        state = 4;
      }
      else if (*s == '?' || *s == '>')
      {
	prefix2 = *s;
      }
      else
      {
        es_argc = 0;
        suffix = *s;
        InterpretEscSeq();
        state = 1;
      }
    }
    else if (state == 4)
    {
      if (isdigit( *s ))
      {
	es_argv[es_argc] = 10 * es_argv[es_argc] + (*s - '0');
      }
      else if (*s == ';')
      {
        if (es_argc < MAX_ARG-1) es_argc++;
	es_argv[es_argc] = 0;
	if (prefix == ']')
	  state = 5;
      }
      else
      {
	es_argc++;
        suffix = *s;
        InterpretEscSeq();
        state = 1;
      }
    }
    else if (state == 5)
    {
      if (*s == BEL)
      {
	Pt_arg[Pt_len] = '\0';
        InterpretEscSeq();
        state = 1;
      }
      else if (*s == '\\' && Pt_len > 0 && Pt_arg[Pt_len-1] == ESC)
      {
	Pt_arg[--Pt_len] = '\0';
        InterpretEscSeq();
        state = 1;
      }
      else if (Pt_len < lenof(Pt_arg)-1)
	Pt_arg[Pt_len++] = *s;
    }
    else if (state == 6)
    {
      // Ignore it (ESC ) 0 is implicit; nothing else is supported).
      state = 1;
    }
  }
  FlushBuffer();
  if (lpNumberOfBytesWritten != NULL)
    *lpNumberOfBytesWritten = nNumberOfBytesToWrite - i;
  return( i == 0 );
}
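A minimal call into the parser above (the console handle and the sample escape string are illustrative only) could be:

// Hypothetical usage: feed an ANSI-colored string through the state machine above.
// Although the parameter is named nNumberOfBytesToWrite, the loop walks the buffer
// one TCHAR at a time, so a character count is what it actually consumes.
void WriteExample(HANDLE hConsole)
{
  LPCTSTR text = TEXT("\x1b[32mhello\x1b[0m\r\n");
  DWORD   written = 0;
  ParseAndPrintString(hConsole, text, lstrlen(text), &written);
}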