FramePtr decodeUntilVideoFrame(){
		StreamFrameMap streamFrames;
		streamFrames[videoStream] = Frame::CreateEmpty();

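		// try a bounded number of decode steps so a corrupt stream cannot stall us indefinitely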
		for(int i = 0; i < 100; i++){
			try {
				bool ret = decodeFrame(streamFrames);

			if(!ret){
				FlogW("failed to decode frame");
				return nullptr;
			}

				// return the frame as soon as the decoder finishes one
				if(streamFrames[videoStream]->finished != 0)
					return streamFrames[videoStream];
			}

			catch(const VideoException& e)
			{
				FlogW("While decoding video frame");
				FlogW(e.what());
			}

			Retry("not a video frame in decodeUntilVideoFrame()");
		}

		FlogD("couldn't find a video frame in 100 steps");
		return nullptr;
	}
	void tick(bool includeOldAudio = false){
		bool success = false;

		StreamFrameMap streamFrames;

		streamFrames[videoStream] = Frame::CreateEmpty();
		streamFrames[audioStream] = Frame::CreateEmpty();

		while(!IsEof() && !success)
		{
			try
			{
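				// aim to keep roughly four device blocks of audio queued ahead of playback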
				int audioQueueTargetSize = audioDevice->GetBlockSize() * 4;

				while(
					frameQueue.size() < (unsigned int)targetFrameQueueSize || 
					(hasAudioStream() && audioHandler->getAudioQueueSize() < audioQueueTargetSize))
				{
					if(frameQueue.size() >= (unsigned int)maxFrameQueueSize)
						break;
					
					bool frameDecoded = decodeFrame(streamFrames);

					if(!frameDecoded)
						throw VideoException(VideoException::EDecodingVideo);

					if(streamFrames[videoStream]->finished != 0){
						frameQueue.push(streamFrames[videoStream]->Clone());
						streamFrames[videoStream] = Frame::CreateEmpty();
					}
					
					if(streamFrames[audioStream]->finished != 0){
						// only enqueue audio that is newer than the current video time;
						// e.g. after seeking we may encounter audio older than the frames in the frame queue.
						if(streamFrames[audioStream]->GetSamples().size() > 0 && 
							(includeOldAudio || streamFrames[audioStream]->GetSamples()[0].ts >= timeHandler->GetTime()))
						{
							audioHandler->EnqueueAudio(streamFrames[audioStream]->GetSamples());
						}else{
							FlogD("skipping old audio samples: " << streamFrames[audioStream]->GetSamples().size());
						}
						streamFrames[audioStream] = Frame::CreateEmpty();
					}
				}

				// grow the frame queue target to match how many frames were decoded while filling the audio queue
				if(targetFrameQueueSize < (int)frameQueue.size()){
					targetFrameQueueSize = std::max((int)frameQueue.size(), minFrameQueueSize);
				}
					
				success = true;
			}

			catch(const VideoException& e)
			{
				Retry(Str("Exception in tick: " << e.what()));
			}
		}
	}
void OptiTrackNatNetClient::handle_data_receive(const boost::system::error_code& ec,
        std::size_t bytes_transferred)
{
    if (ec)
    {
        serr << ec.category().name() << " ERROR while receiving data from " << recv_data_endpoint << sendl;
    }
    else
    {
        sout << "Received " << bytes_transferred << "b data from " << recv_data_endpoint << sendl;
        sPacket& PacketIn = *recv_data_packet;
        switch (PacketIn.iMessage)
        {
        case NAT_MODELDEF:
        {
            sout << "Received MODELDEF" << sendl;
            if (serverInfoReceived)
            {
                decodeModelDef(PacketIn);
            }
            else if (!this->serverName.isSet())
            {
                server_endpoint = recv_data_endpoint;
                server_endpoint.port(PORT_COMMAND);
                serr << "Requesting server info to " << server_endpoint << sendl;
                boost::array<unsigned short, 2> helloMsg;
                helloMsg[0] = NAT_PING; helloMsg[1] = 0;
                command_socket->send_to(boost::asio::buffer(helloMsg), server_endpoint);
            }
            break;
        }
        case NAT_FRAMEOFDATA:
        {
            sout << "Received FRAMEOFDATA" << sendl;
            if (serverInfoReceived)
            {
                decodeFrame(PacketIn);
            }
            else if (!this->serverName.isSet())
            {
                server_endpoint = recv_data_endpoint;
                server_endpoint.port(PORT_COMMAND);
                serr << "Requesting server info to " << server_endpoint << sendl;
                boost::array<unsigned short, 2> helloMsg;
                helloMsg[0] = NAT_PING; helloMsg[1] = 0;
                command_socket->send_to(boost::asio::buffer(helloMsg), server_endpoint);
            }
            break;
        }
        default:
        {
            serr << "Received unrecognized data packet type: " << PacketIn.iMessage << sendl;
            break;
        }
        }
    }
    start_data_receive();
}
Example #4
int main (int ac, char *ag[])
{
	uchar buf[255];
	buf[0] = 0xFA;
	buf[1] = 0xBC;
	buf[2] = 0x3;
	buf[3] = 0x4;
	int rc=0, i=0;
	eibaddr_t myAddr = 22;
	eibaddr_t dest = 11;
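	// note: myAddr and dest are declared but unused in bus-monitor mode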
	EIBConnection *con;

	if (ac != 2)
	{
		printf("usage: %s <EIB-URL>\n\n", ag[0]);
		return -1;
	}
	con = EIBSocketURL (ag[1]);
	if (!con)
	{
		printf("EIBSocketURL() failed\n\n");
		return -1;
	}
	printf("URL opened\n\n");

	//if (EIBOpenBusmonitor(con) == -1)
	if (EIBOpenVBusmonitor(con) == -1)
	{
		printf("EIBOpenVBusmonitor() failed\n\n");
		return -1;
	}

	printf("entering loop\n");
	while(1)
	{
		printf("waiting...");
		rc = EIBGetBusmonitorPacket(con, sizeof(buf), buf);
		if(rc == -1)
		{
			printf("EIBGetBusmonitorPacket() failed\n\n");
			return -1;
		}
		else
		{
			for(i=0; i < rc; i++)
				printf("%02X ", buf[i]);
			printf("\n");

			decodeFrame(buf);
		}
	}
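	// not reached: the monitor loop above only exits by returning on error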

	EIBClose (con);
	printf("con closed\n\n");
	return 0;
}
Example #5
void PhVideoEngine::drawVideo(int x, int y, int w, int h)
{
	if(_videoStream) {
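		// apply the configured screen delay (scaled by the clock rate) when picking the frame to decode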
		PhTime delay = static_cast<PhTime>(_settings->screenDelay() * _clock.rate() * 24000.);
		decodeFrame(_clock.time() + delay);
	}
	_videoRect.setRect(x, y, w, h);
	_videoRect.setZ(-10);
	_videoRect.draw();
}
Example #6
	void setFrame(int frame)
	{
		if(frame >= numSamples)
			frame = numSamples - 1;

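		// jump to the nearest key frame at or before the target, then decode forward to the exact frame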
		lastSample = AVIStreamFindSample(stream,(LONG)frame,FIND_KEY | FIND_PREV);
		while((int)lastSample != frame)
		{
			decodeFrame();
			lastSample++;
		}
	}
Example #7
void PhVideoDecoder::requestFrame(PhVideoBuffer *buffer)
{
	bool topLevel = _requestedFrames.empty();

	PHDBG(24) << buffer->requestFrame() << " " << topLevel;

	_requestedFrames.append(buffer);

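	// only the outermost call drains the queue; nested calls just append their request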
	if (topLevel) {
		while (!_requestedFrames.empty()) {
			QCoreApplication::processEvents();
			decodeFrame();
		}
	}
}
void static audioThreadEntryPoint(void* udata, uint8_t* stream, int len)
{
	sdlargst* args = (sdlargst*)udata;
	DSPManager* dspman = static_cast<DSPManager*>(args->dspman);
	AVCodecContext* codecCtx = (AVCodecContext*)args->avcodeccontext;
	packetQueue* queue = args->queue;

	static uint8_t buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int bufLength = 0;
	static unsigned int bufCurrentIndex = 0;
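	// static: decoded audio left over from one callback is carried into the next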
	uint8_t* streamIndex = stream;

	int samplesLeft = len;
	while(samplesLeft > 0)
	{
		if(bufCurrentIndex >= bufLength)
		{
			// No more data in the buffer, get some more.
			int decodeSize = decodeFrame(codecCtx, buf, sizeof(buf), queue);
			if(decodeSize < 0)
			{
				// something went wrong... output silence for this round.
				// bufLength (not bufCurrentIndex) must be set, since the index is reset below.
				bufLength = AVCODEC_MAX_AUDIO_FRAME_SIZE;
				memset(buf, 0, AVCODEC_MAX_AUDIO_FRAME_SIZE);
			}
			else
			{
			  bufLength = decodeSize;
			}
			//Reset the index for the new data.
			bufCurrentIndex = 0;
		}
		int numberOfSamples = bufLength - bufCurrentIndex;
		if(numberOfSamples > samplesLeft)
			numberOfSamples = samplesLeft;
		memcpy(streamIndex, (uint8_t*)buf + bufCurrentIndex, numberOfSamples);
		samplesLeft -= numberOfSamples;
		streamIndex += numberOfSamples;
		bufCurrentIndex += numberOfSamples;
	}

	dspman->processAudioPCM(NULL, stream, len);
	
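	// push this block into the circular buffer and play back its oldest block instead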
	if(dspman->cbuf == NULL)
		dspman->cbuf = new circularBuffer::circularBuffer(CIRCBUFSIZE, sizeof(uint8_t) * len);
	memcpy(dspman->cbuf->add(), stream, sizeof(uint8_t) * len);
	memcpy(stream, dspman->cbuf->pop(), sizeof(uint8_t) * len);
}
CCMovementBoneData *CCDataReaderHelper::decodeMovementBone(cs::CSJsonDictionary &json)
{
    CCMovementBoneData *movementBoneData = CCMovementBoneData::create();

    movementBoneData->delay = json.getItemFloatValue(A_MOVEMENT_DELAY, 0);

    const char *name = json.getItemStringValue(A_NAME);
    if(name != NULL)
    {
        movementBoneData->name = name;
    }

    int length = json.getArrayItemCount(FRAME_DATA);
    for (int i = 0; i < length; i++)
    {
        cs::CSJsonDictionary *dic = json.getSubItemFromArray(FRAME_DATA, i);
        CCFrameData *frameData = decodeFrame(*dic);

        movementBoneData->addFrameData(frameData);

        if (s_CocoStudioVersion < VERSION_COMBINED)
        {
            frameData->frameID = movementBoneData->duration;
            movementBoneData->duration += frameData->duration;
        }

        delete dic;
    }

    if (s_CocoStudioVersion < VERSION_COMBINED)
    {
        // duplicate the last frame and stamp it with the movement's total duration
        if (movementBoneData->frameList.count() > 0)
        {
            CCFrameData *frameData = CCFrameData::create();
            frameData->copy((CCFrameData*)movementBoneData->frameList.lastObject());
            movementBoneData->addFrameData(frameData);

            frameData->frameID = movementBoneData->duration;
        }
    }

    return movementBoneData;
}
Example #10
const Graphics::Surface *SeqDecoder::decodeNextFrame() {
	int16 frameWidth = _fileStream->readUint16LE();
	int16 frameHeight = _fileStream->readUint16LE();
	int16 frameLeft = _fileStream->readUint16LE();
	int16 frameTop = _fileStream->readUint16LE();
	byte colorKey = _fileStream->readByte();
	byte frameType = _fileStream->readByte();
	_fileStream->skip(2);
	uint16 frameSize = _fileStream->readUint16LE();
	_fileStream->skip(2);
	uint16 rleSize = _fileStream->readUint16LE();
	_fileStream->skip(6);
	uint32 offset = _fileStream->readUint32LE();

	_fileStream->seek(offset);

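	// full frames store raw scan lines; other frame types are RLE-compressed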
	if (frameType == kSeqFrameFull) {
		byte *dst = (byte *)_surface->pixels + frameTop * SEQ_SCREEN_WIDTH + frameLeft;

		byte *linebuf = new byte[frameWidth];

		do {
			_fileStream->read(linebuf, frameWidth);
			memcpy(dst, linebuf, frameWidth);
			dst += SEQ_SCREEN_WIDTH;
		} while (--frameHeight);

		delete[] linebuf;
	} else {
		byte *buf = new byte[frameSize];
		_fileStream->read(buf, frameSize);
		decodeFrame(buf, rleSize, buf + rleSize, frameSize - rleSize, (byte *)_surface->pixels + SEQ_SCREEN_WIDTH * frameTop, frameLeft, frameWidth, frameHeight, colorKey);
		delete[] buf;
	}

	if (_curFrame == -1)
		_startTime = g_system->getMillis();

	_curFrame++;
	return _surface;
}
Example #11
void image::decode()
{
	unsigned char nextMarker;

	nextMarker = getNextMarker();
	if(nextMarker == SOI)
	{
		printInfo((nextMarker == SOI),"\ninfo : found SOI marker\n");
		//setupDecoder(); //TODO:
		// keep processing markers until you see any SOF marker
		nextMarker = getNextMarker();
		while( (nextMarker != SOF0) && (nextMarker != SOF1) && (nextMarker != SOF2) &&
		       (nextMarker != SOF3) && (nextMarker != SOF5) && (nextMarker != SOF6) &&
		       (nextMarker != SOF7) && (nextMarker != SOF9) && (nextMarker != SOFA) &&
		       (nextMarker != SOFB) && (nextMarker != SOFD) && (nextMarker != SOFE) &&
		       (nextMarker != SOFF) )
		{
			processTablenMisc(nextMarker);
			nextMarker = getNextMarker();
		}
		decodeFrame(nextMarker);
	}
	else
		printError((nextMarker != SOI),"\nerror: could not find SOI marker\n");
}	// end image::decode() fn.
Example #12
void PhVideoPool::requestFrame(PhFrame frame)
{
	PhVideoBuffer * buffer;

	if (!_recycledPool.empty()) {
		buffer = _recycledPool.takeFirst();
	}
	else {
		PHDBG(24) << "creating a new buffer";
		buffer = new PhVideoBuffer();
	}

	buffer->setFrame(0);
	buffer->setRequestFrame(frame);

	PHDBG(24) << frame;

	// Request the frame from the decoder.
	// Note that the decoder's time origin is 0 at the start of the file, not timeIn.
	emit decodeFrame(buffer);

	_requestedPool.append(buffer);
}
Example #13
void FFmpegDecoderAudio::fillBuffer(void * const buffer, size_t size)
{
    uint8_t * dst_buffer = reinterpret_cast<uint8_t*>(buffer);

    while (size != 0)
    {
        if (m_audio_buf_index == m_audio_buf_size)
        {
            m_audio_buf_index = 0;

            // Pre-fetch audio buffer is empty, refill it.
            const size_t bytes_decoded = decodeFrame(&m_audio_buffer[0], m_audio_buffer.size());

            // If nothing could be decoded (e.g. error or no packet available), output a bit of silence
            if (bytes_decoded == 0)
            {
                m_audio_buf_size = std::min(Buffer::size_type(1024), m_audio_buffer.size());
                memset(&m_audio_buffer[0], 0, m_audio_buf_size);
            }
            else
            {
                m_audio_buf_size = bytes_decoded;
            }
        }

        const size_t fill_size = std::min(m_audio_buf_size - m_audio_buf_index, size);

        memcpy(dst_buffer, &m_audio_buffer[m_audio_buf_index], fill_size);

        size -= fill_size;
        dst_buffer += fill_size;

        m_audio_buf_index += fill_size;

        adjustBufferEndTps(fill_size);
    }
}
CCMovementBoneData *CCDataReaderHelper::decodeMovementBone(tinyxml2::XMLElement *movBoneXml, tinyxml2::XMLElement *parentXml, CCBoneData *boneData)
{
    CCMovementBoneData *movBoneData = CCMovementBoneData::create();
    float scale, delay;

    if( movBoneXml )
    {
		if( movBoneXml->QueryFloatAttribute(A_MOVEMENT_SCALE, &scale) == tinyxml2::XML_SUCCESS )
		{
			movBoneData->scale = scale;
		}
        if( movBoneXml->QueryFloatAttribute(A_MOVEMENT_DELAY, &delay) == tinyxml2::XML_SUCCESS )
        {
            if(delay > 0)
            {
                delay -= 1;
            }
            movBoneData->delay = delay;
        }
    }

    int length = 0;
    int i = 0;
    int parentTotalDuration = 0;
    int currentDuration = 0;

    tinyxml2::XMLElement *parentFrameXML = NULL;

    std::vector<tinyxml2::XMLElement *> parentXmlList;

    /*
    *  get the parent frame xml list; we need it to recover the original data
    */
    if( parentXml != NULL )
    {
        parentFrameXML = parentXml->FirstChildElement(FRAME);
        while (parentFrameXML)
        {
            parentXmlList.push_back(parentFrameXML);
            parentFrameXML = parentFrameXML->NextSiblingElement(FRAME);
        }

        parentFrameXML = NULL;

        length = parentXmlList.size();
    }


    int totalDuration = 0;

    std::string name = movBoneXml->Attribute(A_NAME);

    movBoneData->name = name;

    tinyxml2::XMLElement *frameXML = movBoneXml->FirstChildElement(FRAME);

    while( frameXML )
    {
        if(parentXml)
        {
            /*
            *  in this loop we get the corresponding parent frame xml
            */
            while(i < length && (parentFrameXML ? (totalDuration < parentTotalDuration || totalDuration >= parentTotalDuration + currentDuration) : true))
            {
                parentFrameXML = parentXmlList[i];
                parentTotalDuration += currentDuration;
                parentFrameXML->QueryIntAttribute(A_DURATION, &currentDuration);
                i++;

            }
        }

        CCFrameData *frameData = decodeFrame( frameXML, parentFrameXML, boneData);
        movBoneData->addFrameData(frameData);

		frameData->frameID = totalDuration;
        totalDuration += frameData->duration;
		movBoneData->duration = totalDuration;

        frameXML = frameXML->NextSiblingElement(FRAME);
    }

	
	// duplicate the last frame and stamp it with the movement's total duration
	CCFrameData *frameData = CCFrameData::create();
	frameData->copy((CCFrameData*)movBoneData->frameList.lastObject());
	frameData->frameID = movBoneData->duration;
	movBoneData->addFrameData(frameData);


    return movBoneData;
}
MovementBoneData *DataReaderHelper::decodeMovementBone(TiXmlElement* _movBoneXml, TiXmlElement* _parentXml, BoneData *_boneData)
{
    MovementBoneData* _movBoneData = MovementBoneData::create();
    float _scale, _delay;
    
    if( _movBoneXml )
    {
        if( _movBoneXml->QueryFloatAttribute(A_MOVEMENT_SCALE, &_scale) == TIXML_SUCCESS )
        {
            _movBoneData->setScale(_scale);
        }
        if( _movBoneXml->QueryFloatAttribute(A_MOVEMENT_DELAY, &_delay) == TIXML_SUCCESS )
        {
            if(_delay > 0)
            {
                _delay -= 1;
            }
            _movBoneData->setDelay(_delay);
        }
    }
    
    int _length = 0;
    int _i = 0;
    int _parentTotalDuration = 0;
    int _currentDuration = 0;
    
    TiXmlElement *_parentFrameXML = NULL;
    
    std::vector<TiXmlElement*> _parentXMLList;
    
    /*
     *  get the parent frame xml list; we need it to recover the original data
     */
    if( _parentXml != NULL )
    {
        _parentFrameXML = _parentXml->FirstChildElement(FRAME);
        while (_parentFrameXML)
        {
            _parentXMLList.push_back(_parentFrameXML);
            _parentFrameXML = _parentFrameXML->NextSiblingElement(FRAME);
        }
        
        _parentFrameXML = NULL;
        
        _length = _parentXMLList.size();
    }
    
    
    int _totalDuration = 0;
    
    std::string name = _movBoneXml->Attribute(A_NAME);
    
    _movBoneData->setName(name);
    
    TiXmlElement *_frameXML= _movBoneXml->FirstChildElement(FRAME);
    
    while( _frameXML )
    {
        if(_parentXml)
        {
            /*
             *  in this loop we get the corresponding parent frame xml
             */
            while(_i < _length && (_parentFrameXML?(_totalDuration < _parentTotalDuration || _totalDuration >= _parentTotalDuration + _currentDuration):true))
            {
                _parentFrameXML = _parentXMLList[_i];
                _parentTotalDuration += _currentDuration;
                _parentFrameXML->QueryIntAttribute(A_DURATION, &_currentDuration);
                _i++;
                
            }
        }
        
        FrameData * _frameData = decodeFrame( _frameXML, _parentFrameXML, _boneData);
        
        _movBoneData->addFrameData(_frameData);
        
        _totalDuration += _frameData->getDuration();
        
        _frameXML = _frameXML->NextSiblingElement(FRAME);
    }
    
    
    return _movBoneData;
}
Example #16
bool PhVideoEngine::open(QString fileName)
{
	close();
	PHDEBUG << fileName;

	_clock.setTime(0);
	_clock.setRate(0);
	_currentTime = PHTIMEMIN;

	if(avformat_open_input(&_formatContext, fileName.toStdString().c_str(), NULL, NULL) < 0)
		return false;

	PHDEBUG << "Retrieve stream information";
	if (avformat_find_stream_info(_formatContext, NULL) < 0)
		return false; // Couldn't find stream information

	av_dump_format(_formatContext, 0, fileName.toStdString().c_str(), 0);

	// Find the video and audio streams:
	for(int i = 0; i < (int)_formatContext->nb_streams; i++) {
		AVMediaType streamType = _formatContext->streams[i]->codec->codec_type;
		PHDEBUG << i << ":" << streamType;
		switch(streamType) {
		case AVMEDIA_TYPE_VIDEO:
			_videoStream = _formatContext->streams[i];
			PHDEBUG << "\t=> video";
			break;
		case AVMEDIA_TYPE_AUDIO:
			if(_useAudio && (_audioStream == NULL))
				_audioStream = _formatContext->streams[i];
			PHDEBUG << "\t=> audio";
			break;
		default:
			PHDEBUG << "\t=> unknown";
			break;
		}
	}

	if(_videoStream == NULL)
		return false;

	// Looking for timecode type
	_tcType = PhTimeCode::computeTimeCodeType(this->framePerSecond());
	emit timeCodeTypeChanged(_tcType);

	// Read the starting timecode from the container metadata, falling back to the video stream metadata:
	AVDictionaryEntry *tag = av_dict_get(_formatContext->metadata, "timecode", NULL, AV_DICT_IGNORE_SUFFIX);
	if(tag == NULL)
		tag = av_dict_get(_videoStream->metadata, "timecode", NULL, AV_DICT_IGNORE_SUFFIX);

	if(tag) {
		PHDEBUG << "Found timestamp:" << tag->value;
		_timeIn = PhTimeCode::timeFromString(tag->value, _tcType);
	}


	PHDEBUG << "size : " << _videoStream->codec->width << "x" << _videoStream->codec->height;
	AVCodec * videoCodec = avcodec_find_decoder(_videoStream->codec->codec_id);
	if(videoCodec == NULL) {
		PHDEBUG << "Unable to find the codec:" << _videoStream->codec->codec_id;
		return false;
	}


	if (avcodec_open2(_videoStream->codec, videoCodec, NULL) < 0) {
		PHDEBUG << "Unable to open the codec:" << _videoStream->codec;
		return false;
	}

	_videoFrame = av_frame_alloc();

	PHDEBUG << "length:" << this->length();
	PHDEBUG << "fps:" << this->framePerSecond();

	if(_audioStream) {
		AVCodec* audioCodec = avcodec_find_decoder(_audioStream->codec->codec_id);
		if(audioCodec) {
			if(avcodec_open2(_audioStream->codec, audioCodec, NULL) < 0) {
				PHDEBUG << "Unable to open audio codec.";
				_audioStream = NULL;
			}
			else {
				_audioFrame = av_frame_alloc();
				PHDEBUG << "Audio OK.";
			}
		}
		else {
			PHDEBUG << "Unable to find codec for audio.";
			_audioStream = NULL;
		}
	}

	decodeFrame(0);
	_fileName = fileName;

	return true;
}
boost::uint8_t*
AudioDecoderFfmpeg::decode(const EncodedAudioFrame& ef,
        boost::uint32_t& outputSize)
{
    return decodeFrame(ef.data.get(), ef.dataSize, outputSize);
}
Example #18
void static audioThreadEntryPoint(void* udata, uint8_t* stream, int len)
{
	sdlargst* args = (sdlargst*)udata;
	DSPManager* dspman = static_cast<DSPManager*>(args->dspman);
	AVCodecContext* codecCtx = (AVCodecContext*)args->avcodeccontext;
	packetQueue* queue = args->queue;
	SwrContext *swr;

	static uint8_t *buf = NULL;
	static unsigned int bufLength = 0;
	static unsigned int bufCurrentIndex = 0;
	uint8_t* streamIndex = stream;

	// Set up the resample context to ensure our samples are in the
	// format that SDL expects them to be.
	swr = swr_alloc();
	av_opt_set_int(swr, "in_channel_layout",  codecCtx->channel_layout, 0);
	av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO,  0);
	av_opt_set_int(swr, "in_sample_rate", codecCtx->sample_rate, 0);
	av_opt_set_int(swr, "out_sample_rate", codecCtx->sample_rate, 0);
	av_opt_set_sample_fmt(swr, "in_sample_fmt", codecCtx->sample_fmt, 0);
	av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16,  0);
	swr_init(swr);

	int samplesLeft = len;
	while(samplesLeft > 0)
	{
		if(bufCurrentIndex >= bufLength)
		{
retry_decode:
			// No more data in the buffer, get some
			// more. Ensure we free the old buffer that we
			// allocated in the previous decodeFrame call.
			if (buf)
			{
				free(buf);
				buf = NULL; // guard against a double free if we loop back to retry_decode
			}

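			// note: buf is a pointer here, so sizeof(buf) passes only the pointer size;
			// presumably decodeFrame allocates the buffer itself via &buf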
			int decodeSize = decodeFrame(codecCtx, &buf, sizeof(buf), queue, swr);
			if(decodeSize < 0)
			{
				// something went wrong... try again (note: this spins until a decode succeeds).
				goto retry_decode;
			}
			else
			{
			  bufLength = decodeSize;
			}
			//Reset the index for the new data.
			bufCurrentIndex = 0;
		}
		int numberOfSamples = bufLength - bufCurrentIndex;
		if(numberOfSamples > samplesLeft)
			numberOfSamples = samplesLeft;
		memcpy(streamIndex, (uint8_t*)buf + bufCurrentIndex, numberOfSamples);
		samplesLeft -= numberOfSamples;
		streamIndex += numberOfSamples;
		bufCurrentIndex += numberOfSamples;
	}

	dspman->processAudioPCM(NULL, stream, len);

	
	if(dspman->cbuf == NULL)
		dspman->cbuf = new circularBuffer::circularBuffer(CIRCBUFSIZE, sizeof(uint8_t) * len);
	memcpy(dspman->cbuf->add(), stream, sizeof(uint8_t) * len);
	memcpy(stream, dspman->cbuf->pop(), sizeof(uint8_t) * len);

	swr_free(&swr);
}
void OptiTrackNatNetClient::handle_command_receive(const boost::system::error_code& ec,
        std::size_t bytes_transferred)
{
    if (ec)
    {
        serr << ec.category().name() << " ERROR while receiving command from " << recv_command_endpoint << sendl;
    }
    else
    {
        sout << "Received " << bytes_transferred << "b command from " << recv_command_endpoint << sendl;
        sPacket& PacketIn = *recv_command_packet;
        switch (PacketIn.iMessage)
        {
        case NAT_MODELDEF:
        {
            sout << "Received MODELDEF" << sendl;
            if (serverInfoReceived)
            {
                decodeModelDef(PacketIn);
            }
            else if (!this->serverName.isSet())
            {
                server_endpoint = recv_command_endpoint;
                server_endpoint.port(PORT_COMMAND);
                serr << "Requesting server info to " << server_endpoint << sendl;
                boost::array<unsigned short, 2> helloMsg;
                helloMsg[0] = NAT_PING; helloMsg[1] = 0;
                command_socket->send_to(boost::asio::buffer(helloMsg), server_endpoint);
            }
            break;
        }
        case NAT_FRAMEOFDATA:
        {
            sout << "Received FRAMEOFDATA" << sendl;
            if (serverInfoReceived)
            {
                decodeFrame(PacketIn);
            }
            else if (!this->serverName.isSet())
            {
                server_endpoint = recv_command_endpoint;
                server_endpoint.port(PORT_COMMAND);
                serr << "Requesting server info to " << server_endpoint << sendl;
                boost::array<unsigned short, 2> helloMsg;
                helloMsg[0] = NAT_PING; helloMsg[1] = 0;
                command_socket->send_to(boost::asio::buffer(helloMsg), server_endpoint);
            }
            break;
        }
        case NAT_PINGRESPONSE:
        {
            serverInfoReceived = true;
            serverString = PacketIn.Data.Sender.szName;
            for(int i=0; i<4; i++)
            {
                natNetVersion[i] = PacketIn.Data.Sender.NatNetVersion[i];
                serverVersion[i] = PacketIn.Data.Sender.Version[i];
            }
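            // print the version as "major[.minor[.patch[.build]]]", omitting trailing zero components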
            serr << "Connected to server \"" << serverString << "\" v" << (int)serverVersion[0];
            if (serverVersion[1] || serverVersion[2] || serverVersion[3])
                serr << "." << (int)serverVersion[1];
            if (serverVersion[2] || serverVersion[3])
                serr << "." << (int)serverVersion[2];
            if (serverVersion[3])
                serr << "." << (int)serverVersion[3];
            serr << " protocol v" << (int)natNetVersion[0];
            if (natNetVersion[1] || natNetVersion[2] || natNetVersion[3])
                serr << "." << (int)natNetVersion[1];
            if (natNetVersion[2] || natNetVersion[3])
                serr << "." << (int)natNetVersion[2];
            if (natNetVersion[3])
                serr << "." << (int)natNetVersion[3];
            serr << sendl;
            // request scene info
            boost::array<unsigned short, 2> reqMsg;
            reqMsg[0] = NAT_REQUEST_MODELDEF; reqMsg[1] = 0;
            command_socket->send_to(boost::asio::buffer(reqMsg), server_endpoint);
            break;
        }
        case NAT_RESPONSE:
        {
            sout << "Received response : " << PacketIn.Data.szData << sendl;
            break;
        }
        case NAT_UNRECOGNIZED_REQUEST:
        {
            serr << "Received 'unrecognized request'" << sendl;
            break;
        }
        case NAT_MESSAGESTRING:
        {
            sout << "Received message: " << PacketIn.Data.szData << sendl;
            break;
        }
        default:
        {
            serr << "Received unrecognized command packet type: " << PacketIn.iMessage << sendl;
            break;
        }
        }
    }
    start_command_receive();
}
VC2DecoderSequenceResult VC2Decoder::sequenceDecodeOnePicture(char **_idata, int ilength, uint16_t **odata, int *ostride, bool skip_aux) {
	VC2DecoderParseSegment pi;
	char *idata = *_idata;
	char *iend = *_idata + ilength;

	try {
		while (idata < iend) {
			pi = parse_info(idata);
			if ((uint64_t)idata > (uint64_t)iend) {
				writelog(LOG_ERROR, "%s:%d:  Data Unit is off end of input data\n", __FILE__, __LINE__);
				throw VC2DECODER_CODEROVERRUN;
			}
			switch (pi.parse_code) {
			case VC2DECODER_PARSE_CODE_SEQUENCE_HEADER:
				if (parseSeqHeader(pi.data)) {
					*_idata = pi.next_header;
					return VC2DECODER_RECONFIGURED;
				}
				break;

			case VC2DECODER_PARSE_CODE_END_OF_SEQUENCE:
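				// a VC-2 parse info header is 13 bytes: prefix (4), parse code (1), next/prev offsets (4 + 4)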
				*_idata = idata + 13;
				return VC2DECODER_EOS;

			case VC2DECODER_PARSE_CODE_AUXILIARY_DATA:
				if (!skip_aux) {
					*_idata = idata;
					return VC2DECODER_AUXILIARY;
				}
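				// deliberate fall-through: auxiliary data we skip is treated like padding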
			case VC2DECODER_PARSE_CODE_PADDING_DATA:
				break;

			case VC2DECODER_PARSE_CODE_CORE_PICTURE_AC:
			case VC2DECODER_PARSE_CODE_CORE_PICTURE_VLC:
			case VC2DECODER_PARSE_CODE_LD_PICTURE:
				*_idata = pi.next_header;
				return VC2DECODER_INVALID_PICTURE;

			case VC2DECODER_PARSE_CODE_HQ_PICTURE:
			{
				uint64_t length = decodeFrame(pi.data, iend - pi.data, odata, ostride);
				if (pi.next_header != NULL) {
					*_idata = pi.next_header;
				}
				else {
					*_idata = FindNextParseInfo(idata + length, iend - (idata + length));
				}
			}
			return VC2DECODER_PICTURE;

			default:
				writelog(LOG_WARN, "%s:%d:  Unknown parse code 0x%02x\n", __FILE__, __LINE__, pi.parse_code);
				break;
			}

			if (pi.next_header == NULL) {
				*_idata = idata + 13;
				return VC2DECODER_EOS;
			}

			idata = pi.next_header;
		}
	}
	catch (VC2DecoderResult &r) {
		if (r == VC2DECODER_NOTPARSEINFO) {
			writelog(LOG_ERROR, "%s:%d:  No Parse Info Header Where One was Expected\n", __FILE__, __LINE__);
			throw VC2DECODER_BADSTREAM;
		}
		throw;
	}

	writelog(LOG_WARN, "%s:%d: Premature end of stream\n", __FILE__, __LINE__);
	*_idata += ilength;
	return VC2DECODER_EOS;
}