Example no. 1
void QuickTimeDecoder::init() {
	Audio::QuickTimeAudioDecoder::init();

	_videoTrackIndex = -1;
	_startTime = 0;

	// Find video streams
	for (uint32 i = 0; i < _tracks.size(); i++)
		if (_tracks[i]->codecType == CODEC_TYPE_VIDEO && _videoTrackIndex < 0)
			_videoTrackIndex = i;

	// Start the audio codec if we've got one that we can handle
	if (_audStream) {
		startAudio();
		_audioStartOffset = Audio::Timestamp(0);
	}

	// Initialize video, if present
	if (_videoTrackIndex >= 0) {
		for (uint32 i = 0; i < _tracks[_videoTrackIndex]->sampleDescs.size(); i++)
			((VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[i])->initCodec();

		if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
			// We have to initialize the scaled surface
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getWidth(), getHeight(), getPixelFormat());
		}
	}
}
Example no. 2
void SeekableBinkDecoder::seekToFrame(uint32 frame) {
	assert(frame < _frames.size());

	// Fast path
	if ((int32)frame == _curFrame + 1)
		return;

	// Stop all audio (for now)
	stopAudio();

	// Track down the keyframe
	_curFrame = findKeyFrame(frame) - 1;
	while (_curFrame < (int32)frame - 1)
		skipNextFrame();

	// Map out the starting point
	Common::Rational startTime = frame * 1000 / getFrameRate();
	_startTime = g_system->getMillis() - startTime.toInt();
	resetPauseStartTime();

	// Adjust the audio starting point
	if (_audioTrack < _audioTracks.size()) {
		Common::Rational audioStartTime = (frame + 1) * 1000 / getFrameRate();
		_audioStartOffset = audioStartTime.toInt();
	}

	// Restart the audio
	startAudio();
}
Example no. 3
bool VideoDecoder::seek(const Audio::Timestamp &time) {
	if (!isSeekable())
		return false;

	// Stop all tracks so they can be seeked
	if (isPlaying())
		stopAudio();

	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
		if (!(*it)->seek(time))
			return false;

	_lastTimeChange = time;

	// Now that we've seeked, start all tracks again
	// Also reset our start time
	if (isPlaying()) {
		startAudio();
		_startTime = g_system->getMillis() - time.msecs();
	}

	resetPauseStartTime();
	_needsUpdate = true;
	return true;
}
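The start-time bookkeeping in this seek (and in the rewind, setRate, and seekToFrame examples elsewhere in this list) reduces to one piece of arithmetic: after moving to time t, _startTime becomes now - t, so the elapsed time derived from the wall clock reports t again and keeps advancing from there. A small standalone illustration of that invariant; this is not ScummVM code, and every name in it is made up:

// Standalone sketch of the wall-clock bookkeeping used above: after a seek
// to `seekMs`, elapsed() must report `seekMs` again and keep advancing.
#include <cassert>
#include <cstdint>

static uint32_t g_now = 100000;            // stand-in for g_system->getMillis()

struct PlaybackClock {
	uint32_t startTime = 0;

	void seekTo(uint32_t seekMs) { startTime = g_now - seekMs; }
	uint32_t elapsed() const     { return g_now - startTime; }
};

int main() {
	PlaybackClock clock;
	clock.seekTo(5000);                    // pretend we seeked to 5 seconds
	assert(clock.elapsed() == 5000);       // reported position equals the seek target

	g_now += 250;                          // 250 ms of wall-clock time passes
	assert(clock.elapsed() == 5250);       // playback position advances with it
	return 0;
}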
Example no. 4
void VideoDecoder::setRate(const Common::Rational &rate) {
	if (!isVideoLoaded() || _playbackRate == rate)
		return;

	if (rate == 0) {
		stop();
		return;
	} else if (rate != 1 && hasAudio()) {
		warning("Cannot set custom rate in videos with audio");
		return;
	}

	Common::Rational targetRate = rate;

	if (rate < 0) {
		// TODO: Implement support for this
		warning("Cannot set custom rate to backwards");
		targetRate = 1;

		if (_playbackRate == targetRate)
			return;
	}

	if (_playbackRate != 0)
		_lastTimeChange = getTime();

	_playbackRate = targetRate;
	_startTime = g_system->getMillis();

	// Adjust start time if we've seeked to something besides zero time
	if (_lastTimeChange.totalNumberOfFrames() != 0)
		_startTime -= (_lastTimeChange.msecs() / _playbackRate).toInt();

	startAudio();
}
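A hypothetical caller of this setRate() variant, assuming `video` points at a loaded decoder with no audio track (custom rates are refused when audio is present); the names here are illustrative only:

// Hypothetical usage sketch; `video` is assumed to be a loaded, audio-free decoder.
void demoRates(VideoDecoder *video) {
	video->setRate(Common::Rational(1, 2)); // half speed
	video->setRate(Common::Rational(2, 1)); // double speed
	video->setRate(-1);                     // unsupported: warns, falls back to rate 1
	video->setRate(0);                      // same effect as stop()
}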
Example no. 5
bool VideoDecoder::seek(const Audio::Timestamp &time) {
	if (!isSeekable())
		return false;

	// Stop all tracks so they can be seeked
	if (isPlaying())
		stopAudio();

	// Do the actual seeking
	if (!seekIntern(time))
		return false;

	// Seek any external track too
	for (TrackListIterator it = _externalTracks.begin(); it != _externalTracks.end(); it++)
		if (!(*it)->seek(time))
			return false;

	_lastTimeChange = time;

	// Now that we've seeked, start all tracks again
	// Also reset our start time
	if (isPlaying()) {
		startAudio();
		_startTime = g_system->getMillis() - (time.msecs() / _playbackRate).toInt();
	}

	resetPauseStartTime();
	findNextVideoTrack();
	_needsUpdate = true;
	return true;
}
Example no. 6
int main(int argc, char *argv[]){
    PaStream *stream;

    sharedBuffer = (Packet *)malloc(sizeof(Packet) * BUFFER_SIZE);

    processArgs(argc, argv);

    if (!startAudio(stream, argv[1])){
        exit(1);
    }

    // TODO - get rid of glut stuff and set up context manually
    setupGlut(argc, argv);

    GLenum err = glewInit();
    if (GLEW_OK != err){
        fprintf(stderr, "GLEW Error: %s\n", glewGetErrorString(err));
        exit(1);
    }

    SetupRC();
    glutMainLoop();
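    // Note: glutMainLoop() normally never returns, so the cleanup below is
    // effectively unreachable; the OS reclaims these resources on process exit.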

    free(sharedBuffer);
    endAudio(stream);
    exit(0);
}
Example no. 7
void SyntroLCamConsole::runDaemon()
{
	startVideo();
    startAudio();

    while (!SyntroLCamConsole::sigIntReceived)
		msleep(100); 
}
Example no. 8
static void my_button_press(GtkWidget *widget, GdkEventButton *event,
		gpointer data)
{
	int x, y;
//	bstate = PRESSED;

	x = event->x;
	y = event->y;

#if MY_DEBUG_OUTPUT == 1
	g_print("hello, x is:%d, y is:%d\n", x, y);
#endif
	if(in_image1(x,y))
	{/*browser*/
#if MY_DEBUG_OUTPUT == 1
		g_print("pressed in the image1\n");
#endif
		//launchApp_simple("/usr/bin/midbrowser");
		startBrowser(widget);
	}
	else if(in_image2(x,y))
	{/*Audio*/
#if MY_DEBUG_OUTPUT == 1
		g_print("pressed in the image2\n");
#endif
		//launchApp_simple("/usr/bin/ls -a");//这里启动程序
		//launchApp_simple("/usr/bin/StartAudio.sh");
		startAudio(widget);
	}
	else if(in_image3(x,y))
	{/*Video*/
#if MY_DEBUG_OUTPUT == 1
		g_print("pressed in the image3\n");
#endif
		//launchApp_simple("/usr/bin/echo good");//这里启动程序
		//launchApp_simple("/usr/bin/StartVideo.sh");
		startVideo(widget);
	}
	else if(in_image4(x,y))
	{/*Wifi*/
#if MY_DEBUG_OUTPUT == 1
		g_print("pressed in the image4\n");
#endif
		//launchApp_simple("/usr/bin/echo 'hello world'");//这里启动程序
		//launchApp_simple("/usr/bin/StartWifi.sh");//这里启动程序
		startWifi(widget);
	}
	else
	{
#if MY_DEBUG_OUTPUT == 1
		g_print("pressed out of all the image!\n");
#endif
	}
}
Example no. 9
void QuickTimeDecoder::init() {
	Audio::QuickTimeAudioDecoder::init();

	_startTime = 0;
	_setStartTime = false;

	// Initialize all the audio tracks
	if (!_audioTracks.empty()) {
		_audioHandles.resize(_audioTracks.size());

		for (uint32 i = 0; i < _audioTracks.size(); i++)
			_handlers.push_back(new AudioTrackHandler(this, _audioTracks[i]));
	}

	// Initialize all the video tracks
	for (uint32 i = 0; i < _tracks.size(); i++) {
		if (_tracks[i]->codecType == CODEC_TYPE_VIDEO) {
			for (uint32 j = 0; j < _tracks[i]->sampleDescs.size(); j++)
				((VideoSampleDesc *)_tracks[i]->sampleDescs[j])->initCodec();

			_handlers.push_back(new VideoTrackHandler(this, _tracks[i]));
		}
	}

	// Prepare the first video track
	_nextVideoTrack = findNextVideoTrack();

	if (_nextVideoTrack) {
		// Initialize the scaled surface
		if (_scaleFactorX != 1 || _scaleFactorY != 1) {
			// We have to initialize the scaled surface
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create((_nextVideoTrack->getWidth() / _scaleFactorX).toInt(),
					(_nextVideoTrack->getHeight() / _scaleFactorY).toInt(), getPixelFormat());
			_width = _scaledSurface->w;
			_height = _scaledSurface->h;
		} else {
			_width = _nextVideoTrack->getWidth().toInt();
			_height = _nextVideoTrack->getHeight().toInt();
		}

		_needUpdate = true;
	} else {
		_needUpdate = false;
	}

	// Now start any audio
	if (!_audioTracks.empty()) {
		startAudio();
		_audioStartOffset = Audio::Timestamp(0);
	}
}
Example no. 10
void SdlMixerManager::init() {
	// Start SDL Audio subsystem
	if (SDL_InitSubSystem(SDL_INIT_AUDIO) == -1) {
		error("Could not initialize SDL: %s", SDL_GetError());
	}

	const int maxNameLen = 20;
	char sdlDriverName[maxNameLen];
	sdlDriverName[0] = '\0';
	SDL_AudioDriverName(sdlDriverName, maxNameLen);
	debug(1, "Using SDL Audio Driver \"%s\"", sdlDriverName);

	// Get the desired audio specs
	SDL_AudioSpec desired = getAudioSpec(SAMPLES_PER_SEC);

	// Needed as SDL_OpenAudio as of SDL-1.2.14 mutates fields in
	// "desired" if used directly.
	SDL_AudioSpec fmt = desired;

	// Start SDL audio with the desired specs
	if (SDL_OpenAudio(&fmt, &_obtained) != 0) {
		warning("Could not open audio device: %s", SDL_GetError());

		_mixer = new Audio::MixerImpl(g_system, desired.freq);
		assert(_mixer);
		_mixer->setReady(false);
	} else {
		debug(1, "Output sample rate: %d Hz", _obtained.freq);
		if (_obtained.freq != desired.freq)
			warning("SDL mixer output sample rate: %d differs from desired: %d", _obtained.freq, desired.freq);

		debug(1, "Output buffer size: %d samples", _obtained.samples);
		if (_obtained.samples != desired.samples)
			warning("SDL mixer output buffer size: %d differs from desired: %d", _obtained.samples, desired.samples);

		if (_obtained.format != desired.format)
			warning("SDL mixer sound format: %d differs from desired: %d", _obtained.format, desired.format);

#ifndef __SYMBIAN32__
		// The SymbianSdlMixerManager does stereo->mono downmixing,
		// but otherwise we require stereo output.
		if (_obtained.channels != 2)
			error("SDL mixer output requires stereo output device");
#endif

		_mixer = new Audio::MixerImpl(g_system, _obtained.freq);
		assert(_mixer);
		_mixer->setReady(true);

		startAudio();
	}
}
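Both SdlMixerManager::init() variants in this list build a "desired" spec through getAudioSpec() and then compare it against what SDL actually grants. A sketch of the kind of spec such a helper presumably fills in; the helper's contents are an assumption for illustration, not the real ScummVM code, though the SDL structures and constants are the standard API:

// Sketch of a desired-spec builder; getAudioSpec()'s real contents are assumed.
#include <SDL.h>
#include <cstring>

static void sdlCallback(void *userdata, Uint8 *stream, int len) {
	// A real callback would mix audio into `stream`; silence is enough here.
	(void)userdata;
	memset(stream, 0, len);
}

static SDL_AudioSpec makeDesiredSpec(int rate) {
	SDL_AudioSpec spec;
	memset(&spec, 0, sizeof(spec));
	spec.freq     = rate;                  // e.g. SAMPLES_PER_SEC
	spec.format   = AUDIO_S16SYS;          // signed 16-bit, native byte order
	spec.channels = 2;                     // the manager above requires stereo
	spec.samples  = 4096;                  // buffer size in sample frames
	spec.callback = sdlCallback;
	spec.userdata = 0;
	return spec;
}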
Example no. 11
int main()
{
	printf("INIT: %d\n", initializeAudio());

	//testBuffer();

	startAudio();
	startRecording("test.ogg");
	sleep(5);
	stopRecording();
	stopAudio();

	printf("DEINIT: %d\n", uninitializeAudio());

	return 0;
}
Example no. 12
void QuickTimeDecoder::seekToTime(const Audio::Timestamp &time) {
	stopAudio();
	_audioStartOffset = time;

	// Sets all tracks to this time
	for (uint32 i = 0; i < _handlers.size(); i++)
		_handlers[i]->seekToTime(time);

	startAudio();

	// Reset our start time
	_startTime = g_system->getMillis() - time.msecs();
	_setStartTime = true;
	resetPauseStartTime();

	// Reset the next video track too
	_nextVideoTrack = findNextVideoTrack();
	_needUpdate = _nextVideoTrack != 0;
}
Example no. 13
bool VideoDecoder::rewind() {
	if (!isRewindable())
		return false;

	// Stop all tracks so they can be rewound
	if (isPlaying())
		stopAudio();

	for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
		if (!(*it)->rewind())
			return false;

	// Now that we've rewound, start all tracks again
	if (isPlaying())
		startAudio();

	_lastTimeChange = 0;
	_startTime = g_system->getMillis();
	resetPauseStartTime();
	return true;
}
Example no. 14
void SyntroLCamConsole::runConsole()
{
#ifndef WIN32
	struct termios	ctty;

	tcgetattr(fileno(stdout), &ctty);
	ctty.c_lflag &= ~(ICANON);
	tcsetattr(fileno(stdout), TCSANOW, &ctty);
#endif

	bool grabbing = startVideo();
    startAudio();

	while (grabbing) {
		printf("\nEnter option: ");

#ifdef WIN32
		switch (tolower(_getch()))
#else
        switch (tolower(getchar()))
#endif		
		{
		case 'h':
			showHelp();
			break;

		case 's':
			showStatus();
			break;

		case 'x':
			printf("\nExiting\n");
			grabbing = false;		
			break;

		case '\n':
			continue;
		}
	}
}
Example no. 15
void QuickTimeDecoder::seekToFrame(uint32 frame) {
	assert(_videoTrackIndex >= 0);
	assert(frame < _tracks[_videoTrackIndex]->frameCount);

	// Stop all audio (for now)
	stopAudio();

	// Track down the keyframe
	_curFrame = findKeyFrame(frame) - 1;
	while (_curFrame < (int32)frame - 1)
		decodeNextFrame();

	// Map out the starting point
	_nextFrameStartTime = 0;
	uint32 curFrame = 0;

	for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && curFrame < frame; i++) {
		for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count && curFrame < frame; j++) {
			curFrame++;
			_nextFrameStartTime += _tracks[_videoTrackIndex]->timeToSample[i].duration;
		}
	}

	// Adjust the video starting point
	const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _tracks[_videoTrackIndex]->timeScale);
	_startTime = g_system->getMillis() - curVideoTime.msecs();
	resetPauseStartTime();

	// Adjust the audio starting point
	if (_audioTrackIndex >= 0) {
		_audioStartOffset = curVideoTime;

		// Seek to the new audio location
		setAudioStreamPos(_audioStartOffset);

		// Restart the audio
		startAudio();
	}
}
Example no. 16
void VideoDecoder::setRate(const Common::Rational &rate) {
	if (!isVideoLoaded() || _playbackRate == rate)
		return;

	if (rate == 0) {
		stop();
		return;
	} else if (rate != 1 && hasAudio()) {
		warning("Cannot set custom rate in videos with audio");
		return;
	}

	Common::Rational targetRate = rate;

	// Attempt to set the reverse
	if (!setReverse(rate < 0)) {
		assert(rate < 0); // We shouldn't fail for forward.
		warning("Cannot set custom rate to backwards");
		setReverse(false);
		targetRate = 1;

		if (_playbackRate == targetRate)
			return;
	}

	if (_playbackRate != 0)
		_lastTimeChange = getTime();

	_playbackRate = targetRate;
	_startTime = g_system->getMillis();

	// Adjust start time if we've seeked to something besides zero time
	if (_lastTimeChange != 0)
		_startTime -= (_lastTimeChange.msecs() / _playbackRate).toInt();

	startAudio();
}
Example no. 17
/*******************************************************************************
 * Initializes the top buttons.
 */
QWidget*
MainWindow::initButtons(QWidget* parent)
{
	QWidget* widget = new QWidget(parent);

	QPushButton* startAudioButton = new QPushButton(tr("(Re)start"), widget);
	QPushButton* stopAudioButton  = new QPushButton(tr("Stop"), widget);
	QPushButton* reloadButton     = new QPushButton(tr("Reload Configuration"), widget);
	QPushButton* analysisButton   = new QPushButton(tr("Analysis"), widget);

	QHBoxLayout* layout = new QHBoxLayout(widget);
	layout->setContentsMargins(0, 0, 0, 0);
	layout->addWidget(startAudioButton);
	layout->addWidget(stopAudioButton);
	layout->addWidget(reloadButton);
	layout->addWidget(analysisButton);

	connect(startAudioButton, SIGNAL(clicked()), this, SLOT(startAudio()));
	connect(stopAudioButton , SIGNAL(clicked()), this, SLOT(stopAudio()));
	connect(reloadButton    , SIGNAL(clicked()), this, SLOT(reload()));
	connect(analysisButton  , SIGNAL(clicked()), this, SLOT(showAnalysisWindow()));

	return widget;
}
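For the old-style SIGNAL/SLOT connections above to resolve at runtime, MainWindow must declare matching slots. A minimal sketch of the declaration they presumably rely on; the real class header is not part of this example, so its exact shape is an assumption:

// Assumed shape of the MainWindow declaration behind the connect() calls above.
#include <QMainWindow>

class MainWindow : public QMainWindow {
	Q_OBJECT

public slots:
	void startAudio();
	void stopAudio();
	void reload();
	void showAnalysisWindow();

private:
	QWidget* initButtons(QWidget* parent);
};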
Example no. 18
void QuickTimeDecoder::seekToFrame(uint32 frame) {
	assert(_videoStreamIndex >= 0);
	assert(frame < _streams[_videoStreamIndex]->nb_frames);

	// Stop all audio (for now)
	stopAudio();

	// Track down the keyframe
	_curFrame = findKeyFrame(frame) - 1;
	while (_curFrame < (int32)frame - 1)
		decodeNextFrame();

	// Map out the starting point
	_nextFrameStartTime = 0;
	uint32 curFrame = 0;

	for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
		for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
			curFrame++;
			_nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
		}
	}

	// Adjust the video starting point
	const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
	_startTime = g_system->getMillis() - curVideoTime.msecs();
	resetPauseStartTime();

	// Adjust the audio starting point
	if (_audioStreamIndex >= 0) {
		_audioStartOffset = curVideoTime;

		// Re-create the audio stream
		STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
		_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);

		// First, we need to track down what audio sample we need
		Audio::Timestamp curAudioTime(0, _streams[_audioStreamIndex]->time_scale);
		uint sample = 0;
		bool done = false;
		for (int32 i = 0; i < _streams[_audioStreamIndex]->stts_count && !done; i++) {
			for (int32 j = 0; j < _streams[_audioStreamIndex]->stts_data[i].count; j++) {
				curAudioTime = curAudioTime.addFrames(_streams[_audioStreamIndex]->stts_data[i].duration);

				if (curAudioTime > curVideoTime) {
					done = true;
					break;
				}

				sample++;
			}
		}

		// Now to track down what chunk it's in
		_curAudioChunk = 0;
		uint32 totalSamples = 0;
		for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
			int sampleToChunkIndex = -1;

			for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
				if (i >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
					sampleToChunkIndex = j;

			assert(sampleToChunkIndex >= 0);

			totalSamples += _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;

			if (sample < totalSamples) {
				totalSamples -= _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
				break;
			}
		}
		
		// Reposition the audio stream
		readNextAudioChunk();
		if (sample != totalSamples) {
			// HACK: Skip a certain amount of samples from the stream
			// (There's got to be a better way to do this!)
			int16 *tempBuffer = new int16[sample - totalSamples];
			_audStream->readBuffer(tempBuffer, sample - totalSamples);
			delete[] tempBuffer;
			debug(3, "Skipping %d audio samples", sample - totalSamples);
		}
		
		// Restart the audio
		startAudio();
	}
}
Example no. 19
void SdlMixerManager::init() {
	// Start SDL Audio subsystem
	if (SDL_InitSubSystem(SDL_INIT_AUDIO) == -1) {
		error("Could not initialize SDL: %s", SDL_GetError());
	}

#if SDL_VERSION_ATLEAST(2, 0, 0)
	const char *sdlDriverName = SDL_GetCurrentAudioDriver();
#else
	const int maxNameLen = 20;
	char sdlDriverName[maxNameLen];
	sdlDriverName[0] = '\0';
	SDL_AudioDriverName(sdlDriverName, maxNameLen);
#endif
	debug(1, "Using SDL Audio Driver \"%s\"", sdlDriverName);

	// Get the desired audio specs
	SDL_AudioSpec desired = getAudioSpec(SAMPLES_PER_SEC);

	// Needed as SDL_OpenAudio as of SDL-1.2.14 mutates fields in
	// "desired" if used directly.
	SDL_AudioSpec fmt = desired;

	// Start SDL audio with the desired specs
	if (SDL_OpenAudio(&fmt, &_obtained) != 0) {
		warning("Could not open audio device: %s", SDL_GetError());

		// The mixer is not marked as ready
		_mixer = new Audio::MixerImpl(g_system, desired.freq);
		return;
	}

	// The obtained sample format is not supported by the mixer, call
	// SDL_OpenAudio again with NULL as the second argument to force
	// SDL to do resampling to the desired audio spec.
	if (_obtained.format != desired.format) {
		debug(1, "SDL mixer sound format: %d differs from desired: %d", _obtained.format, desired.format);
		SDL_CloseAudio();

		if (SDL_OpenAudio(&fmt, NULL) != 0) {
			warning("Could not open audio device: %s", SDL_GetError());

			// The mixer is not marked as ready
			_mixer = new Audio::MixerImpl(g_system, desired.freq);
			return;
		}

		_obtained = desired;
	}

	debug(1, "Output sample rate: %d Hz", _obtained.freq);
	if (_obtained.freq != desired.freq)
		debug(1, "SDL mixer output sample rate: %d differs from desired: %d", _obtained.freq, desired.freq);

	debug(1, "Output buffer size: %d samples", _obtained.samples);
	if (_obtained.samples != desired.samples)
		debug(1, "SDL mixer output buffer size: %d differs from desired: %d", _obtained.samples, desired.samples);

#ifndef __SYMBIAN32__
	// The SymbianSdlMixerManager does stereo->mono downmixing,
	// but otherwise we require stereo output.
	if (_obtained.channels != 2)
		error("SDL mixer output requires stereo output device");
#endif

	_mixer = new Audio::MixerImpl(g_system, _obtained.freq);
	assert(_mixer);
	_mixer->setReady(true);

	startAudio();
}
Example no. 20
// Responds to all POST requests.
// The handled URLs are:
// - /GetJson --> receive Faust code / compile data / send back the JSON interface
// - /GetJsonFromKey --> send back the JSON interface for a previously compiled key
// - /CreateInstance --> receive a factory index / create an instance
// - /StartAudio and /StopAudio --> start or stop audio for the given SHA key
// (/DeleteFactory --> receive a factory index / delete the factory; currently commented out below)
int Server::answer_post(MHD_Connection *connection, const char *url, const char *upload_data, size_t *upload_data_size, void **con_cls){
    
    struct connection_info_struct *con_info = (connection_info_struct*)*con_cls;
    
    if (0 != *upload_data_size) {
        
        MHD_post_process(con_info->fPostprocessor, upload_data, *upload_data_size);
        *upload_data_size = 0;
        
        return MHD_YES;
    } else {
        
        if(strcmp(url, "/GetJson") == 0){
            
            if (compile_Data(con_info)) {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_OK, "application/json"); 
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
        }
        else if(strcmp(url, "/GetJsonFromKey") == 0){
            
            if (getJsonFromKey(con_info)) {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_OK, "application/json"); 
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
        }
        
        else if(strcmp(url, "/CreateInstance") == 0){
            
            if (createInstance(con_info)) {
                return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
            
        }
//        else if(strcmp(url, "/DeleteFactory") == 0){
//                    
//            llvm_dsp_factory* toDelete = fAvailableFactories[con_info->fSHAKey];
//            
//            if (toDelete) {
//                
//                fAvailableFactories.erase(con_info->fSHAKey);
//                deleteSlaveDSPFactory(toDelete);
//                
//                return send_page(connection, "", 0, MHD_HTTP_OK, "application/html"); 
//            } else {
//                return send_page(connection, "", 0, MHD_HTTP_BAD_REQUEST, "text/html"); 
//            }
//        }
        else if(strcmp(url, "/StartAudio") == 0){
            
            startAudio(con_info->fSHAKey);
            return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
        }
        else if(strcmp(url, "/StopAudio") == 0){
            
            stopAudio(con_info->fSHAKey);
            return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
        }
        else{
            return send_page(connection, "", 0, MHD_HTTP_BAD_REQUEST, "text/html"); 
        }
    }
}
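A hypothetical client for the /StartAudio route handled above, written with libcurl; the host, port, and the "shaKey" form-field name are assumptions made purely for illustration:

// Hypothetical libcurl client for the /StartAudio route; host, port, and the
// "shaKey" field name are illustrative assumptions, not the server's real API.
#include <curl/curl.h>
#include <string>

bool postStartAudio(const std::string &shaKey) {
	CURL *curl = curl_easy_init();
	if (!curl)
		return false;

	std::string fields = "shaKey=" + shaKey;

	curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:7777/StartAudio");
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, fields.c_str());

	CURLcode res = curl_easy_perform(curl);
	curl_easy_cleanup(curl);
	return res == CURLE_OK;
}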
Example no. 21
void QuickTimeDecoder::init() {
	// Remove non-Video/Audio streams
	for (uint32 i = 0; i < _numStreams;) {
		if (_streams[i]->codec_type == CODEC_TYPE_MOV_OTHER) {
			delete _streams[i];
			for (uint32 j = i + 1; j < _numStreams; j++)
				_streams[j - 1] = _streams[j];
			_numStreams--;
		} else
			i++;
	}

	// Adjust time/duration
	for (uint32 i = 0; i < _numStreams; i++) {
		MOVStreamContext *sc = _streams[i];

		if (!sc->time_rate)
			sc->time_rate = 1;

		if (!sc->time_scale)
			sc->time_scale = _timeScale;

		sc->duration /= sc->time_rate;

		if (sc->codec_type == CODEC_TYPE_VIDEO && _videoStreamIndex < 0)
			_videoStreamIndex = i;
		else if (sc->codec_type == CODEC_TYPE_AUDIO && _audioStreamIndex < 0)
			_audioStreamIndex = i;
	}

	// Initialize audio, if present
	if (_audioStreamIndex >= 0) {
		STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];

		if (checkAudioCodecSupport(entry->codecTag)) {
			_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
			_curAudioChunk = 0;

			// Make sure the bits per sample transfers to the sample size
			if (entry->codecTag == MKID_BE('raw ') || entry->codecTag == MKID_BE('twos'))
				_streams[_audioStreamIndex]->sample_size = (entry->bitsPerSample / 8) * entry->channels;

			startAudio();
		}

		_audioStartOffset = Audio::Timestamp(0);
	}

	// Initialize video, if present
	if (_videoStreamIndex >= 0) {
		for (uint32 i = 0; i < _streams[_videoStreamIndex]->stsdEntryCount; i++) {
			STSDEntry *entry = &_streams[_videoStreamIndex]->stsdEntries[i];
			entry->videoCodec = createCodec(entry->codecTag, entry->bitsPerSample & 0x1F);
		}

		if (getScaleFactorX() != 1 || getScaleFactorY() != 1) {
			// We have to initialize the scaled surface
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(getWidth(), getHeight(), getPixelFormat().bytesPerPixel);
		}
	}
}