Buffer* LoadOGG(const char* name, const void* data, size_t size)
	{
		Buffer* b = new Buffer(name);
		OggStaticFile fp(data,size);

		long bytes;
		std::vector<char> buffer;
		int bitStream;
		char arr[OGG_STATIC_BUFFER_SIZE] = {0};
		OggVorbis_File oggStream;
		vorbis_info* vorbisInfo;
		ALenum format;
		ALsizei freq;
		ALint isize;
		ALint d = 1;

		if (ov_open_callbacks(&fp,&oggStream,NULL,0,OggStaticGetCallback()) != 0)
		{
			freeslw::ErrorPrintf("Failed to load \"%s\"",name);
			delete b; return 0;
		}

		alGenBuffers(1,&b->bufferId[0]);
		if (b->bufferId[0] == NO_BUFFER)
		{
			freeslw::ErrorPrintf("Failed to create buffer for \"%s\"",name);
			delete b; return 0;
		}

		vorbisInfo = ov_info(&oggStream,-1);
		format = (vorbisInfo->channels == 1)? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16;
		freq = vorbisInfo->rate;

		do
		{
			// ov_read(..., 0, 2, 1, ...) requests little-endian, 16-bit, signed samples
			bytes = ov_read(&oggStream,arr,OGG_STATIC_BUFFER_SIZE, 0, 2, 1, &bitStream);
			if (bytes > 0) buffer.insert(buffer.end(),arr,arr + bytes); // negative values signal a decode error
		} while (bytes > 0);

		ov_clear(&oggStream);

		alBufferData(b->bufferId[0],format,&buffer[0],(ALsizei)buffer.size(),freq);
		alGetBufferi(b->bufferId[0],AL_SIZE,&isize);

			 if (format == AL_FORMAT_MONO8) b->format = freeslw::TA_MONO8;
		else if (format == AL_FORMAT_MONO16) b->format = freeslw::TA_MONO16;
		else if (format == AL_FORMAT_STEREO8) b->format = freeslw::TA_STEREO8;
		else if (format == AL_FORMAT_STEREO16) b->format = freeslw::TA_STEREO16;
		else freeslw::ErrorPrintf("Failed to retrieve a valid sound format (format = %d)",format);

			 if (b->format == freeslw::TA_MONO16 || b->format == freeslw::TA_STEREO8) d = 2; // bytes per sample frame
		else if (b->format == freeslw::TA_STEREO16) d = 4;

		if (freq == 0)
		{
			freeslw::ErrorPrintf("\"%s\": Frequency = 0",name);
			freq = 1;
		}

		b->frequency = (int)freq;
		b->length = (isize * 1000) / (freq * d);
		b->activeBuffer = 0;
		b->numBuffers = 1;

		return b;
	}
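
The function above follows the simplest static-buffer pattern: decode the whole Ogg stream into one PCM vector with ov_read(), then upload it with a single alBufferData() call. A minimal playback sketch for such a buffer, assuming b is the Buffer returned by LoadOGG (the source handling is not part of the original class):

	ALuint source = 0;
	alGenSources(1, &source);                      // one source per playing voice
	alSourcei(source, AL_BUFFER, b->bufferId[0]);  // attach the fully decoded PCM
	alSourcePlay(source);                          // returns immediately; poll AL_SOURCE_STATE for completion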
//------------------------------------------------------------
bool ofSoundPlayerExtended::load(string fileName, bool is_stream){
    
    string ext = ofToLower(ofFilePath::getFileExt(fileName));
    if(ext != "wav" && ext != "aif" && ext != "aiff" && ext != "mp3"){
        ofLogError("Sound player can only load .wav .aiff or .mp3 files");
        return false;
    }
    
    fileName = ofToDataPath(fileName);
    
    bLoadedOk = false;
    bMultiPlay = false;
    isStreaming = is_stream;
    
    // [1] init sound systems, if necessary
    initialize();
    
    // [2] try to unload any previously loaded sounds
    // & prevent user-created memory leaks
    // if they call "loadSound" repeatedly, for example
    
    unload();
    
    ALenum format=AL_FORMAT_MONO16;
    
    if(!isStreaming || ext == "mp3"){ // mp3s don't stream cause they gotta be decoded
        readFile(fileName, buffer);
    }else{
        stream(fileName, buffer);
    }
    
    if(channels == 0){
        ofLogError("ofSoundPlayerExtended -- File not found");
        return false;
    }
    
    int numFrames = buffer.size()/channels;
    if(isStreaming){
        buffers.resize(channels*2);
    }else{
        buffers.resize(channels);
    }
    alGenBuffers(buffers.size(), &buffers[0]);
    if(channels==1){
        sources.resize(1);
        alGenSources(1, &sources[0]);
        if (alGetError() != AL_NO_ERROR){
            ofLog(OF_LOG_WARNING,"ofSoundPlayerExtended: openAL error reported generating sources for " + fileName);
            return false;
        }
        
        for(int i=0; i<(int)buffers.size(); i++){
            alBufferData(buffers[i],format,&buffer[0],buffer.size()*2,samplerate);
            if (alGetError() != AL_NO_ERROR){
                ofLog(OF_LOG_ERROR,"ofSoundPlayerExtended: error creating buffer");
                return false;
            }
            if(isStreaming){
                stream(fileName,buffer);
            }
        }
        if(isStreaming){
            alSourceQueueBuffers(sources[0],buffers.size(),&buffers[0]);
        }else{
            alSourcei (sources[0], AL_BUFFER,   buffers[0]);
        }
        
        alSourcef (sources[0], AL_PITCH,    1.0f);
        alSourcef (sources[0], AL_GAIN,     1.0f);
        alSourcef (sources[0], AL_ROLLOFF_FACTOR,  0.0);
        alSourcei (sources[0], AL_SOURCE_RELATIVE, AL_TRUE);
    }else{
        vector<vector<short> > multibuffer;
        multibuffer.resize(channels);
        sources.resize(channels);
        alGenSources(channels, &sources[0]);
        if(isStreaming){
            for(int s=0; s<2;s++){
                for(int i=0;i<channels;i++){
                    multibuffer[i].resize(buffer.size()/channels);
                    for(int j=0;j<numFrames;j++){
                        multibuffer[i][j] = buffer[j*channels+i];
                    }
                    alBufferData(buffers[s*2+i],format,&multibuffer[i][0],buffer.size()/channels*2,samplerate);
                    if (alGetError() != AL_NO_ERROR){
                        ofLog(OF_LOG_ERROR,"ofSoundPlayerExtended: error creating stereo buffers for " + fileName);
                        return false;
                    }
                    alSourceQueueBuffers(sources[i],1,&buffers[s*2+i]);
                    stream(fileName,buffer);
                }
            }
        }else{
            for(int i=0;i<channels;i++){
                multibuffer[i].resize(buffer.size()/channels);
                for(int j=0;j<numFrames;j++){
                    multibuffer[i][j] = buffer[j*channels+i];
                }
                alBufferData(buffers[i],format,&multibuffer[i][0],buffer.size()/channels*2,samplerate);
                if (alGetError() != AL_NO_ERROR){
                    ofLog(OF_LOG_ERROR,"ofSoundPlayerExtended: error creating stereo buffers for " + fileName);
                    return false;
                }
                alSourcei (sources[i], AL_BUFFER,   buffers[i]   );
            }
        }
        
        for(int i=0;i<channels;i++){
            if (alGetError() != AL_NO_ERROR){
                ofLog(OF_LOG_ERROR,"ofSoundPlayerExtended: error creating stereo sources for " + fileName);
                return false;
            }
            
            // only stereo panning
            if(i==0){
                float pos[3] = {-1,0,0};
                alSourcefv(sources[i],AL_POSITION,pos);
            }else{
                float pos[3] = {1,0,0};
                alSourcefv(sources[i],AL_POSITION,pos);
            }
            alSourcef (sources[i], AL_ROLLOFF_FACTOR,  0.0);
            alSourcei (sources[i], AL_SOURCE_RELATIVE, AL_TRUE);
        }
    }
    //soundBuffer stuff---------------------
    currentSoundBuffer.setNumChannels(channels);
    currentSoundBuffer.setSampleRate(samplerate);
    currentSoundBuffer.clear();
    channelSoundBuffer.setNumChannels(1);
    channelSoundBuffer.setSampleRate(samplerate);
    channelSoundBuffer.clear();
    //--------------------------------------
    ofLogVerbose("ofSoundPlayerExtended: successfully loaded " + fileName);
    bLoadedOk = true;
    return true;
}
Example #3
	static ALuint createBufferFromOGG(const char *pszFilePath)
	{
		ALuint 			buffer;
		OggVorbis_File  ogg_file;
		vorbis_info*    info;
		ALenum 			format;
		int 			result;
		int 			section;
		int				err;
		unsigned int 	size = 0;

		if (ov_fopen(pszFilePath, &ogg_file) < 0)
		{
			ov_clear(&ogg_file);
			fprintf(stderr, "Could not open OGG file %s\n", pszFilePath);
			return -1;
		}

		info = ov_info(&ogg_file, -1);

		if (info->channels == 1)
			format = AL_FORMAT_MONO16;
		else
			format = AL_FORMAT_STEREO16;

		// size = #samples * #channels * 2 (for 16 bit)
		unsigned int data_size = ov_pcm_total(&ogg_file, -1) * info->channels * 2;
		char* data = new char[data_size];

		while (size < data_size)
		{
			result = ov_read(&ogg_file, data + size, data_size - size, 0, 2, 1, &section);
			if (result > 0)
			{
				size += result;
			}
			else if (result < 0)
			{
				delete [] data;
				ov_clear(&ogg_file); // release the decoder state before bailing out
				fprintf(stderr, "OGG file problem %s\n", pszFilePath);
				return -1;
			}
			else
			{
				break;
			}
		}

		if (size == 0)
		{
			delete [] data;
			ov_clear(&ogg_file);
			fprintf(stderr, "Unable to read OGG data\n");
			return -1;
		}

		// clear al errors
		checkALError("createBufferFromOGG");

	    // Load audio data into a buffer.
	    alGenBuffers(1, &buffer);

	    if (checkALError("createBufferFromOGG") != AL_NO_ERROR)
	    {
	        fprintf(stderr, "Couldn't generate a buffer for OGG file\n");
	        delete [] data;
	        ov_clear(&ogg_file);
	        return buffer;
	    }

		alBufferData(buffer, format, data, data_size, info->rate);
		checkALError("createBufferFromOGG");

		delete [] data;
		ov_clear(&ogg_file);

		return buffer;
	}
	//------------------------------------------------------------------------------
	bool CGUIMusicData_openal::LoadOggFile( const CGUIString& rFilename ) const
	{
		//open file
		FILE *pFile = fopen( rFilename.c_str(), "rb" );
		if( !pFile)
		{
			return false;
		}

		ov_callbacks sCallbacks;
		sCallbacks.read_func = ov_read_func;
		sCallbacks.seek_func = ov_seek_func;
		sCallbacks.close_func = ov_close_func;
		sCallbacks.tell_func = ov_tell_func;
		if (ov_open_callbacks(pFile, &m_aVorbisFile, NULL, 0, sCallbacks) != 0)
		{
			fclose( pFile );
			return false;
		}

		// Get some information about the file (Channels, Format, and Frequency)
		// (the original call passed &m_nFormat twice and left m_nChannels unset; the duplicate is assumed to be a typo)
		if( false == GetOggVorbisInfo( &m_aVorbisFile, &m_nFrequency, &m_nChannels, &m_nFormat, &m_nBufferSize ) )
		{
			ov_clear(&m_aVorbisFile);
			return false;
		}

		// Allocate a buffer to be used to store decoded data for all Buffers
		m_pDecodeBuffer = (char*)malloc(m_nBufferSize);
		if ( !m_pDecodeBuffer )
		{
			ov_clear(&m_aVorbisFile);
			return false;
		}

		// Generate a Source to playback the Buffers
		alGenSources(1, &m_nSourceId);
		if (alGetError() != AL_NO_ERROR)
		{
			return false;
		}

		// Generate some AL Buffers for streaming
		alGenBuffers( GUI_MUSIC_NUMBUFFERS, m_nBuffers );
		if (alGetError() != AL_NO_ERROR)
		{
			return false;
		}

		// Fill all the Buffers with decoded audio data from the OggVorbis file
		for (int iLoop = 0; iLoop < GUI_MUSIC_NUMBUFFERS; iLoop++)
		{
			unsigned long ulBytesWritten = DecodeOggVorbis(&m_aVorbisFile, m_pDecodeBuffer, m_nBufferSize, m_nChannels);
			if (ulBytesWritten)
			{
				alBufferData(m_nBuffers[iLoop], m_nFormat, m_pDecodeBuffer, ulBytesWritten, m_nFrequency);
				alSourceQueueBuffers(m_nSourceId, 1, &m_nBuffers[iLoop]);
			}
		}

		return true;
	}
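
LoadOggFile() above only primes the queue with GUI_MUSIC_NUMBUFFERS buffers; streamed playback also needs a periodic refill step. A sketch of that step, assuming the same members and the DecodeOggVorbis() helper used above (the Update() method name itself is hypothetical):

	void CGUIMusicData_openal::Update()
	{
		ALint nProcessed = 0;
		alGetSourcei(m_nSourceId, AL_BUFFERS_PROCESSED, &nProcessed);
		while (nProcessed-- > 0)
		{
			ALuint nBuffer = 0;
			alSourceUnqueueBuffers(m_nSourceId, 1, &nBuffer); // reclaim a finished buffer
			unsigned long ulBytesWritten = DecodeOggVorbis(&m_aVorbisFile, m_pDecodeBuffer, m_nBufferSize, m_nChannels);
			if (ulBytesWritten)
			{
				alBufferData(nBuffer, m_nFormat, m_pDecodeBuffer, ulBytesWritten, m_nFrequency);
				alSourceQueueBuffers(m_nSourceId, 1, &nBuffer); // re-queue it behind the others
			}
		}
	}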
Example #5
void Sound::Stream::Init()
{
	buffers.resize(bufferCount);
	alGenBuffers(buffers.size(), &buffers[0]);

	uint8 data[8];
	stream.seekg(12, std::ios::cur);

	stream.read((char*)data, 8);
	if(data[0] != 'f' || data[1] != 'm' || data[2] != 't')
	{
		// Not wav format
		stream.close();
		return;
	}

	// reading time
	stream.read((char*)data, 2);
	if(data[0] != 1 || data[1] != 0)
	{
		// not PCM?
	}

	// channel count
	stream.read((char*)&channelCount, sizeof(channelCount));

	// sample frequency
	stream.read((char*)&frequency, sizeof(frequency));

	// skip block size and bytes/sec
	stream.seekg(6, std::ios::cur);

	// bits per sample (a 16-bit field; reading a full int32 would swallow part of the next chunk)
	int32 bits = 0;
	stream.read((char*)&bits, 2);

	if(bits == 8)
	{
		if(channelCount == 1)
		{
			format = AL_FORMAT_MONO8;
		}
		else
		{
			format = AL_FORMAT_STEREO8;
		}
	}
	else if(bits == 16)
	{
		if(channelCount == 1)
		{
			format = AL_FORMAT_MONO16;
		}
		else
		{
			format = AL_FORMAT_STEREO16;
		}
	}
	if(format == 0)
	{
		// unsupported format?
	}

	int ret = 0;
//	temp = malloc(4096);

	for(uint i = 0; i < buffers.size(); ++i)
	{
		stream.read((char*)temp, 4096);
		ret = (int)stream.gcount(); // number of bytes actually read this iteration
		alBufferData(buffers[i], format, temp, ret, frequency);
	}
	// check errors?

	// @todo Get source id here somehow
	alSourceQueueBuffers(source, buffers.size(), &buffers[0]);
	alSourcePlay(source);
	// check errors
}
FALSoundBuffer* FALSoundBuffer::CreateNativeBuffer( FALAudioDevice* AudioDevice, USoundWave* Wave)
{
	SCOPE_CYCLE_COUNTER( STAT_AudioResourceCreationTime );

	// This code is not relevant for now on HTML5 but adding this for consistency with other platforms.
	// Check to see if thread has finished decompressing on the other thread

	if (Wave->AudioDecompressor != NULL)
	{
		Wave->AudioDecompressor->EnsureCompletion();

		// Remove the decompressor
		delete Wave->AudioDecompressor;
		Wave->AudioDecompressor = NULL;
	}

	// Can't create a buffer without any source data
	if( Wave == NULL || Wave->NumChannels == 0 )
	{
		return( NULL );
	}

	Wave->InitAudioResource(AudioDevice->GetRuntimeFormat(Wave));

	FALSoundBuffer* Buffer = NULL;

	// Find the existing buffer if any
	if( Wave->ResourceID )
	{
		Buffer = static_cast<FALSoundBuffer*>(AudioDevice->WaveBufferMap.FindRef( Wave->ResourceID ));
	}

	if( Buffer == NULL )
	{
		// Create new buffer.
		Buffer = new FALSoundBuffer( AudioDevice );

		alGenBuffers( 1, Buffer->BufferIds );

		AudioDevice->alError( TEXT( "RegisterSound" ) );

		AudioDevice->TrackResource(Wave, Buffer);

		Buffer->InternalFormat = AudioDevice->GetInternalFormat( Wave->NumChannels );
		Buffer->NumChannels = Wave->NumChannels;
		Buffer->SampleRate = Wave->SampleRate;

		if (Wave->RawPCMData)
		{
			// upload it
			Buffer->BufferSize = Wave->RawPCMDataSize;
			alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, Wave->RawPCMData, Wave->RawPCMDataSize, Buffer->SampleRate );

			// Free up the data if necessary
			if( Wave->bDynamicResource )
			{
				FMemory::Free( Wave->RawPCMData );
				Wave->RawPCMData = NULL;
				Wave->bDynamicResource = false;
			}
		}
		else
		{
			// get the raw data
			uint8* SoundData = ( uint8* )Wave->RawData.Lock( LOCK_READ_ONLY );
			// it's (possibly) a pointer to a wave file, so skip over the header
			int SoundDataSize = Wave->RawData.GetBulkDataSize();

			// is there a wave header?
			FWaveModInfo WaveInfo;
			if (WaveInfo.ReadWaveInfo(SoundData, SoundDataSize))
			{
				// if so, modify the location and size of the sound data based on header
				SoundData = WaveInfo.SampleDataStart;
				SoundDataSize = WaveInfo.SampleDataSize;
			}
			// let the Buffer know the final size
			Buffer->BufferSize = SoundDataSize;

			// upload it
			alBufferData( Buffer->BufferIds[0], Buffer->InternalFormat, SoundData, Buffer->BufferSize, Buffer->SampleRate );
			// unload it
			Wave->RawData.Unlock();
		}

		if( AudioDevice->alError( TEXT( "RegisterSound (buffer data)" ) ) || ( Buffer->BufferSize == 0 ) )
		{
			Buffer->InternalFormat = 0;
		}

		if( Buffer->InternalFormat == 0 )
		{
			UE_LOG ( LogAudio, Log,TEXT( "Audio: sound format not supported for '%s' (%d)" ), *Wave->GetName(), Wave->NumChannels );
			delete Buffer;
			Buffer = NULL;
		}
	}

	return Buffer;
}
SoundBuffer* loadOggFile(const std::string &filepath)
{
	int endian = 0; // 0 for Little-Endian, 1 for Big-Endian
	int bitStream;
	long bytes;
	char array[BUFFER_SIZE]; // Local fixed size array
	vorbis_info *pInfo;
	OggVorbis_File oggFile;
	
	// Do a dumb-ass static string copy for old versions of ov_fopen
	// because they expect a non-const char*
	char nonconst[10000];
	snprintf(nonconst, 10000, "%s", filepath.c_str());
	// Try opening the given file
	//if(ov_fopen(filepath.c_str(), &oggFile) != 0)
	if(ov_fopen(nonconst, &oggFile) != 0)
	{
		infostream<<"Audio: Error opening "<<filepath<<" for decoding"<<std::endl;
		return NULL;
	}

	SoundBuffer *snd = new SoundBuffer;

	// Get some information about the OGG file
	pInfo = ov_info(&oggFile, -1);

	// Check the number of channels... always use 16-bit samples
	if(pInfo->channels == 1)
		snd->format = AL_FORMAT_MONO16;
	else
		snd->format = AL_FORMAT_STEREO16;

	// The frequency of the sampling rate
	snd->freq = pInfo->rate;

	// Keep reading until all is read
	do
	{
		// Read up to a buffer's worth of decoded sound data
		bytes = ov_read(&oggFile, array, BUFFER_SIZE, endian, 2, 1, &bitStream);

		if(bytes < 0)
		{
			ov_clear(&oggFile);
			delete snd; // avoid leaking the partially filled SoundBuffer
			infostream<<"Audio: Error decoding "<<filepath<<std::endl;
			return NULL;
		}

		// Append to end of buffer
		snd->buffer.insert(snd->buffer.end(), array, array + bytes);
	} while (bytes > 0);

	alGenBuffers(1, &snd->buffer_id);
	alBufferData(snd->buffer_id, snd->format,
			&(snd->buffer[0]), snd->buffer.size(),
			snd->freq);

	ALenum error = alGetError();

	if(error != AL_NO_ERROR){
		infostream<<"Audio: OpenAL error: "<<alErrorString(error)
				<<"preparing sound buffer"<<std::endl;
	}

	infostream<<"Audio file "<<filepath<<" loaded"<<std::endl;

	// Clean up!
	ov_clear(&oggFile);

	return snd;
}
Example #8
void OpenALStream::SoundLoop()
{
	Common::SetCurrentThreadName("Audio thread - openal");

	bool surround_capable = Core::g_CoreStartupParameter.bDPL2Decoder;
#if defined(__APPLE__)
	bool float32_capable = false;
	const ALenum AL_FORMAT_STEREO_FLOAT32 = 0;
	// OSX does not have the alext AL_FORMAT_51CHN32 yet.
	surround_capable = false;
	const ALenum AL_FORMAT_51CHN32 = 0;
#else
	bool float32_capable = true;
#endif

	u32 ulFrequency = m_mixer->GetSampleRate();
	numBuffers = Core::g_CoreStartupParameter.iLatency + 2; // OpenAL requires a minimum of two buffers

	memset(uiBuffers, 0, numBuffers * sizeof(ALuint));
	uiSource = 0;

	// Generate some AL Buffers for streaming
	alGenBuffers(numBuffers, (ALuint *)uiBuffers);
	// Generate a Source to playback the Buffers
	alGenSources(1, &uiSource);

	// Short Silence
	memset(sampleBuffer, 0, OAL_MAX_SAMPLES * numBuffers * FRAME_SURROUND_FLOAT);
	memset(realtimeBuffer, 0, OAL_MAX_SAMPLES * FRAME_STEREO_SHORT);
	for (int i = 0; i < numBuffers; i++)
	{
		if (surround_capable)
			alBufferData(uiBuffers[i], AL_FORMAT_51CHN32, sampleBuffer, 4 * FRAME_SURROUND_FLOAT, ulFrequency);
		else
			alBufferData(uiBuffers[i], AL_FORMAT_STEREO16, realtimeBuffer, 4 * FRAME_STEREO_SHORT, ulFrequency);
	}
	alSourceQueueBuffers(uiSource, numBuffers, uiBuffers);
	alSourcePlay(uiSource);

	// Set the default sound volume as saved in the config file.
	alSourcef(uiSource, AL_GAIN, fVolume);

	// TODO: Error handling
	//ALenum err = alGetError();

	ALint iBuffersFilled = 0;
	ALint iBuffersProcessed = 0;
	ALint iState = 0;
	ALuint uiBufferTemp[OAL_MAX_BUFFERS] = {0};

	soundTouch.setChannels(2);
	soundTouch.setSampleRate(ulFrequency);
	soundTouch.setTempo(1.0);
	soundTouch.setSetting(SETTING_USE_QUICKSEEK, 0);
	soundTouch.setSetting(SETTING_USE_AA_FILTER, 0);
	soundTouch.setSetting(SETTING_SEQUENCE_MS, 1);
	soundTouch.setSetting(SETTING_SEEKWINDOW_MS, 28);
	soundTouch.setSetting(SETTING_OVERLAP_MS, 12);

	while (!threadData)
	{
		// num_samples_to_render in this update - depends on SystemTimers::AUDIO_DMA_PERIOD.
		const u32 stereo_16_bit_size = 4;
		const u32 dma_length = 32;
		const u64 ais_samples_per_second = 48000 * stereo_16_bit_size;
		u64 audio_dma_period = SystemTimers::GetTicksPerSecond() / (AudioInterface::GetAIDSampleRate() * stereo_16_bit_size / dma_length);
		u64 num_samples_to_render = (audio_dma_period * ais_samples_per_second) / SystemTimers::GetTicksPerSecond();

		unsigned int numSamples = (unsigned int)num_samples_to_render;
		unsigned int minSamples = surround_capable ? 240 : 0; // DPL2 accepts 240 samples minimum (FWRDURATION)

		numSamples = (numSamples > OAL_MAX_SAMPLES) ? OAL_MAX_SAMPLES : numSamples;
		numSamples = m_mixer->Mix(realtimeBuffer, numSamples, false);

		// Convert the samples from short to float
		float dest[OAL_MAX_SAMPLES * STEREO_CHANNELS];
		for (u32 i = 0; i < numSamples * STEREO_CHANNELS; ++i)
			dest[i] = (float)realtimeBuffer[i] / (1 << 16);

		soundTouch.putSamples(dest, numSamples);

		if (iBuffersProcessed == iBuffersFilled)
		{
			alGetSourcei(uiSource, AL_BUFFERS_PROCESSED, &iBuffersProcessed);
			iBuffersFilled = 0;
		}

		if (iBuffersProcessed)
		{
			float rate = m_mixer->GetCurrentSpeed();
			if (rate <= 0)
			{
				Core::RequestRefreshInfo();
				rate = m_mixer->GetCurrentSpeed();
			}

			// Place a lower limit of 10% speed.  When a game boots up, there will be
			// many silence samples.  These do not need to be timestretched.
			if (rate > 0.10)
			{
				// Adjust SETTING_SEQUENCE_MS to balance between lag vs hollow audio
				soundTouch.setSetting(SETTING_SEQUENCE_MS, (int)(1 / (rate * rate)));
				soundTouch.setTempo(rate);
				if (rate > 10)
				{
					soundTouch.clear();
				}
			}

			unsigned int nSamples = soundTouch.receiveSamples(sampleBuffer, OAL_MAX_SAMPLES * numBuffers);

			if (nSamples <= minSamples)
				continue;

			// Remove the Buffer from the Queue.  (uiBuffer contains the Buffer ID for the unqueued Buffer)
			if (iBuffersFilled == 0)
			{
				alSourceUnqueueBuffers(uiSource, iBuffersProcessed, uiBufferTemp);
				ALenum err = alGetError();
				if (err != 0)
				{
					ERROR_LOG(AUDIO, "Error unqueuing buffers: %08x", err);
				}
			}

			if (surround_capable)
			{
				float dpl2[OAL_MAX_SAMPLES * OAL_MAX_BUFFERS * SURROUND_CHANNELS];
				dpl2decode(sampleBuffer, nSamples, dpl2);
				alBufferData(uiBufferTemp[iBuffersFilled], AL_FORMAT_51CHN32, dpl2, nSamples * FRAME_SURROUND_FLOAT, ulFrequency);
				ALenum err = alGetError();
				if (err == AL_INVALID_ENUM)
				{
					// 5.1 is not supported by the host, fallback to stereo
					WARN_LOG(AUDIO, "Unable to set 5.1 surround mode.  Updating OpenAL Soft might fix this issue.");
					surround_capable = false;
				}
				else if (err != 0)
				{
					ERROR_LOG(AUDIO, "Error occurred while buffering data: %08x", err);
				}
			}

			else
			{
				if (float32_capable)
				{
					alBufferData(uiBufferTemp[iBuffersFilled], AL_FORMAT_STEREO_FLOAT32, sampleBuffer, nSamples * FRAME_STEREO_FLOAT, ulFrequency);
					ALenum err = alGetError();
					if (err == AL_INVALID_ENUM)
					{
						float32_capable = false;
					}
					else if (err != 0)
					{
						ERROR_LOG(AUDIO, "Error occurred while buffering float32 data: %08x", err);
					}
				}

				else
				{
					// Convert the samples from float to short
					short stereo[OAL_MAX_SAMPLES * STEREO_CHANNELS * OAL_MAX_BUFFERS];
					for (u32 i = 0; i < nSamples * STEREO_CHANNELS; ++i)
						stereo[i] = (short)((float)sampleBuffer[i] * (1 << 16));

					alBufferData(uiBufferTemp[iBuffersFilled], AL_FORMAT_STEREO16, stereo, nSamples * FRAME_STEREO_SHORT, ulFrequency);
				}
			}

			alSourceQueueBuffers(uiSource, 1, &uiBufferTemp[iBuffersFilled]);
			ALenum err = alGetError();
			if (err != 0)
			{
				ERROR_LOG(AUDIO, "Error queuing buffers: %08x", err);
			}
			iBuffersFilled++;

			if (iBuffersFilled == numBuffers)
			{
				alSourcePlay(uiSource);
				err = alGetError();
				if (err != 0)
				{
					ERROR_LOG(AUDIO, "Error occurred during playback: %08x", err);
				}
			}

			alGetSourcei(uiSource, AL_SOURCE_STATE, &iState);
			if (iState != AL_PLAYING)
			{
				// Buffer underrun occurred, resume playback
				alSourcePlay(uiSource);
				err = alGetError();
				if (err != 0)
				{
					ERROR_LOG(AUDIO, "Error occurred resuming playback: %08x", err);
				}
			}
		}
		else
		{
			soundSyncEvent.Wait();
		}
	}
}
Example #9
int main(int argc, char *argv[])
{
	ALuint buffer[kNumBuffers], source[kNumSources];
	ALuint freq;
	ALenum format;
	ALvoid *data;
	ALsizei i, size;


	/* listener parameters */
	ALfloat listenerOrientation[] = { 0.0f, 0.0f, 1.0f,  0.0f, 1.0f, 0.0f };
	ALfloat listenerPosition[] = { 0.0f, 0.0f, 0.0f };
	ALfloat listenerVelocity[] = { 0.0f, 0.0f, 0.0f };

	/* source parameters */
	ALfloat sourcePosition[] = { 0.0f, 0.0f, 1.0f };
	ALfloat sourceVelocity[] = { 0.0f, 0.0f, 0.0f };
	ALfloat sourcePitch = 1.0f;
	ALfloat sourceGain = 1.0f;



	/* initialize */
	alutInit(&argc, argv);

	/* create buffers and sources */
	alGenBuffers(kNumBuffers, buffer);
	if (alGetError() != AL_NO_ERROR)
		quit("Can't create buffers");

	alGenSources(kNumSources, source);
	if (alGetError() != AL_NO_ERROR)
		quit("Can't create sources");

	/* load buffers with data */
	alutLoadWAV(kWaveFileName, &format, &data, &size, &freq);
	for (i = 0; i < kNumBuffers; i++) {
		alBufferData(buffer[i], format, data, size, freq);
	}
	free(data);


	/* initialize listener */
	alListenerfv(AL_POSITION, listenerPosition);
	alListenerfv(AL_VELOCITY, listenerVelocity);
	alListenerfv(AL_ORIENTATION, listenerOrientation);

	/* initialize sources */
	for (i = 0; i < kNumSources; i++) {
		alSourcefv(source[i], AL_POSITION, sourcePosition);
		alSourcefv(source[i], AL_VELOCITY, sourceVelocity);

		alSourcef(source[i], AL_PITCH, sourcePitch);
		alSourcef(source[i], AL_GAIN, sourceGain);

		alSourcei(source[i], AL_BUFFER, buffer[i % kNumBuffers]);
		alSourcei(source[i], AL_LOOPING, AL_TRUE);
	}


	/* test pitch range [0.5, 2.0] */
	printf("Test range\n");
	for (sourcePitch = -1.0f; sourcePitch < 3.0f; sourcePitch += 0.1f) {
		for (i = 0; i < kNumSources; i++) {
			alSourcef(source[i], AL_PITCH, sourcePitch);
			if (alGetError() != AL_NO_ERROR) {
				if (sourcePitch >= 0.5f && sourcePitch <= 2.0f)
					quit("Pitch change failed");
			}
			else {
				if (!(sourcePitch >= 0.5f && sourcePitch <= 2.0f))
					quit("Pitch out of range accepted");
			}
		}
	}

	/* start the sources, pitch shift up, down and stop all */
	printf("Play\n");
	for (i = 0; i < kNumSources; i++) {
		alSourcePlay(source[i]);
	}

	printf("Pitch Up ");
	for (sourcePitch = 0.5f; sourcePitch < 2.0f; sourcePitch += 0.005f) {

		for (i = 0; i < kNumSources; i++)
			alSourcef(source[i], AL_PITCH, sourcePitch);

		printf(".");
		fflush(stdout);

		delay(20);
	}
	delay(2000);
	printf("\n");

	printf("Pitch Down ");
	for (sourcePitch = 2.0f; sourcePitch > 0.5f; sourcePitch -= 0.005f) {

		for (i = 0; i < kNumSources; i++)
			alSourcef(source[i], AL_PITCH, sourcePitch);

		printf(".");
		fflush(stdout);

		delay(20);
	}
	delay(2000);
	printf("\n");


	printf("Stop\n");
	for (i = 0; i < kNumSources; i++)
		alSourceStop(source[i]);


	/* delete buffers and sources */
	alDeleteSources(kNumSources, source);
	alDeleteBuffers(kNumBuffers, buffer);

	/* shutdown */
	alutExit();

	return 0;
}
Example #10
bool OpenALDevice::OpenALHandle::seek(float position)
{
	if(!m_status)
		return false;

	std::lock_guard<ILockable> lock(*m_device);

	if(!m_status)
		return false;

	if(m_isBuffered)
		alSourcef(m_source, AL_SEC_OFFSET, position);
	else
	{
		m_reader->seek((int)(position * m_reader->getSpecs().rate));
		m_eos = false;

		ALint info;

		alGetSourcei(m_source, AL_SOURCE_STATE, &info);

		// we need to stop playing sounds as well to clear the buffers
		// this might cause clicks, but fixes a bug regarding position determination
		if(info == AL_PAUSED || info == AL_PLAYING)
			alSourceStop(m_source);

		alSourcei(m_source, AL_BUFFER, 0);

		ALenum err;
		if((err = alGetError()) == AL_NO_ERROR)
		{
			int length;
			DeviceSpecs specs = m_device->m_specs;
			specs.specs = m_reader->getSpecs();
			m_device->m_buffer.assureSize(m_device->m_buffersize * AUD_DEVICE_SAMPLE_SIZE(specs));

			for(m_current = 0; m_current < CYCLE_BUFFERS; m_current++)
			{
				length = m_device->m_buffersize;

				m_reader->read(length, m_eos, m_device->m_buffer.getBuffer());

				if(length == 0)
					break;

				alBufferData(m_buffers[m_current], m_format, m_device->m_buffer.getBuffer(), length * AUD_DEVICE_SAMPLE_SIZE(specs), specs.rate);

				if(alGetError() != AL_NO_ERROR)
					break;
			}

			if(m_loopcount != 0)
				m_eos = false;

			alSourceQueueBuffers(m_source, m_current, m_buffers);
		}

		alSourceRewind(m_source);
	}

	if(m_status == STATUS_STOPPED)
		m_status = STATUS_PAUSED;

	return true;
}
Example #11
void OpenALDevice::updateStreams()
{
	int length;

	ALint info;
	DeviceSpecs specs = m_specs;
	ALCenum cerr;
	std::list<std::shared_ptr<OpenALHandle> > stopSounds;
	std::list<std::shared_ptr<OpenALHandle> > pauseSounds;

	auto sleepDuration = std::chrono::milliseconds(20);

	for(;;)
	{
		lock();

		alcSuspendContext(m_context);
		cerr = alcGetError(m_device);
		if(cerr == ALC_NO_ERROR)
		{
			// for all sounds
			for(auto& sound : m_playingSounds)
			{
				// is it a streamed sound?
				if(!sound->m_isBuffered)
				{
					// check for buffer refilling
					alGetSourcei(sound->m_source, AL_BUFFERS_PROCESSED, &info);

					info += (OpenALHandle::CYCLE_BUFFERS - sound->m_current);

					if(info)
					{
						specs.specs = sound->m_reader->getSpecs();
						m_buffer.assureSize(m_buffersize * AUD_DEVICE_SAMPLE_SIZE(specs));

						// for all empty buffers
						while(info--)
						{
							// if there's still data to play back
							if(!sound->m_eos)
							{
								// read data
								length = m_buffersize;

								try
								{
									sound->m_reader->read(length, sound->m_eos, m_buffer.getBuffer());

									// looping necessary?
									if(length == 0 && sound->m_loopcount)
									{
										if(sound->m_loopcount > 0)
											sound->m_loopcount--;

										sound->m_reader->seek(0);

										length = m_buffersize;
										sound->m_reader->read(length, sound->m_eos, m_buffer.getBuffer());
									}
								}
								catch(Exception& e)
								{
									length = 0;
									std::cerr << "Caught exception while reading sound data during playback with OpenAL: " << e.getMessage() << std::endl;
								}

								if(sound->m_loopcount != 0)
									sound->m_eos = false;

								// read nothing?
								if(length == 0)
								{
									break;
								}

								ALuint buffer;

								if(sound->m_current < OpenALHandle::CYCLE_BUFFERS)
									buffer = sound->m_buffers[sound->m_current++];
								else
									alSourceUnqueueBuffers(sound->m_source, 1, &buffer);

								ALenum err;
								if((err = alGetError()) != AL_NO_ERROR)
								{
									sound->m_eos = true;
									break;
								}

								// fill with new data
								alBufferData(buffer, sound->m_format, m_buffer.getBuffer(), length * AUD_DEVICE_SAMPLE_SIZE(specs), specs.rate);

								if((err = alGetError()) != AL_NO_ERROR)
								{
									sound->m_eos = true;
									break;
								}

								// and queue again
								alSourceQueueBuffers(sound->m_source, 1,&buffer);
								if(alGetError() != AL_NO_ERROR)
								{
									sound->m_eos = true;
									break;
								}
							}
							else
								break;
						}
					}
				}

				// check if the sound has been stopped
				alGetSourcei(sound->m_source, AL_SOURCE_STATE, &info);

				if(info != AL_PLAYING)
				{
					// if it really stopped
					if(sound->m_eos && info != AL_INITIAL)
					{
						if(sound->m_stop)
							sound->m_stop(sound->m_stop_data);

						// pause or
						if(sound->m_keep)
							pauseSounds.push_back(sound);
						// stop
						else
							stopSounds.push_back(sound);
					}
					// continue playing
					else
						alSourcePlay(sound->m_source);
				}
			}

			for(auto& sound : pauseSounds)
				sound->pause(true);

			for(auto& sound : stopSounds)
				sound->stop();

			pauseSounds.clear();
			stopSounds.clear();

			alcProcessContext(m_context);
		}

		// stop thread
		if(m_playingSounds.empty() || (cerr != ALC_NO_ERROR))
		{
			m_playing = false;
			unlock();

			return;
		}

		unlock();

		std::this_thread::sleep_for(sleepDuration);
	}
}
void WaveFile::read(ALuint bufferID)
{
	// sf_read_int() returns the number of items read; alBufferData() expects a size in bytes and,
	// as its final argument, the sample rate. mSampleRate is assumed to be the rate recorded when
	// the file was opened (the original code passed the buffer size there).
	int bytesRead = static_cast<int>(sf_read_int(mSoundFile, mBuffer.get(), mBUFFER_SIZE)) * static_cast<int>(sizeof(int));
	alBufferData(bufferID, mFormat, mBuffer.get(), bytesRead, mSampleRate);
}
Example #13
void StreamingFile::fillNextBuffer(void* data, size_t len) {
  if(!data) return; // TODO: mark this audio as completed so it can be cleaned up
  alBufferData(m_albuffers[m_next_buffer], m_format, data, len, m_frequency);
  m_next_buffer ^= 1; // toggle between 0 and 1
}
Example #14
bool OpenALAudio::StreamContext::stream_data(int new_buffer_count)
{
   const size_t BUFFER_SIZE = 0x4000;

   /*
    * This constant determines how many milliseconds of audio data go into
    * one buffer.  The larger it is, the less probability of skipping, but
    * the longer the delay from calling stop() to the point when it
    * actually stops.
    */
   const size_t MAX_BUFFER_TIME_MS = 50;
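   /* Example (assuming a 44.1 kHz stream): 44100 * 50 / 1000 = 2205 frames per buffer,
    * i.e. about 8.6 KiB of 16-bit stereo data, comfortably below BUFFER_SIZE (0x4000). */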

   uint8_t data[BUFFER_SIZE];
   size_t frames_read;
   size_t max_frames;
   ALenum format;
   ALuint buf;
   ALint state;

   if (!this->streaming)
      return false;

   format = openal_format(this->stream);
   max_frames = this->stream->frame_rate() * MAX_BUFFER_TIME_MS / 1000;

   for (;;)
   {
      buf = 0;

      if (new_buffer_count > 0)
      {
	 alGenBuffers(1, &buf);
	 if (!check_al())
	    goto err;

	 new_buffer_count--;
      }
      else
      {
	 ALint processed;

	 alGetSourcei(this->source, AL_BUFFERS_PROCESSED, &processed);

	 if (processed == 0)
	    break;

	 alSourceUnqueueBuffers(this->source, 1, &buf);
	 if (!check_al())
	    goto err;
      }

      size_t space_frames = sizeof(data) / this->stream->frame_size();
      space_frames = MIN(space_frames, max_frames);
      frames_read = this->stream->read(data, space_frames);

      if (frames_read == 0)
      {
	 if (this->looping)
	 {
	    this->stream->seek(this->loop_start_frame);
	    frames_read = this->stream->read(data, space_frames);
	    if (frames_read == 0)
	    {
	       this->streaming = false;
	       break;
	    }
	 }
	 else
	 {
	    this->streaming = false;
	    break;
	 }
      }

      if (this->fade_frames != 0)
	 this->apply_fading(data, frames_read);

      alBufferData(buf, format, data,
		   frames_read * this->stream->frame_size(),
		   this->stream->frame_rate());
      if (!check_al())
	 goto err;

      alSourceQueueBuffers(this->source, 1, &buf);
      if (!check_al())
	 goto err;
   }

   if (!this->streaming)
   {
      alDeleteBuffers(1, &buf);
      check_al();
   }

   alGetSourcei(this->source, AL_SOURCE_STATE, &state);

   if (state != AL_PLAYING)
   {
      alSourcePlay(this->source);
      check_al();
   }

   return this->streaming;

err:
   if (buf != 0)
      alDeleteBuffers(1, &buf);

   this->streaming = false;
   return false;
}
Example #15
/* LoadBuffer loads the named audio file into an OpenAL buffer object, and
 * returns the new buffer ID. */
static ALuint LoadSound(const char *filename)
{
    ALenum err, format, type, channels;
    ALuint rate, buffer;
    size_t datalen;
    void *data;
    FilePtr sound;

    /* Open the audio file */
    sound = openAudioFile(filename, 1000);
    if(!sound)
    {
        fprintf(stderr, "Could not open audio in %s\n", filename);
        closeAudioFile(sound);
        return 0;
    }

    /* Get the sound format, and figure out the OpenAL format */
    if(getAudioInfo(sound, &rate, &channels, &type) != 0)
    {
        fprintf(stderr, "Error getting audio info for %s\n", filename);
        closeAudioFile(sound);
        return 0;
    }

    format = GetFormat(channels, type, NULL);
    if(format == AL_NONE)
    {
        fprintf(stderr, "Unsupported format (%s, %s) for %s\n",
                ChannelsName(channels), TypeName(type), filename);
        closeAudioFile(sound);
        return 0;
    }

    /* Decode the whole audio stream to a buffer. */
    data = decodeAudioStream(sound, &datalen);
    if(!data)
    {
        fprintf(stderr, "Failed to read audio from %s\n", filename);
        closeAudioFile(sound);
        return 0;
    }

    /* Buffer the audio data into a new buffer object, then free the data and
     * close the file. */
    buffer = 0;
    alGenBuffers(1, &buffer);
    alBufferData(buffer, format, data, datalen, rate);
    free(data);
    closeAudioFile(sound);

    /* Check if an error occurred, and clean up if so. */
    err = alGetError();
    if(err != AL_NO_ERROR)
    {
        fprintf(stderr, "OpenAL Error: %s\n", alGetString(err));
        if(buffer && alIsBuffer(buffer))
            alDeleteBuffers(1, &buffer);
        return 0;
    }

    return buffer;
}
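
A minimal caller sketch for LoadSound() (the file name below is illustrative); the function returns 0 on failure, so the ID can be checked before binding it to a source:

    ALuint buffer = LoadSound("sample.wav");  /* hypothetical file name */
    if(buffer != 0)
    {
        ALuint source = 0;
        alGenSources(1, &source);
        alSourcei(source, AL_BUFFER, buffer); /* static (non-streaming) playback */
        alSourcePlay(source);
    }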
Example #16
void VoiceMessagesLoader::onLoad(AudioData *audio) {
	bool started = false;
	int32 audioindex = -1;
	Loader *l = 0;
	Loaders::iterator j = _loaders.end();
	{
		QMutexLocker lock(&voicemsgsMutex);
		VoiceMessages *voice = audioVoice();
		if (!voice) return;

		for (int32 i = 0; i < AudioVoiceMsgSimultaneously; ++i) {
			VoiceMessages::Msg &m(voice->_data[i]);
			if (m.audio != audio || !m.loading) continue;

			audioindex = i;
			j = _loaders.find(audio);
			if (j != _loaders.end() && (j.value()->fname != m.fname || j.value()->data.size() != m.data.size())) {
				delete j.value();
				_loaders.erase(j);
				j = _loaders.end();
			}
			if (j == _loaders.end()) {
				l = (j = _loaders.insert(audio, new Loader())).value();
				l->fname = m.fname;
				l->data = m.data;
				
				int ret;
				if (m.data.isEmpty()) {
					l->file = op_open_file(m.fname.toUtf8().constData(), &ret);
				} else {
					l->file = op_open_memory((const unsigned char*)m.data.constData(), m.data.size(), &ret);
				}
				if (!l->file) {
					LOG(("Audio Error: op_open_file failed for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(ret));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				ogg_int64_t duration = op_pcm_total(l->file, -1);
				if (duration < 0) {
					LOG(("Audio Error: op_pcm_total failed to get full duration for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(duration));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				m.duration = duration;
				m.skipStart = 0;
				m.skipEnd = duration;
				m.position = 0;
				m.started = 0;
				started = true;
			} else {
				if (!m.skipEnd) continue;
				l = j.value();
			}
			break;
		}
	}

	if (j == _loaders.end()) {
		LOG(("Audio Error: trying to load part of audio, that is not playing at the moment"));
		emit error(audio);
		return;
	}
	if (started) {
		l->pcm_offset = op_pcm_tell(l->file);
		l->pcm_print_offset = l->pcm_offset - AudioVoiceMsgFrequency;
	}

	bool finished = false;
    DEBUG_LOG(("Audio Info: reading buffer for file '%1', data size '%2', current pcm_offset %3").arg(l->fname).arg(l->data.size()).arg(l->pcm_offset));

	QByteArray result;
	int64 samplesAdded = 0;
	while (result.size() < AudioVoiceMsgBufferSize) {
		opus_int16 pcm[AudioVoiceMsgFrequency * AudioVoiceMsgChannels];

		int ret = op_read_stereo(l->file, pcm, sizeof(pcm) / sizeof(*pcm));
		if (ret < 0) {
			{
				QMutexLocker lock(&voicemsgsMutex);
				VoiceMessages *voice = audioVoice();
				if (voice) {
					VoiceMessages::Msg &m(voice->_data[audioindex]);
					if (m.audio == audio) {
						m.state = VoiceMessageStopped;
					}
				}
			}
			LOG(("Audio Error: op_read_stereo failed, error code %1").arg(ret));
			return loadError(j);
		}

		int li = op_current_link(l->file);
		if (li != l->prev_li) {
			const OpusHead *head = op_head(l->file, li);
			const OpusTags *tags = op_tags(l->file, li);
			for (int32 ci = 0; ci < tags->comments; ++ci) {
				const char *comment = tags->user_comments[ci];
				if (opus_tagncompare("METADATA_BLOCK_PICTURE", 22, comment) == 0) {
					OpusPictureTag pic;
					int err = opus_picture_tag_parse(&pic, comment);
					if (err >= 0) {
						opus_picture_tag_clear(&pic);
					}
				}
			}
			if (!op_seekable(l->file)) {
				l->pcm_offset = op_pcm_tell(l->file) - ret;
			}
		}
		if (li != l->prev_li || l->pcm_offset >= l->pcm_print_offset + AudioVoiceMsgFrequency) {
			l->pcm_print_offset = l->pcm_offset;
		}
		l->pcm_offset = op_pcm_tell(l->file);

		if (!ret) {
			DEBUG_LOG(("Audio Info: read completed"));
			finished = true;
			break;
		}
		result.append((const char*)pcm, sizeof(*pcm) * ret * AudioVoiceMsgChannels);
		l->prev_li = li;
		samplesAdded += ret;

		{
			QMutexLocker lock(&voicemsgsMutex);
			VoiceMessages *voice = audioVoice();
			if (!voice) return;

			VoiceMessages::Msg &m(voice->_data[audioindex]);
			if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
				LOG(("Audio Error: playing changed while loading"));
				m.state = VoiceMessageStopped;
				return loadError(j);
			}
		}
	}

	QMutexLocker lock(&voicemsgsMutex);
	VoiceMessages *voice = audioVoice();
	if (!voice) return;

	VoiceMessages::Msg &m(voice->_data[audioindex]);
	if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
		LOG(("Audio Error: playing changed while loading"));
		m.state = VoiceMessageStopped;
		return loadError(j);
	}

	if (started) {
		if (m.source) {
			alSourceStop(m.source);
			for (int32 i = 0; i < 3; ++i) {
				if (m.samplesCount[i]) {
					alSourceUnqueueBuffers(m.source, 1, m.buffers + i);
					m.samplesCount[i] = 0;
				}
			}
			m.nextBuffer = 0;
		}
	}
	if (samplesAdded) {
		if (!m.source) {
			alGenSources(1, &m.source);
			alSourcef(m.source, AL_PITCH, 1.f);
			alSourcef(m.source, AL_GAIN, 1.f);
			alSource3f(m.source, AL_POSITION, 0, 0, 0);
			alSource3f(m.source, AL_VELOCITY, 0, 0, 0);
			alSourcei(m.source, AL_LOOPING, 0);
		}
		if (!m.buffers[m.nextBuffer]) alGenBuffers(3, m.buffers);
		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}

		if (m.samplesCount[m.nextBuffer]) {
			alSourceUnqueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
			m.skipStart += m.samplesCount[m.nextBuffer];
		}

		m.samplesCount[m.nextBuffer] = samplesAdded;
		alBufferData(m.buffers[m.nextBuffer], AL_FORMAT_STEREO16, result.constData(), result.size(), AudioVoiceMsgFrequency);
		alSourceQueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
		m.skipEnd -= samplesAdded;

		m.nextBuffer = (m.nextBuffer + 1) % 3;

		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}
	} else {
		finished = true;
	}
	if (finished) {
		m.skipEnd = 0;
		m.duration = m.skipStart + m.samplesCount[0] + m.samplesCount[1] + m.samplesCount[2];
	}
	m.loading = false;
	if (m.state == VoiceMessageResuming || m.state == VoiceMessagePlaying || m.state == VoiceMessageStarting) {
		ALint state = AL_INITIAL;
		alGetSourcei(m.source, AL_SOURCE_STATE, &state);
		if (_checkALError()) {
			if (state != AL_PLAYING) {
				alSourcePlay(m.source);
				emit needToCheck();
			}
		}
	}
}
/** Plays the audio data from the given file
 *  \param fileHandle,volume,onFinished,user_data see sound_PlayStream()
 *  \param streamBufferSize the size to use for the decoded audio buffers
 *  \param buffer_count the amount of audio buffers to use
 *  \see sound_PlayStream() for details about the rest of the function
 *       parameters and other details.
 */
AUDIO_STREAM *sound_PlayStreamWithBuf(PHYSFS_file *fileHandle, float volume, void (*onFinished)(const void *), const void *user_data, size_t streamBufferSize, unsigned int buffer_count)
{
	AUDIO_STREAM *stream;
	ALuint       *buffers = (ALuint *)alloca(sizeof(ALuint) * buffer_count);
	ALint error;
	unsigned int i;

	if (!openal_initialized)
	{
		debug(LOG_WARNING, "OpenAL isn't initialized, not creating an audio stream");
		return nullptr;
	}

	stream = (AUDIO_STREAM *)malloc(sizeof(AUDIO_STREAM));
	if (stream == nullptr)
	{
		debug(LOG_FATAL, "sound_PlayStream: Out of memory");
		abort();
		return nullptr;
	}

	// Clear error codes
	alGetError();

	// Retrieve an OpenAL sound source
	alGenSources(1, &(stream->source));

	error = sound_GetError();
	if (error != AL_NO_ERROR)
	{
		// Failed to create OpenAL sound source, so bail out...
		debug(LOG_SOUND, "alGenSources failed, most likely out of sound sources");
		free(stream);
		return nullptr;
	}

	stream->fileHandle = fileHandle;

	stream->decoder = sound_CreateOggVorbisDecoder(stream->fileHandle, false);
	if (stream->decoder == nullptr)
	{
		debug(LOG_ERROR, "sound_PlayStream: Failed to open audio file for decoding");
		free(stream);
		return nullptr;
	}

	stream->volume = volume;
	stream->bufferSize = streamBufferSize;

	alSourcef(stream->source, AL_GAIN, stream->volume);

	// HACK: this is a workaround for a bug in the 64bit implementation of OpenAL on GNU/Linux
	// The AL_PITCH value really should be 1.0.
	alSourcef(stream->source, AL_PITCH, 1.001f);

	// Create some OpenAL buffers to store the decoded data in
	alGenBuffers(buffer_count, buffers);
	sound_GetError();

	// Fill some buffers with audio data
	for (i = 0; i < buffer_count; ++i)
	{
		// Decode some audio data
		soundDataBuffer *soundBuffer = sound_DecodeOggVorbis(stream->decoder, stream->bufferSize);

		// If we actually decoded some data
		if (soundBuffer && soundBuffer->size > 0)
		{
			// Determine PCM data format
			ALenum format = (soundBuffer->channelCount == 1) ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16;

			// Copy the audio data into one of OpenAL's own buffers
			alBufferData(buffers[i], format, soundBuffer->data, soundBuffer->size, soundBuffer->frequency);
			sound_GetError();

			// Clean up our memory
			free(soundBuffer);
		}
		else
		{
			// If no data has been decoded we're probably at the end of our
			// stream. So cleanup the excess stuff here.

			// First remove the data buffer itself
			free(soundBuffer);

			// Then remove OpenAL's buffers
			alDeleteBuffers(buffer_count - i, &buffers[i]);
			sound_GetError();

			break;
		}
	}

	// Bail out if we didn't fill any buffers
	if (i == 0)
	{
		debug(LOG_ERROR, "Failed to fill buffers with decoded audio data!");

		// Destroy the decoder
		sound_DestroyOggVorbisDecoder(stream->decoder);

		// Destroy the OpenAL source
		alDeleteSources(1, &stream->source);

		// Free allocated memory
		free(stream);

		return nullptr;
	}

	// Attach the OpenAL buffers to our OpenAL source
	// (i = the amount of buffers we worked on in the above for-loop)
	alSourceQueueBuffers(stream->source, i, buffers);
	sound_GetError();

	// Start playing the source
	alSourcePlay(stream->source);

	sound_GetError();

	// Set callback info
	stream->onFinished = onFinished;
	stream->user_data = user_data;

	// Prepend this stream to the linked list
	stream->next = active_streams;
	active_streams = stream;

	return stream;
}
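
A hedged usage sketch for sound_PlayStreamWithBuf(): PHYSFS_openRead() is the standard PhysicsFS call for obtaining a read handle, while the path, buffer size, buffer count and callback below are illustrative values rather than anything prescribed by the function:

	static void onStreamFinished(const void *user_data)
	{
		(void)user_data; /* e.g. release per-stream state here */
	}

	/* ... */
	PHYSFS_file *fh = PHYSFS_openRead("music/track1.ogg"); /* hypothetical path */
	if (fh != nullptr)
	{
		sound_PlayStreamWithBuf(fh, 1.0f, onStreamFinished, nullptr, 16 * 1024, 2);
	}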
Example #18
void audioInit() {
	if (audioDevice) return;

	audioDevice = alcOpenDevice(NULL);
	if (!audioDevice) {
		LOG(("Audio Error: default sound device not present."));
		return;
	}
	
	ALCint attributes[] = { ALC_STEREO_SOURCES, 8, 0 };
	audioContext = alcCreateContext(audioDevice, attributes);
	alcMakeContextCurrent(audioContext);
	if (!_checkALCError()) return audioFinish();

	ALfloat v[] = { 0.f, 0.f, -1.f, 0.f, 1.f, 0.f };
	alListener3f(AL_POSITION, 0.f, 0.f, 0.f);
	alListener3f(AL_VELOCITY, 0.f, 0.f, 0.f);
	alListenerfv(AL_ORIENTATION, v);

	alDistanceModel(AL_NONE);

	alGenSources(1, &notifySource);
	alSourcef(notifySource, AL_PITCH, 1.f);
	alSourcef(notifySource, AL_GAIN, 1.f);
	alSource3f(notifySource, AL_POSITION, 0, 0, 0);
	alSource3f(notifySource, AL_VELOCITY, 0, 0, 0);
	alSourcei(notifySource, AL_LOOPING, 0);

	alGenBuffers(1, &notifyBuffer);
	if (!_checkALError()) return audioFinish();

	QFile notify(st::newMsgSound);
	if (!notify.open(QIODevice::ReadOnly)) return audioFinish();

	QByteArray blob = notify.readAll();
	const char *data = blob.constData();
	if (blob.size() < 44) return audioFinish();

	if (*((const uint32*)(data + 0)) != 0x46464952) return audioFinish(); // ChunkID - "RIFF"
	if (*((const uint32*)(data + 4)) != uint32(blob.size() - 8)) return audioFinish(); // ChunkSize
	if (*((const uint32*)(data + 8)) != 0x45564157) return audioFinish(); // Format - "WAVE"
	if (*((const uint32*)(data + 12)) != 0x20746d66) return audioFinish(); // Subchunk1ID - "fmt "
	uint32 subchunk1Size = *((const uint32*)(data + 16)), extra = subchunk1Size - 16;
	if (subchunk1Size < 16 || (extra && extra < 2)) return audioFinish();
	if (*((const uint16*)(data + 20)) != 1) return audioFinish(); // AudioFormat - PCM (1)

	uint16 numChannels = *((const uint16*)(data + 22));
	if (numChannels != 1 && numChannels != 2) return audioFinish();

	uint32 sampleRate = *((const uint32*)(data + 24));
	uint32 byteRate = *((const uint32*)(data + 28));

	uint16 blockAlign = *((const uint16*)(data + 32));
	uint16 bitsPerSample = *((const uint16*)(data + 34));
	if (bitsPerSample % 8) return audioFinish();
	uint16 bytesPerSample = bitsPerSample / 8;
	if (bytesPerSample != 1 && bytesPerSample != 2) return audioFinish();

	if (blockAlign != numChannels * bytesPerSample) return audioFinish();
	if (byteRate != sampleRate * blockAlign) return audioFinish();

	if (extra) {
		uint16 extraSize = *((const uint16*)(data + 36));
		if (uint32(extraSize + 2) != extra) return audioFinish();
		if (uint32(blob.size()) < 44 + extra) return audioFinish();
	}

	if (*((const uint32*)(data + extra + 36)) != 0x61746164) return audioFinish(); // Subchunk2ID - "data"
	uint32 subchunk2Size = *((const uint32*)(data + extra + 40));
	if (subchunk2Size % (numChannels * bytesPerSample)) return audioFinish();
	uint32 numSamples = subchunk2Size / (numChannels * bytesPerSample);

	if (uint32(blob.size()) < 44 + extra + subchunk2Size) return audioFinish();
	data += 44 + extra;

	ALenum format = 0;
	switch (bytesPerSample) {
	case 1:
		switch (numChannels) {
		case 1: format = AL_FORMAT_MONO8; break;
		case 2: format = AL_FORMAT_STEREO8; break;
		}
	break;

	case 2:
		switch (numChannels) {
		case 1: format = AL_FORMAT_MONO16; break;
		case 2: format = AL_FORMAT_STEREO16; break;
		}
	break;
	}
	if (!format) return audioFinish();

	alBufferData(notifyBuffer, format, data, subchunk2Size, sampleRate);
	alSourcei(notifySource, AL_BUFFER, notifyBuffer);
	if (!_checkALError()) return audioFinish();

	voicemsgs = new VoiceMessages();
}
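
The header checks in audioInit() walk the canonical 44-byte RIFF/WAVE layout by raw offsets. For reference, the same layout written as a packed struct (field names are descriptive, not taken from the original source):

	#pragma pack(push, 1)
	struct WavHeader {            // canonical PCM WAV header, 44 bytes
		char   riff[4];           // offset  0: "RIFF"
		uint32 chunkSize;         // offset  4: file size - 8
		char   wave[4];           // offset  8: "WAVE"
		char   fmt[4];            // offset 12: "fmt "
		uint32 subchunk1Size;     // offset 16: 16 for plain PCM (extra bytes shift everything below)
		uint16 audioFormat;       // offset 20: 1 = PCM
		uint16 numChannels;       // offset 22
		uint32 sampleRate;        // offset 24
		uint32 byteRate;          // offset 28: sampleRate * blockAlign
		uint16 blockAlign;        // offset 32: numChannels * bytesPerSample
		uint16 bitsPerSample;     // offset 34
		char   data[4];           // offset 36: "data"
		uint32 subchunk2Size;     // offset 40: size of the PCM payload that follows
	};
	#pragma pack(pop)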
void ChannelStream::LockNumberedBuffer( unsigned int size, unsigned int buff )
{
    alBufferData(buffers[buff], format, obtainedBuffer, size, samplerate);
    Check("locknumberbuff");
}
void AudioPlayer::tick_p()
{
	pthread_mutex_lock(&mutex_);

	if (playing_ == false)
	{
		pthread_mutex_unlock(&mutex_);
		return;
	}

	ALuint buffer;

	if (emptyqueue_)
	{
		ALint state;
		alGetSourcei(source_, AL_SOURCE_STATE, &state);

		if (state == AL_PLAYING)
		{
			pthread_mutex_unlock(&mutex_);
			return;
		}

		finished_ = true;

		playing_ = false;
		freebuffers();

		pthread_mutex_unlock(&mutex_);
		return;
	}

	ALint queued;
	alGetSourcei(source_, AL_BUFFERS_QUEUED, &queued);
	if (queued < NUM_BUFFERS)
	{
		alGenBuffers(1, &buffer);
//		printf("alGenBuffers: %d\n", buffer);
	}
	else
	{
		ALint processed;
		alGetSourcei(source_, AL_BUFFERS_PROCESSED, &processed);

		if (processed == 0)
		{
			pthread_mutex_unlock(&mutex_);
			return;
		}

		alSourceUnqueueBuffers(source_, 1, &buffer);
		//		printf("alSourceUnqueueBuffers\n");
		positions_.pop_front();
	}

	double pos = streamer_->tell();
//	printf("pos: %g\n", pos);
	size_t done = streamer_->read(buffer_, BUFFER_SIZE);
//	printf("done: %d\n", done);
	if (done != 0)
	{
		alBufferData(buffer, (audiosource_->channelCount() == 1) ? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16, buffer_, done, audiosource_->rate());
		alSourceQueueBuffers(source_, 1, &buffer);
		//		printf("alSourceQueueBuffers\n");
		positions_.push_back(pos);

		ALint state;
		alGetSourcei(source_, AL_SOURCE_STATE, &state);
		if (state == AL_STOPPED)
			alSourcePlay(source_);
	}
	else
	{
		alDeleteBuffers(1, &buffer);
//		printf("alDeleteBuffers: %d\n", buffer);
		emptyqueue_ = true;
	}

	pthread_mutex_unlock(&mutex_);
	return;
}
Example #21
SoundID Audio::addSound(const char *fileName, unsigned int flags){
	Sound sound;

	// Clear error flag
	alGetError();

	const char *ext = strrchr(fileName, '.') + 1;
	char str[256];
	if (stricmp(ext, "ogg") == 0){
		FILE *file = fopen(fileName, "rb");
		if (file == NULL){
			sprintf(str, "Couldn't open \"%s\"", fileName);
			ErrorMsg(str);
			return SOUND_NONE;
		}

		OggVorbis_File vf;
		memset(&vf, 0, sizeof(vf));
		if (ov_open(file, &vf, NULL, 0) < 0){
			fclose(file);
			sprintf(str, "\"%s\" is not an ogg file", fileName);
			ErrorMsg(str);
			return SOUND_NONE;
		}

		vorbis_info *vi = ov_info(&vf, -1);

		int nSamples = (uint) ov_pcm_total(&vf, -1);
		int nChannels = vi->channels;
		sound.format = nChannels == 1? AL_FORMAT_MONO16 : AL_FORMAT_STEREO16;
		sound.sampleRate = vi->rate;

		sound.size = nSamples * nChannels;

		sound.samples = new short[sound.size];
		sound.size *= sizeof(short);

		int samplePos = 0;
		while (samplePos < sound.size){
			char *dest = ((char *) sound.samples) + samplePos;

			int bitStream, readBytes = ov_read(&vf, dest, sound.size - samplePos, 0, 2, 1, &bitStream);
			if (readBytes <= 0) break;
			samplePos += readBytes;
		}

		ov_clear(&vf);

	} else {
		ALboolean al_bool;
		ALvoid *data;
		alutLoadWAVFile(fileName, &sound.format, &data, &sound.size, &sound.sampleRate, &al_bool);
		sound.samples = (short *) data;
	}

	alGenBuffers(1, &sound.buffer);
	alBufferData(sound.buffer, sound.format, sound.samples, sound.size, sound.sampleRate);
	if (alGetError() != AL_NO_ERROR){
		alDeleteBuffers(1, &sound.buffer);

		sprintf(str, "Couldn't open \"%s\"", fileName);
		ErrorMsg(str);
		return SOUND_NONE;
	}

	return insertSound(sound);
}
Example #22
// this function is used when restarting the audio system. It is needed to restart a streaming source from where it left off
bool plDSoundBuffer::SetupStreamingSource(plAudioFileReader *stream)
{
    unsigned char data[STREAM_BUFFER_SIZE];
    unsigned int size;
    ALenum error;
    
    alGetError();   
    int numBuffersToQueue = 0;
    
    // fill buffers with data
    for( int i = 0; i < STREAMING_BUFFERS; i++ )
    {
        size = stream->NumBytesLeft() < STREAM_BUFFER_SIZE ? stream->NumBytesLeft() : STREAM_BUFFER_SIZE;
        if(!size)
        {
            if(IsLooping())
            {
                stream->SetPosition(0);
            }
        }
    
        stream->Read(size, data);
        numBuffersToQueue++;

        alGenBuffers( 1, &streamingBuffers[i] );
        error = alGetError();
        if( error != AL_NO_ERROR )
        {
            plStatusLog::AddLineS("audio.log", "Failed to create sound buffer %d", error);
            return false;
        }

        ALenum format = IGetALFormat(fBufferDesc->fBitsPerSample, fBufferDesc->fNumChannels);
        alBufferData( streamingBuffers[i], format, data, size, fBufferDesc->fNumSamplesPerSec );
        if( (error = alGetError()) != AL_NO_ERROR )
            plStatusLog::AddLineS("audio.log", "alBufferData");
    }

     // Generate AL Source
    alGenSources( 1, &source );
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        plStatusLog::AddLineS("audio.log", "Failed to create audio source %d %d", error, source);
        return false;
    }
    alSourcei(source, AL_BUFFER, 0);
    SetScalarVolume(0);
    
    
    alSourcef(source, AL_ROLLOFF_FACTOR, 0.3048);
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        return false;
    }

    alSourceQueueBuffers( source, numBuffersToQueue, streamingBuffers );
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        plStatusLog::AddLineS("audio.log", "Failed to queue buffers %d", error);
        return false;
    }
    return true;
}
ALboolean LoadALData()
{
	// Variables to load into.

	ALenum format;
	ALsizei size;
	ALvoid* data;
	ALsizei freq;
	ALboolean loop;

	// Load wav data into buffers.

	alGenBuffers(NUM_BUFFERS, Buffers);

	if(alGetError() != AL_NO_ERROR)
		return AL_FALSE;

	alutLoadWAVFile("sound/hit.wav", &format, &data, &size, &freq, &loop);
	alBufferData(Buffers[hit], format, data, size, freq);
	alutUnloadWAV(format, data, size, freq);

//	alutLoadWAVFile("wavdata/Gun1.wav", &format, &data, &size, &freq, &loop);
//	alBufferData(Buffers[GUN1], format, data, size, freq);
//	alutUnloadWAV(format, data, size, freq);

//	alutLoadWAVFile("wavdata/Gun2.wav", &format, &data, &size, &freq, &loop);
//	alBufferData(Buffers[GUN2], format, data, size, freq);
//	alutUnloadWAV(format, data, size, freq);
	
	//alutLoadWAVFile("sound/dung.wav", &format, &data, &size, &freq, &loop);
	//alBufferData(Buffers[dung], format, data, size, freq);
	//alutUnloadWAV(format, data, size, freq);

	// Bind buffers into audio sources.

	alGenSources(NUM_SOURCES, Sources);

	if(alGetError() != AL_NO_ERROR)
		return AL_FALSE;



	//alSourcei (Sources[dung], AL_BUFFER,   Buffers[dung]   );
	//alSourcef (Sources[dung], AL_PITCH,    1.0f              );
	//alSourcef (Sources[dung], AL_GAIN,     1.0f              );
	//alSourcefv(Sources[dung], AL_POSITION, SourcesPos[dung]);
	//alSourcefv(Sources[dung], AL_VELOCITY, SourcesVel[dung]);
	//alSourcei (Sources[dung], AL_LOOPING,  AL_TRUE           );


	alSourcei (Sources[hit], AL_BUFFER,   Buffers[hit]   );
	alSourcef (Sources[hit], AL_PITCH,    1.0f              );
	alSourcef (Sources[hit], AL_GAIN,     1.0f              );
	alSourcefv(Sources[hit], AL_POSITION, SourcesPos[hit]);
	alSourcefv(Sources[hit], AL_VELOCITY, SourcesVel[hit]);
	alSourcei (Sources[hit], AL_LOOPING,  AL_FALSE           );
/*
	alSourcei (Sources[GUN1], AL_BUFFER,   Buffers[GUN1]   );
	alSourcef (Sources[GUN1], AL_PITCH,    1.0f            );
	alSourcef (Sources[GUN1], AL_GAIN,     1.0f            );
	alSourcefv(Sources[GUN1], AL_POSITION, SourcesPos[GUN1]);
	alSourcefv(Sources[GUN1], AL_VELOCITY, SourcesVel[GUN1]);
	alSourcei (Sources[GUN1], AL_LOOPING,  AL_FALSE        );

	alSourcei (Sources[GUN2], AL_BUFFER,   Buffers[GUN2]   );
	alSourcef (Sources[GUN2], AL_PITCH,    1.0f            );
	alSourcef (Sources[GUN2], AL_GAIN,     1.0f            );
	alSourcefv(Sources[GUN2], AL_POSITION, SourcesPos[GUN2]);
	alSourcefv(Sources[GUN2], AL_VELOCITY, SourcesVel[GUN2]);
	alSourcei (Sources[GUN2], AL_LOOPING,  AL_FALSE        );
*/
	// Do another error check and return.

	if(alGetError() != AL_NO_ERROR)
		return AL_FALSE;

	return AL_TRUE;
}
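A minimal sketch of how LoadALData() is typically driven, assuming the same global Buffers/Sources arrays, the NUM_BUFFERS/NUM_SOURCES constants, and the hit index used above; the play/wait/cleanup calls are plain OpenAL, but the surrounding program state is taken on faith:

// Hypothetical driver for LoadALData(); assumes an AL context is already
// current and that Buffers, Sources, NUM_BUFFERS, NUM_SOURCES and hit come
// from the surrounding program shown above.
if (LoadALData() == AL_TRUE)
{
	alSourcePlay(Sources[hit]);                // fire the one-shot "hit" sound

	ALint state = AL_PLAYING;
	while (state == AL_PLAYING)                // crude wait until playback ends
		alGetSourcei(Sources[hit], AL_SOURCE_STATE, &state);

	alDeleteSources(NUM_SOURCES, Sources);     // release sources first,
	alDeleteBuffers(NUM_BUFFERS, Buffers);     // then the buffers they referenced
}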
Example #24
// this function is used when starting up a streaming sound, as opposed to restarting it due to an audio system restart.
bool plDSoundBuffer::SetupStreamingSource(void *data, unsigned bytes)
{
    unsigned char bufferData[STREAM_BUFFER_SIZE];
    unsigned int size;
    ALenum error;
    char *pData = (char *)data; 
    
    alGetError();   
    int numBuffersToQueue = 0;

    // fill buffers with data
    for( int i = 0; i < STREAMING_BUFFERS; i++ )
    {
        size = bytes < STREAM_BUFFER_SIZE ? bytes : STREAM_BUFFER_SIZE;
        if(!size)
            break;  

        memcpy(bufferData, pData, size);
        pData += size;
        bytes-= size;
        numBuffersToQueue++;

        alGenBuffers( 1, &streamingBuffers[i] );
        error = alGetError();
        if( error != AL_NO_ERROR )
        {
            plStatusLog::AddLineS("audio.log", "Failed to create sound buffer %d", error);
            return false;
        }

        ALenum format = IGetALFormat(fBufferDesc->fBitsPerSample, fBufferDesc->fNumChannels);
        alBufferData( streamingBuffers[i], format, bufferData, size, fBufferDesc->fNumSamplesPerSec );
        if( (error = alGetError()) != AL_NO_ERROR )
            plStatusLog::AddLineS("audio.log", "alBufferData");
    }

     // Generate AL Source
    alGenSources( 1, &source );
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        plStatusLog::AddLineS("audio.log", "Failed to create audio source %d %d", error, source);
        return false;
    }
    alSourcei(source, AL_BUFFER, 0);
    SetScalarVolume(0);
    
    alSourcef(source, AL_ROLLOFF_FACTOR, 0.3048);
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        return false;
    }

    alSourceQueueBuffers( source, numBuffersToQueue, streamingBuffers );
    error = alGetError();
    if( error != AL_NO_ERROR )
    {
        plStatusLog::AddLineS("audio.log", "Failed to queue buffers %d", error);
        return false;
    }
    return true;
}
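IGetALFormat() is referenced here but not shown. As a rough sketch only, a bits-per-sample/channel-count to ALenum mapping over the four core PCM formats usually looks like the following; the name and exact behavior of the real Plasma helper are assumptions:

// Plausible reconstruction of a bits/channels -> ALenum mapping using only
// the core OpenAL PCM formats; the engine's real IGetALFormat() may differ.
static ALenum IGetALFormatSketch(unsigned bitsPerSample, unsigned numChannels)
{
    if (numChannels == 1)
        return (bitsPerSample == 8) ? AL_FORMAT_MONO8   : AL_FORMAT_MONO16;
    else
        return (bitsPerSample == 8) ? AL_FORMAT_STEREO8 : AL_FORMAT_STEREO16;
}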
Example #25
void audio_thread(void *args){
    ToxAV *av = args;
    const char *device_list, *output_device = NULL;
    void *audio_device = NULL;

    _Bool call[MAX_CALLS] = {0}, preview = 0;
    _Bool groups_audio[MAX_NUM_GROUPS] = {0};

    int perframe = (UTOX_DEFAULT_FRAME_A * UTOX_DEFAULT_SAMPLE_RATE_A) / 1000;
    uint8_t buf[perframe * 2 * UTOX_DEFAULT_AUDIO_CHANNELS]; //, dest[perframe * 2 * UTOX_DEFAULT_AUDIO_CHANNELS];
    memset(buf, 0, sizeof(buf));

    uint8_t audio_count = 0;
    _Bool record_on = 0;
    #ifdef AUDIO_FILTERING
    debug("Audio Filtering");
    #ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
    debug(" and Echo cancellation");
    #endif
    debug(" enabled in this build\n");
    #endif

    debug("frame size: %u\n", perframe);

    device_list = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER);
    if (device_list) {
        audio_device = (void*)device_list;
        debug("uTox audio input device list:\n");
        while(*device_list) {
            debug("\t%s\n", device_list);
            postmessage(AUDIO_IN_DEVICE, UI_STRING_ID_INVALID, 0, (void*)device_list);
            device_list += strlen(device_list) + 1;
        }
    }

    postmessage(AUDIO_IN_DEVICE, STR_AUDIO_IN_NONE, 0, NULL);
    audio_detect();

    if (alcIsExtensionPresent(NULL, "ALC_ENUMERATE_ALL_EXT")) {
        device_list = alcGetString(NULL, ALC_ALL_DEVICES_SPECIFIER);
    } else {
        device_list = alcGetString(NULL, ALC_DEVICE_SPECIFIER);
    }

    if(device_list) {
        output_device = device_list;
        debug("uTox audio output device list:\n");
        while(*device_list) {
            debug("\t%s\n", device_list);
            postmessage(AUDIO_OUT_DEVICE, 0, 0, (void*)device_list);
            device_list += strlen(device_list) + 1;
        }
    }

    device_out = alcOpenDevice(output_device);
    if(!device_out) {
        debug("alcOpenDevice() failed\n");
        return;
    }

    int attrlist[] = {  ALC_FREQUENCY, UTOX_DEFAULT_SAMPLE_RATE_A,
                        ALC_INVALID };

    context = alcCreateContext(device_out, attrlist);
    if(!alcMakeContextCurrent(context)) {
        debug("alcMakeContextCurrent() failed\n");
        alcCloseDevice(device_out);
        return;
    }

    alGenSources(countof(source), source);

    static ALuint ringSrc[MAX_CALLS];
    alGenSources(MAX_CALLS, ringSrc);

    /* Create buffer to store samples */
    ALuint RingBuffer;
    alGenBuffers(1, &RingBuffer);

    {
        float frequency1 = 441.f;
        float frequency2 = 882.f;
        int seconds = 4;
        unsigned sample_rate = 22050;
        size_t buf_size = seconds * sample_rate * 2; //16 bit (2 bytes per sample)
        int16_t *samples = malloc(buf_size * sizeof(int16_t));
        if (!samples)
            return;

        /*Generate an electronic ringer sound that quickly alternates between two frequencies*/
        int index = 0;
        for(index = 0; index < buf_size; ++index) {
            if ((index / (sample_rate)) % 4 < 2 ) {//4 second ring cycle: first 2 seconds ring, the rest (2 seconds) is silence
                if((index / 1000) % 2 == 1) {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency1) / sample_rate * index); //5000=amplitude(volume level). It can be from zero to 32700
                } else {
                    samples[index] = 5000 * sin((2.0 * 3.1415926 * frequency2) / sample_rate * index);
                }
            } else {
                samples[index] = 0;
            }
        }

        alBufferData(RingBuffer, AL_FORMAT_MONO16, samples, buf_size, sample_rate);
        free(samples);
    }

    {
        unsigned int i;
        for (i = 0; i < MAX_CALLS; ++i) {
            alSourcei(ringSrc[i], AL_LOOPING, AL_TRUE);
            alSourcei(ringSrc[i], AL_BUFFER, RingBuffer);
        }
    }
    Filter_Audio *f_a = NULL;

    audio_thread_init = 1;

    int16_t *preview_buffer = NULL;
    unsigned int preview_buffer_index = 0;
    #define PREVIEW_BUFFER_SIZE (UTOX_DEFAULT_SAMPLE_RATE_A / 2)

    while(1) {
        if(audio_thread_msg) {
            TOX_MSG *m = &audio_msg;
            if(!m->msg) {
                break;
            }

            switch(m->msg) {
            case AUDIO_SET_INPUT: {
                audio_device = m->data;

                if(record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                }

                if(audio_count) {
                    device_in = alcopencapture(audio_device);
                    if(!device_in) {
                        record_on = 0;
                    } else {
                        alccapturestart(device_in);
                        record_on = 1;
                    }
                }

                debug("set audio in\n");
                break;
            }

            case AUDIO_SET_OUTPUT: {
                output_device = m->data;

                ALCdevice *device = alcOpenDevice(output_device);
                if(!device) {
                    debug("alcOpenDevice() failed\n");
                    break;
                }

                ALCcontext *con = alcCreateContext(device, NULL);
                if(!alcMakeContextCurrent(con)) {
                    debug("alcMakeContextCurrent() failed\n");
                    alcCloseDevice(device);
                    break;
                }

                alcDestroyContext(context);
                alcCloseDevice(device_out);
                context = con;
                device_out = device;

                alGenSources(countof(source), source);
                alGenSources(MAX_CALLS, ringSrc);

                Tox *tox = toxav_get_tox(av);
                uint32_t num_chats = tox_count_chatlist(tox);

                if (num_chats != 0) {
                    int32_t chats[num_chats];
                    uint32_t max = tox_get_chatlist(tox, chats, num_chats);

                    unsigned int i;
                    for (i = 0; i < max; ++i) {
                        if (tox_group_get_type(tox, chats[i]) == TOX_GROUPCHAT_TYPE_AV) {
                            GROUPCHAT *g = &group[chats[i]];
                            alGenSources(g->peers, g->source);
                        }
                    }
                }

                debug("set audio out\n");
                break;
            }

            case AUDIO_PREVIEW_START: {
                preview = 1;
                audio_count++;
                preview_buffer = calloc(PREVIEW_BUFFER_SIZE, 2);
                preview_buffer_index = 0;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Starting Audio Preview\n");
                    }
                }
                break;
            }

            case AUDIO_START: {
                audio_count++;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Listening to audio\n");
                        yieldcpu(20);
                    }
                }
                break;
            }

            case GROUP_AUDIO_CALL_START: {
                break; // TODO, new groups API
                audio_count++;
                groups_audio[m->param1] = 1;
                if(!record_on) {
                    device_in = alcopencapture(audio_device);
                    if(device_in) {
                        alccapturestart(device_in);
                        record_on = 1;
                        debug("Starting Audio GroupCall\n");
                    }
                }
                break;
            }

            case AUDIO_PREVIEW_END: {
                preview = 0;
                audio_count--;
                free(preview_buffer);
                preview_buffer = NULL;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("Audio Preview Stopped\n");
                }
                break;
            }

            case AUDIO_END: {
                if(!call[m->param1]) {
                    break;
                }
                call[m->param1] = 0;
                audio_count--;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("stop\n");
                }
                break;
            }

            case GROUP_AUDIO_CALL_END: {
                break; // TODO, new groups API
                if(!groups_audio[m->param1]) {
                    break;
                }
                audio_count--;
                groups_audio[m->param1] = 0;
                if(!audio_count && record_on) {
                    alccapturestop(device_in);
                    alccaptureclose(device_in);
                    record_on = 0;
                    debug("stop\n");
                }
                break;
            }

            case AUDIO_PLAY_RINGTONE: {
                if(!audible_notifications_enabled) {
                    break;
                }

                alSourcePlay(ringSrc[m->param1]);
                break;
            }

            case AUDIO_STOP_RINGTONE: {
                ALint state;
                alGetSourcei(ringSrc[m->param1], AL_SOURCE_STATE, &state);
                if(state == AL_PLAYING) {
                    alSourceStop(ringSrc[m->param1]);
                }
                break;
            }
            }

            audio_thread_msg = 0;
        }

        // TODO move this code to filter_audio.c
        #ifdef AUDIO_FILTERING
            if (!f_a && audio_filtering_enabled) {
                f_a = new_filter_audio(UTOX_DEFAULT_SAMPLE_RATE_A);
                if (!f_a) {
                    audio_filtering_enabled = 0;
                    debug("filter audio failed\n");
                } else {
                    debug("filter audio on\n");
                }
            } else if (f_a && !audio_filtering_enabled) {
                kill_filter_audio(f_a);
                f_a = NULL;
                debug("filter audio off\n");
            }
        #else
            if (audio_filtering_enabled) {
                audio_filtering_enabled = 0;
            }
        #endif

        _Bool sleep = 1;

        if(record_on) {
            ALint samples;
            _Bool frame = 0;
            /* If we have a device_in we're on linux so we can just call OpenAL, otherwise we're on something else so
             * we'll need to call audio_frame() to add to the buffer for us. */
            if (device_in == (void*)1) {
                frame = audio_frame((void*)buf);
                if (frame) {
                    /* We have an audio frame to use, continue without sleeping. */
                    sleep = 0;
                }
            } else {
                alcGetIntegerv(device_in, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
                if(samples >= perframe) {
                    alcCaptureSamples(device_in, buf, perframe);
                    frame = 1;
                    if (samples >= perframe * 2) {
                        sleep = 0;
                    }
                }
            }

            #ifdef AUDIO_FILTERING
            #ifdef ALC_LOOPBACK_CAPTURE_SAMPLES
            if (f_a && audio_filtering_enabled) {
                alcGetIntegerv(device_out, ALC_LOOPBACK_CAPTURE_SAMPLES, sizeof(samples), &samples);
                if(samples >= perframe) {
                    int16_t buffer[perframe];
                    alcCaptureSamplesLoopback(device_out, buffer, perframe);
                    pass_audio_output(f_a, buffer, perframe);
                    set_echo_delay_ms(f_a, UTOX_DEFAULT_FRAME_A);
                    if (samples >= perframe * 2) {
                        sleep = 0;
                    }
                }
            }
            #endif
            #endif

            if (frame) {
                _Bool voice = 1;
                #ifdef AUDIO_FILTERING
                if (f_a) {
                    int ret = filter_audio(f_a, (int16_t*)buf, perframe);

                    if (ret == -1) {
                        debug("filter audio error\n");
                    }

                    if (ret == 0) {
                        voice = 0;
                    }
                }
                #endif

                /* If push to talk, we don't have to do anything */
                if (!check_ptt_key()) {
                    voice = 0; //PTT is up, send nothing.
                }

                if (preview) {
                    if (preview_buffer_index + perframe > PREVIEW_BUFFER_SIZE) {
                        preview_buffer_index = 0;
                    }

                    sourceplaybuffer(0, preview_buffer + preview_buffer_index, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A);
                    if (voice) {
                        memcpy(preview_buffer + preview_buffer_index, buf, perframe * sizeof(int16_t));
                    } else {
                        memset(preview_buffer + preview_buffer_index, 0, perframe * sizeof(int16_t));
                    }
                    preview_buffer_index += perframe;
                }

                if (voice) {
                    int i, active_call_count = 0;
                    for(i = 0; i < UTOX_MAX_NUM_FRIENDS; i++) {
                        if( UTOX_SEND_AUDIO(i) ) {
                            active_call_count++;
                            TOXAV_ERR_SEND_FRAME error = 0;
                            toxav_audio_send_frame(av, friend[i].number, (const int16_t *)buf, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A, &error);
                            if (error) {
                                debug("toxav_send_audio error friend == %i, error ==  %i\n", i, error);
                            } else {
                                // debug("Send a frame to friend %i\n",i);
                                if (i >= UTOX_MAX_CALLS) {
                                    debug("We're calling more peers than allowed by UTOX_MAX_CALLS, This is a bug\n");
                                        break;
                                }
                            }
                        }
                    }

                    // TODO REMOVED until new groups api can be implemented.
                    /*Tox *tox = toxav_get_tox(av);
                    uint32_t num_chats = tox_count_chatlist(tox);

                    if (num_chats != 0) {
                        int32_t chats[num_chats];
                        uint32_t max = tox_get_chatlist(tox, chats, num_chats);
                        for (i = 0; i < max; ++i) {
                            if (groups_audio[chats[i]]) {
                                toxav_group_send_audio(tox, chats[i], (int16_t *)buf, perframe, UTOX_DEFAULT_AUDIO_CHANNELS, UTOX_DEFAULT_SAMPLE_RATE_A);
                            }
                        }
                    }*/
                }
            }
        }

        if (sleep) {
            yieldcpu(5);
        }
    }

    utox_filter_audio_kill(f_a);

    //missing some cleanup ?
    alDeleteSources(MAX_CALLS, ringSrc);
    alDeleteSources(countof(source), source);
    alDeleteBuffers(1, &RingBuffer);

    if(device_in) {
        if(record_on) {
            alcCaptureStop(device_in);
        }
        alcCaptureCloseDevice(device_in);
    }

    alcMakeContextCurrent(NULL);
    alcDestroyContext(context);
    alcCloseDevice(device_out);

    audio_thread_msg = 0;
    audio_thread_init = 0;
    debug("UTOX AUDIO:\tClean thread exit!\n");
}
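The lowercase alcopencapture()/alccapturestart()/alccapturestop() calls above are uTox's own wrappers. Stripped of the wrappers, the capture-polling pattern inside the loop boils down to the standard ALC capture API, roughly as in this sketch; the 48 kHz rate, mono 16-bit format and 960-sample frame are illustrative assumptions, not uTox's actual configuration:

// Minimal capture-poll sketch using the plain ALC capture API.
// Device name (NULL = default), sample rate, format and frame size are
// illustrative; a real client would loop and hand frames to its encoder.
ALCdevice *cap = alcCaptureOpenDevice(NULL, 48000, AL_FORMAT_MONO16, 4096);
if (cap) {
    alcCaptureStart(cap);

    int16_t frame[960];
    ALCint avail = 0;
    alcGetIntegerv(cap, ALC_CAPTURE_SAMPLES, 1, &avail);   // samples waiting in the capture ring
    if (avail >= 960)
        alcCaptureSamples(cap, frame, 960);                // pull exactly one frame

    alcCaptureStop(cap);
    alcCaptureCloseDevice(cap);
}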
Example #26
//============================================================================ 
bool plDSoundBuffer::StreamingFillBuffer(plAudioFileReader *stream)
{
    if(!source)
        return false;

    ALenum error;
    ALuint bufferId;
    unsigned char data[STREAM_BUFFER_SIZE];
    int buffersProcessed = BuffersProcessed();
    bool finished = false;

    for(int i = 0; i < buffersProcessed; i++)
    {
        alSourceUnqueueBuffers( source, 1, &bufferId );
        if( (error = alGetError()) != AL_NO_ERROR )
        {
            plStatusLog::AddLineS("audio.log", "Failed to unqueue buffer %d", error);
            return false;
        }

        if(!finished)
        {
            if(stream->NumBytesLeft() == 0)
            {
                // if at anytime we run out of data, and we are looping, reset the data stream and continue to fill buffers
                if(IsLooping())
                {
                    stream->SetPosition(0); // we are looping, so reset data stream, and keep filling buffers
                }
                else
                {
                    finished = true;    // no more data, but we could still be playing, so we don't want to stop the sound yet
                }
            }

            if(!finished)
            {
                unsigned int size = stream->NumBytesLeft() < STREAM_BUFFER_SIZE ? stream->NumBytesLeft() : STREAM_BUFFER_SIZE;
                stream->Read(size, data);

                ALenum format = IGetALFormat(fBufferDesc->fBitsPerSample, fBufferDesc->fNumChannels);
                alBufferData( bufferId, format, data, size, fBufferDesc->fNumSamplesPerSec );
                if( (error = alGetError()) != AL_NO_ERROR )
                {
                    plStatusLog::AddLineS("audio.log", "Failed to copy data to sound buffer %d", error);
                    return false;
                }

                alSourceQueueBuffers( source, 1, &bufferId );
                if( (error = alGetError()) != AL_NO_ERROR )
                {
                    plStatusLog::AddLineS("audio.log", "Failed to queue buffer %d", error);
                    return false;
                }
            }
        }
    }
    if(!IsPlaying() && !finished)
    {
        alSourcePlay(source);
    }
    alGetError();
    return true;
}
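BuffersProcessed() and IsPlaying() are class helpers that do not appear in this snippet; most likely they wrap the standard source queries, along the lines of this sketch (names and exact behavior are assumptions, not the actual plDSoundBuffer code):

// Plausible stand-ins for the helpers used above; the real Plasma
// implementations may differ.
int BuffersProcessedSketch(ALuint source)
{
    ALint processed = 0;
    alGetSourcei(source, AL_BUFFERS_PROCESSED, &processed); // buffers safe to unqueue
    return processed;
}

bool IsPlayingSketch(ALuint source)
{
    ALint state = AL_STOPPED;
    alGetSourcei(source, AL_SOURCE_STATE, &state);
    return state == AL_PLAYING;
}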
Example #27
void *decode_audio_thread(void *arg)
{
    INFO("Started decode audio thread!");
    av_session_t *_phone = arg;
    _phone->running_decaud = 1;

    //int recved_size;
    //uint8_t dest [RTP_PAYLOAD_SIZE];

    int frame_size = AUDIO_FRAME_SIZE;
    //int data_size;

    ALCdevice *dev;
    ALCcontext *ctx;
    ALuint source, *buffers;
    dev = alcOpenDevice(NULL);
    ctx = alcCreateContext(dev, NULL);
    alcMakeContextCurrent(ctx);
    int openal_buffers = 5;

    buffers = calloc(sizeof(ALuint) * openal_buffers, 1);
    alGenBuffers(openal_buffers, buffers);
    alGenSources((ALuint)1, &source);
    alSourcei(source, AL_LOOPING, AL_FALSE);

    ALuint buffer;
    ALint ready;

    uint16_t zeros[frame_size];
    memset(zeros, 0, frame_size);
    int16_t PCM[frame_size];

    int i;

    for (i = 0; i < openal_buffers; ++i) {
        alBufferData(buffers[i], AL_FORMAT_MONO16, zeros, frame_size, 48000);
    }

    alSourceQueueBuffers(source, openal_buffers, buffers);
    alSourcePlay(source);

    if (alGetError() != AL_NO_ERROR) {
        fprintf(stderr, "Error starting audio\n");
        goto ending;
    }

    int dec_frame_len = 0;

    while (_phone->running_decaud) {

        alGetSourcei(source, AL_BUFFERS_PROCESSED, &ready);

        if (ready <= 0)
            continue;

        dec_frame_len = toxav_recv_audio(_phone->av, frame_size, PCM);

        /* Play the packet */
        if (dec_frame_len > 0) {
            alSourceUnqueueBuffers(source, 1, &buffer);
            alBufferData(buffer, AL_FORMAT_MONO16, PCM, dec_frame_len * 2 * 1, 48000);
            int error = alGetError();

            if (error != AL_NO_ERROR) {
                fprintf(stderr, "Error setting buffer %d\n", error);
                break;
            }

            alSourceQueueBuffers(source, 1, &buffer);

            if (alGetError() != AL_NO_ERROR) {
                fprintf(stderr, "Error: could not buffer audio\n");
                break;
            }

            alGetSourcei(source, AL_SOURCE_STATE, &ready);

            if (ready != AL_PLAYING) alSourcePlay(source);
        }

        usleep(1000);
    }


ending:
    /* clean up codecs */
    //pthread_mutex_lock(&cs->ctrl_mutex);
    /*
    alDeleteSources(1, &source);
    alDeleteBuffers(openal_buffers, buffers);
    alcMakeContextCurrent(NULL);
    alcDestroyContext(ctx);
    alcCloseDevice(dev);
    */
    //pthread_mutex_unlock(&cs->ctrl_mutex);

    _phone->running_decaud = -1;

    pthread_exit ( NULL );
}
OASound async_init_wav(const char* _file)
{
    
    OASound sound;
    memset(&sound,0,sizeof(OASound));
    
    ALvoid* l_DataBuffer = NULL;
    unsigned int l_FileSize = 0;
   
    // OpenAL: load the raw WAV data first
    if(x_load_wav(_file, (unsigned char**)&l_DataBuffer, l_FileSize, sound))
    //  alternative check: if((l_DataBuffer != NULL) && (l_FileSize > 0))
    {
        if(l_DataBuffer == NULL)
        {
            printf("*** DataBuffer NULL!!! *** \n");
        }
        // grab a buffer ID from openAL
        alGenBuffers(1, (ALuint*)&sound.m_BufferId);
        
        ALenum l_error = alGetError();
        
        if(l_error != AL_NO_ERROR)
        {
            printf("**** OpenAL ERROR buffer%i\n", l_error);
            return sound;
        }
        else
        {
            printf("**** OpenAL OK buffer %i\n", sound.m_BufferId);
        }
        
        ALuint f;
        if (sound.depth==16)
        {
            if (sound.nbchannel == 1)
                f = AL_FORMAT_MONO16;
            else
                f = AL_FORMAT_STEREO16;
        }
        else
        {
            if (sound.nbchannel == 1)
                f = AL_FORMAT_MONO8;
            else
                f = AL_FORMAT_STEREO8;
            
        }

        alBufferData(sound.m_BufferId, f, l_DataBuffer, l_FileSize, sound.frequency);
        
        l_error = alGetError();
        if(l_error != AL_NO_ERROR)
        {
            printf("**** OpenAL ERROR format\n");
            return sound;
        }
        
        // create Source
        alGenSources(1, (ALuint*)&sound.m_SourceId);
        l_error = alGetError();
        if(l_error != AL_NO_ERROR)
        {
            printf("**** OpenAL ERROR source\n");
            return sound;
        }
        
        alSourcei(sound.m_SourceId, AL_BUFFER, sound.m_BufferId);
        
        l_error = alGetError();
        if(l_error != AL_NO_ERROR)
        {
            printf("**** OpenAL ERROR source buffer\n");
            return sound;
        }
        
        // if loop:
        //alSourcei(m_SourceId, AL_LOOPING, AL_TRUE);
        
        alSourcef(sound.m_SourceId, AL_PITCH, 1.0f);
        //l_error = alGetError();
        
        //ASSERTC(l_error == AL_NO_ERROR,"**** OpenAL ERROR buffer\n");
        //if(l_error != AL_NO_ERROR)
        //{
        //    CONSOLEMSG("**** OpenAL ERROR properties\n");
        //    return FAILURE;
        //}
        
        alSourcef(sound.m_SourceId, AL_GAIN, 1.0f);     
        
        l_error = alGetError();
        
        if(l_error != AL_NO_ERROR)
        {
            printf("**** OpenAL ERROR properties\n");
            return sound;
        }
    }
    else
    {
        return sound;
    }
    
    if (l_DataBuffer)
    {
        free(l_DataBuffer);
        l_DataBuffer = NULL;
    }
    
    //printf("**** OpenAL Resource load SUCCESS\n");
    sound.successfullyLoaded = 1;
	return sound;
}
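A hedged usage sketch for the loader above: once successfullyLoaded is set, the returned OASound can be started and later released with the standard calls. The field names come from the struct used above; the file path and the cleanup order (source before buffer) are assumptions:

// Illustrative use of the OASound returned by async_init_wav();
// assumes an AL device and context are already current. The path is a
// placeholder, not a file shipped with the original code.
OASound s = async_init_wav("data/click.wav");
if (s.successfullyLoaded)
{
    alSourcePlay(s.m_SourceId);                  // start one-shot playback

    // ...later, when the sound is no longer needed:
    alSourceStop(s.m_SourceId);
    alDeleteSources(1, (ALuint*)&s.m_SourceId);  // detach and delete the source first
    alDeleteBuffers(1, (ALuint*)&s.m_BufferId);  // then release its buffer
}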
//------------------------------------------------------------
bool ofOpenALSoundPlayer::loadSound(string fileName, bool is_stream){

	fileName = ofToDataPath(fileName);

	bMultiPlay = false;
	isStreaming = is_stream;
	int err = AL_NO_ERROR;

	// [1] init sound systems, if necessary
	initialize();

	// [2] try to unload any previously loaded sounds
	// & prevent user-created memory leaks
	// if they call "loadSound" repeatedly, for example

	unloadSound();
	ALenum format=AL_FORMAT_MONO16;
	bLoadedOk = false;

	if(!isStreaming){
		readFile(fileName, buffer);
	}else{
		stream(fileName, buffer);
	}

	int numFrames = buffer.size()/channels;

	if(isStreaming){
		buffers.resize(channels*2);
	}else{
		buffers.resize(channels);
	}
	alGenBuffers(buffers.size(), &buffers[0]);
	if(channels==1){
		sources.resize(1);
		alGenSources(1, &sources[0]);
		err = alGetError();
		if (err != AL_NO_ERROR){
			ofLogError("ofOpenALSoundPlayer") << "loadSound(): couldn't generate source for \"" << fileName << "\": "
			<< (int) err << " " << getALErrorString(err);
			return false;
		}

		for(int i=0; i<(int)buffers.size(); i++){
			alBufferData(buffers[i],format,&buffer[0],buffer.size()*2,samplerate);
			err = alGetError();
			if (err != AL_NO_ERROR){
				ofLogError("ofOpenALSoundPlayer:") << "loadSound(): couldn't create buffer for \"" << fileName << "\": "
				<< (int) err << " " << getALErrorString(err);
				return false;
			}
			if(isStreaming){
				stream(fileName,buffer);
			}
		}
		if(isStreaming){
			alSourceQueueBuffers(sources[0],buffers.size(),&buffers[0]);
		}else{
			alSourcei (sources[0], AL_BUFFER,   buffers[0]);
		}

		alSourcef (sources[0], AL_PITCH,    1.0f);
		alSourcef (sources[0], AL_GAIN,     1.0f);
	    alSourcef (sources[0], AL_ROLLOFF_FACTOR,  0.0);
	    alSourcei (sources[0], AL_SOURCE_RELATIVE, AL_TRUE);
	}else{
		vector<vector<short> > multibuffer;
		multibuffer.resize(channels);
		sources.resize(channels);
		alGenSources(channels, &sources[0]);
		if(isStreaming){
			for(int s=0; s<2;s++){
				for(int i=0;i<channels;i++){
					multibuffer[i].resize(buffer.size()/channels);
					for(int j=0;j<numFrames;j++){
						multibuffer[i][j] = buffer[j*channels+i];
					}
					alBufferData(buffers[s*2+i],format,&multibuffer[i][0],buffer.size()/channels*2,samplerate);
					err = alGetError();
					if ( err != AL_NO_ERROR){
						ofLogError("ofOpenALSoundPlayer") << "loadSound(): couldn't create stereo buffers for \"" << fileName << "\": " << (int) err << " " << getALErrorString(err);
						return false;
					}
					alSourceQueueBuffers(sources[i],1,&buffers[s*2+i]);
					stream(fileName,buffer);
				}
			}
		}else{
			for(int i=0;i<channels;i++){
				multibuffer[i].resize(buffer.size()/channels);
				for(int j=0;j<numFrames;j++){
					multibuffer[i][j] = buffer[j*channels+i];
				}
				alBufferData(buffers[i],format,&multibuffer[i][0],buffer.size()/channels*2,samplerate);
				err = alGetError();
				if (err != AL_NO_ERROR){
					ofLogError("ofOpenALSoundPlayer") << "loadSound(): couldn't create stereo buffers for \"" << fileName << "\": "
					<< (int) err << " " << getALErrorString(err);
					return false;
				}
				alSourcei (sources[i], AL_BUFFER,   buffers[i]   );
			}
		}

		
		for(int i=0;i<channels;i++){
			err = alGetError();
			if (err != AL_NO_ERROR){
				ofLogError("ofOpenALSoundPlayer") << "loadSound(): couldn't create stereo sources for \"" << fileName << "\": "
				<< (int) err << " " << getALErrorString(err);
				return false;
			}

			// only stereo panning
			if(i==0){
				float pos[3] = {-1,0,0};
				alSourcefv(sources[i],AL_POSITION,pos);
			}else{
				float pos[3] = {1,0,0};
				alSourcefv(sources[i],AL_POSITION,pos);
			}
			alSourcef (sources[i], AL_ROLLOFF_FACTOR,  0.0);
			alSourcei (sources[i], AL_SOURCE_RELATIVE, AL_TRUE);
		}
	}
	
	bLoadedOk = true;
	return bLoadedOk;

}
Example #30
int main(int argc, char *argv[]) {

    if(alcIsExtensionPresent(NULL, "ALC_SOFT_loopback") == ALC_FALSE) {
        fputs("Your OpenAL implementation doesn't support the "
              "\"ALC_SOFT_loopback\" extension, required for this test. "
              "Sorry.\n", stderr);
        exit(EXIT_FAILURE);
    }
    ALCint alc_major, alc_minor;
    alcGetIntegerv(NULL, ALC_MAJOR_VERSION, 1, &alc_major);
    alcGetIntegerv(NULL, ALC_MINOR_VERSION, 1, &alc_minor);
    if(alc_major<1 || (alc_major==1 && alc_minor<1))
        fputs("Warning : ALC_SOFT_loopback has been written against "
              "the OpenAL 1.1 specification.\n", stderr);

#define HELPER(X) X = alcGetProcAddress(NULL, #X)
    HELPER(alcLoopbackOpenDeviceSOFT);
    HELPER(alcIsRenderFormatSupportedSOFT);
    HELPER(alcRenderSamplesSOFT);
#undef  HELPER
#define HELPER(X) X = alcGetEnumValue(NULL, #X)
    HELPER(ALC_BYTE_SOFT);
    HELPER(ALC_UNSIGNED_BYTE_SOFT);
    HELPER(ALC_SHORT_SOFT);
    HELPER(ALC_UNSIGNED_SHORT_SOFT);
    HELPER(ALC_INT_SOFT);
    HELPER(ALC_UNSIGNED_INT_SOFT);
    HELPER(ALC_FLOAT_SOFT);
    HELPER(ALC_MONO_SOFT);
    HELPER(ALC_STEREO_SOFT);
    HELPER(ALC_QUAD_SOFT);
    HELPER(ALC_5POINT1_SOFT);
    HELPER(ALC_6POINT1_SOFT);
    HELPER(ALC_7POINT1_SOFT);
    HELPER(ALC_FORMAT_CHANNELS_SOFT);
    HELPER(ALC_FORMAT_TYPE_SOFT);
#undef  HELPER

    ALCdevice *loopback_device = alcLoopbackOpenDeviceSOFT(NULL);
    if(!loopback_device) {
        fputs("Could not open loopback device.\n", stderr);
        exit(EXIT_FAILURE);
    }
    if(alcIsRenderFormatSupportedSOFT(loopback_device, 
            22050, ALC_STEREO_SOFT, ALC_SHORT_SOFT) == ALC_FALSE) 
    {
        fputs("The loopback device does not support the "
              "required render format.\n", stderr);
        alcCloseDevice(loopback_device);
        exit(EXIT_FAILURE);
    }

    ALCint attrlist[11] = {
        ALC_MONO_SOURCES, 0,
        ALC_STEREO_SOURCES, 255,
        ALC_FREQUENCY, 22050,
        ALC_FORMAT_CHANNELS_SOFT, ALC_STEREO_SOFT,
        ALC_FORMAT_TYPE_SOFT, ALC_SHORT_SOFT,
        0
    };
    ALCcontext *ctx = alcCreateContext(loopback_device, attrlist);
    alcMakeContextCurrent(ctx);
    alGetError(); /* Clear the error state */

    if(argc<=1) {
        fprintf(stderr, "Usage : %s <small_oggfile>\n", argv[0]);
        alcCloseDevice(loopback_device);
        exit(EXIT_FAILURE);
    }

    SF_INFO sndinfo;
    SNDFILE *snd = sf_open(argv[1], SFM_READ, &sndinfo);
    if(!snd) {
        fprintf(stderr, "Failed to open \"%s\" : %s\n", 
                argv[1], sf_strerror(snd));
        alcCloseDevice(loopback_device);
        exit(EXIT_FAILURE);
    }
    if(sndinfo.channels != 2) {
        fprintf(stderr, "The source sound file has %d channels "
                "(exactly 2 are required).\n", sndinfo.channels);
        alcCloseDevice(loopback_device);
        exit(EXIT_FAILURE);
    }
    short *data = malloc(sndinfo.frames*2*sizeof(short));
    printf("Reading from '%s'...\n", argv[1]);
    sf_readf_short(snd, data, sndinfo.frames);
    sf_close(snd);

    ALuint audio_buffer;
    alGenBuffers(1, &audio_buffer);
    alBufferData(audio_buffer, AL_FORMAT_STEREO16, data,
            sndinfo.frames*2*sizeof(short), sndinfo.samplerate);
    free(data);

    ALuint audio_source;
    alGenSources(1, &audio_source);
    alSourcei(audio_source, AL_BUFFER, audio_buffer);

    unsigned num_frames = sndinfo.frames;
    ALCshort *rendered_samples = malloc(num_frames*2*sizeof(ALCshort));
    sndinfo.samplerate = 22050;
    sndinfo.channels = 2;
    sndinfo.format = SF_FORMAT_OGG | SF_FORMAT_VORBIS | SF_ENDIAN_FILE;
    snd = sf_open("rendered.ogg", SFM_WRITE, &sndinfo);

    alSourcePlay(audio_source);

    puts("Rendering frames...");
    unsigned chunk_size = 4096, off;
    for(off=0 ; off<(num_frames-chunk_size)*2 ; off+=2*chunk_size)
        alcRenderSamplesSOFT(loopback_device, rendered_samples+off, chunk_size);

    puts("Writing to 'rendered.ogg'...");
    sf_write_short(snd, rendered_samples, off);
    sf_close(snd);
    free(rendered_samples);

    alDeleteSources(1, &audio_source);
    alDeleteBuffers(1, &audio_buffer);
    alcMakeContextCurrent(NULL);
    alcDestroyContext(ctx);
    alcCloseDevice(loopback_device);

    exit(EXIT_SUCCESS);
}