void SoundOutput_DirectSound::play_sound_buffer() { start_mixer_thread(); HRESULT err = soundbuffer->Play(0, 0, DSBPLAY_LOOPING); if (FAILED(err)) { stop_mixer_thread(); throw Exception("Could not start sound buffer playback"); } }
// Sets up the macOS sound output: sizes the fragment ring buffer from the
// requested frequency/latency pair, allocates backing storage for all
// fragments, and launches the mixer thread.
CL_SoundOutput_MacOSX::CL_SoundOutput_MacOSX(int frequency, int latency)
: CL_SoundOutput_Impl(frequency, latency), frequency(frequency), latency(latency),
  fragment_size(0), next_fragment(0), read_cursor(0), fragments_available(0)
{
	// All fragments start out available for the mixer to fill.
	fragments_available = fragment_buffer_count;

	// Sample frames per fragment for the requested latency
	// (latency is presumably in milliseconds, hence the /1000 — matches the
	// other backends in this file).
	fragment_size = frequency * latency / fragment_buffer_count / 1000;
	fragment_size = (fragment_size + 3) & ~3; // Force to be a multiple of 4

	// Backing store: 16-bit samples, 2 channels, fragment_buffer_count fragments.
	fragment_data = CL_DataBuffer(fragment_size * sizeof(short) * 2 * fragment_buffer_count);

	start_mixer_thread();
}
// Opens the default ALSA playback device and configures it for interleaved
// stereo float output at (approximately) the requested mixing frequency.
// On any failure the handle is reset to nullptr and sound is silently
// disabled rather than throwing.
SoundOutput_alsa::SoundOutput_alsa(int mixing_frequency, int mixing_latency)
: SoundOutput_Impl(mixing_frequency, mixing_latency), frames_in_buffer(4096), frames_in_period(1024)
{
	snd_pcm_hw_params_t *hw_params;

	int err = snd_pcm_open(&handle, "default", SND_PCM_STREAM_PLAYBACK, 0);
	if (err < 0)
	{
		log_event("warn", "ClanSound: Couldn't open sound device, disabling sound");
		handle = nullptr;
		return;
	}

	// Negotiate the hardware parameters. The *_near setters may adjust the
	// requested rate / buffer size / period size to the closest values the
	// driver supports, writing the granted values back through the pointers.
	snd_pcm_hw_params_alloca(&hw_params);
	snd_pcm_hw_params_any(handle, hw_params);
	snd_pcm_hw_params_set_access(handle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
	snd_pcm_hw_params_set_format(handle, hw_params, SND_PCM_FORMAT_FLOAT);
	snd_pcm_hw_params_set_channels(handle, hw_params, 2);
	snd_pcm_hw_params_set_rate_near(handle, hw_params, (unsigned int *)&this->mixing_frequency, nullptr);
	snd_pcm_hw_params_set_buffer_size_near(handle, hw_params, &frames_in_buffer);
	frames_in_period = frames_in_buffer / 4;
	snd_pcm_hw_params_set_period_size_near(handle, hw_params, &frames_in_period, nullptr);

	err = snd_pcm_hw_params(handle, hw_params);
	if (err < 0)
	{
		log_event("warn", "ClanSound: Couldn't initialize sound device, disabling sound");
		snd_pcm_close(handle);
		handle = nullptr;
		return;
	}

	// Read back the period size the driver actually granted.
	snd_pcm_hw_params_get_period_size(hw_params, &frames_in_period, nullptr);

	start_mixer_thread();
}
// Sets up WASAPI shared-mode playback: resolves the default render endpoint,
// negotiates a 2-channel 32-bit float format at the requested mixing
// frequency (falling back to the device's closest supported rate), creates
// the buffer-ready event and render client, then starts the mixer thread.
// Throws Exception on any WASAPI failure; the event handle is released by
// the catch block if construction fails after the event was created.
SoundOutput_Win32::SoundOutput_Win32(int init_mixing_frequency, int init_mixing_latency)
: SoundOutput_Impl(init_mixing_frequency, init_mixing_latency), audio_buffer_ready_event(INVALID_HANDLE_VALUE),
  is_playing(false), fragment_size(0), wait_timeout(mixing_latency * 2), write_pos(0)
{
	try
	{
		ComPtr<IMMDeviceEnumerator> device_enumerator;
		HRESULT result = CoCreateInstance(__uuidof(MMDeviceEnumerator), 0, CLSCTX_ALL, __uuidof(IMMDeviceEnumerator), (void**)device_enumerator.output_variable());
		if (FAILED(result))
			throw Exception("Unable to create IMMDeviceEnumerator instance");

		result = device_enumerator->GetDefaultAudioEndpoint(eRender, eMultimedia, mmdevice.output_variable());
		if (FAILED(result))
			throw Exception("IDeviceEnumerator.GetDefaultAudioEndpoint failed");

		result = mmdevice->Activate(__uuidof(IAudioClient), CLSCTX_ALL, 0, (void**)audio_client.output_variable());
		if (FAILED(result))
			throw Exception("IMMDevice.Activate failed");

		// Request 2-channel IEEE-float PCM at the mixing frequency.
		WAVEFORMATEXTENSIBLE wave_format;
		wave_format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
		wave_format.Format.nChannels = 2;
		wave_format.Format.nBlockAlign = 2 * sizeof(float);
		wave_format.Format.wBitsPerSample = 8 * sizeof(float);
		wave_format.Format.cbSize = 22;
		wave_format.Samples.wValidBitsPerSample = wave_format.Format.wBitsPerSample;
		wave_format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;
		wave_format.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
		wave_format.Format.nSamplesPerSec = mixing_frequency;
		wave_format.Format.nAvgBytesPerSec = wave_format.Format.nSamplesPerSec * wave_format.Format.nBlockAlign;

		WAVEFORMATEX *closest_match = 0;
		result = audio_client->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*)&wave_format, &closest_match);
		if (FAILED(result))
			throw Exception("IAudioClient.IsFormatSupported failed");

		// S_FALSE: we could not get the exact format we wanted. Try to use the
		// frequency that the closest matching format is using instead.
		if (result == S_FALSE)
		{
			mixing_frequency = closest_match->nSamplesPerSec;
			wait_timeout = mixing_latency * 2;
			wave_format.Format.nSamplesPerSec = mixing_frequency;
			wave_format.Format.nAvgBytesPerSec = wave_format.Format.nSamplesPerSec * wave_format.Format.nBlockAlign;

			// closest_match is allocated by WASAPI and must be freed by us.
			CoTaskMemFree(closest_match);
			closest_match = 0;
		}

		/*	// For debugging what mixing format Windows is using.
			WAVEFORMATEX *device_format = 0; // Note: this points at a WAVEFORMATEXTENSIBLE if cbSize is 22
			result = audio_client->GetMixFormat(&device_format);
			if (SUCCEEDED(result))
			{
				CoTaskMemFree(device_format);
				device_format = 0;
			}
		*/

		result = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, mixing_latency * (REFERENCE_TIME)1000, 0, (WAVEFORMATEX*)&wave_format, 0);
		if (FAILED(result))
			throw Exception("IAudioClient.Initialize failed");

		result = audio_client->GetService(__uuidof(IAudioRenderClient), (void**)audio_render_client.output_variable());
		if (FAILED(result))
			throw Exception("IAudioClient.GetService(IAudioRenderClient) failed");

		audio_buffer_ready_event = CreateEvent(0, TRUE, TRUE, 0);
		// BUG FIX: CreateEvent returns NULL on failure, not INVALID_HANDLE_VALUE,
		// so the previous "== INVALID_HANDLE_VALUE" test could never detect a
		// failed event creation.
		if (audio_buffer_ready_event == 0)
		{
			// Restore the sentinel so the catch block (and any destructor
			// cleanup keyed off INVALID_HANDLE_VALUE) does not CloseHandle a
			// null handle.
			audio_buffer_ready_event = INVALID_HANDLE_VALUE;
			throw Exception("CreateEvent failed");
		}

		result = audio_client->SetEventHandle(audio_buffer_ready_event);
		if (FAILED(result))
			throw Exception("IAudioClient.SetEventHandle failed");

		// fragment_size is reported in audio frames; the staging buffer holds
		// one full fragment of stereo float samples.
		result = audio_client->GetBufferSize(&fragment_size);
		if (FAILED(result))
			throw Exception("IAudioClient.GetBufferSize failed");

		next_fragment = DataBuffer(sizeof(float) * 2 * fragment_size);

		start_mixer_thread();
	}
	catch (...)
	{
		if (audio_buffer_ready_event != INVALID_HANDLE_VALUE)
			CloseHandle(audio_buffer_ready_event);
		throw;
	}
}
// Opens the OSS dsp device and configures it for 16-bit native-endian stereo
// output at the requested mixing frequency. If the device cannot be opened at
// all, sound is quietly disabled (has_sound = false) instead of throwing;
// format/stereo/rate failures after a successful open close the fd and throw.
CL_SoundOutput_OSS::CL_SoundOutput_OSS(int mixing_frequency, int mixing_latency)
: CL_SoundOutput_Impl(mixing_frequency, mixing_latency), dev_dsp_fd(-1), frag_size(0), has_sound(true)
{
	dev_dsp_fd = open(DEFAULT_DSP, O_WRONLY|O_NONBLOCK);
	if (dev_dsp_fd == -1)
	{
		has_sound = false;
		frag_size = mixing_frequency/2;
		return;
		// throw CL_Error("Could not open " + DEFAULT_DSP + ". No sound will be available.");
	}
	// Device is open; switch back to blocking writes for the mixer thread.
	fcntl(dev_dsp_fd, F_SETFL, fcntl(dev_dsp_fd, F_GETFL) &~ O_NONBLOCK);
#ifndef USE_DRIVER_FRAGSIZE
	int frag_settings = 0x0003000b; // 0xMMMMSSSS
	                                // (where MMMM = num fragments, SSSS = fragment size)
	if (ioctl(dev_dsp_fd, SNDCTL_DSP_SETFRAGMENT, &frag_settings))
	{
		cl_log_event("debug", "ClanSound: Failed to set soundcard fragment size. Sound may have a long latency.");
	}
#endif

	// Require 16-bit native-endian samples; the ioctl writes back the format
	// the driver actually selected.
	int format = AFMT_S16_NE;
	ioctl(dev_dsp_fd, SNDCTL_DSP_SETFMT, &format);
	if (format != AFMT_S16_NE)
	{
		close(dev_dsp_fd);
		throw CL_Exception("Requires 16 bit soundcard. No sound will be available.");
	}

	int stereo = 1;
	ioctl(dev_dsp_fd, SNDCTL_DSP_STEREO, &stereo);
	if (stereo != 1)
	{
		close(dev_dsp_fd);
		throw CL_Exception("ClanSound: Requires 16 bit stereo capable soundcard. No sound will be available.");
	}

	// The driver may round the rate to the nearest one it supports;
	// accept anything within 10% of what was asked for.
	int speed = mixing_frequency;
	ioctl(dev_dsp_fd, SNDCTL_DSP_SPEED, &speed);
	float percent_wrong = speed / (float) mixing_frequency;
	if (percent_wrong < 0.90 || percent_wrong > 1.10)
	{
		close(dev_dsp_fd);
		// BUG FIX: the old message hard-coded "22.05 kHz" even though the
		// mixing rate is a constructor parameter.
		throw CL_Exception("ClanSound: Mixing rate not supported by soundcard.");
	}

	// Try to improve mixing performance by using the same mixing buffer size
	// as the sound device does:
	int err = ioctl(dev_dsp_fd, SNDCTL_DSP_GETBLKSIZE, &frag_size);
	if (err == -1)
	{
		cl_log_event("debug", "ClanSound: Warning, Couldn't get sound device blocksize. Using 0.25 sec mixing buffer.");
		frag_size = mixing_frequency/2; // 0.25 sec mixing buffer used.
	}

	// Queried for its side effect on the driver / for parity with other
	// backends; the result is not used here.
	audio_buf_info info;
	ioctl(dev_dsp_fd, SNDCTL_DSP_GETOSPACE, &info);

	start_mixer_thread();
}