static ALCenum oss_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    int numFragmentsLogSize;
    int log2FragmentSize;
    unsigned int periods;
    audio_buf_info info;
    ALuint frameSize;
    int numChannels;
    oss_data *data;
    int ossFormat;
    int ossSpeed;
    char *err;

    if(!deviceName)
        deviceName = oss_device;
    else if(strcmp(deviceName, oss_device) != 0)
        return ALC_INVALID_VALUE;

    data = (oss_data*)calloc(1, sizeof(oss_data));
    data->killNow = 0;

    data->fd = open(oss_capture, O_RDONLY);
    if(data->fd == -1)
    {
        free(data);
        ERR("Could not open %s: %s\n", oss_capture, strerror(errno));
        return ALC_INVALID_VALUE;
    }

    switch(device->FmtType)
    {
        case DevFmtByte:
            ossFormat = AFMT_S8;
            break;
        case DevFmtUByte:
            ossFormat = AFMT_U8;
            break;
        case DevFmtShort:
            ossFormat = AFMT_S16_NE;
            break;
        case DevFmtUShort:
        case DevFmtInt:
        case DevFmtUInt:
        case DevFmtFloat:
            free(data);
            ERR("%s capture samples not supported\n", DevFmtTypeString(device->FmtType));
            return ALC_INVALID_VALUE;
    }

    periods = 4;
    numChannels = ChannelsFromDevFmt(device->FmtChans);
    frameSize = numChannels * BytesFromDevFmt(device->FmtType);
    ossSpeed = device->Frequency;
    log2FragmentSize = log2i(device->UpdateSize * device->NumUpdates *
                             frameSize / periods);

    /* according to the OSS spec, 16 bytes are the minimum */
    if(log2FragmentSize < 4)
        log2FragmentSize = 4;
    numFragmentsLogSize = (periods << 16) | log2FragmentSize;

#define CHECKERR(func) if((func) < 0) {                                       \
    err = #func;                                                              \
    goto err;                                                                 \
}
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_SETFMT, &ossFormat));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_CHANNELS, &numChannels));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_SPEED, &ossSpeed));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_GETISPACE, &info));
    if(0)
    {
    err:
        ERR("%s failed: %s\n", err, strerror(errno));
        close(data->fd);
        free(data);
        return ALC_INVALID_VALUE;
    }
#undef CHECKERR

    if((int)ChannelsFromDevFmt(device->FmtChans) != numChannels)
    {
        ERR("Failed to set %s, got %d channels instead\n", DevFmtChannelsString(device->FmtChans), numChannels);
        close(data->fd);
        free(data);
        return ALC_INVALID_VALUE;
    }

    if(!((ossFormat == AFMT_S8 && device->FmtType == DevFmtByte) ||
         (ossFormat == AFMT_U8 && device->FmtType == DevFmtUByte) ||
         (ossFormat == AFMT_S16_NE && device->FmtType == DevFmtShort)))
    {
        ERR("Failed to set %s samples, got OSS format %#x\n", DevFmtTypeString(device->FmtType), ossFormat);
        close(data->fd);
        free(data);
        return ALC_INVALID_VALUE;
    }

    data->ring = CreateRingBuffer(frameSize, device->UpdateSize * device->NumUpdates);
    if(!data->ring)
    {
        ERR("Ring buffer create failed\n");
        close(data->fd);
        free(data);
        return ALC_OUT_OF_MEMORY;
    }

    data->data_size = info.fragsize;
    data->mix_data = calloc(1, data->data_size);

    device->ExtraData = data;
    data->thread = StartThread(OSSCaptureProc, device);
    if(data->thread == NULL)
    {
        device->ExtraData = NULL;
        free(data->mix_data);
        free(data);
        return ALC_OUT_OF_MEMORY;
    }

    device->szDeviceName = strdup(deviceName);
    return ALC_NO_ERROR;
}
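/* A minimal sketch of the log2i helper assumed above (not necessarily the
 * project's exact definition): the integer base-2 logarithm, i.e. the index
 * of the highest set bit, used to derive the OSS fragment-size exponent. */
static int log2i(ALCuint x)
{
    int y = 0;
    while(x > 1)
    {
        x >>= 1;
        y++;
    }
    return y;
}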
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Output, 0, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Input, 1, &enableIO, sizeof(ALuint));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }
    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice,
                               kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback,
                               kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtInt:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            /* Float PCM needs the float flag set, or the samples would be
             * interpreted as integers. */
            requestedFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;
        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // Save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat).
    // Set sample rate to the same as hardware for resampling later.
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate.
    // This is because the AudioUnit will automatically scale other properties, except for sample rate.
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output, 1, (void*)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice,
                               kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame,
                                            device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize,
                                  (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    al_string_copy_cstr(&device->DeviceName, deviceName);

    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;

    return ALC_INVALID_VALUE;
}
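/* Minimal sketches, under assumption (not necessarily the project's exact
 * definitions), of the allocate_buffer_list/destroy_buffer_list helpers used
 * above: a single-buffer AudioBufferList sized for the AudioUnit output. */
static AudioBufferList *allocate_buffer_list(UInt32 channelCount, UInt32 byteSize)
{
    AudioBufferList *list;

    list = calloc(1, offsetof(AudioBufferList, mBuffers) + sizeof(AudioBuffer));
    if(list)
    {
        list->mNumberBuffers = 1;
        list->mBuffers[0].mNumberChannels = channelCount;
        list->mBuffers[0].mDataByteSize = byteSize;
        list->mBuffers[0].mData = malloc(byteSize);
        if(list->mBuffers[0].mData == NULL)
        {
            free(list);
            list = NULL;
        }
    }
    return list;
}

static void destroy_buffer_list(AudioBufferList *list)
{
    if(list)
    {
        free(list->mBuffers[0].mData);
        free(list);
    }
}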
static ALCboolean oss_reset_playback(ALCdevice *device)
{
    oss_data *data = (oss_data*)device->ExtraData;
    int numFragmentsLogSize;
    int log2FragmentSize;
    unsigned int periods;
    audio_buf_info info;
    ALuint frameSize;
    int numChannels;
    int ossFormat;
    int ossSpeed;
    char *err;

    switch(device->FmtType)
    {
        case DevFmtByte:
            ossFormat = AFMT_S8;
            break;
        case DevFmtUByte:
            ossFormat = AFMT_U8;
            break;
        case DevFmtUShort:
        case DevFmtInt:
        case DevFmtUInt:
        case DevFmtFloat:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            ossFormat = AFMT_S16_NE;
            break;
    }

    periods = device->NumUpdates;
    numChannels = ChannelsFromDevFmt(device->FmtChans);
    frameSize = numChannels * BytesFromDevFmt(device->FmtType);

    ossSpeed = device->Frequency;
    log2FragmentSize = log2i(device->UpdateSize * frameSize);

    /* according to the OSS spec, 16 bytes are the minimum */
    if(log2FragmentSize < 4)
        log2FragmentSize = 4;
    /* Subtract one period since the temp mixing buffer counts as one. Still
     * need at least two on the card, though. */
    if(periods > 2) periods--;
    numFragmentsLogSize = (periods << 16) | log2FragmentSize;

#define CHECKERR(func) if((func) < 0) {                                       \
    err = #func;                                                              \
    goto err;                                                                 \
}
    /* Don't fail if SETFRAGMENT fails. We can handle just about anything
     * that's reported back via GETOSPACE */
    ioctl(data->fd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize);
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_SETFMT, &ossFormat));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_CHANNELS, &numChannels));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_SPEED, &ossSpeed));
    CHECKERR(ioctl(data->fd, SNDCTL_DSP_GETOSPACE, &info));
    if(0)
    {
    err:
        ERR("%s failed: %s\n", err, strerror(errno));
        return ALC_FALSE;
    }
#undef CHECKERR

    if((int)ChannelsFromDevFmt(device->FmtChans) != numChannels)
    {
        ERR("Failed to set %s, got %d channels instead\n", DevFmtChannelsString(device->FmtChans), numChannels);
        return ALC_FALSE;
    }

    if(!((ossFormat == AFMT_S8 && device->FmtType == DevFmtByte) ||
         (ossFormat == AFMT_U8 && device->FmtType == DevFmtUByte) ||
         (ossFormat == AFMT_S16_NE && device->FmtType == DevFmtShort)))
    {
        ERR("Failed to set %s samples, got OSS format %#x\n", DevFmtTypeString(device->FmtType), ossFormat);
        return ALC_FALSE;
    }

    device->Frequency = ossSpeed;
    device->UpdateSize = info.fragsize / frameSize;
    device->NumUpdates = info.fragments + 1;

    SetDefaultChannelOrder(device);

    return ALC_TRUE;
}
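/* Worked example for the SETFRAGMENT encoding above (illustrative numbers,
 * not from the source): the ioctl argument packs the fragment count into the
 * upper 16 bits and the log2 of the fragment byte size into the lower 16.
 * With a 4096-byte fragment, log2i(4096) == 12, so requesting four such
 * fragments encodes as (4 << 16) | 12. */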
static ALCenum pa_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    ALuint frame_size;
    pa_data *data;
    PaError err;

    if(!deviceName)
        deviceName = pa_device;
    else if(strcmp(deviceName, pa_device) != 0)
        return ALC_INVALID_VALUE;

    data = (pa_data*)calloc(1, sizeof(pa_data));
    if(data == NULL)
        return ALC_OUT_OF_MEMORY;

    frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    data->ring = CreateRingBuffer(frame_size, device->UpdateSize*device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    data->params.device = -1;
    if(!ConfigValueInt("port", "capture", &data->params.device) ||
       data->params.device < 0)
        data->params.device = Pa_GetDefaultInputDevice(); /* capture needs the default input device, not the output one */
    data->params.suggestedLatency = 0.0f;
    data->params.hostApiSpecificStreamInfo = NULL;

    switch(device->FmtType)
    {
        case DevFmtByte:
            data->params.sampleFormat = paInt8;
            break;
        case DevFmtUByte:
            data->params.sampleFormat = paUInt8;
            break;
        case DevFmtShort:
            data->params.sampleFormat = paInt16;
            break;
        case DevFmtInt:
            data->params.sampleFormat = paInt32;
            break;
        case DevFmtFloat:
            data->params.sampleFormat = paFloat32;
            break;
        case DevFmtUInt:
        case DevFmtUShort:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }
    data->params.channelCount = ChannelsFromDevFmt(device->FmtChans);

    err = Pa_OpenStream(&data->stream, &data->params, NULL, device->Frequency,
                        paFramesPerBufferUnspecified, paNoFlag, pa_capture_cb, device);
    if(err != paNoError)
    {
        ERR("Pa_OpenStream() returned an error: %s\n", Pa_GetErrorText(err));
        goto error;
    }

    device->DeviceName = strdup(deviceName);

    device->ExtraData = data;
    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data);
    return ALC_INVALID_VALUE;
}
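/* A minimal sketch of the pa_capture_cb callback passed to Pa_OpenStream
 * above, assuming it simply copies captured frames into the ring buffer; the
 * project's real callback may do more. The signature matches PortAudio's
 * PaStreamCallback. */
static int pa_capture_cb(const void *inputBuffer, void *outputBuffer,
                         unsigned long framesPerBuffer,
                         const PaStreamCallbackTimeInfo *timeInfo,
                         PaStreamCallbackFlags statusFlags, void *userData)
{
    ALCdevice *device = (ALCdevice*)userData;
    pa_data *data = (pa_data*)device->ExtraData;

    (void)outputBuffer;
    (void)timeInfo;
    (void)statusFlags;

    /* Queue the captured frames for later ALC capture reads. */
    WriteRingBuffer(data->ring, inputBuffer, framesPerBuffer);
    return 0;
}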
static ALCenum DSoundOpenCapture(ALCdevice *device, const ALCchar *deviceName)
{
    DSoundCaptureData *data = NULL;
    WAVEFORMATEXTENSIBLE InputType;
    DSCBUFFERDESC DSCBDescription;
    LPGUID guid = NULL;
    HRESULT hr, hrcom;
    ALuint samples;

    if(!CaptureDeviceList)
    {
        /* Initialize COM to prevent name truncation */
        hrcom = CoInitialize(NULL);
        hr = DirectSoundCaptureEnumerateA(DSoundEnumCaptureDevices, NULL);
        if(FAILED(hr))
            ERR("Error enumerating DirectSound devices (%#x)!\n", (unsigned int)hr);
        if(SUCCEEDED(hrcom))
            CoUninitialize();
    }

    if(!deviceName && NumCaptureDevices > 0)
    {
        deviceName = CaptureDeviceList[0].name;
        guid = &CaptureDeviceList[0].guid;
    }
    else
    {
        ALuint i;

        for(i = 0;i < NumCaptureDevices;i++)
        {
            if(strcmp(deviceName, CaptureDeviceList[i].name) == 0)
            {
                guid = &CaptureDeviceList[i].guid;
                break;
            }
        }
        if(i == NumCaptureDevices)
            return ALC_INVALID_VALUE;
    }

    switch(device->FmtType)
    {
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            WARN("%s capture samples not supported\n", DevFmtTypeString(device->FmtType));
            return ALC_INVALID_ENUM;

        case DevFmtUByte:
        case DevFmtShort:
        case DevFmtInt:
        case DevFmtFloat:
            break;
    }

    //Initialise requested device
    data = calloc(1, sizeof(DSoundCaptureData));
    if(!data)
        return ALC_OUT_OF_MEMORY;

    hr = DS_OK;

    //DirectSoundCapture Init code
    if(SUCCEEDED(hr))
        hr = DirectSoundCaptureCreate(guid, &data->DSC, NULL);
    if(SUCCEEDED(hr))
    {
        memset(&InputType, 0, sizeof(InputType));

        switch(device->FmtChans)
        {
            case DevFmtMono:
                InputType.dwChannelMask = SPEAKER_FRONT_CENTER;
                break;
            case DevFmtStereo:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT;
                break;
            case DevFmtQuad:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT |
                                          SPEAKER_BACK_LEFT |
                                          SPEAKER_BACK_RIGHT;
                break;
            case DevFmtX51:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT |
                                          SPEAKER_FRONT_CENTER |
                                          SPEAKER_LOW_FREQUENCY |
                                          SPEAKER_BACK_LEFT |
                                          SPEAKER_BACK_RIGHT;
                break;
            case DevFmtX51Side:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT |
                                          SPEAKER_FRONT_CENTER |
                                          SPEAKER_LOW_FREQUENCY |
                                          SPEAKER_SIDE_LEFT |
                                          SPEAKER_SIDE_RIGHT;
                break;
            case DevFmtX61:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT |
                                          SPEAKER_FRONT_CENTER |
                                          SPEAKER_LOW_FREQUENCY |
                                          SPEAKER_BACK_CENTER |
                                          SPEAKER_SIDE_LEFT |
                                          SPEAKER_SIDE_RIGHT;
                break;
            case DevFmtX71:
                InputType.dwChannelMask = SPEAKER_FRONT_LEFT |
                                          SPEAKER_FRONT_RIGHT |
                                          SPEAKER_FRONT_CENTER |
                                          SPEAKER_LOW_FREQUENCY |
                                          SPEAKER_BACK_LEFT |
                                          SPEAKER_BACK_RIGHT |
                                          SPEAKER_SIDE_LEFT |
                                          SPEAKER_SIDE_RIGHT;
                break;
        }

        InputType.Format.wFormatTag = WAVE_FORMAT_PCM;
        InputType.Format.nChannels = ChannelsFromDevFmt(device->FmtChans);
        InputType.Format.wBitsPerSample = BytesFromDevFmt(device->FmtType) * 8;
        InputType.Format.nBlockAlign = InputType.Format.nChannels*InputType.Format.wBitsPerSample/8;
        InputType.Format.nSamplesPerSec = device->Frequency;
        InputType.Format.nAvgBytesPerSec = InputType.Format.nSamplesPerSec*InputType.Format.nBlockAlign;
        InputType.Format.cbSize = 0;

        if(InputType.Format.nChannels > 2 || device->FmtType == DevFmtFloat)
        {
            InputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
            InputType.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
            InputType.Samples.wValidBitsPerSample = InputType.Format.wBitsPerSample;
            if(device->FmtType == DevFmtFloat)
                InputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
            else
                InputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
        }

        samples = device->UpdateSize * device->NumUpdates;
        samples = maxu(samples, 100 * device->Frequency / 1000);

        memset(&DSCBDescription, 0, sizeof(DSCBUFFERDESC));
        DSCBDescription.dwSize = sizeof(DSCBUFFERDESC);
        DSCBDescription.dwFlags = 0;
        DSCBDescription.dwBufferBytes = samples * InputType.Format.nBlockAlign;
        DSCBDescription.lpwfxFormat = &InputType.Format;

        hr = IDirectSoundCapture_CreateCaptureBuffer(data->DSC, &DSCBDescription, &data->DSCbuffer, NULL);
    }
    if(SUCCEEDED(hr))
    {
        data->Ring = CreateRingBuffer(InputType.Format.nBlockAlign, device->UpdateSize * device->NumUpdates);
        if(data->Ring == NULL)
            hr = DSERR_OUTOFMEMORY;
    }

    if(FAILED(hr))
    {
        ERR("Device init failed: 0x%08lx\n", hr);

        DestroyRingBuffer(data->Ring);
        data->Ring = NULL;
        if(data->DSCbuffer != NULL)
            IDirectSoundCaptureBuffer_Release(data->DSCbuffer);
        data->DSCbuffer = NULL;
        if(data->DSC)
            IDirectSoundCapture_Release(data->DSC);
        data->DSC = NULL;

        free(data);
        return ALC_INVALID_VALUE;
    }

    data->BufferBytes = DSCBDescription.dwBufferBytes;
    SetDefaultWFXChannelOrder(device);

    device->DeviceName = strdup(deviceName);
    device->ExtraData = data;

    return ALC_NO_ERROR;
}
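/* Minimal sketches of the maxu/minu helpers assumed here and in the
 * PulseAudio backend below (unsigned max/min, used to clamp the capture
 * buffer to at least 100ms and the fragment size to at most 50ms). */
static inline ALuint maxu(ALuint a, ALuint b)
{ return ((a > b) ? a : b); }
static inline ALuint minu(ALuint a, ALuint b)
{ return ((a < b) ? a : b); }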
static ALCenum pulse_open_capture(ALCdevice *device, const ALCchar *device_name)
{
    const char *pulse_name = NULL;
    pa_stream_flags_t flags = 0;
    pa_channel_map chanmap;
    pulse_data *data;
    pa_operation *o;
    ALuint samples;

    if(device_name)
    {
        ALuint i;

        if(!allCaptureDevNameMap)
            probe_devices(AL_TRUE);

        for(i = 0;i < numCaptureDevNames;i++)
        {
            if(strcmp(device_name, allCaptureDevNameMap[i].name) == 0)
            {
                pulse_name = allCaptureDevNameMap[i].device_name;
                break;
            }
        }
        if(i == numCaptureDevNames)
            return ALC_INVALID_VALUE;
    }

    if(pulse_open(device) == ALC_FALSE)
        return ALC_INVALID_VALUE;

    data = device->ExtraData;
    pa_threaded_mainloop_lock(data->loop);

    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    switch(device->FmtType)
    {
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtInt:
            data->spec.format = PA_SAMPLE_S32NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
        case DevFmtByte:
        case DevFmtUShort:
        case DevFmtUInt:
            ERR("%s capture samples not supported\n", DevFmtTypeString(device->FmtType));
            pa_threaded_mainloop_unlock(data->loop);
            goto fail;
    }

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    samples = device->UpdateSize * device->NumUpdates;
    samples = maxu(samples, 100 * device->Frequency / 1000);

    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.maxlength = samples * pa_frame_size(&data->spec);
    data->attr.tlength = -1;
    data->attr.fragsize = minu(samples, 50*device->Frequency/1000) *
                          pa_frame_size(&data->spec);

    flags |= PA_STREAM_DONT_MOVE;
    flags |= PA_STREAM_START_CORKED|PA_STREAM_ADJUST_LATENCY;
    data->stream = connect_record_stream(pulse_name, data->loop, data->context,
                                         flags, &data->attr, &data->spec,
                                         &chanmap);
    if(!data->stream)
    {
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }
    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    data->device_name = strdup(pa_stream_get_device_name(data->stream));
    o = pa_context_get_source_info_by_name(data->context, data->device_name,
                                           source_name_callback, device);
    WAIT_FOR_OPERATION(o, data->loop);

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_NO_ERROR;

fail:
    pulse_close(device);
    return ALC_INVALID_VALUE;
}
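/* A plausible sketch of the WAIT_FOR_OPERATION helper used above (the
 * project's exact macro may differ): block on the threaded mainloop until
 * the PulseAudio operation finishes, then drop the reference. */
#define WAIT_FOR_OPERATION(op, loop) do {                                     \
    if(op)                                                                    \
    {                                                                         \
        while(pa_operation_get_state(op) == PA_OPERATION_RUNNING)             \
            pa_threaded_mainloop_wait(loop);                                  \
        pa_operation_unref(op);                                               \
    }                                                                         \
} while(0)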
static ALCboolean ALCsolarisBackend_reset(ALCsolarisBackend *self)
{
    ALCdevice *device = STATIC_CAST(ALCbackend,self)->mDevice;
    audio_info_t info;
    ALsizei frameSize;
    ALsizei numChannels;

    AUDIO_INITINFO(&info);

    info.play.sample_rate = device->Frequency;

    if(device->FmtChans != DevFmtMono)
        device->FmtChans = DevFmtStereo;
    numChannels = ChannelsFromDevFmt(device->FmtChans, device->AmbiOrder);
    info.play.channels = numChannels;

    switch(device->FmtType)
    {
        case DevFmtByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
        case DevFmtUByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR8;
            break;
        case DevFmtUShort:
        case DevFmtInt:
        case DevFmtUInt:
        case DevFmtFloat:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            info.play.precision = 16;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
    }

    frameSize = numChannels * BytesFromDevFmt(device->FmtType);
    info.play.buffer_size = device->UpdateSize*device->NumUpdates * frameSize;

    if(ioctl(self->fd, AUDIO_SETINFO, &info) < 0)
    {
        ERR("ioctl failed: %s\n", strerror(errno));
        return ALC_FALSE;
    }

    if(ChannelsFromDevFmt(device->FmtChans, device->AmbiOrder) != (ALsizei)info.play.channels)
    {
        ERR("Failed to set %s, got %u channels instead\n", DevFmtChannelsString(device->FmtChans), info.play.channels);
        return ALC_FALSE;
    }

    if(!((info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR8 &&
          device->FmtType == DevFmtUByte) ||
         (info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtByte) ||
         (info.play.precision == 16 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtShort) ||
         (info.play.precision == 32 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtInt)))
    {
        ERR("Could not set %s samples, got %d (0x%x)\n", DevFmtTypeString(device->FmtType),
            info.play.precision, info.play.encoding);
        return ALC_FALSE;
    }

    device->Frequency = info.play.sample_rate;
    device->UpdateSize = (info.play.buffer_size/device->NumUpdates) + 1;

    SetDefaultChannelOrder(device);

    free(self->mix_data);
    self->data_size = device->UpdateSize * FrameSizeFromDevFmt(
        device->FmtChans, device->FmtType, device->AmbiOrder
    );
    self->mix_data = calloc(1, self->data_size);

    return ALC_TRUE;
}
static ALCboolean solaris_reset_playback(ALCdevice *device)
{
    solaris_data *data = (solaris_data*)device->ExtraData;
    audio_info_t info;
    ALuint frameSize;
    int numChannels;

    AUDIO_INITINFO(&info);

    info.play.sample_rate = device->Frequency;

    if(device->FmtChans != DevFmtMono)
        device->FmtChans = DevFmtStereo;
    numChannels = ChannelsFromDevFmt(device->FmtChans);
    info.play.channels = numChannels;

    switch(device->FmtType)
    {
        case DevFmtByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
        case DevFmtUByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR8;
            break;
        case DevFmtUShort:
        case DevFmtInt:
        case DevFmtUInt:
        case DevFmtFloat:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            info.play.precision = 16;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
    }

    frameSize = numChannels * BytesFromDevFmt(device->FmtType);
    info.play.buffer_size = device->UpdateSize*device->NumUpdates * frameSize;

    if(ioctl(data->fd, AUDIO_SETINFO, &info) < 0)
    {
        ERR("ioctl failed: %s\n", strerror(errno));
        return ALC_FALSE;
    }

    if(ChannelsFromDevFmt(device->FmtChans) != info.play.channels)
    {
        ERR("Could not set %d channels, got %d instead\n", ChannelsFromDevFmt(device->FmtChans), info.play.channels);
        return ALC_FALSE;
    }

    if(!((info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR8 &&
          device->FmtType == DevFmtUByte) ||
         (info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtByte) ||
         (info.play.precision == 16 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtShort) ||
         (info.play.precision == 32 && info.play.encoding == AUDIO_ENCODING_LINEAR &&
          device->FmtType == DevFmtInt)))
    {
        ERR("Could not set %s samples, got %d (0x%x)\n", DevFmtTypeString(device->FmtType),
            info.play.precision, info.play.encoding);
        return ALC_FALSE;
    }

    device->Frequency = info.play.sample_rate;
    device->UpdateSize = (info.play.buffer_size/device->NumUpdates) + 1;

    SetDefaultChannelOrder(device);

    return ALC_TRUE;
}
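/* A minimal sketch of the BytesFromDevFmt helper assumed throughout these
 * backends (the per-sample byte size for each device format type; the
 * project's definition may differ in style). */
static ALuint BytesFromDevFmt(enum DevFmtType type)
{
    switch(type)
    {
        case DevFmtByte: return sizeof(ALbyte);
        case DevFmtUByte: return sizeof(ALubyte);
        case DevFmtShort: return sizeof(ALshort);
        case DevFmtUShort: return sizeof(ALushort);
        case DevFmtInt: return sizeof(ALint);
        case DevFmtUInt: return sizeof(ALuint);
        case DevFmtFloat: return sizeof(ALfloat);
    }
    return 0;
}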