Example #1
File: coreaudio.c Project: 9heart/DT3
static ALCenum ca_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    AudioStreamBasicDescription requestedFormat;  // The application requested format
    AudioStreamBasicDescription hardwareFormat;   // The hardware format
    AudioStreamBasicDescription outputFormat;     // The AudioUnit output format
    AURenderCallbackStruct input;
    ComponentDescription desc;
    AudioDeviceID inputDevice;
    UInt32 outputFrameCount;
    UInt32 propertySize;
    UInt32 enableIO;
    Component comp;
    ca_data *data;
    OSStatus err;

    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    // Search for component with given description
    comp = FindNextComponent(NULL, &desc);
    if(comp == NULL)
    {
        ERR("FindNextComponent failed\n");
        return ALC_INVALID_VALUE;
    }

    data = calloc(1, sizeof(*data));
    if(data == NULL)
        return ALC_OUT_OF_MEMORY;
    device->ExtraData = data;

    // Open the component
    err = OpenAComponent(comp, &data->audioUnit);
    if(err != noErr)
    {
        ERR("OpenAComponent failed\n");
        goto error;
    }

    // Turn off AudioUnit output
    enableIO = 0;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Turn on AudioUnit input
    enableIO = 1;
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Get the default input device
    propertySize = sizeof(AudioDeviceID);
    err = AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice, &propertySize, &inputDevice);
    if(err != noErr)
    {
        ERR("AudioHardwareGetProperty failed\n");
        goto error;
    }

    if(inputDevice == kAudioDeviceUnknown)
    {
        ERR("No input device found\n");
        goto error;
    }

    // Track the input device
    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &inputDevice, sizeof(AudioDeviceID));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // set capture callback
    input.inputProc = ca_capture_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Initialize the device
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        goto error;
    }

    // Get the hardware format
    propertySize = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &hardwareFormat, &propertySize);
    if(err != noErr || propertySize != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        goto error;
    }

    // Set up the requested format description
    switch(device->FmtType)
    {
        case DevFmtUByte:
            requestedFormat.mBitsPerChannel = 8;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtShort:
            requestedFormat.mBitsPerChannel = 16;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
            break;
        case DevFmtFloat:
            requestedFormat.mBitsPerChannel = 32;
            requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
            break;
        case DevFmtByte:
        case DevFmtUShort:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }

    switch(device->FmtChans)
    {
        case DevFmtMono:
            requestedFormat.mChannelsPerFrame = 1;
            break;
        case DevFmtStereo:
            requestedFormat.mChannelsPerFrame = 2;
            break;

        case DevFmtQuad:
        case DevFmtX51:
        case DevFmtX51Side:
        case DevFmtX61:
        case DevFmtX71:
            ERR("%s not supported\n", DevFmtChannelsString(device->FmtChans));
            goto error;
    }

    requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
    requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
    requestedFormat.mSampleRate = device->Frequency;
    requestedFormat.mFormatID = kAudioFormatLinearPCM;
    requestedFormat.mReserved = 0;
    requestedFormat.mFramesPerPacket = 1;

    // save requested format description for later use
    data->format = requestedFormat;
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    // Use intermediate format for sample rate conversion (outputFormat)
    // Set sample rate to the same as hardware for resampling later
    outputFormat = requestedFormat;
    outputFormat.mSampleRate = hardwareFormat.mSampleRate;

    // Determine sample rate ratio for resampling
    data->sampleRateRatio = outputFormat.mSampleRate / device->Frequency;

    // The output format should be the requested format, but using the hardware sample rate
    // This is because the AudioUnit will automatically scale other properties, except for sample rate
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, (void *)&outputFormat, sizeof(outputFormat));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        goto error;
    }

    // Set the AudioUnit output format frame count
    outputFrameCount = device->UpdateSize * data->sampleRateRatio;
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Output, 0, &outputFrameCount, sizeof(outputFrameCount));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed: %d\n", err);
        goto error;
    }

    // Set up sample converter
    err = AudioConverterNew(&outputFormat, &requestedFormat, &data->audioConverter);
    if(err != noErr)
    {
        ERR("AudioConverterNew failed: %d\n", err);
        goto error;
    }

    // Create a buffer for use in the resample callback
    data->resampleBuffer = malloc(device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->resampleBuffer == NULL)
        goto error;

    // Allocate buffer for the AudioUnit output
    data->bufferList = allocate_buffer_list(outputFormat.mChannelsPerFrame, device->UpdateSize * data->frameSize * data->sampleRateRatio);
    if(data->bufferList == NULL)
        goto error;

    data->ring = CreateRingBuffer(data->frameSize, (device->UpdateSize * data->sampleRateRatio) * device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data->resampleBuffer);
    destroy_buffer_list(data->bufferList);

    if(data->audioConverter)
        AudioConverterDispose(data->audioConverter);
    if(data->audioUnit)
        CloseComponent(data->audioUnit);

    free(data);
    device->ExtraData = NULL;

    return ALC_INVALID_VALUE;
}
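
The capture path above records at the hardware rate and converts down to the requested rate, so the AudioUnit slice size and the resample buffer both scale by the ratio of the two rates. A minimal sketch of that sizing arithmetic, using hypothetical values in place of the device and hardware fields:

#include <stdio.h>

int main(void)
{
    /* Hypothetical values; ca_open_capture reads these from the device
     * and from the hardware stream format queried above. */
    double hardwareRate = 44100.0;  /* hardwareFormat.mSampleRate */
    double requestedRate = 22050.0; /* device->Frequency */
    unsigned updateSize = 1024;     /* device->UpdateSize, in frames */
    unsigned frameSize = 4;         /* 16-bit stereo */

    double ratio = hardwareRate / requestedRate;   /* data->sampleRateRatio */
    unsigned sliceFrames = (unsigned)(updateSize * ratio);
    unsigned resampleBytes = (unsigned)(updateSize * frameSize * ratio);

    printf("ratio=%.2f, frames per slice=%u, resample buffer=%u bytes\n",
           ratio, sliceFrames, resampleBytes);
    return 0;
}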
Example #2
static ALCenum pa_open_capture(ALCdevice *device, const ALCchar *deviceName)
{
    ALuint frame_size;
    pa_data *data;
    PaError err;

    if(!deviceName)
        deviceName = pa_device;
    else if(strcmp(deviceName, pa_device) != 0)
        return ALC_INVALID_VALUE;

    data = (pa_data*)calloc(1, sizeof(pa_data));
    if(data == NULL)
        return ALC_OUT_OF_MEMORY;

    frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    data->ring = CreateRingBuffer(frame_size, device->UpdateSize*device->NumUpdates);
    if(data->ring == NULL)
        goto error;

    data->params.device = -1;
    if(!ConfigValueInt("port", "capture", &data->params.device) ||
       data->params.device < 0)
        data->params.device = Pa_GetDefaultInputDevice();
    data->params.suggestedLatency = 0.0f;
    data->params.hostApiSpecificStreamInfo = NULL;

    switch(device->FmtType)
    {
        case DevFmtByte:
            data->params.sampleFormat = paInt8;
            break;
        case DevFmtUByte:
            data->params.sampleFormat = paUInt8;
            break;
        case DevFmtShort:
            data->params.sampleFormat = paInt16;
            break;
        case DevFmtInt:
            data->params.sampleFormat = paInt32;
            break;
        case DevFmtFloat:
            data->params.sampleFormat = paFloat32;
            break;
        case DevFmtUInt:
        case DevFmtUShort:
            ERR("%s samples not supported\n", DevFmtTypeString(device->FmtType));
            goto error;
    }
    data->params.channelCount = ChannelsFromDevFmt(device->FmtChans);

    err = Pa_OpenStream(&data->stream, &data->params, NULL, device->Frequency,
                        paFramesPerBufferUnspecified, paNoFlag, pa_capture_cb, device);
    if(err != paNoError)
    {
        ERR("Pa_OpenStream() returned an error: %s\n", Pa_GetErrorText(err));
        goto error;
    }

    device->DeviceName = strdup(deviceName);

    device->ExtraData = data;
    return ALC_NO_ERROR;

error:
    DestroyRingBuffer(data->ring);
    free(data);
    return ALC_INVALID_VALUE;
}
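
The main sizing decision here is the ring buffer, which holds UpdateSize*NumUpdates frames between the PortAudio callback and the reader. A small sketch of what that capacity works out to, with made-up device values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical device values standing in for the ALCdevice fields. */
    unsigned updateSize = 441, numUpdates = 4; /* frames, periods */
    unsigned frameSize = 4;                    /* 16-bit stereo */
    unsigned freq = 44100;                     /* Hz */

    unsigned frames = updateSize * numUpdates;
    printf("ring buffer: %u frames = %u bytes = %.1f ms\n",
           frames, frames * frameSize, frames * 1000.0 / freq);
    return 0;
}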
Example #3
File: coreaudio.c Project: 9heart/DT3
static ALCboolean ca_reset_playback(ALCdevice *device)
{
    ca_data *data = (ca_data*)device->ExtraData;
    AudioStreamBasicDescription streamFormat;
    AURenderCallbackStruct input;
    OSStatus err;
    UInt32 size;

    /* init and start the default audio unit... */
    err = AudioUnitInitialize(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioUnitInitialize failed\n");
        return ALC_FALSE;
    }

    err = AudioOutputUnitStart(data->audioUnit);
    if(err != noErr)
    {
        ERR("AudioOutputUnitStart failed\n");
        return ALC_FALSE;
    }

    /* retrieve default output unit's properties (output side) */
    size = sizeof(AudioStreamBasicDescription);
    err = AudioUnitGetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamFormat, &size);
    if(err != noErr || size != sizeof(AudioStreamBasicDescription))
    {
        ERR("AudioUnitGetProperty failed\n");
        return ALC_FALSE;
    }

#if 0
    TRACE("Output streamFormat of default output unit -\n");
    TRACE("  streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
    TRACE("  streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
    TRACE("  streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
    TRACE("  streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
    TRACE("  streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
    TRACE("  streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
#endif

    /* set default output unit's input side to match output side */
    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, size);
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    if(device->Frequency != streamFormat.mSampleRate)
    {
        device->UpdateSize = (ALuint)((ALuint64)device->UpdateSize *
                                      streamFormat.mSampleRate /
                                      device->Frequency);
        device->Frequency = streamFormat.mSampleRate;
    }

    /* FIXME: How to tell what channels are what in the output device, and how
     * to specify what we're giving?  eg, 6.0 vs 5.1 */
    switch(streamFormat.mChannelsPerFrame)
    {
        case 1:
            device->FmtChans = DevFmtMono;
            break;
        case 2:
            device->FmtChans = DevFmtStereo;
            break;
        case 4:
            device->FmtChans = DevFmtQuad;
            break;
        case 6:
            device->FmtChans = DevFmtX51;
            break;
        case 7:
            device->FmtChans = DevFmtX61;
            break;
        case 8:
            device->FmtChans = DevFmtX71;
            break;
        default:
            ERR("Unhandled channel count (%d), using Stereo\n", streamFormat.mChannelsPerFrame);
            device->FmtChans = DevFmtStereo;
            streamFormat.mChannelsPerFrame = 2;
            break;
    }
    SetDefaultWFXChannelOrder(device);

    /* use channel count and sample rate from the default output unit's current
     * parameters, but reset everything else */
    streamFormat.mFramesPerPacket = 1;
    switch(device->FmtType)
    {
        case DevFmtUByte:
            device->FmtType = DevFmtByte;
            /* fall-through */
        case DevFmtByte:
            streamFormat.mBitsPerChannel = 8;
            streamFormat.mBytesPerPacket = streamFormat.mChannelsPerFrame;
            streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame;
            break;
        case DevFmtUShort:
        case DevFmtFloat:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            streamFormat.mBitsPerChannel = 16;
            streamFormat.mBytesPerPacket = 2 * streamFormat.mChannelsPerFrame;
            streamFormat.mBytesPerFrame = 2 * streamFormat.mChannelsPerFrame;
            break;
    }
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
                                kAudioFormatFlagsNativeEndian |
                                kLinearPCMFormatFlagIsPacked;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamFormat, sizeof(AudioStreamBasicDescription));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    /* setup callback */
    data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    input.inputProc = ca_callback;
    input.inputProcRefCon = device;

    err = AudioUnitSetProperty(data->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(AURenderCallbackStruct));
    if(err != noErr)
    {
        ERR("AudioUnitSetProperty failed\n");
        return ALC_FALSE;
    }

    return ALC_TRUE;
}
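
When the output unit reports a sample rate other than the one requested, the code above rescales UpdateSize so each update still covers the same amount of wall-clock time. A sketch of that step in isolation, with hypothetical rates (the 64-bit widening mirrors the cast in the function):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical values; ca_reset_playback reads them from the device
     * and the AudioUnit's output stream format. */
    uint32_t updateSize = 1024; /* frames per update at the old rate */
    uint32_t oldRate = 48000;   /* device->Frequency */
    uint32_t newRate = 44100;   /* streamFormat.mSampleRate */

    double msBefore = updateSize * 1000.0 / oldRate;
    updateSize = (uint32_t)((uint64_t)updateSize * newRate / oldRate);
    printf("update: %u frames, %.2f ms before vs %.2f ms after\n",
           updateSize, msBefore, updateSize * 1000.0 / newRate);
    return 0;
}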
Example #4
static ALCboolean pulse_reset_playback(ALCdevice *device) //{{{
{
    pulse_data *data = device->ExtraData;
    pa_stream_flags_t flags = 0;
    pa_channel_map chanmap;

    pa_threaded_mainloop_lock(data->loop);

    if(!(device->Flags&DEVICE_CHANNELS_REQUEST))
    {
        pa_operation *o;
        o = pa_context_get_sink_info_by_name(data->context, data->device_name, sink_info_callback, device);
        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);
    }
    if(!(device->Flags&DEVICE_FREQUENCY_REQUEST))
        flags |= PA_STREAM_FIX_RATE;

    data->frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    data->attr.prebuf = -1;
    data->attr.fragsize = -1;
    data->attr.minreq = device->UpdateSize * data->frame_size;
    data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);
    data->attr.maxlength = -1;
    flags |= PA_STREAM_EARLY_REQUESTS;
    flags |= PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_AUTO_TIMING_UPDATE;

    switch(device->FmtType)
    {
        case DevFmtByte:
            device->FmtType = DevFmtUByte;
            /* fall-through */
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtUShort:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
    }
    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }
    SetDefaultWFXChannelOrder(device);

    data->stream = connect_playback_stream(device, flags, &data->attr, &data->spec, &chanmap);
    if(!data->stream)
    {
        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    data->spec = *(pa_stream_get_sample_spec(data->stream));
    if(device->Frequency != data->spec.rate)
    {
        pa_operation *o;

        if((device->Flags&DEVICE_FREQUENCY_REQUEST))
            ERR("Failed to set frequency %dhz, got %dhz instead\n", device->Frequency, data->spec.rate);
        device->Flags &= ~DEVICE_FREQUENCY_REQUEST;

        /* Server updated our playback rate, so modify the buffer attribs
         * accordingly. */
        data->attr.minreq = (ALuint64)(data->attr.minreq/data->frame_size) *
                            data->spec.rate / device->Frequency * data->frame_size;
        data->attr.tlength = data->attr.minreq * maxu(device->NumUpdates, 2);

        o = pa_stream_set_buffer_attr(data->stream, &data->attr,
                                      stream_success_callback, device);
        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);

        device->Frequency = data->spec.rate;
    }

    stream_buffer_attr_callback(data->stream, device);
    if(device->NumUpdates < 2)
    {
        pa_operation *o;

        /* Server gave a comparatively large minreq, so modify the tlength. */
        device->NumUpdates = 2;
        data->attr.tlength = data->attr.minreq * device->NumUpdates;

        o = pa_stream_set_buffer_attr(data->stream, &data->attr,
                                      stream_success_callback, device);
        while(pa_operation_get_state(o) == PA_OPERATION_RUNNING)
            pa_threaded_mainloop_wait(data->loop);
        pa_operation_unref(o);
    }

#if PA_CHECK_VERSION(0,9,15)
    if(pa_stream_set_buffer_attr_callback)
        pa_stream_set_buffer_attr_callback(data->stream, stream_buffer_attr_callback, device);
#endif
    pa_stream_set_moved_callback(data->stream, stream_device_callback, device);
    pa_stream_set_write_callback(data->stream, stream_write_callback, device);
    pa_stream_set_underflow_callback(data->stream, stream_signal_callback, device);

    data->thread = StartThread(PulseProc, device);
    if(!data->thread)
    {
#if PA_CHECK_VERSION(0,9,15)
        if(pa_stream_set_buffer_attr_callback)
            pa_stream_set_buffer_attr_callback(data->stream, NULL, NULL);
#endif
        pa_stream_set_moved_callback(data->stream, NULL, NULL);
        pa_stream_set_write_callback(data->stream, NULL, NULL);
        pa_stream_set_underflow_callback(data->stream, NULL, NULL);
        pa_stream_disconnect(data->stream);
        pa_stream_unref(data->stream);
        data->stream = NULL;

        pa_threaded_mainloop_unlock(data->loop);
        return ALC_FALSE;
    }

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_TRUE;
} //}}}
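
The buffer attributes requested above are all derived from the device's update geometry: minreq is one update in bytes, and tlength keeps at least two of them queued. A minimal sketch of that arithmetic with hypothetical values:

#include <stdio.h>

static unsigned maxu(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
    /* Hypothetical device values standing in for the ALCdevice fields. */
    unsigned updateSize = 1024; /* frames per update */
    unsigned numUpdates = 3;
    unsigned frameSize = 4;     /* 16-bit stereo */

    unsigned minreq = updateSize * frameSize;        /* data->attr.minreq */
    unsigned tlength = minreq * maxu(numUpdates, 2); /* data->attr.tlength */
    printf("minreq=%u bytes, tlength=%u bytes\n", minreq, tlength);
    return 0;
}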
Example #5
static ALuint DSoundPlaybackProc(ALvoid *ptr)
{
    ALCdevice *pDevice = (ALCdevice*)ptr;
    DSoundPlaybackData *pData = (DSoundPlaybackData*)pDevice->ExtraData;
    DSBCAPS DSBCaps;
    DWORD LastCursor = 0;
    DWORD PlayCursor;
    VOID *WritePtr1, *WritePtr2;
    DWORD WriteCnt1,  WriteCnt2;
    BOOL Playing = FALSE;
    DWORD FrameSize;
    DWORD FragSize;
    DWORD avail;
    HRESULT err;

    SetRTPriority();

    memset(&DSBCaps, 0, sizeof(DSBCaps));
    DSBCaps.dwSize = sizeof(DSBCaps);
    err = IDirectSoundBuffer_GetCaps(pData->DSsbuffer, &DSBCaps);
    if(FAILED(err))
    {
        ERR("Failed to get buffer caps: 0x%lx\n", err);
        aluHandleDisconnect(pDevice);
        return 1;
    }

    FrameSize = FrameSizeFromDevFmt(pDevice->FmtChans, pDevice->FmtType);
    FragSize = pDevice->UpdateSize * FrameSize;

    IDirectSoundBuffer_GetCurrentPosition(pData->DSsbuffer, &LastCursor, NULL);
    while(!pData->killNow)
    {
        // Get current play cursor
        IDirectSoundBuffer_GetCurrentPosition(pData->DSsbuffer, &PlayCursor, NULL);
        avail = (PlayCursor-LastCursor+DSBCaps.dwBufferBytes) % DSBCaps.dwBufferBytes;

        if(avail < FragSize)
        {
            if(!Playing)
            {
                err = IDirectSoundBuffer_Play(pData->DSsbuffer, 0, 0, DSBPLAY_LOOPING);
                if(FAILED(err))
                {
                    ERR("Failed to play buffer: 0x%lx\n", err);
                    aluHandleDisconnect(pDevice);
                    return 1;
                }
                Playing = TRUE;
            }

            avail = WaitForSingleObjectEx(pData->hNotifyEvent, 2000, FALSE);
            if(avail != WAIT_OBJECT_0)
                ERR("WaitForSingleObjectEx error: 0x%lx\n", avail);
            continue;
        }
        avail -= avail%FragSize;

        // Lock output buffer
        WriteCnt1 = 0;
        WriteCnt2 = 0;
        err = IDirectSoundBuffer_Lock(pData->DSsbuffer, LastCursor, avail, &WritePtr1, &WriteCnt1, &WritePtr2, &WriteCnt2, 0);

        // If the buffer is lost, restore it and lock
        if(err == DSERR_BUFFERLOST)
        {
            WARN("Buffer lost, restoring...\n");
            err = IDirectSoundBuffer_Restore(pData->DSsbuffer);
            if(SUCCEEDED(err))
            {
                Playing = FALSE;
                LastCursor = 0;
                err = IDirectSoundBuffer_Lock(pData->DSsbuffer, 0, DSBCaps.dwBufferBytes, &WritePtr1, &WriteCnt1, &WritePtr2, &WriteCnt2, 0);
            }
        }

        // Successfully locked the output buffer
        if(SUCCEEDED(err))
        {
            // If we have an active context, mix data directly into output buffer otherwise fill with silence
            aluMixData(pDevice, WritePtr1, WriteCnt1/FrameSize);
            aluMixData(pDevice, WritePtr2, WriteCnt2/FrameSize);

            // Unlock output buffer only when successfully locked
            IDirectSoundBuffer_Unlock(pData->DSsbuffer, WritePtr1, WriteCnt1, WritePtr2, WriteCnt2);
        }
        else
        {
            ERR("Buffer lock error: %#lx\n", err);
            aluHandleDisconnect(pDevice);
            return 1;
        }

        // Update old write cursor location
        LastCursor += WriteCnt1+WriteCnt2;
        LastCursor %= DSBCaps.dwBufferBytes;
    }

    return 0;
}
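
The distance from the last write position to the play cursor is computed as (PlayCursor - LastCursor + BufferBytes) % BufferBytes, which stays correct when the play cursor has wrapped past the end of the circular buffer. A standalone sketch of that computation (both cursors are assumed to be less than the buffer size, as DirectSound guarantees):

#include <stdio.h>

/* Forward distance from 'last' to 'play' in a circular buffer of
 * 'size' bytes; adding 'size' before the modulo handles the case
 * where 'play' has wrapped around below 'last'. */
static unsigned long circ_avail(unsigned long play, unsigned long last,
                                unsigned long size)
{
    return (play - last + size) % size;
}

int main(void)
{
    printf("%lu\n", circ_avail(5000, 1000, 16384));  /* 4000 */
    printf("%lu\n", circ_avail(1000, 15000, 16384)); /* 2384 (wrapped) */
    return 0;
}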
Example #6
File: opensl.c Project: dns/CLove
#endif
    }

    if(sampleRate != device->Frequency)
    {
        device->NumUpdates = (device->NumUpdates*sampleRate + (device->Frequency>>1)) /
                             device->Frequency;
        device->NumUpdates = maxu(device->NumUpdates, 2);
        device->Frequency = sampleRate;
    }

    device->FmtChans = DevFmtStereo;
    device->FmtType = DevFmtShort;

    SetDefaultWFXChannelOrder(device);
    self->mFrameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType, device->AmbiOrder);


    loc_bufq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
    loc_bufq.numBuffers = device->NumUpdates;

#ifdef SL_DATAFORMAT_PCM_EX
    SLDataFormat_PCM_EX format_pcm;
    format_pcm.formatType = SL_DATAFORMAT_PCM_EX;
    format_pcm.numChannels = ChannelsFromDevFmt(device->FmtChans, device->AmbiOrder);
    format_pcm.sampleRate = device->Frequency * 1000;
    format_pcm.bitsPerSample = BytesFromDevFmt(device->FmtType) * 8;
    format_pcm.containerSize = format_pcm.bitsPerSample;
    format_pcm.channelMask = GetChannelMask(device->FmtChans);
    format_pcm.endianness = IS_LITTLE_ENDIAN ? SL_BYTEORDER_LITTLEENDIAN :
                                               SL_BYTEORDER_BIGENDIAN;
Example #7
static ALCenum pulse_open_capture(ALCdevice *device, const ALCchar *device_name) //{{{
{
    char *pulse_name = NULL;
    pulse_data *data;
    pa_stream_flags_t flags = 0;
    pa_stream_state_t state;
    pa_channel_map chanmap;

    if(!allCaptureDevNameMap)
        probe_devices(AL_TRUE);

    if(!device_name)
        device_name = pulse_device;
    else if(strcmp(device_name, pulse_device) != 0)
    {
        ALuint i;

        for(i = 0;i < numCaptureDevNames;i++)
        {
            if(strcmp(device_name, allCaptureDevNameMap[i].name) == 0)
            {
                pulse_name = allCaptureDevNameMap[i].device_name;
                break;
            }
        }
        if(i == numCaptureDevNames)
            return ALC_INVALID_VALUE;
    }

    if(pulse_open(device, device_name) == ALC_FALSE)
        return ALC_INVALID_VALUE;

    data = device->ExtraData;
    pa_threaded_mainloop_lock(data->loop);

    data->samples = device->UpdateSize * device->NumUpdates;
    data->frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);
    data->samples = maxu(data->samples, 100 * device->Frequency / 1000);

    if(!(data->ring = CreateRingBuffer(data->frame_size, data->samples)))
    {
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    data->attr.minreq = -1;
    data->attr.prebuf = -1;
    data->attr.maxlength = data->samples * data->frame_size;
    data->attr.tlength = -1;
    data->attr.fragsize = minu(data->samples, 50*device->Frequency/1000) *
                          data->frame_size;

    data->spec.rate = device->Frequency;
    data->spec.channels = ChannelsFromDevFmt(device->FmtChans);

    switch(device->FmtType)
    {
        case DevFmtUByte:
            data->spec.format = PA_SAMPLE_U8;
            break;
        case DevFmtShort:
            data->spec.format = PA_SAMPLE_S16NE;
            break;
        case DevFmtFloat:
            data->spec.format = PA_SAMPLE_FLOAT32NE;
            break;
        case DevFmtByte:
        case DevFmtUShort:
            ERR("Capture format type %#x capture not supported on PulseAudio\n", device->FmtType);
            pa_threaded_mainloop_unlock(data->loop);
            goto fail;
    }

    if(pa_sample_spec_valid(&data->spec) == 0)
    {
        ERR("Invalid sample format\n");
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    if(!pa_channel_map_init_auto(&chanmap, data->spec.channels, PA_CHANNEL_MAP_WAVEEX))
    {
        ERR("Couldn't build map for channel count (%d)!\n", data->spec.channels);
        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    data->stream = pa_stream_new(data->context, "Capture Stream", &data->spec, &chanmap);
    if(!data->stream)
    {
        ERR("pa_stream_new() failed: %s\n",
            pa_strerror(pa_context_errno(data->context)));

        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    pa_stream_set_state_callback(data->stream, stream_state_callback, data->loop);

    flags |= PA_STREAM_START_CORKED|PA_STREAM_ADJUST_LATENCY;
    if(pa_stream_connect_record(data->stream, pulse_name, &data->attr, flags) < 0)
    {
        ERR("Stream did not connect: %s\n",
            pa_strerror(pa_context_errno(data->context)));

        pa_stream_unref(data->stream);
        data->stream = NULL;

        pa_threaded_mainloop_unlock(data->loop);
        goto fail;
    }

    while((state=pa_stream_get_state(data->stream)) != PA_STREAM_READY)
    {
        if(!PA_STREAM_IS_GOOD(state))
        {
            ERR("Stream did not get ready: %s\n",
                pa_strerror(pa_context_errno(data->context)));

            pa_stream_unref(data->stream);
            data->stream = NULL;

            pa_threaded_mainloop_unlock(data->loop);
            goto fail;
        }

        pa_threaded_mainloop_wait(data->loop);
    }
    pa_stream_set_state_callback(data->stream, stream_state_callback2, device);

    pa_threaded_mainloop_unlock(data->loop);
    return ALC_NO_ERROR;

fail:
    pulse_close(device);
    return ALC_INVALID_VALUE;
} //}}}
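
The capture buffer here is sized in time as much as in updates: at least 100 ms of samples overall, with fragments capped at 50 ms so data is delivered reasonably often. A sketch of those bounds with hypothetical device values:

#include <stdio.h>

static unsigned maxu(unsigned a, unsigned b) { return a > b ? a : b; }
static unsigned minu(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
    /* Hypothetical device values standing in for the ALCdevice fields. */
    unsigned updateSize = 441, numUpdates = 4, frameSize = 4, freq = 44100;

    unsigned samples = maxu(updateSize * numUpdates, 100 * freq / 1000);
    unsigned maxlength = samples * frameSize;            /* data->attr.maxlength */
    unsigned fragsize = minu(samples, 50 * freq / 1000) * frameSize;
    printf("buffer=%u samples, maxlength=%u bytes, fragsize=%u bytes\n",
           samples, maxlength, fragsize);
    return 0;
}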
Example #8
static ALCboolean ALCsolarisBackend_reset(ALCsolarisBackend *self)
{
    ALCdevice *device = STATIC_CAST(ALCbackend,self)->mDevice;
    audio_info_t info;
    ALsizei frameSize;
    ALsizei numChannels;

    AUDIO_INITINFO(&info);

    info.play.sample_rate = device->Frequency;

    if(device->FmtChans != DevFmtMono)
        device->FmtChans = DevFmtStereo;
    numChannels = ChannelsFromDevFmt(device->FmtChans, device->AmbiOrder);
    info.play.channels = numChannels;

    switch(device->FmtType)
    {
        case DevFmtByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
        case DevFmtUByte:
            info.play.precision = 8;
            info.play.encoding = AUDIO_ENCODING_LINEAR8;
            break;
        case DevFmtUShort:
        case DevFmtInt:
        case DevFmtUInt:
        case DevFmtFloat:
            device->FmtType = DevFmtShort;
            /* fall-through */
        case DevFmtShort:
            info.play.precision = 16;
            info.play.encoding = AUDIO_ENCODING_LINEAR;
            break;
    }

    frameSize = numChannels * BytesFromDevFmt(device->FmtType);
    info.play.buffer_size = device->UpdateSize*device->NumUpdates * frameSize;

    if(ioctl(self->fd, AUDIO_SETINFO, &info) < 0)
    {
        ERR("ioctl failed: %s\n", strerror(errno));
        return ALC_FALSE;
    }

    if(ChannelsFromDevFmt(device->FmtChans, device->AmbiOrder) != (ALsizei)info.play.channels)
    {
        ERR("Failed to set %s, got %u channels instead\n", DevFmtChannelsString(device->FmtChans), info.play.channels);
        return ALC_FALSE;
    }

    if(!((info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR8 && device->FmtType == DevFmtUByte) ||
         (info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR && device->FmtType == DevFmtByte) ||
         (info.play.precision == 16 && info.play.encoding == AUDIO_ENCODING_LINEAR && device->FmtType == DevFmtShort) ||
         (info.play.precision == 32 && info.play.encoding == AUDIO_ENCODING_LINEAR && device->FmtType == DevFmtInt)))
    {
        ERR("Could not set %s samples, got %d (0x%x)\n", DevFmtTypeString(device->FmtType),
            info.play.precision, info.play.encoding);
        return ALC_FALSE;
    }

    device->Frequency = info.play.sample_rate;
    device->UpdateSize = (info.play.buffer_size/device->NumUpdates) + 1;

    SetDefaultChannelOrder(device);

    free(self->mix_data);
    self->data_size = device->UpdateSize * FrameSizeFromDevFmt(
        device->FmtChans, device->FmtType, device->AmbiOrder
    );
    self->mix_data = calloc(1, self->data_size);

    return ALC_TRUE;
}
Example #9
static ALuint WaveProc(ALvoid *ptr)
{
    ALCdevice *Device = (ALCdevice*)ptr;
    wave_data *data = (wave_data*)Device->ExtraData;
    ALuint frameSize;
    ALuint now, start;
    ALuint64 avail, done;
    size_t fs;
    const ALuint restTime = (ALuint64)Device->UpdateSize * 1000 /
                            Device->Frequency / 2;

    frameSize = FrameSizeFromDevFmt(Device->FmtChans, Device->FmtType);

    done = 0;
    start = timeGetTime();
    while(!data->killNow && Device->Connected)
    {
        now = timeGetTime();

        avail = (ALuint64)(now-start) * Device->Frequency / 1000;
        if(avail < done)
        {
            /* Timer wrapped (50 days???). Add the remainder of the cycle to
             * the available count and reset the number of samples done */
            avail += ((ALuint64)1<<32)*Device->Frequency/1000 - done;
            done = 0;
        }
        if(avail-done < Device->UpdateSize)
        {
            Sleep(restTime);
            continue;
        }

        while(avail-done >= Device->UpdateSize)
        {
            aluMixData(Device, data->buffer, Device->UpdateSize);
            done += Device->UpdateSize;

            if(!IS_LITTLE_ENDIAN)
            {
                ALuint bytesize = BytesFromDevFmt(Device->FmtType);
                ALubyte *bytes = data->buffer;
                ALuint i;

                if(bytesize == 1)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i], data->f);
                }
                else if(bytesize == 2)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i^1], data->f);
                }
                else if(bytesize == 4)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i^3], data->f);
                }
            }
            else
            {
                fs = fwrite(data->buffer, frameSize, Device->UpdateSize,
                            data->f);
                (void)fs; /* silence the unused-result warning */
            }
            if(ferror(data->f))
            {
                ERR("Error writing to file\n");
                ALCdevice_Lock(Device);
                aluHandleDisconnect(Device);
                ALCdevice_Unlock(Device);
                break;
            }
        }
    }

    return 0;
}
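
On a big-endian host the loop above swaps sample bytes on the way out using XOR indexing: i^1 mirrors the byte position within a 2-byte sample, and i^3 within a 4-byte sample. A tiny demonstration of the trick:

#include <stdio.h>

int main(void)
{
    unsigned char bytes[4] = { 0x01, 0x02, 0x03, 0x04 };
    unsigned i;

    /* i^1 swaps each pair of bytes (16-bit samples)... */
    for(i = 0;i < 4;i++)
        printf("%02x ", bytes[i^1]);    /* 02 01 04 03 */
    printf("\n");
    /* ...and i^3 reverses each group of four (32-bit samples). */
    for(i = 0;i < 4;i++)
        printf("%02x ", bytes[i^3]);    /* 04 03 02 01 */
    printf("\n");
    return 0;
}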
Example #10
static ALCboolean opensl_start_playback(ALCdevice *Device)
{
    osl_data *data = Device->ExtraData;
    SLAndroidSimpleBufferQueueItf bufferQueue;
    SLPlayItf player;
    SLresult result;
    ALuint i;

    result = VCALL(data->bufferQueueObject,GetInterface)(SL_IID_BUFFERQUEUE, &bufferQueue);
    PRINTERR(result, "bufferQueue->GetInterface");
    if(SL_RESULT_SUCCESS == result)
    {
        result = VCALL(bufferQueue,RegisterCallback)(opensl_callback, Device);
        PRINTERR(result, "bufferQueue->RegisterCallback");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        data->frameSize = FrameSizeFromDevFmt(Device->FmtChans, Device->FmtType);
        data->bufferSize = Device->UpdateSize * data->frameSize;
        data->buffer = calloc(Device->NumUpdates, data->bufferSize);
        if(!data->buffer)
        {
            result = SL_RESULT_MEMORY_FAILURE;
            PRINTERR(result, "calloc");
        }
    }
    /* enqueue the initial buffers to kick off the callbacks */
    for(i = 0;i < Device->NumUpdates;i++)
    {
        if(SL_RESULT_SUCCESS == result)
        {
            ALvoid *buf = (ALbyte*)data->buffer + i*data->bufferSize;
            result = VCALL(bufferQueue,Enqueue)(buf, data->bufferSize);
            PRINTERR(result, "bufferQueue->Enqueue");
        }
    }
    data->curBuffer = 0;
    if(SL_RESULT_SUCCESS == result)
    {
        result = VCALL(data->bufferQueueObject,GetInterface)(SL_IID_PLAY, &player);
        PRINTERR(result, "bufferQueue->GetInterface");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        result = VCALL(player,SetPlayState)(SL_PLAYSTATE_PLAYING);
        PRINTERR(result, "player->SetPlayState");
    }

    if(SL_RESULT_SUCCESS != result)
    {
        if(data->bufferQueueObject != NULL)
            VCALL0(data->bufferQueueObject,Destroy)();
        data->bufferQueueObject = NULL;

        free(data->buffer);
        data->buffer = NULL;
        data->bufferSize = 0;

        return ALC_FALSE;
    }

    return ALC_TRUE;
}
Example #11
static int ALCsolarisBackend_mixerProc(void *ptr)
{
    ALCsolarisBackend *self = ptr;
    ALCdevice *device = STATIC_CAST(ALCbackend, self)->mDevice;
    struct timeval timeout;
    ALubyte *write_ptr;
    ALint frame_size;
    ALint to_write;
    ssize_t wrote;
    fd_set wfds;
    int sret;

    SetRTPriority();
    althrd_setname(althrd_current(), MIXER_THREAD_NAME);

    frame_size = FrameSizeFromDevFmt(device->FmtChans, device->FmtType, device->AmbiOrder);

    ALCsolarisBackend_lock(self);
    while(!ATOMIC_LOAD(&self->killNow, almemory_order_acquire) &&
          ATOMIC_LOAD(&device->Connected, almemory_order_acquire) != DeviceConnect_Disconnected)
    {
        FD_ZERO(&wfds);
        FD_SET(self->fd, &wfds);
        timeout.tv_sec = 1;
        timeout.tv_usec = 0;

        ALCsolarisBackend_unlock(self);
        sret = select(self->fd+1, NULL, &wfds, NULL, &timeout);
        ALCsolarisBackend_lock(self);
        if(sret < 0)
        {
            if(errno == EINTR)
                continue;
            ERR("select failed: %s\n", strerror(errno));
            aluHandleDisconnect(device, "Failed to wait for playback buffer: %s", strerror(errno));
            break;
        }
        else if(sret == 0)
        {
            WARN("select timeout\n");
            continue;
        }

        write_ptr = self->mix_data;
        to_write = self->data_size;
        aluMixData(device, write_ptr, to_write/frame_size);
        while(to_write > 0 && !ATOMIC_LOAD_SEQ(&self->killNow))
        {
            wrote = write(self->fd, write_ptr, to_write);
            if(wrote < 0)
            {
                if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
                    continue;
                ERR("write failed: %s\n", strerror(errno));
                aluHandleDisconnect(device, "Failed to write playback samples: %s",
                                    strerror(errno));
                break;
            }

            to_write -= wrote;
            write_ptr += wrote;
        }
    }
    ALCsolarisBackend_unlock(self);

    return 0;
}
Example #12
static ALCboolean opensl_reset_playback(ALCdevice *Device)
{
    osl_data *data = Device->ExtraData;
    SLDataLocator_AndroidSimpleBufferQueue loc_bufq;
    SLAndroidSimpleBufferQueueItf bufferQueue;
    SLDataLocator_OutputMix loc_outmix;
    SLDataFormat_PCM format_pcm;
    SLDataSource audioSrc;
    SLDataSink audioSnk;
    SLPlayItf player;
    SLInterfaceID id;
    SLboolean req;
    SLresult result;
    ALuint i;


    Device->UpdateSize = (ALuint64)Device->UpdateSize * 44100 / Device->Frequency;
    Device->UpdateSize = Device->UpdateSize * Device->NumUpdates / 2;
    Device->NumUpdates = 2;

    Device->Frequency = 44100;
    Device->FmtChans = DevFmtStereo;
    Device->FmtType = DevFmtShort;

    SetDefaultWFXChannelOrder(Device);


    id  = SL_IID_ANDROIDSIMPLEBUFFERQUEUE;
    req = SL_BOOLEAN_TRUE;

    loc_bufq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
    loc_bufq.numBuffers = Device->NumUpdates;

    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = ChannelsFromDevFmt(Device->FmtChans);
    format_pcm.samplesPerSec = Device->Frequency * 1000;
    format_pcm.bitsPerSample = BytesFromDevFmt(Device->FmtType) * 8;
    format_pcm.containerSize = format_pcm.bitsPerSample;
    format_pcm.channelMask = GetChannelMask(Device->FmtChans);
    format_pcm.endianness = SL_BYTEORDER_NATIVE;

    audioSrc.pLocator = &loc_bufq;
    audioSrc.pFormat = &format_pcm;

    loc_outmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
    loc_outmix.outputMix = data->outputMix;
    audioSnk.pLocator = &loc_outmix;
    audioSnk.pFormat = NULL;


    result = SLEngineItf_CreateAudioPlayer(data->engine, &data->bufferQueueObject, &audioSrc, &audioSnk, 1, &id, &req);
    PRINTERR(result, "engine->CreateAudioPlayer");
    if(SL_RESULT_SUCCESS == result)
    {
        result = SLObjectItf_Realize(data->bufferQueueObject, SL_BOOLEAN_FALSE);
        PRINTERR(result, "bufferQueue->Realize");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        result = SLObjectItf_GetInterface(data->bufferQueueObject, SL_IID_BUFFERQUEUE, &bufferQueue);
        PRINTERR(result, "bufferQueue->GetInterface");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        result = (*bufferQueue)->RegisterCallback(bufferQueue, opensl_callback, Device);
        PRINTERR(result, "bufferQueue->RegisterCallback");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        data->frameSize = FrameSizeFromDevFmt(Device->FmtChans, Device->FmtType);
        data->bufferSize = Device->UpdateSize * data->frameSize;
        data->buffer = calloc(1, data->bufferSize);
        if(!data->buffer)
        {
            result = SL_RESULT_MEMORY_FAILURE;
            PRINTERR(result, "calloc");
        }
    }
    /* enqueue the initial buffers to kick off the callbacks (the same
     * zeroed buffer is queued NumUpdates times) */
    for(i = 0;i < Device->NumUpdates;i++)
    {
        if(SL_RESULT_SUCCESS == result)
        {
            result = (*bufferQueue)->Enqueue(bufferQueue, data->buffer, data->bufferSize);
            PRINTERR(result, "bufferQueue->Enqueue");
        }
    }
    if(SL_RESULT_SUCCESS == result)
    {
        result = SLObjectItf_GetInterface(data->bufferQueueObject, SL_IID_PLAY, &player);
        PRINTERR(result, "bufferQueue->GetInterface");
    }
    if(SL_RESULT_SUCCESS == result)
    {
        result = SLPlayItf_SetPlayState(player, SL_PLAYSTATE_PLAYING);
        PRINTERR(result, "player->SetPlayState");
    }

    if(SL_RESULT_SUCCESS != result)
    {
        if(data->bufferQueueObject != NULL)
            SLObjectItf_Destroy(data->bufferQueueObject);
        data->bufferQueueObject = NULL;

        free(data->buffer);
        data->buffer = NULL;
        data->bufferSize = 0;

        return ALC_FALSE;
    }

    return ALC_TRUE;
}
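
The first few lines force the stream to two updates at 44100 Hz while keeping the total buffered duration roughly constant: one update is rescaled to the new rate, then the whole buffer is folded into two halves. A sketch of that arithmetic with hypothetical starting values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical starting values for the ALCdevice fields. */
    uint32_t updateSize = 1024, numUpdates = 4, freq = 48000;
    double msBefore = (double)updateSize * numUpdates * 1000.0 / freq;

    /* Same steps as opensl_reset_playback. */
    updateSize = (uint32_t)((uint64_t)updateSize * 44100 / freq);
    updateSize = updateSize * numUpdates / 2;
    numUpdates = 2;
    freq = 44100;

    printf("buffer: %.1f ms before, %.1f ms after (%u x %u frames)\n",
           msBefore, (double)updateSize * numUpdates * 1000.0 / freq,
           numUpdates, updateSize);
    return 0;
}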
Example #13
ALCboolean xaudio2_do_reset_playback(ALCdevice * device)
{
	XAudio2Data* data = (XAudio2Data*)device->ExtraData;

	HRESULT hr;
	DWORD outputchannelMasks;
	XAUDIO2_VOICE_DETAILS outputDetails;
	WAVEFORMATEXTENSIBLE SourceType;

	data->masterVoice->GetChannelMask(&outputchannelMasks);
	data->masterVoice->GetVoiceDetails(&outputDetails);

	if (!(device->Flags&DEVICE_FREQUENCY_REQUEST))
		device->Frequency = outputDetails.InputSampleRate;
	if (!(device->Flags&DEVICE_CHANNELS_REQUEST))
	{
		if (outputDetails.InputChannels == 1 && outputchannelMasks == MONO)
			device->FmtChans = DevFmtMono;
		else if (outputDetails.InputChannels == 2 && outputchannelMasks == STEREO)
			device->FmtChans = DevFmtStereo;
		else if (outputDetails.InputChannels == 4 && outputchannelMasks == QUAD)
			device->FmtChans = DevFmtQuad;
		else if (outputDetails.InputChannels == 6 && outputchannelMasks == X5DOT1)
			device->FmtChans = DevFmtX51;
		else if (outputDetails.InputChannels == 6 && outputchannelMasks == X5DOT1SIDE)
			device->FmtChans = DevFmtX51Side;
		else if (outputDetails.InputChannels == 7 && outputchannelMasks == X6DOT1)
			device->FmtChans = DevFmtX61;
		else if (outputDetails.InputChannels == 8 && outputchannelMasks == X7DOT1)
			device->FmtChans = DevFmtX71;
		else
			ERR("Unhandled channel config: %d -- 0x%08lx\n", outputDetails.InputChannels, outputchannelMasks);
	}

	SetDefaultWFXChannelOrder(device);

	switch (device->FmtChans)
	{
	case DevFmtMono:
		SourceType.Format.nChannels = 1;
		SourceType.dwChannelMask = MONO;
		break;
	case DevFmtStereo:
		SourceType.Format.nChannels = 2;
		SourceType.dwChannelMask = STEREO;
		break;
	case DevFmtQuad:
		SourceType.Format.nChannels = 4;
		SourceType.dwChannelMask = QUAD;
		break;
	case DevFmtX51:
		SourceType.Format.nChannels = 6;
		SourceType.dwChannelMask = X5DOT1;
		break;
	case DevFmtX51Side:
		SourceType.Format.nChannels = 6;
		SourceType.dwChannelMask = X5DOT1SIDE;
		break;
	case DevFmtX61:
		SourceType.Format.nChannels = 7;
		SourceType.dwChannelMask = X6DOT1;
		break;
	case DevFmtX71:
		SourceType.Format.nChannels = 8;
		SourceType.dwChannelMask = X7DOT1;
		break;
	}
	switch (device->FmtType)
	{
	case DevFmtByte:
		device->FmtType = DevFmtUByte;
		/* fall-through */
	case DevFmtUByte:
		SourceType.Format.wBitsPerSample = 8;
		SourceType.Samples.wValidBitsPerSample = 8;
		SourceType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
		break;
	case DevFmtUShort:
		device->FmtType = DevFmtShort;
		/* fall-through */
	case DevFmtShort:
		SourceType.Format.wBitsPerSample = 16;
		SourceType.Samples.wValidBitsPerSample = 16;
		SourceType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
		break;
	case DevFmtFloat:
		SourceType.Format.wBitsPerSample = 32;
		SourceType.Samples.wValidBitsPerSample = 32;
		SourceType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
		break;
	}
	SourceType.Format.cbSize = (sizeof(SourceType) - sizeof(WAVEFORMATEX));
	SourceType.Format.nSamplesPerSec = device->Frequency;

	SourceType.Format.nBlockAlign = SourceType.Format.nChannels *
		SourceType.Format.wBitsPerSample / 8;
	SourceType.Format.nAvgBytesPerSec = SourceType.Format.nSamplesPerSec *
		SourceType.Format.nBlockAlign;

	SourceType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;

	/*-----------create source voice-------------*/
	data->sourceVoiceCallback = new (std::nothrow) XAudio2SourceVoiceCallback(device);
	if (data->sourceVoiceCallback == NULL)
	{
		ERR("create XAudio2SourceVoiceCallback() failed\n");
		return ALC_FALSE;
	}

	data->sourceVoiceCallback->callbackEvent = CreateEventEx(NULL, NULL, 0, EVENT_ACCESS_MASK);
	if (data->sourceVoiceCallback->callbackEvent == NULL)
	{
		ERR("create callback event failed\n");
		return ALC_FALSE;
	}

	hr = data->xAudioObj->CreateSourceVoice(&data->sourceVoice, &SourceType.Format, 0,
		XAUDIO2_DEFAULT_FREQ_RATIO, data->sourceVoiceCallback);
	if (FAILED(hr))
	{
		ERR("CreateSourceVoice() failed: 0x%08lx\n", hr);
		return ALC_FALSE;
	}

	/*---------create buffer--------------*/
	//calculate frame size
	data->frameSize = FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

	//start playing the audio engine
	data->sourceVoice->Start();

	//wait for the buffer to be created
	WaitForSingleObjectEx(data->sourceVoiceCallback->callbackEvent, INFINITE, FALSE);

	device->UpdateSize = data->bufferSize / data->frameSize;
	device->NumUpdates = 1;


	//set the running flag
	InterlockedExchange(&data->running, TRUE);

	return ALC_TRUE;
}
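
The two derived WAVEFORMATEX fields follow mechanically from channel count, bit depth, and sample rate: nBlockAlign is the frame size in bytes, and nAvgBytesPerSec is the byte rate. A minimal sketch with a hypothetical 5.1 float format:

#include <stdio.h>

int main(void)
{
    /* Hypothetical format: 6-channel 32-bit float at 48kHz. */
    unsigned nChannels = 6, wBitsPerSample = 32, nSamplesPerSec = 48000;

    unsigned nBlockAlign = nChannels * wBitsPerSample / 8;   /* bytes per frame */
    unsigned nAvgBytesPerSec = nSamplesPerSec * nBlockAlign; /* bytes per second */
    printf("nBlockAlign=%u, nAvgBytesPerSec=%u\n", nBlockAlign, nAvgBytesPerSec);
    return 0;
}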
Example #14
File: alsa.c Project: 9heart/DT3
static ALCenum alsa_open_capture(ALCdevice *pDevice, const ALCchar *deviceName)
{
    const char *driver = "default";
    snd_pcm_hw_params_t *p;
    snd_pcm_uframes_t bufferSizeInFrames;
    snd_pcm_uframes_t periodSizeInFrames;
    snd_pcm_format_t format;
    ALuint frameSize;
    alsa_data *data;
    char str[128];
    char *err;
    int i;

    ConfigValueStr("alsa", "capture", &driver);

    if(!allCaptureDevNameMap)
        allCaptureDevNameMap = probe_devices(SND_PCM_STREAM_CAPTURE, &numCaptureDevNames);

    if(!deviceName)
        deviceName = allCaptureDevNameMap[0].name;
    else
    {
        size_t idx;

        for(idx = 0;idx < numCaptureDevNames;idx++)
        {
            if(allCaptureDevNameMap[idx].name &&
               strcmp(deviceName, allCaptureDevNameMap[idx].name) == 0)
            {
                if(idx > 0)
                {
                    snprintf(str, sizeof(str), "%sCARD=%s,DEV=%d", capture_prefix,
                             allCaptureDevNameMap[idx].card, allCaptureDevNameMap[idx].dev);
                    driver = str;
                }
                break;
            }
        }
        if(idx == numCaptureDevNames)
            return ALC_INVALID_VALUE;
    }

    data = (alsa_data*)calloc(1, sizeof(alsa_data));

    i = snd_pcm_open(&data->pcmHandle, driver, SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK);
    if(i < 0)
    {
        ERR("Could not open capture device '%s': %s\n", driver, snd_strerror(i));
        free(data);
        return ALC_INVALID_VALUE;
    }

    format = -1;
    switch(pDevice->FmtType)
    {
        case DevFmtByte:
            format = SND_PCM_FORMAT_S8;
            break;
        case DevFmtUByte:
            format = SND_PCM_FORMAT_U8;
            break;
        case DevFmtShort:
            format = SND_PCM_FORMAT_S16;
            break;
        case DevFmtUShort:
            format = SND_PCM_FORMAT_U16;
            break;
        case DevFmtFloat:
            format = SND_PCM_FORMAT_FLOAT;
            break;
    }

    err = NULL;
    bufferSizeInFrames = maxu(pDevice->UpdateSize*pDevice->NumUpdates,
                              100*pDevice->Frequency/1000);
    periodSizeInFrames = minu(bufferSizeInFrames, 50*pDevice->Frequency/1000);
    snd_pcm_hw_params_malloc(&p);

    if((i=snd_pcm_hw_params_any(data->pcmHandle, p)) < 0)
        err = "any";
    /* set interleaved access */
    if(i >= 0 && (i=snd_pcm_hw_params_set_access(data->pcmHandle, p, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
        err = "set access";
    /* set format (implicitly sets sample bits) */
    if(i >= 0 && (i=snd_pcm_hw_params_set_format(data->pcmHandle, p, format)) < 0)
        err = "set format";
    /* set channels (implicitly sets frame bits) */
    if(i >= 0 && (i=snd_pcm_hw_params_set_channels(data->pcmHandle, p, ChannelsFromDevFmt(pDevice->FmtChans))) < 0)
        err = "set channels";
    /* set rate (implicitly constrains period/buffer parameters) */
    if(i >= 0 && (i=snd_pcm_hw_params_set_rate(data->pcmHandle, p, pDevice->Frequency, 0)) < 0)
        err = "set rate near";
    /* set buffer size in frame units (implicitly sets period size/bytes/time and buffer time/bytes) */
    if(i >= 0 && (i=snd_pcm_hw_params_set_buffer_size_near(data->pcmHandle, p, &bufferSizeInFrames)) < 0)
        err = "set buffer size near";
    /* set period size in frame units (implicitly sets period bytes/time) */
    if(i >= 0 && (i=snd_pcm_hw_params_set_period_size_near(data->pcmHandle, p, &periodSizeInFrames, NULL)) < 0)
        err = "set period size near";
    /* install and prepare hardware configuration */
    if(i >= 0 && (i=snd_pcm_hw_params(data->pcmHandle, p)) < 0)
        err = "set params";
    if(i < 0)
    {
        ERR("%s failed: %s\n", err, snd_strerror(i));
        snd_pcm_hw_params_free(p);
        goto error;
    }

    if((i=snd_pcm_hw_params_get_period_size(p, &bufferSizeInFrames, NULL)) < 0)
    {
        ERR("get size failed: %s\n", snd_strerror(i));
        snd_pcm_hw_params_free(p);
        goto error;
    }

    snd_pcm_hw_params_free(p);

    frameSize = FrameSizeFromDevFmt(pDevice->FmtChans, pDevice->FmtType);

    data->ring = CreateRingBuffer(frameSize, pDevice->UpdateSize*pDevice->NumUpdates);
    if(!data->ring)
    {
        ERR("ring buffer create failed\n");
        goto error;
    }

    data->size = snd_pcm_frames_to_bytes(data->pcmHandle, bufferSizeInFrames);
    data->buffer = malloc(data->size);
    if(!data->buffer)
    {
        ERR("buffer malloc failed\n");
        goto error;
    }

    pDevice->szDeviceName = strdup(deviceName);

    pDevice->ExtraData = data;
    return ALC_NO_ERROR;

error:
    free(data->buffer);
    DestroyRingBuffer(data->ring);
    snd_pcm_close(data->pcmHandle);
    free(data);

    pDevice->ExtraData = NULL;
    return ALC_INVALID_VALUE;
}
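
The initial hardware-parameter requests above are time-based: at least 100 ms of buffer and periods of at most 50 ms, both expressed in frames before ALSA adjusts them to the nearest supported values. A sketch of those starting requests:

#include <stdio.h>

static unsigned maxu(unsigned a, unsigned b) { return a > b ? a : b; }
static unsigned minu(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
    /* Hypothetical device values standing in for the ALCdevice fields. */
    unsigned freq = 44100, updateSize = 441, numUpdates = 4;

    unsigned bufferFrames = maxu(updateSize * numUpdates, 100 * freq / 1000);
    unsigned periodFrames = minu(bufferFrames, 50 * freq / 1000);
    printf("request: buffer=%u frames (>=100ms), period=%u frames (<=50ms)\n",
           bufferFrames, periodFrames);
    return 0;
}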
Example #15
static ALuint WaveProc(ALvoid *ptr)
{
    ALCdevice *pDevice = (ALCdevice*)ptr;
    wave_data *data = (wave_data*)pDevice->ExtraData;
    ALuint frameSize;
    ALuint now, start;
    ALuint64 avail, done;
    size_t fs;
    union {
        short s;
        char b[sizeof(short)];
    } uSB;
    const ALuint restTime = (ALuint64)pDevice->UpdateSize * 1000 /
                            pDevice->Frequency / 2;

    uSB.s = 1;
    frameSize = FrameSizeFromDevFmt(pDevice->FmtChans, pDevice->FmtType);

    done = 0;
    start = timeGetTime();
    while(!data->killNow && pDevice->Connected)
    {
        now = timeGetTime();

        avail = (ALuint64)(now-start) * pDevice->Frequency / 1000;
        if(avail < done)
        {
            /* Timer wrapped. Add the remainder of the cycle to the available
             * count and reset the number of samples done */
            avail += (ALuint64)0xFFFFFFFFu*pDevice->Frequency/1000 - done;
            done = 0;
        }
        if(avail-done < pDevice->UpdateSize)
        {
            Sleep(restTime);
            continue;
        }

        while(avail-done >= pDevice->UpdateSize)
        {
            aluMixData(pDevice, data->buffer, pDevice->UpdateSize);
            done += pDevice->UpdateSize;

            if(uSB.b[0] != 1)
            {
                ALuint bytesize = BytesFromDevFmt(pDevice->FmtType);
                ALubyte *bytes = data->buffer;
                ALuint i;

                if(bytesize == 1)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i], data->f);
                }
                else if(bytesize == 2)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i^1], data->f);
                }
                else if(bytesize == 4)
                {
                    for(i = 0;i < data->size;i++)
                        fputc(bytes[i^3], data->f);
                }
            }
            else
                fs = fwrite(data->buffer, frameSize, pDevice->UpdateSize,
                            data->f);
            if(ferror(data->f))
            {
                AL_PRINT("Error writing to file\n");
                aluHandleDisconnect(pDevice);
                break;
            }
        }
    }

    return 0;
}
Example #16
static void* thread_function(void* arg)
{
    ALCdevice* device = (ALCdevice*)arg;
    AndroidData* data = (AndroidData*)device->ExtraData;

    JNIEnv* env;
    (*javaVM)->AttachCurrentThread(javaVM, &env, NULL);

    (*env)->PushLocalFrame(env, 2);

    int sampleRateInHz = device->Frequency;
    int channelConfig = ChannelsFromDevFmt(device->FmtChans) == 1 ? CHANNEL_CONFIGURATION_MONO : CHANNEL_CONFIGURATION_STEREO;
    int audioFormat = BytesFromDevFmt(device->FmtType) == 1 ? ENCODING_PCM_8BIT : ENCODING_PCM_16BIT;

    int bufferSizeInBytes = (*env)->CallStaticIntMethod(env, cAudioTrack, 
        mGetMinBufferSize, sampleRateInHz, channelConfig, audioFormat);

    int bufferSizeInSamples = bufferSizeInBytes / FrameSizeFromDevFmt(device->FmtChans, device->FmtType);

    jobject track = (*env)->NewObject(env, cAudioTrack, mAudioTrack,
        STREAM_MUSIC, sampleRateInHz, channelConfig, audioFormat, device->NumUpdates * bufferSizeInBytes, MODE_STREAM);

#ifdef HAVE_ANDROID_LOW_LATENCY
    int started = 0;
    size_t overallBytes = 0;
#else
    (*env)->CallNonvirtualVoidMethod(env, track, cAudioTrack, mPlay);
#endif

    jarray buffer = (*env)->NewByteArray(env, bufferSizeInBytes);

    while (data->running)
    {
        void* pBuffer = (*env)->GetPrimitiveArrayCritical(env, buffer, NULL);

        if (pBuffer)
        {
            aluMixData(device, pBuffer, bufferSizeInSamples);
            (*env)->ReleasePrimitiveArrayCritical(env, buffer, pBuffer, 0);

#ifdef HAVE_ANDROID_LOW_LATENCY
            if (bufferSizeInBytes >= 0)
            {
                if (started)
                {
#endif
                    (*env)->CallNonvirtualIntMethod(env, track, cAudioTrack, mWrite, buffer, 0, bufferSizeInBytes);
#ifdef HAVE_ANDROID_LOW_LATENCY
                }
                else
                {
                    overallBytes += (*env)->CallNonvirtualIntMethod(env, track, cAudioTrack, mWrite, buffer, 0, bufferSizeInBytes);
                    if (overallBytes >= (device->NumUpdates * bufferSizeInBytes))
                    {
                        (*env)->CallNonvirtualVoidMethod(env, track, cAudioTrack, mPlay);
                        started = 1;
                    }
                }
            }
#endif
        }
        else
        {
            AL_PRINT("Failed to get pointer to array bytes");
        }
    }
    
    (*env)->CallNonvirtualVoidMethod(env, track, cAudioTrack, mStop);
    (*env)->CallNonvirtualVoidMethod(env, track, cAudioTrack, mRelease);

    (*env)->PopLocalFrame(env, NULL);

    (*javaVM)->DetachCurrentThread(javaVM);
    return NULL;
}
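
getMinBufferSize() returns a size in bytes, while aluMixData() consumes a count of frames, so the thread divides by the device frame size before mixing; the AudioTrack itself is created with NumUpdates of those minimum buffers. A sketch of the conversions, with hypothetical values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical values; the real ones come from AudioTrack and the
     * ALCdevice fields used in thread_function above. */
    int minBufferBytes = 16384; /* AudioTrack.getMinBufferSize(...) */
    int frameSize = 4;          /* 16-bit stereo */
    int numUpdates = 4;         /* device->NumUpdates */

    int framesPerMix = minBufferBytes / frameSize;
    int trackBufferBytes = numUpdates * minBufferBytes;
    printf("%d frames per mix, %d-byte AudioTrack buffer\n",
           framesPerMix, trackBufferBytes);
    return 0;
}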