Example #1
File: paaudio.c Project: juanquintela/qemu
static int qpa_init_in(HWVoiceIn *hw, struct audsettings *as, void *drv_opaque)
{
    int error;
    pa_sample_spec ss;
    struct audsettings obt_as = *as;
    PAVoiceIn *pa = (PAVoiceIn *) hw;
    paaudio *g = pa->g = drv_opaque;
    AudiodevPaOptions *popts = &g->dev->u.pa;
    AudiodevPaPerDirectionOptions *ppdo = popts->in;

    ss.format = audfmt_to_pa (as->fmt, as->endianness);
    ss.channels = as->nchannels;
    ss.rate = as->freq;

    obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness);

    pa->stream = qpa_simple_new (
        g,
        "qemu",
        PA_STREAM_RECORD,
        ppdo->has_name ? ppdo->name : NULL,
        &ss,
        NULL,                   /* channel map */
        NULL,                   /* buffering attributes */
        &error
        );
    if (!pa->stream) {
        qpa_logerr (error, "pa_simple_new for capture failed\n");
        goto fail1;
    }

    audio_pcm_init_info (&hw->info, &obt_as);
    /* default buffer length: 46440 usec, i.e. 2048 frames at 44.1 kHz */
    hw->samples = pa->samples = audio_buffer_samples(
        qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440);
    pa->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
    pa->wpos = hw->wpos;
    if (!pa->pcm_buf) {
        dolog ("Could not allocate buffer (%d bytes)\n",
               hw->samples << hw->info.shift);
        goto fail2;
    }

    if (audio_pt_init(&pa->pt, qpa_thread_in, hw, AUDIO_CAP, __func__)) {
        goto fail3;
    }

    return 0;

 fail3:
    g_free (pa->pcm_buf);
    pa->pcm_buf = NULL;
 fail2:
    if (pa->stream) {
        pa_stream_unref (pa->stream);
        pa->stream = NULL;
    }
 fail1:
    return -1;
}
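A note on the recurring 1 << hw->info.shift idiom in these examples: audio_pcm_init_info() encodes the frame size as a power of two, so 1 << hw->info.shift is the size of one frame in bytes (for a 16-bit stereo stream, shift is 2, i.e. 4 bytes per frame), and hw->samples << hw->info.shift is the byte size of the whole buffer, which is exactly what the audio_calloc() and dolog() calls above compute.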
Example #2
File: paaudio.c Project: 0-14N/NDroid
static int qpa_init_in (HWVoiceIn *hw, struct audsettings *as)
{
    int error;
    pa_sample_spec ss;
    struct audsettings obt_as = *as;
    PAVoiceIn *pa = (PAVoiceIn *) hw;

    ss.format = audfmt_to_pa (as->fmt, as->endianness);
    ss.channels = as->nchannels;
    ss.rate = as->freq;

    obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness);

    pa->s = FF(pa_simple_new) (
        conf.server,
        "qemu",
        PA_STREAM_RECORD,
        conf.source,
        "pcm.capture",
        &ss,
        NULL,                   /* channel map */
        NULL,                   /* buffering attributes */
        &error
        );
    if (!pa->s) {
        qpa_logerr (error, "pa_simple_new for capture failed\n");
        goto fail1;
    }

    audio_pcm_init_info (&hw->info, &obt_as);
    hw->samples = conf.samples;
    pa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
    if (!pa->pcm_buf) {
        dolog ("Could not allocate buffer (%d bytes)\n",
               hw->samples << hw->info.shift);
        goto fail2;
    }

    if (audio_pt_init (&pa->pt, qpa_thread_in, hw, AUDIO_CAP, AUDIO_FUNC)) {
        goto fail3;
    }

    return 0;

 fail3:
    qemu_free (pa->pcm_buf);
    pa->pcm_buf = NULL;
 fail2:
    FF(pa_simple_free) (pa->s);
    pa->s = NULL;
 fail1:
    return -1;
}
Example #3
File: coreaudio.c Project: 0-14N/NDroid
static int coreaudio_init_out (HWVoiceOut *hw, struct audsettings *as)
{
    coreaudioVoice*  core = CORE_OUT(hw);
    int err;

    audio_pcm_init_info (&hw->info, as);

    err = coreaudio_voice_init (core, as, conf.out_buffer_frames, audioOutDeviceIOProc, hw, 0);
    if (err < 0)
        return err;

    hw->samples = core->bufferFrameSize * conf.out_nbuffers;
    return 0;
}
Example #4
File: coreaudio.c Project: 0-14N/NDroid
static int
coreaudio_init_in (HWVoiceIn *hw, struct audsettings *as)
{
    coreaudioVoice*  core = CORE_IN(hw);
    int              err;

    audio_pcm_init_info (&hw->info, as);

    err = coreaudio_voice_init (core, as, conf.in_buffer_frames, audioInDeviceIOProc, hw, 1);
    if (err < 0) {
        return err;
    }

    hw->samples = core->bufferFrameSize * conf.in_nbuffers;
    return 0;
}
Example #5
File: audiodrv.cpp Project: hb9cwp/genode
static int genode_init_out(HWVoiceOut *hw, audsettings_t *as)
{
	GenodeVoiceOut * const out = (GenodeVoiceOut *)hw;

	if (as->nchannels != VBOX_CHANNELS) {
		Genode::error("only ", (int)VBOX_CHANNELS, " channels supported ",
		              "( ", as->nchannels, " were requested)");
		return -1;
	}

	if (as->freq != Audio_out::SAMPLE_RATE) {
		Genode::error("only ", (int)Audio_out::SAMPLE_RATE, " frequency supported "
		              "(", as->freq, " was requested)");
		return -1;
	}

	for (int i = 0; i < VBOX_CHANNELS; i++) {
		try {
			out->audio[i] = new (Genode::env()->heap())
				Audio_out::Connection(channel_names[i]);
		} catch (...) {
			Genode::error("could not establish Audio_out connection");
			/* also destroy channel 0 if it was already created */
			while (--i >= 0)
				Genode::destroy(Genode::env()->heap(), out->audio[i]);
			return -1;
		}
	}

	audio_pcm_init_info(&out->hw.info, as);
	out->hw.samples = Audio_out::PERIOD;
	out->packets = 0;

	Genode::log("--- using Audio_out session ---");
	Genode::log("freq: ",       as->freq);
	Genode::log("channels: ",   as->nchannels);
	Genode::log("format: ",     (int)as->fmt);
	Genode::log("endianness: ", as->endianness);

	return 0;
}
Example #6
File: audiodrv.cpp Project: hb9cwp/genode
static int genode_init_in(HWVoiceIn *hw, audsettings_t *as)
{
	GenodeVoiceIn *in = (GenodeVoiceIn*)hw;

	try {
		in->audio = new (Genode::env()->heap()) Audio_in::Connection("left");
	} catch (...) {
		Genode::error("could not establish Audio_in connection");
		return -1;
	}

	audio_pcm_init_info(&in->hw.info, as);
	in->hw.samples = Audio_in::PERIOD;
	in->packets    = 0;

	Genode::log("--- using Audio_in session ---");
	Genode::log("freq: ",       as->freq);
	Genode::log("channels: ",   as->nchannels);
	Genode::log("format: ",     (int)as->fmt);
	Genode::log("endianness: ", as->endianness);

	return 0;
}
Example #7
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
                              void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    UInt32 propertySize;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    CoreaudioConf *conf = drv_opaque;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    /* open default output device */
    propertySize = sizeof(core->outputDeviceID);
    status = AudioHardwareGetProperty(
        kAudioHardwarePropertyDefaultOutputDevice,
        &propertySize,
        &core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get default output Device\n");
        return -1;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    propertySize = sizeof(frameRange);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        0,
        kAudioDevicePropertyBufferFrameSizeRange,
        &propertySize,
        &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame range\n");
        return -1;
    }

    if (frameRange.mMinimum > conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    }
    else if (frameRange.mMaximum < conf->buffer_frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    }
    else {
        core->audioDevicePropertyBufferFrameSize = conf->buffer_frames;
    }

    /* set Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        NULL,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not set device buffer frame size %" PRIu32 "\n",
                           (uint32_t)core->audioDevicePropertyBufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size */
    propertySize = sizeof(core->audioDevicePropertyBufferFrameSize);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyBufferFrameSize,
        &propertySize,
        &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame size\n");
        return -1;
    }
    hw->samples = conf->nbuffers * core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceGetProperty(
        core->outputDeviceID,
        0,
        false,
        kAudioDevicePropertyStreamFormat,
        &propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get Device Stream properties\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    propertySize = sizeof(core->outputStreamBasicDescription);
    status = AudioDeviceSetProperty(
        core->outputDeviceID,
        0,
        0,
        0,
        kAudioDevicePropertyStreamFormat,
        propertySize,
        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n",
                           as->freq);
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    status = AudioDeviceAddIOProc(core->outputDeviceID, audioDeviceIOProc, hw);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, audioDeviceIOProc);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceRemoveIOProc(core->outputDeviceID, audioDeviceIOProc);
            core->outputDeviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
Example #8
File: paaudio.c Project: juanquintela/qemu
static int qpa_init_out(HWVoiceOut *hw, struct audsettings *as,
                        void *drv_opaque)
{
    int error;
    pa_sample_spec ss;
    pa_buffer_attr ba;
    struct audsettings obt_as = *as;
    PAVoiceOut *pa = (PAVoiceOut *) hw;
    paaudio *g = pa->g = drv_opaque;
    AudiodevPaOptions *popts = &g->dev->u.pa;
    AudiodevPaPerDirectionOptions *ppdo = popts->out;

    ss.format = audfmt_to_pa (as->fmt, as->endianness);
    ss.channels = as->nchannels;
    ss.rate = as->freq;

    /*
     * qemu audio tick runs at 100 Hz (by default), so processing
     * data chunks worth 10 ms of sound should be a good fit.
     */
    ba.tlength = pa_usec_to_bytes (10 * 1000, &ss);
    ba.minreq = pa_usec_to_bytes (5 * 1000, &ss);
    ba.maxlength = -1;
    ba.prebuf = -1;

    obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness);

    pa->stream = qpa_simple_new (
        g,
        "qemu",
        PA_STREAM_PLAYBACK,
        ppdo->has_name ? ppdo->name : NULL,
        &ss,
        NULL,                   /* channel map */
        &ba,                    /* buffering attributes */
        &error
        );
    if (!pa->stream) {
        qpa_logerr (error, "pa_simple_new for playback failed\n");
        goto fail1;
    }

    audio_pcm_init_info (&hw->info, &obt_as);
    /* default buffer length: 46440 usec, i.e. 2048 frames at 44.1 kHz */
    hw->samples = pa->samples = audio_buffer_samples(
        qapi_AudiodevPaPerDirectionOptions_base(ppdo), &obt_as, 46440);
    pa->pcm_buf = audio_calloc(__func__, hw->samples, 1 << hw->info.shift);
    pa->rpos = hw->rpos;
    if (!pa->pcm_buf) {
        dolog ("Could not allocate buffer (%d bytes)\n",
               hw->samples << hw->info.shift);
        goto fail2;
    }

    if (audio_pt_init(&pa->pt, qpa_thread_out, hw, AUDIO_CAP, __func__)) {
        goto fail3;
    }

    return 0;

 fail3:
    g_free (pa->pcm_buf);
    pa->pcm_buf = NULL;
 fail2:
    if (pa->stream) {
        pa_stream_unref (pa->stream);
        pa->stream = NULL;
    }
 fail1:
    return -1;
}
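For a sense of scale, assuming a typical 44.1 kHz, 2-channel, 16-bit stream (4 bytes per frame): the 10 ms tlength above comes to 44100 × 0.010 × 4 = 1764 bytes, and the 5 ms minreq is half that, 882 bytes.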
Example #9
static int
winaudio_in_init (HWVoiceIn *hw, struct audsettings *as)
{
    WinAudioIn*   s = (WinAudioIn*) hw;
    MMRESULT       result;
    WAVEFORMATEX   format;
    int            shift, i, samples_size;

    s->wavein = NULL;
    InitializeCriticalSection( &s->lock );
    /* input voice: iterate the input buffer array */
    for (i = 0; i < NUM_IN_BUFFERS; i++) {
        s->buffers[i].dwUser = 0xFFFF;
    }
    s->buffer_bytes = NULL;

    /* compute desired wave input format */
    format.wFormatTag      = WAVE_FORMAT_PCM;
    format.nChannels       = as->nchannels;
    format.nSamplesPerSec  = as->freq;

    switch (as->fmt) {
        case AUD_FMT_S8:   shift = 0; break;
        case AUD_FMT_U8:   shift = 0; break;
        case AUD_FMT_S16:  shift = 1; break;
        case AUD_FMT_U16:  shift = 1; break;
        default:
            fprintf(stderr, "qemu: winaudio: Bad input audio format: %d\n",
                    as->fmt);
            return -1;
    }

    format.nAvgBytesPerSec = (format.nSamplesPerSec * format.nChannels) << shift;
    format.nBlockAlign     = format.nChannels << shift;
    format.wBitsPerSample  = 8 << shift;
    format.cbSize          = 0;

    /* open the wave in device */
    result = waveInOpen( &s->wavein, WAVE_MAPPER, &format,
                         (DWORD_PTR)winaudio_in_buffer_done, (DWORD_PTR) hw,
                         CALLBACK_FUNCTION);
    if ( result != MMSYSERR_NOERROR ) {
        dump_mmerror( "qemu: winaudio: waveInOpen()", result);
        return -1;
    }

    samples_size    = format.nBlockAlign * conf.nb_samples;
    s->buffer_bytes = qemu_malloc( NUM_IN_BUFFERS * samples_size );
    if (s->buffer_bytes == NULL) {
        waveInClose( s->wavein );
        s->wavein = NULL;
        fprintf(stderr, "not enough memory for Windows audio buffers\n");
        return -1;
    }

    for (i = 0; i < NUM_IN_BUFFERS; i++) {
        memset( &s->buffers[i], 0, sizeof(s->buffers[i]) );
        s->buffers[i].lpData         = (LPSTR)(s->buffer_bytes + i*samples_size);
        s->buffers[i].dwBufferLength = samples_size;
        s->buffers[i].dwFlags        = WHDR_DONE;

        result = waveInPrepareHeader( s->wavein, &s->buffers[i],
                               sizeof(s->buffers[i]) );
        if ( result != MMSYSERR_NOERROR ) {
                dump_mmerror("waveInPrepareHeader()", result);
                return -1;
        }

        result = waveInAddBuffer( s->wavein, &s->buffers[i],
                              sizeof(s->buffers[i]) );
        if ( result != MMSYSERR_NOERROR ) {
            dump_mmerror("waveInAddBuffer()", result);
            return -1;
        }
    }

#if DEBUG
    /* Check the sound device we retrieved */
    {
        WAVEINCAPS caps;

        result = waveInGetDevCaps((UINT) s->wavein, &caps, sizeof(caps));
        if ( result != MMSYSERR_NOERROR ) {
            dump_mmerror("waveInGetDevCaps()", result);
        } else
            printf("Audio in device: %s\n", caps.szPname);
    }
#endif

    audio_pcm_init_info (&hw->info, as);
    hw->samples = conf.nb_samples*2;

    s->read_index = 0;
    s->read_count = 0;
    s->read_pos   = 0;
    s->read_size  = samples_size;
    return 0;
}
Example #10
static int coreaudio_init_out(HWVoiceOut *hw, struct audsettings *as,
                              void *drv_opaque)
{
    OSStatus status;
    coreaudioVoiceOut *core = (coreaudioVoiceOut *) hw;
    int err;
    const char *typ = "playback";
    AudioValueRange frameRange;
    Audiodev *dev = drv_opaque;
    AudiodevCoreaudioPerDirectionOptions *cpdo = dev->u.coreaudio.out;
    int frames;

    /* create mutex */
    err = pthread_mutex_init(&core->mutex, NULL);
    if (err) {
        dolog("Could not create mutex\nReason: %s\n", strerror (err));
        return -1;
    }

    audio_pcm_init_info (&hw->info, as);

    status = coreaudio_get_voice(&core->outputDeviceID);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get default output Device\n");
        return -1;
    }
    if (core->outputDeviceID == kAudioDeviceUnknown) {
        dolog ("Could not initialize %s - Unknown Audiodevice\n", typ);
        return -1;
    }

    /* get minimum and maximum buffer frame sizes */
    status = coreaudio_get_framesizerange(core->outputDeviceID,
                                          &frameRange);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame range\n");
        return -1;
    }

    /* default buffer length: 11610 usec, i.e. 512 frames at 44.1 kHz */
    frames = audio_buffer_frames(
        qapi_AudiodevCoreaudioPerDirectionOptions_base(cpdo), as, 11610);
    if (frameRange.mMinimum > frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMinimum;
        dolog ("warning: Upsizing Buffer Frames to %f\n", frameRange.mMinimum);
    } else if (frameRange.mMaximum < frames) {
        core->audioDevicePropertyBufferFrameSize = (UInt32) frameRange.mMaximum;
        dolog ("warning: Downsizing Buffer Frames to %f\n", frameRange.mMaximum);
    } else {
        core->audioDevicePropertyBufferFrameSize = frames;
    }

    /* set Buffer Frame Size */
    status = coreaudio_set_framesize(core->outputDeviceID,
                                     &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not set device buffer frame size %" PRIu32 "\n",
                           (uint32_t)core->audioDevicePropertyBufferFrameSize);
        return -1;
    }

    /* get Buffer Frame Size */
    status = coreaudio_get_framesize(core->outputDeviceID,
                                     &core->audioDevicePropertyBufferFrameSize);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get device buffer frame size\n");
        return -1;
    }
    hw->samples = (cpdo->has_buffer_count ? cpdo->buffer_count : 4) *
        core->audioDevicePropertyBufferFrameSize;

    /* get StreamFormat */
    status = coreaudio_get_streamformat(core->outputDeviceID,
                                        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ,
                           "Could not get Device Stream properties\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Samplerate */
    core->outputStreamBasicDescription.mSampleRate = (Float64) as->freq;
    status = coreaudio_set_streamformat(core->outputDeviceID,
                                        &core->outputStreamBasicDescription);
    if (status != kAudioHardwareNoError) {
        coreaudio_logerr2 (status, typ, "Could not set samplerate %d\n",
                           as->freq);
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* set Callback */
    core->ioprocid = NULL;
    status = AudioDeviceCreateIOProcID(core->outputDeviceID,
                                       audioDeviceIOProc,
                                       hw,
                                       &core->ioprocid);
    if (status != kAudioHardwareNoError || core->ioprocid == NULL) {
        coreaudio_logerr2 (status, typ, "Could not set IOProc\n");
        core->outputDeviceID = kAudioDeviceUnknown;
        return -1;
    }

    /* start Playback */
    if (!isPlaying(core->outputDeviceID)) {
        status = AudioDeviceStart(core->outputDeviceID, core->ioprocid);
        if (status != kAudioHardwareNoError) {
            coreaudio_logerr2 (status, typ, "Could not start playback\n");
            AudioDeviceDestroyIOProcID(core->outputDeviceID, core->ioprocid);
            core->outputDeviceID = kAudioDeviceUnknown;
            return -1;
        }
    }

    return 0;
}
Example #11
static int qesd_init_in (HWVoiceIn *hw, struct audsettings *as)
{
    ESDVoiceIn *esd = (ESDVoiceIn *) hw;
    struct audsettings obt_as = *as;
    int esdfmt = ESD_STREAM | ESD_RECORD;
    int result = -1;

    /* shut down verbose debug spew */
    if (!D_ACTIVE)
        stdio_disable();

    esdfmt |= (as->nchannels == 2) ? ESD_STEREO : ESD_MONO;
    switch (as->fmt) {
    case AUD_FMT_S8:
    case AUD_FMT_U8:
        esdfmt |= ESD_BITS8;
        obt_as.fmt = AUD_FMT_U8;
        break;

    case AUD_FMT_S16:
    case AUD_FMT_U16:
        esdfmt |= ESD_BITS16;
        obt_as.fmt = AUD_FMT_S16;
        break;

    case AUD_FMT_S32:
    case AUD_FMT_U32:
        dolog ("Will use 16 instead of 32 bit samples\n");
        esdfmt |= ESD_BITS16;
        obt_as.fmt = AUD_FMT_S16;
        break;
    }
    obt_as.endianness = AUDIO_HOST_ENDIANNESS;

    audio_pcm_init_info (&hw->info, &obt_as);

    hw->samples = conf.samples;
    esd->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift);
    if (!esd->pcm_buf) {
        dolog ("Could not allocate buffer (%d bytes)\n",
               hw->samples << hw->info.shift);
        goto exit;
    }

    esd->fd = FF(esd_record_stream) (esdfmt, as->freq, conf.adc_host, NULL);
    if (esd->fd < 0) {
        if (conf.adc_host == NULL) {
            esd->fd = FF(esd_record_stream) (esdfmt, as->freq, "localhost", NULL);
        }
        if (esd->fd < 0) {
            qesd_logerr (errno, "esd_record_stream failed\n");
            goto fail2;
        }
    }

    {
        int  flags;
        flags = fcntl(esd->fd, F_GETFL);
        fcntl(esd->fd, F_SETFL, flags | O_NONBLOCK);
    }

    result = 0;  /* success */
    goto exit;

 fail2:
    qemu_free (esd->pcm_buf);
    esd->pcm_buf = NULL;

 exit:
    if (!D_ACTIVE)
        stdio_enable();

    return result;
}
Example #12
File: winwaveaudio.c Project: 0bliv10n/s2e
static int winwave_init_in (HWVoiceIn *hw, struct audsettings *as)
{
    int i;
    int err;
    MMRESULT mr;
    WAVEFORMATEX wfx;
    WaveVoiceIn *wave;

    wave = (WaveVoiceIn *) hw;

    InitializeCriticalSection (&wave->crit_sect);

    err = waveformat_from_audio_settings (&wfx, as);
    if (err) {
        goto err0;
    }

    mr = waveInOpen (&wave->hwi, WAVE_MAPPER, &wfx,
                     (DWORD_PTR) winwave_callback_in,
                     (DWORD_PTR) wave, CALLBACK_FUNCTION);
    if (mr != MMSYSERR_NOERROR) {
        winwave_logerr (mr, "waveInOpen");
        goto err1;
    }

    /* input voice: allocate conf.adc_headers, matching the loop below */
    wave->hdrs = audio_calloc (AUDIO_FUNC, conf.adc_headers,
                               sizeof (*wave->hdrs));
    if (!wave->hdrs) {
        goto err2;
    }

    audio_pcm_init_info (&hw->info, as);
    hw->samples = conf.adc_samples * conf.adc_headers;
    wave->avail = 0;

    wave->pcm_buf = audio_calloc (AUDIO_FUNC, conf.adc_samples,
                                  conf.adc_headers << hw->info.shift);
    if (!wave->pcm_buf) {
        goto err3;
    }

    for (i = 0; i < conf.adc_headers; ++i) {
        WAVEHDR *h = &wave->hdrs[i];

        h->dwUser = 0;
        h->dwBufferLength = conf.adc_samples << hw->info.shift;
        h->lpData = advance (wave->pcm_buf, i * h->dwBufferLength);
        h->dwFlags = 0;

        mr = waveInPrepareHeader (wave->hwi, h, sizeof (*h));
        if (mr != MMSYSERR_NOERROR) {
            winwave_logerr (mr, "waveInPrepareHeader(%d)", i);
            goto err4;
        }
    }

    wave->paused = 1;
    winwave_add_buffers (wave, hw->samples);
    return 0;

 err4:
    g_free (wave->pcm_buf);
 err3:
    g_free (wave->hdrs);
 err2:
    winwave_anal_close_in (wave);
 err1:
 err0:
    return -1;
}