예제 #1
0
파일: sound.c 프로젝트: bernds/UAE
/* Probe and initialise the sound hardware (Sound Blaster, with a hook
 * for further cards), negotiate bits/rate/buffer size with the driver,
 * install the matching per-sample event handler and prime the
 * frequency-interpolation history.
 * Returns 1 on success, 0 if no supported card was detected. */
int init_sound (void)
{
    int tmp;
    int rate;
    int i;

    /* Sanity check: swap a reversed min/max buffer-size pair. */
    if (currprefs.sound_minbsiz > currprefs.sound_maxbsiz) {
	fprintf(stderr, "Minimum sound buffer size bigger than maximum, exchanging.\n");
	tmp = currprefs.sound_minbsiz;
	currprefs.sound_minbsiz = currprefs.sound_maxbsiz;
	currprefs.sound_maxbsiz = tmp;
    }

    dspbits = currprefs.sound_bits;
    rate = currprefs.sound_freq;
    sndbufsize = currprefs.sound_maxbsiz;

    /* Try each supported card in turn; the detect routine may adjust
     * dspbits/rate/sndbufsize to what the hardware actually supports.
     * Give up if no card responds. */
    if (!SB_DetectInitSound(&dspbits, &rate, &sndbufsize, direct_buffers, &currprefs.stereo)
	/* && !OTHER_CARD_DETECT_ROUTINE(...) */)
	return 0;

    /* Record what the hardware accepted. */
    currprefs.sound_freq = rate;
    /* Round the minimum buffer size down to a multiple of four bytes. */
    currprefs.sound_minbsiz = (currprefs.sound_minbsiz>>2)<<2;
    currprefs.sound_maxbsiz = sndbufsize;

    /* Emulated cycles between output samples, derived from PAL timing. */
    sample_evtime = (long)MAXHPOS_PAL * MAXVPOS_PAL * VBLANK_HZ_PAL / rate;

    /* Pick the sample handler matching the negotiated format. */
    if (dspbits == 16) {
	init_sound_table16 ();
	if (currprefs.stereo)
	    eventtab[ev_sample].handler = direct_stereo_sample16_handler;
	else
	    eventtab[ev_sample].handler = direct_mono_sample16_handler;
    } else {
	init_sound_table8 ();
	if (currprefs.stereo)
	    eventtab[ev_sample].handler = direct_stereo_sample8_handler;
	else
	    eventtab[ev_sample].handler = direct_mono_sample8_handler;
    }
    printf ("Sound driver found and configured for %d bits at %d Hz, buffer is %d:%d bytes\n",
	    dspbits, rate, currprefs.sound_minbsiz, currprefs.sound_maxbsiz);
    sndbufpt = sndbuffer;
    direct_sndbufpt = direct_buffers[0];

    /* Seed the frequency-interpolation ring with the current rate. */
    sound_curfreq = currprefs.sound_freq;
    for (i=0; i<INTERPOL_SIZE; i++)
	freq_buf[i] = sound_curfreq;
    buf_tot = sound_curfreq * INTERPOL_SIZE;

#ifdef FRAME_RATE_HACK
    /* Shorten the frame period slightly to keep the audio buffer fed. */
    vsynctime = vsynctime * 9 / 10;
#endif

    return 1;
}
예제 #2
0
파일: sound.cpp 프로젝트: voorhees1979/PUAE
/* Set up the BeOS BSoundPlayer back end: size and zero the double
 * buffer, register the streaming callback, and start playback.
 * Returns 1 on success, 3 when sound output is disabled in the prefs,
 * 0 if the player already exists or could not be created. */
int init_sound (void)
{
    /* Already initialised?  Nothing to do. */
    if (gSoundPlayer != NULL)
        return 0;

    const int channels = currprefs.sound_stereo ? 2 : 1;

    /* Buffer length in samples: one latency interval worth of audio,
       rounded to a power of two. */
    gSoundBufferSize = currprefs.sound_freq * currprefs.sound_latency *
                       channels / 1000;
    gSoundBufferSize = get_nearest_power_of_2 (gSoundBufferSize);

    media_raw_audio_format audioFormat;
    audioFormat.frame_rate    = currprefs.sound_freq;
    audioFormat.channel_count = channels;
    audioFormat.format        = media_raw_audio_format::B_AUDIO_FLOAT;
    audioFormat.byte_order    = B_MEDIA_HOST_ENDIAN;
    audioFormat.buffer_size   = gSoundBufferSize * sizeof(float);

    gSoundPlayer = new BSoundPlayer (&audioFormat, "UAE SoundPlayer", stream_func16);
    sound_ready = (gSoundPlayer != NULL);

    if (!currprefs.produce_sound)
        return 3;

    sound_sync_sem = create_sem (0, "UAE Sound Sync Semaphore");

    /* Two adjacent half-buffers: the emulation fills one while the
       player callback drains the other. */
    gBufferReadPos     = 0;
    gDoubleBufferWrite = new uae_u16[2 * gSoundBufferSize];
    gDoubleBufferRead  = gDoubleBufferWrite + gSoundBufferSize;

    buffer = gDoubleBufferWrite;
    memset (buffer, 0, 4 * gSoundBufferSize);   /* zero both halves */
    paula_sndbufpt = paula_sndbuffer = buffer;

    paula_sndbufsize = sizeof (uae_u16) * gSoundBufferSize;
    sample_handler = currprefs.sound_stereo ? sample16s_handler : sample16_handler;
    init_sound_table16 ();

    sound_available = 1;
    obtainedfreq = currprefs.sound_freq;

    write_log ("BeOS sound driver found and configured at %d Hz, buffer is %d samples (%d ms)\n",
               currprefs.sound_freq, gSoundBufferSize / channels,
               (gSoundBufferSize / channels) * 1000 / currprefs.sound_freq);

    if (gSoundPlayer == NULL)
        return 0;

    gSoundPlayer->Start ();
    gSoundPlayer->SetHasData (true);
    return 1;
}
예제 #3
0
/* Initialise the DEC AudioFile (AF) sound driver: connect to the
 * audio server, pick the first plain mono speaker device, and create
 * a 16-bit linear audio context on it.
 * Returns 1 on success, 0 if the server or a usable device is absent. */
int init_sound (void)
{
    AFSetACAttributes   attributes;
    AFDeviceDescriptor *desc;
    int                 dev;

    /* Connect to the AudioFile server. */
    aud = AFOpenAudioConn(NULL);
    have_sound = (aud != NULL);
    if (!have_sound)
	return 0;

    /* Scan for the first device that is mono playback and not wired
       to a phone line.  Note: rate/sndbufsize are updated for every
       device examined, so they reflect the last one seen even when
       the scan fails. */
    for (dev = 0; dev < ANumberOfAudioDevices(aud); dev++) {
	desc = AAudioDeviceDescriptor(aud, dev);
	rate = desc->playSampleFreq;
	sndbufsize = (rate / 8) * 4;
	if (desc->inputsFromPhone == 0 &&
	    desc->outputsToPhone == 0 &&
	    desc->playNchannels == 1)
	    break;
    }
    if (dev == ANumberOfAudioDevices(aud))
	return 0;	/* no suitable device found */

    /* Create a 16-bit linear audio context on the chosen device. */
    attributes.type = LIN16;
    ac = AFCreateAC(aud, dev, ACEncodingType, &attributes);
    aftime = AFGetTime(ac);

    init_sound_table16 ();
    sample_handler = sample16_handler;
    /* Emulated cycles between output samples, from PAL timing. */
    sample_evtime = (long)MAXHPOS_PAL * MAXVPOS_PAL * VBLANK_HZ_PAL / rate;

    sndbufpt = sndbuffer;
    sound_available = 1;
    printf ("Sound driver found and configured for %d bits at %d Hz, buffer is %d bytes\n", 16, rate, sndbufsize);
    return 1;
}
예제 #4
0
파일: sound.c 프로젝트: bernds/UAE
/* Initialise the Sun-style /dev/audio driver: program the device for
 * the requested rate and precision (u-law for 8-bit, linear for
 * 16-bit) and install the matching sample handler.
 * Returns 1 on success, 0 if the device rejects the format. */
int init_sound (void)
{
    struct audio_info sfd_info;
    int freq, bits;

    /* Clamp a nonsensical requested buffer size to a sane default. */
    if (currprefs.sound_maxbsiz < 128 || currprefs.sound_maxbsiz > 44100) {
	fprintf(stderr, "Sound buffer size %d out of range.\n", currprefs.sound_maxbsiz);
	currprefs.sound_maxbsiz = 8192;
    }

    freq = currprefs.sound_freq;
    bits = currprefs.sound_bits;

    /* Program the device: mono, requested rate/precision, u-law for
       8-bit output and linear PCM for anything wider. */
    AUDIO_INITINFO(&sfd_info);
    sfd_info.play.sample_rate = freq;
    sfd_info.play.channels    = 1;
    sfd_info.play.precision   = bits;
    sfd_info.play.encoding    = bits == 8 ? AUDIO_ENCODING_ULAW : AUDIO_ENCODING_LINEAR;

    if (ioctl(sound_fd, AUDIO_SETINFO, &sfd_info)) {
	fprintf(stderr, "Can't use sample rate %d with %d bits, %s!\n", freq, bits, bits == 8 ? "ulaw" : "linear");
	return 0;
    }
    obtained_freq = freq;

    init_sound_table16 ();

    sample_handler = bits == 8 ? sample_ulaw_handler : sample16_handler;

    sndbufpt = sndbuffer;
    sound_available = 1;
    sndbufsize = currprefs.sound_maxbsiz;
    printf ("Sound driver found and configured for %d bits, %s at %d Hz, buffer is %d bytes\n", bits, bits == 8 ? "ulaw" : "linear", freq, sndbufsize);
    return 1;
}
예제 #5
0
파일: sound.c 프로젝트: agwatic/PUAE
/* Acquire the Pepper (NaCl) audio interfaces, verify they work by
 * running a throwaway init/close cycle, then configure UAE's 16-bit
 * stereo sample pipeline.
 * Returns 1 (sound_available) on success, 0 on any failure. */
int setup_sound(void)
{
    /* Fetch all three Pepper handles up front, then validate each. */
    ppb_audio_interface = (PPB_Audio *) NaCl_GetInterface(PPB_AUDIO_INTERFACE);
    ppb_audio_config_interface =
        (PPB_AudioConfig *) NaCl_GetInterface(PPB_AUDIO_CONFIG_INTERFACE);
    pp_instance = NaCl_GetInstance();

    if (!ppb_audio_interface) {
        write_log("Could not acquire PPB_Audio interface.\n");
        return 0;
    }
    if (!ppb_audio_config_interface) {
        write_log("Could not acquire PPB_AudioConfig interface.\n");
        return 0;
    }
    if (!pp_instance) {
        write_log("Could not find current Pepper instance.\n");
        return 0;
    }

    /* Trial run: make sure a real audio resource can be created,
       then release it again. */
    if (!init_sound())
        return 0;
    close_sound();

    write_log("Pepper audio successfully set up.\n");
    write_log("Frequency: %d\n", currprefs.sound_freq);
    write_log("Stereo   : %d\n", currprefs.sound_stereo);
    write_log("Latency  : %d\n", currprefs.sound_latency);

    /* Configure the emulator side: 16-bit stereo samples. */
    init_sound_table16();
    sample_handler = sample16s_handler;
    obtainedfreq = currprefs.sound_freq;
    have_sound = 1;
    sound_available = 1;
    update_sound (fake_vblank_hz, 1, currprefs.ntscmode);

    return sound_available;
}
예제 #6
0
파일: sound.c 프로젝트: bernds/UAE
/* Initialise the MME (DEC multimedia / waveOut) sound driver: open
 * the wave device at the requested rate/precision, allocate the
 * shared audio buffer used to feed it, and install the matching
 * sample handler.
 * Returns 1 on success, 0 on any failure (device left closed). */
int init_sound (void)
{
    int rate;
    int dspbits;
    int channels;
    MMRESULT	  status;
    LPPCMWAVEFORMAT waveformat;

    /* Clamp a nonsensical requested buffer size to a sane default. */
    if (currprefs.sound_maxbsiz < 128 || currprefs.sound_maxbsiz > 16384) {
	fprintf(stderr, "Sound buffer size %d out of range.\n", currprefs.sound_maxbsiz);
	currprefs.sound_maxbsiz = 8192;
    }

    sndbufsize = currprefs.sound_maxbsiz;
    dspbits = currprefs.sound_bits;
    rate = currprefs.sound_freq;
    channels = 1;	/* mono output only */

    /* Describe the requested PCM format for waveOutOpen. */
    if((waveformat = (LPPCMWAVEFORMAT)
	mmeAllocMem(sizeof(PCMWAVEFORMAT))) == NULL ) {
	fprintf(stderr, "Failed to allocate PCMWAVEFORMAT struct\n");
	return 0;
    }
    waveformat->wf.nSamplesPerSec = rate;
    waveformat->wf.nChannels = channels;
    waveformat->wBitsPerSample = dspbits;
    waveformat->wf.wFormatTag = WAVE_FORMAT_PCM;

    bytes_per_sample = waveformat->wf.nChannels *
	(waveformat->wBitsPerSample/8);
    waveformat->wf.nBlockAlign = bytes_per_sample;
    waveformat->wf.nAvgBytesPerSec = bytes_per_sample *
	waveformat->wf.nSamplesPerSec;

    /* Open the audio device with desired rate/format */
    status = waveOutOpen( &mme_handle,
			  WAVE_MAPPER,
			  (LPWAVEFORMAT)waveformat,
			  (void (*)())mme_callback,
			  (unsigned int)NULL,
			  CALLBACK_FUNCTION | WAVE_OPEN_SHAREABLE );
    mmeFreeMem(waveformat);

    if(status != MMSYSERR_NOERROR) {
	fprintf(stderr, "waveOutOpen failed - status = %d\n", status);
	return 0;
    }

    /* Allocate wave header for use in write */
    if((WaveHeader = (LPWAVEHDR)
	mmeAllocMem(sizeof(WAVEHDR))) == NULL ) {
	fprintf(stderr, "Failed to allocate WAVEHDR struct\n");
	waveOutClose(mme_handle);	/* don't leak the open device */
	return 0;
    }
    /* Allocate shared audio buffer for communicating with audio device */
    if ((mme_audiobuf = (LPSTR)
	 mmeAllocBuffer(sndbufsize*SOUND_NUMBUF*bytes_per_sample*2)) == NULL) {
	fprintf(stderr, "Failed to allocate shared audio buffer\n");
	mmeFreeMem(WaveHeader);
	WaveHeader = NULL;
	waveOutClose(mme_handle);	/* don't leak the open device */
	return 0;
    }
    sndbuffer = mme_audiobuf;
    obtained_freq = rate;

    /* Pick the sample handler matching the negotiated precision. */
    if (dspbits == 16) {
	init_sound_table16 ();
	sample_handler = sample16_handler;
    } else {
	init_sound_table8 ();
	sample_handler = sample8_handler;
    }
    sound_available = 1;
    printf ("Sound driver found and configured for %d bits at %d Hz, buffer is %d bytes\n", dspbits, rate, sndbufsize);
    mme_sndbufpt = sndbufpt = sndbuffer;
    return 1;
}