Example #1
void
PortAudioOutput::initAudio( long sampleRate, int channels )
{
    if ( m_audio )
    {
        Pa_CloseStream( m_audio );
        m_audio = 0;
    }

    if ( m_deviceNum >= Pa_GetDeviceCount() || m_deviceNum < 0 )
        m_deviceNum = 0;

    int bufferSize = 512;
    int deviceID = internalSoundCardID( m_deviceNum );
    qDebug() << "Internal ID:" << deviceID << "-" << "Config:" << m_deviceNum;

    if ( deviceID < 0 )
    {
        emit error( Radio_NoSoundcard, tr( "Your soundcard is either busy or not present. "
            "Try restarting the application." ) );
        return;
    }

    PaStreamParameters p;
    memset( &p, 0, sizeof( PaStreamParameters ) );

    p.sampleFormat = paInt16;
    p.channelCount = 0;

    // walk this host API's devices until one with enough output channels is found
    while ( p.channelCount < channels && deviceID < Pa_GetDeviceCount() )
    {
#ifdef Q_WS_WIN
        p.device = Pa_HostApiDeviceIndexToDeviceIndex( Pa_HostApiTypeIdToHostApiIndex( paDirectSound ), deviceID++ );
#endif
#ifdef Q_WS_MAC
        p.device = Pa_HostApiDeviceIndexToDeviceIndex( Pa_HostApiTypeIdToHostApiIndex( paCoreAudio ), deviceID++ );
#endif
#ifdef Q_WS_X11
        p.device = Pa_HostApiDeviceIndexToDeviceIndex( Pa_HostApiTypeIdToHostApiIndex( paALSA ), deviceID++ );
#endif

        p.suggestedLatency = Pa_GetDeviceInfo( p.device )->defaultHighOutputLatency;
        p.channelCount = Pa_GetDeviceInfo( p.device )->maxOutputChannels;
    }

    qDebug() << "Using device with id:" << --deviceID;
    p.channelCount = channels;
//     Pa_IsFormatSupported( 0, &p, bufferSize );

    m_deviceInfo = *Pa_GetDeviceInfo( p.device );
    m_sourceChannels = channels;

    PaError error = Pa_OpenStream( &m_audio, 0, &p, sampleRate, bufferSize, 0, audioCallback, this );
    if ( error != paNoError )
    {
        qDebug() << "PortAudio Error:" << Pa_GetErrorText( error );
        m_audio = 0;
    }
}
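The only platform-specific part of the loop above is which PaHostApiTypeId is passed to Pa_HostApiTypeIdToHostApiIndex(); the translation to a global device index is the same everywhere. A minimal, self-contained sketch of just that step (the helper name and the choice of paALSA are illustrative assumptions, not part of the original code):

#include <stdio.h>
#include "portaudio.h"

/* Map the n-th device of a given host API to a global PaDeviceIndex.
   A negative PaErrorCode is returned if the host API is unavailable or n is out of range. */
static PaDeviceIndex nthDeviceOfHostApi( PaHostApiTypeId type, int n )
{
    PaHostApiIndex api = Pa_HostApiTypeIdToHostApiIndex( type );
    if( api < 0 )
        return api;   /* host API not compiled in or not present */
    return Pa_HostApiDeviceIndexToDeviceIndex( api, n );
}

int main( void )
{
    if( Pa_Initialize() != paNoError )
        return 1;
    /* paALSA is only an example; substitute paDirectSound or paCoreAudio as appropriate */
    PaDeviceIndex dev = nthDeviceOfHostApi( paALSA, 0 );
    if( dev >= 0 )
        printf( "device %d: %s\n", dev, Pa_GetDeviceInfo( dev )->name );
    Pa_Terminate();
    return 0;
}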
Example #2
File: pa_dev.c  Project: avble/natClientEx
/* Internal: Get PortAudio default output device ID */
static int pa_get_default_output_dev(int channel_count)
{
    int i, count;

    /* Special for Windows - try to use the DirectSound implementation
     * first since it provides better latency.
     */
#if PJMEDIA_PREFER_DIRECT_SOUND
    if (Pa_HostApiTypeIdToHostApiIndex(paDirectSound) >= 0) {
	const PaHostApiInfo *pHI;
	int index = Pa_HostApiTypeIdToHostApiIndex(paDirectSound);
	pHI = Pa_GetHostApiInfo(index);
	if (pHI) {
	    const PaDeviceInfo *paDevInfo = NULL;
	    paDevInfo = Pa_GetDeviceInfo(pHI->defaultOutputDevice);
	    if (paDevInfo && paDevInfo->maxOutputChannels >= channel_count)
		return pHI->defaultOutputDevice;
	}
    }
#endif

    /* Enumerate each host API's default output device, and return
     * the first one with enough output channels.
     */
    count = Pa_GetHostApiCount();
    for (i=0; i < count; ++i) {
	const PaHostApiInfo *pHAInfo;

	pHAInfo = Pa_GetHostApiInfo(i);
	if (!pHAInfo)
	    continue;

	if (pHAInfo->defaultOutputDevice >= 0) {
	    const PaDeviceInfo *paDevInfo;

	    paDevInfo = Pa_GetDeviceInfo(pHAInfo->defaultOutputDevice);

	    if (paDevInfo->maxOutputChannels >= channel_count)
		return pHAInfo->defaultOutputDevice;
	}
    }

    /* If still no device is found, enumerate all devices */
    count = Pa_GetDeviceCount();
    for (i=0; i<count; ++i) {
	const PaDeviceInfo *paDevInfo;

	paDevInfo = Pa_GetDeviceInfo(i);
	if (paDevInfo->maxOutputChannels >= channel_count)
	    return i;
    }

    return -1;
}
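A hypothetical caller in the same file would feed the index returned by pa_get_default_output_dev() straight into PaStreamParameters; a sketch of that usage (the helper name, sample rate, frame count and callback are placeholders, not part of the original file):

/* Hypothetical caller: open a stereo 16-bit output stream on the device
 * picked by pa_get_default_output_dev() above.
 */
static PaError open_stereo_output(PaStream **stream, PaStreamCallback *cb, void *user)
{
    PaStreamParameters out;
    int dev = pa_get_default_output_dev(2);

    if (dev < 0)
        return paDeviceUnavailable;

    out.device = dev;
    out.channelCount = 2;
    out.sampleFormat = paInt16;
    out.suggestedLatency = Pa_GetDeviceInfo(dev)->defaultLowOutputLatency;
    out.hostApiSpecificStreamInfo = NULL;

    return Pa_OpenStream(stream, NULL, &out, 44100, 256, paNoFlag, cb, user);
}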
Example #3
void MFSound_InitModulePlatformSpecific(int *pSoundDataSize, int *pVoiceDataSize)
{
	MFCALLSTACK;

	// we need to return the size of the internal structures so the platform independent
	// code can make the correct allocations.
	*pSoundDataSize = sizeof(MFSoundDataInternal);

	MFDebug_Log(2, "Initialising PortAudio driver.");

	// init portaudio
	Pa_Initialize();

	// choose the output device
	PaDeviceIndex device = Pa_GetDefaultOutputDevice();

	// HACK: try and find an ALSA device (default OSS sucks)
	PaHostApiIndex alsaAPI = Pa_HostApiTypeIdToHostApiIndex(paALSA);
	if(alsaAPI >= 0)
	{
		int numDevices = Pa_GetDeviceCount();
		for(int a=0; a<numDevices; ++a)
		{
			pDeviceInfo = Pa_GetDeviceInfo(a);	// pDeviceInfo, pHostAPIInfo and pPAStream are declared elsewhere in this module
			if(pDeviceInfo->hostApi == alsaAPI)
			{
				device = a;
				break;
			}
		}
	}

	pDeviceInfo = Pa_GetDeviceInfo(device);
	pHostAPIInfo = Pa_GetHostApiInfo(pDeviceInfo->hostApi);

	MFDebug_Log(2, MFStr("PortAudio output: %s", pDeviceInfo->name));
	MFDebug_Log(2, MFStr("PortAudio host: %s", pHostAPIInfo->name));
	MFDebug_Log(2, MFStr("Sample rate: %g", (float)pDeviceInfo->defaultSampleRate));
	MFDebug_Log(2, MFStr("In/Out channels: %d/%d", pDeviceInfo->maxInputChannels, pDeviceInfo->maxOutputChannels));
	MFDebug_Log(2, MFStr("Input latency: %g-%g", (float)pDeviceInfo->defaultLowInputLatency, (float)pDeviceInfo->defaultHighInputLatency));
	MFDebug_Log(2, MFStr("Output latency: %g-%g", (float)pDeviceInfo->defaultLowOutputLatency, (float)pDeviceInfo->defaultHighOutputLatency));

	// create a very low latency audio output stream on the device chosen above
	PaStreamParameters params;
	params.device = device;
	params.channelCount = 2;
	params.sampleFormat = paInt16;
	params.suggestedLatency = 0.0167;
	params.hostApiSpecificStreamInfo = NULL;

	PaError error = Pa_OpenStream(&pPAStream, NULL, &params, pDeviceInfo->defaultSampleRate, paFramesPerBufferUnspecified, paPrimeOutputBuffersUsingStreamCallback, MFSound_MixCallback, NULL);

	if(error != paNoError)
		MFDebug_Log(2, MFStr("Error: %s", Pa_GetErrorText(error)));
	else
		Pa_StartStream(pPAStream);
}
PaDeviceIndex PA_AudioIO_DS::setDevice(int deviceIndex){
    PaHostApiIndex dsInd = Pa_HostApiTypeIdToHostApiIndex(paDirectSound);
    const PaHostApiInfo* apiInf = nullptr;

    apiInf = Pa_GetHostApiInfo(dsInd);
    if(!apiInf) throw Pa_NoApiException();
    if(-1 == deviceIndex) return apiInf->defaultOutputDevice;
    if(deviceIndex >= apiInf->deviceCount || deviceIndex < 0){ // valid host-API device indices run 0 .. deviceCount-1
        throw Pa_DeviceIndexNotFoundException();
    }
    return Pa_HostApiDeviceIndexToDeviceIndex(dsInd, deviceIndex);
}
PaDeviceIndex PA_AudioIO_ALSA::setDevice(int deviceIndex = 0){
    PaHostApiIndex alsaInd = Pa_HostApiTypeIdToHostApiIndex(paALSA);
    const PaHostApiInfo* apiInf = Pa_GetHostApiInfo(alsaInd);

    if(!apiInf) throw Pa_NoApiException();
    if(-1 == deviceIndex) return apiInf->defaultOutputDevice;

    if(deviceIndex >= apiInf->deviceCount || deviceIndex < 0){
        throw Pa_DeviceIndexNotFoundException();
    }
    return Pa_HostApiDeviceIndexToDeviceIndex(alsaInd, deviceIndex);
}
PaDeviceIndex PA_AudioIO_WMME::setDevice(int deviceIndex){
    PaHostApiIndex mmeInd = Pa_HostApiTypeIdToHostApiIndex(paMME);
    const PaHostApiInfo* apiInf = nullptr;

    apiInf = Pa_GetHostApiInfo(mmeInd);
    if(!apiInf) throw Pa_NoApiException();
    if(-1 == deviceIndex) return apiInf->defaultOutputDevice;
    if(deviceIndex >= apiInf->deviceCount || deviceIndex < 0){
        throw Pa_DeviceIndexNotFoundException();
    }
    std::cout << "Device index is: " << deviceIndex << std::endl;
    return Pa_HostApiDeviceIndexToDeviceIndex(mmeInd, deviceIndex);
}
PaDeviceIndex PA_AudioIO_ASIO::setDevice(int deviceIndex){
    PaHostApiIndex asioInd = Pa_HostApiTypeIdToHostApiIndex(paASIO);
    const PaHostApiInfo* apiInf = nullptr;

    apiInf = Pa_GetHostApiInfo(asioInd);
    if(!apiInf) throw Pa_NoApiException();
    if(-1 == deviceIndex) return apiInf->defaultOutputDevice;
    if(deviceIndex >= apiInf->deviceCount || deviceIndex < 0){
        throw Pa_DeviceIndexNotFoundException();
    }
    std::cout << "Warning: ASIO blocking is currently unimplemented in portaudio." << std::endl;
    return Pa_HostApiDeviceIndexToDeviceIndex(asioInd, deviceIndex);
}
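All four setDevice() variants follow the same pattern: resolve the host API index, fall back to that API's default output device when -1 is passed, bounds-check the host-API-relative index, then translate it. A plain-C sketch of that pattern which returns negative PaErrorCode values instead of throwing (the helper name is an assumption):

/* Translate a host-API-relative output device index into a global PaDeviceIndex.
   deviceIndex == -1 selects the host API's default output device. */
static PaDeviceIndex resolveOutputDevice( PaHostApiTypeId type, int deviceIndex )
{
    PaHostApiIndex api = Pa_HostApiTypeIdToHostApiIndex( type );
    const PaHostApiInfo *info;

    if( api < 0 )
        return api;                 /* host API not available */
    info = Pa_GetHostApiInfo( api );
    if( !info )
        return paInvalidHostApi;
    if( deviceIndex == -1 )
        return info->defaultOutputDevice;
    if( deviceIndex < 0 || deviceIndex >= info->deviceCount )
        return paInvalidDevice;     /* valid indices are 0 .. deviceCount-1 */
    return Pa_HostApiDeviceIndexToDeviceIndex( api, deviceIndex );
}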
Example #8
int main(int argc, char* argv[])
{
    PaStreamParameters outputParameters;
    PaWinDirectSoundStreamInfo directSoundStreamInfo;
    PaStream *stream;
    PaError err;
    paTestData data;
    int i;
    int deviceIndex;

    printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT);

    err = Pa_Initialize();
    if( err != paNoError ) goto error;

	deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paDirectSound ) )->defaultOutputDevice;
	if( argc == 2 ){
		sscanf( argv[1], "%d", &deviceIndex );
	}

	printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name );

    /* initialise sinusoidal wavetable */
    for( i=0; i<TABLE_SIZE; i++ )
    {
        data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
    }

	data.phase = 0;
	data.currentChannel = 0;
	data.cycleCount = 0;

    outputParameters.device = deviceIndex;
    outputParameters.channelCount = CHANNEL_COUNT;
    outputParameters.sampleFormat = paFloat32; /* 32 bit floating point processing */
    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;
    outputParameters.hostApiSpecificStreamInfo = NULL;

    /* it's not strictly necessary to provide a channelMask for surround sound
       output. But if you want to be sure which channel mask PortAudio will use
       then you should supply one */
    directSoundStreamInfo.size = sizeof(PaWinDirectSoundStreamInfo);
    directSoundStreamInfo.hostApiType = paDirectSound; 
    directSoundStreamInfo.version = 1;
    directSoundStreamInfo.flags = paWinDirectSoundUseChannelMask;
    directSoundStreamInfo.channelMask = PAWIN_SPEAKER_5POINT1; /* request 5.1 output format */
    outputParameters.hostApiSpecificStreamInfo = &directSoundStreamInfo;

	if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported  ){
		printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT );
	}else{
		printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT );
	}

    err = Pa_OpenStream(
              &stream,
              NULL, /* no input */
              &outputParameters,
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              paClipOff,      /* we won't output out of range samples so don't bother clipping them */
              patestCallback,
              &data );
    if( err != paNoError ) goto error;

    err = Pa_StartStream( stream );
    if( err != paNoError ) goto error;

    printf("Play for %d seconds.\n", NUM_SECONDS );
    Pa_Sleep( NUM_SECONDS * 1000 );

    err = Pa_StopStream( stream );
    if( err != paNoError ) goto error;

    err = Pa_CloseStream( stream );
    if( err != paNoError ) goto error;

    Pa_Terminate();
    printf("Test finished.\n");
    
    return err;
error:
    Pa_Terminate();
    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    return err;
}
Example #9
static int output_data(char *buf, int32 nbytes)
{
	unsigned int i;
	int32 samplesToGo;
	char *bufepoint;

    if(pa_active == 0) return -1; 
	
	while((pa_active==1) && (pa_data.samplesToGo > bytesPerInBuffer)){ Pa_Sleep(1);};
	
//	if(pa_data.samplesToGo > DATA_BLOCK_SIZE){ 
//		Sleep(  (pa_data.samplesToGo - DATA_BLOCK_SIZE)/dpm.rate/4  );
//	}
	samplesToGo=pa_data.samplesToGo;
	bufepoint=pa_data.bufepoint;

	if (pa_data.buf+bytesPerInBuffer*2 >= bufepoint + nbytes){
		memcpy(bufepoint, buf, nbytes);
		bufepoint += nbytes;
		//buf += nbytes;
	}else{
		int32 send = pa_data.buf+bytesPerInBuffer*2 - bufepoint;
		if (send != 0) memcpy(bufepoint, buf, send);
		buf += send;
		memcpy(pa_data.buf, buf, nbytes - send);
		bufepoint = pa_data.buf + nbytes - send;
		//buf += nbytes-send;
	}
	samplesToGo += nbytes;

	pa_data.samplesToGo=samplesToGo;
	pa_data.bufepoint=bufepoint;

/*
	if(firsttime==1){
		err = Pa_StartStream( stream );

		if( err != paNoError ) goto error;
		firsttime=0;
	}
*/
#if PORTAUDIO_V19
	if( 0==Pa_IsStreamActive(stream)){
#else
	if( 0==Pa_StreamActive(stream)){
#endif
		err = Pa_StartStream( stream );

		if( err != paNoError ) goto error;
	}
		
//	if(ctl->id_character != 'r' && ctl->id_character != 'A' && ctl->id_character != 'W' && ctl->id_character != 'P')
//	    while((pa_active==1) && (pa_data.samplesToGo > bytesPerInBuffer)){ Pa_Sleep(1);};
//	Pa_Sleep( (pa_data.samplesToGo - bytesPerInBuffer)/dpm.rate * 1000);
	return 0;

error:
	Pa_Terminate(); pa_active=0;
	ctl->cmsg(CMSG_ERROR, VERB_NORMAL, "PortAudio error: %s\n", Pa_GetErrorText( err ) );
	return -1;
}

static void close_output(void)
{	
	if( pa_active==0) return;
#ifdef PORTAUDIO_V19
	if(Pa_IsStreamActive(stream)){
#else
	if(Pa_StreamActive(stream)){
#endif
		Pa_Sleep(  bytesPerInBuffer/dpm.rate*1000  );
	}	
	err = Pa_AbortStream( stream );
    if( (err!=paStreamIsStopped) && (err!=paNoError) ) goto error;
	err = Pa_CloseStream( stream );
//	if( err != paNoError ) goto error;
	Pa_Terminate(); 
	pa_active=0;

#ifdef AU_PORTAUDIO_DLL
#ifndef PORTAUDIO_V19
  free_portaudio_dll();
#endif
#endif

	return;

error:
	Pa_Terminate(); pa_active=0;
#ifdef AU_PORTAUDIO_DLL
#ifndef PORTAUDIO_V19
  free_portaudio_dll();
#endif
#endif
	ctl->cmsg(  CMSG_ERROR, VERB_NORMAL, "PortAudio error: %s\n", Pa_GetErrorText( err ) );
	return;
}


static int acntl(int request, void *arg)
{
    switch(request)
    {
 
      case PM_REQ_GETQSIZ:
		 *(int *)arg = bytesPerInBuffer*2;
    	return 0;
		//break;
      case PM_REQ_GETFILLABLE:
		 *(int *)arg = bytesPerInBuffer*2-pa_data.samplesToGo;
    	return 0;
		//break;
      case PM_REQ_GETFILLED:
		 *(int *)arg = pa_data.samplesToGo;
    	return 0;
		//break;

    	
   	case PM_REQ_DISCARD:
    case PM_REQ_FLUSH:
    	pa_data.samplesToGo=0;
    	pa_data.bufpoint=pa_data.bufepoint;
    	err = Pa_AbortStream( stream );
    	if( (err!=paStreamIsStopped) && (err!=paNoError) ) goto error;
    	err = Pa_StartStream( stream );
    	if(err!=paNoError) goto error;
		return 0;

		//break;

    case PM_REQ_RATE:  
    	{
    		int i;
    		double sampleRateBack;
    		i = *(int *)arg; //* sample rate in and out *
    		close_output();
    		sampleRateBack=dpm.rate;
    		dpm.rate=i;
    		if(0==open_output()){
    			return 0;
    		}else{    		
    			dpm.rate=sampleRateBack;
    			open_output();
    			return -1;
    		}
    	}
    	//break;

//    case PM_REQ_RATE: 
//          return -1;
    	
    case PM_REQ_PLAY_START: //* Called just before playing *
    case PM_REQ_PLAY_END: //* Called just after playing *
        return 0;
      
	default:
    	return -1;

    }
	return -1;
error:
/*
	Pa_Terminate(); pa_active=0;
#ifdef AU_PORTAUDIO_DLL
  free_portaudio_dll();
#endif
*/
	ctl->cmsg(  CMSG_ERROR, VERB_NORMAL, "PortAudio error in acntl : %s\n", Pa_GetErrorText( err ) );
	return -1;
}


static void print_device_list(void){
	PaDeviceIndex maxDeviceIndex, i;
	PaHostApiIndex HostApiIndex;
	const PaDeviceInfo* DeviceInfo;

#if PORTAUDIO_V19
	HostApiIndex=Pa_HostApiTypeIdToHostApiIndex(HostApiTypeId);
#endif
	
	maxDeviceIndex=Pa_GetDeviceCount();
	
	for( i = 0; i < maxDeviceIndex; i++){
		DeviceInfo=Pa_GetDeviceInfo(i);
#if PORTAUDIO_V19
		if( DeviceInfo->hostApi == HostApiIndex){
#endif
			if( DeviceInfo->maxOutputChannels > 0){
				ctl->cmsg(  CMSG_ERROR, VERB_NORMAL, "%2d %s",i,DeviceInfo->name);
			}
#if PORTAUDIO_V19
		}
#endif
	}
}
Example #10
int main(int argc, char* argv[])
{
    PaStreamParameters outputParameters;
    PaWinMmeStreamInfo wmmeStreamInfo;
    PaStream *stream;
    PaError err;
    paTestData data;
    int deviceIndex;
    FILE *fp;
    const char *fileName = "c:\\test_48k.ac3.spdif";
    data.buffer = NULL;

    printf("usage: patest_wmme_ac3 fileName [paDeviceIndex]\n");
    printf("**IMPORTANT*** The provided file must include the spdif preamble at the start of every AC-3 frame. Using a normal ac3 file won't work.\n");
    printf("PortAudio Test: output a raw spdif ac3 stream. SR = %d, BufSize = %d, Chans = %d\n", 
            SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT);

        
    if( argc >= 2 )
        fileName = argv[1];

    printf( "reading spdif ac3 raw stream file %s\n", fileName );

    fp = fopen( fileName, "rb" );
    if( !fp ){
        fprintf( stderr, "error opening spdif ac3 file.\n" );
        return -1;
    }
    /* get file size */
    fseek( fp, 0, SEEK_END );
    data.bufferSampleCount = ftell( fp ) / sizeof(short);
    fseek( fp, 0, SEEK_SET );

    /* allocate buffer, read the whole file into memory */
    data.buffer = (short*)malloc( data.bufferSampleCount * sizeof(short) );
    if( !data.buffer ){
        fprintf( stderr, "error allocating buffer.\n" );
        return -1;
    }

    fread( data.buffer, sizeof(short), data.bufferSampleCount, fp );
    fclose( fp );

    data.playbackIndex = 0;

    err = Pa_Initialize();
    if( err != paNoError ) goto error;

	deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paMME ) )->defaultOutputDevice;
	if( argc >= 3 ){
		sscanf( argv[2], "%d", &deviceIndex ); /* the device index is the second argument; argv[1] is the file name */
	}

	printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name );

    
    outputParameters.device = deviceIndex;
    outputParameters.channelCount = CHANNEL_COUNT;
    outputParameters.sampleFormat = paInt16; /* IMPORTANT must use paInt16 for WMME AC3 */
    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;
    outputParameters.hostApiSpecificStreamInfo = NULL;

    wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo);
    wmmeStreamInfo.hostApiType = paMME; 
    wmmeStreamInfo.version = 1;
    wmmeStreamInfo.flags = paWinMmeWaveFormatDolbyAc3Spdif;
    outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo;


	if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported  ){
		printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT );
	}else{
		printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT );
	}

    err = Pa_OpenStream(
              &stream,
              NULL, /* no input */
              &outputParameters,
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              0,
              patestCallback,
              &data );
    if( err != paNoError ) goto error;

    err = Pa_StartStream( stream );
    if( err != paNoError ) goto error;

    printf("Play for %d seconds.\n", NUM_SECONDS );
    Pa_Sleep( NUM_SECONDS * 1000 );

    err = Pa_StopStream( stream );
    if( err != paNoError ) goto error;

    err = Pa_CloseStream( stream );
    if( err != paNoError ) goto error;

    Pa_Terminate();
    free( data.buffer );
    printf("Test finished.\n");
    
    return err;
error:
    Pa_Terminate();
    free( data.buffer );

    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    return err;
}
Example #11
int main(void)
{
    PaStreamParameters outputParameters;
    PaStream *stream;
    PaError err;
    paTestData data;
	const PaDeviceInfo *device;
	PaWasapiDeviceRole role;
    int i;
	PaWasapiStreamInfo ws_info = { 0 };

    
    printf("PortAudio Test: output sine wave. SR = %d, BufSize = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER);
    
    /* initialise sinusoidal wavetable */
    for( i=0; i<TABLE_SIZE; i++ )
    {
        data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
    }
    data.left_phase = data.right_phase = 0;
    
    err = Pa_Initialize();
    if( err != paNoError ) goto error;

	for (i = 0; i < Pa_GetDeviceCount(); ++i)
	{
		device = Pa_GetDeviceInfo(i);

		if (Pa_GetDeviceInfo(i)->maxInputChannels != 0)
			continue;
		if (Pa_GetDeviceInfo(i)->hostApi != Pa_HostApiTypeIdToHostApiIndex(paWASAPI))
			continue;

		role = PaWasapi_GetDeviceRole(i);
		if (role == eRoleSpeakers)
			break;
	}
	if (i >= Pa_GetDeviceCount() || role != eRoleSpeakers) /* the loop above fell through without finding a speakers device */
	{
		fprintf(stderr,"Error: No WASAPI Speakers.\n");
		return -1;
	}

    outputParameters.device = i; /* the WASAPI speakers device found above */

	ws_info.size		= sizeof(ws_info);
	ws_info.hostApiType = paWASAPI;
	ws_info.version		= 1;
	ws_info.flags		= paWinWasapiExclusive;

    if (outputParameters.device == paNoDevice) {
      fprintf(stderr,"Error: No default output device.\n");
      goto error;
    }
    outputParameters.channelCount = 2;
    outputParameters.sampleFormat = paInt16;
    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultHighOutputLatency;
    outputParameters.hostApiSpecificStreamInfo = &ws_info;

    err = Pa_OpenStream(
              &stream,
              NULL, /* no input */
              &outputParameters,
              SAMPLE_RATE,
              paFramesPerBufferUnspecified/*FRAMES_PER_BUFFER*/,
              paClipOff|paPrimeOutputBuffersUsingStreamCallback,      /* we won't output out of range samples so don't bother clipping them */
              patestCallback,
              &data );
    if( err != paNoError ) goto error;

    sprintf( data.message, "No Message" );
    err = Pa_SetStreamFinishedCallback( stream, &StreamFinished );
    if( err != paNoError ) goto error;

    err = Pa_StartStream( stream );
    if( err != paNoError ) goto error;

    printf("Play for %d seconds.\n", NUM_SECONDS );
    Pa_Sleep( NUM_SECONDS * 1000 );

	//while (1) { Pa_Sleep(2); }

    err = Pa_StopStream( stream );
    if( err != paNoError ) goto error;

    err = Pa_CloseStream( stream );
    if( err != paNoError ) goto error;

    Pa_Terminate();
    printf("Test finished.\n");
    
    return err;
error:
    Pa_Terminate();
    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    return err;
}
Example #12
File: jpa.c  Project: rjeschke/jpa
JNIEXPORT jint JNICALL Java_com_github_rjeschke_jpa_JPA_hostApiTypeIdToHostApiIndex(JNIEnv *env, jclass clazz, jint type)
{
    return (jint)Pa_HostApiTypeIdToHostApiIndex((PaHostApiTypeId)type);
}
int main(int argc, char* argv[])
{
    PaStreamParameters outputParameters;
    PaWinMmeStreamInfo wmmeStreamInfo;
    PaStream *stream;
    PaError err;
    paTestData data;
    int i;
    int deviceIndex;

    printf("PortAudio Test: output a sine blip on each channel. SR = %d, BufSize = %d, Chans = %d\n", SAMPLE_RATE, FRAMES_PER_BUFFER, CHANNEL_COUNT);

    err = Pa_Initialize();
    if( err != paNoError ) goto error;

	deviceIndex = Pa_GetHostApiInfo( Pa_HostApiTypeIdToHostApiIndex( paMME ) )->defaultOutputDevice;
	if( argc == 2 ){
		sscanf( argv[1], "%d", &deviceIndex );
	}

	printf( "using device id %d (%s)\n", deviceIndex, Pa_GetDeviceInfo(deviceIndex)->name );

    /* initialise sinusoidal wavetable */
    for( i=0; i<TABLE_SIZE; i++ )
    {
        data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
    }

	data.phase = 0;

    outputParameters.device = deviceIndex;
    outputParameters.channelCount = CHANNEL_COUNT;
    outputParameters.sampleFormat = paFloat32; /* 32 bit floating point processing */
    outputParameters.suggestedLatency = 0; /*Pa_GetDeviceInfo( outputParameters.device )->defaultLowOutputLatency;*/
    outputParameters.hostApiSpecificStreamInfo = NULL;

    wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo);
    wmmeStreamInfo.hostApiType = paMME; 
    wmmeStreamInfo.version = 1;
    wmmeStreamInfo.flags = paWinMmeUseLowLevelLatencyParameters | paWinMmeDontThrottleOverloadedProcessingThread;
    wmmeStreamInfo.framesPerBuffer = WMME_FRAMES_PER_BUFFER;
    wmmeStreamInfo.bufferCount = WMME_BUFFER_COUNT;
    outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo;
   

	if( Pa_IsFormatSupported( 0, &outputParameters, SAMPLE_RATE ) == paFormatIsSupported  ){
		printf( "Pa_IsFormatSupported reports device will support %d channels.\n", CHANNEL_COUNT );
	}else{
		printf( "Pa_IsFormatSupported reports device will not support %d channels.\n", CHANNEL_COUNT );
	}

    err = Pa_OpenStream(
              &stream,
              NULL, /* no input */
              &outputParameters,
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              paClipOff,      /* we won't output out of range samples so don't bother clipping them */
              patestCallback,
              &data );
    if( err != paNoError ) goto error;

    err = Pa_StartStream( stream );
    if( err != paNoError ) goto error;

    printf("Play for %d seconds.\n", NUM_SECONDS );
    Pa_Sleep( NUM_SECONDS * 1000 );

    err = Pa_StopStream( stream );
    if( err != paNoError ) goto error;

    err = Pa_CloseStream( stream );
    if( err != paNoError ) goto error;

    Pa_Terminate();
    printf("Test finished.\n");
    
    return err;
error:
    Pa_Terminate();
    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    return err;
}
Example #14
RTC::ReturnCode_t PortAudioInput::onActivated(RTC::UniqueId ec_id)
{
  RTC_DEBUG(("onActivated start"));
  PaStreamParameters inputParameters;
//  PaWasapiStreamInfo wasapiinfo;

  try {
    //m_pa_mutex.lock(); //by Irie Seisho

    m_format = getFormat(m_formatstr);
    m_totalframes = FRAMES_PER_BUFFER * m_channels;
    m_err = Pa_GetSampleSize(m_format);
    if( m_err > 0 ) {
      m_totalframes *= m_err;
    }

#if 0
    /* Find all WASAPI devices */
    const PaDeviceInfo *device;
    for ( int i = 0; i < Pa_GetDeviceCount(); ++i ) {
      device = Pa_GetDeviceInfo(i);
      if ( Pa_GetDeviceInfo(i)->hostApi == Pa_HostApiTypeIdToHostApiIndex(paWASAPI) ) {
        std::cout << "Device Index " << i << " : " << device->name << ", inch " << device->maxInputChannels << ", outch " << device->maxOutputChannels << std::endl;
      }
    }

//#if 0
    PaDeviceIndex dnum = Pa_GetDeviceCount();
    for (int i = 0; i < (int)dnum; i++) {
      std::cout << "Device Index " << i << " : " << Pa_GetDeviceInfo(i)->name << ", inch " << Pa_GetDeviceInfo(i)->maxInputChannels << ", outch " << Pa_GetDeviceInfo(i)->maxOutputChannels << std::endl;
    }
//#endif
    void *pFormat;
    unsigned int nFormatSize;
    PaDeviceIndex nDevice;
    int r = PaWasapi_GetDeviceDefaultFormat(pFormat, nFormatSize, nDevice);
#endif

    inputParameters.device = Pa_GetDefaultInputDevice(); //!< default input device
    if ( inputParameters.device < 0 ) {
      throw (paNotInitialized);
    }
    if ( m_channels > Pa_GetDeviceInfo(inputParameters.device)->maxInputChannels )
      m_channels = Pa_GetDeviceInfo(inputParameters.device)->maxInputChannels;
    inputParameters.channelCount = m_channels;
    inputParameters.sampleFormat = m_format;
    inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputParameters.device )->defaultLowInputLatency;

/*
    wasapiinfo.size = sizeof(PaWasapiStreamInfo);
    wasapiinfo.hostApiType = paWASAPI;
    wasapiinfo.version = 1;
    wasapiinfo.flags = paWasapiUseChannelSelectors;
    wasapiinfo.hostProcessorInput
    wasapiinfo.threadPriority
    wasapiinfo.channelMask = outputChannelSelectors;

    inputParameters.hostApiSpecificStreamInfo = wasapiinfo;
*/
    inputParameters.hostApiSpecificStreamInfo = NULL;
    m_err = Pa_OpenStream(
          &m_stream,         //!< PortAudioStream
          &inputParameters,  //!< InputParameters
          NULL,              //!< outputParameters
          m_samplerate,      //!< sampleRate
          FRAMES_PER_BUFFER, //!< framesPerBuffer
          paClipOff,         //!< streamFlags:we won't output out of range samples so don't bother clipping
//          StreamCB,          //!< streamCallback
//          this );            //!< callback userData
          NULL,              //!< streamCallback:no callback, use blocking API
          NULL );            //!< no callback, so no callback userData
    if( m_err != paNoError ) {
      throw m_err;
    }
#ifdef HAVE_LIBPORTMIXER
    m_mixer = Px_OpenMixer( m_stream, 0 );
    m_volume = Px_GetInputVolume( m_mixer );
#else
#if defined(_WIN32)
    if ( InitMixer() == true ) {
      DWORD vol;
      GetMicrophoneLevel(&vol);
    } else {
      CloseMixer();
    }
#elif defined(__linux)
    const char* sound_device_names[] = SOUND_DEVICE_NAMES;
    m_device = -1;
    m_fd = -1;
    for ( int i = 0; i < SOUND_MIXER_NRDEVICES; i ++ ) {
      std::cout << " device name : " << sound_device_names[i] << std::endl;
      if ( strcmp( "mic", sound_device_names[i] ) == 0 ) {
        m_device = i;
        break;
      }
    }
    if ( ( m_fd = open( "/dev/mixer", O_RDONLY ) ) == -1 ) {
      perror( "open" );
    }
#endif
#endif

    m_err = Pa_StartStream( m_stream );
    if( m_err != paNoError ) {
      throw m_err;
    }

    //m_pa_mutex.unlock(); //by Irie Seisho
  } catch (...) {
    std::string error_str = Pa_GetErrorText(m_err);
    RTC_WARN(("PortAudio failed onActivated:%s", error_str.c_str()));
    return RTC::RTC_ERROR;
  }

  syncflg = true;
  is_active = true;
  RTC_DEBUG(("onActivated finish"));
  return RTC::RTC_OK;
}
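Example #14 opens the stream with a NULL stream callback, i.e. it uses PortAudio's blocking read API instead of a callback. A minimal sketch of how such a stream is typically read (the helper name, the paInt16 buffer type and the frame count are assumptions):

/* Read 'frames' frames from a blocking-mode paInt16 input stream that was
   opened with a NULL stream callback, as in Example #14. */
static PaError readBlock( PaStream *stream, short *buffer, unsigned long frames )
{
    /* Pa_ReadStream blocks until the requested number of frames has been captured.
       paInputOverflowed indicates dropped input but is not fatal. */
    PaError err = Pa_ReadStream( stream, buffer, frames );
    if( err == paInputOverflowed )
        return paNoError;
    return err;
}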