Example #1
CaudioFifo::CaudioFifo(int samplerate, int framesize):
  m_frame_size(framesize),
  m_bquit(false)
{
  int i;

  m_frame_count = 32; // Must be a power of 2
  m_buffers_size = m_frame_count * framesize;
  m_psamples_in = new float[m_buffers_size];
  if (PaUtil_InitializeRingBuffer(&m_rbufin, framesize * sizeof(float), m_frame_count, m_psamples_in) == -1)
    {
      printf("Portaudio ringbuffer error: element count is not a power of 2.\n");
      exit(EXIT_FAILURE);
    }
  m_psamples_out = new float[m_buffers_size];
  if (PaUtil_InitializeRingBuffer(&m_rbufout, framesize * sizeof(float), m_frame_count, m_psamples_out) == -1)
    {
      printf("Portaudio ringbuffer error: element count is not a power of 2.\n");
      exit(EXIT_FAILURE);
    }
  for (i = 0; i < m_buffers_size; i++)
    {
      m_psamples_in[i] = m_psamples_out[i] = 0;
    }
}
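The constructor above only builds the two FIFOs; the stream callback that moves audio through them is not part of the excerpt. A minimal sketch, assuming the ring buffers are reachable from the callback's userData (public members or a friend callback), mono audio, and a stream opened with framesPerBuffer equal to framesize, so one ring-buffer element is one whole frame of framesize floats:

#include <cstring>        // std::memset
#include "portaudio.h"
#include "pa_ringbuffer.h"

// Sketch only: the callback name and the way the CaudioFifo pointer is wired up are assumptions.
static int fifoCallback(const void* input, void* output, unsigned long frameCount,
                        const PaStreamCallbackTimeInfo*, PaStreamCallbackFlags,
                        void* userData)
{
  CaudioFifo* fifo = static_cast<CaudioFifo*>(userData);
  // One element == one frame of framesize floats, so a count of 1 moves a full frame.
  PaUtil_WriteRingBuffer(&fifo->m_rbufin, input, 1);
  if (PaUtil_ReadRingBuffer(&fifo->m_rbufout, output, 1) == 0)
    std::memset(output, 0, frameCount * sizeof(float)); // underrun: output silence (mono assumed)
  return paContinue;
}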
Example #2
MLT3DHub::MLT3DHub() :
	mDataRate(-1),
	mT3DWaitTime(0),
	mEnabled(false),
	mUDPPortOffset(0),
	mReceivingT3d(false),
	mConnected(0),
	mShouldConnect(false),
	mShouldDisconnect(false)
{
	// initialize touch frame for output
	mOutputFrame.setDims(MLT3DHub::kFrameWidth, MLT3DHub::kFrameHeight);

	setShortName("<unnamed hub>");
	
	// build touch frame buffer
	//
	mFrameBuf.buffer = 0;
	MLSample* pFrameData;
	pFrameData = mTouchFrames.setDims(MLT3DHub::kFrameWidth, MLT3DHub::kFrameHeight, MLT3DHub::kFrameBufferSize);
	if (pFrameData)
	{
		PaUtil_InitializeRingBuffer(&mFrameBuf, mTouchFrames.getZStride(), MLT3DHub::kFrameBufferSize, pFrameData);
	}
	else
	{
		debug() << "MLT3DHub::initialize: couldn't get frame data!\n";
	}

	setPortOffset(0);
	
	// start protocol polling
	startTimer(500);
}
Example #3
MLProc::err MLProcRingBuffer::resize() 
{	
	MLProc::err e = OK;
	int size = 1 << bitsToContain((int)getParam("length"));
	void * buf;
	
	// debug() << "allocating " << size << " samples for ringbuffer " << getName() << "\n";

	buf = mRing.setDims(size);
	
	if (!buf)
	{
		debug() << "MLRingBuffer: allocate failed!\n";
		e = memErr;
	}
	else
	{	
		PaUtil_InitializeRingBuffer( &mBuf, sizeof(MLSample), size, buf );
        
		// get trash signal
		if (getParam("mode") != eMLRingBufferNoTrash)
		{
			mTrashSignal.setDims(size);	
		}
	}

	return e;
}
Example #4
File: main.cpp Project: audioprog/parec
int main(int argc, char *argv[])
{
    if (argc == 2)
    {
        QCoreApplication ca(argc, argv);

        if (ca.arguments().at(1) == "-s")
        {
            LsPaConnection(NULL).printDeviceList();
        }
        return 0;
    }

    QApplication a(argc, argv);

    PaUtilRingBuffer rb;
    char buffer[8 * 1024 * 16];
    ring_buffer_size_t size = PaUtil_InitializeRingBuffer(&rb, 8, 1024 * 16, &buffer[0]);
    Q_UNUSED(size);

    PaUtil_FlushRingBuffer(&rb);

    int ret = -1;

    LsPaConnection connection(&rb);

    Control control;

    control.setPortAudioRec(&connection);

    if (a.arguments().count() > 2)
    {
        bool ok = false;
        int nr = a.arguments().at(1).toInt(&ok);
        QString deviceName;
        if ( ! ok )
        {
            deviceName = "Line 3/4 (M-Audio Delta 1010LT)[Windows WASAPI]";
        }
        else
        {
            deviceName = connection.getDeviceName(nr);
        }
        if (deviceName != "")
        {
            control.startRecording(deviceName, a.arguments().at(2));
        }

        ret = a.exec();
    }
    else
    {
        ThreatedServerSocket server(&control);
        server.start();

        ret = a.exec();
    }

    return ret;
}
Example #5
MLPluginController::MLPluginController(MLPluginProcessor* pProcessor) :
	MLWidget::Listener(),
	MLReporter(),
	MLSignalReporter(pProcessor),
	mpView(nullptr),
	mpProcessor(pProcessor),
	mClockDivider(0),
	mConvertingPresets(false),
	mFilesConverted(0),
	mProtocolMenuItemStart(0),
	mOSCMenuItemStart(0)
{
	// initialize reference
	WeakReference<MLPluginController> initWeakReference = this;
	
	createMenu("key_scale");
	createMenu("preset");
	createMenu("settings");

	listenTo(pProcessor);
	listenTo(pProcessor->getEnvironment());
#if ML_MAC
	// The queue starts empty here; convertPresets() resizes mFileActionData
	// and re-initializes the ring buffer with a real power-of-two size.
	mFileActionData.resize(0);
	PaUtil_InitializeRingBuffer( &mFileActionQueue, sizeof(FileAction), 0, &(mFileActionData[0]) );
#endif
}
Example #6
MLReporter::MLReporter()
{
	int size = 1 << 10;
	mChangeData.resize(size);
	PaUtil_InitializeRingBuffer( &mChangeQueue, sizeof(MLSymbol), size, &(mChangeData[0]) );
	mpTimer = std::tr1::shared_ptr<ReporterTimer>(new ReporterTimer(this));
	mpTimer->startTimer(33);
}
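The queue built here is filled from the processing side and drained on the 33 ms timer; neither side is shown in the excerpt. A sketch of the push, with a hypothetical method name:

// Hypothetical method name; illustrates the intended use of mChangeQueue.
void MLReporter::enqueueSymbolChange(MLSymbol name)
{
	// Drop the change if the queue is full rather than blocking the caller.
	if (PaUtil_GetRingBufferWriteAvailable(&mChangeQueue) > 0)
	{
		PaUtil_WriteRingBuffer(&mChangeQueue, &name, 1);
	}
}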
Example #7
/* Allocate buffer. */
static PaError PABLIO_InitFIFO(PaUtilRingBuffer * rbuf, long numFrames, long bytesPerFrame)
{
	long numBytes = numFrames * bytesPerFrame;
	char *buffer = (char *) malloc(numBytes);
	if (buffer == NULL)
		return paInsufficientMemory;
	memset(buffer, 0, numBytes);
	/* The current pa_ringbuffer API takes (element size, element count);
	   numFrames must be a power of two. */
	return (PaError) PaUtil_InitializeRingBuffer(rbuf, bytesPerFrame, numFrames, buffer);
}
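The matching teardown (in the spirit of PABLIO's PABLIO_TermFIFO) just releases the malloc'd block; a sketch:

/* Free buffer. (Sketch; mirrors PABLIO_InitFIFO above.) */
static void PABLIO_TermFIFO(PaUtilRingBuffer *rbuf)
{
	if (rbuf->buffer != NULL)
		free(rbuf->buffer);
	rbuf->buffer = NULL;
}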
Example #8
// only convert .aupreset (AU) to .mlpreset (VST) now. After Aalto 1.6 / Kaivo 1.2 there will be no need to convert presets.
void MLPluginController::convertPresets()
{
    if(!mpProcessor) return;
	
	mMaxFileQueueSize = 0;
	mFilesProcessed = 0;
	mFilesConverted = 0;
	
    File presetsFolder = getDefaultFileLocation(kOldPresetFiles);
    if (presetsFolder != File::nonexistent)
    {
		// turn off audio -- will be turned back on by finish or cancel
        mpProcessor->suspendProcessing(true);
		
		// clear presets collection
		mpProcessor->clearPresetCollection();

		// clear menu
		findMenuByName("preset")->clear();
		
		mPresetsToConvertAU1 = std::auto_ptr<MLFileCollection>(new MLFileCollection("convert_presets_au1", getDefaultFileLocation(kOldPresetFiles), ".aupreset"));
		mPresetsToConvertAU1->addListener(this);
		mPresetsToConvertAU2 = std::auto_ptr<MLFileCollection>(new MLFileCollection("convert_presets_au2", getDefaultFileLocation(kOldPresetFiles2), ".aupreset"));
		mPresetsToConvertAU2->addListener(this);
		mPresetsToConvertVST1 = std::auto_ptr<MLFileCollection>(new MLFileCollection("convert_presets_vst1", getDefaultFileLocation(kOldPresetFiles), ".mlpreset"));
		mPresetsToConvertVST1->addListener(this);
		mPresetsToConvertVST2 = std::auto_ptr<MLFileCollection>(new MLFileCollection("convert_presets_vst2", getDefaultFileLocation(kOldPresetFiles2), ".mlpreset"));
		mPresetsToConvertVST2->addListener(this);
		
		mFilesToProcess = 0;
		mFilesToProcess += mPresetsToConvertAU1->searchForFilesImmediate();
		mFilesToProcess += mPresetsToConvertAU2->searchForFilesImmediate();
		mFilesToProcess += mPresetsToConvertVST1->searchForFilesImmediate();
		mFilesToProcess += mPresetsToConvertVST2->searchForFilesImmediate();
		
		int fileBufferSize = 1 << bitsToContain(mFilesToProcess);
		mFileActionData.resize(fileBufferSize);
		PaUtil_InitializeRingBuffer( &mFileActionQueue, sizeof(FileAction), fileBufferSize, &(mFileActionData[0]) );
		
		// convert files in immediate mode and wait for finish.
		int interFileDelay = 0;
		mPresetsToConvertAU1->processFilesImmediate(interFileDelay);
		mPresetsToConvertAU2->processFilesImmediate(interFileDelay);
		mPresetsToConvertVST1->processFilesImmediate(interFileDelay);
		mPresetsToConvertVST2->processFilesImmediate(interFileDelay);
		mConvertingPresets = true;
		
		(new ConvertProgressDisplayThread(this))->launchThread();
		
//		startTimer(5); // speed up action for convert
    }
    else
    {
        debug() << "convertPresets: couldn't find preset folder " << presetsFolder.getFullPathName() << ".\n";
    }
}
Example #9
int MLRingBuffer::resize(int length)
{
    int r = 0;
    int size = 1 << bitsToContain(length);
    if (pData) delete[] pData;
    pData = new MLSample[size];

    if (pData)
    {
        r = size;
        PaUtil_InitializeRingBuffer( &mBuf, sizeof(MLSample), size, pData );
    }

    return r;
}
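The read/write side of MLRingBuffer is not shown; minimal wrappers would simply forward to the PortAudio calls, roughly as below (method names are illustrative, not taken from the source):

// Illustrative wrappers; the real MLRingBuffer methods may differ.
int MLRingBuffer::write(const MLSample* pSrc, int samples)
{
    return (int)PaUtil_WriteRingBuffer(&mBuf, pSrc, samples); // samples actually queued
}

int MLRingBuffer::read(MLSample* pDest, int samples)
{
    return (int)PaUtil_ReadRingBuffer(&mBuf, pDest, samples); // samples actually dequeued
}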
Example #10
	void Initialize(bool StartThread)
	{
		RingbufData = new char[BUFF_SIZE*sizeof(float)];

		WaitForRingbufferSpace = false;

		PaUtil_InitializeRingBuffer(&RingBuf, sizeof(float), BUFF_SIZE, RingbufData);

		Threaded = StartThread;
		Stream = nullptr;

		if (StartThread)
		{
			thread(&PaMixer::Run, this).detach();
		}
#ifdef WIN32
		if (UseWasapi)
		{
			OpenStream(&Stream, GetWasapiDevice(), 44100, (void*) this, Latency, Mix);

			if (!Stream)
			{
				// This was a Wasapi problem. Retry without it.
				Log::Logf("Problem initializing WASAPI. Falling back to default API.");
				UseWasapi = false;
				OpenStream(&Stream, Pa_GetDefaultOutputDevice(), 44100, (void*) this, Latency, Mix);
			}

		}
		else
		{
			OpenStream(&Stream, DefaultDSDevice, 44100, (void*) this, Latency, Mix);
		}
#else
		OpenStream( &Stream, Pa_GetDefaultOutputDevice(), 44100, (void*) this, Latency, Mix );

#endif

		if (Stream)
		{
			Pa_StartStream(Stream);
			std::this_thread::sleep_for(std::chrono::milliseconds(16));
			Latency = Pa_GetStreamInfo(Stream)->outputLatency;
			Log::Logf("AUDIO: Latency after opening stream = %f \n", Latency);
		}

		ConstFactor = 1.0;
	}
Example #11
bool AudioStream::Open(const char* Filename)
{
	mSource = SourceFromExt(Filename);

	if (mSource && mSource->IsValid())
	{
		Channels = mSource->GetChannels();

		mBufferSize = BUFF_SIZE;

		mData = new short[mBufferSize];
		PaUtil_InitializeRingBuffer(&mRingBuf, sizeof(int16), mBufferSize, mData);

		mStreamTime = mPlaybackTime = 0;
		
		SeekTime(0);

		return true;
	}else
		return false;
}
Example #12
/*****************************************************************************
 * Open: open and setup a HAL AudioUnit to do analog (multichannel) audio output
 *****************************************************************************/
int CoreAudioAUHAL::OpenPCM(struct CoreAudioDeviceParameters *deviceParameters, const CStdString& strName, int channels, float sampleRate, int bitsPerSample, int packetSize)
{
    OSStatus                    err = noErr;
    UInt32                      i_param_size = 0;
    ComponentDescription        desc;
    AudioStreamBasicDescription DeviceFormat;
    AURenderCallbackStruct      input;

    // We're non-digital.
    s_lastPlayWasSpdif = false;
    
    /* Lets go find our Component */
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_HALOutput;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    deviceParameters->au_component = FindNextComponent( NULL, &desc );
    if(deviceParameters->au_component == NULL)
    {
		CLog::Log(LOGERROR, "we cannot find our HAL component");
        return false;
    }

    err = OpenAComponent( deviceParameters->au_component, &deviceParameters->au_unit );
    if( err != noErr )
    {
		CLog::Log(LOGERROR, "we cannot open our HAL component");
        return false;
    }

    AudioDeviceID selectedDeviceID = deviceArray->getSelectedDevice()->getDeviceID();
    
    /* Set the device we will use for this output unit */
    err = AudioUnitSetProperty(deviceParameters->au_unit,
							   kAudioOutputUnitProperty_CurrentDevice,
							   kAudioUnitScope_Input,
							   0,
							   &selectedDeviceID,
							   sizeof(AudioDeviceID));

    if( err != noErr )
    {
		CLog::Log(LOGERROR, "we cannot select the audio device");
        return false;
    }

    /* Get the current format */
    i_param_size = sizeof(AudioStreamBasicDescription);

    err = AudioUnitGetProperty(deviceParameters->au_unit,
							   kAudioUnitProperty_StreamFormat,
							   kAudioUnitScope_Input,
							   0,
							   &deviceParameters->sfmt_revert,
							   &i_param_size );

    if( err != noErr ) return false;
    else CLog::Log(LOGINFO, STREAM_FORMAT_MSG("current format is: ", deviceParameters->sfmt_revert) );

    /* Set up the format to be used */
    DeviceFormat.mSampleRate = sampleRate;
    DeviceFormat.mFormatID = kAudioFormatLinearPCM;
    DeviceFormat.mFormatFlags = (bitsPerSample == 32 ? kLinearPCMFormatFlagIsFloat : kLinearPCMFormatFlagIsSignedInteger);
    DeviceFormat.mBitsPerChannel = bitsPerSample;
    DeviceFormat.mChannelsPerFrame = channels;

    /* Calculate framesizes and stuff */
    DeviceFormat.mFramesPerPacket = 1;
    DeviceFormat.mBytesPerFrame = DeviceFormat.mBitsPerChannel/8 * DeviceFormat.mChannelsPerFrame;
    DeviceFormat.mBytesPerPacket = DeviceFormat.mBytesPerFrame * DeviceFormat.mFramesPerPacket;

    /* Set the desired format */
    i_param_size = sizeof(AudioStreamBasicDescription);
    verify_noerr( AudioUnitSetProperty(deviceParameters->au_unit,
									   kAudioUnitProperty_StreamFormat,
									   kAudioUnitScope_Input,
									   0,
									   &DeviceFormat,
									   i_param_size ));

	CLog::Log(LOGINFO, STREAM_FORMAT_MSG( "we set the AU format: " , DeviceFormat ) );

    /* Retrieve actual format */
    verify_noerr( AudioUnitGetProperty(deviceParameters->au_unit,
									   kAudioUnitProperty_StreamFormat,
									   kAudioUnitScope_Input,
									   0,
									   &deviceParameters->stream_format,
									   &i_param_size ));

	CLog::Log(LOGINFO, STREAM_FORMAT_MSG( "the actual set AU format is " , DeviceFormat ) );

    /* set the IOproc callback */
	input.inputProc = (AURenderCallback) RenderCallbackAnalog;
	input.inputProcRefCon = deviceParameters;

    verify_noerr( AudioUnitSetProperty(deviceParameters->au_unit,
									   kAudioUnitProperty_SetRenderCallback,
									   kAudioUnitScope_Global,
									   0, &input, sizeof(input)));

    /* AU initialize */
    verify_noerr( AudioUnitInitialize(deviceParameters->au_unit));

	// Get AU hardware buffer size

	uint32_t audioDeviceLatency, audioDeviceBufferFrameSize, audioDeviceSafetyOffset;
	deviceParameters->hardwareFrameLatency = 0;
	
	i_param_size = sizeof(uint32_t);

	verify_noerr( AudioUnitGetProperty(deviceParameters->au_unit,
									   kAudioDevicePropertyLatency,
									   kAudioUnitScope_Global,
									   0,
									   &audioDeviceLatency,
									   &i_param_size ));

	deviceParameters->hardwareFrameLatency += audioDeviceLatency;

	verify_noerr( AudioUnitGetProperty(deviceParameters->au_unit,
									   kAudioDevicePropertyBufferFrameSize,
									   kAudioUnitScope_Global,
									   0,
									   &audioDeviceBufferFrameSize,
									   &i_param_size ));

	deviceParameters->hardwareFrameLatency += audioDeviceBufferFrameSize;

	verify_noerr( AudioUnitGetProperty(deviceParameters->au_unit,
									   kAudioDevicePropertySafetyOffset,
									   kAudioUnitScope_Global,
									   0,
									   &audioDeviceSafetyOffset,
									   &i_param_size ));
	
	deviceParameters->hardwareFrameLatency += audioDeviceSafetyOffset;

	CLog::Log(LOGINFO, "Hardware latency: %i frames (%.2f msec @ %.0fHz)", deviceParameters->hardwareFrameLatency,
			  (float)deviceParameters->hardwareFrameLatency / deviceParameters->stream_format.mSampleRate * 1000,
			  deviceParameters->stream_format.mSampleRate);

	// initialise the CoreAudio sink buffer
	uint32_t framecount = 1;
	while(framecount <= deviceParameters->stream_format.mSampleRate) // ensure power of 2
	{
		framecount <<= 1;
	}
	deviceParameters->outputBuffer = (PaUtilRingBuffer *)malloc(sizeof(PaUtilRingBuffer));
	deviceParameters->outputBufferData = malloc(framecount * deviceParameters->stream_format.mBytesPerFrame);

	PaUtil_InitializeRingBuffer(deviceParameters->outputBuffer,
								deviceParameters->stream_format.mBytesPerFrame,
								framecount, deviceParameters->outputBufferData);


    /* Start the AU */
    verify_noerr( AudioOutputUnitStart(deviceParameters->au_unit) );

    return true;
}
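RenderCallbackAnalog, registered above, reads from the ring buffer that was just initialized. A minimal sketch of that read side, assuming deviceParameters arrives via inRefCon and ignoring any format conversion; note the real callback is cast to AURenderCallback above, so its actual signature and handling may differ:

// Sketch only; the real RenderCallbackAnalog also handles volume, underruns, etc.
static OSStatus RenderCallbackAnalog(void* inRefCon,
                                     AudioUnitRenderActionFlags* ioActionFlags,
                                     const AudioTimeStamp* inTimeStamp,
                                     UInt32 inBusNumber, UInt32 inNumberFrames,
                                     AudioBufferList* ioData)
{
    CoreAudioDeviceParameters* p = (CoreAudioDeviceParameters*)inRefCon;
    char* dest = (char*)ioData->mBuffers[0].mData;
    // Element size is mBytesPerFrame, so the count is in frames.
    ring_buffer_size_t got = PaUtil_ReadRingBuffer(p->outputBuffer, dest, inNumberFrames);
    if ((UInt32)got < inNumberFrames) // underrun: zero the remainder
        memset(dest + got * p->stream_format.mBytesPerFrame, 0,
               (inNumberFrames - got) * p->stream_format.mBytesPerFrame);
    return noErr;
}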
Example #13
File: main.cpp Project: rgerganov/usdr
int main(int argc, char *argv[])
{
    if (SDL_Init(SDL_INIT_VIDEO) != 0) {
        fprintf(stderr, "SDL_Init Error: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Window *window = SDL_CreateWindow("uSDR", SDL_WINDOWPOS_CENTERED,
        SDL_WINDOWPOS_CENTERED, 640, 480, SDL_WINDOW_SHOWN);
    if (window == NULL) {
        fprintf(stderr, "CreateWindow Error: %s\n", SDL_GetError());
        return 1;
    }
    SDL_Renderer *renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (renderer == NULL) {
        fprintf(stderr, "CreateRenderer Error: %s\n", SDL_GetError());
        return 1;
    }
    PaError err = Pa_Initialize();
    if (err != paNoError) {
        fprintf(stderr, "PaInit error: %s\n", Pa_GetErrorText(err));
        return 1;
    }
    //init_hackrf();

    float *rb_low_data = new float[LOW_BUF_SZIE]();
    PaUtilRingBuffer rbuf_low;
    if (PaUtil_InitializeRingBuffer(&rbuf_low, sizeof(float), LOW_BUF_SZIE, rb_low_data) < 0) {
        fprintf(stderr, "Cannot init rbuf_low\n");
        return 1;
    }
    float *rb_high_data = new float[HIGH_BUF_SZIE]();
    PaUtilRingBuffer rbuf_high;
    if (PaUtil_InitializeRingBuffer(&rbuf_high, sizeof(float), HIGH_BUF_SZIE, rb_high_data) < 0) {
        fprintf(stderr, "Cannot init rbuf_high\n");
        return 1;
    }

    SDL_Event e;
    bool quit = false;
    bool recording = false;
    PaStream *stream;
    uint32_t frames_count = 0;
    uint32_t start = SDL_GetTicks();

    while (!quit) {
        while (SDL_PollEvent(&e)) {
            if (e.type == SDL_QUIT) {
                quit = true;
            } else if (e.type == SDL_KEYDOWN && e.key.keysym.sym == SDLK_r) {
                if (!recording) {
                    printf("start recording\n");
                    //hackrf_stop_rx(device);
                    //recording = start_capture(&stream, &rbuf);
                    recording = start_capture_fake(&rbuf_low);
                    usleep(300 * 1000);
                    recording = recording && start_dsp_tx(&rbuf_low, &rbuf_high);
                    recording = recording && start_tx_fake(&rbuf_high);
                    fflush(stdout);
                }
            } else if (e.type == SDL_KEYUP && e.key.keysym.sym == SDLK_r) {
                if (recording) {
                    printf("stop recording\n");
                    //stop_capture(stream);
                    stop_capture_fake();
                    stop_dsp_tx();
                    stop_tx_fake();
                    //hackrf_start_rx(device, rx_callback, NULL);
                    recording = false;
                    fflush(stdout);
                }
            }
        }
        SDL_RenderClear(renderer);
        SDL_RenderPresent(renderer);
        float fps = frames_count / ((SDL_GetTicks() - start) / 1000.f);
        printf("fps = %.4f     \r", fps); fflush(stdout);
        ++frames_count;
    }
    delete[] rb_low_data;
    delete[] rb_high_data;
    //close_hackrf();
    Pa_Terminate();
    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}
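PaUtil_InitializeRingBuffer returns -1 when the element count is not a power of two, which is what the two checks above (and the error message in Example #1) guard against. A small helper that rounds a requested count up to an acceptable size, similar in spirit to the bitsToContain() calls in the Madrona examples:

// Illustrative helper, not part of the project above: round up to a power of two
// so PaUtil_InitializeRingBuffer will accept the element count.
static ring_buffer_size_t nextPow2(ring_buffer_size_t count)
{
    ring_buffer_size_t n = 1;
    while (n < count)
        n <<= 1;
    return n;
}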
Example #14
bool audio_init(audio_t *a, size_t sample_rate, size_t n_channels, size_t samples_per_chunk)
{
    //***Initialize PA internal data structures******
    if(Pa_Initialize() != paNoError)
        return false;

    size_t frames_per_buffer = samples_per_chunk / n_channels;

    //******Initialize input device*******
    PaStreamParameters inparams;
    PaDeviceIndex dev;
    PaTime lat;

    dev = Pa_GetDefaultInputDevice();
    if(dev == paNoDevice)
        return false;
    lat = Pa_GetDeviceInfo(dev)->defaultLowInputLatency;

    inparams = (PaStreamParameters) {
        .device = dev,
        .channelCount = n_channels,
        .sampleFormat = paInt16,
        .suggestedLatency = lat,
        .hostApiSpecificStreamInfo = NULL
    };

    //******Initialize output device*******
    PaStreamParameters outparams;
    //**************
    dev = Pa_GetDefaultOutputDevice();
    if(dev == paNoDevice)
        return false;
    lat = Pa_GetDeviceInfo(dev)->defaultLowOutputLatency;

    outparams = (PaStreamParameters) {
        .device = dev,
        .channelCount = n_channels,
        .sampleFormat = paInt16,
        .suggestedLatency = lat,
        .hostApiSpecificStreamInfo = NULL
    };

    //********Open play stream*******
    PaStream *pstream=NULL;
    if(Pa_OpenStream(
                &pstream,
                NULL,
                &outparams,
                sample_rate,
                frames_per_buffer,
                paClipOff,
                playCallback,
                a) != paNoError) return false; //**************

    //********Open record and listen stream*******
    PaStream *rstream=NULL;
    if(Pa_OpenStream(
                &rstream,
                &inparams,
                NULL,
                sample_rate,
                frames_per_buffer,
                paClipOff,
                recordCallback,
                a) != paNoError) return false;

    //*****Initialize communication ring buffers********************
    PaUtilRingBuffer rb;
    void *rb_data;
    size_t rb_size;

    rb_size = 1 << (sizeof(samples_per_chunk) * CHAR_BIT - __builtin_clz(samples_per_chunk * RB_MULTIPLIER));
    rb_data = malloc(sizeof(audio_sample_t) * rb_size);
    PaUtil_InitializeRingBuffer(&rb, sizeof(audio_sample_t), rb_size, rb_data);

    *a = (audio_t) {
        .sample_rate = sample_rate,
        .n_channels = n_channels,
        .samples_per_chunk = samples_per_chunk,

        .pstream = pstream,
        .rstream = rstream,

        .wakeup = false,
        .wakeup_sig = PTHREAD_COND_INITIALIZER,
        .wakeup_mutex = PTHREAD_MUTEX_INITIALIZER,

        .flags = DEFAULT,

        .prbuf = NULL,
        .prbuf_size = 0,
        .prbuf_offset = 0,

        .rb = rb,
        .rb_data = rb_data
    };

    return true;
}

void audio_destroy(audio_t *a)
{
    Pa_CloseStream(a->pstream);
    Pa_CloseStream(a->rstream);
    Pa_Terminate();

    pthread_cond_destroy(&a->wakeup_sig);
    pthread_mutex_destroy(&a->wakeup_mutex);

    audio_clear(a);
    free(a->rb_data);
}
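recordCallback and playCallback, passed to Pa_OpenStream above, are not part of the excerpt. A minimal sketch of the record side feeding the ring buffer (the real callback presumably also signals wakeup_sig; that part is omitted):

/* Sketch only; assumes the audio_t layout shown in audio_init above. */
static int recordCallback(const void *input, void *output, unsigned long frameCount,
                          const PaStreamCallbackTimeInfo *timeInfo,
                          PaStreamCallbackFlags statusFlags, void *userData)
{
    audio_t *a = (audio_t *) userData;
    (void) output; (void) timeInfo; (void) statusFlags;

    /* One ring-buffer element is one audio_sample_t, so push
       frameCount * n_channels interleaved samples. */
    PaUtil_WriteRingBuffer(&a->rb, input, frameCount * a->n_channels);
    return paContinue;
}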
Example #15
/* This should be called with the relevant info when initializing a stream for
   callback. */
PaError initializeBlioRingBuffers(
                                       PaMacBlio *blio,
                                       PaSampleFormat inputSampleFormat,
                                       PaSampleFormat outputSampleFormat,
                                       size_t framesPerBuffer,
                                       long ringBufferSize,
                                       int inChan,
                                       int outChan )
{
   void *data;
   int result;
   OSStatus err;

   /* zeroify things */
   bzero( blio, sizeof( PaMacBlio ) );
   /* this is redundant, but the buffers are used to check
      if the buffers have been initialized, so we do it explicitly. */
   blio->inputRingBuffer.buffer = NULL;
   blio->outputRingBuffer.buffer = NULL;

   /* initialize simple data */
   blio->ringBufferFrames = ringBufferSize;
   blio->inputSampleFormat = inputSampleFormat;
   blio->inputSampleSizeActual = computeSampleSizeFromFormat(inputSampleFormat);
   blio->inputSampleSizePow2 = computeSampleSizeFromFormatPow2(inputSampleFormat);
   blio->outputSampleFormat = outputSampleFormat;
   blio->outputSampleSizeActual = computeSampleSizeFromFormat(outputSampleFormat);
   blio->outputSampleSizePow2 = computeSampleSizeFromFormatPow2(outputSampleFormat);

   blio->framesPerBuffer = framesPerBuffer;
   blio->inChan = inChan;
   blio->outChan = outChan;
   blio->statusFlags = 0;
   blio->errors = paNoError;
#ifdef PA_MAC_BLIO_MUTEX
   blio->isInputEmpty = false;
   blio->isOutputFull = false;
#endif

   /* setup ring buffers */
#ifdef PA_MAC_BLIO_MUTEX
   result = PaMacCore_SetUnixError( pthread_mutex_init(&(blio->inputMutex),NULL), 0 );
   if( result )
      goto error;
   result = UNIX_ERR( pthread_cond_init( &(blio->inputCond), NULL ) );
   if( result )
      goto error;
   result = UNIX_ERR( pthread_mutex_init(&(blio->outputMutex),NULL) );
   if( result )
      goto error;
   result = UNIX_ERR( pthread_cond_init( &(blio->outputCond), NULL ) );
#endif
   if( inChan ) {
      data = calloc( ringBufferSize, blio->inputSampleSizePow2*inChan );
      if( !data )
      {
         result = paInsufficientMemory;
         goto error;
      }

      err = PaUtil_InitializeRingBuffer(
            &blio->inputRingBuffer,
            1, ringBufferSize*blio->inputSampleSizePow2*inChan,
            data );
      assert( !err );
   }
   if( outChan ) {
      data = calloc( ringBufferSize, blio->outputSampleSizePow2*outChan );
      if( !data )
      {
         result = paInsufficientMemory;
         goto error;
      }

      err = PaUtil_InitializeRingBuffer(
            &blio->outputRingBuffer,
            1, ringBufferSize*blio->outputSampleSizePow2*outChan,
            data );
      assert( !err );
   }

   result = resetBlioRingBuffers( blio );
   if( result )
      goto error;

   return 0;

 error:
   destroyBlioRingBuffers( blio );
   return result;
}
Example #16
/*****************************************************************************
 * Setup a encoded digital stream (SPDIF)
 *****************************************************************************/
int CoreAudioAUHAL::OpenSPDIF(struct CoreAudioDeviceParameters *deviceParameters, const CStdString& strName, int channels, float sampleRate, int bitsPerSample, int packetSize)

{
	OSStatus                err = noErr;
    UInt32                  i_param_size = 0, b_mix = 0;
    Boolean                 b_writeable = false;
    AudioStreamID           *p_streams = NULL;
    int                     i = 0, i_streams = 0;

    // We're digital.
    s_lastPlayWasSpdif = true;
    
    /* Start doing the SPDIF setup process */
    //deviceParameters->b_digital = true;
	deviceParameters->b_changed_mixing = false;

    /* Hog the device */
    i_param_size = sizeof(deviceParameters->i_hog_pid);
    deviceParameters->i_hog_pid = getpid();

    err = AudioDeviceSetProperty(deviceParameters->device_id, 0, 0, FALSE,
								 kAudioDevicePropertyHogMode, i_param_size, &deviceParameters->i_hog_pid);

    if( err != noErr )
    {
		CLog::Log(LOGERROR, "Failed to set hogmode: [%4.4s]", (char *)&err );
        return false;
    }

    /* Set mixable to false if we are allowed to */
    err = AudioDeviceGetPropertyInfo(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertySupportsMixing,
									 &i_param_size, &b_writeable );

    err = AudioDeviceGetProperty(deviceParameters->device_id, 0, FALSE, kAudioDevicePropertySupportsMixing,
								 &i_param_size, &b_mix );

    if( !err && b_writeable )
    {
        b_mix = 0;
        err = AudioDeviceSetProperty( deviceParameters->device_id, 0, 0, FALSE,
									 kAudioDevicePropertySupportsMixing, i_param_size, &b_mix );
        deviceParameters->b_changed_mixing = true;
    }

    if( err != noErr )
    {
		CLog::Log(LOGERROR, "Failed to set mixmode: [%4.4s]", (char *)&err );
        return false;
    }

    /* Get a list of all the streams on this device */
    err = AudioDeviceGetPropertyInfo(deviceParameters->device_id, 0, FALSE,
									 kAudioDevicePropertyStreams,
									 &i_param_size, NULL );
    if( err != noErr )
    {
		CLog::Log(LOGERROR, "Could not get number of streams: [%4.4s]", (char *)&err );
        return false;
    }

    i_streams = i_param_size / sizeof( AudioStreamID );
    p_streams = (AudioStreamID *)malloc( i_param_size );
    if( p_streams == NULL )
        return false;

    err = AudioDeviceGetProperty(deviceParameters->device_id, 0, FALSE,
								 kAudioDevicePropertyStreams,
								 &i_param_size, p_streams );

    if( err != noErr )
    {
		CLog::Log(LOGERROR, "Could not get number of streams: [%4.4s]", (char *)&err );
        free( p_streams );
        return false;
    }

    for( i = 0; i < i_streams && deviceParameters->i_stream_index < 0 ; i++ )
    {
        /* Find a stream with a cac3 stream */
        AudioStreamBasicDescription *p_format_list = NULL;
        int                         i_formats = 0, j = 0;
        bool                  b_digital = false;

        /* Retrieve all the stream formats supported by each output stream */
        err = AudioStreamGetPropertyInfo( p_streams[i], 0,
										 kAudioStreamPropertyPhysicalFormats,
										 &i_param_size, NULL );
        if( err != noErr )
        {
			CLog::Log(LOGERROR, "Could not get number of streamformats: [%4.4s]", (char *)&err );
            continue;
        }

        i_formats = i_param_size / sizeof( AudioStreamBasicDescription );
        p_format_list = (AudioStreamBasicDescription *)malloc( i_param_size );
        if( p_format_list == NULL )
            continue;

        err = AudioStreamGetProperty( p_streams[i], 0,
									 kAudioStreamPropertyPhysicalFormats,
									 &i_param_size, p_format_list );
        if( err != noErr )
        {
			CLog::Log(LOGERROR, "Could not get the list of streamformats: [%4.4s]", (char *)&err );
            free( p_format_list );
            continue;
        }

        /* Check if one of the supported formats is a digital format */
        for( j = 0; j < i_formats; j++ )
        {
            if( p_format_list[j].mFormatID == 'IAC3' ||
			   p_format_list[j].mFormatID == kAudioFormat60958AC3 )
            {
                b_digital = true;
                break;
            }
        }

        if( b_digital )
        {
            /* if this stream supports a digital (cac3) format, then go set it. */
            int i_requested_rate_format = -1;
            int i_current_rate_format = -1;
            int i_backup_rate_format = -1;

            deviceParameters->i_stream_id = p_streams[i];
            deviceParameters->i_stream_index = i;

            if(deviceParameters->b_revert == false )
            {
                /* Retrieve the original format of this stream first if not done so already */
                i_param_size = sizeof(deviceParameters->sfmt_revert);
                err = AudioStreamGetProperty(deviceParameters->i_stream_id, 0,
											 kAudioStreamPropertyPhysicalFormat,
											 &i_param_size,
											 &deviceParameters->sfmt_revert );
                if( err != noErr )
                {
					CLog::Log(LOGERROR, "Could not retrieve the original streamformat: [%4.4s]", (char *)&err );
                    //continue;
                }
                else deviceParameters->b_revert = true;
            }

            for( j = 0; j < i_formats; j++ )
            {
                if( p_format_list[j].mFormatID == 'IAC3' ||
				   p_format_list[j].mFormatID == kAudioFormat60958AC3 )
                {
                    if( p_format_list[j].mSampleRate == sampleRate)
                    {
                        i_requested_rate_format = j;
                        break;
                    }
                    else if( p_format_list[j].mSampleRate == deviceParameters->sfmt_revert.mSampleRate )
                    {
                        i_current_rate_format = j;
                    }
                    else
                    {
                        if( i_backup_rate_format < 0 || p_format_list[j].mSampleRate > p_format_list[i_backup_rate_format].mSampleRate )
                            i_backup_rate_format = j;
                    }
                }

            }

            if( i_requested_rate_format >= 0 ) /* We prefer to output at the samplerate of the original audio */
                deviceParameters->stream_format = p_format_list[i_requested_rate_format];
            else if( i_current_rate_format >= 0 ) /* If not possible, we will try to use the current samplerate of the device */
                deviceParameters->stream_format = p_format_list[i_current_rate_format];
            else deviceParameters->stream_format = p_format_list[i_backup_rate_format]; /* And if we have to, any digital format will be just fine (highest rate possible) */
        }
        free( p_format_list );
    }
    free( p_streams );
	
	CLog::Log(LOGINFO, STREAM_FORMAT_MSG("original stream format: ", deviceParameters->sfmt_revert ) );

    if( !AudioStreamChangeFormat(deviceParameters, deviceParameters->i_stream_id, deviceParameters->stream_format))
        return false;

	// Get device hardware buffer size

	uint32_t audioDeviceLatency, audioStreamLatency, audioDeviceBufferFrameSize, audioDeviceSafetyOffset;
	deviceParameters->hardwareFrameLatency = 0;
	i_param_size = sizeof(uint32_t);

	err = AudioDeviceGetProperty(deviceParameters->device_id,
						   0, false,
						   kAudioDevicePropertyLatency,
						   &i_param_size,
						   &audioDeviceLatency);

	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceLatency;

	err = AudioDeviceGetProperty(deviceParameters->device_id,
						   0, false,
						   kAudioDevicePropertyBufferFrameSize,
						   &i_param_size,
						   &audioDeviceBufferFrameSize);

	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceBufferFrameSize;

	err = AudioDeviceGetProperty(deviceParameters->device_id,
						   0, false,
						   kAudioDevicePropertySafetyOffset,
						   &i_param_size,
						   &audioDeviceSafetyOffset);

	if (err == noErr) deviceParameters->hardwareFrameLatency += audioDeviceSafetyOffset;

	err = AudioStreamGetProperty(deviceParameters->i_stream_id,
						   0,
						   kAudioStreamPropertyLatency,
						   &i_param_size,
						   &audioStreamLatency);

	if (err == noErr) deviceParameters->hardwareFrameLatency += audioStreamLatency;


	CLog::Log(LOGINFO, "Hardware latency: %i frames (%.2f msec @ %.0fHz)", deviceParameters->hardwareFrameLatency,
			  (float)deviceParameters->hardwareFrameLatency / deviceParameters->stream_format.mSampleRate * 1000,
			  deviceParameters->stream_format.mSampleRate);

  	// initialise the CoreAudio sink buffer
	uint32_t framecount = 1;
	while(framecount <= deviceParameters->stream_format.mSampleRate) // ensure power of 2
	{
		framecount <<= 1;
	}

#warning free
	deviceParameters->outputBuffer = (PaUtilRingBuffer *)malloc(sizeof(PaUtilRingBuffer));
	deviceParameters->outputBufferData = calloc(1, framecount * channels * bitsPerSample/8); // use uncompressed size if encoding ac3

	PaUtil_InitializeRingBuffer(deviceParameters->outputBuffer, channels * bitsPerSample/8, framecount, deviceParameters->outputBufferData);
	/* Add IOProc callback */
	err = AudioDeviceCreateIOProcID(deviceParameters->device_id,
									(AudioDeviceIOProc)RenderCallbackSPDIF,
									deviceParameters,
									&deviceParameters->sInputIOProcID);
    if( err != noErr )
    {
		CLog::Log(LOGERROR, "AudioDeviceAddIOProcID failed: [%4.4s]", (char *)&err );
        return false;
    }

    /* Start device */
    err = AudioDeviceStart(deviceParameters->device_id, (AudioDeviceIOProc)RenderCallbackSPDIF );
    if( err != noErr )
    {
		CLog::Log(LOGERROR, "AudioDeviceStart failed: [%4.4s]", (char *)&err );

        err = AudioDeviceDestroyIOProcID(deviceParameters->device_id,
										 (AudioDeviceIOProc)RenderCallbackSPDIF);
        if( err != noErr )
        {
			CLog::Log(LOGERROR, "AudioDeviceRemoveIOProc failed: [%4.4s]", (char *)&err );
        }
        return false;
    }

    return true;
}
Example #17
void
CPaCommon::ReInit()
{
    Close();

    PaStreamParameters pParameters;

    memset(&pParameters, 0, sizeof(pParameters));
    pParameters.channelCount = 2;
    pParameters.hostApiSpecificStreamInfo = NULL;
    pParameters.sampleFormat = paInt16;

    int idev = -1;
    for (int i = 0; i < int(names.size()); i++)
    {
        if (names[i] == dev)
        {
            idev = i;
            break;
        }
    }

    if (idev < 0 || idev >= int(devices.size()))
    {
        if (is_capture)
            pParameters.device = Pa_GetDefaultInputDevice();
        else
            pParameters.device = Pa_GetDefaultOutputDevice();
    }
    else
    {
        cout << "opening " << names[idev] << endl;
        pParameters.device = devices[idev];
    }

    if (pParameters.device == paNoDevice)
        return;

    unsigned long minRingBufferSize;
    int err;

    if (is_capture)
    {
        pParameters.suggestedLatency = Pa_GetDeviceInfo(pParameters.device)->defaultLowInputLatency;
        minRingBufferSize = 2*iBufferSize*sizeof(short);
    }
    else
    {
        pParameters.suggestedLatency = 0.8;
        minRingBufferSize = 4*iBufferSize*sizeof(short);
    }

    /* See the specific host's API docs for info on using this field */
    pParameters.hostApiSpecificStreamInfo = NULL;

    /* flags that can be used to define dither, clip settings and more */
    if (is_capture)
    {
        err = Pa_OpenStream(&stream, &pParameters, NULL, samplerate,
                            framesPerBuffer, paNoFlag, captureCallback,
                            (void *) this);

        if (err != paNoError) {
            //throw string("PortAudio error: ") + Pa_GetErrorText(err);
		}
        err = Pa_StartStream(stream);
        if (err != paNoError) {
            //throw string("PortAudio error: ") + Pa_GetErrorText(err);
		}
    }
    else
    {
        err = Pa_OpenStream(&stream, NULL, &pParameters, samplerate,
                            framesPerBuffer, paNoFlag, playbackCallback,
                            (void *) this);
        if (err != paNoError) {
            //throw string("PortAudio error: ") + Pa_GetErrorText(err);
		}
    }

    unsigned long n = 2;
    while (n < minRingBufferSize)
        n <<= 1;				/* smallest power of 2 >= requested */

    if (ringBufferData)
        delete[] ringBufferData;
    ringBufferData = new char[n];
    /* The current pa_ringbuffer API also takes an element size; use 1-byte
       elements so the byte-based size n (a power of two) carries over. */
    PaUtil_InitializeRingBuffer(&ringBuffer, 1, n, ringBufferData);

    device_changed = false;
    xrun = false;
}