Code example #1
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double requestedSampleRate,
                 int bufferSize)
    {
        close();

        lastError = String::empty;
        sampleRate = (int) requestedSampleRate;

        int preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

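        // Restrict the requested channel masks to what this device supports:
        // at most two output channels (stereo) and a single input channel (mono).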
        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();

        activeInputChans = inputChannels;
        activeInputChans.setRange (1, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();

        actualBufferSize = preferredBufferSize;

        inputBuffer.setSize  (jmax (1, numInputChannels),  actualBufferSize);
        outputBuffer.setSize (jmax (1, numOutputChannels), actualBufferSize);
        outputBuffer.clear();

        recorder = engine.createRecorder (numInputChannels,  sampleRate);
        player   = engine.createPlayer   (numOutputChannels, sampleRate);

        startThread (8);

        deviceOpen = true;
        return lastError;
    }
Code example #2
File: juce_ios_Audio.cpp, Project: eriser/AwesomeJUCE
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double sampleRate,
                 int bufferSize)
    {
        close();

        lastError = String::empty;
        preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        //  xxx set up channel mapping

        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();
        monoOutputChannelNumber = activeOutputChans.findNextSetBit (0);

        activeInputChans = inputChannels;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();
        monoInputChannelNumber = activeInputChans.findNextSetBit (0);

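        // Activate the audio session and choose a category: play-and-record when
        // input channels were requested and input is available, otherwise playback only.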
        AudioSessionSetActive (true);

        UInt32 audioCategory = (numInputChannels > 0 && audioInputIsAvailable) ? kAudioSessionCategory_PlayAndRecord
                               : kAudioSessionCategory_MediaPlayback;

        AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, sizeof (audioCategory), &audioCategory);

        if (audioCategory == kAudioSessionCategory_PlayAndRecord)
        {
            // (note: mustn't set this until after the audio category property has been set)
            UInt32 allowBluetoothInput = 1;
            AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryEnableBluetoothInput,
                                     sizeof (allowBluetoothInput), &allowBluetoothInput);
        }

        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);

        fixAudioRouteIfSetToReceiver();
        updateDeviceInfo();

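        // Convert the requested buffer size into a preferred hardware IO buffer
        // duration in seconds; the session may round this to what the hardware supports.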
        Float32 bufferDuration = preferredBufferSize / sampleRate;
        AudioSessionSetProperty (kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof (bufferDuration), &bufferDuration);
        actualBufferSize = preferredBufferSize;

        prepareFloatBuffers (actualBufferSize);

        isRunning = true;
        routingChanged (nullptr);  // creates and starts the AU

        lastError = audioUnit != 0 ? "" : "Couldn't open the device";
        return lastError;
    }
Code example #3
File: audiodevice_pa.cpp, Project: crafn/clover
PaAudioDevice::PaAudioDevice():
	defaultDeviceDId(-1){
	chooseWiselyAndCreateNicely();

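	// Open the default output stream using the device's default stream
	// parameters, sample rate and buffer size.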
	paOutputStream= std::move(util::UniquePtr<PaOutputStream>(
		new PaOutputStream(
			getDefaultStreamParameters(),
			getDefaultSampleRate(),
			getDefaultBufferSize()
		)
	));
}
Code example #4
File: juce_ios_Audio.cpp, Project: eriser/audtweak
    String open (const BigInteger& inputChannelsWanted,
                 const BigInteger& outputChannelsWanted,
                 double targetSampleRate, int bufferSize) override
    {
        close();

        lastError.clear();
        preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        //  xxx set up channel mapping

        activeOutputChans = outputChannelsWanted;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numOutputChannels = activeOutputChans.countNumberOfSetBits();
        monoOutputChannelNumber = activeOutputChans.findNextSetBit (0);

        activeInputChans = inputChannelsWanted;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numInputChannels = activeInputChans.countNumberOfSetBits();
        monoInputChannelNumber = activeInputChans.findNextSetBit (0);

        AudioSessionSetActive (true);

        if (numInputChannels > 0 && audioInputIsAvailable)
        {
            setSessionUInt32Property (kAudioSessionProperty_AudioCategory, kAudioSessionCategory_PlayAndRecord);
            setSessionUInt32Property (kAudioSessionProperty_OverrideCategoryEnableBluetoothInput, 1);
        }
        else
        {
            setSessionUInt32Property (kAudioSessionProperty_AudioCategory, kAudioSessionCategory_MediaPlayback);
        }

        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, routingChangedStatic, this);

        fixAudioRouteIfSetToReceiver();
        updateDeviceInfo();

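        // Request the preferred hardware sample rate and IO buffer duration,
        // then read back the values the session actually granted.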
        setSessionFloat64Property (kAudioSessionProperty_PreferredHardwareSampleRate, targetSampleRate);
        updateSampleRates();

        setSessionFloat64Property (kAudioSessionProperty_PreferredHardwareIOBufferDuration, preferredBufferSize / sampleRate);
        updateCurrentBufferSize();

        prepareFloatBuffers (actualBufferSize);

        isRunning = true;
        routingChanged (nullptr);  // creates and starts the AU

        lastError = audioUnit != 0 ? "" : "Couldn't open the device";
        return lastError;
    }
Code example #5
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double sampleRate,
                 int bufferSizeSamples)
    {
        isOpen_ = true;

        if (bufferSizeSamples <= 0)
            bufferSizeSamples = getDefaultBufferSize();

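        // Delegate the real device setup to the platform-specific implementation;
        // the device only stays flagged as open if reopen() reported no error.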
        lastError = internal->reopen (inputChannels, outputChannels, sampleRate, bufferSizeSamples);
        isOpen_ = lastError.isEmpty();
        return lastError;
    }
Code example #6
File: JackDevice.cpp, Project: kushview/element
 int getBufferSizeSamples (int /*index*/)    { return getDefaultBufferSize(); }
Code example #7
File: juce_android_Audio.cpp, Project: 0x4d52/ugen
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double requestedSampleRate,
                 int bufferSize)
    {
        close();

        if (sampleRate != (int) requestedSampleRate)
            return "Sample rate not allowed";

        lastError = String::empty;
        int preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        numDeviceInputChannels = 0;
        numDeviceOutputChannels = 0;

        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        numClientOutputChannels = activeOutputChans.countNumberOfSetBits();

        activeInputChans = inputChannels;
        activeInputChans.setRange (2, activeInputChans.getHighestBit(), false);
        numClientInputChannels = activeInputChans.countNumberOfSetBits();

        actualBufferSize = preferredBufferSize;
        inputChannelBuffer.setSize (2, actualBufferSize);
        inputChannelBuffer.clear();
        outputChannelBuffer.setSize (2, actualBufferSize);
        outputChannelBuffer.clear();

        JNIEnv* env = getEnv();

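        // Output path: create a stereo 16-bit AudioTrack via JNI and check that it
        // initialised successfully before marking the device as running.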
        if (numClientOutputChannels > 0)
        {
            numDeviceOutputChannels = 2;
            outputDevice = GlobalRef (env->NewObject (AudioTrack, AudioTrack.constructor,
                                                      STREAM_MUSIC, sampleRate, CHANNEL_OUT_STEREO, ENCODING_PCM_16BIT,
                                                      (jint) (minBufferSizeOut * numDeviceOutputChannels * sizeof (int16)), MODE_STREAM));

            if (env->CallIntMethod (outputDevice, AudioTrack.getState) != STATE_UNINITIALIZED)
                isRunning = true;
            else
                outputDevice.clear(); // failed to open the device
        }

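        // Input path: create an AudioRecord with as many channels as the hardware
        // provides (mono or stereo), again verifying that it initialised correctly.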
        if (numClientInputChannels > 0 && numDeviceInputChannelsAvailable > 0)
        {
            numDeviceInputChannels = jmin (numClientInputChannels, numDeviceInputChannelsAvailable);
            inputDevice = GlobalRef (env->NewObject (AudioRecord, AudioRecord.constructor,
                                                     0 /* (default audio source) */, sampleRate,
                                                     numDeviceInputChannelsAvailable > 1 ? CHANNEL_IN_STEREO : CHANNEL_IN_MONO,
                                                     ENCODING_PCM_16BIT,
                                                     (jint) (minBufferSizeIn * numDeviceInputChannels * sizeof (int16))));

            if (env->CallIntMethod (inputDevice, AudioRecord.getState) != STATE_UNINITIALIZED)
                isRunning = true;
            else
                inputDevice.clear(); // failed to open the device
        }

        if (isRunning)
        {
            if (outputDevice != nullptr)
                env->CallVoidMethod (outputDevice, AudioTrack.play);

            if (inputDevice != nullptr)
                env->CallVoidMethod (inputDevice, AudioRecord.startRecording);

            startThread (8);
        }
        else
        {
            closeDevices();
        }

        return lastError;
    }
Code example #8
 int getBufferSizeSamples (int index)        { return getDefaultBufferSize(); }
Code example #9
    String open (const BigInteger& inputChannels,
                 const BigInteger& outputChannels,
                 double requestedSampleRate,
                 int bufferSize) override
    {
        close();

        lastError.clear();
        sampleRate = (int) requestedSampleRate;

        int preferredBufferSize = (bufferSize <= 0) ? getDefaultBufferSize() : bufferSize;

        activeOutputChans = outputChannels;
        activeOutputChans.setRange (2, activeOutputChans.getHighestBit(), false);
        int numOutputChannels = activeOutputChans.countNumberOfSetBits();

        activeInputChans = inputChannels;
        activeInputChans.setRange (1, activeInputChans.getHighestBit(), false);
        int numInputChannels = activeInputChans.countNumberOfSetBits();

        actualBufferSize = preferredBufferSize;

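        // Enqueue a different number of OpenSL buffers depending on whether the
        // device advertises Android's low-latency audio path.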
        const int audioBuffersToEnqueue = hasLowLatencyAudioPath() ? buffersToEnqueueForLowLatency
                                                                   : buffersToEnqueueSlowAudio;

        DBG ("OpenSL: numInputChannels = " << numInputChannels
              << ", numOutputChannels = " << numOutputChannels
              << ", nativeBufferSize = " << getNativeBufferSize()
              << ", nativeSampleRate = " << getNativeSampleRate()
              << ", actualBufferSize = " << actualBufferSize
              << ", audioBuffersToEnqueue = " << audioBuffersToEnqueue
              << ", sampleRate = " << sampleRate
              << ", supportsFloatingPoint = " << (supportsFloatingPoint ? "true" : "false"));

        if (numInputChannels > 0 && (! RuntimePermissions::isGranted (RuntimePermissions::recordAudio)))
        {
            // If you hit this assert, you probably forgot to get RuntimePermissions::recordAudio
            // before trying to open an audio input device. This is not going to work!
            jassertfalse;
            lastError = "Error opening OpenSL input device: the app was not granted android.permission.RECORD_AUDIO";
        }

        session = OpenSLSession::create (slLibrary, numInputChannels, numOutputChannels,
                                         sampleRate, actualBufferSize, audioBuffersToEnqueue,
                                         supportsFloatingPoint);
        if (session != nullptr)
            session->setAudioPreprocessingEnabled (audioProcessingEnabled);
        else
        {
            if (numInputChannels > 0 && numOutputChannels > 0 && RuntimePermissions::isGranted (RuntimePermissions::recordAudio))
            {
                // New versions of the Android emulator do not seem to support audio input anymore on OS X
                activeInputChans = BigInteger(0);
                numInputChannels = 0;

                session = OpenSLSession::create(slLibrary, numInputChannels, numOutputChannels,
                                                sampleRate, actualBufferSize, audioBuffersToEnqueue,
                                                supportsFloatingPoint);
            }
        }

        if (session == nullptr)
            lastError = "Unknown error initializing opensl session";

        deviceOpen = (session != nullptr);
        return lastError;
    }
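
Most of the open() implementations above come from JUCE AudioIODevice subclasses (the PortAudio constructor in example #3 aside). For context, here is a minimal usage sketch of how a caller typically drives one of them. It is not taken from any of the projects above: "device" is assumed to be a juce::AudioIODevice* obtained from an AudioIODeviceType, and "myCallback" a user-defined AudioIODeviceCallback.

    // Hedged usage sketch: request mono input and stereo output, let the device
    // pick its default buffer size, then start the audio callback.
    BigInteger inputs, outputs;
    inputs.setRange  (0, 1, true);    // one input channel
    outputs.setRange (0, 2, true);    // two output channels

    const String error = device->open (inputs, outputs,
                                       44100.0,    // requested sample rate
                                       0);         // 0 => use getDefaultBufferSize()

    if (error.isEmpty())
        device->start (&myCallback);  // begins calling the AudioIODeviceCallback
    else
        DBG ("open() failed: " << error);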