int AudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
{
    // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
    // compressed format and better than any PCM format. This is by design of pickFormat()
    if (!audio_is_linear_pcm(format1)) {
        if (!audio_is_linear_pcm(format2)) {
            return 0;
        }
        return 1;
    }
    if (!audio_is_linear_pcm(format2)) {
        return -1;
    }

    int index1 = -1, index2 = -1;
    for (size_t i = 0;
            (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
            i++) {
        if (sPcmFormatCompareTable[i] == format1) {
            index1 = i;
        }
        if (sPcmFormatCompareTable[i] == format2) {
            index2 = i;
        }
    }
    // format1 not found => index1 < 0 => format2 > format1
    // format2 not found => index2 < 0 => format2 < format1
    return index1 - index2;
}
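
A hedged usage sketch (not part of the source): the comparator above is meant to drive a pickFormat()-style search. Note the seed: because compareFormats() ranks any non-PCM value, including AUDIO_FORMAT_INVALID, above every PCM format, seeding the search with AUDIO_FORMAT_INVALID would mean no PCM candidate could ever win, so this sketch seeds with the first element. The container type and helper name are assumptions.

// Sketch only: pick the "best" format from a candidate list.
static audio_format_t pickBestFormat(const Vector<audio_format_t>& candidates)
{
    if (candidates.isEmpty()) {
        return AUDIO_FORMAT_INVALID;
    }
    audio_format_t best = candidates[0];
    for (size_t i = 1; i < candidates.size(); i++) {
        // compareFormats() > 0 means candidates[i] ranks above best
        if (AudioPort::compareFormats(candidates[i], best) > 0) {
            best = candidates[i];
        }
    }
    return best;
}
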
size_t AudioRecord::frameSize() const
{
    if (inputSource() == AUDIO_SOURCE_VOICE_COMMUNICATION) {
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return channelCount() * sizeof(int16_t);
        }
    } else {
        if (format() == AUDIO_FORMAT_AMR_NB) {
            return channelCount() * AMR_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_EVRC) {
            return channelCount() * EVRC_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_QCELP) {
            return channelCount() * QCELP_FRAMESIZE; // Full rate frame size
        } else if (format() == AUDIO_FORMAT_AAC) {
            // Not the actual frame size, but for variable frame rate AAC
            // encoding the buffer size is treated as a frame size
            return AAC_FRAMESIZE;
        } else if (format() == AUDIO_FORMAT_AMR_WB) {
            return channelCount() * AMR_WB_FRAMESIZE;
        }
        if (audio_is_linear_pcm(mFormat)) {
            return channelCount() * audio_bytes_per_sample(mFormat);
        } else {
            return sizeof(uint8_t);
        }
    }
}
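
frameSize() is what clients use to convert between byte counts and frame counts. An illustrative conversion, assuming an already-initialized AudioRecord named record (the variable and buffer are hypothetical):

// Sketch only: bytes returned by read(), divided by the frame size, give frames.
int16_t buffer[1024];
ssize_t bytesRead = record.read(buffer, sizeof(buffer));
if (bytesRead > 0) {
    size_t framesRead = (size_t)bytesRead / record.frameSize();
    ALOGV("read %zu frames", framesRead);
}
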
status_t AudioStreamOut::open(
        audio_io_handle_t handle,
        audio_devices_t devices,
        struct audio_config *config,
        const char *address)
{
    audio_stream_out_t *outStream;
    int status = hwDev()->open_output_stream(
            hwDev(),
            handle,
            devices,
            flags,
            config,
            &outStream,
            address);
    ALOGV("AudioStreamOut::open(), HAL open_output_stream returned "
            " %p, sampleRate %d, Format %#x, "
            "channelMask %#x, status %d",
            outStream,
            config->sample_rate,
            config->format,
            config->channel_mask,
            status);

    if (status == NO_ERROR) {
        stream = outStream;
        mHalFormatIsLinearPcm = audio_is_linear_pcm(config->format);
        ALOGI("AudioStreamOut::open(), mHalFormatIsLinearPcm = %d", (int)mHalFormatIsLinearPcm);
        mHalFrameSize = audio_stream_out_frame_size(stream);
    }

    return status;
}
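
The caller fills the audio_config before open(), and the HAL may rewrite its fields with what the stream actually supports, which is why the ALOGV above echoes them back after the call. A minimal sketch (streamOut, handle, device, and address values are placeholders):

audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = 48000;
config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
config.format = AUDIO_FORMAT_PCM_16_BIT;
status_t status = streamOut->open(handle, AUDIO_DEVICE_OUT_SPEAKER, &config,
        "" /*address*/);
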
// static
status_t AudioRecord::getMinFrameCount(
        int* frameCount,
        uint32_t sampleRate,
        int format,
        int channelCount)
{
    size_t size = 0;
    if (AudioSystem::getInputBufferSize(sampleRate, format, channelCount, &size)
            != NO_ERROR) {
        LOGE("AudioSystem could not query the input buffer size.");
        return NO_INIT;
    }

    if (size == 0) {
        LOGE("Unsupported configuration: sampleRate %d, format %d, channelCount %d",
            sampleRate, format, channelCount);
        return BAD_VALUE;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    size <<= 1;

    if (audio_is_linear_pcm(format)) {
        size /= channelCount * audio_bytes_per_sample(format);
    }

    *frameCount = size;
    return NO_ERROR;
}
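
A worked instance of the arithmetic above: for 44100 Hz stereo 16-bit PCM, a HAL-reported input buffer of 4096 bytes doubles to 8192 bytes for ping-pong use, and dividing by the 4-byte frame size (2 channels x 2 bytes per sample) gives a minimum of 2048 frames. A hedged call sketch:

int frameCount = 0;
if (AudioRecord::getMinFrameCount(&frameCount, 44100,
        AUDIO_FORMAT_PCM_16_BIT, 2) == NO_ERROR) {
    // frameCount now holds the frame-denominated minimum, e.g. 2048.
}
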
// static
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    size_t size;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size for sampleRate %u, format %#x, "
              "channelMask %#x; status %d", sampleRate, format, channelMask, status);
        return status;
    }

    // handle non-linear-pcm formats and update frameCount
    if (!audio_is_linear_pcm(format)) {
        *frameCount = (size * 2) / sizeof(uint8_t);
        return NO_ERROR;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    // Assumes audio_is_linear_pcm(format)
    if ((*frameCount = (size * 2) / (audio_channel_count_from_in_mask(channelMask) *
            audio_bytes_per_sample(format))) == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    return NO_ERROR;
}
Example #6
int AudioTrack::frameSize() const
{
    if (audio_is_linear_pcm(mFormat)) {
        return channelCount() * audio_bytes_per_sample(mFormat);
    } else {
        return sizeof(uint8_t);
    }
}
// static
status_t AudioRecord::getMinFrameCount(
        size_t* frameCount,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // default to 0 in case of error
    *frameCount = 0;

    size_t size = 0;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }

    if (size == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    // We double the size of input buffer for ping pong use of record buffer.
    size <<= 1;
    uint32_t channelCount = popcount(channelMask);
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
#endif
        size /= channelCount * audio_bytes_per_sample(format);
#ifdef QCOM_HARDWARE
    else
        size /= sizeof(uint8_t);
#endif

    *frameCount = size;
    return NO_ERROR;
}
Example #8
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelMask,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{

    LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());

    AutoMutex lock(mLock);
    if (mAudioTrack != 0) {
        LOGE("Track already in use");
        return INVALID_OPERATION;
    }

    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (sampleRate == 0) {
        sampleRate = afSampleRate;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (channelMask == 0) {
        channelMask = AUDIO_CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    if (!audio_is_linear_pcm(format)) {
        flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
    }

    if (!audio_is_output_channel(channelMask)) {
        LOGE("Invalid channel mask");
        return BAD_VALUE;
    }
    uint32_t channelCount = popcount(channelMask);

    audio_io_handle_t output = AudioSystem::getOutput(
                                    (audio_stream_type_t)streamType,
                                    sampleRate, format, channelMask,
                                    (audio_policy_output_flags_t)flags);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0;
    mFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mSessionId = sessionId;
    mAuxEffectId = 0;

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                  sampleRate,
                                  (uint32_t)format,
                                  (uint32_t)channelMask,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  true);

    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        if (mAudioTrackThread == 0) {
          LOGE("Could not create callback thread");
          return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

    mStreamType = streamType;
    mFormat = (uint32_t)format;
    mChannelMask = (uint32_t)channelMask;
    mChannelCount = channelCount;
    mSharedBuffer = sharedBuffer;
    mMuted = false;
    mActive = 0;
    mCbf = cbf;
    mUserData = user;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlushed = false;
    mFlags = flags;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mRestoreStatus = NO_ERROR;
    return NO_ERROR;
}
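
A hedged sketch of a typical caller of this legacy set() overload, leaning on the default-value handling above (0 selects the documented default); it assumes the trailing parameters are defaulted in the class declaration, and audioCallback/cookie are placeholders:

sp<AudioTrack> track = new AudioTrack();
status_t st = track->set(AUDIO_STREAM_MUSIC,
                         0 /*sampleRate: output default*/,
                         0 /*format: becomes AUDIO_FORMAT_PCM_16_BIT*/,
                         0 /*channelMask: becomes AUDIO_CHANNEL_OUT_STEREO*/,
                         0 /*frameCount: sized in createTrack_l*/,
                         0 /*flags*/,
                         audioCallback, cookie);
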
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();

    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();

    uint32_t device = AUDIO_DEVICE_NONE;
    uint32_t availableOutputDevicesType = availableOutputDevices.types();

    switch (strategy) {

    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        if (!device) {
            ALOGE("getDeviceForStrategy() no device found for "\
                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
        }
        break;

    case STRATEGY_SONIFICATION_RESPECTFUL:
        if (isInCall()) {
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
        } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
            // while media is playing on a remote device, use the sonification behavior.
            // Note that we test this usecase before testing if media is playing because
            //   the isStreamActive() method only informs about the activity of a stream, not
            //   if it's for local playback. Note also that we use the same delay between both tests
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
            //user "safe" speaker if available instead of normal speaker to avoid triggering
            //other acoustic safety mechanisms for notification
            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
            }
        } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
            // while media is playing (or has recently played), use the same device
            device = getDeviceForStrategy(STRATEGY_MEDIA);
        } else {
            // when media is not playing anymore, fall back on the sonification behavior
            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
            //user "safe" speaker if available instead of normal speaker to avoid triggering
            //other acoustic safety mechanisms for notification
            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
            }
        }
        break;

    case STRATEGY_DTMF:
        if (!isInCall()) {
            // when off call, DTMF strategy follows the same rules as MEDIA strategy
            device = getDeviceForStrategy(STRATEGY_MEDIA);
            break;
        }
        // when in call, DTMF and PHONE strategies follow the same rules
        // FALL THROUGH

    case STRATEGY_PHONE:
        // Force use of only devices on primary output if:
        // - in call AND
        //   - cannot route from voice call RX OR
        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
        if (getPhoneState() == AUDIO_MODE_IN_CALL) {
            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
            audio_devices_t availPrimaryInputDevices =
                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
            audio_devices_t availPrimaryOutputDevices =
                    primaryOutput->supportedDevices() & availableOutputDevices.types();

            if (((availableInputDevices.types() &
                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
                         (primaryOutput->getAudioPort()->getModuleVersion() <
                             AUDIO_DEVICE_API_VERSION_3_0))) {
                availableOutputDevicesType = availPrimaryOutputDevices;
            }
        }
        // for phone strategy, we first consider the forced use and then the available devices by order
        // of priority
        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
        case AUDIO_POLICY_FORCE_BT_SCO:
            if (!isInCall() || strategy != STRATEGY_DTMF) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
            if (device) break;
            // if SCO device is requested but no SCO device is available, fall back to default case
            // FALL THROUGH

        default:    // FORCE_NONE
            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
            if (!isInCall() &&
                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                    (outputs.getA2dpOutput() != 0)) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
            if (device) break;
            if (!isInCall()) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
            if (device) break;
            device = mApmObserver->getDefaultOutputDevice()->type();
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
            }
            break;

        case AUDIO_POLICY_FORCE_SPEAKER:
            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
            // A2DP speaker when forcing to speaker output
            if (!isInCall() &&
                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                    (outputs.getA2dpOutput() != 0)) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                if (device) break;
            }
            if (!isInCall()) {
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                if (device) break;
                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                if (device) break;
            }
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
            if (device) break;
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
            if (device) break;
            device = mApmObserver->getDefaultOutputDevice()->type();
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
            }
            break;
        }
        break;

    case STRATEGY_SONIFICATION:

        // If in call, just select the STRATEGY_PHONE device: the rest of the behavior is handled by
        // handleIncallSonification().
        if (isInCall()) {
            device = getDeviceForStrategy(STRATEGY_PHONE);
            break;
        }
        // FALL THROUGH

    case STRATEGY_ENFORCED_AUDIBLE:
        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
        // except:
        //   - when in call where it doesn't default to STRATEGY_PHONE behavior
        //   - in countries where not enforced in which case it follows STRATEGY_MEDIA

        if ((strategy == STRATEGY_SONIFICATION) ||
                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
            if (device == AUDIO_DEVICE_NONE) {
                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
            }
        }
        // The second device used for sonification is the same as the device used by media strategy
        // FALL THROUGH

    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
    case STRATEGY_ACCESSIBILITY:
        if (strategy == STRATEGY_ACCESSIBILITY) {
            // do not route accessibility prompts to a digital output currently configured with a
            // compressed format as they would likely not be mixed and dropped.
            for (size_t i = 0; i < outputs.size(); i++) {
                sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
                audio_devices_t devices = desc->device() &
                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
                        devices != AUDIO_DEVICE_NONE) {
                    availableOutputDevicesType = availableOutputDevices.types() & ~devices;
                }
            }
        }
        // FALL THROUGH

    case STRATEGY_REROUTING:
    case STRATEGY_MEDIA: {
        uint32_t device2 = AUDIO_DEVICE_NONE;
        if (strategy != STRATEGY_SONIFICATION) {
            // no sonification on remote submix (e.g. WFD)
            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
            }
        }
        if (isInCall() && (strategy == STRATEGY_MEDIA)) {
            device = getDeviceForStrategy(STRATEGY_PHONE);
            break;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                (outputs.getA2dpOutput() != 0)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
            }
            if (device2 == AUDIO_DEVICE_NONE) {
                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
            }
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
        }
        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
            // no sonification on aux digital (e.g. HDMI)
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
        }
        if ((device2 == AUDIO_DEVICE_NONE) &&
                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
        }
        if (device2 == AUDIO_DEVICE_NONE) {
            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
        }
        int device3 = AUDIO_DEVICE_NONE;
        if (strategy == STRATEGY_MEDIA) {
            // ARC, SPDIF and AUX_LINE can co-exist with others.
            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
        }

        device2 |= device3;
        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
        device |= device2;

        // If hdmi system audio mode is on, remove speaker out of output list.
        if ((strategy == STRATEGY_MEDIA) &&
            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
        }

        if (device) break;
        device = mApmObserver->getDefaultOutputDevice()->type();
        if (device == AUDIO_DEVICE_NONE) {
            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
        }
        } break;

    default:
        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
        break;
    }

    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
    return device;
}
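
The recurring idiom in this function is a priority cascade over a device bitmask: AND the available-devices mask with one candidate at a time and stop at the first non-zero result. The same pattern reduced to its core (the candidate list is abbreviated and illustrative):

static audio_devices_t pickByPriority(audio_devices_t available)
{
    static const audio_devices_t kPriority[] = {
        AUDIO_DEVICE_OUT_WIRED_HEADPHONE,   // highest priority
        AUDIO_DEVICE_OUT_WIRED_HEADSET,
        AUDIO_DEVICE_OUT_SPEAKER,           // lowest priority
    };
    for (size_t i = 0; i < sizeof(kPriority) / sizeof(kPriority[0]); i++) {
        audio_devices_t device = available & kPriority[i];
        if (device) {
            return device;
        }
    }
    return AUDIO_DEVICE_NONE;
}
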
Example #10
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    AutoMutex lock(mLock);

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
        ALOGE("Format %#x is not supported", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mActive = false;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}
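
A hedged client sketch for the set() overload above; recordCallback and cookie are placeholders, the trailing parameters are assumed to be defaulted, and error handling is elided:

sp<AudioRecord> record = new AudioRecord();
status_t st = record->set(AUDIO_SOURCE_MIC,
                          48000,
                          AUDIO_FORMAT_PCM_16_BIT,
                          AUDIO_CHANNEL_IN_MONO,
                          0 /*frameCount: use the minimum*/,
                          recordCallback, cookie);
if (st == NO_ERROR) {
    record->start();   // data then arrives through recordCallback
}
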
Example #11
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channels,
        uint32_t flags,
        int sessionId,
        int lpaSessionId)
{

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }
    // force direct flag if format is not linear PCM
    if (!audio_is_linear_pcm(format)) {
        flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
    }

    audio_io_handle_t output = AudioSystem::getSession((audio_stream_type_t)streamType,
            format, (audio_policy_output_flags_t)flags, lpaSessionId);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }
    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mChannelCount = 2;
    mSharedBuffer = NULL;
    mMuted = false;
    mActive = 0;
    mCbf = NULL;
    mNotificationFramesReq = 0;
    mRemainingFrames = 0;
    mUserData = NULL;
    mLatency = 0;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlags = flags;
    mAudioTrack = NULL;
    mAudioSession = output;

    mSessionId = sessionId;
    mAuxEffectId = 0;

    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
       LOGE("Could not get audioflinger");
       return NO_INIT;
    }
    status_t status;
    audioFlinger->createSession(getpid(),
                                sampleRate,
                                channels,
                                &mSessionId,
                                &status);
    if (status != NO_ERROR) {
        LOGE("createSession returned with status %d", status);
    }
    /* Make the track active and start output */
    android_atomic_or(1, &mActive);
    AudioSystem::startOutput(output, (audio_stream_type_t)mStreamType);
    LOGV("AudioTrack::set() - Started output(%d)",output);
    return NO_ERROR;
}
Example #12
static int derive_playback_app_type_cfg(struct audio_device *adev,
                                        struct audio_usecase *usecase,
                                        int *app_type,
                                        int *sample_rate)
{
    if (usecase->stream.out == NULL) {
        return -1;
    }
    struct stream_out *out = usecase->stream.out;
    struct stream_app_type_cfg *app_type_cfg = &out->app_type_cfg;

    *sample_rate = DEFAULT_OUTPUT_SAMPLING_RATE;

    // add speaker prot changes if needed
    // and use that to check for device
    if (audio_is_usb_out_device(out->devices)) {
        platform_check_and_update_copp_sample_rate(adev->platform,
                                                   usecase->out_snd_device,
                                                   out->sample_rate,
                                                   sample_rate);
    }

    app_type_cfg->mode = flags_to_mode(0 /*playback*/, out->flags);
    if (!audio_is_linear_pcm(out->format)) {
        platform_get_app_type_v2(adev->platform,
                                 PCM_PLAYBACK,
                                 app_type_cfg->mode,
                                 24,
                                 *sample_rate,
                                 app_type);
        ALOGV("Non pcm got app type %d", *app_type);
    } else if (out->format == AUDIO_FORMAT_PCM_16_BIT) {
        platform_get_app_type_v2(adev->platform,
                                 PCM_PLAYBACK,
                                 app_type_cfg->mode,
                                 16,
                                 *sample_rate,
                                 app_type);
    } else if (out->format == AUDIO_FORMAT_PCM_24_BIT_PACKED ||
               out->format == AUDIO_FORMAT_PCM_8_24_BIT) {
        platform_get_app_type_v2(adev->platform,
                                 PCM_PLAYBACK,
                                 app_type_cfg->mode,
                                 24,
                                 *sample_rate,
                                 app_type);
    } else if (out->format == AUDIO_FORMAT_PCM_32_BIT) {
        platform_get_app_type_v2(adev->platform,
                                 PCM_PLAYBACK,
                                 app_type_cfg->mode,
                                 32,
                                 *sample_rate,
                                 app_type);
    } else {
        ALOGE("%s bad format\n", __func__);
        return -1;
    }

    app_type_cfg->app_type = *app_type;
    app_type_cfg->sample_rate = *sample_rate;
    return 0;
}
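
The branch ladder above boils down to a PCM-format-to-bit-width mapping fed into platform_get_app_type_v2(). The same mapping factored out for clarity (a restatement, not the vendor code):

static int pcm_format_to_bit_width(audio_format_t format)
{
    switch (format) {
    case AUDIO_FORMAT_PCM_16_BIT:        return 16;
    case AUDIO_FORMAT_PCM_24_BIT_PACKED: // fall through
    case AUDIO_FORMAT_PCM_8_24_BIT:      return 24;
    case AUDIO_FORMAT_PCM_32_BIT:        return 32;
    default:                             return -1; // rejected above as "bad format"
    }
}
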
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
          "uid %d, pid %d",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (pAttributes == NULL) {
        memset(&mAttributes, 0, sizeof(audio_attributes_t));
        mAttributes.source = inputSource;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
              mAttributes.source, mAttributes.flags, mAttributes.tags);
    }

    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    // AudioFlinger capture only supports linear PCM
    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
        ALOGE("Format %#x is not linear pcm", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }

    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000 * mFrameCount) / mSampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return NO_ERROR;
}
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("sampleRate %u, channelMask %#x, format %d", sampleRate, channelMask, format);
    ALOGV("inputSource %d", inputSource);
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
            frameCount);

    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }
#if defined(QCOM_HARDWARE) && !defined(QCOM_DIRECTTRACK)
    if (format != AUDIO_FORMAT_PCM_16_BIT &&
           !audio_is_compress_voip_format(format) &&
           !audio_is_compress_capture_format(format)) {
#else
#ifndef QCOM_DIRECTTRACK
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
#endif
#endif
#ifndef QCOM_DIRECTTRACK
        ALOGE("Format %d is not supported", format);
        return BAD_VALUE;
    }
#endif

    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

#ifdef QCOM_DIRECTTRACK
    mFrameSize = frameSize();

    size_t inputBuffSizeInBytes = -1;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &inputBuffSizeInBytes);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }

    if (inputBuffSizeInBytes == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    int minFrameCount = (inputBuffSizeInBytes * 2)/mFrameSize;
#else
    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    else
        mFrameSize = sizeof(uint8_t);
#else
    mFrameSize = channelCount * audio_bytes_per_sample(format);
#endif

    // validate framecount
    size_t minFrameCount = 0;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
            sampleRate, format, channelMask);
    if (status != NO_ERROR) {
        ALOGE("getMinFrameCount() failed; status %d", status);
        return status;
    }
#endif

    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
        return BAD_VALUE;
    }
    mFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;

    if (sessionId == 0 ) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;

    // create the IAudioRecord
    status = openRecord_l(0 /*epoch*/);
    if (status) {
        return status;
    }

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    mStatus = NO_ERROR;

    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount_;

    mActive = false;
    mCbf = cbf;
    mRefreshRemaining = true;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}

#ifdef QCOM_DIRECTTRACK
audio_source_t AudioRecord::inputSource() const
{
    return mInputSource;
}
#endif

// -------------------------------------------------------------------------

status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
    ALOGV("start, sync event %d trigger session %d", event, triggerSession);

    AutoMutex lock(mLock);
    if (mActive) {
        return NO_ERROR;
    }

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        ALOGV("mAudioRecord->start()");
        status = mAudioRecord->start(event, triggerSession);
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
    } else {
        mActive = true;
        sp<AudioRecordThread> t = mAudioRecordThread;
        if (t != 0) {
            t->resume();
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    }

    return status;
}
Example #15
// must be called with mLock held
status_t AudioTrack::createTrack_l(
        int streamType,
        uint32_t sampleRate,
        uint32_t format,
        uint32_t channelMask,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        bool enforceFrameCount)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
       LOGE("Could not get audioflinger");
       return NO_INIT;
    }

    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    int afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    mNotificationFramesAct = mNotificationFramesReq;
    if (!audio_is_linear_pcm(format)) {
        if (sharedBuffer != 0) {
            frameCount = sharedBuffer->size();
        }
    } else {
        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        if (minBufCount < 2) minBufCount = 2;

        int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;

        if (sharedBuffer == 0) {
            if (frameCount == 0) {
                frameCount = minFrameCount;
            }
            if (mNotificationFramesAct == 0) {
                mNotificationFramesAct = frameCount/2;
            }
            // Make sure that application is notified with sufficient margin
            // before underrun
            if (mNotificationFramesAct > (uint32_t)frameCount/2) {
                mNotificationFramesAct = frameCount/2;
            }
            if (frameCount < minFrameCount) {
                LOGW_IF(enforceFrameCount, "Minimum buffer size corrected from %d to %d",
                         frameCount, minFrameCount);
                frameCount = minFrameCount;
            }
        } else {
            // Ensure that buffer alignment matches channelcount
            int channelCount = popcount(channelMask);
            if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
                LOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
                return BAD_VALUE;
            }
            frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
        }
    }

    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelMask,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);

    if (track == 0) {
        LOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> cblk = track->getCblk();
    if (cblk == 0) {
        LOGE("Could not get control block");
        return NO_INIT;
    }
    mAudioTrack.clear();
    mAudioTrack = track;
    mCblkMemory.clear();
    mCblkMemory = cblk;
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags);
    if (sharedBuffer == 0) {
        mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
    } else {
        mCblk->buffers = sharedBuffer->pointer();
         // Force buffer full condition as data is already present in shared memory
        mCblk->stepUser(mCblk->frameCount);
    }

    mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
    mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
    mCblk->waitTimeMs = 0;
    mRemainingFrames = mNotificationFramesAct;
    mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
    return NO_ERROR;
}
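
A worked instance of the buffer-depth arithmetic above, with assumed mixer values afSampleRate = 44100, afFrameCount = 1024, afLatency = 96 ms: one mixer period is (1000 * 1024) / 44100 = 23 ms in integer math, so minBufCount = 96 / 23 = 4, and for a track also running at 44100 Hz, minFrameCount = (1024 * 44100 * 4) / 44100 = 4096 frames.
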
Example #16
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    AutoMutex lock(mLock);
    int active;
    status_t result = NO_ERROR;
    audio_track_cblk_t* cblk = mCblk;
    uint32_t framesReq = audioBuffer->frameCount;
    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;

    audioBuffer->frameCount  = 0;
    audioBuffer->size = 0;

    uint32_t framesAvail = cblk->framesAvailable();

    cblk->lock.lock();
    if (cblk->flags & CBLK_INVALID_MSK) {
        goto create_new_track;
    }
    cblk->lock.unlock();

    if (framesAvail == 0) {
        cblk->lock.lock();
        goto start_loop_here;
        while (framesAvail == 0) {
            active = mActive;
            if (UNLIKELY(!active)) {
                LOGV("Not active and NO_MORE_BUFFERS");
                cblk->lock.unlock();
                return NO_MORE_BUFFERS;
            }
            if (UNLIKELY(!waitCount)) {
                cblk->lock.unlock();
                return WOULD_BLOCK;
            }
            if (!(cblk->flags & CBLK_INVALID_MSK)) {
                mLock.unlock();
                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
                cblk->lock.unlock();
                mLock.lock();
                if (mActive == 0) {
                    return status_t(STOPPED);
                }
                cblk->lock.lock();
            }

            if (cblk->flags & CBLK_INVALID_MSK) {
                goto create_new_track;
            }
            if (__builtin_expect(result!=NO_ERROR, false)) {
                cblk->waitTimeMs += waitTimeMs;
                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                    // timing out when a loop has been set and we have already written up to
                    // the loop end is a normal condition: no need to wake AudioFlinger up.
                    if (cblk->user < cblk->loopEnd) {
                        LOGW("obtainBuffer timed out (is the CPU pegged?) %p "
                                "user=%08x, server=%08x", this, cblk->user, cblk->server);
                        //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
                        cblk->lock.unlock();
                        result = mAudioTrack->start();
                        cblk->lock.lock();
                        if (result == DEAD_OBJECT) {
                            android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
create_new_track:
                            result = restoreTrack_l(cblk, false);
                        }
                        if (result != NO_ERROR) {
                            LOGW("obtainBuffer create Track error %d", result);
                            cblk->lock.unlock();
                            return result;
                        }
                    }
                    cblk->waitTimeMs = 0;
                }

                if (--waitCount == 0) {
                    cblk->lock.unlock();
                    return TIMED_OUT;
                }
            }
            // read the server count again
        start_loop_here:
            framesAvail = cblk->framesAvailable_l();
        }
        cblk->lock.unlock();
    }

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mActive && (cblk->flags & CBLK_DISABLED_MSK)) {
        android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
        LOGW("obtainBuffer() track %p disabled, restarting", this);
        mAudioTrack->start();
    }

    cblk->waitTimeMs = 0;

    if (framesReq > framesAvail) {
        framesReq = framesAvail;
    }

    uint32_t u = cblk->user;
    uint32_t bufferEnd = cblk->userBase + cblk->frameCount;

    if (u + framesReq > bufferEnd && u < bufferEnd) {
        framesReq = bufferEnd - u;
    }

    audioBuffer->flags = mMuted ? Buffer::MUTE : 0;
    audioBuffer->channelCount = mChannelCount;
    audioBuffer->frameCount = framesReq;
    audioBuffer->size = framesReq * cblk->frameSize;
    if (audio_is_linear_pcm(mFormat)) {
        audioBuffer->format = AUDIO_FORMAT_PCM_16_BIT;
    } else {
        audioBuffer->format = mFormat;
    }
    audioBuffer->raw = (int8_t *)cblk->buffer(u);
    active = mActive;
    return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
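
A hedged sketch of the client-side loop this obtainBuffer() serves, assuming the legacy companion releaseBuffer(Buffer*); track, source, and framesToWrite are placeholders:

AudioTrack::Buffer buffer;
buffer.frameCount = framesToWrite;       // requested size, in frames
status_t st = track->obtainBuffer(&buffer, -1 /*waitCount: block with timeout*/);
if (st == NO_ERROR) {
    memcpy(buffer.raw, source, buffer.size);  // fill the granted region
    track->releaseBuffer(&buffer);            // hand it back to the mixer
}
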
Example #17
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    CHECK(!mStarted);
    CHECK(mSource != NULL);

    status_t err;
    if (!sourceAlreadyStarted) {
        mSourcePaused = false;
        err = mSource->start();

        if (err != OK) {
            return err;
        }
    }
    ALOGD("start of Playback, useOffload %d",useOffload());

    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.

    CHECK(mFirstBuffer == NULL);

    MediaSource::ReadOptions options;
    if (mSeeking) {
        options.setSeekTo(mSeekTimeUs);
    }

    do {
        mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    } while (mFirstBufferResult == -EAGAIN);

    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        ALOGV("INFO_FORMAT_CHANGED!!!");

        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;

        if (mSeeking) {
            mPositionTimeRealUs = 0;
            mPositionTimeMediaUs = mSeekTimeUs;
            mSeeking = false;
        }

    } else {
        mIsFirstBuffer = true;

        if (mSeeking) {
            mPositionTimeRealUs = 0;
            if (mFirstBuffer == NULL || !mFirstBuffer->meta_data()->findInt64(
                    kKeyTime, &mPositionTimeMediaUs)) {
                return UNKNOWN_ERROR;
            }
            mSeeking = false;
        }

    }

    sp<MetaData> format = mSource->getFormat();
    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));

    success = format->findInt32(kKeySampleRate, &mSampleRate);
    CHECK(success);

    int32_t numChannels, channelMask = 0;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    CHECK(success);

    format->findInt64(kKeyDuration, &mDurationUs);

    if (!format->findInt32(kKeyChannelMask, &channelMask)) {
        // log only when there's a risk of ambiguity of channel mask selection
        ALOGI_IF(numChannels > 2,
                "source format didn't specify channel mask, using (%d) channel order", numChannels);
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    } else if (channelMask == 0) {
        channelMask = audio_channel_out_mask_from_count(numChannels);
        ALOGV("channel mask is zero,update from channel count %d", channelMask);
    }

    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;

    int32_t bitWidth = 16;
#if defined(ENABLE_AV_ENHANCEMENTS) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
    format->findInt32(kKeySampleBits, &bitWidth);
#endif

    if (useOffload()) {
        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
            audioFormat = AUDIO_FORMAT_INVALID;
        } else if (audio_is_linear_pcm(audioFormat) || audio_is_offload_pcm(audioFormat)) {
#if defined(QCOM_HARDWARE) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
            // Override audio format for PCM offload
            if (bitWidth >= 24) {
                ALOGD("24-bit PCM offload enabled format=%d", audioFormat);
                audioFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
            } else {
                audioFormat = AUDIO_FORMAT_PCM_16_BIT_OFFLOAD;
            }
#endif
            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
        }
    }

    int avgBitRate = -1;
    format->findInt32(kKeyBitRate, &avgBitRate);

    if (mAudioSink.get() != NULL) {

        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;

        if (allowDeepBuffering()) {
            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
        if (useOffload()) {
            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

            int64_t durationUs;
            if (format->findInt64(kKeyDuration, &durationUs)) {
                offloadInfo.duration_us = durationUs;
            } else {
                offloadInfo.duration_us = -1;
            }

            offloadInfo.sample_rate = mSampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
#if defined(ENABLE_AV_ENHANCEMENTS) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
            offloadInfo.bit_width = bitWidth >= 24 ? 24 : bitWidth;
#endif
        }

        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, audioFormat,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (audio_output_flags_t)flags,
                useOffload() ? &offloadInfo : NULL);

        if (err == OK) {
            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            mFrameSize = mAudioSink->frameSize();

            if (useOffload()) {
                // If the playback is offloaded to h/w we pass the
                // HAL some metadata information
                // We don't want to do this for PCM because it will be going
                // through the AudioFlinger mixer before reaching the hardware
                sendMetaDataToHal(mAudioSink, format);
            }

            err = mAudioSink->start();
            // do not alter behavior for non offloaded tracks: ignore start status.
            if (!useOffload()) {
                err = OK;
            }
        }

        if (err != OK) {
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

    } else {
        // playing to an AudioTrack, set up mask if necessary
        audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
                audio_channel_out_mask_from_count(numChannels) : channelMask;
        if (0 == audioMask) {
            return BAD_VALUE;
        }

        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);

        if ((err = mAudioTrack->initCheck()) != OK) {
            mAudioTrack.clear();

            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
        mFrameSize = mAudioTrack->frameSize();

        mAudioTrack->start();
    }

    mStarted = true;
    mPlaying = true;
    mPinnedTimeUs = -1ll;
    const char *componentName;
    if (!(format->findCString(kKeyDecoderComponent, &componentName))) {
          componentName = "none";
    }
    if (!strncmp(componentName, "OMX.qcom.", 9)) {
        mPauseRequired = true;
    } else {
        mPauseRequired = false;
    }

    return OK;
}