// Checks if the IO profile is compatible with the specified parameters.
// Sampling rate, format and channel mask must be specified in order to
// get a valid match.
bool IOProfile::isCompatibleProfile(audio_devices_t device,
                                    const String8& address,
                                    uint32_t samplingRate,
                                    uint32_t *updatedSamplingRate,
                                    audio_format_t format,
                                    audio_format_t *updatedFormat,
                                    audio_channel_mask_t channelMask,
                                    audio_channel_mask_t *updatedChannelMask,
                                    uint32_t flags) const
{
    const bool isPlaybackThread =
            getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SOURCE;
    const bool isRecordThread =
            getType() == AUDIO_PORT_TYPE_MIX && getRole() == AUDIO_PORT_ROLE_SINK;
    ALOG_ASSERT(isPlaybackThread != isRecordThread);


    if (device != AUDIO_DEVICE_NONE) {
        // just check types if multiple devices are selected
        if (popcount(device & ~AUDIO_DEVICE_BIT_IN) > 1) {
            if ((mSupportedDevices.types() & device) != device) {
                return false;
            }
        } else if (mSupportedDevices.getDevice(device, address) == 0) {
            return false;
        }
    }

    if (!audio_is_valid_format(format) ||
            (isPlaybackThread && (samplingRate == 0 || !audio_is_output_channel(channelMask))) ||
            (isRecordThread && (!audio_is_input_channel(channelMask)))) {
        return false;
    }

    audio_format_t myUpdatedFormat = format;
    audio_channel_mask_t myUpdatedChannelMask = channelMask;
    uint32_t myUpdatedSamplingRate = samplingRate;
    if (isRecordThread) {
        if (checkCompatibleAudioProfile(
                myUpdatedSamplingRate, myUpdatedChannelMask, myUpdatedFormat) != NO_ERROR) {
            return false;
        }
    } else {
        if (checkExactAudioProfile(samplingRate, channelMask, format) != NO_ERROR) {
            return false;
        }
    }

    if (isPlaybackThread && (getFlags() & flags) != flags) {
        return false;
    }
    // The only input flag that is allowed to be different is the fast flag.
    // An existing fast stream is compatible with a normal track request.
    // An existing normal stream is compatible with a fast track request,
    // but the fast request will be denied by AudioFlinger and converted to normal track.
    if (isRecordThread && ((getFlags() ^ flags) &
            ~AUDIO_INPUT_FLAG_FAST)) {
        return false;
    }

    if (updatedSamplingRate != NULL) {
        *updatedSamplingRate = myUpdatedSamplingRate;
    }
    if (updatedFormat != NULL) {
        *updatedFormat = myUpdatedFormat;
    }
    if (updatedChannelMask != NULL) {
        *updatedChannelMask = myUpdatedChannelMask;
    }
    return true;
}
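
// Illustrative sketch (not part of the example above): how audio policy code might
// probe this profile for an exact 48 kHz stereo PCM playback match. `profile` is an
// assumed sp<IOProfile> taken from a hardware module's output profiles; the updated*
// variables only matter for capture, where checkCompatibleAudioProfile() may adjust
// them to the closest supported values.
uint32_t updatedSamplingRate = 0;
audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_NONE;

bool compatible = profile->isCompatibleProfile(
        AUDIO_DEVICE_OUT_SPEAKER,
        String8(""),                 // no specific device address
        48000,
        &updatedSamplingRate,
        AUDIO_FORMAT_PCM_16_BIT,
        &updatedFormat,
        AUDIO_CHANNEL_OUT_STEREO,
        &updatedChannelMask,
        AUDIO_OUTPUT_FLAG_NONE);
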
Example #2
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelMask,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{

    LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());

    AutoMutex lock(mLock);
    if (mAudioTrack != 0) {
        LOGE("Track already in use");
        return INVALID_OPERATION;
    }

    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (sampleRate == 0) {
        sampleRate = afSampleRate;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (channelMask == 0) {
        channelMask = AUDIO_CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    if (!audio_is_linear_pcm(format)) {
        flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
    }

    if (!audio_is_output_channel(channelMask)) {
        LOGE("Invalid channel mask");
        return BAD_VALUE;
    }
    uint32_t channelCount = popcount(channelMask);

    audio_io_handle_t output = AudioSystem::getOutput(
                                    (audio_stream_type_t)streamType,
                                    sampleRate, format, channelMask,
                                    (audio_policy_output_flags_t)flags);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0;
    mFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mSessionId = sessionId;
    mAuxEffectId = 0;

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                  sampleRate,
                                  (uint32_t)format,
                                  (uint32_t)channelMask,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  true);

    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        if (mAudioTrackThread == 0) {
          LOGE("Could not create callback thread");
          return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

    mStreamType = streamType;
    mFormat = (uint32_t)format;
    mChannelMask = (uint32_t)channelMask;
    mChannelCount = channelCount;
    mSharedBuffer = sharedBuffer;
    mMuted = false;
    mActive = 0;
    mCbf = cbf;
    mUserData = user;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlushed = false;
    mFlags = flags;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mRestoreStatus = NO_ERROR;
    return NO_ERROR;
}
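
// Illustrative sketch of driving this overload: a streaming (non-shared-buffer)
// music track. audioCallback is a hypothetical application callback matching
// AudioTrack::callback_t; treat the scaffolding around set() as an assumption.
#include <media/AudioTrack.h>
using namespace android;

static void audioCallback(int event, void* user, void* info) {
    // Fill the buffer described by info when event is EVENT_MORE_DATA, etc.
}

static status_t setupMusicTrack(sp<AudioTrack>& track) {
    track = new AudioTrack();
    status_t err = track->set(AUDIO_STREAM_MUSIC,
                              44100,                     // sampleRate
                              AUDIO_FORMAT_PCM_16_BIT,
                              AUDIO_CHANNEL_OUT_STEREO,
                              0,                         // frameCount: 0 = use the minimum
                              0,                         // flags
                              audioCallback,
                              NULL,                      // user cookie
                              0,                         // notificationFrames
                              0,                         // sharedBuffer: none, streaming mode
                              false,                     // threadCanCallJava
                              0);                        // sessionId (default)
    if (err == NO_ERROR) {
        track->start();
    }
    return err;
}
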
Example #3
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    AutoMutex lock(mLock);

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
        ALOGE("Format %#x is not supported", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mActive = false;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}
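
// Illustrative sketch of a synchronous-read configuration with this overload;
// constructor details vary across releases, so treat the surrounding scaffolding
// as an assumption.
#include <media/AudioRecord.h>
using namespace android;

static status_t setupCapture(sp<AudioRecord>& record) {
    record = new AudioRecord();
    status_t err = record->set(AUDIO_SOURCE_MIC,
                               48000,
                               AUDIO_FORMAT_PCM_16_BIT,     // the only format accepted above
                               AUDIO_CHANNEL_IN_MONO,
                               0,                           // frameCount: use the minimum
                               NULL,                        // cbf: no callback thread
                               NULL,                        // user
                               0,                           // notificationFrames
                               false,                       // threadCanCallJava
                               AUDIO_SESSION_ALLOCATE,
                               AudioRecord::TRANSFER_SYNC,  // data pulled via read()
                               AUDIO_INPUT_FLAG_NONE);
    return err;
}
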
Example #4
status_t AudioTrack::set(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channels,
        uint32_t flags,
        int sessionId,
        int lpaSessionId)
{

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }
    // force direct flag if format is not linear PCM
    if (!audio_is_linear_pcm(format)) {
        flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
    }

    audio_io_handle_t output = AudioSystem::getSession((audio_stream_type_t)streamType,
            format, (audio_policy_output_flags_t)flags, lpaSessionId);

    if (output == 0) {
        LOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }
    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mChannelCount = 2;
    mSharedBuffer = NULL;
    mMuted = false;
    mActive = 0;
    mCbf = NULL;
    mNotificationFramesReq = 0;
    mRemainingFrames = 0;
    mUserData = NULL;
    mLatency = 0;
    mLoopCount = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mFlags = flags;
    mAudioTrack = NULL;
    mAudioSession = output;

    mSessionId = sessionId;
    mAuxEffectId = 0;

    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
       LOGE("Could not get audioflinger");
       return NO_INIT;
    }
    status_t status;
    audioFlinger->createSession(getpid(),
                                sampleRate,
                                channels,
                                &mSessionId,
                                &status);
    if (status != NO_ERROR) {
        LOGE("createSession returned with status %d", status);
    }
    /* Make the track active and start output */
    android_atomic_or(1, &mActive);
    AudioSystem::startOutput(output, (audio_stream_type_t)mStreamType);
    LOGV("AudioTrack::set() - Started output(%d)",output);
    return NO_ERROR;
}
status_t AudioRecord::set(
        int inputSource,
        uint32_t sampleRate,
        int format,
        uint32_t channelMask,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId)
{

    LOGV("set(): sampleRate %d, channelMask %d, frameCount %d",sampleRate, channelMask, frameCount);

    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }

    if (sampleRate == 0) {
        sampleRate = DEFAULT_SAMPLE_RATE;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }

    if (!audio_is_input_channel(channelMask)) {
        return BAD_VALUE;
    }

    int channelCount = popcount(channelMask);

    if (sessionId == 0) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    LOGV("set(): mSessionId %d", mSessionId);

    audio_io_handle_t input = AudioSystem::getInput(inputSource,
                                                    sampleRate,
                                                    format,
                                                    channelMask,
                                                    (audio_in_acoustics_t)flags,
                                                    mSessionId);
    if (input == 0) {
        LOGE("Could not get audio input for record source %d", inputSource);
        return BAD_VALUE;
    }

    // validate framecount
    int minFrameCount = 0;
    status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelCount);
    if (status != NO_ERROR) {
        return status;
    }
    LOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        return BAD_VALUE;
    }

    if (notificationFrames == 0) {
        notificationFrames = frameCount/2;
    }

    // create the IAudioRecord
    status = openRecord_l(sampleRate, format, channelMask,
                        frameCount, flags, input);
    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava);
        if (mClientRecordThread == 0) {
            return NO_INIT;
        }
    }

    mStatus = NO_ERROR;

    mFormat = format;
    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount;
    mChannelCount = (uint8_t)channelCount;
    mChannelMask = channelMask;
    mActive = 0;
    mCbf = cbf;
    mNotificationFrames = notificationFrames;
    mRemainingFrames = notificationFrames;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mInputSource = (uint8_t)inputSource;
    mFlags = flags;
    mInput = input;
    AudioSystem::acquireAudioSessionId(mSessionId);

    return NO_ERROR;
}
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
          "uid %d, pid %d",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (pAttributes == NULL) {
        memset(&mAttributes, 0, sizeof(audio_attributes_t));
        mAttributes.source = inputSource;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
              mAttributes.source, mAttributes.flags, mAttributes.tags);
    }

    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    // AudioFlinger capture only supports linear PCM
    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
        ALOGE("Format %#x is not linear pcm", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }

    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);

    if (status != NO_ERROR) {
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000 * mFrameCount) / mSampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return NO_ERROR;
}
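
// Illustrative fragment for the attribute-aware overload above: when pAttributes is
// non-NULL the capture source comes from attr.source and the inputSource argument is
// effectively ignored. `record` is assumed to be an sp<AudioRecord> constructed with
// an opPackageName, as this version requires.
audio_attributes_t attr = {};
attr.source = AUDIO_SOURCE_VOICE_RECOGNITION;

status_t err = record->set(AUDIO_SOURCE_DEFAULT,            // superseded by attr.source
                           16000,
                           AUDIO_FORMAT_PCM_16_BIT,
                           AUDIO_CHANNEL_IN_MONO,
                           0,                                // frameCount: use the minimum
                           NULL,                             // cbf
                           NULL,                             // user
                           0,                                // notificationFrames
                           false,                            // threadCanCallJava
                           AUDIO_SESSION_ALLOCATE,
                           AudioRecord::TRANSFER_SYNC,
                           AUDIO_INPUT_FLAG_NONE,
                           -1,                               // uid: default to caller
                           -1,                               // pid: default to caller
                           &attr);
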
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("sampleRate %u, channelMask %#x, format %d", sampleRate, channelMask, format);
    ALOGV("inputSource %d", inputSource);
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
            frameCount);

    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }
#if defined(QCOM_HARDWARE) && !defined(QCOM_DIRECTTRACK)
    if (format != AUDIO_FORMAT_PCM_16_BIT &&
           !audio_is_compress_voip_format(format) &&
           !audio_is_compress_capture_format(format)) {
#else
#ifndef QCOM_DIRECTTRACK
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
#endif
#endif
#ifndef QCOM_DIRECTTRACK
        ALOGE("Format %d is not supported", format);
        return BAD_VALUE;
    }
#endif

    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

#ifdef QCOM_DIRECTTRACK
    mFrameSize = frameSize();

    size_t inputBuffSizeInBytes = -1;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &inputBuffSizeInBytes);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }

    if (inputBuffSizeInBytes == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }

    int minFrameCount = (inputBuffSizeInBytes * 2) / mFrameSize;
#else
    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    else
        mFrameSize = sizeof(uint8_t);
#else
    mFrameSize = channelCount * audio_bytes_per_sample(format);
#endif

    // validate framecount
    size_t minFrameCount = 0;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
            sampleRate, format, channelMask);
    if (status != NO_ERROR) {
        ALOGE("getMinFrameCount() failed; status %d", status);
        return status;
    }
#endif

    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
        return BAD_VALUE;
    }
    mFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;

    if (sessionId == 0) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;

    // create the IAudioRecord
    status = openRecord_l(0 /*epoch*/);
    if (status) {
        return status;
    }

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    mStatus = NO_ERROR;

    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount_;

    mActive = false;
    mCbf = cbf;
    mRefreshRemaining = true;
    mUserData = user;
    // TODO: add audio hardware input latency here
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}

#ifdef QCOM_DIRECTTRACK
audio_source_t AudioRecord::inputSource() const
{
    return mInputSource;
}
#endif

// -------------------------------------------------------------------------

status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
    ALOGV("start, sync event %d trigger session %d", event, triggerSession);

    AutoMutex lock(mLock);
    if (mActive) {
        return NO_ERROR;
    }

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        ALOGV("mAudioRecord->start()");
        status = mAudioRecord->start(event, triggerSession);
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
    } else {
        mActive = true;
        sp<AudioRecordThread> t = mAudioRecordThread;
        if (t != 0) {
            t->resume();
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    }

    return status;
}
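
// Illustrative capture sequence built around start(): `record` is assumed to be a
// successfully configured sp<AudioRecord> in TRANSFER_SYNC mode.
if (record->start(AudioSystem::SYNC_EVENT_NONE, 0 /*triggerSession*/) == NO_ERROR) {
    uint8_t buffer[4096];
    ssize_t bytesRead = record->read(buffer, sizeof(buffer));
    if (bytesRead > 0) {
        // consume bytesRead bytes of captured PCM here
    }
    record->stop();
}
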