// One-time initialization of this AudioRecord.
// Validates the requested capture parameters, caches them in member state,
// optionally creates the callback thread, then opens the server-side
// IAudioRecord (openRecord_l).  On any failure the object is left unusable
// and an error is returned; mAudioRecord becomes non-null only on success.
//
// Returns:
//   NO_ERROR          on success
//   BAD_VALUE         for an invalid parameter combination
//   INVALID_OPERATION if set() already completed successfully on this object
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
            inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
            sessionId, transferType, flags);

    // Resolve TRANSFER_DEFAULT to a concrete transfer mode, and reject
    // combinations that cannot work (callback mode without a callback).
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // Everything below mutates shared member state, so hold the object lock.
    AutoMutex lock(mLock);

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM and compress formats only
    if (format != AUDIO_FORMAT_PCM_16_BIT &&
            !audio_is_compress_voip_format(format) &&
            !audio_is_compress_capture_format(format)) {
        ALOGE("Format %#x is not supported", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    // For linear PCM the frame size is per-sample bytes times channels;
    // for compressed data a "frame" is a single byte.
    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;
    mCbf = cbf;

    // Spawn the callback thread only if the client supplied a callback.
    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/);
    if (status != NO_ERROR) {
        // Roll back the callback thread so the failed object holds no resources.
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    // Success: publish initial client-visible state.
    mStatus = NO_ERROR;
    mActive = false;
    mUserData = user;
    // TODO: add audio hardware input latency here
    // Latency estimate in ms from the negotiated buffer size alone.
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}
// checks if the IO profile is compatible with specified parameters. // Sampling rate, format and channel mask must be specified in order to // get a valid a match bool IOProfile::isCompatibleProfile(audio_devices_t device, String8 address, uint32_t samplingRate, uint32_t *updatedSamplingRate, audio_format_t format, audio_format_t *updatedFormat, audio_channel_mask_t channelMask, audio_channel_mask_t *updatedChannelMask, uint32_t flags) const { const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE; const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK; ALOG_ASSERT(isPlaybackThread != isRecordThread); if (device != AUDIO_DEVICE_NONE) { // just check types if multiple devices are selected if (popcount(device & ~AUDIO_DEVICE_BIT_IN) > 1) { if ((mSupportedDevices.types() & device) != device) { return false; } } else if (mSupportedDevices.getDevice(device, address) == 0) { return false; } } if (samplingRate == 0) { return false; } uint32_t myUpdatedSamplingRate = samplingRate; if (isPlaybackThread && checkExactSamplingRate(samplingRate) != NO_ERROR) { return false; } if (isRecordThread && checkCompatibleSamplingRate(samplingRate, &myUpdatedSamplingRate) != NO_ERROR) { return false; } if (!audio_is_valid_format(format)) { return false; } if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) { return false; } audio_format_t myUpdatedFormat = format; if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) { return false; } if (isPlaybackThread && (!audio_is_output_channel(channelMask) || checkExactChannelMask(channelMask) != NO_ERROR)) { return false; } audio_channel_mask_t myUpdatedChannelMask = channelMask; if (isRecordThread && (!audio_is_input_channel(channelMask) || checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) { return false; } if (isPlaybackThread && (mFlags & flags) != flags) { return false; } // The only input flag that is 
allowed to be different is the fast flag. // An existing fast stream is compatible with a normal track request. // An existing normal stream is compatible with a fast track request, // but the fast request will be denied by AudioFlinger and converted to normal track. if (isRecordThread && ((mFlags ^ flags) & ~AUDIO_INPUT_FLAG_FAST)) { return false; } if (updatedSamplingRate != NULL) { *updatedSamplingRate = myUpdatedSamplingRate; } if (updatedFormat != NULL) { *updatedFormat = myUpdatedFormat; } if (updatedChannelMask != NULL) { *updatedChannelMask = myUpdatedChannelMask; } return true; }
// One-time initialization of this AudioRecord (audio_attributes revision).
// Validates and caches the capture parameters, resolves the effective client
// uid/pid for permission attribution, optionally creates the callback
// thread, then opens the server-side IAudioRecord (openRecord_l).
// mAudioRecord becomes non-null only after success.
//
// Returns:
//   NO_ERROR          on success
//   BAD_VALUE         for an invalid parameter combination
//   INVALID_OPERATION if set() already completed successfully on this object
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        audio_input_flags_t flags,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
          "uid %d, pid %d",
          inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);

    // Resolve TRANSFER_DEFAULT to a concrete transfer mode, and reject
    // combinations that cannot work (callback mode without a callback).
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // invariant that mAudioRecord != 0 is true only after set() returns successfully
    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // Either synthesize attributes from the legacy inputSource, or take the
    // caller-supplied attributes verbatim.
    if (pAttributes == NULL) {
        memset(&mAttributes, 0, sizeof(audio_attributes_t));
        mAttributes.source = inputSource;
    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioRecord with attributes: source=%d flags=0x%x tags=[%s]",
              mAttributes.source, mAttributes.flags, mAttributes.tags);
    }

    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    // AudioFlinger capture only supports linear PCM
    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
        ALOGE("Format %#x is not linear pcm", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    mChannelCount = channelCount;

    // Frame size: per-sample bytes times channels for linear PCM, one byte
    // otherwise (the non-PCM branch is unreachable given the check above,
    // but kept for symmetry with other revisions).
    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    } else {
        mFrameSize = sizeof(uint8_t);
    }

    // mFrameCount is initialized in openRecord_l
    mReqFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    // mNotificationFramesAct is initialized in openRecord_l

    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    // Trust caller-provided uid/pid only for in-process callers; otherwise
    // attribute the record to the binder caller.
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }

    // Keep the originally requested flags so they can be retried after a
    // server-side downgrade.
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioRecord
    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);

    if (status != NO_ERROR) {
        // Roll back the callback thread so the failed object holds no resources.
        if (mAudioRecordThread != 0) {
            mAudioRecordThread->requestExit();   // see comment in AudioRecord.h
            mAudioRecordThread->requestExitAndWait();
            mAudioRecordThread.clear();
        }
        return status;
    }

    // Success: publish initial client-visible state.
    mStatus = NO_ERROR;
    mUserData = user;
    // TODO: add audio hardware input latency here
    // Latency estimate in ms from the negotiated buffer size alone.
    mLatency = (1000 * mFrameCount) / mSampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, -1);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;
    mFramesRead = 0;
    mFramesReadServerOffset = 0;

    return NO_ERROR;
}
// One-time initialization of this AudioRecord (legacy revision).
// Applies defaults, validates parameters, obtains an audio input handle from
// AudioSystem, sizes the buffer against the minimum frame count, and opens
// the server-side IAudioRecord.  mAudioRecord is non-null only on success.
//
// Returns NO_ERROR on success, BAD_VALUE / NO_INIT / a propagated status on
// failure, or INVALID_OPERATION if set() already succeeded on this object.
status_t AudioRecord::set(
        int inputSource,
        uint32_t sampleRate,
        int format,
        uint32_t channelMask,
        int frameCount,
        uint32_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId)
{
    LOGV("set(): sampleRate %d, channelMask %d, frameCount %d",sampleRate, channelMask, frameCount);

    // All member state below is guarded by the object lock.
    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        return INVALID_OPERATION;
    }

    // Apply defaults for unspecified source, rate and format.
    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }

    if (sampleRate == 0) {
        sampleRate = DEFAULT_SAMPLE_RATE;
    }
    // these below should probably come from the audioFlinger too...
    if (format == 0) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    // validate parameters
    if (!audio_is_valid_format(format)) {
        LOGE("Invalid format");
        return BAD_VALUE;
    }

    if (!audio_is_input_channel(channelMask)) {
        return BAD_VALUE;
    }

    int channelCount = popcount(channelMask);

    // Session id 0 means "allocate a new session".
    if (sessionId == 0 ) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    LOGV("set(): mSessionId %d", mSessionId);

    // Ask the policy manager for an input handle matching these parameters.
    audio_io_handle_t input = AudioSystem::getInput(inputSource,
                                                    sampleRate,
                                                    format,
                                                    channelMask,
                                                    (audio_in_acoustics_t)flags,
                                                    mSessionId);
    if (input == 0) {
        LOGE("Could not get audio input for record source %d", inputSource);
        return BAD_VALUE;
    }

    // validate framecount
    int minFrameCount = 0;
    status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelCount);
    if (status != NO_ERROR) {
        return status;
    }
    LOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        return BAD_VALUE;
    }

    // Default the notification period to half the buffer.
    if (notificationFrames == 0) {
        notificationFrames = frameCount/2;
    }

    // create the IAudioRecord
    status = openRecord_l(sampleRate, format, channelMask, frameCount, flags, input);
    if (status != NO_ERROR) {
        return status;
    }

    if (cbf != 0) {
        mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava);
        // NOTE(review): failing here returns NO_INIT after openRecord_l has
        // already succeeded, leaving the server-side record open with no
        // visible rollback — verify whether the destructor cleans this up.
        if (mClientRecordThread == 0) {
            return NO_INIT;
        }
    }

    // Success: publish initial client-visible state.
    mStatus = NO_ERROR;

    mFormat = format;
    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount;
    mChannelCount = (uint8_t)channelCount;
    mChannelMask = channelMask;
    mActive = 0;
    mCbf = cbf;
    mNotificationFrames = notificationFrames;
    mRemainingFrames = notificationFrames;
    mUserData = user;
    // TODO: add audio hardware input latency here
    // Latency estimate in ms from the negotiated buffer size alone.
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mInputSource = (uint8_t)inputSource;
    mFlags = flags;
    mInput = input;
    AudioSystem::acquireAudioSessionId(mSessionId);

    return NO_ERROR;
}
// One-time initialization of this AudioRecord (vendor/QCOM-conditional
// revision).  Parameter validation and frame-size / minimum-buffer
// computation vary with QCOM_HARDWARE and QCOM_DIRECTTRACK; the overall
// flow matches the other revisions: validate, cache member state, open the
// server-side IAudioRecord, then publish initial state.
//
// Returns NO_ERROR on success, BAD_VALUE / NO_INIT / a propagated status on
// failure, or INVALID_OPERATION if set() already succeeded on this object.
status_t AudioRecord::set(
        audio_source_t inputSource,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        callback_t cbf,
        void* user,
        int notificationFrames,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        audio_input_flags_t flags)
{
    ALOGV("sampleRate %u, channelMask %#x, format %d", sampleRate, channelMask, format);
    ALOGV("inputSource %d", inputSource);

    // Resolve TRANSFER_DEFAULT to a concrete transfer mode, and reject
    // combinations that cannot work (callback mode without a callback).
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
            frameCount);

    // All member state below is guarded by the object lock.
    AutoMutex lock(mLock);

    if (mAudioRecord != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    if (inputSource == AUDIO_SOURCE_DEFAULT) {
        inputSource = AUDIO_SOURCE_MIC;
    }
    mInputSource = inputSource;

    if (sampleRate == 0) {
        ALOGE("Invalid sample rate %u", sampleRate);
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }

    // Format restriction: with QCOM_HARDWARE (and not DIRECTTRACK) compress
    // voip/capture formats are additionally allowed; without any QCOM macro
    // only 16-bit PCM passes; with QCOM_DIRECTTRACK no restriction applies.
    // The brace structure is completed across the preprocessor branches —
    // edit with care.
#if defined(QCOM_HARDWARE) && !defined(QCOM_DIRECTTRACK)
    if (format != AUDIO_FORMAT_PCM_16_BIT &&
            !audio_is_compress_voip_format(format) &&
            !audio_is_compress_capture_format(format)) {
#else
#ifndef QCOM_DIRECTTRACK
    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
    if (format != AUDIO_FORMAT_PCM_16_BIT) {
#endif
#endif
#ifndef QCOM_DIRECTTRACK
        ALOGE("Format %d is not supported", format);
        return BAD_VALUE;
    }
#endif

    mFormat = format;

    if (!audio_is_input_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

#ifdef QCOM_DIRECTTRACK
    // DIRECTTRACK path: derive the minimum buffer from the HAL-reported
    // input buffer size (doubled) instead of getMinFrameCount().
    mFrameSize = frameSize();
    size_t inputBuffSizeInBytes = -1;
    status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask,
                                                      &inputBuffSizeInBytes);
    if (status != NO_ERROR) {
        ALOGE("AudioSystem could not query the input buffer size; status %d", status);
        return NO_INIT;
    }
    if (inputBuffSizeInBytes == 0) {
        ALOGE("Unsupported configuration: sampleRate %u, format %d, channelMask %#x",
            sampleRate, format, channelMask);
        return BAD_VALUE;
    }
    int minFrameCount = (inputBuffSizeInBytes * 2)/mFrameSize;
#else
    // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
#ifdef QCOM_HARDWARE
    if (audio_is_linear_pcm(format))
        mFrameSize = channelCount * audio_bytes_per_sample(format);
    else
        mFrameSize = sizeof(uint8_t);
#else
    mFrameSize = channelCount * audio_bytes_per_sample(format);
#endif
    // validate framecount
    size_t minFrameCount = 0;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
            sampleRate, format, channelMask);
    if (status != NO_ERROR) {
        ALOGE("getMinFrameCount() failed; status %d", status);
        return status;
    }
#endif
    ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);

    if (frameCount == 0) {
        frameCount = minFrameCount;
    } else if (frameCount < minFrameCount) {
        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
        return BAD_VALUE;
    }
    mFrameCount = frameCount;

    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;

    // Session id 0 means "allocate a new session".
    if (sessionId == 0 ) {
        mSessionId = AudioSystem::newAudioSessionId();
    } else {
        mSessionId = sessionId;
    }
    ALOGV("set(): mSessionId %d", mSessionId);

    mFlags = flags;

    // create the IAudioRecord
    status = openRecord_l(0 /*epoch*/);
    if (status) {
        return status;
    }

    if (cbf != NULL) {
        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
        mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
    }

    // Success: publish initial client-visible state.
    mStatus = NO_ERROR;

    // Update buffer size in case it has been limited by AudioFlinger during track creation
    mFrameCount = mCblk->frameCount_;

    mActive = false;
    mCbf = cbf;
    mRefreshRemaining = true;
    mUserData = user;
    // TODO: add audio hardware input latency here
    // Latency estimate in ms from the negotiated buffer size alone.
    mLatency = (1000*mFrameCount) / sampleRate;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInOverrun = false;

    return NO_ERROR;
}

#ifdef QCOM_DIRECTTRACK
// Accessor for the capture source cached by set().
audio_source_t AudioRecord::inputSource() const
{
    return mInputSource;
}
#endif

// -------------------------------------------------------------------------

// Starts (or resumes) capture.  Idempotent while already active.  Resets the
// client-visible position to 0, asks the server to start, restores the
// record if the control block was invalidated, and on success resumes the
// callback thread (or boosts the caller's priority when there is none).
status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
{
    ALOGV("start, sync event %d trigger session %d", event, triggerSession);

    AutoMutex lock(mLock);
    if (mActive) {
        return NO_ERROR;
    }

    // reset current position as seen by client to 0
    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());

    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        ALOGV("mAudioRecord->start()");
        status = mAudioRecord->start(event, triggerSession);
        // A dead server object means the control block must be recreated.
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreRecord_l("start");
    }
    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
    } else {
        mActive = true;
        sp<AudioRecordThread> t = mAudioRecordThread;
        if (t != 0) {
            t->resume();
        } else {
            // No callback thread: raise this thread's priority for the
            // duration of the recording, saving the old values for stop().
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    }

    return status;
}