//----------------------------------------------------------------------------- void android_audioRecorder_useRecordEventMask(CAudioRecorder *ar) { IRecord *pRecordItf = &ar->mRecord; SLuint32 eventFlags = pRecordItf->mCallbackEventsMask; if (ar->mAudioRecord == 0) { return; } if ((eventFlags & SL_RECORDEVENT_HEADATMARKER) && (pRecordItf->mMarkerPosition != 0)) { ar->mAudioRecord->setMarkerPosition((uint32_t)((((int64_t)pRecordItf->mMarkerPosition * sles_to_android_sampleRate(ar->mSampleRateMilliHz)))/1000)); } else { // clear marker ar->mAudioRecord->setMarkerPosition(0); } if (eventFlags & SL_RECORDEVENT_HEADATNEWPOS) { SL_LOGV("pos update period %d", pRecordItf->mPositionUpdatePeriod); ar->mAudioRecord->setPositionUpdatePeriod( (uint32_t)((((int64_t)pRecordItf->mPositionUpdatePeriod * sles_to_android_sampleRate(ar->mSampleRateMilliHz)))/1000)); } else { // clear periodic update ar->mAudioRecord->setPositionUpdatePeriod(0); } if (eventFlags & SL_RECORDEVENT_HEADATLIMIT) { // FIXME support SL_RECORDEVENT_HEADATLIMIT SL_LOGD("[ FIXME: IRecord_SetCallbackEventsMask(SL_RECORDEVENT_HEADATLIMIT) on an " "SL_OBJECTID_AUDIORECORDER to be implemented ]"); } if (eventFlags & SL_RECORDEVENT_HEADMOVING) { // FIXME support SL_RECORDEVENT_HEADMOVING SL_LOGD("[ FIXME: IRecord_SetCallbackEventsMask(SL_RECORDEVENT_HEADMOVING) on an " "SL_OBJECTID_AUDIORECORDER to be implemented ]"); } if (eventFlags & SL_RECORDEVENT_BUFFER_FULL) { // nothing to do for SL_RECORDEVENT_BUFFER_FULL since this will not be encountered on // recording to buffer queues } if (eventFlags & SL_RECORDEVENT_HEADSTALLED) { // nothing to do for SL_RECORDEVENT_HEADSTALLED, callback event will be checked against mask // when AudioRecord::EVENT_OVERRUN is encountered } }
//----------------------------------------------------------------------------- void android_audioRecorder_getPosition(CAudioRecorder *ar, SLmillisecond *pPosMsec) { if ((NULL == ar) || (ar->mAudioRecord == 0)) { *pPosMsec = 0; } else { uint32_t positionInFrames; ar->mAudioRecord->getPosition(&positionInFrames); if (ar->mSampleRateMilliHz == UNKNOWN_SAMPLERATE) { *pPosMsec = 0; } else { *pPosMsec = ((int64_t)positionInFrames * 1000) / sles_to_android_sampleRate(ar->mSampleRateMilliHz); } } }
//-----------------------------------------------------------------------------
// Realizes the audio recorder: validates the data sink, then creates and
// configures the platform android::AudioRecord via set().
// NOTE(review): a second definition of android_audioRecorder_realize appears
// later in this file — these look like two versions of the same function
// concatenated; confirm which one belongs and remove the other, since
// duplicate definitions will not compile.
// 'async' is accepted for interface conformance but not used here.
// Returns SL_RESULT_SUCCESS, or SL_RESULT_CONTENT_UNSUPPORTED when the sink
// locator is not an Android simple buffer queue or AudioRecord init fails.
SLresult android_audioRecorder_realize(CAudioRecorder* ar, SLboolean async) {
    SL_LOGV("android_audioRecorder_realize(%p) entering", ar);

    SLresult result = SL_RESULT_SUCCESS;

    // initialize platform-independent CAudioRecorder fields
    // only a buffer-queue sink is supported by this implementation
    if (SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE != ar->mDataSink.mLocator.mLocatorType) {
        SL_LOGE(ERROR_RECORDER_SINK_MUST_BE_ANDROIDSIMPLEBUFFERQUEUE);
        return SL_RESULT_CONTENT_UNSUPPORTED;
    }
    // the following platform-independent fields have been initialized in CreateAudioRecorder()
    //    ar->mNumChannels
    //    ar->mSampleRateMilliHz

    SL_LOGV("new AudioRecord %u channels, %u mHz", ar->mNumChannels, ar->mSampleRateMilliHz);

    // currently nothing analogous to canUseFastTrack() for recording
    audio_input_flags_t policy = AUDIO_INPUT_FLAG_FAST;

    // initialize platform-specific CAudioRecorder fields
    ar->mAudioRecord = new android::AudioRecord();
    ar->mAudioRecord->set(ar->mRecordSource, // source
            sles_to_android_sampleRate(ar->mSampleRateMilliHz), // sample rate in Hertz
            AUDIO_FORMAT_PCM_16_BIT,   //FIXME use format from buffer queue sink
            sles_to_android_channelMaskIn(ar->mNumChannels, 0 /*no channel mask*/),
                                   // channel config
            0,                     //frameCount min
            audioRecorder_callback,// callback_t
            (void*)ar,             // user, callback data, here the AudioRecorder
            0,                     // notificationFrames
            false,                 // threadCanCallJava, note: this will prevent direct Java
                                   //   callbacks, but we don't want them in the recording loop
            0,                     // session ID
            android::AudioRecord::TRANSFER_CALLBACK,  // transfer type
            policy);               // audio_input_flags_t

    // a failed init leaves ar->mAudioRecord set; caller handles the error result
    if (android::NO_ERROR != ar->mAudioRecord->initCheck()) {
        SL_LOGE("android_audioRecorder_realize(%p) error creating AudioRecord object", ar);
        result = SL_RESULT_CONTENT_UNSUPPORTED;
    }

#ifdef MONITOR_RECORDING
    // debug-only: tee the captured audio to a file for inspection
    gMonitorFp = fopen(MONITOR_TARGET, "w");
    if (NULL == gMonitorFp) {
        SL_LOGE("error opening %s", MONITOR_TARGET);
    } else {
        SL_LOGE("recording to %s", MONITOR_TARGET); // SL_LOGE so it's always displayed
    }
#endif

    return result;
}
//----------------------------------------------------------------------------- SLresult android_audioRecorder_realize(CAudioRecorder* ar, SLboolean async) { SL_LOGV("android_audioRecorder_realize(%p) entering", ar); SLresult result = SL_RESULT_SUCCESS; // already checked in created and checkSourceSink assert(ar->mDataSink.mLocator.mLocatorType == SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE); const SLDataFormat_PCM *df_pcm = &ar->mDataSink.mFormat.mPCM; // the following platform-independent fields have been initialized in CreateAudioRecorder() // ar->mNumChannels // ar->mSampleRateMilliHz uint32_t sampleRate = sles_to_android_sampleRate(df_pcm->samplesPerSec); checkAndSetPerformanceModePre(ar); audio_input_flags_t policy; switch (ar->mPerformanceMode) { case ANDROID_PERFORMANCE_MODE_NONE: case ANDROID_PERFORMANCE_MODE_POWER_SAVING: policy = AUDIO_INPUT_FLAG_NONE; break; case ANDROID_PERFORMANCE_MODE_LATENCY_EFFECTS: policy = AUDIO_INPUT_FLAG_FAST; break; case ANDROID_PERFORMANCE_MODE_LATENCY: default: policy = (audio_input_flags_t)(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW); break; } SL_LOGV("Audio Record format: %dch(0x%x), %dbit, %dKHz", df_pcm->numChannels, df_pcm->channelMask, df_pcm->bitsPerSample, df_pcm->samplesPerSec / 1000000); // note that df_pcm->channelMask has already been validated during object creation. audio_channel_mask_t channelMask = sles_to_audio_input_channel_mask(df_pcm->channelMask); // To maintain backward compatibility with previous releases, ignore // channel masks that are not indexed. 
if (channelMask == AUDIO_CHANNEL_INVALID || audio_channel_mask_get_representation(channelMask) == AUDIO_CHANNEL_REPRESENTATION_POSITION) { channelMask = audio_channel_in_mask_from_count(df_pcm->numChannels); SL_LOGI("Emulating old channel mask behavior " "(ignoring positional mask %#x, using default mask %#x based on " "channel count of %d)", df_pcm->channelMask, channelMask, df_pcm->numChannels); } SL_LOGV("SLES channel mask %#x converted to Android mask %#x", df_pcm->channelMask, channelMask); // initialize platform-specific CAudioRecorder fields ar->mAudioRecord = new android::AudioRecord( ar->mRecordSource, // source sampleRate, // sample rate in Hertz sles_to_android_sampleFormat(df_pcm), // format channelMask, // channel mask android::String16(), // app ops 0, // frameCount audioRecorder_callback,// callback_t (void*)ar, // user, callback data, here the AudioRecorder 0, // notificationFrames AUDIO_SESSION_ALLOCATE, android::AudioRecord::TRANSFER_CALLBACK, // transfer type policy); // audio_input_flags_t android::status_t status = ar->mAudioRecord->initCheck(); if (android::NO_ERROR != status) { SL_LOGE("android_audioRecorder_realize(%p) error creating AudioRecord object; status %d", ar, status); // FIXME should return a more specific result depending on status result = SL_RESULT_CONTENT_UNSUPPORTED; ar->mAudioRecord.clear(); return result; } // update performance mode according to actual flags granted to AudioRecord checkAndSetPerformanceModePost(ar); // If there is a JavaAudioRoutingProxy associated with this recorder, hook it up... 
JNIEnv* j_env = NULL; jclass clsAudioRecord = NULL; jmethodID midRoutingProxy_connect = NULL; if (ar->mAndroidConfiguration.mRoutingProxy != NULL && (j_env = android::AndroidRuntime::getJNIEnv()) != NULL && (clsAudioRecord = j_env->FindClass("android/media/AudioRecord")) != NULL && (midRoutingProxy_connect = j_env->GetMethodID(clsAudioRecord, "deferred_connect", "(J)V")) != NULL) { j_env->ExceptionClear(); j_env->CallVoidMethod(ar->mAndroidConfiguration.mRoutingProxy, midRoutingProxy_connect, ar->mAudioRecord.get()); if (j_env->ExceptionCheck()) { SL_LOGE("Java exception releasing recorder routing object."); result = SL_RESULT_INTERNAL_ERROR; ar->mAudioRecord.clear(); return result; } } if (ar->mPerformanceMode != ANDROID_PERFORMANCE_MODE_LATENCY) { audio_session_t sessionId = ar->mAudioRecord->getSessionId(); // initialize AEC effect_descriptor_t *descriptor = &ar->mAcousticEchoCancellation.mAECDescriptor; if (memcmp(SL_IID_ANDROIDACOUSTICECHOCANCELLATION, &descriptor->type, sizeof(effect_uuid_t)) == 0) { if ((ar->mPerformanceMode != ANDROID_PERFORMANCE_MODE_LATENCY_EFFECTS) || (descriptor->flags & EFFECT_FLAG_HW_ACC_TUNNEL)) { SL_LOGV("Need to initialize AEC for AudioRecorder=%p", ar); android_aec_init(sessionId, &ar->mAcousticEchoCancellation); } } // initialize AGC descriptor = &ar->mAutomaticGainControl.mAGCDescriptor; if (memcmp(SL_IID_ANDROIDAUTOMATICGAINCONTROL, &descriptor->type, sizeof(effect_uuid_t)) == 0) { if ((ar->mPerformanceMode != ANDROID_PERFORMANCE_MODE_LATENCY_EFFECTS) || (descriptor->flags & EFFECT_FLAG_HW_ACC_TUNNEL)) { SL_LOGV("Need to initialize AGC for AudioRecorder=%p", ar); android_agc_init(sessionId, &ar->mAutomaticGainControl); } } // initialize NS descriptor = &ar->mNoiseSuppression.mNSDescriptor; if (memcmp(SL_IID_ANDROIDNOISESUPPRESSION, &descriptor->type, sizeof(effect_uuid_t)) == 0) { if ((ar->mPerformanceMode != ANDROID_PERFORMANCE_MODE_LATENCY_EFFECTS) || (descriptor->flags & EFFECT_FLAG_HW_ACC_TUNNEL)) { SL_LOGV("Need to 
initialize NS for AudioRecorder=%p", ar); android_ns_init(sessionId, &ar->mNoiseSuppression); } } } return result; }