// Audio-sink pull callback: fills up to |size| bytes of PCM into |data| by
// draining MediaBuffers read from mSource. Returns the number of bytes
// actually written (0 once EOS has been reached).
// Threading: called on the audio callback thread; mLock guards the seek /
// buffer-handoff state shared with the control thread.
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    if (mNumFramesPlayed == 0) {
        LOGV("AudioCallback");
    }

    if (mReachedEOS) {
        return 0;
    }

    size_t size_done = 0;
    size_t size_remaining = size;
    while (size_remaining > 0) {
        MediaSource::ReadOptions options;

        {
            // Seek handling must happen under mLock; it may discard the
            // stashed first buffer and any partially consumed input buffer.
            Mutex::Autolock autoLock(mLock);

            if (mSeeking) {
                if (mIsFirstBuffer) {
                    if (mFirstBuffer != NULL) {
                        mFirstBuffer->release();
                        mFirstBuffer = NULL;
                    }
                    mIsFirstBuffer = false;
                }

                options.setSeekTo(mSeekTimeUs);

                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }

                mSeeking = false;
                if (mObserver) {
                    mObserver->postAudioSeekComplete();
                }
            }
        }

        if (mInputBuffer == NULL) {
            status_t err;

            if (mIsFirstBuffer) {
                // Consume the buffer stashed by start() instead of reading.
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                err = mFirstBufferResult;

                mIsFirstBuffer = false;
            } else {
                // NOTE: read() is intentionally done without holding mLock.
                err = mSource->read(&mInputBuffer, &options);
            }

            // Invariant: success iff a buffer was produced.
            CHECK((err == OK && mInputBuffer != NULL)
                  || (err != OK && mInputBuffer == NULL));

            Mutex::Autolock autoLock(mLock);

            if (err != OK) {
                if (mObserver && !mReachedEOS) {
                    mObserver->postAudioEOS();
                }

                mReachedEOS = true;
                mFinalStatus = err;
                break;
            }

            CHECK(mInputBuffer->meta_data()->findInt64(
                        kKeyTime, &mPositionTimeMediaUs));

            // Real-time position is derived from frames handed to the sink,
            // including the bytes copied so far in this callback.
            mPositionTimeRealUs =
                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
                    / mSampleRate;

            LOGV("buffer->size() = %d, "
                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
                 mInputBuffer->range_length(),
                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
        }

        if (mInputBuffer->range_length() == 0) {
            // Zero-length buffers carry no PCM; drop and read again.
            mInputBuffer->release();
            mInputBuffer = NULL;

            continue;
        }

        size_t copy = size_remaining;
        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy((char *)data + size_done,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);

        // Keep the unconsumed remainder for the next callback.
        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
                                mInputBuffer->range_length() - copy);

        size_done += copy;
        size_remaining -= copy;
    }

    Mutex::Autolock autoLock(mLock);
    mNumFramesPlayed += size_done / mFrameSize;

    return size_done;
}
// Decodes a single video frame from |source| with an OMX codec and converts
// it to RGB565. If |frameTimeUs| < 0 the track's kKeyThumbnailTime (or 0) is
// used; otherwise the decoder seeks to |frameTimeUs| with |seekMode|.
// Returns a heap-allocated VideoFrame the caller owns, or NULL on failure.
static VideoFrame *extractVideoFrameWithCodecFlags(
        OMXClient *client,
        const sp<MetaData> &trackMeta,
        const sp<MediaSource> &source,
        uint32_t flags,
        int64_t frameTimeUs,
        int seekMode) {
    sp<MediaSource> decoder = OMXCodec::Create(
            client->interface(), source->getFormat(), false, source,
            NULL, flags | OMXCodec::kClientNeedsFramebuffer);

    if (decoder.get() == NULL) {
        ALOGV("unable to instantiate video decoder.");
        return NULL;
    }

    status_t err = decoder->start();
    if (err != OK) {
        ALOGW("OMXCodec::start returned error %d (0x%08x)\n", err, err);
        return NULL;
    }

    // Read one output buffer, ignore format change notifications
    // and spurious empty buffers.

    MediaSource::ReadOptions options;
    if (seekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC
            || seekMode > MediaSource::ReadOptions::SEEK_CLOSEST) {
        ALOGE("Unknown seek mode: %d", seekMode);
        // BUGFIX: the decoder was successfully started above; stop it before
        // bailing out so the codec component isn't leaked in started state.
        decoder->stop();
        return NULL;
    }

    MediaSource::ReadOptions::SeekMode mode =
            static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);

    int64_t thumbNailTime;
    if (frameTimeUs < 0) {
        // No explicit time requested: fall back to the container's thumbnail
        // hint, defaulting to the beginning of the stream.
        if (!trackMeta->findInt64(kKeyThumbnailTime, &thumbNailTime)
                || thumbNailTime < 0) {
            thumbNailTime = 0;
        }
        options.setSeekTo(thumbNailTime, mode);
    } else {
        thumbNailTime = -1;
        options.setSeekTo(frameTimeUs, mode);
    }

    MediaBuffer *buffer = NULL;
    do {
        if (buffer != NULL) {
            buffer->release();
            buffer = NULL;
        }
        err = decoder->read(&buffer, &options);
        // Only seek on the first read attempt.
        options.clearSeekTo();
    } while (err == INFO_FORMAT_CHANGED
             || (buffer != NULL && buffer->range_length() == 0));

    if (err != OK) {
        CHECK(buffer == NULL);

        ALOGV("decoding frame failed.");
        decoder->stop();

        return NULL;
    }

    ALOGV("successfully decoded video frame.");

    int32_t unreadable;
    if (buffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)
            && unreadable != 0) {
        // Secure/protected buffers: the CPU cannot read the pixels.
        ALOGV("video frame is unreadable, decoder does not give us access "
              "to the video data.");

        buffer->release();
        buffer = NULL;

        decoder->stop();

        return NULL;
    }

    int64_t timeUs;
    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
    if (thumbNailTime >= 0) {
        if (timeUs != thumbNailTime) {
            // Informational only: the decoder returned a nearby frame.
            const char *mime;
            CHECK(trackMeta->findCString(kKeyMIMEType, &mime));

            ALOGV("thumbNailTime = %lld us, timeUs = %lld us, mime = %s",
                 thumbNailTime, timeUs, mime);
        }
    }

    sp<MetaData> meta = decoder->getFormat();

    int32_t width, height;
    CHECK(meta->findInt32(kKeyWidth, &width));
    CHECK(meta->findInt32(kKeyHeight, &height));

    int32_t crop_left, crop_top, crop_right, crop_bottom;
    if (!meta->findRect(
                kKeyCropRect,
                &crop_left, &crop_top, &crop_right, &crop_bottom)) {
        // No crop rectangle published: use the full decoded frame.
        crop_left = crop_top = 0;
        crop_right = width - 1;
        crop_bottom = height - 1;
    }

    int32_t rotationAngle;
    if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
        rotationAngle = 0;  // By default, no rotation
    }

    VideoFrame *frame = new VideoFrame;
    frame->mWidth = crop_right - crop_left + 1;
    frame->mHeight = crop_bottom - crop_top + 1;
    frame->mDisplayWidth = frame->mWidth;
    frame->mDisplayHeight = frame->mHeight;
    frame->mSize = frame->mWidth * frame->mHeight * 2;  // RGB565: 2 bytes/px
    frame->mData = new uint8_t[frame->mSize];
    frame->mRotationAngle = rotationAngle;

    int32_t displayWidth, displayHeight;
    if (meta->findInt32(kKeyDisplayWidth, &displayWidth)) {
        frame->mDisplayWidth = displayWidth;
    }
    if (meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
        frame->mDisplayHeight = displayHeight;
    }

    int32_t srcFormat;
    CHECK(meta->findInt32(kKeyColorFormat, &srcFormat));

    ColorConverter converter(
            (OMX_COLOR_FORMATTYPE)srcFormat, OMX_COLOR_Format16bitRGB565);

    if (converter.isValid()) {
        err = converter.convert(
                (const uint8_t *)buffer->data() + buffer->range_offset(),
                width, height,
                crop_left, crop_top, crop_right, crop_bottom,
                frame->mData,
                frame->mWidth,
                frame->mHeight,
                0, 0, frame->mWidth - 1, frame->mHeight - 1);
    } else {
        ALOGE("Unable to instantiate color conversion from format 0x%08x to "
              "RGB565",
              srcFormat);

        err = ERROR_UNSUPPORTED;
    }

    buffer->release();
    buffer = NULL;

    decoder->stop();

    if (err != OK) {
        ALOGE("Colorconverter failed to convert frame.");

        delete frame;
        frame = NULL;
    }

    return frame;
}
// Stress-tests seek behaviour of the codec identified by |componentName| /
// |componentRole|: runs kNumIterations of randomized seeks (plus linear
// reads) against two independently-created sources — |seekSource| to learn
// where the nearest sync frame is, and |source| fed through the codec — and
// verifies the codec returns a buffer close to that sync frame.
// Returns OK on success (or when the role is untestable), UNKNOWN_ERROR on
// mismatch.
status_t Harness::testSeek(
        const char *componentName, const char *componentRole) {
    bool isEncoder =
        !strncmp(componentRole, "audio_encoder.", 14)
        || !strncmp(componentRole, "video_encoder.", 14);

    if (isEncoder) {
        // Not testing seek behaviour for encoders.

        printf("  * Not testing seek functionality for encoders.\n");
        return OK;
    }

    const char *mime = GetMimeFromComponentRole(componentRole);

    if (!mime) {
        printf("  * Cannot perform seek test with this componentRole (%s)\n",
               componentRole);

        return OK;
    }

    sp<MediaSource> source = CreateSourceForMime(mime);

    if (source == NULL) {
        printf("  * Unable to open test content for type '%s', "
               "skipping test of componentRole %s\n",
               mime, componentRole);

        return OK;
    }

    sp<MediaSource> seekSource = CreateSourceForMime(mime);
    // NOTE(review): |source| was already verified non-NULL above, so the
    // first half of this condition is redundant; only seekSource can fail.
    if (source == NULL || seekSource == NULL) {
        return UNKNOWN_ERROR;
    }

    CHECK_EQ(seekSource->start(), (status_t)OK);

    sp<MediaSource> codec = OMXCodec::Create(
            mOMX, source->getFormat(), false /* createEncoder */,
            source, componentName);

    CHECK(codec != NULL);

    CHECK_EQ(codec->start(), (status_t)OK);

    int64_t durationUs;
    CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));

    ALOGI("stream duration is %lld us (%.2f secs)",
         durationUs, durationUs / 1E6);

    static const int32_t kNumIterations = 5000;

    // We are always going to seek beyond EOS in the first iteration (i == 0)
    // followed by a linear read for the second iteration (i == 1).
    // After that it's all random.
    for (int32_t i = 0; i < kNumIterations; ++i) {
        int64_t requestedSeekTimeUs;
        int64_t actualSeekTimeUs;
        MediaSource::ReadOptions options;

        double r = uniform_rand();

        if ((i == 1) || (i > 0 && r < 0.5)) {
            // 50% chance of just continuing to decode from last position.

            requestedSeekTimeUs = -1;

            ALOGI("requesting linear read");
        } else {
            if (i == 0 || r < 0.55) {
                // 5% chance of seeking beyond end of stream.
                // (0.5 <= r < 0.55 within the seek branch.)

                requestedSeekTimeUs = durationUs;

                ALOGI("requesting seek beyond EOF");
            } else {
                requestedSeekTimeUs =
                    (int64_t)(uniform_rand() * durationUs);

                ALOGI("requesting seek to %lld us (%.2f secs)",
                     requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
            }

            // Use the independent seekSource to find out where the nearest
            // following sync frame actually is.
            MediaBuffer *buffer = NULL;
            options.setSeekTo(
                    requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC);

            if (seekSource->read(&buffer, &options) != OK) {
                CHECK(buffer == NULL);
                actualSeekTimeUs = -1;  // No sync frame at/after the target.
            } else {
                CHECK(buffer != NULL);
                CHECK(buffer->meta_data()->findInt64(kKeyTime, &actualSeekTimeUs));
                CHECK(actualSeekTimeUs >= 0);

                buffer->release();
                buffer = NULL;
            }

            ALOGI("nearest keyframe is at %lld us (%.2f secs)",
                 actualSeekTimeUs, actualSeekTimeUs / 1E6);
        }

        status_t err;
        MediaBuffer *buffer;
        for (;;) {
            // Skip format-change notifications and empty buffers until we
            // get a real buffer or a terminal error.
            err = codec->read(&buffer, &options);
            options.clearSeekTo();
            if (err == INFO_FORMAT_CHANGED) {
                CHECK(buffer == NULL);
                continue;
            }
            if (err == OK) {
                CHECK(buffer != NULL);
                if (buffer->range_length() == 0) {
                    buffer->release();
                    buffer = NULL;
                    continue;
                }
            } else {
                CHECK(buffer == NULL);
            }

            break;
        }

        if (requestedSeekTimeUs < 0) {
            // Linear read.
            if (err != OK) {
                CHECK(buffer == NULL);
            } else {
                CHECK(buffer != NULL);
                buffer->release();
                buffer = NULL;
            }
        } else if (actualSeekTimeUs < 0) {
            // We sought beyond EOS; only ERROR_END_OF_STREAM is acceptable.
            EXPECT(err != OK,
                   "We attempted to seek beyond EOS and expected "
                   "ERROR_END_OF_STREAM to be returned, but instead "
                   "we got a valid buffer.");
            EXPECT(err == ERROR_END_OF_STREAM,
                   "We attempted to seek beyond EOS and expected "
                   "ERROR_END_OF_STREAM to be returned, but instead "
                   "we found some other error.");
            CHECK_EQ(err, (status_t)ERROR_END_OF_STREAM);
            CHECK(buffer == NULL);
        } else {
            EXPECT(err == OK,
                   "Expected a valid buffer to be returned from "
                   "OMXCodec::read.");
            CHECK(buffer != NULL);

            int64_t bufferTimeUs;
            CHECK(buffer->meta_data()->findInt64(kKeyTime, &bufferTimeUs));
            if (!CloseEnough(bufferTimeUs, actualSeekTimeUs)) {
                printf("\n  * Attempted seeking to %lld us (%.2f secs)",
                       requestedSeekTimeUs, requestedSeekTimeUs / 1E6);
                printf("\n  * Nearest keyframe is at %lld us (%.2f secs)",
                       actualSeekTimeUs, actualSeekTimeUs / 1E6);
                printf("\n  * Returned buffer was at %lld us (%.2f secs)\n\n",
                       bufferTimeUs, bufferTimeUs / 1E6);

                buffer->release();
                buffer = NULL;

                CHECK_EQ(codec->stop(), (status_t)OK);

                return UNKNOWN_ERROR;
            }

            buffer->release();
            buffer = NULL;
        }
    }

    // NOTE(review): seekSource->start() succeeded above but seekSource is
    // never stopped on any return path — looks like a leak; confirm whether
    // its destructor stops it.
    CHECK_EQ(codec->stop(), (status_t)OK);

    return OK;
}
// Offload-sink pull callback: fills up to |aSize| bytes into |aData| from
// mSource, handling seeks requested via mSeekTarget and resolving
// mSeekPromise once the seeked read completes. Returns bytes written.
// Threading: runs on the sink's callback thread; mLock guards seek state
// shared with the state-machine thread.
size_t AudioOffloadPlayer::FillBuffer(void* aData, size_t aSize)
{
  CHECK(mAudioSink.get());

  if (mReachedEOS) {
    return 0;
  }

  size_t sizeDone = 0;
  size_t sizeRemaining = aSize;
  int64_t seekTimeUs = -1;
  while (sizeRemaining > 0) {
    MediaSource::ReadOptions options;
    bool refreshSeekTime = false;

    {
      // Snapshot the pending seek (if any) under mLock and discard any
      // partially consumed input buffer from before the seek.
      android::Mutex::Autolock autoLock(mLock);

      if (mSeekTarget.IsValid()) {
        seekTimeUs = mSeekTarget.GetTime().ToMicroseconds();
        options.setSeekTo(seekTimeUs);
        refreshSeekTime = true;

        if (mInputBuffer) {
          mInputBuffer->release();
          mInputBuffer = nullptr;
        }
      }
    }

    if (!mInputBuffer) {
      status_t err;
      // read() is done without holding mLock; the seek target may change
      // concurrently, which is re-checked below.
      err = mSource->read(&mInputBuffer, &options);

      CHECK((!err && mInputBuffer) || (err && !mInputBuffer));

      android::Mutex::Autolock autoLock(mLock);

      if (err != OK) {
        if (mSeekTarget.IsValid()) {
          mSeekTarget.Reset();
        }
        AUDIO_OFFLOAD_LOG(LogLevel::Error,
            ("Error while reading media source %d "
            "Ok to receive EOS error at end", err));
        if (!mReachedEOS) {

          // After seek there is a possible race condition if
          // OffloadThread is observing state_stopping_1 before
          // framesReady() > 0. Ensure sink stop is called
          // after last buffer is released. This ensures the
          // partial buffer is written to the driver before
          // stopping one is observed.The drawback is that
          // there will be an unnecessary call to the parser
          // after parser signalled EOS.

          if (sizeDone > 0) {
            AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("send Partial buffer down"));
            AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("skip calling stop till next"
                " fillBuffer"));
            break;
          }
          // no more buffers to push - stop() and wait for STREAM_END
          // don't set mReachedEOS until stream end received
          mAudioSink->Stop();
        }
        break;
      }

      if(mInputBuffer->range_length() != 0) {
        CHECK(mInputBuffer->meta_data()->findInt64(
            kKeyTime, &mPositionTimeMediaUs));
      }

      // Resolve the seek promise only if the target we sought to is still
      // the current one (it may have been replaced while mLock was dropped).
      if (mSeekTarget.IsValid() &&
          seekTimeUs == mSeekTarget.GetTime().ToMicroseconds()) {
        MOZ_ASSERT(mSeekTarget.IsValid());
        mSeekTarget.Reset();
        if (!mSeekPromise.IsEmpty()) {
          AUDIO_OFFLOAD_LOG(LogLevel::Debug,
              ("FillBuffer posting SEEK_COMPLETE"));
          // NOTE(review): mSeekTarget.mEventVisibility is read here AFTER
          // Reset() was called above — this only works if Reset() leaves
          // mEventVisibility untouched; confirm against SeekTarget::Reset().
          MediaDecoder::SeekResolveValue val(mReachedEOS,
              mSeekTarget.mEventVisibility);
          mSeekPromise.Resolve(val, __func__);
        }
      } else if (mSeekTarget.IsValid()) {
        AUDIO_OFFLOAD_LOG(LogLevel::Debug,
            ("seek is updated during unlocking mLock"));
      }

      if (refreshSeekTime) {
        NotifyPositionChanged();

        // need to adjust the mStartPosUs for offload decoding since parser
        // might not be able to get the exact seek time requested.
        mStartPosUs = mPositionTimeMediaUs;
        AUDIO_OFFLOAD_LOG(LogLevel::Debug, ("Adjust seek time to: %.2f",
            mStartPosUs / 1E6));
      }
    }

    if (mInputBuffer->range_length() == 0) {
      // Empty buffer carries no PCM; drop it and read again.
      mInputBuffer->release();
      mInputBuffer = nullptr;
      continue;
    }

    size_t copy = sizeRemaining;
    if (copy > mInputBuffer->range_length()) {
      copy = mInputBuffer->range_length();
    }

    memcpy((char *)aData + sizeDone,
        (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
        copy);

    // Keep the unconsumed remainder for the next callback.
    mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
        mInputBuffer->range_length() - copy);

    sizeDone += copy;
    sizeRemaining -= copy;
  }
  return sizeDone;
}
void Process() { Frame* frame; int32_t w, h; int decode_done = 0; MediaSource::ReadOptions readopt; // GLuint texid; //SetPriority(THREAD_PRIORITY_ABOVE_NORMAL); do { #if defined(DEBUG_VERBOSE) unsigned int time = XbmcThreads::SystemClockMillis(); CLog::Log(LOGDEBUG, "%s: >>> Handling frame\n", CLASSNAME); #endif p->cur_frame = NULL; frame = (Frame*)malloc(sizeof(Frame)); if (!frame) { decode_done = 1; continue; } frame->eglimg = EGL_NO_IMAGE_KHR; frame->medbuf = NULL; if (p->resetting) { readopt.setSeekTo(0); p->resetting = false; } frame->status = p->decoder->read(&frame->medbuf, &readopt); readopt.clearSeekTo(); if (frame->status == OK) { if (!frame->medbuf->graphicBuffer().get()) // hw buffers { if (frame->medbuf->range_length() == 0) { CLog::Log(LOGERROR, "%s - Invalid buffer\n", CLASSNAME); frame->status = VC_ERROR; decode_done = 1; frame->medbuf->release(); frame->medbuf = NULL; } else frame->format = RENDER_FMT_YUV420P; } else frame->format = RENDER_FMT_EGLIMG; } if (frame->status == OK) { sp<MetaData> outFormat = p->decoder->getFormat(); outFormat->findInt32(kKeyWidth , &w); outFormat->findInt32(kKeyHeight, &h); frame->pts = 0; frame->width = w; frame->height = h; frame->medbuf->meta_data()->findInt64(kKeyTime, &(frame->pts)); } else if (frame->status == INFO_FORMAT_CHANGED) { int32_t cropLeft, cropTop, cropRight, cropBottom; sp<MetaData> outFormat = p->decoder->getFormat(); outFormat->findInt32(kKeyWidth , &p->width); outFormat->findInt32(kKeyHeight, &p->height); cropLeft = cropTop = cropRight = cropBottom = 0; if (!outFormat->findRect(kKeyCropRect, &cropLeft, &cropTop, &cropRight, &cropBottom)) { p->x = 0; p->y = 0; } else { p->x = cropLeft; p->y = cropTop; p->width = cropRight - cropLeft + 1; p->height = cropBottom - cropTop + 1; } outFormat->findInt32(kKeyColorFormat, &p->videoColorFormat); if (!outFormat->findInt32(kKeyStride, &p->videoStride)) p->videoStride = p->width; if (!outFormat->findInt32(kKeySliceHeight, &p->videoSliceHeight)) 
p->videoSliceHeight = p->height; #if defined(DEBUG_VERBOSE) CLog::Log(LOGDEBUG, ">>> new format col:%d, w:%d, h:%d, sw:%d, sh:%d, ctl:%d,%d; cbr:%d,%d\n", p->videoColorFormat, p->width, p->height, p->videoStride, p->videoSliceHeight, cropTop, cropLeft, cropBottom, cropRight); #endif if (frame->medbuf) frame->medbuf->release(); frame->medbuf = NULL; free(frame); continue; } else { CLog::Log(LOGERROR, "%s - decoding error (%d)\n", CLASSNAME,frame->status); if (frame->medbuf) frame->medbuf->release(); frame->medbuf = NULL; free(frame); continue; } if (frame->format == RENDER_FMT_EGLIMG) { if (!p->eglInitialized) { p->InitializeEGL(frame->width, frame->height); } else if (p->texwidth != frame->width || p->texheight != frame->height) { p->ReleaseEGL(); p->InitializeEGL(frame->width, frame->height); } ANativeWindowBuffer* graphicBuffer = frame->medbuf->graphicBuffer()->getNativeBuffer(); native_window_set_buffers_timestamp(p->natwin.get(), frame->pts * 1000); int err = p->natwin.get()->queueBuffer(p->natwin.get(), graphicBuffer); if (err == 0) frame->medbuf->meta_data()->setInt32(kKeyRendered, 1); frame->medbuf->release(); frame->medbuf = NULL; g_xbmcapp->UpdateStagefrightTexture(); // g_xbmcapp->GetSurfaceTexture()->updateTexImage(); if (!p->drop_state) { // static const EGLint eglImgAttrs[] = { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE, EGL_NONE }; // EGLImageKHR img = eglCreateImageKHR(p->eglDisplay, EGL_NO_CONTEXT, // EGL_NATIVE_BUFFER_ANDROID, // (EGLClientBuffer)graphicBuffer->getNativeBuffer(), // eglImgAttrs); p->free_mutex.lock(); stSlot* cur_slot = p->getFreeSlot(); if (!cur_slot) { CLog::Log(LOGERROR, "STF: No free output buffers\n"); continue; } p->fbo.BindToTexture(GL_TEXTURE_2D, cur_slot->texid); p->fbo.BeginRender(); glDisable(GL_DEPTH_TEST); //glClear(GL_COLOR_BUFFER_BIT); const GLfloat triangleVertices[] = { -1.0f, 1.0f, -1.0f, -1.0f, 1.0f, -1.0f, 1.0f, 1.0f, }; glVertexAttribPointer(p->mPositionHandle, 2, GL_FLOAT, GL_FALSE, 0, triangleVertices); 
glEnableVertexAttribArray(p->mPositionHandle); glUseProgram(p->mPgm); glUniform1i(p->mTexSamplerHandle, 0); // glGenTextures(1, &texid); // glBindTexture(GL_TEXTURE_EXTERNAL_OES, texid); // glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, img); glBindTexture(GL_TEXTURE_EXTERNAL_OES, g_xbmcapp->GetAndroidTexture()); GLfloat texMatrix[16]; g_xbmcapp->GetStagefrightTransformMatrix(texMatrix); glUniformMatrix4fv(p->mTexMatrixHandle, 1, GL_FALSE, texMatrix); glDrawArrays(GL_TRIANGLE_FAN, 0, 4); glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0); p->fbo.EndRender(); glBindTexture(GL_TEXTURE_2D, 0); frame->eglimg = cur_slot->eglimg; p->free_mutex.unlock(); } } #if defined(DEBUG_VERBOSE) CLog::Log(LOGDEBUG, "%s: >>> pushed OUT frame; w:%d, h:%d, img:%p, tm:%d\n", CLASSNAME, frame->width, frame->height, frame->eglimg, XbmcThreads::SystemClockMillis() - time); #endif p->out_mutex.lock(); p->cur_frame = frame; while (p->cur_frame) p->out_condition.wait(p->out_mutex); p->out_mutex.unlock(); } while (!decode_done && !m_bStop); if (p->eglInitialized) p->ReleaseEGL(); }
// Starts audio playback: starts the source (unless already started), primes
// the first buffer (tolerating one leading INFO_FORMAT_CHANGED), then opens
// and starts either the offload-capable audio sink or a plain AudioTrack.
// Returns OK on success, or the failing status with partially acquired
// resources released.
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    CHECK(!mStarted);
    CHECK(mSource != NULL);

    status_t err;
    if (!sourceAlreadyStarted) {
        mSourcePaused = false;
        err = mSource->start();

        if (err != OK) {
            return err;
        }
    }
    ALOGD("start of Playback, useOffload %d",useOffload());

    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.
    CHECK(mFirstBuffer == NULL);

    MediaSource::ReadOptions options;
    if (mSeeking) {
        options.setSeekTo(mSeekTimeUs);
    }

    // Retry while the source reports "try again".
    do {
        mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    } while (mFirstBufferResult == -EAGAIN);

    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        ALOGV("INFO_FORMAT_CHANGED!!!");

        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;

        if (mSeeking) {
            mPositionTimeRealUs = 0;
            mPositionTimeMediaUs = mSeekTimeUs;
            mSeeking = false;
        }

    } else {
        mIsFirstBuffer = true;

        if (mSeeking) {
            mPositionTimeRealUs = 0;
            if (mFirstBuffer == NULL || !mFirstBuffer->meta_data()->findInt64(
                    kKeyTime, &mPositionTimeMediaUs)) {
                return UNKNOWN_ERROR;
            }
            mSeeking = false;
        }

    }

    sp<MetaData> format = mSource->getFormat();

    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    // Non-offload playback only accepts raw PCM.
    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));

    success = format->findInt32(kKeySampleRate, &mSampleRate);
    CHECK(success);

    int32_t numChannels, channelMask = 0;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    CHECK(success);

    format->findInt64(kKeyDuration, &mDurationUs);

    if(!format->findInt32(kKeyChannelMask, &channelMask)) {
        // log only when there's a risk of ambiguity of channel mask selection
        ALOGI_IF(numChannels > 2,
                "source format didn't specify channel mask, using (%d) channel order", numChannels);
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    } else if (channelMask == 0) {
        channelMask = audio_channel_out_mask_from_count(numChannels);
        ALOGV("channel mask is zero,update from channel count %d", channelMask);
    }

    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
    int32_t bitWidth = 16;
#if defined(ENABLE_AV_ENHANCEMENTS) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
    format->findInt32(kKeySampleBits, &bitWidth);
#endif

    if (useOffload()) {
        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
            audioFormat = AUDIO_FORMAT_INVALID;
        } else if (audio_is_linear_pcm(audioFormat) || audio_is_offload_pcm(audioFormat)) {
#if defined(QCOM_HARDWARE) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
            // Override audio format for PCM offload
            if (bitWidth >= 24) {
                ALOGD("24-bit PCM offload enabled format=%d", audioFormat);
                audioFormat = AUDIO_FORMAT_PCM_24_BIT_OFFLOAD;
            } else {
                audioFormat = AUDIO_FORMAT_PCM_16_BIT_OFFLOAD;
            }
#endif
            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
        }
    }

    int avgBitRate = -1;
    format->findInt32(kKeyBitRate, &avgBitRate);

    if (mAudioSink.get() != NULL) {

        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;

        if (allowDeepBuffering()) {
            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
        if (useOffload()) {
            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

            int64_t durationUs;
            if (format->findInt64(kKeyDuration, &durationUs)) {
                offloadInfo.duration_us = durationUs;
            } else {
                offloadInfo.duration_us = -1;
            }

            offloadInfo.sample_rate = mSampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
#if defined(ENABLE_AV_ENHANCEMENTS) || defined(ENABLE_OFFLOAD_ENHANCEMENTS)
            offloadInfo.bit_width = bitWidth >= 24 ? 24 : bitWidth;
#endif
        }

        // NOTE: this |err| intentionally shadows the outer one.
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, audioFormat,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (audio_output_flags_t)flags,
                useOffload() ? &offloadInfo : NULL);

        if (err == OK) {
            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            mFrameSize = mAudioSink->frameSize();

            if (useOffload()) {
                // If the playback is offloaded to h/w we pass the
                // HAL some metadata information
                // We don't want to do this for PCM because it will be going
                // through the AudioFlinger mixer before reaching the hardware
                sendMetaDataToHal(mAudioSink, format);
            }

            err = mAudioSink->start();
            // do not alter behavior for non offloaded tracks: ignore start status.
            if (!useOffload()) {
                err = OK;
            }
        }

        if (err != OK) {
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

    } else {
        // playing to an AudioTrack, set up mask if necessary
        audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
                audio_channel_out_mask_from_count(numChannels) : channelMask;
        if (0 == audioMask) {
            return BAD_VALUE;
        }

        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);

        if ((err = mAudioTrack->initCheck()) != OK) {
            mAudioTrack.clear();

            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
        mFrameSize = mAudioTrack->frameSize();

        mAudioTrack->start();
    }

    mStarted = true;
    mPlaying = true;
    mPinnedTimeUs = -1ll;
    const char *componentName;
    if (!(format->findCString(kKeyDecoderComponent, &componentName))) {
          componentName = "none";
    }
    // Some decoders require an explicit pause before stop.
    if (!strncmp(componentName, "OMX.qcom.", 9)) {
        mPauseRequired = true;
    } else {
        mPauseRequired = false;
    }

    return OK;
}
void NuPlayer::GenericSource::readBuffer( bool audio, int64_t seekTimeUs, int64_t *actualTimeUs) { Track *track = audio ? &mAudioTrack : &mVideoTrack; CHECK(track->mSource != NULL); if (actualTimeUs) { *actualTimeUs = seekTimeUs; } MediaSource::ReadOptions options; bool seeking = false; if (seekTimeUs >= 0) { options.setSeekTo(seekTimeUs); seeking = true; } for (;;) { MediaBuffer *mbuf; status_t err = track->mSource->read(&mbuf, &options); options.clearSeekTo(); if (err == OK) { size_t outLength = mbuf->range_length(); if (audio && mAudioIsVorbis) { outLength += sizeof(int32_t); } sp<ABuffer> buffer = new ABuffer(outLength); memcpy(buffer->data(), (const uint8_t *)mbuf->data() + mbuf->range_offset(), mbuf->range_length()); if (audio && mAudioIsVorbis) { int32_t numPageSamples; if (!mbuf->meta_data()->findInt32( kKeyValidSamples, &numPageSamples)) { numPageSamples = -1; } memcpy(buffer->data() + mbuf->range_length(), &numPageSamples, sizeof(numPageSamples)); } int64_t timeUs; CHECK(mbuf->meta_data()->findInt64(kKeyTime, &timeUs)); buffer->meta()->setInt64("timeUs", timeUs); if (actualTimeUs) { *actualTimeUs = timeUs; } mbuf->release(); mbuf = NULL; if (seeking) { track->mPackets->queueDiscontinuity( ATSParser::DISCONTINUITY_SEEK, NULL); } track->mPackets->queueAccessUnit(buffer); break; } else if (err == INFO_FORMAT_CHANGED) { #if 0 track->mPackets->queueDiscontinuity( ATSParser::DISCONTINUITY_FORMATCHANGE, NULL); #endif } else { track->mPackets->signalEOS(err); break; } } }
// Reads the next decoded video frame into |aFrame|. When |aDoSeek| is set,
// performs a sync-frame seek to |aTimeUs| (direction chosen from the last
// seek time) with fallback from NEXT_SYNC to PREVIOUS_SYNC at EOS.
// Gralloc-backed frames are wrapped in a TextureClient; software frames go
// through ToVideoFrame(). Returns false on unrecoverable errors/EOS, true
// otherwise (including timeouts, which the caller should retry).
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
                           bool aKeyframeSkip, bool aDoSeek)
{
  if (!mVideoSource.get())
    return false;

  ReleaseVideoBuffer();

  status_t err;

  if (aDoSeek) {
    {
      Mutex::Autolock autoLock(mSeekLock);
      ReleaseAllPendingVideoBuffersLocked();
      mIsVideoSeeking = true;
    }
    MediaSource::ReadOptions options;
    MediaSource::ReadOptions::SeekMode seekMode;
    // If the last timestamp of decoded frame is smaller than seekTime,
    // seek to next key frame. Otherwise seek to the previos one.
    OD_LOG("SeekTime: %lld, mLastSeekTime:%lld", aTimeUs, mLastSeekTime);
    if (mLastSeekTime == -1 || mLastSeekTime > aTimeUs) {
      seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
    } else {
      seekMode = MediaSource::ReadOptions::SEEK_NEXT_SYNC;
    }

    mLastSeekTime = aTimeUs;
    bool findNextBuffer = true;
    while (findNextBuffer) {
      options.setSeekTo(aTimeUs, seekMode);
      findNextBuffer = false;
      if (mIsVideoSeeking) {
        err = mVideoSource->read(&mVideoBuffer, &options);
        Mutex::Autolock autoLock(mSeekLock);
        mIsVideoSeeking = false;
        PostReleaseVideoBuffer(nullptr, FenceHandle());
      }
      else {
        // Seek was already consumed (mIsVideoSeeking cleared concurrently);
        // fall back to a plain read.
        err = mVideoSource->read(&mVideoBuffer);
      }

      // If there is no next Keyframe, jump to the previous key frame.
      if (err == ERROR_END_OF_STREAM && seekMode ==
          MediaSource::ReadOptions::SEEK_NEXT_SYNC) {
        seekMode = MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC;
        findNextBuffer = true;
        {
          Mutex::Autolock autoLock(mSeekLock);
          mIsVideoSeeking = true;
        }
        continue;
      } else if (err != OK) {
        OD_LOG("Unexpected error when seeking to %lld", aTimeUs);
        break;
      }
      // For some codecs, the length of first decoded frame after seek is 0.
      // Need to ignore it and continue to find the next one
      if (mVideoBuffer->range_length() == 0) {
        PostReleaseVideoBuffer(mVideoBuffer, FenceHandle());
        findNextBuffer = true;
      }
    }
    aDoSeek = false;
  } else {
    err = mVideoSource->read(&mVideoBuffer);
  }

  aFrame->mSize = 0;

  if (err == OK) {
    int64_t timeUs;
    int32_t unreadable;
    int32_t keyFrame;

    size_t length = mVideoBuffer->range_length();

    if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
      NS_WARNING("OMX decoder did not return frame time");
      return false;
    }

    if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
      keyFrame = 0;
    }

    if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
      unreadable = 0;
    }

    RefPtr<mozilla::layers::TextureClient> textureClient;
    if ((mVideoBuffer->graphicBuffer().get())) {
      textureClient = mNativeWindow->getTextureClientFromBuffer(mVideoBuffer->graphicBuffer().get());
    }

    if (textureClient) {
      // Manually increment reference count to keep MediaBuffer alive
      // during TextureClient is in use.
      mVideoBuffer->add_ref();
      GrallocTextureClientOGL* grallocClient = static_cast<GrallocTextureClientOGL*>(textureClient.get());
      grallocClient->SetMediaBuffer(mVideoBuffer);
      // Set recycle callback for TextureClient
      textureClient->SetRecycleCallback(OmxDecoder::RecycleCallback, this);
      {
        Mutex::Autolock autoLock(mPendingVideoBuffersLock);
        // Store pending recycle TextureClient.
        MOZ_ASSERT(mPendingRecycleTexutreClients.find(textureClient) == mPendingRecycleTexutreClients.end());
        mPendingRecycleTexutreClients.insert(textureClient);
      }

      aFrame->mGraphicBuffer = textureClient;
      aFrame->mRotation = mVideoRotation;
      aFrame->mTimeUs = timeUs;
      aFrame->mKeyFrame = keyFrame;
      aFrame->Y.mWidth = mVideoWidth;
      aFrame->Y.mHeight = mVideoHeight;
      // Release to hold video buffer in OmxDecoder more.
      // MediaBuffer's ref count is changed from 2 to 1.
      ReleaseVideoBuffer();
    } else if (length > 0) {
      // Software path: copy/convert the raw plane data.
      char *data = static_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset();

      if (unreadable) {
        LOG(LogLevel::Debug, "video frame is unreadable");
      }

      if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
        return false;
      }
    }
    // Check if this frame is valid or not. If not, skip it.
    if ((aKeyframeSkip && timeUs < aTimeUs) || length == 0) {
      aFrame->mShouldSkip = true;
    }
  }
  else if (err == INFO_FORMAT_CHANGED) {
    // If the format changed, update our cached info.
    if (!SetVideoFormat()) {
      return false;
    } else {
      return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek);
    }
  }
  else if (err == ERROR_END_OF_STREAM) {
    return false;
  }
  else if (err == -ETIMEDOUT) {
    LOG(LogLevel::Debug, "OmxDecoder::ReadVideo timed out, will retry");
    return true;
  }
  else {
    // UNKNOWN_ERROR is sometimes is used to mean "out of memory", but
    // regardless, don't keep trying to decode if the decoder doesn't want to.
    LOG(LogLevel::Debug, "OmxDecoder::ReadVideo failed, err=%d", err);
    return false;
  }

  return true;
}
// Audio-sink pull callback for the video editor: fills |data| with up to
// |size| bytes of primary-track PCM, mixing in the background-music PCM
// file (mBGAudioPCMFileHandle) when the skim point has passed the BGM start
// time, and applying the primary-track volume otherwise.
// NOTE(review): this chunk is truncated — the function body continues past
// the end of the visible source.
size_t VideoEditorAudioPlayer::fillBuffer(void *data, size_t size) {
    if (mReachedEOS) {
        return 0;
    }

    size_t size_done = 0;
    size_t size_remaining = size;

    M4OSA_ERR err = M4NO_ERROR;
    M4AM_Buffer16 bgFrame = {NULL, 0};
    M4AM_Buffer16 mixFrame = {NULL, 0};
    M4AM_Buffer16 ptFrame = {NULL, 0};
    int64_t currentSteamTS = 0;
    int64_t startTimeForBT = 0;
    M4OSA_Float fPTVolLevel =
     ((M4OSA_Float)mBGAudioStoryBoardCurrentMediaVolumeVal)/100;
    M4OSA_Int16 *pPTMdata=NULL;
    M4OSA_UInt32 uiPCMsize = 0;
    bool postSeekComplete = false;
    bool postEOS = false;

    while ((size_remaining > 0)&&(err==M4NO_ERROR)) {
        MediaSource::ReadOptions options;

        {
            // Handle a pending seek under mLock: drop the stashed first
            // buffer and any partially consumed input buffer.
            Mutex::Autolock autoLock(mLock);
            if (mSeeking) {
                if (mIsFirstBuffer) {
                    if (mFirstBuffer != NULL) {
                        mFirstBuffer->release();
                        mFirstBuffer = NULL;
                    }
                    mIsFirstBuffer = false;
                }

                options.setSeekTo(mSeekTimeUs);

                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }
                mSeeking = false;

                if (mObserver) {
                    postSeekComplete = true;
                }
            }
        }

        if (mInputBuffer == NULL) {
            status_t status = OK;

            if (mIsFirstBuffer) {
                // Consume the buffer stashed by start().
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                status = mFirstBufferResult;
                mIsFirstBuffer = false;
            } else {

                {
                    Mutex::Autolock autoLock(mLock);
                    status = mSource->read(&mInputBuffer, &options);
                }
                // Data is Primary Track, mix with background track
                // after reading same size from Background track PCM file
                if (status == OK)
                {
                    // Mix only when skim point is after startTime of BT
                    if (((mBGAudioStoryBoardSkimTimeStamp* 1000) +
                          (mPositionTimeMediaUs - mSeekTimeUs)) >=
                          (int64_t)(mAudioMixSettings->uiAddCts * 1000)) {

                        ALOGV("VideoEditorAudioPlayer::INSIDE MIXING");
                        ALOGV("Checking %lld <= %lld",
                            mBGAudioPCMFileSeekPoint-mBGAudioPCMFileOriginalSeekPoint,
                            mBGAudioPCMFileTrimmedLength);


                        M4OSA_Void* ptr;
                        ptr = (M4OSA_Void*)((unsigned int)mInputBuffer->data() +
                        mInputBuffer->range_offset());

                        M4OSA_UInt32 len = mInputBuffer->range_length();
                        M4OSA_Context fp = M4OSA_NULL;

                        // 16-bit samples: sample count is byte count / 2.
                        uiPCMsize = (mInputBuffer->range_length())/2;
                        pPTMdata = (M4OSA_Int16*) ((uint8_t*) mInputBuffer->data()
                                + mInputBuffer->range_offset());

                        ALOGV("mix with background malloc to do len %d", len);

                        bgFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc( len, 1,
                                                       (M4OSA_Char*)"bgFrame");
                        bgFrame.m_bufferSize = len;

                        mixFrame.m_dataAddress = (M4OSA_UInt16*)M4OSA_32bitAlignedMalloc(len, 1,
                                                    (M4OSA_Char*)"mixFrame");
                        mixFrame.m_bufferSize = len;

                        ALOGV("mix with bgm with size %lld", mBGAudioPCMFileLength);

                        CHECK(mInputBuffer->meta_data()->findInt64(kKeyTime,
                                         &mPositionTimeMediaUs));

                        if (mBGAudioPCMFileSeekPoint -
                             mBGAudioPCMFileOriginalSeekPoint <=
                              (mBGAudioPCMFileTrimmedLength - len)) {

                            ALOGV("Checking mBGAudioPCMFileHandle %d",
                                (unsigned int)mBGAudioPCMFileHandle);

                            if (mBGAudioPCMFileHandle != M4OSA_NULL) {
                                ALOGV("fillBuffer seeking file to %lld",
                                    mBGAudioPCMFileSeekPoint);

                            // TODO : 32bits required for OSAL
                                M4OSA_UInt32 tmp32 =
                                    (M4OSA_UInt32)mBGAudioPCMFileSeekPoint;
                                err = M4OSA_fileReadSeek(mBGAudioPCMFileHandle,
                                                M4OSA_kFileSeekBeginning,
                                                (M4OSA_FilePosition*)&tmp32);

                                mBGAudioPCMFileSeekPoint = tmp32;

                                if (err != M4NO_ERROR){
                                    ALOGE("M4OSA_fileReadSeek err %d",(int)err);
                                }

                                err = M4OSA_fileReadData(mBGAudioPCMFileHandle,
                                       (M4OSA_Int8*)bgFrame.m_dataAddress,
                                       (M4OSA_UInt32*)&len);

                                if (err == M4WAR_NO_DATA_YET ) {

                                    ALOGV("fillBuffer End of file reached");
                                    err = M4NO_ERROR;

                                    // We reached the end of file
                                    // move to begin cut time equal value
                                    if (mAudioMixSettings->bLoop) {
                                        mBGAudioPCMFileSeekPoint =
                                         (((int64_t)(mAudioMixSettings->beginCutMs) *
                                          mAudioMixSettings->uiSamplingFrequency) *
                                          mAudioMixSettings->uiNbChannels *
                                           sizeof(M4OSA_UInt16)) / 1000;
                                        ALOGV("fillBuffer Looping \
to mBGAudioPCMFileSeekPoint %lld",
                                            mBGAudioPCMFileSeekPoint);
                                    } else {
                                        // No mixing;
                                        // take care of volume of primary track
                                        if (fPTVolLevel < 1.0) {
                                            setPrimaryTrackVolume(pPTMdata,
                                             uiPCMsize, fPTVolLevel);
                                        }
                                    }
                                } else if (err != M4NO_ERROR ) {
                                     ALOGV("fileReadData for audio err %d", err);
                                } else {
                                    mBGAudioPCMFileSeekPoint += len;
                                    ALOGV("fillBuffer mBGAudioPCMFileSeekPoint \
%lld",
                                     mBGAudioPCMFileSeekPoint);

                                    // Assign the ptr data to primary track
                                    ptFrame.m_dataAddress = (M4OSA_UInt16*)ptr;
                                    ptFrame.m_bufferSize = len;

                                    // Call to mix and duck
                                    mAudioProcess->mixAndDuck(
                                         &ptFrame, &bgFrame, &mixFrame);

                                        // Overwrite the decoded buffer
                                    memcpy((void *)ptr,
                                         (void *)mixFrame.m_dataAddress, len);
                                }
                            }
                        } else if (mAudioMixSettings->bLoop){
                            // Move to begin cut time equal value
                            mBGAudioPCMFileSeekPoint =
                                mBGAudioPCMFileOriginalSeekPoint;
                        } else {
                            // No mixing;
                            // take care of volume level of primary track
                            if(fPTVolLevel < 1.0) {
                                setPrimaryTrackVolume(
                                      pPTMdata, uiPCMsize, fPTVolLevel);
                            }
                        }
                        if (bgFrame.m_dataAddress) {
                            free(bgFrame.m_dataAddress);
                        }
                        if (mixFrame.m_dataAddress) {
                            free(mixFrame.m_dataAddress);
                        }
                    } else {
                        // (Truncated here in this view: skim point has not
                        // yet reached the BGM start time — remainder of the
                        // function lies outside this chunk.)
// Reads one decoded video frame into |aFrame|.
//
// If |aDoSeek| is set, performs a seek to |aTimeUs| (previous sync frame)
// under a two-phase mSeekLock protocol: mIsVideoSeeking is raised before the
// blocking read and lowered afterwards, at which point any buffers queued
// while seeking are dropped via ReleaseAllPendingVideoBuffersLocked().
// Returns false on unrecoverable errors or end of stream; returns true on
// success and also on timeout (-ETIMEDOUT), signalling the caller to retry.
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs,
                           bool aKeyframeSkip, bool aDoSeek)
{
  // No decoder configured — nothing to read.
  if (!mVideoSource.get())
    return false;

  // Drop the previously returned buffer before requesting a new one.
  ReleaseVideoBuffer();

  status_t err;

  if (aDoSeek) {
    {
      // Mark the seek in progress so other threads can tell buffers
      // arriving now belong to the pre-seek position.
      Mutex::Autolock autoLock(mSeekLock);
      mIsVideoSeeking = true;
    }
    MediaSource::ReadOptions options;
    options.setSeekTo(aTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
    // Blocking read performed with mSeekLock released.
    err = mVideoSource->read(&mVideoBuffer, &options);
    {
      // Seek done: flush any stale buffers that queued up meanwhile.
      Mutex::Autolock autoLock(mSeekLock);
      mIsVideoSeeking = false;
      ReleaseAllPendingVideoBuffersLocked();
    }
    // Only the first read of a recursive retry seeks (see the
    // INFO_FORMAT_CHANGED tail call below).
    aDoSeek = false;
  } else {
    err = mVideoSource->read(&mVideoBuffer);
  }

  aFrame->mSize = 0;

  if (err == OK) {
    int64_t timeUs;
    int32_t unreadable;
    int32_t keyFrame;

    // A frame without a timestamp is unusable.
    if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) {
      NS_WARNING("OMX decoder did not return frame time");
      return false;
    }

    // Optional metadata: default to "not a sync frame" / "readable".
    if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
      keyFrame = 0;
    }

    if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
      unreadable = 0;
    }

    // Hardware path: the decoder handed us a GraphicBuffer we can wrap
    // instead of copying pixel data.
    mozilla::layers::SurfaceDescriptor *descriptor = nullptr;
    if ((mVideoBuffer->graphicBuffer().get())) {
      descriptor = mNativeWindow->getSurfaceDescriptorFromBuffer(mVideoBuffer->graphicBuffer().get());
    }

    if (descriptor) {
      // Change the descriptor's size to video's size. There are cases that
      // GraphicBuffer's size and actual video size is different.
      // See Bug 850566.
      mozilla::layers::SurfaceDescriptorGralloc newDescriptor =
        descriptor->get_SurfaceDescriptorGralloc();
      newDescriptor.size() = nsIntSize(mVideoWidth, mVideoHeight);

      mozilla::layers::SurfaceDescriptor descWrapper(newDescriptor);
      aFrame->mGraphicBuffer =
        new mozilla::layers::VideoGraphicBuffer(this, mVideoBuffer, descWrapper);
      aFrame->mRotation = mVideoRotation;
      aFrame->mTimeUs = timeUs;
      aFrame->mKeyFrame = keyFrame;
      aFrame->Y.mWidth = mVideoWidth;
      aFrame->Y.mHeight = mVideoHeight;
    } else if (mVideoBuffer->range_length() > 0) {
      // Software path: copy/convert the raw bytes into aFrame.
      char *data = static_cast<char *>(mVideoBuffer->data()) +
                   mVideoBuffer->range_offset();
      size_t length = mVideoBuffer->range_length();

      if (unreadable) {
        LOG(PR_LOG_DEBUG, "video frame is unreadable");
      }

      if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
        return false;
      }
    }

    // While catching up to a seek target, flag pre-target frames so the
    // caller can drop them without presenting.
    if (aKeyframeSkip && timeUs < aTimeUs) {
      aFrame->mShouldSkip = true;
    }
  } else if (err == INFO_FORMAT_CHANGED) {
    // If the format changed, update our cached info.
    if (!SetVideoFormat()) {
      return false;
    } else {
      // Retry the read with the new format (aDoSeek already cleared).
      return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek);
    }
  } else if (err == ERROR_END_OF_STREAM) {
    return false;
  } else if (err == -ETIMEDOUT) {
    // Transient: report success so the caller retries rather than aborts.
    LOG(PR_LOG_DEBUG, "OmxDecoder::ReadVideo timed out, will retry");
    return true;
  } else {
    // UNKNOWN_ERROR is sometimes is used to mean "out of memory", but
    // regardless, don't keep trying to decode if the decoder doesn't want to.
    LOG(PR_LOG_DEBUG, "OmxDecoder::ReadVideo failed, err=%d", err);
    return false;
  }

  return true;
}
static void playSource(OMXClient *client, sp<MediaSource> &source) { sp<MetaData> meta = source->getFormat(); const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); sp<MediaSource> rawSource; if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) { rawSource = source; } else { rawSource = OMXCodec::Create( client->interface(), meta, false /* createEncoder */, source, NULL /* matchComponentName */, gPreferSoftwareCodec ? OMXCodec::kPreferSoftwareCodecs : 0); if (rawSource == NULL) { fprintf(stderr, "Failed to instantiate decoder for '%s'.\n", mime); return; } } source.clear(); status_t err = rawSource->start(); if (err != OK) { fprintf(stderr, "rawSource returned error %d (0x%08x)\n", err, err); return; } if (gPlaybackAudio) { AudioPlayer *player = new AudioPlayer(NULL); player->setSource(rawSource); rawSource.clear(); player->start(true /* sourceAlreadyStarted */); status_t finalStatus; while (!player->reachedEOS(&finalStatus)) { usleep(100000ll); } delete player; player = NULL; return; } else if (gReproduceBug >= 3 && gReproduceBug <= 5) { int64_t durationUs; CHECK(meta->findInt64(kKeyDuration, &durationUs)); status_t err; MediaBuffer *buffer; MediaSource::ReadOptions options; int64_t seekTimeUs = -1; for (;;) { err = rawSource->read(&buffer, &options); options.clearSeekTo(); bool shouldSeek = false; if (err == INFO_FORMAT_CHANGED) { CHECK(buffer == NULL); printf("format changed.\n"); continue; } else if (err != OK) { printf("reached EOF.\n"); shouldSeek = true; } else { int64_t timestampUs; CHECK(buffer->meta_data()->findInt64(kKeyTime, ×tampUs)); bool failed = false; if (seekTimeUs >= 0) { int64_t diff = timestampUs - seekTimeUs; if (diff < 0) { diff = -diff; } if ((gReproduceBug == 4 && diff > 500000) || (gReproduceBug == 5 && timestampUs < 0)) { printf("wanted: %.2f secs, got: %.2f secs\n", seekTimeUs / 1E6, timestampUs / 1E6); printf("ERROR: "); failed = true; } } printf("buffer has timestamp %lld us (%.2f secs)\n", timestampUs, timestampUs / 1E6); 
buffer->release(); buffer = NULL; if (failed) { break; } shouldSeek = ((double)rand() / RAND_MAX) < 0.1; if (gReproduceBug == 3) { shouldSeek = false; } } seekTimeUs = -1; if (shouldSeek) { seekTimeUs = (rand() * (float)durationUs) / RAND_MAX; options.setSeekTo(seekTimeUs); printf("seeking to %lld us (%.2f secs)\n", seekTimeUs, seekTimeUs / 1E6); } } rawSource->stop(); return; } int n = 0; int64_t startTime = getNowUs(); long numIterationsLeft = gNumRepetitions; MediaSource::ReadOptions options; int64_t sumDecodeUs = 0; int64_t totalBytes = 0; while (numIterationsLeft-- > 0) { long numFrames = 0; MediaBuffer *buffer; for (;;) { int64_t startDecodeUs = getNowUs(); status_t err = rawSource->read(&buffer, &options); int64_t delayDecodeUs = getNowUs() - startDecodeUs; options.clearSeekTo(); if (err != OK) { CHECK(buffer == NULL); if (err == INFO_FORMAT_CHANGED) { printf("format changed.\n"); continue; } break; } if (buffer->range_length() > 0 && (n++ % 16) == 0) { printf("."); fflush(stdout); } sumDecodeUs += delayDecodeUs; totalBytes += buffer->range_length(); buffer->release(); buffer = NULL; ++numFrames; if (gMaxNumFrames > 0 && numFrames == gMaxNumFrames) { break; } if (gReproduceBug == 1 && numFrames == 40) { printf("seeking past the end now."); options.setSeekTo(0x7fffffffL); } else if (gReproduceBug == 2 && numFrames == 40) { printf("seeking to 5 secs."); options.setSeekTo(5000000); } } printf("$"); fflush(stdout); options.setSeekTo(0); } rawSource->stop(); printf("\n"); int64_t delay = getNowUs() - startTime; if (!strncasecmp("video/", mime, 6)) { printf("avg. %.2f fps\n", n * 1E6 / delay); printf("avg. time to decode one buffer %.2f usecs\n", (double)sumDecodeUs / n); printf("decoded a total of %d frame(s).\n", n); } else if (!strncasecmp("audio/", mime, 6)) { // Frame count makes less sense for audio, as the output buffer // sizes may be different across decoders. printf("avg. 
%.2f KB/sec\n", totalBytes / 1024 * 1E6 / delay); printf("decoded a total of %lld bytes\n", totalBytes); } }
// Starts audio playback: optionally starts the source, primes the first
// buffer (tolerating one leading INFO_FORMAT_CHANGED), validates the raw-PCM
// format, and opens either the client-provided audio sink or a new
// AudioTrack. On any failure the first buffer is released and, if this call
// started the source, the source is stopped again before the error returns.
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    CHECK(!mStarted);
    CHECK(mSource != NULL);

    status_t err;
    if (!sourceAlreadyStarted) {
#ifdef QCOM_HARDWARE
        mSourcePaused = false;
#endif
        err = mSource->start();

        if (err != OK) {
            return err;
        }
    }

    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.
    CHECK(mFirstBuffer == NULL);

    MediaSource::ReadOptions options;
    if (mSeeking) {
        // A seek requested before start is folded into the priming read.
        options.setSeekTo(mSeekTimeUs);
        mSeeking = false;
    }

    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        ALOGV("INFO_FORMAT_CHANGED!!!");

        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }

    sp<MetaData> format = mSource->getFormat();
    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    // This player only accepts raw PCM input.
    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));

    success = format->findInt32(kKeySampleRate, &mSampleRate);
    CHECK(success);

    int32_t numChannels, channelMask;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    CHECK(success);

    if(!format->findInt32(kKeyChannelMask, &channelMask)) {
        // log only when there's a risk of ambiguity of channel mask selection
        ALOGI_IF(numChannels > 2,
                "source format didn't specify channel mask, using (%d) channel order", numChannels);
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    if (mAudioSink.get() != NULL) {
        // Playing through the client-supplied sink (e.g. MediaPlayer's).
        // NOTE(review): this inner 'err' shadows the outer declaration;
        // intentional here since both paths return it directly.
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (mAllowDeepBuffering ?
                        AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
                        AUDIO_OUTPUT_FLAG_NONE));
        if (err != OK) {
            // Unwind: drop the primed buffer and undo our source start.
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
        mFrameSize = mAudioSink->frameSize();

        mAudioSink->start();
    } else {
        // playing to an AudioTrack, set up mask if necessary
        audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
                audio_channel_out_mask_from_count(numChannels) : channelMask;
        if (0 == audioMask) {
            return BAD_VALUE;
        }

        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);

        if ((err = mAudioTrack->initCheck()) != OK) {
            // Unwind: destroy the track, drop the primed buffer, and undo
            // our source start.
            delete mAudioTrack;
            mAudioTrack = NULL;

            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
        mFrameSize = mAudioTrack->frameSize();

        mAudioTrack->start();
    }

    mStarted = true;
    mPinnedTimeUs = -1ll;

    return OK;
}
// Audio callback workhorse: copies up to |size| bytes of decoded PCM into
// |data|, pulling new buffers from mSource as needed. Returns the number of
// bytes actually written (0 once EOS has been reached).
//
// Observer notifications (seek-complete, EOS) are only *recorded* inside the
// loop (postSeekComplete / postEOS) and posted after mLock has been dropped,
// to avoid calling out to the observer while holding the lock.
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    if (mNumFramesPlayed == 0) {
        ALOGV("AudioCallback");
    }

    if (mReachedEOS) {
        return 0;
    }

    // Deferred observer notifications (posted after releasing mLock).
    bool postSeekComplete = false;
    bool postEOS = false;
    int64_t postEOSDelayUs = 0;

    size_t size_done = 0;
    size_t size_remaining = size;
    while (size_remaining > 0) {
        MediaSource::ReadOptions options;

        {
            // Check for a pending seek under the lock; the actual source
            // read below happens with the lock released.
            Mutex::Autolock autoLock(mLock);

            if (mSeeking) {
                if (mIsFirstBuffer) {
                    // The stashed priming buffer predates the seek; drop it.
                    if (mFirstBuffer != NULL) {
                        mFirstBuffer->release();
                        mFirstBuffer = NULL;
                    }
                    mIsFirstBuffer = false;
                }

                options.setSeekTo(mSeekTimeUs);

                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }

                mSeeking = false;
                if (mObserver) {
                    postSeekComplete = true;
                }
            }
        }

        if (mInputBuffer == NULL) {
            status_t err;

            if (mIsFirstBuffer) {
                // Consume the buffer primed by start() before reading more.
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                err = mFirstBufferResult;

                mIsFirstBuffer = false;
            } else {
                err = mSource->read(&mInputBuffer, &options);
#ifdef QCOM_HARDWARE
                // Paused source may legitimately return OK with no buffer.
                if (err == OK && mInputBuffer == NULL && mSourcePaused) {
                    ALOGV("mSourcePaused, return 0 from fillBuffer");
                    return 0;
                }
#endif
            }

            // Invariant: success implies a buffer, failure implies none.
            CHECK((err == OK && mInputBuffer != NULL)
                   || (err != OK && mInputBuffer == NULL));

            Mutex::Autolock autoLock(mLock);

            if (err != OK) {
                if (mObserver && !mReachedEOS) {
                    // We don't want to post EOS right away but only
                    // after all frames have actually been played out.

                    // These are the number of frames submitted to the
                    // AudioTrack that you haven't heard yet.
                    uint32_t numFramesPendingPlayout =
                        getNumFramesPendingPlayout();

                    // These are the number of frames we're going to
                    // submit to the AudioTrack by returning from this
                    // callback.
                    uint32_t numAdditionalFrames = size_done / mFrameSize;

                    numFramesPendingPlayout += numAdditionalFrames;

                    int64_t timeToCompletionUs =
                        (1000000ll * numFramesPendingPlayout) / mSampleRate;

                    ALOGV("total number of frames played: %lld (%lld us)",
                            (mNumFramesPlayed + numAdditionalFrames),
                            1000000ll * (mNumFramesPlayed + numAdditionalFrames)
                                / mSampleRate);

                    ALOGV("%d frames left to play, %lld us (%.2f secs)",
                         numFramesPendingPlayout,
                         timeToCompletionUs, timeToCompletionUs / 1E6);

                    postEOS = true;
                    if (mAudioSink->needsTrailingPadding()) {
                        postEOSDelayUs = timeToCompletionUs + mLatencyUs;
                    } else {
                        postEOSDelayUs = 0;
                    }
                }

                mReachedEOS = true;
                mFinalStatus = err;
                break;
            }

            // Refresh latency each buffer; the sink/track value can change.
            if (mAudioSink != NULL) {
                mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            } else {
                mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
            }

            if (mInputBuffer->range_length() != 0) { // only non-empty buffers carry a timestamp
                CHECK(mInputBuffer->meta_data()->findInt64(
                        kKeyTime, &mPositionTimeMediaUs));
            }

            // Wall-clock position derived from frames already played plus
            // frames written so far in this callback.
            mPositionTimeRealUs =
                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
                    / mSampleRate;

            ALOGV("buffer->size() = %d, "
                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
                 mInputBuffer->range_length(),
                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
        }

        if (mInputBuffer->range_length() == 0) {
            // Spurious empty buffer: discard and read again.
            mInputBuffer->release();
            mInputBuffer = NULL;

            continue;
        }

        size_t copy = size_remaining;
        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy((char *)data + size_done,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);

        // Advance the buffer's valid range past the bytes we consumed.
        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
                                mInputBuffer->range_length() - copy);

        size_done += copy;
        size_remaining -= copy;
    }

    {
        Mutex::Autolock autoLock(mLock);
        mNumFramesPlayed += size_done / mFrameSize;

        if (mReachedEOS) {
            // Pin the position clock at the last update time once at EOS.
            mPinnedTimeUs = mNumFramesPlayedSysTimeUs;
        } else {
            mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
            mPinnedTimeUs = -1ll;
        }
    }

    // Deferred notifications, posted without holding mLock.
    if (postEOS) {
        mObserver->postAudioEOS(postEOSDelayUs);
    }

    if (postSeekComplete) {
        mObserver->postAudioSeekComplete();
    }

    return size_done;
}
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aSeekTimeUs) { if (!mVideoSource.get()) return false; for (;;) { ReleaseVideoBuffer(); status_t err; if (aSeekTimeUs != -1) { MediaSource::ReadOptions options; options.setSeekTo(aSeekTimeUs); err = mVideoSource->read(&mVideoBuffer, &options); } else { err = mVideoSource->read(&mVideoBuffer); } aSeekTimeUs = -1; if (err == OK) { if (mVideoBuffer->range_length() == 0) // If we get a spurious empty buffer, keep going continue; int64_t timeUs; int32_t unreadable; int32_t keyFrame; if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) { LOG("no key time"); return false; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) { keyFrame = 0; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) { unreadable = 0; } LOG("data: %p size: %u offset: %u length: %u unreadable: %d", mVideoBuffer->data(), mVideoBuffer->size(), mVideoBuffer->range_offset(), mVideoBuffer->range_length(), unreadable); char *data = reinterpret_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset(); size_t length = mVideoBuffer->range_length(); if (unreadable) { LOG("video frame is unreadable"); } if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) { return false; } return true; } if (err == INFO_FORMAT_CHANGED) { // If the format changed, update our cached info. if (!SetVideoFormat()) { return false; } // Ok, try to read a buffer again. continue; } /* err == ERROR_END_OF_STREAM */ break; } return false; }
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aTimeUs, bool aKeyframeSkip, bool aDoSeek) { if (!mVideoSource.get()) return false; ReleaseVideoBuffer(); status_t err; if (aDoSeek) { MediaSource::ReadOptions options; options.setSeekTo(aTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC); err = mVideoSource->read(&mVideoBuffer, &options); } else { err = mVideoSource->read(&mVideoBuffer); } if (err == OK && mVideoBuffer->range_length() > 0) { int64_t timeUs; int64_t durationUs; int32_t unreadable; int32_t keyFrame; if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) { NS_WARNING("OMX decoder did not return frame time"); return false; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) { keyFrame = 0; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) { unreadable = 0; } mozilla::layers::SurfaceDescriptor *descriptor = nullptr; if ((mVideoBuffer->graphicBuffer().get())) { descriptor = mNativeWindow->getSurfaceDescriptorFromBuffer(mVideoBuffer->graphicBuffer().get()); } if (descriptor) { aFrame->mGraphicBuffer = new mozilla::layers::VideoGraphicBuffer(mVideoBuffer, descriptor); aFrame->mRotation = mVideoRotation; aFrame->mTimeUs = timeUs; aFrame->mEndTimeUs = timeUs + durationUs; aFrame->mKeyFrame = keyFrame; aFrame->Y.mWidth = mVideoWidth; aFrame->Y.mHeight = mVideoHeight; } else { char *data = static_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset(); size_t length = mVideoBuffer->range_length(); if (unreadable) { LOG(PR_LOG_DEBUG, "video frame is unreadable"); } if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) { return false; } aFrame->mEndTimeUs = timeUs + durationUs; } if (aKeyframeSkip && timeUs < aTimeUs) { aFrame->mShouldSkip = true; } } else if (err == INFO_FORMAT_CHANGED) { // If the format changed, update our cached info. 
if (!SetVideoFormat()) { return false; } else { return ReadVideo(aFrame, aTimeUs, aKeyframeSkip, aDoSeek); } } else if (err == ERROR_END_OF_STREAM) { return false; } else if (err == UNKNOWN_ERROR) { // This sometimes is used to mean "out of memory", but regardless, // don't keep trying to decode if the decoder doesn't want to. return false; } return true; }
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aSeekTimeUs) { MOZ_ASSERT(aSeekTimeUs >= -1); if (!mVideoSource.get()) return false; ReleaseVideoBuffer(); status_t err; if (aSeekTimeUs != -1) { MediaSource::ReadOptions options; options.setSeekTo(aSeekTimeUs); err = mVideoSource->read(&mVideoBuffer, &options); } else { err = mVideoSource->read(&mVideoBuffer); } if (err == OK && mVideoBuffer->range_length() > 0) { int64_t timeUs; int32_t keyFrame; if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) { LOG("no frame time"); return false; } if (timeUs < 0) { LOG("frame time %lld must be nonnegative", timeUs); return false; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) { keyFrame = 0; } char *data = reinterpret_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset(); size_t length = mVideoBuffer->range_length(); if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) { return false; } } else if (err == INFO_FORMAT_CHANGED) { // If the format changed, update our cached info. LOG("mVideoSource INFO_FORMAT_CHANGED"); if (!SetVideoFormat()) return false; else return ReadVideo(aFrame, aSeekTimeUs); } else if (err == ERROR_END_OF_STREAM) { LOG("mVideoSource END_OF_STREAM"); } else if (err != OK) { LOG("mVideoSource ERROR %#x", err); } return err == OK; }
/**
 *******************************************************************************
 * @brief   Gets an access unit (AU) from the stream handler source.
 * @note    AU is the smallest possible amount of data to be decoded by decoder
 *
 * @param   context:        (IN) Context of the reader
 * @param   pStreamHandler  (IN) The stream handler of the stream to make jump
 * @param   pAccessUnit     (I/O)Pointer to an access unit to fill with read data
 * @return  M4NO_ERROR           there is no error
 * @return  M4ERR_PARAMETER      at least one parameter is not properly set
 * @returns M4ERR_ALLOC          memory allocation failed
 * @returns M4WAR_NO_MORE_AU     there are no more access unit in the stream
 *******************************************************************************
 */
M4OSA_ERR VideoEditorMp3Reader_getNextAu(M4OSA_Context context,
        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
    VideoEditorMp3Reader_Context *pReaderContext =
        (VideoEditorMp3Reader_Context*)context;
    M4OSA_ERR err = M4NO_ERROR;
    M4SYS_AccessUnit* pAu;
    // Fix: initialize to NULL so the null-check below is well-defined even
    // if read() fails without writing the out parameter (the read status
    // itself is not checked here; EOS/error is inferred from a NULL buffer).
    MediaBuffer *mAudioBuffer = NULL;
    MediaSource::ReadOptions options;

    ALOGV("VideoEditorMp3Reader_getNextAu start");
    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
        "VideoEditorMp3Reader_getNextAu: invalid context");
    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_StreamHandler");
    M4OSA_DEBUG_IF1((pAccessUnit == 0), M4ERR_PARAMETER,
        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_AccessUnit");

    // Only the audio stream handler registered in the context is supported.
    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
        mAudioStreamHandler) {
        pAu = &pReaderContext->mAudioAu;
    } else {
        ALOGV("VideoEditorMp3Reader_getNextAu: StreamHandler is not known\n");
        return M4ERR_PARAMETER;
    }

    // A pending jump is folded into this read as a seek option.
    if (pReaderContext->mSeeking) {
        options.setSeekTo(pReaderContext->mSeekTime);
    }

    pReaderContext->mMediaSource->read(&mAudioBuffer, &options);

    if (mAudioBuffer != NULL) {
        // (Re)allocate the AU's buffer if absent or too small; size is
        // rounded up to a 4-byte multiple for the aligned allocator.
        if ((pAu->dataAddress == NULL) ||
            (pAu->size < mAudioBuffer->range_length())) {
            if (pAu->dataAddress != NULL) {
                free((M4OSA_Int32*)pAu->dataAddress);
                pAu->dataAddress = NULL;
            }

            pAu->dataAddress = (M4OSA_Int32*)M4OSA_32bitAlignedMalloc(
                (mAudioBuffer->range_length() + 3) & ~0x3,
                M4READER_MP3, (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
            if (pAu->dataAddress == NULL) {
                ALOGV("VideoEditorMp3Reader_getNextAu malloc failed");
                // Allocation failure is treated as fatal: tear down the
                // source and data source before reporting.
                pReaderContext->mMediaSource->stop();
                pReaderContext->mMediaSource.clear();
                pReaderContext->mDataSource.clear();

                return M4ERR_ALLOC;
            }
        }
        pAu->size = mAudioBuffer->range_length();

        memcpy((M4OSA_MemAddr8)pAu->dataAddress,
            (const char *)mAudioBuffer->data() + mAudioBuffer->range_offset(),
            mAudioBuffer->range_length());

        mAudioBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&pAu->CTS);

        pAu->CTS = pAu->CTS / 1000; /*converting the microsec to millisec */
        pAu->DTS  = pAu->CTS;
        pAu->attribute = M4SYS_kFragAttrOk;
        mAudioBuffer->release();

        ALOGV("VideoEditorMp3Reader_getNextAu AU CTS = %ld",pAu->CTS);

        // Mirror the internal AU into the caller-visible access unit.
        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
        pAccessUnit->m_size = pAu->size;
        pAccessUnit->m_CTS = pAu->CTS;
        pAccessUnit->m_DTS = pAu->DTS;
        pAccessUnit->m_attribute = pAu->attribute;
    } else {
        // No buffer returned: treat as end of stream.
        ALOGV("VideoEditorMp3Reader_getNextAu EOS reached.");
        pAccessUnit->m_size=0;
        err = M4WAR_NO_MORE_AU;
    }
    pAu->nbFrag = 0;

    options.clearSeekTo();
    pReaderContext->mSeeking = M4OSA_FALSE;
    mAudioBuffer = NULL;

    ALOGV("VideoEditorMp3Reader_getNextAu end");

    return err;
}
// Audio callback workhorse (offload-capable variant): copies up to |size|
// bytes of decoded PCM into |data|, pulling buffers from mSource as needed.
// Returns bytes written (0 at EOS or while the source is paused with no
// buffer available).
//
// Observer notifications (seek-complete, EOS) are recorded in locals inside
// the loop and posted only after mLock is released, except the offload
// seek-complete which is posted early (under mLock) so the seek time can be
// adjusted — see the refreshSeekTime handling below.
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    ATRACE_CALL();
    if (mNumFramesPlayed == 0) {
        ALOGV("AudioCallback");
    }

    if (mReachedEOS) {
        return 0;
    }

    // Deferred observer notifications.
    bool postSeekComplete = false;
    bool postEOS = false;
    int64_t postEOSDelayUs = 0;

    size_t size_done = 0;
    size_t size_remaining = size;
    while (size_remaining > 0) {
        MediaSource::ReadOptions options;
        // True when this iteration's read carries a seek, so the position
        // bookkeeping below must be refreshed.
        bool refreshSeekTime = false;

        {
            // Check for a pending seek under the lock; the blocking source
            // read below happens with the lock released.
            Mutex::Autolock autoLock(mLock);

            if (mSeeking) {
                if (mIsFirstBuffer) {
                    // The stashed priming buffer predates the seek; drop it.
                    if (mFirstBuffer != NULL) {
                        mFirstBuffer->release();
                        mFirstBuffer = NULL;
                    }
                    mIsFirstBuffer = false;
                }

                options.setSeekTo(mSeekTimeUs);
                refreshSeekTime = true;

                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }

                mSeeking = false;
                if (mObserver) {
                    postSeekComplete = true;
                }
            }
        }

        if (mInputBuffer == NULL) {
            status_t err;

            if (mIsFirstBuffer) {
                // Consume the buffer primed by start() before reading more.
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                err = mFirstBufferResult;

                mIsFirstBuffer = false;
            } else {
                if(!mSourcePaused) {
                    err = mSource->read(&mInputBuffer, &options);
                    // Paused source may return OK with no buffer.
                    if (err == OK && mInputBuffer == NULL && mSourcePaused) {
                        ALOGV("mSourcePaused, return 0 from fillBuffer");
                        return 0;
                    }
                } else {
                    // Source paused: stop filling for now.
                    break;
                }
            }

            // Transient failure: retry unless the source just paused.
            if(err == -EAGAIN) {
                if(mSourcePaused){
                    break;
                } else {
                    continue;
                }
            }

            // Invariant: success implies a buffer, failure implies none.
            CHECK((err == OK && mInputBuffer != NULL)
                   || (err != OK && mInputBuffer == NULL));

            Mutex::Autolock autoLock(mLock);

            if (err != OK && err != INFO_FORMAT_CHANGED) {
                if (!mReachedEOS) {
                    if (useOffload()) {
                        // After seek there is a possible race condition if
                        // OffloadThread is observing state_stopping_1 before
                        // framesReady() > 0. Ensure sink stop is called
                        // after last buffer is released. This ensures the
                        // partial buffer is written to the driver before
                        // stopping one is observed.The drawback is that
                        // there will be an unnecessary call to the parser
                        // after parser signalled EOS.
                        int64_t playPosition = 0;
                        playPosition = getOutputPlayPositionUs_l();
                        if ((size_done > 0) && (playPosition < mDurationUs)) {
                            ALOGW("send Partial buffer down\n");
                            ALOGW("skip calling stop till next fillBuffer\n");
                            break;
                        }

                        // no more buffers to push - stop() and wait for STREAM_END
                        // don't set mReachedEOS until stream end received
                        if (mAudioSink != NULL) {
                            mAudioSink->stop();
                        } else {
                            mAudioTrack->stop();
                        }
                    } else {
                        if (mObserver) {
                            // We don't want to post EOS right away but only
                            // after all frames have actually been played out.

                            // These are the number of frames submitted to the
                            // AudioTrack that you haven't heard yet.
                            uint32_t numFramesPendingPlayout =
                                getNumFramesPendingPlayout();

                            // These are the number of frames we're going to
                            // submit to the AudioTrack by returning from this
                            // callback.
                            uint32_t numAdditionalFrames = size_done / mFrameSize;

                            numFramesPendingPlayout += numAdditionalFrames;

                            int64_t timeToCompletionUs =
                                (1000000ll * numFramesPendingPlayout) / mSampleRate;

                            ALOGV("total number of frames played: %lld (%lld us)",
                                    (mNumFramesPlayed + numAdditionalFrames),
                                    1000000ll * (mNumFramesPlayed + numAdditionalFrames)
                                        / mSampleRate);

                            ALOGV("%d frames left to play, %lld us (%.2f secs)",
                                 numFramesPendingPlayout,
                                 timeToCompletionUs, timeToCompletionUs / 1E6);

                            postEOS = true;
                            if (mAudioSink->needsTrailingPadding()) {
                                postEOSDelayUs = timeToCompletionUs + mLatencyUs;
                            } else {
                                postEOSDelayUs = 0;
                            }
                        }

                        mReachedEOS = true;
                    }
                }

                mFinalStatus = err;
                break;
            }

            // Refresh latency each buffer; the sink/track value can change.
            if (mAudioSink != NULL) {
                mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            } else {
                mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
            }

            // Only non-empty buffers carry a timestamp.
            if(mInputBuffer->range_length() != 0) {
                CHECK(mInputBuffer->meta_data()->findInt64(
                        kKeyTime, &mPositionTimeMediaUs));
            }

            // need to adjust the mStartPosUs for offload decoding since parser
            // might not be able to get the exact seek time requested.
            if (refreshSeekTime) {
                if (useOffload()) {
                    if (postSeekComplete) {
                        ALOGV("fillBuffer is going to post SEEK_COMPLETE");
                        // Posted here (early, under mLock) rather than at the
                        // end, then cleared so it isn't posted twice.
                        mObserver->postAudioSeekComplete();
                        postSeekComplete = false;
                    }

                    mStartPosUs = mPositionTimeMediaUs;
                    ALOGV("adjust seek time to: %.2f", mStartPosUs/ 1E6);
                }
                // clear seek time with mLock locked and once we have valid mPositionTimeMediaUs
                // and mPositionTimeRealUs
                // before clearing mSeekTimeUs check if a new seek request has been received while
                // we were reading from the source with mLock released.
                if (!mSeeking) {
                    mSeekTimeUs = 0;
                }
            }

            if (!useOffload()) {
                // Wall-clock position derived from frames already played plus
                // frames written so far in this callback.
                mPositionTimeRealUs =
                    ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
                        / mSampleRate;

                ALOGV("buffer->size() = %d, "
                     "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
                     mInputBuffer->range_length(),
                     mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
            }
        }

        if (mInputBuffer->range_length() == 0) {
            // Spurious empty buffer: discard and read again.
            mInputBuffer->release();
            mInputBuffer = NULL;

            continue;
        }

        size_t copy = size_remaining;
        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy((char *)data + size_done,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);

        // Advance the buffer's valid range past the bytes we consumed.
        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
                                mInputBuffer->range_length() - copy);

        size_done += copy;
        size_remaining -= copy;
    }

    if (useOffload()) {
        // We must ask the hardware what it has played
        mPositionTimeRealUs = getOutputPlayPositionUs_l();
        ALOGV("mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
             mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
    }

    {
        Mutex::Autolock autoLock(mLock);
        mNumFramesPlayed += size_done / mFrameSize;

        if (mReachedEOS) {
            // Pin the position clock at the last update time once at EOS.
            mPinnedTimeUs = mNumFramesPlayedSysTimeUs;
        } else {
            mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
            mPinnedTimeUs = -1ll;
        }
    }

    // Deferred notifications, posted without holding mLock.
    if (postEOS) {
        mObserver->postAudioEOS(postEOSDelayUs);
    }

    if (postSeekComplete) {
        mObserver->postAudioSeekComplete();
    }

    return size_done;
}