bool OmxDecoder::ReadAudio(AudioFrame *aFrame, int64_t aSeekTimeUs)
{
  // Pull one decoded audio buffer from mAudioSource and convert it into
  // aFrame. aSeekTimeUs == -1 means "no seek"; otherwise seek first.
  MOZ_ASSERT(aSeekTimeUs >= -1);

  status_t readStatus;
  if (mAudioMetadataRead && aSeekTimeUs == -1) {
    // Reuse the buffer that was already fetched while reading metadata.
    readStatus = OK;
  } else {
    ReleaseAudioBuffer();
    if (aSeekTimeUs == -1) {
      readStatus = mAudioSource->read(&mAudioBuffer);
    } else {
      ReadOptions options;
      options.setSeekTo(aSeekTimeUs);
      readStatus = mAudioSource->read(&mAudioBuffer, &options);
    }
  }

  // The cached metadata buffer is consumed now; any retry below must not seek.
  mAudioMetadataRead = false;
  aSeekTimeUs = -1;

  if (readStatus == OK && mAudioBuffer->range_length() != 0) {
    int64_t presentationUs;
    if (!mAudioBuffer->meta_data()->findInt64(kKeyTime, &presentationUs)) {
      LOG("no frame time");
      return false;
    }
    if (presentationUs < 0) {
      LOG("frame time %lld must be nonnegative", presentationUs);
      return false;
    }
    return ToAudioFrame(aFrame, presentationUs,
                        mAudioBuffer->data(),
                        mAudioBuffer->range_offset(),
                        mAudioBuffer->range_length(),
                        mAudioChannels, mAudioSampleRate);
  }

  if (readStatus == INFO_FORMAT_CHANGED) {
    // The format changed; refresh our cached info and read again.
    LOG("mAudioSource INFO_FORMAT_CHANGED");
    return SetAudioFormat() ? ReadAudio(aFrame, aSeekTimeUs) : false;
  }

  if (readStatus == ERROR_END_OF_STREAM) {
    LOG("mAudioSource END_OF_STREAM");
  } else if (readStatus != OK) {
    LOG("mAudioSource ERROR %#x", readStatus);
  }

  // NOTE: a zero-length buffer with readStatus == OK also lands here and
  // reports success without filling aFrame, matching the original behavior.
  return readStatus == OK;
}
status_t M2VDecoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    // Decode one MPEG-2 frame. The flow is pipelined: the input buffer
    // fetched at the END of this call is the one decoded on the NEXT call.
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        ALOGV("seek requested to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
        CHECK(seekTimeUs >= 0);

        // Defer the actual seek to the input-fetch loop below.
        mPendingSeekTimeUs = seekTimeUs;
        mPendingSeekMode = mode;

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        // Flush the hardware decoder so stale reference frames are dropped.
        sDecApi.reset_class_On2Decoder(pOn2Dec);
    }

    if (mInputBuffer == NULL) {
        ALOGV("fetching new input buffer.");

        bool seeking = false;

        for (;;) {
            if (mPendingSeekTimeUs >= 0) {
                ALOGV("reading data from timestamp %lld (%.2f secs)",
                     mPendingSeekTimeUs, mPendingSeekTimeUs / 1E6);
            }

            ReadOptions seekOptions;
            if (mPendingSeekTimeUs >= 0) {
                seeking = true;
                seekOptions.setSeekTo(mPendingSeekTimeUs, mPendingSeekMode);
                mPendingSeekTimeUs = -1;
            }
            status_t err = mSource->read(&mInputBuffer, &seekOptions);
            seekOptions.clearSeekTo();

            if (err != OK) {
                // *out is already NULL here; just propagate the error.
                // (Replaces the original dead ternary that always yielded err.)
                return err;
            }

            if (mInputBuffer->range_length() > 0) {
                break;
            }

            // Skip empty input buffers.
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        if (seeking) {
            int64_t targetTimeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)
                    && targetTimeUs >= 0) {
                mTargetTimeUs = targetTimeUs;
            } else {
                mTargetTimeUs = -1;
            }
        }
    }

    MediaBuffer *aOutBuf = new MediaBuffer(sizeof(VPU_FRAME));
    uint8_t *aOutBuffer = (uint8_t *)aOutBuf->data();
    uint32_t aOutputLength = 0;
    uint8_t *pInput = (uint8_t *)mInputBuffer->data();
    uint32_t aInBufSize = mInputBuffer->range_length();
    int64_t outputTime = 0LL;

    memset(aOutBuffer, 0, sizeof(VPU_FRAME));
    if (sDecApi.dec_oneframe_class_On2Decoder(
            pOn2Dec,
            aOutBuffer,
            (uint32_t*)&aOutputLength,
            pInput,
            &aInBufSize)) {
        // Decode failed: drain any frame the decoder may still hold, then
        // release both buffers before bailing out.
        sDecApi.get_oneframe_class_On2Decoder(pOn2Dec, aOutBuffer, (uint32_t*)&aOutputLength);
        aOutBuf->releaseframe();
        mInputBuffer->release();
        mInputBuffer = NULL;
        ALOGE("m2vdec failed");
        return UNKNOWN_ERROR;
    }

    if (mInputBuffer) {
        mInputBuffer->release();
        mInputBuffer = NULL;
    }

    // Prefetch the next input buffer; it will be decoded on the next call.
    if (mInputBuffer == NULL) {
        for (;;) {
            status_t err = mSource->read(&mInputBuffer);
            if (err != OK) {
                // BUGFIX: the original leaked aOutBuf on this error path;
                // release it before returning.
                aOutBuf->release();
                *out = NULL;
                return err;
            }
            if (mInputBuffer->range_length() > 0) {
                break;
            }
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }

    sDecApi.get_oneframe_class_On2Decoder(pOn2Dec, aOutBuffer, (uint32_t*)&aOutputLength);
    if (aOutputLength)
        mNumFramesOutput++;

    // The decoder reports the presentation time split into two 32-bit halves;
    // reassemble and scale to microseconds (original multiplies by 1000).
    VPU_FRAME *frame = (VPU_FRAME *)aOutBuffer;
    outputTime = ((int64_t)(frame->ShowTime.TimeHigh) << 32) |
                 ((int64_t)(frame->ShowTime.TimeLow));
    outputTime *= 1000;
    aOutBuf->meta_data()->setInt64(kKeyTime, outputTime);

    if (aOutputLength <= 0)
        aOutBuf->set_range(0, 0);

    *out = aOutBuf;
    return OK;
}
status_t VideoEditorSRC::getNextBuffer(AudioBufferProvider::Buffer *pBuffer, int64_t pts) {
    // AudioResampler callback: fill pBuffer with raw PCM from mSource.
    // Runs inside the resampler, so it must not stop()/reconfigure here;
    // it only records pending state (mFormatChanged, mStopPending).
    ALOGV("getNextBuffer %d, chan = %d", pBuffer->frameCount, mChannelCnt);
    uint32_t done = 0;
    // 16-bit PCM: bytes wanted = frames * channels * 2.
    uint32_t want = pBuffer->frameCount * mChannelCnt * 2;
    pBuffer->raw = malloc(want);

    while (mStarted && want > 0) {
        // If we don't have any data left, read a new buffer.
        if (!mBuffer) {
            // if we seek, reset the initial time stamp and accumulated time
            ReadOptions options;
            if (mSeekTimeUs >= 0) {
                ALOGV("%p cacheMore_l Seek requested = %lld", this, mSeekTimeUs);
                ReadOptions::SeekMode mode = mSeekMode;
                options.setSeekTo(mSeekTimeUs, mode);
                mSeekTimeUs = -1;
                mInitialTimeStampUs = -1;
                mAccuOutBufferSize = 0;
            }

            status_t err = mSource->read(&mBuffer, &options);

            if (err != OK) {
                // Any failure: hand back an empty buffer to the resampler.
                free(pBuffer->raw);
                pBuffer->raw = NULL;
                pBuffer->frameCount = 0;
            }

            if (err == INFO_FORMAT_CHANGED) {
                ALOGV("getNextBuffer: source read returned INFO_FORMAT_CHANGED");
                // At this point we cannot switch to a new AudioResampler because
                // we are in a callback called by the AudioResampler itself. So
                // just remember the fact that the format has changed, and let
                // read() handles this.
                mFormatChanged = true;
                return err;
            }

            // EOS or some other error
            if (err != OK) {
                ALOGV("EOS or some err: %d", err);
                // We cannot call stop() here because stop() will release the
                // AudioResampler, and we are in a callback of the AudioResampler.
                // So just remember the fact and let read() call stop().
                mStopPending = true;
                return err;
            }

            CHECK(mBuffer);
            mLeftover = mBuffer->range_length();
            if (mInitialTimeStampUs == -1) {
                int64_t curTS;
                sp<MetaData> from = mBuffer->meta_data();
                from->findInt64(kKeyTime, &curTS);
                // BUGFIX: log the timestamp being assigned (curTS); the
                // original logged mInitialTimeStampUs, which was still -1 here.
                ALOGV("setting mInitialTimeStampUs to %lld", curTS);
                mInitialTimeStampUs = curTS;
            }
        }

        // Now copy data to the destination
        uint32_t todo = mLeftover;
        if (todo > want) {
            todo = want;
        }

        // mLeftover counts unconsumed bytes from the tail of mBuffer, so the
        // next unread byte sits at (end of range) - mLeftover.
        uint8_t* end = (uint8_t*)mBuffer->data() + mBuffer->range_offset()
                + mBuffer->range_length();
        memcpy((uint8_t*)pBuffer->raw + done, end - mLeftover, todo);
        done += todo;
        want -= todo;
        mLeftover -= todo;

        // Release MediaBuffer as soon as possible.
        if (mLeftover == 0) {
            mBuffer->release();
            mBuffer = NULL;
        }
    }

    pBuffer->frameCount = done / (mChannelCnt * 2);
    ALOGV("getNextBuffer done %d", pBuffer->frameCount);
    return OK;
}
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aSeekTimeUs, BufferCallback *aBufferCallback) { MOZ_ASSERT(aSeekTimeUs >= -1); if (!mVideoSource.get()) return false; ReleaseVideoBuffer(); status_t err; if (aSeekTimeUs != -1) { ReadOptions options; options.setSeekTo(aSeekTimeUs); err = mVideoSource->read(&mVideoBuffer, &options); } else { err = mVideoSource->read(&mVideoBuffer); } aFrame->mSize = 0; if (err == OK && mVideoBuffer->range_length() > 0) { int64_t timeUs; int32_t keyFrame; if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs) ) { LOG("no frame time"); return false; } if (timeUs < 0) { LOG("frame time %lld must be nonnegative", timeUs); return false; } if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) { keyFrame = 0; } char *data = reinterpret_cast<char *>(mVideoBuffer->data()) + mVideoBuffer->range_offset(); size_t length = mVideoBuffer->range_length(); if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame, aBufferCallback)) { return false; } } else if (err == INFO_FORMAT_CHANGED) { // If the format changed, update our cached info. LOG("mVideoSource INFO_FORMAT_CHANGED"); if (!SetVideoFormat()) return false; else return ReadVideo(aFrame, aSeekTimeUs, aBufferCallback); } else if (err == ERROR_END_OF_STREAM) { LOG("mVideoSource END_OF_STREAM"); } else if (err != OK) { LOG("mVideoSource ERROR %#x", err); } return err == OK; }