bool OmxJpegImageDecoder::decodeSource(sp<MediaSource> decoder,
        const sp<MediaSource>& source, SkBitmap* bm) {
    status_t rt = decoder->start();
    if (rt != OK) {
        ALOGE("Cannot start OMX Decoder!");
        return false;
    }

    int64_t startTime = getNowUs();
    MediaBuffer *buffer;

    // Decode the source.
    status_t err = decoder->read(&buffer, NULL);
    int64_t duration = getNowUs() - startTime;

    if (err != OK) {
        // On failure the decoder must not have handed out a buffer;
        // bail out instead of dereferencing a NULL buffer below.
        CHECK(buffer == NULL);
        decoder->stop();
        return false;
    }

    printf("Duration in decoder->read(): %.1f (msecs).\n", duration / 1E3);

    // Copy pixels from buffer to bm.
    // May need to check buffer->rawBytes() == bm->rawBytes().
    CHECK_EQ(buffer->size(), bm->getSize());
    memcpy(bm->getPixels(), buffer->data(), buffer->size());
    buffer->release();
    decoder->stop();

    return true;
}
status_t TimedTextVOBSUBSource::read(
        int64_t *startTimeUs, int64_t *endTimeUs, Parcel *parcel,
        const MediaSource::ReadOptions *options) {
    MediaBuffer *textBuffer = NULL;
    uint32_t type;
    const int *paletteData;
    size_t paletteDataSize = 0;

    status_t err = mSource->read(&textBuffer, options);
    if (err != OK) {
        ALOGE("mSource->read() failed, error code %d\n", err);
        return err;
    }
    CHECK(textBuffer != NULL);

    textBuffer->meta_data()->findInt64(kKeyTime, startTimeUs);
    char *content = (char *)textBuffer->data();
    size_t size = textBuffer->size();
    CHECK_GE(*startTimeUs, 0);

    mSubParser->stInit(content, size);
    do {
        err = mSubParser->stParseControlPacket();
        if (err != OK) break;
        if (mSubParser->m_iDataPacketSize <= 4) break;
        err = mSubParser->stParseDataPacket(NULL, 0);
        if (err != OK) break;
        //*startTimeUs = (int64_t)(mSubParser->m_iBeginTime);
        ALOGE("Call extractAndAppendLocalDescriptions, send data to \n");
        extractAndAppendLocalDescriptions(*startTimeUs, textBuffer, parcel);
    } while (false);

    textBuffer->release();
    *endTimeUs = -1;
    mSubParser->incTmpFileIdx();
    ALOGE("read() finished\n");
    return OK;
}
int OMXInterface::EncodeDecodeAudio(const char *mimeType, void *sourceData,
        size_t sourceSize, int32_t srcChannelCount, int32_t srcSampleRate,
        void **destData, size_t *destSize, bool encode) {
    sp<MetaData> meta, outFormat;
    int retVal = OK;

    meta = new MetaData;
    if (meta == NULL) {
        return ENOMEM;
    }
    meta->setCString(kKeyMIMEType, mimeType);
    meta->setInt32(kKeyChannelCount, srcChannelCount);
    meta->setInt32(kKeySampleRate, srcSampleRate);

    // The OMX client must be connected before its interface can be used.
    OMXClient *m_Client = new OMXClient;
    if (m_Client->connect() != OK) {
        delete m_Client;
        return -1;
    }

    sp<MediaSource> m_SourceAudio = new MemAudioSource(
            sourceData, sourceSize, srcChannelCount, srcSampleRate);
    sp<MediaSource> m_Decoder = OMXCodec::Create(
            m_Client->interface(), meta, encode, m_SourceAudio, NULL);

    if (m_Decoder != NULL && m_Decoder->start() == OK) {
        outFormat = m_Decoder->getFormat();

        MediaBuffer *outMediaBuffer = NULL;
        if (m_Decoder->read(&outMediaBuffer) == OK) {
            size_t outSize = outMediaBuffer->size();
            *destData = (void *)new char[outSize];
            memcpy(*destData, outMediaBuffer->data(), outSize);
            if (destSize != NULL) {
                *destSize = outSize;
            }
            // Return the buffer to the codec once its contents are copied out.
            outMediaBuffer->release();
        } else {
            retVal = -2;
        }
        m_Decoder->stop();
    } else {
        retVal = -1;
    }

    m_Client->disconnect();
    delete m_Client;
    return retVal;
}
void createMediaBufferCopy(
        const MediaBuffer& sourceBuffer,
        int64_t frameTime,
        MediaBuffer **newBuffer) {
    LOGV("createMediaBufferCopy");
    size_t sourceSize = sourceBuffer.size();
    void* sourcePointer = sourceBuffer.data();

    (*newBuffer) = new MediaBuffer(sourceSize);
    memcpy((*newBuffer)->data(), sourcePointer, sourceSize);

    (*newBuffer)->meta_data()->setInt64(kKeyTime, frameTime);
}
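A minimal caller sketch for createMediaBufferCopy, assuming a buffer obtained from some MediaSource::read() call (mSource and the control flow below are illustrative, not from the original code). The copy owns its own allocation, so the source buffer and the copy are released independently:

// Hypothetical usage sketch: copy a decoded buffer so it can outlive the
// buffer handed out by the source.
MediaBuffer *source = NULL;
status_t err = mSource->read(&source);      // mSource is an assumed sp<MediaSource>
if (err == OK && source != NULL) {
    int64_t timeUs = 0;
    source->meta_data()->findInt64(kKeyTime, &timeUs);

    MediaBuffer *copy = NULL;
    createMediaBufferCopy(*source, timeUs, &copy);
    source->release();                      // original buffer can go back immediately

    // ... use copy ...
    copy->release();                        // the copy is reference-counted separately
}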
status_t AMRWBEncoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));

    bool readFromSource = false;
    int64_t wallClockTimeUs = -1;

    while (mNumInputSamples < kNumSamplesPerFrame) {
        if (mInputBuffer == NULL) {
            err = mSource->read(&mInputBuffer, options);

            if (err != OK) {
                /*m@nufront start*/
                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }
                mStarted = false;
                mFrameEncodingCompletionCondition.signal();
                ZJFLOGD("function out for ERROR_END_OF_STREAM");
                return ERROR_END_OF_STREAM;
#if 0
                if (mNumInputSamples == 0) {
                    return ERROR_END_OF_STREAM;
                }
                memset(&mInputFrame[mNumInputSamples],
                       0,
                       sizeof(int16_t)
                            * (kNumSamplesPerFrame - mNumInputSamples));
                mNumInputSamples = kNumSamplesPerFrame;
                break;
#endif
                /*m@nufront end*/
            }

            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
            CHECK_EQ(align, 0);

            int64_t timeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
                wallClockTimeUs = timeUs;
            }
            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
                mAnchorTimeUs = timeUs;
            }
            readFromSource = true;
        } else {
            readFromSource = false;
        }

        size_t copy =
            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);

        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy(&mInputFrame[mNumInputSamples],
               (const uint8_t *)mInputBuffer->data()
                    + mInputBuffer->range_offset(),
               copy);

        mInputBuffer->set_range(
                mInputBuffer->range_offset() + copy,
                mInputBuffer->range_length() - copy);

        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        mNumInputSamples += copy / sizeof(int16_t);
        if (mNumInputSamples >= kNumSamplesPerFrame) {
            mNumInputSamples %= kNumSamplesPerFrame;
            break;  // Got a whole input frame (640 bytes).
        }
    }

    VO_CODECBUFFER inputData;
    memset(&inputData, 0, sizeof(inputData));
    inputData.Buffer = (unsigned char*) mInputFrame;
    inputData.Length = kInputBufferSize;
    CHECK(VO_ERR_NONE == mApiHandle->SetInputData(mEncoderHandle, &inputData));

    /*m@nufrnt start*/
    //MediaBuffer *buffer;
    //CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
    MediaBuffer *buffer;
    {
        Mutex::Autolock autoLock(mLock);
        buffer = new MediaBuffer(1024);
        ++mNumClientOwnedBuffers;
        buffer->setObserver(this);
        buffer->add_ref();
    }
    /*m@nufrnt end*/
    uint8_t *outPtr = (uint8_t *)buffer->data();

    VO_CODECBUFFER outputData;
    memset(&outputData, 0, sizeof(outputData));
    VO_AUDIO_OUTPUTINFO outputInfo;
    memset(&outputInfo, 0, sizeof(outputInfo));

    VO_U32 ret = VO_ERR_NONE;
    outputData.Buffer = outPtr;
    outputData.Length = buffer->size();
    ret = mApiHandle->GetOutputData(mEncoderHandle, &outputData, &outputInfo);
    CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL);

    buffer->set_range(0, outputData.Length);
    ++mNumFramesOutput;

    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
    buffer->meta_data()->setInt64(kKeyTime, mAnchorTimeUs + mediaTimeUs);
    if (readFromSource && wallClockTimeUs != -1) {
        buffer->meta_data()->setInt64(kKeyDriftTime,
                mediaTimeUs - wallClockTimeUs);
    }

    *out = buffer;
    return OK;
}
status_t MP3Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    bool seekCBR = false;

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        int64_t actualSeekTimeUs = seekTimeUs;
#ifndef ANDROID_DEFAULT_CODE
        if (!mEnableTOC) {
#endif
        if (mSeeker == NULL
                || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
            int32_t bitrate;
            if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                // bitrate is in bits/sec.
                ALOGI("no bitrate");
                return ERROR_UNSUPPORTED;
            }

            mCurrentTimeUs = seekTimeUs;
            mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
            seekCBR = true;
        } else {
            mCurrentTimeUs = actualSeekTimeUs;
        }
#ifndef ANDROID_DEFAULT_CODE
        } else {
            MP3_EXTR_DBG("before getFramePos seekTimeUs=%lld", seekTimeUs);
            off_t ActualPos = 0;
            status_t stat = getFramePos(seekTimeUs, &mCurrentTimeUs, &ActualPos, true);
            if (stat == BAD_VALUE) {
                int32_t bitrate;
                if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                    // bitrate is in bits/sec.
                    MP3_EXTR_WARN("no bitrate");
                    return ERROR_UNSUPPORTED;
                }

                mCurrentTimeUs = seekTimeUs;
                mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;

                if (mSeeker == NULL
                        || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
                    int32_t bitrate;
                    if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                        // bitrate is in bits/sec.
                        ALOGI("no bitrate");
                        return ERROR_UNSUPPORTED;
                    }

                    mCurrentTimeUs = seekTimeUs;
                    mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
                    seekCBR = true;
                } else {
                    mCurrentTimeUs = actualSeekTimeUs;
                }
            } else if (stat == ERROR_END_OF_STREAM) {
                return stat;
            } else {
                mCurrentPos = ActualPos;
                MP3_EXTR_DBG("after seek mCurrentTimeUs=%lld,pActualPos=%ld",
                        mCurrentTimeUs, ActualPos);
            }
        }
#endif

        mBasisTimeUs = mCurrentTimeUs;
        mSamplesRead = 0;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    size_t frame_size;
    int bitrate;
    int num_samples;
    int sample_rate;
    for (;;) {
        ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), 4);
        if (n < 4) {
            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        uint32_t header = U32_AT((const uint8_t *)buffer->data());

        if ((header & kMask) == (mFixedHeader & kMask)
                && GetMPEGAudioFrameSize(
                    header, &frame_size, &sample_rate, NULL,
                    &bitrate, &num_samples)) {
            // Re-calculate mCurrentTimeUs because we might have called Resync().
            if (seekCBR) {
                mCurrentTimeUs = (mCurrentPos - mFirstFramePos) * 8000 / bitrate;
                mBasisTimeUs = mCurrentTimeUs;
            }
            break;
        }

        // Lost sync.
        ALOGV("lost sync! header = 0x%08x, old header = 0x%08x\n",
                header, mFixedHeader);

        off64_t pos = mCurrentPos;
        if (!Resync(mDataSource, mFixedHeader, &pos, NULL, NULL)) {
            ALOGE("Unable to resync. Signalling end of stream.");

            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        mCurrentPos = pos;

        // Try again with the new position.
    }

    CHECK(frame_size <= buffer->size());

    ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
    if (n < (ssize_t)frame_size) {
        buffer->release();
        buffer = NULL;

        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mCurrentPos += frame_size;

    mSamplesRead += num_samples;
    mCurrentTimeUs = mBasisTimeUs + ((mSamplesRead * 1000000) / sample_rate);

    *out = buffer;

    return OK;
}
status_t APESource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;
    uint32_t newframe = 0, firstbyte = 0;
    ///LOGV("APESource::read");
    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    int32_t bitrate = 0;

    if (!mMeta->findInt32(kKeyBitRate, &bitrate)
            || !mMeta->findInt32(kKeySampleRate, &mSampleRate)) {
        LOGI("no bitrate");
        return ERROR_UNSUPPORTED;
    }

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        int64_t duration = 0;
        int64_t seektabletime = 0;
        if ((mTotalsample > 0) && (mTableOfContents[0] > 0)
                && (mSamplesPerFrame > 0)
                && mMeta->findInt64(kKeyDuration, &duration)) {
            ape_parser_ctx_t ape_ctx;
            uint32_t filepos, blocks_to_skip;
            ape_ctx.samplerate = mSampleRate;
            ape_ctx.blocksperframe = mSamplesPerFrame;
            ape_ctx.totalframes = mTotalFrame;
            ape_ctx.seektable = mTableOfContents;
            ape_ctx.firstframe = mTableOfContents[0];
            if (ape_calc_seekpos_by_microsecond(&ape_ctx, seekTimeUs,
                    &newframe, &filepos, &firstbyte, &blocks_to_skip) < 0) {
                LOGD("getseekto error exit");
                return ERROR_UNSUPPORTED;
            }
            mCurrentPos = filepos;
            mCurrentTimeUs =
                (int64_t)newframe * mSamplesPerFrame * 1000000ll / mSampleRate;
            LOGD("getseekto seekTimeUs=%lld, Actual time %lld, filepos %x, frame %d, seekbyte %d",
                    seekTimeUs, mCurrentTimeUs, mCurrentPos, newframe, firstbyte);
        } else {
            LOGD("getseekto parameter error exit");
            return ERROR_UNSUPPORTED;
        }
    }

    if ((mFileoffset != 0) && (mCurrentPos >= mFileoffset)) {
        LOGD("APESource::readAt to end filesize %x curr: %x", mFileoffset, mCurrentPos);
        return ERROR_END_OF_STREAM;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        LOGD("APESource::acquire_buffer fail");
        return err;
    }

    size_t frame_size = kMaxFrameSize;
    ssize_t n = 0;

#ifdef ENABLE_MMRIOTHREAD
    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        ResetReadioPtr(mCurrentPos);
    }
    n = ReadBitsteam(buffer->data(), frame_size);
#else
    ///frame_size = mMaxBufferSize;
    n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
#endif

    ///LOGE("APESource::readAt %x, %x, %d, %d, %d, %d, %d", mCurrentPos, buffer->data(),
    ///        buffer->size(), mTotalsample, bitrate, mSampleRate, frame_size);
    //ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);

    if ((mFileoffset != 0) && ((mCurrentPos + n) >= mFileoffset)) {
        frame_size = mFileoffset - mCurrentPos;
        memset(buffer->data() + frame_size, 0, n - frame_size);
    } else if ((n < (ssize_t)frame_size) && (n > 0)) {
        frame_size = n;
        off64_t fileoffset = 0;
        mDataSource->getSize(&fileoffset);
        LOGD("APESource::readAt not enough read %d frmsize %x, filepos %x, filesize %x",
                n, frame_size, mCurrentPos + frame_size, fileoffset);
        //if ((mCurrentPos + frame_size) >= fileoffset
        //        && (mCurrentPos + frame_size) < mTableOfContents[mTotalFrame - 1])
        if ((mCurrentPos + frame_size) >= fileoffset
                && (mCurrentPos + frame_size) < mTableOfContents[mSt_bound - 1]) {
            /// The file is incomplete; zero the frame so the decoder is not
            /// fed noise from truncated data.
            memset(buffer->data(), 0, buffer->size());
            LOGD("APESource::file is not enough to end --> memset");
        }
    } else if (n <= 0) {
        buffer->release();
        buffer = NULL;
        LOGD("APESource::readAt EOS filepos %x frmsize %d", mCurrentPos, frame_size);
        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
        buffer->meta_data()->setInt32(kKeyNemFrame, newframe);
        buffer->meta_data()->setInt32(kKeySeekByte, firstbyte);
    }
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mCurrentPos += frame_size;
    mCurrentTimeUs += (int64_t)(frame_size * 8000000ll) / bitrate;

#ifdef ENABLE_MMRIOTHREAD
    UpdateReadPtr(frame_size);
#endif

    *out = buffer;
    ///LOGE("APESource::kKeyTime done %x %lld", mCurrentPos, mCurrentTimeUs);
    return OK;
}
bool OmxDecoder::ReadVideo(VideoFrame *aFrame, int64_t aSeekTimeUs) {
    if (!mVideoSource.get())
        return false;

    ReleaseVideoBuffer();

    status_t err;

    if (aSeekTimeUs != -1) {
        MediaSource::ReadOptions options;
        options.setSeekTo(aSeekTimeUs);
        err = mVideoSource->read(&mVideoBuffer, &options);
    } else {
        err = mVideoSource->read(&mVideoBuffer);
    }

    if (err == OK && mVideoBuffer->range_length() > 0) {
        int64_t timeUs;
        int32_t unreadable;
        int32_t keyFrame;

        if (!mVideoBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            LOG("no key time");
            return false;
        }

        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
            keyFrame = 0;
        }

        if (!mVideoBuffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)) {
            unreadable = 0;
        }

        LOG("data: %p size: %u offset: %u length: %u unreadable: %d",
            mVideoBuffer->data(), mVideoBuffer->size(),
            mVideoBuffer->range_offset(), mVideoBuffer->range_length(),
            unreadable);

        char *data = reinterpret_cast<char *>(mVideoBuffer->data())
                + mVideoBuffer->range_offset();
        size_t length = mVideoBuffer->range_length();

        if (unreadable) {
            LOG("video frame is unreadable");
        }

        if (!ToVideoFrame(aFrame, timeUs, data, length, keyFrame)) {
            return false;
        }
    } else if (err == INFO_FORMAT_CHANGED) {
        // If the format changed, update our cached info.
        return SetVideoFormat();
    } else if (err == ERROR_END_OF_STREAM) {
        return false;
    }

    return true;
}
status_t ShoutcastSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    CHECK(mStarted);

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        return ERROR_UNSUPPORTED;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    *out = buffer;

    size_t num_bytes = buffer->size();
    if (mMetaDataOffset > 0 && num_bytes > mBytesUntilMetaData) {
        num_bytes = mBytesUntilMetaData;
    }

    ssize_t n = mHttp->receive(buffer->data(), num_bytes);

    if (n <= 0) {
        return (status_t)n;
    }

    buffer->set_range(0, n);

    mBytesUntilMetaData -= (size_t)n;

    if (mBytesUntilMetaData == 0) {
        unsigned char num_16_byte_blocks = 0;
        n = mHttp->receive((char *)&num_16_byte_blocks, 1);
        CHECK_EQ(n, 1);

        char meta[255 * 16];
        size_t meta_size = num_16_byte_blocks * 16;
        size_t meta_length = 0;
        while (meta_length < meta_size) {
            n = mHttp->receive(&meta[meta_length], meta_size - meta_length);
            if (n <= 0) {
                return (status_t)n;
            }

            meta_length += (size_t)n;
        }

        while (meta_length > 0 && meta[meta_length - 1] == '\0') {
            --meta_length;
        }

        if (meta_length > 0) {
            // Technically we should probably attach this meta data to the
            // next buffer. XXX
            buffer->meta_data()->setData('shou', 'shou', meta, meta_length);
        }

        mBytesUntilMetaData = mMetaDataOffset;
    }

    return OK;
}
status_t AMRNBEncoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));

    bool readFromSource = false;
    int64_t wallClockTimeUs = -1;

    while (mNumInputSamples < kNumSamplesPerFrame) {
        if (mInputBuffer == NULL) {
            err = mSource->read(&mInputBuffer, options);

            if (err != OK) {
                if (mNumInputSamples == 0) {
                    return ERROR_END_OF_STREAM;
                }
                memset(&mInputFrame[mNumInputSamples],
                       0,
                       sizeof(int16_t)
                            * (kNumSamplesPerFrame - mNumInputSamples));
                mNumInputSamples = kNumSamplesPerFrame;
                break;
            }

            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
            CHECK_EQ(align, 0);

            readFromSource = true;

            int64_t timeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
                wallClockTimeUs = timeUs;
            }
            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
                mAnchorTimeUs = timeUs;
            }
        } else {
            readFromSource = false;
        }

        size_t copy =
            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);

        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy(&mInputFrame[mNumInputSamples],
               (const uint8_t *)mInputBuffer->data()
                    + mInputBuffer->range_offset(),
               copy);

        mNumInputSamples += copy / sizeof(int16_t);

        mInputBuffer->set_range(
                mInputBuffer->range_offset() + copy,
                mInputBuffer->range_length() - copy);

        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }

    MediaBuffer *buffer;
    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);

    uint8_t *outPtr = (uint8_t *)buffer->data();

    Frame_Type_3GPP frameType;
    int res = AMREncode(
            mEncState, mSidState, (Mode)mMode,
            mInputFrame, outPtr, &frameType, AMR_TX_WMF);

    CHECK(res >= 0);
    CHECK((size_t)res < buffer->size());

    // Convert the header byte from WMF to IETF format.
    outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c;

    buffer->set_range(0, res);

    // Each frame of 160 samples is 20ms long.
    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
    buffer->meta_data()->setInt64(
            kKeyTime, mAnchorTimeUs + mediaTimeUs);

    if (readFromSource && wallClockTimeUs != -1) {
        buffer->meta_data()->setInt64(kKeyDriftTime,
                mediaTimeUs - wallClockTimeUs);
    }

    ++mNumFramesOutput;

    *out = buffer;

    mNumInputSamples = 0;

    return OK;
}
status_t MP3Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    if (options != NULL && options->getSeekTo(&seekTimeUs)) {
        int32_t bitrate;
        if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
            // bitrate is in kbits/sec.
            LOGI("no bitrate");

            return ERROR_UNSUPPORTED;
        }

        mCurrentTimeUs = seekTimeUs;
        mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 1000000 * 125;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    size_t frame_size;
    for (;;) {
        ssize_t n = mDataSource->read_at(mCurrentPos, buffer->data(), 4);
        if (n < 4) {
            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        uint32_t header = U32_AT((const uint8_t *)buffer->data());

        if (get_mp3_frame_size(header, &frame_size)) {
            break;
        }

        // Lost sync.
        LOGW("lost sync!\n");

        off_t pos = mCurrentPos;
        if (!Resync(mDataSource, mFixedHeader, &pos, NULL)) {
            LOGE("Unable to resync. Signalling end of stream.");

            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        mCurrentPos = pos;

        // Try again with the new position.
    }

    CHECK(frame_size <= buffer->size());

    ssize_t n = mDataSource->read_at(mCurrentPos, buffer->data(), frame_size);
    if (n < (ssize_t)frame_size) {
        buffer->release();
        buffer = NULL;

        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    buffer->meta_data()->setInt32(kKeyTimeUnits, mCurrentTimeUs / 1000);
    buffer->meta_data()->setInt32(kKeyTimeScale, 1000);

    mCurrentPos += frame_size;
    mCurrentTimeUs += 1152 * 1000000 / 44100;

    *out = buffer;

    return OK;
}
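The read() implementations collected above all follow the same basic stagefright contract: clear *out, honor or reject a seek request, acquire or allocate a MediaBuffer, fill it, stamp kKeyTime, and either hand the buffer back through *out with OK or release it and return an error such as ERROR_END_OF_STREAM. A minimal sketch of that skeleton, with placeholder names (SkeletonSource, fillBuffer, mCurrentTimeUs are illustrative, not from any of the sources above):

// Minimal sketch of the common MediaSource::read() pattern seen above.
status_t SkeletonSource::read(MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        // Either reposition the source here or return ERROR_UNSUPPORTED.
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    ssize_t n = fillBuffer(buffer->data(), buffer->size());  // source-specific fill
    if (n <= 0) {
        buffer->release();
        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, n);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);

    *out = buffer;
    return OK;
}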