Example no. 1
0
MediaBuffer* MidiEngine::readBuffer() {
    EAS_STATE state;
    EAS_State(mEasData, mEasHandle, &state);
    if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
        return NULL;
    }
    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        ALOGE("readBuffer: no buffer");
        return NULL;
    }
    EAS_I32 timeMs;
    EAS_GetLocation(mEasData, mEasHandle, &timeMs);
    int64_t timeUs = 1000ll * timeMs;
    buffer->meta_data()->setInt64(kKeyTime, timeUs);

    EAS_PCM* p = (EAS_PCM*) buffer->data();
    int numBytesOutput = 0;
    for (int i = 0; i < NUM_COMBINE_BUFFERS; i++) {
        EAS_I32 numRendered;
        EAS_RESULT result = EAS_Render(mEasData, p, mEasConfig->mixBufferSize, &numRendered);
        if (result != EAS_SUCCESS) {
            ALOGE("EAS_Render returned %ld", result);
            break;
        }
        p += numRendered * mEasConfig->numChannels;
        numBytesOutput += numRendered * mEasConfig->numChannels * sizeof(EAS_PCM);
    }
    buffer->set_range(0, numBytesOutput);
    ALOGV("readBuffer: returning %zd in buffer %p", buffer->range_length(), buffer);
    return buffer;
}
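The loop above renders NUM_COMBINE_BUFFERS chunks of mixBufferSize frames into one MediaBuffer, so the buffers handed out by mGroup must hold at least mixBufferSize * NUM_COMBINE_BUFFERS * numChannels * sizeof(EAS_PCM) bytes. A minimal sketch of that sizing, using a hypothetical allocateBuffers() helper that is not part of the snippet above:

// Hypothetical sketch: size the buffer group so one buffer can hold a full
// readBuffer() worth of rendered PCM. Only the members already used above
// (mEasConfig, mGroup) are assumed to exist.
void MidiEngine::allocateBuffers() {
    // One EAS_Render() call produces mixBufferSize frames per channel;
    // readBuffer() combines NUM_COMBINE_BUFFERS of them into one MediaBuffer.
    size_t bufSize = sizeof(EAS_PCM)
            * mEasConfig->mixBufferSize
            * mEasConfig->numChannels
            * NUM_COMBINE_BUFFERS;
    mGroup = new MediaBufferGroup;
    mGroup->add_buffer(new MediaBuffer(bufSize));
}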
Example no. 2
0
status_t
RtspMediaSource::read(MediaBuffer** out, const ReadOptions* options)
{
  ReentrantMonitorAutoEnter mon(mMonitor);
  NS_ENSURE_TRUE(mIsStarted, MEDIA_ERROR_BASE);
  NS_ENSURE_TRUE(out, MEDIA_ERROR_BASE);
  *out = nullptr;

  // A video/audio track's initial frame size is FRAME_DEFAULT_SIZE.
  // We need to reallocate mBuffer if it doesn't have enough space for the
  // next ReadFrameFromTrack call (actualFrameSize > mFrameMaxSize).
  status_t err;
  uint32_t readCount;
  uint32_t actualFrameSize;
  uint64_t time;
  nsresult rv;

  while (1) {
    err = mGroup->acquire_buffer(&mBuffer);
    NS_ENSURE_TRUE(err == OK, err);
    rv = mRtspResource->ReadFrameFromTrack((uint8_t *)mBuffer->data(),
                                           mFrameMaxSize, mTrackIdx, readCount,
                                           time, actualFrameSize);
    if (NS_FAILED(rv)) {
      // Release mGroup and mBuffer.
      stop();
      // Since RtspMediaSource is an implementation of an Android media source,
      // it is still held by OMXCodec and hasn't been released yet, so we have to
      // re-construct mGroup and mBuffer.
      start();
      NS_WARNING("ReadFrameFromTrack failed; releasing buffers and returning.");
      return ERROR_END_OF_STREAM;
    }
    if (actualFrameSize > mFrameMaxSize) {
      // release mGroup and mBuffer
      stop();
      // re-construct mGroup and mBuffer
      mFrameMaxSize = actualFrameSize;
      err = start();
      NS_ENSURE_TRUE(err == OK, err);
    } else {
      // ReadFrameFromTrack success, break the while loop.
      break;
    }
  }
  mBuffer->set_range(0, readCount);
  if (NS_SUCCEEDED(rv)) {
    mBuffer->meta_data()->clear();
    // fill the meta data
    mBuffer->meta_data()->setInt64(kKeyTime, time);
    *out = mBuffer;
    mBuffer = nullptr;
    return OK;
  }

  return ERROR_END_OF_STREAM;
}
MediaBuffer *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
{
    mWriteRequested = true;
    mWriteCompleted = false;
    if (doSeek) {
        // We implement the seek callback, so this works without explicit flush
        if (!FLAC__stream_decoder_seek_absolute(mDecoder, sample)) {
            ALOGE("FLACParser::readBuffer seek to sample %lld failed", (long long)sample);
            return NULL;
        }
        ALOGV("FLACParser::readBuffer seek to sample %lld succeeded", (long long)sample);
    } else {
        if (!FLAC__stream_decoder_process_single(mDecoder)) {
            ALOGE("FLACParser::readBuffer process_single failed");
            return NULL;
        }
    }
    if (!mWriteCompleted) {
        ALOGV("FLACParser::readBuffer write did not complete");
        return NULL;
    }
    // verify that block header keeps the promises made by STREAMINFO
    unsigned blocksize = mWriteHeader.blocksize;
    if (blocksize == 0 || blocksize > getMaxBlockSize()) {
        ALOGE("FLACParser::readBuffer write invalid blocksize %u", blocksize);
        return NULL;
    }
    if (mWriteHeader.sample_rate != getSampleRate() ||
        mWriteHeader.channels != getChannels() ||
        mWriteHeader.bits_per_sample != getBitsPerSample()) {
        ALOGE("FLACParser::readBuffer write changed parameters mid-stream: %d/%d/%d -> %d/%d/%d",
                getSampleRate(), getChannels(), getBitsPerSample(),
                mWriteHeader.sample_rate, mWriteHeader.channels, mWriteHeader.bits_per_sample);
        return NULL;
    }
    // acquire a media buffer
    CHECK(mGroup != NULL);
    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return NULL;
    }
    size_t bufferSize = blocksize * getChannels() * sizeof(short);
    CHECK(bufferSize <= mMaxBufferSize);
    short *data = (short *) buffer->data();
    buffer->set_range(0, bufferSize);
    // copy PCM from FLAC write buffer to our media buffer, with interleaving
    (*mCopy)(data, mWriteBuffer, blocksize, getChannels());
    // fill in buffer metadata
    CHECK(mWriteHeader.number_type == FLAC__FRAME_NUMBER_TYPE_SAMPLE_NUMBER);
    FLAC__uint64 sampleNumber = mWriteHeader.number.sample_number;
    int64_t timeUs = (1000000LL * sampleNumber) / getSampleRate();
    buffer->meta_data()->setInt64(kKeyTime, timeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
    return buffer;
}
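The (*mCopy) call above interleaves FLAC's per-channel decode buffers into 16-bit interleaved PCM. A sketch of such a copy routine, assuming mWriteBuffer is FLAC's usual array of per-channel FLAC__int32 pointers and the samples already fit in 16 bits (the real parser selects specialized copiers per channel count and bit depth):

// Sketch only: generic interleaver matching the (*mCopy)(dst, src, nSamples, nChannels)
// call shape used above. Assumes samples are already 16-bit-ranged.
static void copyInterleave(short *dst, const int *const *src,
                           unsigned nSamples, unsigned nChannels) {
    for (unsigned i = 0; i < nSamples; ++i) {
        for (unsigned c = 0; c < nChannels; ++c) {
            *dst++ = (short)src[c][i];   // channel-major in, frame-major out
        }
    }
}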
MediaBuffer *MediaBuffer::clone() {
    MediaBuffer *buffer = new MediaBuffer(mData, mSize);
    buffer->set_range(mRangeOffset, mRangeLength);
    buffer->mMetaData = new MetaData(*mMetaData.get());

    add_ref();
    buffer->mOriginal = this;

    return buffer;
}
status_t DummyVideoSource::read(
        MediaBuffer **out,
        const MediaSource::ReadOptions *options) {

    ALOGV("read: E");

    const int32_t kTimeScale = 1000;  /* time scale in ms */
    bool seeking = false;
    int64_t seekTimeUs;
    ReadOptions::SeekMode seekMode;
    if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
        seeking = true;
        mImageSeekTime = seekTimeUs;
        M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
    }

    if ((mImageSeekTime == mImageClipDuration) ||
        (mFrameTimeUs == (int64_t)mImageClipDuration)) {
        ALOGV("read: EOS reached");
        *out = NULL;
        return ERROR_END_OF_STREAM;
    }

    status_t err = OK;
    MediaBuffer *buffer = new MediaBuffer(
            mImageBuffer, (mFrameWidth * mFrameHeight * 1.5));

    // Set timestamp of buffer
    if (mIsFirstImageFrame) {
        M4OSA_clockGetTime(&mImagePlayStartTime, kTimeScale);
        mFrameTimeUs =  (mImageSeekTime + 1);
        ALOGV("read: jpg 1st frame timeUs = %lld, begin cut time = %ld",
            mFrameTimeUs, mImageSeekTime);

        mIsFirstImageFrame = false;
    } else {
        M4OSA_Time  currentTimeMs;
        M4OSA_clockGetTime(&currentTimeMs, kTimeScale);

        mFrameTimeUs = mImageSeekTime +
            (currentTimeMs - mImagePlayStartTime) * 1000LL;

        ALOGV("read: jpg frame timeUs = %lld", mFrameTimeUs);
    }

    buffer->meta_data()->setInt64(kKeyTime, mFrameTimeUs);
    buffer->set_range(buffer->range_offset(),
                mFrameWidth * mFrameHeight * 1.5);

    *out = buffer;
    return err;
}
status_t AACSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        if (mFrameDurationUs > 0) {
            int64_t seekFrame = seekTimeUs / mFrameDurationUs;
            mCurrentTimeUs = seekFrame * mFrameDurationUs;

            mOffset = mOffsetVector.itemAt(seekFrame);
        }
    }

    size_t frameSize, frameSizeWithoutHeader, headerSize;
    if ((frameSize = getAdtsFrameLength(mDataSource, mOffset, &headerSize)) == 0) {
        return ERROR_END_OF_STREAM;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    frameSizeWithoutHeader = frameSize - headerSize;
    if (mDataSource->readAt(mOffset + headerSize, buffer->data(),
                frameSizeWithoutHeader) != (ssize_t)frameSizeWithoutHeader) {
        buffer->release();
        buffer = NULL;

        return ERROR_IO;
    }

    buffer->set_range(0, frameSizeWithoutHeader);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mOffset += frameSize;
    mCurrentTimeUs += mFrameDurationUs;

    *out = buffer;
    return OK;
}
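getAdtsFrameLength() is assumed here; it reads the 13-bit aac_frame_length field from the ADTS header and reports the header size (7 bytes, or 9 when a CRC is present). A hedged sketch of that parsing, based on the standard ADTS layout rather than the actual helper:

// Sketch of ADTS frame-length parsing (not the original helper).
// Returns 0 on EOF/garbage, otherwise the full frame length in bytes and,
// via *headerSize, the size of the ADTS header itself.
static size_t parseAdtsFrameLength(
        const sp<DataSource> &source, off64_t offset, size_t *headerSize) {
    uint8_t hdr[7];
    if (source->readAt(offset, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
        return 0;
    }
    if (hdr[0] != 0xff || (hdr[1] & 0xf6) != 0xf0) {
        return 0;  // no ADTS syncword
    }
    bool protectionAbsent = (hdr[1] & 0x01) != 0;
    *headerSize = protectionAbsent ? 7 : 9;  // +2 bytes of CRC when protected

    // aac_frame_length: 13 bits spanning bytes 3..5, includes the header itself.
    size_t frameLength = ((hdr[3] & 0x03) << 11) | (hdr[4] << 3) | (hdr[5] >> 5);
    return frameLength < *headerSize ? 0 : frameLength;
}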
status_t DummyVideoSource::read(
                        MediaBuffer **out,
                        const MediaSource::ReadOptions *options) {
    status_t err = OK;
    MediaBuffer *buffer;
    LOG2("DummyVideoSource::read START");

    bool seeking = false;
    int64_t seekTimeUs;
    ReadOptions::SeekMode seekMode;

    if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
        seeking = true;
        mImageSeekTime = seekTimeUs;
        M4OSA_clockGetTime(&mImagePlayStartTime, 1000); //1000 time scale for time in ms
    }

    if ((mImageSeekTime == mImageClipDuration) || (mFrameTimeUs == (int64_t)mImageClipDuration)) {
        LOG2("DummyVideoSource::read() End of stream reached; return NULL buffer");
        *out = NULL;
        return ERROR_END_OF_STREAM;
    }

    buffer = new MediaBuffer(mImageBuffer, (mFrameWidth*mFrameHeight*1.5));

    //set timestamp of buffer
    if (mIsFirstImageFrame) {
        M4OSA_clockGetTime(&mImagePlayStartTime, 1000); //1000 time scale for time in ms
        mFrameTimeUs =  (mImageSeekTime + 1);
        LOG2("DummyVideoSource::read() jpg 1st frame timeUs = %lld, begin cut time = %ld", mFrameTimeUs, mImageSeekTime);
        mIsFirstImageFrame = false;
    } else {
        M4OSA_Time  currentTimeMs;
        M4OSA_clockGetTime(&currentTimeMs, 1000);

        mFrameTimeUs = mImageSeekTime + (currentTimeMs - mImagePlayStartTime)*1000;
        LOG2("DummyVideoSource::read() jpg frame timeUs = %lld", mFrameTimeUs);
    }
    buffer->meta_data()->setInt64(kKeyTime, mFrameTimeUs);
    buffer->set_range(buffer->range_offset(), mFrameWidth*mFrameHeight*1.5);
    *out = buffer;
    return err;
}
Example no. 8
0
status_t MediaMuxer::writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
                                     int64_t timeUs, uint32_t flags) {
    Mutex::Autolock autoLock(mMuxerLock);

    if (buffer.get() == NULL) {
        ALOGE("WriteSampleData() get an NULL buffer.");
        return -EINVAL;
    }

    if (mState != STARTED) {
        ALOGE("WriteSampleData() is called in invalid state %d", mState);
        return INVALID_OPERATION;
    }

    if (trackIndex >= mTrackList.size()) {
        ALOGE("WriteSampleData() get an invalid index %zu", trackIndex);
        return -EINVAL;
    }

    MediaBuffer* mediaBuffer = new MediaBuffer(buffer);

    mediaBuffer->add_ref(); // Released in MediaAdapter::signalBufferReturned().
    mediaBuffer->set_range(buffer->offset(), buffer->size());

    sp<MetaData> sampleMetaData = mediaBuffer->meta_data();
    sampleMetaData->setInt64(kKeyTime, timeUs);
    // Just set the kKeyDecodingTime as the presentation time for now.
    sampleMetaData->setInt64(kKeyDecodingTime, timeUs);

    if (flags & MediaCodec::BUFFER_FLAG_SYNCFRAME) {
        sampleMetaData->setInt32(kKeyIsSyncFrame, true);
    }

    sp<MediaAdapter> currentTrack = mTrackList[trackIndex];
    // This pushBuffer will wait until the mediaBuffer is consumed.
    return currentTrack->pushBuffer(mediaBuffer);
}
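For context, a rough usage sketch of the native muxer around writeSampleData(); the constructor and addTrack() signatures have changed across Android releases, so treat the exact names and types below as assumptions:

// Rough usage sketch (constructor/addTrack signatures are assumptions and have
// differed between releases).
static status_t muxOneSample(int fd, const sp<AMessage> &videoFormat,
                             const sp<ABuffer> &accessUnit,
                             int64_t timeUs, uint32_t flags) {
    sp<MediaMuxer> muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
    ssize_t track = muxer->addTrack(videoFormat);
    if (track < 0) {
        return (status_t)track;
    }
    status_t err = muxer->start();
    if (err != OK) {
        return err;
    }
    // writeSampleData() blocks until the underlying writer has consumed the buffer.
    err = muxer->writeSampleData(accessUnit, track, timeUs, flags);
    status_t stopErr = muxer->stop();
    return err != OK ? err : stopErr;
}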
Example no. 9
0
status_t APESource::read(
    MediaBuffer **out, const ReadOptions *options)
{
    *out = NULL;
    uint32_t newframe = 0 , firstbyte = 0;

    ///LOGV("APESource::read");
    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    int32_t bitrate = 0;

    if (!mMeta->findInt32(kKeyBitRate, &bitrate)
            || !mMeta->findInt32(kKeySampleRate, &mSampleRate))
    {
        LOGI("no bitrate");
        return ERROR_UNSUPPORTED;
    }

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode))
    {

        {

            int64_t duration = 0;
            int64_t seektabletime = 0;

            if ((mTotalsample > 0) && (mTableOfContents[0] > 0) && (mSamplesPerFrame > 0)
                    && mMeta->findInt64(kKeyDuration, &duration))
            {
                ape_parser_ctx_t ape_ctx;
                uint32_t filepos, blocks_to_skip;
                ape_ctx.samplerate = mSampleRate;
                ape_ctx.blocksperframe = mSamplesPerFrame;
                ape_ctx.totalframes = mTotalFrame;
                ape_ctx.seektable = mTableOfContents;
                ape_ctx.firstframe = mTableOfContents[0];

                if (ape_calc_seekpos_by_microsecond(&ape_ctx,
                                                    seekTimeUs,
                                                    &newframe,
                                                    &filepos,
                                                    &firstbyte,
                                                    &blocks_to_skip) < 0)
                {
                    LOGD("getseekto error exit");
                    return ERROR_UNSUPPORTED;
                }

                mCurrentPos = filepos;
                mCurrentTimeUs = (int64_t)newframe * mSamplesPerFrame * 1000000ll / mSampleRate;

                LOGD("getseekto seekTimeUs=%lld, Actual time%lld, filepos%x,frame %d, seekbyte %d", seekTimeUs, mCurrentTimeUs, mCurrentPos, newframe, firstbyte);

            }
            else
            {
                LOGD("getseekto parameter error exit");
                return ERROR_UNSUPPORTED;
            }


        }

    }


    if ((mFileoffset != 0)
            && (mCurrentPos >= mFileoffset))
    {
        LOGD("APESource::readAt to end filesize %x curr: %x", mFileoffset, mCurrentPos);
        return ERROR_END_OF_STREAM;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);

    if (err != OK)
    {
        LOGD("APESource::acquire_buffer fail");
        return err;
    }

    size_t frame_size;
    frame_size = kMaxFrameSize;
    ssize_t n = 0;	

#ifdef ENABLE_MMRIOTHREAD
    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode))
    {
        ResetReadioPtr(mCurrentPos);        
    }
    n = ReadBitsteam(buffer->data(), frame_size);
#else    
    ///frame_size = mMaxBufferSize;
    n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);    
#endif

    ///LOGE("APESource::readAt  %x, %x, %d, %d, %d, %d, %d", mCurrentPos, buffer->data(), buffer->size(), mTotalsample, bitrate, mSampleRate, frame_size);
    //ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);

    if ((mFileoffset != 0)
            && ((mCurrentPos + n) >= mFileoffset))
    {
        frame_size = mFileoffset - mCurrentPos;
        memset(buffer->data() + frame_size, 0, n - frame_size);
    }
    else if ((n < (ssize_t)frame_size)
             && (n > 0))
    {
        frame_size = n;
        off64_t fileoffset = 0;
        mDataSource->getSize(&fileoffset);
        LOGD("APESource::readAt not enough read %d frmsize %x, filepos %x, filesize %x", n, frame_size, mCurrentPos + frame_size, fileoffset);

        //if ((mCurrentPos + frame_size) >= fileoffset
        //        && (mCurrentPos + frame_size) < mTableOfContents[mTotalFrame - 1])
        if ((mCurrentPos + frame_size) >= fileoffset && (mCurrentPos + frame_size) < mTableOfContents[mSt_bound- 1])
        {
            memset(buffer->data(), 0, buffer->size());
            /// The file is incomplete; do not pass this frame buffer on, to avoid decoding noisy data.
            LOGD("APESource::file is not enough to end --> memset");
        }
    }
    else if (n <= 0)
    {
        buffer->release();
        buffer = NULL;
        LOGD("APESource::readAt EOS filepos %x frmsize %d", mCurrentPos, frame_size);
        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode))
    {
        buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
        buffer->meta_data()->setInt32(kKeyNemFrame, newframe);
        buffer->meta_data()->setInt32(kKeySeekByte, firstbyte);
    }

    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mCurrentPos += frame_size;
    mCurrentTimeUs += (int64_t)(frame_size * 8000000ll) / bitrate ;
    
#ifdef ENABLE_MMRIOTHREAD    
    UpdateReadPtr(frame_size);
#endif

    *out = buffer;

    ///LOGE("APESource::kKeyTime done %x %lld", mCurrentPos, mCurrentTimeUs);
    return OK;
}
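ape_calc_seekpos_by_microsecond() is only assumed here. Conceptually an APE seek works from the seek table, which stores the byte offset of every frame: convert the target time to a block index, divide by blocksperframe to get the frame, and look up its file position. A hedged sketch of that arithmetic (field names follow the ape_parser_ctx_t usage above; the real helper also returns the intra-frame byte offset to start decoding at):

// Sketch only: seek-position math implied by the call above.
static int ape_seekpos_by_us(const ape_parser_ctx_t *ctx, int64_t timeUs,
                             uint32_t *frame, uint32_t *filepos,
                             uint32_t *blocksToSkip) {
    if (ctx->samplerate == 0 || ctx->blocksperframe == 0) {
        return -1;
    }
    uint64_t targetBlock = (uint64_t)timeUs * ctx->samplerate / 1000000ll;
    uint32_t targetFrame = targetBlock / ctx->blocksperframe;
    if (targetFrame >= ctx->totalframes) {
        return -1;
    }
    *frame = targetFrame;
    *filepos = ctx->seektable[targetFrame];            // byte offset of the frame
    *blocksToSkip = targetBlock % ctx->blocksperframe; // PCM blocks to discard after decode
    return 0;
}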
Example no. 10
0
status_t AMRSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (mOffsetTableLength > 0 && options && options->getSeekTo(&seekTimeUs, &mode)) {
        size_t size;
        int64_t seekFrame = seekTimeUs / 20000ll;  // 20ms per frame.
        mCurrentTimeUs = seekFrame * 20000ll;

        size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
        if (index >= mOffsetTableLength) {
            index = mOffsetTableLength - 1;
        }

        mOffset = mOffsetTable[index] + (mIsWide ? 9 : 6);

        for (size_t i = 0; i< seekFrame - index * 50; i++) {
            status_t err;
            if ((err = getFrameSizeByOffset(mDataSource, mOffset,
                            mIsWide, &size)) != OK) {
                return err;
            }
            mOffset += size;
        }
    }

    uint8_t header;
    ssize_t n = mDataSource->readAt(mOffset, &header, 1);

    if (n < 1) {
        return ERROR_END_OF_STREAM;
    }

    if (header & 0x83) {
        // Padding bits must be 0.

        ALOGE("padding bits must be 0, header is 0x%02x", header);

        return ERROR_MALFORMED;
    }

    unsigned FT = (header >> 3) & 0x0f;

    size_t frameSize = getFrameSize(mIsWide, FT);
    if (frameSize == 0) {
        return ERROR_MALFORMED;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    n = mDataSource->readAt(mOffset, buffer->data(), frameSize);

    if (n != (ssize_t)frameSize) {
        buffer->release();
        buffer = NULL;

        if (n < 0) {
            return ERROR_IO;
        } else {
            // only partial frame is available, treat it as EOS.
            mOffset += n;
            return ERROR_END_OF_STREAM;
        }
    }

    buffer->set_range(0, frameSize);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mOffset += frameSize;
    mCurrentTimeUs += 20000;  // Each frame is 20ms

    *out = buffer;

    return OK;
}
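The AMR reader above (and the AMRSource variants further down) relies on getFrameSize(mIsWide, FT). The sizes follow from the RFC 4867 bit counts per frame type: round the speech bits up to bytes and add one byte for the ToC header the reader just parsed. A sketch under that assumption, returning 0 for reserved types as the caller above expects:

// Sketch based on RFC 4867 frame sizes; the original helper may differ in detail.
static size_t amrFrameSize(bool isWide, unsigned frameType) {
    // Speech bits per frame type (index == FT). 0 marks reserved/unused types.
    static const size_t kBitsNB[16] = {
        95, 103, 118, 134, 148, 159, 204, 244,  // 4.75 ... 12.2 kbps
        39, 0, 0, 0, 0, 0, 0, 0                 // 8 = SID
    };
    static const size_t kBitsWB[16] = {
        132, 177, 253, 285, 317, 365, 397, 461, 477,  // 6.60 ... 23.85 kbps
        40, 0, 0, 0, 0, 0, 0                          // 9 = SID
    };
    size_t bits = isWide ? kBitsWB[frameType & 0x0f] : kBitsNB[frameType & 0x0f];
    if (bits == 0) {
        return 0;
    }
    return (bits + 7) / 8 + 1;  // round up to bytes, plus the 1-byte frame header
}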
void MPEG4Writer::Track::threadEntry() {
    bool is_mpeg4 = false;
    sp<MetaData> meta = mSource->getFormat();
    const char *mime;
    meta->findCString(kKeyMIMEType, &mime);
    is_mpeg4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4);

    MediaBuffer *buffer;
    while (!mDone && mSource->read(&buffer) == OK) {
        if (buffer->range_length() == 0) {
            buffer->release();
            buffer = NULL;

            continue;
        }

        if (mCodecSpecificData == NULL && is_mpeg4) {
            const uint8_t *data =
                (const uint8_t *)buffer->data() + buffer->range_offset();

            const size_t size = buffer->range_length();

            size_t offset = 0;
            while (offset + 3 < size) {
                if (data[offset] == 0x00 && data[offset + 1] == 0x00
                    && data[offset + 2] == 0x01 && data[offset + 3] == 0xb6) {
                    break;
                }

                ++offset;
            }

            // CHECK(offset + 3 < size);
            if (offset + 3 >= size) {
                // XXX assume the entire first chunk of data is the codec specific
                // data.
                offset = size;
            }

            mCodecSpecificDataSize = offset;
            mCodecSpecificData = malloc(offset);
            memcpy(mCodecSpecificData, data, offset);

            buffer->set_range(buffer->range_offset() + offset, size - offset);
        }

        off_t offset = mOwner->addSample(buffer);

        SampleInfo info;
        info.size = buffer->range_length();
        info.offset = offset;

        int32_t units, scale;
        bool success =
            buffer->meta_data()->findInt32(kKeyTimeUnits, &units);
        CHECK(success);
        success =
            buffer->meta_data()->findInt32(kKeyTimeScale, &scale);
        CHECK(success);

        info.timestamp = (int64_t)units * 1000 / scale;

        mSampleInfos.push_back(info);

        buffer->release();
        buffer = NULL;
    }

    mReachedEOS = true;
}
Example no. 12
0
status_t WMAPRODecoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;
    static int j=0;
    j++;
    count++;
    *out = NULL;
    if (mFirstOutputBuf) {
        *out = mFirstOutputBuf;
        mFirstOutputBuf = NULL;
        return OK;
    }
    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;

    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        CHECK(seekTimeUs >= 0);
        // When the user seeks, we enter this branch, so the decoder state must be reset here.
        // by Charles Chen, Feb 11th, 2011
        mNumFramesOutput = 0;

        //	LOGD("j=%d,us = %ld,ms= %ld ,s=%ld,mode=%ld \n",j,seekTimeUs,seekTimeUs/1000,seekTimeUs/1000000,mode);
        ALOGD("seek time is %lld in %s  \n",seekTimeUs,__FUNCTION__);

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        mAnchorTimeUs = -1LL;  // -1 means we just seeked; take the anchor from the first frame read after the seek

    } else {
        seekTimeUs = -1;
    }

    if (mInputBuffer == NULL) {
        status_t err1;

        err = mSource->read(&mInputBuffer, options);

        if (err != OK ) {
            //LOGE("j=%d,mSource ->read error :%d \n",j,err);
            return err;
        }

#ifdef WFO
        unsigned char *p81;
        int  *p321;
        p81 = (unsigned char *)(mInputBuffer->data());
        p321 = ( int *)p81;
        p81 += 4;

        FILE *fp = NULL;
        fp = fopen("/data/data/test.bin","ab+");
        if(!fp)
        {
        ALOGE("create file error \n");
        return -1 ;
        }
        int helunsize = fwrite(p81,1,*p321,fp);
        fclose(fp);
        ALOGE("Write data is  %d/%d  count=%d\n",helunsize,*p321,count);
#endif

        int64_t timeUs;
        if (mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            // If the audio codec cannot guarantee one-in/one-out, do not update mAnchorTimeUs/mNumFramesOutput here;
            // a temporary time variable could be added instead. Charles Chen, Feb 11th, 2011
            //	LOGD("j=%d,us =%ld ,ms=%ld ,s=%ld\n",j,timeUs,timeUs/1000,timeUs/1000000);
            if(mAnchorTimeUs == -1LL)
            {
                mAnchorTimeUs = timeUs;
                mNumFramesOutput = 0;
                mReAsynThreshHold = timeUs;
            }
            mReAsynTime = timeUs;
        } else {
            // We must have a new timestamp after seeking.
            CHECK(seekTimeUs < 0);
        }
    }

    MediaBuffer *buffer;
    mBufferGroup->acquire_buffer(&buffer);

    int32_t inputUsedLen = mInputBuffer->range_offset();
    unsigned char *outbuf = (unsigned char *)(buffer->data());
    unsigned char *inbuf = (unsigned char *)(mInputBuffer->data());
    int *p32 = (int *)inbuf;
    int inlen = *p32;
    int outlen = 0;
    int res = -1;
    int writelen = 0;
    ALOGV("count = %d, inlen = %d \n", count, inlen);
    res = (*DecodeWmaPro)(inbuf + 4, inlen, outbuf, &outlen, &writelen, codecid);

#ifdef WFPCM
    FILE *fp = NULL;
    fp = fopen("/data/data/pcm.pcm","ab+");
    if(!fp)
    {
        ALOGD("create file error \n");
        return  0;
    }
    fwrite(outbuf,1,outlen,fp);
   ALOGE("Write pcm is %d ,count = %d \n",outlen,count);
    fclose(fp);
#endif

	//LOGD("j=%d,res=%d,outlen=%d,inlen=%d,writelen=%d,codecid=%d \n",j,res,outlen,inlen,writelen,codecid);


	int32_t aOutputLength = 0;
	int32_t retValue = 0;
	mInputBuffer->release();
	mInputBuffer = NULL;

	buffer->set_range(0, outlen);//the aOutPutLen is the R/L totle sampleNum

   	buffer->meta_data()->setInt64(
            kKeyTime,
            mAnchorTimeUs
                + (mNumFramesOutput * 1000000) / mSampleRate);
	//LOGE("j=%d,numFrmaeOutput=%lld,outlen=%d,audio output time is: %lld us",j, mNumFramesOutput,outlen,mAnchorTimeUs + (mNumFramesOutput * 1000000) / mSampleRate);
    mNumFramesOutput +=( outlen /mNumChannels/2);

    *out = buffer;


    return OK;
}
Example no. 13
0
status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out) {
    *out = NULL;

    MediaBuffer *buffer = NULL;
    int64_t timeUs = -1;

    for (;;) {
        size_t i;
        size_t packetSize = 0;
        bool gotFullPacket = false;
        for (i = mNextLaceIndex; i < mCurrentPage.mNumSegments; ++i) {
            uint8_t lace = mCurrentPage.mLace[i];

            packetSize += lace;

            if (lace < 255) {
                gotFullPacket = true;
                ++i;
                break;
            }
        }

        if (mNextLaceIndex < mCurrentPage.mNumSegments) {
            off64_t dataOffset = mOffset + 27 + mCurrentPage.mNumSegments;
            for (size_t j = 0; j < mNextLaceIndex; ++j) {
                dataOffset += mCurrentPage.mLace[j];
            }

            size_t fullSize = packetSize;
            if (buffer != NULL) {
                fullSize += buffer->range_length();
            }
            MediaBuffer *tmp = new MediaBuffer(fullSize);
            if (buffer != NULL) {
                memcpy(tmp->data(), buffer->data(), buffer->range_length());
                tmp->set_range(0, buffer->range_length());
                buffer->release();
            } else {
                // XXX Not only is this not technically the correct time for
                // this packet, we also stamp every packet in this page
                // with the same time. This needs fixing later.

                if (mVi.rate) {
                    // Rate may not have been initialized yet if we're currently
                    // reading the configuration packets...
                    // Fortunately, the timestamp doesn't matter for those.
                    timeUs = mCurrentPage.mGranulePosition * 1000000ll / mVi.rate;
                }
                tmp->set_range(0, 0);
            }
            buffer = tmp;

            ssize_t n = mSource->readAt(
                    dataOffset,
                    (uint8_t *)buffer->data() + buffer->range_length(),
                    packetSize);

            if (n < (ssize_t)packetSize) {
                LOGV("failed to read %d bytes at 0x%016llx, got %ld bytes",
                     packetSize, dataOffset, n);
                return ERROR_IO;
            }

            buffer->set_range(0, fullSize);

            mNextLaceIndex = i;

            if (gotFullPacket) {
                // We've just read the entire packet.

                if (timeUs >= 0) {
                    buffer->meta_data()->setInt64(kKeyTime, timeUs);
                }

                if (mFirstPacketInPage) {
                    buffer->meta_data()->setInt32(
                            kKeyValidSamples, mCurrentPageSamples);
                    mFirstPacketInPage = false;
                }

                *out = buffer;

                return OK;
            }

            // fall through, the buffer now contains the start of the packet.
        }

        CHECK_EQ(mNextLaceIndex, mCurrentPage.mNumSegments);

        mOffset += mCurrentPageSize;
        ssize_t n = readPage(mOffset, &mCurrentPage);

        if (n <= 0) {
            if (buffer) {
                buffer->release();
                buffer = NULL;
            }

            LOGV("readPage returned %ld", n);

            return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
        }

        mCurrentPageSamples =
            mCurrentPage.mGranulePosition - mPrevGranulePosition;
        mFirstPacketInPage = true;

        mPrevGranulePosition = mCurrentPage.mGranulePosition;

        mCurrentPageSize = n;
        mNextLaceIndex = 0;

        if (buffer != NULL) {
            if ((mCurrentPage.mFlags & 1) == 0) {
                // This page does not continue the packet, i.e. the packet
                // is already complete.

                if (timeUs >= 0) {
                    buffer->meta_data()->setInt64(kKeyTime, timeUs);
                }

                buffer->meta_data()->setInt32(
                        kKeyValidSamples, mCurrentPageSamples);
                mFirstPacketInPage = false;

                *out = buffer;

                return OK;
            }
        }
    }
}
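The packet-assembly loop above walks the Ogg page's lacing values: a packet's length is the sum of consecutive lacing bytes, and a value below 255 terminates the packet (a packet whose last lacing value on the page is 255 continues on the next page, which is what the later `mFlags & 1` check handles). A small standalone sketch of that rule, using hypothetical names:

#include <stdint.h>
#include <stddef.h>
#include <vector>

// Sketch: split an Ogg segment table into packet sizes. A packet whose final
// lacing value is 255 is incomplete and continues on the next page.
static void splitLacing(const uint8_t *lace, size_t numSegments,
                        std::vector<size_t> *packetSizes, bool *lastIsPartial) {
    size_t current = 0;
    *lastIsPartial = false;
    for (size_t i = 0; i < numSegments; ++i) {
        current += lace[i];
        if (lace[i] < 255) {
            packetSizes->push_back(current);  // packet ends within this page
            current = 0;
        }
    }
    if (current > 0) {
        packetSizes->push_back(current);  // continued on the following page
        *lastIsPartial = true;
    }
}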
Example no. 14
0
status_t ShoutcastSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    CHECK(mStarted);

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        return ERROR_UNSUPPORTED;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    *out = buffer;

    size_t num_bytes = buffer->size();
    if (mMetaDataOffset > 0 && num_bytes > mBytesUntilMetaData) {
        num_bytes = mBytesUntilMetaData;
    }

    ssize_t n = mHttp->receive(buffer->data(), num_bytes);

    if (n <= 0) {
        return (status_t)n;
    }

    buffer->set_range(0, n);

    mBytesUntilMetaData -= (size_t)n;

    if (mBytesUntilMetaData == 0) {
        unsigned char num_16_byte_blocks = 0;
        n = mHttp->receive((char *)&num_16_byte_blocks, 1);
        CHECK_EQ(n, 1);

        char meta[255 * 16];
        size_t meta_size = num_16_byte_blocks * 16;
        size_t meta_length = 0;
        while (meta_length < meta_size) {
            n = mHttp->receive(&meta[meta_length], meta_size - meta_length);
            if (n <= 0) {
                return (status_t)n;
            }

            meta_length += (size_t) n;
        }

        while (meta_length > 0 && meta[meta_length - 1] == '\0') {
            --meta_length;
        }

        if (meta_length > 0) {
            // Technically we should probably attach this meta data to the
            // next buffer. XXX
            buffer->meta_data()->setData('shou', 'shou', meta, meta_length);
        }

        mBytesUntilMetaData = mMetaDataOffset;
    }

    return OK;
}
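The in-band metadata block read above follows the ICY convention: every mMetaDataOffset ("icy-metaint") audio bytes, one length byte gives the metadata size in 16-byte blocks, and the payload typically contains StreamTitle='...'; padded with NULs. A hedged sketch of extracting the title from such a block (helper name and use of std::string are illustrative, not part of the source):

#include <stddef.h>
#include <string>

// Sketch: pull the StreamTitle value out of an ICY metadata block.
// 'meta' is the NUL-trimmed payload already read above, 'length' its size.
static bool parseStreamTitle(const char *meta, size_t length, std::string *title) {
    static const char kKey[] = "StreamTitle='";
    std::string block(meta, length);
    size_t start = block.find(kKey);
    if (start == std::string::npos) {
        return false;
    }
    start += sizeof(kKey) - 1;
    size_t end = block.find("';", start);   // value runs until the closing ';
    if (end == std::string::npos) {
        return false;
    }
    *title = block.substr(start, end - start);
    return true;
}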
status_t PCMSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;
    int64_t seekTimeUs;
    ReadOptions::SeekMode seek = ReadOptions::SEEK_CLOSEST_SYNC;
    if (options != NULL && options->getSeekTo(&seekTimeUs,&seek)) {
        int64_t pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * 2;
        if (pos > mSize) {
            pos = mSize;
        }
        mCurrentPos = pos + mOffset;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    ssize_t n = mDataSource->readAt(
            mCurrentPos, buffer->data(), mBufferSize);
    if (n <= 0) {
        buffer->release();
        buffer = NULL;
        return ERROR_END_OF_STREAM;
    }

    mCurrentPos += n;

    buffer->set_range(0, n);

    if (mBitsPerSample == 8) {
        // Convert 8-bit unsigned samples to 16-bit signed.

        MediaBuffer *tmp;
        CHECK_EQ(mGroup->acquire_buffer(&tmp), OK);

        // The new buffer holds the same number of samples, but each
        // one is 2 bytes wide.
        tmp->set_range(0, 2 * n);

        int16_t *dst = (int16_t *)tmp->data();
        const uint8_t *src = (const uint8_t *)buffer->data();
        while (n-- > 0) {
            *dst++ = ((int16_t)(*src) - 128) * 256;
            ++src;
        }

        buffer->release();
        buffer = tmp;
    } else if (mBitsPerSample == 24) {
        // Convert 24-bit signed samples to 16-bit signed.

        const uint8_t *src =
            (const uint8_t *)buffer->data() + buffer->range_offset();
        int16_t *dst = (int16_t *)src;

        size_t numSamples = buffer->range_length() / 3;
        for (size_t i = 0; i < numSamples; ++i) {
            int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
            x = (x << 8) >> 8;  // sign extension

            x = x >> 8;
            *dst++ = (int16_t)x;
            src += 3;
        }

        buffer->set_range(buffer->range_offset(), 2 * numSamples);
    }

    size_t bytesPerSample = mBitsPerSample >> 3;

    buffer->meta_data()->setInt64(
            kKeyTime,
            1000000LL * (mCurrentPos - mOffset)
                / (mNumChannels * bytesPerSample) / mSampleRate);


    *out = buffer;

    return OK;
}
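The seek and timestamp math above is just the linear byte-to-time mapping for interleaved PCM. Stated as standalone helpers (a sketch; the seek branch above additionally hard-codes 2 bytes per sample for its 16-bit output):

// Sketch of the byte <-> time conversions used by PCMSource::read().
static int64_t pcmBytesForTimeUs(int64_t timeUs, int32_t sampleRate,
                                 int32_t numChannels, int32_t bytesPerSample) {
    // frames = timeUs * sampleRate / 1e6; bytes = frames * channels * bytesPerSample
    return (timeUs * sampleRate) / 1000000 * numChannels * bytesPerSample;
}

static int64_t pcmTimeUsForBytes(int64_t byteOffset, int32_t sampleRate,
                                 int32_t numChannels, int32_t bytesPerSample) {
    return 1000000LL * byteOffset / (numChannels * bytesPerSample) / sampleRate;
}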
Example no. 16
0
status_t MP3Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    bool seekCBR = false;

    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
        int64_t actualSeekTimeUs = seekTimeUs;
#ifndef ANDROID_DEFAULT_CODE
        if (!mEnableTOC) {
#endif
        if (mSeeker == NULL
                || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
            int32_t bitrate;
            if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                // bitrate is in bits/sec.
                ALOGI("no bitrate");

                return ERROR_UNSUPPORTED;
            }

            mCurrentTimeUs = seekTimeUs;
            mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
            seekCBR = true;
        } else {
            mCurrentTimeUs = actualSeekTimeUs;
        }
#ifndef ANDROID_DEFAULT_CODE
        } else {
            MP3_EXTR_DBG("before getFramePos seekTimeUs=%lld", seekTimeUs);
            off_t ActualPos = 0;
            status_t stat = getFramePos(seekTimeUs, &mCurrentTimeUs, &ActualPos, true);
            if (stat == BAD_VALUE) {
                int32_t bitrate;
                if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                    // bitrate is in bits/sec.
                    MP3_EXTR_WARN("no bitrate");
                    return ERROR_UNSUPPORTED;
                }
                mCurrentTimeUs = seekTimeUs;
                mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
                if (mSeeker == NULL
                        || !mSeeker->getOffsetForTime(&actualSeekTimeUs, &mCurrentPos)) {
                    int32_t bitrate;
                    if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
                        // bitrate is in bits/sec.
                        ALOGI("no bitrate");

                        return ERROR_UNSUPPORTED;
                    }

                    mCurrentTimeUs = seekTimeUs;
                    mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 8000000;
                    seekCBR = true;
                } else {
                    mCurrentTimeUs = actualSeekTimeUs;
                }
            } else if (stat == ERROR_END_OF_STREAM) {
                return stat;
            } else {
                mCurrentPos = ActualPos;
                MP3_EXTR_DBG("after seek mCurrentTimeUs=%lld,pActualPos=%ld", mCurrentTimeUs, ActualPos);
            }
        }
#endif
        mBasisTimeUs = mCurrentTimeUs;
        mSamplesRead = 0;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    size_t frame_size;
    int bitrate;
    int num_samples;
    int sample_rate;
    for (;;) {
        ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), 4);
        if (n < 4) {
            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        uint32_t header = U32_AT((const uint8_t *)buffer->data());

        if ((header & kMask) == (mFixedHeader & kMask)
            && GetMPEGAudioFrameSize(
                header, &frame_size, &sample_rate, NULL,
                &bitrate, &num_samples)) {

            // re-calculate mCurrentTimeUs because we might have called Resync()
            if (seekCBR) {
                mCurrentTimeUs = (mCurrentPos - mFirstFramePos) * 8000 / bitrate;
                mBasisTimeUs = mCurrentTimeUs;
            }

            break;
        }

        // Lost sync.
        ALOGV("lost sync! header = 0x%08x, old header = 0x%08x\n", header, mFixedHeader);

        off64_t pos = mCurrentPos;
        if (!Resync(mDataSource, mFixedHeader, &pos, NULL, NULL)) {
            ALOGE("Unable to resync. Signalling end of stream.");

            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        mCurrentPos = pos;

        // Try again with the new position.
    }

    CHECK(frame_size <= buffer->size());

    ssize_t n = mDataSource->readAt(mCurrentPos, buffer->data(), frame_size);
    if (n < (ssize_t)frame_size) {
        buffer->release();
        buffer = NULL;

        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mCurrentPos += frame_size;

    mSamplesRead += num_samples;
    mCurrentTimeUs = mBasisTimeUs + ((mSamplesRead * 1000000) / sample_rate);

    *out = buffer;

    return OK;
}
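GetMPEGAudioFrameSize() is assumed above. For the common MPEG-1 Layer III case the frame length follows directly from the header fields: 144 * bitrate / samplerate, plus one padding byte when the padding bit is set. A sketch of that arithmetic (the layer/version handling of the real helper is omitted):

// Sketch: MPEG-1 Layer III frame size from bitrate, sample rate and padding bit.
// The real GetMPEGAudioFrameSize() also decodes those fields from the 32-bit
// header and handles MPEG-2/2.5 and Layers I/II.
static size_t mp3FrameSizeLayer3(int bitrateBps, int sampleRateHz, bool padding) {
    if (sampleRateHz <= 0) {
        return 0;
    }
    // 1152 samples per frame => 144 = 1152 / 8 bytes-per-bit
    return (size_t)(144 * (int64_t)bitrateBps / sampleRateHz) + (padding ? 1 : 0);
}

For instance, 128 kbps at 44100 Hz gives 144 * 128000 / 44100 = 417 bytes, or 418 with the padding bit set.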
status_t MatroskaSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        mBlockIter.seek(seekTimeUs);
    }

    if (mBlockIter.eos()) {
        return ERROR_END_OF_STREAM;
    }

    const mkvparser::Block *block = mBlockIter.block();
    size_t size = block->GetSize();
    int64_t timeUs = mBlockIter.blockTimeUs();

    MediaBuffer *buffer = new MediaBuffer(size + 2);
    buffer->meta_data()->setInt64(kKeyTime, timeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());

    long res = block->Read(
            mExtractor->mReader, (unsigned char *)buffer->data() + 2);

    if (res != 0) {
        buffer->release();
        buffer = NULL;

        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(2, size);

    if (mType == AVC) {
        CHECK(size >= 2);

        uint8_t *data = (uint8_t *)buffer->data();

        unsigned NALsize = data[2] << 8 | data[3];
        CHECK_EQ(size, NALsize + 2);

        memcpy(data, "\x00\x00\x00\x01", 4);
        buffer->set_range(0, size + 2);
    } else if (mType == AAC) {
        // There's strange junk at the beginning...

        const uint8_t *data = (const uint8_t *)buffer->data() + 2;
        size_t offset = 0;
        while (offset < size && data[offset] != 0x21) {
            ++offset;
        }
        buffer->set_range(2 + offset, size - offset);
    }

    *out = buffer;

#if 0
    hexdump((const uint8_t *)buffer->data() + buffer->range_offset(),
            buffer->range_length());
#endif

    mBlockIter.advance();

    return OK;
}
status_t MP3Source::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    if (options != NULL && options->getSeekTo(&seekTimeUs)) {
        int32_t bitrate;
        if (!mMeta->findInt32(kKeyBitRate, &bitrate)) {
            // bitrate is in kbits/sec.
            LOGI("no bitrate");

            return ERROR_UNSUPPORTED;
        }

        mCurrentTimeUs = seekTimeUs;
        mCurrentPos = mFirstFramePos + seekTimeUs * bitrate / 1000000 * 125;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    size_t frame_size;
    for (;;) {
        ssize_t n = mDataSource->read_at(mCurrentPos, buffer->data(), 4);
        if (n < 4) {
            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        uint32_t header = U32_AT((const uint8_t *)buffer->data());
        
        if (get_mp3_frame_size(header, &frame_size)) {
            break;
        }

        // Lost sync.
        LOGW("lost sync!\n");

        off_t pos = mCurrentPos;
        if (!Resync(mDataSource, mFixedHeader, &pos, NULL)) {
            LOGE("Unable to resync. Signalling end of stream.");

            buffer->release();
            buffer = NULL;

            return ERROR_END_OF_STREAM;
        }

        mCurrentPos = pos;

        // Try again with the new position.
    }

    CHECK(frame_size <= buffer->size());

    ssize_t n = mDataSource->read_at(mCurrentPos, buffer->data(), frame_size);
    if (n < (ssize_t)frame_size) {
        buffer->release();
        buffer = NULL;

        return ERROR_END_OF_STREAM;
    }

    buffer->set_range(0, frame_size);

    buffer->meta_data()->setInt32(kKeyTimeUnits, mCurrentTimeUs / 1000);
    buffer->meta_data()->setInt32(kKeyTimeScale, 1000);

    mCurrentPos += frame_size;
    mCurrentTimeUs += 1152 * 1000000 / 44100;

    *out = buffer;

    return OK;
}
status_t AMRNBDecoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    if (options && options->getSeekTo(&seekTimeUs)) {
        CHECK(seekTimeUs >= 0);

        mNumSamplesOutput = 0;

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    } else {
        seekTimeUs = -1;
    }

    if (mInputBuffer == NULL) {
        err = mSource->read(&mInputBuffer, options);

        if (err != OK) {
            return err;
        }

        int64_t timeUs;
        if (mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            mAnchorTimeUs = timeUs;
            mNumSamplesOutput = 0;
        } else {
            // We must have a new timestamp after seeking.
            CHECK(seekTimeUs < 0);
        }
    }

    MediaBuffer *buffer;
    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);

    const uint8_t *inputPtr =
        (const uint8_t *)mInputBuffer->data() + mInputBuffer->range_offset();

    size_t numBytesRead =
        AMRDecode(mState,
          (Frame_Type_3GPP)((inputPtr[0] >> 3) & 0x0f),
          (UWord8 *)&inputPtr[1],
          static_cast<int16_t *>(buffer->data()),
          MIME_IETF);

    ++numBytesRead;  // Include the frame type header byte.

    buffer->set_range(0, kNumSamplesPerFrame * sizeof(int16_t));

    if (numBytesRead > mInputBuffer->range_length()) {
        // This is bad, should never have happened, but did. Abort now.

        buffer->release();
        buffer = NULL;

        return ERROR_MALFORMED;
    }

    mInputBuffer->set_range(
            mInputBuffer->range_offset() + numBytesRead,
            mInputBuffer->range_length() - numBytesRead);

    if (mInputBuffer->range_length() == 0) {
        mInputBuffer->release();
        mInputBuffer = NULL;
    }

    buffer->meta_data()->setInt64(
            kKeyTime,
            mAnchorTimeUs
                + (mNumSamplesOutput * 1000000) / kSampleRate);

    mNumSamplesOutput += kNumSamplesPerFrame;

    *out = buffer;

    return OK;
}
status_t AMRNBEncoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
    bool readFromSource = false;
    int64_t wallClockTimeUs = -1;

    while (mNumInputSamples < kNumSamplesPerFrame) {
        if (mInputBuffer == NULL) {
            err = mSource->read(&mInputBuffer, options);

            if (err != OK) {
                if (mNumInputSamples == 0) {
                    return ERROR_END_OF_STREAM;
                }
                memset(&mInputFrame[mNumInputSamples],
                       0,
                       sizeof(int16_t)
                            * (kNumSamplesPerFrame - mNumInputSamples));
                mNumInputSamples = kNumSamplesPerFrame;
                break;
            }

            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
            CHECK_EQ(align, 0);
            readFromSource = true;

            int64_t timeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
                wallClockTimeUs = timeUs;
            }
            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
                mAnchorTimeUs = timeUs;
            }
        } else {
            readFromSource = false;
        }

        size_t copy =
            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);

        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy(&mInputFrame[mNumInputSamples],
               (const uint8_t *)mInputBuffer->data()
                    + mInputBuffer->range_offset(),
               copy);

        mNumInputSamples += copy / sizeof(int16_t);

        mInputBuffer->set_range(
                mInputBuffer->range_offset() + copy,
                mInputBuffer->range_length() - copy);

        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }

    MediaBuffer *buffer;
    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);

    uint8_t *outPtr = (uint8_t *)buffer->data();

    Frame_Type_3GPP frameType;
    int res = AMREncode(
            mEncState, mSidState, (Mode)mMode,
            mInputFrame, outPtr, &frameType, AMR_TX_WMF);

    CHECK(res >= 0);
    CHECK((size_t)res < buffer->size());

    // Convert header byte from WMF to IETF format.
    outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c;

    buffer->set_range(0, res);

    // Each frame of 160 samples is 20ms long.
    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
    buffer->meta_data()->setInt64(
            kKeyTime, mAnchorTimeUs + mediaTimeUs);

    if (readFromSource && wallClockTimeUs != -1) {
        buffer->meta_data()->setInt64(kKeyDriftTime,
            mediaTimeUs - wallClockTimeUs);
    }

    ++mNumFramesOutput;

    *out = buffer;

    mNumInputSamples = 0;

    return OK;
}
bool LivePhotoSource::threadLoop() {
	ALOGD("+");
	status_t err = OK;
    MediaBuffer *buffer = NULL;
	int32_t isSync = false;

	while(mSourceStarted && !exitPending() && ((err = mSource->read(&buffer)) == OK)) {
        MediaBuffer* copy = new MediaBuffer(buffer->range_length(), buffer->meta_data());
        memcpy( copy->data(), (uint8_t *)buffer->data() + buffer->range_offset(), buffer->range_length() );
        copy->set_range(0, buffer->range_length());

		int64_t latestTimestampUs = 0;
		//CHECK(copy->meta_data()->findInt64(kKeyTime, &latestTimestampUs));
		copy->meta_data()->findInt64(kKeyTime, &latestTimestampUs);
		ALOGI("cur timestamp is %lldus", latestTimestampUs);
		{
			Mutex::Autolock _lock(mLock);
			
			int32_t isCodecConfig;
			if(copy->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecConfig) && isCodecConfig ) {
				if(mCodecConfigBuffer != NULL) {
					mCodecConfigBuffer->release();
					mCodecConfigBuffer = NULL;
				}
				
				ALOGD("keep codec config buffer");
				mCodecConfigBuffer = copy;
			}
			else {
		        mMediaBufferPool.push_back(copy);

				if(mLivePhotoStarted) {
					mFrameAvailableCond.signal();
					copy->meta_data()->findInt32(kKeyIsSyncFrame, &isSync);
					
					if (!isSync) {
						if (reinterpret_cast<MediaCodecSource *>(mSource.get())->
						                  requestIDRFrame() != OK)
							ALOGW("Send force I cmd fail");
					}
					else {
						mSourceStarted = false;
						buffer->release();
						buffer = NULL;
						break; // 
					}
				}
				else {
					updateBufferPool();
				}
			}
		}

		buffer->release();
		buffer = NULL;
	}

	{
		Mutex::Autolock _lock(mLock);
		if(err != OK) {
			ALOGE("read source err(%d) . this is a bad livephoto", err);
		}
		
		if(mSourceStarted && mLivePhotoStarted) {
			mLivePhotoStarted = false;
			mSourceStarted = false;
			ALOGE("there is an error with exiting while when livephoto started");
			mFrameAvailableCond.signal();
		}
		ALOGD("Thread exit signal");
		mThreadExitCond.signal();
		mIsThreadExit = true;
	}
	
	ALOGD("-");
	return false;
}
Example no. 22
0
 virtual status_t readMultiple(
         Vector<MediaBuffer *> *buffers, uint32_t maxNumBuffers, const ReadOptions *options) {
     ALOGV("readMultiple");
     if (buffers == NULL || !buffers->isEmpty()) {
         return BAD_VALUE;
     }
     Parcel data, reply;
     data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
     data.writeUint32(maxNumBuffers);
     if (options != nullptr) {
         data.writeByteArray(sizeof(*options), (uint8_t*) options);
     }
     status_t ret = remote()->transact(READMULTIPLE, data, &reply);
     mMemoryCache.gc();
     if (ret != NO_ERROR) {
         return ret;
     }
     // wrap the returned data in a vector of MediaBuffers
     int32_t buftype;
     uint32_t bufferCount = 0;
     while ((buftype = reply.readInt32()) != NULL_BUFFER) {
         LOG_ALWAYS_FATAL_IF(bufferCount >= maxNumBuffers,
                 "Received %u+ buffers and requested %u buffers",
                 bufferCount + 1, maxNumBuffers);
         MediaBuffer *buf;
         if (buftype == SHARED_BUFFER || buftype == SHARED_BUFFER_INDEX) {
             uint64_t index = reply.readUint64();
             ALOGV("Received %s index %llu",
                     buftype == SHARED_BUFFER ? "SHARED_BUFFER" : "SHARED_BUFFER_INDEX",
                     (unsigned long long) index);
             sp<IMemory> mem;
             if (buftype == SHARED_BUFFER) {
                 sp<IBinder> binder = reply.readStrongBinder();
                 mem = interface_cast<IMemory>(binder);
                 LOG_ALWAYS_FATAL_IF(mem.get() == nullptr,
                         "Received NULL IMemory for shared buffer");
                 mMemoryCache.insert(index, mem);
             } else {
                 mem = mMemoryCache.lookup(index);
                 LOG_ALWAYS_FATAL_IF(mem.get() == nullptr,
                         "Received invalid IMemory index for shared buffer: %llu",
                         (unsigned long long)index);
             }
             size_t offset = reply.readInt32();
             size_t length = reply.readInt32();
             buf = new RemoteMediaBufferWrapper(mem);
             buf->set_range(offset, length);
             buf->meta_data()->updateFromParcel(reply);
         } else { // INLINE_BUFFER
             int32_t len = reply.readInt32();
             ALOGV("INLINE_BUFFER status %d and len %d", ret, len);
             buf = new MediaBuffer(len);
             reply.read(buf->data(), len);
             buf->meta_data()->updateFromParcel(reply);
         }
         buffers->push_back(buf);
         ++bufferCount;
         ++mBuffersSinceStop;
     }
     ret = reply.readInt32();
     ALOGV("readMultiple status %d, bufferCount %u, sinceStop %u",
             ret, bufferCount, mBuffersSinceStop);
     return ret;
 }
Example no. 23
0
status_t AMRSource::read(
    MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        uint64_t seekFrame = seekTimeUs / 20000ll;  // 20ms per frame.
        mCurrentTimeUs = seekFrame * 20000ll;
        uint32_t framesize=0;
        uint64_t offset = 0, numframes = 0;
        seekFrame = seekFrame + 1; // +1 because seekFrame is zero-based while the per-entry frame counts below are 1-based
        LOGI("seekframe %lld", seekFrame);
        for (List<AMRFrameTableEntry>::iterator it = mAMRFrameTableEntries.begin();
                it != mAMRFrameTableEntries.end(); ++it) {

            numframes = it->mNumFrames;
            framesize = it->mFrameSize;
            if(seekFrame >= mTotalFrames)
            {
                LOGE("seek beyond EOF");
                return ERROR_OUT_OF_RANGE;
            }

            if(seekFrame > numframes)
            {
                offset = offset + (numframes * framesize);
                seekFrame = seekFrame - numframes;
                LOGV("> offset %lld seekFrame %lld numframes %lld framesize %d", offset, seekFrame, numframes, framesize);
            }
            else
            {
                offset = offset + (seekFrame * framesize);
                LOGV("!> offset %lld numframes %lld framesize %d", offset, numframes, framesize);
                break;
            }
        }
        mOffset = offset;
    }

    uint8_t header;
    ssize_t n = mDataSource->readAt(mOffset, &header, 1);

    if (n < 1) {
        return ERROR_END_OF_STREAM;
    }

    if (header & 0x83) {
        // Padding bits must be 0.

        LOGE("padding bits must be 0, header is 0x%02x", header);

        return ERROR_MALFORMED;
    }

    unsigned FT = (header >> 3) & 0x0f;

    if (FT > MAX_AMRMODE) {

        LOGE("illegal AMR frame type %d", FT);

        return ERROR_MALFORMED;
    }

    size_t frameSize = getFrameSize(mIsWide, FT);

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    n = mDataSource->readAt(mOffset, buffer->data(), frameSize);

    if (n != (ssize_t)frameSize) {
        buffer->release();
        buffer = NULL;

        return ERROR_IO;
    }

    buffer->set_range(0, frameSize);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mOffset += frameSize;
    mCurrentTimeUs += 20000;  // Each frame is 20ms

    *out = buffer;

    return OK;
}
Example no. 24
0
status_t AMRSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        int64_t seekFrame = seekTimeUs / 20000ll;  // 20ms per frame.
        mCurrentTimeUs = seekFrame * 20000ll;
        mOffset = seekFrame * mFrameSize + (mIsWide ? 9 : 6);
    }

    uint8_t header;
    ssize_t n = mDataSource->readAt(mOffset, &header, 1);

    if (n < 1) {
        return ERROR_END_OF_STREAM;
    }

    if (header & 0x83) {
        // Padding bits must be 0.

        LOGE("padding bits must be 0, header is 0x%02x", header);

        return ERROR_MALFORMED;
    }

    unsigned FT = (header >> 3) & 0x0f;

    if (FT > 8 || (!mIsWide && FT > 7)) {

        LOGE("illegal AMR frame type %d", FT);

        return ERROR_MALFORMED;
    }

    size_t frameSize = getFrameSize(mIsWide, FT);
    CHECK_EQ(frameSize, mFrameSize);

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    n = mDataSource->readAt(mOffset, buffer->data(), frameSize);

    if (n != (ssize_t)frameSize) {
        buffer->release();
        buffer = NULL;

        return ERROR_IO;
    }

    buffer->set_range(0, frameSize);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mOffset += frameSize;
    mCurrentTimeUs += 20000;  // Each frame is 20ms

    *out = buffer;

    return OK;
}
Example no. 25
0
status_t AACDecoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    /*
     ** End the AAC audio stream when initCheck is not OK,
     ** to avoid abnormal playback later.   @Jun 16, 2011. by hbb
     */
    if(mInitCheck != OK) {
        ALOGE("mInitCheck is not OK, so end aac audio stream");
        return ERROR_END_OF_STREAM;
    }

    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        CHECK(seekTimeUs >= 0);

        mNumSamplesOutput = 0;

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        // Make sure that the next buffer output does not still
        // depend on fragments from the last one decoded.
        PVMP4AudioDecoderResetBuffer(mDecoderBuf);
    } else {
        seekTimeUs = -1;
    }

repeat:
    if (mInputBuffer == NULL) {
        err = mSource->read(&mInputBuffer, options);

        if (err != OK) {
            return err;
        }

        int64_t timeUs;
        if (mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
            if (mAnchorTimeUs != timeUs) {
                mAnchorTimeUs = timeUs;
                mNumSamplesOutput = 0;
            }
        } else {
            // We must have a new timestamp after seeking.
            CHECK(seekTimeUs < 0);
        }
    }

    MediaBuffer *buffer;
    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK);

    mConfig->pInputBuffer =
        (UChar *)mInputBuffer->data() + mInputBuffer->range_offset();

    mConfig->inputBufferCurrentLength = mInputBuffer->range_length();
    mConfig->inputBufferMaxLength = 0;
    mConfig->inputBufferUsedLength = 0;
    mConfig->remainderBits = 0;

    mConfig->pOutputBuffer = static_cast<Int16 *>(buffer->data());
    mConfig->pOutputBuffer_plus = &mConfig->pOutputBuffer[2048];
#if WRITE_FILE
    if (aacFp) {
        fwrite(mConfig->pInputBuffer, 1, mConfig->inputBufferCurrentLength, aacFp);
    }
#endif
    //ALOGE("inputlen %d input[0] = %x input[1] = %x", mConfig->inputBufferCurrentLength,
    //      (mConfig->pInputBuffer)[0], (mConfig->pInputBuffer)[1]);
    Int decoderErr = MP4AUDEC_SUCCESS;
    if (mConfig->isMutilChannle) {
        decoderErr = PVMP4AudioDecodeFrameSixChannel(mConfig, mDecoderBuf);
    } else {
        decoderErr = PVMP4AudioDecodeFrame(mConfig, mDecoderBuf);
    }

    if (mInputBuffer != NULL) {
        mInputBuffer->set_range(
                mInputBuffer->range_offset() + mConfig->inputBufferUsedLength,
                mInputBuffer->range_length() - mConfig->inputBufferUsedLength);
        if (mInputBuffer->range_length() <= 3) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }

    // If the input data is not sufficient, drop this frame's input data
    // and fetch the next frame.
    if (decoderErr != MP4AUDEC_SUCCESS) {
        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
        if (buffer) {
            buffer->release();
            buffer = NULL;
        }
        goto repeat;
    }

    /*
     * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
     * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
     * rate system and the sampling rate in the final output is actually
     * doubled compared with the core AAC decoder sampling rate.
     *
     * Explicit signalling is done by explicitly defining SBR audio object
     * type in the bitstream. Implicit signalling is done by embedding
     * SBR content in AAC extension payload specific to SBR, and hence
     * requires an AAC decoder to perform pre-checks on actual audio frames.
     *
     * Thus, we could not say for sure whether a stream is
     * AAC+/eAAC+ until the first data frame is decoded.
     */
    if (++mNumDecodedBuffers <= 2) {
        ALOGV("audio/extended audio object type: %d + %d",
            mConfig->audioObjectType, mConfig->extendedAudioObjectType);
        ALOGV("aac+ upsampling factor: %d desired channels: %d",
            mConfig->aacPlusUpsamplingFactor, mConfig->desiredChannels);

        CHECK(mNumDecodedBuffers > 0);
        if (mNumDecodedBuffers == 1) {
            mUpsamplingFactor = mConfig->aacPlusUpsamplingFactor;
            // Check on the sampling rate to see whether it is changed.
            int32_t sampleRate;
            CHECK(mMeta->findInt32(kKeySampleRate, &sampleRate));
			ALOGV("--->aac samplerae %d",sampleRate);
            if (mConfig->samplingRate != sampleRate) {
                mMeta->setInt32(kKeySampleRate, mConfig->samplingRate);
                ALOGV("Sample rate was %d Hz, but now is %d Hz",
                        sampleRate, mConfig->samplingRate);
                buffer->release();
               // mInputBuffer->release();
               // mInputBuffer = NULL;
                return INFO_FORMAT_CHANGED;
            }
        } else {  // mNumDecodedBuffers == 2
            if (mConfig->extendedAudioObjectType == MP4AUDIO_AAC_LC ||
                mConfig->extendedAudioObjectType == MP4AUDIO_LTP) {
                if (mUpsamplingFactor == 2) {
                    // The stream turns out to be not aacPlus mode anyway
                    ALOGV("Disable AAC+/eAAC+ since extended audio object type is %d",
                        mConfig->extendedAudioObjectType);
                    mConfig->aacPlusEnabled = 0;
                }
            } else {
                if (mUpsamplingFactor == 1) {
                    // aacPlus mode does not buy us anything, but to cause
                    // 1. CPU load to increase, and
                    // 2. a half speed of decoding
                    ALOGV("Disable AAC+/eAAC+ since upsampling factor is 1");
                    mConfig->aacPlusEnabled = 0;
                }
            }
        }
    }

    size_t numOutBytes =
        mConfig->frameLength * sizeof(int16_t) * mConfig->desiredChannels;
    if (mUpsamplingFactor == 2) {
        if (mConfig->desiredChannels == 1) {
            memcpy(&mConfig->pOutputBuffer[1024], &mConfig->pOutputBuffer[2048], numOutBytes * 2);
        }
        numOutBytes *= 2;
    }

    if (decoderErr != MP4AUDEC_SUCCESS) {
        ALOGW("AAC decoder returned error %d, substituting silence", decoderErr);

        memset(buffer->data(), 0, numOutBytes);

        // Discard input buffer.
        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
        // fall through
    }

    buffer->set_range(0, numOutBytes);

    buffer->meta_data()->setInt64(
            kKeyTime,
            mAnchorTimeUs
                + (mNumSamplesOutput * 1000000) / mConfig->samplingRate);

    mNumSamplesOutput += mConfig->frameLength;

    *out = buffer;

    return OK;
}
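One detail worth calling out in the example above: when SBR is detected on the first decoded frame, the decoder updates kKeySampleRate on its output format and returns INFO_FORMAT_CHANGED instead of a buffer. A minimal sketch of how a caller might react, assuming the legacy MediaSource/MetaData interfaces (reconfigureAudioOutput() is a hypothetical helper, not a Stagefright API):

#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/MetaData.h>

using namespace android;

void reconfigureAudioOutput(int32_t sampleRate);  // hypothetical sink helper

// Keep reading until the decoder hands back a real PCM buffer, re-reading the
// output format whenever it reports INFO_FORMAT_CHANGED (e.g. the AAC+ case
// above, where the effective sample rate doubles after the first frame).
status_t pullDecodedFrame(const sp<MediaSource> &decoder, MediaBuffer **out) {
    for (;;) {
        status_t err = decoder->read(out);
        if (err == INFO_FORMAT_CHANGED) {
            int32_t sampleRate;
            if (decoder->getFormat()->findInt32(kKeySampleRate, &sampleRate)) {
                reconfigureAudioOutput(sampleRate);  // reopen the sink at the new rate
            }
            continue;
        }
        return err;  // OK with *out set, or ERROR_END_OF_STREAM / a real error
    }
}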
Example no. 26
0
status_t M2VDecoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;
    //LOGI("M2VDecoder::read in");

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        ALOGV("seek requested to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);

        CHECK(seekTimeUs >= 0);
        mPendingSeekTimeUs = seekTimeUs;
        mPendingSeekMode = mode;

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
		sDecApi.reset_class_On2Decoder(pOn2Dec);
    }

    if (mInputBuffer == NULL) {
        ALOGV("fetching new input buffer.");

        bool seeking = false;

        for (;;) {
            if (mPendingSeekTimeUs >= 0) {
                ALOGV("reading data from timestamp %lld (%.2f secs)",
                     mPendingSeekTimeUs, mPendingSeekTimeUs / 1E6);
            }

            ReadOptions seekOptions;
            if (mPendingSeekTimeUs >= 0) {
                seeking = true;

                seekOptions.setSeekTo(mPendingSeekTimeUs, mPendingSeekMode);
                mPendingSeekTimeUs = -1;
            }
            status_t err = mSource->read(&mInputBuffer, &seekOptions);
            seekOptions.clearSeekTo();

            if (err != OK) {
                *out = NULL;
                return err;
            }

            if (mInputBuffer->range_length() > 0) {
                break;
            }

            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        if (seeking) {
            int64_t targetTimeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)
                    && targetTimeUs >= 0) {
                mTargetTimeUs = targetTimeUs;
            } else {
                mTargetTimeUs = -1;
            }
        }
    }

    MediaBuffer *aOutBuf = new MediaBuffer(sizeof(VPU_FRAME));
    uint8_t *aOutBuffer = (uint8_t *)aOutBuf->data();
    uint32_t aOutputLength = 0;
    uint8_t *pInput = (uint8_t *)mInputBuffer->data();
    uint32_t aInBufSize = mInputBuffer->range_length();
    //int64_t inputTime = 0LL;
    //mInputBuffer->meta_data()->findInt64(kKeyTime, &inputTime);
    int64_t outputTime = 0LL;
    /*if (mNumFramesOutput == 0) {
        pInput += 16;
        aInBufSize -= 16;
    }*/
    //LOGI("before decoder m2v inputlen %d", aInBufSize);
    //LOGI("input data %c %c %c %c", pInput[0], pInput[1], pInput[2], pInput[3]);
    memset(aOutBuffer, 0, sizeof(VPU_FRAME));

    if (sDecApi.dec_oneframe_class_On2Decoder(
            pOn2Dec,
            aOutBuffer,
            (uint32_t *)&aOutputLength,
            pInput,
            &aInBufSize)) {
        sDecApi.get_oneframe_class_On2Decoder(pOn2Dec, aOutBuffer, (uint32_t *)&aOutputLength);
        aOutBuf->releaseframe();
        mInputBuffer->release();
        mInputBuffer = NULL;
        ALOGE("m2vdec failed");
        return UNKNOWN_ERROR;
    }
    //LOGI("after decoder m2v aInBufSize %d aOutputLength %d", aInBufSize, aOutputLength);
    if (mInputBuffer) {
        mInputBuffer->release();
        mInputBuffer = NULL;
    }
    if (mInputBuffer == NULL) {
        for (;;) {
            status_t err = mSource->read(&mInputBuffer);
            if (err != OK) {
                *out = NULL;
                return err;
            }

            if (mInputBuffer->range_length() > 0) {
                break;
            }

            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }
    sDecApi.get_oneframe_class_On2Decoder(pOn2Dec, aOutBuffer, (uint32_t *)&aOutputLength);

    if (aOutputLength) {
        mNumFramesOutput++;
    }

    VPU_FRAME *frame = (VPU_FRAME *)aOutBuffer;
    outputTime = ((int64_t)(frame->ShowTime.TimeHigh) << 32) | ((int64_t)(frame->ShowTime.TimeLow));
    outputTime *= 1000;
    aOutBuf->meta_data()->setInt64(kKeyTime, outputTime);
    if (aOutputLength <= 0) {
        aOutBuf->set_range(0, 0);
    }
    *out = aOutBuf;
    //LOGI("M2VDecoder::read out");
    return OK;
}
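The only timing information this M2V path gets back arrives inside the vendor VPU_FRAME struct, as two 32-bit halves of a millisecond timestamp. A small illustration of that conversion in isolation (field names follow the code above; the struct itself is vendor-specific and assumed to hold milliseconds):

#include <cstdint>

// Reassemble the 64-bit presentation time from VPU_FRAME's ShowTime halves
// and convert it to the microseconds expected by kKeyTime.
static int64_t showTimeToUs(uint32_t timeHigh, uint32_t timeLow) {
    int64_t ms = (static_cast<int64_t>(timeHigh) << 32) |
                  static_cast<int64_t>(timeLow);
    return ms * 1000;
}

// Example: timeHigh = 0, timeLow = 40 -> 40 ms -> 40000 us, i.e. the second
// frame of a 25 fps stream.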
Example no. 27
0
status_t AMRSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
#ifdef MTK_AOSP_ENHANCEMENT
        // A negative seekTimeUs would make the (int)index >= (uint)mOffsetTableLength
        // comparison below mix signed and unsigned values, so clamp it to 0 first.
        if (seekTimeUs < 0) {
            ALOGW("seekTimeUs:%lld < 0", seekTimeUs);
            seekTimeUs = 0;
        }
#endif
        size_t size;
        int64_t seekFrame = seekTimeUs / 20000ll;  // 20ms per frame.
        mCurrentTimeUs = seekFrame * 20000ll;

        size_t index = seekFrame < 0 ? 0 : seekFrame / 50;
        if (index >= mOffsetTableLength) {
            index = mOffsetTableLength - 1;
        }

        mOffset = mOffsetTable[index] + (mIsWide ? 9 : 6);

        for (size_t i = 0; i < seekFrame - index * 50; i++) {
            status_t err;
            if ((err = getFrameSizeByOffset(mDataSource, mOffset,
                            mIsWide, &size)) != OK) {
                return err;
            }
            mOffset += size;
        }
    }

    uint8_t header;
    ssize_t n = mDataSource->readAt(mOffset, &header, 1);

    if (n < 1) {
        return ERROR_END_OF_STREAM;
    }
#ifdef MTK_AOSP_ENHANCEMENT
    // Skip bytes until a valid frame header is found; give up after 320 bytes.
    int count = 0;
    while (header & 0x83) {
        if ((count % 10) == 0) {
            SXLOGW("AMRSource::read--frame header error, skipping until a valid one is found, count %d", count);
        }
        mOffset++;
        count++;
        if (count > 320) {
            SXLOGW("AMRSource::read--cannot find a valid frame header within 320 bytes");
            return ERROR_END_OF_STREAM;
        }
        n = mDataSource->readAt(mOffset, &header, 1);
        if (n < 1) {
            return ERROR_END_OF_STREAM;
        }
    }
#else
    if (header & 0x83) {
        // Padding bits must be 0.

        ALOGE("padding bits must be 0, header is 0x%02x", header);

        return ERROR_MALFORMED;
    }
#endif

    unsigned FT = (header >> 3) & 0x0f;

    size_t frameSize = getFrameSize(mIsWide, FT);
    if (frameSize == 0) {
        return ERROR_MALFORMED;
    }

    MediaBuffer *buffer;
    status_t err = mGroup->acquire_buffer(&buffer);
    if (err != OK) {
        return err;
    }

    n = mDataSource->readAt(mOffset, buffer->data(), frameSize);

    if (n != (ssize_t)frameSize) {
        buffer->release();
        buffer = NULL;

        return ERROR_IO;
    }

    buffer->set_range(0, frameSize);
    buffer->meta_data()->setInt64(kKeyTime, mCurrentTimeUs);
    buffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);

    mOffset += frameSize;
    mCurrentTimeUs += 20000;  // Each frame is 20ms

    *out = buffer;

    return OK;
}
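In this MTK variant the seek no longer assumes fixed-size frames: an offset table records the byte position of every 50th frame (one entry per second, since each frame covers 20 ms), and the remaining frames are then skipped one at a time via their headers, as the getFrameSizeByOffset() loop above shows. A sketch of just the table arithmetic, with hypothetical names (AmrSeekState, seekToTable):

#include <cstddef>
#include <cstdint>

struct AmrSeekState {
    int64_t currentTimeUs;   // time of the frame the walk will start from
    uint64_t offset;         // byte offset of that frame's header
    int64_t framesToSkip;    // frames still to skip, header by header
};

// Map a seek time onto the nearest preceding offset-table entry.
static AmrSeekState seekToTable(int64_t seekTimeUs,
                                const uint64_t *offsetTable,
                                size_t offsetTableLength,
                                bool isWide) {
    if (seekTimeUs < 0) seekTimeUs = 0;

    int64_t seekFrame = seekTimeUs / 20000;              // 20 ms per frame
    size_t index = static_cast<size_t>(seekFrame / 50);  // one entry per 50 frames
    if (index >= offsetTableLength) index = offsetTableLength - 1;

    AmrSeekState s;
    s.currentTimeUs = seekFrame * 20000;
    // "#!AMR-WB\n" is a 9-byte file header, "#!AMR\n" is 6 bytes.
    s.offset = offsetTable[index] + (isWide ? 9 : 6);
    s.framesToSkip = seekFrame - static_cast<int64_t>(index) * 50;
    return s;
}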
Example no. 28
0
status_t AMRWBEncoder::read(
        MediaBuffer **out, const ReadOptions *options) {
    status_t err;

    *out = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
    bool readFromSource = false;
    int64_t wallClockTimeUs = -1;

    while (mNumInputSamples < kNumSamplesPerFrame) {
        if (mInputBuffer == NULL) {
            err = mSource->read(&mInputBuffer, options);

            if (err != OK) {
/*m@nufront start*/
                if (mInputBuffer != NULL) {
                    mInputBuffer->release();
                    mInputBuffer = NULL;
                }
                mStarted = false;
                mFrameEncodingCompletionCondition.signal();
                ZJFLOGD("function out for ERROR_END_OF_STREAM");
                return ERROR_END_OF_STREAM;
#if 0
                if (mNumInputSamples == 0) {
                    return ERROR_END_OF_STREAM;
                }
                memset(&mInputFrame[mNumInputSamples],
                       0,
                       sizeof(int16_t)
                            * (kNumSamplesPerFrame - mNumInputSamples));
                mNumInputSamples = kNumSamplesPerFrame;
                break;
#endif
/*m@nufront end*/
            }

            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
            CHECK_EQ(align, 0);

            int64_t timeUs;
            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
                wallClockTimeUs = timeUs;
            }
            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
                mAnchorTimeUs = timeUs;
            }
            readFromSource = true;
        } else {
            readFromSource = false;
        }

        size_t copy =
            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);

        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }

        memcpy(&mInputFrame[mNumInputSamples],
               (const uint8_t *)mInputBuffer->data()
                    + mInputBuffer->range_offset(),
               copy);

        mInputBuffer->set_range(
                mInputBuffer->range_offset() + copy,
                mInputBuffer->range_length() - copy);

        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }

        mNumInputSamples += copy / sizeof(int16_t);
        if (mNumInputSamples >= kNumSamplesPerFrame) {
            mNumInputSamples %= kNumSamplesPerFrame;
            break;  // Get a whole input frame 640 bytes
        }
    }

    VO_CODECBUFFER inputData;
    memset(&inputData, 0, sizeof(inputData));
    inputData.Buffer = (unsigned char*) mInputFrame;
    inputData.Length = kInputBufferSize;
    CHECK(VO_ERR_NONE == mApiHandle->SetInputData(mEncoderHandle,&inputData));

    /*m@nufront start*/
    //MediaBuffer *buffer;
    //CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
    MediaBuffer *buffer;
    {
        Mutex::Autolock autoLock(mLock);
        buffer = new MediaBuffer(1024);
        ++mNumClientOwnedBuffers;
        buffer->setObserver(this);
        buffer->add_ref();
    }
    /*m@nufront end*/

    uint8_t *outPtr = (uint8_t *)buffer->data();

    VO_CODECBUFFER outputData;
    memset(&outputData, 0, sizeof(outputData));
    VO_AUDIO_OUTPUTINFO outputInfo;
    memset(&outputInfo, 0, sizeof(outputInfo));

    VO_U32 ret = VO_ERR_NONE;
    outputData.Buffer = outPtr;
    outputData.Length = buffer->size();
    ret = mApiHandle->GetOutputData(mEncoderHandle, &outputData, &outputInfo);
    CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL);

    buffer->set_range(0, outputData.Length);
    ++mNumFramesOutput;

    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
    buffer->meta_data()->setInt64(kKeyTime, mAnchorTimeUs + mediaTimeUs);
    if (readFromSource && wallClockTimeUs != -1) {
        buffer->meta_data()->setInt64(kKeyDriftTime, mediaTimeUs - wallClockTimeUs);
    }

    *out = buffer;
    return OK;
}
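The constants in this encoder path fit together as follows (assumed values, consistent with the "whole input frame 640 bytes" comment in the accumulation loop): AMR-WB encodes 20 ms of 16 kHz mono PCM per frame, and each emitted frame advances the media time by 20 000 µs relative to the anchor. A back-of-the-envelope check:

#include <cstdint>

// Frame constants assumed by the read() loop above.
constexpr int64_t kSampleRateHz    = 16000;   // AMR-WB input rate
constexpr int64_t kFrameDurationUs = 20000;   // 20 ms per frame
constexpr int64_t kSamplesPerFrame = kSampleRateHz * kFrameDurationUs / 1000000;            // 320
constexpr int64_t kBytesPerFrame   = kSamplesPerFrame * static_cast<int64_t>(sizeof(int16_t));  // 640

// Media time of the Nth output frame, as computed at the end of read().
inline int64_t frameMediaTimeUs(int64_t anchorTimeUs, int64_t numFramesOutput) {
    return anchorTimeUs + numFramesOutput * kFrameDurationUs;
}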
Example no. 29
0
status_t FLACDecoder::read(MediaBuffer **out, const ReadOptions* options) {
    int err = 0;
    *out = NULL;
    uint32 blockSize = 0, usedBitstream = 0, availLength = 0;
    uint32 flacOutputBufSize = FLAC_OUTPUT_BUFFER_SIZE;
    int *status = 0;

    bool seekSource = false, eos = false;

    if (!mInitStatus) {
        return NO_INIT;
    }

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        ALOGD("qti_flac: Seek to %lld", seekTimeUs);
        CHECK(seekTimeUs >= 0);
        mNumFramesOutput = 0;
        seekSource = true;

        if (mInputBuffer) {
            mInputBuffer->release();
            mInputBuffer = NULL;
        }
    }
    else {
        seekTimeUs = -1;
    }

    if (mInputBuffer) {
        mInputBuffer->release();
        mInputBuffer = NULL;
    }

    if (!eos) {
        err = mSource->read(&mInputBuffer, options);
        if (err != OK) {
            ALOGE("qti_flac: Parser returned %d", err);
            eos = true;
            return err;
        }
    }

    int64_t timeUs;
    if (mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
        mAnchorTimeUs = timeUs;
        mNumFramesOutput = 0;
        ALOGVV("qti_flac: mAnchorTimeUs %lld", mAnchorTimeUs);
    }
    else {
        CHECK(seekTimeUs < 0);
    }

    if (!eos) {
        if (mInputBuffer) {
            ALOGVV("qti_flac: Parser filled %d bytes", mInputBuffer->range_length());
            availLength = mInputBuffer->range_length();
            status = (*mProcessData)(&pFlacDecState,
                                     (uint8*)mInputBuffer->data(),
                                     availLength,
                                     mOutBuffer,
                                     &flacOutputBufSize,
                                     &usedBitstream,
                                     &blockSize);
        }

        ALOGVV("qti_flac: status %d, availLength %d, usedBitstream %d, blockSize %d",
                (int)status, availLength, usedBitstream, blockSize);

        MediaBuffer *buffer;
        CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK);

        buffer->set_range(0, blockSize*mNumChannels*2);

        uint16_t *ptr = (uint16_t *) mOutBuffer;

        //Interleave the output from decoder for multichannel clips.
        if (mNumChannels > 1) {
            for (uint32_t k = 0; k < blockSize; k++) {
                for (uint32_t i = k, j = mNumChannels*k; i < blockSize*mNumChannels; i += blockSize, j++) {
                    mTmpBuf[j] = ptr[i];
                }
            }
            memcpy((uint16_t *)buffer->data(), mTmpBuf, blockSize*mNumChannels*2);
        }
        else {
            memcpy((uint16_t *)buffer->data(), mOutBuffer, blockSize*mNumChannels*2);
        }

        int64_t time = 0;
        time = mAnchorTimeUs + (mNumFramesOutput*1000000)/mSampleRate;
        buffer->meta_data()->setInt64(kKeyTime, time);
        mNumFramesOutput += blockSize;
        ALOGVV("qti_flac: time = %lld", time);

        *out = buffer;
    }

    return OK;
}
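The inner double loop above de-planarizes the decoder output: the decoder is assumed to emit channel-major (planar) 16-bit samples, while the MediaBuffer must carry frame-interleaved PCM. The same transform written standalone with clearer indexing (interleavePcm16 is a hypothetical name, not part of the decoder API):

#include <cstdint>

// Convert planar 16-bit PCM (all of channel 0, then channel 1, ...) into
// frame-interleaved PCM (ch0, ch1, ..., ch0, ch1, ...), mirroring the
// mTmpBuf loop in the read() above.
static void interleavePcm16(const uint16_t *planar, uint16_t *interleaved,
                            uint32_t blockSize, uint32_t numChannels) {
    for (uint32_t ch = 0; ch < numChannels; ++ch) {
        for (uint32_t s = 0; s < blockSize; ++s) {
            interleaved[s * numChannels + ch] = planar[ch * blockSize + s];
        }
    }
}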