Example no. 1
0
// Scans mExtractor->mBuffer for MPEG-1/2 start codes (0x00 0x00 0x01 xx) and
// produces either the codec configuration (on the first sequence header ->
// mQueueFormat, returns GETFORMATDONE) or one complete coded picture
// (-> mAccessUnit, returns GETAUDONE).
//
// NOTE(review): this block appears truncated in this view — the function's
// closing brace and the "no complete unit available yet" return path after
// the while loop are not visible here.
status_t ESExtractor::Track::dequeueAccessUnitMPEGVideo(sp<ABuffer> &mAccessUnit) {
    const uint8_t *data = mExtractor->mBuffer->data();
    size_t size = mExtractor->mBuffer->size();
    bool sawPictureStart = false;
    // Last three start-code values seen, oldest first; -1 means "none yet".
    int pprevStartCode = -1;
    int prevStartCode = -1;
    int currentStartCode = -1;

    size_t offset = 0;
    size_t lastGOPOff = -1;  // NOTE(review): unused; also -1 wraps to SIZE_MAX in a size_t.

    while (offset + 3 < size) {
        // Advance byte-by-byte until a 0x000001 start-code prefix is found.
        if (U24_AT(data + offset) != 0x000001) {
            ++offset;
            continue;
        }
        pprevStartCode = prevStartCode;
        prevStartCode = currentStartCode;
        currentStartCode = data[offset + 3];
        // NOTE(review): %d does not match the size_t 'offset'; should be %zu.
        ALOGV("pprevStartCode:0x%x,prevStartCode:0x%x,currentStartCode:0x%x,offset:%d",pprevStartCode,prevStartCode,currentStartCode,offset);

        // Before the format is known, discard everything preceding the first
        // sequence header (start code 0xb3) so parsing begins on it.
        if (currentStartCode == 0xb3 && mQueueFormat == NULL) {
            memmove(mExtractor->mBuffer->data(), mExtractor->mBuffer->data() + offset, size - offset);
            size -= offset;
            offset = 0;
            mExtractor->mBuffer->setRange(0, size);
        }

        // We are one start code past the sequence header: either the header
        // had no extension (prev == 0xb3, current != 0xb5) or we just passed
        // its extension (pprev == 0xb3, prev == 0xb5).
        if ((prevStartCode == 0xb3 && currentStartCode != 0xb5)
                || (pprevStartCode == 0xb3 && prevStartCode == 0xb5)) {
            // seqHeader without/with extension

            if (mQueueFormat == NULL) {
                CHECK_GE(size, 7u);

                // Dimensions live in the 3 bytes after the sequence header
                // start code: 12 bits of width, then 12 bits of height.
                unsigned width =
                    (data[4] << 4) | data[5] >> 4;

                unsigned height =
                    ((data[5] & 0x0f) << 8) | data[6];

                mQueueFormat = new MetaData;
                mQueueFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
                mQueueFormat->setInt32(kKeyWidth, (int32_t)width);
                mQueueFormat->setInt32(kKeyHeight, (int32_t)height);

                ALOGI("found MPEG2 video codec config (%d x %d)", width, height);

                // Bytes [0, offset) are the codec-specific data; wrap them
                // into an ESDS blob for the format metadata.
                sp<ABuffer> csd = new ABuffer(offset);
                memcpy(csd->data(), data, offset);

                // Consume the csd bytes from the front of the shared buffer.
                memmove(mExtractor->mBuffer->data(),
                        mExtractor->mBuffer->data() + offset,
                        mExtractor->mBuffer->size() - offset);

                mExtractor->mBuffer->setRange(0, mExtractor->mBuffer->size() - offset);
                size -= offset;
                offset = 0;

                sp<ABuffer> esds = MakeMPEGVideoESDS(csd);
                mQueueFormat->setData(kKeyESDS, kTypeESDS, esds->data(), esds->size());
                ALOGV("dequeueAccessUnitMPEGVideo:get mQueueFormat,return GETFORMATDONE");
                return GETFORMATDONE;
            }
        }

        // 0x00 = picture start code; 0xB7 ends the sequence and flushes the
        // last picture once one has been started.
        if (mQueueFormat != NULL && (currentStartCode == 0x00 || (sawPictureStart && currentStartCode == 0xB7))) { //ALPS00473447
            // Picture start
            ALOGV("dequeueAccessUnitMPEGVideo:Picture start");
            if (!sawPictureStart) {
                // First picture start: the access unit begins here; keep
                // scanning until the next picture start delimits its end.
                sawPictureStart = true;
            } else {
                // Second picture start: bytes [0, offset) form one complete
                // access unit. Emit it and consume it from the buffer.
                mAccessUnit = new ABuffer(offset);
                memcpy(mAccessUnit->data(), data, offset);

                memmove(mExtractor->mBuffer->data(),
                        mExtractor->mBuffer->data() + offset,
                        mExtractor->mBuffer->size() - offset);

                mExtractor->mBuffer->setRange(0, mExtractor->mBuffer->size() - offset);

                offset = 0;
                mAccessUnit->meta()->setInt32("invt", (int32_t)true);
                mAccessUnit->meta()->setInt64("timeUs", 0);
                // NOTE(review): log text says OPCONTINUE but GETAUDONE is returned.
                ALOGV("dequeueAccessUnitMPEGVideo:return OPCONTINUE");
                return GETAUDONE;
            }
        }
        ++offset;
    }
// Returns the next media sample for this track.
//
// @param out     receives the output MediaBuffer on OK (ownership passes to
//                the caller); set to NULL on entry.
// @param options optional read options; a seek request (ignored for live
//                streams) repositions mBlockIter before reading.
// @return OK on success, ERROR_MALFORMED on inconsistent NAL sizing, or the
//         error from readBlock() when no more frames can be buffered.
//
// For AVC tracks with a nonzero NAL length-prefix size, the length-prefixed
// NAL fragments are rewritten into Annex-B form (0x00 0x00 0x00 0x01
// startcodes) using two passes: pass 0 sizes the output, pass 1 writes it.
status_t MatroskaSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    *out = NULL;

    // >= 0 only for SEEK_CLOSEST; forwarded as kKeyTargetTime so the decoder
    // side can discard output up to the requested time.
    int64_t targetSampleTimeUs = -1ll;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)
            && !mExtractor->isLiveStreaming()) {
        clearPendingFrames();

        // The audio we want is located by using the Cues to seek the video
        // stream to find the target Cluster then iterating to finalize for
        // audio.
        int64_t actualFrameTimeUs;
        mBlockIter.seek(seekTimeUs, mIsAudio, &actualFrameTimeUs);

        if (mode == ReadOptions::SEEK_CLOSEST) {
            targetSampleTimeUs = actualFrameTimeUs;
        }
    }

    // Refill the pending-frame queue; propagate read failures to the caller.
    while (mPendingFrames.empty()) {
        status_t err = readBlock();

        if (err != OK) {
            clearPendingFrames();

            return err;
        }
    }

    // Pop the oldest pending frame. From here on we own it and must either
    // hand it to the caller or release() it on every exit path.
    MediaBuffer *frame = *mPendingFrames.begin();
    mPendingFrames.erase(mPendingFrames.begin());

    if (mType != AVC || mNALSizeLen == 0) {
        // Non-AVC (or already startcode-delimited) data passes through as-is.
        if (targetSampleTimeUs >= 0ll) {
            frame->meta_data()->setInt64(
                    kKeyTargetTime, targetSampleTimeUs);
        }

        *out = frame;

        return OK;
    }

    // Each input frame contains one or more NAL fragments, each fragment
    // is prefixed by mNALSizeLen bytes giving the fragment length,
    // followed by a corresponding number of bytes containing the fragment.
    // We output all these fragments into a single large buffer separated
    // by startcodes (0x00 0x00 0x00 0x01).
    //
    // When mNALSizeLen is 0, we assume the data is already in the format
    // desired.

    const uint8_t *srcPtr =
        (const uint8_t *)frame->data() + frame->range_offset();

    size_t srcSize = frame->range_length();

    size_t dstSize = 0;
    MediaBuffer *buffer = NULL;
    uint8_t *dstPtr = NULL;

    // Pass 0 only measures the required output size; pass 1 writes the data.
    for (int32_t pass = 0; pass < 2; ++pass) {
        size_t srcOffset = 0;
        size_t dstOffset = 0;
        while (srcOffset + mNALSizeLen <= srcSize) {
            size_t NALsize;
            switch (mNALSizeLen) {
                case 1: NALsize = srcPtr[srcOffset]; break;
                case 2: NALsize = U16_AT(srcPtr + srcOffset); break;
                case 3: NALsize = U24_AT(srcPtr + srcOffset); break;
                case 4: NALsize = U32_AT(srcPtr + srcOffset); break;
                default:
                    TRESPASS();
            }

            // Unsigned-overflow guard: the left sum fails to exceed the right
            // one only when adding NALsize wrapped around (or NALsize == 0);
            // such frames are rejected as malformed.
            if (srcOffset + mNALSizeLen + NALsize <= srcOffset + mNALSizeLen) {
                frame->release();
                frame = NULL;

                return ERROR_MALFORMED;
            } else if (srcOffset + mNALSizeLen + NALsize > srcSize) {
                // Fragment claims more bytes than remain; the trailing-bytes
                // check below turns this into ERROR_MALFORMED.
                break;
            }

            if (pass == 1) {
                memcpy(&dstPtr[dstOffset], "\x00\x00\x00\x01", 4);

                // When converting in place (frame == buffer) the payload is
                // already at the right position; only the 4-byte length field
                // needed to be overwritten with the startcode above.
                if (frame != buffer) {
                    memcpy(&dstPtr[dstOffset + 4],
                           &srcPtr[srcOffset + mNALSizeLen],
                           NALsize);
                }
            }

            dstOffset += 4;  // 0x00 00 00 01
            dstOffset += NALsize;

            srcOffset += mNALSizeLen + NALsize;
        }

        if (srcOffset < srcSize) {
            // There were trailing bytes or not enough data to complete
            // a fragment.

            frame->release();
            frame = NULL;

            return ERROR_MALFORMED;
        }

        if (pass == 0) {
            dstSize = dstOffset;

            if (dstSize == srcSize && mNALSizeLen == 4) {
                // In this special case we can re-use the input buffer by substituting
                // each 4-byte nal size with a 4-byte start code
                buffer = frame;
            } else {
                buffer = new MediaBuffer(dstSize);
            }

            // Carry the timestamp and sync flag over to the output buffer.
            int64_t timeUs;
            CHECK(frame->meta_data()->findInt64(kKeyTime, &timeUs));
            int32_t isSync;
            CHECK(frame->meta_data()->findInt32(kKeyIsSyncFrame, &isSync));

            buffer->meta_data()->setInt64(kKeyTime, timeUs);
            buffer->meta_data()->setInt32(kKeyIsSyncFrame, isSync);

            dstPtr = (uint8_t *)buffer->data();
        }
    }

    if (frame != buffer) {
        // A new output buffer was allocated; the source frame is done.
        frame->release();
        frame = NULL;
    }

    if (targetSampleTimeUs >= 0ll) {
        buffer->meta_data()->setInt64(
                kKeyTargetTime, targetSampleTimeUs);
    }

    *out = buffer;

    return OK;
}
Example no. 3
0
// Drives the elementary-stream parser one step: while no track exists and we
// are scanning, sniffs the stream type from the start-code sequence and
// creates the Track; once a track exists, dequeues one access unit from it
// and routes the unit (and, the first time, the format) to the track source.
status_t ESExtractor::dequeueES() {

    // History of the last three start-code value bytes, oldest first.
    // 0xff is not a start-code value we test for, so it acts as "none yet".
    uint8_t pprevStartCode = 0xff;
    uint8_t prevStartCode = 0xff;
    uint8_t currentStartCode = 0xff;

    // A start code is three prefix bytes plus one value byte; anything
    // shorter cannot be parsed yet.
    if (mBuffer->size() < 4) {
        ALOGD("dequeueES:mBuffer->size() < 4");
        return -EAGAIN;
    }

    if (mTrack == NULL && mScanning) {
        const uint8_t *buf = mBuffer->data();
        const size_t total = mBuffer->size();

        for (size_t pos = 0; pos + 3 < total; ++pos) {
            if (U24_AT(buf + pos) != 0x000001) {
                continue;
            }
            pprevStartCode = prevStartCode;
            prevStartCode = currentStartCode;
            currentStartCode = buf[pos + 3];

            if (prevStartCode == 0xb3) {
                // MPEG-1/2: a sequence header (0xb3) immediately followed by
                // an extension start code (0xb5) means MPEG-2; otherwise
                // this is MPEG-1.
                if (currentStartCode == 0xb5) {
                    mTrack = new Track(this, ATSParser::STREAMTYPE_MPEG2_VIDEO);
                    ALOGD("streamType:STREAMTYPE_MPEG2_VIDEO");
                } else {
                    mTrack = new Track(this, ATSParser::STREAMTYPE_MPEG1_VIDEO);
                    ALOGD("streamType:STREAMTYPE_MPEG1_VIDEO");
                }
                return OK;
            }
            if (pprevStartCode == 0x40 && prevStartCode == 0x42
                    && currentStartCode == 0x44) {
                // 0x40/0x42/0x44 in sequence — presumably the HEVC
                // VPS/SPS/PPS NAL unit headers.
                mTrack = new Track(this, ATSParser::STREAMTYPE_HEVC);
                ALOGD("streamType:STREAMTYPE_HEVC");
                return OK;
            }
        }
        // Exhausted the buffer without recognizing a stream type.
        return ERROR_MALFORMED;
    }
    else if (mTrack != NULL) {

        if (!mTrack->mExtractor->getDequeueState()) {
            return OK;
        }

        sp<ABuffer> accessUnit;
        const status_t err = mTrack->dequeueAccessUnit(accessUnit);

        if (err == GETAUDONE) {
            ALOGD("dequeueES:dequeueAccessUnit return GETAUDONE");
            if (mTrack->getSource() == NULL) {
                // No source yet: create it from the queued format (if any)
                // before handing over this first access unit.
                ALOGV("dequeueES:mTrack->mSource is NULL");
                sp<MetaData> meta = mTrack->getQueueFormat();
                if (meta != NULL) {
                    ALOGV("dequeueES:Got The Queue Format");
                    mTrack->setSource(meta);
                    ALOGV("dequeueES:set The mSource,queue this AU");
                    mTrack->getSource()->queueAccessUnit(accessUnit);
                }
            }
            else if (mTrack->getQueueFormat() != NULL) {
                ALOGV("dequeueES:mTrack->mSource is not NULL and mTrack->getQueueFormat() is not NULL");
                mTrack->getSource()->queueAccessUnit(accessUnit);
            }
            return OK;
        }
        if (err == -EAGAIN) {
            ALOGD("dequeueES:dequeueAccessUnit return -EAGAIN");
            return -EAGAIN;
        }
        if (err == GETFORMATDONE) {
            ALOGD("dequeueES:dequeueAccessUnit return GETFORMATDONE");
            return OK;
        }
        if (err == ERROR_MALFORMED) {
            return ERROR_MALFORMED;
        }
    }

    // mTrack == NULL and not scanning, or an unrecognized dequeue status.
    return ERROR_MALFORMED;
}
// static
// Parses a Fraunhofer "VBRI" VBR header from an MP3 stream and builds a
// seeker from its table of contents.
//
// @param source       data source positioned on an MPEG audio stream.
// @param post_id3_pos stream offset of the first audio frame (after any
//                     ID3 tag).
// @return a VBRISeeker on success, NULL if no valid VBRI header is found.
//
// All values read here come from the file and are untrusted: malformed
// fields must make us return NULL, never abort the process.
sp<VBRISeeker> VBRISeeker::CreateFromSource(
        const sp<DataSource> &source, off64_t post_id3_pos) {
    off64_t pos = post_id3_pos;

    // Read the 4-byte MPEG audio frame header preceding the VBRI tag.
    uint8_t header[4];
    ssize_t n = source->readAt(pos, header, sizeof(header));
    if (n < (ssize_t)sizeof(header)) {
        return NULL;
    }

    uint32_t tmp = U32_AT(&header[0]);
    size_t frameSize;
    int sampleRate;
    if (!GetMPEGAudioFrameSize(tmp, &frameSize, &sampleRate)) {
        return NULL;
    }

    // VBRI header follows 32 bytes after the header _ends_.
    pos += sizeof(header) + 32;

    uint8_t vbriHeader[26];
    n = source->readAt(pos, vbriHeader, sizeof(vbriHeader));
    if (n < (ssize_t)sizeof(vbriHeader)) {
        return NULL;
    }

    if (memcmp(vbriHeader, "VBRI", 4)) {
        return NULL;
    }

    size_t numFrames = U32_AT(&vbriHeader[14]);

    // 1152 samples per frame for MPEG-1 rates (>= 32 kHz), 576 otherwise.
    int64_t durationUs =
        numFrames * 1000000ll * (sampleRate >= 32000 ? 1152 : 576) / sampleRate;

    ALOGV("duration = %.2f secs", durationUs / 1E6);

    size_t numEntries = U16_AT(&vbriHeader[18]);
    size_t entrySize = U16_AT(&vbriHeader[22]);
    size_t scale = U16_AT(&vbriHeader[20]);

    ALOGV("%zu entries, scale=%zu, size_per_entry=%zu",
         numEntries,
         scale,
         entrySize);

    // Only 1..4 byte TOC entries can be parsed. Reject anything else up
    // front instead of letting a malformed file reach a CHECK-abort in the
    // entry loop below (process kill on hostile input).
    if (entrySize < 1 || entrySize > 4) {
        ALOGE("invalid VBRI entry size: %zu", entrySize);
        return NULL;
    }

    // numEntries and entrySize are both 16-bit, so the product fits in
    // 32 bits — no overflow even on 32-bit size_t.
    size_t totalEntrySize = numEntries * entrySize;
    uint8_t *buffer = new uint8_t[totalEntrySize];

    n = source->readAt(pos + sizeof(vbriHeader), buffer, totalEntrySize);
    if (n < (ssize_t)totalEntrySize) {
        delete[] buffer;
        buffer = NULL;

        return NULL;
    }

    sp<VBRISeeker> seeker = new VBRISeeker;
    seeker->mBasePos = post_id3_pos + frameSize;
    // only update mDurationUs if the calculated duration is valid (non zero)
    // otherwise, leave duration at -1 so that getDuration() and getOffsetForTime()
    // return false when called, to indicate that this vbri tag does not have the
    // requested information
    if (durationUs) {
        seeker->mDurationUs = durationUs;
    }

    off64_t offset = post_id3_pos;
    for (size_t i = 0; i < numEntries; ++i) {
        uint32_t numBytes;
        switch (entrySize) {
            case 1: numBytes = buffer[i]; break;
            case 2: numBytes = U16_AT(buffer + 2 * i); break;
            case 3: numBytes = U24_AT(buffer + 3 * i); break;
            default:
                // entrySize was validated above, so this is exactly 4.
                numBytes = U32_AT(buffer + 4 * i);
                break;
        }

        // Each entry is stored divided by 'scale'; restore the byte count.
        numBytes *= scale;

        seeker->mSegments.push(numBytes);

        ALOGV("entry #%zu: %u offset 0x%016llx", i, numBytes, offset);
        offset += numBytes;
    }

    delete[] buffer;
    buffer = NULL;

    ALOGI("Found VBRI header.");

    return seeker;
}