Code example #1
TEST(AnimatedWebPTests, progressiveDecode)
{
    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/webp-animated.webp");
    ASSERT_TRUE(fullData.get());
    const size_t fullLength = fullData->size();

    OwnPtr<WEBPImageDecoder> decoder;
    ImageFrame* frame;

    Vector<unsigned> truncatedHashes;
    Vector<unsigned> progressiveHashes;

    // Compute hashes when the file is truncated.
    const size_t increment = 1;
    for (size_t i = 1; i <= fullLength; i += increment) {
        decoder = createDecoder();
        RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), i);
        decoder->setData(data.get(), i == fullLength);
        frame = decoder->frameBufferAtIndex(0);
        if (!frame) {
            truncatedHashes.append(0);
            continue;
        }
        truncatedHashes.append(hashSkBitmap(frame->getSkBitmap()));
    }

    // Compute hashes when the file is progressively decoded.
    decoder = createDecoder();
    for (size_t i = 1; i <= fullLength; i += increment) {
        RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), i);
        decoder->setData(data.get(), i == fullLength);
        frame = decoder->frameBufferAtIndex(0);
        if (!frame) {
            progressiveHashes.append(0);
            continue;
        }
        progressiveHashes.append(hashSkBitmap(frame->getSkBitmap()));
    }

    bool match = true;
    for (size_t i = 0; i < truncatedHashes.size(); ++i) {
        if (truncatedHashes[i] != progressiveHashes[i]) {
            match = false;
            break;
        }
    }
    EXPECT_TRUE(match);
}
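Most of the WebP tests in this listing rely on a createDecoder() helper and a hashSkBitmap() hash defined elsewhere in the test file. A minimal sketch of what they might look like, assuming the WTF/Skia APIs of that Blink era (the real helpers may differ):

static PassOwnPtr<WEBPImageDecoder> createDecoder()
{
    // Construct a decoder with the test suite's default alpha and color-profile options.
    return adoptPtr(new WEBPImageDecoder(ImageSource::AlphaNotPremultiplied,
        ImageSource::GammaAndColorProfileApplied,
        ImageDecoder::noDecodedImageByteLimit));
}

static unsigned hashSkBitmap(const SkBitmap& bitmap)
{
    // Hash the raw pixel memory so that identical decodes produce equal hashes.
    SkAutoLockPixels lock(bitmap);
    return StringHasher::hashMemory(bitmap.getPixels(), bitmap.getSize());
}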
Code example #2
File: video_input.cpp Project: eXcomm/ring-daemon
bool VideoInput::captureFrame()
{
    // Return true if capture can continue, false if it must be stopped.

    if (not decoder_)
        return false;

    const auto ret = decoder_->decode(getNewFrame());

    switch (ret) {
        case MediaDecoder::Status::ReadError:
        case MediaDecoder::Status::DecodeError:
            return false;

        // End of streamed file
        case MediaDecoder::Status::EOFError:
            createDecoder();
            return static_cast<bool>(decoder_);

        case MediaDecoder::Status::FrameFinished:
            publishFrame();
            // fall through: continue decoding after publishing the frame
        case MediaDecoder::Status::Success:
        default:
            return true;
    }
}
Code example #3
// Reproduce a crash that used to happen for a specific file with a specific sequence of method calls.
TEST(AnimatedWebPTests, reproCrash)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/invalid_vp8_vp8x.webp");
    ASSERT_TRUE(fullData.get());

    // Parse partial data, stopping before the point where the bitstream error can be detected.
    const size_t partialSize = 32768;
    ASSERT_GT(fullData->size(), partialSize);
    RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), partialSize);
    decoder->setData(data.get(), false);
    EXPECT_EQ(1u, decoder->frameCount());
    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_FALSE(decoder->failed());

    // Parse the full data now. The error in the bitstream should be detected.
    decoder->setData(fullData.get(), true);
    EXPECT_EQ(1u, decoder->frameCount());
    frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_EQ(cAnimationLoopOnce, decoder->repetitionCount());
    EXPECT_TRUE(decoder->failed());
}
Code example #4
File: Encoding.cpp Project: lriki/Lumino
String TextEncoding::decode(const byte_t* bytes, int length, int* outUsedDefaultCharCount)
{
    // TODO: if this encoding is already UTF16, a plain memcpy would do

    // Worst-case number of characters contained in the input
    size_t srcMaxCharCount = length / minByteCount();
    srcMaxCharCount += 1; // the Decoder/Encoder saves state, so one leftover character from the previous call may be added
    srcMaxCharCount += 1; // room for the terminating '\0'

    // Maximum number of bytes the output buffer may need
    size_t utf16MaxByteCount = srcMaxCharCount * 4; // UTF16 needs at most 4 bytes per character

    // Allocate the output buffer
    std::vector<byte_t> output(utf16MaxByteCount + sizeof(uint16_t));

    // convert
    std::unique_ptr<TextDecoder> decoder(createDecoder());
    TextDecodeResult result;
    decoder->convertToUTF16(
        bytes,
        length,
        (UTF16*)output.data(),
        utf16MaxByteCount / sizeof(UTF16), // leave one character of headroom in case a '\0' is force-stored
        &result);

    if (outUsedDefaultCharCount) {
        *outUsedDefaultCharCount = decoder->usedDefaultCharCount();
    }

    // Shrink the output buffer to the number of bytes actually used
    output.resize(result.outputByteCount);

    return String((Char*)output.data(), output.size() / sizeof(Char));
}
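To make the sizing arithmetic concrete: for a 3-byte UTF-8 input with minByteCount() == 1, srcMaxCharCount is 3 + 1 + 1 = 5 and utf16MaxByteCount is 20. A hypothetical call site (the encoding object and how it is obtained are assumptions, not shown in the excerpt):

// Hypothetical usage; 'encoding' is assumed to be a UTF-8 TextEncoding obtained elsewhere.
const byte_t utf8Bytes[] = { 0xE3, 0x81, 0x82 }; // U+3042 HIRAGANA LETTER A
int usedDefaultChars = 0;
String text = encoding->decode(utf8Bytes, 3, &usedDefaultChars);
// usedDefaultChars stays 0 here because every input byte decodes cleanly.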
Code example #5
File: video_input.cpp Project: dot-Sean/telephony
void VideoInput::process()
{
    bool newDecoderCreated = false;

    if (switchPending_.exchange(false)) {
        deleteDecoder();
        createDecoder();
        newDecoderCreated = true;
    }

    if (not decoder_) {
        loop_.stop();
        return;
    }

    captureFrame();

    if (newDecoderCreated) {
        /* Signal the client about the new sink */
        Manager::instance().getVideoManager()->startedDecoding(sinkID_, sink_.openedName(),
                decoder_->getWidth(), decoder_->getHeight(), false);
        DEBUG("LOCAL: shm sink <%s> started: size = %dx%d",
              sink_.openedName().c_str(), decoder_->getWidth(),
              decoder_->getHeight());
    }
}
Code example #6
File: video_input.cpp Project: dot-Sean/telephony
bool VideoInput::captureFrame()
{
    VideoPacket pkt;
    const auto ret = decoder_->decode(getNewFrame(), pkt);

    switch (ret) {
        case VideoDecoder::Status::FrameFinished:
            break;

        case VideoDecoder::Status::ReadError:
        case VideoDecoder::Status::DecodeError:
            loop_.stop();
            // fallthrough
        case VideoDecoder::Status::Success:
            return false;

        // Play in loop
        case VideoDecoder::Status::EOFError:
            deleteDecoder();
            createDecoder();
            return false;
    }

    publishFrame();
    return true;
}
Code example #7
TEST(AnimatedWebPTests, frameIsCompleteAndDuration)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> data = readFile("/LayoutTests/fast/images/resources/webp-animated.webp");
    ASSERT_TRUE(data.get());

    ASSERT_GE(data->size(), 10u);
    RefPtr<SharedBuffer> tempData = SharedBuffer::create(data->data(), data->size() - 10);
    decoder->setData(tempData.get(), false);

    EXPECT_EQ(2u, decoder->frameCount());
    EXPECT_FALSE(decoder->failed());
    EXPECT_TRUE(decoder->frameIsCompleteAtIndex(0));
    EXPECT_EQ(1000, decoder->frameDurationAtIndex(0));
    EXPECT_TRUE(decoder->frameIsCompleteAtIndex(1));
    EXPECT_EQ(500, decoder->frameDurationAtIndex(1));

    decoder->setData(data.get(), true);
    EXPECT_EQ(3u, decoder->frameCount());
    EXPECT_TRUE(decoder->frameIsCompleteAtIndex(0));
    EXPECT_EQ(1000, decoder->frameDurationAtIndex(0));
    EXPECT_TRUE(decoder->frameIsCompleteAtIndex(1));
    EXPECT_EQ(500, decoder->frameDurationAtIndex(1));
    EXPECT_TRUE(decoder->frameIsCompleteAtIndex(2));
    EXPECT_EQ(1000, decoder->frameDurationAtIndex(2));
}
Code example #8
TEST(AnimatedWebPTests, DISABLED_resumePartialDecodeAfterClearFrameBufferCache)
{
    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/webp-animated-large.webp");
    ASSERT_TRUE(fullData.get());
    Vector<unsigned> baselineHashes;
    createDecodingBaseline(fullData.get(), &baselineHashes);
    size_t frameCount = baselineHashes.size();

    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    // Let frame 0 be partially decoded.
    size_t partialSize = 1;
    do {
        RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), partialSize);
        decoder->setData(data.get(), false);
        ++partialSize;
    } while (!decoder->frameCount() || decoder->frameBufferAtIndex(0)->status() == ImageFrame::FrameEmpty);

    // Skip to the last frame and clear.
    decoder->setData(fullData.get(), true);
    EXPECT_EQ(frameCount, decoder->frameCount());
    ImageFrame* lastFrame = decoder->frameBufferAtIndex(frameCount - 1);
    EXPECT_EQ(baselineHashes[frameCount - 1], hashSkBitmap(lastFrame->getSkBitmap()));
    decoder->clearCacheExceptFrame(kNotFound);

    // Resume decoding of the first frame.
    ImageFrame* firstFrame = decoder->frameBufferAtIndex(0);
    EXPECT_EQ(ImageFrame::FrameComplete, firstFrame->status());
    EXPECT_EQ(baselineHashes[0], hashSkBitmap(firstFrame->getSkBitmap()));
}
Code example #9
TEST(AnimatedWebPTests, updateRequiredPreviousFrameAfterFirstDecode)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/webp-animated.webp");
    ASSERT_TRUE(fullData.get());

    // Give the decoder enough data to parse but not to decode, so we can check
    // the status of requiredPreviousFrameIndex before decoding.
    size_t partialSize = 1;
    do {
        RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), partialSize);
        decoder->setData(data.get(), false);
        ++partialSize;
    } while (!decoder->frameCount() || decoder->frameBufferAtIndex(0)->status() == ImageFrame::FrameEmpty);

    EXPECT_EQ(kNotFound, decoder->frameBufferAtIndex(0)->requiredPreviousFrameIndex());
    size_t frameCount = decoder->frameCount();
    for (size_t i = 1; i < frameCount; ++i)
        EXPECT_EQ(i - 1, decoder->frameBufferAtIndex(i)->requiredPreviousFrameIndex());

    decoder->setData(fullData.get(), true);
    for (size_t i = 0; i < frameCount; ++i)
        EXPECT_EQ(kNotFound, decoder->frameBufferAtIndex(i)->requiredPreviousFrameIndex());
}
Code example #10
File: ImageSourceSkia.cpp Project: Sumxx/src
void ImageSourceSkia::setData(SharedBuffer* data,
                              bool allDataReceived,
                              const IntSize& preferredIconSize)
{
    if (!m_decoder)
        m_decoder = createDecoder(data->buffer(), preferredIconSize);

    ImageSource::setData(data, allDataReceived);
}
Code example #11
TEST(StaticWebPTests, notAnimated)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();
    RefPtr<SharedBuffer> data = readFile("/LayoutTests/fast/images/resources/webp-color-profile-lossy.webp");
    ASSERT_TRUE(data.get());
    decoder->setData(data.get(), true);
    EXPECT_EQ(1u, decoder->frameCount());
    EXPECT_EQ(cAnimationNone, decoder->repetitionCount());
}
Code example #12
void
VideoInput::process()
{
    if (switchPending_)
        createDecoder();

    if (not captureFrame()) {
        loop_.stop();
        return;
    }
}
Code example #13
File: ImageSourceWx.cpp Project: Gin-Rye/duibrowser
void ImageSource::setData(SharedBuffer* data, bool allDataReceived)
{
    // Create the decoder by sniffing the bytes: examine the data and
    // instantiate the appropriate decoder plugin. If there are not enough
    // bytes to determine the image type, no decoder plugin is created.
    m_decoder = createDecoder(*data);
    if (!m_decoder)
        return;
    m_decoder->setData(data, allDataReceived);
}
Code example #14
bool VppInputDecodeCapi::init(const char* inputFileName, uint32_t /*fourcc*/, int /*width*/, int /*height*/)
{
    m_input.reset(DecodeInput::create(inputFileName));
    if (!m_input)
        return false;
    m_decoder = createDecoder(m_input->getMimeType());
    if (!m_decoder) {
        fprintf(stderr, "failed create decoder for %s", m_input->getMimeType());
        return false;
    }
    return true;
}
Code example #15
TEST(BMPImageDecoderTest, isSizeAvailable)
{
    const char* bmpFile = "/LayoutTests/fast/images/resources/lenna.bmp"; // 256x256
    RefPtr<SharedBuffer> data = readFile(bmpFile);
    ASSERT_TRUE(data.get());

    OwnPtr<ImageDecoder> decoder = createDecoder();
    decoder->setData(data.get(), true);
    EXPECT_TRUE(decoder->isSizeAvailable());
    EXPECT_EQ(256, decoder->size().width());
    EXPECT_EQ(256, decoder->size().height());
}
Code example #16
File: VideoDecoderD3D.cpp Project: Andytianya/QtAV
bool VideoDecoderD3DPrivate::setup(AVCodecContext *avctx)
{
    const int w = codedWidth(avctx);
    const int h = codedHeight(avctx);
    if (avctx->hwaccel_context && surface_width == aligned(w) && surface_height == aligned(h))
        return true;
    width = avctx->width; // not necessary. set in decode()
    height = avctx->height;
    codec_ctx->hwaccel_context = NULL;
    releaseUSWC();
    destroyDecoder();
    avctx->hwaccel_context = NULL;

    /* Allocates all surfaces needed for the decoder */
    if (surface_auto) {
        switch (codec_ctx->codec_id) {
        case QTAV_CODEC_ID(HEVC):
        case QTAV_CODEC_ID(H264):
            surface_count = 16 + 4;
            break;
        case QTAV_CODEC_ID(MPEG1VIDEO):
        case QTAV_CODEC_ID(MPEG2VIDEO):
            surface_count = 2 + 4;
            break;
        default:
            surface_count = 2 + 4;
            break;
        }
        if (avctx->active_thread_type & FF_THREAD_FRAME)
            surface_count += avctx->thread_count;
    }
    qDebug(">>>>>>>>>>>>>>>>>>>>>surfaces: %d, active_thread_type: %d, threads: %d, refs: %d", surface_count, avctx->active_thread_type, avctx->thread_count, avctx->refs);
    if (surface_count == 0) {
        qWarning("internal error: wrong surface count.  %u auto=%d", surface_count, surface_auto);
        surface_count = 16 + 4;
    }
    qDeleteAll(surfaces);
    surfaces.clear();
    hw_surfaces.clear();
    surfaces.resize(surface_count);
    if (!createDecoder(codec_ctx->codec_id, w, h, surfaces))
        return false;
    hw_surfaces.resize(surface_count);
    for (int i = 0; i < surfaces.size(); ++i) {
        hw_surfaces[i] = surfaces[i]->getSurface();
    }
    surface_order = 0;
    surface_width = aligned(w);
    surface_height = aligned(h);
    setupAVVAContext(avctx); //can not use codec_ctx for threaded mode!
    initUSWC(surface_width);
    return true;
}
Code example #17
// Test if a BMP decoder returns a proper error while decoding an empty image.
TEST(BMPImageDecoderTest, emptyImage)
{
    const char* bmpFile = "/LayoutTests/fast/images/resources/0x0.bmp"; // 0x0
    RefPtr<SharedBuffer> data = readFile(bmpFile);
    ASSERT_TRUE(data.get());

    OwnPtr<ImageDecoder> decoder = createDecoder();
    decoder->setData(data.get(), true);

    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FrameEmpty, frame->status());
    EXPECT_TRUE(decoder->failed());
}
Code example #18
File: newAudioDecode.cpp Project: youzzzwww/waveTest
int decoderIni(int sampleRate, int channel)
{
	prev_sample_rate = sampleRate;
	prev_channel = channel;
	// Initialize the decoder
	handle = createDecoder(channel, sampleRate);
	if (handle < 0)
	{
		printf("create decoder fail\n");
		return -1; // report failure to the caller
	}
	memset(in, 0, IN_BUFF_SIZE * sizeof(char));
	memset(out, 0, OUT_BUFF_SIZE * sizeof(char));
	return 0;
}
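The prev_sample_rate and prev_channel globals suggest the caller re-initializes the decoder when the stream format changes. A hypothetical caller-side sketch (the format check and destroyDecoder() are assumptions, not part of the excerpt):

// Hypothetical: re-create the decoder whenever the stream format changes.
if (sampleRate != prev_sample_rate || channel != prev_channel)
{
	destroyDecoder(handle); // assumed counterpart to createDecoder(); not shown above
	decoderIni(sampleRate, channel);
}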
Code example #19
TEST(AnimatedWebPTests, uniqueGenerationIDs)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> data = readFile("/LayoutTests/fast/images/resources/webp-animated.webp");
    ASSERT_TRUE(data.get());
    decoder->setData(data.get(), true);

    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    uint32_t generationID0 = frame->getSkBitmap().getGenerationID();
    frame = decoder->frameBufferAtIndex(1);
    uint32_t generationID1 = frame->getSkBitmap().getGenerationID();

    EXPECT_TRUE(generationID0 != generationID1);
}
Code example #20
TEST(AnimatedWebPTests,
     verifyAnimationParametersOpaqueFramesTransparentBackground) {
  std::unique_ptr<ImageDecoder> decoder = createDecoder();
  EXPECT_EQ(cAnimationLoopOnce, decoder->repetitionCount());

  RefPtr<SharedBuffer> data =
      readFile("/LayoutTests/images/resources/webp-animated-opaque.webp");
  ASSERT_TRUE(data.get());
  decoder->setData(data.get(), true);

  const int canvasWidth = 94;
  const int canvasHeight = 87;
  const struct AnimParam {
    int xOffset, yOffset, width, height;
    ImageFrame::DisposalMethod disposalMethod;
    ImageFrame::AlphaBlendSource alphaBlendSource;
    unsigned duration;
    bool hasAlpha;
  } frameParameters[] = {
      {4, 10, 33, 32, ImageFrame::DisposeOverwriteBgcolor,
       ImageFrame::BlendAtopPreviousFrame, 1000u, true},
      {34, 30, 33, 32, ImageFrame::DisposeOverwriteBgcolor,
       ImageFrame::BlendAtopPreviousFrame, 1000u, true},
      {62, 50, 32, 32, ImageFrame::DisposeOverwriteBgcolor,
       ImageFrame::BlendAtopPreviousFrame, 1000u, true},
      {10, 54, 32, 33, ImageFrame::DisposeOverwriteBgcolor,
       ImageFrame::BlendAtopPreviousFrame, 1000u, true},
  };

  for (size_t i = 0; i < WTF_ARRAY_LENGTH(frameParameters); ++i) {
    const ImageFrame* const frame = decoder->frameBufferAtIndex(i);
    EXPECT_EQ(ImageFrame::FrameComplete, frame->getStatus());
    EXPECT_EQ(canvasWidth, frame->bitmap().width());
    EXPECT_EQ(canvasHeight, frame->bitmap().height());
    EXPECT_EQ(frameParameters[i].xOffset, frame->originalFrameRect().x());
    EXPECT_EQ(frameParameters[i].yOffset, frame->originalFrameRect().y());
    EXPECT_EQ(frameParameters[i].width, frame->originalFrameRect().width());
    EXPECT_EQ(frameParameters[i].height, frame->originalFrameRect().height());
    EXPECT_EQ(frameParameters[i].disposalMethod, frame->getDisposalMethod());
    EXPECT_EQ(frameParameters[i].alphaBlendSource,
              frame->getAlphaBlendSource());
    EXPECT_EQ(frameParameters[i].duration, frame->duration());
    EXPECT_EQ(frameParameters[i].hasAlpha, frame->hasAlpha());
  }

  EXPECT_EQ(WTF_ARRAY_LENGTH(frameParameters), decoder->frameCount());
  EXPECT_EQ(cAnimationLoopInfinite, decoder->repetitionCount());
}
Code example #21
	HBitmapDecoder CodecLibrary::createDecoder(std::istream& sourceStream){
		char str[100];
		sourceStream.getline(str, 100);
		std::string firstChunk(str);
		for(auto pair : myDecoders)
		{
			if (pair.second->isSupported(firstChunk))
			{
				sourceStream.clear();
				sourceStream.seekg(0);
				return createDecoder(pair.first, sourceStream);
			}
		}
		
		throw std::runtime_error("No matching decoder found");
	}
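A hypothetical use of this sniffing factory; how myDecoders is populated is not shown in the excerpt, so the registration step below is only indicated by a comment:

std::ifstream file("photo.bmp", std::ios::binary);
CodecLibrary library;
// ... decoders are assumed to have been registered into myDecoders beforehand ...
HBitmapDecoder decoder = library.createDecoder(file); // sniffs the first chunk, rewinds, dispatches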
Code example #22
TEST(BMPImageDecoderTest, parseAndDecode)
{
    const char* bmpFile = "/LayoutTests/fast/images/resources/lenna.bmp"; // 256x256
    RefPtr<SharedBuffer> data = readFile(bmpFile);
    ASSERT_TRUE(data.get());

    OwnPtr<ImageDecoder> decoder = createDecoder();
    decoder->setData(data.get(), true);

    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FrameComplete, frame->status());
    EXPECT_EQ(256, frame->getSkBitmap().width());
    EXPECT_EQ(256, frame->getSkBitmap().height());
    EXPECT_FALSE(decoder->failed());
}
Code example #23
File: ImageSourceSkia.cpp Project: Sumxx/src
void ImageSource::setData(SharedBuffer* data, bool allDataReceived)
{
    // Create the decoder by sniffing the bytes: examine the data and
    // instantiate the appropriate decoder plugin. If there are not enough
    // bytes to determine the image type, no decoder plugin is created.
    if (!m_decoder)
        m_decoder = createDecoder(data->buffer(), IntSize());

    // CreateDecoder will return NULL if the decoder could not be created. Plus,
    // we should not send more data to a decoder which has already decided it
    // has failed.
    if (!m_decoder || m_decoder->failed())
        return;
    m_decoder->setData(data, allDataReceived);
}
Code example #24
File: CodecPlugin.cpp Project: KDE/kwave
void Kwave::CodecPlugin::load(QStringList &/* params */)
{
    use();

    m_codec.m_use_count++;
    if (m_codec.m_use_count == 1)
    {
	m_codec.m_encoder = createEncoder();
	if (m_codec.m_encoder)
	    Kwave::CodecManager::registerEncoder(*m_codec.m_encoder);

	m_codec.m_decoder = createDecoder();
	if (m_codec.m_decoder)
	    Kwave::CodecManager::registerDecoder(*m_codec.m_decoder);
    }
}
Code example #25
TEST(AnimatedWebPTests, truncatedInBetweenFrame)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> fullData = readFile("/LayoutTests/fast/images/resources/invalid-animated-webp4.webp");
    ASSERT_TRUE(fullData.get());
    RefPtr<SharedBuffer> data = SharedBuffer::create(fullData->data(), fullData->size() - 1);
    decoder->setData(data.get(), false);

    ImageFrame* frame = decoder->frameBufferAtIndex(1);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FrameComplete, frame->status());
    frame = decoder->frameBufferAtIndex(2);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_TRUE(decoder->failed());
}
Code example #26
File: gst_exporter.cpp Project: studnitz/octopus
QGst::BinPtr GstExporter::createFileSrcBin(const QString path, const int i) {
  qDebug() << "creating filesrc bin, path: " << path << " i: " << i;
  QGst::BinPtr videoBin;

  QDir current = QDir::current();
  current.cd("recordings");
  const QString fullPath = current.absoluteFilePath(path);

  try {
    char* srcname = nameWithIndex("file", i);
    QGst::ElementPtr src = QGst::ElementFactory::make("filesrc", srcname);
    src->setProperty("location", fullPath);

    char* demuxName = nameWithIndex("demux", i);
    QGst::ElementPtr demuxer = QGst::ElementFactory::make("qtdemux", demuxName);
    QGst::BinPtr decoder = createDecoder(i);
    QGst::ElementPtr scale = QGst::ElementFactory::make("videoscale");
    QGst::ElementPtr capsfilter =
        createCapsFilter(elementWidthPx, elementHeightPx);

    char* binname = nameWithIndex("filebin", i);
    videoBin = QGst::Bin::create(binname);

    videoBin->add(src, demuxer, decoder, capsfilter, scale);
    src->link(demuxer);
    videoBin->linkMany(decoder, scale, capsfilter);

    qDebug() << "filesrc bin: Added and linked all elements";

    QGst::PadPtr filterPadSrc = capsfilter->getStaticPad("src");
    videoBin->addPad(QGst::GhostPad::create(filterPadSrc, "src"));

    qDebug() << "filesrc bin: Added Ghostpad to the bin";

    QGlib::connect(demuxer, "pad-added", this, &GstExporter::callbackNewPad,
                   QGlib::PassSender);

  } catch (const QGlib::Error& error) {
    qCritical() << "Failed to create a filesource:" << error;
    return QGst::BinPtr();
  }

  return videoBin;
}
Code example #27
TEST(AnimatedWebPTests, verifyAnimationParametersTransparentImage)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();
    EXPECT_EQ(cAnimationLoopOnce, decoder->repetitionCount());

    RefPtr<SharedBuffer> data = readFile("/LayoutTests/fast/images/resources/webp-animated.webp");
    ASSERT_TRUE(data.get());
    decoder->setData(data.get(), true);

    const int canvasWidth = 11;
    const int canvasHeight = 29;
    const struct AnimParam {
        int xOffset, yOffset, width, height;
        ImageFrame::DisposalMethod disposalMethod;
        ImageFrame::AlphaBlendSource alphaBlendSource;
        unsigned duration;
        bool hasAlpha;
    } frameParameters[] = {
        { 0, 0, 11, 29, ImageFrame::DisposeKeep, ImageFrame::BlendAtopPreviousFrame, 1000u, true },
        { 2, 10, 7, 17, ImageFrame::DisposeKeep, ImageFrame::BlendAtopPreviousFrame, 500u, true },
        { 2, 2, 7, 16, ImageFrame::DisposeKeep, ImageFrame::BlendAtopPreviousFrame, 1000u, true },
    };

    for (size_t i = 0; i < ARRAY_SIZE(frameParameters); ++i) {
        const ImageFrame* const frame = decoder->frameBufferAtIndex(i);
        EXPECT_EQ(ImageFrame::FrameComplete, frame->status());
        EXPECT_EQ(canvasWidth, frame->getSkBitmap().width());
        EXPECT_EQ(canvasHeight, frame->getSkBitmap().height());
        EXPECT_EQ(frameParameters[i].xOffset, frame->originalFrameRect().x());
        EXPECT_EQ(frameParameters[i].yOffset, frame->originalFrameRect().y());
        EXPECT_EQ(frameParameters[i].width, frame->originalFrameRect().width());
        EXPECT_EQ(frameParameters[i].height, frame->originalFrameRect().height());
        EXPECT_EQ(frameParameters[i].disposalMethod, frame->disposalMethod());
        EXPECT_EQ(frameParameters[i].alphaBlendSource, frame->alphaBlendSource());
        EXPECT_EQ(frameParameters[i].duration, frame->duration());
        EXPECT_EQ(frameParameters[i].hasAlpha, frame->hasAlpha());
    }

    EXPECT_EQ(ARRAY_SIZE(frameParameters), decoder->frameCount());
    EXPECT_EQ(cAnimationLoopInfinite, decoder->repetitionCount());
}
Code example #28
TEST(AnimatedWebPTests, truncatedLastFrame)
{
    OwnPtr<WEBPImageDecoder> decoder = createDecoder();

    RefPtr<SharedBuffer> data = readFile("/LayoutTests/fast/images/resources/invalid-animated-webp2.webp");
    ASSERT_TRUE(data.get());
    decoder->setData(data.get(), true);

    size_t frameCount = 8;
    EXPECT_EQ(frameCount, decoder->frameCount());
    ImageFrame* frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FrameComplete, frame->status());
    EXPECT_FALSE(decoder->failed());
    frame = decoder->frameBufferAtIndex(frameCount - 1);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FramePartial, frame->status());
    EXPECT_TRUE(decoder->failed());
    frame = decoder->frameBufferAtIndex(0);
    ASSERT_TRUE(frame);
    EXPECT_EQ(ImageFrame::FrameComplete, frame->status());
}
Code example #29
TEST(AnimatedWEBPTests, clearCacheExceptFrameWithAncestors) {
  std::unique_ptr<ImageDecoder> decoder = createDecoder();

  RefPtr<SharedBuffer> fullData =
      readFile("/LayoutTests/images/resources/webp-animated.webp");
  ASSERT_TRUE(fullData.get());
  decoder->setData(fullData.get(), true);

  ASSERT_EQ(3u, decoder->frameCount());
  // We need to store pointers to the image frames, since calling
  // frameBufferAtIndex will decode the frame if it is not FrameComplete,
  // and we want to read the status of the frame without decoding it again.
  ImageFrame* buffers[3];
  size_t bufferSizes[3];
  for (size_t i = 0; i < decoder->frameCount(); i++) {
    buffers[i] = decoder->frameBufferAtIndex(i);
    ASSERT_EQ(ImageFrame::FrameComplete, buffers[i]->getStatus());
    bufferSizes[i] = decoder->frameBytesAtIndex(i);
  }

  // Explicitly set the required previous frame for the frames, since this test
  // is designed on this chain. Whether the frames actually depend on each
  // other is not important for this test - clearCacheExceptFrame just looks at
  // the frame status and the required previous frame.
  buffers[1]->setRequiredPreviousFrameIndex(0);
  buffers[2]->setRequiredPreviousFrameIndex(1);

  // Clear the cache except for a single frame. All other frames should be
  // cleared to FrameEmpty, since this frame is FrameComplete.
  EXPECT_EQ(bufferSizes[0] + bufferSizes[2], decoder->clearCacheExceptFrame(1));
  EXPECT_EQ(ImageFrame::FrameEmpty, buffers[0]->getStatus());
  EXPECT_EQ(ImageFrame::FrameComplete, buffers[1]->getStatus());
  EXPECT_EQ(ImageFrame::FrameEmpty, buffers[2]->getStatus());

  // Verify that the required previous frame is also preserved if the provided
  // frame is not FrameComplete. The simulated situation is:
  //
  // Frame 0          <---------    Frame 1         <---------    Frame 2
  // FrameComplete    depends on    FrameComplete   depends on    FramePartial
  //
  // The expected outcome is that frame 1 and frame 2 are preserved, since
  // frame 1 is necessary to fully decode frame 2.
  for (size_t i = 0; i < decoder->frameCount(); i++) {
    ASSERT_EQ(ImageFrame::FrameComplete,
              decoder->frameBufferAtIndex(i)->getStatus());
  }
  buffers[2]->setStatus(ImageFrame::FramePartial);
  EXPECT_EQ(bufferSizes[0], decoder->clearCacheExceptFrame(2));
  EXPECT_EQ(ImageFrame::FrameEmpty, buffers[0]->getStatus());
  EXPECT_EQ(ImageFrame::FrameComplete, buffers[1]->getStatus());
  EXPECT_EQ(ImageFrame::FramePartial, buffers[2]->getStatus());

  // Verify that the nearest FrameComplete required frame is preserved if
  // earlier required frames in the ancestor list are not FrameComplete. The
  // simulated situation is:
  //
  // Frame 0          <---------    Frame 1      <---------    Frame 2
  // FrameComplete    depends on    FrameEmpty   depends on    FramePartial
  //
  // The expected outcome is that frame 0 and frame 2 are preserved. Frame 2
  // should be preserved since it is the frame passed to clearCacheExceptFrame.
  // Frame 0 should be preserved since it is the nearest FrameComplete ancestor.
  // Thus, since frame 1 is FrameEmpty, no data is cleared in this case.
  for (size_t i = 0; i < decoder->frameCount(); i++) {
    ASSERT_EQ(ImageFrame::FrameComplete,
              decoder->frameBufferAtIndex(i)->getStatus());
  }
  buffers[1]->setStatus(ImageFrame::FrameEmpty);
  buffers[2]->setStatus(ImageFrame::FramePartial);
  EXPECT_EQ(0u, decoder->clearCacheExceptFrame(2));
  EXPECT_EQ(ImageFrame::FrameComplete, buffers[0]->getStatus());
  EXPECT_EQ(ImageFrame::FrameEmpty, buffers[1]->getStatus());
  EXPECT_EQ(ImageFrame::FramePartial, buffers[2]->getStatus());
}