Code Example #1
File: gfx.cpp  Project: RobLoach/scummvm
void OSystem_Android::initTexture(GLESBaseTexture **texture,
                                  uint width, uint height,
                                  const Graphics::PixelFormat *format) {
    assert(texture);
    Graphics::PixelFormat format_clut8 =
        Graphics::PixelFormat::createFormatCLUT8();
    Graphics::PixelFormat format_current;
    Graphics::PixelFormat format_new;

    if (*texture)
        format_current = (*texture)->getPixelFormat();
    else
        format_current = Graphics::PixelFormat();

    if (format)
        format_new = *format;
    else
        format_new = format_clut8;

    // Rebuild the texture only when the requested pixel format differs from the current one.
    if (format_current != format_new) {
        if (*texture)
            LOGD("switching pixel format from: %s",
                 getPixelFormatName((*texture)->getPixelFormat()).c_str());

        delete *texture;

        // Pick the concrete GLES texture implementation matching the requested format.
        if (format_new == GLES565Texture::pixelFormat())
            *texture = new GLES565Texture();
        else if (format_new == GLES5551Texture::pixelFormat())
            *texture = new GLES5551Texture();
        else if (format_new == GLES8888Texture::pixelFormat())
            *texture = new GLES8888Texture();
        else if (format_new == GLES4444Texture::pixelFormat())
            *texture = new GLES4444Texture();
        else {
            // TODO what now?
            if (format_new != format_clut8)
                LOGE("unsupported pixel format: %s",
                     getPixelFormatName(format_new).c_str());

            // Unsupported formats and CLUT8 fall back to a palette texture emulated on RGB565.
            *texture = new GLESFakePalette565Texture();
        }

        LOGD("new pixel format: %s",
             getPixelFormatName((*texture)->getPixelFormat()).c_str());
    }

    (*texture)->allocBuffer(width, height);
}
Code Example #2
VideoFrameDesc VideoCodec::getVideoFrameDesc() const
{
    assert(_avCodecContext != NULL);
    VideoFrameDesc videoFrameDesc(_avCodecContext->width, _avCodecContext->height, getPixelFormatName(_avCodecContext->pix_fmt));
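    // Derive the frame rate from the codec time base: den / (num * ticks_per_frame).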
    double fps = 1.0 * _avCodecContext->time_base.den / (_avCodecContext->time_base.num * _avCodecContext->ticks_per_frame);
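    // Only store the value when it is finite (a zero time_base numerator would yield +inf).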
    if(!std::isinf(fps))
        videoFrameDesc._fps = fps;
    return videoFrameDesc;
}
Code Example #3
void FilterGraph::addInBuffer(const std::vector<IFrame*>& inputs)
{
    for(std::vector<IFrame*>::const_reverse_iterator it = inputs.rbegin(); it != inputs.rend(); ++it)
    {
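        // Iterate in reverse and insert each buffer filter at the front, so the filters
        // end up in the same order as the input frames.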
        std::string filterName;
        std::stringstream filterOptions;
        // audio frame
        if((*it)->isAudioFrame())
        {
            filterName = "abuffer";
            const AudioFrame* audioFrame = dynamic_cast<const AudioFrame*>(*it);
            filterOptions << "time_base=" << _codec.getAVCodecContext().time_base.num << "/"
                          << _codec.getAVCodecContext().time_base.den << ":";
            filterOptions << "sample_rate=" << audioFrame->getSampleRate() << ":";
            filterOptions << "sample_fmt=" << getSampleFormatName(audioFrame->getSampleFormat()) << ":";
            filterOptions << "channel_layout=0x" << std::hex << audioFrame->getChannelLayout();

            const AudioFrameDesc audioFrameDesc(audioFrame->getSampleRate(),
                                                audioFrame->getNbChannels(),
                                                getSampleFormatName(audioFrame->getSampleFormat()));
            _inputAudioFrameBuffers.insert(_inputAudioFrameBuffers.begin(), AudioFrameBuffer(audioFrameDesc));
        }
        // video frame
        else if((*it)->isVideoFrame())
        {
            filterName = "buffer";
            const VideoFrame* videoFrame = dynamic_cast<const VideoFrame*>(*it);
            filterOptions << "video_size=" << videoFrame->getWidth() << "x" << videoFrame->getHeight() << ":";
            filterOptions << "pix_fmt=" << getPixelFormatName(videoFrame->getPixelFormat()) << ":";
            filterOptions << "time_base=" << _codec.getAVCodecContext().time_base.num << "/"
                          << _codec.getAVCodecContext().time_base.den << ":";
            filterOptions << "pixel_aspect=" << _codec.getAVCodecContext().sample_aspect_ratio.num << "/"
                          << _codec.getAVCodecContext().sample_aspect_ratio.den;
        }
        // invalid frame
        else
            throw std::runtime_error("Cannot create input buffer of filter graph: the given frame is invalid.");

        // add in buffer
        LOG_INFO("Add filter '" << filterName << "' at the beginning of the graph.")
        _filters.insert(_filters.begin(), new Filter(filterName, filterOptions.str(), "in"));
    }
}