rs2::frame hole_filling_filter::prepare_target_frame(const rs2::frame& f, const rs2::frame_source& source)
    {
        // Allocate and copy the content of the input data to the target
        rs2::frame tgt = source.allocate_video_frame(_target_stream_profile, f, int(_bpp), int(_width), int(_height), int(_stride), _extension_type);

        memmove(const_cast<void*>(tgt.get_data()), f.get_data(), _current_frm_size_pixels * _bpp);
        return tgt;
    }
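A self-contained sketch of how an allocate-and-copy helper like the one above is typically driven through a custom rs2::processing_block. The wrapper name (passthrough_copy) and the synchronous queue round-trip are illustrative assumptions, not library code:

    #include <librealsense2/rs.hpp>
    #include <cstring>

    rs2::frame passthrough_copy(const rs2::frame& f)
    {
        rs2::frame_queue q;
        rs2::processing_block block([](rs2::frame in, const rs2::frame_source& src)
        {
            auto vf = in.as<rs2::video_frame>();
            // Allocate a target frame with the same profile and geometry as the input
            rs2::frame tgt = src.allocate_video_frame(
                in.get_profile(), in,
                vf.get_bytes_per_pixel(),
                vf.get_width(), vf.get_height(),
                vf.get_stride_in_bytes(),
                RS2_EXTENSION_VIDEO_FRAME);
            // Copy the payload row-for-row (stride * height covers the whole buffer)
            std::memcpy(const_cast<void*>(tgt.get_data()), in.get_data(),
                        size_t(vf.get_stride_in_bytes()) * vf.get_height());
            src.frame_ready(tgt); // hand the result back to the source
        });
        block.start(q);  // route produced frames into the queue
        block.invoke(f); // process this frame synchronously
        return q.wait_for_frame();
    }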
Example #2
    rs2::frame align::allocate_aligned_frame(const rs2::frame_source& source, const rs2::video_frame& from, const rs2::video_frame& to)
    {
        rs2::frame rv;
        auto from_bytes_per_pixel = from.get_bytes_per_pixel();

        auto from_profile = from.get_profile().as<rs2::video_stream_profile>();
        auto to_profile = to.get_profile().as<rs2::video_stream_profile>();

        auto aligned_profile = create_aligned_profile(from_profile, to_profile);

        auto ext = from.is<rs2::depth_frame>() ? RS2_EXTENSION_DEPTH_FRAME : RS2_EXTENSION_VIDEO_FRAME;

        rv = source.allocate_video_frame(
            *aligned_profile,
            from,
            from_bytes_per_pixel,
            to.get_width(),
            to.get_height(),
            to.get_width() * from_bytes_per_pixel,
            ext);
        return rv;
    }
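The stride argument above assumes tightly packed rows in the target frame. A mini-check with illustrative values (not taken from the library), aligning a Z16 depth frame to a 1280x720 color stream:

    // Illustrative values only:
    constexpr int bpp    = 2;            // bytes per pixel for RS2_FORMAT_Z16
    constexpr int width  = 1280;         // to.get_width()
    constexpr int height = 720;          // to.get_height()
    constexpr int stride = width * bpp;  // 2560 bytes per row, tightly packed
    static_assert(stride == 2560, "stride = width * bytes-per-pixel");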
Example #3
    rs2::frame yuy2rgb::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        auto p = f.get_profile();
        if (p.get() != _source_stream_profile.get())
        {
            _source_stream_profile = p;
            _target_stream_profile = p.clone(p.stream_type(), p.stream_index(), RS2_FORMAT_RGB8);
        }
        
        rs2::frame ret;

        auto vf = f.as<rs2::video_frame>();
        ret = source.allocate_video_frame(_target_stream_profile, f, _target_bpp,
            vf.get_width(), vf.get_height(), vf.get_width() * _target_bpp, RS2_EXTENSION_VIDEO_FRAME);

        byte* planes[1];
        planes[0] = (byte*)ret.get_data();

        unpack_yuy2_rgb8(planes, (const byte*)f.get_data(), vf.get_width(), vf.get_height());

        return ret;
    }
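Newer SDK releases expose this conversion as the stock rs2::yuy_decoder filter. A minimal usage sketch, assuming a librealsense version that ships that filter:

    rs2::pipeline pipe;
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_COLOR, RS2_FORMAT_YUYV); // request raw YUY2
    pipe.start(cfg);

    rs2::yuy_decoder yuy_to_rgb;
    auto frames = pipe.wait_for_frames();
    rs2::frame rgb = yuy_to_rgb.process(frames.get_color_frame()); // RGB8 output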
Example #4
    rs2::frame align::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        rs2::frame rv;
        std::vector<rs2::frame> output_frames;
        std::vector<rs2::frame> other_frames;

        auto frames = f.as<rs2::frameset>();
        auto depth = frames.first_or_default(RS2_STREAM_DEPTH, RS2_FORMAT_Z16).as<rs2::depth_frame>();

        // Depth units (meters per raw unit), queried via the internal librealsense::depth_frame
        _depth_scale = ((librealsense::depth_frame*)depth.get())->get_units();

        if (_to_stream_type == RS2_STREAM_DEPTH)
            frames.foreach([&other_frames](const rs2::frame& f)
            {
                // Collect every non-depth frame; each will be aligned to the depth viewport
                if (f.get_profile().stream_type() != RS2_STREAM_DEPTH)
                    other_frames.push_back(f);
            });
        else
            frames.foreach([this, &other_frames](const rs2::frame& f)
            {
                // Collect the frame(s) of the stream the depth will be aligned to
                if (f.get_profile().stream_type() == _to_stream_type)
                    other_frames.push_back(f);
            });

        if (_to_stream_type == RS2_STREAM_DEPTH)
        {
            for (auto from : other_frames)
            {
                auto aligned_frame = allocate_aligned_frame(source, from, depth);
                align_frames(aligned_frame, from, depth);
                output_frames.push_back(aligned_frame);
            }
        }
        else
        {
            auto to = other_frames.front();
            auto aligned_frame = allocate_aligned_frame(source, depth, to);
            align_frames(aligned_frame, depth, to);
            output_frames.push_back(aligned_frame);
        }

        auto new_composite = source.allocate_composite_frame(std::move(output_frames));
        return new_composite;
    }
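From application code, this logic is reached through the public rs2::align filter. A minimal sketch of aligning depth into the color stream:

    rs2::pipeline pipe;
    pipe.start();

    rs2::align align_to_color(RS2_STREAM_COLOR);
    rs2::frameset frames = pipe.wait_for_frames();
    rs2::frameset aligned = align_to_color.process(frames);
    rs2::depth_frame aligned_depth = aligned.get_depth_frame(); // depth mapped into color pixels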
Example #5
    rs2::points pointcloud::allocate_points(const rs2::frame_source& source, const rs2::frame& depth)
    {
        return source.allocate_points(_output_stream, depth);
    }
Example #6

    rs2::frame disparity_transform::prepare_target_frame(const rs2::frame& f, const rs2::frame_source& source)
    {
        return source.allocate_video_frame(_target_stream_profile, f, int(_bpp), int(_width), int(_height), int(_width * _bpp),
            _transform_to_disparity ? RS2_EXTENSION_DISPARITY_FRAME : RS2_EXTENSION_DEPTH_FRAME);
    }
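The corresponding public filter is rs2::disparity_transform; its boolean constructor argument selects the direction, matching _transform_to_disparity above. A minimal round-trip sketch ('depth' is assumed to be an rs2::depth_frame obtained elsewhere):

    rs2::disparity_transform depth_to_disparity(true);  // depth -> disparity
    rs2::disparity_transform disparity_to_depth(false); // disparity -> depth

    rs2::frame disparity = depth_to_disparity.process(depth);
    rs2::frame restored  = disparity_to_depth.process(disparity);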
Example #7
    rs2::frame colorizer::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        if (f.get_profile().get() != _source_stream_profile.get())
        {
            _source_stream_profile = f.get_profile();
            _target_stream_profile = f.get_profile().clone(RS2_STREAM_DEPTH, 0, RS2_FORMAT_RGB8);
        }
        auto make_equalized_histogram = [this](const rs2::video_frame& depth, rs2::video_frame rgb)
        {
            const auto max_depth = 0x10000;
            static uint32_t histogram[max_depth];
            memset(histogram, 0, sizeof(histogram));

            const auto w = depth.get_width(), h = depth.get_height();
            const auto depth_data = reinterpret_cast<const uint16_t*>(depth.get_data());
            auto rgb_data = reinterpret_cast<uint8_t*>(const_cast<void *>(rgb.get_data()));

            for (auto i = 0; i < w*h; ++i) ++histogram[depth_data[i]];
            for (auto i = 2; i < max_depth; ++i) histogram[i] += histogram[i - 1]; // Build a cumulative histogram for the indices in [1,0xFFFF]
            auto cm = _maps[_map_index];
            for (auto i = 0; i < w*h; ++i)
            {
                auto d = depth_data[i];

                if (d)
                {
                    auto f = histogram[d] / (float)histogram[0xFFFF]; // Normalized cumulative-histogram value in [0,1]

                    auto c = cm->get(f);
                    rgb_data[i * 3 + 0] = (uint8_t)c.x;
                    rgb_data[i * 3 + 1] = (uint8_t)c.y;
                    rgb_data[i * 3 + 2] = (uint8_t)c.z;
                }
                else
                {
                    rgb_data[i * 3 + 0] = 0;
                    rgb_data[i * 3 + 1] = 0;
                    rgb_data[i * 3 + 2] = 0;
                }
            }
        };

        auto make_value_cropped_frame = [this](const rs2::video_frame& depth, rs2::video_frame rgb)
        {
            const auto max_depth = 0x10000;
            const auto w = depth.get_width(), h = depth.get_height();
            const auto depth_data = reinterpret_cast<const uint16_t*>(depth.get_data());
            auto rgb_data = reinterpret_cast<uint8_t*>(const_cast<void *>(rgb.get_data()));

            // Recover the depth units through the internal frame interface
            auto fi = (frame_interface*)depth.get();
            auto df = dynamic_cast<librealsense::depth_frame*>(fi);
            auto depth_units = df->get_units();

            for (auto i = 0; i < w*h; ++i)
            {
                auto d = depth_data[i];

                if (d)
                {
                    auto f = (d * depth_units - _min) / (_max - _min);

                    auto c = _maps[_map_index]->get(f);
                    rgb_data[i * 3 + 0] = (uint8_t)c.x;
                    rgb_data[i * 3 + 1] = (uint8_t)c.y;
                    rgb_data[i * 3 + 2] = (uint8_t)c.z;
                }
                else
                {
                    rgb_data[i * 3 + 0] = 0;
                    rgb_data[i * 3 + 1] = 0;
                    rgb_data[i * 3 + 2] = 0;
                }
            }
        };
        rs2::frame ret;

        auto vf = f.as<rs2::video_frame>();
        //rs2_extension ext = f.is<rs2::disparity_frame>() ? RS2_EXTENSION_DISPARITY_FRAME : RS2_EXTENSION_DEPTH_FRAME;
        ret = source.allocate_video_frame(_target_stream_profile, f, 3, vf.get_width(), vf.get_height(), vf.get_width() * 3, RS2_EXTENSION_VIDEO_FRAME);

        if (_equalize)
            make_equalized_histogram(f, ret);
        else
            make_value_cropped_frame(f, ret);

        return ret;
    }
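For reference, the same colorization is available to applications through the stock rs2::colorizer filter, which wraps the process_frame logic above. A minimal sketch:

    rs2::pipeline pipe;
    pipe.start();
    rs2::colorizer color_map; // histogram equalization is enabled by default

    rs2::frameset frames = pipe.wait_for_frames();
    rs2::frame colorized = color_map.process(frames.get_depth_frame()); // RGB8 frame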