Example #1
    rs2::frame pointcloud::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        rs2::frame rv;
        if (auto composite = f.as<rs2::frameset>())
        {
            auto texture = composite.first(_stream_filter.stream);
            inspect_other_frame(texture);

            auto depth = composite.first(RS2_STREAM_DEPTH, RS2_FORMAT_Z16);
            inspect_depth_frame(depth);
            rv = process_depth_frame(source, depth);
        }
        else
        {
            if (f.is<rs2::depth_frame>())
            {
                inspect_depth_frame(f);
                rv = process_depth_frame(source, f);
            }
            if (f.get_profile().stream_type() == _stream_filter.stream && f.get_profile().format() == _stream_filter.format)
            {
                inspect_other_frame(f);
            }
        }
        return rv;
    }
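The internal process_frame above is what runs when the public rs2::pointcloud block is invoked. A minimal usage sketch against the public API (assumes a started rs2::pipeline named pipe; variable names are illustrative):

    rs2::pointcloud pc;
    rs2::frameset frames = pipe.wait_for_frames();
    rs2::video_frame color = frames.get_color_frame();
    rs2::depth_frame depth = frames.get_depth_frame();

    pc.map_to(color);                          // selects the texture stream (_stream_filter above)
    rs2::points points = pc.calculate(depth);  // drives the block's process_frame path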
Example #2
    void pointcloud::inspect_other_frame(const rs2::frame& other)
    {
        if (_stream_filter != _prev_stream_filter)
        {
            _prev_stream_filter = _stream_filter;
        }

        if (_extrinsics.has_value() && other.get_profile().get() == _other_stream.get_profile().get())
            return;

        _other_stream = other;
        _other_intrinsics = optional_value<rs2_intrinsics>();
        _extrinsics = optional_value<rs2_extrinsics>();

        if (!_other_intrinsics)
        {
            auto stream_profile = _other_stream.get_profile();
            if (auto video = stream_profile.as<rs2::video_stream_profile>())
            {
                _other_intrinsics = video.get_intrinsics();
                _occlusion_filter->set_texel_intrinsics(_other_intrinsics.value());
            }
        }

        set_extrinsics();
    }
Example #3
    void operator()(int size, const rs2::frame& rs_matrix, char* max_matrix)
    {
      const auto image = (const uint8_t *)(rs_matrix.get_data());
      auto matrix_out = (char*)(max_matrix);

      std::copy(image, image + size, matrix_out);
    }
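A functor like this is typically hung off a custom rs2::processing_block. A hedged sketch of such a wrapper (the pass-through body is illustrative, not the snippet's actual harness; a real block would allocate an output frame via src.allocate_video_frame and copy into it as above):

    rs2::frame_queue q;
    rs2::processing_block pb([](rs2::frame f, const rs2::frame_source& src)
    {
        // Pass the frame through unchanged.
        src.frame_ready(f);
    });
    pb.start(q);       // route results into the queue

    pb.invoke(f);      // feed a frame in
    rs2::frame out = q.wait_for_frame();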
Example #4
    rs2::frame hole_filling_filter::prepare_target_frame(const rs2::frame& f, const rs2::frame_source& source)
    {
        // Allocate and copy the content of the input data to the target
        rs2::frame tgt = source.allocate_video_frame(_target_stream_profile, f, int(_bpp), int(_width), int(_height), int(_stride), _extension_type);

        memmove(const_cast<void*>(tgt.get_data()), f.get_data(), _current_frm_size_pixels * _bpp);
        return tgt;
    }
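prepare_target_frame only allocates and seeds the output buffer; from user code the whole block is driven through the public filter interface. A short sketch (the option value assumes the documented 0-2 fill modes; depth is an rs2::depth_frame):

    rs2::hole_filling_filter hole_filter;
    hole_filter.set_option(RS2_OPTION_HOLES_FILL, 1); // 0 = fill from left, 1 = farthest from around, 2 = nearest from around
    rs2::frame filled = hole_filter.process(depth);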
Example #5
    bool disparity_transform::should_process(const rs2::frame& frame)
    {
        if (!frame)
            return false;

        if (frame.is<rs2::frameset>())
            return false;

        if (_transform_to_disparity && (frame.get_profile().stream_type() != RS2_STREAM_DEPTH || frame.get_profile().format() != RS2_FORMAT_Z16))
            return false;

        if (!_transform_to_disparity && (frame.get_profile().stream_type() != RS2_STREAM_DEPTH ||
            (frame.get_profile().format() != RS2_FORMAT_DISPARITY16 && frame.get_profile().format() != RS2_FORMAT_DISPARITY32)))
            return false;

        if (frame.is<rs2::disparity_frame>() == _transform_to_disparity)
            return false;

        return true;
    }
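should_process gates the depth<->disparity conversion; the public entry point is rs2::disparity_transform. A round-trip sketch (depth is assumed to be a Z16 depth frame):

    rs2::disparity_transform depth_to_disparity(true);   // Z16 -> DISPARITY32
    rs2::disparity_transform disparity_to_depth(false);  // DISPARITY32 -> Z16

    rs2::frame disparity  = depth_to_disparity.process(depth);
    rs2::frame depth_back = disparity_to_depth.process(disparity);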
Example #6
void compare_frame_md(rs2::frame origin_depth, rs2::frame result_depth)
{
    for (auto i = 0; i < rs2_frame_metadata_value::RS2_FRAME_METADATA_COUNT; i++)
    {
        bool origin_supported = origin_depth.supports_frame_metadata((rs2_frame_metadata_value)i);
        bool result_supported = result_depth.supports_frame_metadata((rs2_frame_metadata_value)i);
        REQUIRE(origin_supported == result_supported);
        if (origin_supported && result_supported)
        {
            // FRAME_TIMESTAMP and SENSOR_TIMESTAMP metadata are not included in post-processing frames;
            // TIME_OF_ARRIVAL continues to increase after post-processing.
            if (i == RS2_FRAME_METADATA_FRAME_TIMESTAMP ||
                i == RS2_FRAME_METADATA_SENSOR_TIMESTAMP ||
                i == RS2_FRAME_METADATA_TIME_OF_ARRIVAL) continue;
            rs2_metadata_type origin_val = origin_depth.get_frame_metadata((rs2_frame_metadata_value)i);
            rs2_metadata_type result_val = result_depth.get_frame_metadata((rs2_frame_metadata_value)i);
            REQUIRE(origin_val == result_val);
        }
    }
}
Example #7
bool validate_ppf_results(rs2::frame origin_depth, rs2::frame result_depth, const ppf_test_config& reference_data, size_t frame_idx)
{
    std::vector<uint16_t> diff2orig;
    std::vector<uint16_t> diff2ref;

    // Basic sanity scenario with no filters applied:
    // validate the domain-transform in/out conversion, requiring input == output.
    bool domain_transform_only = (reference_data.downsample_scale == 1) &&
        (!reference_data.spatial_filter) && (!reference_data.temporal_filter);

    auto result_profile = result_depth.get_profile().as<rs2::video_stream_profile>();
    REQUIRE(result_profile);
    CAPTURE(result_profile.width());
    CAPTURE(result_profile.height());

    REQUIRE(result_profile.width() == reference_data.output_res_x);
    REQUIRE(result_profile.height() == reference_data.output_res_y);

    auto pixels = result_profile.width()*result_profile.height();
    diff2ref.resize(pixels);
    if (domain_transform_only)
        diff2orig.resize(pixels);

    // Pixel-by-pixel comparison of the resulting filtered depth vs. data recorded with an external tool
    auto v1 = reinterpret_cast<const uint16_t*>(result_depth.get_data());
    auto v2 = reinterpret_cast<const uint16_t*>(reference_data._output_frames[frame_idx].data());

    for (auto i = 0; i < pixels; i++)
    {
        uint16_t diff = std::abs(*v1++ - *v2++);
        diff2ref[i] = diff;
    }

    // Validate that the depth<->disparity domain transformation is lossless.
    if (domain_transform_only)
        REQUIRE(profile_diffs("./DomainTransform.txt",diff2orig, 0, 0, frame_idx));

    // Validate the filters
    // The differences between the reference code and librealsense implementation are byte-compared below
    return profile_diffs("./Filterstransform.txt", diff2ref, 0.f, 0, frame_idx);
}
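compare_frame_md and validate_ppf_results are Catch2 test helpers. A hypothetical driver loop (filter, test_cfg, and input_frames are assumptions about the surrounding test fixture, not part of the snippet):

    for (size_t i = 0; i < input_frames.size(); ++i)
    {
        rs2::frame processed = filter.process(input_frames[i]); // filter under test
        compare_frame_md(input_frames[i], processed);
        REQUIRE(validate_ppf_results(input_frames[i], processed, test_cfg, i));
    }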
Example #8
    void hole_filling_filter::update_configuration(const rs2::frame& f)
    {
        if (f.get_profile().get() != _source_stream_profile.get())
        {
            _source_stream_profile = f.get_profile();
            _target_stream_profile = _source_stream_profile.clone(RS2_STREAM_DEPTH, 0, _source_stream_profile.format());

            environment::get_instance().get_extrinsics_graph().register_same_extrinsics(
                *(stream_interface*)(f.get_profile().get()->profile),
                *(stream_interface*)(_target_stream_profile.get()->profile));

            _extension_type = f.is<rs2::disparity_frame>() ? RS2_EXTENSION_DISPARITY_FRAME : RS2_EXTENSION_DEPTH_FRAME;
            _bpp = (_extension_type == RS2_EXTENSION_DISPARITY_FRAME) ? sizeof(float) : sizeof(uint16_t);
            auto vp = _target_stream_profile.as<rs2::video_stream_profile>();
            _width = vp.width();
            _height = vp.height();
            _stride = _width * _bpp;
            _current_frm_size_pixels = _width * _height;

        }
    }
Example #9
    bool pointcloud::should_process(const rs2::frame& frame)
    {
        if (!frame)
            return false;

        auto set = frame.as<rs2::frameset>();

        if (set)
        {
            //process composite frame only if it contains both a depth frame and the requested texture frame
            if (_stream_filter.stream == RS2_STREAM_ANY)
                return false;

            auto tex = set.first_or_default(_stream_filter.stream, _stream_filter.format);
            if (!tex)
                return false;
            auto depth = set.first_or_default(RS2_STREAM_DEPTH, RS2_FORMAT_Z16);
            if (!depth)
                return false;
        }
        else
        {
            if (frame.get_profile().stream_type() == RS2_STREAM_DEPTH && frame.get_profile().format() == RS2_FORMAT_Z16)
                return true;

            auto p = frame.get_profile();
            if (p.stream_type() == _stream_filter.stream && p.format() == _stream_filter.format && p.stream_index() == _stream_filter.index)
                return true;
            return false;

            //TODO: switch to this code when map_to api is removed
            //if (_stream_filter != RS2_STREAM_ANY)
            //    return false;
            //process single frame only if it is a depth frame
            //if (frame.get_profile().stream_type() != RS2_STREAM_DEPTH || frame.get_profile().format() != RS2_FORMAT_Z16)
            //    return false;
        }

        return true;
    }
Example #10
    void pointcloud::inspect_depth_frame(const rs2::frame& depth)
    {
        if (!_output_stream || _depth_stream.get_profile().get() != depth.get_profile().get())
        {
            _output_stream = depth.get_profile().as<rs2::video_stream_profile>().clone(
                RS2_STREAM_DEPTH, depth.get_profile().stream_index(), RS2_FORMAT_XYZ32F);
            _depth_stream = depth;
            _depth_intrinsics = optional_value<rs2_intrinsics>();
            _depth_units = optional_value<float>();
            _extrinsics = optional_value<rs2_extrinsics>();
        }

        bool found_depth_intrinsics = false;
        bool found_depth_units = false;

        if (!_depth_intrinsics)
        {
            auto stream_profile = depth.get_profile();
            if (auto video = stream_profile.as<rs2::video_stream_profile>())
            {
                _depth_intrinsics = video.get_intrinsics();
                _pixels_map.resize(_depth_intrinsics->height*_depth_intrinsics->width);
                _occlusion_filter->set_depth_intrinsics(_depth_intrinsics.value());

                preprocess();

                found_depth_intrinsics = true;
            }
        }

        if (!_depth_units)
        {
            auto sensor = ((frame_interface*)depth.get())->get_sensor().get();
            _depth_units = sensor->get_option(RS2_OPTION_DEPTH_UNITS).query();
            found_depth_units = true;
        }

        set_extrinsics();
    }
Example #11
    rs2::frame yuy2rgb::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        auto p = f.get_profile();
        if (p.get() != _source_stream_profile.get())
        {
            _source_stream_profile = p;
            _target_stream_profile = p.clone(p.stream_type(), p.stream_index(), RS2_FORMAT_RGB8);
        }
        
        rs2::frame ret;

        auto vf = f.as<rs2::video_frame>();
        ret = source.allocate_video_frame(_target_stream_profile, f, _traget_bpp, 
            vf.get_width(), vf.get_height(), vf.get_width() * _traget_bpp, RS2_EXTENSION_VIDEO_FRAME);

        byte* planes[1];
        planes[0] = (byte*)ret.get_data();

        unpack_yuy2_rgb8(planes, (const byte*)f.get_data(), vf.get_width(), vf.get_height());

        return ret;
    }
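Recent SDK releases expose this converter as the rs2::yuy_decoder filter (check your librealsense version; the usage below is a sketch, with yuyv_frame standing in for any RS2_FORMAT_YUYV video frame):

    rs2::yuy_decoder yuy_to_rgb;
    rs2::frame rgb = yuy_to_rgb.process(yuyv_frame); // YUYV -> RGB8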
Example #12
    rs2::frame align::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        rs2::frame rv;
        std::vector<rs2::frame> output_frames;
        std::vector<rs2::frame> other_frames;

        auto frames = f.as<rs2::frameset>();
        auto depth = frames.first_or_default(RS2_STREAM_DEPTH, RS2_FORMAT_Z16).as<rs2::depth_frame>();

        _depth_scale = ((librealsense::depth_frame*)depth.get())->get_units();

        if (_to_stream_type == RS2_STREAM_DEPTH)
            frames.foreach([&other_frames](const rs2::frame& f) {if (f.get_profile().stream_type() != RS2_STREAM_DEPTH) other_frames.push_back(f); });
        else
            frames.foreach([this, &other_frames](const rs2::frame& f) {if (f.get_profile().stream_type() == _to_stream_type) other_frames.push_back(f); });

        if (_to_stream_type == RS2_STREAM_DEPTH)
        {
            for (auto from : other_frames)
            {
                auto aligned_frame = allocate_aligned_frame(source, from, depth);
                align_frames(aligned_frame, from, depth);
                output_frames.push_back(aligned_frame);
            }
        }
        else
        {
            auto to = other_frames.front();
            auto aligned_frame = allocate_aligned_frame(source, depth, to);
            align_frames(aligned_frame, depth, to);
            output_frames.push_back(aligned_frame);
        }

        auto new_composite = source.allocate_composite_frame(std::move(output_frames));
        return new_composite;
    }
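From user code this runs through the public rs2::align filter, which accepts a whole frameset. A minimal sketch (pipe is a started rs2::pipeline):

    rs2::align align_to_color(RS2_STREAM_COLOR);

    rs2::frameset frames  = pipe.wait_for_frames();
    rs2::frameset aligned = align_to_color.process(frames);
    rs2::depth_frame aligned_depth = aligned.get_depth_frame(); // depth reprojected onto the color stream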
Example #13
    bool align::should_process(const rs2::frame& frame)
    {
        if (!frame)
            return false;

        auto set = frame.as<rs2::frameset>();
        if (!set)
            return false;

        auto profile = frame.get_profile();
        rs2_stream stream = profile.stream_type();
        rs2_format format = profile.format();
        int index = profile.stream_index();

        //process composite frame only if it contains both a depth frame and the requested texture frame
        bool has_tex = false, has_depth = false;
        set.foreach([this, &has_tex](const rs2::frame& frame) { if (frame.get_profile().stream_type() == _to_stream_type) has_tex = true; });
        set.foreach([&has_depth](const rs2::frame& frame) 
            { if (frame.get_profile().stream_type() == RS2_STREAM_DEPTH && frame.get_profile().format() == RS2_FORMAT_Z16) has_depth = true; });
        if (!has_tex || !has_depth)
            return false;

        return true;
    }
Example #14
    void disparity_transform::update_transformation_profile(const rs2::frame& f)
    {
        if(f.get_profile().get() != _source_stream_profile.get())
        {
            _source_stream_profile = f.get_profile();

            // Check whether the new frame originated from a stereo-based depth sensor
            // and retrieve the stereo baseline parameter used in the transformations
            auto snr = ((frame_interface*)f.get())->get_sensor().get();
            librealsense::depth_stereo_sensor* dss = nullptr;

            // Playback sensor
            if (auto a = As<librealsense::extendable_interface>(snr))
            {
                librealsense::depth_stereo_sensor* ptr;
                if (_stereoscopic_depth = a->extend_to(TypeToExtension<librealsense::depth_stereo_sensor>::value, (void**)&ptr))
                {
                    dss = ptr;
                    _depth_units = dss->get_depth_scale();
                    _stereo_baseline_meter = dss->get_stereo_baseline_mm()*0.001f;
                }
            }
            else // Live sensor
            {
                _stereoscopic_depth = Is<librealsense::depth_stereo_sensor>(snr);
                if (_stereoscopic_depth)
                {
                    dss = As<librealsense::depth_stereo_sensor>(snr);
                    _depth_units = dss->get_depth_scale();
                    _stereo_baseline_meter = dss->get_stereo_baseline_mm()* 0.001f;
                }
            }

            if (_stereoscopic_depth)
            {
                auto vp = _source_stream_profile.as<rs2::video_stream_profile>();
                _focal_lenght_mm    = vp.get_intrinsics().fx;
                const uint8_t fractional_bits = 5;
                const uint8_t fractions = 1 << fractional_bits;
                _d2d_convert_factor = (_stereo_baseline_meter * _focal_lenght_mm * fractions) / _depth_units;
                _width = vp.width();
                _height = vp.height();
                _update_target = true;
            }
        }

        // Adjust the target profile
        if (_update_target)
        {
            auto tgt_format = _transform_to_disparity ? RS2_FORMAT_DISPARITY32 : RS2_FORMAT_Z16;
            _target_stream_profile = _source_stream_profile.clone(RS2_STREAM_DEPTH, 0, tgt_format);
            auto src_vspi = dynamic_cast<video_stream_profile_interface*>(_source_stream_profile.get()->profile);
            auto tgt_vspi = dynamic_cast<video_stream_profile_interface*>(_target_stream_profile.get()->profile);
            rs2_intrinsics src_intrin   = src_vspi->get_intrinsics();

            tgt_vspi->set_intrinsics([src_intrin]() { return src_intrin; });
            tgt_vspi->set_dims(src_intrin.width, src_intrin.height);

            _update_target = false;
        }
    }
Example #15
    rs2::frame colorizer::process_frame(const rs2::frame_source& source, const rs2::frame& f)
    {
        if (f.get_profile().get() != _source_stream_profile.get())
        {
            _source_stream_profile = f.get_profile();
            _target_stream_profile = f.get_profile().clone(RS2_STREAM_DEPTH, 0, RS2_FORMAT_RGB8);
        }
        auto make_equalized_histogram = [this](const rs2::video_frame& depth, rs2::video_frame rgb)
        {
            const auto max_depth = 0x10000;
            static uint32_t histogram[max_depth];
            memset(histogram, 0, sizeof(histogram));

            const auto w = depth.get_width(), h = depth.get_height();
            const auto depth_data = reinterpret_cast<const uint16_t*>(depth.get_data());
            auto rgb_data = reinterpret_cast<uint8_t*>(const_cast<void *>(rgb.get_data()));

            for (auto i = 0; i < w*h; ++i) ++histogram[depth_data[i]];
            for (auto i = 2; i < max_depth; ++i) histogram[i] += histogram[i - 1]; // Build a cumulative histogram for the indices in [1,0xFFFF]
            auto cm = _maps[_map_index];
            for (auto i = 0; i < w*h; ++i)
            {
                auto d = depth_data[i];

                if (d)
                {
                    auto f = histogram[d] / (float)histogram[0xFFFF]; // Normalized rank in [0,1] based on histogram location

                    auto c = cm->get(f);
                    rgb_data[i * 3 + 0] = (uint8_t)c.x;
                    rgb_data[i * 3 + 1] = (uint8_t)c.y;
                    rgb_data[i * 3 + 2] = (uint8_t)c.z;
                }
                else
                {
                    rgb_data[i * 3 + 0] = 0;
                    rgb_data[i * 3 + 1] = 0;
                    rgb_data[i * 3 + 2] = 0;
                }
            }
        };

        auto make_value_cropped_frame = [this](const rs2::video_frame& depth, rs2::video_frame rgb)
        {
            const auto max_depth = 0x10000;
            const auto w = depth.get_width(), h = depth.get_height();
            const auto depth_data = reinterpret_cast<const uint16_t*>(depth.get_data());
            auto rgb_data = reinterpret_cast<uint8_t*>(const_cast<void *>(rgb.get_data()));

            auto fi = (frame_interface*)depth.get();
            auto df = dynamic_cast<librealsense::depth_frame*>(fi);
            auto depth_units = df->get_units();

            for (auto i = 0; i < w*h; ++i)
            {
                auto d = depth_data[i];

                if (d)
                {
                    auto f = (d * depth_units - _min) / (_max - _min);

                    auto c = _maps[_map_index]->get(f);
                    rgb_data[i * 3 + 0] = (uint8_t)c.x;
                    rgb_data[i * 3 + 1] = (uint8_t)c.y;
                    rgb_data[i * 3 + 2] = (uint8_t)c.z;
                }
                else
                {
                    rgb_data[i * 3 + 0] = 0;
                    rgb_data[i * 3 + 1] = 0;
                    rgb_data[i * 3 + 2] = 0;
                }
            }
        };
        rs2::frame ret;

        auto vf = f.as<rs2::video_frame>();
        //rs2_extension ext = f.is<rs2::disparity_frame>() ? RS2_EXTENSION_DISPARITY_FRAME : RS2_EXTENSION_DEPTH_FRAME;
        ret = source.allocate_video_frame(_target_stream_profile, f, 3, vf.get_width(), vf.get_height(), vf.get_width() * 3, RS2_EXTENSION_VIDEO_FRAME);

        if (_equalize)
            make_equalized_histogram(f, ret);
        else
            make_value_cropped_frame(f, ret);

        return ret;
    }
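The two lambdas implement the colorizer's histogram-equalized and fixed-range visualization modes; the public block switches between them via an option. A usage sketch (option and method names are from the public API):

    rs2::colorizer color_map;
    color_map.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f); // selects make_equalized_histogram
    rs2::video_frame colored = color_map.colorize(depth);                 // equivalent to process(depth)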