// Copy constructor: duplicates another plane's ll/ul/lr/ur points and its c value.
// NOTE(review): members are initialized through the source's accessors
// (ll()/ul()/lr()/ur()/c()) rather than from the members directly; presumably
// those are trivial getters, in which case this is equivalent to the
// compiler-generated copy constructor — confirm before simplifying to `= default`.
ImagePlane::ImagePlane(const ImagePlane &_ip) : ll_(_ip.ll()), ul_(_ip.ul()), lr_(_ip.lr()), ur_(_ip.ur()), c_(_ip.c()) {}
// Flat indexing: getPixelPtr(i) must address the i-th byte of the backing image.
TEST(ImagePlane, GetPixelPtrByIndex)
{
	const ImagePlane<unsigned char> plane(lenaGray8, sizeof(lenaGray8), LENA_GRAY8_WIDTH, LENA_GRAY8_HEIGHT, LENA_GRAY8_WIDTH, true);
	const std::size_t total = sizeof(lenaGray8);

	// Walk every pixel once and compare against the raw source buffer.
	for (std::size_t idx = 0; idx != total; ++idx)
		EXPECT_EQ(*plane.getPixelPtr(idx), lenaGray8[idx]);
}
// 2-D addressing: getPixelPtr(x, y) must resolve to row y * stride + column x
// of the backing image.
TEST(ImagePlane, GetPixelPtrByXY)
{
	const ImagePlane<unsigned char> plane(lenaGray8, sizeof(lenaGray8), LENA_GRAY8_WIDTH, LENA_GRAY8_HEIGHT, LENA_GRAY8_WIDTH, true);

	// Same traversal order as the flat-index test: columns outer, rows inner.
	for (std::size_t col = 0; col != plane.width(); ++col) {
		for (std::size_t row = 0; row != plane.height(); ++row) {
			const std::size_t flat = row * LENA_GRAY8_WIDTH + col;
			EXPECT_EQ(*plane.getPixelPtr(col, row), lenaGray8[flat]);
		}
	}
}
// Dispatch a depth conversion on one plane.
//
// Integer destination formats (type below HALF) are routed through the
// dithered converter, which needs scratch space; floating-point destinations
// (HALF and above) use the direct depth converter.
void Depth::process(const ImagePlane<const void> &src, const ImagePlane<void> &dst, void *tmp) const
{
	if (dst.format().type < PixelType::HALF)
		convert_dithered(*m_dither, src, dst, tmp);
	else
		convert_depth(*m_depth, src, dst);
}
void Unresize::invoke_impl_v(const ImagePlane<const void> &src, const ImagePlane<void> &dst, void *tmp) const { switch (src.format().type) { case PixelType::HALF: m_impl->process_f16_v(plane_cast<const uint16_t>(src), plane_cast<uint16_t>(dst), (uint16_t *)tmp); break; case PixelType::FLOAT: m_impl->process_f32_v(plane_cast<const float>(src), plane_cast<float>(dst), (float *)tmp); break; default: throw zimg::error::UnsupportedOperation{ "only HALF and FLOAT supported for unresize" }; } }
// Constructing a plane over an existing image must report the source image's
// geometry (width/height/stride) and the matching pixel/byte counts.
TEST(ImagePlane, InitializationImage)
{
	const ImagePlane<unsigned char> plane(lenaGray8, sizeof(lenaGray8), LENA_GRAY8_WIDTH, LENA_GRAY8_HEIGHT, LENA_GRAY8_WIDTH, true);

	// Geometry straight from the constructor arguments.
	EXPECT_EQ(plane.width(), LENA_GRAY8_WIDTH);
	EXPECT_EQ(plane.height(), LENA_GRAY8_HEIGHT);
	EXPECT_EQ(plane.stride(), LENA_GRAY8_WIDTH);

	// Derived quantities: pixel count and total byte size.
	EXPECT_EQ(plane.pixels(), LENA_GRAY8_WIDTH * LENA_GRAY8_HEIGHT);
	EXPECT_EQ(plane.size(), plane.pixels() * sizeof(unsigned char));
}
// Two-pass (horizontal + vertical) resize of a single plane, pipelined through
// a line buffer so the intermediate image never needs to be fully materialized.
//
// Stage 1 (ctx.impl1) consumes source lines and writes into ctx.tmp_buf; stage 2
// (ctx.impl2) consumes tmp_buf lines and writes destination lines. For each
// batch of output lines, only the tmp lines that batch depends on are produced.
// Border buffers absorb reads/writes that would run past the image edges.
//
// NOTE(review): tmp must be large enough for get_context()'s allocations —
// sized by a matching tmp_size()-style query elsewhere; confirm against caller.
void Resize::process2d(const ImagePlane<const void> &src, const ImagePlane<void> &dst, void *tmp) const
{
	PixelType type = src.format().type;
	// Carve all per-call working buffers out of the caller-provided scratch.
	LinearAllocator alloc{ tmp };
	ResizeContext ctx = get_context(alloc, type);
	// Wrap src/dst planes as line buffers; UINT_MAX mask = no line wrapping.
	// The const_cast-like (void *) on src.data() is required by LineBuffer's
	// non-const interface; stage 1 only reads from it.
	LineBuffer<void> src_buf{ (void *)src.data(), 0, (unsigned)src.width(), (unsigned)src.stride() * pixel_size(type), UINT_MAX };
	LineBuffer<void> dst_buf{ dst.data(), 0, (unsigned)dst.width(), (unsigned)dst.stride() * pixel_size(type), UINT_MAX };
	// Set once the first stage-1 read past the bottom of the source is handled.
	bool overflow_flag = false;
	// Next tmp line to produce; persists across outer iterations so each tmp
	// line is computed exactly once.
	unsigned buffer_pos = 0;
	unsigned src_linesize = m_src_width * pixel_size(type);
	unsigned dst_linesize = m_dst_width * pixel_size(type);
	// Produce destination lines in batches of out_buffering2.
	for (unsigned i = 0; i < m_dst_height; i += ctx.out_buffering2) {
		const LineBuffer<void> *in_buf = &src_buf;
		LineBuffer<void> *out_buf = &dst_buf;
		// Range of tmp lines this output batch depends on (clamped to tmp height).
		unsigned dep2_first = ctx.impl2->dependent_line(i);
		unsigned dep2_last = std::min(dep2_first + ctx.in_buffering2, ctx.tmp_height);
		// Run stage 1 until all required tmp lines exist.
		for (; buffer_pos < dep2_last; buffer_pos += ctx.out_buffering1) {
			unsigned dep1_first = ctx.impl1->dependent_line(buffer_pos);
			unsigned dep1_last = dep1_first + ctx.in_buffering1;
			if (dep1_last > m_src_height) {
				// Stage 1 would read past the last source line: copy the valid
				// tail into the border buffer once, then read from it instead.
				if (!overflow_flag) {
					copy_buffer_lines(src_buf, ctx.src_border_buf, src_linesize, dep1_first, m_src_height);
					overflow_flag = true;
				}
				in_buf = &ctx.src_border_buf;
			}
			invoke_impl(ctx.impl1, type, *in_buf, ctx.tmp_buf, buffer_pos, ctx.tmp_data);
		}
		// Last batch may overrun m_dst_height: write into the border buffer,
		// then copy back only the in-range lines below.
		if (i + ctx.out_buffering2 > m_dst_height)
			out_buf = &ctx.dst_border_buf;
		invoke_impl(ctx.impl2, type, ctx.tmp_buf, *out_buf, i, ctx.tmp_data);
		if (i + ctx.out_buffering2 > m_dst_height)
			copy_buffer_lines(ctx.dst_border_buf, dst_buf, dst_linesize, i, m_dst_height);
	}
}
void Unresize::process(const ImagePlane<const void> &src, const ImagePlane<void> &dst, void *tmp) const { PixelType type = src.format().type; int pxsize = pixel_size(type); if (m_src_width == m_dst_width) { invoke_impl_v(src, dst, tmp); } else if (m_src_height == m_dst_height) { invoke_impl_h(src, dst, tmp); } else { double xscale = (double)m_dst_width / (double)m_src_width; double yscale = (double)m_dst_height / (double)m_src_height; // Downscaling cost is proportional to input size, whereas upscaling cost is proportional to output size. // Horizontal operation is roughly twice as costly as vertical operation for SIMD cores. double h_first_cost = std::max(xscale, 1.0) * 2.0 + xscale * std::max(yscale, 1.0); double v_first_cost = std::max(yscale, 1.0) + yscale * std::max(xscale, 1.0) * 2.0; char *tmp1 = (char *)tmp; char *tmp2 = tmp1 + max_frame_size(type) * pxsize; if (h_first_cost < v_first_cost) { int tmp_stride = align(m_dst_width, ALIGNMENT / pxsize); ImagePlane<void> tmp_plane{ tmp1, m_dst_width, m_src_height, tmp_stride, type }; invoke_impl_h(src, tmp_plane, tmp2); invoke_impl_v(tmp_plane, dst, tmp2); } else { int tmp_stride = align(m_src_width, ALIGNMENT / pxsize); ImagePlane<void> tmp_plane{ tmp1, m_src_width, m_dst_height, tmp_stride, type }; invoke_impl_v(src, tmp_plane, tmp2); invoke_impl_h(tmp_plane, dst, tmp2); } } }
// Constructing planes without a backing image must still report consistent
// geometry and sizes, independent of the element type.
TEST(ImagePlane, InitializationEmpty)
{
	const ImagePlane<unsigned char> small(128, 128);
	const ImagePlane<unsigned long> large(4000, 4000);

	// 128x128 plane of bytes.
	EXPECT_EQ(small.width(), 128);
	EXPECT_EQ(small.height(), 128);
	EXPECT_EQ(small.stride(), 128);
	EXPECT_EQ(small.pixels(), 128 * 128);
	EXPECT_EQ(small.size(), small.pixels() * sizeof(unsigned char));

	// 4000x4000 plane of unsigned longs: size scales with the element width.
	EXPECT_EQ(large.width(), 4000);
	EXPECT_EQ(large.height(), 4000);
	EXPECT_EQ(large.stride(), 4000);
	EXPECT_EQ(large.pixels(), 4000 * 4000);
	EXPECT_EQ(large.size(), large.pixels() * sizeof(unsigned long));
}