bool
ImageBufAlgo::rotate180 (ImageBuf &dst, const ImageBuf &src,
                         ROI roi, int nthreads)
{
    if (&dst == &src) {   // Handle in-place operation
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return rotate180 (dst, tmp, roi, nthreads);
    }
    ROI src_roi = roi.defined() ? roi : src.roi();
    ROI src_roi_full = src.roi_full();
    int xoffset = src_roi.xbegin - src_roi_full.xbegin;
    int xstart = src_roi_full.xend - xoffset - src_roi.width();
    int yoffset = src_roi.ybegin - src_roi_full.ybegin;
    int ystart = src_roi_full.yend - yoffset - src_roi.height();
    // Compute the destination ROI, it's the source ROI reflected across
    // the midline of the display window (in both x and y).
    ROI dst_roi (xstart, xstart+src_roi.width(),
                 ystart, ystart+src_roi.height(),
                 src_roi.zbegin, src_roi.zend,
                 src_roi.chbegin, src_roi.chend);
    ASSERT (dst_roi.width() == src_roi.width() &&
            dst_roi.height() == src_roi.height());
    // Fix: check the IBAprep result and bail on failure, instead of
    // dispatching on a dst that may not have been set up properly.
    if (! IBAprep (dst_roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "rotate180", rotate180_,
                          dst.spec().format, src.spec().format,
                          dst, src, dst_roi, nthreads);
    return ok;
}
bool ImageBufAlgo::render_line (ImageBuf &dst, int x1, int y1, int x2, int y2, array_view<const float> color, bool skip_first_point, ROI roi, int nthreads) { if (! IBAprep (roi, &dst)) return false; if (int(color.size()) < roi.chend) { dst.error ("Not enough channels for the color (needed %d)", roi.chend); return false; // Not enough color channels specified } const ImageSpec &spec (dst.spec()); // Alpha: if the image's spec designates an alpha channel, use it if // it's within the range specified by color. Otherwise, if color // includes more values than the highest channel roi says we should // modify, assume the first extra value is alpha. If all else fails, // make the line opaque (alpha=1.0). float alpha = 1.0f; if (spec.alpha_channel >= 0 && spec.alpha_channel < int(color.size())) alpha = color[spec.alpha_channel]; else if (int(color.size()) == roi.chend+1) alpha = color[roi.chend]; bool ok; OIIO_DISPATCH_TYPES (ok, "render_line", render_line_, dst.spec().format, dst, x1, y1, x2, y2, color, alpha, skip_first_point, roi, nthreads); return ok; }
/// Fix all non-finite pixels (nan/inf) using the specified approach bool ImageBufAlgo::fixNonFinite (ImageBuf &src, NonFiniteFixMode mode, int *pixelsFixed, ROI roi, int nthreads) { // If no ROI is defined, use the data window of src. if (! roi.defined()) roi = get_roi(src.spec()); roi.chend = std::min (roi.chend, src.nchannels()); // Initialize if (pixelsFixed) *pixelsFixed = 0; switch (src.spec().format.basetype) { case TypeDesc::FLOAT : return fixNonFinite_<float> (src, mode, pixelsFixed, roi, nthreads); case TypeDesc::HALF : return fixNonFinite_<half> (src, mode, pixelsFixed, roi, nthreads); case TypeDesc::DOUBLE: return fixNonFinite_<double> (src, mode, pixelsFixed, roi, nthreads); default: // All other format types aren't capable of having nonfinite // pixel values. return true; } }
// Subtract: dst = A - B, where each of A and B may be an image or a
// per-channel constant (at least one must be an image).
bool
ImageBufAlgo::sub(ImageBuf& dst, Image_or_Const A_, Image_or_Const B_, ROI roi,
                  int nthreads)
{
    pvt::LoggedTimer logtime("IBA::sub");
    if (A_.is_img() && B_.is_img()) {
        // Image - image case.
        const ImageBuf &A(A_.img()), &B(B_.img());
        if (!IBAprep(roi, &dst, &A, &B))
            return false;
        ROI origroi = roi;
        // Restrict to the channels common to both inputs.
        roi.chend = std::min(roi.chend, std::min(A.nchannels(), B.nchannels()));
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES3(ok, "sub", sub_impl, dst.spec().format,
                                    A.spec().format, B.spec().format, dst, A, B,
                                    roi, nthreads);
        if (roi.chend < origroi.chend && A.nchannels() != B.nchannels()) {
            // Edge case: A and B differed in nchannels, we allocated dst to be
            // the bigger of them, but adjusted roi to be the lesser. Now handle
            // the channels that got left out because they were not common to
            // all the inputs.
            ASSERT(roi.chend <= dst.nchannels());
            roi.chbegin = roi.chend;
            roi.chend = origroi.chend;
            if (A.nchannels() > B.nchannels()) { // A exists
                copy(dst, A, dst.spec().format, roi, nthreads);
            } else { // B exists
                copy(dst, B, dst.spec().format, roi, nthreads);
            }
        }
        return ok;
    }
    if (A_.is_val() && B_.is_img()) // canonicalize to A_img, B_val
        A_.swap(B_);
    if (A_.is_img() && B_.is_val()) {
        // Image - constant case: implemented as A + (-b) via add_impl.
        const ImageBuf& A(A_.img());
        cspan<float> b = B_.val();
        if (!IBAprep(roi, &dst, &A,
                     IBAprep_CLAMP_MUTUAL_NCHANNELS | IBAprep_SUPPORT_DEEP))
            return false;
        IBA_FIX_PERCHAN_LEN_DEF(b, A.nchannels());
        // Negate b (into a copy)
        int nc = A.nchannels();
        float* vals = ALLOCA(float, nc);
        for (int c = 0; c < nc; ++c)
            vals[c] = -b[c];
        b = cspan<float>(vals, nc);
        if (dst.deep()) {
            // While still serial, set up all the sample counts
            dst.deepdata()->set_all_samples(A.deepdata()->all_samples());
            return add_impl_deep(dst, A, b, roi, nthreads);
        }
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES2(ok, "sub", add_impl, dst.spec().format,
                                    A.spec().format, dst, A, b, roi, nthreads);
        return ok;
    }
    // Remaining cases: error
    dst.error("ImageBufAlgo::sub(): at least one argument must be an image");
    return false;
}
static bool same_size (const ImageBuf &A, const ImageBuf &B) { const ImageSpec &a (A.spec()), &b (B.spec()); return (a.width == b.width && a.height == b.height && a.depth == b.depth && a.nchannels == b.nchannels); }
// Copy/reshuffle channels of src into dst. channelorder (if non-NULL)
// gives, for each destination channel, the source channel index to draw
// from. NOTE: this function's definition continues beyond this excerpt;
// only the argument validation and channelorder defaulting are shown.
bool
ImageBufAlgo::channels (ImageBuf &dst, const ImageBuf &src,
                        int nchannels, const int *channelorder,
                        const float *channelvalues,
                        const std::string *newchannelnames,
                        bool shuffle_channel_names)
{
    // Not intended to create 0-channel images.
    if (nchannels <= 0) {
        dst.error ("%d-channel images not supported", nchannels);
        return false;
    }
    // If we dont have a single source channel,
    // hard to know how big to make the additional channels
    if (src.spec().nchannels == 0) {
        dst.error ("%d-channel images not supported", src.spec().nchannels);
        return false;
    }
    // If channelorder is NULL, it will be interpreted as
    // {0, 1, ..., nchannels-1}, i.e. an identity ordering.
    int *local_channelorder = NULL;
    if (! channelorder) {
        local_channelorder = ALLOCA (int, nchannels);
        for (int c = 0; c < nchannels; ++c)
            local_channelorder[c] = c;
        channelorder = local_channelorder;
    }
bool ImageBufAlgo::compare (const ImageBuf &A, const ImageBuf &B, float failthresh, float warnthresh, ImageBufAlgo::CompareResults &result, ROI roi, int nthreads) { // If no ROI is defined, use the union of the data windows of the two // images. if (! roi.defined()) roi = roi_union (get_roi(A.spec()), get_roi(B.spec())); roi.chend = std::min (roi.chend, std::max(A.nchannels(), B.nchannels())); // Deep and non-deep images cannot be compared if (B.deep() != A.deep()) return false; bool ok; OIIO_DISPATCH_TYPES2 (ok, "compare", compare_, A.spec().format, B.spec().format, A, B, failthresh, warnthresh, result, roi, nthreads); // FIXME - The nthreads argument is for symmetry with the rest of // ImageBufAlgo and for future expansion. But for right now, we // don't actually split by threads. Maybe later. return ok; }
bool ImageBufAlgo::colorconvert (ImageBuf &dst, const ImageBuf &src, const ColorProcessor* processor, bool unpremult, ROI roi, int nthreads) { // If the processor is NULL, return false (error) if (!processor) { dst.error ("Passed NULL ColorProcessor to colorconvert() [probable application bug]"); return false; } // If the processor is a no-op and the conversion is being done // in place, no work needs to be done. Early exit. if (processor->isNoOp() && (&dst == &src)) return true; if (! IBAprep (roi, &dst, &src)) return false; // If the processor is a no-op (and it's not an in-place conversion), // use paste() to simplify the operation. if (processor->isNoOp()) { roi.chend = std::max (roi.chbegin+4, roi.chend); return ImageBufAlgo::paste (dst, roi.xbegin, roi.ybegin, roi.zbegin, roi.chbegin, src, roi, nthreads); } bool ok = true; OIIO_DISPATCH_COMMON_TYPES2 (ok, "colorconvert", colorconvert_impl, dst.spec().format, src.spec().format, dst, src, processor, unpremult, roi, nthreads); return ok; }
bool
ImageBufAlgo::flip(ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads)
{
    if (&dst == &src) {
        // In-place call: move src's pixels aside into a temp, recurse.
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return flip (dst, tmp, roi, nthreads);
    }
    pvt::LoggedTimer logtime("IBA::flip");
    ROI src_roi = roi.defined() ? roi : src.roi();
    ROI src_roi_full = src.roi_full();
    // The destination ROI is the source ROI reflected vertically across
    // the midline of the display window.
    int yoff = src_roi.ybegin - src_roi_full.ybegin;
    int ystart = src_roi_full.yend - yoff - src_roi.height();
    ROI dst_roi = src_roi;
    dst_roi.ybegin = ystart;
    dst_roi.yend   = ystart + src_roi.height();
    ASSERT (dst_roi.width() == src_roi.width() &&
            dst_roi.height() == src_roi.height());
    if (! IBAprep (dst_roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "flip", flip_,
                                 dst.spec().format, src.spec().format,
                                 dst, src, dst_roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::absdiff (ImageBuf &dst, const ImageBuf &A, const ImageBuf &B,
                       ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, &B))
        return false;
    ROI origroi = roi;
    // Only the channels common to both inputs can be diffed.
    int common = std::min (A.nchannels(), B.nchannels());
    roi.chend = std::min (roi.chend, common);
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES3 (ok, "absdiff", absdiff_impl,
                                 dst.spec().format, A.spec().format,
                                 B.spec().format, dst, A, B, roi, nthreads);
    if (roi.chend < origroi.chend && A.nchannels() != B.nchannels()) {
        // A and B had different channel counts: dst was allocated for the
        // larger, but the diff above only covered the common channels.
        // For the leftover channels the result is abs() of whichever
        // input actually has them.
        ASSERT (roi.chend <= dst.nchannels());
        roi.chbegin = roi.chend;
        roi.chend = origroi.chend;
        const ImageBuf &wider (A.nchannels() > B.nchannels() ? A : B);
        abs (dst, wider, roi, nthreads);
    }
    return ok;
}
static bool render_box_ (ImageBuf &dst, array_view<const float> color, ROI roi=ROI(), int nthreads=1) { if (nthreads != 1 && roi.npixels() >= 1000) { // Lots of pixels and request for multi threads? Parallelize. ImageBufAlgo::parallel_image ( OIIO::bind(render_box_<T>, OIIO::ref(dst), color, _1 /*roi*/, 1 /*nthreads*/), roi, nthreads); return true; } // Serial case float alpha = 1.0f; if (dst.spec().alpha_channel >= 0 && dst.spec().alpha_channel < int(color.size())) alpha = color[dst.spec().alpha_channel]; else if (int(color.size()) == roi.chend+1) alpha = color[roi.chend]; if (alpha == 1.0f) { for (ImageBuf::Iterator<T> r (dst, roi); !r.done(); ++r) for (int c = roi.chbegin; c < roi.chend; ++c) r[c] = color[c]; } else { for (ImageBuf::Iterator<T> r (dst, roi); !r.done(); ++r) for (int c = roi.chbegin; c < roi.chend; ++c) r[c] = color[c] + r[c] * (1.0f-alpha); // "over" } return true; }
bool
ImageBufAlgo::flop (ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads)
{
    // Fix: check IBAprep's result (and pass src so dst/roi can be
    // initialized from it), capture the dispatch macro's `ok` output,
    // and return it — previously the macro was called without its `ok`
    // argument and the function unconditionally returned false, so flop
    // always reported failure.
    if (! IBAprep (roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "flop", flop_,
                          dst.spec().format, src.spec().format,
                          dst, src, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::mul (ImageBuf &dst, const ImageBuf &A, const float *b,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A))
        return false;
    // Fix: capture the dispatch macro's status in `ok` and return it,
    // rather than claiming unconditional success.
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "mul", mul_impl,
                          dst.spec().format, A.spec().format,
                          dst, A, b, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::sub (ImageBuf &dst, const ImageBuf &A, const ImageBuf &B,
                   ROI roi, int nthreads)
{
    // Fix: check IBAprep, capture the dispatch status, and return it —
    // previously prep failure was ignored, the macro was called without
    // its `ok` output, and the function always returned true.
    if (! IBAprep (roi, &dst, &A, &B))
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES3 (ok, "sub", sub_impl,
                                 dst.spec().format, A.spec().format,
                                 B.spec().format, dst, A, B, roi, nthreads);
    return ok;
}
// Apply the color-transfer function `tfunc` to every pixel of `buf`,
// converting in place. T is buf's pixel data type. Fix: the body uses
// ImageBuf::Iterator<T> but no template parameter was declared, so the
// function could not compile; restore the template header.
template<class T>
static inline void
transfer_pixels_ (ImageBuf &buf, ColorTransfer *tfunc)
{
    for (ImageBuf::Iterator<T> pixel (buf); pixel.valid(); ++pixel) {
        // Same source and destination pointer/format: in-place convert.
        convert_types (buf.spec().format, pixel.rawptr(),
                       buf.spec().format, pixel.rawptr(),
                       buf.nchannels(), tfunc,
                       buf.spec().alpha_channel, buf.spec().z_channel);
    }
}
bool ImageBufAlgo::resize (ImageBuf &dst, const ImageBuf &src, const std::string &filtername_, float fwidth, ROI roi, int nthreads) { if (! IBAprep (roi, &dst, &src)) return false; const ImageSpec &srcspec (src.spec()); const ImageSpec &dstspec (dst.spec()); if (dstspec.nchannels != srcspec.nchannels) { dst.error ("channel number mismatch: %d vs. %d", dst.spec().nchannels, src.spec().nchannels); return false; } if (dstspec.depth > 1 || srcspec.depth > 1) { dst.error ("ImageBufAlgo::resize does not support volume images"); return false; } // Resize ratios float wratio = float(dstspec.full_width) / float(srcspec.full_width); float hratio = float(dstspec.full_height) / float(srcspec.full_height); // Set up a shared pointer with custom deleter to make sure any // filter we allocate here is properly destroyed. boost::shared_ptr<Filter2D> filter ((Filter2D*)NULL, Filter2D::destroy); std::string filtername = filtername_; if (filtername.empty()) { // No filter name supplied -- pick a good default if (wratio > 1.0f || hratio > 1.0f) filtername = "blackman-harris"; else filtername = "lanczos3"; } for (int i = 0, e = Filter2D::num_filters(); i < e; ++i) { FilterDesc fd; Filter2D::get_filterdesc (i, &fd); if (fd.name == filtername) { float w = fwidth > 0.0f ? fwidth : fd.width * std::max (1.0f, wratio); float h = fwidth > 0.0f ? fwidth : fd.width * std::max (1.0f, hratio); filter.reset (Filter2D::create (filtername, w, h)); break; } } if (! filter) { dst.error ("Filter \"%s\" not recognized", filtername); return false; } OIIO_DISPATCH_TYPES2 ("resize", resize_, dstspec.format, srcspec.format, dst, src, filter.get(), roi, nthreads); return false; }
bool
ImageBufAlgo::mul (ImageBuf &dst, const ImageBuf &A, const float *b,
                   ROI roi, int nthreads)
{
    // Prepare dst from A, clamping the channel range to what they share.
    if (! IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS))
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "mul", mul_impl,
                                 dst.spec().format, A.spec().format,
                                 dst, A, b, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::premult (ImageBuf &dst, ROI roi, int nthreads)
{
    // No alpha channel: nothing to premultiply by; trivially succeed.
    if (dst.spec().alpha_channel < 0)
        return true;
    // Fix: check IBAprep, capture the dispatch status, and return it —
    // previously prep failure was ignored, the macro lacked its `ok`
    // output, and the function always returned true.
    if (! IBAprep (roi, &dst))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES (ok, "premult", premult_, dst.spec().format,
                         dst, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::pow (ImageBuf &dst, const ImageBuf &A, const float *b,
                   ROI roi, int nthreads)
{
    // dst = A raised to per-channel exponents b, over roi.
    if (! IBAprep (roi, &dst, &A))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "pow", pow_impl,
                          dst.spec().format, A.spec().format,
                          dst, A, b, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::mul (ImageBuf &dst, const ImageBuf &A, const ImageBuf &B,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, &B))
        return false;
    // Fix: capture the dispatch macro's status in `ok` and return it,
    // rather than claiming unconditional success.
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES3 (ok, "mul", mul_impl,
                                 dst.spec().format, A.spec().format,
                                 B.spec().format, dst, A, B, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::crop (ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads)
{
    dst.clear ();
    roi.chend = std::min (roi.chend, src.nchannels());
    // Fix: check IBAprep, capture the dispatch status, and return it —
    // previously prep failure was ignored, the macro lacked its `ok`
    // output, and the function unconditionally returned false.
    if (! IBAprep (roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "crop", crop_,
                          dst.spec().format, src.spec().format,
                          dst, src, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::circular_shift (ImageBuf &dst, const ImageBuf &src,
                              int xshift, int yshift, int zshift,
                              ROI roi, int nthreads)
{
    // Fix: check IBAprep; capture and return the dispatch status; and
    // remove the duplicated `roi` argument (`roi, roi, nthreads`) that
    // was passed to the dispatch macro. Previously the function also
    // unconditionally returned false.
    if (! IBAprep (roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "circular_shift", circular_shift_,
                          dst.spec().format, src.spec().format,
                          dst, src, xshift, yshift, zshift, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::sub (ImageBuf &dst, const ImageBuf &A, const ImageBuf &B,
                   ROI roi, int nthreads)
{
    // dst = A - B over roi, dispatched on all three pixel formats.
    if (IBAprep (roi, &dst, &A, &B)) {
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES3 (ok, "sub", sub_impl,
                                     dst.spec().format, A.spec().format,
                                     B.spec().format, dst, A, B,
                                     roi, nthreads);
        return ok;
    }
    return false;
}
bool
ImageBufAlgo::div (ImageBuf &dst, const ImageBuf &A, const ImageBuf &B,
                   ROI roi, int nthreads)
{
    // Prepare dst from A and B, clamping channels to their mutual range.
    bool prepped = IBAprep (roi, &dst, &A, &B, NULL,
                            IBAprep_CLAMP_MUTUAL_NCHANNELS);
    if (! prepped)
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES3 (ok, "div", div_impl,
                                 dst.spec().format, A.spec().format,
                                 B.spec().format, dst, A, B, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::mul (ImageBuf &dst, const ImageBuf &A, float b,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A))
        return false;
    // Broadcast the scalar into a per-channel array, then reuse the
    // per-channel multiply implementation.
    int nc = A.nchannels();
    float *vals = ALLOCA (float, nc);
    for (int c = 0; c < nc; ++c)
        vals[c] = b;
    // Fix: capture the dispatch status and return it — previously the
    // macro lacked its `ok` output and the function had NO return
    // statement at all (undefined behavior: falling off the end of a
    // non-void function).
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "mul", mul_impl,
                          dst.spec().format, A.spec().format,
                          dst, A, vals, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::transpose (ImageBuf &dst, const ImageBuf &src,
                         ROI roi, int nthreads)
{
    if (! roi.defined())
        roi = get_roi (src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    // The destination ROI swaps x and y relative to the source.
    ROI dst_roi (roi.ybegin, roi.yend, roi.xbegin, roi.xend,
                 roi.zbegin, roi.zend, roi.chbegin, roi.chend);
    // Fix: check IBAprep, capture the dispatch status, and return it —
    // previously prep failure was ignored, the macro lacked its `ok`
    // output, and the function unconditionally returned false.
    if (! IBAprep (dst_roi, &dst))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "transpose", transpose_,
                          dst.spec().format, src.spec().format,
                          dst, src, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::sub (ImageBuf &dst, const ImageBuf &A, const float *b,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A))
        return false;
    // Subtracting per-channel constants is implemented as adding their
    // negations via add_impl.
    int nc = A.nchannels();
    float *vals = ALLOCA (float, nc);
    for (int c = 0; c < nc; ++c)
        vals[c] = -b[c];
    // Fix: capture the dispatch status and return it, rather than
    // claiming unconditional success.
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "sub", add_impl,
                          dst.spec().format, A.spec().format,
                          dst, A, vals, roi, nthreads);
    return ok;
}
// Print a one-line summary of img0's current subimage/MIP level:
// subimage/level labels (only when there are several), dimensions, and
// channel count.
inline void
print_subimage (ImageBuf &img0, int subimage, int miplevel)
{
    const bool multi_sub = img0.nsubimages() > 1;
    const bool multi_mip = img0.nmiplevels() > 1;
    if (multi_sub)
        std::cout << "Subimage " << subimage << ' ';
    if (multi_mip)
        std::cout << " MIP level " << miplevel << ' ';
    if (multi_sub || multi_mip)
        std::cout << ": ";
    const ImageSpec &spec (img0.spec());
    std::cout << spec.width << " x " << spec.height;
    if (spec.depth > 1)
        std::cout << " x " << spec.depth;
    std::cout << ", " << spec.nchannels << " channel\n";
}
bool
ImageBufAlgo::pow (ImageBuf &dst, const ImageBuf &A, float b,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS))
        return false;
    // Broadcast the scalar exponent across all of A's channels and use
    // the per-channel implementation.
    const int nchans = A.nchannels();
    float *exps = ALLOCA (float, nchans);
    for (int c = 0; c < nchans; ++c)
        exps[c] = b;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "pow", pow_impl,
                                 dst.spec().format, A.spec().format,
                                 dst, A, exps, roi, nthreads);
    return ok;
}
bool
ImageBufAlgo::div (ImageBuf &dst, const ImageBuf &A, const float *b,
                   ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS))
        return false;
    // Division by per-channel constants is implemented as multiplication
    // by their reciprocals; a zero divisor becomes a multiply-by-1.
    const int nchans = dst.nchannels();
    float *recip = OIIO_ALLOCA (float, nchans);
    for (int c = 0; c < nchans; ++c)
        recip[c] = b[c] ? 1.0f/b[c] : 1.0f;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "div", mul_impl,
                                 dst.spec().format, A.spec().format,
                                 dst, A, recip, roi, nthreads);
    return ok;
}