// Flip the image vertically (mirror top-to-bottom) over the region of
// interest.  An in-place call (&dst == &src) is handled by moving src's
// contents into a temporary first.  Returns true on success.
bool ImageBufAlgo::flip(ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads) {
    if (&dst == &src) {    // Handle in-place operation
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return flip (dst, tmp, roi, nthreads);
    }
    pvt::LoggedTimer logtime("IBA::flip");
    ROI src_roi = roi.defined() ? roi : src.roi();
    ROI src_roi_full = src.roi_full();
    // Compute the destination ROI: it's the source ROI reflected across
    // the vertical midline of the display (full) window.
    int offset = src_roi.ybegin - src_roi_full.ybegin;
    int start = src_roi_full.yend - offset - src_roi.height();
    ROI dst_roi (src_roi.xbegin, src_roi.xend,
                 start, start+src_roi.height(),
                 src_roi.zbegin, src_roi.zend,
                 src_roi.chbegin, src_roi.chend);
    // Reflection preserves the region's size.
    ASSERT (dst_roi.width() == src_roi.width() &&
            dst_roi.height() == src_roi.height());
    if (! IBAprep (dst_roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "flip", flip_, dst.spec().format,
                                 src.spec().format, dst, src, dst_roi, nthreads);
    return ok;
}
// Compute dst = A - B, where each of A and B may be an image or a
// per-channel constant (at least one must be an image).  Returns true on
// success, false (with an error set on dst) on failure.
bool ImageBufAlgo::sub(ImageBuf& dst, Image_or_Const A_, Image_or_Const B_, ROI roi, int nthreads) {
    pvt::LoggedTimer logtime("IBA::sub");
    if (A_.is_img() && B_.is_img()) {
        // Image - image case.
        const ImageBuf &A(A_.img()), &B(B_.img());
        if (!IBAprep(roi, &dst, &A, &B))
            return false;
        // Restrict the subtraction to the channels common to both inputs.
        ROI origroi = roi;
        roi.chend = std::min(roi.chend, std::min(A.nchannels(), B.nchannels()));
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES3(ok, "sub", sub_impl, dst.spec().format,
                                    A.spec().format, B.spec().format, dst, A, B,
                                    roi, nthreads);
        if (roi.chend < origroi.chend && A.nchannels() != B.nchannels()) {
            // Edge case: A and B differed in nchannels, we allocated dst to be
            // the bigger of them, but adjusted roi to be the lesser. Now handle
            // the channels that got left out because they were not common to
            // all the inputs.
            ASSERT(roi.chend <= dst.nchannels());
            roi.chbegin = roi.chend;
            roi.chend = origroi.chend;
            if (A.nchannels() > B.nchannels()) {  // A exists
                copy(dst, A, dst.spec().format, roi, nthreads);
            } else {  // B exists
                copy(dst, B, dst.spec().format, roi, nthreads);
            }
        }
        return ok;
    }
    if (A_.is_val() && B_.is_img())  // canonicalize to A_img, B_val
        A_.swap(B_);
    if (A_.is_img() && B_.is_val()) {
        // Image - constant case: implemented as A + (-b), reusing add_impl.
        const ImageBuf& A(A_.img());
        cspan<float> b = B_.val();
        if (!IBAprep(roi, &dst, &A,
                     IBAprep_CLAMP_MUTUAL_NCHANNELS | IBAprep_SUPPORT_DEEP))
            return false;
        IBA_FIX_PERCHAN_LEN_DEF(b, A.nchannels());
        // Negate b (into a stack-allocated copy).
        int nc = A.nchannels();
        float* vals = ALLOCA(float, nc);
        for (int c = 0; c < nc; ++c)
            vals[c] = -b[c];
        b = cspan<float>(vals, nc);
        if (dst.deep()) {
            // While still serial, set up all the sample counts
            dst.deepdata()->set_all_samples(A.deepdata()->all_samples());
            return add_impl_deep(dst, A, b, roi, nthreads);
        }
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES2(ok, "sub", add_impl, dst.spec().format,
                                    A.spec().format, dst, A, b, roi, nthreads);
        return ok;
    }
    // Remaining cases: error
    dst.error("ImageBufAlgo::sub(): at least one argument must be an image");
    return false;
}
// Transpose the image (swap x and y) over the region of interest.  The
// destination ROI has x and y extents exchanged; if dst was uninitialized,
// its full/display window is set to the transposed full window of src.
bool ImageBufAlgo::transpose (ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads) {
    pvt::LoggedTimer logtime("IBA::transpose");
    if (! roi.defined())
        roi = get_roi (src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    // Destination region is the source region with x and y swapped.
    ROI dst_roi (roi.ybegin, roi.yend,
                 roi.xbegin, roi.xend,
                 roi.zbegin, roi.zend,
                 roi.chbegin, roi.chend);
    bool dst_initialized = dst.initialized();
    if (! IBAprep (dst_roi, &dst))
        return false;
    if (! dst_initialized) {
        // Fresh destination: give it the transposed full (display) window.
        ROI r = src.roi_full();
        ROI dst_roi_full (r.ybegin, r.yend,
                          r.xbegin, r.xend,
                          r.zbegin, r.zend,
                          r.chbegin, r.chend);
        dst.set_roi_full (dst_roi_full);
    }
    bool ok;
    // Note: the workers iterate over the SOURCE roi (not dst_roi), writing
    // each pixel to its transposed destination position.
    if (dst.spec().format == src.spec().format) {
        OIIO_DISPATCH_TYPES (ok, "transpose", transpose_, dst.spec().format,
                             dst, src, roi, nthreads);
    } else {
        OIIO_DISPATCH_COMMON_TYPES2 (ok, "transpose", transpose_,
                                     dst.spec().format, src.spec().format,
                                     dst, src, roi, nthreads);
    }
    return ok;
}
// Apply the given ColorProcessor to convert src into dst over the roi.
// If `unpremult` is true, colors are unpremultiplied before conversion and
// re-premultiplied after (handled by the impl).  A null processor is an
// error; a no-op processor degenerates to a copy (or to nothing, in place).
bool ImageBufAlgo::colorconvert (ImageBuf &dst, const ImageBuf &src, const ColorProcessor* processor, bool unpremult, ROI roi, int nthreads) {
    // If the processor is NULL, return false (error)
    if (!processor) {
        dst.error ("Passed NULL ColorProcessor to colorconvert() [probable application bug]");
        return false;
    }
    // If the processor is a no-op and the conversion is being done
    // in place, no work needs to be done. Early exit.
    if (processor->isNoOp() && (&dst == &src))
        return true;
    if (! IBAprep (roi, &dst, &src))
        return false;
    // If the processor is a no-op (and it's not an in-place conversion),
    // use paste() to simplify the operation.
    if (processor->isNoOp()) {
        // NOTE(review): widens the channel range to at least 4 channels --
        // presumably to make sure alpha is carried through the paste; confirm
        // against paste()'s channel semantics.
        roi.chend = std::max (roi.chbegin+4, roi.chend);
        return ImageBufAlgo::paste (dst, roi.xbegin, roi.ybegin, roi.zbegin,
                                    roi.chbegin, src, roi, nthreads);
    }
    bool ok = true;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "colorconvert", colorconvert_impl,
                                 dst.spec().format, src.spec().format,
                                 dst, src, processor, unpremult, roi, nthreads);
    return ok;
}
// Compute dst = A ^ b, with a separate exponent for each channel.
// `b` must point to at least as many floats as there are channels in the
// prepared roi.  Returns true on success.
bool ImageBufAlgo::pow (ImageBuf &dst, const ImageBuf &A, const float *b,
                        ROI roi, int nthreads)
{
    if (IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS)) {
        bool ok = false;
        OIIO_DISPATCH_COMMON_TYPES2 (ok, "pow", pow_impl, dst.spec().format,
                                     A.spec().format, dst, A, b, roi, nthreads);
        return ok;
    }
    return false;
}
// Compute dst = A ^ b for a single scalar exponent, by replicating the
// exponent across all of A's channels and dispatching the per-channel
// implementation.  Returns true on success.
bool ImageBufAlgo::pow (ImageBuf &dst, const ImageBuf &A, float b,
                        ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS))
        return false;
    const int nchans = A.nchannels();
    float *expvals = ALLOCA (float, nchans);   // stack storage, one per channel
    for (int i = 0; i < nchans; ++i)
        expvals[i] = b;
    bool ok = false;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "pow", pow_impl, dst.spec().format,
                                 A.spec().format, dst, A, expvals,
                                 roi, nthreads);
    return ok;
}
// Compute dst = A / b for per-channel divisors b, implemented as a
// multiplication by precomputed reciprocals.  By OIIO convention -- and
// consistent with the Image_or_Const overload of div() in this file --
// division by zero yields zero, not identity.
// (BUGFIX: previously a zero divisor produced a reciprocal of 1.0, i.e.
// dst = A, disagreeing with the other div() overload's 0.0 behavior.)
bool ImageBufAlgo::div (ImageBuf &dst, const ImageBuf &A, const float *b,
                        ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &A, IBAprep_CLAMP_MUTUAL_NCHANNELS))
        return false;
    int nc = dst.nchannels();
    float *binv = OIIO_ALLOCA (float, nc);
    for (int c = 0; c < nc; ++c)
        binv[c] = (b[c] == 0.0f) ? 0.0f : 1.0f/b[c];  // div-by-zero -> 0
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "div", mul_impl, dst.spec().format,
                                 A.spec().format, dst, A, binv, roi, nthreads);
    return ok;
}
// Multiply-and-add with per-channel constants: dst = A * B + C, where B
// and C each point to one float per channel.  Fails (setting an error on
// dst) if A is uninitialized.
bool ImageBufAlgo::mad (ImageBuf &dst, const ImageBuf &A, const float *B,
                        const float *C, ROI roi, int nthreads)
{
    if (! A.initialized()) {
        dst.error ("Uninitialized input image");
        return false;
    }
    bool ok = false;
    if (IBAprep (roi, &dst, &A)) {
        OIIO_DISPATCH_COMMON_TYPES2 (ok, "mad", mad_implf, dst.spec().format,
                                     A.spec().format, dst, A, B, C,
                                     roi, nthreads);
        return ok;
    }
    return false;
}
// Compute dst = A / B, where each of A and B may be an image or a
// per-channel constant (at least one must be an image).  Division by zero
// is defined to yield zero.  Returns true on success, false (with an error
// set on dst) on failure.
bool ImageBufAlgo::div(ImageBuf& dst, Image_or_Const A_, Image_or_Const B_, ROI roi, int nthreads) {
    pvt::LoggedTimer logtime("IBA::div");
    if (A_.is_img() && B_.is_img()) {
        // Image / image case.
        const ImageBuf &A(A_.img()), &B(B_.img());
        if (!IBAprep(roi, &dst, &A, &B, IBAprep_CLAMP_MUTUAL_NCHANNELS))
            return false;
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES3(ok, "div", div_impl, dst.spec().format,
                                    A.spec().format, B.spec().format,
                                    dst, A, B, roi, nthreads);
        return ok;
    }
    if (A_.is_val() && B_.is_img())  // canonicalize to A_img, B_val
        A_.swap(B_);
    if (A_.is_img() && B_.is_val()) {
        // Image / constant case: implemented as A * (1/b), reusing mul_impl.
        const ImageBuf& A(A_.img());
        cspan<float> b = B_.val();
        if (!IBAprep(roi, &dst, &A,
                     IBAprep_CLAMP_MUTUAL_NCHANNELS | IBAprep_SUPPORT_DEEP))
            return false;
        IBA_FIX_PERCHAN_LEN_DEF(b, dst.nchannels());
        // Precompute reciprocals; a zero divisor maps to 0 (by convention,
        // division by zero yields zero).
        int nc = dst.nchannels();
        float* binv = OIIO_ALLOCA(float, nc);
        for (int c = 0; c < nc; ++c)
            binv[c] = (b[c] == 0.0f) ? 0.0f : 1.0f / b[c];
        b = cspan<float>(binv, nc);  // re-wrap
        if (dst.deep()) {
            // While still serial, set up all the sample counts
            dst.deepdata()->set_all_samples(A.deepdata()->all_samples());
            return mul_impl_deep(dst, A, b, roi, nthreads);
        }
        bool ok;
        OIIO_DISPATCH_COMMON_TYPES2(ok, "div", mul_impl, dst.spec().format,
                                    A.spec().format, dst, A, b, roi, nthreads);
        return ok;
    }
    // Remaining cases: error
    dst.error("ImageBufAlgo::div(): at least one argument must be an image");
    return false;
}
// Scalar multiply-and-add: dst = A * b + c, broadcasting the two scalars
// across every channel of the roi.  Fails (setting an error on dst) if A
// is uninitialized.
bool ImageBufAlgo::mad (ImageBuf &dst, const ImageBuf &A, float b, float c,
                        ROI roi, int nthreads)
{
    if (! A.initialized()) {
        dst.error ("Uninitialized input image");
        return false;
    }
    if (! IBAprep (roi, &dst, &A))
        return false;
    // Expand the scalars into per-channel arrays for the impl.
    std::vector<float> scale (roi.chend, b);
    std::vector<float> bias (roi.chend, c);
    bool ok = false;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "mad", mad_implf, dst.spec().format,
                                 A.spec().format, dst, A,
                                 &scale[0], &bias[0], roi, nthreads);
    return ok;
}
// Convolve src by the given kernel image, writing the result to dst.
// If `normalize` is true the kernel weights are normalized by the impl.
// The kernel data must be FLOAT; if it isn't, a converted copy is used.
bool ImageBufAlgo::convolve (ImageBuf &dst, const ImageBuf &src,
                             const ImageBuf &kernel, bool normalize,
                             ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &src, IBAprep_REQUIRE_SAME_NCHANNELS))
        return false;
    // Ensure we hand the impl a float kernel, converting only if needed.
    ImageBuf floatkernel;
    const ImageBuf *k = &kernel;
    if (kernel.spec().format != TypeDesc::FLOAT) {
        floatkernel.copy (kernel, TypeDesc::FLOAT);
        k = &floatkernel;
    }
    bool ok = false;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "convolve", convolve_, dst.spec().format,
                                 src.spec().format, dst, src, *k, normalize,
                                 roi, nthreads);
    return ok;
}
// Rotate the image 270 degrees (clockwise by convention of the companion
// rotate90/rotate180 functions) over the region of interest.  Handles
// in-place operation by moving src's pixels into a temporary first.  If
// dst was uninitialized, its full/display window becomes the rotated full
// window of src.
bool ImageBufAlgo::rotate270 (ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads) {
    if (&dst == &src) {    // Handle in-place operation
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return rotate270 (dst, tmp, roi, nthreads);
    }
    pvt::LoggedTimer logtime("IBA::rotate270");
    ROI src_roi = roi.defined() ? roi : src.roi();
    ROI src_roi_full = src.roi_full();
    // Rotated full ROI swaps width and height, and keeps its origin
    // where the original origin was.
    ROI dst_roi_full (src_roi_full.xbegin,
                      src_roi_full.xbegin+src_roi_full.height(),
                      src_roi_full.ybegin,
                      src_roi_full.ybegin+src_roi_full.width(),
                      src_roi_full.zbegin, src_roi_full.zend,
                      src_roi_full.chbegin, src_roi_full.chend);
    // The destination region: x comes from the source's y range; y comes
    // from the source's x range reflected against the full window's xend.
    ROI dst_roi (src_roi.ybegin, src_roi.yend,
                 src_roi_full.xend-src_roi.xend,
                 src_roi_full.xend-src_roi.xbegin,
                 src_roi.zbegin, src_roi.zend,
                 src_roi.chbegin, src_roi.chend);
    // A quarter turn exchanges width and height.
    ASSERT (dst_roi.width() == src_roi.height() &&
            dst_roi.height() == src_roi.width());
    bool dst_initialized = dst.initialized();
    if (! IBAprep (dst_roi, &dst, &src))
        return false;
    if (! dst_initialized)
        dst.set_roi_full (dst_roi_full);
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "rotate270", rotate270_,
                                 dst.spec().format, src.spec().format,
                                 dst, src, dst_roi, nthreads);
    return ok;
}
// Clamp each pixel channel of src into [min[c], max[c]], writing to dst.
// A null `min` or `max` pointer means "unbounded" on that side (filled
// with -/+ FLT_MAX).  If `clampalpha01` is true, the alpha channel is
// additionally clamped to [0,1] by the impl.
bool ImageBufAlgo::clamp (ImageBuf &dst, const ImageBuf &src,
                          const float *min, const float *max,
                          bool clampalpha01, ROI roi, int nthreads)
{
    if (! IBAprep (roi, &dst, &src))
        return false;
    // Substitute unbounded defaults for any missing limits.
    std::vector<float> defmin, defmax;
    if (min == nullptr) {
        defmin.assign (dst.nchannels(), -std::numeric_limits<float>::max());
        min = &defmin[0];
    }
    if (max == nullptr) {
        defmax.assign (dst.nchannels(), std::numeric_limits<float>::max());
        max = &defmax[0];
    }
    bool ok = false;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "clamp", clamp_, dst.spec().format,
                                 src.spec().format, dst, src, min, max,
                                 clampalpha01, roi, nthreads);
    return ok;
}
// Full image multiply-and-add: dst = A * B + C, all three operands images.
// Fails (setting an error on dst) if any input is uninitialized.
bool ImageBufAlgo::mad (ImageBuf &dst, const ImageBuf &A_, const ImageBuf &B_, const ImageBuf &C_, ROI roi, int nthreads) {
    const ImageBuf *A = &A_, *B = &B_, *C = &C_;
    if (!A->initialized() || !B->initialized() || !C->initialized()) {
        dst.error ("Uninitialized input image");
        return false;
    }
    // To avoid the full cross-product of dst/A/B/C types, force A,B,C to
    // all be the same data type, copying if we have to.
    TypeDesc abc_type = type_merge (A->spec().format, B->spec().format,
                                    C->spec().format);
    ImageBuf Anew, Bnew, Cnew;
    if (A->spec().format != abc_type) {
        Anew.copy (*A, abc_type);
        A = &Anew;
    }
    if (B->spec().format != abc_type) {
        Bnew.copy (*B, abc_type);
        B = &Bnew;
    }
    if (C->spec().format != abc_type) {
        Cnew.copy (*C, abc_type);
        C = &Cnew;
    }
    // After the conversions above, all inputs share one format.
    ASSERT (A->spec().format == B->spec().format &&
            A->spec().format == C->spec().format);
    if (! IBAprep (roi, &dst, A, B, C))
        return false;
    bool ok;
    // Only two types need dispatching: dst's and the merged input type.
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "mad", mad_impl, dst.spec().format,
                                 abc_type, dst, *A, *B, *C, roi, nthreads);
    return ok;
}