static bool fill_corners_ (ImageBuf &dst, const float *topleft, const float *topright, const float *bottomleft, const float *bottomright, ROI origroi, ROI roi=ROI(), int nthreads=1) { if (nthreads != 1 && roi.npixels() >= 1000) { // Lots of pixels and request for multi threads? Parallelize. ImageBufAlgo::parallel_image ( OIIO::bind(fill_corners_<T>, OIIO::ref(dst), topleft, topright, bottomleft, bottomright, origroi, _1 /*roi*/, 1 /*nthreads*/), roi, nthreads); return true; } // Serial case float w = std::max (1, origroi.width() - 1); float h = std::max (1, origroi.height() - 1); for (ImageBuf::Iterator<T> p (dst, roi); !p.done(); ++p) { float u = (p.x() - origroi.xbegin) / w; float v = (p.y() - origroi.ybegin) / h; for (int c = roi.chbegin; c < roi.chend; ++c) p[c] = bilerp (topleft[c], topright[c], bottomleft[c], bottomright[c], u, v); } return true; }
// Copy src into dst with a circular (periodic) shift of
// (xshift, yshift, zshift), wrapping around the bounds of dstroi.
// DSTTYPE/SRCTYPE are the pixel types (declared by the enclosing
// template header).
static bool circular_shift_ (ImageBuf &dst, const ImageBuf &src,
                             int xshift, int yshift, int zshift,
                             ROI dstroi, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Possible multiple thread case -- recurse via parallel_image
        ImageBufAlgo::parallel_image (
            boost::bind(circular_shift_<DSTTYPE,SRCTYPE>, boost::ref(dst),
                        boost::cref(src), xshift, yshift, zshift,
                        dstroi, _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    int width = dstroi.width(), height = dstroi.height(), depth = dstroi.depth();
    ImageBuf::ConstIterator<SRCTYPE,DSTTYPE> s (src, roi);
    // The destination iterator is constructed without an ROI; it is
    // repositioned explicitly with pos() for every source pixel.
    ImageBuf::Iterator<DSTTYPE,DSTTYPE> d (dst);
    for ( ; ! s.done(); ++s) {
        // Shifted destination coordinate, wrapped periodically into
        // dstroi along each axis.
        int dx = s.x() + xshift;  OIIO::wrap_periodic (dx, dstroi.xbegin, width);
        int dy = s.y() + yshift;  OIIO::wrap_periodic (dy, dstroi.ybegin, height);
        int dz = s.z() + zshift;  OIIO::wrap_periodic (dz, dstroi.zbegin, depth);
        d.pos (dx, dy, dz);
        if (! d.exists())
            continue;   // wrapped position is outside dst's data window
        for (int c = roi.chbegin; c < roi.chend; ++c)
            d[c] = s[c];
    }
    return true;
}
void GetROIBounds(const ROI& roi, vector<Vec3F>& bounds) { bounds.push_back(Vec3F(1.0, 0.0, -roi.Left())); // left bounds.push_back(Vec3F(0.0, 1.0, -roi.Top())); // top bounds.push_back(Vec3F(-1.0, 0.0, roi.Right()-1)); // right bounds.push_back(Vec3F(0.0, -1.0, roi.Bottom()-1)); // bottom }
// Typed implementation of ImageBufAlgo::compare: accumulate per-pixel
// error statistics between A and B into `result`. Returns true iff no
// value exceeded failthresh. Atype/Btype are the pixel types (declared
// by the enclosing template header).
static bool compare_ (const ImageBuf &A, const ImageBuf &B,
                      float failthresh, float warnthresh,
                      ImageBufAlgo::CompareResults &result,
                      ROI roi, int nthreads)
{
    imagesize_t npels = roi.npixels();
    imagesize_t nvals = npels * roi.nchannels();   // total values compared
    int Achannels = A.nchannels(), Bchannels = B.nchannels();

    // Compare the two images.
    //
    double totalerror = 0;
    double totalsqrerror = 0;
    result.maxerror = 0;
    result.maxx=0, result.maxy=0, result.maxz=0, result.maxc=0;
    result.nfail = 0, result.nwarn = 0;
    float maxval = 1.0;  // max possible value

    // Iterate both images in lockstep; reads outside a data window
    // return black rather than being skipped.
    ImageBuf::ConstIterator<Atype> a (A, roi, ImageBuf::WrapBlack);
    ImageBuf::ConstIterator<Btype> b (B, roi, ImageBuf::WrapBlack);
    bool deep = A.deep();
    // Break up into batches to reduce cancelation errors as the error
    // sums become too much larger than the error for individual pixels.
    const int batchsize = 4096;   // As good a guess as any
    for ( ; ! a.done(); ) {
        double batcherror = 0;
        double batch_sqrerror = 0;
        if (deep) {
            for (int i = 0; i < batchsize && !a.done(); ++i, ++a, ++b) {
                bool warned = false, failed = false;  // For this pixel
                for (int c = roi.chbegin; c < roi.chend; ++c)
                    // NOTE(review): the sample count comes from `a` only;
                    // behavior when b has fewer deep samples at this pixel
                    // is not established here -- confirm deep_value's
                    // out-of-range semantics.
                    for (int s = 0, e = a.deep_samples(); s < e; ++s) {
                        compare_value (a, c, a.deep_value(c,s),
                                       b.deep_value(c,s), result, maxval,
                                       batcherror, batch_sqrerror,
                                       failed, warned, failthresh, warnthresh);
                    }
            }
        } else {  // non-deep
            for (int i = 0; i < batchsize && !a.done(); ++i, ++a, ++b) {
                bool warned = false, failed = false;  // For this pixel
                for (int c = roi.chbegin; c < roi.chend; ++c)
                    // Channels absent from either image compare as 0.
                    compare_value (a, c,
                                   c < Achannels ? a[c] : 0.0f,
                                   c < Bchannels ? b[c] : 0.0f,
                                   result, maxval, batcherror, batch_sqrerror,
                                   failed, warned, failthresh, warnthresh);
            }
        }
        totalerror += batcherror;
        totalsqrerror += batch_sqrerror;
    }
    // Summary statistics.
    // NOTE(review): nvals == 0 makes these divisions NaN, and identical
    // images give rms_error == 0 so PSNR becomes +inf -- presumably
    // tolerated by callers; confirm.
    result.meanerror = totalerror / nvals;
    result.rms_error = sqrt (totalsqrerror / nvals);
    result.PSNR = 20.0 * log10 (maxval / result.rms_error);
    return result.nfail == 0;
}
// Overwrite the "full" (display) window fields of `spec` with the
// origin and dimensions described by `newroi`.
void set_roi_full (ImageSpec &spec, const ROI &newroi)
{
    // Origin of the display window...
    spec.full_x = newroi.xbegin;
    spec.full_y = newroi.ybegin;
    spec.full_z = newroi.zbegin;
    // ...and its extent along each axis.
    spec.full_width  = newroi.width();
    spec.full_height = newroi.height();
    spec.full_depth  = newroi.depth();
}
// Rotate src by 180 degrees (reflect across both midlines of the display
// window) into dst. Handles dst==src in place via a temporary.
bool
ImageBufAlgo::rotate180 (ImageBuf &dst, const ImageBuf &src,
                         ROI roi, int nthreads)
{
    if (&dst == &src) {    // Handle in-place operation
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return rotate180 (dst, tmp, roi, nthreads);
    }
    ROI src_roi = roi.defined() ? roi : src.roi();
    ROI src_roi_full = src.roi_full();
    // Compute the destination ROI: the source ROI reflected across the
    // midline of the display window, in both x and y.
    int xoffset = src_roi.xbegin - src_roi_full.xbegin;
    int xstart = src_roi_full.xend - xoffset - src_roi.width();
    int yoffset = src_roi.ybegin - src_roi_full.ybegin;
    int ystart = src_roi_full.yend - yoffset - src_roi.height();
    ROI dst_roi (xstart, xstart+src_roi.width(),
                 ystart, ystart+src_roi.height(),
                 src_roi.zbegin, src_roi.zend,
                 src_roi.chbegin, src_roi.chend);
    ASSERT (dst_roi.width() == src_roi.width() &&
            dst_roi.height() == src_roi.height());
    // BUGFIX: check IBAprep's return value (as flip() does) instead of
    // silently proceeding with an unprepared destination buffer.
    if (! IBAprep (dst_roi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "rotate180", rotate180_,
                          dst.spec().format, src.spec().format,
                          dst, src, dst_roi, nthreads);
    return ok;
}
// Return true iff all channels of src within roi carry identical values
// per pixel (dispatched to the typed isMonochrome_ implementation).
bool
ImageBufAlgo::isMonochrome (const ImageBuf &src, ROI roi, int nthreads)
{
    // If no ROI is defined, use the data window of src.
    if (! roi.defined())
        roi = get_roi(src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    if (roi.nchannels() < 2)
        return true;  // 1 or fewer channels are always "monochrome"
    // The dispatch macro presumably expands to a return of the typed
    // call's result -- there is no fall-through return below it.
    OIIO_DISPATCH_TYPES ("isMonochrome", isMonochrome_,
                         src.spec().format, src, roi, nthreads);
    // FIXME - The nthreads argument is for symmetry with the rest of
    // ImageBufAlgo and for future expansion. But for right now, we
    // don't actually split by threads. Maybe later.
};
// Typed transpose implementation: write each source pixel of `roi` to
// its (y,x)-swapped position in dst. DSTTYPE/SRCTYPE are declared by
// the enclosing template header.
static bool
transpose_ (ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads)
{
    // Enough pixels and threading permitted? Subdivide via parallel_image.
    if (nthreads != 1 && roi.npixels() >= 1000) {
        ImageBufAlgo::parallel_image (
            boost::bind(transpose_<DSTTYPE,SRCTYPE>, boost::ref(dst),
                        boost::cref(src), _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    ImageBuf::ConstIterator<SRCTYPE,DSTTYPE> srcpel (src, roi);
    ImageBuf::Iterator<DSTTYPE,DSTTYPE> dstpel (dst);
    while (! srcpel.done()) {
        // Mirror the coordinates; skip targets outside dst's data window.
        dstpel.pos (srcpel.y(), srcpel.x(), srcpel.z());
        if (dstpel.exists()) {
            for (int c = roi.chbegin; c < roi.chend; ++c)
                dstpel[c] = srcpel[c];
        }
        ++srcpel;
    }
    return true;
}
/// Fix all non-finite pixels (nan/inf) using the specified approach bool ImageBufAlgo::fixNonFinite (ImageBuf &src, NonFiniteFixMode mode, int *pixelsFixed, ROI roi, int nthreads) { // If no ROI is defined, use the data window of src. if (! roi.defined()) roi = get_roi(src.spec()); roi.chend = std::min (roi.chend, src.nchannels()); // Initialize if (pixelsFixed) *pixelsFixed = 0; switch (src.spec().format.basetype) { case TypeDesc::FLOAT : return fixNonFinite_<float> (src, mode, pixelsFixed, roi, nthreads); case TypeDesc::HALF : return fixNonFinite_<half> (src, mode, pixelsFixed, roi, nthreads); case TypeDesc::DOUBLE: return fixNonFinite_<double> (src, mode, pixelsFixed, roi, nthreads); default: // All other format types aren't capable of having nonfinite // pixel values. return true; } }
// Flip src vertically (reflect across the horizontal midline of the
// display window) into dst. Handles dst==src in place via a temporary.
bool
ImageBufAlgo::flip(ImageBuf &dst, const ImageBuf &src, ROI roi, int nthreads)
{
    if (&dst == &src) {
        // In-place: steal src's pixels into a temporary, flip from that.
        ImageBuf tmp;
        tmp.swap (const_cast<ImageBuf&>(src));
        return flip (dst, tmp, roi, nthreads);
    }
    pvt::LoggedTimer logtime("IBA::flip");
    ROI srcroi = roi.defined() ? roi : src.roi();
    ROI fullroi = src.roi_full();
    // The destination ROI is the source ROI reflected across the
    // vertical midline of the display window.
    int top_offset = srcroi.ybegin - fullroi.ybegin;
    int flipped_ybegin = fullroi.yend - top_offset - srcroi.height();
    ROI dstroi (srcroi.xbegin, srcroi.xend,
                flipped_ybegin, flipped_ybegin + srcroi.height(),
                srcroi.zbegin, srcroi.zend,
                srcroi.chbegin, srcroi.chend);
    ASSERT (dstroi.width() == srcroi.width() &&
            dstroi.height() == srcroi.height());
    if (! IBAprep (dstroi, &dst, &src))
        return false;
    bool ok;
    OIIO_DISPATCH_COMMON_TYPES2 (ok, "flip", flip_,
                                 dst.spec().format, src.spec().format,
                                 dst, src, dstroi, nthreads);
    return ok;
}
// Deposit "salt" noise: set each channel value to saltval wherever a
// hash-based random draw falls below saltportion. With mono=true one
// draw per pixel decides all channels together. T is the pixel type
// (declared by the enclosing template header).
static bool
noise_salt_ (ImageBuf &dst, float saltval, float saltportion,
             bool mono, int seed, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Many pixels and threading allowed: split the ROI and recurse.
        ImageBufAlgo::parallel_image (
            OIIO::bind(noise_salt_<T>, OIIO::ref(dst), saltval, saltportion,
                       mono, seed, _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    for (ImageBuf::Iterator<T> pel (dst, roi); !pel.done(); ++pel) {
        const int px = pel.x(), py = pel.y(), pz = pel.z();
        float rnd = 0.0;
        for (int c = roi.chbegin; c < roi.chend; ++c) {
            // mono: draw once (on the first channel) and reuse; otherwise
            // draw a fresh deterministic random number per channel.
            if (c == roi.chbegin || !mono)
                rnd = hashrand (px, py, pz, c, seed);
            if (rnd < saltportion)
                pel[c] = saltval;
        }
    }
    return true;
}
// Fill `roi` of dst with `color`, compositing with "over" when the color
// carries a non-unit alpha (the formula color + r*(1-alpha) implies the
// color is alpha-premultiplied). T is the destination pixel type.
static bool
render_box_ (ImageBuf &dst, array_view<const float> color,
             ROI roi=ROI(), int nthreads=1)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Lots of pixels and request for multi threads? Parallelize.
        ImageBufAlgo::parallel_image (
            OIIO::bind(render_box_<T>, OIIO::ref(dst), color,
                       _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case.
    // Determine the fill alpha: from dst's designated alpha channel if
    // the color supplies it, else from a single trailing extra component,
    // else assume fully opaque.
    const int achan = dst.spec().alpha_channel;
    const int ncolor = int(color.size());
    float alpha;
    if (achan >= 0 && achan < ncolor)
        alpha = color[achan];
    else if (ncolor == roi.chend+1)
        alpha = color[roi.chend];
    else
        alpha = 1.0f;
    if (alpha == 1.0f) {
        // Opaque: straight assignment.
        for (ImageBuf::Iterator<T> pel (dst, roi); !pel.done(); ++pel)
            for (int c = roi.chbegin; c < roi.chend; ++c)
                pel[c] = color[c];
    } else {
        // Translucent: composite the color over the existing pixels.
        for (ImageBuf::Iterator<T> pel (dst, roi); !pel.done(); ++pel)
            for (int c = roi.chbegin; c < roi.chend; ++c)
                pel[c] = color[c] + pel[c] * (1.0f-alpha);   // "over"
    }
    return true;
}
// Public transpose entry point: swap x and y of src into dst.
bool
ImageBufAlgo::transpose (ImageBuf &dst, const ImageBuf &src,
                         ROI roi, int nthreads)
{
    pvt::LoggedTimer logtime("IBA::transpose");
    if (! roi.defined())
        roi = get_roi (src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    // The destination ROI is the source ROI with x and y swapped.
    ROI dst_roi (roi.ybegin, roi.yend, roi.xbegin, roi.xend,
                 roi.zbegin, roi.zend, roi.chbegin, roi.chend);
    // Remember whether dst already held pixels before IBAprep ran.
    bool dst_initialized = dst.initialized();
    if (! IBAprep (dst_roi, &dst))
        return false;
    if (! dst_initialized) {
        // dst was freshly allocated: also swap x/y of the full (display)
        // window so the result's framing matches the transposed source.
        ROI r = src.roi_full();
        ROI dst_roi_full (r.ybegin, r.yend, r.xbegin, r.xend,
                          r.zbegin, r.zend, r.chbegin, r.chend);
        dst.set_roi_full (dst_roi_full);
    }
    bool ok;
    if (dst.spec().format == src.spec().format) {
        // Same pixel format on both sides: single-type dispatch suffices.
        OIIO_DISPATCH_TYPES (ok, "transpose", transpose_,
                             dst.spec().format, dst, src, roi, nthreads);
    } else {
        OIIO_DISPATCH_COMMON_TYPES2 (ok, "transpose", transpose_,
                                     dst.spec().format, src.spec().format,
                                     dst, src, roi, nthreads);
    }
    return ok;
}
static inline bool isConstantColor_ (const ImageBuf &src, float *color, ROI roi, int nthreads) { // Iterate using the native typing (for speed). std::vector<T> constval (roi.nchannels()); ImageBuf::ConstIterator<T,T> s (src, roi); for (int c = roi.chbegin; c < roi.chend; ++c) constval[c] = s[c]; // Loop over all pixels ... for ( ; ! s.done(); ++s) { for (int c = roi.chbegin; c < roi.chend; ++c) if (constval[c] != s[c]) return false; } if (color) { ImageBuf::ConstIterator<T,float> s (src, roi); for (int c = 0; c < roi.chbegin; ++c) color[c] = 0.0f; for (int c = roi.chbegin; c < roi.chend; ++c) color[c] = s[c]; for (int c = roi.chend; c < src.nchannels(); ++c) color[c] = 0.0f; } return true; }
// Typed implementation of pixel-wise division: R = A / B over roi, with
// division by zero defined to produce 0. Rtype/Atype/Btype are declared
// by the enclosing template header.
static bool
div_impl (ImageBuf &R, const ImageBuf &A, const ImageBuf &B,
          ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Possible multiple thread case -- recurse via parallel_image
        ImageBufAlgo::parallel_image (
            boost::bind(div_impl<Rtype,Atype,Btype>, boost::ref(R),
                        boost::cref(A), boost::cref(B),
                        _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case: march the three buffers in lockstep.
    ImageBuf::Iterator<Rtype> rit (R, roi);
    ImageBuf::ConstIterator<Atype> ait (A, roi);
    ImageBuf::ConstIterator<Btype> bit (B, roi);
    while (! rit.done()) {
        for (int c = roi.chbegin; c < roi.chend; ++c) {
            const float denom = bit[c];
            rit[c] = (denom == 0.0f) ? 0.0f : (ait[c] / denom);
        }
        ++rit;  ++ait;  ++bit;
    }
    return true;
}
// Add deterministic hash-based gaussian noise (given mean and stddev) to
// every pixel of dst within roi. With mono=true, one deviate per pixel
// is applied to all channels. T is the pixel type (declared by the
// enclosing template header).
static bool
noise_gaussian_ (ImageBuf &dst, float mean, float stddev, bool mono,
                 int seed, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Many pixels and threading allowed: split the ROI and recurse.
        ImageBufAlgo::parallel_image (
            OIIO::bind(noise_gaussian_<T>, OIIO::ref(dst), mean, stddev,
                       mono, seed, _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    for (ImageBuf::Iterator<T> pel (dst, roi); !pel.done(); ++pel) {
        const int px = pel.x(), py = pel.y(), pz = pel.z();
        float deviate = 0.0;
        for (int c = roi.chbegin; c < roi.chend; ++c) {
            // mono: draw once (on the first channel) and reuse; otherwise
            // draw a fresh deviate per channel.
            if (c == roi.chbegin || !mono)
                deviate = mean + stddev * hashnormal (px, py, pz, c, seed);
            pel[c] = pel[c] + deviate;
        }
    }
    return true;
}
// Render a 3D checker pattern into dst: alternating color1/color2 tiles
// of dimensions `size`, with the pattern shifted by `offset`. T is the
// destination pixel type (declared by the enclosing template header).
static bool checker_ (ImageBuf &dst, Dim3 size,
                      const float *color1, const float *color2,
                      Dim3 offset, ROI roi, int nthreads=1)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Lots of pixels and request for multi threads? Parallelize.
        ImageBufAlgo::parallel_image (
            OIIO::bind(checker_<T>, OIIO::ref(dst), size, color1, color2,
                       offset, _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    for (ImageBuf::Iterator<T> p (dst, roi); !p.done(); ++p) {
        // Tile index along each axis. Integer division truncates toward
        // zero, so for coordinates below the offset the `+= (coord <
        // offset)` term adjusts the index by one, keeping the tile
        // parity consistent across the offset boundary.
        int xtile = (p.x()-offset.x)/size.x;  xtile += (p.x()<offset.x);
        int ytile = (p.y()-offset.y)/size.y;  ytile += (p.y()<offset.y);
        int ztile = (p.z()-offset.z)/size.z;  ztile += (p.z()<offset.z);
        // Parity of the summed tile indices selects which color to use.
        int v = xtile + ytile + ztile;
        if (v & 1)
            for (int c = roi.chbegin; c < roi.chend; ++c)
                p[c] = color2[c];
        else
            for (int c = roi.chbegin; c < roi.chend; ++c)
                p[c] = color1[c];
    }
    return true;
}
// Clamp every channel of dst within roi to its per-channel [min,max]
// range; optionally force the alpha channel into [0,1] as well. D is
// the pixel type (declared by the enclosing template header).
static bool
clamp_ (ImageBuf &dst, const float *min, const float *max,
        bool clampalpha01, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Lots of pixels and request for multi threads? Parallelize.
        ImageBufAlgo::parallel_image (
            boost::bind(clamp_<D>, boost::ref(dst), min, max, clampalpha01,
                        _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case: first pass clamps all channels to their ranges.
    for (ImageBuf::Iterator<D> pel (dst, roi); ! pel.done(); ++pel)
        for (int c = roi.chbegin; c < roi.chend; ++c)
            pel[c] = OIIO::clamp<float> (pel[c], min[c], max[c]);
    // Second pass (only if requested and alpha lies within the channel
    // range) pins alpha to [0,1].
    const int achan = dst.spec().alpha_channel;
    if (clampalpha01 && achan >= roi.chbegin && achan < roi.chend) {
        for (ImageBuf::Iterator<D> pel (dst, roi); ! pel.done(); ++pel)
            pel[achan] = OIIO::clamp<float> (pel[achan], 0.0f, 1.0f);
    }
    return true;
}
// Public entry point: compare images A and B, filling `result` with
// error statistics. Returns true iff no value exceeded failthresh
// (and false immediately when mixing deep and non-deep images).
bool
ImageBufAlgo::compare (const ImageBuf &A, const ImageBuf &B,
                       float failthresh, float warnthresh,
                       ImageBufAlgo::CompareResults &result,
                       ROI roi, int nthreads)
{
    // If no ROI is defined, use the union of the data windows of the two
    // images.
    if (! roi.defined())
        roi = roi_union (get_roi(A.spec()), get_roi(B.spec()));
    // Compare up to the larger channel count (the typed implementation
    // treats channels missing from one image as 0).
    roi.chend = std::min (roi.chend, std::max(A.nchannels(), B.nchannels()));
    // Deep and non-deep images cannot be compared
    if (B.deep() != A.deep())
        return false;
    bool ok;
    OIIO_DISPATCH_TYPES2 (ok, "compare", compare_,
                          A.spec().format, B.spec().format,
                          A, B, failthresh, warnthresh, result,
                          roi, nthreads);
    // FIXME - The nthreads argument is for symmetry with the rest of
    // ImageBufAlgo and for future expansion. But for right now, we
    // don't actually split by threads. Maybe later.
    return ok;
}
// Return true iff src is a single constant color over roi; optionally
// report that color through `color` (dispatched to the typed
// isConstantColor_ implementation).
bool
ImageBufAlgo::isConstantColor (const ImageBuf &src, float *color,
                               ROI roi, int nthreads)
{
    // If no ROI is defined, use the data window of src.
    if (! roi.defined())
        roi = get_roi(src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    if (roi.nchannels() == 0)
        return true;   // no channels: trivially constant
    // NOTE(review): zero-pixel ROIs are not guarded against here; verify
    // that the typed implementation tolerates an empty region.
    OIIO_DISPATCH_TYPES ("isConstantColor", isConstantColor_,
                         src.spec().format, src, color, roi, nthreads);
    // FIXME - The nthreads argument is for symmetry with the rest of
    // ImageBufAlgo and for future expansion. But for right now, we
    // don't actually split by threads. Maybe later.
};
// Typed implementation of range expansion (inverse of range
// compression): expand values of R in place over roi, either per
// channel or scaled uniformly by the expansion of the pixel's luma.
// Rtype is declared by the enclosing template header.
static bool
rangeexpand_ (ImageBuf &R, bool useluma, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Possible multiple thread case -- recurse via parallel_image
        ImageBufAlgo::parallel_image (
            boost::bind(rangeexpand_<Rtype>, boost::ref(R), useluma,
                        _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    const ImageSpec &Rspec (R.spec());
    int alpha_channel = Rspec.alpha_channel;
    int z_channel = Rspec.z_channel;
    // Luma-based expansion needs three usable color channels starting at
    // chbegin; fall back to per-channel mode if there are fewer, or if
    // the alpha or z channel lands among those three.
    if (roi.nchannels() < 3 ||
        (alpha_channel >= roi.chbegin && alpha_channel < roi.chbegin+3) ||
        (z_channel >= roi.chbegin && z_channel < roi.chbegin+3)) {
        useluma = false;  // No way to use luma
    }
    // BUGFIX: removed a stray ImageBuf::Iterator that was constructed
    // here and immediately shadowed by the loop's own iterator -- a
    // useless (and wasteful) extra iterator construction.
    for (ImageBuf::Iterator<Rtype> r (R, roi); !r.done(); ++r) {
        if (useluma) {
            // Rec.709-style weighted luma of the first three channels.
            float luma = 0.21264f * r[roi.chbegin] +
                         0.71517f * r[roi.chbegin+1] +
                         0.07219f * r[roi.chbegin+2];
            if (fabsf(luma) <= 1.0f)
                continue;   // Not HDR, no range compression needed
            // Scale all color channels by the ratio of expanded to
            // original luma, preserving hue.
            float scale = rangeexpand (luma) / luma;
            for (int c = roi.chbegin; c < roi.chend; ++c) {
                if (c == alpha_channel || c == z_channel)
                    continue;   // never rescale alpha or depth
                r[c] = r[c] * scale;
            }
        } else {
            for (int c = roi.chbegin; c < roi.chend; ++c) {
                if (c == alpha_channel || c == z_channel)
                    continue;   // never rescale alpha or depth
                r[c] = rangeexpand (r[c]);
            }
        }
    }
    return true;
}
// Paste srcroi of src into dst with its upper-left corner at
// (xbegin, ybegin, zbegin) and its first channel at chbegin.
bool
ImageBufAlgo::paste (ImageBuf &dst, int xbegin, int ybegin,
                     int zbegin, int chbegin,
                     const ImageBuf &src, ROI srcroi, int nthreads)
{
    if (! srcroi.defined())
        srcroi = get_roi(src.spec());
    // Destination region: srcroi translated to the requested origin.
    ROI dstroi (xbegin, xbegin+srcroi.width(),
                ybegin, ybegin+srcroi.height(),
                zbegin, zbegin+srcroi.depth(),
                chbegin, chbegin+srcroi.nchannels());
    ROI dstroi_save = dstroi;  // save the original (IBAprep may alter dstroi)
    // BUGFIX: check IBAprep's return value instead of ignoring it; on
    // failure the destination is not set up and we must not proceed.
    if (! IBAprep (dstroi, &dst))
        return false;
    // do the actual copying
    OIIO_DISPATCH_TYPES2 ("paste", paste_, dst.spec().format,
                          src.spec().format,
                          dst, dstroi_save, src, srcroi, nthreads);
    // Presumably the dispatch macro returns the typed call's result;
    // this is reached only when no matching type handler was found.
    return false;
}
// Transpose entry point (older variant in this file): swap x and y of
// src into dst.
bool
ImageBufAlgo::transpose (ImageBuf &dst, const ImageBuf &src,
                         ROI roi, int nthreads)
{
    if (! roi.defined())
        roi = get_roi (src.spec());
    roi.chend = std::min (roi.chend, src.nchannels());
    // The destination ROI is the source ROI with x and y swapped.
    ROI dst_roi (roi.ybegin, roi.yend, roi.xbegin, roi.xend,
                 roi.zbegin, roi.zend, roi.chbegin, roi.chend);
    // BUGFIX: check IBAprep's return value (as other IBA entry points in
    // this file do) instead of silently ignoring a failed preparation.
    if (! IBAprep (dst_roi, &dst))
        return false;
    OIIO_DISPATCH_TYPES2 ("transpose", transpose_, dst.spec().format,
                          src.spec().format, dst, src, roi, nthreads);
    // Presumably the dispatch macro returns the typed call's result;
    // this is reached only when no matching type handler was found.
    return false;
}
bool ImageBufAlgo::color_count (const ImageBuf &src, imagesize_t *count, int ncolors, const float *color, const float *eps, ROI roi, int nthreads) { // If no ROI is defined, use the data window of src. if (! roi.defined()) roi = get_roi(src.spec()); roi.chend = std::min (roi.chend, src.nchannels()); if (! eps) { float *localeps = ALLOCA (float, roi.chend); for (int c = 0; c < roi.chend; ++c) localeps[c] = 0.001f; eps = localeps; }
/*-----------------------------------------------------------------------**/
// Add `value` to every pixel of grey image `src` lying inside `roi`,
// clamping the result to the displayable range [0,255]; pixels outside
// the ROI are copied through unchanged. The result is written to `tgt`,
// which is resized to match src.
void add::addGrey(image &src, image &tgt, ROI roi, int value){
    tgt.resize(src.getNumberOfRows(), src.getNumberOfColumns());
    for (int i=0; i<src.getNumberOfRows(); i++){
        for (int j=0; j<src.getNumberOfColumns(); j++){
            if (roi.InROI(i,j)){
                // BUGFIX: clamp BEFORE storing. The original stored the
                // raw sum, then read it back to range-check it -- a
                // redundant get/set round trip that also silently breaks
                // if setPixel truncates out-of-range values.
                int v = src.getPixel(i,j) + value;
                if (v > 255)
                    v = 255;
                else if (v < 0)
                    v = 0;
                tgt.setPixel(i,j,v);
            }else{
                // Outside the ROI: pass the source pixel through.
                tgt.setPixel(i,j,src.getPixel(i,j));
            }
        }
    }
}
// Typed implementation of convolution: dst = src (*) kernel over roi,
// optionally normalizing the kernel so its weights sum to 1.
// DSTTYPE/SRCTYPE are declared by the enclosing template header.
static bool
convolve_ (ImageBuf &dst, const ImageBuf &src, const ImageBuf &kernel,
           bool normalize, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Lots of pixels and request for multi threads? Parallelize.
        ImageBufAlgo::parallel_image (
            boost::bind(convolve_<DSTTYPE,SRCTYPE>, boost::ref(dst),
                        boost::cref(src), boost::cref(kernel), normalize,
                        _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case
    // Optional normalization: divide results by the sum of all kernel
    // weights. NOTE(review): a kernel summing to zero makes scale
    // infinite -- confirm callers never pass normalize=true with a
    // zero-sum kernel.
    float scale = 1.0f;
    if (normalize) {
        scale = 0.0f;
        for (ImageBuf::ConstIterator<float> k (kernel); ! k.done(); ++k)
            scale += k[0];
        scale = 1.0f / scale;
    }
    // Per-channel accumulator for a single output pixel (stack buffer).
    float *sum = ALLOCA (float, roi.chend);
    ROI kroi = get_roi (kernel.spec());
    ImageBuf::Iterator<DSTTYPE> d (dst, roi);
    // Source reads outside the data window clamp to the nearest edge.
    ImageBuf::ConstIterator<SRCTYPE> s (src, roi, ImageBuf::WrapClamp);
    for ( ; ! d.done(); ++d) {
        for (int c = roi.chbegin; c < roi.chend; ++c)
            sum[c] = 0.0f;
        // Accumulate kernel-weighted source samples around this pixel.
        for (ImageBuf::ConstIterator<float> k (kernel, kroi); !k.done(); ++k) {
            float kval = k[0];
            s.pos (d.x() + k.x(), d.y() + k.y(), d.z() + k.z());
            for (int c = roi.chbegin; c < roi.chend; ++c)
                sum[c] += kval * s[c];
        }
        for (int c = roi.chbegin; c < roi.chend; ++c)
            d[c] = scale * sum[c];
    }
    return true;
}
// Check whether the given channel is constant over roi (presumably
// "equals `val` everywhere" -- dispatched to the typed
// isConstantChannel_ implementation; confirm against its definition).
bool
ImageBufAlgo::isConstantChannel (const ImageBuf &src, int channel,
                                 float val, ROI roi, int nthreads)
{
    // If no ROI is defined, use the data window of src.
    if (! roi.defined())
        roi = get_roi(src.spec());
    if (channel < 0 || channel >= src.nchannels())
        return false;  // that channel doesn't exist in the image
    OIIO_DISPATCH_TYPES ("isConstantChannel", isConstantChannel_,
                         src.spec().format, src, channel, val, roi, nthreads);
    // FIXME - The nthreads argument is for symmetry with the rest of
    // ImageBufAlgo and for future expansion. But for right now, we
    // don't actually split by threads. Maybe later.
};
// Compute per-channel pixel statistics of src over roi into `stats`
// (dispatched to the typed computePixelStats_ implementation).
bool
ImageBufAlgo::computePixelStats (PixelStats &stats, const ImageBuf &src,
                                 ROI roi, int nthreads)
{
    // Default to the full data window; when a ROI was supplied, clamp
    // its channel range to the image's channel count.
    if (! roi.defined())
        roi = get_roi (src.spec());
    else
        roi.chend = std::min (roi.chend, src.nchannels());
    int nchannels = src.spec().nchannels;
    if (nchannels == 0) {
        src.error ("%d-channel images not supported", nchannels);
        return false;
    }
    OIIO_DISPATCH_TYPES ("computePixelStats", computePixelStats_,
                         src.spec().format, src, stats, roi, nthreads);
    // Presumably the dispatch macro returns the typed call's result;
    // this is reached only when no matching type handler was found.
    return false;
}
// Typed implementation of in-place multiply: scale each channel of R
// over roi by its per-channel factor val[c]. Rtype is declared by the
// enclosing template header.
static bool
mul_impl (ImageBuf &R, const float *val, ROI roi, int nthreads)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Possible multiple thread case -- recurse via parallel_image
        ImageBufAlgo::parallel_image (
            boost::bind(mul_impl<Rtype>, boost::ref(R), val,
                        _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case.
    // BUGFIX: removed a stray ImageBuf::Iterator that was constructed
    // here and immediately shadowed by the loop's own iterator -- a
    // useless (and wasteful) extra iterator construction.
    for (ImageBuf::Iterator<Rtype> r (R, roi); !r.done(); ++r)
        for (int c = roi.chbegin; c < roi.chend; ++c)
            r[c] = r[c] * val[c];
    return true;
}
// Fill `roi` of dst with the constant per-channel values in `values`.
// T is the destination pixel type (declared by the enclosing template
// header).
static bool
fill_const_ (ImageBuf &dst, const float *values, ROI roi=ROI(), int nthreads=1)
{
    if (nthreads != 1 && roi.npixels() >= 1000) {
        // Lots of pixels and request for multi threads? Parallelize.
        ImageBufAlgo::parallel_image (
            OIIO::bind(fill_const_<T>, OIIO::ref(dst), values,
                       _1 /*roi*/, 1 /*nthreads*/),
            roi, nthreads);
        return true;
    }
    // Serial case: assign the constant color to every pixel in roi.
    for (ImageBuf::Iterator<T> pel (dst, roi); !pel.done(); ++pel)
        for (int c = roi.chbegin; c < roi.chend; ++c)
            pel[c] = values[c];
    return true;
}