static void SuperPixelThreaded( Image& target, const GenericImage<P>& source, const DebayerInstance& instance )
{
   int target_w = source.Width() >> 1;
   int target_h = source.Height() >> 1;
   target.AllocateData( target_w, target_h, 3, ColorSpace::RGB );
   target.Status().Initialize( "SuperPixel debayering", target_h );

   int numberOfThreads = Thread::NumberOfThreads( target_h, 1 );
   int rowsPerThread = target_h/numberOfThreads;

   AbstractImage::ThreadData data( target, target_h );

   ReferenceArray<SuperPixelThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new SuperPixelThread<P>( data, target, source, instance,
                                            i*rowsPerThread,
                                            (j < numberOfThreads) ? j*rowsPerThread : target_h ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   target.Status() = data.status;
}
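// Illustrative sketch (not part of the module): the super-pixel method collapses each 2x2 CFA
// cell into one RGB pixel, which is why the target is half the source size in each axis. The
// loop below assumes an RGGB pattern and plain float buffers; the actual per-row work is done
// by SuperPixelThread<P>, whose implementation is not shown here.
static void SuperPixelSketch( const float* cfa, int srcW, int srcH,
                              float* R, float* G, float* B )
{
   int dstW = srcW >> 1, dstH = srcH >> 1;
   for ( int y = 0; y < dstH; ++y )
      for ( int x = 0; x < dstW; ++x )
      {
         const float* p = cfa + 2*y*srcW + 2*x;  // top-left corner of the 2x2 cell
         R[y*dstW + x] = p[0];                   // R
         G[y*dstW + x] = 0.5f*(p[1] + p[srcW]);  // average of the two green samples
         B[y*dstW + x] = p[srcW + 1];            // B
      }
}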
template <class P1, class P2>
static void ApplyFilter_2( GenericImage<P1>& image, const GenericImage<P2>& sharp,
                           float amount, float threshold, float deringing,
                           float rangeLow, float rangeHigh,
                           pcl_bool disableExtension, int c, pcl_bool highPass )
{
   float rangeWidth = 1 + rangeHigh + rangeLow;
   bool isRange = rangeWidth + 1 != 1;

   StandardStatus callback;
   StatusMonitor monitor;
   monitor.SetCallback( &callback );
   monitor.Initialize( "<end><cbr>Larson-Sekanina filter", image.NumberOfPixels() );

   for ( int x = 0; x < image.Width(); ++x )
      for ( int y = 0; y < image.Height(); ++y, ++monitor )
      {
         double f1, f2;
         P1::FromSample( f1, image.Pixel( x, y, c ) );
         P2::FromSample( f2, sharp.Pixel( x, y ) );

         Apply_PixelValues( f1, f2, threshold, deringing, amount, highPass );

         if ( disableExtension )
            image.Pixel( x, y, c ) = P1::ToSample( f1 );
         else
         {
            if ( isRange )
               f1 = (f1 + rangeLow)/rangeWidth;
            image.Pixel( x, y, c ) = P1::ToSample( pcl::Range( f1, 0.0, 1.0 ) );
         }
      }

   if ( disableExtension )
      Console().WarningLn( "<end><cbr>*** Warning: Dynamic range extension has been disabled - check pixel values!" );
}
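// Worked example of the dynamic range extension above (values chosen for illustration only):
// with rangeLow = 0.2 and rangeHigh = 0.5, rangeWidth = 1 + 0.5 + 0.2 = 1.7, so a filtered
// value of -0.2 maps to (-0.2 + 0.2)/1.7 = 0 and a value of 1.5 maps to (1.5 + 0.2)/1.7 = 1;
// intermediate values land inside [0,1] before the final pcl::Range() clamp.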
template <class P>
static void Rotate180( GenericImage<P>& image )
{
   size_type N = image.NumberOfPixels();
   int n = image.NumberOfChannels();

   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Rotate 180 degrees", n*N );

   for ( int c = 0; c < n; ++c, image.Status() += N )
      for ( int y0 = 0, y1 = image.Height()-1; y0 <= y1; ++y0, --y1 )
      {
         typename P::sample* f0 = image.ScanLine( y0, c );
         typename P::sample* f1 = image.ScanLine( y1, c );

         if ( y0 != y1 )
         {
            int x0 = 0, x1 = image.Width()-1;
            while ( x0 < x1 )
            {
               pcl::Swap( f0[x0], f1[x1] );
               pcl::Swap( f0[x1], f1[x0] );
               ++x0;
               --x1;
            }
            if ( x0 == x1 )
               pcl::Swap( f0[x0], f1[x0] );
         }
         else
            for ( typename P::sample* f = f0, * g = f0+image.Width()-1; f < g; )
               pcl::Swap( *f++, *g-- );
      }
}
static void VNGThreaded( Image& target, const GenericImage<P>& source, const DebayerInstance& instance )
{
   int target_w = source.Width();
   int target_h = source.Height();
   target.AllocateData( target_w, target_h, 3, ColorSpace::RGB );
   target.Status().Initialize( "VNG debayering", target_h-4 );

   int numberOfThreads = Thread::NumberOfThreads( target_h-4, 1 );
   int rowsPerThread = (target_h - 4)/numberOfThreads;

   AbstractImage::ThreadData data( target, target_h-4 );

   ReferenceArray<VNGThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new VNGThread<P>( data, target, source, instance,
                                     i*rowsPerThread + 2,
                                     (j < numberOfThreads) ? j*rowsPerThread + 2 : target_h-2 ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   // Copy the top and bottom two rows from the adjacent ones.
   for ( int col = 0; col < target_w; col++ )
      for ( int i = 0; i < 3; i++ )
      {
         target.Pixel( col, 0, i ) = target.Pixel( col, 1, i ) = target.Pixel( col, 2, i );
         target.Pixel( col, target_h-1, i ) = target.Pixel( col, target_h-2, i ) = target.Pixel( col, target_h-3, i );
      }

   target.Status() = data.status;
}
static void Apply( GenericImage<P>& image, const LocalHistogramEqualizationInstance& instance )
{
   if ( image.IsColor() )
   {
      Image L;
      image.GetLightness( L );
      L.Status() = image.Status();
      Apply( L, instance );
      image.Status() = L.Status();
      image.SetLightness( L );
      return;
   }

   // Create a copy of the luminance to evaluate the histogram from.
   GenericImage<P> imageCopy( image );
   imageCopy.EnsureUnique(); // not strictly necessary, but safer

   size_type N = image.NumberOfPixels();
   int numberOfThreads = Thread::NumberOfThreads( image.Height(), 1 );
   int rowsPerThread = image.Height()/numberOfThreads;

   image.Status().Initialize( "CLAHE", N );

   AbstractImage::ThreadData data( image, N );

   // Create the processing threads.
   ReferenceArray<LocalHistogramEqualizationThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new LocalHistogramEqualizationThread<P>( data, instance, image, imageCopy,
                                                            i*rowsPerThread,
                                                            (j < numberOfThreads) ? j*rowsPerThread : image.Height() ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   image.Status() = data.status;
}
static void ApplyInPlaceFourierTransform( GenericImage<P>& image, FFTDirection::value_type dir,
                                          bool parallel, int maxProcessors )
{
   int w = FFTC::OptimizedLength( image.Width() );
   int h = FFTC::OptimizedLength( image.Height() );

   if ( w != image.Width() || h != image.Height() )
   {
      StatusCallback* s = image.GetStatusCallback(); // don't update status here
      image.SetStatusCallback( 0 );
      image.ShiftToCenter( w, h );
      image.SetStatusCallback( s );
   }

   bool statusInitialized = false;
   if ( image.Status().IsInitializationEnabled() )
   {
      image.Status().Initialize( (dir == FFTDirection::Backward) ? "Inverse FFT" : "FFT",
                                 image.NumberOfSelectedChannels()*size_type( w + h ) );
      image.Status().DisableInitialization();
      statusInitialized = true;
   }

   try
   {
      FFTC F( h, w, image.Status() );
      F.EnableParallelProcessing( parallel, maxProcessors );
      for ( int c = image.FirstSelectedChannel(); c <= image.LastSelectedChannel(); ++c )
         F( image[c], image[c], (dir == FFTDirection::Backward) ? PCL_FFT_BACKWARD : PCL_FFT_FORWARD );

      if ( statusInitialized )
         image.Status().EnableInitialization();
   }
   catch ( ... )
   {
      if ( statusInitialized )
         image.Status().EnableInitialization();
      throw;
   }
}
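// Illustrative sketch (an assumption about what FFTC::OptimizedLength() provides, not its
// actual implementation): FFT performance is best when the transform length factors into
// small primes, so the image is recentered into a padded frame of such a length before
// transforming. The helper below finds the next length >= n with no prime factors other
// than 2, 3 and 5.
static int NextSmoothLength( int n )
{
   for ( int m = (n < 1) ? 1 : n; ; ++m )
   {
      int k = m;
      while ( k % 2 == 0 ) k /= 2;
      while ( k % 3 == 0 ) k /= 3;
      while ( k % 5 == 0 ) k /= 5;
      if ( k == 1 )
         return m; // m is FFT-friendly
   }
}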
template <class P>
static void Rotate90CW( GenericImage<P>& image )
{
   image.SetUnique();

   int w = image.Width();
   int h = image.Height();
   int h1 = h - 1;
   int n = image.NumberOfChannels();
   size_type N = image.NumberOfPixels();
   typename GenericImage<P>::color_space cs0 = image.ColorSpace();

   StatusMonitor status = image.Status();

   typename P::sample** f0 = 0;

   try
   {
      if ( image.Status().IsInitializationEnabled() )
         status.Initialize( "Rotate 90 degrees, clockwise", n*N );

      f0 = image.ReleaseData();

      typename GenericImage<P>::sample_array tmp( N );

      for ( int c = 0; c < n; ++c, status += N )
      {
         typename P::sample* f = f0[c];
         typename P::sample* t = tmp.Begin();
         ::memcpy( t, f, N*P::BytesPerSample() );
         for ( int y = 0; y < h; ++y )
            for ( int x = 0, h1y = h1-y; x < w; ++x, ++t )
               f[x*h + h1y] = *t;
      }

      image.ImportData( f0, h, w, n, cs0 ).Status() = status;
   }
   catch ( ... )
   {
      if ( f0 != 0 )
      {
         for ( int c = 0; c < n; ++c )
            if ( f0[c] != 0 )
               image.Allocator().Deallocate( f0[c] );
         image.Allocator().Deallocate( f0 );
         image.FreeData();
      }
      throw;
   }
}
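// Illustrative sketch (not part of the module): the rotation above maps source pixel (x, y)
// to destination pixel (h-1-y, x) in an image whose dimensions become h x w, which in the
// row-major destination buffer is offset x*h + (h-1-y). The same index arithmetic on plain
// arrays, for a single channel:
static void Rotate90CWSketch( const float* src, float* dst, int w, int h )
{
   for ( int y = 0; y < h; ++y )
      for ( int x = 0; x < w; ++x )
         dst[x*h + (h-1 - y)] = src[y*w + x]; // destination image is h pixels wide, w pixels tall
}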
static void Apply( GenericImage<P>& image, const PhotometricSuperflatInstance& instance )
{
   PolynomialSurface* S;

   if ( !File::Exists( instance.starDatabasePath ) )
      throw Error( "No such file: " + instance.starDatabasePath );

   S = new PolynomialSurface( instance.starDatabasePath, image.Width(), image.Height() );
   //S->PrintCatalog();
   S->PrintCatalogSummary();
   S->PlotXYKeyedToRelativeFlux( false );
   String eqn = S->ComputeBestFitModel( instance.fitDegree );
   S->PlotXYKeyedToRelativeFlux( true );
   S->ShowBestFitModelImage();
   delete S;
}
template <class P>
static void HorizontalMirror( GenericImage<P>& image )
{
   size_type N = image.NumberOfPixels();
   int n = image.NumberOfChannels();

   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Horizontal mirror", n*N );

   for ( int c = 0; c < n; ++c, image.Status() += N )
      for ( int y = 0; y < image.Height(); ++y )
         for ( typename P::sample* f = image.ScanLine( y, c ), * g = f + image.Width()-1; f < g; )
            pcl::Swap( *f++, *g-- );
}
template <class P>
static void VerticalMirror( GenericImage<P>& image )
{
   size_type N = image.NumberOfPixels();
   int n = image.NumberOfChannels();

   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Vertical mirror", n*N );

   for ( int c = 0; c < n; ++c, image.Status() += N )
      for ( int y0 = 0, y1 = image.Height()-1; y0 < y1; ++y0, --y1 )
         for ( typename P::sample* f0 = image.ScanLine( y0, c ),
                                 * f1 = image.ScanLine( y1, c ),
                                 * fw = f0 + image.Width(); f0 < fw; )
            pcl::Swap( *f0++, *f1++ );
}
static void ApplyInverseRealFourierTransform_2( GenericImage<P>& image, const GenericImage<P1>& dft,
                                                bool parallel, int maxProcessors )
{
   if ( dft.IsEmpty() )
   {
      image.FreeData();
      return;
   }

   int w = dft.Width();
   int h = dft.Height();
   image.AllocateData( 2*(w - 1), h, dft.NumberOfChannels(), dft.ColorSpace() );

   bool statusInitialized = false;
   if ( image.Status().IsInitializationEnabled() )
   {
      image.Status().Initialize( "Inverse FFT", image.NumberOfChannels()*size_type( w + h ) );
      image.Status().DisableInitialization();
      statusInitialized = true;
   }

   try
   {
      FFTR F( h, w, image.Status() );
      F.EnableParallelProcessing( parallel, maxProcessors );
      for ( int c = 0; c < image.NumberOfChannels(); ++c )
         F( image[c], dft[c] );

      if ( statusInitialized )
         image.Status().EnableInitialization();
   }
   catch ( ... )
   {
      if ( statusInitialized )
         image.Status().EnableInitialization();
      throw;
   }
}
template <class P>
static void Apply( GenericImage<P>& image, const MorphologicalTransformation& transformation )
{
   if ( image.IsEmptySelection() )
      return;

   image.EnsureUnique();

   int n = transformation.OverlappingDistance();
   if ( n > image.Height() || n > image.Width() )
   {
      image.Zero();
      return;
   }

   /*
    * Dilation requires a reflected structure. We'll unreflect it once the
    * transformation has finished.
    */
   bool didReflect = false;
   if ( transformation.Operator().IsDilation() != transformation.Structure().IsReflected() )
   {
      const_cast<StructuringElement&>( transformation.Structure() ).Reflect();
      didReflect = true;
   }

   int numberOfRows = image.SelectedRectangle().Height();
   int numberOfThreads = transformation.IsParallelProcessingEnabled() ?
            Min( transformation.MaxProcessors(), pcl::Thread::NumberOfThreads( numberOfRows, n ) ) : 1;
   int rowsPerThread = numberOfRows/numberOfThreads;

   size_type N = image.NumberOfSelectedSamples();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Morphological transformation, " + transformation.Operator().Description(), N );

   ThreadData<P> data( image, transformation, N );

   ReferenceArray<Thread<P> > threads;
   for ( int i = 0, j = 1, y0 = image.SelectedRectangle().y0; i < numberOfThreads; ++i, ++j )
      threads.Add( new Thread<P>( data,
                                  y0 + i*rowsPerThread,
                                  y0 + ((j < numberOfThreads) ? j*rowsPerThread : numberOfRows),
                                  i > 0,
                                  j < numberOfThreads ) );
   try
   {
      AbstractImage::RunThreads( threads, data );
      if ( didReflect )
         const_cast<StructuringElement&>( transformation.Structure() ).Reflect();
   }
   catch ( ... )
   {
      if ( didReflect )
         const_cast<StructuringElement&>( transformation.Structure() ).Reflect();
      throw;
   }

   image.SetStatusCallback( nullptr );

   int c0 = image.SelectedChannel();
   Point p0 = image.SelectedRectangle().LeftTop();
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
   {
      if ( i > 0 )
         image.Mov( threads[i].UpperOverlappingRegion(),
                    Point( p0.x, p0.y + i*rowsPerThread ), c0 );
      if ( j < numberOfThreads )
         image.Mov( threads[i].LowerOverlappingRegion(),
                    Point( p0.x, p0.y + j*rowsPerThread - threads[i].LowerOverlappingRegion().Height() ), c0 );
   }

   image.Status() = data.status;

   threads.Destroy();
}
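// Illustrative sketch (an assumption about the partitioning scheme used above, not code from
// the module): each thread processes a contiguous band of rows; because a structuring element
// with overlapping distance n reads rows beyond its band, the threads keep private copies of
// their boundary strips ("overlapping regions") that are merged back afterwards via Mov().
// The band boundaries follow the same arithmetic as the construction loop above:
static void PartitionRowsSketch( int numberOfRows, int numberOfThreads )
{
   int rowsPerThread = numberOfRows/numberOfThreads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
   {
      int startRow = i*rowsPerThread;                                      // inclusive
      int endRow = (j < numberOfThreads) ? j*rowsPerThread : numberOfRows; // exclusive; last thread absorbs the remainder
      (void)startRow; (void)endRow; // a real worker thread would be constructed with this range
   }
}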
static void WriteJP2KImage( const GenericImage<P>& img, const ImageInfo& info, const ImageOptions& options,
                            jas_stream_t* jp2Stream, jas_image_t* jp2Image, int jp2Format,
                            const JPEG2000ImageOptions& jp2Options )
{
   jas_matrix_t* pixels = nullptr;

   try
   {
      pixels = jas_matrix_create( 1, img.Width() );
      if ( pixels == nullptr )
         throw Error( "Memory allocation error writing JPEG2000 image." );

      for ( int c = 0; c < img.NumberOfChannels(); ++c )
      {
         for ( int y = 0; y < img.Height(); ++y )
         {
            const typename P::sample* f = img.ScanLine( y, c );

            if ( options.bitsPerSample == 8 )
            {
               if ( jp2Options.signedSample )
               {
                  int8 v;
                  for ( int x = 0; x < img.Width(); ++x )
                  {
                     P::FromSample( v, *f++ );
                     jas_matrix_set( pixels, 0, x, v );
                  }
               }
               else
               {
                  uint8 v;
                  for ( int x = 0; x < img.Width(); ++x )
                  {
                     P::FromSample( v, *f++ );
                     jas_matrix_set( pixels, 0, x, v );
                  }
               }
            }
            else
            {
               if ( jp2Options.signedSample )
               {
                  int16 v;
                  for ( int x = 0; x < img.Width(); ++x )
                  {
                     P::FromSample( v, *f++ );
                     jas_matrix_set( pixels, 0, x, v );
                  }
               }
               else
               {
                  uint16 v;
                  for ( int x = 0; x < img.Width(); ++x )
                  {
                     P::FromSample( v, *f++ );
                     jas_matrix_set( pixels, 0, x, v );
                  }
               }
            }

            jas_image_writecmpt( jp2Image, c, 0, y, img.Width(), 1, pixels );
         }
      }

      IsoString jp2OptionsStr;
      jp2OptionsStr.AppendFormat( "mode=%s", jp2Options.lossyCompression ? "real" : "int" );

      if ( jp2Options.lossyCompression )
         jp2OptionsStr.AppendFormat( " rate=%g", jp2Options.compressionRate );

      if ( jp2Options.tiledImage )
      {
         jp2OptionsStr.AppendFormat( " tilewidth=%d", Range( jp2Options.tileWidth, 8, img.Width() ) );
         jp2OptionsStr.AppendFormat( " tileheight=%d", Range( jp2Options.tileHeight, 8, img.Height() ) );
      }

      if ( jp2Options.numberOfLayers > 1 )
      {
         jp2OptionsStr.Append( " ilyrrates=" );
         float dr = (jp2Options.lossyCompression ? jp2Options.compressionRate : 1.0F)/jp2Options.numberOfLayers;
         for ( int l = 1; ; )
         {
            jp2OptionsStr.AppendFormat( "%g", l*dr );
            if ( ++l == jp2Options.numberOfLayers )
               break;
            jp2OptionsStr.Append( ',' );
         }

         jp2OptionsStr.Append( " prg=" );
         switch ( jp2Options.progressionOrder )
         {
         default:
         case JPEG2000ProgressionOrder::LRCP: jp2OptionsStr.Append( "lrcp" ); break;
         case JPEG2000ProgressionOrder::RLCP: jp2OptionsStr.Append( "rlcp" ); break;
         case JPEG2000ProgressionOrder::RPCL: jp2OptionsStr.Append( "rpcl" ); break;
         case JPEG2000ProgressionOrder::PCRL: jp2OptionsStr.Append( "pcrl" ); break;
         case JPEG2000ProgressionOrder::CPRL: jp2OptionsStr.Append( "cprl" ); break;
         }
      }

      if ( jas_image_encode( jp2Image, jp2Stream, jp2Format, jp2OptionsStr.Begin() ) < 0 )
         throw Error( "Unable to encode JPEG2000 image." );

      jas_matrix_destroy( pixels ), pixels = nullptr;
   }
   catch ( ... )
   {
      if ( pixels != nullptr )
         jas_matrix_destroy( pixels );
      throw;
   }
}
template <class P>
static void Apply( GenericImage<P>& image, const Translation& translation )
{
   if ( translation.Delta() == 0.0 )
      return;

   int width = image.Width();
   int height = image.Height();
   if ( width == 0 || height == 0 )
      return;

   image.EnsureUnique();

   typename P::sample* f = nullptr;
   typename P::sample** f0 = nullptr;

   int n = image.NumberOfChannels();
   typename GenericImage<P>::color_space cs0 = image.ColorSpace();

   StatusMonitor status = image.Status();

   int numberOfThreads = translation.IsParallelProcessingEnabled() ?
            Min( translation.MaxProcessors(), pcl::Thread::NumberOfThreads( height, 1 ) ) : 1;
   int rowsPerThread = height/numberOfThreads;

   try
   {
      size_type N = size_type( width )*size_type( height );
      if ( status.IsInitializationEnabled() )
         status.Initialize( String().Format( "Translate dx=%.3lf, dy=%.3lf, ",
                                             translation.Delta().x, translation.Delta().y )
                            + translation.Interpolation().Description(),
                            size_type( n )*N );

      f0 = image.ReleaseData();

      for ( int c = 0; c < n; ++c )
      {
         ThreadData<P> data( translation.Delta(), width, height, status, N );
         data.f = f = image.Allocator().AllocatePixels( size_type( width )*size_type( height ) );
         data.fillValue = (c < translation.FillValues().Length()) ?
               P::ToSample( translation.FillValues()[c] ) : P::MinSampleValue();

         ReferenceArray<Thread<P> > threads;
         for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
            threads.Add( new Thread<P>( data,
                                        translation.Interpolation().NewInterpolator<P>( f0[c], width, height ),
                                        i*rowsPerThread,
                                        (j < numberOfThreads) ? j*rowsPerThread : height ) );
         AbstractImage::RunThreads( threads, data );
         threads.Destroy();

         image.Allocator().Deallocate( f0[c] );
         f0[c] = f;
         f = nullptr;

         status = data.status;
      }

      image.ImportData( f0, width, height, n, cs0 ).Status() = status;
   }
   catch ( ... )
   {
      if ( f != nullptr )
         image.Allocator().Deallocate( f );
      if ( f0 != nullptr )
      {
         for ( int c = 0; c < n; ++c )
            if ( f0[c] != nullptr )
               image.Allocator().Deallocate( f0[c] );
         image.Allocator().Deallocate( f0 );
      }
      image.FreeData();
      throw;
   }
}
template <class P1, class P2>
static void Convolve_2( const GenericImage<P1>& image, GenericImage<P2>& sharp,
                        pcl_enum interpolation, float dR, float angleD, DPoint center, int c )
{
   PixelInterpolation* P = 0;
   PixelInterpolation::Interpolator<P1>* interpolator = 0;

   try
   {
      switch ( interpolation )
      {
      case LSInterpolation::Bilinear:
         P = new BilinearPixelInterpolation();
         break;
      default:
      case LSInterpolation::Bicubic:
         P = new BicubicPixelInterpolation();
         break;
      case LSInterpolation::BicubicSpline:
         P = new BicubicSplinePixelInterpolation();
         break;
      case LSInterpolation::BicubicBSpline:
         P = new BicubicBSplinePixelInterpolation();
         break;
      }

      interpolator = P->NewInterpolator<P1>( image[c], image.Width(), image.Height() );

      int w = image.Width() - 1;
      int h = image.Height() - 1;

      double fimg, fsharp;

      StatusMonitor monitor;
      monitor.Initialize( "<end><cbr>High-pass Larson-Sekanina filter", image.NumberOfPixels() );

      sharp.Zero();

      float dAlpha = Rad( angleD );
      for ( int x = 0; x < image.Width(); ++x )
         for ( int y = 0; y < image.Height(); ++y, ++monitor )
         {
            // Get the central value
            P1::FromSample( fimg, image.Pixel( x, y, c ) );
            fsharp = fimg+fimg;

            double r, theta;
            ToPolar( x, y, center, r, theta );

            DPoint delta;

            // Positive differential
            ToCartesian( r-dR, theta+dAlpha, center, delta );
            if ( delta.x < 0 )
               delta.x = Abs( delta.x );
            else if ( delta.x > w )
               delta.x = 2*w - delta.x;
            if ( delta.y < 0 )
               delta.y = Abs( delta.y );
            else if ( delta.y > h )
               delta.y = 2*h - delta.y;
            P1::FromSample( fimg, (*interpolator)( delta ) );
            fsharp -= fimg;

            // Negative differential
            ToCartesian( r-dR, theta-dAlpha, center, delta );
            if ( delta.x < 0 )
               delta.x = Abs( delta.x );
            else if ( delta.x > w )
               delta.x = 2*w - delta.x;
            if ( delta.y < 0 )
               delta.y = Abs( delta.y );
            else if ( delta.y > h )
               delta.y = 2*h - delta.y;
            P1::FromSample( fimg, (*interpolator)( delta ) );
            fsharp -= fimg;

            sharp.Pixel( x, y ) = P2::ToSample( fsharp );
         }

      delete interpolator;
      delete P;
   }
   catch ( ... )
   {
      if ( interpolator != 0 )
         delete interpolator;
      if ( P != 0 )
         delete P;
      throw;
   }
}
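// Illustrative sketch (an assumption about the ToPolar()/ToCartesian() helpers used above,
// whose definitions are not shown here): they convert between image coordinates and polar
// coordinates (r, theta) measured about the rotation center, which is what lets the filter
// sample I(r-dR, theta+/-dAlpha) around each pixel.
#include <cmath>

static void ToPolarSketch( double x, double y, const DPoint& center, double& r, double& theta )
{
   double dx = x - center.x, dy = y - center.y;
   r = std::sqrt( dx*dx + dy*dy );
   theta = std::atan2( dy, dx );
}

static void ToCartesianSketch( double r, double theta, const DPoint& center, DPoint& p )
{
   p.x = center.x + r*std::cos( theta );
   p.y = center.y + r*std::sin( theta );
}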
static void ReadJPEGImage( GenericImage<P>& img, JPEGReader& reader, JPEGFileData* fileData )
{
   if ( !reader.IsOpen() )
      throw JPEG::InvalidReadOperation( String() );

   JSAMPLE* buffer = nullptr;        // one-row sample array for scanline reading
   typename P::sample** v = nullptr; // pointers to destination scan lines

   try
   {
      // Set parameters for decompression.
      // Most parameters have already been established by Open().
      // We just ensure that we'll get either a grayscale or a RGB color image.
      if ( jpeg_decompressor->out_color_space != JCS_GRAYSCALE )
         jpeg_decompressor->out_color_space = JCS_RGB;

      // Start decompressor.
      ::jpeg_start_decompress( jpeg_decompressor );

      // Allocate pixel data.
      img.AllocateData( jpeg_decompressor->output_width, jpeg_decompressor->output_height,
                        jpeg_decompressor->output_components,
                        (jpeg_decompressor->out_color_space == JCS_GRAYSCALE) ? ColorSpace::Gray : ColorSpace::RGB );

      // Initialize status callback.
      if ( img.Status().IsInitializationEnabled() )
         img.Status().Initialize( String().Format( "Decompressing JPEG: %d channel(s), %dx%d pixels",
                                                   img.NumberOfChannels(), img.Width(), img.Height() ),
                                  img.NumberOfSamples() );

      //
      // Read pixels row by row.
      //

      // JSAMPLEs per row in output buffer.
      int row_stride = img.Width() * img.NumberOfChannels();

      // Make a one-row-high sample array.
      buffer = new JSAMPLE[ row_stride ];

      // JPEG organization is chunky; PCL images are planar.
      v = new typename P::sample*[ img.NumberOfChannels() ];

      while ( jpeg_decompressor->output_scanline < jpeg_decompressor->output_height )
      {
         ::jpeg_read_scanlines( jpeg_decompressor, &buffer, 1 );

         const JSAMPLE* b = buffer;

         for ( int c = 0; c < img.NumberOfChannels(); ++c )
            v[c] = img.ScanLine( jpeg_decompressor->output_scanline-1, c );

         for ( int i = 0; i < img.Width(); ++i )
            for ( int c = 0; c < img.NumberOfChannels(); ++c, ++b )
               *v[c]++ = P::IsFloatSample() ? typename P::sample( *b ) : P::ToSample( *b );

         img.Status() += img.Width()*img.NumberOfChannels();
      }

      // Clean up temporary structures.
      delete [] v, v = nullptr;
      delete [] buffer, buffer = nullptr;

      // Finish decompression.
      ::jpeg_finish_decompress( jpeg_decompressor );

      // ### TODO --> At this point we might check whether any corrupt-data
      // warnings occurred (test whether jerr.pub.num_warnings is nonzero).
   }
   catch ( ... )
   {
      reader.Close();
      if ( buffer != nullptr )
         delete [] buffer;
      if ( v != nullptr )
         delete [] v;
      img.FreeData();
      throw;
   }
}
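// Illustrative sketch (not part of the module): the inner loop above de-interleaves a
// "chunky" RGBRGB... scanline into one planar destination row per channel. The same idea
// on plain byte buffers:
static void DeinterleaveRowSketch( const unsigned char* chunky, unsigned char** planar,
                                   int width, int channels )
{
   for ( int x = 0; x < width; ++x )
      for ( int c = 0; c < channels; ++c )
         planar[c][x] = *chunky++; // source row is pixel-major, channel-minor
}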
template <class P>
inline static void Apply( GenericImage<P>& image, const IntegerResample& Z )
{
   int width = image.Width();
   int w0 = width;
   int height = image.Height();
   int h0 = height;

   Z.GetNewSizes( width, height );

   if ( width == w0 && height == h0 )
      return;

   if ( width == 0 || height == 0 )
   {
      image.FreeData();
      return;
   }

   image.EnsureUnique();

   typename P::sample* f = 0;
   typename P::sample** f0 = 0;

   int n = image.NumberOfChannels();
   size_type N = image.NumberOfPixels();
   typename GenericImage<P>::color_space cs0 = image.ColorSpace();

   StatusMonitor status = image.Status();

   int z = pcl::Abs( Z.ZoomFactor() );
   int z2 = z*z;
   int n2 = z2 >> 1;

   try
   {
      if ( status.IsInitializationEnabled() )
      {
         String info = (Z.ZoomFactor() > 0) ? "Upsampling" : "Downsampling";
         info.AppendFormat( " %d:%d, %dx%d",
                            (Z.ZoomFactor() > 0) ? z : 1, (Z.ZoomFactor() > 0) ? 1 : z,
                            width, height );

         if ( Z.ZoomFactor() < 0 )
         {
            info += ", ";
            switch ( Z.DownsampleMode() )
            {
            default:
            case IntegerDownsampleMode::Average: info += "average"; break;
            case IntegerDownsampleMode::Median:  info += "median"; break;
            case IntegerDownsampleMode::Maximum: info += "maximum"; break;
            case IntegerDownsampleMode::Minimum: info += "minimum"; break;
            }
         }

         status.Initialize( info, n*N );
      }

      GenericVector<typename P::sample> fm;
      if ( Z.ZoomFactor() < 0 && Z.DownsampleMode() == IntegerDownsampleMode::Median )
         fm = GenericVector<typename P::sample>( z2 );

      f0 = image.ReleaseData();

      for ( int c = 0; c < n; ++c, status += N )
      {
         f = image.Allocator().AllocatePixels( width, height );

         if ( Z.ZoomFactor() > 0 )
         {
            // Upsampling: replicate each source pixel into a z x z destination block.
            const typename P::sample* f0c = f0[c];
            for ( int y = 0; y < h0; ++y )
            {
               int yz = y*z;
               for ( int x = 0; x < w0; ++x )
               {
                  int xz = x*z;
                  typename P::sample v = *f0c++;
                  for ( int i = 0; i < z; ++i )
                  {
                     typename P::sample* fi = f + (size_type( yz + i )*width + xz);
                     for ( int j = 0; j < z; ++j )
                        *fi++ = v;
                  }
               }
            }
         }
         else
         {
            // Downsampling: reduce each z x z source block to a single destination pixel.
            typename P::sample* fz = f;
            for ( int y = 0; y < height; ++y )
            {
               const typename P::sample* fy = f0[c] + size_type( y )*z*w0;
               for ( int x = 0; x < width; ++x )
               {
                  const typename P::sample* fyx = fy + x*z;
                  switch ( Z.DownsampleMode() )
                  {
                  default:
                  case IntegerDownsampleMode::Average:
                     {
                        double s = 0;
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              s += fyx[j];
                        *fz++ = typename P::sample( P::IsFloatSample() ? s/z2 : Round( s/z2 ) );
                     }
                     break;
                  case IntegerDownsampleMode::Median:
                     {
                        typename P::sample* fmi = *fm;
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              *fmi++ = fyx[j];
                        *fz++ = (z & 1) ?
                           *Select( *fm, fm.At( z2 ), n2 ) :
                           P::FloatToSample( 0.5*(double( *Select( *fm, fm.At( z2 ), n2 ) )
                                                + double( *Select( *fm, fm.At( z2 ), n2-1 ) )) );
                     }
                     break;
                  case IntegerDownsampleMode::Maximum:
                     {
                        *fz = P::MinSampleValue();
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              if ( fyx[j] > *fz )
                                 *fz = fyx[j];
                        ++fz;
                     }
                     break;
                  case IntegerDownsampleMode::Minimum:
                     {
                        *fz = P::MaxSampleValue();
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              if ( fyx[j] < *fz )
                                 *fz = fyx[j];
                        ++fz;
                     }
                     break;
                  }
               }
            }
         }

         image.Allocator().Deallocate( f0[c] );
         f0[c] = f;
         f = 0;
      }

      image.ImportData( f0, width, height, n, cs0 ).Status() = status;
   }
   catch ( ... )
   {
      if ( f != 0 )
         image.Allocator().Deallocate( f );
      if ( f0 != 0 )
      {
         for ( int c = 0; c < n; ++c )
            if ( f0[c] != 0 )
               image.Allocator().Deallocate( f0[c] );
         image.Allocator().Deallocate( f0 );
      }
      image.FreeData();
      throw;
   }
}
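// Illustrative sketch (not part of the module): the Median downsample branch above gathers
// the z*z samples of each block and selects the middle element, or the mean of the two
// middle elements when the count is even. The same selection with the standard library:
#include <algorithm>
#include <vector>

static float BlockMedianSketch( std::vector<float> block ) // block.size() == z*z
{
   size_t n2 = block.size()/2;
   std::nth_element( block.begin(), block.begin() + n2, block.end() );
   float hi = block[n2];
   if ( block.size() & 1 )
      return hi;                      // odd count: single middle element
   std::nth_element( block.begin(), block.begin() + n2 - 1, block.end() );
   return 0.5f*(hi + block[n2 - 1]);  // even count: mean of the two middle elements
}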
template <class P>
static void Apply( GenericImage<P>& image, const Resample& resample )
{
   int width = image.Width();
   int w0 = width;
   int height = image.Height();
   int h0 = height;

   resample.GetNewSizes( width, height );

   if ( width == w0 && height == h0 )
      return;

   if ( width <= 0 || height <= 0 )
   {
      image.FreeData();
      return;
   }

   image.EnsureUnique();

   typename P::sample* f = nullptr;
   typename P::sample** f0 = nullptr;

   int n = image.NumberOfChannels();
   typename GenericImage<P>::color_space cs0 = image.ColorSpace();

   double rx = double( w0 )/width;
   double ry = double( h0 )/height;

   StatusMonitor status = image.Status();

   int numberOfThreads = resample.IsParallelProcessingEnabled() ?
            Min( resample.MaxProcessors(), pcl::Thread::NumberOfThreads( height, 1 ) ) : 1;
   int rowsPerThread = height/numberOfThreads;

   try
   {
      size_type N = size_type( width )*size_type( height );
      if ( status.IsInitializationEnabled() )
         status.Initialize( String().Format( "Resampling to %dx%d px, ", width, height )
                            + resample.Interpolation().Description(),
                            size_type( n )*N );

      f0 = image.ReleaseData();

      for ( int c = 0; c < n; ++c )
      {
         ThreadData<P> data( rx, ry, width, status, N );
         data.f = f = image.Allocator().AllocatePixels( width, height );

         ReferenceArray<Thread<P> > threads;
         for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
            threads.Add( new Thread<P>( data,
                                        resample.Interpolation().NewInterpolator<P>( f0[c], w0, h0 ),
                                        i*rowsPerThread,
                                        (j < numberOfThreads) ? j*rowsPerThread : height ) );
         AbstractImage::RunThreads( threads, data );
         threads.Destroy();

         image.Allocator().Deallocate( f0[c] );
         f0[c] = f;
         f = nullptr;

         status = data.status;
      }

      image.ImportData( f0, width, height, n, cs0 ).Status() = status;
   }
   catch ( ... )
   {
      if ( f != nullptr )
         image.Allocator().Deallocate( f );
      if ( f0 != nullptr )
      {
         for ( int c = 0; c < n; ++c )
            if ( f0[c] != nullptr )
               image.Allocator().Deallocate( f0[c] );
         image.Allocator().Deallocate( f0 );
      }
      image.FreeData();
      throw;
   }
}