static void Apply( GenericImage<P>& image, const ColorSaturationInstance& instance, bool useLUT = false )
{
   // No-op shortcut: an identity saturation curve leaves the image unchanged.
   if ( instance.Curve().IsIdentity() )
   {
      Console().WriteLn( "<end><cbr><* Identity *>" );
      return;
   }

   size_type N = image.NumberOfPixels();
   // Partition pixels among worker threads (minimum 16 pixels per thread).
   int numberOfThreads = Thread::NumberOfThreads( N, 16 );
   size_type pixelsPerThread = N/numberOfThreads;

   image.Status().Initialize( "Color saturation transformation, HSVL space", N );

   ThreadData data( image, N );
   if ( useLUT )
      data.lut = MakeLUT( instance ); // optional lookup table accelerating curve evaluation

   // Each thread processes a contiguous pixel range; the last one absorbs the remainder.
   ReferenceArray<ColorSaturationThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new ColorSaturationThread<P>( instance, data, image,
                                                 i*pixelsPerThread,
                                                 (j < numberOfThreads) ? j*pixelsPerThread : N ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   image.Status() = data.status;
}
static void SuperPixelThreaded( Image& target, const GenericImage<P>& source, const DebayerInstance& instance )
{
   // Each 2x2 CFA cell collapses into a single RGB pixel, so the output image
   // has half the linear dimensions of the source.
   const int outWidth = source.Width() >> 1;
   const int outHeight = source.Height() >> 1;
   target.AllocateData( outWidth, outHeight, 3, ColorSpace::RGB );
   target.Status().Initialize( "SuperPixel debayering", outHeight );

   // Split the output rows among worker threads (at least one row per thread).
   const int threadCount = Thread::NumberOfThreads( outHeight, 1 );
   const int rowsEach = outHeight/threadCount;

   AbstractImage::ThreadData data( target, outHeight );
   ReferenceArray<SuperPixelThread<P> > workers;
   for ( int k = 0; k < threadCount; ++k )
   {
      const int firstRow = k*rowsEach;
      // The final worker also takes any remainder rows.
      const int endRow = (k+1 < threadCount) ? firstRow + rowsEach : outHeight;
      workers.Add( new SuperPixelThread<P>( data, target, source, instance, firstRow, endRow ) );
   }
   AbstractImage::RunThreads( workers, data );
   workers.Destroy();

   target.Status() = data.status;
}
static void VNGThreaded( Image& target, const GenericImage<P>& source, const DebayerInstance& instance )
{
   // VNG interpolation needs a 2-pixel border on each side, so the worker
   // threads compute only target_h-4 interior rows; borders are filled below.
   int target_w = source.Width();
   int target_h = source.Height();
   target.AllocateData( target_w, target_h, 3, ColorSpace::RGB );
   target.Status().Initialize( "VNG debayering", target_h-4 );

   int numberOfThreads = Thread::NumberOfThreads( target_h-4, 1 );
   int rowsPerThread = (target_h - 4)/numberOfThreads;

   AbstractImage::ThreadData data( target, target_h-4 );
   ReferenceArray<VNGThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new VNGThread<P>( data, target, source, instance,
                                     i*rowsPerThread + 2, // +2 skips the top border rows
                                     (j < numberOfThreads) ? j*rowsPerThread + 2 : target_h-2 ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   // copy top and bottom two rows from the adjacent ones
   for ( int col = 0; col < target_w; col++ )
      for ( int i = 0; i < 3; i++ )
      {
         target.Pixel( col, 0, i ) = target.Pixel( col, 1, i ) = target.Pixel( col, 2, i );
         target.Pixel( col, target_h-1, i ) = target.Pixel( col, target_h-2, i ) = target.Pixel( col, target_h-3, i );
      }
   target.Status() = data.status;
}
void GenericImage::gen_normalMap(const char* filename)
{
    // Render this image's normal map as a 3-component byte image and save it
    // to `filename`, reporting success or failure on stdout.
    GenericImage dst;
    dst.width = width;
    dst.height = height;
    dst.components = 3;
    // NOTE(review): malloc result is not checked — a failed allocation would
    // crash in the loop below; confirm the intended failure policy.
    dst.pixels = (unsigned char*)malloc(width * height * 3);
    // NOTE(review): `nmap` is never released in this function — looks like a
    // leak; confirm who owns the buffer returned by gen_normal() and how it
    // was allocated before adding a free()/delete[].
    char* nmap = gen_normal();
    //cast from char to unsigned char to save as an normalmap image
    //and be able to look at the normalmap and make sense
    int index=0;
    unsigned char* p = (unsigned char*)dst.pixels;
    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width; x++) {
            // Shift signed components into the displayable [0,255] byte range.
            p[3*index + 0] = (unsigned char)(nmap[3*index + 0] + 127);
            p[3*index + 1] = (unsigned char)(nmap[3*index + 1] + 127);
            p[3*index + 2] = (unsigned char)(nmap[3*index + 2] + 127);
            //fprintf(stderr, "normal map[%d](%d, %d, %d)\n", index, nmap[3*index + 0], nmap[3*index + 1], nmap[3*index + 2]);
            index++;
        }
    }
    bool ret = dst.save(filename);
    if(ret)
        printf("success! normalmap saved to image %s\n", filename);
    else
        printf("error! normalmap save to image %s failed\n", filename);
}
static void ReadJP2KImage( GenericImage<P>& img, jas_stream_t* jp2Stream, jas_image_t* jp2Image )
{
   // Decode a JPEG2000 image via the JasPer library into `img`, one component
   // row at a time. `jp2Stream` is unused here; ownership of both JasPer
   // handles stays with the caller.
   int width = jas_image_cmptwidth( jp2Image, 0 );
   int height = jas_image_cmptheight( jp2Image, 0 );
   int numberOfChannels = jas_image_numcmpts( jp2Image );

   jas_matrix_t* pixels = nullptr;
   try
   {
      // Single-row matrix used as a transfer buffer for every component.
      pixels = jas_matrix_create( 1, width );
      if ( pixels == nullptr )
         throw Error( "Memory allocation error reading JPEG2000 image" );

      // Allocate pixel data
      img.AllocateData( width, height, numberOfChannels,
                        (jas_clrspc_fam( jas_image_clrspc( jp2Image ) ) == JAS_CLRSPC_FAM_GRAY) ? ColorSpace::Gray : ColorSpace::RGB );

      for ( int c = 0; c < numberOfChannels; ++c )
      {
         int n = jas_image_cmptprec( jp2Image, c );       // bits per sample of this component
         bool s = jas_image_cmptsgnd( jp2Image, c ) != 0; // signed sample values?
         for ( int y = 0; y < height; ++y )
         {
            jas_image_readcmpt( jp2Image, c, 0, y, width, 1, pixels );
            typename P::sample* f = img.ScanLine( y, c );
            // Convert according to precision/signedness.
            // NOTE(review): any precision other than 8 bits is handled as
            // 16 bits — confirm upstream guarantees n is either 8 or 16.
            if ( n == 8 )
            {
               if ( s )
                  for ( int x = 0; x < width; ++x )
                     *f++ = P::ToSample( int8( jas_matrix_get( pixels, 0, x ) ) );
               else
                  for ( int x = 0; x < width; ++x )
                     *f++ = P::ToSample( uint8( jas_matrix_get( pixels, 0, x ) ) );
            }
            else
            {
               if ( s )
                  for ( int x = 0; x < width; ++x )
                     *f++ = P::ToSample( int16( jas_matrix_get( pixels, 0, x ) ) );
               else
                  for ( int x = 0; x < width; ++x )
                     *f++ = P::ToSample( uint16( jas_matrix_get( pixels, 0, x ) ) );
            }
         }
      }

      jas_matrix_destroy( pixels ), pixels = nullptr;
   }
   catch ( ... )
   {
      // Release the working matrix on any failure, then propagate.
      if ( pixels != nullptr )
         jas_matrix_destroy( pixels );
      throw;
   }
}
static void CombineChannels( GenericImage<P>& img, int colorSpace, const String& baseId, const Rect& r, const GenericImage<P0>* src0, const GenericImage<P1>* src1, const GenericImage<P2>* src2 )
{
   // Merge up to three single-channel source images into `img` over rect `r`.
   // For RGB the sources replace R/G/B directly; for any other color space the
   // sources provide target-space components that are converted back to RGB.
   // `baseId` is unused in this routine (kept for interface compatibility).
   bool allChannels = src0 != 0 && src1 != 0 && src2 != 0;

   typename P::sample* R = img.PixelData( 0 );
   typename P::sample* G = img.PixelData( 1 );
   typename P::sample* B = img.PixelData( 2 );

   const RGBColorSystem& rgbws = img.RGBWorkingSpace();

   for ( int y = r.y0; y < r.y1; ++y )
   {
      // Null source -> that component is absent for the whole row.
      const typename P0::sample* data0 = (src0 != 0) ? src0->PixelAddress( r.x0, y ) : 0;
      const typename P1::sample* data1 = (src1 != 0) ? src1->PixelAddress( r.x0, y ) : 0;
      const typename P2::sample* data2 = (src2 != 0) ? src2->PixelAddress( r.x0, y ) : 0;

      for ( int x = r.x0; x < r.x1; ++x, ++img.Status() )
      {
         if ( colorSpace == ColorSpaceId::RGB )
         {
            // Direct replacement; a missing source leaves that channel untouched.
            if ( data0 != 0 ) P0::FromSample( *R++, *data0++ );
            if ( data1 != 0 ) P1::FromSample( *G++, *data1++ );
            if ( data2 != 0 ) P2::FromSample( *B++, *data2++ );
         }
         else
         {
            RGBColorSystem::sample ch0, ch1, ch2;
            RGBColorSystem::sample r, g, b;
            if ( !allChannels )
            {
               // Some components are missing: derive the complete component
               // set from the current pixel, then overwrite the provided ones.
               P::FromSample( r, *R );
               P::FromSample( g, *G );
               P::FromSample( b, *B );
               FromRGB( colorSpace, rgbws, ch0, ch1, ch2, r, g, b );
            }
            if ( data0 != 0 ) P0::FromSample( ch0, *data0++ );
            if ( data1 != 0 ) P1::FromSample( ch1, *data1++ );
            if ( data2 != 0 ) P2::FromSample( ch2, *data2++ );
            ToRGB( colorSpace, rgbws, r, g, b, ch0, ch1, ch2 );
            *R++ = P::ToSample( r );
            *G++ = P::ToSample( g );
            *B++ = P::ToSample( b );
         }
      }
   }
}
static void WriteJPEGImage( const GenericImage<P>& image, JPEGWriter* writer ) { if ( writer == 0 || !writer->IsOpen() ) throw Error( "JPEG format: Attempt to write an image before creating a file" ); StandardStatus status; image.SetStatusCallback( &status ); image.SelectNominalChannels(); // JPEG doesn't support alpha channels writer->WriteImage( image ); }
template <class P> static void Rotate180( GenericImage<P>& image )
{
   // Rotate the image 180 degrees in place by swapping mirrored pixel pairs,
   // channel by channel, walking row pairs inward from both ends.
   size_type N = image.NumberOfPixels();
   int n = image.NumberOfChannels();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Rotate 180 degrees", n*N );
   for ( int c = 0; c < n; ++c, image.Status() += N )
      for ( int y0 = 0, y1 = image.Height()-1; y0 <= y1; ++y0, --y1 )
      {
         typename P::sample* f0 = image.ScanLine( y0, c );
         typename P::sample* f1 = image.ScanLine( y1, c );
         if ( y0 != y1 )
         {
            // Two distinct rows: exchange them while reversing both.
            int x0 = 0, x1 = image.Width()-1;
            while ( x0 < x1 )
            {
               pcl::Swap( f0[x0], f1[x1] );
               pcl::Swap( f0[x1], f1[x0] );
               ++x0;
               --x1;
            }
            // Odd width: the center column still needs a single swap.
            if ( x0 == x1 )
               pcl::Swap( f0[x0], f1[x0] );
         }
         else
            // Middle row of an odd-height image: reverse it in place.
            for ( typename P::sample* f = f0, * g = f0+image.Width()-1; f < g; )
               pcl::Swap( *f++, *g-- );
      }
}
GenericImage<rgb3> to_small_gs( GenericImage<rgb3> const &input_image ) {
	// Produce a same-sized image whose pixels carry the 8-bit grayscale value
	// of each source pixel.
	GenericImage<rgb3> result{input_image.width( ), input_image.height( )};
	auto const to_gray = []( rgb3 const &px ) {
		return static_cast<uint8_t>( px.too_float_gs( ) );
	};
	std::transform( input_image.cbegin( ), input_image.cend( ), result.begin( ), to_gray );
	return result;
}
static void ApplyInverseRealFourierTransform_1( GenericImage<P>& image, const DComplexImage& dft, bool parallel, int maxProcessors )
{
   // Run the inverse DFT into a temporary double-precision image and hand the
   // result back to the destination image.
   DImage result;
   result.Status() = image.Status();

   // Release the destination's pixel data first to keep peak memory usage low.
   image.FreeData();

   ApplyInverseRealFourierTransform_2( result, dft, parallel, maxProcessors );

   image.SetStatusCallback( 0 );
   image.Assign( result );
   image.Status() = result.Status();
}
static void ReadJPEGImage( GenericImage<P>& image, JPEGReader* reader, int& readCount )
{
   // Read a JPEG image through `reader`. `readCount` tracks how many times the
   // same file has been read so re-reads can reopen it with a private reader.
   if ( reader == 0 || !reader->IsOpen() )
      throw Error( "JPEG format: Attempt to read an image before opening a file" );
   try
   {
      /*
       * The readCount thing is a trick to allow reading the same JPEG image
       * multiple times from the same format instance. Ugly but heck, it works.
       */
      if ( readCount )
      {
         // Re-read: swap in a privately-owned reader opened on the same file.
         // It is deleted below (or in the catch handler on failure).
         String filePath = reader->Path();
         reader = new JPEGReader;
         reader->Open( filePath );
      }

      StandardStatus status;
      image.SetStatusCallback( &status );
      reader->ReadImage( image );

      if ( readCount )
         delete reader;
      ++readCount;
   }
   catch ( ... )
   {
      // Only the privately-created reader is owned here; the caller's original
      // reader (readCount == 0) must not be deleted.
      if ( readCount )
         delete reader;
      throw;
   }
}
static void Apply( GenericImage<P>& image, const PhotometricSuperflatInstance& instance )
{
   // Build a polynomial surface model from the star database and report/plot
   // the best-fit photometric superflat for this image's dimensions.
   if ( !File::Exists( instance.starDatabasePath ) )
      throw Error( "No such file: " + instance.starDatabasePath );
   // Stack allocation instead of raw new/delete: the original leaked the
   // surface object if any of the member calls below threw an exception.
   PolynomialSurface S( instance.starDatabasePath, image.Width(), image.Height() );
   //S.PrintCatalog();
   S.PrintCatalogSummary();
   S.PlotXYKeyedToRelativeFlux( false );
   // The fitted model equation is currently unused beyond its side effects.
   String eqn = S.ComputeBestFitModel( instance.fitDegree );
   S.PlotXYKeyedToRelativeFlux( true );
   S.ShowBestFitModelImage();
}
static void Apply( GenericImage<P>& image, const AnnotationInstance& instance )
{
   // Render the annotation into an offscreen bitmap; the renderer reports the
   // anchor-point offset through relPosX/relPosY.
   int relPosX = 0;
   int relPosY = 0;
   Bitmap bmp = AnnotationRenderer::CreateAnnotationBitmap( instance, relPosX, relPosY, false );

   // blend bitmap to the image
   Point where( instance.annotationPositionX - relPosX, instance.annotationPositionY - relPosY );
   image.Blend( bmp, where );
}
template <class P1, class P2> static void ApplyFilter_2( GenericImage<P1>& image, const GenericImage<P2>& sharp, float amount, float threshold, float deringing, float rangeLow, float rangeHigh, pcl_bool disableExtension, int c, pcl_bool highPass ) { float rangeWidth = 1 + rangeHigh + rangeLow; bool isRange = rangeWidth + 1 != 1; StandardStatus callback; StatusMonitor monitor; monitor.SetCallback( &callback ); monitor.Initialize( "<end><cbr>Larson-Sekanina filter", image.NumberOfPixels() ); for ( int x = 0; x < image.Width(); ++x ) for ( int y = 0; y < image.Height(); ++y, ++monitor ) { double f1, f2; P1::FromSample( f1, image.Pixel( x, y, c ) ); P2::FromSample( f2, sharp.Pixel( x, y ) ); Apply_PixelValues( f1, f2, threshold, deringing, amount, highPass ); if ( disableExtension ) image.Pixel( x, y, c ) = P1::ToSample( f1 ); else { if ( isRange ) f1 = (f1 + rangeLow)/rangeWidth; image.Pixel( x, y, c ) = P1::ToSample( pcl::Range( f1, 0.0, 1.0 ) ); } } if ( disableExtension ) Console().WarningLn( "<end><cbr>*** Warning: Dynamic range extension has been disabled - check pixel values!" ); }
template <class P> static void Apply( GenericImage<P>& image, const HistogramTransformation& H )
{
   // Apply the histogram transformation in place to the selected rectangle,
   // splitting the rows among worker threads.
   if ( image.IsEmptySelection() )
      return;

   image.SetUnique(); // detach shared pixel data before in-place modification

   Rect r = image.SelectedRectangle();
   int h = r.Height();

   int numberOfThreads = H.IsParallelProcessingEnabled() ? Min( H.MaxProcessors(), pcl::Thread::NumberOfThreads( h, 1 ) ) : 1;
   int rowsPerThread = h/numberOfThreads;

   H.UpdateFlags();

   size_type N = image.NumberOfSelectedSamples();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Histogram transformation", N );

   ThreadData<P> data( image, H, N );

   // Last thread takes the remainder rows.
   PArray<Thread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new Thread<P>( data, i*rowsPerThread, (j < numberOfThreads) ? j*rowsPerThread : h ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   image.Status() = data.status;
}
template <class P> static void Apply( GenericImage<P>& image, const typename P::sample* lut )
{
   // Apply a per-sample lookup table to the selected region/channels.
   // NOTE(review): samples are used directly as LUT indices, so this assumes
   // an integer sample type whose full value range is covered by `lut` —
   // confirm against the callers that build the table.
   if ( image.IsEmptySelection() )
      return;

   Rect r = image.SelectedRectangle();

   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "LUT-based histogram transformation", image.NumberOfSelectedSamples() );

   for ( int c = image.FirstSelectedChannel(), w = r.Width(); c <= image.LastSelectedChannel(); ++c )
      for ( int y = r.y0; y < r.y1; ++y )
         for ( typename P::sample* p = image.ScanLine( y, c ) + r.x0, * pw = p + w; p < pw; ++p )
            *p = lut[*p];

   // The loops above do not advance the monitor; count everything at once.
   image.Status() += image.NumberOfSelectedSamples();
}
void Normalize( GenericImage<P>& image )
{
   // Truncate nominal samples to the extended range [-rangeLow, 1+rangeHigh],
   // then rescale the result to the normalized [0,1] range.
   image.Status().Initialize( "Normalizing sample values", 2*image.NumberOfNominalSamples() );
   // Keep the two operations below from reinitializing the monitor.
   image.Status().DisableInitialization();
   image.SelectNominalChannels();
   image.Truncate( -instance.rangeLow, 1 + instance.rangeHigh );  // N*n
   image.Normalize();  // N*n
   image.Status().EnableInitialization();
}
static void Apply( GenericImage<P>& image, const FFTConvolution& F )
{
   // Lazily build the transformed response function on first use — from the
   // explicit filter when available, otherwise from the response image — then
   // convolve the selection in place.
   Rect r = image.SelectedRectangle();
   if ( F.m_h.IsNull() )
   {
      if ( F.m_filter.IsNull() )
         F.m_h = Initialize( F.m_image, r.Width(), r.Height(), F.IsParallelProcessingEnabled(), F.MaxProcessors() );
      else
         F.m_h = Initialize( *F.m_filter, r.Width(), r.Height(), F.IsParallelProcessingEnabled(), F.MaxProcessors() );
   }
   Convolve( image, *F.m_h, F.IsParallelProcessingEnabled(), F.MaxProcessors() );
}
static void ApplyInverseRealFourierTransform( GenericImage<P>& image, const ImageVariant& dft, bool parallel, int maxProcessors )
{
   // An invalid or empty transform yields an empty destination image.
   if ( !dft || dft->IsEmpty() )
   {
      image.FreeData();
      return;
   }
   // Dispatch on the sample size of the complex transform.
   switch ( dft.BitsPerSample() )
   {
   case 32:
      ApplyInverseRealFourierTransform_1( image, static_cast<const FComplexImage&>( *dft ), parallel, maxProcessors );
      break;
   case 64:
      ApplyInverseRealFourierTransform_1( image, static_cast<const DComplexImage&>( *dft ), parallel, maxProcessors );
      break;
   }
}
template <class P> static void HorizontalMirror( GenericImage<P>& image )
{
   // Reverse every scanline of every channel in place.
   size_type pixelsPerChannel = image.NumberOfPixels();
   int channelCount = image.NumberOfChannels();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Horizontal mirror", channelCount*pixelsPerChannel );
   for ( int c = 0; c < channelCount; ++c, image.Status() += pixelsPerChannel )
      for ( int y = 0; y < image.Height(); ++y )
      {
         typename P::sample* left = image.ScanLine( y, c );
         typename P::sample* right = left + image.Width() - 1;
         while ( left < right )
            pcl::Swap( *left++, *right-- );
      }
}
GenericImage<rgb3> FilterDAWGS::filter( GenericImage<rgb3> const &input_image ) {
	// Collect the sorted set of distinct grayscale values present in the image.
	std::vector<uint32_t> const keys = [&]( ) {
		std::vector<uint32_t> v{};
		v.resize( input_image.size( ) );
		daw::algorithm::parallel::transform( input_image.begin( ), input_image.end( ), v.begin( ),
		                                     []( auto rgb ) { return daw::imaging::FilterDAWGS::too_gs( rgb ); } );
		daw::algorithm::parallel::sort( v.begin( ), v.end( ) );
		v.erase( std::unique( v.begin( ), v.end( ) ), v.end( ) );
		return v;
	}( );

	// 256 or fewer distinct values already fit into 8 bits, so no range
	// compression is needed — convert directly to grayscale.
	if( keys.size( ) <= 256 ) {
		std::cerr << "Already a grayscale image or has enough room for all "
		             "possible values and no compression needed:"
		          << keys.size( ) << std::endl;
		return impl::to_small_gs( input_image );
	}

	// Partition the distinct values into 256 evenly populated bins; bins[n]
	// holds the lower-bound key value of bin n, bins[255] the largest key.
	std::array<uint32_t, 256> bins = [&keys]( ) {
		std::array<uint32_t, 256> a{};
		auto const inc = static_cast<float>( keys.size( ) ) / 256.0f;
		for( size_t n=0; n<255; ++n ) {
			a[n] = keys[static_cast<size_t>(static_cast<float>(n)*inc)];
		}
		a[255] = keys.back( );
		return a;
	}( );

	GenericImage<rgb3> output_image{input_image.width( ), input_image.height( )};

	// Map every pixel to the index of the first bin whose bound reaches its
	// grayscale value (linear scan over the 256 bin bounds).
	daw::algorithm::parallel::transform( input_image.cbegin( ), input_image.cend( ), output_image.begin( ),
	                                     [&bins]( auto rgb ) -> uint8_t {
		auto const val = FilterDAWGS::too_gs( rgb );
		for( uint8_t n=0; n<static_cast<uint8_t>( bins.size( ) ); ++n ) {
			if( bins[n] >= val ) {
				return n;
			}
		}
		// Unreachable: bins[255] is the maximum key present in the image.
		std::abort( );
	} );
	return output_image;
}
static void Apply( GenericImage<P>& image, const CurvesTransformationInstance& instance, bool useLUT = false )
{
   // Count how many curve passes will run so the status monitor can be
   // initialized with the correct total.
   int numberOfCurves = 0;

   // The combined RGB/K curve applies once per nominal channel.
   if ( !instance[CurveIndex::RGBK].IsIdentity() )
      numberOfCurves = image.NumberOfNominalChannels();

   if ( image.IsColor() )
   {
      // Per-channel curves, plus one pass each for the CIE L/a/b/c group and
      // the HSV hue/saturation group when any member is non-identity.
      for ( int c = 0; c < image.NumberOfNominalChannels(); ++c )
         if ( !instance[c].IsIdentity() )
            ++numberOfCurves;
      if ( !instance[CurveIndex::L].IsIdentity() || !instance[CurveIndex::a].IsIdentity() || !instance[CurveIndex::b].IsIdentity() || !instance[CurveIndex::c].IsIdentity() )
         ++numberOfCurves;
      if ( !instance[CurveIndex::H].IsIdentity() || !instance[CurveIndex::S].IsIdentity() )
         ++numberOfCurves;
   }

   if ( image.HasAlphaChannels() && !instance[CurveIndex::A].IsIdentity() )
      ++numberOfCurves;

   // Every curve is an identity transform: nothing to do.
   if ( numberOfCurves == 0 )
   {
      Console().WriteLn( "<end><cbr><* Identity *>" );
      return;
   }

   size_type N = image.NumberOfPixels();
   int numberOfThreads = Thread::NumberOfThreads( N, 256 ); // at least 256 pixels per thread
   size_type pixelsPerThread = N/numberOfThreads;

   image.Status().Initialize( "Curves transformation", numberOfCurves*N );

   ThreadData data( image, numberOfCurves*N );
   if ( useLUT )
      data.lut.Generate( image, instance ); // precomputed lookup tables for speed

   // Contiguous pixel ranges per thread; the last thread takes the remainder.
   ReferenceArray<CurvesThread<P> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new CurvesThread<P>( instance, data, image, i*pixelsPerThread, (j < numberOfThreads) ? j*pixelsPerThread : N ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   image.Status() = data.status;
}
template <class P> static void VerticalMirror( GenericImage<P>& image )
{
   // Swap scanlines pairwise — top with bottom — working toward the middle row.
   size_type pixelsPerChannel = image.NumberOfPixels();
   int channelCount = image.NumberOfChannels();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Vertical mirror", channelCount*pixelsPerChannel );
   for ( int c = 0; c < channelCount; ++c, image.Status() += pixelsPerChannel )
      for ( int top = 0, bottom = image.Height()-1; top < bottom; ++top, --bottom )
      {
         typename P::sample* a = image.ScanLine( top, c );
         typename P::sample* b = image.ScanLine( bottom, c );
         for ( typename P::sample* aEnd = a + image.Width(); a < aEnd; )
            pcl::Swap( *a++, *b++ );
      }
}
template <class P, class S> static void Apply( GenericImage<P>& image, S*, const ICCProfileTransformation& T, ICCProfileTransformation::transformation_handle transformation )
{
   // Apply an ICC color profile transformation in place to the selected
   // rectangle. The unnamed S* parameter is a type-dispatch tag only.
   if ( image.IsEmptySelection() || T.Profiles().IsEmpty() )
      return;

   // Only RGB and grayscale pixel data can be color-managed here.
   if ( image.ColorSpace() != ColorSpace::RGB && image.ColorSpace() != ColorSpace::Gray )
      throw Error( String().Format( "Unsupported color space %X in ICC color transformation.", image.ColorSpace() ) );

   image.EnsureUnique(); // detach shared pixel data before in-place modification

   Rect r = image.SelectedRectangle();
   int h = r.Height();

   int numberOfThreads = T.IsParallelProcessingEnabled() ? Min( T.MaxProcessors(), pcl::Thread::NumberOfThreads( h, 1 ) ) : 1;
   int rowsPerThread = h/numberOfThreads;

   size_type N = image.NumberOfSelectedPixels();
   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "In-place ICC color profile transformation", N );

   ThreadData<P> data( image, T, transformation, N );

   // Row ranges per thread; the last thread absorbs any remainder rows.
   ReferenceArray<Thread<P,S> > threads;
   for ( int i = 0, j = 1; i < numberOfThreads; ++i, ++j )
      threads.Add( new Thread<P,S>( data, i*rowsPerThread, (j < numberOfThreads) ? j*rowsPerThread : h ) );
   AbstractImage::RunThreads( threads, data );
   threads.Destroy();

   image.Status() = data.status;
}
template <class P> static void RealMinMax( const GenericImage<P>& image, const Rect& r, int c, double& min, double& max, int y0, int y1 ) { int w = r.Width(); min = max = image( r.LeftTop(), c ); for ( int y = r.y0+y0, y01 = r.y0+y1; y < y01; ++y ) { const typename P::sample* f = image.ScanLine( y, c ) + r.x0; const typename P::sample* fw = f + w; do { if ( *f < min ) min = *f; else if ( max < *f ) max = *f; } while ( ++f < fw ); } }
static void LoadAndTransformImage( GenericImage<P>& transform, const GenericImage<P1>& image, bool parallel, int maxProcessors )
{
   // Copy the selected region of `image` — centered and zero-padded to
   // FFT-friendly dimensions — into `transform`, then compute its forward DFT.
   Rect r = image.SelectedRectangle();
   if ( !r.IsRect() )
      return;

   // Optimized working dimensions for the FFT.
   int w = FFTC::OptimizedLength( r.Width() );
   int h = FFTC::OptimizedLength( r.Height() );
   // Offsets that center the source data inside the padded frame.
   int dw2 = (w - r.Width()) >> 1;
   int dh2 = (h - r.Height()) >> 1;

   transform.AllocateData( w, h, image.NumberOfSelectedChannels(),
                           (image.NumberOfSelectedChannels() < 3 || image.FirstSelectedChannel() != 0) ? ColorSpace::Gray : image.ColorSpace() );
   transform.Zero().Move( image, Point( dw2, dh2 ) );

   ApplyInPlaceFourierTransform( transform, FFTDirection::Forward, parallel, maxProcessors );
}
template <class P> inline static void Apply( GenericImage<P>& image, const IntegerResample& Z )
{
   /*
    * Integer resampling: zoom factor z > 0 replicates each source pixel into a
    * z*z block; z < 0 reduces each z*z block to one pixel using the selected
    * downsample mode (average/median/maximum/minimum).
    */
   int width = image.Width();
   int w0 = width;                  // source dimensions
   int height = image.Height();
   int h0 = height;
   Z.GetNewSizes( width, height );  // -> target dimensions

   if ( width == w0 && height == h0 )
      return;                       // identity resample: nothing to do
   if ( width == 0 || height == 0 )
   {
      image.FreeData();             // degenerate target: leave an empty image
      return;
   }

   image.EnsureUnique();

   typename P::sample* f = 0;       // target channel buffer under construction
   typename P::sample** f0 = 0;     // source channel buffers (released below)
   int n = image.NumberOfChannels();
   size_type N = image.NumberOfPixels();
   typename GenericImage<P>::color_space cs0 = image.ColorSpace();
   StatusMonitor status = image.Status();

   int z = pcl::Abs( Z.ZoomFactor() );
   int z2 = z*z;                    // samples per block
   int n2 = z2 >> 1;                // middle rank used by the median mode

   try
   {
      if ( status.IsInitializationEnabled() )
      {
         // Build a descriptive progress message, e.g. "Downsampling 1:3, WxH, median".
         String info = (Z.ZoomFactor() > 0) ? "Upsampling" : "Downsampling";
         info.AppendFormat( " %d:%d, %dx%d", (Z.ZoomFactor() > 0) ? z : 1, (Z.ZoomFactor() > 0) ? 1 : z, width, height );
         if ( Z.ZoomFactor() < 0 )
         {
            info += ", ";
            switch ( Z.DownsampleMode() )
            {
            default:
            case IntegerDownsampleMode::Average: info += "average"; break;
            case IntegerDownsampleMode::Median:  info += "median"; break;
            case IntegerDownsampleMode::Maximum: info += "maximum"; break;
            case IntegerDownsampleMode::Minimum: info += "minimum"; break;
            }
         }
         status.Initialize( info, n*N );
      }

      // Scratch vector to gather each block's samples in median mode.
      GenericVector<typename P::sample> fm;
      if ( Z.ZoomFactor() < 0 && Z.DownsampleMode() == IntegerDownsampleMode::Median )
         fm = GenericVector<typename P::sample>( z2 );

      f0 = image.ReleaseData();     // take ownership of the source channel buffers

      for ( int c = 0; c < n; ++c, status += N )
      {
         f = image.Allocator().AllocatePixels( width, height );
         if ( Z.ZoomFactor() > 0 )
         {
            // Upsampling: replicate every source pixel into a z*z block.
            const typename P::sample* f0c = f0[c];
            for ( int y = 0; y < h0; ++y )
            {
               int yz = y*z;
               for ( int x = 0; x < w0; ++x )
               {
                  int xz = x*z;
                  typename P::sample v = *f0c++;
                  for ( int i = 0; i < z; ++i )
                  {
                     typename P::sample* fi = f + (size_type( yz + i )*width + xz);
                     for ( int j = 0; j < z; ++j )
                        *fi++ = v;
                  }
               }
            }
         }
         else
         {
            // Downsampling: reduce each z*z source block to a single sample.
            typename P::sample* fz = f;
            for ( int y = 0; y < height; ++y )
            {
               const typename P::sample* fy = f0[c] + size_type( y )*z*w0;
               for ( int x = 0; x < width; ++x )
               {
                  const typename P::sample* fyx = fy + x*z;
                  switch ( Z.DownsampleMode() )
                  {
                  default:
                  case IntegerDownsampleMode::Average:
                     {
                        double s = 0;
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              s += fyx[j];
                        // Integer sample types round the mean; float types store it directly.
                        *fz++ = typename P::sample( P::IsFloatSample() ? s/z2 : Round( s/z2 ) );
                     }
                     break;
                  case IntegerDownsampleMode::Median:
                     {
                        typename P::sample* fmi = *fm;
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              *fmi++ = fyx[j];
                        // Odd block area: single middle element. Even area:
                        // mean of the two middle order statistics.
                        *fz++ = (z & 1) ? *Select( *fm, fm.At( z2 ), n2 ) : P::FloatToSample( 0.5*(double( *Select( *fm, fm.At( z2 ), n2 ) ) + double( *Select( *fm, fm.At( z2 ), n2-1 ) )) );
                     }
                     break;
                  case IntegerDownsampleMode::Maximum:
                     {
                        *fz = P::MinSampleValue();
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              if ( fyx[j] > *fz )
                                 *fz = fyx[j];
                        ++fz;
                     }
                     break;
                  case IntegerDownsampleMode::Minimum:
                     {
                        *fz = P::MaxSampleValue();
                        for ( int i = 0; i < z; ++i, fyx += w0 )
                           for ( int j = 0; j < z; ++j )
                              if ( fyx[j] < *fz )
                                 *fz = fyx[j];
                        ++fz;
                     }
                     break;
                  }
               }
            }
         }

         // Replace the source channel buffer with the resampled one.
         image.Allocator().Deallocate( f0[c] );
         f0[c] = f;
         f = 0;
      }

      image.ImportData( f0, width, height, n, cs0 ).Status() = status;
   }
   catch ( ... )
   {
      // Free any buffers still owned here, leave the image empty, and rethrow.
      if ( f != 0 )
         image.Allocator().Deallocate( f );
      if ( f0 != 0 )
      {
         for ( int c = 0; c < n; ++c )
            if ( f0[c] != 0 )
               image.Allocator().Deallocate( f0[c] );
         image.Allocator().Deallocate( f0 );
      }
      image.FreeData();
      throw;
   }
}
template <class P> static void Apply( const GenericImage<P>& image, MultiscaleMedianTransform& T )
{
   // Decompose `image` into T.m_numberOfLayers detail layers plus a residual
   // using multiscale median filtering (optionally the hybrid median-wavelet
   // variant). Enabled layers are stored in T.m_transform.
   InitializeStructures();

   bool statusInitialized = false;
   // The image is logically const here; only its status monitor is mutated.
   StatusMonitor& status = (StatusMonitor&)image.Status();
   try
   {
      if ( status.IsInitializationEnabled() )
      {
         status.Initialize( String( T.m_medianWaveletTransform ? "Median-wavelet" : "Multiscale median" ) + " transform",
                            image.NumberOfSelectedSamples()*T.m_numberOfLayers*(T.m_medianWaveletTransform ? 2 : 1) );
         status.DisableInitialization();
         statusInitialized = true;
      }

      GenericImage<P> cj0( image ); // current approximation (scale j-1)
      cj0.Status().Clear();

      for ( int j = 1, j0 = 0; ; ++j, ++j0 )
      {
         GenericImage<P> cj( cj0 ); // next, smoother approximation (scale j)
         cj.Status() = status;
         MedianFilterLayer( cj, T.FilterSize( j0 ), T.m_parallel, T.m_maxProcessors );

         if ( T.m_medianWaveletTransform )
         {
            // Hybrid mode: re-smooth linearly, but keep the median-filtered
            // result wherever the detail amplitude exceeds the threshold.
            GenericImage<P> w0( cj0 );
            GenericImage<P> d0( cj0 );
            d0 -= cj;
            for ( int c = 0; c < d0.NumberOfChannels(); ++c )
            {
               w0.SelectChannel( c );
               d0.SelectChannel( c );
               cj.SelectChannel( c );
               // Channel-adaptive threshold from the MAD, scaled to sigma units.
               double t = T.m_medianWaveletThreshold*d0.MAD( d0.Median() )/0.6745;
               for ( typename GenericImage<P>::sample_iterator iw( w0 ), id( d0 ), ic( cj ); iw; ++iw, ++id, ++ic )
                  if ( Abs( *id ) > t )
                     *iw = *ic;
            }
            w0.ResetSelections();
            cj.ResetSelections();
            w0.Status() = cj.Status();
            LinearFilterLayer( w0, T.FilterSize( j0 ), T.m_parallel, T.m_maxProcessors );
            cj = w0;
         }

         status = cj.Status();
         cj.Status().Clear();

         if ( T.m_layerEnabled[j0] )
         {
            // Detail layer j0 = difference between successive approximations.
            cj0 -= cj;
            T.m_transform[j0] = Image( cj0 );
         }

         if ( j == T.m_numberOfLayers )
         {
            // Final iteration: the remaining approximation is the residual layer.
            if ( T.m_layerEnabled[j] )
               T.m_transform[j] = Image( cj );
            break;
         }

         cj0 = cj;
      }

      if ( statusInitialized )
         status.EnableInitialization();
   }
   catch ( ... )
   {
      T.DestroyLayers();
      if ( statusInitialized )
         status.EnableInitialization();
      throw;
   }
}
template <class P> static void Compute( const GenericImage<P>& image, ImageStatistics::Data& data, bool /*parallel*/, int /*maxProcessors*/ )
{
   // Compute descriptive statistics for the selected region/channel of `image`
   // into `data`, honoring the data.no* suppression flags and the optional
   // low/high rejection bounds.
   data.AssignStatisticalData( ImageStatistics::Data() ); // reset all outputs

   size_type N = image.NumberOfSelectedPixels();
   if ( N == 0 )
      return;

   if ( image.Status().IsInitializationEnabled() )
      image.Status().Initialize( "Computing image statistics", N );

   // The monitor is advanced in 8 roughly equal steps (7*NS + NN == N).
   size_type NS = N/8;
   size_type NN = N - 7*NS;

   Rect rect = image.SelectedRectangle();
   int channel = image.SelectedChannel();

   data.minimum = data.maximum = 0;
   data.minPos = data.maxPos = Point( 0 );

   if ( data.rejectLow || data.rejectHigh )
   {
      // Rejection bounds in the native range
      double s0 = 0, s1 = 0;
      if ( data.rejectLow )
         s0 = data.low * P::MaxSampleValue();
      if ( data.rejectHigh )
         s1 = data.high * P::MaxSampleValue();

      // Accepted samples are gathered into a dynamic array.
      Array<double> v;
      v.Reserve( N ); // clearly, optimize for speed

      typename GenericImage<P>::const_roi_sample_iterator i( image, rect, channel );
      P::FromSample( data.minimum, *i );
      data.maximum = data.minimum;

      if ( data.noExtremes )
      {
         // Only collect accepted samples; no min/max tracking required.
         if ( data.rejectLow )
         {
            if ( data.rejectHigh )
            {
               for ( ; i; ++i )
                  if ( *i > s0 )
                     if ( *i < s1 )
                     {
                        double f; P::FromSample( f, *i );
                        v.Append( f );
                     }
            }
            else
            {
               for ( ; i; ++i )
                  if ( *i > s0 )
                  {
                     double f; P::FromSample( f, *i );
                     v.Append( f );
                  }
            }
         }
         else
         {
            for ( ; i; ++i )
               if ( *i < s1 )
               {
                  double f; P::FromSample( f, *i );
                  v.Append( f );
               }
         }
      }
      else // !data.noExtremes
      {
         // Collect accepted samples and track extrema with their positions.
         bool extremesSeen = false;
         if ( data.rejectLow )
         {
            if ( data.rejectHigh )
            {
               for ( int y = rect.y0; y < rect.y1; ++y )
                  for ( int x = rect.x0; x < rect.x1; ++x, ++i )
                     if ( *i > s0 )
                        if ( *i < s1 )
                        {
                           double f; P::FromSample( f, *i );
                           v.Append( f );
                           if ( extremesSeen )
                           {
                              if ( f < data.minimum ) { data.minimum = f; data.minPos.x = x; data.minPos.y = y; }
                              else if ( f > data.maximum ) { data.maximum = f; data.maxPos.x = x; data.maxPos.y = y; }
                           }
                           else
                           {
                              // First accepted sample initializes both extrema.
                              data.minimum = data.maximum = f;
                              data.minPos.x = data.maxPos.x = x;
                              data.minPos.y = data.maxPos.y = y;
                              extremesSeen = true;
                           }
                        }
            }
            else
            {
               for ( int y = rect.y0; y < rect.y1; ++y )
                  for ( int x = rect.x0; x < rect.x1; ++x, ++i )
                     if ( *i > s0 )
                     {
                        double f; P::FromSample( f, *i );
                        v.Append( f );
                        if ( extremesSeen )
                        {
                           if ( f < data.minimum ) { data.minimum = f; data.minPos.x = x; data.minPos.y = y; }
                           else if ( f > data.maximum ) { data.maximum = f; data.maxPos.x = x; data.maxPos.y = y; }
                        }
                        else
                        {
                           data.minimum = data.maximum = f;
                           data.minPos.x = data.maxPos.x = x;
                           data.minPos.y = data.maxPos.y = y;
                           extremesSeen = true;
                        }
                     }
            }
         }
         else
         {
            for ( int y = rect.y0; y < rect.y1; ++y )
               for ( int x = rect.x0; x < rect.x1; ++x, ++i )
                  if ( *i < s1 )
                  {
                     double f; P::FromSample( f, *i );
                     v.Append( f );
                     if ( extremesSeen )
                     {
                        if ( f < data.minimum ) { data.minimum = f; data.minPos.x = x; data.minPos.y = y; }
                        else if ( f > data.maximum ) { data.maximum = f; data.maxPos.x = x; data.maxPos.y = y; }
                     }
                     else
                     {
                        data.minimum = data.maximum = f;
                        data.minPos.x = data.maxPos.x = x;
                        data.minPos.y = data.maxPos.y = y;
                        extremesSeen = true;
                     }
                  }
         }
      }

      data.count = v.Length();

      // Statistics ladder over the accepted-sample array; monitor advances in
      // fixed NS/NN steps regardless of which statistics are suppressed.
      if ( !data.noSumOfSquares )
         data.sumOfSquares = pcl::SumOfSquares( v.Begin(), v.End() );
      image.Status() += NS;
      if ( !data.noMean )
      {
         data.mean = pcl::Mean( v.Begin(), v.End() );
         image.Status() += NS;
         if ( !data.noVariance )
         {
            data.variance = pcl::Variance( v.Begin(), v.End(), data.mean );
            data.stdDev = Sqrt( data.variance );
         }
         image.Status() += NS;
      }
      else
      {
         image.Status() += 2*NS;
      }
      if ( !data.noMedian )
      {
         data.median = pcl::Median( v.Begin(), v.End() );
         image.Status() += NS;
         if ( !data.noAvgDev )
            data.avgDev = pcl::AvgDev( v.Begin(), v.End(), data.median );
         image.Status() += NS;
         if ( !data.noMAD )
         {
            data.MAD = pcl::MAD( v.Begin(), v.End(), data.median );
            // 1.4826*MAD makes the MAD consistent with the standard deviation
            // of a normal distribution.
            if ( !data.noBWMV )
               data.bwmv = pcl::BiweightMidvariance( v.Begin(), v.End(), data.median, 1.4826*data.MAD );
         }
         if ( !data.noPBMV )
            data.pbmv = pcl::BendMidvariance( v.Begin(), v.End(), data.median, 0.2 );
         image.Status() += NS;
      }
      else
      {
         image.Status() += 3*NS;
      }
      if ( !data.noSn )
         data.Sn = pcl::Sn( v.Begin(), v.End() );
      image.Status() += NS;
      if ( !data.noQn )
         data.Qn = pcl::Qn( v.Begin(), v.End() );
      image.Status() += NN;
   }
   else
   {
      // No rejection: operate on a dense matrix copy of the selection.
      data.count = N;
      DMatrix V( image, rect, channel );
      if ( !data.noExtremes )
      {
         double* i = V.Begin();
         data.minimum = data.maximum = *i;
         for ( int y = rect.y0; y < rect.y1; ++y )
            for ( int x = rect.x0; x < rect.x1; ++x, ++i )
               if ( *i < data.minimum ) { data.minimum = *i; data.minPos.x = x; data.minPos.y = y; }
               else if ( *i > data.maximum ) { data.maximum = *i; data.maxPos.x = x; data.maxPos.y = y; }
      }
      // Same statistics ladder as above, over the matrix data.
      if ( !data.noSumOfSquares )
         data.sumOfSquares = pcl::SumOfSquares( V.Begin(), V.End() );
      image.Status() += NS;
      if ( !data.noMean )
      {
         data.mean = pcl::Mean( V.Begin(), V.End() );
         image.Status() += NS;
         if ( !data.noVariance )
         {
            data.variance = pcl::Variance( V.Begin(), V.End(), data.mean );
            data.stdDev = Sqrt( data.variance );
         }
         image.Status() += NS;
      }
      else
      {
         image.Status() += 2*NS;
      }
      if ( !data.noMedian )
      {
         data.median = pcl::Median( V.Begin(), V.End() );
         image.Status() += NS;
         if ( !data.noAvgDev )
            data.avgDev = pcl::AvgDev( V.Begin(), V.End(), data.median );
         image.Status() += NS;
         if ( !data.noMAD )
         {
            data.MAD = pcl::MAD( V.Begin(), V.End(), data.median );
            if ( !data.noBWMV )
               data.bwmv = pcl::BiweightMidvariance( V.Begin(), V.End(), data.median, 1.4826*data.MAD );
         }
         if ( !data.noPBMV )
            data.pbmv = pcl::BendMidvariance( V.Begin(), V.End(), data.median, 0.2 );
         image.Status() += NS;
      }
      else
      {
         image.Status() += 3*NS;
      }
      if ( !data.noSn )
         data.Sn = pcl::Sn( V.Begin(), V.End() );
      image.Status() += NS;
      if ( !data.noQn )
         data.Qn = pcl::Qn( V.Begin(), V.End() );
      image.Status() += NN;
   }
}
static void Apply( GenericImage<P>& img, const View& view, const FluxCalibrationInstance& instance )
{
   // Convert pixel values to physical flux units using instrument parameters
   // resolved from the instance and/or FITS keywords, rescale the image, and
   // record the calibration via FLXMIN/FLXRANGE/FLX2DN keywords.
   FITSKeywordArray inputKeywords;
   view.Window().GetKeywords( inputKeywords );

   // Refuse to calibrate twice.
   if ( KeywordExists( inputKeywords, "FLXMIN" ) || KeywordExists( inputKeywords, "FLXRANGE" ) || KeywordExists( inputKeywords, "FLX2DN" ) )
   {
      throw Error( "Already calibrated image" );
   }

   if ( img.IsColor() )
      throw Error( "Can't calibrate a color image" );

   float Wc = instance.p_wavelength.GetValue( inputKeywords ); // filter central wavelength
   // NOTE(review): Max() clamps Tr to >= 1 while the validity check below
   // rejects Tr > 1, so only Tr == 1 can ever pass — Min() looks intended;
   // confirm against the original process specification.
   float Tr = Max( 1.0F, instance.p_transmissivity.GetValue( inputKeywords ) );
   float Delta = instance.p_filterWidth.GetValue( inputKeywords ); // filter bandwidth
   float Ap = instance.p_aperture.GetValue( inputKeywords ) / 10; // mm -> cm
   float Cobs = Max( 0.0F, instance.p_centralObstruction.GetValue( inputKeywords ) ) / 10; // mm -> cm
   float ExpT = instance.p_exposureTime.GetValue( inputKeywords );
   float AtmE = Max( 0.0F, instance.p_atmosphericExtinction.GetValue( inputKeywords ) );
   // NOTE(review): clamped to >= 1, so the G <= 0 check below can never fire — confirm intent.
   float G = Max( 1.0F, instance.p_sensorGain.GetValue( inputKeywords ) );
   // NOTE(review): same Max/Min suspicion as Tr — only QEff == 1 passes validation.
   float QEff = Max( 1.0F, instance.p_quantumEfficiency.GetValue( inputKeywords ) );

   if ( Wc <= 0 )
      throw Error( "Invalid filter wavelength" );
   if ( Tr <= 0 || Tr > 1 )
      throw Error( "Invalid filter transmissivity" );
   if ( Delta <= 0 )
      throw Error( "Invalid filter width" );
   if ( Ap <= 0 )
      throw Error( "Invalid aperture" );
   if ( Cobs < 0 || Cobs >= Ap )
      throw Error( "Invalid central obstruction area" );
   if ( ExpT <= 0 )
      throw Error( "Invalid exposure time" );
   if ( AtmE < 0 || AtmE >= 1 )
      throw Error( "Invalid atmospheric extinction" );
   if ( G <= 0 )
      throw Error( "Invalid sensor gain" );
   if ( QEff <= 0 || QEff > 1 )
      throw Error( "Invalid quantum efficiency" );

   // Copy all keywords except PEDESTAL, which is consumed here.
   FITSKeywordArray keywords;
   float pedestal = 0;
   bool foundPedestal = false;
   for ( FITSKeywordArray::const_iterator i = inputKeywords.Begin(); i != inputKeywords.End(); ++i )
      if ( i->name == "PEDESTAL" )
      {
         if ( i->value.TryToFloat( pedestal ) )
            foundPedestal = true;
         pedestal /= 65535; // 2^16-1 maximum value of a 16bit CCD.
      }
      else
         keywords.Add( *i );

   if ( foundPedestal )
      Console().NoteLn( "<end><cbr><br>* FluxCalibration: PEDESTAL keyword found: " + view.FullId() );

   // double F = Wc * inv_ch * (1 - Tr) * Delta * Ap * Cobs * ExpT * AtmE * G * QEff;
   // Conversion factor: photon-energy term * transmitted fraction * bandwidth
   // * unobstructed collecting area * exposure * gain * quantum efficiency.
   double F = Wc * inv_ch * (1 - AtmE) * Delta * ( Const<double>::pi() / 4 * ( Ap*Ap - Cobs*Cobs ) ) * ExpT * Tr * G * QEff;

   size_type N = img.NumberOfPixels();
   typename P::sample* f = img.PixelData( 0 );
   const typename P::sample* fN = f + N;
   double flxMin = DBL_MAX;
   double flxMax = 0;
   for ( ; f < fN; ++f, ++img.Status() )
   {
      double I;
      P::FromSample( I, *f );
      I = (I - pedestal)/F; // pedestal-subtracted value in flux units
      *f = P::ToSample( I );
      if ( I < flxMin )
         flxMin = I;
      if ( I > flxMax )
         flxMax = I;
   }
   // Normalize to [0,1]; the physical range is preserved by the keywords below.
   img.Rescale();

   keywords.Add( FITSHeaderKeyword( "FLXMIN", IsoString().Format( "%.8e", flxMin ), "" ) );
   keywords.Add( FITSHeaderKeyword( "FLXRANGE", IsoString().Format( "%.8e", flxMax - flxMin ), "FLXRANGE*pixel_value + FLXMIN = erg/cm^2/s/nm" ) );
   keywords.Add( FITSHeaderKeyword( "FLX2DN", IsoString().Format( "%.8e", F*65535 ), "(FLXRANGE*pixel_value + FLXMIN)*FLX2DN = DN" ) );
   view.Window().SetKeywords( keywords );
}