// Constructs an image with the given dimensions, pixel format, and per-channel
// data type. The backing pixel buffer is allocated immediately and initialized
// so that every pixel holds fillColor (converted by GenerateImageBuffer into
// the requested format/dataType representation).
Image::Image(const Extent3D& extent, const ImageFormat format, const DataType dataType, const ColorRGBAd& fillColor) :
    extent_   { extent   },
    format_   { format   },
    dataType_ { dataType },
    // GetNumPixels() is derived from extent_; this relies on extent_ being
    // declared (and therefore initialized) before data_ -- assumed from the
    // init-list order, TODO confirm against the class declaration.
    data_     { GenerateImageBuffer(format, dataType, GetNumPixels(), fillColor) }
{
}
// Resizes the image to 'extent' and repositions the previous content at
// 'offset' within the new image. No-op when the extent is unchanged.
// New pixels are set to 'fillColor' only when the image grows in at least one
// dimension; when it shrinks (or stays equal) in every dimension, the new
// buffer is left uninitialized and is expected to be covered by the blit of
// the previous content.
void Image::Resize(const Extent3D& extent, const ColorRGBAd& fillColor, const Offset3D& offset)
{
    if (extent != GetExtent())
    {
        /* Store ownership of current image buffer in temporary image */
        // Only data_ is moved out here; extent_/format_/dataType_ still hold
        // the old values, so GetExtent() below still reports the OLD size.
        Image prevImage;
        prevImage.extent_ = GetExtent();
        prevImage.format_ = GetFormat();
        prevImage.dataType_ = GetDataType();
        prevImage.data_ = std::move(data_);

        // Compare the requested extent against the old extent (extent_ has not
        // been overwritten yet at this point).
        if ( extent.width  > GetExtent().width  ||
             extent.height > GetExtent().height ||
             extent.depth  > GetExtent().depth )
        {
            /* Resize image buffer with fill color */
            extent_ = extent;
            data_ = GenerateImageBuffer(GetFormat(), GetDataType(), GetNumPixels(), fillColor);
        }
        else
        {
            /* Resize image buffer with uninitialized image buffer */
            // GetDataSize() now reflects the NEW extent, since extent_ was
            // just updated.
            extent_ = extent;
            data_ = GenerateEmptyByteBuffer(GetDataSize(), false);
        }

        /* Copy previous image into new image */
        // NOTE(review): with a non-zero 'offset' on a pure shrink, regions not
        // covered by this blit remain uninitialized -- presumably intentional;
        // verify Blit clamps/covers as expected.
        Blit(offset, prevImage, { 0, 0, 0 }, prevImage.GetExtent());
    }
}
// private cv::Point ImageData::GetPixelCoordinatesFromIndex(const int index) const { CHECK_GE(index, 0) << "Pixel index must be at least 0."; CHECK_LT(index, GetNumPixels()) << "Pixel index was out of bounds."; const int x = index % image_size_.width; // col const int y = index / image_size_.width; // row return cv::Point(x, y); }
// Write(): // Writes out a code run to the specified output file. The line argument // is the line of IMG_BYTE's the image we are outputing. Offset is // where in the line this code is. bool BMP_RLE8_Code::Write( bofstream &out, IMG_BYTE *line, int offset ) { if( mode == RLE8_ENCODED ) { out << GetNumPixels() << line[offset]; } else { if( IsValid() ) { out << (char)0x00 << GetNumPixels(); for( int i=0; i < GetNumPixels(); i++ ) out << line[ offset + i ]; if( GetNumPixels() % 2 > 0 ) out << (char)0x00; } else { for( int i=0; i < GetNumPixels(); i++ ) out << (char)0x01 << line[ offset + i ]; } } return true; }
// Resizes the image to 'extent', setting every pixel to 'fillColor'.
// If the extent is unchanged the existing buffer is simply refilled;
// otherwise a fresh buffer of the new size is generated with the fill color.
void Image::Resize(const Extent3D& extent, const ColorRGBAd& fillColor)
{
    if (extent == extent_)
    {
        /* Clear image by fill color */
        Fill({ 0, 0, 0 }, extent, fillColor);
        return;
    }

    /* Generate new image buffer with fill color */
    extent_ = extent;
    data_ = GenerateImageBuffer(GetFormat(), GetDataType(), GetNumPixels(), fillColor);
}
// Converts a raw histogram into per-bin percentages of the total pixel count.
//
// numPixelValues: number of valid bins in pHistogram.
// pHistogram:     per-value pixel counts; must hold numPixelValues entries.
//
// Returns a vector with at least 65536 entries (the full 16-bit value range);
// entries beyond numPixelValues remain 0. If the histogram contains no pixels
// at all, every percentage is 0.
std::vector<float> HistogramDrawingArea::GetPercentages( unsigned int numPixelValues, const int* pHistogram )
{
    int numPixels = GetNumPixels( numPixelValues, pHistogram );

    // Size the result to at least 65536, but never smaller than the number of
    // bins we are about to write: the previous fixed 65536 size wrote out of
    // bounds whenever numPixelValues exceeded it.
    const unsigned int numBins = ( numPixelValues > 65536u ) ? numPixelValues : 65536u;
    std::vector<float> percentages;
    percentages.assign( numBins, 0 );

    if ( numPixels == 0 )
    {
        // Avoid division by zero for an empty histogram.
        return percentages;
    }

    for ( unsigned int i = 0; i < numPixelValues; i++ )
    {
        // Multiply in floating point: the previous integer expression
        // (pHistogram[i] * 100) overflowed int for bins with > ~21M pixels.
        percentages[i] = ( static_cast<float>( pHistogram[i] ) * 100.0f )
                         / static_cast<float>( numPixels );
    }
    return percentages;
}
// Constructs an ImageData from a packed, channel-major buffer of doubles:
// pixel_values is assumed to hold num_channels consecutive planes of
// (size.width * size.height) values each -- TODO confirm against callers.
// Each plane is deep-copied into its own cv::Mat channel.
ImageData::ImageData(
    const double* pixel_values, const cv::Size& size, const int num_channels) {
  CHECK_NOTNULL(pixel_values);
  CHECK_GE(num_channels, 1) << "The image must have at least one channel.";

  // Set image size and make sure the number of pixels is accurate.
  image_size_ = size;
  const int num_pixels = GetNumPixels();
  CHECK_GE(num_pixels, 1) << "Number of pixels must be positive.";

  // Add each channel to the ImageData.
  for (int channel_index = 0; channel_index < num_channels; ++channel_index) {
    // Start of this channel's plane within the packed buffer.
    const double* channel_pixels = &pixel_values[channel_index * num_pixels];
    // This cv::Mat constructor wraps the caller's memory WITHOUT copying
    // (hence the const_cast); the clone() below makes the owned deep copy,
    // so the caller's buffer is never mutated or retained.
    const cv::Mat channel_image(
        size,
        util::kOpenCvMatrixType,
        const_cast<void*>(reinterpret_cast<const void*>(channel_pixels)));
    channels_.push_back(channel_image.clone());  // copy data
  }
  spectral_mode_ = GetDefaultSpectralMode(channels_.size());
}
// OutputAsAscii(): // Outputs the code as ASCII text to the output file specified. The // line argument is a line of IMG_BYTE's from the image we are saving. // offset is where in the lin this code is. bool BMP_RLE8_Code::OutputAsAscii( ostream &out, IMG_BYTE *line, int offset ) { if( mode == RLE8_ENCODED ) { out << " (" << offset << ") Encoded Mode, " << (int)GetNumPixels() << " pixels of index " << (int)line[ offset ] << endl; } else { if( GetNumPixels() < 3 ) { out << " (" << offset << ") Aboslute Mode, " << (int)GetNumPixels() << " pixels, converted to " << (int)GetNumPixels() << " 1-pixel Encoded Mode outputs: "; for( int i=0; i < GetNumPixels(); i++ ) out << " " << (int)line[ offset + i ]; out << endl; } else { out << " (" << offset << ") Aboslute Mode, " << (int)GetNumPixels() << " pixels, indices"; for( int i=0; i < GetNumPixels(); i++ ) out << " " << (int)line[ offset + i ]; out << endl; if( GetNumPixels() % 2 > 0 ) out << " Adding 1 pad byte for Absolute Mode word alignment" << endl; } } return true; }
// Runs the IRLS MAP solver starting from 'initial_estimate' and returns the
// estimated image. The estimate must match this solver's pixel count, channel
// count, and image size. Depending on the split_channels option, channels are
// solved all at once or one at a time in independent solver rounds.
ImageData IRLSMapSolver::Solve(const ImageData& initial_estimate) {
  const int num_pixels = GetNumPixels();
  const int num_channels = GetNumChannels();
  const cv::Size image_size = GetImageSize();
  // The initial estimate must be dimensionally compatible with the solver.
  CHECK_EQ(initial_estimate.GetNumPixels(), num_pixels);
  CHECK_EQ(initial_estimate.GetNumChannels(), num_channels);
  CHECK_EQ(initial_estimate.GetImageSize(), image_size);

  // If the split_channels option is set, loop over the channels here and solve
  // them independently. Otherwise, solve all channels at once.
  const int num_channels_per_split =
      solver_options_.split_channels ? 1 : num_channels;
  const int num_solver_rounds = num_channels / num_channels_per_split;
  // Number of unknowns handed to the solver in each round.
  const int num_data_points = num_channels_per_split * num_pixels;
  if (num_channels_per_split != num_channels) {
    LOG(INFO) << "Splitting up image into " << num_solver_rounds
        << " sections with " << num_channels_per_split
        << " channel(s) in each section.";
  }

  // Scale the option stop criteria parameters based on the number of
  // parameters and strength of the regularizers.
  IRLSMapSolverOptions solver_options_scaled = solver_options_;
  solver_options_scaled.AdjustThresholdsAdaptively(
      num_data_points, GetRegularizationParameterSum());
  if (IsVerbose()) {
    solver_options_scaled.PrintSolverOptions();
  }

  ImageData estimated_image;
  for (int i = 0; i < num_solver_rounds; ++i) {
    if (num_solver_rounds > 1) {
      LOG(INFO) << "Starting solver on image subset #" << (i + 1) << ".";
    }
    // Channel range [channel_start, channel_end) handled in this round.
    const int channel_start = i * num_channels_per_split;
    const int channel_end = channel_start + num_channels_per_split;

    // Copy the initial estimate data (within the appropriate channel range) to
    // the solver's array. Channels are laid out back-to-back, num_pixels
    // doubles per channel.
    alglib::real_1d_array solver_data;
    solver_data.setlength(num_data_points);
    for (int channel = 0; channel < num_channels_per_split; ++channel) {
      double* data_ptr = solver_data.getcontent() + (num_pixels * channel);
      const double* channel_ptr = initial_estimate.GetChannelData(
          channel_start + channel);
      std::copy(channel_ptr, channel_ptr + num_pixels, data_ptr);
    }

    // Set up the base objective function (just data term). The regularization
    // term depends on the IRLS weights, so it gets added in the IRLS loop.
    ObjectiveFunction objective_function_data_term_only(num_data_points);
    std::shared_ptr<ObjectiveTerm> data_term(new ObjectiveDataTerm(
        image_model_, observations_, channel_start, channel_end, image_size));
    objective_function_data_term_only.AddTerm(data_term);

    // RunIRLSLoop optimizes solver_data in place.
    RunIRLSLoop(
        solver_options_scaled,
        objective_function_data_term_only,
        regularizers_,
        image_size,
        channel_start,
        channel_end,
        &solver_data);

    // Append this round's solved channels to the output image. Rounds run in
    // channel order, so channels end up in their original positions.
    for (int channel = 0; channel < num_channels_per_split; ++channel) {
      const double* data_ptr =
          solver_data.getcontent() + (num_pixels * channel);
      estimated_image.AddChannel(data_ptr, image_size);
    }
  }
  return estimated_image;
}
std::uint32_t Image::GetDataSize() const { return (GetNumPixels() * GetBytesPerPixel()); }