void CharFeatureCollection::calculateOutputDataMatrix(std::vector<ImageChar*> &imageChars)
{
    // check if there are features:
    if (mImageCharFeatureVec.empty()) {
        std::cout << "Exception in CharFeatureCollection::calculateOutputDataMatrix(std::vector<ImageChar*> &imageChars): no feature vector list!" << std::endl;
        throw NoDataException("No features specified for feature vector calculation!");
    }
    // compute the length of one row, i.e. the total feature vector size:
    int nCols = 0;
    for (int i = 0; i < (int)mImageCharFeatureVec.size(); ++i) {
        nCols += mImageCharFeatureVec[i]->vectorSize();
    }
    // resize output matrix:
    const int nRows = imageChars.size();
    std::cout << "Matrix dimensions are: " << nRows << " x " << nCols << std::endl;
    mDataMatrix.resize(nRows, nCols);

#if 1 // take preprocessed image from pointer in ImageChar
    #pragma omp parallel for
    for (int i = 0; i < nRows; ++i) {
#if 0
        if (imageChars[i]->mPreprocessingResults.isEmpty()) {
            imageChars[i]->mPreprocessingResults
        }
#else
//        GrayImage<> *pPreprImage = imageChars[i]->pPreprImage;
        GrayImage<> *pPreprImage = imageChars[i]->mPreprocessingResults.mpProcessedImage;
        if (!pPreprImage) {
            std::cerr << "Preprocessing image not available while computing features!" << std::endl;
            throw NoDataException("Preprocessing image not available while computing features!");
        }
#endif
        this->calculateOutputDataRow(*pPreprImage, i, mDataMatrix);
        std::cout << "Computed feature " << i+1 << " of " << nRows << std::endl;
        // store reference to feature vector in ImageChar of mpImageCharsVec:
        imageChars[i]->setFeatureData(&mDataMatrix, i);
    } // end for all rows i
#else // extract bounding box image from ImageChar
    for (int i = 0; i < nRows; ++i) {
        GrayImage<> *pImage = imageChars[i]->pImage;
        BoundingBox bbox = imageChars[i]->bBox;
        pImage->setRoi(bbox);
        GrayImage<> charImage;
        pImage->computeRoiImage(charImage);
        pImage->releaseRoi();

        this->calculateOutputDataRow(charImage, i, mDataMatrix);
        // store reference to feature vector in ImageChar of mpImageCharsVec:
        imageChars[i]->setFeatureData(&mDataMatrix, i);
    } // end for all rows i
#endif

    return;
} // end calculateOutputDataMatrix
GrayImage TextLineTracer::downscale(GrayImage const& input, Dpi const& dpi)
{
    // Downscale to 200 DPI.
    QSize downscaled_size(input.size());
    if (dpi.horizontal() < 180 || dpi.horizontal() > 220 ||
            dpi.vertical() < 180 || dpi.vertical() > 220) {
        downscaled_size.setWidth(std::max<int>(1, input.width() * 200 / dpi.horizontal()));
        downscaled_size.setHeight(std::max<int>(1, input.height() * 200 / dpi.vertical()));
    }

    return scaleToGray(input, downscaled_size);
}
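// A minimal, self-contained sketch of the target-size arithmetic used in
// downscale() above, with plain ints standing in for QSize/Dpi (Size2D and
// downscaledSizeFor200Dpi are illustrative names, not part of the original
// API): if the input is already near 200 DPI (180..220), its size is kept;
// otherwise each dimension is rescaled by 200/dpi, clamped to at least 1 px.
#include <algorithm>
#include <cassert>

struct Size2D { int w; int h; };

static Size2D downscaledSizeFor200Dpi(Size2D in, int hDpi, int vDpi)
{
    Size2D out = in;
    if (hDpi < 180 || hDpi > 220 || vDpi < 180 || vDpi > 220) {
        out.w = std::max(1, in.w * 200 / hDpi);
        out.h = std::max(1, in.h * 200 / vDpi);
    }
    return out;
}

// Example: a 3000x4000 scan at 600 DPI maps to 1000x1333 at ~200 DPI.
static void downscaleSizeExample()
{
    Size2D const s = downscaledSizeFor200Dpi(Size2D{3000, 4000}, 600, 600);
    assert(s.w == 1000 && s.h == 1333);
}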
void process_slice( const boost::filesystem::path &folderPath, uint z, Z3i::DigitalSet &set3d )
{
    boost::filesystem::path filePath = folderPath;
    filePath /= QString("slice-%1.pgm").arg( z, 0, 10 ).toStdString();
    GrayImage image = PNMReader<GrayImage>::importPGM( filePath.string() );
    Z2i::DigitalSet *objpts = newDigitalSet2DFromBinaryImage( image );
    if ( !objpts->empty() ) {
        VertexPolygon convexhull;
        Z2i::Object4_8 obj( Z2i::dt4_8, *objpts );
        Z2i::Object4_8 boundary = obj.border();
        if ( boundary.size() <= 3 ) {
            trace.info() << "Warning: boundary is ";
            for ( Z2i::DigitalSet::ConstIterator pt = boundary.pointSet().begin(); pt != boundary.pointSet().end(); pt++ )
                trace.info() << (*pt) << " ";
            trace.info() << std::endl;
        }
        Geom2D::ConvexHull( boundary.pointSet(), convexhull );
        EdgePolygon segs;
        init_edges_polygon( segs, convexhull );
        uint new3Dpts = 0;
        if ( segs.size() >= 3 )
            for ( Z2i::Domain::ConstIterator pt = image.domain().begin(); pt != image.domain().end(); pt++ ) {
                // skip background pixels (white = 255)
                if ( image( *pt ) == (unsigned char) 255 ) continue;
                // keep only pixels lying inside (or on) every convex hull edge
                bool bInside = true;
                for ( uint iSeg = 0; iSeg != segs.size() && bInside; iSeg++ )
                    if ( segs[iSeg].signedDistance( *pt ) < 0 ) bInside = false;
                if ( !bInside ) continue;
                set3d.insertNew( Z3i::Point( (*pt).at(0), (*pt).at(1), z ) );
                new3Dpts++;
            }
        Z2i::Point UL, BR;
        boundary.pointSet().computeBoundingBox( UL, BR );
        std::cerr << QString( "Slice %1" ).arg( z, 10, 10, QLatin1Char('0') ).toStdString();
        std::cerr << setw(6) << objpts->size() << " "
                  << setw(6) << boundary.size() << " "
                  << setw(6) << new3Dpts << " "
                  << setw(4) << UL.at(0) << ","
                  << setw(4) << UL.at(1) << " - "
                  << setw(4) << BR.at(0) << ","
                  << setw(4) << BR.at(1) << std::endl;
    }
    delete objpts;
}
GrayImage gaussBlur(GrayImage const& src, float h_sigma, float v_sigma)
{
    using namespace boost::lambda;

    if (src.isNull()) {
        return src;
    }

    GrayImage dst(src.size());
    gaussBlurGeneric(
        src.size(), h_sigma, v_sigma,
        src.data(), src.stride(), StaticCastValueConv<float>(),
        dst.data(), dst.stride(),
        _1 = bind<uint8_t>(RoundAndClipValueConv<uint8_t>(), _2)
    );

    return dst;
}
void GaussianPyramid::BuildPyramid(GrayImage *level0, unsigned nbLevels)
{
    GrayImage *pLevel = level0;
    _levels.push_back(pLevel);
    GaussianFilter gf(_sigma);
    // build the nbLevels:
    unsigned w = pLevel->width();
    unsigned h = pLevel->height();
    if (nbLevels != 0) {
        for (unsigned int i = 0; i < nbLevels; ++i) { //soc
            w = pLevel->width() >> 1;
            h = pLevel->height() >> 1;
            GrayImage *img = new GrayImage(w, h);
            for (unsigned int y = 0; y < h; ++y) {
                for (unsigned int x = 0; x < w; ++x) {
                    float v = gf.getSmoothedPixel<GrayImage>(pLevel, 2 * x, 2 * y);
                    img->setPixel(x, y, v);
                }
            }
            _levels.push_back(img);
            pLevel = img;
        }
    }
    else {
        // no level count given: keep halving until the image degenerates
        while ((w > 1) && (h > 1)) {
            w = pLevel->width() >> 1;
            h = pLevel->height() >> 1;
            GrayImage *img = new GrayImage(w, h);
            for (unsigned int y = 0; y < h; ++y) {
                for (unsigned int x = 0; x < w; ++x) {
                    float v = gf.getSmoothedPixel<GrayImage>(pLevel, 2 * x, 2 * y);
                    img->setPixel(x, y, v);
                }
            }
            _levels.push_back(img);
            pLevel = img;
        }
    }
}
static bool checkScale(GrayImage const& img, QSize const& new_size)
{
    GrayImage const scaled1(scaleToGray(img, new_size));
    GrayImage const scaled2(img.toQImage().scaled(
        new_size, Qt::IgnoreAspectRatio, Qt::SmoothTransformation
    ));
    return fuzzyCompare(scaled1, scaled2);
}
/*static*/ void PreprocessingResults::computeOtherPreprocessings(const GrayImage<> &preprocessedImage, GrayImage<float> &intInvImage, GrayImage<float> &distTransformImage)
{
#if 1
    // 2nd: compute integral invariant image:
    int radius = 2;
    bool oddSize = true;
    Kernel<> mask;
    Kernel<>::createSphereKernel( mask, radius, oddSize );
//    std::cout << mask << std::endl;
    ImageIntegralInvariant::calcIntegralInvariantImage( mask, preprocessedImage, intInvImage );
#endif

#if 1
    // 3rd: compute distance transform of inverted(!) glyph:
    GrayImage<> invPreprocessedGlyphImage = preprocessedImage;
    invPreprocessedGlyphImage.invert();
    ImageFilter::distTransform(invPreprocessedGlyphImage, distTransformImage);
#endif

    return;
}
void PolynomialSurface::prepareEquationsAndDataPoints(
    GrayImage const& image, BinaryImage const& mask,
    std::vector<double>& equations,
    std::vector<double>& data_points) const
{
    int const width = image.width();
    int const height = image.height();

    double const xscale = calcScale(width);
    double const yscale = calcScale(height);

    uint8_t const* image_line = image.data();
    int const image_bpl = image.stride();

    uint32_t const* mask_line = mask.data();
    int const mask_wpl = mask.wordsPerLine();

    int const last_word_idx = (width - 1) >> 5;
    // mask off the padding bits in the last 32-bit word of each mask line
    uint32_t const last_word_mask = ~uint32_t(0) << (31 - ((width - 1) & 31));

    for (int y = 0; y < height; ++y) {
        double const y_adjusted = y * yscale;
        int idx = 0;

        // Full words.
        for (; idx < last_word_idx; ++idx) {
            processMaskWord(
                image_line, mask_line[idx], idx, y, y_adjusted,
                xscale, equations, data_points
            );
        }

        // Last (possibly partial) word.
        processMaskWord(
            image_line, mask_line[idx] & last_word_mask, idx, y, y_adjusted,
            xscale, equations, data_points
        );

        image_line += image_bpl;
        mask_line += mask_wpl;
    }
}
// Dilate (a standard image processing operation) a hands mask
void dilateMask(GrayImage &img, int amount)
{
    static GrayImage tmp;
    img.CopyTo(tmp);
    // stay 'amount' pixels inside the border so the kernel never leaves the image
    for (int y = amount; y < img.rows - amount; y++) {
        for (int x = amount; x < img.cols - amount; x++) {
            // Examine neighboring pixels
            for (int dy = -amount; dy <= amount; dy++) {
                for (int dx = -amount; dx <= amount; dx++) {
                    if (img.data[y+dy][x+dx] != kMaskUnoccupied) {
                        // copy the first occupied neighbor found, then
                        // break out of both kernel loops
                        tmp.data[y][x] = img.data[y+dy][x+dx];
                        goto donePixel;
                    }
                }
            }
            donePixel: ;
        }
    }
    tmp.CopyTo(img);
}
// Erode (a standard image processing operation) a grayscale image
void erode(GrayImage &img, int amount)
{
    static GrayImage tmp;
    img.CopyTo(tmp);
    // Two-dimensional kernel; stay 'amount' pixels inside the border so the
    // kernel never reads out of bounds (matching dilateMask above). Border
    // pixels keep their original values from the copy.
    for (int y = amount; y < img.rows - amount; y++) {
        for (int x = amount; x < img.cols - amount; x++) {
            // Examine neighboring pixels and keep the minimum
            int min = 255;
            for (int dy = -amount; dy <= amount; dy++) {
                for (int dx = -amount; dx <= amount; dx++) {
                    if (img.data[y+dy][x+dx] < min)
                        min = img.data[y+dy][x+dx];
                }
            }
            tmp.data[y][x] = min;
        }
    }
    tmp.CopyTo(img);
}
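// A minimal, self-contained sketch of the same grayscale erosion (a min
// filter over a (2*amount+1)^2 neighborhood) on a plain row-major buffer, so
// the technique behind erode()/dilateMask() can be tried without the
// project's GrayImage type. erodeBuffer is an illustrative name; here the
// kernel window is clamped at the image border instead of skipping border
// pixels -- both are common conventions.
#include <algorithm>
#include <vector>

static std::vector<unsigned char> erodeBuffer(
    const std::vector<unsigned char> &src, int rows, int cols, int amount)
{
    std::vector<unsigned char> dst(src.size());
    for (int y = 0; y < rows; ++y) {
        for (int x = 0; x < cols; ++x) {
            unsigned char mn = 255;
            // clamp the kernel window to the image bounds
            for (int dy = std::max(-amount, -y); dy <= std::min(amount, rows - 1 - y); ++dy)
                for (int dx = std::max(-amount, -x); dx <= std::min(amount, cols - 1 - x); ++dx)
                    mn = std::min(mn, src[(y + dy) * cols + (x + dx)]);
            dst[y * cols + x] = mn;
        }
    }
    return dst;
}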
void PolynomialSurface::prepareEquationsAndDataPoints(
    GrayImage const& image,
    std::vector<double>& equations,
    std::vector<double>& data_points) const
{
    int const width = image.width();
    int const height = image.height();

    uint8_t const* line = image.data();
    int const bpl = image.stride();

    // Pretend that both x and y positions of pixels lie in the range [0, 1].
    double const xscale = calcScale(width);
    double const yscale = calcScale(height);

    for (int y = 0; y < height; ++y, line += bpl) {
        double const y_adjusted = yscale * y;
        for (int x = 0; x < width; ++x) {
            double const x_adjusted = xscale * x;
            data_points.push_back((1.0 / 255.0) * line[x]);
            double pow1 = 1.0;
            for (int i = 0; i <= m_vertDegree; ++i) {
                double pow2 = pow1;
                for (int j = 0; j <= m_horDegree; ++j) {
                    equations.push_back(pow2);
                    pow2 *= x_adjusted;
                }
                pow1 *= y_adjusted;
            }
        }
    }
}
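// Context for the two prepareEquationsAndDataPoints() variants above: every
// sampled pixel contributes one value (gray / 255) to data_points and one row
// of monomials x^j * y^i (0 <= i <= vertDegree, 0 <= j <= horDegree) to
// equations; fitting the surface then means solving that overdetermined
// system in the least-squares sense. Below is a hedged sketch of the
// per-pixel row builder and of evaluating a fitted surface from a coefficient
// vector; monomialRow/evalSurface are illustrative names, and the solver
// itself (QR, normal equations, ...) is assumed to live elsewhere.
#include <cstddef>
#include <vector>

static std::vector<double> monomialRow(double x, double y, int horDegree, int vertDegree)
{
    std::vector<double> row;
    double pow1 = 1.0; // y^i
    for (int i = 0; i <= vertDegree; ++i) {
        double pow2 = pow1; // x^j * y^i
        for (int j = 0; j <= horDegree; ++j) {
            row.push_back(pow2);
            pow2 *= x;
        }
        pow1 *= y;
    }
    return row;
}

// Evaluate sum_k coeffs[k] * monomial_k(x, y) for a fitted coefficient vector.
static double evalSurface(const std::vector<double> &coeffs, double x, double y,
                          int horDegree, int vertDegree)
{
    std::vector<double> const row = monomialRow(x, y, horDegree, vertDegree);
    double v = 0.0;
    for (std::size_t k = 0; k < row.size(); ++k)
        v += coeffs[k] * row[k];
    return v;
}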
void PreprocessingResults::computePreprocessings(const PreprocessingParameters parameters, const GrayImage<> &glyphImage)
{
    this->clear();

    // 1st: preprocess glyph image:
    mpProcessedImage = new GrayImage<>();
    PreprocessingResults::preprocessGlyph(parameters, glyphImage, *mpProcessedImage, &isWhiteSpace);
//    pImageChar->pPreprImage = mpProcessedImage;

    if (isWhiteSpace) { // jump out as this is a whitespace character
        return;
    }

#if 1
    // 2nd: compute integral invariant image:
    int radius = 2;
    bool oddSize = true;
    Kernel<> mask;
    Kernel<>::createSphereKernel( mask, radius, oddSize );
//    std::cout << mask << std::endl;
    mpIntInvImage = new GrayImage<float>();
    ImageIntegralInvariant::calcIntegralInvariantImage( mask, *mpProcessedImage, *mpIntInvImage );
#endif

#if 1
    // 3rd: compute distance transform of inverted(!) glyph:
    GrayImage<> invPreprocessedGlyphImage = *mpProcessedImage;
    invPreprocessedGlyphImage.invert();
    mpDistTransformImage = new GrayImage<float>();
    ImageFilter::distTransform(invPreprocessedGlyphImage, *mpDistTransformImage);
#endif

    return;
}
// Box blur using summed area tables - constant time with respect to kernel size.
// Based off http://www.openprocessing.org/sketch/2934
void boxBlur(GrayImage &img, int size)
{
    static GrayImage tmp;
    static int sum[480][640]; // summed area table; assumes frames no larger than 640x480
    img.CopyTo(tmp);

    // Top row and left column
    sum[0][0] = img.data[0][0];
    for (int x = 1; x < img.cols; ++x) {
        sum[0][x] = sum[0][x-1] + img.data[0][x];
    }
    for (int y = 1; y < img.rows; ++y) {
        sum[y][0] = sum[y-1][0] + img.data[y][0];
    }
    // Rest of summed area table
    for (int y = 1; y < img.rows; y++) {
        for (int x = 1; x < img.cols; x++) {
            sum[y][x] = tmp.data[y][x] + sum[y][x-1] + sum[y-1][x] - sum[y-1][x-1];
        }
    }

    for (int y = 0; y < img.rows; y++) {
        for (int x = 0; x < img.cols; x++) {
            // Clip the kernel at the top/left image borders; the window
            // covered by the four table lookups below spans
            // (yMin, xMin] .. (y, x], i.e. xKernSize * yKernSize pixels.
            const int xKernSize = std::min(x, size);
            const int yKernSize = std::min(y, size);
            const int xMin = x - xKernSize;
            const int yMin = y - yKernSize;
            const int pSum = sum[y][x] - sum[yMin][x] - sum[y][xMin] + sum[yMin][xMin];
            const int avg = pSum / std::max(xKernSize * yKernSize, 1);
            tmp.data[y][x] = avg;
        }
    }
    tmp.CopyTo(img);
}
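// A minimal, self-contained summed-area-table sketch on a row-major buffer,
// illustrating the O(1)-per-pixel box sum that boxBlur() above relies on:
// sat[y][x] holds the sum of all pixels in the rectangle (0,0)..(y,x), so any
// box sum needs only four table lookups. buildSat/boxSum are illustrative
// names; long long keeps the running sums from overflowing.
#include <vector>

static std::vector<long long> buildSat(const std::vector<int> &img, int rows, int cols)
{
    std::vector<long long> sat(img.size());
    for (int y = 0; y < rows; ++y)
        for (int x = 0; x < cols; ++x) {
            long long s = img[y * cols + x];
            if (x > 0) s += sat[y * cols + x - 1];
            if (y > 0) s += sat[(y - 1) * cols + x];
            if (y > 0 && x > 0) s -= sat[(y - 1) * cols + x - 1];
            sat[y * cols + x] = s;
        }
    return sat;
}

// Sum over the box with inclusive corners (y0, x0) .. (y1, x1).
static long long boxSum(const std::vector<long long> &sat, int cols,
                        int y0, int x0, int y1, int x1)
{
    long long s = sat[y1 * cols + x1];
    if (y0 > 0) s -= sat[(y0 - 1) * cols + x1];
    if (x0 > 0) s -= sat[y1 * cols + (x0 - 1)];
    if (y0 > 0 && x0 > 0) s += sat[(y0 - 1) * cols + (x0 - 1)];
    return s;
}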
Z3i::Domain scene_dimensions( const boost::filesystem::path &folderPath )
{
    // count the consecutive slice files slice-0.pgm, slice-1.pgm, ...
    uint depth = 0;
    boost::filesystem::path filePath;
    do {
        filePath = folderPath;
        filePath /= QString("slice-%1.pgm").arg( depth, 0, 10 ).toStdString();
        depth++;
    } while ( boost::filesystem::exists( filePath ) );
    depth--;

    if ( depth == 0 )
        return Z3i::Domain( Z3i::Point(0,0,0), Z3i::Point(0,0,0) );

    // take the middle slice as representative of the 2D extent:
    filePath = folderPath;
    filePath /= QString("slice-%1.pgm").arg( depth/2, 0, 10 ).toStdString(); /** "slice-0.pgm" ;*/
    GrayImage image = PNMReader<GrayImage>::importPGM( filePath.string() );
#if 0
    Z2i::DigitalSet *pts = newDigitalSet2DFromBinaryImage( image );
    Board2D board;
    board << image.domain() << *pts;
    board.saveSVG( "cropcheck.svg" );
    trace.info() << "Get " << pts->size() << " points on slide " << filePath.string() << std::endl;
    delete pts;
#endif
    return Z3i::Domain( Z3i::Point(0,0,0),
                        Z3i::Point( image.domain().upperBound().at(0), image.domain().upperBound().at(1), depth ) );
}
void MotionBlur::applyEffect(ColorImage& image, KinectData& kinectData, const GrayImage& handsMask, int timeElapsed)
{
    for (int y = 0; y < image.rows; y++) {
        for (int x = 0; x < image.cols; x++) {
            // Leave the area where the hand currently is alone
            if (handsMask.data[y][x] != kMaskUnoccupied)
                continue;

            unsigned int blendRed = 0;
            unsigned int blendGreen = 0;
            unsigned int blendBlue = 0;
            unsigned int blendSamplesUsed = 0;

            // Blend from each buffer
            for (int buf = 0; buf < historyLength; buf++) {
                // Include only pixels where the hand was
                if (maskHistory[buf].data[y][x] != kMaskUnoccupied) {
                    blendRed += imgHistory[buf].data[y][x].red;
                    blendGreen += imgHistory[buf].data[y][x].green;
                    blendBlue += imgHistory[buf].data[y][x].blue;
                    blendSamplesUsed++;
                }
            }

            // If any blending is to be done, average the current pixel and the old ones.
            // The current frame makes up half the contribution, with the other half
            // coming from all the other frames combined.
            if (blendSamplesUsed > 0) {
                blendRed /= blendSamplesUsed;
                blendGreen /= blendSamplesUsed;
                blendBlue /= blendSamplesUsed;
                image.data[y][x].red = (image.data[y][x].red + blendRed) / 2;
                image.data[y][x].green = (image.data[y][x].green + blendGreen) / 2;
                image.data[y][x].blue = (image.data[y][x].blue + blendBlue) / 2;
            }
        }
    }

    // Store the current frame in the buffers and advance the ring position
    image.CopyTo(imgHistory[curBuffer]);
    handsMask.CopyTo(maskHistory[curBuffer]);
    curBuffer = (curBuffer + 1) % historyLength;
}
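// A minimal sketch of the blend rule applied per channel above: the current
// value contributes one half, and the mean of the historical values (only
// frames where the mask was occupied) contributes the other half.
// blendWithHistory is an illustrative name, not part of the original API.
#include <cstddef>
#include <vector>

static unsigned char blendWithHistory(unsigned char current,
                                      const std::vector<unsigned char> &history)
{
    if (history.empty())
        return current; // nothing to blend against
    unsigned int sum = 0;
    for (std::size_t i = 0; i < history.size(); ++i)
        sum += history[i];
    unsigned int const historyAvg = sum / (unsigned int)history.size();
    return (unsigned char)((current + historyAvg) / 2);
}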
void PreprocessingResults::computePreprocessings(const PreprocessingParameters parameters, ImageChar *pImageChar)
{
    this->clear();

    // extract glyph from document image:
    GrayImage<> *pImage = pImageChar->pImage;
    BoundingBox bbox = pImageChar->bBox;
    GrayImage<> glyphImage;
    #pragma omp critical // critical region, since a pointer to common images is set
    {
        pImage->setRoi(bbox);
        pImage->computeRoiImage(glyphImage);
        pImage->releaseRoi();
    }
//    printf("Hello World from thread %d\n", omp_get_thread_num());
//    std::cout << bbox << std::endl;

    // 1st: preprocess glyph image:
    mpProcessedImage = new GrayImage<>();
    PreprocessingResults::preprocessGlyph(parameters, glyphImage, *mpProcessedImage, &isWhiteSpace);
//    pImageChar->pPreprImage = mpProcessedImage;

    if (isWhiteSpace) { // jump out as this is a whitespace character
        return;
    }

#if 1
    // 2nd: compute integral invariant image:
    int radius = 2;
    bool oddSize = true;
    Kernel<> mask;
    Kernel<>::createSphereKernel( mask, radius, oddSize );
//    std::cout << mask << std::endl;
    mpIntInvImage = new GrayImage<float>();
    ImageIntegralInvariant::calcIntegralInvariantImage( mask, *mpProcessedImage, *mpIntInvImage );
#endif

#if 1
    // 3rd: compute distance transform of inverted(!) glyph:
    GrayImage<> invPreprocessedGlyphImage = *mpProcessedImage;
    invPreprocessedGlyphImage.invert();
    mpDistTransformImage = new GrayImage<float>();
    ImageFilter::distTransform(invPreprocessedGlyphImage, *mpDistTransformImage);
#endif

    return;
} // end computePreprocessings(ImageChar *pImageChar)
void AppCanvas::readDepthPixels(int x, int y, int w, int h, GrayImage& oImage) const
{
    float *z = new float[w * h];
    memset(z, 0, sizeof(float) * w * h);

    int xsch = width();
    int ysch = height();
    if (_pass_z.buf) {
        int xmin = border().getMin().x();
        int ymin = border().getMin().y();
        int xmax = border().getMax().x();
        int ymax = border().getMax().y();
        int rectx = _pass_z.width;
        int recty = _pass_z.height;
        float xfac = ((float)rectx) / ((float)(xmax - xmin));
        float yfac = ((float)recty) / ((float)(ymax - ymin));
#if 0
        if (G.debug & G_DEBUG_FREESTYLE) {
            printf("readDepthPixels %d x %d @ (%d, %d) in %d x %d [%d x %d] -- %d x %d @ %d%%\n",
                   w, h, x, y, xsch, ysch, xmax - xmin, ymax - ymin, rectx, recty, (int)(xfac * 100.0f));
        }
#endif
        int ii, jj;
        for (int j = 0; j < h; j++) {
            jj = (int)((y - ymin + j) * yfac);
            if (jj < 0 || jj >= recty)
                continue;
            for (int i = 0; i < w; i++) {
                ii = (int)((x - xmin + i) * xfac);
                if (ii < 0 || ii >= rectx)
                    continue;
                z[w * j + i] = _pass_z.buf[rectx * jj + ii];
            }
        }
    }
    oImage.setArray(z, xsch, ysch, w, h, x, y, false);
}
void SteerableViewMap::saveSteerableViewMap() const
{
    for (unsigned int i = 0; i <= _nbOrientations; ++i) {
        if (_imagesPyramids[i] == 0) {
            cerr << "SteerableViewMap warning: orientation " << i
                 << " of steerable View Map has not been computed yet" << endl;
            continue;
        }
        int ow = _imagesPyramids[i]->width(0);
        int oh = _imagesPyramids[i]->height(0);

        //soc QString base("SteerableViewMap");
        string base("SteerableViewMap");

        for (int j = 0; j < _imagesPyramids[i]->getNumberOfLevels(); ++j) { //soc
            float coeff = 1.0f; // 1 / 255.0f; // 100 * 255; // * pow(2, j);
            //soc QImage qtmp(ow, oh, QImage::Format_RGB32);
            ImBuf *ibuf = IMB_allocImBuf(ow, oh, 32, IB_rect);
            int rowbytes = ow * 4;
            char *pix;

            for (int y = 0; y < oh; ++y) { //soc
                for (int x = 0; x < ow; ++x) { //soc
                    int c = (int)(coeff * _imagesPyramids[i]->pixel(x, y, j));
                    if (c > 255)
                        c = 255;
                    //int c = (int)(_imagesPyramids[i]->pixel(x, y, j));

                    //soc qtmp.setPixel(x, y, qRgb(c, c, c));
                    pix = (char *)ibuf->rect + y * rowbytes + x * 4;
                    pix[0] = pix[1] = pix[2] = c;
                }
            }

            //soc qtmp.save(base+QString::number(i)+"-"+QString::number(j)+".png", "PNG");
            // build a fresh filename per level (a stream shared across
            // iterations would keep accumulating the previous names):
            stringstream filename;
            filename << base;
            filename << i << "-" << j << ".png";
            ibuf->ftype = PNG;
            IMB_saveiff(ibuf, const_cast<char *>(filename.str().c_str()), 0);
        }

#if 0
        QString base("SteerableViewMap");
        for (unsigned j = 0; j < _imagesPyramids[i]->getNumberOfLevels(); ++j) {
            GrayImage *img = _imagesPyramids[i]->getLevel(j);
            int ow = img->width();
            int oh = img->height();
            float coeff = 1.0f; // 100 * 255; // * pow(2, j);
            QImage qtmp(ow, oh, 32);
            for (unsigned int y = 0; y < oh; ++y) {
                for (unsigned int x = 0; x < ow; ++x) {
                    int c = (int)(coeff * img->pixel(x, y));
                    if (c > 255)
                        c = 255;
                    //int c = (int)(_imagesPyramids[i]->pixel(x, y, j));
                    qtmp.setPixel(x, y, qRgb(c, c, c));
                }
            }
            qtmp.save(base + QString::number(i) + "-" + QString::number(j) + ".png", "PNG");
        }
#endif
    }
}
float ImagePyramid::pixel(int x, int y, int level)
{
    GrayImage *img = _levels[level];
    if (0 == level) {
        return img->pixel(x, y);
    }
    unsigned int i = 1 << level;
    unsigned int sx = x >> level;
    unsigned int sy = y >> level;
    if (sx >= img->width())
        sx = img->width() - 1;
    if (sy >= img->height())
        sy = img->height() - 1;

    // bilinear interpolation
    float A = i * (sx + 1) - x;
    float B = x - i * sx;
    float C = i * (sy + 1) - y;
    float D = y - i * sy;

    float P1(0), P2(0);
    P1 = A * img->pixel(sx, sy);
    if (sx < img->width() - 1) {
        if (x % i != 0)
            P1 += B * img->pixel(sx + 1, sy);
    }
    else {
        P1 += B * img->pixel(sx, sy);
    }
    if (sy < img->height() - 1) {
        if (y % i != 0) {
            P2 = A * img->pixel(sx, sy + 1);
            if (sx < img->width() - 1) {
                if (x % i != 0)
                    P2 += B * img->pixel(sx + 1, sy + 1);
            }
            else {
                P2 += B * img->pixel(sx, sy + 1);
            }
        }
    }
    else {
        P2 = P1;
    }
    return (1.0f / (float)(1 << (2 * level))) * (C * P1 + D * P2);
}
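// A minimal sketch of the weighting used in ImagePyramid::pixel() above: for
// a pyramid level with cells of side i = 2^level, a full-resolution
// coordinate (x, y) falls in coarse cell (sx, sy) = (x >> level, y >> level),
// and the horizontal/vertical weights are the distances to the cell edges:
//   A = i*(sx+1) - x,  B = x - i*sx   (so A + B == i), likewise C, D for y.
// The blended value is (C*P1 + D*P2) / i^2 with P1, P2 the two
// row-interpolated samples, so the four corner weights always sum to 1.
// bilinearWeightsCheck is an illustrative name used only to verify this.
#include <cassert>

static float bilinearWeightsCheck(int x, int y, int level)
{
    int const i = 1 << level;
    int const sx = x >> level;
    int const sy = y >> level;
    float const A = (float)(i * (sx + 1) - x);
    float const B = (float)(x - i * sx);
    float const C = (float)(i * (sy + 1) - y);
    float const D = (float)(y - i * sy);
    // The four corner weights tile the i x i cell area exactly:
    // (A + B) * (C + D) == i * i.
    float const total = (A * C + B * C + A * D + B * D) / (float)(i * i);
    assert(total == 1.0f);
    return total;
}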
/*static*/ void PreprocessingResults::preprocessGlyph(const PreprocessingParameters parameters, const GrayImage<> &src, GrayImage<> &dst, bool *whitespace/*=NULL*/)
{
    GrayImage<> tmpImage1 = src;

    if (parameters.binarize) { // binarize image
        tmpImage1.binarizeOtsu();
    }
    if (parameters.invert) { // invert image
        tmpImage1.invert();
    }
    if (parameters.preMedian) { // do pre median filtering
        dst = tmpImage1;
        ImFi::median(dst, tmpImage1, parameters.preMedianMaskSize);
    }
//    tmpImage1.show("nachinvert");

    // classify the glyph as whitespace if its pixel density falls below a fixed threshold:
    double thresh = 0.05;
    double pixelDens = tmpImage1.pixelDensity();
    bool isWs = (pixelDens < thresh);
    if (whitespace != NULL)
        *whitespace = isWs;
//    isWs = makeWhitespaceImageOne(dst, tmpImage1, thresh);

    if (!isWs) {
        // do size normalization:
        dst = tmpImage1;
        uint8 padValue = 0;
#if 0
        // compute center of mass index:
        Index ctrOfMassIndex;
        dst.computeCenterOfMassIndex(ctrOfMassIndex, true);
        // center image on center of mass:
        ImOp::extendToCenter(dst, tmpImage1, ctrOfMassIndex, 0, padValue);
#else
        dst.computeCenterOfMassCenteredImage(tmpImage1, padValue);
//        tmpImage1.show("nachcenterofmass");
#endif

        // CV_INTER_NN     - nearest-neighbor interpolation
        // CV_INTER_LINEAR - bilinear interpolation (used by default)
        // CV_INTER_AREA   - resampling using pixel area relation; preferred for image
        //                   decimation, as it gives moire-free results (similar to
        //                   CV_INTER_NN when zooming)
        // CV_INTER_CUBIC  - bicubic interpolation
        tmpImage1.resize(parameters.sizeNormWidth, parameters.sizeNormHeight, CV_INTER_LINEAR);
//        tmpImage1.show("nachresize");

        if (parameters.postMedian) { // do post median filtering
            dst = tmpImage1;
            ImFi::median(dst, tmpImage1, parameters.postMedianMaskSize);
        }
        tmpImage1.binarize();
//        tmpImage1.show("nachbinarize");
        dst = tmpImage1;
    } // end if not whitespace
    else {
        // whitespace: emit a blank normalized image with a single marker pixel
        dst.init(parameters.sizeNormWidth, parameters.sizeNormHeight);
        dst.fill(0);
        dst(0,0) = 255;
    }

    return;
} // end preprocessGlyph