Code Example #1
File: coutln.cpp  Project: 0ximDigital/appsScanner
// Draws the outline in the given colour, normalized using the given denorm,
// making use of sub-pixel accurate information if available.
void C_OUTLINE::plot_normed(const DENORM& denorm, ScrollView::Color colour,
                            ScrollView* window) const {
  window->Pen(colour);
  if (stepcount == 0) {
    window->Rectangle(box.left(), box.top(), box.right(), box.bottom());
    return;
  }
  const DENORM* root_denorm = denorm.RootDenorm();
  ICOORD pos = start;                   // current position
  FCOORD f_pos = sub_pixel_pos_at_index(pos, 0);
  FCOORD pos_normed;
  denorm.NormTransform(root_denorm, f_pos, &pos_normed);
  window->SetCursor(IntCastRounded(pos_normed.x()),
                    IntCastRounded(pos_normed.y()));
  for (int s = 0; s < stepcount; pos += step(s++)) {
    int edge_weight = edge_strength_at_index(s);
    if (edge_weight == 0) {
      // This point has conflicting gradient and step direction, so ignore it.
      continue;
    }
    FCOORD f_pos = sub_pixel_pos_at_index(pos, s);
    FCOORD pos_normed;
    denorm.NormTransform(root_denorm, f_pos, &pos_normed);
    window->DrawTo(IntCastRounded(pos_normed.x()),
                   IntCastRounded(pos_normed.y()));
  }
}
Code Example #2
File: blobs.cpp  Project: 11110101/tess-two
// Normalize in-place using the DENORM.
void TESSLINE::Normalize(const DENORM& denorm) {
  EDGEPT* pt = loop;
  do {
    denorm.LocalNormTransform(pt->pos, &pt->pos);
    pt = pt->next;
  } while (pt != loop);
  SetupFromPos();
}
Code Example #3
File: blobs.cpp  Project: ErwcPKerr/node-dv
// Normalize in-place using the DENORM.
void TBLOB::Normalize(const DENORM& denorm) {
  // TODO(rays) outline->Normalize is more accurate, but breaks tests due to
  // the changes it makes. Reinstate this code with a retraining.
#if 1
  for (TESSLINE* outline = outlines; outline != NULL; outline = outline->next) {
    outline->Normalize(denorm);
  }
#else
  denorm.LocalNormBlob(this);
#endif
}
Code Example #4
File: intfx.cpp  Project: dqsoft/tesseract
// Helper normalizes the direction, assuming that it is at the given
// unnormed_pos, using the given denorm, starting at the root_denorm.
uint8_t NormalizeDirection(uint8_t dir, const FCOORD& unnormed_pos,
                         const DENORM& denorm, const DENORM* root_denorm) {
  // Convert direction to a vector.
  FCOORD unnormed_end;
  unnormed_end.from_direction(dir);
  unnormed_end += unnormed_pos;
  FCOORD normed_pos, normed_end;
  denorm.NormTransform(root_denorm, unnormed_pos, &normed_pos);
  denorm.NormTransform(root_denorm, unnormed_end, &normed_end);
  normed_end -= normed_pos;
  return normed_end.to_direction();
}
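The same idea can be sketched outside Tesseract: transform the point and the point plus a unit step in the given direction, then take the direction of the transformed difference. The snippet below is a minimal standalone illustration, assuming 8-bit binary angles (256 units per full turn, as FCOORD uses) and a hypothetical affine transform standing in for DENORM::NormTransform.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical affine transform standing in for DENORM::NormTransform:
// a +90 degree rotation combined with a non-uniform scale.
static void AffineTransform(float x, float y, float* out_x, float* out_y) {
  *out_x = -2.0f * y;
  *out_y = 0.5f * x;
}

// Same logic as NormalizeDirection above, with binary angles converted
// explicitly instead of using FCOORD::from_direction/to_direction.
static uint8_t NormalizeDirectionSketch(uint8_t dir, float x, float y) {
  const double kPi = 3.14159265358979323846;
  double angle = dir * 2.0 * kPi / 256.0;                  // binary angle -> radians
  float end_x = x + static_cast<float>(std::cos(angle));   // unit step in direction
  float end_y = y + static_cast<float>(std::sin(angle));
  float nx, ny, nex, ney;
  AffineTransform(x, y, &nx, &ny);
  AffineTransform(end_x, end_y, &nex, &ney);
  double normed = std::atan2(ney - ny, nex - nx);           // transformed direction
  long result = std::lround(normed * 256.0 / (2.0 * kPi));
  return static_cast<uint8_t>(((result % 256) + 256) % 256);
}

int main() {
  // A rightward step (dir 0) becomes an upward step (dir 64) after the
  // 90-degree rotation, regardless of the anisotropic scaling.
  std::printf("%d\n", NormalizeDirectionSketch(0, 10.0f, 20.0f));
  return 0;
}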
Code Example #5
File: M_Utils.cpp  Project: Andy0898/Project_Isagoge
BLOB_CHOICE* M_Utils::runBlobOCR(BLOBNBOX* blob, Tesseract* ocrengine) {
  // * Normalize blob height to x-height (current OSD):
  // SetupNormalization(NULL, NULL, &rotation, NULL, NULL, 0,
  //                    box.rotational_x_middle(rotation),
  //                    box.rotational_y_middle(rotation),
  //                    kBlnXHeight / box.rotational_height(rotation),
  //                    kBlnXHeight / box.rotational_height(rotation),
  //                    0, kBlnBaselineOffset);
  BLOB_CHOICE_LIST ratings_lang;
  C_BLOB* cblob = blob->cblob();
  TBLOB* tblob = TBLOB::PolygonalCopy(cblob);
  const TBOX& box = tblob->bounding_box();

  // Normalize the blob. Set the origin to the place we want to be the
  // bottom-middle, and scale so the blob height maps to kBlnXHeight.
  float scaling = static_cast<float>(kBlnXHeight) / box.height();
  DENORM denorm;
  float x_orig = (box.left() + box.right()) / 2.0f, y_orig = box.bottom();
  denorm.SetupNormalization(NULL, NULL, NULL, NULL, NULL, 0,
                            x_orig, y_orig, scaling, scaling,
                            0.0f, static_cast<float>(kBlnBaselineOffset));
  TBLOB* normed_blob = new TBLOB(*tblob);
  normed_blob->Normalize(denorm);
  ocrengine->AdaptiveClassifier(normed_blob, denorm, &ratings_lang, NULL);
  delete normed_blob;
  delete tblob;

  // Get the best choice from ratings_lang. As the choices in the list have
  // already been sorted by certainty, we simply use the first one.
  BLOB_CHOICE *lang_choice = NULL;
  if (ratings_lang.length() > 0) {
    BLOB_CHOICE_IT choice_it(&ratings_lang);
    lang_choice = choice_it.data();
  }
  return lang_choice;
}
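As a rough worked example of the normalization set up above: assuming the conventional Tesseract constants kBlnXHeight = 128 and kBlnBaselineOffset = 64, and modelling DENORM's local transform as normed = (raw - origin) * scale + final_shift per axis, the blob's bottom edge lands on the normalized baseline and its top edge lands one x-height above it.

#include <cstdio>

// Illustrative sketch only: the box values are made up, and the per-axis
// affine model is an assumption about what SetupNormalization configures.
int main() {
  const float kBlnXHeight = 128.0f;        // assumed conventional value
  const float kBlnBaselineOffset = 64.0f;  // assumed conventional value

  // Hypothetical blob bounding box: left=100, right=140, bottom=50, top=114.
  float left = 100.0f, right = 140.0f, bottom = 50.0f, top = 114.0f;

  float scaling = kBlnXHeight / (top - bottom);  // 128 / 64 = 2
  float x_orig = (left + right) / 2.0f;          // 120: bottom-middle x
  float y_orig = bottom;                         // 50:  bottom-middle y

  float normed_x_middle = (x_orig - x_orig) * scaling + 0.0f;
  float normed_y_bottom = (bottom - y_orig) * scaling + kBlnBaselineOffset;
  float normed_y_top = (top - y_orig) * scaling + kBlnBaselineOffset;
  // Prints: x middle -> 0, bottom -> 64, top -> 192
  std::printf("x middle -> %g, bottom -> %g, top -> %g\n",
              normed_x_middle, normed_y_bottom, normed_y_top);
  return 0;
}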
Code Example #6
File: intfx.cpp  Project: dqsoft/tesseract
// Gathers outline points and their directions from start_index into dirs by
// stepping along the outline and normalizing the coordinates until the
// required feature_length has been collected or end_index is reached.
// On input pos must point to the position corresponding to start_index and on
// return pos is updated to the current raw position, and pos_normed is set to
// the normed version of pos.
// Since directions wrap around, they need special treatment to get the mean.
// Provided the cluster of directions doesn't straddle the wrap-around point,
// the simple mean works. If they do, then, unless the directions are wildly
// varying, the cluster rotated by 180 degrees will not straddle the wrap-
// around point, so mean(dir + 180 degrees) - 180 degrees will work. Since
// LLSQ conveniently stores the mean of 2 variables, we use it to store
// dir and dir+128 (128 is 180 degrees) and then use the resulting mean
// with the least variance.
static int GatherPoints(const C_OUTLINE* outline, double feature_length,
                        const DENORM& denorm, const DENORM* root_denorm,
                        int start_index, int end_index,
                        ICOORD* pos, FCOORD* pos_normed,
                        LLSQ* points, LLSQ* dirs) {
  int step_length = outline->pathlength();
  ICOORD step = outline->step(start_index % step_length);
  // Prev_normed is the start point of this collection and will be set on the
  // first iteration, and on later iterations used to determine the length
  // that has been collected.
  FCOORD prev_normed;
  points->clear();
  dirs->clear();
  int num_points = 0;
  int index;
  for (index = start_index; index <= end_index; ++index, *pos += step) {
    step = outline->step(index % step_length);
    int edge_weight = outline->edge_strength_at_index(index % step_length);
    if (edge_weight == 0) {
      // This point has conflicting gradient and step direction, so ignore it.
      continue;
    }
    // Get the sub-pixel precise location and normalize.
    FCOORD f_pos = outline->sub_pixel_pos_at_index(*pos, index % step_length);
    denorm.NormTransform(root_denorm, f_pos, pos_normed);
    if (num_points == 0) {
      // The start of this segment.
      prev_normed = *pos_normed;
    } else {
      FCOORD offset = *pos_normed - prev_normed;
      float length = offset.length();
      if (length > feature_length) {
        // We have gone far enough from the start. We will use this point in
        // the next set so return what we have so far.
        return index;
      }
    }
    points->add(pos_normed->x(), pos_normed->y(), edge_weight);
    int direction = outline->direction_at_index(index % step_length);
    if (direction >= 0) {
      direction = NormalizeDirection(direction, f_pos, denorm, root_denorm);
      // Use both the direction and direction +128 so we are not trying to
      // take the mean of something straddling the wrap-around point.
      dirs->add(direction, Modulo(direction + 128, 256));
    }
    ++num_points;
  }
  return index;
}
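The dir / dir+128 trick described in the comment above can be demonstrated on its own: compute the mean and variance of the raw directions and of the directions shifted by 128, keep the mean with the smaller variance, and shift it back if the shifted set won. A minimal standalone sketch using plain arrays instead of LLSQ:

#include <cmath>
#include <cstdio>
#include <vector>

// Mean of 8-bit binary angles (256 = a full turn) using the trick from the
// comment above: average dir and (dir + 128) % 256, keep whichever set has
// the lower variance, and undo the 128 shift if the shifted set was used.
static int MeanDirection(const std::vector<int>& dirs) {
  double sum[2] = {0, 0}, sum_sq[2] = {0, 0};
  for (int d : dirs) {
    int shifted = (d + 128) % 256;
    sum[0] += d;        sum_sq[0] += static_cast<double>(d) * d;
    sum[1] += shifted;  sum_sq[1] += static_cast<double>(shifted) * shifted;
  }
  double n = static_cast<double>(dirs.size());
  double mean[2], var[2];
  for (int i = 0; i < 2; ++i) {
    mean[i] = sum[i] / n;
    var[i] = sum_sq[i] / n - mean[i] * mean[i];
  }
  double best = (var[0] <= var[1]) ? mean[0] : mean[1] - 128.0;
  return static_cast<int>((std::lround(best) + 256) % 256);
}

int main() {
  // Directions straddling the wrap-around point: 250, 252, 2, 4.
  // A naive mean gives 127 (pointing the opposite way); this gives 255.
  std::vector<int> dirs = {250, 252, 2, 4};
  std::printf("%d\n", MeanDirection(dirs));
  return 0;
}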
Code Example #7
File: blobs.cpp  Project: ErwcPKerr/node-dv
// Normalizes the blob for classification only if needed.
// (Normally this means a non-zero classify rotation.)
// If no Normalization is needed, then NULL is returned, and the denorm is
// unchanged. Otherwise a new TBLOB is returned and the denorm points to
// a new DENORM. In this case, both the TBLOB and DENORM must be deleted.
TBLOB* TBLOB::ClassifyNormalizeIfNeeded(const DENORM** denorm) const {
  TBLOB* rotated_blob = NULL;
  // If necessary, copy the blob and rotate it. The rotation is always
  // +/- 90 degrees, as 180 was already taken care of.
  if ((*denorm)->block() != NULL &&
      (*denorm)->block()->classify_rotation().y() != 0.0) {
    TBOX box = bounding_box();
    int x_middle = (box.left() + box.right()) / 2;
    int y_middle = (box.top() + box.bottom()) / 2;
    rotated_blob = new TBLOB(*this);
    const FCOORD& rotation = (*denorm)->block()->classify_rotation();
    DENORM* norm = new DENORM;
    // Move the rotated blob back to the same y-position so that we
    // can still distinguish similar glyphs with different y-position.
    float target_y = kBlnBaselineOffset +
        (rotation.y() > 0 ? x_middle - box.left() : box.right() - x_middle);
    norm->SetupNormalization(NULL, NULL, &rotation, *denorm, NULL, 0,
                             x_middle, y_middle, 1.0f, 1.0f, 0.0f, target_y);
    //                             x_middle, y_middle, 1.0f, 1.0f, 0.0f, y_middle);
    rotated_blob->Normalize(*norm);
    *denorm = norm;
  }
  return rotated_blob;
}
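A hypothetical caller sketch (not from any of these repositories) showing the ownership rule stated in the comment: if a rotated copy comes back, both the new TBLOB and the new DENORM belong to the caller and must be deleted; otherwise the original blob and denorm are used unchanged.

// Sketch only; assumes the Tesseract headers and an existing blob/denorm.
static void ClassifyWithOptionalRotation(TBLOB* blob, const DENORM* denorm) {
  const DENORM* active_denorm = denorm;
  TBLOB* rotated_blob = blob->ClassifyNormalizeIfNeeded(&active_denorm);
  TBLOB* blob_to_classify = (rotated_blob != NULL) ? rotated_blob : blob;
  // ... run the classifier on blob_to_classify with *active_denorm here ...
  if (rotated_blob != NULL) {
    // Both objects were allocated by ClassifyNormalizeIfNeeded; active_denorm
    // now points at the new DENORM rather than the one passed in.
    delete rotated_blob;
    delete active_denorm;
  }
}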
Code Example #8
File: intfx.cpp  Project: dqsoft/tesseract
// Extracts Tesseract features and appends them to the features vector.
// Startpt to lastpt, inclusive, MUST have the same src_outline member,
// which may be nullptr. The vector from lastpt to its next is included in
// the feature extraction. Hidden edges should be excluded by the caller.
// If force_poly is true, the features will be extracted from the polygonal
// approximation even if more accurate data is available.
static void ExtractFeaturesFromRun(
    const EDGEPT* startpt, const EDGEPT* lastpt,
    const DENORM& denorm, double feature_length, bool force_poly,
    GenericVector<INT_FEATURE_STRUCT>* features) {
  const EDGEPT* endpt = lastpt->next;
  const C_OUTLINE* outline = startpt->src_outline;
  if (outline != nullptr && !force_poly) {
    // Detailed information is available. We have to normalize only from
    // the root_denorm to denorm.
    const DENORM* root_denorm = denorm.RootDenorm();
    int total_features = 0;
    // Get the features from the outline.
    int step_length = outline->pathlength();
    int start_index = startpt->start_step;
    // pos is the integer coordinates of the binary image steps.
    ICOORD pos = outline->position_at_index(start_index);
    // We use an end_index that allows us to use a positive increment, but that
    // may be beyond the bounds of the outline steps due to wrap-around, so
    // we use % step_length everywhere, except for start_index.
    int end_index = lastpt->start_step + lastpt->step_count;
    if (end_index <= start_index)
      end_index += step_length;
    LLSQ prev_points;
    LLSQ prev_dirs;
    FCOORD prev_normed_pos = outline->sub_pixel_pos_at_index(pos, start_index);
    denorm.NormTransform(root_denorm, prev_normed_pos, &prev_normed_pos);
    LLSQ points;
    LLSQ dirs;
    FCOORD normed_pos(0.0f, 0.0f);
    int index = GatherPoints(outline, feature_length, denorm, root_denorm,
                             start_index, end_index, &pos, &normed_pos,
                             &points, &dirs);
    while (index <= end_index) {
      // At each iteration we nominally have 3 accumulated sets of points and
      // dirs: prev_points/dirs, points/dirs, next_points/dirs and sum them
      // into sum_points/dirs, but we don't necessarily get any features out,
      // so if that is the case, we keep accumulating instead of rotating the
      // accumulators.
      LLSQ next_points;
      LLSQ next_dirs;
      FCOORD next_normed_pos(0.0f, 0.0f);
      index = GatherPoints(outline, feature_length, denorm, root_denorm,
                           index, end_index, &pos, &next_normed_pos,
                           &next_points, &next_dirs);
      LLSQ sum_points(prev_points);
      // TODO(rays) find out why it is better to use just dirs and next_dirs
      // in sum_dirs, instead of using prev_dirs as well.
      LLSQ sum_dirs(dirs);
      sum_points.add(points);
      sum_points.add(next_points);
      sum_dirs.add(next_dirs);
      bool made_features = false;
      // If we have some points, we can try making some features.
      if (sum_points.count() > 0) {
        // We have gone far enough from the start. Make a feature and restart.
        FCOORD fit_pt = sum_points.mean_point();
        FCOORD fit_vector = MeanDirectionVector(sum_points, sum_dirs,
                                                prev_normed_pos, normed_pos);
        // The segment to which we fit features is the line passing through
        // fit_pt in direction of fit_vector that starts nearest to
        // prev_normed_pos and ends nearest to normed_pos.
        FCOORD start_pos = prev_normed_pos.nearest_pt_on_line(fit_pt,
                                                              fit_vector);
        FCOORD end_pos = normed_pos.nearest_pt_on_line(fit_pt, fit_vector);
        // Possible correction to match the adjacent polygon segment.
        if (total_features == 0 && startpt != endpt) {
          FCOORD poly_pos(startpt->pos.x, startpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &start_pos);
        }
        if (index > end_index && startpt != endpt) {
          FCOORD poly_pos(endpt->pos.x, endpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &end_pos);
        }
        int num_features = ComputeFeatures(start_pos, end_pos, feature_length,
                                           features);
        if (num_features > 0) {
          // We made some features so shuffle the accumulators.
          prev_points = points;
          prev_dirs = dirs;
          prev_normed_pos = normed_pos;
          points = next_points;
          dirs = next_dirs;
          made_features = true;
          total_features += num_features;
        }
        // The end of the next set becomes the end next time around.
        normed_pos = next_normed_pos;
      }
      if (!made_features) {
        // We didn't make any features, so keep the prev accumulators and
        // add the next ones into the current.
        points.add(next_points);
        dirs.add(next_dirs);
      }
    }
  } else {
    // There is no outline, so we are forced to use the polygonal approximation.
    const EDGEPT* pt = startpt;
    do {
      FCOORD start_pos(pt->pos.x, pt->pos.y);
      FCOORD end_pos(pt->next->pos.x, pt->next->pos.y);
      denorm.LocalNormTransform(start_pos, &start_pos);
      denorm.LocalNormTransform(end_pos, &end_pos);
      ComputeFeatures(start_pos, end_pos, feature_length, features);
    } while ((pt = pt->next) != endpt);
  }
}
Code Example #9
File: blobs.cpp  Project: 11110101/tess-two
// Collects edges into the given bounding box, LLSQ accumulator and/or x_coords,
// y_coords vectors.
// For a description of x_coords/y_coords, see GetEdgeCoords above.
// Startpt to lastpt, inclusive, MUST have the same src_outline member,
// which may be NULL. The vector from lastpt to its next is included in
// the accumulation. Hidden edges should be excluded by the caller.
// The input denorm should be the normalizations that have been applied from
// the image to the current state of the TBLOB from which startpt, lastpt come.
// box is the bounding box of the blob from which the EDGEPTs are taken and
// indices into x_coords, y_coords are offset by box.botleft().
static void CollectEdgesOfRun(const EDGEPT* startpt, const EDGEPT* lastpt,
                              const DENORM& denorm, const TBOX& box,
                              TBOX* bounding_box,
                              LLSQ* accumulator,
                              GenericVector<GenericVector<int> > *x_coords,
                              GenericVector<GenericVector<int> > *y_coords) {
  const C_OUTLINE* outline = startpt->src_outline;
  int x_limit = box.width() - 1;
  int y_limit = box.height() - 1;
  if (outline != NULL) {
    // Use higher-resolution edge points stored on the outline.
    // The outline coordinates may not match the binary image because of the
    // rotation for vertical text lines, but the root_denorm IS the matching
    // start of the DENORM chain.
    const DENORM* root_denorm = denorm.RootDenorm();
    int step_length = outline->pathlength();
    int start_index = startpt->start_step;
    // Note that if this run straddles the wrap-around point of the outline,
    // lastpt->start_step may have a lower index than startpt->start_step,
    // and we want to use an end_index that allows us to use a positive
    // increment, so we add step_length if necessary, but that may be beyond
    // the bounds of the outline steps due to wrap-around, so we use
    // % step_length everywhere, except for start_index.
    int end_index = lastpt->start_step + lastpt->step_count;
    if (end_index <= start_index)
      end_index += step_length;
    // pos is the integer coordinates of the binary image steps.
    ICOORD pos = outline->position_at_index(start_index);
    FCOORD origin(box.left(), box.bottom());
    // f_pos is a floating-point version of pos that offers improved edge
    // positioning using greyscale information or smoothing of edge steps.
    FCOORD f_pos = outline->sub_pixel_pos_at_index(pos, start_index);
    // pos_normed is f_pos after the appropriate normalization, and relative
    // to origin.
    // prev_normed is the previous value of pos_normed.
    FCOORD prev_normed;
    denorm.NormTransform(root_denorm, f_pos, &prev_normed);
    prev_normed -= origin;
    for (int index = start_index; index < end_index; ++index) {
      ICOORD step = outline->step(index % step_length);
      // Only use the point if its edge strength is positive. This excludes
      // points that don't provide useful information, eg
      // ___________
      //            |___________
      // The vertical step provides only noisy, damaging information, as even
      // with a greyscale image, the positioning of the edge there may be a
      // fictitious extrapolation, so previous processing has eliminated it.
      if (outline->edge_strength_at_index(index % step_length) > 0) {
        FCOORD f_pos = outline->sub_pixel_pos_at_index(pos,
                                                       index % step_length);
        FCOORD pos_normed;
        denorm.NormTransform(root_denorm, f_pos, &pos_normed);
        pos_normed -= origin;
        // Accumulate the information that is selected by the caller.
        if (bounding_box != NULL) {
          SegmentBBox(pos_normed, prev_normed, bounding_box);
        }
        if (accumulator != NULL) {
          SegmentLLSQ(pos_normed, prev_normed, accumulator);
        }
        if (x_coords != NULL && y_coords != NULL) {
          SegmentCoords(pos_normed, prev_normed, x_limit, y_limit,
                        x_coords, y_coords);
        }
        prev_normed = pos_normed;
      }
      pos += step;
    }
  } else {
    // There is no outline, so we are forced to use the polygonal approximation.
    const EDGEPT* endpt = lastpt->next;
    const EDGEPT* pt = startpt;
    do {
      FCOORD next_pos(pt->next->pos.x - box.left(),
                      pt->next->pos.y - box.bottom());
      FCOORD pos(pt->pos.x - box.left(), pt->pos.y - box.bottom());
      if (bounding_box != NULL) {
        SegmentBBox(next_pos, pos, bounding_box);
      }
      if (accumulator != NULL) {
        SegmentLLSQ(next_pos, pos, accumulator);
      }
      if (x_coords != NULL && y_coords != NULL) {
        SegmentCoords(next_pos, pos, x_limit, y_limit, x_coords, y_coords);
      }
    } while ((pt = pt->next) != endpt);
  }
}
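The end_index arithmetic shared by the two runs above (add step_length when the run wraps past the end of the outline, then reduce every access with % step_length) can be shown in isolation with a plain circular array; the values below are illustrative only.

#include <cstdio>

int main() {
  // A closed outline with 8 steps; a run that starts at step 6 and ends at
  // step 2 wraps around the end of the array.
  const int kStepLength = 8;
  const char steps[kStepLength] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};

  int start_index = 6;
  int end_index = 2;
  // Same trick as in ExtractFeaturesFromRun/CollectEdgesOfRun: keep the loop
  // increment positive by pushing end_index past the array length, and apply
  // % kStepLength to every access (but never to start_index itself).
  if (end_index <= start_index)
    end_index += kStepLength;                      // 2 -> 10
  for (int index = start_index; index < end_index; ++index) {
    std::printf("%c ", steps[index % kStepLength]);  // prints: g h a b
  }
  std::printf("\n");
  return 0;
}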