Example #1
// Given an initial estimate of line spacing (m_in) and the positions of each
// baseline, computes the line spacing of the block more accurately in m_out,
// and the corresponding intercept in c_out, and the number of spacings seen
// in index_delta. Returns the error of fit to the line spacing model.
// Uses a simple linear regression, but optimizes the offset using the median.
double BaselineBlock::FitLineSpacingModel(
    const GenericVector<double>& positions, double m_in,
    double* m_out, double* c_out, int* index_delta) {
  if (m_in == 0.0 || positions.size() < 2) {
    *m_out = m_in;
    *c_out = 0.0;
    if (index_delta != nullptr) *index_delta = 0;
    return 0.0;
  }
  GenericVector<double> offsets;
  // Get the offset (remainder modulo the line spacing) for each line, then
  // choose the median.
  for (int i = 0; i < positions.size(); ++i)
    offsets.push_back(fmod(positions[i], m_in));
  // Get the median offset.
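  // Offsets are circular modulo m_in: values just below m_in and just above 0
  // represent almost the same phase, so a plain median could land a full
  // spacing away from the true offset.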
  double median_offset = MedianOfCircularValues(m_in, &offsets);
  // Now fit a line to quantized line number and offset.
  LLSQ llsq;
  int min_index = MAX_INT32;
  int max_index = -MAX_INT32;
  for (int i = 0; i < positions.size(); ++i) {
    double y_pos = positions[i];
    int row_index = IntCastRounded((y_pos - median_offset) / m_in);
    UpdateRange(row_index, &min_index, &max_index);
    llsq.add(row_index, y_pos);
  }
  // Get the refined line spacing.
  *m_out = llsq.m();
  // Use the median offset rather than the mean.
  offsets.truncate(0);
  for (int i = 0; i < positions.size(); ++i)
    offsets.push_back(fmod(positions[i], *m_out));
  if (debug_level_ > 2) {
    for (int i = 0; i < offsets.size(); ++i)
      tprintf("%d: %g\n", i, offsets[i]);
  }
  // Get the median offset.
  *c_out = MedianOfCircularValues(*m_out, &offsets);
  if (debug_level_ > 1) {
    tprintf("Median offset = %g, compared to mean of %g.\n",
            *c_out, llsq.c(*m_out));
  }
  // index_delta is the number of hypothesized line gaps present.
  if (index_delta != nullptr)
    *index_delta = max_index - min_index;
  // Use the regression model's intercept to compute the error, as it may be
  // a full line-spacing in disagreement with the median.
  double rms_error = llsq.rms(*m_out, llsq.c(*m_out));
  if (debug_level_ > 1) {
    tprintf("Linespacing of y=%g x + %g improved to %g x + %g, rms=%g\n",
            m_in, median_offset, *m_out, *c_out, rms_error);
  }
  return rms_error;
}
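
MedianOfCircularValues is not shown in these examples, but its purpose is visible above: offsets computed with fmod wrap around at the line spacing, so a plain median can sit a full spacing away from the real cluster. The sketch below is a hypothetical stand-in (CircularMedian and the demo values are invented; Tesseract's actual implementation may differ): it re-centers the values on their circular mean so the wrap point falls at the edge of the window, then takes an ordinary median.

// circular_median_sketch.cpp -- illustrative stand-in only; Tesseract's
// MedianOfCircularValues may be implemented differently.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// Median of values that wrap modulo `range`, such as the fmod offsets above.
static double CircularMedian(double range, std::vector<double> values) {
  double sum_sin = 0.0, sum_cos = 0.0;
  for (double v : values) {
    double theta = 2.0 * M_PI * v / range;
    sum_sin += std::sin(theta);
    sum_cos += std::cos(theta);
  }
  // Angle of the mean direction, mapped back into [0, range).
  double center = std::fmod(
      std::atan2(sum_sin, sum_cos) / (2.0 * M_PI) * range + range, range);
  // Unroll each value into the window of width `range` centered on `center`.
  for (double& v : values) {
    if (v - center > range / 2.0) v -= range;
    if (v - center < -range / 2.0) v += range;
  }
  std::nth_element(values.begin(), values.begin() + values.size() / 2,
                   values.end());
  return std::fmod(values[values.size() / 2] + range, range);
}

int main() {
  // Offsets clustered around the wrap point of a 10-pixel line spacing.
  std::vector<double> offsets = {9.8, 0.1, 9.9, 0.2, 0.05};
  // A plain median would report 0.2 here; the circular median lands at 0.05,
  // in the middle of the cluster straddling the wrap point.
  std::printf("circular median = %g\n", CircularMedian(10.0, offsets));
  return 0;
}
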
Example #2
// Fits a straight baseline to the points. Returns true if it had enough
// points to be reasonably sure of the fitted baseline.
// If use_box_bottoms is false, baseline positions are formed by
// considering the outlines of the blobs.
bool BaselineRow::FitBaseline(bool use_box_bottoms) {
  // Deterministic fitting is used wherever possible.
  fitter_.Clear();
  // Linear least squares is a backup if the DetLineFit produces a bad line.
  LLSQ llsq;
  BLOBNBOX_IT blob_it(blobs_);

  for (blob_it.mark_cycle_pt(); !blob_it.cycled_list(); blob_it.forward()) {
    BLOBNBOX* blob = blob_it.data();
    if (!use_box_bottoms) blob->EstimateBaselinePosition();
    const TBOX& box = blob->bounding_box();
    int x_middle = (box.left() + box.right()) / 2;
#ifdef kDebugYCoord
    if (box.bottom() < kDebugYCoord && box.top() > kDebugYCoord) {
      tprintf("Box bottom = %d, baseline pos=%d for box at:",
              box.bottom(), blob->baseline_position());
      box.print();
    }
#endif
    fitter_.Add(ICOORD(x_middle, blob->baseline_position()), box.width() / 2);
    llsq.add(x_middle, blob->baseline_position());
  }
  // Fit the line.
  ICOORD pt1, pt2;
  baseline_error_ = fitter_.Fit(&pt1, &pt2);
  baseline_pt1_ = pt1;
  baseline_pt2_ = pt2;
  if (baseline_error_ > max_baseline_error_ &&
      fitter_.SufficientPointsForIndependentFit()) {
    // The fit was bad but there were plenty of points, so try skipping
    // the first and last few, and use the new line if it dramatically improves
    // the error of fit.
    double error = fitter_.Fit(kNumSkipPoints, kNumSkipPoints, &pt1, &pt2);
    if (error < baseline_error_ / 2.0) {
      baseline_error_ = error;
      baseline_pt1_ = pt1;
      baseline_pt2_ = pt2;
    }
  }
  int debug = 0;
#ifdef kDebugYCoord
  Print();
  debug = bounding_box_.bottom() < kDebugYCoord &&
          bounding_box_.top() > kDebugYCoord ? 3 : 2;
#endif
  // Now that we have obtained a direction from that fit, see if we can
  // improve the fit using the same direction and some other start point.
  FCOORD direction(pt2 - pt1);
  double target_offset = direction * pt1;
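  // direction * pt1 is FCOORD's scalar (dot) product, so target_offset is
  // the component of pt1 along the fitted direction.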
  good_baseline_ = false;
  FitConstrainedIfBetter(debug, direction, 0.0, target_offset);
  // Wild lines can be produced because DetLineFit allows vertical lines, but
  // vertical text has been rotated so angles over pi/4 should be disallowed.
  // Near vertical lines can still be produced by vertically aligned components
  // on very short lines.
  double angle = BaselineAngle();
  if (fabs(angle) > M_PI * 0.25) {
    // Use the llsq fit as a backup.
    baseline_pt1_ = llsq.mean_point();
    baseline_pt2_ = baseline_pt1_ + FCOORD(1.0f, llsq.m());
    // TODO(rays) get rid of this when m and c are no longer used.
    double m = llsq.m();
    double c = llsq.c(m);
    baseline_error_ = llsq.rms(m, c);
    good_baseline_ = false;
  }
  return good_baseline_;
}
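
LLSQ appears throughout these examples through a small surface: add(), m(), c(m), rms(m, c), and mean_point(). Assuming c(m) returns the best intercept for a supplied slope and rms(m, c) the root-mean-square residual of that line, which matches how they are used above, a minimal stand-in accumulator might look like the following (SimpleLLSQ is hypothetical; the real class is Tesseract's LLSQ in ccstruct/linlsq.h):

// llsq_sketch.cpp -- a minimal stand-in for the LLSQ accumulator used above.
#include <cmath>
#include <cstdio>

class SimpleLLSQ {
 public:
  void add(double x, double y) {
    n_ += 1.0;
    sx_ += x; sy_ += y;
    sxx_ += x * x; sxy_ += x * y; syy_ += y * y;
  }
  // Least-squares slope of y on x.
  double m() const {
    double denom = n_ * sxx_ - sx_ * sx_;
    return denom == 0.0 ? 0.0 : (n_ * sxy_ - sx_ * sy_) / denom;
  }
  // Best intercept for a supplied slope: the mean residual of y - m*x.
  double c(double m) const { return n_ == 0.0 ? 0.0 : (sy_ - m * sx_) / n_; }
  // Root-mean-square residual of y = m*x + c over the accumulated points.
  double rms(double m, double c) const {
    if (n_ == 0.0) return 0.0;
    double ss = syy_ + m * m * sxx_ + c * c * n_
              - 2.0 * (m * sxy_ + c * sy_ - m * c * sx_);
    return std::sqrt(ss > 0.0 ? ss / n_ : 0.0);
  }

 private:
  double n_ = 0.0, sx_ = 0.0, sy_ = 0.0, sxx_ = 0.0, sxy_ = 0.0, syy_ = 0.0;
};

int main() {
  // Points roughly on y = 0.5x + 2 with one outlier, mimicking blob baselines.
  SimpleLLSQ llsq;
  double xs[] = {0, 10, 20, 30, 40}, ys[] = {2, 7, 12, 17, 30};
  for (int i = 0; i < 5; ++i) llsq.add(xs[i], ys[i]);
  double m = llsq.m(), c = llsq.c(m);
  std::printf("m=%g c=%g rms=%g\n", m, c, llsq.rms(m, c));
  return 0;
}
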
Example #3
// Extracts Tesseract features and appends them to the features vector.
// Startpt to lastpt, inclusive, MUST have the same src_outline member,
// which may be nullptr. The vector from lastpt to its next is included in
// the feature extraction. Hidden edges should be excluded by the caller.
// If force_poly is true, the features will be extracted from the polygonal
// approximation even if more accurate data is available.
static void ExtractFeaturesFromRun(
    const EDGEPT* startpt, const EDGEPT* lastpt,
    const DENORM& denorm, double feature_length, bool force_poly,
    GenericVector<INT_FEATURE_STRUCT>* features) {
  const EDGEPT* endpt = lastpt->next;
  const C_OUTLINE* outline = startpt->src_outline;
  if (outline != nullptr && !force_poly) {
    // Detailed information is available. We have to normalize only from
    // the root_denorm to denorm.
    const DENORM* root_denorm = denorm.RootDenorm();
    int total_features = 0;
    // Get the features from the outline.
    int step_length = outline->pathlength();
    int start_index = startpt->start_step;
    // pos holds the integer coordinates of the binary image steps.
    ICOORD pos = outline->position_at_index(start_index);
    // We use an end_index that allows us to use a positive increment, but that
    // may be beyond the bounds of the outline steps due to wrap-around, so we
    // use % step_length everywhere, except for start_index.
    int end_index = lastpt->start_step + lastpt->step_count;
    if (end_index <= start_index)
      end_index += step_length;
    LLSQ prev_points;
    LLSQ prev_dirs;
    FCOORD prev_normed_pos = outline->sub_pixel_pos_at_index(pos, start_index);
    denorm.NormTransform(root_denorm, prev_normed_pos, &prev_normed_pos);
    LLSQ points;
    LLSQ dirs;
    FCOORD normed_pos(0.0f, 0.0f);
    int index = GatherPoints(outline, feature_length, denorm, root_denorm,
                             start_index, end_index, &pos, &normed_pos,
                             &points, &dirs);
    while (index <= end_index) {
      // At each iteration we nominally have 3 accumulated sets of points and
      // dirs: prev_points/dirs, points/dirs, next_points/dirs, and sum them
      // into sum_points/dirs. We don't necessarily get any features out, and
      // in that case we keep accumulating instead of rotating the
      // accumulators.
      LLSQ next_points;
      LLSQ next_dirs;
      FCOORD next_normed_pos(0.0f, 0.0f);
      index = GatherPoints(outline, feature_length, denorm, root_denorm,
                           index, end_index, &pos, &next_normed_pos,
                           &next_points, &next_dirs);
      LLSQ sum_points(prev_points);
      // TODO(rays) find out why it is better to use just dirs and next_dirs
      // in sum_dirs, instead of using prev_dirs as well.
      LLSQ sum_dirs(dirs);
      sum_points.add(points);
      sum_points.add(next_points);
      sum_dirs.add(next_dirs);
      bool made_features = false;
      // If we have some points, we can try making some features.
      if (sum_points.count() > 0) {
        // We have gone far enough from the start. Make a feature and restart.
        FCOORD fit_pt = sum_points.mean_point();
        FCOORD fit_vector = MeanDirectionVector(sum_points, sum_dirs,
                                                prev_normed_pos, normed_pos);
        // The segment to which we fit features is the line passing through
        // fit_pt in direction of fit_vector that starts nearest to
        // prev_normed_pos and ends nearest to normed_pos.
        FCOORD start_pos = prev_normed_pos.nearest_pt_on_line(fit_pt,
                                                              fit_vector);
        FCOORD end_pos = normed_pos.nearest_pt_on_line(fit_pt, fit_vector);
        // Possible correction to match the adjacent polygon segment.
        if (total_features == 0 && startpt != endpt) {
          FCOORD poly_pos(startpt->pos.x, startpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &start_pos);
        }
        if (index > end_index && startpt != endpt) {
          FCOORD poly_pos(endpt->pos.x, endpt->pos.y);
          denorm.LocalNormTransform(poly_pos, &end_pos);
        }
        int num_features = ComputeFeatures(start_pos, end_pos, feature_length,
                                           features);
        if (num_features > 0) {
          // We made some features so shuffle the accumulators.
          prev_points = points;
          prev_dirs = dirs;
          prev_normed_pos = normed_pos;
          points = next_points;
          dirs = next_dirs;
          made_features = true;
          total_features += num_features;
        }
        // The end of the next set becomes the end of the current set next
        // time around.
        normed_pos = next_normed_pos;
      }
      if (!made_features) {
        // We didn't make any features, so keep the prev accumulators and
        // add the next ones into the current.
        points.add(next_points);
        dirs.add(next_dirs);
      }
    }
  } else {
    // There is no outline (or force_poly is true), so we are forced to use
    // the polygonal approximation.
    const EDGEPT* pt = startpt;
    do {
      FCOORD start_pos(pt->pos.x, pt->pos.y);
      FCOORD end_pos(pt->next->pos.x, pt->next->pos.y);
      denorm.LocalNormTransform(start_pos, &start_pos);
      denorm.LocalNormTransform(end_pos, &end_pos);
      ComputeFeatures(start_pos, end_pos, feature_length, features);
    } while ((pt = pt->next) != endpt);
  }
}
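
ComputeFeatures is called above with a start position, an end position, and feature_length, and returns how many features it appended. Its body is not shown in these examples; the sketch below is only a guess at the general shape of that step (Feature and SliceSegment are invented names, not Tesseract's API): split the fitted segment into pieces no longer than feature_length and emit one position-plus-direction feature per piece.

// feature_slice_sketch.cpp -- illustrates the segment-to-features step, as a
// hedged approximation of what ComputeFeatures might do; not the real code.
#include <cmath>
#include <cstdio>
#include <vector>

struct Feature { float x, y, theta; };  // Midpoint position and direction.

// Splits the segment start->end into pieces no longer than feature_length and
// records one feature (midpoint plus angle) per piece.
static int SliceSegment(float sx, float sy, float ex, float ey,
                        double feature_length, std::vector<Feature>* out) {
  double dx = ex - sx, dy = ey - sy;
  double len = std::sqrt(dx * dx + dy * dy);
  int num = std::max(1, static_cast<int>(std::ceil(len / feature_length)));
  double theta = std::atan2(dy, dx);
  for (int i = 0; i < num; ++i) {
    double t = (i + 0.5) / num;  // Parameter of this piece's midpoint.
    out->push_back({static_cast<float>(sx + t * dx),
                    static_cast<float>(sy + t * dy),
                    static_cast<float>(theta)});
  }
  return num;
}

int main() {
  std::vector<Feature> features;
  // A 10-unit horizontal segment with feature_length 3 yields 4 features.
  int n = SliceSegment(0.0f, 0.0f, 10.0f, 0.0f, 3.0, &features);
  std::printf("%d features\n", n);
  for (const Feature& f : features)
    std::printf("  (%g, %g) theta=%g\n", f.x, f.y, f.theta);
  return 0;
}
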