void FlowUtilityUnit::ProcessFrame(FrameSetPtr input,
                                     list<FrameSetPtr>* output) {
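    // Optionally impose a locally consistent grid flow, extract feature
    // matches for plotting or serialization, and forward the frameset.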

    OpticalFlowFrame* flow_frame =
        dynamic_cast<OpticalFlowFrame*>(input->at(flow_stream_idx_).get());
    ASSERT_LOG(flow_frame);

    if (options_.impose_grid_flow()) {
      for (int f = 0; f < flow_frame->MatchedFrames(); ++f) {
        flow_frame->ImposeLocallyConsistentGridFlow(frame_width_,
                                                    frame_height_,
                                                    f,
                                                    options_.grid_ratio_x(),
                                                    options_.grid_ratio_y(),
                                                    options_.grid_num_offsets(),
                                                    options_.grid_num_scales());
      }
    }

    FeatureMatchList feature_list;
    if (options_.plot_flow() || !options_.feature_match_file().empty()) {
      flow_frame->GetFeatureMatchList(frame_width_,
                                      flow_frame->MatchedFrames() *
                                          options_.frac_feature_matches(),
                                      &feature_list);
    }

    if (options_.plot_flow()) {
      const VideoFrame* video_frame =
            dynamic_cast<const VideoFrame*>(input->at(video_stream_idx_).get());

      ASSERT_LOG(video_frame);

      IplImage image;
      video_frame->ImageView(&image);
      PlotFlow(feature_list, flow_frame->MatchedFrames(), &image);
    }

    if (!options_.feature_match_file().empty()) {
      // Save feature_list to file.
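      // Each record is length-prefixed: a native int32 size followed by the
      // serialized FlowFrame proto.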
      FlowFrame proto_frame;
      OpticalFlowFrame::FeatureMatchListToFlowFrame(feature_list, &proto_frame);
      int frame_size = proto_frame.ByteSize();
      ofs_.write(reinterpret_cast<const char*>(&frame_size), sizeof(frame_size));
      proto_frame.SerializeToOstream(&ofs_);
    }

    output->push_back(input);
}
void VideoReaderUnit::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
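  // Only pull a frame when driven by an upstream unit; as the pipeline root
  // this unit presumably emits framesets from its own run loop instead.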
  if (!used_as_root_) {
    input->push_back(shared_ptr<VideoFrame>(ReadNextFrame()));
    output->push_back(input);
    ++frame_num_;
  }
}
void VideoDisplayQtUnit::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
  // FPS control: early return if updated too frequently.
  ptime curr_time = boost::posix_time::microsec_clock::local_time();
  float msecs_passed = boost::posix_time::time_period(
                         last_update_time_, curr_time).length().total_milliseconds();

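  // Forward the frameset up front so downstream units receive it even when
  // drawing is skipped below.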
  output->push_back(input);
  if (msecs_passed < kMinFrameTime) {
    // Is the remaining wait short enough to sleep it off?
    if (msecs_passed + kMaxWaitTime < kMinFrameTime) {
      boost::thread::sleep(boost::get_system_time() +
                           boost::posix_time::milliseconds(
                               static_cast<long>(kMinFrameTime - msecs_passed)));
    } else {
      return;
    }
  }

  const VideoFrame* frame =
    dynamic_cast<const VideoFrame*>(input->at(video_stream_idx_).get());
  CHECK_NOTNULL(frame);

  // Draw.
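  // Note: this QImage constructor wraps the pixel buffer without copying;
  // rgbSwapped() below creates the BGR -> RGB copy that is displayed.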
  std::unique_ptr<QImage> image;
  if (options_.upscale != 1.0f) {
    // Resize.
    cv::Mat mat_view;
    frame->MatView(&mat_view);
    cv::resize(mat_view, *scaled_image_, scaled_image_->size());

    // Make Qt image.
    image.reset(new QImage((const uint8_t*)scaled_image_->data,
                           scaled_image_->cols,
                           scaled_image_->rows,
                           scaled_image_->step[0],
                           QImage::Format_RGB888));
  } else {
    // Make Qt image.
    image.reset(new QImage((const uint8_t*)frame->data(),
                           frame->width(),
                           frame->height(),
                           frame->width_step(),
                           QImage::Format_RGB888));
  }

  // Show Qt image
  main_window_->DrawImage(image->rgbSwapped());

  // Force refresh
  QApplication::processEvents();
  // fps control
  last_update_time_ = boost::posix_time::microsec_clock::local_time();
}
void VideoWriterUnit2::ProcessFrame(FrameSetPtr input,
                                    std::list<FrameSetPtr>* output) {
  // Retrieve video frame.
  const VideoFrame* frame = input->at(video_stream_idx_)->AsPtr<VideoFrame>();
  cv::Mat image;
  frame->MatView(&image);

  writer_.write(image);

  // Forward input.
  output->push_back(input);
}
void VideoDisplayUnit::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
  const VideoFrame* frame = input->at(video_stream_idx_)->AsPtr<VideoFrame>();

  cv::Mat image;
  frame->MatView(&image);

  if (frame_buffer_) {
    cv::resize(image, *frame_buffer_, frame_buffer_->size());
    cv::imshow(window_name_.c_str(), *frame_buffer_);
  } else {
    cv::imshow(window_name_.c_str(), image);
  }

  output->push_back(input);
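  // waitKey pumps HighGUI's event loop; without it the window never repaints.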
  cv::waitKey(1);
}
void VideoWriterUnit::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
  // Write single frame.
  const VideoFrame* frame = input->at(video_stream_idx_)->AsPtr<VideoFrame>();

  // Copy video_frame to frame_bgr_.
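  // Each destination row is zeroed up to LineSize() so alignment padding past
  // the 3 * frame_width_ payload stays cleared.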
  const uint8_t* src_data = frame->data();
  uint8_t* dst_data = frame_bgr_->data[0];

  for (int i = 0;
       i < frame_height_;
       ++i, src_data += frame->width_step(), dst_data += frame_bgr_->linesize[0]) {
    memset(dst_data, 0, LineSize());
    memcpy(dst_data, src_data, 3 * frame_width_);
  }

  // Convert bgr picture to codec.
  sws_scale(sws_context_, frame_bgr_->data, frame_bgr_->linesize, 0, frame_height_,
            frame_encode_->data, frame_encode_->linesize);
  int got_frame;
  EncodeFrame(frame_encode_, &got_frame);

  ++frame_num_;
  output->push_back(input);
}
  void ColorTransform::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
    VideoFrame* frame = dynamic_cast<VideoFrame*>(input->at(video_stream_idx_).get());
    ASSERT_LOG(frame);
    
    IplImage image;
    frame->ImageView(&image);

    // Read transform weights.
    float weights[3];
    float shift;
    ifs_ >> weights[0] >> weights[1] >> weights[2] >> shift;
    
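    // Scale every pixel channel by its weight and the global shift factor,
    // clamping the result to [0, 255].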
    for (int i = 0; i < frame_height_; ++i) {
      uchar* color_ptr = RowPtr<uchar>(&image, i);
      
      for (int j = 0; j < frame_width_; ++j, color_ptr += 3) {
        color_ptr[0] = (uchar) clamp_uchar((float)color_ptr[0] * weights[0] * shift);
        color_ptr[1] = (uchar) clamp_uchar((float)color_ptr[1] * weights[1] * shift);
        color_ptr[2] = (uchar) clamp_uchar((float)color_ptr[2] * weights[2] * shift);
      }
    }
    
    output->push_back(input);
  }
void VideoPipelineStats::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
  const int bins = sinks_.size();
  const int border = 40;
  const float scale = (options_.frame_height - border) /
    (options_.max_queue_height * 1.3);

  // Create a new frameset and video-frame.
  shared_ptr<VideoFrame> curr_frame(new VideoFrame(options_.frame_width,
                                                   options_.frame_height,
                                                   3,
                                                   frame_width_step_));

  cv::Mat image;
  curr_frame->MatView(&image);
  image.setTo(180);

  // Draw histogram.
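  // bins + 2 slots leave a one-slot margin on either side of the histogram.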
  const int spacing = image.cols / (bins + 2);
  for (int i = 0; i < bins; ++i) {
    // Locations.
    int val = sinks_[i].first->GetQueueSize();
    int col = (i + 1) * spacing;
    int row = options_.frame_height - border - (val * scale);
    cv::Point pt1(col - spacing / 3, row);
    cv::Point pt2(col + spacing / 3, image.rows - border);
    // Bar plot.
    cv::rectangle(image, pt1, pt2, CV_RGB(0, 0, 200), -2);   // Neg. last arg for filled.

    // Value text.
    cv::Point txt_pt(col - spacing / 3, options_.frame_height - border / 3 - 10);
    cv::putText(image,
                base::StringPrintf("%02d", val),
                txt_pt,
                cv::FONT_HERSHEY_SIMPLEX,
                0.5,
                CV_RGB(0, 0, 0),
                2);

    // Name text.
    txt_pt.y += 15;
    cv::putText(image,
                sinks_[i].second,
                txt_pt,
                cv::FONT_HERSHEY_SIMPLEX,
                0.4,
                CV_RGB(0, 0, 0),
                1);

    // Fps text.
    txt_pt.y = border / 3;
    cv::putText(image,
                base::StringPrintf("%3.1f", sinks_[i].first->MinTreeRate()),
                txt_pt,
                cv::FONT_HERSHEY_SIMPLEX,
                0.4,
                CV_RGB(0, 0, 0),
                1);

  }

  // Print running time.
  boost::posix_time::ptime curr_time = boost::posix_time::microsec_clock::local_time();
  float secs_passed = boost::posix_time::time_period(
      start_time_, curr_time).length().total_milliseconds() * 1.e-3f;
  cv::putText(image,
              base::StringPrintf("up: %5.1f", secs_passed),
              cv::Point(options_.frame_width - 75, border / 3),
              cv::FONT_HERSHEY_SIMPLEX,
              0.4,
              CV_RGB(0, 0, 0),
              1);

  // Pass to output.
  input->push_back(curr_frame);
  output->push_back(input);
}
void SegmentationDisplayUnit::ProcessFrame(FrameSetPtr input,
                                           list<FrameSetPtr>* output) {
  // Fps control, early return if updated too frequently.
  ptime curr_time = boost::posix_time::microsec_clock::local_time();
  float msecs_passed = boost::posix_time::time_period(
                           last_update_time_, curr_time).length().total_milliseconds();
  output->push_back(input);

  const PointerFrame<SegmentationDesc>& seg_frame =
    input->at(seg_stream_idx_)->As<PointerFrame<SegmentationDesc>>();

  const SegmentationDesc& desc = seg_frame.Ref();

  // Update hierarchy if present.
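  // (It typically arrives only on select frames, e.g. chunk boundaries.)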
  if (desc.hierarchy_size() > 0) {
    seg_hier_.reset(new SegmentationDesc(desc));
  }

  if (msecs_passed < kMinFrameTime) {
    // Is the remaining wait short enough to sleep it off?
    if (msecs_passed + kMaxWaitTime < kMinFrameTime) {
      boost::thread::sleep(boost::get_system_time() +
                           boost::posix_time::milliseconds(
                               static_cast<long>(kMinFrameTime - msecs_passed)));
    } else {
      return;
    }
  }

  // Fetch video frame.
  cv::Mat frame_view;
  if (vid_stream_idx_ >= 0) {
    const VideoFrame* frame =
        dynamic_cast<const VideoFrame*>(input->at(vid_stream_idx_).get());
    CHECK_NOTNULL(frame);
    frame->MatView(&frame_view);
  }

  // Parse slider (percentage)
  // Only update if slider was changed.
  const float curr_level = main_window_->GetLevel();
  if (curr_level != fractional_level_) {
    fractional_level_ = curr_level;

    // Fractional specification for hierarchy-level.
    absolute_level_ =
      (int)(fractional_level_ * (seg_hier_->hierarchy_size() - 1.f));
  }

  // Render segmentation.
  RenderRegionsRandomColor(absolute_level_,
                           options_.highlight_edges,
                           options_.draw_shape_descriptors,
                           desc,
                           &seg_hier_->hierarchy(),
                           render_frame_.get());

  if (options_.concat_with_source) {
    CHECK_GE(vid_stream_idx_, 0)
        << "Concatenation with source requested, but no video stream present.";
    cv::Mat top_half = output_frame_->rowRange(0, frame_height_);
    render_frame_->copyTo(top_half);
    cv::Mat bottom_half = output_frame_->rowRange(frame_height_, 2 * frame_height_);
    frame_view.copyTo(bottom_half);
  } else {
    if (vid_stream_idx_ >= 0) {
      // Blend with input.
      cv::addWeighted(frame_view,
                      1.0 - options_.blend_alpha,
                      *render_frame_,
                      options_.blend_alpha,
                      0,              // Offset.
                      *output_frame_);
    } else {
      *output_frame_ = *render_frame_;
    }
  }

  // Draw.
  std::unique_ptr<QImage> image;
  if (options_.upscale != 1.0f) {
    // Resize
    cv::resize(*output_frame_, *scaled_image_, scaled_image_->size());
    image.reset(new QImage((const uint8_t*)scaled_image_->data,
                           scaled_image_->cols,
                           scaled_image_->rows,
                           scaled_image_->step[0],
                           QImage::Format_RGB888));
  } else {
    image.reset(new QImage((const uint8_t*)output_frame_->data,
                           output_frame_->cols,
                           output_frame_->rows,
                           output_frame_->step[0],
                           QImage::Format_RGB888));
  }

  // Show Qt image
  main_window_->DrawImage(image->rgbSwapped());

  // Force refresh
  QApplication::processEvents();
  // fps control
  last_update_time_ = boost::posix_time::microsec_clock::local_time();
}
  void ColorCheckerUnit::ProcessFrame(FrameSetPtr input, list<FrameSetPtr>* output) {
    VideoFrame* frame = dynamic_cast<VideoFrame*>(input->at(video_stream_idx_).get());
    ASSERT_LOG(frame);

    IplImage image;
    frame->ImageView(&image);
    
    const VideoFrame* lum_frame =
        dynamic_cast<const VideoFrame*>(input->at(lum_stream_idx_).get());
    ASSERT_LOG(lum_frame);
    
    IplImage lum_image;
    lum_frame->ImageView(&lum_image);
    
    // Sum according to checker image.
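    // colors / pixel_num are indexed by the checker-mask label; label 0 is
    // background and is skipped.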
    vector<CvPoint3D32f> colors(256);
    vector<float> pixel_num(256);
    
    for (int i = 0; i < frame_height_; ++i) {
      const uchar* mask_ptr = RowPtr<const uchar>(checker_img_, i);
      uchar* color_ptr = RowPtr<uchar>(&image, i);
      const uchar* lum_ptr = RowPtr<const uchar>(&lum_image, i);
      
      for (int j = 0; j < frame_width_; ++j, ++mask_ptr, color_ptr += 3, ++lum_ptr) {
        int idx = mask_ptr[0];
        if (idx == 0)     // Don't sample background
          continue;
        
        // Is pixel within range?
        int min_val = std::min(std::min(color_ptr[0], color_ptr[1]), color_ptr[2]);
        int max_val = std::max(std::max(color_ptr[0], color_ptr[1]), color_ptr[2]);
        
        if (min_val >= min_thresh_ && max_val <= max_thresh_) {
//        if (lum_ptr[0] >= min_thresh_ && lum_ptr[0] <= max_thresh_) {
          ++pixel_num[idx];
          colors[idx] = colors[idx] + cvPoint3D32f(color_ptr[0],
                                                   color_ptr[1],
                                                   color_ptr[2]);
          // Mark pixel as processed!
          color_ptr[0] ^= 0xff;
          color_ptr[1] ^= 0xff;
          color_ptr[2] ^= 0xff;
        }
      }
    }
    
    if (frame_num_ != 0)
      ofs_ << "\n";
    
    // Normalize and output.
    int processed_colors = 0;
    for (vector<int>::const_iterator c = checker_ids_.begin();
         c != checker_ids_.end();
         ++c) {
      if (pixel_num[*c] > 0) {
        ++processed_colors;
        colors[*c] = colors[*c] * (1.0 / pixel_num[*c]);
      }
      
      ofs_ << *c << " " << pixel_num[*c] << " " << colors[*c].x << " " 
           << colors[*c].y << " " << colors[*c].z << " ";
    }
    std::cout << "Frame " << frame_num_ << ": Processed colors " << processed_colors << "\n";
    
    ++frame_num_;
    output->push_back(input);
  }
  void OpticalFlowUnit::KeyFrameTrack(FrameSetPtr input, const FrameSet& feedback_input,
                                      list<FrameSetPtr>* output) {
    // Get luminance IplImage.
    const VideoFrame* frame =
        dynamic_cast<const VideoFrame*>(input->at(video_stream_idx_).get());
    ASSERT_LOG(frame);

    IplImage image;
    frame->ImageView(&image);

    ASSERT_LOG(image.nChannels == 1);

    const int kRetrackDist = 1000;

    bool reset_key_frame = false;
    if (key_frame_stream_idx_ >= 0 && processed_frames_ > 0) {
      const ValueFrame<bool>* key_frame =
          dynamic_cast<ValueFrame<bool>*>(feedback_input.at(key_frame_stream_idx_).get());

      reset_key_frame = key_frame->value();
    }

    // Set a key-frame if reset_key_frame is already set
    // or frame_num_ frames have passed since the last one.
    if (processed_frames_ - last_key_frame_ >= frame_num_) {
      reset_key_frame = true;
    }

    const int frames_to_track = 1;

    int frame_size = 0;           // Size of optical flow frame.
    if (processed_frames_ > 0) {
      if (reset_key_frame) {
        // Key-frame: track all frames until the last key-frame.
        frame_size = processed_frames_ - last_key_frame_;
      } else if (processed_frames_ - last_key_frame_ <= frames_to_track) {
        // Second frame: previous frame and key-frame are identical.
        frame_size = processed_frames_ - last_key_frame_;
      } else if ((processed_frames_ - last_key_frame_) % kRetrackDist == 0) {
        frame_size = processed_frames_ - last_key_frame_;
      } else {
        frame_size = frames_to_track + 1;   // Plus key-frame track.
      }
    }

    shared_ptr<OpticalFlowFrame> off(new OpticalFlowFrame(frame_size, reset_key_frame));

    int num_features = max_features_;

    // Track w.r.t. previous frame
    if (processed_frames_ > 0) {
      // Extract features in current image.
      cvGoodFeaturesToTrack(&image, eig_image_.get(), tmp_image_.get(),
                            cur_features_.get(), &num_features, .005, 2, 0);

      CvTermCriteria term_criteria = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
                                                    50, .02);
      int flow_flags = 0;

      // Track w.r.t. previous frame.
      // If key-frame, track w.r.t. all previous frames until last key-frame.
      int tracked_frames = std::min(frame_size, frames_to_track);
      if (reset_key_frame) {
        tracked_frames = processed_frames_ - last_key_frame_;
      } else if ((processed_frames_ - last_key_frame_) % kRetrackDist == 0) {
        // Do a dense track from time to time to weed out false matches.
        tracked_frames = processed_frames_ - last_key_frame_;
      }

      for (int t = 0; t < tracked_frames; ++t) {
        ASSURE_LOG(processed_frames_ - t - 1 >= 0);
        cvCalcOpticalFlowPyrLK(&image,
                               prev_image_[t].get(),
                               cur_pyramid_.get(),
                               prev_pyramid_[t].get(),
                               cur_features_.get(),
                               prev_features_.get(),
                               num_features,
                               cvSize(10, 10),
                               3,
                               flow_status_.get(),
                               flow_track_error_.get(),
                               term_criteria, flow_flags);

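        // Reuse the current frame's pyramid and seed the next track with
        // these results.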
        flow_flags |= CV_LKFLOW_INITIAL_GUESSES;
        flow_flags |= CV_LKFLOW_PYR_A_READY;
        float frame_diam = sqrt((float)(frame_width_ * frame_width_ +
                                        frame_height_ * frame_height_));

        off->AddFeatures(prev_features_.get(), cur_features_.get(),
                         flow_status_.get(), num_features, frame_diam * .05,
                         t, -t - 1, 0.f);
      }

      if (reset_key_frame) {
        // Is key-frame --> reinitialize.
        last_key_frame_ = processed_frames_;
      // Test for sufficient data and that no dense retrack happens this frame.
      } else if (processed_frames_ - last_key_frame_ > frames_to_track &&
                 (processed_frames_ - last_key_frame_) % kRetrackDist != 0) {
        // Track w.r.t. prev. key-frame.
        const DataFrame* seg_frame = feedback_input[seg_stream_idx_].get();
        Segment::SegmentationDesc desc;
        desc.ParseFromArray(seg_frame->data(), seg_frame->size());

        // Expect per-frame segmentation.
        ASSURE_LOG(desc.hierarchy_size() > 1)
            << "Hierarchy expected to be present in every frame.";

        const RegionFlowFrame* flow_frame =
            dynamic_cast<const RegionFlowFrame*>(feedback_input[region_flow_idx_].get());

        // Get region_flow w.r.t. the prev. key-frame, which is always the
        // last frame in region_flow.
        vector<RegionFlowFrame::RegionFlow> region_flow;
        flow_frame->FillVector(&region_flow, flow_frame->MatchedFrames() - 1);
        ASSURE_LOG(flow_frame->FrameID(flow_frame->MatchedFrames() - 1) +
                   processed_frames_ - 1 == last_key_frame_);

        int level = flow_frame->Level();

        Segment::SegmentationDescToIdImage(id_img_.get(),
                                           frame_width_ * sizeof(int),    // width_step
                                           frame_width_,
                                           frame_height_,
                                           level,
                                           desc,
                                           0);  // no hierarchy.
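        // id_img_ now holds a per-pixel region id at the chosen hierarchy
        // level; region flow is looked up through it below.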

        // Initialize prev. feature locations with guess from region flow.
        int tracked_feature = 0;

        // Buffer region vector lookups; polling neighbors later on can be
        // costly if executed for each feature independently.
        hash_map<int, CvPoint2D32f> region_vec;

        for (int i = 0; i < num_features; ++i) {
          int x = prev_features_[i].x + 0.5;
          int y = prev_features_[i].y + 0.5;

          x = std::max(0, std::min(x, frame_width_ - 1));
          y = std::max(0, std::min(y, frame_height_ - 1));

          int region_id = id_img_[y * frame_width_ + x];

          // Buffered?
          hash_map<int, CvPoint2D32f>::const_iterator look_up_loc =
              region_vec.find(region_id);
          if (look_up_loc != region_vec.end()) {
            prev_features_[i] = prev_features_[i] + look_up_loc->second;
            ++tracked_feature;
            continue;
          }

          RegionFlowFrame::RegionFlow rf_to_find;
          rf_to_find.region_id = region_id;
          vector<RegionFlowFrame::RegionFlow>::const_iterator rf_loc =
              std::lower_bound(region_flow.begin(), region_flow.end(),
                               rf_to_find);

          if (rf_loc != region_flow.end() &&
              rf_loc->region_id == region_id) {
            prev_features_[i] = prev_features_[i] + rf_loc->mean_vector;
            region_vec[region_id] = rf_loc->mean_vector;
            ++tracked_feature;
          } else {
            // Poll the neighbors recursively.
            int neighbor_matches = 0;
            CvPoint2D32f neighbor_vec = cvPoint2D32f(0, 0);

            const int max_rounds = 2;
            vector<int> to_visit(1, region_id);
            vector<int> visited;

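            // Breadth-first search over the region adjacency graph, up to
            // max_rounds rings; the averaged mean_vector of any matched
            // neighbors serves as the initial guess.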
            for (int round = 0; round < max_rounds; ++round) {
              // Add every neighbor of the elements in to_visit that has not
              // been visited yet to new_to_visit.
              vector<int> new_to_visit;

              for (vector<int>::const_iterator v = to_visit.begin(); v != to_visit.end(); ++v) {
                const Segment::CompoundRegion& comp_region =
                    GetCompoundRegionFromId(*v, desc.hierarchy(level));

                for (int j = 0,
                     n_sz = comp_region.neighbor_id_size();
                     j < n_sz;
                     ++j) {
                  int n_id = comp_region.neighbor_id(j);
                  // visited is built by unsorted appends below, so use a
                  // linear find rather than std::lower_bound here.
                  if (std::find(visited.begin(), visited.end(), n_id) ==
                      visited.end()) {
                    new_to_visit.push_back(n_id);
                  }
                }
              }

              // Remove duplicates from new_to_visit.
              std::sort(new_to_visit.begin(), new_to_visit.end());
              vector<int>::iterator new_to_visit_end = std::unique(new_to_visit.begin(),
                                                                   new_to_visit.end());

              // Process all members in new_to_visit.
              for (vector<int>::const_iterator v = new_to_visit.begin();
                   v != new_to_visit_end;
                   ++v) {
                RegionFlowFrame::RegionFlow rf_to_find;
                rf_to_find.region_id = *v;

                vector<RegionFlowFrame::RegionFlow>::const_iterator rf_loc =
                    std::lower_bound(region_flow.begin(), region_flow.end(),
                                     rf_to_find);

                if (rf_loc != region_flow.end() &&
                    rf_loc->region_id == rf_to_find.region_id) {
                  ++neighbor_matches;
                  neighbor_vec = neighbor_vec + rf_loc->mean_vector;
                }
              }

              if (neighbor_matches > 0) {
                ++tracked_feature;
                prev_features_[i] = prev_features_[i] +
                                    neighbor_vec * (1.0f / neighbor_matches);
                region_vec[region_id] = neighbor_vec * (1.0f / neighbor_matches);
                break;
              }

              // Setup next round.
              visited.insert(visited.end(), to_visit.begin(), to_visit.end());
              to_visit.swap(new_to_visit);
              visited.insert(visited.end(), to_visit.begin(), to_visit.end());
            }
          }  // end polling neighbors.
        } // end for loop over features.

        std::cout << "Tracked feature ratio: " << (float)tracked_feature / num_features << "\n";

        // Compute flow to key frame.
        int t = processed_frames_ - last_key_frame_ - 1;
        flow_flags |= CV_LKFLOW_INITIAL_GUESSES;
        flow_flags |= CV_LKFLOW_PYR_A_READY;

        cvCalcOpticalFlowPyrLK(&image,
                               prev_image_[t].get(),
                               cur_pyramid_.get(),
                               prev_pyramid_[t].get(),
                               cur_features_.get(),
                               prev_features_.get(),
                               num_features,
                               cvSize(10, 10),
                               3,
                               flow_status_.get(),
                               flow_track_error_.get(),
                               term_criteria, flow_flags);

        float frame_diam = sqrt((float)(frame_width_ * frame_width_ +
                                        frame_height_ * frame_height_));

        // Add to end.
        off->AddFeatures(prev_features_.get(), cur_features_.get(),
                         flow_status_.get(), num_features, frame_diam * .05,
                         frame_size - 1, -t - 1, 0.f);

      }
    }

    input->push_back(off);
    prev_image_.push_front(cvCloneImageShared(&image));

    ++processed_frames_;
    output->push_back(input);
  }
  void OpticalFlowUnit::PreviousFrameTrack(FrameSetPtr input,
                                           list<FrameSetPtr>* output,
                                           const vector<CvPoint2D32f>* polygon) {
    // Get luminance IplImage.
    const VideoFrame* frame =
        dynamic_cast<const VideoFrame*>(input->at(video_stream_idx_).get());

    ASSERT_LOG(frame);

    IplImage image;
    frame->ImageView(&image);

    ASSERT_LOG(image.nChannels == 1);

    // First match is always the direct predecessor.
    int num_matches = processed_frames_ > 0 ? 1 : 0;
    if (processed_frames_ > 0) {
      // For t = num_matches - 1 we need
      //   processed_frames_ - 1 - t * matching_stride_ >= 0
      //   t * matching_stride_ <= processed_frames_ - 1
      //   num_matches - 1 <= (processed_frames_ - 1) / matching_stride_
      //   num_matches <= (processed_frames_ - 1) / matching_stride_ + 1
      num_matches += std::min(frame_num_ - 1,
                              (processed_frames_ - 1) / matching_stride_);
    }

    shared_ptr<OpticalFlowFrame> off(new OpticalFlowFrame(num_matches, false));

    int num_features = max_features_;
    const float frame_diam = sqrt((float)(frame_width_ * frame_width_ +
                                          frame_height_ * frame_height_));

    // Extract features in current image.
    // Change param to .05 for fewer features and more speed.
    cvGoodFeaturesToTrack(&image,
                          eig_image_.get(),
                          tmp_image_.get(),
                          cur_features_.get(),
                          &num_features,
                          .005,
                          5,
                          0);

    if (polygon) {
      // Reject all features out of bounds.
      vector<CvPoint2D32f> outline(*polygon);
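      // The polygon apparently uses a bottom-left origin; flip y into image
      // coordinates before testing.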
      for (int i = 0; i < outline.size(); ++i) {
        outline[i].y = frame_height_ - 1 - outline[i].y;
      }
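      // Close the polygon by repeating the first vertex.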
      outline.push_back(outline[0]);

      // Use prev. features as tmp. buffer
      int p = 0;
      for (int i = 0; i < num_features; ++i) {
        CvPoint2D32f pt = cur_features_[i];
        pt.y = frame_height_ - 1 - pt.y;
        if (WithinConvexPoly(pt, outline, 0))
          prev_features_[p++] = cur_features_[i];
      }

      cur_features_.swap(prev_features_);
      num_features = p;
    }

    CvTermCriteria term_criteria = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
                                                  50, .02);
    int flow_flags = 0;

    for (int t = 0; t < off->MatchedFrames(); ++t) {
      int prev_frame = processed_frames_ - 1 - t * matching_stride_;
      ASSERT_LOG(prev_frame >= 0);

      // Find correspond features in previous image for each extracted
      // feature in current frame.
      cvCalcOpticalFlowPyrLK(&image,
                             prev_image_[t * matching_stride_].get(),
                             cur_pyramid_.get(),
                             prev_pyramid_[0].get(),  // just one pyramid here.
                             cur_features_.get(),
                             prev_features_.get(),
                             num_features,
                             cvSize(10, 10),
                             3,
                             flow_status_.get(),
                             flow_track_error_.get(),
                             term_criteria,
                             flow_flags);

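      // Reuse the current frame's pyramid and seed the next, further-back
      // track with these results.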
      flow_flags |= CV_LKFLOW_PYR_A_READY;
      flow_flags |= CV_LKFLOW_INITIAL_GUESSES;

      off->AddFeatures(prev_features_.get(),
                       cur_features_.get(),
                       flow_status_.get(),
                       num_features,
                       frame_diam * .05,
                       t,
                       -1 - t * matching_stride_,
                       0.f);
    }

    input->push_back(off);
    prev_image_.push_front(cvCloneImageShared(&image));

    ++processed_frames_;

    output->push_back(input);
  }