Пример #1
0
/**
 * Run the optical flow with EDGEFLOW on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void calc_edgeflow_tot(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                       struct opticflow_result_t *result)
{
  // Define Static Variables
  static struct edge_hist_t edge_hist[MAX_HORIZON];
  static uint8_t current_frame_nr = 0;
  struct edge_flow_t edgeflow;
  static uint8_t previous_frame_offset[2] = {1, 1};

  // Define Normal variables
  struct edgeflow_displacement_t displacement;
  displacement.x = malloc(sizeof(int32_t) * img->w);
  displacement.y = malloc(sizeof(int32_t) * img->h);

  // If the methods just switched to this one, reintialize the
  // array of edge_hist structure.
  if (opticflow->just_switched_method == 1) {
    int i;
    for (i = 0; i < MAX_HORIZON; i++) {
      edge_hist[i].x = malloc(sizeof(int32_t) * img->w);
      edge_hist[i].y = malloc(sizeof(int32_t) * img->h);
      FLOAT_RATES_ZERO(edge_hist[i].rates);
    }
  }

  uint16_t disp_range;
  if (opticflow->search_distance < DISP_RANGE_MAX) {
    disp_range = opticflow->search_distance;
  } else {
    disp_range = DISP_RANGE_MAX;
  }

  uint16_t window_size;

  if (opticflow->window_size < MAX_WINDOW_SIZE) {
    window_size = opticflow->window_size;
  } else {
    window_size = MAX_WINDOW_SIZE;
  }

  uint16_t RES = opticflow->subpixel_factor;

  //......................Calculating EdgeFlow..................... //

  // Calculate current frame's edge histogram
  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
  calculate_edge_histogram(img, edge_hist_y, 'y', 0);


  // Copy frame time and angles of image to calculated edge histogram
  edge_hist[current_frame_nr].frame_time = img->ts;
  edge_hist[current_frame_nr].rates = state->rates;

  // Calculate which previous edge_hist to compare with the current
  uint8_t previous_frame_nr[2];
  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);

  //Select edge histogram from the previous frame nr
  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;

  //Calculate the corresponding derotation of the two frames
  int16_t der_shift_x = 0;
  int16_t der_shift_y = 0;

  if (opticflow->derotation) {
    der_shift_x = (int16_t)(edge_hist[current_frame_nr].rates.p  /
                            result->fps *
                            (float)img->w / (OPTICFLOW_FOV_W) * opticflow->derotation_correction_factor_x);
    der_shift_y = (int16_t)(edge_hist[current_frame_nr].rates.q /
                            result->fps *
                            (float)img->h / (OPTICFLOW_FOV_H) * opticflow->derotation_correction_factor_y);
  }

  // Estimate pixel wise displacement of the edge histograms for x and y direction
  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
                              displacement.x, img->w,
                              window_size, disp_range,  der_shift_x);
  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
                              displacement.y, img->h,
                              window_size, disp_range, der_shift_y);

  // Fit a line on the pixel displacement to estimate
  // the global pixel flow and divergence (RES is resolution)
  line_fit(displacement.x, &edgeflow.div_x,
           &edgeflow.flow_x, img->w,
           window_size + disp_range, RES);
  line_fit(displacement.y, &edgeflow.div_y,
           &edgeflow.flow_y, img->h,
           window_size + disp_range, RES);

  /* Save Resulting flow in results
   * Warning: The flow detected here is different in sign
   * and size, therefore this will be multiplied with
   * the same subpixel factor and -1 to make it on par with
   * the LK algorithm of t opticalflow_calculator.c
   * */
  edgeflow.flow_x = -1 * edgeflow.flow_x;
  edgeflow.flow_y = -1 * edgeflow.flow_y;

  result->flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
  result->flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];

  //Fill up the results optic flow to be on par with LK_fast9
  result->flow_der_x =  result->flow_x;
  result->flow_der_y =  result->flow_y;
  result->corner_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
  result->tracked_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
  result->divergence = (float)edgeflow.flow_x / RES;
  result->div_size = 0.0f;
  result->noise_measurement = 0.0f;
  result->surface_roughness = 0.0f;

  //......................Calculating VELOCITY ..................... //

  /*Estimate fps per direction
   * This is the fps with adaptive horizon for subpixel flow, which is not similar
   * to the loop speed of the algorithm. The faster the quadcopter flies
   * the higher it becomes
  */
  float fps_x = 0;
  float fps_y = 0;
  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
  fps_x = 1 / (time_diff_x);
  fps_y = 1 / (time_diff_y);

  result->fps = fps_x;

  // Calculate velocity
  float vel_x = edgeflow.flow_x * fps_x * state->agl * OPTICFLOW_FOV_W / (img->w * RES);
  float vel_y = edgeflow.flow_y * fps_y * state->agl * OPTICFLOW_FOV_H / (img->h * RES);

  //Apply a  median filter to the velocity if wanted
  if (opticflow->median_filter == true) {
    result->vel_x = (float)update_median_filter(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
    result->vel_y = (float)update_median_filter(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
  } else {
    result->vel_x = vel_x;
    result->vel_y = vel_y;
  }

  result->noise_measurement = 0.2;



#if OPTICFLOW_SHOW_FLOW
  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
#endif
  // Increment and wrap current time frame
  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
}
Пример #2
0
/**
 * Run the optical flow with EDGEFLOW on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone (attitude angles and AGL are read)
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result (flow, divergence, velocity, fps)
 */
void calc_edgeflow_tot(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                       struct opticflow_result_t *result)
{
    // Static state carried across frames: ring buffer of edge histograms and
    // the adaptive frame-offset used for subpixel flow.
    static struct edge_hist_t edge_hist[MAX_HORIZON];
    static uint8_t current_frame_nr = 0;
    static struct edge_flow_t edgeflow;
    static uint8_t previous_frame_offset[2] = {1, 1};

    // Per-call scratch for the pixel-wise displacement estimates.
    // NOTE(review): displacement.x/.y are used below without allocation, so
    // in this variant they are presumably fixed-size arrays inside
    // edgeflow_displacement_t — TODO confirm against the struct definition.
    struct edgeflow_displacement_t displacement;

    // Clamp the search distance and window size to their compile-time maxima.
    uint16_t disp_range;
    if (opticflow->search_distance < DISP_RANGE_MAX) {
        disp_range = opticflow->search_distance;
    } else {
        disp_range = DISP_RANGE_MAX;
    }

    uint16_t window_size;

    if (opticflow->window_size < MAX_WINDOW_SIZE) {
        window_size = opticflow->window_size;
    } else {
        window_size = MAX_WINDOW_SIZE;
    }

    // Subpixel resolution factor used by the line fit and velocity scaling.
    uint16_t RES = opticflow->subpixel_factor;

    //......................Calculating EdgeFlow..................... //

    // Calculate current frame's edge histogram along both image axes.
    int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
    int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
    calculate_edge_histogram(img, edge_hist_x, 'x', 0);
    calculate_edge_histogram(img, edge_hist_y, 'y', 0);

    // Copy frame time and attitude angles of this image into the history
    // slot so later frames can derotate against it.
    memcpy(&edge_hist[current_frame_nr].frame_time, &img->ts, sizeof(struct timeval));
    edge_hist[current_frame_nr].pitch = state->theta;
    edge_hist[current_frame_nr].roll = state->phi;

    // Calculate which previous edge_hist to compare with the current
    // (adaptive horizon: slower motion compares against older frames).
    uint8_t previous_frame_nr[2];
    calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);

    // Select edge histograms from the chosen previous frames.
    int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
    int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;

    // Calculate the corresponding derotation (in pixels) between the two
    // frames from the attitude difference; zero when derotation is disabled.
    int16_t der_shift_x = 0;
    int16_t der_shift_y = 0;

    if (opticflow->derotation) {
        der_shift_x = -(int16_t)((edge_hist[previous_frame_nr[0]].roll - edge_hist[current_frame_nr].roll) *
                                 (float)img->w / (OPTICFLOW_FOV_W));
        der_shift_y = -(int16_t)((edge_hist[previous_frame_nr[1]].pitch - edge_hist[current_frame_nr].pitch) *
                                 (float)img->h / (OPTICFLOW_FOV_H));
    }

    // Estimate pixel-wise displacement of the edge histograms for x and y.
    calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
                                displacement.x, img->w,
                                window_size, disp_range,  der_shift_x);
    calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
                                displacement.y, img->h,
                                window_size, disp_range, der_shift_y);

    // Fit a line on the pixel displacement to estimate
    // the global pixel flow and divergence (RES is resolution).
    line_fit(displacement.x, &edgeflow.div_x,
             &edgeflow.flow_x, img->w,
             window_size + disp_range, RES);
    line_fit(displacement.y, &edgeflow.div_y,
             &edgeflow.flow_y, img->h,
             window_size + disp_range, RES);

    /* Save resulting flow in results
     * Warning: The flow detected here is different in sign
     * and size, therefore this will be multiplied with
     * the same subpixel factor and -1 to make it on par with
     * the LK algorithm of opticalflow_calculator.c
     * */
    edgeflow.flow_x = -1 * edgeflow.flow_x;
    edgeflow.flow_y = -1 * edgeflow.flow_y;

    // Normalize by the adaptive frame offset to express flow per single frame.
    result->flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
    result->flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];

    // Fill up the results optic flow to be on par with LK_fast9.
    result->flow_der_x =  result->flow_x;
    result->flow_der_y =  result->flow_y;
    result->corner_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
    result->tracked_cnt = getAmountPeaks(edge_hist_x, 500 , img->w);
    result->divergence = (float)edgeflow.flow_x / RES;
    result->div_size = 0.0f;
    result->noise_measurement = 0.0f;
    result->surface_roughness = 0.0f;

    //......................Calculating VELOCITY ..................... //

    /* Estimate fps per direction.
     * This is the fps with adaptive horizon for subpixel flow, which is not
     * similar to the loop speed of the algorithm. The faster the quadcopter
     * flies the higher it becomes.
     * NOTE(review): if the two timestamps are equal, time_diff is 0 and fps
     * becomes inf — TODO confirm timestamps are strictly increasing.
     */
    float fps_x = 0;
    float fps_y = 0;
    float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
    float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
    fps_x = 1 / (time_diff_x);
    fps_y = 1 / (time_diff_y);

    result->fps = fps_x;

    // Calculate metric velocity: flow [subpixels/frame] * fps * height /
    // focal scaling (FOV over image size), divided by subpixel factor RES.
    float vel_x = edgeflow.flow_x * fps_x * state->agl * OPTICFLOW_FOV_W / (img->w * RES);
    float vel_y = edgeflow.flow_y * fps_y * state->agl * OPTICFLOW_FOV_H / (img->h * RES);
    result->vel_x = vel_x;
    result->vel_y = vel_y;

    /* Rotate velocities from camera frame coordinates to body coordinates.
    * IMPORTANT This frame to body orientation should be the case for the Parrot
    * ARdrone and Bebop, however this can be different for other quadcopters
    * ALWAYS double check!
    */
    result->vel_body_x = - vel_y;
    result->vel_body_y = vel_x;

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
    // Fixed: statement was missing its terminating semicolon, which broke
    // the build whenever this debug visualization was enabled.
    draw_edgeflow_img(img, edgeflow, displacement, *edge_hist_x);
#endif
    // Increment and wrap current time frame.
    current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
}