Example #1
void ins_update_sonar() {
  static float last_offset = 0.;
  // new value filtered with median_filter
  ins_sonar_alt = update_median_filter(&sonar_median, sonar_meas);
  float sonar = (ins_sonar_alt - ins_sonar_offset) * INS_SONAR_SENS;

#ifdef INS_SONAR_VARIANCE_THRESHOLD
  /* compute variance of error between sonar and baro alt */
  int32_t err = POS_BFP_OF_REAL(sonar) + ins_baro_alt; // sonar positive up, baro positive down !!!!
  var_err[var_idx] = POS_FLOAT_OF_BFP(err);
  var_idx = (var_idx + 1) % VAR_ERR_MAX;
  float var = variance_float(var_err, VAR_ERR_MAX);
  DOWNLINK_SEND_INS_SONAR(DefaultChannel, DefaultDevice, &err, &sonar, &var);
  //DOWNLINK_SEND_INS_SONAR(DefaultChannel,DefaultDevice,&ins_sonar_alt, &sonar, &var);
#endif

  /* update filter assuming a flat ground */
  if (sonar < INS_SONAR_MAX_RANGE
#ifdef INS_SONAR_MIN_RANGE
      && sonar > INS_SONAR_MIN_RANGE
#endif
#ifdef INS_SONAR_THROTTLE_THRESHOLD
      && stabilization_cmd[COMMAND_THRUST] < INS_SONAR_THROTTLE_THRESHOLD
#endif
#ifdef INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_ROLL] < INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_ROLL] > -INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_PITCH] < INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_PITCH] > -INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_YAW] < INS_SONAR_STAB_THRESHOLD
      && stabilization_cmd[COMMAND_YAW] > -INS_SONAR_STAB_THRESHOLD
#endif
#ifdef INS_SONAR_BARO_THRESHOLD
      && ins_baro_alt > -POS_BFP_OF_REAL(INS_SONAR_BARO_THRESHOLD) /* z down */
#endif
#ifdef INS_SONAR_VARIANCE_THRESHOLD
      && var < INS_SONAR_VARIANCE_THRESHOLD
#endif
      && ins_update_on_agl
      && baro.status == BS_RUNNING) {
    vff_update_alt_conf(-sonar, VFF_R_SONAR_0 + VFF_R_SONAR_OF_M * fabsf(sonar));
    last_offset = vff_offset;
  }
  else {
    /* update offset with last value to avoid divergence */
    vff_update_offset(last_offset);
  }
}
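The variance gate above depends on variance_float() computed over a small ring buffer of sonar/baro disagreement samples. Below is a minimal sketch of such a function; the name variance_float_sketch and the population-variance convention are assumptions, not the Paparazzi implementation.

/* Sketch: population variance over a buffer of n floats.
 * Hypothetical stand-in for the variance_float() used above. */
static float variance_float_sketch(const float *buf, int n)
{
  float mean = 0.f, var = 0.f;
  for (int i = 0; i < n; i++) { mean += buf[i]; }
  mean /= (float)n;
  for (int i = 0; i < n; i++) {
    float d = buf[i] - mean;
    var += d * d;
  }
  return var / (float)n;
}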
Example #2
/**
 * Poll the lidar for data.
 * Runs at 50 Hz max.
 */
void lidar_lite_periodic(void)
{
    switch (lidar.status) {
    case LIDAR_INIT_RANGING:
        // ask for i2c frame
        lidar.trans.buf[0] = LIDAR_LITE_REG_ADDR; // set register pointer to 0x00
        lidar.trans.buf[1] = LIDAR_LITE_REG_VAL; // trigger acquisition & correlation processing with DC correction
        if (i2c_transmit(&LIDAR_LITE_I2C_DEV, &lidar.trans, lidar.addr, 2)) {
            // transaction OK, increment status
            lidar.status = LIDAR_REQ_READ;
        }
        break;
    case LIDAR_REQ_READ:
        lidar.trans.buf[0] = LIDAR_LITE_READ_ADDR; // set register pointer to the results register
        lidar.trans.buf[1] = 0;
        if (i2c_transceive(&LIDAR_LITE_I2C_DEV, &lidar.trans, lidar.addr, 1, 2)) {
            // transaction OK, increment status
            lidar.status = LIDAR_READ_DISTANCE;
        }
        break;
    case LIDAR_READ_DISTANCE:
        // combine the two result bytes and filter the data
        lidar.distance_raw = update_median_filter(&lidar_filter, (uint32_t)((lidar.trans.buf[0] << 8) | lidar.trans.buf[1]));
        lidar.distance = ((float)lidar.distance_raw) / 100.0f; // raw value is in cm

        // send message (if requested)
        if (lidar.update_agl) {
            AbiSendMsgAGL(AGL_LIDAR_LITE_ID, lidar.distance);
        }

        // go back to the initial state to start a new measurement
        lidar.status = LIDAR_INIT_RANGING;
        break;
    default:
        break;
    }
}
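lidar_lite_periodic() above is a non-blocking I2C state machine: each periodic tick submits at most one bus transaction and only advances the state when the bus accepts it. A condensed sketch of the pattern follows, reusing Paparazzi's i2c_transmit()/i2c_transceive() exactly as the example does; every ranger_* name and register value is hypothetical.

#include <stdint.h>
#include "mcu_periph/i2c.h"  // Paparazzi I2C types, as used in the example

#define RANGER_CMD_REG     0x00  // hypothetical command register address
#define RANGER_CMD_MEASURE 0x04  // hypothetical "take a measurement" value
#define RANGER_RESULT_REG  0x8f  // hypothetical 2-byte result register

enum ranger_state { RANGER_TRIGGER, RANGER_REQUEST, RANGER_READ };

struct ranger {
  enum ranger_state state;
  struct i2c_periph *periph;
  struct i2c_transaction trans;
  uint8_t addr;
  float distance;  // meters
};

static void ranger_periodic_sketch(struct ranger *r)
{
  switch (r->state) {
    case RANGER_TRIGGER:  // start an acquisition
      r->trans.buf[0] = RANGER_CMD_REG;
      r->trans.buf[1] = RANGER_CMD_MEASURE;
      if (i2c_transmit(r->periph, &r->trans, r->addr, 2)) { r->state = RANGER_REQUEST; }
      break;
    case RANGER_REQUEST:  // point at the result register and read two bytes
      r->trans.buf[0] = RANGER_RESULT_REG;
      if (i2c_transceive(r->periph, &r->trans, r->addr, 1, 2)) { r->state = RANGER_READ; }
      break;
    case RANGER_READ:     // big-endian centimeters -> meters, then restart
      r->distance = (float)((r->trans.buf[0] << 8) | r->trans.buf[1]) / 100.0f;
      r->state = RANGER_TRIGGER;
      break;
    default:
      break;
  }
}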
Example #3
void ins_update_baro() {
  int32_t baro_pressure = update_median_filter(&baro_median, baro.absolute);
  if (baro.status == BS_RUNNING) {
    if (!ins_baro_initialised) {
      ins_qfe = baro_pressure;
      ins_baro_initialised = TRUE;
    }
    if (ins.vf_realign) {
      ins.vf_realign = FALSE;
      ins_qfe = baro_pressure;
      vff_realign(0.);
      ins_ltp_accel.z = ACCEL_BFP_OF_REAL(vff_zdotdot);
      ins_ltp_speed.z = SPEED_BFP_OF_REAL(vff_zdot);
      ins_ltp_pos.z   = POS_BFP_OF_REAL(vff_z);
    }
    else { /* not realigning, so normal update with baro measurement */
      ins_baro_alt = ((baro_pressure - ins_qfe) * INS_BARO_SENS_NUM)/INS_BARO_SENS_DEN;
      float alt_float = POS_FLOAT_OF_BFP(ins_baro_alt);
      vff_update_baro(alt_float);
    }
  }
}
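Example #3 moves between fixed-point and float altitude representations via the POS_BFP_OF_REAL / POS_FLOAT_OF_BFP macros. A sketch of what such binary fixed-point (BFP) conversions look like; the 8-bit fractional part matches Paparazzi's INT32_POS_FRAC to my knowledge, but treat the exact value as an assumption.

#include <stdint.h>

/* Sketch of BFP position conversions. The fractional bit count (8)
 * is an assumption here, not taken from the source above. */
#define POS_FRAC_SKETCH 8
#define POS_BFP_OF_REAL_SKETCH(x)  ((int32_t)((x) * (float)(1 << POS_FRAC_SKETCH)))
#define POS_FLOAT_OF_BFP_SKETCH(x) ((float)(x) / (float)(1 << POS_FRAC_SKETCH))

/* e.g. 1.5 m -> 384 counts -> back to 1.5 m */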
Example #4
/**
 * Poll the lidar for data.
 */
void lidar_sf11_periodic(void)
{
  switch (lidar_sf11.status) {
    case LIDAR_SF11_REQ_READ:
      // read two bytes
      lidar_sf11.trans.buf[0] = 0; // set tx to zero
      i2c_transceive(&LIDAR_SF11_I2C_DEV, &lidar_sf11.trans, lidar_sf11.addr, 1, 2);
      break;
    case LIDAR_SF11_READ_OK:
      // process results
      // filter data
      lidar_sf11.distance_raw = update_median_filter(
                                  &lidar_sf11_filter,
                                  (uint32_t)((lidar_sf11.trans.buf[0] << 8) | lidar_sf11.trans.buf[1]));
      lidar_sf11.distance = ((float)lidar_sf11.distance_raw) / 100.0f; // raw value is in cm

      // compensate AGL measurement for body rotation
      if (lidar_sf11.compensate_rotation) {
        float phi = stateGetNedToBodyEulers_f()->phi;
        float theta = stateGetNedToBodyEulers_f()->theta;
        float gain = fabsf(cosf(phi) * cosf(theta));
        lidar_sf11.distance = lidar_sf11.distance / gain;
      }

      // send message (if requested)
      if (lidar_sf11.update_agl) {
        AbiSendMsgAGL(AGL_LIDAR_SF11_ID, lidar_sf11.distance);
      }

      // reset status
      lidar_sf11.status = LIDAR_SF11_REQ_READ;
      break;
    default:
      break;
  }
}
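The rotation compensation in Example #4 scales the reading by 1/(cos(phi) * cos(theta)) of the roll and pitch angles. The same correction as a self-contained sketch; the helper name and the clamp against near-zero gain are my additions, not part of the original driver.

#include <math.h>

/* Sketch: attitude correction as applied above, as a standalone helper.
 * The 0.1f clamp (avoids blow-up at extreme tilt) is an assumption. */
static float compensate_rotation_sketch(float distance, float phi, float theta)
{
  float gain = fabsf(cosf(phi) * cosf(theta));
  if (gain < 0.1f) { gain = 0.1f; }
  return distance / gain;
}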
Example #5
/**
 * Run the optical flow with EDGEFLOW on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void calc_edgeflow_tot(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                       struct opticflow_result_t *result)
{
  // Define Static Variables
  static struct edge_hist_t edge_hist[MAX_HORIZON];
  static uint8_t current_frame_nr = 0;
  struct edge_flow_t edgeflow;
  static uint8_t previous_frame_offset[2] = {1, 1};

  // Define Normal variables
  struct edgeflow_displacement_t displacement;
  displacement.x = malloc(sizeof(int32_t) * img->w);
  displacement.y = malloc(sizeof(int32_t) * img->h);

  // If the method just switched to this one, reinitialize the
  // edge_hist array of structures.
  if (opticflow->just_switched_method == 1) {
    int i;
    for (i = 0; i < MAX_HORIZON; i++) {
      // free any previous allocation first, so repeated method switches
      // don't leak (safe on the first pass: static storage zero-initializes
      // the pointers to NULL)
      free(edge_hist[i].x);
      free(edge_hist[i].y);
      edge_hist[i].x = malloc(sizeof(int32_t) * img->w);
      edge_hist[i].y = malloc(sizeof(int32_t) * img->h);
      FLOAT_RATES_ZERO(edge_hist[i].rates);
    }
  }

  uint16_t disp_range;
  if (opticflow->search_distance < DISP_RANGE_MAX) {
    disp_range = opticflow->search_distance;
  } else {
    disp_range = DISP_RANGE_MAX;
  }

  uint16_t window_size;

  if (opticflow->window_size < MAX_WINDOW_SIZE) {
    window_size = opticflow->window_size;
  } else {
    window_size = MAX_WINDOW_SIZE;
  }

  uint16_t RES = opticflow->subpixel_factor;

  //......................Calculating EdgeFlow..................... //

  // Calculate current frame's edge histogram
  int32_t *edge_hist_x = edge_hist[current_frame_nr].x;
  int32_t *edge_hist_y = edge_hist[current_frame_nr].y;
  calculate_edge_histogram(img, edge_hist_x, 'x', 0);
  calculate_edge_histogram(img, edge_hist_y, 'y', 0);


  // Copy frame time and angles of image to calculated edge histogram
  edge_hist[current_frame_nr].frame_time = img->ts;
  edge_hist[current_frame_nr].rates = state->rates;

  // Calculate which previous edge_hist to compare with the current
  uint8_t previous_frame_nr[2];
  calc_previous_frame_nr(result, opticflow, current_frame_nr, previous_frame_offset, previous_frame_nr);

  //Select edge histogram from the previous frame nr
  int32_t *prev_edge_histogram_x = edge_hist[previous_frame_nr[0]].x;
  int32_t *prev_edge_histogram_y = edge_hist[previous_frame_nr[1]].y;

  //Calculate the corresponding derotation of the two frames
  int16_t der_shift_x = 0;
  int16_t der_shift_y = 0;

  if (opticflow->derotation) {
    der_shift_x = (int16_t)(edge_hist[current_frame_nr].rates.p  /
                            result->fps *
                            (float)img->w / (OPTICFLOW_FOV_W) * opticflow->derotation_correction_factor_x);
    der_shift_y = (int16_t)(edge_hist[current_frame_nr].rates.q /
                            result->fps *
                            (float)img->h / (OPTICFLOW_FOV_H) * opticflow->derotation_correction_factor_y);
  }

  // Estimate pixel wise displacement of the edge histograms for x and y direction
  calculate_edge_displacement(edge_hist_x, prev_edge_histogram_x,
                              displacement.x, img->w,
                              window_size, disp_range,  der_shift_x);
  calculate_edge_displacement(edge_hist_y, prev_edge_histogram_y,
                              displacement.y, img->h,
                              window_size, disp_range, der_shift_y);

  // Fit a line on the pixel displacement to estimate
  // the global pixel flow and divergence (RES is resolution)
  line_fit(displacement.x, &edgeflow.div_x,
           &edgeflow.flow_x, img->w,
           window_size + disp_range, RES);
  line_fit(displacement.y, &edgeflow.div_y,
           &edgeflow.flow_y, img->h,
           window_size + disp_range, RES);

  /* Save the resulting flow in the results struct.
   * Warning: the flow detected here differs in sign
   * and scale, so it is multiplied by the same subpixel
   * factor and by -1 to put it on par with
   * the LK algorithm in opticflow_calculator.c
   * */
  edgeflow.flow_x = -1 * edgeflow.flow_x;
  edgeflow.flow_y = -1 * edgeflow.flow_y;

  result->flow_x = (int16_t)edgeflow.flow_x / previous_frame_offset[0];
  result->flow_y = (int16_t)edgeflow.flow_y / previous_frame_offset[1];

  // Fill in the remaining optic flow results to be on par with LK_fast9
  result->flow_der_x =  result->flow_x;
  result->flow_der_y =  result->flow_y;
  result->corner_cnt = getAmountPeaks(edge_hist_x, 500, img->w);
  result->tracked_cnt = result->corner_cnt;
  result->divergence = (float)edgeflow.flow_x / RES;
  result->div_size = 0.0f;
  result->noise_measurement = 0.0f;
  result->surface_roughness = 0.0f;

  //......................Calculating VELOCITY ..................... //

  /* Estimate the fps per direction.
   * This is the fps given the adaptive horizon used for subpixel flow,
   * which is not the same as the loop rate of the algorithm: the faster
   * the quadcopter flies, the higher it becomes.
   */
  float fps_x = 0;
  float fps_y = 0;
  float time_diff_x = (float)(timeval_diff(&edge_hist[previous_frame_nr[0]].frame_time, &img->ts)) / 1000.;
  float time_diff_y = (float)(timeval_diff(&edge_hist[previous_frame_nr[1]].frame_time, &img->ts)) / 1000.;
  fps_x = 1 / (time_diff_x);
  fps_y = 1 / (time_diff_y);

  result->fps = fps_x;

  // Calculate velocity
  float vel_x = edgeflow.flow_x * fps_x * state->agl * OPTICFLOW_FOV_W / (img->w * RES);
  float vel_y = edgeflow.flow_y * fps_y * state->agl * OPTICFLOW_FOV_H / (img->h * RES);

  // Apply a median filter to the velocity if requested
  if (opticflow->median_filter == true) {
    result->vel_x = (float)update_median_filter(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
    result->vel_y = (float)update_median_filter(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
  } else {
    result->vel_x = vel_x;
    result->vel_y = vel_y;
  }

  result->noise_measurement = 0.2f;

#if OPTICFLOW_SHOW_FLOW
  draw_edgeflow_img(img, edgeflow, prev_edge_histogram_x, edge_hist_x);
#endif
  // Free the per-frame displacement buffers allocated above
  free(displacement.x);
  free(displacement.y);

  // Increment and wrap the current frame number
  current_frame_nr = (current_frame_nr + 1) % MAX_HORIZON;
}
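Both optic-flow examples push float velocities through the integer median filter by scaling to integer thousandths and back. A one-line wrapper capturing the trick; the wrapper name is hypothetical, while update_median_filter() and struct MedianFilterInt are the primitives used above.

/* Sketch: run a float value through the integer median filter with
 * three decimals of resolution. Wrapper name is an assumption. */
static float update_median_filter_float_sketch(struct MedianFilterInt *f, float v)
{
  return (float)update_median_filter(f, (int32_t)(v * 1000.f)) / 1000.f;
}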
Example #6
/**
 * Run the optical flow with fast9 and lukaskanade on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                             struct opticflow_result_t *result)
{
  if (opticflow->just_switched_method) {
    opticflow_calc_init(opticflow, img->w, img->h);
  }

  // variables for size_divergence:
  float size_divergence;
  int n_samples;

  // variables for linear flow fit:
  float error_threshold;
  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
  struct linear_flow_fit_info fit_info;

  // Update FPS for information
  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
  opticflow->prev_timestamp = img->ts;

  // Convert image to grayscale
  image_to_grayscale(img, &opticflow->img_gray);

  // Copy to previous image if not set
  if (!opticflow->got_first_img) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    opticflow->got_first_img = true;
  }

  // *************************************************************************************
  // Corner detection
  // *************************************************************************************

  // FAST corner detection
  // TODO: There is something wrong with fast9_detect destabilizing the FPS. The problem is reduced by
  // setting min_distance to 0 (see defines), but a more permanent solution should be considered
  fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
               opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
               &opticflow->fast9_rsize,
               opticflow->fast9_ret_corners);

  // Adaptive threshold
  if (opticflow->fast9_adaptive) {
    // Decrease and increase the threshold based on previous values
    if (result->corner_cnt < 40
        && opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) { // TODO: Replace 40 with OPTICFLOW_MAX_TRACK_CORNERS / 2
      opticflow->fast9_threshold--;
    } else if (result->corner_cnt > OPTICFLOW_MAX_TRACK_CORNERS * 2 && opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
      opticflow->fast9_threshold++;
    }
  }

#if OPTICFLOW_SHOW_CORNERS
  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
#endif

  // Check if we found some corners to track
  if (result->corner_cnt < 1) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    return;
  }

  // *************************************************************************************
  // Corner Tracking
  // *************************************************************************************

  // Execute a Lucas Kanade optical flow
  result->tracked_cnt = result->corner_cnt;
  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
                                       &result->tracked_cnt,
                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                       opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level);

#if OPTICFLOW_SHOW_FLOW
  printf("show: n tracked = %d\n", result->tracked_cnt);
  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

  // Estimate size divergence:
  if (SIZE_DIV) {
    n_samples = 100;
    size_divergence = get_size_divergence(vectors, result->tracked_cnt, n_samples);
    result->div_size = size_divergence;
  } else {
    result->div_size = 0.0f;
  }
  if (LINEAR_FIT) {
    // Linear flow fit (normally derotation should be performed before):
    error_threshold = 10.0f;
    n_iterations_RANSAC = 20;
    n_samples_RANSAC = 5;
    success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
                                            n_samples_RANSAC, img->w, img->h, &fit_info);

    if (!success_fit) {
      fit_info.divergence = 0.0f;
      fit_info.surface_roughness = 0.0f;
    }

    result->divergence = fit_info.divergence;
    result->surface_roughness = fit_info.surface_roughness;
  } else {
    result->divergence = 0.0f;
    result->surface_roughness = 0.0f;
  }


  // Get the median flow
  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
  if (result->tracked_cnt == 0) {
    // We got no flow
    result->flow_x = 0;
    result->flow_y = 0;
  } else if (result->tracked_cnt > 3) {
    // Take the average of the 3 median points
    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
    result->flow_x /= 3;
    result->flow_y /= 3;
  } else {
    // Take the median point
    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
  }

  // Flow Derotation
  float diff_flow_x = 0;
  float diff_flow_y = 0;

  /*// Flow Derotation TODO:
  float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
  float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;*/

  if (opticflow->derotation && result->tracked_cnt > 5) {
    diff_flow_x = (state->rates.p) / result->fps * img->w / OPTICFLOW_FOV_W;
    diff_flow_y = (state->rates.q) / result->fps * img->h / OPTICFLOW_FOV_H;
  }

  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
                       opticflow->derotation_correction_factor_x;
  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
                       opticflow->derotation_correction_factor_y;
  opticflow->prev_rates = state->rates;

  // Velocity calculation
  // Right now this formula is under the assumption that the flow only exists along the camera's center axis.
  // TODO Calculate the velocity more sophisticated, taking into account the drone's angle and the slope of the ground plane.
  float vel_x = result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FX;
  float vel_y = result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FY;

  // Apply a median filter to the velocity if requested
  if (opticflow->median_filter == true) {
    result->vel_x = (float)update_median_filter(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
    result->vel_y = (float)update_median_filter(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
  } else {
    result->vel_x = vel_x;
    result->vel_y = vel_y;
  }
  // Velocity calculation: uncomment if the focal length of the camera is unknown or incorrect.
  //  result->vel_x =  - result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_W / img->w
  //  result->vel_y =  result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_H / img->h


  // Determine the quality of the noise measurement for the state filter
  // TODO: develop a noise model based on ground truth

  float noise_measurement_temp = (1 - ((float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25)));
  result->noise_measurement = noise_measurement_temp;

  // *************************************************************************************
  // Next Loop Preparation
  // *************************************************************************************
  free(vectors);
  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
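The flow estimate in Example #6 is a robust location statistic: after sorting the vectors, it averages the three middle elements, falling back to the single median for small sets. The same logic on a plain sorted array, as a sketch with a hypothetical name:

#include <stdint.h>

/* Sketch: mean of the three central elements of a sorted array,
 * falling back to the median when there are 3 or fewer samples. */
static int32_t middle3_mean_sketch(const int32_t *sorted, int n)
{
  if (n == 0) { return 0; }
  if (n <= 3) { return sorted[n / 2]; }
  int m = n / 2;
  return (sorted[m - 1] + sorted[m] + sorted[m + 1]) / 3;
}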