Example #1
void init(char* image_path) {
  srand(time(0));
  int image_tex = texture_load(image_path);
  if(!image_tex) {
    fprintf(stderr, "texture load failed\n");
    exit(1);
  }

  // get the target image
  base_image = image_from_tex(image_tex);

  population_size = INIT_POP_SIZE;
  pop_select = population_size * POP_SELECT_FACTOR;
  
  // create initial population
  fprintf(stderr, "seeding initial population...");
  population = s_malloc(population_size * sizeof(t_image*));

  for(int i = 0; i < population_size; i++) {
    population[i] = random_image(base_image->width, base_image->height);
  }
  qsort(population, population_size, sizeof(t_image*), image_fitness_cmp);
  best_image = image_copy(population[0]);
  last_best = image_fitness(population[0]);
  fprintf(stderr, "done\n");
}
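The qsort() call above relies on a fitness comparator that is not part of this listing. A minimal sketch of what it could look like, assuming image_fitness() returns a double where a lower value means a closer match to the target (consistent with the current_fit < last_best check in Example #8):

static int image_fitness_cmp(const void* a, const void* b) {
  /* qsort passes pointers to the array elements; the elements here are
     t_image* values, so we receive t_image** and dereference once */
  double fa = image_fitness(*(t_image* const*)a);
  double fb = image_fitness(*(t_image* const*)b);
  /* ascending order: the fittest (lowest-score) image ends up at index 0 */
  return (fa > fb) - (fa < fb);
}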
Example #2
void InvertMaskCommand::onExecute(Context* context)
{
  bool hasMask = false;
  {
    const ActiveDocumentReader document(context);
    if (document->isMaskVisible())
      hasMask = true;
  }

  // without mask?...
  if (!hasMask) {
    // so we select all
    Command* mask_all_cmd =
      CommandsModule::instance()->getCommandByName(CommandId::MaskAll);
    context->executeCommand(mask_all_cmd);
  }
  // invert the current mask
  else {
    ActiveDocumentWriter document(context);
    Sprite* sprite(document->getSprite());
    UndoTransaction undo(document, "Mask Invert", undo::DoesntModifyDocument);
    if (undo.isEnabled())
      undo.pushUndoer(new undoers::SetMask(undo.getObjects(), document));

    /* create a new mask */
    UniquePtr<Mask> mask(new Mask());

    /* select all the sprite area */
    mask->replace(0, 0, sprite->getWidth(), sprite->getHeight());

    /* remove in the new mask the current sprite marked region */
    const gfx::Rect& maskBounds = document->getMask()->getBounds();
    image_rectfill(mask->getBitmap(),
                   maskBounds.x, maskBounds.y,
                   maskBounds.x + maskBounds.w-1,
                   maskBounds.y + maskBounds.h-1, 0);

    // Invert the current mask in the sprite
    document->getMask()->invert();
    if (document->getMask()->getBitmap()) {
      // Copy the inverted region in the new mask
      image_copy(mask->getBitmap(),
                 document->getMask()->getBitmap(),
                 document->getMask()->getBounds().x,
                 document->getMask()->getBounds().y);
    }

    // We only need the area inside the sprite
    mask->intersect(0, 0, sprite->getWidth(), sprite->getHeight());

    // Set the new mask
    document->setMask(mask);
    undo.commit();

    document->generateMaskBoundaries();
    update_screen_for_document(document);
  }
}
Example #3
image* image_clone(image* src)
{
    if(!src)
        return NULL;

    image* img = image_create(src->width, src->height, src->n_channels, src->type);
    image_copy(src, img);
    return img;
}
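A hypothetical usage sketch of image_clone(), reusing the image_load, image_rotate180, image_save and image_delete calls that appear in Example #12, to illustrate the clone-then-modify pattern:

image* original = image_load("test.bmp");
if (original) {
    /* deep copy produced by image_create() + image_copy() above */
    image* working = image_clone(original);
    if (working) {
        image_rotate180(working);   /* modify only the copy */
        image_save(working, "rotated_copy.bmp");
        image_delete(&working);
    }
    image_delete(&original);
}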
Example #4
//Main function
int main(int argc, char* argv[])
{
    if (argc != 2) {
        std::cout << "Usage: colorcanny image" << std::endl;
        return 1;
    }

    //CImg for output
    cimg_library::CImg <unsigned char> image(argv[1]);
    cimg_library::CImg <unsigned char> image_copy(image.width(), image.height(), image.depth(), image.spectrum());
    cimg_library::CImg <unsigned char> image_qwaf(image.width(), image.height(), image.depth(), image.spectrum());
    cimg_library::CImg <unsigned char> sobel(image.width(), image.height(), image.depth(), image.spectrum());
    cimg_library::CImg <unsigned char> sobel_dir(image.width(), image.height(), image.depth(), image.spectrum());
    cimg_library::CImg <unsigned char> nms_image(image.width(), image.height(), image.depth(), image.spectrum());
    cimg_library::CImg <unsigned char> nms_image_dir(image.width(), image.height(), image.depth(), image.spectrum());

    //Do a copy for testing
    for (int x=0; x<image.width(); x++) {
        for (int y=0; y<image.height(); y++) {
            unsigned char col[] = {image(x,y,0,0), image(x,y,0,1), image(x,y,0,2)};
            image_copy.draw_point(x,y,col);
        }
    }

    applyQWAF(image, image_qwaf);
    image_qwaf.save("image_qwaf.bmp");

    //Copy to see if everything is working properly
    image_copy.save("image_copy.bmp");

    //Apply a gaussian blur
    image.blur(1.6);

    //Vectors
    std::vector<unsigned char> magnitude;
    std::vector<unsigned char> nms;
    std::vector<float> direction;

    //Apply the Sobel operator to the image and then non-maximum suppression
    sobelOperator(image, magnitude, direction);
    NMS(nms, magnitude, direction, image.width(), image.height());

    //Write the magnitude and direction out to images
    writeMagnitude(sobel, magnitude);
    writeMagnitudeDirection(sobel_dir, magnitude, direction);
    writeMagnitude(nms_image, nms);
    writeMagnitudeDirection(nms_image_dir, nms, direction);

    //Write the images out to files
    sobel.save("sobel.bmp");
    sobel_dir.save("sobel_direction.bmp");
    nms_image.save("non_maxima.bmp");
    nms_image_dir.save("non_maxima_dir.bmp");

    //Return successful
    return 0;
}
Example #5
Image* image_crop(const Image* image, int x, int y, int w, int h, int bgcolor)
{
  if (w < 1) throw std::invalid_argument("image_crop: Width is less than 1");
  if (h < 1) throw std::invalid_argument("image_crop: Height is less than 1");

  Image* trim = Image::create(image->getPixelFormat(), w, h);
  trim->mask_color = image->mask_color;

  image_clear(trim, bgcolor);
  image_copy(trim, image, -x, -y);

  return trim;
}
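Note that the negative offsets passed to image_copy() shift the source so that its (x, y) pixel lands at the origin of the new image, while anything outside the source keeps bgcolor. A hypothetical call site, where src is an existing Image* and image_free() (used in the other examples) is assumed to release images created with Image::create():

// Cut a 32x32 tile whose top-left corner is at (64, 16) in src;
// parts of the rectangle outside src stay filled with the mask color.
Image* tile = image_crop(src, 64, 16, 32, 32, src->mask_color);
// ... use tile ...
image_free(tile);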
Example #6
void UndoTransaction::flipImageWithMask(Image* image, const Mask* mask, raster::algorithm::FlipType flipType, int bgcolor)
{
  UniquePtr<Image> flippedImage((Image::createCopy(image)));

  // Flip the portion of the bitmap.
  raster::algorithm::flip_image_with_mask(flippedImage, mask, flipType, bgcolor);

  // Insert the undo operation.
  if (isEnabled()) {
    UniquePtr<Dirty> dirty((new Dirty(image, flippedImage)));
    dirty->saveImagePixels(image);

    m_undoHistory->pushUndoer(new undoers::DirtyArea(m_undoHistory->getObjects(), image, dirty));
  }

  // Copy the flipped image into the image specified as argument.
  image_copy(image, flippedImage, 0, 0);
}
Example #7
void vgCopyImage(VGImage dst, VGint dx, VGint dy,
                 VGImage src, VGint sx, VGint sy,
                 VGint width, VGint height,
                 VGboolean dither)
{
   struct vg_context *ctx = vg_current_context();

   if (src == VG_INVALID_HANDLE || dst == VG_INVALID_HANDLE) {
      vg_set_error(ctx, VG_BAD_HANDLE_ERROR);
      return;
   }

   if (width <= 0 || height <= 0) {
      vg_set_error(ctx, VG_ILLEGAL_ARGUMENT_ERROR);
      return;
   }
   vg_validate_state(ctx);
   image_copy((struct vg_image*)dst, dx, dy,
              (struct vg_image*)src, sx, sy,
              width, height, dither);
}
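For reference, a hypothetical call into this OpenVG entry point: copy a 64x64 region from the top-left of src_img into dst_img at (16, 16) without dithering, assuming both handles were created earlier with vgCreateImage():

vgCopyImage(dst_img, 16, 16, src_img, 0, 0, 64, 64, VG_FALSE);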
Example #8
void display() {
  if(!done && image_match(best_image, base_image)) {
    done = 1;
  }
  if(!paused && !done && generations < MAX_GENS) {
    float r;
    double accum_fitness = 0;
    fprintf(stderr, "evolving...");
    // current best is start of sorted list
    t_image** new_pop = s_malloc(population_size * sizeof(t_image*));
    // always add the best solution to survivors
    new_pop[0] = image_copy(best_image);
    // get more survivors using roulette wheel selection
    for(int i = 1; i < pop_select; i++) {
      r = rand_float();
      for(int j = 0; j < population_size; j++) {
        accum_fitness += image_fitness(population[j]);
        if(accum_fitness >= r) {
          new_pop[i] = image_copy(population[j]);
          break;
        }
      }
      accum_fitness = 0;
    }

    // get a normalised fitness distribution for the survivors
    double survivor_fit[pop_select];
    double survivor_sum_fit = 0;
    for(int i = 0; i < pop_select; i++) {
      survivor_fit[i] = image_fitness(new_pop[i]);
      survivor_sum_fit += survivor_fit[i];
    }
    for(int i = 0; i < pop_select; i++) {
      survivor_fit[i] /= survivor_sum_fit;
    }

    // fill rest of population with children
    int i = pop_select;
    while(i < population_size) {
      // 2 random parents from survivors
      t_image* parents[N_PARENTS] = {0};
      for(int j = 0; j < N_PARENTS; j++) {
        r = rand_float();
        for(int k = 0; k < pop_select; k++) {
          accum_fitness += survivor_fit[k];
          if(accum_fitness >= r) {
            parents[j] = new_pop[k];
            break;
          }
        }
        accum_fitness = 0;
      }

      int p = 0;
      t_image* children[N_CHILDREN] = {0};
      // seed children with parent data
      for(int c = 0; c < N_CHILDREN; c++) {
        children[c] = image_copy(parents[p]);
        /* NOTE: we can get away with this instead of a mod because there
           are only 2 parents; if more were used this would have to change */
        p = !p;
      }

      for(int x = 0; x < base_image->width; x++) {
        for(int y = 0; y < base_image->height; y++) {
          // crossover
          for(int c = 0; c < N_CHILDREN; c++) {
            /*
            // use 3 bits of c to get the 8 combinations for crossover
            p = (c & (1 << 0)) ? 1 : 0;
            children[c]->pix[x][y].r = parents[p]->pix[x][y].r;
            p = (c & (1 << 1)) ? 1 : 0;
            children[c]->pix[x][y].g = parents[p]->pix[x][y].g;
            p = (c & (1 << 2)) ? 1 : 0;
            children[c]->pix[x][y].b = parents[p]->pix[x][y].b;
            */
            average_rgb(x, y, parents, children[c]);
          }

          // mutation
          for(int c = 0; c < N_CHILDREN; c++) {
            if(rand_float() <= MUTATION_RATE)
              mutate_rgb(&children[c]->pix[x][y]);
          }
        }
      }

      // add to new population
      for(int c = 0; c < N_CHILDREN; c++) {
        // this child is 'born'
        if(i < population_size && rand_float() <= CROSSOVER_RATE) {
          new_pop[i++] = children[c];
        // you are dead to me!
        } else {
          free_image(children[c]);
        }
      }
    }

    // deallocate last generation
    for(int i = 0; i < population_size; i++) {
      free_image(population[i]);
    }
    free(population);
    
    // set new population as current
    population = new_pop;
    // determine new best member and compare to old best
    qsort(population, population_size, sizeof(t_image*), image_fitness_cmp);
    double current_fit = image_fitness(population[0]);
    if(current_fit < last_best) {
      free_image(best_image);
      best_image = image_copy(population[0]);
      last_best = current_fit;
    }
    generations++;
    fprintf(stderr, "done | gen %d | f %.1f\n", generations, current_fit);
  }

  // draw the best image
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
  glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
  glMatrixMode(GL_PROJECTION);
  glLoadIdentity();
  glOrtho(0, base_image->width, base_image->height, 0, 0, 1);
  glMatrixMode(GL_MODELVIEW);
  glBegin(GL_POINTS);
  for(int x = 0; x < best_image->width; x++) {
    for(int y = 0; y < best_image->height; y++) {
      glColor3f(best_image->pix[x][y].r/255.0, best_image->pix[x][y].g/255.0,
                  best_image->pix[x][y].b/255.0);
      glVertex2f(x + 0.5f, y + 0.5f);
    }
  }
  glEnd();
  
  glutSwapBuffers();
}
Example #9
/**
 * Run the optical flow on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result)
{
  // variables for size_divergence:
  float size_divergence; int n_samples;

  // variables for linear flow fit:
  float error_threshold; int n_iterations_RANSAC, n_samples_RANSAC, success_fit; struct linear_flow_fit_info fit_info;

  // Update FPS for information
  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
  memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval));

  // Convert image to grayscale
  image_to_grayscale(img, &opticflow->img_gray);

  // Copy to previous image if not set
  if (!opticflow->got_first_img) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    opticflow->got_first_img = TRUE;
  }

  // *************************************************************************************
  // Corner detection
  // *************************************************************************************

  // FAST corner detection (TODO: non fixed threshold)
  struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
                                         20, 20, &result->corner_cnt);

  // Adaptive threshold
  if (opticflow->fast9_adaptive) {

    // Decrease and increase the threshold based on previous values
    if (result->corner_cnt < 40 && opticflow->fast9_threshold > 5) {
      opticflow->fast9_threshold--;
    } else if (result->corner_cnt > 50 && opticflow->fast9_threshold < 60) {
      opticflow->fast9_threshold++;
    }
  }

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS
  image_show_points(img, corners, result->corner_cnt);
#endif

  // Check if we found some corners to track
  if (result->corner_cnt < 1) {
    free(corners);
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    return;
  }

  // *************************************************************************************
  // Corner Tracking
  // *************************************************************************************

  // Execute a Lucas Kanade optical flow
  result->tracked_cnt = result->corner_cnt;
  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt,
                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                       opticflow->threshold_vec, opticflow->max_track_corners);

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

  // Estimate size divergence:
  if (SIZE_DIV) {
    n_samples = 100;
    size_divergence = get_size_divergence(vectors, result->tracked_cnt, n_samples);
    result->div_size = size_divergence;
  } else {
    result->div_size = 0.0f;
  }
  if (LINEAR_FIT) {
    // Linear flow fit (normally derotation should be performed before):
    error_threshold = 10.0f;
    n_iterations_RANSAC = 20;
    n_samples_RANSAC = 5;
    success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC, n_samples_RANSAC, img->w, img->h, &fit_info);

    if (!success_fit) {
      fit_info.divergence = 0.0f;
      fit_info.surface_roughness = 0.0f;
    }

    result->divergence = fit_info.divergence;
    result->surface_roughness = fit_info.surface_roughness;
  } else {
    result->divergence = 0.0f;
    result->surface_roughness = 0.0f;
  }


  // Get the median flow
  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
  if (result->tracked_cnt == 0) {
    // We got no flow
    result->flow_x = 0;
    result->flow_y = 0;
  } else if (result->tracked_cnt > 3) {
    // Take the average of the 3 median points
    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
    result->flow_x /= 3;
    result->flow_y /= 3;
  } else {
    // Take the median point
    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
  }

  // Flow Derotation
  float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
  float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;
  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor;
  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor;
  opticflow->prev_phi = state->phi;
  opticflow->prev_theta = state->theta;

  // Velocity calculation
  result->vel_x = -result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor * img->w / OPTICFLOW_FX;
  result->vel_y =  result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor * img->h / OPTICFLOW_FY;

  // *************************************************************************************
  // Next Loop Preparation
  // *************************************************************************************
  free(corners);
  free(vectors);
  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
Example #10
bool FliFormat::onLoad(FileOp* fop)
{
#define SETPAL()                                                \
  do {                                                          \
      for (c=0; c<256; c++) {                                   \
        pal->setEntry(c, _rgba(cmap[c*3],                       \
                               cmap[c*3+1],                     \
                               cmap[c*3+2], 255));              \
      }                                                         \
      pal->setFrame(frpos_out);                                 \
      sprite->setPalette(pal, true);                            \
    } while (0)

  unsigned char cmap[768];
  unsigned char omap[768];
  s_fli_header fli_header;
  Image *bmp, *old, *image;
  Sprite *sprite;
  LayerImage *layer;
  Palette *pal;
  int c, w, h;
  FrameNumber frpos_in;
  FrameNumber frpos_out;
  int index = 0;
  Cel *cel;

  /* open the file to read in binary mode */
  FileHandle f(fop->filename.c_str(), "rb");

  fli_read_header(f, &fli_header);
  fseek(f, 128, SEEK_SET);

  if (fli_header.magic == NO_HEADER) {
    fop_error(fop, "The file doesn't have a FLIC header\n");
    return false;
  }

  /* size by frame */
  w = fli_header.width;
  h = fli_header.height;

  /* create the bitmaps */
  bmp = Image::create(IMAGE_INDEXED, w, h);
  old = Image::create(IMAGE_INDEXED, w, h);
  pal = new Palette(FrameNumber(0), 256);
  if (!bmp || !old || !pal) {
    fop_error(fop, "Not enough memory.\n");
    if (bmp) image_free(bmp);
    if (old) image_free(old);
    if (pal) delete pal;
    return false;
  }

  // Create the image
  sprite = new Sprite(IMAGE_INDEXED, w, h, 256);
  layer = new LayerImage(sprite);
  sprite->getFolder()->addLayer(layer);
  layer->configureAsBackground();

  // Set frames and speed
  sprite->setTotalFrames(FrameNumber(fli_header.frames));
  sprite->setDurationForAllFrames(fli_header.speed);

  /* write frame by frame */
  for (frpos_in = frpos_out = FrameNumber(0);
       frpos_in < sprite->getTotalFrames();
       ++frpos_in) {
    /* read the frame */
    fli_read_frame(f, &fli_header,
                   (unsigned char *)old->dat, omap,
                   (unsigned char *)bmp->dat, cmap);

    /* first frame, or the image changes, or the palette changes */
    if ((frpos_in == 0) ||
        (image_count_diff(old, bmp))
#ifndef USE_LINK /* TODO this should be configurable through a check-box */
        || (memcmp(omap, cmap, 768) != 0)
#endif
        ) {
      /* the image changes? */
      if (frpos_in != 0)
        ++frpos_out;

      /* add the new frame */
      image = Image::createCopy(bmp);
      if (!image) {
        fop_error(fop, "Not enough memory\n");
        break;
      }

      index = sprite->getStock()->addImage(image);
      if (index < 0) {
        image_free(image);
        fop_error(fop, "Not enough memory\n");
        break;
      }

      cel = new Cel(frpos_out, index);
      layer->addCel(cel);

      /* first frame or the palette changes */
      if ((frpos_in == 0) || (memcmp(omap, cmap, 768) != 0))
        SETPAL();
    }
#ifdef USE_LINK
    /* the palette changes */
    else if (memcmp(omap, cmap, 768) != 0) {
      ++frpos_out;
      SETPAL();

      // Add link
      cel = new Cel(frpos_out, index);
      layer_add_cel(layer, cel);
    }
#endif
    // The palette and the image don't change: add duration to the last added frame
    else {
      sprite->setFrameDuration(frpos_out,
                               sprite->getFrameDuration(frpos_out)+fli_header.speed);
    }

    /* update the old image and color-map to the new ones to compare later */
    image_copy(old, bmp, 0, 0);
    memcpy(omap, cmap, 768);

    /* update progress */
    fop_progress(fop, (float)(frpos_in+1) / (float)(sprite->getTotalFrames()));
    if (fop_is_stop(fop))
      break;

    /* just one frame? */
    if (fop->oneframe)
      break;
  }

  // Update number of frames
  sprite->setTotalFrames(frpos_out.next());

  // Destroy the bitmaps
  image_free(bmp);
  image_free(old);
  delete pal;

  fop->document = new Document(sprite);
  return true;
}
Example #11
void histogram_equalize_rgb(void *src_pixels, void *dst_pixels, size_t width, size_t height, size_t stride) {
    image_copy(src_pixels, dst_pixels, height, stride);
    histogram_equalize_channel(src_pixels, dst_pixels, width, height, stride, RED);
    histogram_equalize_channel(src_pixels, dst_pixels, width, height, stride, GREEN);
    histogram_equalize_channel(src_pixels, dst_pixels, width, height, stride, BLUE);    
}
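A hypothetical call site for the function above, assuming a tightly packed 32-bit-per-pixel buffer so that the stride is width * 4 bytes; the actual pixel layout and the RED/GREEN/BLUE channel constants come from the surrounding code, which is not part of this listing:

size_t width = 640, height = 480, stride = width * 4;  /* assumed layout */
unsigned char* src = malloc(height * stride);
unsigned char* dst = malloc(height * stride);
/* ... fill src with pixel data ... */
histogram_equalize_rgb(src, dst, width, height, stride);
/* ... use the equalized pixels in dst ... */
free(src);
free(dst);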
Example #12
int main(int argc, char* argv[])
{
    printf("[i] Start...\n");

    char file_name[] = "test.bmp";
    char* filename=file_name;

    if(argc >= 2) {
        filename = argv[1];
    }

    printf("[i] file: %s\n", filename);
	
    image* img = image_create(320, 240, 3, CV_DEPTH_8U);

    if(!img) {
        printf("[!] Error: image_create()\n");
        return -1;
    }
    image_delete(&img);

    // test image loading
    image* img2 = image_load(filename);
    if(!img2) {
        printf("[!] Error: image_load()\n");
        return -1;
    }
    printf("[i] image size: %dx%dx%d (%d)\n", img2->width, img2->height, img2->n_channels, img2->size);
    image_save(img2, "test2_load_save.bmp");

    printf("[i] == Tests == \n");
#if 1
    // copy
    printf("[i] image_copy \n");
    img = image_create(img2->width, img2->height, img2->n_channels, CV_DEPTH_8U);
    image_copy(img2, img);
    image_save(img, "test2_copy.bmp");

    // test convert image to grayscale
    printf("[i] image_convert_color \n");
    image* img_gray = image_create(img2->width, img2->height, 1, CV_DEPTH_8U);
    gettimeofday(&t0, NULL);
    image_convert_color(img2, img_gray, CV_RGB2GRAY);
    gettimeofday(&t1, NULL);
    image_save(img_gray, "test3_gray.bmp");

    // test borders detection
    printf("[i] image_thin_borders \n");
    image* img_borders = NULL;
    gettimeofday(&t2, NULL);
    image_thin_borders(img_gray, &img_borders);
    gettimeofday(&t3, NULL);
    image_save(img_borders, "test4_thin_borders.bmp");

    // min-max-loc
    printf("[i] image_min_max_loc \n");
    double _min, _max;
    gettimeofday(&t4, NULL);
    image_min_max_loc(img_gray, &_min, &_max, NULL, NULL);
    gettimeofday(&t5, NULL);
    printf("[i] min=%0.2f max=%0.2f\n", _min, _max);

    // threshold
    printf("[i] image_threshold \n");
    image* img_thr = image_create(img2->width, img2->height, 1, CV_DEPTH_8U);
    gettimeofday(&t6, NULL);
    image_threshold(img_gray, img_thr, 60);
    gettimeofday(&t7, NULL);
    image_save(img_thr, "test5_threshold.bmp");
#endif

#if 1
    // rotate180
    printf("[i] image_rotate180 \n");
    image_rotate180(img2);
    image_save(img2, "test6_rotate180.bmp");
    image_rotate180(img2);
    // reflect vertical
    printf("[i] image_reflect_vertical \n");
    image_reflect_vertical(img2);
    image_save(img2, "test7_reflect_vertical.bmp");
    image_reflect_vertical(img2);
#endif // rotate180

    int colors_count;
    cv_point center;

#if 1
    // simple resize
    printf("[i] image_resize \n");
    image *img_small = image_create(160, 120, 3, CV_DEPTH_8U);
    gettimeofday(&t8, NULL);
    image_resize(img2, img_small);
    gettimeofday(&t9, NULL);
    image_save(img_small, "test8_resize.bmp");

 //   image* img_small_gray = image_create(80, 60, 1, CV_DEPTH_8U);
//    image_convert_color(img_small, img_small_gray, CV_RGB2GRAY);

//    image* img_small_borders = NULL;
//    image_thin_borders(img_small_gray, &img_small_borders);
//    image_save(img_small_borders, "test_resize_thin_borders.bmp");

#if 1
    // k-means colorer
    printf("[i] image_kmeans_colorer \n");
    image* img_kmeanes = image_create(160, 120, 3, CV_DEPTH_8U);
    image* img_kmeanes_idx = image_create(160, 120, 1, CV_DEPTH_8U);

#define CLUSTER_COUNT 10
    int cluster_count = CLUSTER_COUNT;
    cv_color_cluster clusters[CLUSTER_COUNT];

    gettimeofday(&t10, NULL);
    colors_count = image_kmeans_colorer(img_small, img_kmeanes, img_kmeanes_idx, clusters, cluster_count);
    gettimeofday(&t11, NULL);

    printf("[i] colors count: %d\n", colors_count);
    image_save(img_kmeanes, "test_kmeanscolorer.bmp");

#if 0
    print_color_clusters(clusters, CLUSTER_COUNT);
    printf("[i] === colors clusters after sort:\n");
    sort_color_clusters_by_count(clusters, CLUSTER_COUNT);
    print_color_clusters(clusters, CLUSTER_COUNT);
#endif

    image_delete(&img_kmeanes);
    image_delete(&img_kmeanes_idx);
#endif // k-means colorer

    image_delete(&img_small);
//    image_delete(&img_small_gray);
//    image_delete(&img_small_borders);
#endif // simple resize

#if 1
    // HSV
    printf("[i] image_hsv2rgb \n");
    image* img_hsv = image_create(img2->width, img2->height, 3, CV_DEPTH_8U);
    image* img_bgr = image_create(img2->width, img2->height, 3, CV_DEPTH_8U);

    gettimeofday(&t12, NULL);
    image_rgb2hsv(img2, img_hsv);
    gettimeofday(&t13, NULL);

    image_hsv2rgb(img_hsv, img_bgr);

    image_save(img_hsv, "test9_rgb2hsv.bmp");
    image_save(img_bgr, "test9_hsv2rgb.bmp");

    image_delete(&img_hsv);
    image_delete(&img_bgr);
#endif // HSV

#if 1
    // hsv colorer
    printf("[i] image_hsv_colorer \n");
    image* img_hsv_col = image_create(img2->width, img2->height, 3, CV_DEPTH_8U);
    image* img_hsv_idx = image_create(img2->width, img2->height, 1, CV_DEPTH_8U);

#define COLORS_COUNT 10
    cv_color_cluster clusters2[COLORS_COUNT];

    gettimeofday(&t14, NULL);
    colors_count = image_hsv_colorer(img2, img_hsv_col, img_hsv_idx, clusters2, COLORS_COUNT);
    gettimeofday(&t15, NULL);

    printf("[i] colors count: %d\n", colors_count);
    image_save(img_hsv_col, "test_hsvcolorer.bmp");

#if 1
    print_color_clusters(clusters2, COLORS_COUNT);
    printf("[i] === colors clusters after sort:\n");
    sort_color_clusters_by_count(clusters2, COLORS_COUNT);
    print_color_clusters(clusters2, COLORS_COUNT);

    center = get_color_center(clusters2[0].id, img_hsv_idx);
    printf("[i] first color center:  %03d %03d\n", center.x, center.y);
    center = get_color_center(clusters2[1].id, img_hsv_idx);
    printf("[i] second color center: %03d %03d\n", center.x, center.y);
    center = get_color_center(clusters2[2].id, img_hsv_idx);
    printf("[i] third color center:  %03d %03d\n", center.x, center.y);
#endif // print_color_clusters

    image_delete(&img_hsv_col);
    image_delete(&img_hsv_idx);
#endif // hsv colorer

    printf("[i] == Performance == \n");
    print_performance("image_convert_color", t1, t0);
    print_performance("image_thin_borders", t3, t2);
    print_performance("image_min_max_loc", t5, t4);
    print_performance("image_threshold", t7, t6);
    print_performance("image_resize", t9, t8);
    print_performance("image_kmeans_colorer", t11, t10);
    print_performance("image_rgb2hsv", t13, t12);
    print_performance("image_hsv_colorer", t15, t14);

    image_delete(&img);
    image_delete(&img2);
#if 1
    image_delete(&img_gray);
    image_delete(&img_borders);
    image_delete(&img_thr);
#endif

    printf("[i] End.\n");
    return 0;
}
Example #13
/**
 * Run the optical flow on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void opticflow_calc_frame(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img, struct opticflow_result_t *result)
{
  // Update FPS for information
  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
  memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval));

  // Convert image to grayscale
  image_to_grayscale(img, &opticflow->img_gray);

  // Copy to previous image if not set
  if (!opticflow->got_first_img) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    opticflow->got_first_img = TRUE;
  }

  // *************************************************************************************
  // Corner detection
  // *************************************************************************************

  // FAST corner detection (TODO: non fixed threshold)
  struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
                                         20, 20, &result->corner_cnt);

  // Adaptive threshold
  if (opticflow->fast9_adaptive) {

    // Decrease and increase the threshold based on previous values
    if (result->corner_cnt < 40 && opticflow->fast9_threshold > 5) {
      opticflow->fast9_threshold--;
    } else if (result->corner_cnt > 50 && opticflow->fast9_threshold < 60) {
      opticflow->fast9_threshold++;
    }
  }

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS
  image_show_points(img, corners, result->corner_cnt);
#endif

  // Check if we found some corners to track
  if (result->corner_cnt < 1) {
    free(corners);
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    return;
  }

  // *************************************************************************************
  // Corner Tracking
  // *************************************************************************************

  // Execute a Lucas Kanade optical flow
  result->tracked_cnt = result->corner_cnt;
  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt,
                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                       opticflow->threshold_vec, opticflow->max_track_corners);

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

  // Get the median flow
  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
  if (result->tracked_cnt == 0) {
    // We got no flow
    result->flow_x = 0;
    result->flow_y = 0;
  } else if (result->tracked_cnt > 3) {
    // Take the average of the 3 median points
    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
    result->flow_x /= 3;
    result->flow_y /= 3;
  } else {
    // Take the median point
    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
  }

  // Flow Derotation
  float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
  float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;
  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor;
  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor;
  opticflow->prev_phi = state->phi;
  opticflow->prev_theta = state->theta;

  // Velocity calculation
  result->vel_x = -result->flow_der_x * result->fps / opticflow->subpixel_factor * img->w / OPTICFLOW_FX;
  result->vel_y =  result->flow_der_y * result->fps / opticflow->subpixel_factor * img->h / OPTICFLOW_FY;

  // *************************************************************************************
  // Next Loop Preparation
  // *************************************************************************************
  free(corners);
  free(vectors);
  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
Example #14
void UndoTransaction::flattenLayers(int bgcolor)
{
  Image* cel_image;
  Cel* cel;
  int frame;

  // create a temporary image
  UniquePtr<Image> image_wrap(Image::create(m_sprite->getPixelFormat(),
                                            m_sprite->getWidth(),
                                            m_sprite->getHeight()));
  Image* image = image_wrap.get();

  /* get the background layer from the sprite */
  LayerImage* background = m_sprite->getBackgroundLayer();
  if (!background) {
    /* if there isn't a background layer, we have to create one */
    background = new LayerImage(m_sprite);

    if (isEnabled())
      m_undoHistory->pushUndoer(new undoers::AddLayer(m_undoHistory->getObjects(),
          m_sprite->getFolder(), background));

    m_sprite->getFolder()->add_layer(background);

    if (isEnabled())
      m_undoHistory->pushUndoer(new undoers::MoveLayer(m_undoHistory->getObjects(),
          background));

    background->configureAsBackground();
  }

  /* copy all frames to the background */
  for (frame=0; frame<m_sprite->getTotalFrames(); frame++) {
    /* clear the image and render this frame */
    image_clear(image, bgcolor);
    layer_render(m_sprite->getFolder(), image, 0, 0, frame);

    cel = background->getCel(frame);
    if (cel) {
      cel_image = m_sprite->getStock()->getImage(cel->getImage());
      ASSERT(cel_image != NULL);

      /* we have to save the current state of `cel_image' in the undo */
      if (isEnabled()) {
        Dirty* dirty = new Dirty(cel_image, image);
        dirty->saveImagePixels(cel_image);
        m_undoHistory->pushUndoer(new undoers::DirtyArea(
            m_undoHistory->getObjects(), cel_image, dirty));
        delete dirty;
      }
    }
    else {
      /* if there isn't a cel in this frame of the background, we
         have to create a copy of the image for the new cel */
      cel_image = Image::createCopy(image);
      /* TODO error handling: if (!cel_image) { ... } */

      /* here we create the new cel (with the new image `cel_image') */
      cel = new Cel(frame, m_sprite->getStock()->addImage(cel_image));
      /* TODO error handling: if (!cel) { ... } */

      /* and finally we add the cel in the background */
      background->addCel(cel);
    }

    image_copy(cel_image, image, 0, 0);
  }

  /* select the background */
  if (m_sprite->getCurrentLayer() != background) {
    if (isEnabled())
      m_undoHistory->pushUndoer(new undoers::SetCurrentLayer(
          m_undoHistory->getObjects(), m_sprite));

    m_sprite->setCurrentLayer(background);
  }

  // Remove old layers.
  LayerList layers = m_sprite->getFolder()->get_layers_list();
  LayerIterator it = layers.begin();
  LayerIterator end = layers.end();

  for (; it != end; ++it) {
    if (*it != background) {
      Layer* old_layer = *it;

      // Remove the layer
      if (isEnabled())
        m_undoHistory->pushUndoer(new undoers::RemoveLayer(m_undoHistory->getObjects(),
            old_layer));

      m_sprite->getFolder()->remove_layer(old_layer);

      // Destroy the layer
      delete old_layer;
    }
  }
}
Example #15
void UndoTransaction::backgroundFromLayer(LayerImage* layer, int bgcolor)
{
  ASSERT(layer);
  ASSERT(layer->is_image());
  ASSERT(layer->is_readable());
  ASSERT(layer->is_writable());
  ASSERT(layer->getSprite() == m_sprite);
  ASSERT(m_sprite->getBackgroundLayer() == NULL);

  // create a temporary image to draw each frame of the new
  // `Background' layer
  UniquePtr<Image> bg_image_wrap(Image::create(m_sprite->getPixelFormat(),
                                               m_sprite->getWidth(),
                                               m_sprite->getHeight()));
  Image* bg_image = bg_image_wrap.get();

  CelIterator it = layer->getCelBegin();
  CelIterator end = layer->getCelEnd();

  for (; it != end; ++it) {
    Cel* cel = *it;
    ASSERT((cel->getImage() > 0) &&
           (cel->getImage() < m_sprite->getStock()->size()));

    // get the image from the sprite's stock of images
    Image* cel_image = m_sprite->getStock()->getImage(cel->getImage());
    ASSERT(cel_image);

    image_clear(bg_image, bgcolor);
    image_merge(bg_image, cel_image,
                cel->getX(),
                cel->getY(),
                MID(0, cel->getOpacity(), 255),
                layer->getBlendMode());

    // now we have to copy the new image (bg_image) to the cel...
    setCelPosition(cel, 0, 0);

    // same size of cel-image and bg-image
    if (bg_image->w == cel_image->w &&
        bg_image->h == cel_image->h) {
      if (isEnabled())
        m_undoHistory->pushUndoer(new undoers::ImageArea(m_undoHistory->getObjects(),
            cel_image, 0, 0, cel_image->w, cel_image->h));

      image_copy(cel_image, bg_image, 0, 0);
    }
    else {
      replaceStockImage(cel->getImage(), Image::createCopy(bg_image));
    }
  }

  // Fill all empty cels with a flat-image filled with bgcolor
  for (int frame=0; frame<m_sprite->getTotalFrames(); frame++) {
    Cel* cel = layer->getCel(frame);
    if (!cel) {
      Image* cel_image = Image::create(m_sprite->getPixelFormat(), m_sprite->getWidth(), m_sprite->getHeight());
      image_clear(cel_image, bgcolor);

      // Add the new image in the stock
      int image_index = addImageInStock(cel_image);

      // Create the new cel and add it to the new background layer
      cel = new Cel(frame, image_index);
      addCel(layer, cel);
    }
  }

  configureLayerAsBackground(layer);
}
Example #16
	int start(const std::vector<CL_String> &args)
	{
		CL_ConsoleWindow console("Console");

		try
		{
			CL_DisplayWindow window("Image test", 1024, 768);
			CL_GraphicContext gc = window.get_gc();

			// Connect the Window close event
			CL_Slot slot_quit = window.sig_window_close().connect(this, &App::on_window_close);

			CL_BlendMode blend_mode1;
			blend_mode1.enable_blending(true);
			gc.set_blend_mode(blend_mode1);

			quit = false;

			CL_ResourceManager resources("resources.xml");

			CL_Texture texture(gc, "Images/square.png");

			CL_Image image_texture(gc, texture, CL_Rect(0, 0, texture.get_size()));
			CL_Image image_loaded(gc, "Images/square.png");
			CL_Image image_resources(gc, "entire_image", &resources);
			CL_Image image_copy(image_texture);
			CL_Image image_top_right(gc, "image_top_right", &resources);
			CL_Image image_bottom_right(gc, "image_bottom_right", &resources);
			CL_Image image_black(gc, "image_black", &resources);

			CL_Font small_font = CL_Font(gc, "Tahoma", 12);

			//CL_Console::write_line("Color: %1,%2,%3,%4", image_resources.get_color().r, image_resources.get_color().g, image_resources.get_color().b, image_resources.get_color().a);
			//CL_Console::write_line("Scale: %1,%2", image_resources.get_scale_x(), image_resources.get_scale_y());
			//CL_Console::write_line("Translation: %1,%2,%3", image_resources.get_alignment());

			while((!quit) && (!window.get_ic().get_keyboard().get_keycode(CL_KEY_ESCAPE)))
			{
				gc.clear(CL_Colorf(0.5f,0.5f,0.5f));

				small_font.draw_text(gc, 10, 40, "Image From Texture (10,60)");
				image_texture.draw(gc, 10, 60);

				small_font.draw_text(gc, 150, 40, "Image From Load (150,60)");
				image_loaded.draw(gc, 150, 60);

				small_font.draw_text(gc, 300, 40, "Image From Resources (300,60)");
				image_resources.draw(gc, 300, 60);

				small_font.draw_text(gc, 450, 40, "Image Copied (450,60)");
				image_copy.draw(gc, 450, 60);

				small_font.draw_text(gc, 10, 190, "Image - Top Right (10,200)");
				image_top_right.draw(gc, 10, 200);

				small_font.draw_text(gc, 150, 190, "Image - Top Right (150,200)");
				image_texture.draw(gc, CL_Rect(32, 0, CL_Size(32, 32)), CL_Rect(150, 200, CL_Size(32, 32)));

				small_font.draw_text(gc, 300, 190, "Image - Bottom Right (300,200)");
				image_bottom_right.draw(gc, 300, 200);

				small_font.draw_text(gc, 450, 190, "Image - Bottom Right (450,200)");
				image_texture.draw(gc, CL_Rect(32, 32, CL_Size(32, 32)), CL_Rect(450, 200, CL_Size(32, 32)));

				small_font.draw_text(gc, 10, 290, "700 Images (10,300)");
				for(int i=0;i<700;i++)
					image_texture.draw(gc, 10, 300);

				small_font.draw_text(gc, 150, 290, "br image (150,400) Size(128,256)");
				image_bottom_right.draw(gc, CL_Rect(150, 300, CL_Size(128, 256)));

				small_font.draw_text(gc, 300, 290, "Image - black");
				image_black.draw(gc, 300, 300);

				small_font.draw_text(gc, 300, 490, "Image - Scale (1.5, 2.5)");
				image_texture.set_scale(1.5f, 2.5f);
				image_texture.draw(gc, 300, 500);
				image_texture.set_scale(1.0f, 1.0f);

				small_font.draw_text(gc, 450, 460, "Image - Alignment (4 images with 8 pixel offset)");
				small_font.draw_text(gc, 450, 475, "(top left, top right, bottom left, bottom right)");
				small_font.draw_text(gc, 450, 490, "(Circle denotes the draw origin)");
				const int offset = 96;

				image_texture.set_alignment(origin_top_left, 8, 8);
				image_texture.draw(gc, 450+offset, 500+offset);
				image_texture.set_alignment(origin_top_right, -8, 8);
				image_texture.draw(gc, 450+offset, 500+offset);

				image_texture.set_alignment(origin_bottom_left, 8, -8);
				image_texture.draw(gc, 450+offset, 500+offset);
				image_texture.set_alignment(origin_bottom_right, -8, -8);
				image_texture.draw(gc, 450+offset, 500+offset);

				CL_Draw::circle(gc, 450+offset, 500+offset, 4, CL_Colorf(1.0f, 1.0f, 1.0f, 0.9f));

				small_font.draw_text(gc, 700, 460, "Image - Center Alignment (4 images with 8 pixel offset)");
				small_font.draw_text(gc, 700, 475, "(top center, right center, bottom center, left center)");
				small_font.draw_text(gc, 700, 490, "(Circle denotes the draw origin)");

				image_texture.set_alignment(origin_top_center, 0, 8);
				image_texture.draw(gc, 700+offset, 500+offset);
				image_texture.set_alignment(origin_bottom_center, 0, -8);
				image_texture.draw(gc, 700+offset, 500+offset);

				image_texture.set_alignment(origin_center_left, 8, 0);
				image_texture.draw(gc, 700+offset, 500+offset);
				image_texture.set_alignment(origin_center_right, -8, 0);
				image_texture.draw(gc, 700+offset, 500+offset);

				CL_Draw::circle(gc, 700+offset, 500+offset, 4, CL_Colorf(1.0f, 1.0f, 1.0f, 0.9f));

				small_font.draw_text(gc, 700, 160, "Image - Center Align (4 images with 64 pixel offset)");
				small_font.draw_text(gc, 700, 175, "Also Includes a centered image (Without offset)");
				small_font.draw_text(gc, 700, 190, "(Circle denotes the draw origin)");

				const int center_image_offset = 64;

				image_texture.set_alignment(origin_center, 0, 0);
				image_texture.draw(gc, 700+offset, 200+offset);

				image_texture.set_alignment(origin_center, 0, center_image_offset);
				image_texture.draw(gc, 700+offset, 200+offset);
				image_texture.set_alignment(origin_center, 0, -center_image_offset);
				image_texture.draw(gc, 700+offset, 200+offset);

				image_texture.set_alignment(origin_center, center_image_offset, 0);
				image_texture.draw(gc, 700+offset, 200+offset);
				image_texture.set_alignment(origin_center, -center_image_offset, 0);
				image_texture.draw(gc, 700+offset, 200+offset);

				CL_Draw::circle(gc, 700+offset, 200+offset, 4, CL_Colorf(1.0f, 1.0f, 1.0f, 0.9f));

				// Restore alignment
				image_texture.set_alignment(origin_top_left, 0, 0);

				dump_fps();

				window.flip(1);
				CL_KeepAlive::process();
			}

			return 0;
		}
		catch(CL_Exception &error)
		{
			CL_Console::write_line("Exception caught:");
			CL_Console::write_line(error.message);
			console.display_close_message();

			return -1;
		}

		return 0;
	}
Example #17
void histogram_equalize_color_to_gray(void *src_pixels, void *dst_pixels, size_t width, size_t height, size_t stride) {
    image_copy(src_pixels, dst_pixels, height, stride);
    
    histogram_equalize_channel(src_pixels, dst_pixels, width, height, stride, GRAY);
}
Example #18
/**
 * Run the optical flow with FAST9 and Lucas-Kanade on a new image frame
 * @param[in] *opticflow The opticalflow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                             struct opticflow_result_t *result)
{
    // variables for size_divergence:
    float size_divergence;
    int n_samples;

    // variables for linear flow fit:
    float error_threshold;
    int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
    struct linear_flow_fit_info fit_info;

    // Update FPS for information
    result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
    memcpy(&opticflow->prev_timestamp, &img->ts, sizeof(struct timeval));

    // Convert image to grayscale
    image_to_grayscale(img, &opticflow->img_gray);

    // Copy to previous image if not set
    if (!opticflow->got_first_img) {
        image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
        opticflow->got_first_img = true;
    }

    // *************************************************************************************
    // Corner detection
    // *************************************************************************************

    // FAST corner detection (TODO: non fixed threshold)
    struct point_t *corners = fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
                                           0, 0, &result->corner_cnt);

    // Adaptive threshold
    if (opticflow->fast9_adaptive) {

        // Decrease and increase the threshold based on previous values
        if (result->corner_cnt < 40 && opticflow->fast9_threshold > 5) {
            opticflow->fast9_threshold--;
        } else if (result->corner_cnt > 50 && opticflow->fast9_threshold < 60) {
            opticflow->fast9_threshold++;
        }
    }

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_CORNERS
    image_show_points(img, corners, result->corner_cnt);
#endif

    // Check if we found some corners to track
    if (result->corner_cnt < 1) {
        free(corners);
        image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
        return;
    }

    // *************************************************************************************
    // Corner Tracking
    // *************************************************************************************

    // Execute a Lucas Kanade optical flow
    result->tracked_cnt = result->corner_cnt;
    struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, corners, &result->tracked_cnt,
                                         opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                         opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level);

#if OPTICFLOW_DEBUG && OPTICFLOW_SHOW_FLOW
    image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

    // Estimate size divergence:
    if (SIZE_DIV) {
        n_samples = 100;
        size_divergence = get_size_divergence(vectors, result->tracked_cnt, n_samples);
        result->div_size = size_divergence;
    } else {
        result->div_size = 0.0f;
    }
    if (LINEAR_FIT) {
        // Linear flow fit (normally derotation should be performed before):
        error_threshold = 10.0f;
        n_iterations_RANSAC = 20;
        n_samples_RANSAC = 5;
        success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
                                                n_samples_RANSAC, img->w, img->h, &fit_info);

        if (!success_fit) {
            fit_info.divergence = 0.0f;
            fit_info.surface_roughness = 0.0f;
        }

        result->divergence = fit_info.divergence;
        result->surface_roughness = fit_info.surface_roughness;
    } else {
        result->divergence = 0.0f;
        result->surface_roughness = 0.0f;
    }


    // Get the median flow
    qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
    if (result->tracked_cnt == 0) {
        // We got no flow
        result->flow_x = 0;
        result->flow_y = 0;
    } else if (result->tracked_cnt > 3) {
        // Take the average of the 3 median points
        result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
        result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
        result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
        result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
        result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
        result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
        result->flow_x /= 3;
        result->flow_y /= 3;
    } else {
        // Take the median point
        result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
        result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
    }

    // Flow Derotation
    float diff_flow_x = 0;
    float diff_flow_y = 0;

    /*// Flow Derotation TODO:
    float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
    float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;*/

    if (opticflow->derotation) {
        diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
        diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;
    }

    result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor;
    result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor;
    opticflow->prev_phi = state->phi;
    opticflow->prev_theta = state->theta;

    // Velocity calculation
    // Right now this formula is under the assumption that the flow only exists along the center axis of the camera.
    // TODO: Calculate the velocity in a more sophisticated way, taking into account the drone's angle and the slope of the ground plane.
    float vel_x = result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FX;
    float vel_y = result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FY;
    result->vel_x = vel_x;
    result->vel_y = vel_y;

    // Velocity calculation: uncomment if focal length of the camera is not known or incorrect.
    //  result->vel_x =  - result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_W / img->w
    //  result->vel_y =  result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_H / img->h

    // Rotate velocities from camera frame coordinates to body coordinates.
    // IMPORTANT for control! This is the case on the ARDrone and Bebop, but on other systems this might be different!
    result->vel_body_x = vel_y;
    result->vel_body_y = - vel_x;

    // Determine quality of noise measurement for state filter
    //TODO Experiment with multiple noise measurement models
    if (result->tracked_cnt < 10) {
        result->noise_measurement = (float)result->tracked_cnt / (float)opticflow->max_track_corners;
    } else {
        result->noise_measurement = 1.0;
    }

    // *************************************************************************************
    // Next Loop Preparation
    // *************************************************************************************
    free(corners);
    free(vectors);
    image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
Example #19
void ExpandCelCanvas::commit()
{
  ASSERT(!m_closed);
  ASSERT(!m_committed);

  undo::UndoHistory* undo = m_document->getUndoHistory();

  // If the size of each image is the same, we can create an undo
  // with only the differences between both images.
  if (m_cel->getX() == m_originalCelX &&
      m_cel->getY() == m_originalCelY &&
      m_celImage->w == m_dstImage->w &&
      m_celImage->h == m_dstImage->h) {
    // Was m_celImage created at the start of the tool-loop?
    if (m_celCreated) {
      // We can keep the m_celImage

      // We copy the destination image to the m_celImage
      image_copy(m_celImage, m_dstImage, 0, 0);

      // Add the m_celImage in the images stock of the sprite.
      m_cel->setImage(m_sprite->getStock()->addImage(m_celImage));

      // Is the undo enabled?
      if (undo->isEnabled()) {
        // We can temporarily remove the cel.
        static_cast<LayerImage*>(m_sprite->getCurrentLayer())->removeCel(m_cel);

        // We create the undo information (for the new m_celImage
        // in the stock and the new cel in the layer)...
        undo->pushUndoer(new undoers::OpenGroup());
        undo->pushUndoer(new undoers::AddImage(undo->getObjects(),
                                               m_sprite->getStock(), m_cel->getImage()));
        undo->pushUndoer(new undoers::AddCel(undo->getObjects(),
                                             m_sprite->getCurrentLayer(), m_cel));
        undo->pushUndoer(new undoers::CloseGroup());

        // And finally we add the cel again in the layer.
        static_cast<LayerImage*>(m_sprite->getCurrentLayer())->addCel(m_cel);
      }
    }
    // If the m_celImage was already created before the whole process...
    else {
      // Add to the undo history the differences between m_celImage and m_dstImage
      if (undo->isEnabled()) {
        UniquePtr<Dirty> dirty(new Dirty(m_celImage, m_dstImage));

        dirty->saveImagePixels(m_celImage);
        if (dirty != NULL)
          undo->pushUndoer(new undoers::DirtyArea(undo->getObjects(), m_celImage, dirty));
      }

      // Copy the destination to the cel image.
      image_copy(m_celImage, m_dstImage, 0, 0);
    }
  }
  // If the size of both images are different, we have to
  // replace the entire image.
  else {
    if (undo->isEnabled()) {
      undo->pushUndoer(new undoers::OpenGroup());

      if (m_cel->getX() != m_originalCelX ||
          m_cel->getY() != m_originalCelY) {
        int x = m_cel->getX();
        int y = m_cel->getY();
        m_cel->setPosition(m_originalCelX, m_originalCelY);

        undo->pushUndoer(new undoers::SetCelPosition(undo->getObjects(), m_cel));

        m_cel->setPosition(x, y);
      }

      undo->pushUndoer(new undoers::ReplaceImage(undo->getObjects(),
                                                 m_sprite->getStock(), m_cel->getImage()));
      undo->pushUndoer(new undoers::CloseGroup());
    }

    // Replace the image in the stock.
    m_sprite->getStock()->replaceImage(m_cel->getImage(), m_dstImage);

    // Destroy the old cel image.
    image_free(m_celImage);

    // Now m_dstImage is in use, so we must not destroy it.
    m_dstImage = NULL;
  }

  m_committed = true;
}
Example #20
bool FliFormat::onSave(FileOp* fop)
{
  Sprite* sprite = fop->document->getSprite();
  unsigned char cmap[768];
  unsigned char omap[768];
  s_fli_header fli_header;
  int c, times;
  Image *bmp, *old;
  Palette *pal;

  /* prepare fli header */
  fli_header.filesize = 0;
  fli_header.frames = 0;
  fli_header.width = sprite->getWidth();
  fli_header.height = sprite->getHeight();

  if ((fli_header.width == 320) && (fli_header.height == 200))
    fli_header.magic = HEADER_FLI;
  else
    fli_header.magic = HEADER_FLC;

  fli_header.depth = 8;
  fli_header.flags = 3;
  fli_header.speed = get_time_precision(sprite);
  fli_header.created = 0;
  fli_header.updated = 0;
  fli_header.aspect_x = 1;
  fli_header.aspect_y = 1;
  fli_header.oframe1 = fli_header.oframe2 = 0;

  /* open the file to write in binary mode */
  FileHandle f(fop->filename.c_str(), "wb");

  fseek(f, 128, SEEK_SET);

  /* create the bitmaps */
  bmp = Image::create(IMAGE_INDEXED, sprite->getWidth(), sprite->getHeight());
  old = Image::create(IMAGE_INDEXED, sprite->getWidth(), sprite->getHeight());
  if ((!bmp) || (!old)) {
    fop_error(fop, "Not enough memory for temporary bitmaps.\n");
    if (bmp) image_free(bmp);
    if (old) image_free(old);
    return false;
  }

  /* write frame by frame */
  for (FrameNumber frpos(0);
       frpos < sprite->getTotalFrames();
       ++frpos) {
    /* get color map */
    pal = sprite->getPalette(frpos);
    for (c=0; c<256; c++) {
      cmap[3*c  ] = _rgba_getr(pal->getEntry(c));
      cmap[3*c+1] = _rgba_getg(pal->getEntry(c));
      cmap[3*c+2] = _rgba_getb(pal->getEntry(c));
    }

    /* render the frame in the bitmap */
    image_clear(bmp, 0);
    layer_render(sprite->getFolder(), bmp, 0, 0, frpos);

    /* how many times this frame should be written to get the same
       time that it has in the sprite */
    times = sprite->getFrameDuration(frpos) / fli_header.speed;

    for (c=0; c<times; c++) {
      /* write this frame; the very first write has no previous frame or
         colormap to delta against, hence the NULL arguments */
      if (frpos == 0 && c == 0)
        fli_write_frame(f, &fli_header, NULL, NULL,
                        (unsigned char *)bmp->dat, cmap, W_ALL);
      else
        fli_write_frame(f, &fli_header,
                        (unsigned char *)old->dat, omap,
                        (unsigned char *)bmp->dat, cmap, W_ALL);

      /* update the old image and color-map to the new ones to compare later */
      image_copy(old, bmp, 0, 0);
      memcpy(omap, cmap, 768);
    }

    /* update progress */
    fop_progress(fop, (float)(frpos.next()) / (float)(sprite->getTotalFrames()));
  }

  /* write the header; the FileHandle closes the file when it goes out of scope */
  fli_write_header(f, &fli_header);

  /* destroy the bitmaps */
  image_free(bmp);
  image_free(old);

  return true;
}
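FLI/FLC files carry a single global frame delay (fli_header.speed), so the saver above approximates per-frame durations by writing the same frame several times. A minimal standalone sketch of that mapping, with made-up durations purely for illustration:

#include <stdio.h>

int main(void)
{
  int speed_ms = 70;                          /* one global delay, as in fli_header.speed */
  int durations_ms[] = { 70, 140, 210 };      /* hypothetical per-frame durations */
  int n_frames = (int)(sizeof(durations_ms) / sizeof(durations_ms[0]));

  for (int i = 0; i < n_frames; ++i) {
    int times = durations_ms[i] / speed_ms;   /* same integer division as in the saver */
    printf("frame %d: %d ms -> written %d time(s)\n", i, durations_ms[i], times);
  }
  return 0;
}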
Example #21
/**
 * Run the optical flow calculation with FAST9 corner detection and Lucas-Kanade
 * tracking on a new image frame.
 * @param[in] *opticflow The optical flow structure that keeps track of previous images
 * @param[in] *state The state of the drone
 * @param[in] *img The image frame to calculate the optical flow from
 * @param[out] *result The optical flow result
 */
void calc_fast9_lukas_kanade(struct opticflow_t *opticflow, struct opticflow_state_t *state, struct image_t *img,
                             struct opticflow_result_t *result)
{
  if (opticflow->just_switched_method) {
    opticflow_calc_init(opticflow, img->w, img->h);
  }

  // variables for size_divergence:
  float size_divergence;
  int n_samples;

  // variables for linear flow fit:
  float error_threshold;
  int n_iterations_RANSAC, n_samples_RANSAC, success_fit;
  struct linear_flow_fit_info fit_info;

  // Update FPS for information
  result->fps = 1 / (timeval_diff(&opticflow->prev_timestamp, &img->ts) / 1000.);
  opticflow->prev_timestamp = img->ts;

  // Convert image to grayscale
  image_to_grayscale(img, &opticflow->img_gray);

  // Copy to previous image if not set
  if (!opticflow->got_first_img) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    opticflow->got_first_img = true;
  }

  // *************************************************************************************
  // Corner detection
  // *************************************************************************************

  // FAST corner detection
  // TODO: There is something wrong with fast9_detect destabilizing the FPS. The problem is reduced
  // by setting min_distance to 0 (see defines), but a more permanent solution should be considered.
  fast9_detect(img, opticflow->fast9_threshold, opticflow->fast9_min_distance,
               opticflow->fast9_padding, opticflow->fast9_padding, &result->corner_cnt,
               &opticflow->fast9_rsize,
               opticflow->fast9_ret_corners);

  // Adaptive threshold
  if (opticflow->fast9_adaptive) {
    // Decrease and increase the threshold based on previous values
    if (result->corner_cnt < 40
        && opticflow->fast9_threshold > FAST9_LOW_THRESHOLD) { // TODO: Replace 40 with OPTICFLOW_MAX_TRACK_CORNERS / 2
      opticflow->fast9_threshold--;
    } else if (result->corner_cnt > OPTICFLOW_MAX_TRACK_CORNERS * 2 && opticflow->fast9_threshold < FAST9_HIGH_THRESHOLD) {
      opticflow->fast9_threshold++;
    }
  }

#if OPTICFLOW_SHOW_CORNERS
  image_show_points(img, opticflow->fast9_ret_corners, result->corner_cnt);
#endif

  // Check if we found some corners to track
  if (result->corner_cnt < 1) {
    image_copy(&opticflow->img_gray, &opticflow->prev_img_gray);
    return;
  }

  // *************************************************************************************
  // Corner Tracking
  // *************************************************************************************

  // Execute a Lucas Kanade optical flow
  result->tracked_cnt = result->corner_cnt;
  struct flow_t *vectors = opticFlowLK(&opticflow->img_gray, &opticflow->prev_img_gray, opticflow->fast9_ret_corners,
                                       &result->tracked_cnt,
                                       opticflow->window_size / 2, opticflow->subpixel_factor, opticflow->max_iterations,
                                       opticflow->threshold_vec, opticflow->max_track_corners, opticflow->pyramid_level);

#if OPTICFLOW_SHOW_FLOW
  printf("show: n tracked = %d\n", result->tracked_cnt);
  image_show_flow(img, vectors, result->tracked_cnt, opticflow->subpixel_factor);
#endif

  // Estimate size divergence:
  if (SIZE_DIV) {
    n_samples = 100;
    size_divergence = get_size_divergence(vectors, result->tracked_cnt, n_samples);
    result->div_size = size_divergence;
  } else {
    result->div_size = 0.0f;
  }
  if (LINEAR_FIT) {
    // Linear flow fit (normally derotation should be performed before):
    error_threshold = 10.0f;
    n_iterations_RANSAC = 20;
    n_samples_RANSAC = 5;
    success_fit = analyze_linear_flow_field(vectors, result->tracked_cnt, error_threshold, n_iterations_RANSAC,
                                            n_samples_RANSAC, img->w, img->h, &fit_info);

    if (!success_fit) {
      fit_info.divergence = 0.0f;
      fit_info.surface_roughness = 0.0f;
    }

    result->divergence = fit_info.divergence;
    result->surface_roughness = fit_info.surface_roughness;
  } else {
    result->divergence = 0.0f;
    result->surface_roughness = 0.0f;
  }


  // Get the median flow
  qsort(vectors, result->tracked_cnt, sizeof(struct flow_t), cmp_flow);
  if (result->tracked_cnt == 0) {
    // We got no flow
    result->flow_x = 0;
    result->flow_y = 0;
  } else if (result->tracked_cnt > 3) {
    // Take the average of the 3 median points
    result->flow_x = vectors[result->tracked_cnt / 2 - 1].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2 - 1].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2].flow_y;
    result->flow_x += vectors[result->tracked_cnt / 2 + 1].flow_x;
    result->flow_y += vectors[result->tracked_cnt / 2 + 1].flow_y;
    result->flow_x /= 3;
    result->flow_y /= 3;
  } else {
    // Take the median point
    result->flow_x = vectors[result->tracked_cnt / 2].flow_x;
    result->flow_y = vectors[result->tracked_cnt / 2].flow_y;
  }

  // Flow Derotation
  float diff_flow_x = 0;
  float diff_flow_y = 0;

  /*// Flow Derotation TODO:
  float diff_flow_x = (state->phi - opticflow->prev_phi) * img->w / OPTICFLOW_FOV_W;
  float diff_flow_y = (state->theta - opticflow->prev_theta) * img->h / OPTICFLOW_FOV_H;*/

  if (opticflow->derotation && result->tracked_cnt > 5) {
    diff_flow_x = (state->rates.p) / result->fps * img->w / OPTICFLOW_FOV_W;
    diff_flow_y = (state->rates.q) / result->fps * img->h / OPTICFLOW_FOV_H;
  }

  result->flow_der_x = result->flow_x - diff_flow_x * opticflow->subpixel_factor *
                       opticflow->derotation_correction_factor_x;
  result->flow_der_y = result->flow_y - diff_flow_y * opticflow->subpixel_factor *
                       opticflow->derotation_correction_factor_y;
  opticflow->prev_rates = state->rates;

  // Velocity calculation
  // Right now this formula assumes that the flow only exists along the central axis of the camera.
  // TODO: Calculate the velocity in a more sophisticated way, taking into account the drone's angle and the slope of the ground plane.
  float vel_x = result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FX;
  float vel_y = result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor  / OPTICFLOW_FY;

  // Apply a median filter to the velocity if desired
  if (opticflow->median_filter) {
    result->vel_x = (float)update_median_filter(&vel_x_filt, (int32_t)(vel_x * 1000)) / 1000;
    result->vel_y = (float)update_median_filter(&vel_y_filt, (int32_t)(vel_y * 1000)) / 1000;
  } else {
    result->vel_x = vel_x;
    result->vel_y = vel_y;
  }
  // Alternative velocity calculation: uncomment if the focal length of the camera is unknown or incorrect.
  //  result->vel_x =  - result->flow_der_x * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_W / img->w
  //  result->vel_y =  result->flow_der_y * result->fps * state->agl / opticflow->subpixel_factor * OPTICFLOW_FOV_H / img->h


  // Determine quality of noise measurement for state filter
  // TODO: Develop a noise model based on ground truth

  float noise_measurement_temp = (1 - ((float)result->tracked_cnt / ((float)opticflow->max_track_corners * 1.25)));
  result->noise_measurement = noise_measurement_temp;

  // *************************************************************************************
  // Next Loop Preparation
  // *************************************************************************************
  free(vectors);
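  // Swap the current and previous grayscale images so this frame becomes the
  // reference for the next call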
  image_switch(&opticflow->img_gray, &opticflow->prev_img_gray);
}
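The median-flow step in calc_fast9_lukas_kanade() sorts the vectors and, when more than three remain, averages the three entries around the midpoint. A standalone sketch of that selection logic (the struct and comparator are simplified stand-ins, not Paparazzi's struct flow_t or cmp_flow):

#include <stdlib.h>

struct flow2d { int flow_x, flow_y; };        /* simplified stand-in for struct flow_t */

/* Order by the x component only; the real cmp_flow may use another criterion. */
static int cmp_flow2d(const void* a, const void* b)
{
  const struct flow2d* fa = (const struct flow2d*)a;
  const struct flow2d* fb = (const struct flow2d*)b;
  return (fa->flow_x > fb->flow_x) - (fa->flow_x < fb->flow_x);
}

static void median_flow(struct flow2d* v, int n, int* out_x, int* out_y)
{
  *out_x = *out_y = 0;
  if (n == 0)
    return;                                   /* no flow at all */

  qsort(v, n, sizeof(v[0]), cmp_flow2d);
  if (n > 3) {
    /* average of the three central vectors, as in the function above */
    for (int i = n / 2 - 1; i <= n / 2 + 1; ++i) {
      *out_x += v[i].flow_x;
      *out_y += v[i].flow_y;
    }
    *out_x /= 3;
    *out_y /= 3;
  } else {
    *out_x = v[n / 2].flow_x;                 /* plain median */
    *out_y = v[n / 2].flow_y;
  }
}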
Example #22
void DocumentApi::flattenLayers(Sprite* sprite, int bgcolor)
{
  Image* cel_image;
  Cel* cel;

  DocumentUndo* undo = m_document->getUndo();

  // Create a temporary image.
  UniquePtr<Image> image_wrap(Image::create(sprite->getPixelFormat(),
                                            sprite->getWidth(),
                                            sprite->getHeight()));
  Image* image = image_wrap.get();

  // Get the background layer from the sprite.
  LayerImage* background = sprite->getBackgroundLayer();
  if (!background) {
    // If there isn't a background layer, we have to create one.
    background = new LayerImage(sprite);

    addLayer(sprite->getFolder(), background, NULL);
    configureLayerAsBackground(background);
  }

  // Copy all frames to the background.
  for (FrameNumber frame(0); frame<sprite->getTotalFrames(); ++frame) {
    // Clear the image and render this frame.
    image_clear(image, bgcolor);
    layer_render(sprite->getFolder(), image, 0, 0, frame);

    cel = background->getCel(frame);
    if (cel) {
      cel_image = sprite->getStock()->getImage(cel->getImage());
      ASSERT(cel_image != NULL);

      // We have to save the current state of `cel_image' in the undo.
      if (undo->isEnabled()) {
        Dirty* dirty = new Dirty(cel_image, image);
        dirty->saveImagePixels(cel_image);
        m_undoers->pushUndoer(new undoers::DirtyArea(
            getObjects(), cel_image, dirty));
        delete dirty;
      }
    }
    else {
      // If there isn't a cel for this frame in the background, we
      // have to create a copy of the image for the new cel.
      cel_image = Image::createCopy(image);
      // TODO error handling: if createCopy throws

      // Here we create the new cel (with the new image `cel_image').
      cel = new Cel(frame, sprite->getStock()->addImage(cel_image));
      // TODO error handling: if new Cel throws

      // And finally we add the cel to the background.
      background->addCel(cel);
    }

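    // In both cases, copy the freshly rendered frame into the background cel's image.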
    image_copy(cel_image, image, 0, 0);
  }

  // Delete old layers.
  LayerList layers = sprite->getFolder()->getLayersList();
  LayerIterator it = layers.begin();
  LayerIterator end = layers.end();
  for (; it != end; ++it)
    if (*it != background)
      removeLayer(*it);
}
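Both flattenLayers() and the cel-commit code earlier in this listing build a Dirty object from the old and new image before overwriting the old one, so the undo history only has to record what actually changed. A generic sketch of that diff-then-record idea, independent of Aseprite's real Dirty/DirtyArea API (all names and the flat pixel layout below are invented for illustration):

#include <stdint.h>
#include <stdlib.h>

/* Minimal record of one changed pixel: enough to undo a later image_copy. */
typedef struct {
  int x, y;
  uint32_t old_value;
} pixel_change;

/* Collect the pixels that differ between old_px and new_px (both w*h, row-major).
   Returns a malloc'ed array (caller frees) and stores its length in *count. */
static pixel_change* diff_images(const uint32_t* old_px, const uint32_t* new_px,
                                 int w, int h, int* count)
{
  pixel_change* changes = malloc((size_t)w * (size_t)h * sizeof(*changes));
  int n = 0;

  *count = 0;
  if (!changes)
    return NULL;

  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      int i = y * w + x;
      if (old_px[i] != new_px[i]) {
        changes[n].x = x;
        changes[n].y = y;
        changes[n].old_value = old_px[i];     /* what an undo must restore */
        n++;
      }
    }
  }

  *count = n;
  return changes;
}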