Example #1
int main (int argc, char *argv[], char *envp[]) {
    char message[512];
    int a, b;
    
    printf("Chiffrement affine de la forme y = Ax + B\n"
           "résolution dans Z/%dZ\n\n",mod);
    
    printf("Saisir le coefficient A (inversible dans Z/%dZ) : ",mod);
    scanf("%d",&a);
    while (pgcd(mod, a) != 1) { // A must be invertible
        printf("\e[1m\e[38;5;9m%d n'est pas inversible dans Z/%dZ !\n\e[m",a,mod);
        printf("Saisir le coefficient A (inversible dans Z/%dZ) : ",mod);
        scanf("%d",&a);
    }
    
    if (a>mod) {
        printf("A = %d soit %d dans Z/%dZ\n",a,a%mod,mod);
        a %= mod;
    }else if (a<0) {
        printf("A = %d soit %d dans Z/%dZ\n",a,(a%mod)+mod,mod);
        a = (a%mod)+mod;
    }
    
    
    printf("Saisir le coefficient B : ");
    scanf("%d",&b);
    
    if (b>mod) {
        printf("B = %d soit %d dans Z/%dZ\n",b,b%mod,mod);
        b %= mod;
    }else if (b<0) {
        printf("B = %d soit %d dans Z/%dZ\n",b,(b%mod)+mod,mod);
        b = (b%mod)+mod;
    }
    
    printf("Chiffrement affine de la forme y = %dx + %d\n",a,b);
    
    
    printf("Message ? (1 seule lettre pour tout afficher)\n");
    scanf(" %511[^\n]s",message);
    
    if (strlen(message) == 1) {
        strcpy(message, "A-B-C-D-E-F-G-H-I-J-K-L-M-N-O-P-Q-R-S-T-U-V-W-X-Y-Z- -,-.-?-:");
        printf("Message trop court, voici l'alphabet :\n"
               "%s\n",message);
        affine(message, a, b);
        printf("%s\n",message);
    }else {
        printf("MSG = \"%s\"\n",message);
        affine(message, a, b);
        printf("DEC = \"%s\"\n",message);
    }
    
    return 0;
}
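
The listing above relies on a global modulus mod and on two helpers, pgcd() and affine(), that are not shown. A minimal sketch of what they could look like, assuming a 31-symbol alphabet matching the one printed by the example (the names come from the call sites above; the bodies are assumptions, not the original author's code):

#include <string.h>

/* Hypothetical helpers assumed by the listing above. */
static const char alphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ ,.?:";
static const int mod = (int)sizeof(alphabet) - 1;   /* 31 symbols */

/* Euclid's algorithm: used to check that A is invertible modulo mod. */
int pgcd(int a, int b) {
    while (b != 0) {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}

/* Encrypt in place: a symbol at alphabet position x becomes the symbol at
 * position (a*x + b) mod mod; characters outside the alphabet (such as the
 * '-' separators) are left unchanged. */
void affine(char *msg, int a, int b) {
    for (size_t i = 0; msg[i] != '\0'; ++i) {
        const char *p = strchr(alphabet, msg[i]);
        if (p != NULL) {
            int x = (int)(p - alphabet);
            msg[i] = alphabet[(a * x + b) % mod];
        }
    }
}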
Example #2
// Generation of the AES Sbox
void gensbox()
{
  int i;
  byte x=0;
  printf("byte sbox[256]={");
  for(i=0;i<256;i++)
  {
    if((i%8)==0) printf("\n");
    printf("0x%02x",affine(inverse(x)));
    x++;
    if(i<255) printf(",");
  }
  printf("};\n");
}
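
gensbox() above assumes helpers named inverse() and affine(): the AES S-box is the affine transformation applied to the multiplicative inverse in GF(2^8), with inverse(0) defined as 0. A minimal sketch of such helpers, using a brute-force inverse (the names match the call sites above, but these bodies are assumptions, not the original ones):

typedef unsigned char byte;   /* as used by gensbox() above */

#define ROTL8(x, n) ((byte)(((x) << (n)) | ((x) >> (8 - (n)))))

/* GF(2^8) multiplication modulo the AES polynomial x^8 + x^4 + x^3 + x + 1. */
static byte gmul(byte a, byte b) {
    byte p = 0;
    while (b) {
        if (b & 1) p ^= a;
        a = (byte)((a << 1) ^ ((a & 0x80) ? 0x1b : 0x00));
        b >>= 1;
    }
    return p;
}

/* Brute-force multiplicative inverse in GF(2^8); inverse(0) == 0 by convention. */
byte inverse(byte x) {
    int y;
    if (x == 0) return 0;
    for (y = 1; y < 256; ++y)
        if (gmul(x, (byte)y) == 1) return (byte)y;
    return 0; /* unreachable for x != 0 */
}

/* AES affine transformation: b ^ rot(b,1) ^ rot(b,2) ^ rot(b,3) ^ rot(b,4) ^ 0x63. */
byte affine(byte b) {
    return (byte)(b ^ ROTL8(b, 1) ^ ROTL8(b, 2) ^ ROTL8(b, 3) ^ ROTL8(b, 4) ^ 0x63);
}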
Example #3
/*!
 * \brief shot_detector::processCloud The service function. It processes the clouds with SHOT and returns the pose of the object.
 * \param req
 * \param res
 * \return
 */
bool shot_detector::processCloud(apc_msgs::shot_detector_srv::Request &req, apc_msgs::shot_detector_srv::Response &res)
{
    pcl_functions::convertMsg(req.targetcloud, model);
    //loadModel(*model,"/home/niko/projects/apc/catkin/src/apc_ros/apc_object_detection/optimized_poisson_textured_mesh.ply");
    pcl_functions::convertMsg(req.pointcloud, scene);
    //pcl::io::loadPCDFile("/home/niko/projects/apc/catkin/src/apc_ros/apc_object_detection/niko_file.pcd",*scene);
    std::cerr << "Originally positions" << std::endl;
    std::cerr << scene->points[1].x << std::endl;
    std::cerr << scene->points[1].y << std::endl;
    std::cerr << scene->points[1].z << std::endl;
    //Downsample the model and the scene so they have roughly the same resolution
    pcl::PointCloud<PointType>::Ptr scene_filter (new pcl::PointCloud<PointType> ());
    pcl_functions::voxelFilter(scene, scene_filter, voxel_sample_);
    scene = scene_filter;
    pcl::PointCloud<PointType>::Ptr model_filter (new pcl::PointCloud<PointType> ());
    pcl_functions::voxelFilter(model, model_filter, voxel_sample_);
    model = model_filter;
    // Randomly select a couple of keypoints so we don't calculate descriptors for everything
    sampleKeypoints(model, model_keypoints, model_ss_);
    sampleKeypoints(scene, scene_keypoints, scene_ss_);
    //Calculate the Normals
    calcNormals(model, model_normals);
    calcNormals(scene, scene_normals);
    //Calculate the shot descriptors at each keypoint in the scene
    calcSHOTDescriptors(model, model_keypoints, model_normals, model_descriptors);
    calcSHOTDescriptors(scene, scene_keypoints, scene_normals, scene_descriptors);
    ransac(rototranslations, model, scene);
    // Compare descriptors and try to find correspondences
    //  compare(model_descriptors,scene_descriptors);
    Eigen::Matrix4f pose;
    // if(model_scene_corrs->size ()!=0){
    //groupCorrespondences();
    pose = refinePose(rototranslations, model, scene);

    //}
    std::cerr << pose << std::endl;
    if(pose == Eigen::Matrix4f::Identity())
        return false;
    Eigen::Matrix4d md(pose.cast<double>());
    Eigen::Affine3d affine(md);
    geometry_msgs::Pose transform;
    tf::poseEigenToMsg(affine, transform);
    res.pose = transform;
    return true;
}
Example #4
/*!
  Creates a 6x6 jacobian from a transform.  Code adapted from Peter Corke's
  Robot Toolbox for MATLAB.

 MATLAB representation:
\verbatim
	J = [	t(1:3,1)'	cross(t(1:3,4),t(1:3,1))'
			t(1:3,2)'	cross(t(1:3,4),t(1:3,2))'
			t(1:3,3)'	cross(t(1:3,4),t(1:3,3))'
			zeros(3,3)		t(1:3,1:3)'		];
\endverbatim

The jacobian is returned in column-major format.
*/
void
transf::jacobian(double jac[])
{
  mat3 m = affine();
  vec3 p = translation();

  // first 3 columns of jac
  jac[0]  = m.element(0,0); jac[6]  = m.element(0,1); jac[12] = m.element(0,2);
  jac[1]  = m.element(1,0); jac[7]  = m.element(1,1); jac[13] = m.element(1,2);
  jac[2]  = m.element(2,0); jac[8]  = m.element(2,1); jac[14] = m.element(2,2);
  jac[3]  = 0.0;            jac[9]  = 0.0;            jac[15] = 0.0;
  jac[4]  = 0.0;            jac[10] = 0.0;            jac[16] = 0.0;
  jac[5]  = 0.0;            jac[11] = 0.0;            jac[17] = 0.0;

  // 4th column of jac
  jac[18] = p.y()*m.element(0,2) - p.z()*m.element(0,1);
  jac[19] = p.y()*m.element(1,2) - p.z()*m.element(1,1);
  jac[20] = p.y()*m.element(2,2) - p.z()*m.element(2,1);
  jac[21] = m.element(0,0);
  jac[22] = m.element(1,0);
  jac[23] = m.element(2,0);

  // 5th column of jac
  jac[24] = p.z()*m.element(0,0) - p.x()*m.element(0,2);
  jac[25] = p.z()*m.element(1,0) - p.x()*m.element(1,2);
  jac[26] = p.z()*m.element(2,0) - p.x()*m.element(2,2);
  jac[27] = m.element(0,1);
  jac[28] = m.element(1,1);
  jac[29] = m.element(2,1);

  // 6th column of jac
  jac[30] = p.x()*m.element(0,1) - p.y()*m.element(0,0);
  jac[31] = p.x()*m.element(1,1) - p.y()*m.element(1,0);
  jac[32] = p.x()*m.element(2,1) - p.y()*m.element(2,0);
  jac[33] = m.element(0,2);
  jac[34] = m.element(1,2);
  jac[35] = m.element(2,2);
}
Example #5
void Knob::initPath() {
    double data[DATA_LEN][DATA_LEN];
    for (int i=0;i<DATA_LEN;i++) {
        for (int j=0;j<DATA_LEN;j++) {
            data[i][j] = ctl[i][j];
        }
    }

    jitter (data, XVARY, YVARY, XDBVARY, XDFVARY);
    cPath.setFillRule(Qt::WindingFill);
    cPathReverse.setFillRule(Qt::WindingFill);
    cPath.moveTo(data[0][X],data[0][Y]);
    cPathReverse.moveTo(data[DATA_LEN-1][X],data[DATA_LEN-1][Y]);
    for (int i = 0; i < (DATA_LEN-1); i++) {
      curveTo (cPath,        data,               i, true);
      curveTo (cPathReverse, data,    DATA_LEN-1-i, false);
    }

    // Transform to coincide with line segment (x1,y1)-(x2,y2)

    QMatrix affine(x2-x1, y2-y1,  y1-y2, x2-x1,  x1, y1);
    cPath=affine.map(cPath);
    cPathReverse=affine.map(cPathReverse);
}
Example #6
void render(int w, int h, int nFuncs, TransformFunc *funcs, int num) {
    Color **hist = malloc(w * sizeof(Color*));
    for (int x = 0; x < w; ++x) {
        hist[x] = malloc(h * sizeof(Color));
        for (int y = 0; y < h; ++y) {
            hist[x][y] = (Color) {0.0, 0.0, 0.0};
        }
    }

    float maxR = 0, maxG = 0, maxB = 0;
    Point p = {0.5, 0.5};
    float color = 0.5;
    for (unsigned long long i = 0; i < ((unsigned long long)(w) *
                (unsigned long long)(w) * (unsigned long long)(h) *
                (unsigned long long)(h) / 10000ULL); ++i) {
        int affIdx = rand() % (sizeof(affines) / sizeof(float[6]));
        affine(&p, affines[affIdx]);
        int idx = rand() % nFuncs;
        funcs[idx](&p, affines[affIdx]);
        int x = (int)((p.x + 1) * (w / 2));
        int y = (int)((p.y + 1) * (h / 2));
        color = (color + ((float)idx / nFuncs)) / 2;
        if (x >= 0 && x < w && y >= 0 && y < h) {
            // do NOT try to "optimize" this by creating a 'rgb' local variable
            // for some reason that makes it slower
            hist[x][y].r += hue2rgb(color).r;
            if (hist[x][y].r > maxR) maxR = hist[x][y].r;
            hist[x][y].g += hue2rgb(color).g;
            if (hist[x][y].g > maxG) maxG = hist[x][y].g;
            hist[x][y].b += hue2rgb(color).b;
            if (hist[x][y].b > maxB) maxB = hist[x][y].b;
        }
    }

    output(hist, w, h, num, maxR, maxG, maxB);
}
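
render() above depends on a global table affines and an affine() helper that are not shown. A minimal, hypothetical sketch of what they might look like; each row holds the six coefficients (a, b, c, d, e, f) of a 2-D affine map, and the coefficient values here are placeholders chosen only to make the sketch self-contained:

typedef struct { float x, y; } Point;   /* as used by render() above */

/* Placeholder affine maps; a real flame would use many, with rotation and shear. */
static float affines[][6] = {
    { 0.5f, 0.0f, 0.0f,   0.0f, 0.5f, 0.0f },
    { 0.5f, 0.0f, 0.5f,   0.0f, 0.5f, 0.0f },
    { 0.5f, 0.0f, 0.0f,   0.0f, 0.5f, 0.5f },
};

/* Apply x' = a*x + b*y + c, y' = d*x + e*y + f to the point in place. */
void affine(Point *p, const float c[6]) {
    float x = c[0] * p->x + c[1] * p->y + c[2];
    float y = c[3] * p->x + c[4] * p->y + c[5];
    p->x = x;
    p->y = y;
}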
Example #7
static void test_invert(skiatest::Reporter* reporter) {
    SkMatrix44 inverse(SkMatrix44::kUninitialized_Constructor);
    double inverseData[16];

    SkMatrix44 identity(SkMatrix44::kIdentity_Constructor);
    identity.invert(&inverse);
    inverse.asRowMajord(inverseData);
    assert16<double>(reporter, inverseData,
                     1, 0, 0, 0,
                     0, 1, 0, 0,
                     0, 0, 1, 0,
                     0, 0, 0, 1);

    SkMatrix44 translation(SkMatrix44::kUninitialized_Constructor);
    translation.setTranslate(2, 3, 4);
    translation.invert(&inverse);
    inverse.asRowMajord(inverseData);
    assert16<double>(reporter, inverseData,
                     1, 0, 0, -2,
                     0, 1, 0, -3,
                     0, 0, 1, -4,
                     0, 0, 0, 1);

    SkMatrix44 scale(SkMatrix44::kUninitialized_Constructor);
    scale.setScale(2, 4, 8);
    scale.invert(&inverse);
    inverse.asRowMajord(inverseData);
    assert16<double>(reporter, inverseData,
                     0.5, 0,    0,     0,
                     0,   0.25, 0,     0,
                     0,   0,    0.125, 0,
                     0,   0,    0,     1);

    SkMatrix44 scaleTranslation(SkMatrix44::kUninitialized_Constructor);
    scaleTranslation.setScale(32, 128, 1024);
    scaleTranslation.preTranslate(2, 3, 4);
    scaleTranslation.invert(&inverse);
    inverse.asRowMajord(inverseData);
    assert16<double>(reporter, inverseData,
                     0.03125,  0,          0,            -2,
                     0,        0.0078125,  0,            -3,
                     0,        0,          0.0009765625, -4,
                     0,        0,          0,             1);

    SkMatrix44 rotation(SkMatrix44::kUninitialized_Constructor);
    rotation.setRotateDegreesAbout(0, 0, 1, 90);
    rotation.invert(&inverse);
    SkMatrix44 expected(SkMatrix44::kUninitialized_Constructor);
    double expectedInverseRotation[16] =
    {   0,  1, 0, 0,
        -1, 0, 0, 0,
        0,  0, 1, 0,
        0,  0, 0, 1
    };
    expected.setRowMajord(expectedInverseRotation);
    REPORTER_ASSERT(reporter, nearly_equal(expected, inverse));

    SkMatrix44 affine(SkMatrix44::kUninitialized_Constructor);
    affine.setRotateDegreesAbout(0, 0, 1, 90);
    affine.preScale(10, 20, 100);
    affine.preTranslate(2, 3, 4);
    affine.invert(&inverse);
    double expectedInverseAffine[16] =
    {   0,    0.1,  0,   -2,
        -0.05, 0,   0,   -3,
        0,     0,  0.01, -4,
        0,     0,   0,   1
    };
    expected.setRowMajord(expectedInverseAffine);
    REPORTER_ASSERT(reporter, nearly_equal(expected, inverse));

    SkMatrix44 perspective(SkMatrix44::kIdentity_Constructor);
    perspective.setDouble(3, 2, 1.0);
    perspective.invert(&inverse);
    double expectedInversePerspective[16] =
    {   1, 0,  0, 0,
        0, 1,  0, 0,
        0, 0,  1, 0,
        0, 0, -1, 1
    };
    expected.setRowMajord(expectedInversePerspective);
    REPORTER_ASSERT(reporter, nearly_equal(expected, inverse));

    SkMatrix44 affineAndPerspective(SkMatrix44::kIdentity_Constructor);
    affineAndPerspective.setDouble(3, 2, 1.0);
    affineAndPerspective.preScale(10, 20, 100);
    affineAndPerspective.preTranslate(2, 3, 4);
    affineAndPerspective.invert(&inverse);
    double expectedInverseAffineAndPerspective[16] =
    {   0.1, 0,    2,   -2,
        0,  0.05,  3,   -3,
        0,   0,   4.01, -4,
        0,   0,   -1,    1
    };
    expected.setRowMajord(expectedInverseAffineAndPerspective);
    REPORTER_ASSERT(reporter, nearly_equal(expected, inverse));
}
Example #8
double cost6(double x1, double x2, double x3, double x4, double x5, double x6) {
    return affine(x1, x2, x3, x4, x5, x6);
}
Example #9
  void GeometricConsistencyGrouping::recognize(
    const sensor_msgs::PointCloud2::ConstPtr& scene_cloud_msg,
    const sensor_msgs::PointCloud2::ConstPtr& scene_feature_msg)
  {
    boost::mutex::scoped_lock lock(mutex_);
    if (!reference_cloud_ || !reference_feature_) {
      NODELET_ERROR_THROTTLE(1.0, "Reference is not yet available");
      return;
    }

    pcl::PointCloud<pcl::SHOT352>::Ptr
      scene_feature (new pcl::PointCloud<pcl::SHOT352>);
    pcl::PointCloud<pcl::PointNormal>::Ptr
      scene_cloud (new pcl::PointCloud<pcl::PointNormal>);
    pcl::fromROSMsg(*scene_cloud_msg, *scene_cloud);
    pcl::fromROSMsg(*scene_feature_msg, *scene_feature);

    pcl::CorrespondencesPtr model_scene_corrs (new pcl::Correspondences ());
    pcl::KdTreeFLANN<pcl::SHOT352> match_search;
    match_search.setInputCloud (reference_feature_);

    for (size_t i = 0; i < scene_feature->size(); ++i)
    {
      std::vector<int> neigh_indices(1);
      std::vector<float> neigh_sqr_dists(1);
      if (!pcl_isfinite (scene_feature->at(i).descriptor[0])) { //skipping NaNs
        continue;
      }
      int found_neighs
        = match_search.nearestKSearch(scene_feature->at(i), 1,
                                      neigh_indices, neigh_sqr_dists);
      //  add match only if the squared descriptor distance is less than 0.25
      // (SHOT descriptor distances are between 0 and 1 by design)
      if (found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) {
        pcl::Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
        model_scene_corrs->push_back (corr);
      }
    }

    pcl::GeometricConsistencyGrouping<pcl::PointNormal, pcl::PointNormal> gc_clusterer;
    gc_clusterer.setGCSize(gc_size_);
    gc_clusterer.setGCThreshold(gc_thresh_);

    gc_clusterer.setInputCloud(reference_cloud_);
    gc_clusterer.setSceneCloud(scene_cloud);
    gc_clusterer.setModelSceneCorrespondences(model_scene_corrs);

    //gc_clusterer.cluster (clustered_corrs);
    std::vector<pcl::Correspondences> clustered_corrs;
    std::vector<Eigen::Matrix4f,
                Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
    gc_clusterer.recognize(rototranslations, clustered_corrs);
    if (rototranslations.size() > 0) {
      NODELET_INFO("detected %lu objects", rototranslations.size());
      Eigen::Matrix4f result_mat = rototranslations[0];
      Eigen::Affine3f affine(result_mat);
      geometry_msgs::PoseStamped ros_pose;
      tf::poseEigenToMsg(affine, ros_pose.pose);
      ros_pose.header = scene_cloud_msg->header;
      pub_output_.publish(ros_pose);
    }
    else {
      NODELET_WARN("Failed to find object");
    }
    
  }
Example #10
static int canvasFunc( ClientData data, Tcl_Interp *interp,
      int objc, Tcl_Obj * const objv[] )
{
   const char *cmds[] = { "delete", "configure", "cget", "isMapped",
         "getCurrentSize", "update", "raise", "lower", 
         "create", "itemDelete", "itemShow",
         "itemConfigure", "itemCget", "itemCommand", 
         "affine", "scale", "move", "rotate",
         "windowToCanvas", "canvasToWindow",
         "findItemAt", "getBounds", "findWithTag",
         NULL };
   enum cmdIdx { DeleteIdx, ConfigureIdx, CgetIdx, IsMappedIdx,
         GetCurSizeIdx, UpdateIdx, RaiseIdx, LowerIdx,
         CreateIdx, ItemDeleteIdx, ItemShowIdx,
         ItemConfigureIdx, ItemCgetIdx, ItemCommandIdx, 
         AffineIdx, ScaleIdx, MoveIdx, RotateIdx,
         WindowToCanvasIdx, CanvasToWindowIdx,
         FindItemAtIdx, GetBoundsIdx, GetIDsFromTagIdx };
   CanvasParams *para = (CanvasParams *)data;
   GtkWidget    *widget = GTK_WIDGET( para->canvas );
   int idx;

   if( objc < 2 )
   {
      Tcl_WrongNumArgs( interp, 1, objv, "command" );
      return TCL_ERROR;
   }

   if( Tcl_GetIndexFromObj( interp, objv[1], cmds, "command", 
         TCL_EXACT, &idx ) != TCL_OK )
      return TCL_ERROR;

   switch( idx )
   {
      case DeleteIdx:
            return gnoclDelete( interp, widget, objc, objv );

      case ConfigureIdx:
            {
               int ret = TCL_ERROR;
               if( gnoclParseAndSetOptions( interp, objc - 1, objv + 1, 
                     canvasOptions, G_OBJECT( widget ) ) == TCL_OK )
               {

                  if( canvasOptions[antialiasedIdx].status 
                        == GNOCL_STATUS_CHANGED )
                  {
                     Tcl_SetResult( interp, "antialiasing cannot be changed "
                           "after creation", TCL_STATIC );
                  }
                  else
                     ret = configure( interp, para, canvasOptions );
               }
               gnoclClearOptions( canvasOptions );
               return ret;
            }
            break;
      case CgetIdx:
            {
               int     idx;
               switch( gnoclCget( interp, objc, objv, G_OBJECT( widget ), 
                     canvasOptions, &idx ) )
               {
                  case GNOCL_CGET_ERROR:  
                           return TCL_ERROR;
                  case GNOCL_CGET_HANDLED:
                           return TCL_OK;
                  case GNOCL_CGET_NOTHANDLED:
                           return cget( interp, para->canvas, 
                                 canvasOptions, idx );
               }
               break;
            }
      case IsMappedIdx:
            return isMapped( interp, widget, objc, objv );
      case GetCurSizeIdx:
            return getCurSize( interp, widget, objc, objv );
      case UpdateIdx:
            if( objc != 2 )
            {
               Tcl_WrongNumArgs( interp, 2, objv, NULL );
               return TCL_ERROR;
            }
            gnome_canvas_update_now( para->canvas );
            break;
      case CreateIdx:
            return canvasCreateItem( interp, objc, objv, para );
      case RaiseIdx:
      case LowerIdx:
      case ItemDeleteIdx:
      case ItemShowIdx:
      case ItemConfigureIdx:
      case ItemCgetIdx:
      case ItemCommandIdx:
      case AffineIdx:
      case ScaleIdx:
      case MoveIdx:
      case RotateIdx:
      case GetBoundsIdx:
      case GetIDsFromTagIdx:
            {
               GPtrArray *items;
               int       ret;

               if( objc < 3 )
               {
                  Tcl_WrongNumArgs( interp, 2, objv, 
                        "tag-or-id ?option val ...?" );
                  return TCL_ERROR;
               }
               if( gnoclCanvasItemsFromTagOrId( interp, para, 
                     Tcl_GetString( objv[2] ), &items ) != TCL_OK )
                  return TCL_ERROR;

               switch( idx )
               {
                  case RaiseIdx:
                  case LowerIdx:
                     ret = itemRaise( interp, objc, objv, para, items,
                           idx == RaiseIdx );
                     break;
                  case ItemDeleteIdx:
                     ret = itemDelete( interp, objc, objv, para, items );
                     break;
                  case ItemShowIdx:
                     ret = itemShow( interp, objc, objv, para, items );
                     break;
                  case ItemConfigureIdx:
                     ret = itemConfigure( interp, objc, objv, para, items );
                     break;
                  case ItemCgetIdx:
                     ret = itemCget( interp, objc, objv, para, items );
                     break;
                  case ItemCommandIdx:
                     ret = itemCommand( interp, objc, objv, para, items );
                     break;
                  case AffineIdx:
                     ret = affine( interp, objc, objv, para, items, Affine );
                     break;
                  case ScaleIdx:
                     ret = affine( interp, objc, objv, para, items, Scale );
                     break;
                  case MoveIdx:
                     ret = affine( interp, objc, objv, para, items, Move );
                     break;
                  case RotateIdx:
                     ret = affine( interp, objc, objv, para, items, Rotate );
                     break;
                  case GetBoundsIdx:
                     ret = itemBounds( interp, objc, objv, para, items );
                     break;
                  case GetIDsFromTagIdx:
                     ret = getIDs( interp, objc, objv, para, items );
                     break;
                  default:
                     assert( 0 );
               }
               if( items )
                  g_ptr_array_free( items, 0 );
               return ret;
            }
            break; 
      case WindowToCanvasIdx:
            return windowToCanvas( interp, objc, objv, para, 0 );
      case CanvasToWindowIdx:
            return windowToCanvas( interp, objc, objv, para, 1 );
      case FindItemAtIdx:
            return findItemAt( interp, objc, objv, para );
   }

   return TCL_OK;
}
Example #11
// Calculate the value and gradient components of the volume
// - these may be needed by the transfer functions
void vFileVolume::calcGradient()
{
	unsigned char *vdata = (unsigned char*)voxelData;
	float *gradV3 = new float[dimX*dimY*dimZ*3]; //individual components of the gradient
	float *gmag = new float[dimX*dimY*dimZ]; //gradient magnitude
	float gmmax = -100000000;
	float gmmin = 100000000;
	float dmax = -100000000;
	float dmin = 100000000;


	//compute the gradient
	cerr << "   computing 1st derivative";
	int i;
	for(i = 0; i < dimZ; ++i){
		for(int j = 0; j < dimY; ++j){
			for(int k = 0; k < dimX; ++k){
				if((k<1)||(k>dimX-2)||(j<1)||(j>dimY-2)||(i<1)||(i>dimZ-2)){
					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 0] = 0;
					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 1] = 0;
					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 2] = 0;
					gmag[i*dimX*dimY + j*dimX + k] = 0;
				}
				else {
					float dx = (float)(vdata[i*dimX*dimY + j*dimX + (k+1)]
									- vdata[i*dimX*dimY + j*dimX + (k-1)]);
					float dy = (float)(vdata[i*dimX*dimY + (j+1)*dimX + k]
									- vdata[i*dimX*dimY + (j-1)*dimX + k]);
					float dz = (float)(vdata[(i+1)*dimX*dimY + j*dimX + k]
									- vdata[(i-1)*dimX*dimY + j*dimX + k]);

					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 0] = dx;
					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 1] = dy;
					gradV3[i*dimX*dimY*3 + j*dimX*3 + k*3 + 2] = dz;
					gmag[i*dimX*dimY + j*dimX + k] = (float)sqrt(dx*dx+dy*dy+dz*dz);
					gmmax = MAX(gmag[i*dimX*dimY + j*dimX + k], gmmax);
					gmmin = MIN(gmag[i*dimX*dimY + j*dimX + k], gmmin);
					dmax = MAX(vdata[i*dimX*dimY + j*dimX +k], dmax);
					dmin = MIN(vdata[i*dimX*dimY + j*dimX +k], dmin);
				}
			}
		}
	}
	cerr << " - done" << endl;
	cerr << "   quantizing";
	gradData = new unsigned char[dimX*dimY*dimZ]; //stores the 1st derivative

	for(i = 0; i < dimZ; ++i){
		for(int j = 0; j < (dimY); ++j){
			for(int k = 0; k < (dimX); ++k){
				if((k<1)||(k>dimX-2)||(j<1)||(j>dimY-2)||(i<1)||(i>dimZ-2)){
					gradData[i*dimX*dimY + j*dimX + k] = 0;
				}
				else {
					gradData[i*dimX*dimY + j*dimX + k] = (unsigned char)affine(gmmin, gmag[i*dimX*dimY + j*dimX + k], gmmax, 0, 255);
				}
			}
		}
	}
	cerr << " - done" << endl;
	delete [] gradV3;
	delete [] gmag;
}
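
The quantization step above calls affine(gmmin, g, gmmax, 0, 255), which suggests a helper that linearly remaps a value from one interval onto another. A plausible sketch matching that call signature (the body is an assumption, not the original helper):

/* Affinely remap x from the input interval [i0, i1] onto the output interval [o0, o1]. */
float affine(float i0, float x, float i1, float o0, float o1)
{
	if (i1 == i0) return o0;  /* degenerate input range */
	return o0 + (x - i0) * (o1 - o0) / (i1 - i0);
}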
Example #12
void colorengine::threadFunc()
{
    std::unique_ptr<float[]> regdata(new float[width_*height_]);

    std::vector<float> scalar_constant(3, 0.0f);

    _XTIFFInitialize();
    rawdata_tiff = TIFFOpen(raw_tiff_path.c_str(), "w");
    TIFFSetField(rawdata_tiff, TIFFTAG_NFILTERS, filter_->nfilters());
    TIFFSetField(rawdata_tiff, TIFFTAG_NLIGHTS, nlights_);

    for (auto filter_index = 0; filter_index < filter_->nfilters(); ++filter_index) {
        std::vector<float> cmf = filter_->cmfValues(filter_->wavelengthAtPos(filter_index));
        float illuminant = filter_->illuminantValue(filter_->wavelengthAtPos(filter_index));
        for (auto xyz_index = 0; xyz_index < 3; ++xyz_index) {
            scalar_constant[xyz_index] += cmf[xyz_index] * illuminant;
        }
    }
    int page_index = 0;
    for (int filter_index = 0; filter_index < filter_->nfilters(); ++filter_index) {  
        for (int light_index = 0; light_index < nlights_; ++light_index) {
            std::shared_ptr<unsigned short> data = data_queue_.pop();
            if (cancel_) {
                return;
            }
            std::unique_ptr<float[]> floatdata(new float[width_*height_]);

            for (auto y = 0; y < height_; ++y) {
                for (auto x = 0; x < width_; ++x) {
                    if (data.get()[y*width_+x] - bias_data.get()[y*width_+x] < 0) {
                        data.get()[y*width_+x] = 0;
                    } else {
                        data.get()[y*width_+x] -= bias_data.get()[y*width_+x];
                    }
                }
            }
            for (auto y = 0; y < height_; ++y) {
                for (auto x = 0; x <width_; ++x) {
                    if (flat_data[light_index]->filterData(filter_index)[y*width_+x] == 0) {
                        floatdata.get()[y*width_+x] = 0;
                    } else {
                        floatdata.get()[y*width_+x] = (float)data.get()[y*width_+x] / (float)flat_data[light_index]->filterData(filter_index)[y*width_+x];
                    }
                }
            }
            // subtract bias, divide flat field
            cv::Mat floatdatamat(height_, width_, CV_32F, floatdata.get());
            // Normalize data to a white reference

            std::vector<float> values;

            for (auto y = wtpt_rect_.y(); y < wtpt_rect_.y() + wtpt_rect_.size().height(); ++y) {
                for (auto x = wtpt_rect_.x(); x < wtpt_rect_.x() + wtpt_rect_.size().width(); ++x) {
                    values.push_back(floatdata.get()[y*width_+x]);
                }
            }
            std::nth_element(values.begin(), values.begin()+(values.size()/2), values.end()); // selects the median in linear time
            float measured_wtpt = values[values.size()/2];

            for (auto y = 0; y < height_; ++y) {
                for (auto x = 0; x < width_; ++x) {
                    floatdata.get()[y*width_+x] /= (measured_wtpt / absolute_wtpt_values_[filter_index]);
                }
            }

            if (filter_index == 0) { // use first filter as registration target
                for (auto y = 0; y < height_; ++y) {
                    for (auto x = 0; x < width_; ++x) {
                        regdata.get()[y*width_+x] = floatdata.get()[y*width_+x];
                    }
                }
                cv::Mat regdatamat(height_, width_, CV_32F, regdata.get());

            } else {                // Register image to regtarget_data
                // Phase-correlate based registration algorithm to align image planes based on two concentric circle targets
                // Concentric targets are used because of their non-repeating nature; phase correlate gets tripped up by repeating patterns as the peaks can be matched at errant points

                // cv::Mat construction is efficient and does not copy data. Initialize the registration target from our regdata


                cv::Mat registerTo(height_, width_, CV_32F, regdata.get());
                cv::Mat reg0_source, reg0_target, reg1_source, reg1_target;
                cv::Mat reg0_sourcef, reg0_targetf, reg1_sourcef, reg1_targetf;

                // These cv::Mat regions are our registration targets, selected by the user in the main application.

                // reg0_source = registration target 0 on the image that is going to be registered
                // reg0_target = registration target 0 on the image that is being registered to
                reg0_sourcef = floatdatamat(cv::Range(regtargets[0].y(), regtargets[0].y() + regtargets[0].size().height()), cv::Range(regtargets[0].x(), regtargets[0].x() + regtargets[0].size().width()));
                reg0_targetf = registerTo(cv::Range(regtargets[0].y(), regtargets[0].y() + regtargets[0].size().height()), cv::Range(regtargets[0].x(), regtargets[0].x() + regtargets[0].size().width()));// - (reg_size/2), regtargets[0].y + (reg_size/2)), cv::Range(regtargets[0].x - (reg_size/2), regtargets[0].x + (reg_size/2)));
                reg1_sourcef = floatdatamat(cv::Range(regtargets[1].y(), regtargets[1].y() + regtargets[1].size().height()), cv::Range(regtargets[1].x(), regtargets[1].x() + regtargets[1].size().width()));// - (reg_size/2), regtargets[1].y + (reg_size/2)), cv::Range(regtargets[1].x - (reg_size/2), regtargets[1].x + (reg_size/2)));
                reg1_targetf = registerTo(cv::Range(regtargets[1].y(), regtargets[1].y() + regtargets[1].size().height()), cv::Range(regtargets[1].x(), regtargets[1].x() + regtargets[1].size().width()));// - (reg_size/2), regtargets[1].y + (reg_size/2)), cv::Range(regtargets[1].x - (reg_size/2), regtargets[1].x + (reg_size/2)));

                double min, max;
                cv::Point minLoc, maxLoc;

                cv::minMaxLoc(reg0_sourcef, &min, &max, &minLoc, &maxLoc);
                reg0_sourcef *= 255/max;
                cv::minMaxLoc(reg1_sourcef, &min, &max, &minLoc, &maxLoc);
                reg1_sourcef *= 255/max;
                cv::minMaxLoc(reg0_targetf, &min, &max, &minLoc, &maxLoc);
                reg0_targetf *= 255/max;
                cv::minMaxLoc(reg1_targetf, &min, &max, &minLoc, &maxLoc);
                reg1_targetf *= 255/max;

                reg0_sourcef.convertTo(reg0_source, CV_8U);
                reg0_targetf.convertTo(reg0_target, CV_8U);
                reg1_sourcef.convertTo(reg1_source, CV_8U);
                reg1_targetf.convertTo(reg1_target, CV_8U);

                cv::adaptiveThreshold(reg0_source, reg0_source, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2);
                cv::adaptiveThreshold(reg0_target, reg0_target, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2);
                cv::adaptiveThreshold(reg1_source, reg1_source, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2);
                cv::adaptiveThreshold(reg1_target, reg1_target, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2);

                reg0_source.convertTo(reg0_source, CV_32F);
                reg1_source.convertTo(reg1_source, CV_32F);
                reg0_target.convertTo(reg0_target, CV_32F);
                reg1_target.convertTo(reg1_target, CV_32F);

                reg0_sourcef /= 255;
                reg0_targetf /= 255;
                reg1_sourcef /= 255;
                reg1_targetf /= 255;

                cv::Point2d offset_center = cv::phaseCorrelate(reg0_source, reg0_target);
                cv::Point2d offset_corner = cv::phaseCorrelate(reg1_source, reg1_target);

                float r, r_prime, deltaX, deltaY;

                deltaX = offset_corner.x - offset_center.x;
                deltaY = offset_corner.y - offset_center.y;

                r = pow((regtargets[1].center().x() - regtargets[0].center().x()), 2) + pow((regtargets[1].center().y() - regtargets[0].center().y()), 2);
                r = sqrt(r);

                r_prime = pow((regtargets[1].center().x() - regtargets[0].center().x() + deltaX), 2) + pow((regtargets[1].center().y() - regtargets[0].center().y() + deltaY), 2);
                r_prime = sqrt(r_prime);


                float scale = r_prime / r;
                float trans_x = (floatdatamat.size().width  / 2) * (1-scale);
                float trans_y = (floatdatamat.size().height / 2) * (1-scale);

                std::vector<float> matrix_data(6);
                matrix_data[0] = scale;
                matrix_data[1] = 0;
                matrix_data[2] = trans_x;
                matrix_data[3] = 0;
                matrix_data[4] = scale;
                matrix_data[5] = trans_y;

                cv::Mat affine(2, 3, CV_32F, matrix_data.data());
                cv::Mat scaled;
                cv::warpAffine(floatdatamat, scaled, affine, floatdatamat.size());

                cv::Mat scaled_target = scaled(cv::Range(regtargets[0].y(), regtargets[0].y() + regtargets[0].size().height()), cv::Range(regtargets[0].x(), regtargets[0].x() + regtargets[0].size().width()));
                cv::Point2d translation_offset = cv::phaseCorrelate(scaled_target, reg0_target);


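                // The cv::Mat 'affine' created above wraps matrix_data without copying it, so
                // adjusting matrix_data here also updates the matrix passed to warpAffine below.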
                matrix_data[2] += translation_offset.x;
                matrix_data[5] += translation_offset.y;

                cv::warpAffine(floatdatamat, floatdatamat, affine, floatdatamat.size());
            }
            if (raw_tiff_path.size() > 0) {
                TIFFSetField(rawdata_tiff, TIFFTAG_IMAGEWIDTH, width_);
                TIFFSetField(rawdata_tiff, TIFFTAG_IMAGELENGTH, height_);
                TIFFSetField(rawdata_tiff, TIFFTAG_BITSPERSAMPLE, sizeof(float) * 8);
                TIFFSetField(rawdata_tiff, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_IEEEFP);
                TIFFSetField(rawdata_tiff, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
                TIFFSetField(rawdata_tiff, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK);
                TIFFSetField(rawdata_tiff, TIFFTAG_SAMPLESPERPIXEL, 1);
                TIFFSetField(rawdata_tiff, TIFFTAG_SUBFILETYPE, FILETYPE_PAGE);
                TIFFSetField(rawdata_tiff, TIFFTAG_WTPTVAL, absolute_wtpt_values_[filter_index]);
                TIFFSetField(rawdata_tiff, TIFFTAG_WTPTMEASURED, measured_wtpt);
                TIFFSetField(rawdata_tiff, TIFFTAG_PAGENUMBER, page_index);

                for (auto row = 0; row < height_; ++row) {
                    TIFFWriteScanline(rawdata_tiff, &floatdata.get()[row*width_], row);
                }
                TIFFWriteDirectory(rawdata_tiff);
                ++page_index;
            }

            int wavelength = filter_->wavelengthAtPos(filter_index);
            std::vector<float> cmf = filter_->cmfValues(wavelength);
            float illuminant = filter_->illuminantValue(wavelength);
            for (auto y = 0; y < height_; ++y) {
                for (auto x = 0; x < width_; ++x) {
                    for (auto xyz_index = 0; xyz_index < 3; ++xyz_index) {
                        xyz_data[light_index].get()->filterData(xyz_index)[y*width_+x] += floatdata.get()[y*width_+x] * cmf[xyz_index] * illuminant;
                    }
                }
            }
        }
    }
    if (raw_tiff_path.size() > 0) {
        TIFFClose(rawdata_tiff);
    }
    // Scale xyz data by scalar constant
    for (auto light = 0; light < nlights_; ++light) {
        for (auto y = 0; y < height_; ++y) {
            for (auto x = 0; x < width_; ++x) {
                for (auto xyz_index = 0; xyz_index < 3; ++xyz_index) {
                     xyz_data[light].get()->filterData(xyz_index)[y*width_+x] /= scalar_constant[xyz_index];
                }
            }
        }
    }


    std::vector<XYZImage*> xyz_ptr;
    for (auto i = 0; i < xyz_data.size(); ++i) {
        xyz_ptr.push_back(xyz_data[i].get());
    }
    std::vector<float> weights(nlights_);
    for (auto weight = 0; weight < weights.size(); ++weight) {
       weights[weight] = 1.0 / float(nlights_);
    }

    master_xyz = std::shared_ptr<XYZImage>(new XYZImage(xyz_ptr, nlights_, weights));
}