示例#1
0
// Quantize a 32-bit RGBA image down to at most `colors` palette entries
// using a hextree, then write it as a paletted PNG whose bit depth
// (1/4/8) is chosen from the final palette size.
//   file       - output sink, forwarded to save_as_png
//   image      - source image of packed RGBA words (T2 is the image type)
//   colors     - maximum palette size (up to 256)
//   trans_mode - transparency mode forwarded to the hextree;
//                negative keeps the tree's default
//   gamma      - gamma used during quantization; ignored when <= 0
void save_as_png256_hex(T1 & file, T2 const& image, int colors = 256, int trans_mode = -1, double gamma = 2.0)
{
    unsigned width = image.width();
    unsigned height = image.height();

    // structure for color quantization
    hextree<mapnik::rgba> tree(colors);
    if (trans_mode >= 0)
        tree.setTransMode(trans_mode);
    if (gamma > 0)
        tree.setGamma(gamma);

    // First pass: feed every pixel into the tree so it can choose a palette.
    for (unsigned y = 0; y < height; ++y)
    {
        typename T2::pixel_type const * row = image.getRow(y);
        for (unsigned x = 0; x < width; ++x)
        {
            unsigned val = row[x];
            tree.insert(mapnik::rgba(U2RED(val), U2GREEN(val), U2BLUE(val), U2ALPHA(val)));
        }
    }

    //transparency values per palette index
    std::vector<mapnik::rgba> pal;
    tree.create_palette(pal);
    assert(int(pal.size()) <= colors);

    // Split the RGBA palette into the RGB palette + parallel alpha table
    // layout that save_as_png expects.
    std::vector<mapnik::rgb> palette;
    std::vector<unsigned> alphaTable;
    for(unsigned i=0; i<pal.size(); i++)
    {
        palette.push_back(rgb(pal[i].r, pal[i].g, pal[i].b));
        alphaTable.push_back(pal[i].a);
    }

    if (palette.size() > 16 )
    {
        // >16 && <=256 colors -> write 8-bit color depth
        image_data_8 reduced_image(width, height);

        // Second pass: map each source pixel to its nearest palette index.
        for (unsigned y = 0; y < height; ++y)
        {
            mapnik::image_data_32::pixel_type const * row = image.getRow(y);
            mapnik::image_data_8::pixel_type  * row_out = reduced_image.getRow(y);

            for (unsigned x = 0; x < width; ++x)
            {
                unsigned val = row[x];
                mapnik::rgba c(U2RED(val), U2GREEN(val), U2BLUE(val), U2ALPHA(val));
                row_out[x] = tree.quantize(c);
            }
        }
        save_as_png(file, palette, reduced_image, width, height, 8, alphaTable);
    }
    else if (palette.size() == 1)
    {
        // 1 color image ->  write 1-bit color depth PNG
        // Buffer holds 1 bit per pixel: width/8 bytes rounded up to a
        // multiple of 8 bytes.
        unsigned image_width  = (int(0.125*width) + 7)&~7;
        unsigned image_height = height;
        image_data_8 reduced_image(image_width, image_height);
        // Every pixel is palette index 0, so zeroing the buffer suffices.
        reduced_image.set(0);
        save_as_png(file, palette, reduced_image, width, height, 1, alphaTable);
    }
    else
    {
        // <=16 colors -> write 4-bit color depth PNG
        // Buffer holds 2 pixels per byte: width/2 bytes rounded up to a
        // multiple of 4 bytes.
        unsigned image_width  = (int(0.5*width) + 3)&~3;
        unsigned image_height = height;
        image_data_8 reduced_image(image_width, image_height);
        for (unsigned y = 0; y < height; ++y)
        {
            mapnik::image_data_32::pixel_type const * row = image.getRow(y);
            mapnik::image_data_8::pixel_type  * row_out = reduced_image.getRow(y);
            byte index = 0;

            for (unsigned x = 0; x < width; ++x)
            {
                unsigned val = row[x];
                mapnik::rgba c(U2RED(val), U2GREEN(val), U2BLUE(val), U2ALPHA(val));
                index = tree.quantize(c);
                // Even pixels go in the high nibble, odd pixels in the low.
                if (x%2 == 0) index = index<<4;
                // NOTE(review): nibbles are OR-ed in without clearing first,
                // so this assumes image_data_8 zero-initializes its buffer —
                // confirm, otherwise reduced_image.set(0) is needed here too.
                row_out[x>>1] |= index;
            }
        }
        save_as_png(file, palette, reduced_image, width, height, 4, alphaTable);
    }
}   
示例#2
0
// Handle a drag-and-drop move of a bookmark item inside the sidebar tree:
// move the underlying KBookmark to its new group/position, then notify the
// bookmark manager about the group(s) that changed.
//   i     - the list-view item that was moved (must wrap a bookmark)
//   after - the item it was dropped after (NULL = first position)
void KonqSidebarBookmarkModule::slotMoved(Q3ListViewItem *i, Q3ListViewItem*, Q3ListViewItem *after)
{
    KonqSidebarBookmarkItem *item = dynamic_cast<KonqSidebarBookmarkItem*>( i );
    if (!item)
        return;
    KBookmark bookmark = item->bookmark();

    // Bookmark to insert after; stays null when dropped at the front.
    KBookmark afterBookmark;
    KonqSidebarBookmarkItem *afterItem = dynamic_cast<KonqSidebarBookmarkItem*>(after);
    if (afterItem)
        afterBookmark = afterItem->bookmark();

    KBookmarkGroup oldParentGroup = bookmark.parentGroup();
    KBookmarkGroup parentGroup;
    // try to get the parent group (assume that the QListViewItem has been reparented by K3ListView)...
    // if anything goes wrong, use the root.
    if (item->parent()) {
        bool error = false;

        KonqSidebarBookmarkItem *parent = dynamic_cast<KonqSidebarBookmarkItem*>( (item->parent()) );
        if (!parent) {
            error = true;
        } else {
            if (parent->bookmark().isGroup())
                parentGroup = parent->bookmark().toGroup();
            else
                error = true;
        }

        if (error)
            parentGroup = s_bookmarkManager->root();
    } else {
        // No parent! This means the user dropped it before the top level item
        // And K3ListView has moved the item there, we need to correct it
        tree()->moveItem(item, m_topLevelItem, 0L);
        parentGroup = s_bookmarkManager->root();
    }

    // remove the old reference.
    oldParentGroup.deleteBookmark( bookmark );

    // insert the new item.
    parentGroup.moveBookmark(bookmark, afterBookmark);

    // inform others about the changed groups. quite expensive, so do
    // our best to update them in only one emitChanged call.
    QString oldAddress = oldParentGroup.address();
    QString newAddress = parentGroup.address();
    if (oldAddress == newAddress) {
        s_bookmarkManager->emitChanged( parentGroup );
    } else {
        // Walk both addresses in parallel: if one address is a prefix of
        // the other, one group contains the other and a single
        // emitChanged suffices; otherwise both must be updated.
        // NOTE(review): this `i` shadows the Q3ListViewItem* parameter,
        // which is no longer needed at this point.
        int i = 0;
        while (true) {
            QChar c1 = oldAddress[i];
            QChar c2 = newAddress[i];
            if (c1 == QChar::null) {
                // oldParentGroup is probably parent of parentGroup.
                s_bookmarkManager->emitChanged( oldParentGroup );
                break;
            } else if (c2 == QChar::null) {
                // parentGroup is probably parent of oldParentGroup.
                s_bookmarkManager->emitChanged( parentGroup );
                break;
            } else {
                if (c1 == c2) {
                    // step to the next character.
                    ++i;
                } else {
                    // ugh... need to update both groups separately.
                    s_bookmarkManager->emitChanged( oldParentGroup );
                    s_bookmarkManager->emitChanged( parentGroup );
                    break;
                }
            }
        }
    }
}
示例#3
0
tree
sql_exec (url db_name, string cmd) {
  // Execute the SQL command `cmd` against the database `db_name`,
  // opening (and caching) a connection on first use.
  // Returns a TUPLE tree with one sub-tuple per result row; row 0 holds
  // the column names (sqlite3_get_table's header row), rows 1..N the
  // data, each cell quoted via scm_quote/sql_unescape.
  // On any error an empty TUPLE is returned.
  if (!sqlite3_initialized)
    tm_sqlite3_initialize ();
  if (sqlite3_error) {
    cout << "TeXmacs] ERROR: SQLite support not properly configured.\n";
    return tree (TUPLE);
  }
  string name= concretize (db_name);
  if (!sqlite3_connections->contains (name)) {
    c_string _name (name);
    sqlite3* db= NULL;
    //cout << "Opening " << _name << "\n";
    int status= SQLITE3_open (_name, &db);
    if (status == SQLITE_OK)
      sqlite3_connections (name) = (void*) db;
  }
  if (!sqlite3_connections->contains (name)) {
    cout << "TeXmacs] SQL error: database " << name << " could not be opened\n";
    return tree (TUPLE);
  }
  tree ret (TUPLE);
  sqlite3* db= (sqlite3*) sqlite3_connections [name];
  c_string _cmd (sql_escape (cmd));
  // BUG FIX: initialize the out-parameters.  sqlite3_get_table does not
  // set them on every error path, so reading them uninitialized after a
  // failure (as the old code did) was undefined behavior.
  char** tab= NULL;
  int rows= 0, cols= 0;
  char* err= NULL;
  //cout << "Executing " << _cmd << "\n";
  int status= SQLITE3_get_table (db, _cmd, &tab, &rows, &cols, &err);

  // Retry for up to ~10 seconds while another process holds the lock.
  // BUG FIX: `err` may be NULL on failure, so guard before building a
  // string from it.
  int attempt= 0;
  while (status != SQLITE_OK && err != NULL &&
         string (err) == string ("database is locked") &&
         attempt < 100) {
    usleep (100000);
    attempt++;
    status= SQLITE3_get_table (db, _cmd, &tab, &rows, &cols, &err);
  }

  if (status != SQLITE_OK) {
    // TODO: improve error handling (err should also be released via
    // sqlite3_free once a wrapper for it is available)
    cout << "TeXmacs] SQL error\n";
    if (err != NULL) cout << "TeXmacs] " << err << "\n";
  }

  // Only walk the result table on success: on failure `tab` is NULL and
  // `rows`/`cols` are meaningless.  The bound r <= rows is intentional —
  // sqlite3_get_table returns rows+1 rows, the first being the header.
  if (status == SQLITE_OK)
    for (int r=0; r<=rows; r++) {
      tree row (TUPLE);
      //cout << "  Row " << r << LF;
      for (int c=0; c<cols; c++) {
        int i= r*cols + c;
        if (tab[i] == NULL) row << tree (TUPLE);
        else {
          row << tree (scm_quote (sql_unescape (tab[i])));
          //cout << "    Column " << c << ": " << tab[i] << LF;
        }
      }
      ret << row;
    }

  if (tab != NULL) SQLITE3_free_table (tab);
  //cout << "Return " << ret << "\n";
  return ret;
}
示例#4
0
//***********************************************************
// segmentation
//  Perform Euclidean cluster segmentation of the input cloud.
//***********************************************************
// Filters the cloud to a slab just above m_size.max_z, extracts Euclidean
// clusters, and for each cluster whose centroid lies inside the m_size
// x/y bounds and in a narrow z band, records an object_map entry (RGB
// cloud, centroid, elevation map, normalized H-S color histogram) into
// the global Object_Map.  `cloud` is replaced by the accepted clusters.
//   cloud - input/output RGB point cloud (modified in place)
//   th    - cluster tolerance (distance threshold) for the extraction
//   c     - tag stored into object_map::tf (presumably a frame/time id —
//           TODO confirm with callers)
void segmentation(pcl::PointCloud<pcl::PointXYZRGB>& cloud, double th, int c)
{
    std::cout << "segmentation" << std::endl;

    pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmp_rgb (new pcl::PointCloud<pcl::PointXYZRGB>);
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr tmp_cloud (new pcl::PointCloud<pcl::PointXYZRGB>);

    // Keep only points in the 0.3 m slab directly above max_z.
    for(int i=0;i<cloud.points.size();i++){
        if((m_size.max_z <= cloud.points[i].z) && (cloud.points[i].z <= m_size.max_z+0.3)){
            tmp_cloud->points.push_back(cloud.points[i]);
        }
    }
    pcl::copyPointCloud(*tmp_cloud, cloud);

    // Creating the KdTree object for the search method of the extraction
    pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGB>);
    tree->setInputCloud (cloud.makeShared ());

    std::vector<pcl::PointIndices> cluster_indices;
    pcl::EuclideanClusterExtraction<pcl::PointXYZRGB> ec;
    ec.setClusterTolerance (th);
    ec.setMinClusterSize (200);
    ec.setMaxClusterSize (300000);
    ec.setSearchMethod (tree);
    ec.setInputCloud (cloud.makeShared ());
    ec.extract (cluster_indices);

    std::cout << "クラスタリング開始" << std::endl;
    int m = 0;

    for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it){
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_cluster_rgb (new pcl::PointCloud<pcl::PointXYZRGB>);
        pcl::PointCloud<pcl::PointXYZHSV>::Ptr cloud_cluster_hsv (new pcl::PointCloud<pcl::PointXYZHSV>);
        pcl::PointXYZ g;

        // Gather the cluster's points into their own cloud.
        for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); pit++){
            cloud_cluster_rgb->points.push_back (cloud.points[*pit]);
        }
        cloud_cluster_rgb->width = cloud_cluster_rgb->points.size ();
        cloud_cluster_rgb->height = 1;
        cloud_cluster_rgb->is_dense = true;

        // Compute the cluster centroid.
        g = g_pos(*cloud_cluster_rgb);

        // Skip clusters whose centroid falls outside the workspace bounds.
        if(((g.x < m_size.min_x) || (m_size.max_x < g.x)) ||
           ((g.y < m_size.min_y) || (m_size.max_y < g.y))){
            continue;
        }

        m++;

        // Accept only clusters whose centroid sits in a narrow band above
        // the surface (2-12 cm over max_z).
        if((m_size.max_z + 0.02 < g.z) && (g.z < m_size.max_z + 0.12)){
            object_map map;
            map.cloud_rgb = *cloud_cluster_rgb;
            map.g = g;
            map.tf = c;
            make_elevation(map);

            // RGB -> HSV
            pcl::PointCloudXYZRGBtoXYZHSV(*cloud_cluster_rgb, *cloud_cluster_hsv);

            // Zero the H-S histogram before accumulating.
            for(int i=0;i<MAP_H;i++){
              for(int j=0;j<MAP_S;j++){
                map.histogram[i][j] = 0.000;
              }
            }

            map.cloud_hsv = *cloud_cluster_hsv;
            // Build the H-S histogram.
            // NOTE(review): the bounds checks hard-code 32 rather than
            // MAP_H/MAP_S, and the saturation bin divisor uses MAP_H, not
            // MAP_S — confirm these are intentional (they match only if
            // MAP_H == MAP_S == 32).
            for(int i=0;i<cloud_cluster_hsv->points.size();i++){
                if(((int)(255*cloud_cluster_hsv->points[i].h)/((256/MAP_H)*360) >= 32) || ((int)(255*cloud_cluster_hsv->points[i].h)/((256/MAP_H)*360) < 0)) continue;
                if(((int)(255*cloud_cluster_hsv->points[i].s)/(256/MAP_H) >= 32) || ((int)(255*cloud_cluster_hsv->points[i].s)/(256/MAP_H) < 0)) continue;

                // Divide by the segment's point count so the histogram is
                // normalized to sum ~1.
                map.histogram[(int)(255*cloud_cluster_hsv->points[i].h)/((256/MAP_H)*360)][(int)(255*cloud_cluster_hsv->points[i].s)/(256/MAP_H)] += 1.000/(float)cloud_cluster_hsv->points.size();
            }

            Object_Map.push_back(map);

            *tmp_rgb += *cloud_cluster_rgb;
        }
    }

    // Replace the input cloud with the accepted clusters only.
    pcl::copyPointCloud(*tmp_rgb, cloud);

    return;
}
示例#5
0
tree
drd_info_rep::get_env (tree_label l, int nr) {
  // Return the decoded environment of child `nr` of label `l`;
  // an empty ATTR tree is the fallback for out-of-range indices.
  if (nr < N(info[l]->ci))
    return drd_decode (info[l]->ci[nr].env);
  return tree (ATTR);
}
示例#6
0
File: sharing.cpp  Project: EBone/faust-1
void ScalarCompiler::setSharingCount(Tree sig, int count)
{
	// Record how many times signal `sig` is shared, storing the count
	// as a tree value under fSharingKey in the generic property table.
	setProperty(sig, fSharingKey, tree(count));
}
  unsigned int scale_of_anisotropy (const Point_set& points, double& size)
  {
    Tree tree(points.begin(), points.end());
    
    double ratio_kept = (points.size() < 1000)
      ? 1. : 1000. / (points.size());
    
    std::vector<Point> subset;
    for (std::size_t i = 0; i < points.size (); ++ i)
      if (rand() / (double)RAND_MAX < ratio_kept)
    	subset.push_back (points[i]);
    
    std::vector<unsigned int> scales;
    generate_scales (std::back_inserter (scales));

    std::vector<unsigned int> chosen;

    for (std::size_t i = 0; i < subset.size (); ++ i)
      {
    	Neighbor_search search(tree, subset[i],scales.back());
	double current = 0.;
    	unsigned int nb = 0;
    	std::size_t index = 0;
	double maximum = 0.;
	unsigned int c = 0;
	
    	for (Search_iterator search_iterator = search.begin();
    	     search_iterator != search.end (); ++ search_iterator, ++ nb)
    	  {
	    current += search_iterator->second;

    	    if (nb + 1 == scales[index])
    	      {
		double score = std::sqrt (current / scales[index])
		  / std::pow (scales[index], 0.75); // NB ^ (3/4)

		if (score > maximum)
		  {
		    maximum = score;
		    c = scales[index];
		  }

    		++ index;
    		if (index == scales.size ())
    		  break;
    	      }
    	  }
	chosen.push_back (c);
      }

    double mean = 0.;
    for (std::size_t i = 0; i < chosen.size(); ++ i)
      mean += chosen[i];
    mean /= chosen.size();
    unsigned int aniso_scale = static_cast<unsigned int>(mean);

    size = 0.;
    for (std::size_t i = 0; i < subset.size (); ++ i)
      {
    	Neighbor_search search(tree, subset[i], aniso_scale);
	size += std::sqrt ((-- search.end())->second);
      }
    size /= subset.size();
    
    return aniso_scale;
  }
示例#8
0
/* Emit code for a switch statement over the value buckets b[lb..ub] by
   binary search: the middle bucket k is compiled directly — as a short
   chain of equality tests when it holds <= 3 values, or as a jump table
   otherwise — and the halves below/above it are compiled recursively
   behind fresh labels (lolab/hilab). */
void swcode(Swtch swp, int b[], int lb, int ub) {
	int hilab, lolab, l, u, k = (lb + ub)/2;
	long *v = swp->values;

	/* Choose targets for values below (lolab) and above (hilab) this
	   bucket: a fresh label when a recursive half will handle them,
	   the switch's default label at the extremes. */
	if (k > lb && k < ub) {
		lolab = genlabel(1);
		hilab = genlabel(1);
	} else if (k > lb) {
		lolab = genlabel(1);
		hilab = swp->deflab->u.l.label;
	} else if (k < ub) {
		lolab = swp->deflab->u.l.label;
		hilab = genlabel(1);
	} else
		lolab = hilab = swp->deflab->u.l.label;
	l = b[k];
	u = b[k+1] - 1;
	if (u - l + 1 <= 3)
		{
			/* Few values: one equality branch per case value; values
			   below the range then fall through to lolab's code, which
			   is defined immediately after this call returns. */
			int i;
			for (i = l; i <= u; i++)
				cmp(EQ, swp->sym, v[i], swp->labels[i]->u.l.label);
			if (k > lb && k < ub)
				cmp(GT, swp->sym, v[u], hilab);
			else if (k > lb)
				cmp(GT, swp->sym, v[u], hilab);
			else if (k < ub)
				cmp(LT, swp->sym, v[l], lolab);
			else
				assert(lolab == hilab),
				branch(lolab);
			walk(NULL, 0, 0);
		}
	else {
		/* Dense bucket: build a static jump table indexed by value-v[l],
		   after range-checking against lolab/hilab. */
		Tree e;
		Type ty = signedint(swp->sym->type);
		Symbol table = genident(STATIC,
			array(voidptype, u - l + 1, 0), GLOBAL);
		(*IR->defsymbol)(table);
		/* The low-bound check can be skipped for unsigned values when
		   the table starts at zero. */
		if (!isunsigned(swp->sym->type) || v[l] != 0)
			cmp(LT, swp->sym, v[l], lolab);
		cmp(GT, swp->sym, v[u], hilab);
		e = (*optree['-'])(SUB, cast(idtree(swp->sym), ty), cnsttree(ty, v[l]));
		if (e->type->size < unsignedptr->size)
			e = cast(e, unsignedlong);
		walk(tree(JUMP, voidtype,
			rvalue((*optree['+'])(ADD, pointer(idtree(table)), e)), NULL),
			0, 0);
		code(Switch);
		codelist->u.swtch.table = table;
		codelist->u.swtch.sym = swp->sym;
		codelist->u.swtch.deflab = swp->deflab;
		codelist->u.swtch.size = u - l + 1;
		codelist->u.swtch.values = &v[l];
		codelist->u.swtch.labels = &swp->labels[l];
		if (v[u] - v[l] + 1 >= 10000)
			warning("switch generates a huge table\n");
	}
	/* Recurse into the lower and upper halves, defining their entry
	   labels first. */
	if (k > lb) {
		assert(lolab != swp->deflab->u.l.label);
		definelab(lolab);
		swcode(swp, b, lb, k - 1);
	}
	if (k < ub) {
		assert(hilab != swp->deflab->u.l.label);
		definelab(hilab);
		swcode(swp, b, k + 1, ub);
	}
}
示例#9
0
int main(int argc, char * argv[])
{
  MPI_Comm comm = MPI_COMM_WORLD;

  MPI_Init(&argc, &argv);
    
  int nRank, myRank;
  MPI_Comm_size(comm, &nRank);
  MPI_Comm_rank(comm, &myRank);

  assert(nRank == 1 && myRank == 0);

  if (argc < 5)
  {
    if (myRank == 0)
    {
      fprintf(stderr, " ------------------------------------------------------------------------\n");
      fprintf(stderr, " Usage: \n");
      fprintf(stderr, " %s  fileIn fileOut reduceDM reduceStars \n", argv[0]);
      fprintf(stderr, " ------------------------------------------------------------------------\n");
    }
    exit(-1);
  }
  
  const std::string fileIn (argv[1]);
  const std::string fileOut(argv[2]);
  const int reduceDM = atoi(argv[3]);
  const int reduceS  = atoi(argv[4]);

  if (myRank == 0)
  {
    fprintf(stderr, " Input file:  %s\n", fileIn.c_str());
    fprintf(stderr, "    reduceStars= %d \n", reduceS);
    fprintf(stderr, "    reduceDM   = %d \n", reduceDM);
    fprintf(stderr, " Output file: %s\n", fileOut.c_str());
  }

  std::vector<BonsaiIO::DataTypeBase*> dataInput;
  
  /************* read ***********/

  {
    const double tOpen = MPI_Wtime(); 
    BonsaiIO::Core in(myRank, nRank, comm, BonsaiIO::READ,  fileIn);
    double dtOpen = MPI_Wtime() - tOpen;


    if (myRank == 0)
      in.getHeader().printFields();

    double dtRead;
    if (reduceDM > 0)
    {
      assert(0);
      std::vector<BonsaiIO::DataTypeBase*> dataDM;
      dataDM.push_back(new BonsaiIO::DataType<IDType>("DM:IDType"));
      dataDM.push_back(new BonsaiIO::DataType<float4>("DM:POS:real4"));
      dataDM.push_back(new BonsaiIO::DataType<float3>("DM:VEL:float[3]"));
      dataDM.push_back(new BonsaiIO::DataType<float2>("DM:RHOH:float[2]"));

      dtRead += read(myRank, comm, dataDM, in, reduceDM);

      dataInput.insert(dataInput.end(), dataDM.begin(), dataDM.end());
    }
    if (reduceS > 0)
    {
      std::vector<BonsaiIO::DataTypeBase*> dataStars;
      dataStars.push_back(new BonsaiIO::DataType<IDType>("Stars:IDType"));
      dataStars.push_back(new BonsaiIO::DataType<float4>("Stars:POS:real4"));
      dataStars.push_back(new BonsaiIO::DataType<float3>("Stars:VEL:float[3]"));
      dataStars.push_back(new BonsaiIO::DataType<float2>("Stars:RHOH:float[2]"));

      dtRead += read(myRank, comm, dataStars, in, reduceS);

      dataInput.insert(dataInput.end(), dataStars.begin(), dataStars.end());
    }

    double readBW = in.computeBandwidth();

    const double tClose = MPI_Wtime(); 
    in.close();
    double dtClose = MPI_Wtime() - tClose;

    double dtOpenGlb = 0;
    double dtReadGlb = 0;
    double dtCloseGlb = 0;
    MPI_Allreduce(&dtOpen, &dtOpenGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    MPI_Allreduce(&dtRead, &dtReadGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    MPI_Allreduce(&dtClose, &dtCloseGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    if (myRank == 0)
    {
      fprintf(stderr, "dtOpen = %g sec \n", dtOpenGlb);
      fprintf(stderr, "dtRead = %g sec \n", dtReadGlb);
      fprintf(stderr, "dtClose= %g sec \n", dtCloseGlb);
      fprintf(stderr, "Read BW= %g MB/s \n", readBW/1e6);
    }
  }

  /************* estimate density **********/

  std::vector<BonsaiIO::DataTypeBase*> data;
  data.push_back(new BonsaiIO::DataType<float3>("Stars:XYZ:float[3]"));
  data.push_back(new BonsaiIO::DataType<float5>("Stars:VxVyVz,DENS,H:float[5]"));

  {
    const auto &posArray = *dynamic_cast<BonsaiIO::DataType<float4>*>(dataInput[1]);
    const auto &velArray = *dynamic_cast<BonsaiIO::DataType<float3>*>(dataInput[2]);
    const size_t np = posArray.getNumElements();
    Particle::Vector ptcl(np);

    fprintf(stderr, " -- create tree particles -- \n");

    for (size_t i = 0; i < np; i++)
    {
      const auto &pos = posArray[i];
      const auto &vel = velArray[i];
      ptcl[i].pos  = vec3(pos[0], pos[1], pos[2]);
      ptcl[i].mass = pos[3];
      ptcl[i].vel  = vec3(vel[0], vel[1], vel[2]);
    }
    Node::allocate(np,np);
    fprintf(stderr, " -- build tree -- \n");
    Tree tree(ptcl);

    const auto &leafArray = tree.leafArray;
    const int nLeaf = leafArray.size();

    fprintf(stderr, " np= %d  nleaf= %d  NLEAF= %d\n",
        (int)np, nLeaf, Node::NLEAF);

    auto &posOut  = *dynamic_cast<BonsaiIO::DataType<float3>*>(data[0]);
    auto &attrOut = *dynamic_cast<BonsaiIO::DataType<float5>*>(data[1]);

    posOut.resize(nLeaf);
    attrOut.resize(nLeaf);
    
    fprintf(stderr, " -- generate output data  -- \n");

    size_t npMean = 0;
    for (int i = 0; i < nLeaf; i++)
    {
      const auto &leaf = *leafArray[i];
      vec3 pos(0.0);
      vec3 vel(0.0);
      float mass = 0.0;
      npMean += leaf.np;
      for (int i = 0; i < leaf.np; i++)
      {
        const auto &p = Node::ptcl[leaf.pfirst+i];
        mass += p.mass;
        pos += p.pos*p.mass;
        vel += p.vel*p.mass;
      }
      pos *= 1.0/mass;
      vel *= 1.0/mass;
      posOut[i][0] = pos.x;
      posOut[i][1] = pos.y;
      posOut[i][2] = pos.z;
      attrOut[i][0] = vel.x;
      attrOut[i][1] = vel.y;
      attrOut[i][2] = vel.z;
      const float volume = leaf.size*leaf.size*leaf.size;
      attrOut[i][3] = mass/volume;
      attrOut[i][4] = leaf.size;
#if 0
      if (i%1000 == 0)
      {
        fprintf(stderr, "i= %d: pos= %g %g %g  vel= %g %g %g  d= %g  h= %g\n",
            i,
            posOut[i][0],
            posOut[i][1],
            posOut[i][2],
            attrOut[i][0],
            attrOut[i][1],
            attrOut[i][2],
            attrOut[i][3],
            attrOut[i][4]);
      }
#endif
    }
    fprintf(stderr, "npMean= %g\n", 1.0*npMean/nLeaf);

  }




  /************* write ***********/

  {
    fprintf(stderr, " -- write  output   -- \n");
    const double tOpen = MPI_Wtime(); 
    BonsaiIO::Core out(myRank, nRank, comm, BonsaiIO::WRITE,  fileOut);
    double dtOpen = MPI_Wtime() - tOpen;

    double dtWrite = 0;
    dtWrite += write(myRank, comm, data, out);

    double writeBW = out.computeBandwidth();
    const double tClose = MPI_Wtime(); 
    out.close();
    double dtClose = MPI_Wtime() - tClose;
    
    double dtOpenGlb = 0;
    double dtWriteGlb = 0;
    double dtCloseGlb = 0;
    MPI_Allreduce(&dtOpen, &dtOpenGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    MPI_Allreduce(&dtWrite, &dtWriteGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    MPI_Allreduce(&dtClose, &dtCloseGlb, 1, MPI_DOUBLE, MPI_SUM, comm);
    if (myRank == 0)
    {
      fprintf(stderr, "dtOpen = %g sec \n", dtOpenGlb);
      fprintf(stderr, "dtWrite = %g sec \n", dtWriteGlb);
      fprintf(stderr, "dtClose= %g sec \n", dtCloseGlb);
      fprintf(stderr, "Write BW= %g MB/s \n", writeBW/1e6);
    }
  }

  /*********** write stats **********/


  MPI_Finalize();




  return 0;
}
示例#10
0
 // Conversion operator: this object renders as a plain "table" tree node.
 operator tree () { return tree ("table"); }
示例#11
0
 // Apply `f` over the chunk tree rooted at a fresh ChunkState, but only
 // when the structure actually contains cold data.
 template<typename F> void tree(const F& f)
 {
     if (!m_structure.hasCold()) return;
     tree(f, ChunkState(m_metadata));
 }
示例#12
0
File: atree.c  Project: svagionitis/atree
// Interactive ncurses fractal-tree viewer: draws a tree and redraws it on
// each keypress until ESC (27) is pressed.  Keys: 0-9 select color pairs,
// 'l' toggles the branching direction, 'q'/'w'/'e' nudge the dx/dy/dz
// parameters (file-scope globals — presumably drawing offsets/angles used
// by tree(); TODO confirm, they are defined outside this view), arrows
// toggle reverse/blink attributes.
int main(int argc, char *argv[])
{
    WINDOW *mainwin;
    int ch;
    int maxY = 0, maxX = 0;
    double startFrom = 0.0;
    double max = 19.0;
    char isLeft = 1;

    ///*
    mainwin = initscr();
    if (mainwin == NULL)
    {
        fprintf(stderr, "Error initialising ncurses.\n");
        exit(-1);
    }

    // Get the maximum size of the screen
    getmaxyx(mainwin, maxY, maxX);

    // Check if colors are supported
    if (!has_colors())
    {
        delwin(mainwin);
        endwin();
        fprintf(stderr,"Your terminal does not support color\n");
        exit(-1);
    }
    else
    {
        // Initialize colors
        start_color();

        // Assign terminal default foreground/background colors to color number -1
        use_default_colors();

        init_pair(1, COLOR_RED,     COLOR_BLACK);
        init_pair(2, COLOR_GREEN,   COLOR_BLACK);
        init_pair(3, COLOR_YELLOW,  COLOR_BLACK);
        init_pair(4, COLOR_BLUE,    COLOR_BLACK);
        init_pair(5, COLOR_CYAN,    COLOR_BLACK);
        init_pair(6, COLOR_MAGENTA, COLOR_BLACK);
        init_pair(7, COLOR_WHITE,   COLOR_BLACK);
        init_pair(8, COLOR_RED,     COLOR_WHITE);
        init_pair(9, COLOR_GREEN,   COLOR_WHITE);
    }


    clear();

    // Turn off key echoing
    noecho();
    // Line buffering disabled
    cbreak();
    // This determines the visibility of the cursor.
    curs_set(FALSE);
    // Tell curses not to do NL->CR/NL on output
    nonl();
    // Enable the keypad for non-char keys
    keypad(mainwin, TRUE);

    //*/

    // Initial drawing before entering the event loop.
    tree(isLeft, startFrom, max);

    ///*
    // Loop until press ESC
    while ((ch = getch()) != 27)
    {
        switch(ch)
        {
            case '0': // Set foreground/background colors to default
            case '1': // Set foreground/background colors according to init_pairs
            case '2':
            case '3':
            case '4':
            case '5':
            case '6':
            case '7':
            case '8':
            case '9':
                attrset(COLOR_PAIR(ch - '0'));
                break;
            case 'l':
                if (isLeft)
                    isLeft = 0;
                else
                    isLeft = 1;
                break;
            case 'q':
                dx += 0.1;

                // Clamp to the screen width.
                if (dx > maxX)
                    dx = maxX;
                break;
            case 'w':
                dy += 0.1;

                // Clamp to the screen height.
                if (dy > maxY)
                    dy = maxY;
                break;
            case 'e':
                dz += 0.1;

                break;
            case KEY_UP: // Press Up to increase height
                break;
            case KEY_DOWN: // Press Down to decrease height
                break;
            case KEY_RIGHT: // Press right key reverse foreground/background colour.
                attron(A_REVERSE | A_BLINK);
                break;
            case KEY_LEFT: // Press left key return to default foreground/background colour.
                attroff(A_REVERSE | A_BLINK);
                break;
        }

        // Redraw the tree with the (possibly) updated settings.
        erase();
        tree(isLeft, startFrom, max);
    }


    delwin(mainwin);
    endwin();
    refresh();
    //*/

    return 0;
}
示例#13
0
// Upsilon-finder analysis driver: parses --mc/--elist/-i options, streams
// events through the AA framework, runs UpsilonFinder on each event and
// writes the selected candidates into upsilon.root ("tree", plus "treeMC"
// when --mc is given).
int main(int argc, char** argv) {
    // getopt_long flag targets (int, not bool, as getopt requires).
    static int mc    = false;
    static int elist = false;

    const char *input_file = 0;

    int c;
    while (1) {
        static struct option long_options[] ={
            /* These options set a flag. */
            {"mc" , no_argument, &mc , 1},
            {"elist" , no_argument, &elist , 1},
            {0, 0, 0, 0}
        };
        /* getopt_long stores the option index here. */
        int option_index = 0;

        c = getopt_long(argc, argv, "i:",
                long_options, &option_index);

        /* Detect the end of the options. */
        if (c == -1)
            break;
        switch (c) {
            case 0:
                /* If this option set a flag, do nothing else now. */
                // NOTE(review): when flag == 0 this falls through into
                // case 'i' — harmless here since both long options set
                // flags, but worth a deliberate break or comment.
                if (long_options[option_index].flag != 0)
                    break;
            case 'i':
                input_file = optarg;
                break;
            case '?':
                usage();
            default:
                usage();
        }
    }

    if (!input_file){
    	std::cout << argv[0] << " II: Missing input_file " << std::endl;
    	usage();
    }

	// Framework initialization: job-level setup and parameter dump.
	AA::newJob();
	AA::newAnaJob();
	AA::newSelectJob();
	params();
	AA::displayParameters();

	// Open the input either as an event list or as a sequential file list.
	bool ok = false;
	if (elist){
		std::cout << argv[0] << " II: Doing event lists " << std::endl;
		ok = AA::dst.eventLst(input_file);
	} else {
		std::cout << argv[0] << " II: Doing sequential mode" << std::endl;
		ok = AA::dst.fileLst(input_file);
	}

	if (!ok) {
		std::cout << argv[0] << " EE: Input stream is not valid or is empty." << std::endl;
		exit(EXIT_FAILURE);
	}

/* ================================        MAIN       ===================================*/
	/* -- We will save all in this tree --*/
	TFile root_file("upsilon.root", "RECREATE");
	TTree tree("tree", "all info.");
	TTree *treeMC=0;

// NOTE(review): the `if` below guards only the single next statement
// (the TTree allocation); evt_saver is constructed unconditionally.
if (mc)
	treeMC = new TTree("treeMC", "all mc info.");
	EvtSaver evt_saver(tree);

	UpsilonFinder upsilon_finder(tree);

	/* -- Initilization of geometry, field and beam spot.
	 * Should be done after the fileLst or eventLst initilization -- */
	AA::det.input();
	AA::field.input();
	AA::spot.input();
	AA::newEvent();
	AA::newAnaEvent();
	AA::newSelectEvent();

	// Event loop: analyse, select, and fill the tree for every upsilon
	// candidate found in the event.
	unsigned long p_events = 0;
	while (nextEvent()) {
		p_events++;
		AA::setField(); //define beam-spot and field value
		AA::spot.set();
		AA::analyse();
		AA::select(AA::TAG);
        //std::cout << "Run:" << AA::runNumber << " Evt: "  << AA::evtNumber << std::endl;

		if (!upsilon_finder.find())
			continue;
		while (upsilon_finder.next()){
//			std::cout << xyg_finder.getMass() << ' ' << std::endl;
			upsilon_finder.fill();
			evt_saver.fill();
			tree.Fill();
		}
		AA::dst.outEventLst("upsilon_elist");
	}//End while next event.

	tree.Write();
// Guards only the next statement (treeMC->Write()), as with the `new` above.
if (mc)
	treeMC->Write();

	root_file.Write();
	root_file.Close();
	std::cout << argv[0] << " II: upsilon_finder ended. " << p_events << " events seen."<< std::endl;
	std::exit(EXIT_SUCCESS);
}
示例#14
0
// Mapnik utility: build a quad-tree spatial index ("<file>.index") for each
// CSV or GeoJSON file passed on the command line.
// Exit status: EXIT_SUCCESS when every input was indexed; EXIT_FAILURE on
// option errors, when no usable input file is found, when parsing fails, or
// when a file yields an invalid extent.
int main (int argc, char** argv)
{
    //using namespace mapnik;
    namespace po = boost::program_options;
    bool verbose = false;
    bool validate_features = false;
    unsigned int depth = DEFAULT_DEPTH;
    double ratio = DEFAULT_RATIO;
    std::vector<std::string> files;
    char separator = 0; // 0 = not given on the command line
    char quote = 0;     // 0 = not given on the command line
    std::string manual_headers;
    try
    {
        // Declare the supported command-line options.
        po::options_description desc("Mapnik CSV/GeoJSON index utility");
        desc.add_options()
            ("help,h", "produce usage message")
            ("version,V","print version string")
            ("verbose,v","verbose output")
            ("depth,d", po::value<unsigned int>(), "max tree depth\n(default 8)")
            ("ratio,r",po::value<double>(),"split ratio (default 0.55)")
            ("separator,s", po::value<char>(), "CSV columns separator")
            ("quote,q", po::value<char>(), "CSV columns quote")
            ("manual-headers,H", po::value<std::string>(), "CSV manual headers string")
            ("files",po::value<std::vector<std::string> >(),"Files to index: file1 file2 ...fileN")
            ("validate-features", "Validate GeoJSON features")
            ;

        // Every positional argument is treated as an input file.
        po::positional_options_description p;
        p.add("files",-1);
        po::variables_map vm;
        po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
        po::notify(vm);

        if (vm.count("version"))
        {
            std::clog << "version 1.0.0" << std::endl;
            return 1;
        }
        if (vm.count("help"))
        {
            std::clog << desc << std::endl;
            return 1;
        }
        if (vm.count("verbose"))
        {
            verbose = true;
        }
        if (vm.count("validate-features"))
        {
            validate_features = true;
        }
        if (vm.count("depth"))
        {
            depth = vm["depth"].as<unsigned int>();
        }
        if (vm.count("ratio"))
        {
            ratio = vm["ratio"].as<double>();
        }
        if (vm.count("separator"))
        {
            separator = vm["separator"].as<char>();
        }
        if (vm.count("quote"))
        {
            quote = vm["quote"].as<char>();
        }
        if (vm.count("manual-headers"))
        {
            manual_headers = vm["manual-headers"].as<std::string>();
        }
        if (vm.count("files"))
        {
            files=vm["files"].as<std::vector<std::string> >();
        }
    }
    catch (std::exception const& ex)
    {
        std::clog << "Error: " << ex.what() << std::endl;
        return EXIT_FAILURE;
    }

    // Keep only inputs that exist and are recognized as CSV or GeoJSON;
    // anything else is silently skipped.
    std::vector<std::string> files_to_process;

    for (auto const& filename : files)
    {
        if (!mapnik::util::exists(filename))
        {
            continue;
        }

        if (mapnik::detail::is_csv(filename) || mapnik::detail::is_geojson(filename))
        {
            files_to_process.push_back(filename);
        }
    }

    if (files_to_process.size() == 0)
    {
        std::clog << "no files to index" << std::endl;
        return EXIT_FAILURE;
    }

    std::clog << "max tree depth:" << depth << std::endl;
    std::clog << "split ratio:" << ratio << std::endl;

    // Bounding boxes are gathered in float precision; the tree itself is
    // built in double precision (see extent_d below).
    using box_type = mapnik::box2d<float>;
    // item = (bounding box, (file offset, size)) for one feature/row.
    using item_type = std::pair<box_type, std::pair<std::size_t, std::size_t>>;

    for (auto const& filename : files_to_process)
    {
        // NOTE(review): existence was already checked above; this second
        // check only guards against concurrent deletion of the file.
        if (!mapnik::util::exists(filename))
        {
            std::clog << "Error : file " << filename << " does not exist" << std::endl;
            continue;
        }

        std::vector<item_type> boxes;
        box_type extent;
        if (mapnik::detail::is_csv(filename))
        {
            std::clog << "processing '" << filename << "' as CSV\n";
            auto result = mapnik::detail::process_csv_file(boxes, filename, manual_headers, separator, quote);
            if (!result.first)
            {
                std::clog << "Error: failed to process " << filename << std::endl;
                return EXIT_FAILURE;
            }
            extent = result.second;
        }
        else if (mapnik::detail::is_geojson(filename))
        {
            std::clog << "processing '" << filename << "' as GeoJSON\n";
            auto result = mapnik::detail::process_geojson_file(boxes, filename, validate_features, verbose);
            if (!result.first)
            {
                std::clog << "Error: failed to process " << filename << std::endl;
                return EXIT_FAILURE;
            }
            extent = result.second;
        }

        if (extent.valid())
        {
            std::clog << extent << std::endl;
            // Build the quad-tree over the file's full extent and insert
            // each feature's (offset, size) record under its bounding box.
            mapnik::box2d<double> extent_d(extent.minx(), extent.miny(), extent.maxx(), extent.maxy());
            mapnik::quad_tree<std::pair<std::size_t, std::size_t>> tree(extent_d, depth, ratio);
            for (auto const& item : boxes)
            {
                auto ext_f = std::get<0>(item);
                tree.insert(std::get<1>(item), mapnik::box2d<double>(ext_f.minx(), ext_f.miny(), ext_f.maxx(), ext_f.maxy()));
            }

            // The index is written next to the input as "<name>.index";
            // std::ios::trunc discards any previous index.
            std::fstream file((filename + ".index").c_str(),
                              std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary);
            if (!file)
            {
                std::clog << "cannot open index file for writing file \""
                          << (filename + ".index") << "\"" << std::endl;
            }
            else
            {
                tree.trim();  // drop empty nodes before serializing
                std::clog <<  "number nodes=" << tree.count() << std::endl;
                std::clog <<  "number element=" << tree.count_items() << std::endl;
                // Turn stream failures into exceptions so write errors are not silent.
                file.exceptions(std::ios::failbit | std::ios::badbit);
                tree.write(file);
                file.flush();
                file.close();
            }
        }
        else
        {
            std::clog << "Invalid extent " << extent << std::endl;
            return EXIT_FAILURE;
        }
    }
    std::clog << "done!" << std::endl;
    return EXIT_SUCCESS;
}
示例#15
0
tree
drd_info_rep::arg_access (tree t, tree arg, tree env, int& type) {
  // Determine whether the macro argument 'arg' is accessible inside the
  // macro body 't'.  Returns "" if unaccessible and the (possibly enriched)
  // environment if accessible.  'type' is updated with the deduced type of
  // the argument's occurrence when one can be found.
  //cout << "  arg_access " << t << ", " << arg << ", " << env << "\n";
  if (is_atomic (t)) return "";
  else if (t == arg) return env;
  // quote-arg wrapping the same argument name counts as a direct occurrence
  else if (is_func (t, QUOTE_ARG, 1) && N(arg) == 1 && t[0] == arg[0])
    return env;
  else if (is_func (t, MAP_ARGS) && (t[2] == arg[0])) {
    // map-args: the argument index must lie within the mapped range
    if ((N(t) >= 4) && (N(arg) >= 2) && (as_int (t[3]) > as_int (arg[1])))
      return "";
    if ((N(t) == 5) && (N(arg) >= 2) && (as_int (t[3]) <= as_int (arg[1])))
      return "";
    tree_label inner= make_tree_label (as_string (t[0]));
    tree_label outer= make_tree_label (as_string (t[1]));
    if (get_nr_indices (inner) > 0)
      type= get_type_child (tree (inner, arg), 0);
    // accessible only if the inner wrapper always exposes child 0 and the
    // outer wrapper exposes all of its children
    if ((get_nr_indices (inner) > 0) &&
	(get_accessible (inner, 0) == ACCESSIBLE_ALWAYS) &&
	all_accessible (outer))
      return env;
    return "";
  }
  // a nested macro body never exposes the outer macro's arguments
  else if (is_func (t, MACRO)) return "";
  else if (is_func (t, WITH)) {
    // recurse into the body with the 'with' assignments merged into env
    int n= N(t)-1;
    //cout << "env= " << drd_env_merge (env, t (0, n)) << "\n";
    return arg_access (t[n], arg, drd_env_merge (env, t (0, n)), type);
  }
  else if (is_func (t, TFORMAT)) {
    // accumulate table formatting into CELL_FORMAT before recursing
    int n= N(t)-1;
    tree oldf= drd_env_read (env, CELL_FORMAT, tree (TFORMAT));
    tree newf= oldf * tree (TFORMAT, A (t (0, n)));
    tree w   = tree (ATTR, CELL_FORMAT, newf);
    tree cenv= get_env_child (t, n, drd_env_merge (env, w));
    return arg_access (t[n], arg, cenv, type);
  }
  // expand explicit compound nodes and retry
  else if (is_func (t, COMPOUND) && N(t) >= 1 && is_atomic (t[0]))
    return arg_access (compound (t[0]->label, A (t (1, N(t)))),
		       arg, env, type);
  // for conditionals only the condition (child 1) is inspected
  else if ((is_func (t, IF) || is_func (t, VAR_IF)) && N(t) >= 2)
    return arg_access (t[1], arg, env, type);
  else {
    // generic node: search every child; the first accessible occurrence wins
    int i, n= N(t);
    for (i=0; i<n; i++) {
      int  ctype= get_type_child (t, i);
      tree cenv = get_env_child (t, i, env);
      tree aenv = arg_access (t[i], arg, cenv, ctype);
      if (aenv != "") {
	if (ctype != TYPE_INVALID) type= ctype;
	if (is_accessible_child (t, i)) return aenv;
      }
      else if (type == TYPE_UNKNOWN &&
               ctype != TYPE_INVALID &&
               ctype != TYPE_UNKNOWN) {
        // remember a plausible type even when the child is not accessible
        type= ctype;
        //cout << "  found type " << t << ", " << arg << ", " << type << "\n";
      }
    }
    return "";
  }
}
示例#16
0
tree
drd_info_rep::get_env_child (tree t, int i, string var, tree val) {
  // Look up variable 'var' in the environment of the i-th child of 't',
  // falling back to 'val' when the variable is not defined there.
  return drd_env_read (get_env_child (t, i, tree (ATTR)), var, val);
}
示例#17
0
tree sql_exec (url db_name, string cmd) {
  // Stub: SQL support is disabled in this build.  The parameters are
  // deliberately ignored and an empty tuple is returned.
  (void) db_name;
  (void) cmd;
  return tree (TUPLE);
}
  // Fit a cylinder model to the sensor points lying inside the requested
  // cylindrical region and fill res.transformation with the rigid transform
  // that snaps the requested cylinder onto the detected one.  Publishes
  // intermediate debug clouds and a visualization marker along the way.
  // Returns false on any failure (too few points, plane/cylinder fit failed).
  bool SnapIt::processModelCylinder(jsk_pcl_ros::CallSnapIt::Request& req,
                                    jsk_pcl_ros::CallSnapIt::Response& res) {
    pcl::PointCloud<pcl::PointXYZ>::Ptr candidate_points (new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointIndices::Ptr inliers(new pcl::PointIndices);
    Eigen::Vector3f n, C_orig;   // requested axis direction and center
    // Collect candidate points inside the (slightly enlarged, factor 1.3)
    // target cylinder.
    if (!extractPointsInsideCylinder(req.request.center,
                                     req.request.direction,
                                     req.request.radius,
                                     req.request.height,
                                     inliers, n, C_orig,
                                     1.3)) {
      return false;
    }
    if (inliers->indices.size() < 3) {
      NODELET_ERROR("not enough points inside of the target_plane");
      return false;
    }
    
    geometry_msgs::PointStamped centroid;
    centroid.point.x = C_orig[0];
    centroid.point.y = C_orig[1];
    centroid.point.z = C_orig[2];
    centroid.header = req.request.header;
    
    
    pcl::ExtractIndices<pcl::PointXYZ> extract;
    extract.setInputCloud(input_);
    extract.setIndices(inliers);
    extract.filter(*candidate_points);
    
    publishPointCloud(debug_candidate_points_pub_, candidate_points);


    // first, to remove plane we estimate the plane
    pcl::ModelCoefficients::Ptr plane_coefficients (new pcl::ModelCoefficients);
    pcl::PointIndices::Ptr plane_inliers (new pcl::PointIndices);
    extractPlanePoints(candidate_points, plane_inliers, plane_coefficients,
                       n, req.request.eps_angle);
    if (plane_inliers->indices.size() == 0) {
      NODELET_ERROR ("plane estimation failed");
      return false;
    }

    // remove the points belonging to the plane
    pcl::PointCloud<pcl::PointXYZ>::Ptr points_inside_pole_wo_plane (new pcl::PointCloud<pcl::PointXYZ>);
    extract.setInputCloud (candidate_points);
    extract.setIndices (plane_inliers);
    extract.setNegative (true);
    extract.filter (*points_inside_pole_wo_plane);

    publishPointCloud(debug_candidate_points_pub2_, points_inside_pole_wo_plane);
    
    // normal estimation (needed by the cylinder RANSAC below)
    pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);
    pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
    pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ> ());
    ne.setSearchMethod (tree);
    ne.setInputCloud (points_inside_pole_wo_plane);
    ne.setKSearch (50);
    ne.compute (*cloud_normals);
    
    
    // segmentation
    pcl::ModelCoefficients::Ptr cylinder_coefficients (new pcl::ModelCoefficients);
    pcl::PointIndices::Ptr cylinder_inliers (new pcl::PointIndices);
    // Create the segmentation object
    pcl::SACSegmentationFromNormals<pcl::PointXYZ, pcl::Normal> seg;
    // Optional
    seg.setOptimizeCoefficients (true);
    // Mandatory
    seg.setModelType (pcl::SACMODEL_CYLINDER);
    seg.setMethodType (pcl::SAC_RANSAC);
    seg.setRadiusLimits (0.01, req.request.radius * 1.2);
    seg.setDistanceThreshold (0.05);
    
    seg.setInputCloud(points_inside_pole_wo_plane);
    seg.setInputNormals (cloud_normals);
    seg.setMaxIterations (10000);
    seg.setNormalDistanceWeight (0.1);
    seg.setAxis(n);
    // fall back to ~20 degrees when the caller did not constrain the angle
    if (req.request.eps_angle != 0.0) {
      seg.setEpsAngle(req.request.eps_angle);
    }
    else {
      seg.setEpsAngle(0.35);
    }
    seg.segment (*cylinder_inliers, *cylinder_coefficients);
    if (cylinder_inliers->indices.size () == 0)
    {
      NODELET_ERROR ("Could not estimate a cylinder model for the given dataset.");
      return false;
    }

    debug_centroid_pub_.publish(centroid);
    
    pcl::PointCloud<pcl::PointXYZ>::Ptr cylinder_points (new pcl::PointCloud<pcl::PointXYZ>);
    extract.setInputCloud (points_inside_pole_wo_plane);
    extract.setIndices (cylinder_inliers);
    extract.setNegative (false);
    extract.filter (*cylinder_points);

    publishPointCloud(debug_candidate_points_pub3_, cylinder_points);

    // cylinder coefficients: values[0..2] = point on axis, values[3..5] = axis
    // direction, values[6] = radius
    Eigen::Vector3f n_prime;   // detected cylinder axis
    Eigen::Vector3f C_new;     // point on the detected axis
    for (size_t i = 0; i < 3; i++) {
      C_new[i] = cylinder_coefficients->values[i];
      n_prime[i] = cylinder_coefficients->values[i + 3];
    }

    double radius = fabs(cylinder_coefficients->values[6]);
    // inorder to compute centroid, we project all the points to the center line.
    // and after that, get the minimum and maximum points in the coordinate system of the center line
    double min_alpha = DBL_MAX;
    double max_alpha = -DBL_MAX;
    for (size_t i = 0; i < cylinder_points->points.size(); i++ ) {
      pcl::PointXYZ q = cylinder_points->points[i];
      double alpha = (q.getVector3fMap() - C_new).dot(n_prime);
      if (alpha < min_alpha) {
        min_alpha = alpha;
      }
      if (alpha > max_alpha) {
        max_alpha = alpha;
      }
    }
    // the center of cylinder
    Eigen::Vector3f C_new_prime = C_new + (max_alpha + min_alpha) / 2.0 * n_prime;
    
    Eigen::Vector3f n_cross = n.cross(n_prime);
    // Fix: flip the rotation axis only when the detected axis points away
    // from the requested one (negative dot product).  The original code
    // tested `if (n.dot(n_prime))` -- "dot product non-zero" -- which
    // negated the axis for almost every input.
    if (n.dot(n_prime) < 0) {
      n_cross = - n_cross;
    }
    // NOTE(review): when n and n_prime are exactly parallel, n_cross is the
    // zero vector and normalized() yields NaNs -- confirm upstream geometry
    // rules this case out.
    double theta = asin(n_cross.norm());
    Eigen::Quaternionf trans (Eigen::AngleAxisf(theta, n_cross.normalized()));
    Eigen::Vector3f C = trans * C_orig;
    Eigen::Affine3f A = Eigen::Translation3f(C) * Eigen::Translation3f(C_new_prime - C) * trans * Eigen::Translation3f(C_orig).inverse();
    tf::poseEigenToMsg((Eigen::Affine3d)A, res.transformation);

    geometry_msgs::PointStamped centroid_transformed;
    centroid_transformed.point.x = C_new_prime[0];
    centroid_transformed.point.y = C_new_prime[1];
    centroid_transformed.point.z = C_new_prime[2];
    centroid_transformed.header = req.request.header;
    debug_centroid_after_trans_pub_.publish(centroid_transformed);

    // publish marker
    // NOTE(review): visualization_msgs CYLINDER markers interpret scale.x/y
    // as full diameters; using the radius here draws the marker at half
    // size -- confirm whether this is intended.
    visualization_msgs::Marker marker;
    marker.type = visualization_msgs::Marker::CYLINDER;
    marker.scale.x = radius;
    marker.scale.y = radius;
    marker.scale.z = (max_alpha - min_alpha);
    marker.pose.position.x = C_new_prime[0];
    marker.pose.position.y = C_new_prime[1];
    marker.pose.position.z = C_new_prime[2];

    // n_prime -> z
    // n_cross.normalized() -> x
    Eigen::Vector3f z_axis = n_prime.normalized();
    Eigen::Vector3f y_axis = n_cross.normalized();
    Eigen::Vector3f x_axis = (y_axis.cross(z_axis)).normalized();
    Eigen::Matrix3f M;
    for (size_t i = 0; i < 3; i++) {
      M(i, 0) = x_axis[i];
      M(i, 1) = y_axis[i];
      M(i, 2) = z_axis[i];
    }
    
    Eigen::Quaternionf q (M);
    marker.pose.orientation.x = q.x();
    marker.pose.orientation.y = q.y();
    marker.pose.orientation.z = q.z();
    marker.pose.orientation.w = q.w();
    marker.color.g = 1.0;
    marker.color.a = 1.0;
    marker.header = input_header_;
    marker_pub_.publish(marker);
    
    return true;
  }
// Exercise TestTree: fill a WIDTH x WIDTH grid of TestElements, print the
// tree, then traverse a few rays through it and print the visited cells.
// NOTE: the bare `return` in the middle makes everything after it
// unreachable; the remaining traversals look like intentionally disabled
// debug scenarios kept for manual testing.
void testQuadTree()
{
	Vector3 origin( 0.f, 0.f, 0.f );
	Vector2 origin2d( origin.x, origin.z );
	TestTree tree( origin.x, origin.z );
	// WIDTH cells per side; each cell is RANGE/WIDTH world units wide.
	const int WIDTH = (1 << TestTreeDesc::DEPTH);
	const float RANGE = TestTreeDesc::RANGE;
	const float CELL_SIZE = RANGE/WIDTH;
	TestElement elements[ WIDTH ][ WIDTH ];

	// Shrink each element slightly so neighbouring cells do not overlap.
	const Vector2 cellSize( CELL_SIZE - 0.001f, CELL_SIZE - 0.001f);

	for (int j = 0; j < WIDTH; j++)
	{
		for (int i = 0; i < WIDTH; i++)
		{
			TestElement & curr = elements[i][j];
			curr.x_ = i;
			curr.y_ = j;

			curr.minBounds_.set( i * CELL_SIZE, j * CELL_SIZE );
			curr.maxBounds_ = curr.minBounds_ + cellSize;

			// Bounds are expressed relative to the tree origin.
			curr.minBounds_ += origin2d;
			curr.maxBounds_ += origin2d;

			tree.add( curr );
		}
	}

	tree.print();

	// Vertical, horizontal, and (near-)degenerate traversals.
	printTraverse( tree, Vector3( 30.f, 0.f, 0.f ), Vector3( 30.f, 0.f, 100.f ) );
	printTraverse( tree, Vector3( 0.f, 0.f, 30.f ), Vector3( 100.f, 0.f, 30.f ) );
	printTraverse( tree, Vector3( 30.0001f, 0.f, 30.f ), Vector3( 30.f, 0.f, 30.f ) );
	return;

	// ---- everything below is unreachable (disabled scenarios) ----
	Vector3 src( 0.f, 0.f, -1.f );
	Vector3 dst( 100.f, 0.f, 99.f );

	src.set( 30.f, 0.f, 30.f );
	dst.set( 30.f, 0.f, 30.f );

	src += origin;
	dst += origin;

	printTraverse( tree, src, dst );
	printTraverse( tree, dst, src );

//	src.set( 0.f, 0.f, 71.f );
//	dst.set( 70.f, 0.f, 1.f );
	src.set( 0.f, 0.f, 101.f );
	dst.set( 100.f, 0.f, 1.f );

	src += origin;
	dst += origin;

//	printTraverse( tree, src, dst );
//	printTraverse( tree, dst, src );

	src += Vector3( 0.f, 0.f, 50.f );
	dst += Vector3( 0.f, 0.f, 50.f );

	printTraverse( tree, src, dst );
	printTraverse( tree, dst, src );
}
示例#20
0
// Compute MUON trigger efficiencies (Lpt and Hpt unlike-sign pair cuts)
// relative to events where both muons fired at least 3 of the 4 trigger
// chambers.  Results are written to "MUONTriggerEfficiency.out".
//   filenameSim : galice file holding the simulated hits
//   filenameRec : galice file holding the reconstructed trigger data
//   readFromRP  : when true read the trigger store from the "R" (RecPoints)
//                 tree instead of "D" (Digits)
void MUONTriggerEfficiency(const char* filenameSim="galice_sim.root", 
                           const char* filenameRec="galice.root",  
                           Bool_t readFromRP = 0)
{
  
  // Set default CDB storage
  AliCDBManager* man = AliCDBManager::Instance();
  man->SetDefaultStorage("local://$ALICE_ROOT/OCDB");
  man->SetRun(0);
  
  // data interfaces over the simulated and reconstructed files
  AliMUONMCDataInterface diSim(filenameSim);
  AliMUONDataInterface diRec(filenameRec);
  
  if (!diSim.IsValid())
  {
    cerr << "Cannot access " << filenameSim << endl;
    return;
  }
  
  if (!diRec.IsValid())
  {
    cerr << "Cannot access " << filenameRec << endl;
    return;
  }
  
  FILE* fdat = fopen("MUONTriggerEfficiency.out","w");          
  if (!fdat)
  {
    cerr << "Cannot create output file" << endl;
    return;
  }
  
  // coincmuon: events where both mu+ and mu- satisfy the 3/4 coincidence;
  // muonlpt/muonhpt: events passing the Lpt/Hpt unlike-sign pair trigger
  Int_t coincmuon,muonlpt,muonhpt;
  Int_t CoincMuPlus,CoincMuMinus;
  coincmuon=0;
  muonlpt=0;  
  muonhpt=0;
  
  Int_t nevents = diSim.NumberOfEvents();          
  
  for (Int_t ievent=0; ievent<nevents; ++ievent) 
  {   
    CoincMuPlus=0;
    CoincMuMinus=0;
    
    if (ievent%1000==0) printf("\t Event = %d\n",ievent);    
    
    // NbHits[0][j] / NbHits[1][j]: mu+ / mu- left a hit in trigger
    // chamber 11+j (each chamber counted at most once per muon)
    Int_t NbHits[2][4];
    for (Int_t j=0; j<2; j++) 
    {
      for (Int_t jj=0; jj<4; jj++) 
      {
        NbHits[j][jj]=0;
      }
    } 
    
    Int_t ntracks = (Int_t) diSim.NumberOfTracks(ievent);
    
    for ( Int_t itrack=0; itrack<ntracks; ++itrack ) 
    {
      AliMUONVHitStore* hitStore = diSim.HitStore(ievent,itrack);      
      AliMUONHit* mHit;
      TIter next(hitStore->CreateIterator());
      
      while ( ( mHit = static_cast<AliMUONHit*>(next()) ) )
      {
        Int_t Nch = mHit->Chamber(); 
        Int_t hittrack = mHit->Track();
        // PDG code: -13 = mu+, 13 = mu-.
        // NOTE(review): Particle() is compared with == on a Float_t below;
        // fine for integral codes, but confirm it cannot be non-integral.
        Float_t IdPart = mHit->Particle();
        
        for (Int_t j=0;j<4;j++) 
        {
          Int_t kch=11+j;  // trigger chambers are numbered 11..14
          // only the two primary muon tracks are considered
          if (hittrack==1 || hittrack==2) 
          {
            if (Nch==kch && IdPart==-13 && NbHits[0][j]==0) NbHits[0][j]++; 
            if (Nch==kch && IdPart==13 && NbHits[1][j]==0) NbHits[1][j]++; 
          }       
        }
      }
    } // end track loop     
    
    // 3/4 coincidence: at least three of the four chambers were hit
    Int_t SumNbHits=NbHits[0][0]+NbHits[0][1]+NbHits[0][2]+NbHits[0][3];
    if (SumNbHits==3 || SumNbHits==4) CoincMuPlus=1;
    
    SumNbHits=NbHits[1][0]+NbHits[1][1]+NbHits[1][2]+NbHits[1][3];
    if (SumNbHits==3 || SumNbHits==4) CoincMuMinus=1;
    
    if (CoincMuPlus==1 && CoincMuMinus==1) coincmuon++;         
          
    // choose the reconstructed tree: Digits ("D") or RecPoints ("R")
    TString tree("D");
    if ( readFromRP ) tree = "R";
    
    AliMUONVTriggerStore* triggerStore = diRec.TriggerStore(ievent,tree.Data());

    AliMUONGlobalTrigger* gloTrg = triggerStore->Global();
    
    if (gloTrg->PairUnlikeLpt()>=1) muonlpt++;
    if (gloTrg->PairUnlikeHpt()>=1) muonhpt++;

  } // end loop on event  
 
  // calculate efficiency with as a ref. at least 3/4 planes fired
  Float_t efficiencylpt,efficiencyhpt;
  Double_t coincmu,lptmu,hptmu;  
  Float_t error;  
  coincmu=Double_t(coincmuon);
  lptmu=Double_t(muonlpt); 
  hptmu=Double_t(muonhpt); 

  // output
  fprintf(fdat,"\n"); 
  fprintf(fdat," Number of events = %d \n",nevents);  
  fprintf(fdat," Number of events with 3/4 coinc = %d \n",coincmuon);  
  // Fix: "Nomber" -> "Number" in the Lpt line below.
  fprintf(fdat," Number of dimuons with 3/4 coinc Lpt cut = %d \n",muonlpt);  
  fprintf(fdat," Number of dimuons with 3/4 coinc Hpt cut = %d \n",muonhpt);  
  fprintf(fdat,"\n");
  
  // Fix: guard against coincmuon == 0, which previously produced NaN/inf
  // in the efficiency lines.
  // NOTE(review): the error formula still yields NaN when lptmu/hptmu is 0
  // -- confirm whether a binomial error should be used instead.
  if (coincmuon > 0) {
    efficiencylpt=lptmu/coincmu;  
    error=efficiencylpt*TMath::Sqrt((lptmu+coincmu)/(lptmu*coincmu));  
    fprintf(fdat," Efficiency Lpt cut = %4.4f +/- %4.4f\n",efficiencylpt,error); 
    
    efficiencyhpt=hptmu/coincmu; 
    error=efficiencyhpt*TMath::Sqrt((hptmu+coincmu)/(hptmu*coincmu));          
    fprintf(fdat," Efficiency Hpt cut = %4.4f +/- %4.4f\n",efficiencyhpt,error);
  } else {
    fprintf(fdat," No events with 3/4 coincidence: efficiencies undefined\n");
  }

  fclose(fdat);  
}
// Region growing around a fixation (seed) point: starting from fp_index,
// neighbours are accepted while they pass a convexity test relative to the
// seed, and growth stops at boundary points.  Accepted point indices are
// appended to indices_out.
//   cloud_input   : input cloud (copied to an RGBA cloud for the kd-tree)
//   search_radius : radius used for the neighbour search around each seed
//   eps_angle     : convexity angle threshold in degrees
//   fp_index      : index of the fixation/seed point inside cloud_input
//   indices_out   : output set of indices belonging to the grown region
void DenseReconstruction::activeSegmentation (const pcl::PointCloud<pcl::PointXYZLRegionF> &cloud_input,
    float search_radius,
    double eps_angle,
    int fp_index,
    pcl::PointIndices &indices_out)
{
  pcl::PointCloud<pcl::Normal>::Ptr normals (new pcl::PointCloud<pcl::Normal>);
  pcl::PointCloud<pcl::Boundary> boundary;

  normalsEstimation (cloud_input.makeShared (), normals);
  boundaryEstimation (cloud_input.makeShared (), normals, boundary);

  pcl::PointCloud<pcl::PointXYZRGBA> cloud_in;
  pcl::copyPointCloud (cloud_input, cloud_in);

  // Fix: validate the seed index BEFORE it is used to index any cloud.
  // The original code (a) dereferenced normals->points[fp_index] before the
  // check and (b) used '>' instead of '>=', so fp_index == size() slipped
  // through and read one past the end.
  if (fp_index < 0 || static_cast<std::size_t> (fp_index) >= cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Fixation point given is invalid\n");
    return;
  }

  std::cerr << "Index of seed point is: " << fp_index << std::endl;
  std::cerr << "Curvature of seed point: " << normals->points[fp_index].curvature << std::endl;

  pcl::search::KdTree<pcl::PointXYZRGBA>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGBA> ());
  tree->setInputCloud (cloud_in.makeShared ());

  // Sanity checks: every per-point auxiliary structure must match the input.
  if (boundary.points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Boundary map given was built for a different dataset (%zu) than the input cloud (%zu)!\n",
        boundary.points.size () , cloud_in.points.size ());
    return;
  }
  if (tree->getInputCloud ()->points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Tree built for a different point cloud dataset (%zu) than the input cloud (%zu)!\n",
        tree->getInputCloud ()->points.size (), cloud_in.points.size ());
    return;
  }
  if (normals->points.size () != cloud_in.points.size ())
  {
    PCL_ERROR ("[pcl::activeSegmentation] Input Normals are for a different point cloud dataset (%zu) than the input cloud (%zu)!\n",
        normals->points.size (), cloud_in.points.size ());
    return;
  }

  std::vector<int> seed_queue;
  std::vector<bool> processed (cloud_in.size (), false);
  seed_queue.push_back (fp_index);
  indices_out.indices.push_back (fp_index);
  processed[fp_index] = true;
  std::vector<int> nn_indices;
  std::vector<float> nn_distances;

  // Process while there are valid seed points (the queue grows as the
  // region grows; indexing instead of iterators keeps this safe).
  for (size_t seed_idx = 0; seed_idx < seed_queue.size (); ++seed_idx)
  {
    int curr_seed = seed_queue[seed_idx];

    // Neighbourhood of the current seed.
    if (!tree->radiusSearch (curr_seed, search_radius, nn_indices, nn_distances))
      continue;

    bool stop_growing = false;
    size_t indices_old_size = indices_out.indices.size ();
    for (size_t i = 1; i < nn_indices.size (); ++i)  // i == 0 is the seed itself
    {
      if (processed[nn_indices.at (i)])
        continue;

      // Boundary points are included but terminate growth from this seed.
      if (boundary.points[nn_indices.at (i)].boundary_point != 0)
      {
        stop_growing = true;
        indices_out.indices.push_back (nn_indices.at (i));
        processed[nn_indices.at (i)] = true;
        break;
      }

      // Convexity test: angle between the neighbour's normal and the vector
      // from the neighbour towards the fixation point.
      // NOTE(review): temp is not normalized, so acos(dot_p) is only a true
      // angle when |temp| is close to 1 -- confirm this is the intended
      // heuristic.
      bool is_convex = false;
      pcl::PointXYZRGBA temp;
      temp.x = cloud_in.points[fp_index].x - cloud_in.points[nn_indices.at (i)].x;
      temp.y = cloud_in.points[fp_index].y - cloud_in.points[nn_indices.at (i)].y;
      temp.z = cloud_in.points[fp_index].z - cloud_in.points[nn_indices.at (i)].z;

      double dot_p = normals->points[nn_indices.at (i)].normal[0] * temp.x
          + normals->points[nn_indices.at (i)].normal[1] * temp.y
          + normals->points[nn_indices.at (i)].normal[2] * temp.z;

      // Clamp into acos' valid domain before taking the angle.
      dot_p = dot_p > 1 ? 1 : dot_p;
      dot_p = dot_p < -1 ? -1 : dot_p;

      if ((acos (dot_p) > eps_angle * M_PI / 180))
        is_convex = true;

      if (is_convex)
      {
        indices_out.indices.push_back (nn_indices.at (i));
        processed[nn_indices.at (i)] = true;
      }
      else
        break;
    } // new neighbor

    // Newly accepted points become seeds for further growth.
    // NOTE(review): the range starts at indices_old_size-1, which re-enqueues
    // the last previously accepted index; the output is unchanged (the
    // processed[] flags prevent duplicates in indices_out) but it costs a
    // redundant radius search -- confirm whether this off-by-one is intended.
    if (!stop_growing && (indices_old_size != indices_out.indices.size ()))
    {
      for (size_t j = indices_old_size - 1; j < indices_out.indices.size (); ++j)
        seed_queue.push_back (indices_out.indices.at (j));
    }
  } // new seed point
}
示例#22
0
// Cluster-extraction pipeline:
//   1. load the .pcd given on the command line and dump it as a .png,
//   2. remove dominant planes (repeatable via -plane_seg_times),
//   3. run Euclidean clustering on the remaining points,
//   4. save each cluster as .pcd and .png plus the cropped image patch
//      ("*box.png") taken from the original image.
// Returns 0 on success, -1 when no .pcd file is given.
int 
main (int argc, char** argv)
{
  // Read in the cloud data
  pcl::PCDReader reader;
  pcl::PCDWriter writer;
  pcl::PointCloud<PointType>::Ptr cloud (new pcl::PointCloud<PointType>);
  std::vector<int> filenames;
  filenames = pcl::console::parse_file_extension_argument (argc, argv, ".pcd");
  // Fix: guard against a missing .pcd argument; the original indexed
  // filenames[0] unconditionally and crashed when none was supplied.
  if (filenames.empty ()) {
    std::cerr << "Usage: " << argv[0] << " <file.pcd> [options]" << std::endl;
    return (-1);
  }
  reader.read (argv[filenames[0]], *cloud);
  std::string pcd_filename;
  std::string png_filename = argv[filenames[0]];

  // Take the original png image out.
  // NOTE(review): this assumes an organized cloud (width * height points in
  // row-major order) -- confirm the input .pcd is organized.
  png::image<png::rgb_pixel> origin_image(cloud->width, cloud->height);
  int origin_index = 0;
  for (size_t y = 0; y < origin_image.get_height (); ++y) {
    for (size_t x = 0; x < origin_image.get_width (); ++x) {
      const PointType & p = cloud->points[origin_index++];
      origin_image[y][x] = png::rgb_pixel(p.r, p.g, p.b);
    }
  }

  png_filename.replace(png_filename.length () - 4, 4, ".png");
  origin_image.write(png_filename);

  std::cout << "PointCloud before filtering has: " << cloud->points.size () << " data points." << std::endl; //*  

  // Crop-box / plane-removal parameters (all overridable on the command line).
  float min_depth = 0.1;
  pcl::console::parse_argument (argc, argv, "-min_depth", min_depth);

  float max_depth = 3.0;
  pcl::console::parse_argument (argc, argv, "-max_depth", max_depth);

  float max_x = 1.0;
  pcl::console::parse_argument (argc, argv, "-max_x", max_x);

  float min_x = -1.0;
  pcl::console::parse_argument (argc, argv, "-min_x", min_x);

  float max_y = 1.0;
  pcl::console::parse_argument (argc, argv, "-max_y", max_y);

  float min_y = -1.0;
  pcl::console::parse_argument (argc, argv, "-min_y", min_y);

  // Remove the dominant plane repeatedly (e.g. table + wall).
  int plane_seg_times = 1;
  pcl::console::parse_argument (argc, argv, "-plane_seg_times", plane_seg_times);
  for (int i = 0; i < plane_seg_times; i++) {
    remove_plane(cloud, min_depth, max_depth, min_x, max_x, min_y, max_y);
  }

  // std::string::replace clamps the erase count at the end of the string,
  // so this turns a trailing ".pcd" into "plane.pcd".
  pcd_filename = argv[filenames[0]];
  pcd_filename.replace(pcd_filename.length () - 4, 8, "plane.pcd");
  pcl::io::savePCDFile(pcd_filename, *cloud);

  // std::cout << "PointCloud after removing the plane has: " << cloud->points.size () << " data points." << std::endl; //*
  // NOTE(review): the 1000 initial bounds assume image coordinates below
  // 1000 px -- confirm for larger sensors.
  uint32_t xmin = 1000, xmax = 0, ymin = 1000, ymax = 0;

  // Rebuild the cloud with per-point image coordinates (u, v) attached and
  // NaN points dropped, so clusters can be mapped back onto the image.
  pcl::PointCloud<PointXYZRGBUV>::Ptr cloud_uv (new pcl::PointCloud<PointXYZRGBUV>);
  for (size_t index = 0; index < cloud->points.size(); index++) {
    const pcl::PointXYZRGB & p = cloud->points[index];
    if (p.x != p.x || p.y != p.y || p.z != p.z) { // NaN != NaN: skip invalid points
      continue;
    }

    PointXYZRGBUV cp = PointXYZRGBUV(p, index % cloud-> width, index / cloud->width);
    cloud_uv->points.push_back (cp); 
  }
  cloud_uv->width = cloud_uv->points.size ();
  cloud_uv->height = 1;
  
  // Creating the KdTree object for the search method of the extraction
  pcl::search::KdTree<PointXYZRGBUV>::Ptr tree (new pcl::search::KdTree<PointXYZRGBUV>);
  tree->setInputCloud (cloud_uv);

  std::vector<pcl::PointIndices> cluster_indices;
  pcl::EuclideanClusterExtraction<PointXYZRGBUV> ec;
  ec.setClusterTolerance (0.02); // 2cm
  ec.setMinClusterSize (500);
  ec.setMaxClusterSize (25000);
  ec.setSearchMethod (tree);
  ec.setInputCloud (cloud_uv);
  ec.extract (cluster_indices);
  

  int j = 0;
  for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
  {
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZRGB>);
    // Reset the image-space bounding box for this cluster.
    xmin = 1000;
    xmax = 0; 
    ymin = 1000;
    ymax = 0;

    for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); ++pit) {
        PointXYZRGBUV& p = cloud_uv->points[*pit];
        pcl::PointXYZRGB cp_rgb;
        cp_rgb.x = p.x; cp_rgb.y = p.y; cp_rgb.z = p.z;
        cp_rgb.rgb = p.rgb; 
        cloud_cluster->points.push_back(cp_rgb);

        // Track the cluster's bounding box in image coordinates.
        xmin = std::min(xmin, p.u);
        xmax = std::max(xmax, p.u);
        ymin = std::min(ymin, p.v);
        ymax = std::max(ymax, p.v);
    }
    cloud_cluster->is_dense = true;
    cloud_cluster->width = cloud_cluster->points.size();
    cloud_cluster->height = 1;

    std::cout << "PointCloud representing the Cluster: " << cloud_cluster->points.size () << " data points." << std::endl;
    // Save the cluster as "<name>cluster_<j>.pcd" (replace clamps at the end).
    pcd_filename = argv[filenames[0]];
    std::stringstream ss;
    ss << "cluster_" << j++ << ".pcd";
    pcd_filename.replace(pcd_filename.length () - 4, ss.str().length(), ss.str());
    pcl::io::savePCDFile(pcd_filename, *cloud_cluster);

    // Dump the cluster's own colors as a 1-row image.
    png::image<png::rgb_pixel> image(cloud_cluster->width, cloud_cluster->height);
    int i = 0;
    for (size_t y = 0; y < image.get_height (); ++y) {
      for (size_t x = 0; x < image.get_width (); ++x) {
        const PointType & p = cloud_cluster->points[i++];
        image[y][x] = png::rgb_pixel(p.r, p.g, p.b);
      }
    }
    pcd_filename.replace(pcd_filename.length () - 4, 4, ".png");
    image.write(pcd_filename);

    // Crop out the image patch covering the cluster from the original image.
    png::image<png::rgb_pixel> image_patch(xmax - xmin + 1, ymax - ymin + 1);
    for (size_t y = 0; y < image_patch.get_height (); ++y) {
      for (size_t x = 0; x < image_patch.get_width (); ++x) {
        image_patch[y][x] = origin_image[y+ymin][x+xmin];
      }
    }
    pcd_filename.replace(pcd_filename.length () - 4, 7, "box.png");
    image_patch.write(pcd_filename);

    // writer.write<pcl::PointXYZRGB> (ss.str (), *cloud_cluster, false); //*
  }

  return (0);
}
示例#23
0
obj* php_load_impl(const char* base, const char*& beg, const char* end) {
	// Recursive-descent parser for the PHP serialize() wire format.
	// 'base' points at the start of the whole buffer (used only to report
	// error offsets), 'beg' is advanced past the value that was consumed,
	// 'end' is one past the last valid byte.  Returns a newly allocated obj
	// owned by the caller, or NULL for the PHP NULL value.  Throws
	// std::logic_error on malformed input.
#define THROW_INPUT_ERROR2(msg, ptr) \
	do{ char buf[256]; \
		int blen = snprintf(buf, sizeof(buf), "%s at pos: %ld", msg, long(ptr-base)); \
		std::string strMsg(buf, blen); \
		throw std::logic_error(strMsg); \
	} while (0)
#define THROW_INPUT_ERROR(msg) THROW_INPUT_ERROR2(msg, beg)
	switch (*beg) {
		default: {
			// Unknown type tag: include up to 100 bytes of context in the error.
			std::string msg = "php_load: invalid type: ";
			msg.push_back(*beg);
			msg += ", input:";
			msg.append(beg, std::min<size_t>(100, end-beg));
			throw std::logic_error(msg);
		}
		case 'i': { // i:val;
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php integer");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: integer, expect ':'");
			char* pp = NULL;
			long long val = strtoll(beg+1, &pp, 10);
			if (';' != *pp)
				THROW_INPUT_ERROR2("php_load: integer, expect ';'", pp);
			beg = pp + 1;
			// Use the narrower obj_long whenever the value fits in a 'long'.
			if (sizeof(long) == sizeof(long long) || (LONG_MIN <= val&&val <= LONG_MAX))
				return new obj_long((long)val);
			else
				return new obj_llong(val);
		}
		case 'b': { // b:val;
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php boolean");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: boolean, expect ':'");
			char* pp = NULL;
			long val = strtol(beg+1, &pp, 10);
			if (';' != *pp)
				THROW_INPUT_ERROR2("php_load: boolean, expect ';'", pp);
			beg = pp + 1;
			return new obj_bool(val ? 1 : 0);
		}
#if 0
		case 'd': { // d:val;
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php double");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: double, expect ':'");
			char* pp = NULL;
			double val = strtod(beg+1, &pp);
			if (';' != *pp)
				THROW_INPUT_ERROR("php_load: double, expect ';'");
			beg = pp + 1;
			return new obj_double(val);
		}
#else
		case 'd': { // d:val;
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php double");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: long double, expect ':'");
			char* pp = NULL;
			// Cygwin's newlib may not have a distinct long double strtold.
		#ifdef __CYGWIN__ //_LDBL_EQ_DBL
			double val = strtod(beg+1, &pp);
		#else
			long double val = strtold(beg+1, &pp);
		#endif
			if (';' != *pp)
				THROW_INPUT_ERROR2("php_load: long double, expect ';'", pp);
			beg = pp + 1;
			return new obj_ldouble(val);
		}
#endif
		case 's': { // s:size:"content";
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php string");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: string.size expect ':'");
			char* pp = NULL;
			long len = strtol(beg+1, &pp, 10);
			if (':' != *pp)
				THROW_INPUT_ERROR2("php_load: string.content expect ':'", pp);
			if (len < 0)
				THROW_INPUT_ERROR2("php_load: string.size is negtive", beg+1);
			// Bounds check before indexing pp[len+2]/pp[len+3] below, so a
			// truncated buffer cannot be read past 'end'.
			if (pp + len + 4 > end)
				THROW_INPUT_ERROR2("php_load: string truncated", pp);
			if ('"' != pp[1])
				THROW_INPUT_ERROR2("php_load: string.content expect '\"'", pp+1);
			if ('"' != pp[len+2])
				THROW_INPUT_ERROR2("php_load: string.content not found right quote", pp+len+2);
			if (';' != pp[len+3])
				THROW_INPUT_ERROR2("php_load: string didn't end with ';'", pp+len+3);
			std::auto_ptr<obj_string> x(new obj_string);
			// assign() sets both size and content; the former resize(len)
			// before it was redundant work.
			x->assign(pp+2, len);
			beg = pp + len + 4; // s:size:"content";
			return x.release();
		}
		case 'a': { // A:size:{key;value; ....}
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php array");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: array.size expect ':'");
			char* pp = NULL;
			long len = strtol(beg+1, &pp, 10);
			if (':' != pp[0] || '{' != pp[1])
				THROW_INPUT_ERROR2("php_load: array.size should followed by ':{'", pp);
			// Start optimistically as a dense array; fall back to a map the
			// first time a key is not a small non-negative integer index.
			std::auto_ptr<obj_array> arr(new obj_array);
			std::auto_ptr<php_array> map;
			arr->resize(2*len);
			beg = pp + 2;
			long max_idx = -1;
			for (long i = 0; i < len; ++i) {
				const char* key_pos = beg;
				std::auto_ptr<obj> key(php_load_impl(base, beg, end));
				if (key.get() == NULL) {
					THROW_INPUT_ERROR2("php_load: array.key must not be NULL", key_pos);
				}
				if (arr.get()) {
					obj_long* l = dynamic_cast<obj_long*>(key.get());
					if (l) {
						long idx = l->t;
						if (idx >= 0 && idx < (long)arr->size()) {
							(*arr)[idx].reset(php_load_impl(base, beg, end));
							max_idx = std::max(idx, max_idx);
							continue;
						}
					}
				}
				if (map.get() == NULL)
					map.reset(new php_array);
				if (arr.get()) {
					// Demote: move the dense entries collected so far into the map.
					for (long j = 0; j <= max_idx; ++j) {
						if ((*arr)[j])
							(*map)[obj_ptr(new obj_long(j))] = (*arr)[j];
					}
					arr.reset(NULL);
				}
				// NOTE(review): evaluation order of the operator[] argument vs the
				// php_load_impl call is unspecified pre-C++17; if the value parse
				// throws after the key was inserted, ownership of 'key' may be
				// shared between the map and the auto_ptr — confirm obj_ptr copy
				// semantics make this safe.
				(*map)[obj_ptr(key.get())].reset(php_load_impl(base, beg, end));
				key.release();
			}
			if ('}' != *beg)
				THROW_INPUT_ERROR("php_load: array not correctly closed");
			beg += 1;
			if (arr.get()) {
				arr->resize(max_idx+1);
				arr->shrink_to_fit();
				return arr.release();
			}
			return map.release();
		}
		case 'N': { // N; — PHP NULL
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php NULL");
			if (';' != *beg)
				THROW_INPUT_ERROR("php_load: NULL expect ';'");
			beg += 1;
			return NULL;
		}
		case 'O': {
// O:strlen(class name):"class name":fields_num:{s:strlen(field name):"field name":field definition;(repeated per field)}
			if (++beg >= end)
				THROW_INPUT_ERROR("php_load: Incompleted php Object");
			if (':' != *beg)
				THROW_INPUT_ERROR("php_load: Object.namelen expect ':' 1");
			char* pp = NULL;
			long len = strtol(beg+1, &pp, 10);
			// Fix: was '&&', which only threw when BOTH bytes were wrong and so
			// accepted malformed object headers (cf. the '||' checks above).
			if (':' != pp[0] || '"' != pp[1])
				THROW_INPUT_ERROR2("php_load: Object.namelen expect ':\"' 2", pp);
			if (pp + 4 + len > end)
				THROW_INPUT_ERROR2("php_load: Object 3", pp);
			std::auto_ptr<php_object> tree(new php_object);
			tree->cls_name.assign(pp + 2, len);
			long fields = strtol(pp + 4 + len, &pp, 10);
			if (':' != pp[0] || '{' != pp[1])
				THROW_INPUT_ERROR2("php_load: Object 4", pp);
			beg = pp + 2;
			for (long i = 0; i < fields; ++i) {
				std::auto_ptr<obj> pname(php_load_impl(base, beg, end));
				obj_string* name = dynamic_cast<obj_string*>(pname.get());
				if (NULL == name)
					THROW_INPUT_ERROR("object field name is not a string");
				std::auto_ptr<obj> value(php_load_impl(base, beg, end));
				tree->fields[*name].reset(value.release());
			}
			if ('}' != beg[0])
				THROW_INPUT_ERROR("php_load: Object not correctly closed");
			beg += 1;
			return tree.release();
		}
	}
	assert(0);
	return NULL;
}
示例#24
0
// Rewrite an inactive (source-mode) tree 't' into its default displayed form.
// 'var' addresses 't' within the document (used to build MARK trees so edits
// map back to the source); 'block' indicates block context and 'flush'
// requests a trailing right-flush.  Returns a MARK tree wrapping either an
// INLINE_TAG (compact rendering) or a DOCUMENT of OPEN/MIDDLE/CLOSE tags
// (multi-line rendering).
tree
edit_env_rep::rewrite_inactive_default (
  tree t, tree var, bool block, bool flush)
{
  int i, d= 0, n= N(t);
  tree op= as_string (L(t));
  // For COMPOUND tags whose first child is a literal name, display that
  // child as the operator and skip it below (d = 1).
  if ((L(t) == COMPOUND) &&
      is_atomic (t[0]) &&
      (src_special >= SPECIAL_NORMAL))
    {
      d = 1;
      op= highlight (subvar (var, 0), t[0], TYPE_VARIABLE);
    }
  // In error modes the operator itself is highlighted as invalid.
  if (inactive_mode == INACTIVE_INLINE_ERROR ||
      inactive_mode == INACTIVE_BLOCK_ERROR)
    op= highlight (op, "", TYPE_INVALID);

  // Compact (single-line) rendering when there are no remaining arguments,
  // or the compaction policy allows it for inline / short trees.
  if ((N(t) == d) ||
      (src_compact == COMPACT_ALL) ||
      ((!block) && (src_compact != COMPACT_NONE)) ||
      ((!is_long (t)) && (src_compact != COMPACT_NONE)))
    {
      tree r (INLINE_TAG, n+1-d);
      r[0]= op;
      for (i=d; i<n; i++)
	r[i+1-d]= rewrite_inactive_arg (t, var, i, false, false);
      return tree (MARK, var, r);
    }
  else {
    // Multi-line rendering: alternate indented long arguments with runs of
    // short arguments grouped into OPEN/MIDDLE/CLOSE tag lines.
    tree doc (DOCUMENT);
    bool compact= (src_compact < COMPACT_INLINE);

    for (i=d; i<n; i++) {
      tree next;
      // A long argument is rendered on its own, indented.
      if ((!compact) || is_long_arg (t, i)) {
	if (i==d) doc << tree (OPEN_TAG, op);
	next= rewrite_inactive_arg (t, var, i, true, src_close >= CLOSE_LONG);
	next= compound ("indent*", next);
	i++;
      }

      // Collect the following run of short arguments [start, end).
      int start= i;
      for (; i<n; i++)
	if ((!compact) || is_long_arg (t, i)) break;
      int end= i;
      tree_label l= MIDDLE_TAG;
      if (end == n) l= CLOSE_TAG;
      if (start == d) l= OPEN_TAG;
      tree u (l, end - start + 1);
      u[0]= op;
      for (i=0; i<end-start; i++)
	u[i+1]= rewrite_inactive_arg (t, var, start+i, false, false);
      i= end-1;
      // After the first line, compaction follows the INLINE_START policy.
      compact= (src_compact < COMPACT_INLINE_START);

      if (start==d) doc << u;
      else {
	if (src_close < CLOSE_LONG)
	  doc << tree (SURROUND, "", u, next);
	else doc << next << u;
      }
    }

    if (flush) doc= tree (SURROUND, "", compound ("right-flush"), doc);
    return tree (MARK, var, doc);
  }
}
示例#25
0
void ClusterExtraction::extract() {
  /*
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = in_pcl.read();
  // Creating the KdTree object for the search method of the extraction
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
  tree->setInputCloud (cloud);
  
  std::vector<pcl::PointIndices> cluster_indices;
  pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  ec.setClusterTolerance (clusterTolerance); // 2cm
  ec.setMinClusterSize (minClusterSize);
  ec.setMaxClusterSize (maxClusterSize);
  ec.setSearchMethod (tree);
  ec.setInputCloud (cloud);
  ec.extract (cluster_indices);
  

  std::vector<pcl::PointCloud<pcl::PointXYZ>::Ptr> clusters;
  
  int j = 0;
  for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
  {
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
    for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); pit++)
      cloud_cluster->points.push_back (cloud->points[*pit]); //*
      cloud_cluster->width = cloud_cluster->points.size ();
      cloud_cluster->height = 1;
      cloud_cluster->is_dense = true;

	clusters.push_back(cloud_cluster);
    std::cout << "PointCloud representing the Cluster: " << cloud_cluster->points.size () << " data points." << std::endl;
    std::stringstream ss;
//     if(j==0)
//     {
//       ss << "cloud_cluster_0" << ".pcd";
//       writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false);
//       j++;
//     }
   // if(j==0)	
    //{
	std::cin.ignore();
	out_the_biggest_cluster.write(cloud_cluster);
	j++;
	std::cout<<"Zapis!!!\n";
   // }
    
  }	
	out_indices.write(cluster_indices);
	out_clusters.write(clusters);
	//std::cout<<"j=="<<j<<endl;
	//std::cout<<clusters.size()<<endl;
*/

  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_f (new pcl::PointCloud<pcl::PointXYZ>);

// Read in the cloud data
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = in_pcl.read();
  std::cout << "PointCloud before filtering has: " << cloud->points.size () << " data points." << std::endl; //*

  // Create the filtering object: downsample the dataset using a leaf size of 1cm
  pcl::VoxelGrid<pcl::PointXYZ> vg;
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);
  vg.setInputCloud (cloud);
  vg.setLeafSize (0.01f, 0.01f, 0.01f);
  vg.filter (*cloud_filtered);
  std::cout << "PointCloud after filtering has: " << cloud_filtered->points.size ()  << " data points." << std::endl; //*

  // Create the segmentation object for the planar model and set all the parameters
  pcl::SACSegmentation<pcl::PointXYZ> seg;
  pcl::PointIndices::Ptr inliers (new pcl::PointIndices);
  pcl::ModelCoefficients::Ptr coefficients (new pcl::ModelCoefficients);
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_plane (new pcl::PointCloud<pcl::PointXYZ> ());
  //pcl::PCDWriter writer;
  seg.setOptimizeCoefficients (true);
  seg.setModelType (pcl::SACMODEL_PLANE);
  seg.setMethodType (pcl::SAC_RANSAC);
  seg.setMaxIterations (100);
  seg.setDistanceThreshold (0.02);

  int i=0, nr_points = (int) cloud_filtered->points.size ();
  /*
  while (cloud_filtered->points.size () > 0.3 * nr_points)
  {
    // Segment the largest planar component from the remaining cloud
    seg.setInputCloud (cloud_filtered);
    seg.segment (*inliers, *coefficients);
    if (inliers->indices.size () == 0)
    {
      std::cout << "Could not estimate a planar model for the given dataset." << std::endl;
      break;
    }

    // Extract the planar inliers from the input cloud
    pcl::ExtractIndices<pcl::PointXYZ> extract;
    extract.setInputCloud (cloud_filtered);
    extract.setIndices (inliers);
    extract.setNegative (false);

    // Get the points associated with the planar surface
    extract.filter (*cloud_plane);
    std::cout << "PointCloud representing the planar component: " << cloud_plane->points.size () << " data points." << std::endl;

    // Remove the planar inliers, extract the rest
    extract.setNegative (true);
    extract.filter (*cloud_f);
    *cloud_filtered = *cloud_f;
  }
  */

  // Creating the KdTree object for the search method of the extraction
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
  tree->setInputCloud (cloud_filtered);

  std::vector<pcl::PointIndices> cluster_indices;
  pcl::EuclideanClusterExtraction<pcl::PointXYZ> ec;
  ec.setClusterTolerance (0.02); // 2cm
  ec.setMinClusterSize (100);
  ec.setMaxClusterSize (25000);
  ec.setSearchMethod (tree);
  ec.setInputCloud (cloud_filtered);
  ec.extract (cluster_indices);

  int j = 0;
  for (std::vector<pcl::PointIndices>::const_iterator it = cluster_indices.begin (); it != cluster_indices.end (); ++it)
  {
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_cluster (new pcl::PointCloud<pcl::PointXYZ>);
    for (std::vector<int>::const_iterator pit = it->indices.begin (); pit != it->indices.end (); pit++)
      cloud_cluster->points.push_back (cloud_filtered->points[*pit]); //*
    cloud_cluster->width = cloud_cluster->points.size ();
    cloud_cluster->height = 1;
    cloud_cluster->is_dense = true;

    std::cout << "PointCloud representing the Cluster: " << cloud_cluster->points.size () << " data points." << std::endl;
    //std::stringstream ss;
    //ss << "cloud_cluster_" << j << ".pcd";
    //writer.write<pcl::PointXYZ> (ss.str (), *cloud_cluster, false); //*
    //std::cin.ignore();
    // check if cluster contain interesting us plane, if so, return cluster, next remove plane and we have object :)
    //if (include_plane){
	out_the_biggest_cluster.write(cloud_cluster);
	break;
    //}
    j++;
  }


	
}
示例#26
0
// Incrementally stitch 'points' into the accumulated cloud 'this->stitching'
// using non-linear ICP over points-with-normals.
// @param points  new scan to merge (copied, not modified)
// @param epsilon transformation epsilon used as the ICP convergence criterion
// @param maxCorrespondanceDistance  maximum distance between two matched points
// @return always 0
int PointCloudStitching::stitch(pcl::PointCloud<PointT>& points, double epsilon, double maxCorrespondanceDistance)
{
	// First scan: nothing to align against yet, just seed the accumulator.
	if (this->stitching.size() == 0)
	{
		pcl::copyPointCloud(points, this->stitching);
		return 0;
	}

	//	Compute surface normals and curvature
	pcl::PointCloud<PointT> tempTarget;
	pcl::copyPointCloud(this->stitching, tempTarget);

	pcl::PointCloud<pcl::PointNormal>::Ptr pointsWithNormalSource (new pcl::PointCloud<pcl::PointNormal>);
	pcl::PointCloud<pcl::PointNormal>::Ptr pointsWithNormalTarget (new pcl::PointCloud<pcl::PointNormal>);

	// k-NN (30 neighbours) normal estimation for both source and target.
	pcl::NormalEstimation<PointT, pcl::PointNormal> normalEstimate;
	pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT> ());
	normalEstimate.setSearchMethod(tree);
	normalEstimate.setKSearch(30);

	normalEstimate.setInputCloud(points.makeShared());
	normalEstimate.compute(*pointsWithNormalSource);
	// Copy xyz fields back in (compute() only fills the normal fields).
	pcl::copyPointCloud(points, *pointsWithNormalSource);

	normalEstimate.setInputCloud (tempTarget.makeShared());
	normalEstimate.compute(*pointsWithNormalTarget);
	pcl::copyPointCloud (tempTarget, *pointsWithNormalTarget);

	//	Instantiate custom point representation
	MyPointNormal pointNormal;
	//	... and weight the 'curvature' dimension so that it is balanced against x, y, and z
	float alpha[4] = {1.0, 1.0, 1.0, 1.0};
	pointNormal.setRescaleValues(alpha);

	//	Align
	pcl::IterativeClosestPointNonLinear<pcl::PointNormal, pcl::PointNormal> registration;
	registration.setTransformationEpsilon(epsilon);
	//	Set the maximum distance between two correspondences
	registration.setMaxCorrespondenceDistance(maxCorrespondanceDistance);
	//	Set the point representation
	registration.setPointRepresentation (boost::make_shared<const MyPointNormal> (pointNormal));

	registration.setInputSource(pointsWithNormalSource);
	registration.setInputTarget(pointsWithNormalTarget);
	registration.setMaximumIterations(30);

	PCL_ERROR("Source size: %d  --  Target size: %d\n", (int)pointsWithNormalSource.get()->size(), (int)pointsWithNormalTarget.get()->size());

	Eigen::Matrix4f tf = Eigen::Matrix4f::Identity();
	// NOTE(review): regResult aliases pointsWithNormalSource, so align()
	// below overwrites the source cloud with its aligned copy, and the
	// transformPointCloud() further down is an in-place transform of that
	// same cloud — confirm this aliasing is intended.
	pcl::PointCloud<pcl::PointNormal>::Ptr regResult = pointsWithNormalSource;

	PCL_ERROR("Stitching ... ");
	registration.align(*regResult);
	PCL_ERROR("Done!\n");

	// Inverse of the final source->target transform (i.e. target->source).
	tf = registration.getFinalTransformation().inverse();

//	PCL_ERROR("\nTransform:\n");
//	PCL_ERROR("| %f %f %f %f |\n", tf(0,0), tf(0,1), tf (0,2), tf(0,3));
//	PCL_ERROR("| %f %f %f %f |\n", tf(1,0), tf(1,1), tf (1,2), tf(1,3));
//	PCL_ERROR("| %f %f %f %f |\n", tf(2,0), tf(2,1), tf (2,2), tf(2,3));
//	PCL_ERROR("| %f %f %f %f |\n\n", tf(3,0), tf(3,1), tf (3,2), tf(3,3));

	pcl::transformPointCloud(*pointsWithNormalSource, *regResult, tf);
	// Merge the (transformed) source with the target and store the result.
	*regResult += *pointsWithNormalTarget;

	pcl::copyPointCloud(*regResult, this->stitching);

	return 0;
}
  // Estimate the noise scale of a point set: for a random subset of at most
  // ~1000 points, score each candidate neighbourhood size in 'scales' and
  // keep the best one per point; the median of those choices is returned as
  // the noise scale, and 'size' receives the mean distance to the
  // noise_scale-th nearest neighbour over the subset.
  // NOTE(review): assumes 'points' is non-empty — an empty set would index
  // chosen[0] out of bounds and divide by subset.size() == 0; confirm callers
  // guarantee this.
  unsigned int scale_of_noise (const Point_set& points, double& size)
  {
    Tree tree(points.begin(), points.end());
    
    // Keep every point for small sets, otherwise sample ~1000 points.
    double ratio_kept = (points.size() < 1000)
      ? 1. : 1000. / (points.size());
    
    std::vector<Point> subset;
    for (std::size_t i = 0; i < points.size (); ++ i)
      if (rand() / (double)RAND_MAX < ratio_kept)
    	subset.push_back (points[i]);
    
    // Candidate neighbourhood sizes, in increasing order.
    std::vector<unsigned int> scales;
    generate_scales (std::back_inserter (scales));

    std::vector<unsigned int> chosen;
    
    for (std::size_t i = 0; i < subset.size (); ++ i)
      {
    	Neighbor_search search(tree, subset[i],scales.back());
	double current = 0.;
    	unsigned int nb = 0;
    	std::size_t index = 0;
	double minimum = (std::numeric_limits<double>::max)();
	unsigned int c = 0;
	
	// Walk the neighbours once, accumulating squared distances; every
	// time a candidate scale boundary is reached, score it and keep the
	// scale with the minimal score.
    	for (Search_iterator search_iterator = search.begin();
    	     search_iterator != search.end (); ++ search_iterator, ++ nb)
    	  {
	    current += search_iterator->second;

    	    if (nb + 1 == scales[index])
    	      {
		// NOTE(review): exponent 0.375 = 3/8, but the original note
		// said "NB ^ (5/12)" — the two disagree; confirm against the
		// reference this formula comes from.
		double score = std::sqrt (current / scales[index])
		  / std::pow (scales[index], 0.375); // NB ^ (5/12)

		if (score < minimum)
		  {
		    minimum = score;
		    c = scales[index];
		  }

    		++ index;
    		if (index == scales.size ())
    		  break;
    	      }
    	  }
	chosen.push_back (c);
      }

    // Median of the per-point best scales.
    std::sort (chosen.begin (), chosen.end());

    unsigned int noise_scale = chosen[chosen.size() / 2];
    size = 0.;
    // Average distance to the noise_scale-th neighbour over the subset
    // (search.end() - 1 is the farthest of the noise_scale neighbours).
    for (std::size_t i = 0; i < subset.size (); ++ i)
      {
    	Neighbor_search search(tree, subset[i], noise_scale);
	size += std::sqrt ((-- search.end())->second);
      }
    size /= subset.size();

    
    return noise_scale;
  }
示例#28
0
    // Price a plain-vanilla option on a binomial lattice of type T.
    // Flattens the term structures at the option's maturity, builds the tree,
    // rolls the discretized option back, and computes value/delta/gamma from
    // lattice nodes (Odegaard) plus theta from the Black-Scholes identity.
    void BinomialVanillaEngine<T>::calculate() const {

        DayCounter rfdc  = process_->riskFreeRate()->dayCounter();
        DayCounter divdc = process_->dividendYield()->dayCounter();
        DayCounter voldc = process_->blackVolatility()->dayCounter();
        Calendar volcal = process_->blackVolatility()->calendar();

        Real s0 = process_->stateVariable()->value();
        QL_REQUIRE(s0 > 0.0, "negative or null underlying given");
        // Sample vol and zero rates at the option's last exercise date.
        Volatility v = process_->blackVolatility()->blackVol(
            arguments_.exercise->lastDate(), s0);
        Date maturityDate = arguments_.exercise->lastDate();
        Rate r = process_->riskFreeRate()->zeroRate(maturityDate,
            rfdc, Continuous, NoFrequency);
        Rate q = process_->dividendYield()->zeroRate(maturityDate,
            divdc, Continuous, NoFrequency);
        Date referenceDate = process_->riskFreeRate()->referenceDate();

        // binomial trees with constant coefficient
        Handle<YieldTermStructure> flatRiskFree(
            boost::shared_ptr<YieldTermStructure>(
                new FlatForward(referenceDate, r, rfdc)));
        Handle<YieldTermStructure> flatDividends(
            boost::shared_ptr<YieldTermStructure>(
                new FlatForward(referenceDate, q, divdc)));
        Handle<BlackVolTermStructure> flatVol(
            boost::shared_ptr<BlackVolTermStructure>(
                new BlackConstantVol(referenceDate, volcal, v, voldc)));

        boost::shared_ptr<PlainVanillaPayoff> payoff =
            boost::dynamic_pointer_cast<PlainVanillaPayoff>(arguments_.payoff);
        QL_REQUIRE(payoff, "non-plain payoff given");

        Time maturity = rfdc.yearFraction(referenceDate, maturityDate);

        // Black-Scholes process driven by the flattened curves above.
        boost::shared_ptr<StochasticProcess1D> bs(
                         new GeneralizedBlackScholesProcess(
                                      process_->stateVariable(),
                                      flatDividends, flatRiskFree, flatVol));

        TimeGrid grid(maturity, timeSteps_);

        boost::shared_ptr<T> tree(new T(bs, maturity, timeSteps_,
                                        payoff->strike()));

        boost::shared_ptr<BlackScholesLattice<T> > lattice(
            new BlackScholesLattice<T>(tree, r, maturity, timeSteps_));

        DiscretizedVanillaOption option(arguments_, *process_, grid);

        option.initialize(lattice, maturity);

        // Partial derivatives calculated from various points in the
        // binomial tree (Odegaard)

        // Rollback to third-last step, and get underlying price (s2) &
        // option values (p2) at this point
        option.rollback(grid[2]);
        Array va2(option.values());
        QL_ENSURE(va2.size() == 3, "Expect 3 nodes in grid at second step");
        Real p2h = va2[2]; // high-price
        Real s2 = lattice->underlying(2, 2); // high price

        // Rollback to second-last step, and get option value (p1) at
        // this point
        option.rollback(grid[1]);
        Array va(option.values());
        QL_ENSURE(va.size() == 2, "Expect 2 nodes in grid at first step");
        Real p1 = va[1];

        // Finally, rollback to t=0
        option.rollback(0.0);
        Real p0 = option.presentValue();
        Real s1 = lattice->underlying(1, 1);

        // Calculate partial derivatives
        Real delta0 = (p1-p0)/(s1-s0);   // dp/ds
        Real delta1 = (p2h-p1)/(s2-s1);  // dp/ds

        // Store results
        results_.value = p0;
        results_.delta = delta0;
        results_.gamma = 2.0*(delta1-delta0)/(s2-s0);    //d(delta)/ds
        // Theta via the Black-Scholes PDE from value, delta and gamma.
        results_.theta = blackScholesTheta(process_,
                                           results_.value,
                                           results_.delta,
                                           results_.gamma);
    }
// Service callback: segment the incoming scene via the external segmentation
// service, take the first cluster, and persist three artifacts for it:
//   data/<objectName>_<angle>.pcd      - raw cluster cloud
//   data/<objectName>_<angle>_vfh.pcd  - VFH descriptor
//   data/<objectName>_<angle>.csv      - CPH descriptor
// Fix: the original fell off the end of a bool-returning function (undefined
// behaviour); it now returns true so ROS reports the call as succeeded.
bool cloud_cb(data_collection::process_cloud::Request &req,
	      data_collection::process_cloud::Response &res)
{
  //Segment from cloud:
  //May need to tweak segmentation parameters to get just the cluster I'm looking for.
  nrg_object_recognition::segmentation seg_srv;

  seg_srv.request.scene = req.in_cloud;
  // Crop-box bounds passed to the segmentation service.
  seg_srv.request.min_x = -.75, seg_srv.request.max_x = .4;
  seg_srv.request.min_y = -5, seg_srv.request.max_y = .5;
  seg_srv.request.min_z = 0.0, seg_srv.request.max_z = 1.15;
  seg_client.call(seg_srv);

  // Use the first cluster returned by the service.
  pcl::PointCloud<pcl::PointXYZ>::Ptr cluster (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::fromROSMsg(seg_srv.response.clusters.at(0), *cluster);
  std::cout << "cluster has " << cluster->height*cluster->width << " points.\n";

  //Write raw pcd file (objecName_angle.pcd)
  std::stringstream fileName_ss;
  fileName_ss << "data/" << req.objectName << "_" << req.angle << ".pcd";
  std::cout << "writing raw cloud to file...\n";
  std::cout << fileName_ss.str() << std::endl;
  pcl::io::savePCDFile(fileName_ss.str(), *cluster);
  std::cout << "done.\n";

  //Write vfh feature to file:
  pcl::VFHEstimation<pcl::PointXYZ, pcl::Normal, pcl::VFHSignature308> vfh;
  vfh.setInputCloud (cluster);
  // Estimate surface normals (3 cm radius) required by VFH.
  pcl::NormalEstimation<pcl::PointXYZ, pcl::Normal> ne;
  ne.setInputCloud (cluster);
  pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ> ());
  ne.setSearchMethod (tree);
  pcl::PointCloud<pcl::Normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::Normal>);
  ne.setRadiusSearch (0.03);
  ne.compute (*cloud_normals);
  vfh.setInputNormals (cloud_normals);
  //Estimate vfh:
  vfh.setSearchMethod (tree);
  pcl::PointCloud<pcl::VFHSignature308>::Ptr vfhs (new pcl::PointCloud<pcl::VFHSignature308> ());
  // Compute the feature
  vfh.compute (*vfhs);
  //Write to file: (objectName_angle_vfh.pcd)
  fileName_ss.str("");
  std::cout << "writing vfh descriptor to file...\n";
  fileName_ss << "data/" << req.objectName << "_" << req.angle << "_vfh.pcd";
  pcl::io::savePCDFile(fileName_ss.str(), *vfhs);
  std::cout << "done.\n";

  //Extract cph (5 stacks x 72 bins) and write it as space-separated CSV.
  std::vector<float> feature;
  CPHEstimation cph(5,72);
  cph.setInputCloud(cluster);
  cph.compute(feature);
  //Write cph to file. (objectName_angle.csv)
  std::ofstream outFile;
  fileName_ss.str("");
  fileName_ss << "data/" << req.objectName << "_" << req.angle << ".csv";
  outFile.open(fileName_ss.str().c_str());
  std::cout << "writing cph descriptor to file...\n";
  for(unsigned int j=0; j<feature.size(); j++){
	outFile << feature.at(j) << " "; 
  }
  outFile.close();
  std::cout << "done.\n";
  res.result = 1;
  return true; // fix: missing return value
}
示例#30
-1
    // Price a barrier option on a binomial lattice of type T using the
    // discretized-asset class D.  Flattens the term structures at maturity,
    // optionally adjusts the number of time steps with the Boyle-Lau rule so
    // a lattice layer falls near the barrier, rolls the option back, and
    // derives value/delta/gamma/theta from lattice nodes (Hull).
    void BinomialBarrierEngine<T,D>::calculate() const {

        DayCounter rfdc  = process_->riskFreeRate()->dayCounter();
        DayCounter divdc = process_->dividendYield()->dayCounter();
        DayCounter voldc = process_->blackVolatility()->dayCounter();
        Calendar volcal = process_->blackVolatility()->calendar();

        Real s0 = process_->stateVariable()->value();
        QL_REQUIRE(s0 > 0.0, "negative or null underlying given");
        // Sample vol and zero rates at the option's last exercise date.
        Volatility v = process_->blackVolatility()->blackVol(
            arguments_.exercise->lastDate(), s0);
        Date maturityDate = arguments_.exercise->lastDate();
        Rate r = process_->riskFreeRate()->zeroRate(maturityDate,
            rfdc, Continuous, NoFrequency);
        Rate q = process_->dividendYield()->zeroRate(maturityDate,
            divdc, Continuous, NoFrequency);
        Date referenceDate = process_->riskFreeRate()->referenceDate();

        // binomial trees with constant coefficient
        Handle<YieldTermStructure> flatRiskFree(
            boost::shared_ptr<YieldTermStructure>(
                new FlatForward(referenceDate, r, rfdc)));
        Handle<YieldTermStructure> flatDividends(
            boost::shared_ptr<YieldTermStructure>(
                new FlatForward(referenceDate, q, divdc)));
        Handle<BlackVolTermStructure> flatVol(
            boost::shared_ptr<BlackVolTermStructure>(
                new BlackConstantVol(referenceDate, volcal, v, voldc)));

        boost::shared_ptr<StrikedTypePayoff> payoff =
            boost::dynamic_pointer_cast<StrikedTypePayoff>(arguments_.payoff);
        QL_REQUIRE(payoff, "non-striked payoff given");

        Time maturity = rfdc.yearFraction(referenceDate, maturityDate);

        // Black-Scholes process driven by the flattened curves above.
        boost::shared_ptr<StochasticProcess1D> bs(
                         new GeneralizedBlackScholesProcess(
                                      process_->stateVariable(),
                                      flatDividends, flatRiskFree, flatVol));

        // correct timesteps to ensure a (local) minimum, using Boyle and Lau
        // approach. See Journal of Derivatives, 1/1994,
        // "Bumping up against the barrier with the binomial method"
        // Note: this approach works only for CoxRossRubinstein lattices, so
        // is disabled if T is not a CoxRossRubinstein or derived from it.
        Size optimum_steps = timeSteps_;
        if (boost::is_base_of<CoxRossRubinstein, T>::value && 
            maxTimeSteps_ > timeSteps_ && s0 > 0 && arguments_.barrier > 0) {
            Real divisor;
            if (s0 > arguments_.barrier)
               divisor = std::pow(std::log(s0 / arguments_.barrier), 2);
            else
               divisor = std::pow(std::log(arguments_.barrier / s0), 2);
            if (!close(divisor,0)) {
                // Scan for the smallest Boyle-Lau step count >= timeSteps_.
                for (Size i=1; i < timeSteps_ ; ++i) {
                    Size optimum = Size(( i*i * v*v * maturity) / divisor);
                    if (timeSteps_ < optimum) {
                        optimum_steps = optimum;
                        break; // found first minimum with iterations>=timesteps
                    }
                }
            }

            if (optimum_steps > maxTimeSteps_) 
               optimum_steps = maxTimeSteps_; // too high, limit
        }

        TimeGrid grid(maturity, optimum_steps);

        boost::shared_ptr<T> tree(new T(bs, maturity, optimum_steps,
                                        payoff->strike()));

        boost::shared_ptr<BlackScholesLattice<T> > lattice(
            new BlackScholesLattice<T>(tree, r, maturity, optimum_steps));

        D option(arguments_, *process_, grid);
        option.initialize(lattice, maturity);

        // Partial derivatives calculated from various points in the
        // binomial tree 
        // (see J.C.Hull, "Options, Futures and other derivatives", 6th edition, pp 397/398)

        // Rollback to third-last step, and get underlying prices (s2) &
        // option values (p2) at this point
        option.rollback(grid[2]);
        Array va2(option.values());
        QL_ENSURE(va2.size() == 3, "Expect 3 nodes in grid at second step");
        Real p2u = va2[2]; // up
        Real p2m = va2[1]; // mid
        Real p2d = va2[0]; // down (low)
        Real s2u = lattice->underlying(2, 2); // up price
        Real s2m = lattice->underlying(2, 1); // middle price
        Real s2d = lattice->underlying(2, 0); // down (low) price

        // calculate gamma by taking the first derivate of the two deltas
        Real delta2u = (p2u - p2m)/(s2u-s2m);
        Real delta2d = (p2m-p2d)/(s2m-s2d);
        Real gamma = (delta2u - delta2d) / ((s2u-s2d)/2);

        // Rollback to second-last step, and get option values (p1) at
        // this point
        option.rollback(grid[1]);
        Array va(option.values());
        QL_ENSURE(va.size() == 2, "Expect 2 nodes in grid at first step");
        Real p1u = va[1];
        Real p1d = va[0];
        Real s1u = lattice->underlying(1, 1); // up (high) price
        Real s1d = lattice->underlying(1, 0); // down (low) price

        Real delta = (p1u - p1d) / (s1u - s1d);

        // Finally, rollback to t=0
        option.rollback(0.0);
        Real p0 = option.presentValue();

        // Store results
        results_.value = p0;
        results_.delta = delta;
        results_.gamma = gamma;
        // theta can be approximated by calculating the numerical derivative
        // between mid value at third-last step and at t0. The underlying price
        // is the same, only time varies.
        results_.theta = (p2m - p0) / grid[2];
    }