Example no. 1
Geometry* GeometryCombiner::combine(const Geometry* g0, const Geometry* g1)
{
    std::vector<Geometry*> geoms;
    geoms.push_back(const_cast<Geometry*>(g0));
    geoms.push_back(const_cast<Geometry*>(g1));

    GeometryCombiner combiner(geoms);
    return combiner.combine();
}
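A minimal usage sketch for this helper, assuming an older raw-pointer GEOS API (newer releases hand out smart pointers from WKTReader::read). The const_casts above exist because the combiner reads but never owns its inputs, so both geometries must outlive the returned collection:

#include <geos/geom/Geometry.h>
#include <geos/geom/util/GeometryCombiner.h>
#include <geos/io/WKTReader.h>
#include <memory>

int main()
{
    geos::io::WKTReader reader;
    // hypothetical inputs; any two geometries from the same factory work
    std::unique_ptr<geos::geom::Geometry> g0(reader.read("POINT (0 0)"));
    std::unique_ptr<geos::geom::Geometry> g1(reader.read("LINESTRING (0 0, 1 1)"));

    // the result is a newly allocated collection owned by the caller;
    // the inputs are not consumed
    std::unique_ptr<geos::geom::Geometry> combined(
        geos::geom::util::GeometryCombiner::combine(g0.get(), g1.get()));
    return 0;
}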
Example no. 2
int main() {
	int restoreState = 1;
	
	if (restoreState == 0) {
		generateVertical("");
		
		printf("LeftBlock count: %d\n",leftBlock.size());
		printf("RightBlock count: %d\n",rightBlock.size());
		printf("MidBlock count: %d\n",midBlock.size());
		
		for (int i=0; i<2; i++) {
			printf("Running iteration %d...\n",i);
			combiner(LEFT);
			combiner(RIGHT);
			combiner(MIDDLE);
		}
		
		printf("sorting...\n");
		std::sort(leftBlock.begin(), leftBlock.end());
		std::sort(rightBlock.begin(), rightBlock.end());
		std::sort(midBlock.begin(), midBlock.end());
		
		// Output blocks to file to speed up startup
		saveBlocks();
	} else {
		// Load blocks from last run
		loadBlocks();
		// Load last iteration
		loadRootIt();
	}
	
	printf("\nLeftBlock count: %d\n",leftBlock.size());
	printf("RightBlock count: %d\n",rightBlock.size());
	printf("MidBlock count: %d\n",midBlock.size());
	printf("L: %d  M: %d  R: %d\n",leftWidth,midWidth,rightWidth);
	
	maxLevel = (puzzleWidth-leftWidth-rightWidth)/midWidth + 2;
	printf("Max Level: %d\n",maxLevel);
	solver("",0);
	
	// Close solutions file
	outFile.close();
	return 0;
}
Example no. 3
		constexpr V fold_reverse (C combiner, V value) const
		{
			auto remainingLength = encoding.length();
			while (remainingLength > 0)
			{
				const auto decoding = decode_reverse(encoding.data(), remainingLength);
				value = combiner(std::move(value), std::get<0>(decoding));
				remainingLength -= std::get<1>(decoding);
			}
			return value;
		}
Example no. 4
int main(int argc, char *argv[]) {
    int num_procs;
    int c, i, j, msqid;
    int proc_done=0;
    int child_msqids[21];   /* indexed 1..num_procs (max 20), so one extra slot */

    if ((c = getopt(argc, argv, "n:")) != -1) {
        if (c == 'n') {
            num_procs = atoi(optarg);
            if (num_procs > 20) {
                //printf("Number of processes should not exceed 20. Setting to 20.\n");
                num_procs = 20;
            }
        }
        else num_procs = 1;
    }
    else num_procs = 1;
    

    //Create parser-to-sorters msg queue
    msqid = msgget(IPC_PRIVATE, IPC_CREAT | IPC_EXCL | 0666);

    //Create sorters-to-combiner msg queues and fork the sorter children
    for (i = 1; i < num_procs + 1; i++) {
        child_msqids[i] = msgget(IPC_PRIVATE, IPC_CREAT | IPC_EXCL | 0666);

        fork_child(msqid, child_msqids[i], i);
    }

    //Parse while children running
    parser(msqid, num_procs, child_msqids);

    //Combine while children running
    combiner(num_procs, child_msqids);

    //Reap the children
    for (j = 1; j < num_procs + 1; j++) {
        if (wait(NULL) != -1) proc_done++;
    }

    // printf("Number of processes done: %d\n", proc_done);
 
    //Combine after the children are done -- nope doesn't work
    //  combiner(num_procs, child_msqids);
 
    //Destroy the parser-to-sorters message queue
    if (proc_done == num_procs) {
        msgctl(msqid, IPC_RMID, NULL);
    }

    exit(EXIT_SUCCESS);    /* the return below it replaced was unreachable */
}
Example no. 5
		constexpr V fold (C combiner, V value) const
		{
			std::size_t index = 0;
			auto remainingLength = encoding.length();
			while (remainingLength > 0)
			{
				const auto decoding = decode(encoding.data() + index);
				value = combiner(std::move(value), std::get<0>(decoding));
				index += std::get<1>(decoding);
				remainingLength -= std::get<1>(decoding);
			}
			return value;
		}
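Examples 3 and 5 are the reverse and forward variants of the same fold: decode one code point, feed it to the combiner, advance by the number of bytes consumed. A self-contained analogue showing how such a fold is typically invoked, here counting code points (the decode() below is a minimal UTF-8 decoder written for this sketch, not the snippet's API):

#include <cstddef>
#include <iostream>
#include <string>
#include <tuple>
#include <utility>

// Returns {code point, bytes consumed} for a well-formed UTF-8 sequence.
static std::tuple<char32_t, std::size_t> decode(const char* s)
{
    const unsigned char b0 = static_cast<unsigned char>(s[0]);
    auto cont = [&](int i) { return static_cast<unsigned char>(s[i]) & 0x3F; };
    if (b0 < 0x80) return {b0, 1};
    if (b0 < 0xE0) return {char32_t((b0 & 0x1F) << 6 | cont(1)), 2};
    if (b0 < 0xF0) return {char32_t((b0 & 0x0F) << 12 | cont(1) << 6 | cont(2)), 3};
    return {char32_t((b0 & 0x07) << 18 | cont(1) << 12 | cont(2) << 6 | cont(3)), 4};
}

// Same loop shape as the member function above, as a free function.
template <typename C, typename V>
V fold(const std::string& encoding, C combiner, V value)
{
    std::size_t index = 0;
    auto remainingLength = encoding.length();
    while (remainingLength > 0)
    {
        const auto decoding = decode(encoding.data() + index);
        value = combiner(std::move(value), std::get<0>(decoding));
        index += std::get<1>(decoding);
        remainingLength -= std::get<1>(decoding);
    }
    return value;
}

int main()
{
    const std::string text = "na\xC3\xAFve";  // "naïve": 5 code points, 6 bytes
    auto count = fold(text,
                      [](std::size_t n, char32_t) { return n + 1; },
                      std::size_t(0));
    std::cout << count << '\n';  // prints 5
}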
Example no. 6
		constexpr std::tuple<V, utf8_view, utf8_view> fold_reverse (C combiner, V value) const
		{
			auto keepOn = true;
			auto remainingLength = encoding.length();
			while (remainingLength > 0 and keepOn)
			{
				const auto decoding = decode_reverse(encoding.data(), remainingLength);
				std::tie(value, keepOn) = combiner(std::move(value), std::get<0>(decoding));
				if (keepOn)
				{
					remainingLength -= std::get<1>(decoding);
				}
			}
			const auto unfolded = utf8_view(encoding.data(), remainingLength);
			const auto folded = utf8_view(encoding.data() + remainingLength, encoding.length() - remainingLength);
			return std::make_tuple(std::move(value), unfolded, folded);
		}
Example no. 7
int
main ()
{
    pixman_implementation_t *impl;
    argb_t *src_bytes = malloc (WIDTH * sizeof (argb_t));
    argb_t *mask_bytes = malloc (WIDTH * sizeof (argb_t));
    argb_t *dest_bytes = malloc (WIDTH * sizeof (argb_t));
    int i;

    enable_divbyzero_exceptions();
    
    impl = _pixman_internal_only_get_implementation();
    
    prng_srand (0);

    for (i = 0; i < ARRAY_LENGTH (op_list); ++i)
    {
	pixman_op_t op = op_list[i];
	pixman_combine_float_func_t combiner;
	int ca;

	for (ca = 0; ca < 2; ++ca)
	{
	    combiner = lookup_combiner (impl, op, ca);

	    random_floats (src_bytes, WIDTH);
	    random_floats (mask_bytes, WIDTH);
	    random_floats (dest_bytes, WIDTH);

	    combiner (impl, op,
		      (float *)dest_bytes,
		      (float *)mask_bytes,
		      (float *)src_bytes,
		      WIDTH);
	}
    }	

    return 0;
}
Example no. 8
int Indicator_kriging::full_ik( Progress_notifier* progress_notifier ) {
  bool ok = true;
  bool order_relation_problems = false;

  // create all the properties we will populate
  std::vector< Grid_continuous_property* > simul_properties;
  for( int thres = 0; thres < thres_count_; thres++ ) {
    Grid_continuous_property* prop = multireal_property_->new_realization();
    prop->set_parameters(parameters_);
    simul_properties.push_back( prop );
  }

  std::vector<double> krig_weights;
  SK_constraints Kconstraints;
  typedef std::vector<double>::const_iterator weight_iterator;
  typedef SK_combiner< weight_iterator, Neighborhood > SKCombiner;

  // the following line could probably be omitted
  simul_grid_->select_property( simul_properties[0]->name() );

  Geostat_grid::iterator begin = simul_grid_->begin();
  Geostat_grid::iterator end = simul_grid_->end();

  // loop over the grid

  for( ; begin != end; ++begin ) {
    if( !progress_notifier->notify() ) return 1;

    if( begin->is_informed() ) continue;
    

    // for each threshold / class:
    Non_parametric_cdf<float>::p_iterator p_it = ccdf_->p_begin();
    for( int thres = 0; thres < thres_count_; thres++, ++p_it ) {
      neighborhoods_vector_[thres]->find_neighbors( *begin );
 
      if( neighborhoods_vector_[thres]->is_empty() ){
        //if we don't have any conditioning data, use the marginal
        *p_it = marginal_probs_[thres];
        continue;
      }
      
      int status = kriging_weights( krig_weights, *begin,
                                    *(neighborhoods_vector_[thres].raw_ptr()),
                                    covar_vector_[thres], Kconstraints );
      
      if( status != 0 ) {
        // the kriging system could not be solved, issue a warning and skip the
        // node
        ok = false;
        *p_it = marginal_probs_[thres];
        continue;
      }
      
      SKCombiner combiner( marginal_probs_[thres] );
      double estimate = combiner( krig_weights.begin(), krig_weights.end(),
                                  *(neighborhoods_vector_[thres].raw_ptr()) );
      *p_it = estimate;
    }

    // make sure the ccdf is a valid cdf
    if( !ccdf_->make_valid() ) {
      // there was a problem making the cdf a valid cdf:
      // leave the node un-estimated and set the flag so that an error will
      // be reported
      order_relation_problems = true; 
      continue;
    }

    GsTLInt node_id = begin->node_id();

    // output the ccdf probabilities to the grid properties
    p_it = ccdf_->p_begin();
    for( int thres2 = 0; thres2 < thres_count_; thres2++, ++p_it ) {
      simul_properties[ thres2 ]->set_value( *p_it, node_id );
    }
  }


  if( !ok )
    GsTLcerr << "The kriging system could not be solved for every node\n" 
             << gstlIO::end; 
  if( order_relation_problems ) {
    GsTLcerr << "A cdf could not be estimated for all nodes because of major "
             << "order-relation problems (all probabilities < 0 )" 
             << gstlIO::end;
  }

  return 0;
}
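For orientation, the SK_combiner used at each threshold computes a simple-kriging estimate: the marginal probability acts as the known mean and absorbs whatever weight the kriging system did not assign to the data. A minimal sketch of that computation (names like property_value() are illustrative assumptions, not the GsTL implementation):

// Sketch: estimate = sum_i w_i * z_i  +  m * (1 - sum_i w_i)
template <class WeightIterator, class Neighborhood>
double sk_combine( WeightIterator begin, WeightIterator end,
                   const Neighborhood& neighbors, double marginal )
{
    double estimate = 0.0;
    double sum_of_weights = 0.0;
    typename Neighborhood::const_iterator neigh_it = neighbors.begin();
    for( ; begin != end; ++begin, ++neigh_it ) {
        estimate += (*begin) * neigh_it->property_value();
        sum_of_weights += *begin;
    }
    // the mean "fills in" the weight not assigned to the data
    return estimate + marginal * ( 1.0 - sum_of_weights );
}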
Example no. 9
int Indicator_kriging::median_ik( Progress_notifier* progress_notifier ) {
  bool ok = true;
  
  // create all the properties we will populate
  std::vector< Grid_continuous_property* > simul_properties;
  for( int thres = 0; thres < thres_count_; thres++ ) {
    Grid_continuous_property* prop = multireal_property_->new_realization();
    prop->set_parameters(parameters_);
    simul_properties.push_back( prop );
  }

  std::vector<double> krig_weights;
  SK_constraints Kconstraints;
  typedef std::vector<double>::const_iterator weight_iterator;
  typedef SK_combiner< weight_iterator, Neighborhood > SKCombiner;

  // the following line could probably be omitted
  simul_grid_->select_property( simul_properties[0]->name() );

  Geostat_grid::iterator begin = simul_grid_->begin();
  Geostat_grid::iterator end = simul_grid_->end();
  
  
  for( ; begin != end; ++begin ) {
    if( !progress_notifier->notify() ) return 1;

    if( begin->is_informed() ) continue;
      
    neighborhood_->find_neighbors( *begin );
//    if( neighborhood_->is_empty() ){
    if( neighborhood_->size() < min_neigh_ ){
      //if we don't have enough conditioning data, skip the node
      continue;
    }
    else {
      int status = kriging_weights( krig_weights, *begin,
                                    *(neighborhood_.raw_ptr()),
                                    covar_, Kconstraints );
      
      if(status == 0) {
        // the kriging system could be solved
        // Since we're using the same covariance and the
        // same neighborhood for all thresholds, we can re-use the same
        // weights for all thresholds

        GsTLInt node_id = begin->node_id();
        Non_parametric_cdf<float>::p_iterator p_it = ccdf_->p_begin();

        for( int thres = 0; thres < thres_count_; thres++, ++p_it ) {

          // tell the neighbors to work on the correct property
          for( Neighborhood::iterator it = neighborhood_->begin();
               it != neighborhood_->end(); it++ ) {
            it->set_property_array( hdata_properties_[ thres ] );
          }

          SKCombiner combiner( marginal_probs_[thres] );
          double estimate = combiner( krig_weights.begin(), krig_weights.end(),
                                      *(neighborhood_.raw_ptr()) );
          *p_it = estimate;
        }

        // make sure the ccdf is a valid cdf
        ccdf_->make_valid();

        // output the ccdf probabilities to the grid properties
        p_it = ccdf_->p_begin();
        for( int thres2 = 0; thres2 < thres_count_; thres2++, ++p_it ) {
          simul_properties[ thres2 ]->set_value( *p_it, node_id );
        }
      }
      else {
      	// the kriging system could not be solved, issue a warning and skip the
      	// node
        ok = false;
      }
    }
  }

  if( !ok )
    GsTLcerr << "The kriging system could not be solved for every node\n" << gstlIO::end; 


  return 0;
}
Example no. 10
int main(int argc, char *argv[]){
  int i;
  int c;   /* getopt() returns int; a plain char may never compare equal to -1 */
  key_t *sortKeys;
  struct sigaction action;
  action.sa_handler = sigHandler;
  sigemptyset(&action.sa_mask);   /* initialize the remaining fields */
  action.sa_flags = 0;
  num_sort = 1;

  while ((c = getopt (argc, argv, "n:")) != -1)
  {
    switch (c)
    {
    case 'n':
      if(atoi(optarg) < 1 || atoi(optarg) > 50){
        printf("invalid No. of sort processes, defaulting to 1\n");
        num_sort = 1;
      }
      else{
        num_sort = atoi(optarg);
      }
      break;
    default:
      break;
    }
  }

  sigaction(SIGINT, &action, NULL);

  // create memory for key and queue arrays
  sortKeys = malloc(sizeof(key_t)*num_sort);
  assert(sortKeys);
  combiner_queues = malloc(sizeof(int)*num_sort);
  assert(combiner_queues);


  create_parser_queue( /* no args */);

  create_combiner_queues(sortKeys);

  // calling sort before the parser
  // seems to give better results, i.e. it works
  for(i = 0; i < num_sort; i++){
    sort(sortKeys[i], i+1);
  }

  // create parser
  create_parser(/* no args */);

  // wait for the parser to finish
  wait(NULL);

  combiner(/* no args */);

  // wait for n - sort processes to finish
  for(i = 0; i < num_sort; i++){
    wait(NULL);
  }

  // remove combiner queues
  for(i = 0; i < num_sort; i++){
   msgctl(combiner_queues[i], IPC_RMID, NULL);
  }

  // remove parser queue
  msgctl(message_queue, IPC_RMID, NULL);

  exit(EXIT_SUCCESS);
}
Example no. 11
int main(int argc, char **argv) {
  // On successful completion, print out the output file sizes.
  std::vector<std::string> output_files;
  try {
    std::string progname = argv[0];

    // Process commandline options
    int argn;
    bool help = false;
    std::string outdir;
    int index_version = 0;
    int sortbuf = kDefaultSortBufferMegabytes;
    uint32 numcpus = kDefaultNumCPUs;
    uint32 read_cache_max_blocks = kDefaultReadCacheBlocks;
    uint32 read_cache_block_size = kDefaultReadCacheBlockKilobyteSize;

    khGetopt options;
    options.flagOpt("help", help);
    options.flagOpt("?", help);
    options.opt("output", outdir);
    options.opt("indexversion", index_version);
    options.opt("sortbuf", sortbuf);
    options.opt("numcpus", numcpus,
                &khGetopt::RangeValidator<uint32, 1, kMaxNumJobsLimit_2>);
    options.opt("read_cache_max_blocks", read_cache_max_blocks,
                &khGetopt::RangeValidator<uint32, 0, 1024>);
    options.opt("read_cache_block_size", read_cache_block_size,
                &khGetopt::RangeValidator<uint32, 1, 1024>);

    if (!options.processAll(argc, argv, argn)) {
      usage(progname);
    }
    if (help) {
      usage(progname);
    }
    if (argn == argc) {
      usage(progname, "No input indexes specified");
    }

    numcpus = std::min(numcpus, CommandlineNumCPUsDefault());

    // Validate commandline options
    if (!outdir.size()) {
      usage(progname, "No output specified");
    }
    if (index_version <= 0) {
      usage(progname, "Index version not specified or <= 0");
    }
    if (numcpus < 1) {
      usage(progname, "Number of CPUs should not be less than 1");
    }
    if (sortbuf <= 0) {
      notify(NFY_FATAL, "--sortbuf must be > 0, is %d", sortbuf);
    }

    // Create a merge of the terrain indices
    JOBSTATS_BEGIN(job_stats, MERGER_CREATED);    // validate

    // We'll need to limit the number of filebundles opened by the filepool
    // at a single time, to keep from overflowing memory.
    // Allow 50 files for other operations outside the filepool.
    int max_open_fds = GetMaxFds(-50);

    // Read Cache is enabled only if read_cache_max_blocks is >= 2.
    if (read_cache_max_blocks < 2) {
      notify(NFY_WARN, "Read caching is disabled. This will cause %s "
                       "to be much slower. To enable, set the "
                       "read_cache_max_blocks setting\n"
                       "to a number 2 or greater.\n", argv[0]);
    } else {
      // Get the physical memory size to help choose the read_cache_max_blocks.
      uint64 physical_memory_size = GetPhysicalMemorySize();
      if (physical_memory_size == 0) {
        physical_memory_size = kDefaultMinMemoryAssumed;
        notify(NFY_WARN, "Physical Memory available not found. "
               "Assuming min recommended system size: %llu bytes",
               static_cast<long long unsigned int>(physical_memory_size));
      } else {
        notify(NFY_NOTICE, "Physical Memory available: %llu bytes",
               static_cast<long long unsigned int>(physical_memory_size));
      }

      // Convert this read cache block size from kilobytes to bytes.
      read_cache_block_size *= 1024U;

      // Figure out the worst case size of the read cache
      // (if all of max_open_fds are open simultaneously)
      uint64 estimated_read_cache_bytes = max_open_fds *
        static_cast<uint64>(read_cache_max_blocks * read_cache_block_size);
      notify(NFY_NOTICE,
             "Read Cache Settings: %u count %u byte blocks per resource "
             "(max files open set to %u)\n"
             "This will use approximately %llu bytes in memory.",
             read_cache_max_blocks, read_cache_block_size, max_open_fds,
             static_cast<long long unsigned int>(estimated_read_cache_bytes));
      if (estimated_read_cache_bytes > physical_memory_size) {
        // If our worst case read cache blows out our memory, then
        // lower the max_open_fds to bring it to within 90% of the memory.
        // Be careful with overflow here.
        max_open_fds = (physical_memory_size * 90ULL)/
          (100ULL * read_cache_max_blocks * read_cache_block_size);
        notify(NFY_WARN, "The estimated read cache size (%llu bytes) exceeds\n"
                         "the Physical Memory available: %llu bytes.\n"
                         "We are reducing the max files open to %d to eliminate"
                         "memory overruns.\n",
               static_cast<long long unsigned int>(estimated_read_cache_bytes),
               static_cast<long long unsigned int>(physical_memory_size),
               max_open_fds);
      }
    }

    geFilePool file_pool(max_open_fds);
    geterrain::CountedPacketFileReaderPool packet_reader_pool(
        "TerrainReaderPool",
        file_pool);
    // Note: read caches will not work without at least 2 blocks.
    if (read_cache_max_blocks >= 2) {
      packet_reader_pool.EnableReadCache(read_cache_max_blocks,
                                         read_cache_block_size);
    }

    khDeleteGuard<TerrainMergeType> merger(
        TransferOwnership(new TerrainMergeType("Terrain Merger")));

    // Print the input file sizes for diagnostic log file info.
    std::vector<std::string> input_files;

    fprintf(stderr, "index version: %d\n", index_version);
    for (int i = argn; i < argc; ++i) {
      notify(NFY_INFO, "Opening terrain index: %s", argv[i]);

      merger->AddSource(
          TransferOwnership(
              new TranslatingTerrainTraverser(&packet_reader_pool,
                                              argv[i])));
      input_files.push_back(argv[i]);
    }
    khPrintFileSizes("Input File Sizes", input_files);

    merger->Start();
    JOBSTATS_END(job_stats, MERGER_CREATED);

    // Feed this merge into a QuadsetGather operation
    JOBSTATS_BEGIN(job_stats, GATHERER_CREATED);    // validate

    qtpacket::QuadsetGather<geterrain::TerrainPacketItem>
      gather("TerrainQuadsetGather", TransferOwnership(merger));

    // Create the output packetfile
    geterrain::TerrainCombiner combiner(packet_reader_pool, outdir, numcpus);
    combiner.StartThreads();
    notify(NFY_DEBUG, "started combineterrain");

    // We need to wrap the combiner with a try/catch because otherwise the
    // exception causes a destructor failure which masks the real error,
    // which could be a CRC error in one of the terrain packets.
    std::string error_message;
    try {
      do {
        combiner.CombineTerrainPackets(gather.Current());
      } while (gather.Advance());
    } catch (const khAbortedException &e) {
      notify(NFY_FATAL, "Unable to proceed: See previous warnings: %s",
             e.what());
    } catch (const std::exception &e) {
      notify(NFY_FATAL, "%s", e.what());
    } catch (...) {
      notify(NFY_FATAL, "Unknown error");
    }

    notify(NFY_DEBUG, "waiting for compress and write threads to finish");
    combiner.WaitForThreadsToFinish();
    notify(NFY_DEBUG, "closing the gatherer");
    gather.Close();
    JOBSTATS_END(job_stats, GATHERER_CREATED);

    // Finish the packet file
    JOBSTATS_BEGIN(job_stats, COMBINE);    // validate
    notify(NFY_DEBUG, "writing the packet index");
    combiner.Close(static_cast<size_t>(sortbuf) * 1024 * 1024);
    JOBSTATS_END(job_stats, COMBINE);
    // On successful completion, print the output file sizes.
    output_files.push_back(outdir);
  } catch (const khAbortedException &e) {
    notify(NFY_FATAL, "Unable to proceed: See previous warnings");
  } catch (const std::exception &e) {
    notify(NFY_FATAL, "%s", e.what());
  } catch (...) {
    notify(NFY_FATAL, "Unknown error");
  }
  // at the end, call dump all
  JOBSTATS_DUMPALL();

  // On successful completion, print the output file sizes.
  // The print occurs here to allow progress to go out of scope.
  khPrintFileSizes("Output File Sizes", output_files);
  return 0;
}
Example no. 12
static void readAnimated( imageCache &cache, PngInfo& png ){
	auto width  = png.width();
	auto height = png.height();
	png_uint_32 x_offset=0, y_offset=0;
	png_uint_16 delay_num=0, delay_den=0;
	png_byte dispose_op = PNG_DISPOSE_OP_NONE, blend_op = PNG_BLEND_OP_SOURCE;
	
	QImage canvas( width, height, QImage::Format_ARGB32 );
	canvas.fill( qRgba( 0,0,0,0 ) );
	AnimCombiner combiner( canvas );
	
	if( setjmp( png_jmpbuf( png.png ) ) )
		return;
	
	unsigned repeats = png_get_num_plays( png.png, png.info );
	unsigned frames = png_get_num_frames( png.png, png.info );
	
	//NOTE: We discard the frame if it is not a part of the animation
	if( png_get_first_frame_is_hidden( png.png, png.info ) ){
		readImage( png, width, height );
		--frames; //libpng reports the total number of images, including the hidden one
	}
	
	cache.set_info( frames, true, repeats>0 ? repeats-1 : -1 );
	
	for( unsigned i=0; i < frames; ++i ){
		png_read_frame_head( png.png, png.info );
		
		if( png_get_valid( png.png, png.info, PNG_INFO_fcTL ) ){
			png_get_next_frame_fcTL( png.png, png.info
				,	&width, &height
				,	&x_offset, &y_offset
				,	&delay_num, &delay_den
				,	&dispose_op, &blend_op
				);
		}
		else{
			width  = png.width();
			height = png.height();
		}
		
		readImage( png, width, height, i==0 );
		
		//Calculate delay
		delay_den = delay_den==0 ? 100 : delay_den;
		unsigned delay = std::ceil( (double)delay_num / delay_den * 1000 );
		if( delay == 0 )
			delay = 1; //Fastest speed we support
		
		//Compose and add
		//PNG_BLEND_OP_SOURCE replaces the region outright; PNG_BLEND_OP_OVER composites onto it
		auto blend_mode = blend_op == PNG_BLEND_OP_SOURCE ? BlendMode::REPLACE : BlendMode::OVERLAY;
		auto dispose_mode = [=](){ switch( dispose_op ){
				case PNG_DISPOSE_OP_NONE:       return DisposeMode::NONE;
				case PNG_DISPOSE_OP_BACKGROUND: return DisposeMode::BACKGROUND;
				case PNG_DISPOSE_OP_PREVIOUS:   return DisposeMode::REVERT;
				default: return DisposeMode::NONE; //TODO: add error
			} }();
		QImage output = combiner.combine( png.frame, x_offset, y_offset, blend_mode, dispose_mode );
		cache.add_frame( output, delay );
	}
}
Example no. 13
//
// case 0: depart-level
//	dep-id => [pid,...] => [{pid,*,*}, ...]
//	host=[auto]
// case 1: pid-level
//	pid, *, *
//	host=[auto]
// case 2: mid-level
//	pid, mid, *
//	host=[auto]
// case 3: host-level
//	pid, mid, *
//	host=ip
// case 4: expand-to-individual-hosts
//	pid,mid,iid as in [0-2] (no case #3)
//	host=[auto]
//
static void handleRequest(QueryParameters& parameters)
{
	// step 1: context
	const char *strContext = parameters.getString("context", "resource");
	int context;
	if (!strcmp(strContext, "business")) {
		context = CT_BUSINESS;
	}
	else if (!strcmp(strContext, "resource")) {
		context = CT_RESOURCE;
	}
	else {
		outputError(501, "Invalid context parameter");
		return;
	}

	// step 2: group - how to combine stats together
	int totalView = 0;
	const char *strGroup = parameters.getString("group", "total");
	if (!strcmp(strGroup, "total")) {
		totalView = 1;
	}
	else if (!strcmp(strGroup, "list")) {
		totalView = 0;
	}
	else {
		outputError(501, "invalid group parameter, which should be total|list.");
		return;
	}
	
	// step 3: time period, span, align
	int64_t startDtime, endDtime;
	int spanUnit, spanCount;
	if (parseDtimeSpan(parameters, startDtime, endDtime, spanUnit, spanCount) < 0)
		return;

	// move ahead one span for some calculation need its previous stats
	startDtime -= spanLength(spanUnit, spanCount);
	int mergeCount = (endDtime - startDtime) / spanLength(spanUnit, spanCount);

//	char buf1[128], buf2[128];
//	APPLOG_DEBUG("parsed start=%s, end=%s, mergeCount=%d", 
//		formatDtime(buf1, sizeof buf1, startDtime),
//		formatDtime(buf2, sizeof buf2, endDtime), mergeCount);

	StatMerger merger(NULL, NULL, NULL, NULL, spanUnit, spanCount, mergeCount);
	merger.periodStartTime = startDtime;
	
	// step 4: ids
// TODO: group by department...
//	uint16_t did = parameters.getInt("did", 0);
	uint16_t pid = parameters.getInt("pid", 0);
	uint16_t mid = parameters.getInt("mid", 0);

	if (pid == 0) {
		outputError(501, "pid can not be 0(ANY) now");
		return;
	}

	int cpuTotal = 0, cpuCores = 0; std::tr1::unordered_set<int> cpuIds;
	int memory = 0;
	int loadAvg = 0;
	int netAll = 0; std::tr1::unordered_set<int> netIds;
	int diskAll = 0; std::tr1::unordered_set<int> diskIds;

	// step 4.1: parse iids
	const char *strIid = parameters.getString("iid", "all");
	if (strcmp(strIid, "all") == 0) {
		cpuTotal = 1;	// no cpu-cores
		memory = 1;
		loadAvg = 1;
		netAll = 1;
		diskAll = 1;
	}
	else {
		char ss[1024]; 
		strncpy(ss, strIid, sizeof ss); ss[sizeof(ss) - 1] = 0;

		char *endptr, *nptr = strtok_r(ss, MULTIVAL_SEPARATORS, &endptr);
		while (nptr != NULL) {
			if (!strcmp(nptr, "cpu-total")) cpuTotal = 1;
			else if (!strcmp(nptr, "cpu-cores")) cpuCores = 1;
			else if (!strncmp(nptr, "cpu-", 4)) cpuIds.insert(strtol(nptr + 4, NULL, 0));
			else if (!strcmp(nptr, "mem")) memory = 1;
			else if (!strcmp(nptr, "load-avg")) loadAvg = 1;
			else if (!strcmp(nptr, "net-all")) netAll = 1;
			// TODO: mapping net-name to its id
			else if (!strncmp(nptr, "net-", 4)) netIds.insert(strtol(nptr + 4, NULL, 0));
			else if (!strcmp(nptr, "disk-all")) diskAll = 1;
			// TODO: mapping disk-name to its id
			else if (!strncmp(nptr, "disk-", 5)) diskIds.insert(strtol(nptr + 5, NULL, 0));
			else {
				outputError(501, "invalid iid parameter");
				return;
			}

			nptr = strtok_r(NULL, MULTIVAL_SEPARATORS, &endptr);
		}
	}

	// step 4.2: get all possible iids first
	local_key_set_t ids;
	host_set_t hosts;

	// step 4.3: get hosts and mapping iids with hosts
	const char *strHost = parameters.getString("host", "auto");
	if (strcmp(strHost, "auto")) {
		// individual host(s)
		char ss[1024];
		strncpy(ss, strHost, sizeof ss); ss[sizeof(ss) - 1] = 0;

		char *endptr, *nptr = strtok_r(ss, MULTIVAL_SEPARATORS, &endptr);
		while (nptr != NULL) {
			stat_ip_t hip;
			if (inet_pton(AF_INET, nptr, &hip.ip.ip4) == 1) {
				hip.ver = 4; 
			}
			else if (inet_pton(AF_INET6, nptr, &hip.ip.ip6[0]) == 1) {
				hip.ver = 6;
			}
			else {
				outputError(501, "invalid host parameter");
				return;
			}

			hosts.insert(hip);
			nptr = strtok_r(NULL, MULTIVAL_SEPARATORS, &endptr);
		}
	}

	unsigned char buf[8192], rspBuf[8192];
	MemoryBuffer msg(buf, sizeof buf, false);
	MemoryBuffer rsp(rspBuf, sizeof rspBuf, false);

	struct proto_h16_head *h = (struct proto_h16_head *)msg.data();
	memset(h, 0, sizeof(*h));
	msg.setWptr(sizeof(*h));

	h->cmd = CMD_STAT_GET_SYSTEM_STATS_REQ;
	h->syn = nextSyn++;
	h->ack = 0;
	h->ver = 1;
	
	msg.writeUint8(context);
	msg.writeUint8(totalView);
	msg.writeInt64(startDtime);
	msg.writeInt64(endDtime);
	msg.writeUint8(spanUnit);
	msg.writeUint8(spanCount);
	msg.writeUint16(pid);
	msg.writeUint16(mid);

	msg.writeUint16(hosts.size());
	for (host_set_t::iterator iter = hosts.begin(); iter != hosts.end(); ++iter) {
		if (encodeTo(msg, *iter) < 0)
			break;
	}
	
	beyondy::TimedoutCountdown timer(10*1000);
	ClientConnection client(storageAddress, 10*1000, 3);
	if (client.request(&msg, &rsp) < 0) {
		APPLOG_ERROR("request to %s failed: %m", storageAddress);
		return;
	}

	struct proto_h16_res *h2 = (struct proto_h16_res *)rsp.data();
	rsp.setRptr(sizeof(*h2));

	// further merge: the combiner must be declared before we parse into it
	StatCombiner combiner(spanUnit, spanCount, startDtime, mergeCount);
	if (combiner.parseFrom(&rsp) < 0) {
		APPLOG_ERROR("parse combiner from rsp-msg failed");
		return;
	}

	int gtype = combiner.groupType();

	// output
	printf("Status: 200 OK\r\n");
	printf("Content-Type: application/json\r\n");
	printf("\r\n");

	int64_t spanInterval = spanLength(spanUnit, spanCount);
	int64_t ts = startDtime + spanInterval;
	char tbuf[128];	/* 'buf' above is the request buffer; redeclaring it would not compile */
	printf("{\"start\":\"%s\"", formatDtime(tbuf, sizeof tbuf, ts));
	printf(",\"end\":\"%s\"", formatDtime(tbuf, sizeof tbuf, endDtime));
	printf(",\"span\":\"%s\"", formatSpan(tbuf, sizeof tbuf, spanUnit, spanCount));
	printf(",\"stats\":[");
	for (int i = 1; i < mergeCount; ++i) {
		
		printf("%s{\"dtime\":\"%s\"", i == 1 ? "" : ",", formatDtime(buf, sizeof buf, ts));
		printf(",\"data\":[");

		bool first = true;
		if (!cpuIds.empty()) {
			outputCpuCombinedGauges(gtype, combiner.mergedGauges[i-1], combiner.mergedGauges[i], first, cpuIds);
		}

		if (memory) {
			outputMemCombinedGauges(gtype, combiner.mergedGauges[i-1], combiner.mergedGauges[i], first);
		}

		if (loadAvg) {
			outputLoadavgCombinedGauges(gtype, combiner.mergedGauges[i-1], combiner.mergedGauges[i], first);
		}

		if (!netIds.empty()) {
			outputNetCombinedGauges(gtype, combiner.mergedGauges[i-1], combiner.mergedGauges[i], first, netIds);
		}

		if (!diskIds.empty()) {
			outputDiskCombinedGauges(gtype, combiner.mergedGauges[i-1], combiner.mergedGauges[i], first, diskIds);
		}

		printf("]}");

		ts += spanInterval;
	}

	printf("]}");
	return;
}
Example no. 14
int main() {

  const int grid_size = 10;
  // Build grid with locally varying mean
  Cartesian_grid* lvm_grid = new Cartesian_grid( grid_size, grid_size, 1 );
  GsTLGridProperty* prop = lvm_grid->add_property( "trend", typeid( float ) );
  
  for( int i=0; i< grid_size*grid_size/2 ; i++ ) {
    prop->set_value( 0.0, i );
  }
  for( int i=grid_size*grid_size/2; i< grid_size*grid_size ; i++ ) {
    prop->set_value( 10.0, i );
  }
  Colocated_neighborhood* coloc_neigh =
    dynamic_cast<Colocated_neighborhood*>(
      lvm_grid->colocated_neighborhood( "trend" )
    );


  // Build kriging grid 
  Cartesian_grid* krig_grid = new Cartesian_grid( grid_size, grid_size, 1 );
  GsTLGridProperty* krig_prop = 
    krig_grid->add_property( string("krig"), typeid( float )  );
  krig_grid->select_property( "krig");

  // Build harddata grid
  const int pointset_size = 4;
  Point_set* harddata = new Point_set( pointset_size );
  std::vector<GsTLPoint> locations;
  locations.push_back( GsTLPoint( 0,0,0 ) );
  locations.push_back( GsTLPoint( 1,5,0 ) );
  locations.push_back( GsTLPoint( 8,8,0 ) );
  locations.push_back( GsTLPoint( 5,2,0 ) );
  harddata->point_locations( locations );
  GsTLGridProperty* hard_prop = harddata->add_property( "poro" );
  
  for( int i=0; i<pointset_size; i++ ) {
    hard_prop->set_value( i, i );
  }

  harddata->select_property( "poro" );


  // Set up covariance
  Covariance<GsTLPoint> cov;
  cov.nugget(0.1);
  cov.add_structure( "Spherical" );
  cov.sill( 0, 0.9 );
  cov.set_geometry( 0, 10,10,10, 0,0,0 );

  Grid_initializer initializer;
  initializer.assign( krig_grid,
		      krig_prop,
		      harddata,
		      "poro" );
	
  for( int i=0; i< krig_prop->size(); i++ ) {
    if( krig_prop->is_harddata( i ) )
      cout << "value at " << i << ": "
           << krig_prop->get_value( i ) << endl;
  }

  krig_grid->select_property( "krig");

  Neighborhood* neighborhood = krig_grid->neighborhood( 20, 20, 1, 0,0,0,
							&cov, true );

  typedef GsTLPoint Location;
  typedef std::vector<double>::const_iterator weight_iterator;
  typedef SKConstraints_impl< Neighborhood, Location > SKConstraints;
  typedef SK_local_mean_combiner<weight_iterator, Neighborhood,
                                 Colocated_neighborhood> LVM_combiner;
  typedef Kriging_constraints< Neighborhood, Location > KrigingConstraints;
  typedef Kriging_combiner< weight_iterator, Neighborhood > KrigingCombiner;


  LVM_combiner combiner( *coloc_neigh );
  SKConstraints constraints;


  // initialize the algo
  Kriging algo;
  algo.simul_grid_ = krig_grid;
  algo.property_name_ = "krig";
  algo.harddata_grid_ = 0;
  algo.neighborhood_ = neighborhood;
  algo.covar_ = cov;
  algo.combiner_ = new KrigingCombiner( &combiner );
  algo.Kconstraints_ = new KrigingConstraints( &constraints );


  // Run and output the results
  algo.execute();

  ofstream ofile( "result.out" );
  if( !ofile ) {
    cerr << "can't create file result.out" << endl;
    return 1;
  }

  GsTLGridProperty* prop1 = krig_grid->select_property( "krig" );
  ofile << "kriging" << endl << "1" << endl << "krig" << endl ;
  for( int i=0; i< prop1->size(); i++ ) {
    if( prop1->is_informed( i ) )
      ofile << prop1->get_value( i ) << endl;
    else
      ofile << "-99" << endl;
  }
  return 0;
}
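For reference, the covariance assembled above (nugget 0.1 plus one spherical structure with sill 0.9 and ranges of 10) corresponds to the standard model

C(h) = 0.1\,\delta(h) + 0.9\,\mathrm{Sph}\!\left(\tfrac{h}{10}\right),
\qquad
\mathrm{Sph}(u) =
\begin{cases}
1 - 1.5u + 0.5u^{3} & u \le 1 \\
0 & u > 1
\end{cases}

so C(0) = 1 (the total sill) and the spatial correlation vanishes beyond a lag of 10 cells.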
Example no. 15
Geometry* GeometryCombiner::combine(std::vector<Geometry*> const& geoms)
{
    GeometryCombiner combiner(geoms);
    return combiner.combine();
}
Example no. 16
    typename util::detail::algorithm_result<ExPolicy, OutIter>::type
    set_operation(ExPolicy policy,
        RanIter1 first1, RanIter1 last1, RanIter2 first2, RanIter2 last2,
        OutIter dest, F && f, Combiner && combiner, SetOp && setop)
    {
        typedef typename std::iterator_traits<RanIter1>::difference_type
            difference_type1;
        typedef typename std::iterator_traits<RanIter2>::difference_type
            difference_type2;

        // allocate intermediate buffers
        difference_type1 len1 = std::distance(first1, last1);
        difference_type2 len2 = std::distance(first2, last2);

        typedef typename set_operations_buffer<OutIter>::type buffer_type;
        boost::shared_array<buffer_type> buffer(
            new buffer_type[combiner(len1, len2)]);

        typedef typename ExPolicy::executor_type executor_type;
        std::size_t cores = executor_information_traits<executor_type>::
            processing_units_count(policy.executor(), policy.parameters());

        std::size_t step = (len1 + cores - 1) / cores;
        boost::shared_array<set_chunk_data> chunks(new set_chunk_data[cores]);

        // fill the buffer piecewise
        return parallel::util::partitioner<ExPolicy, OutIter, void>::call(
            policy, chunks.get(), cores,
            // first step, is applied to all partitions
            [=](set_chunk_data* curr_chunk, std::size_t part_size)
            {
                HPX_ASSERT(part_size == 1);

                // find start in sequence 1
                std::size_t start1 = (curr_chunk - chunks.get()) * step;
                std::size_t end1 = (std::min)(start1 + step, std::size_t(len1));

                bool first_partition = (start1 == 0);
                bool last_partition = (end1 == std::size_t(len1));

                // all but the last chunk require special handling
                if (!last_partition)
                {
                    // this chunk will be handled by the next one if all
                    // elements of this partition are equal
                    if (!f(first1[start1], first1[end1]))
                        return;

                    // move backwards to find earliest element which is equal to
                    // the last element of the current chunk
                    while (end1 != 0 && !f(first1[end1 - 1], first1[end1]))
                        --end1;
                }

                // move backwards to find earliest element which is equal to
                // the first element of the current chunk
                while (start1 != 0 && !f(first1[start1 - 1], first1[start1]))
                    --start1;

                // find start and end in sequence 2
                std::size_t start2 = 0;
                if (!first_partition)
                {
                    start2 =
                        std::lower_bound(
                            first2, first2 + len2, first1[start1], f
                        ) - first2;
                }

                std::size_t end2 = len2;
                if (!last_partition)
                {
                    end2 =
                        std::lower_bound(
                            first2 + start2, first2 + len2, first1[end1], f
                        ) - first2;
                }

                // perform requested set-operation into the proper place of the
                // intermediate buffer
                curr_chunk->start = combiner(start1, start2);
                auto buffer_dest = buffer.get() + curr_chunk->start;
                curr_chunk->len =
                    setop(first1 + start1, first1 + end1,
                          first2 + start2, first2 + end2, buffer_dest, f
                    ) - buffer_dest;
            },
            // second step, is executed after all partitions are done running
            [buffer, chunks, cores, dest](std::vector<future<void> >&&) -> OutIter
            {
                // accumulate real length
                set_chunk_data* chunk = chunks.get();
                chunk->start_index = 0;
                for (size_t i = 1; i != cores; ++i)
                {
                    set_chunk_data* curr_chunk = chunk++;
                    chunk->start_index =
                        curr_chunk->start_index + curr_chunk->len;
                }

                // finally, copy data to destination
                parallel::util::foreach_partitioner<
                        hpx::parallel::parallel_execution_policy
                    >::call(par, chunks.get(), cores,
                        [buffer, dest](
                            set_chunk_data* chunk, std::size_t, std::size_t)
                        {
                            std::copy(buffer.get() + chunk->start,
                                buffer.get() + chunk->start + chunk->len,
                                dest + chunk->start_index);
                        },
                        [](set_chunk_data* last) -> set_chunk_data*
                        {
                            return last;
                        });

                return dest;
            });
    }
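To make the two functor roles concrete: combiner only has to upper-bound the output length for a pair of sub-range lengths, so that every chunk can write into a disjoint slice of the intermediate buffer, while setop is the sequential kernel that fills that slice. A self-contained sketch of the pair a parallel set_union would use (illustrative, not the HPX call site):

#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> a = {1, 3, 5, 7};
    std::vector<int> b = {2, 3, 6};

    // f: the element comparison used throughout set_operation
    std::less<int> comp;

    // combiner: a union of len1 and len2 elements can never exceed
    // len1 + len2, so this bounds each chunk's slice of the buffer
    auto combiner = [](std::size_t len1, std::size_t len2) { return len1 + len2; };

    std::vector<int> buffer(combiner(a.size(), b.size()));

    // setop: the sequential kernel each partition runs on its sub-ranges
    auto end = std::set_union(a.begin(), a.end(), b.begin(), b.end(),
                              buffer.begin(), comp);
    buffer.resize(end - buffer.begin());

    for (int v : buffer) std::cout << v << ' ';   // prints: 1 2 3 5 6 7
    std::cout << '\n';
}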