Example No. 1
void MeshBase::partition (const unsigned int n_parts)
{
  // If we get here and we have unpartitioned elements, we need that
  // fixed.
  if (this->n_unpartitioned_elem() > 0)
    {
      libmesh_assert (partitioner().get());
      libmesh_assert (this->is_serial());
      partitioner()->partition (*this, n_parts);
    }
  // NULL partitioner means don't repartition
  // Non-serial meshes may not be ready for repartitioning here.
  else if(!skip_partitioning() &&
          partitioner().get())
    {
      partitioner()->partition (*this, n_parts);
    }
  else
    {
      // Adaptive coarsening may have "orphaned" nodes on processors
      // whose elements no longer share them.  We need to check for
      // and possibly fix that.
      Partitioner::set_node_processor_ids(*this);

      // Make sure locally cached partition count is correct
      this->recalculate_n_partitions();

      // Make sure any other locally cached data is correct
      this->update_post_partitioning();
    }
}
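A minimal self-contained sketch of the same guard pattern, using hypothetical Mesh/Strategy stand-ins rather than the libMesh API: repartition only when a partitioner is set and partitioning is not skipped; otherwise just refresh locally cached data.
#include <iostream>
#include <memory>

// Hypothetical stand-ins for MeshBase and Partitioner (illustration only).
struct Strategy {
    void partition(unsigned int n_parts) { std::cout << "partitioning into " << n_parts << " parts\n"; }
};

struct Mesh {
    std::unique_ptr<Strategy> strategy;  // null means "don't repartition"
    bool skip_partitioning = false;

    void partition(unsigned int n_parts) {
        if (!skip_partitioning && strategy)
            strategy->partition(n_parts);
        else
            refresh_cached_partition_data();  // keep cached counts consistent
    }
    void refresh_cached_partition_data() { std::cout << "refreshing cached data\n"; }
};

int main() {
    Mesh mesh;
    mesh.partition(4);  // no strategy set: only refreshes caches
    mesh.strategy = std::make_unique<Strategy>();
    mesh.partition(4);  // strategy set: repartitions
}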
Example No. 2
void MeshBase::partition (const unsigned int n_parts)
{
  // If we get here and we have unpartitioned elements, we need that
  // fixed.
  if (this->n_unpartitioned_elem() > 0)
    {
      libmesh_assert (partitioner().get());
      libmesh_assert (this->is_serial());
      partitioner()->partition (*this, n_parts);
    }
  // A nullptr partitioner or a skip_partitioning(true) call or a
  // skip_noncritical_partitioning(true) call means don't repartition;
  // skip_noncritical_partitioning() checks all these.
  else if (!skip_noncritical_partitioning())
    {
      partitioner()->partition (*this, n_parts);
    }
  else
    {
      // Adaptive coarsening may have "orphaned" nodes on processors
      // whose elements no longer share them.  We need to check for
      // and possibly fix that.
      MeshTools::correct_node_proc_ids(*this);

      // Make sure locally cached partition count is correct
      this->recalculate_n_partitions();

      // Make sure any other locally cached data is correct
      this->update_post_partitioning();
    }
}
Example No. 3
void MeshBase::partition (const unsigned int n_parts)
{
  // NULL partitioner means don't partition
  // Non-serial meshes aren't ready for partitioning yet.
  if(!skip_partitioning() &&
     partitioner().get() &&
     this->is_serial())
    {
      partitioner()->partition (*this, n_parts);
    }
  else
    {
      // Make sure locally cached partition count is correct
      this->recalculate_n_partitions();

      // Make sure any other locally cached data is correct
      this->update_post_partitioning();
    }
}
Example No. 4
void AssemblyTree::rebuild_assembly_tree()
{
    // Clear the current tree.
    clear();
    m_assembly_instances.clear();

    Statistics statistics;

    // Collect all assembly instances of the scene.
    AABBVector assembly_instance_bboxes;
    collect_assembly_instances(assembly_instance_bboxes);

    RENDERER_LOG_INFO(
        "building assembly tree (%s %s)...",
        pretty_int(m_assembly_instances.size()).c_str(),
        plural(m_assembly_instances.size(), "assembly instance").c_str());

    // Create the partitioner.
    typedef bvh::SAHPartitioner<AABBVector> Partitioner;
    Partitioner partitioner(
        assembly_instance_bboxes,
        AssemblyTreeMaxLeafSize,
        AssemblyTreeInteriorNodeTraversalCost,
        AssemblyTreeTriangleIntersectionCost);

    // Build the assembly tree.
    typedef bvh::Builder<AssemblyTree, Partitioner> Builder;
    Builder builder;
    builder.build<DefaultWallclockTimer>(*this, partitioner, m_assembly_instances.size(), AssemblyTreeMaxLeafSize);
    statistics.insert_time("build time", builder.get_build_time());
    statistics.merge(bvh::TreeStatistics<AssemblyTree>(*this, AABB3d(m_scene.compute_bbox())));

    if (!m_assembly_instances.empty())
    {
        const vector<size_t>& ordering = partitioner.get_item_ordering();
        assert(m_assembly_instances.size() == ordering.size());

        // Reorder the assembly instances according to the tree ordering.
        vector<const AssemblyInstance*> temp_assembly_instances(ordering.size());
        small_item_reorder(
            &m_assembly_instances[0],
            &temp_assembly_instances[0],
            &ordering[0],
            ordering.size());

        // Store assembly instances in the tree leaves whenever possible.
        store_assembly_instances_in_leaves(statistics);
    }

    // Print assembly tree statistics.
    RENDERER_LOG_DEBUG("%s",
        StatisticsVector::make(
            "assembly tree statistics",
            statistics).to_string().c_str());
}
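The build step above asks the partitioner for its item ordering and then permutes the assembly instances to match. A self-contained sketch of applying such a permutation, with a simple centroid sort standing in for the SAH partitioner (not appleseed's API):
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // Stand-in "bounding boxes": 1D centroids are enough to illustrate.
    std::vector<double> centroids = { 5.0, 1.0, 3.0 };
    std::vector<const char*> items = { "a", "b", "c" };

    // The partitioner would emit an item ordering; here we sort indices by centroid.
    std::vector<std::size_t> ordering(items.size());
    std::iota(ordering.begin(), ordering.end(), 0);
    std::sort(ordering.begin(), ordering.end(),
        [&](std::size_t i, std::size_t j) { return centroids[i] < centroids[j]; });

    // Apply the permutation, which is what small_item_reorder() does via a scratch buffer.
    std::vector<const char*> reordered(items.size());
    for (std::size_t i = 0; i < items.size(); ++i)
        reordered[i] = items[ordering[i]];

    for (const char* s : reordered)
        std::cout << s << ' ';      // prints: b c a
    std::cout << '\n';
}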
Example No. 5
int
main(int argc, const char* const* argv)
{
	// parse arguments
	int argi = 1;
	for (; argi < argc; argi++) {
		const char* arg = argv[argi];
		if (arg[0] == '-') {
			if (arg[1] == '-') {
				// a double '-' option
				if (strcmp(arg, "--help") == 0) {
					print_usage_and_exit(false);
				} else
					print_usage_and_exit(true);
			} else {
				// single '-' options
				for (int i = 1; arg[i] != '\0'; i++) {
					switch (arg[i]) {
						case 'h':
							print_usage_and_exit(false);
						default:
							print_usage_and_exit(true);
					}
				}
			}
		} else
			break;
	}

	// only the device path should remain
	if (argi != argc - 1) 
		print_usage_and_exit(true);
	const char* devicePath = argv[argi];

	// get the disk device
	BDiskDeviceRoster roster;
	BDiskDevice device;
	status_t error = roster.GetDeviceForPath(devicePath, &device);
	if (error != B_OK) {
		fprintf(stderr, "Error: Failed to get disk device for path \"%s\": "
			"%s\n", devicePath, strerror(error));
		return 1;
	}

	Partitioner partitioner(&device);
	partitioner.Run();

	return 0;
}
Example No. 6
void CurveTree::build_bvh(
    const ParamArray&       params,
    const double            time,
    Statistics&             statistics)
{
    // Collect curves for this tree.
    RENDERER_LOG_INFO(
        "collecting geometry for curve tree #" FMT_UNIQUE_ID " from assembly \"%s\"...",
        m_arguments.m_curve_tree_uid,
        m_arguments.m_assembly.get_path().c_str());
    vector<GAABB3> curve_bboxes;
    collect_curves(curve_bboxes);

    // Print statistics about the input geometry.
    RENDERER_LOG_INFO(
        "building curve tree #" FMT_UNIQUE_ID " (bvh, %s %s)...",
        m_arguments.m_curve_tree_uid,
        pretty_uint(m_curve_keys.size()).c_str(),
        plural(m_curve_keys.size(), "curve").c_str());

    // Create the partitioner.
    typedef bvh::SAHPartitioner<vector<GAABB3> > Partitioner;
    Partitioner partitioner(
        curve_bboxes,
        CurveTreeDefaultMaxLeafSize,
        CurveTreeDefaultInteriorNodeTraversalCost,
        CurveTreeDefaultCurveIntersectionCost);

    // Build the tree.
    typedef bvh::Builder<CurveTree, Partitioner> Builder;
    Builder builder;
    builder.build<DefaultWallclockTimer>(
        *this,
        partitioner,
        m_curves1.size() + m_curves3.size(),
        CurveTreeDefaultMaxLeafSize);
    statistics.merge(
        bvh::TreeStatistics<CurveTree>(*this, m_arguments.m_bbox));

    // Reorder the curve keys based on the nodes ordering.
    if (!m_curves1.empty() || !m_curves3.empty())
    {
        const vector<size_t>& ordering = partitioner.get_item_ordering();
        reorder_curve_keys(ordering);
        reorder_curves(ordering);
        reorder_curve_keys_in_leaf_nodes();
    }
}
Example No. 7
void LogMapper::send_bucket_partition_presort(
  Bucket* bucket,
  storage::StorageType storage_type,
  storage::PartitionId partition) {
  storage::Partitioner partitioner(engine_, bucket->storage_id_);

  char* io_base = reinterpret_cast<char*>(io_buffer_.get_block());
  presort_ouputs_.assure_capacity(sizeof(BufferPosition) * bucket->counts_);
  BufferPosition* outputs = reinterpret_cast<BufferPosition*>(presort_ouputs_.get_block());

  uint32_t shortest_key_length = 0xFFFF;
  uint32_t longest_key_length = 0;
  if (storage_type == storage::kMasstreeStorage) {
    for (uint32_t i = 0; i < bucket->counts_; ++i) {
      uint64_t pos = from_buffer_position(bucket->log_positions_[i]);
      const log::LogHeader* header = reinterpret_cast<const log::LogHeader*>(io_base + pos);
      update_key_lengthes(header, storage_type, &shortest_key_length, &longest_key_length);
    }
  }

  LogBuffer buffer(io_base);
  uint32_t count = 0;
  storage::Partitioner::SortBatchArguments args = {
    buffer,
    bucket->log_positions_,
    bucket->counts_,
    shortest_key_length,
    longest_key_length,
    &presort_buffer_,
    parent_.get_base_epoch(),
    outputs,
    &count};
  partitioner.sort_batch(args);
  ASSERT_ND(count <= bucket->counts_);
  bucket->counts_ = count;  // it might be compacted

  // then same as usual send_bucket_partition() except we use outputs
  send_bucket_partition_general(bucket, storage_type, partition, outputs);
}
Example No. 8
void peano::kernel::regulargrid::parallel::tests::SetupPartitionerTest::test2D_400x400ForkMessages() {
  #ifdef Dim2
  tarch::la::Vector<DIMENSIONS,int> domain;
  assignList(domain) = 401,401;
  peano::kernel::regulargrid::parallel::SetupPartitioner partitioner(domain,2);

  partitioner._ranks.push_back(0);
  partitioner._ranks.push_back(1);

  tarch::la::Vector<DIMENSIONS,int>     partition;
  tarch::la::Vector<DIMENSIONS,double>  domainOffset;
  tarch::la::Vector<DIMENSIONS,double>  h;

  assignList(domainOffset) =  0.0,  0.0;
  assignList(h)            =  1.0/400.0,  1.0/400.0;

  assignList(partition) = 0,0;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message00 =
    partitioner.getForkMessage( partition, domainOffset, h );
  validateEquals( message00.getH(), h );
  validateEquals( message00.getNumberOfGridPoints()(0), 201 );
  validateEquals( message00.getNumberOfGridPoints()(1), 401 );
  validateEquals( message00.getDomainOffset()(0), 0.0);
  validateEquals( message00.getDomainOffset()(1), 0.0);

  assignList(partition) = 1,0;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message10 =
    partitioner.getForkMessage( partition, domainOffset, h );
  validateEquals( message10.getH(), h );
  validateEquals( message10.getNumberOfGridPoints()(0), 201 );
  validateEquals( message10.getNumberOfGridPoints()(1), 401 );
  validateEquals( partitioner.getOffsetOfPartition(partition)(0), 200);
  validateEquals( partitioner.getOffsetOfPartition(partition)(1), 0);
  validateEquals( message10.getDomainOffset()(1), 0.0);
  validateEquals( message10.getDomainOffset()(0), 0.5);
  validateEquals( message10.getDomainOffset()(1), 0.0);

  #endif
}
Example No. 9
void LogMapper::flush_bucket(const BucketHashList& hashlist) {
  ASSERT_ND(hashlist.head_);
  ASSERT_ND(hashlist.tail_);
  // temporary variables to store partitioning results
  BufferPosition* position_array = reinterpret_cast<BufferPosition*>(
    tmp_position_array_slice_.get_block());
  PartitionSortEntry* sort_array = reinterpret_cast<PartitionSortEntry*>(
    tmp_sort_array_slice_.get_block());
  storage::PartitionId* partition_array = reinterpret_cast<storage::PartitionId*>(
    tmp_partition_array_slice_.get_block());
  LogBuffer log_buffer(reinterpret_cast<char*>(io_buffer_.get_block()));
  const bool multi_partitions = engine_->get_options().thread_.group_count_ > 1U;

  if (!engine_->get_storage_manager()->get_storage(hashlist.storage_id_)->exists()) {
    // We ignore such logs in snapshot. As DROP STORAGE immediately becomes durable,
    // there is no point in collecting logs for the storage.
    LOG(INFO) << "These logs are sent to a dropped storage.. ignore them";
    return;
  }

  uint64_t log_count = 0;  // just for reporting
  debugging::StopWatch stop_watch;
  for (Bucket* bucket = hashlist.head_; bucket != nullptr; bucket = bucket->next_bucket_) {
    ASSERT_ND(bucket->counts_ > 0);
    ASSERT_ND(bucket->counts_ <= kBucketMaxCount);
    ASSERT_ND(bucket->storage_id_ == hashlist.storage_id_);
    log_count += bucket->counts_;

    // if there are multiple partitions, we first partition log entries.
    if (multi_partitions) {
      storage::Partitioner partitioner(engine_, bucket->storage_id_);
      ASSERT_ND(partitioner.is_valid());
      if (partitioner.is_partitionable()) {
        // calculate partitions
        for (uint32_t i = 0; i < bucket->counts_; ++i) {
          position_array[i] = bucket->log_positions_[i];
          ASSERT_ND(log_buffer.resolve(position_array[i])->header_.storage_id_
            == bucket->storage_id_);
          ASSERT_ND(log_buffer.resolve(position_array[i])->header_.storage_id_
            == hashlist.storage_id_);
        }
        storage::Partitioner::PartitionBatchArguments args = {
          static_cast< storage::PartitionId >(numa_node_),
          log_buffer,
          position_array,
          bucket->counts_,
          partition_array};
        partitioner.partition_batch(args);

        // sort the log positions by the calculated partitions
        std::memset(sort_array, 0, sizeof(PartitionSortEntry) * bucket->counts_);
        for (uint32_t i = 0; i < bucket->counts_; ++i) {
          sort_array[i].set(partition_array[i], bucket->log_positions_[i]);
        }
        std::sort(sort_array, sort_array + bucket->counts_);

        // let's reuse the current bucket as a temporary memory to hold sorted entries.
        // buckets are discarded after the flushing, so this doesn't cause any issue.
        const uint32_t original_count = bucket->counts_;
        storage::PartitionId current_partition = sort_array[0].partition_;
        bucket->log_positions_[0] = sort_array[0].position_;
        bucket->counts_ = 1;
        for (uint32_t i = 1; i < original_count; ++i) {
          if (current_partition == sort_array[i].partition_) {
            bucket->log_positions_[bucket->counts_] = sort_array[i].position_;
            ++bucket->counts_;
            ASSERT_ND(bucket->counts_ <= original_count);
          } else {
            // the current partition has ended.
            // let's send out these log entries to this partition
            send_bucket_partition(bucket, current_partition);
            // this is the beginning of next partition
            current_partition = sort_array[i].partition_;
            bucket->log_positions_[0] = sort_array[i].position_;
            bucket->counts_ = 1;
          }
        }

        ASSERT_ND(bucket->counts_ > 0);
        // send out the last partition
        send_bucket_partition(bucket, current_partition);
      } else {
        // in this case, it's same as single partition regarding this storage.
        send_bucket_partition(bucket, 0);
      }
    } else {
      // if it's not multi-partition, we blindly send everything to partition-0 (NUMA node 0)
      send_bucket_partition(bucket, 0);
    }
  }

  stop_watch.stop();
  LOG(INFO) << to_string() << " sent out " << log_count << " log entries for storage-"
    << hashlist.storage_id_ << " in " << stop_watch.elapsed_ms() << " milliseconds";
}
Example No. 10
int main(int argc, char **argv)
{
    int k, i, **fd_p_s, j, mergenow, mergenext, ***mergfd, n, num_to_wait = 0, len, in, out, count, carry, gener = 1;
    char *s, key[3] = "0";
    FILE *f = fopen("psort.log", "w");
    freopen("out.txt", "w", stdout);
    fclose(f);
    setbuf(stdout, NULL);
    semid = semget(IPC_PRIVATE, 1, 0);
    semctl(semid, 0, SETVAL, (int)1);
    k = atoi(argv[1]);// number of processes
    if (argc == 3) // check whether the "-n" key was passed
        strncpy(key, argv[2], 2);
    mergfd = (int ***)malloc(1 * sizeof(int**)); // merge-tree pipe fds: [generation][pipe][read/write end]
    mergfd[0] = (int**)malloc(k * sizeof(int*));
    fd_p_s = (int**)malloc(k * sizeof(int*)); // fd from partitioner to sorter
    s = (char*)malloc(1* sizeof(char));
    for (i = 0; i < k; ++i) {
        fd_p_s[i] = (int*)malloc(2 * sizeof(int));
        mergfd[0][i] = (int*)malloc(2 * sizeof(int));
        pipe(fd_p_s[i]);
    }
    if (!fork())
        partitioner(fd_p_s, k);
    for (i = 0; i < k; ++i)
        close(fd_p_s[i][1]);
    for (i = 0; i < k; ++i){
        pipe(mergfd[0][i]);
        if (!fork())
            sorter(i, fd_p_s, k, key, mergfd[0][i][1]); // sorts its share of the strings and sends it to mergfd[0][i][1]
    }
    num_to_wait = k + 1; //partitioner + k sorters
    for (i = 0; i < k; ++i)
        close(fd_p_s[i][0]);
    // start merging, one generation at a time
    mergenow = k;
    while (mergenow != 1){
        ++gener;
        mergenext = mergenow/2 + mergenow%2;
        mergfd = (int ***)realloc(mergfd, gener * sizeof(int**));
        mergfd[gener - 1] = (int**)malloc(mergenext * sizeof(int*));
        for (i = 0; i < mergenext; ++i){
            mergfd[gener - 1][i] = (int*)malloc(2 * sizeof(int));
            pipe(mergfd[gener - 1][i]);
        }
        count = 0;
        while (count < mergenow) {
            if (count + 1 == mergenow) { // odd number of mergers this generation; the last one has no pair
                if (!fork()){
                    merger1(mergfd[gener - 2][count][0], mergfd[gener - 1][count / 2][1], key, count/2, gener);
                    return 0;
                }
            }
            else {
                if (!fork()){
                    merger2(mergfd[gener - 2][count][0], mergfd[gener - 2][count + 1][0], mergfd[gener - 1][count / 2][1], key, count/2, gener);
                    return 0;
                }
            }
            count +=2;
            ++num_to_wait;
        }
        for(i = 0; i < mergenow; ++i)
            close(mergfd[gener - 2][i][0]);
        for(i = 0; i < mergenext; ++i)
            close(mergfd[gener - 1][i][1]);
        mergenow = mergenext;
    }
    my_read(mergfd[gener - 1][0][0], &n, sizeof(int));
    for (i = 0; i < n; ++i){
        my_read(mergfd[gener - 1][0][0], &len, sizeof(len));
        s = (char*)realloc(s, len * sizeof(char));
        my_read(mergfd[gener - 1][0][0], s, len);
        fputs(s, stdout);
        printf("\n");
    }
    for (i = 0; i < num_to_wait; ++i)
        wait(NULL);
    free(s);
    return 0;
}
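Each stage of the example's merge tree is connected by POSIX pipes. A minimal, self-contained sketch of the single pipe-per-child building block it repeats (error handling trimmed; not the psort code itself):
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
    int fd[2];
    if (pipe(fd) == -1) { perror("pipe"); return 1; }

    if (fork() == 0) {              // child: writes its result into the pipe
        close(fd[0]);
        const char* msg = "sorted chunk\n";
        write(fd[1], msg, strlen(msg));
        close(fd[1]);
        return 0;
    }

    close(fd[1]);                   // parent: reads the child's output
    char buf[64];
    ssize_t n = read(fd[0], buf, sizeof(buf) - 1);
    if (n > 0) { buf[n] = '\0'; fputs(buf, stdout); }
    close(fd[0]);
    wait(nullptr);                  // reap the child
    return 0;
}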
Example No. 11
int main( int argc, char *argv[] )
{
//    try {

        time_t programStartTime(time(NULL));
        boost::filesystem::path workingDir( boost::filesystem::current_path());


        // ========== PROGRAM PARAMETERS ==========

        std::string progName("partitiontree");
        std::string configFilename("../../config/"+progName+".cfg");
        unsigned int threads(0), levelDepth(3), filterRadius(0);
        bool verbose(false), niftiMode( true );

        // program parameters
        std::string treeFilename, outputFolder;

        // Declare a group of options that will be allowed only on command line
        boost::program_options::options_description genericOptions("Generic options");
        genericOptions.add_options()
                ( "version", "Program version" )
                ( "help,h", "Produce extended program help message" )
                ( "tree,t",  boost::program_options::value< std::string >(&treeFilename), "file with the tree to compute partitions from")
                ( "outputf,O",  boost::program_options::value< std::string >(&outputFolder), "output folder where partition files will be written")
                ( "search-depth,d", boost::program_options::value< unsigned int >(&levelDepth)->implicit_value(3), "[opt] optimal partition search depth (default = 3)")
                ( "filter-radius,r", boost::program_options::value< unsigned int >(&filterRadius)->implicit_value(0), "[opt] output partition filter kernel radius (default = 0 | no filtering)")
                ( "hoz", "[opt] obtain horizontal cut partitions (instead of Spread-Separation ones)")
                ( "maxgran,m", "[opt] obtain only the maximum granularity partition")
                ;

        // Declare a group of options that will be allowed both on command line and in config file
        boost::program_options::options_description configOptions("Configuration");
        configOptions.add_options()
                ( "verbose,v", "[opt] verbose output." )
                ( "vista", "[opt] use vista file format (default is nifti)." )
                ( "pthreads,p",  boost::program_options::value< unsigned int >(&threads), "[opt] number of processing threads to run the program in parallel, default: all available")
                ;

        // Hidden options, will be allowed both on command line and in config file, but will not be shown to the user.
        boost::program_options::options_description hiddenOptions("Hidden options");
        //hiddenOptions.add_options() ;

        boost::program_options::options_description cmdlineOptions;
        cmdlineOptions.add(genericOptions).add(configOptions).add(hiddenOptions);
        boost::program_options::options_description configFileOptions;
        configFileOptions.add(configOptions).add(hiddenOptions);
        boost::program_options::options_description visibleOptions("Allowed options");
        visibleOptions.add(genericOptions).add(configOptions);
        boost::program_options::positional_options_description posOpt; // these arguments do not need to specify the option descriptor when typed in
        //posOpt.add("roi-file", -1);

        boost::program_options::variables_map variableMap;
        store(boost::program_options::command_line_parser(argc, argv).options(cmdlineOptions).positional(posOpt).run(), variableMap);

        std::ifstream ifs(configFilename.c_str());
        store(parse_config_file(ifs, configFileOptions), variableMap);
        notify(variableMap);


        if (variableMap.count("help"))
        {
            std::cout << "---------------------------------------------------------------------------" << std::endl;
            std::cout << std::endl;
            std::cout << " Project: hClustering" << std::endl;
            std::cout << std::endl;
            std::cout << " Whole-Brain Connectivity-Based Hierarchical Parcellation Project" << std::endl;
            std::cout << " David Moreno-Dominguez" << std::endl;
            std::cout << " [email protected]" << std::endl;
            std::cout << " [email protected]" << std::endl;
            std::cout << " www.cbs.mpg.de/~moreno" << std::endl;
            std::cout << std::endl;
            std::cout << " For more reference on the underlying algorithm and research they have been used for refer to:" << std::endl;
            std::cout << " - Moreno-Dominguez, D., Anwander, A., & Knösche, T. R. (2014)." << std::endl;
            std::cout << "   A hierarchical method for whole-brain connectivity-based parcellation." << std::endl;
            std::cout << "   Human Brain Mapping, 35(10), 5000-5025. doi: http://dx.doi.org/10.1002/hbm.22528" << std::endl;
            std::cout << " - Moreno-Dominguez, D. (2014)." << std::endl;
            std::cout << "   Whole-brain cortical parcellation: A hierarchical method based on dMRI tractography." << std::endl;
            std::cout << "   PhD Thesis, Max Planck Institute for Human Cognitive and Brain Sciences, Leipzig." << std::endl;
            std::cout << "   ISBN 978-3-941504-45-5" << std::endl;
            std::cout << std::endl;
            std::cout << " hClustering is free software: you can redistribute it and/or modify" << std::endl;
            std::cout << " it under the terms of the GNU Lesser General Public License as published by" << std::endl;
            std::cout << " the Free Software Foundation, either version 3 of the License, or" << std::endl;
            std::cout << " (at your option) any later version." << std::endl;
            std::cout << " http://creativecommons.org/licenses/by-nc/3.0" << std::endl;
            std::cout << std::endl;
            std::cout << " hClustering is distributed in the hope that it will be useful," << std::endl;
            std::cout << " but WITHOUT ANY WARRANTY; without even the implied warranty of" << std::endl;
            std::cout << " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the" << std::endl;
            std::cout << " GNU Lesser General Public License for more details." << std::endl;
            std::cout << std::endl;
            std::cout << "---------------------------------------------------------------------------" << std::endl << std::endl;
            std::cout << "partitiontree" << std::endl << std::endl;
            std::cout << "Obtain tree partitions at all granularity levels using the Spread-Separation method (finding the the partition with highest SS index at each granularity)." << std::endl;
            std::cout << " Optimal SS value for each partition is searched within a defined search-depth hierarchical levels. Final partitions can be filtered with a defined kernel size." << std::endl;
            std::cout << " to keep local SS maxima within that kernel. For SS index refer to (Moreno-Dominguez, 2014)" << std::endl;
            std::cout << " For an interactive 3D partition management with more options please use the Hierarchcial Clustering module developed in OpenWalnut (www.openwalnut.org)." << std::endl << std::endl;
            std::cout << "* Arguments:" << std::endl << std::endl;
            std::cout << " --version:       Program version." << std::endl << std::endl;
            std::cout << " -h --help:       produce extended program help message." << std::endl << std::endl;
            std::cout << " -t --tree:       File with the hierarchical tree to extract partitions from." << std::endl << std::endl;
            std::cout << " -O --outputf:    Output folder where partition files will be written." << std::endl << std::endl;
            std::cout << "[-d --search-depth]:  Search optimal partition for each granularity within d hierarchical levels." << std::endl;
            std::cout << "                       A higher value will produce more optimized partition but will increase computing time." << std::endl;
            std::cout << "                       Default: 3. Recommendened values: 3 for good quality and fast computation, 4 for enhanced quality." << std::endl << std::endl;
            std::cout << "[-r --filter-radius]: Filter output partitions to keep only local SS (partition quality) maxima" << std::endl;
            std::cout << "                       within a r-sized kernel across the granularity dimension." << std::endl << std::endl;
            std::cout << "[-h --hoz]:       Write horizontal cut partitions instead of SS ones (optimal partition search is still based on SS index)." << std::endl << std::endl;
            std::cout << "[-m --maxgran]:   Compute and write only the maximum granularity (meta-leaves) partition." << std::endl << std::endl;
            std::cout << "[-v --verbose]:   verbose output (recommended)." << std::endl << std::endl;
            std::cout << "[--vista]: 	    write output tree in vista coordinates (default is nifti)." << std::endl << std::endl;
            std::cout << "[-p --pthreads]:  number of processing threads to run the program in parallel. Default: use all available processors." << std::endl << std::endl;
            std::cout << std::endl;
            std::cout << "* Usage example:" << std::endl << std::endl;
            std::cout << " partitiontree -t tree_lh.txt -O results/ -d 3 -r 50 -v" << std::endl << std::endl;
            std::cout << std::endl;
            std::cout << "* Outputs (in output folder defined at option -O):" << std::endl << std::endl;
            std::cout << " (default outputs)" << std::endl;
            std::cout << " - 'allSSparts_dX.txt' - (where X is the search depth level defined at parameter -d) Contains a summary of the partition information (cut value and size) for all granularities." << std::endl;
            std::cout << " - 'TREE_SSparts_dX.txt' - (where TREE is the filename of the input tree defined at parameter -t) contains a copy of the original tree file with the partitions at all granularities included in the relevant fields." << std::endl;
            std::cout << " - 'partitiontree_log.txt' - A text log file containing the parameter details and in-run and completion information of the program." << std::endl;
            std::cout << std::endl;
            std::cout << " (additional if using option -r)" << std::endl;
            std::cout << " - 'filtSSparts_dX_rY.txt' - (where Y is the filter radius defined at parameter -r) Contains a summary of the resulting filtered partitions." << std::endl;
            std::cout << " - 'TREE_SSparts_dX_rY.txt' - contains a copy of the original tree file with the resulting filtered partitions included in the relevant fields." << std::endl;
            std::cout << std::endl;
            std::cout << " (when using --hoz option, the prefix 'SS' will be replaced by 'Hoz'')" << std::endl;
            std::cout << std::endl;
            std::cout << " (alternative outputs when using option --maxgran)" << std::endl;
            std::cout << " - 'fmaxgranPart.txt' - Contains the size information of the resulting maximal granularity partition for that tree." << std::endl;
            std::cout << " - 'TREE_maxgranPart.txt' - contains a copy of the original tree file with the resulting max granularity partition included in the relevant fields." << std::endl;
            std::cout << std::endl;
            exit(0);
        }
        if (variableMap.count("version"))
        {
            std::cout << progName <<", version 2.0"<<std::endl;
            exit(0);
        }
        if (variableMap.count("verbose"))
        {
            std::cout << "verbose output"<<std::endl;
            verbose=true;
        }

        if (variableMap.count("pthreads"))
        {
            if (threads==1)
            {
                std::cout <<"Using a single processor"<< std::endl;
            }
            else if(threads==0 || threads>=omp_get_num_procs())
            {
                threads = omp_get_num_procs();
                std::cout <<"Using all available processors ("<< threads <<")." << std::endl;
            }
            else
            {
                std::cout <<"Using a maximum of "<< threads <<" processors "<< std::endl;
            }
            omp_set_num_threads( threads );
        }
        else
        {
            threads = omp_get_num_procs();
            omp_set_num_threads( threads );
            std::cout <<"Using all available processors ("<< threads <<")." << std::endl;
        }

        if ( variableMap.count( "vista" ) )
        {
            if( verbose )
            {
                std::cout << "Using vista format" << std::endl;
            }
            fileManagerFactory fmf;
            fmf.setVista();
            niftiMode = false;
        }
        else
        {
            if( verbose )
            {
                std::cout << "Using nifti format" << std::endl;
            }
            fileManagerFactory fmf;
            fmf.setNifti();
            niftiMode = true;
        }

        if (variableMap.count("tree"))
        {
            if(!boost::filesystem::is_regular_file(boost::filesystem::path(treeFilename)))
            {
                std::cerr << "ERROR: tree file \""<<treeFilename<<"\" is not a regular file"<<std::endl;
                std::cerr << visibleOptions << std::endl;
                exit(-1);
            }
            std::cout << "Roi voxels file: "<< treeFilename << std::endl;
        }
        else
        {
            std::cerr << "ERROR: no tree file stated"<<std::endl;
            std::cerr << visibleOptions << std::endl;
            exit(-1);
        }


        if (variableMap.count("outputf"))
        {
            if(!boost::filesystem::is_directory(boost::filesystem::path(outputFolder)))
            {
                std::cerr << "ERROR: output folder \""<<outputFolder<<"\" is not a directory"<<std::endl;
                std::cerr << visibleOptions << std::endl;
                exit(-1);

            }
            std::cout << "Output folder: "<< outputFolder << std::endl;
        }
        else
        {
            std::cerr << "ERROR: no output folder stated"<<std::endl;
            std::cerr << visibleOptions << std::endl;
            exit(-1);

        }



        if (variableMap.count("maxgran"))
        {
            std::cout<<"Obtaining only max. granularity partition..."<<std::endl;

            WHtree tree(treeFilename);
            std::cout<<tree.getReport( false )<<std::endl;
            if( tree.testRootBaseNodes() )
            {
                std::vector<size_t > maxpart( tree.getRootBaseNodes() );
                std::vector<std::vector<size_t > > partitionVector( 1, maxpart);
                std::vector<float > partitionValues(1,0);
                std::cout<<"maxgranpart size: "<<std::endl<<maxpart.size()<<std::endl;
                WHtreePartition partitioner(&tree);
                std::string outPartFilename( outputFolder + "/maxgranPart.txt" );
                partitioner.writePartitionSet( outPartFilename, partitionValues,partitionVector);
                tree.insertPartitions( partitionVector, partitionValues );
                std::string outTreeFilename( outputFolder + "/" + tree.getName() + "_maxgranPart" );
                outTreeFilename += ( ".txt" );
                tree.writeTree( outTreeFilename, niftiMode );
                return 0;
            }
            else
            {
                std::cout<<"ERROR: tree  does not have a maximum granularity meta-leaf partition"<<std::endl;
                return(-1);
            }
        }

        if( levelDepth > 5 )
        {
            std::cout << "Level depth indicated: " << levelDepth << " is too high, setting to a maximum of 5" << std::endl;
            levelDepth = 5;
        }
        std::cout << "Using a search depth of: " << levelDepth << std::endl;

        if( filterRadius > 1000 )
        {
            std::cout << "filter radius indicated: " << filterRadius << " is too high (max is 1000), setting to 100" << std::endl;
            filterRadius = 10;
        }
        if( filterRadius == 0 )
        {
            std::cout << "using no filtering (radius 0)" << std::endl;
        }
        else if( filterRadius < 0 ) // note: filterRadius is unsigned, so this branch can never be taken
        {
            std::cout << "filter radius indicated: " << filterRadius << " must be positive. using no filtering (radius 0)" << std::endl;
            filterRadius = 0;
        }
        else
        {
            std::cout << "Using a filter radius of: " << filterRadius << std::endl;
        }

        /////////////////////////////////////////////////////////////////



        std::string logFilename(outputFolder+"/"+progName+"_log.txt");
        std::ofstream logFile(logFilename.c_str());
        if(!logFile) {
            std::cerr << "ERROR: unable to open log file: \""<<logFilename<<"\""<<std::endl;
            exit(-1);
        }
        logFile <<"Start Time:\t"<< ctime(&programStartTime) <<std::endl;
        logFile <<"Working directory:\t"<< workingDir.string() <<std::endl;
        logFile <<"Verbose:\t"<< verbose <<std::endl;
        logFile <<"Tree file:\t"<< treeFilename <<std::endl;
        logFile <<"Output folder:\t"<< outputFolder <<std::endl;
        logFile <<"Verbose:\t"<< verbose <<std::endl;
        if( niftiMode )
        {
            logFile << "Using nifti file format" << std::endl;
        }
        else
        {
            logFile << "Using vista file format" << std::endl;
        }

        WHtree tree(treeFilename);

        logFile << tree.getReport( false ) <<std::endl;
        std::cout<<tree.getReport( false )<<std::endl;

        std::vector< float > partitionValues;
        std::vector< std::vector< size_t> > partitionVector;

        WHtreePartition treePartition(&tree);

        std::string prefix;

        if (variableMap.count("hoz"))
        {
            prefix = "Hoz";
            std::cout <<"getting hoz partitions at all levels..." <<std::endl;
            treePartition.scanHozPartitions( &partitionValues, &partitionVector );

            std::cout << partitionValues.size() << " Partitions obtained, writing to file..." <<std::endl;
            logFile <<"Initial partitions:\t"<< partitionValues.size() <<std::endl;
            std::string outPartFilename( outputFolder + "/all" + prefix + "parts.txt" );
            treePartition.writePartitionSet( outPartFilename, partitionValues, partitionVector);

            tree.insertPartitions( partitionVector, partitionValues );
            std::string outTreeFilename( outputFolder + "/" + tree.getName() + "_" + prefix + "parts_d" + boost::lexical_cast<std::string>(levelDepth) );
            outTreeFilename += ( ".txt" );
            tree.writeTree( outTreeFilename, niftiMode );
        }
        else
        {

            prefix = "SS";
            std::cout <<"getting SS partitions at all levels..." <<std::endl;
            treePartition.scanOptimalPartitions( levelDepth, &partitionValues, &partitionVector );

            std::cout << partitionValues.size() << " Partitions obtained, writing to file..." <<std::endl;
            logFile <<"Initial partitions:\t"<< partitionValues.size() <<std::endl;
            std::string outPartFilename( outputFolder + "/all" + prefix + "parts_d" + boost::lexical_cast<std::string>(levelDepth) + ".txt" );
            treePartition.writePartitionSet( outPartFilename, partitionValues, partitionVector);

            tree.insertPartitions( partitionVector, partitionValues );
            std::string outTreeFilename( outputFolder + "/" + tree.getName() + "_" + prefix + "parts_d" + boost::lexical_cast<std::string>(levelDepth) );
            outTreeFilename += ( ".txt" );
            tree.writeTree( outTreeFilename, niftiMode );

        }


        std::vector < unsigned int > filterRadii;
        //filterRadii.reserve( 6 );
        //        filterRadii.push_back( 1 );
        //        filterRadii.push_back( 2 );
        //        filterRadii.push_back( 5 );
        //        filterRadii.push_back( 10 );
        //        filterRadii.push_back( 15 );
        //        filterRadii.push_back( 20 );
        filterRadii.push_back( filterRadius );



        for(size_t i=0; i< filterRadii.size(); ++i)
        {
            if( filterRadii[i] <= 0 )
            {
                continue;
            }
            std::vector< float > filtPartValues( partitionValues );
            std::vector< std::vector< size_t> > filtPartVector( partitionVector );

            std::cout << "Filtering with a radius of "<< filterRadii[i] << "..." <<std::endl;
            treePartition.filterMaxPartitions( filterRadii[i], &filtPartValues, &filtPartVector );

            std::cout << filtPartValues.size() << " Filtered partitions obtained, writing to file..." <<std::endl;
            logFile <<"Filtered partitions:\t"<< filtPartValues.size() <<std::endl;
            std::string outPartFilename( outputFolder + "/filt" + prefix + "parts_d" + boost::lexical_cast<std::string>(levelDepth) );
            outPartFilename += ( "_r" + boost::lexical_cast<std::string>(filterRadii[i]) +  ".txt" );
            treePartition.writePartitionSet(outPartFilename, filtPartValues, filtPartVector);

            std::cout << "Adding filtered partitions to tree and writing..." <<std::endl;

            std::string outTreeFilename( outputFolder + "/" + tree.getName() + "_" + prefix + "parts_d" + boost::lexical_cast<std::string>(levelDepth) );
            outTreeFilename += ( "_r" + boost::lexical_cast<std::string>(filterRadii[i]) +  ".txt" );

            tree.insertPartitions( filtPartVector, filtPartValues );
            tree.writeTree( outTreeFilename, niftiMode );
        }



        /////////////////////////////////////////////////////////////////


        // save and print total time
        time_t programEndTime(time(NULL));
        int totalTime( difftime(programEndTime,programStartTime) );
        std::cout <<"Program Finished, total time: "<< totalTime/3600 <<"h "<<  (totalTime%3600)/60 <<"' "<< ((totalTime%3600)%60) <<"\"   "<< std::endl;
        logFile <<"-------------"<<std::endl;
        logFile <<"Finish Time:\t"<< ctime(&programEndTime) <<std::endl;
        logFile <<"Elapsed time : "<< totalTime/3600 <<"h "<<  (totalTime%3600)/60 <<"' "<< ((totalTime%3600)%60) <<"\""<< std::endl;


//    }
//    catch(std::exception& e)
//    {
//        std::cout << e.what() << std::endl;
//        return 1;
//    }
    return 0;
}
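Most of the main() above is boost::program_options plumbing. A trimmed sketch of just that declare/parse/query flow (standard boost API, reduced here to two options):
#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, char* argv[])
{
    std::string treeFilename;

    // declare the accepted options, binding --tree to a variable
    po::options_description desc("Allowed options");
    desc.add_options()
        ("help,h", "produce help message")
        ("tree,t", po::value<std::string>(&treeFilename), "input tree file");

    // parse the command line and fill the bound variables
    po::variables_map vm;
    po::store(po::parse_command_line(argc, argv, desc), vm);
    po::notify(vm);

    if (vm.count("help")) { std::cout << desc << std::endl; return 0; }
    if (!vm.count("tree")) { std::cerr << "ERROR: no tree file stated" << std::endl << desc; return 1; }

    std::cout << "Tree file: " << treeFilename << std::endl;
    return 0;
}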
Example No. 12
ErrorStack LogReducer::dump_buffer_sort_storage(
  const LogBuffer &buffer,
  storage::StorageId storage_id,
  const std::vector<BufferPosition>& log_positions,
  uint32_t* out_shortest_key_length,
  uint32_t* out_longest_key_length,
  uint32_t* written_count) {
  // first, count how many log entries there are. this is quick as we have statistics
  // in the header.
  uint64_t records = 0;
  for (BufferPosition position : log_positions) {
    FullBlockHeader* header = reinterpret_cast<FullBlockHeader*>(buffer.resolve(position));
    if (!header->is_full_block()) {
      LOG(FATAL) << to_string() << " wtf. magic word doesn't match. position=" << position
        << ", storage_id=" << storage_id << *header;
    }
    header->assert_key_length();
    records += header->log_count_;
  }

  // now we need memory for this long array. expand the memory if not sufficient.
  uint64_t positions_buffer_size = records * sizeof(BufferPosition);
  expand_positions_buffers_if_needed(positions_buffer_size);
  BufferPosition* inputs = reinterpret_cast<BufferPosition*>(input_positions_slice_.get_block());
  uint64_t cur_rec_total = 0;

  // put all log positions to the array
  uint32_t shortest_key_length = 0xFFFF;
  uint32_t longest_key_length = 0;
  for (BufferPosition position : log_positions) {
    FullBlockHeader* header = reinterpret_cast<FullBlockHeader*>(buffer.resolve(position));
    if (!header->is_full_block()) {
      LOG(FATAL) << to_string() << " wtf. magic word doesn't match. position=" << position
        << ", storage_id=" << storage_id << *header;
    }
    header->assert_key_length();
    shortest_key_length = std::min<uint32_t>(shortest_key_length, header->shortest_key_length_);
    longest_key_length = std::max<uint32_t>(longest_key_length, header->longest_key_length_);
    BufferPosition record_pos = position + to_buffer_position(sizeof(FullBlockHeader));
    for (uint32_t i = 0; i < header->log_count_; ++i) {
      log::RecordLogType* record = buffer.resolve(record_pos);
      ASSERT_ND(record->header_.storage_id_ == storage_id);
      ASSERT_ND(record->header_.log_length_ > 0);
      inputs[cur_rec_total] = record_pos;
      ++cur_rec_total;
      record_pos += to_buffer_position(record->header_.log_length_);
    }
    ASSERT_ND(record_pos == position + header->block_length_);
  }
  ASSERT_ND(cur_rec_total == records);

  // Now, sort these log records by key and then ordinal. we use the partitioner object for this.
  storage::Partitioner partitioner(engine_, storage_id);
  BufferPosition* pos = reinterpret_cast<BufferPosition*>(output_positions_slice_.get_block());
  *written_count = 0;
  storage::Partitioner::SortBatchArguments args = {
    buffer,
    inputs,
    static_cast<uint32_t>(records),
    shortest_key_length,
    longest_key_length,
    &sort_buffer_,
    parent_.get_base_epoch(),
    pos,
    written_count};
  partitioner.sort_batch(args);

  *out_shortest_key_length = shortest_key_length;
  *out_longest_key_length = longest_key_length;
  return kRetOk;
}
Example No. 13
void peano::kernel::regulargrid::parallel::tests::SetupPartitionerTest::test2D_12x8ForkMessages() {
  #ifdef Dim2
  tarch::la::Vector<DIMENSIONS,int> domain;
  assignList(domain) = 12,9;
  peano::kernel::regulargrid::parallel::SetupPartitioner partitioner(domain,9);

  partitioner._ranks.push_back(0);
  partitioner._ranks.push_back(1);
  partitioner._ranks.push_back(2);
  partitioner._ranks.push_back(3);
  partitioner._ranks.push_back(4);
  partitioner._ranks.push_back(5);
  partitioner._ranks.push_back(6);
  partitioner._ranks.push_back(7);
  partitioner._ranks.push_back(8);

  tarch::la::Vector<DIMENSIONS,int>     partition;
  tarch::la::Vector<DIMENSIONS,double>  domainOffset;
  tarch::la::Vector<DIMENSIONS,double>  h;

  assignList(domainOffset) = -2.0, -3.0;
  assignList(h)            =  0.4,  0.5;

  assignList(partition) = 0,0;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message00 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 1,0;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message10 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 2,0;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message20 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 0,1;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message01 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 1,1;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message11 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 2,1;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message21 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 0,2;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message02 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 1,2;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message12 =
    partitioner.getForkMessage( partition, domainOffset, h );
  assignList(partition) = 2,2;
  peano::kernel::regulargrid::parallel::messages::ForkMessage message22 =
    partitioner.getForkMessage( partition, domainOffset, h );

  //  validateEquals( message00.getNeighbourRanks(), h );

  validateEquals( message00.getH(), h );
  validateEquals( message00.getNumberOfGridPoints()(0), 5 );
  validateEquals( message00.getNumberOfGridPoints()(1), 4 );
  validateEquals( message00.getDomainOffset()(0), domainOffset(0) + 0*h(0));
  validateEquals( message00.getDomainOffset()(1), domainOffset(1) + 0*h(1));

  validateEquals( message10.getH(), h );
  validateEquals( message10.getNumberOfGridPoints()(0), 5 );
  validateEquals( message10.getNumberOfGridPoints()(1), 4 );
  validateEquals( message10.getDomainOffset()(0), domainOffset(0) + 4*h(0));
  validateEquals( message10.getDomainOffset()(1), domainOffset(1) + 0*h(1));

  validateEquals( message20.getH(), h );
  validateEquals( message20.getNumberOfGridPoints()(0), 4 );
  validateEquals( message20.getNumberOfGridPoints()(1), 4 );
  validateEquals( message20.getDomainOffset()(0), domainOffset(0) + 8*h(0));
  validateEquals( message20.getDomainOffset()(1), domainOffset(1) + 0*h(1));

  validateEquals( message01.getH(), h );
  validateEquals( message01.getNumberOfGridPoints()(0), 5 );
  validateEquals( message01.getNumberOfGridPoints()(1), 4 );
  validateEquals( message01.getDomainOffset()(0), domainOffset(0) + 0*h(0));
  validateEquals( message01.getDomainOffset()(1), domainOffset(1) + 3*h(1));
  #endif
}
Example No. 14
void FMEMultipoleKernel::quadtreeConstruction(ArrayPartition& pointPartition)
{
	FMELocalContext*  localContext	= m_pLocalContext;
	FMEGlobalContext* globalContext = m_pGlobalContext;
	LinearQuadtree&	tree			= *globalContext->pQuadtree;

	// precompute the bounding box for the quadtree points from the graph nodes
	for_loop(pointPartition, min_max_x_function(localContext));
	for_loop(pointPartition, min_max_y_function(localContext));

	// wait until the thread's bounding box is computed
	sync();

	// let the main thread compute the bounding box of the bounding boxes
	if (isMainThread())
	{
		globalContext->min_x = globalContext->pLocalContext[0]->min_x;
		globalContext->min_y = globalContext->pLocalContext[0]->min_y;
		globalContext->max_x = globalContext->pLocalContext[0]->max_x;
		globalContext->max_y = globalContext->pLocalContext[0]->max_y;
		for (__uint32 j=1; j < numThreads(); j++)
		{
			globalContext->min_x = min(globalContext->min_x, globalContext->pLocalContext[j]->min_x);
			globalContext->min_y = min(globalContext->min_y, globalContext->pLocalContext[j]->min_y);
			globalContext->max_x = max(globalContext->max_x, globalContext->pLocalContext[j]->max_x);
			globalContext->max_y = max(globalContext->max_y, globalContext->pLocalContext[j]->max_y);
		};
		tree.init(globalContext->min_x, globalContext->min_y, globalContext->max_x, globalContext->max_y);
		globalContext->coolDown *= 0.999f;
		tree.clear();
	};
	// wait because the morton number computation needs the bounding box
	sync();		
	// update Morton numbers to prepare them for sorting
	for_loop(pointPartition, LQMortonFunctor(localContext));
	// wait so we can sort them by morton number
	sync();

#ifdef OGDF_FME_PARALLEL_QUADTREE_SORT
	// use a simple parallel sorting algorithm
	LinearQuadtree::LQPoint* points = tree.pointArray();
	sort_parallel(points, tree.numberOfPoints(), LQPointComparer);
#else
	if (isMainThread())
	{
		LinearQuadtree::LQPoint* points = tree.pointArray();
		sort_single(points, tree.numberOfPoints(), LQPointComparer);
	};
#endif
	// wait because the quadtree builder needs the sorted order
	sync();
	// if not a parallel run, we can do the easy way
	if (isSingleThreaded())
	{
		LinearQuadtreeBuilder builder(tree);
		// prepare the tree
		builder.prepareTree();
		// and link it
		builder.build();
		LQPartitioner partitioner( localContext );
		partitioner.partition();
	} else // the more difficult part
	{
		// snap the left point of the interval of the thread to the first in the cell
		LinearQuadtree::PointID beginPoint = tree.findFirstPointInCell(pointPartition.begin);
		LinearQuadtree::PointID endPoint_plus_one;
		// if this thread is the last one, no snapping required for the right point
		if (threadNr()==numThreads()-1)
			endPoint_plus_one = tree.numberOfPoints();
		else // find the left point of the next thread
			endPoint_plus_one = tree.findFirstPointInCell(pointPartition.end+1);
		// and calculate the number of points to prepare
		__uint32 numPointsToPrepare = endPoint_plus_one - beginPoint;

		// now we can prepare the snapped interval
		LinearQuadtreeBuilder builder(tree);
		// this function prepares the tree from begin point to endPoint_plus_one-1 (EXCLUDING endPoint_plus_one)
		builder.prepareTree(beginPoint, endPoint_plus_one);
		// save the start, end and count of the inner node chain in the context
		localContext->firstInnerNode = builder.firstInner;
		localContext->lastInnerNode = builder.lastInner;
		localContext->numInnerNodes = builder.numInnerNodes;
		// save the start, end and count of the leaf node chain in the context
		localContext->firstLeaf = builder.firstLeaf;
		localContext->lastLeaf = builder.lastLeaf;
		localContext->numLeaves = builder.numLeaves;
		// wait until all are finished
		sync();

		// now the main thread has to link the tree
		if (isMainThread())
		{
			// with its own builder
			LinearQuadtreeBuilder sbuilder(tree);
			// first we need the complete chain data
			sbuilder.firstInner = globalContext->pLocalContext[0]->firstInnerNode;
			sbuilder.firstLeaf = globalContext->pLocalContext[0]->firstLeaf;
			sbuilder.numInnerNodes = globalContext->pLocalContext[0]->numInnerNodes;
			sbuilder.numLeaves = globalContext->pLocalContext[0]->numLeaves;
			for (__uint32 j=1; j < numThreads(); j++)
			{
				sbuilder.numLeaves += globalContext->pLocalContext[j]->numLeaves;
				sbuilder.numInnerNodes += globalContext->pLocalContext[j]->numInnerNodes;
			};
			sbuilder.lastInner = globalContext->pLocalContext[numThreads()-1]->lastInnerNode;
			sbuilder.lastLeaf = globalContext->pLocalContext[numThreads()-1]->lastLeaf;
			// Link the tree
			sbuilder.build();
			// and run the partitions
			LQPartitioner partitioner(localContext);
			partitioner.partition();
		};
	};
	// wait for tree to finish
	sync();
	// now update the copy of the point data 
	for_loop(pointPartition, LQPointUpdateFunctor(localContext));
	// compute the nodes coordinates and sizes
	tree.forall_tree_nodes(LQCoordsFunctor(localContext), localContext->innerNodePartition.begin, localContext->innerNodePartition.numNodes)();
	tree.forall_tree_nodes(LQCoordsFunctor(localContext), localContext->leafPartition.begin, localContext->leafPartition.numNodes)();
};
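The "update Morton number, then sort" steps are the heart of the linear quadtree construction above: sorting by Z-order key groups spatially nearby points into the same cells. A self-contained sketch of 2D Morton keys via bit interleaving (16-bit coordinates; not OGDF's implementation):
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Spread the lower 16 bits of x so one zero bit separates each original bit.
static uint32_t spread_bits(uint32_t x)
{
	x &= 0x0000FFFF;
	x = (x | (x << 8)) & 0x00FF00FF;
	x = (x | (x << 4)) & 0x0F0F0F0F;
	x = (x | (x << 2)) & 0x33333333;
	x = (x | (x << 1)) & 0x55555555;
	return x;
}

// 2D Morton (Z-order) key: interleave the bits of x and y.
static uint32_t morton2d(uint16_t x, uint16_t y)
{
	return spread_bits(x) | (spread_bits(y) << 1);
}

int main()
{
	struct Point { uint16_t x, y; uint32_t morton; };
	std::vector<Point> pts = { {3, 5, 0}, {1, 1, 0}, {4, 2, 0} };

	// compute the keys, then sort by them (the role LQMortonFunctor plus the
	// point sort play in the kernel above)
	for (Point& p : pts) p.morton = morton2d(p.x, p.y);
	std::sort(pts.begin(), pts.end(),
		[](const Point& a, const Point& b) { return a.morton < b.morton; });

	for (const Point& p : pts)
		std::cout << "(" << p.x << "," << p.y << ") key=" << p.morton << "\n";
}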
Example No. 15
ErrorStack execute(
  Engine* engine,
  storage::StorageId id,
  double* elapsed_ms,
  std::vector<std::string>* papi_results) {
  storage::PartitionerMetadata* metadata = storage::PartitionerMetadata::get_metadata(engine, id);
  make_dummy_partitions(engine, id, metadata);

  LOG(INFO) << "Allocating memories...";
  debugging::StopWatch alloc_watch;
  memory::AlignedMemory::AllocType kAlloc = memory::AlignedMemory::kNumaAllocOnnode;
  memory::AlignedMemory work_memory(kRecords * 32ULL, 1U << 21, kAlloc, 0);
  memory::AlignedMemory positions_memory(sizeof(BufferPosition) * kRecords, 1U << 12, kAlloc, 0);
  memory::AlignedMemory out_memory(sizeof(BufferPosition) * kRecords, 1U << 12, kAlloc, 0);
  memory::AlignedMemory partitions_memory(sizeof(uint8_t) * kRecords, 1U << 12, kAlloc, 0);
  memory::AlignedMemory log_memory(kRecords * kPayloadSize * 2ULL, 1U << 21, kAlloc, 0);
  alloc_watch.stop();
  LOG(INFO) << "Allocated memories in " << alloc_watch.elapsed_ms() << "ms";

  LOG(INFO) << "Populating logs to process...";
  debugging::StopWatch log_watch;
  char* log_buffer = reinterpret_cast<char*>(log_memory.get_block());
  uint64_t log_size = 0;
  BufferPosition* log_positions = reinterpret_cast<BufferPosition*>(positions_memory.get_block());
  populate_logs(id, log_buffer, log_positions, &log_size);
  log_watch.stop();
  LOG(INFO) << "Populated logs to process in " << log_watch.elapsed_ms() << "ms";

  if (FLAGS_profile) {
    COERCE_ERROR(engine->get_debug()->start_profile("partition_experiment.prof"));
    engine->get_debug()->start_papi_counters();
  }

  LOG(INFO) << "experiment's main part has started";
  debugging::StopWatch watch;

  storage::Partitioner partitioner_base(engine, id);
  ASSERT_ND(partitioner_base.is_valid());
  storage::hash::HashPartitioner partitioner(&partitioner_base);
  LogBuffer buf(log_buffer);
  if (FLAGS_run_partition) {
    LOG(INFO) << "running partitioning...";
    storage::Partitioner::PartitionBatchArguments partition_args = {
      0,
      buf,
      log_positions,
      kRecords,
      reinterpret_cast<uint8_t*>(partitions_memory.get_block())};
    partitioner.partition_batch(partition_args);
  }

  if (FLAGS_run_sort) {
    LOG(INFO) << "running sorting...";
    uint32_t written_count;
    storage::Partitioner::SortBatchArguments sort_args = {
      buf,
      log_positions,
      kRecords,
      8,
      8,
      &work_memory,
      Epoch(1),
      reinterpret_cast<BufferPosition*>(out_memory.get_block()),
      &written_count};
    partitioner.sort_batch(sort_args);
  }

  watch.stop();
  *elapsed_ms = watch.elapsed_ms();
  LOG(INFO) << "experiment's main part has ended. Took " << *elapsed_ms << "ms";

  if (FLAGS_profile) {
    engine->get_debug()->stop_profile();
    engine->get_debug()->stop_papi_counters();
    if (FLAGS_papi) {
      *papi_results = debugging::DebuggingSupports::describe_papi_counters(
        engine->get_debug()->get_papi_counters());
    }
  }

  return kRetOk;
}