Code example #1
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      for (uint i = 0; i < ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
                  gc_task_manager()->workers(),
                  (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (ParallelGCThreads > 1) {
        for (uint j = 0; j < ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      reference_processor()->setup_policy(false); // not always_clear
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    if (!JavaObjectsInPerm) {
      // Unlink any dead interned Strings
      StringTable::unlink(&_is_alive_closure);
      // Process the remaining live ones
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::oops_do(&root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.


      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should reset the GC overhead limit count,
      // which is maintained for full GCs.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause,
                                   heap->collector_policy());

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or offlining
      // can change the heap layout. Make sure eden is reshaped if that's the case.
      // Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return !promotion_failure_occurred;
}
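
The header comment says callers should normally use invoke(). As a rough sketch of that split (an illustration, not the actual HotSpot source), the policy-bearing wrapper delegates to invoke_no_policy() and escalates to a full collection when the scavenge fails or the size policy asks for one:

// Hypothetical sketch of the invoke()/invoke_no_policy() relationship.
// should_full_GC() and the fallback path are assumptions for illustration.
void PSScavenge::invoke() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  bool scavenge_was_done = PSScavenge::invoke_no_policy();

  // A skipped or failed scavenge, or a policy prediction that minor
  // collections cannot keep up, triggers a full mark-sweep instead.
  if (!scavenge_was_done ||
      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
    PSMarkSweep::invoke_no_policy(false /* clear_all_softrefs */);
  }
}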
Code example #2
int main(int argc, char **argv) {
    int threads = 1;
    const char *host = "localhost.localdomain";
    int port = 80;
    int connections = 1;
    int iterations = 1;
    bool reuseConnections = false;
    int logLevel = ESFLogger::Info;
    const char *method = "GET";
    const char *contentType = "octet-stream";
    const char *bodyFilePath = 0;
    const char *absPath = "/";
    FILE *outputFile = stdout;

    {
        int result = 0;

        while (true) {
            result = getopt(argc, argv, "l:t:H:p:c:i:m:C:b:a:o:rh");

            if (0 > result) {
                break;
            }

            switch (result) {
            case 'l':

                logLevel = atoi(optarg);
                break;

            case 't':

                threads = atoi(optarg);
                break;

            case 'H':

                host = optarg;
                break;

            case 'p':

                port = atoi(optarg);
                break;

            case 'c':

                connections = atoi(optarg);
                break;

            case 'i':

                iterations = atoi(optarg);
                break;

            case 'm':

                method = optarg;
                break;

            case 'C':

                contentType = optarg;
                break;

            case 'b':

                bodyFilePath = optarg;
                break;

            case 'a':

                absPath = optarg;
                break;

            case 'o':

                outputFile = fopen(optarg, "w");

                if (0 == outputFile) {
                    fprintf(stderr, "Cannot open %s: %s\n", optarg, strerror(errno));
                    return -10;
                }

                break;

            case 'r':

                reuseConnections = true;
                break;

            case 'h':

                printHelp(argv[0]);
                return 0;

            default:

                printHelp(argv[0]);

                return 2;
            }
        }
    }

    ESFConsoleLogger::Initialize((ESFLogger::Severity) logLevel);
    ESFLogger *logger = ESFConsoleLogger::Instance();

    if (logger->isLoggable(ESFLogger::Notice)) {
        logger->log(
                ESFLogger::Notice,
                __FILE__,
                __LINE__,
                "Starting. logLevel: %d, threads: %d, host: %s, port: %d, connections: %d, iterations: %d, method: %s, contentType: %s, bodyFile %s, reuseConnections: %s",
                logLevel, threads, host, port, connections, iterations, method, contentType, bodyFilePath ? bodyFilePath : "(none)", reuseConnections ? "true" : "false");
    }

    //
    // Install signal handlers: Ctrl-C and kill will start clean shutdown sequence
    //

    signal(SIGHUP, SIG_IGN);
    signal(SIGPIPE, SIG_IGN);
    signal(SIGINT, AWSRawEchoClientSignalHandler);
    signal(SIGQUIT, AWSRawEchoClientSignalHandler);
    signal(SIGTERM, AWSRawEchoClientSignalHandler);

    //
    // Slurp the request body into memory
    //

    int bodySize = 0;
    unsigned char *body = 0;

    if (0 != bodyFilePath) {
        int fd = open(bodyFilePath, O_RDONLY);
        if (-1 == fd) {
            fprintf(stderr, "Cannot open %s: %s\n", bodyFilePath, strerror(errno));
            return -5;
        }

        struct stat statbuf;

        memset(&statbuf, 0, sizeof(statbuf));

        if (0 != fstat(fd, &statbuf)) {
            close(fd);
            fprintf(stderr, "Cannot stat %s: %s\n", bodyFilePath, strerror(errno));
            return -6;
        }

        bodySize = statbuf.st_size;

        if (0 >= bodySize) {
            close(fd);
            bodySize = 0;
            bodyFilePath = 0;
        } else {
            body = (unsigned char *) malloc(bodySize);

            if (0 == body) {
                close(fd);
                fprintf(stderr, "Cannot allocate %d bytes of memory for body file\n", bodySize);
                return -7;
            }

            int bytesRead = 0;
            int totalBytesRead = 0;

            while (totalBytesRead < bodySize) {
                bytesRead = read(fd, body + totalBytesRead, bodySize - totalBytesRead);

                if (0 == bytesRead) {
                    free(body);
                    close(fd);
                    fprintf(stderr, "Premature EOF slurping body into memory\n");
                    return -8;
                }

                if (0 > bytesRead) {
                    free(body);
                    close(fd);
                    fprintf(stderr, "Error slurping %s into memory: %s\n", bodyFilePath, strerror(errno));
                    return -9;
                }

                totalBytesRead += bytesRead;
            }

            close(fd);
        }
    }

    //
    // Create, initialize, and start the stack
    //

    AWSHttpDefaultResolver resolver(logger);
    AWSHttpClientHistoricalCounters counters(30, ESFSystemAllocator::GetInstance(), logger);

    AWSHttpStack stack(&resolver, threads, &counters, logger);

    AWSHttpEchoClientHandler handler(absPath, method, contentType, body, bodySize, connections * iterations, &stack, logger);

    // TODO - make configuration stack-specific and increase options richness
    AWSHttpClientSocket::SetReuseConnections(reuseConnections);

    ESFError error = stack.initialize();

    if (ESF_SUCCESS != error) {
        if (body) {
            free(body);
            body = 0;
        }

        return -1;
    }

    error = stack.start();

    if (ESF_SUCCESS != error) {
        if (body) {
            free(body);
            body = 0;
        }

        return -2;
    }

    ESFDiscardAllocator echoClientContextAllocator(1024, ESFSystemAllocator::GetInstance());

    sleep(1); // give the worker threads a chance to start - cleans up perf testing numbers a bit

    // Create <connections> distinct client connections which each submit <iterations> SOAP requests

    AWSHttpEchoClientContext *context = 0;
    AWSHttpClientTransaction *transaction = 0;

    for (int i = 0; i < connections; ++i) {
        // Create the request context and transaction

        context = new (&echoClientContextAllocator) AWSHttpEchoClientContext(iterations - 1);

        if (0 == context) {
            if (logger->isLoggable(ESFLogger::Critical)) {
                logger->log(ESFLogger::Critical, __FILE__, __LINE__, "[main] cannot create new client context");
            }

            if (body) {
                free(body);
                body = 0;
            }

            return -3;
        }

        transaction = stack.createClientTransaction(&handler);

        if (0 == transaction) {
            context->~AWSHttpEchoClientContext();
            echoClientContextAllocator.deallocate(context);

            if (logger->isLoggable(ESFLogger::Critical)) {
                logger->log(ESFLogger::Critical, __FILE__, __LINE__, "[main] cannot create new client transaction");
            }

            if (body) {
                free(body);
                body = 0;
            }

            return -3;
        }

        transaction->setApplicationContext(context);

        // Build the request

        error = AWSHttpEchoClientRequestBuilder(host, port, absPath, method, contentType, transaction);

        if (ESF_SUCCESS != error) {
            context->~AWSHttpEchoClientContext();
            echoClientContextAllocator.deallocate(context);
            stack.destroyClientTransaction(transaction);

            if (logger->isLoggable(ESFLogger::Critical)) {
                char buffer[100];

                ESFDescribeError(error, buffer, sizeof(buffer));

                logger->log(ESFLogger::Critical, __FILE__, __LINE__, "[main] cannot build request: %s");
            }

            if (body) {
                free(body);
                body = 0;
            }

            return -4;
        }

        // Send the request (asynch) - the context will resubmit the request for <iteration> - 1 iterations.

        error = stack.executeClientTransaction(transaction);

        if (ESF_SUCCESS != error) {
            context->~AWSHttpEchoClientContext();
            echoClientContextAllocator.deallocate(context);
            stack.destroyClientTransaction(transaction);

            if (logger->isLoggable(ESFLogger::Critical)) {
                char buffer[100];

                ESFDescribeError(error, buffer, sizeof(buffer));

                logger->log(ESFLogger::Critical, __FILE__, __LINE__, "[main] Cannot execute client transaction: %s", buffer);
            }

            if (body) {
                free(body);
                body = 0;
            }

            return error;
        }
    }

    while (IsRunning && false == handler.isFinished()) {
        sleep(5);
    }

    error = stack.stop();

    if (body) {
        free(body);
        body = 0;
    }

    if (ESF_SUCCESS != error) {
        return -3;
    }

    stack.getClientCounters()->printSummary(outputFile);

    stack.destroy();

    echoClientContextAllocator.destroy(); // echo client context destructors will not be called.

    ESFConsoleLogger::Destroy();

    fflush(outputFile);

    fclose(outputFile);

    return ESF_SUCCESS;
}
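
For orientation, the option string "l:t:H:p:c:i:m:C:b:a:o:rh" parsed above maps to: -l log level, -t worker threads, -H host, -p port, -c connections, -i iterations per connection, -m method, -C content type, -b body file, -a abs_path, -o output file, -r reuse connections, -h help. A hypothetical invocation (binary name and values are illustrative):

    echo_client -l 3 -t 4 -H example.com -p 8080 -c 100 -i 1000 -r -o results.txt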
Code example #3
File: gal_combine.cpp Project: SebastianSchlag/KaHIP
// implements our version of gal combine
// compute a matching between blocks (greedily) 
// extend to partition
// apply our refinements and tabu search
void gal_combine::perform_gal_combine( PartitionConfig & config, graph_access & G) {
        //first greedily compute a matching of the partitions
        std::vector< std::unordered_map<PartitionID, unsigned> > counters(config.k);
        forall_nodes(G, node) {
                //boundary_pair bp;
                if(counters[G.getPartitionIndex(node)].find(G.getSecondPartitionIndex(node)) != counters[G.getPartitionIndex(node)].end()) {
                        counters[G.getPartitionIndex(node)][G.getSecondPartitionIndex(node)] += 1;
                } else {
                        counters[G.getPartitionIndex(node)][G.getSecondPartitionIndex(node)] = 1;
                }
        } endfor

        std::vector< PartitionID > permutation(config.k);
        for( unsigned i = 0; i < permutation.size(); i++) {
                permutation[i] = i;
        }

        random_functions::permutate_vector_good_small(permutation); 
        std::vector<bool> rhs_matched(config.k, false);
        std::vector<PartitionID> bipartite_matching(config.k);
        for( unsigned i = 0; i < permutation.size(); i++) {
                PartitionID cur_partition   = permutation[i];
                PartitionID best_unassigned = config.k;
                NodeWeight  best_value      = 0;

                for( std::unordered_map<PartitionID, unsigned>::iterator it = counters[cur_partition].begin(); 
                     it != counters[cur_partition].end(); ++it) {
                        if( rhs_matched[it->first] == false && it->second > best_value ) {
                                best_unassigned = it->first; 
                                best_value = it->second;
                        }
                }

                bipartite_matching[cur_partition] = best_unassigned;
                if( best_unassigned != config.k ) {
                        rhs_matched[best_unassigned] = true;
                }
        }

        std::vector<bool> blocked_vertices(G.number_of_nodes(), false);
        forall_nodes(G, node) {
                if( bipartite_matching[G.getPartitionIndex(node)] == G.getSecondPartitionIndex(node) ){
                        blocked_vertices[node] = true;
                } else {
                        // we will reassign this vertex since the partitions do not agree on it
                        G.setPartitionIndex(node, config.k); 
                }
        } endfor

        construct_partition cp;
        cp.construct_starting_from_partition( config, G );

        refinement* refine = new mixed_refinement();

        double real_epsilon = config.imbalance/100.0;
        double epsilon = random_functions::nextDouble(real_epsilon+0.005,real_epsilon+config.kabaE_internal_bal); 
        PartitionConfig copy = config;
        copy.upper_bound_partition = (1+epsilon)*ceil(config.work_load/(double)config.k);

        complete_boundary boundary(&G);
        boundary.build();

        tabu_search ts;
        ts.perform_refinement( copy, G, boundary);
        
        //now obtain the quotient graph
        complete_boundary boundary2(&G);
        boundary2.build();

        copy = config;
        copy.upper_bound_partition = (1+epsilon)*ceil(config.work_load/(double)config.k);

        refine->perform_refinement( copy, G, boundary2);

        copy = config;
        cycle_refinement cr;
        cr.perform_refinement(config, G, boundary2);
        delete refine;

        
}
Code example #4
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output
    // the customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

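    // The four classic mark-compact phases follow: (1) mark live objects,
    // (2) compute new addresses for surviving objects, (3) adjust pointers
    // to refer to those new addresses, and (4) move the objects.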
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
            " perm_gen_capacity: %d ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
            "of %d%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
            "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
Code example #5
File: kMeans.transformed.cpp Project: zayac/joule
variant _1_kMeans(std::vector<std::vector<double>> img, std::vector<std::vector<double>> old_centers_v, int K, double epsilon kMeans_DOWN__1_kMeans_decl) {
    cv::Mat centers(cv::Scalar(0)), old_centers(old_centers_v);
    cv::Mat data0(img);
    bool isrow = data0.rows == 1 && data0.channels() > 1;
    int N = !isrow ? data0.rows : data0.cols;
    int dims = (!isrow ? data0.cols : 1) * data0.channels();
    int type = data0.depth();

    if (!(data0.dims <= 2 && type == CV_32F && K > 0 && N >= K)) {
        error("Cannot perform K-means algorithm for this configuration" kMeans_DOWN__1_kMeans_error_use);
        return;
    }

    cv::Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));
    cv::Mat temp(1, dims, type);

    std::vector<int> counters(K, 0);
    const float* sample = data.ptr<float>(0);

    double max_center_shift = 0;

    for (int k = 0; k < K; ++k) {
        if (counters[k] != 0)
            continue;

        int max_k = 0;
        for (int k1 = 1; k1 < K; ++k1) {
            if (counters[max_k] < counters[k1])
                max_k = k1;
        }

        double max_dist = 0;
        int farthest_i = -1;
        float* new_center = centers.ptr<float>(k);
        float* old_center = centers.ptr<float>(max_k);
        float* _old_center = temp.ptr<float>();
        float scale = 1.f/counters[max_k];
        for (int j = 0; j < dims; ++j)
            _old_center[j] = old_center[j]*scale;

        for (int i = 0; i < N; ++i) {
            sample = data.ptr<float>(i);
            double dist = cv::normL2Sqr_(sample, _old_center, dims);

            if (max_dist <= dist) {
                max_dist = dist;
                farthest_i = i;
            }
        }

        counters[max_k]--;
        counters[k]++;
        sample = data.ptr<float>(farthest_i);

        for (int j = 0; j < dims; ++j) {
            old_center[j] -= sample[j];
            new_center[j] += sample[j];
        }
    }

    for (int k = 0; k < K; ++k) {
        float* center = centers.ptr<float>(k);
        if (counters[k] == 0) {
            error("For some reason one of the clusters is empty" kMeans_DOWN__1_kMeans_error_use);
            return;
        }
        float scale = 1.f/counters[k];
        for (int j = 0; j < dims; ++j)
            center[j] *= scale;

        double dist = 0;
        const float* old_center = old_centers.ptr<float>(k);
        for (int j = 0; j < dims; ++j) {
            double t = center[j] - old_center[j];
            dist += t * t;
        }
        max_center_shift = std::max(max_center_shift, dist);
    }

    std::vector<std::vector<double>> _centers;
    centers.copyTo(_centers);
    if (max_center_shift <= epsilon) {
        result(_centers kMeans_DOWN__1_kMeans_result_use);
    } else {
        _1_loop(img, _centers, K, epsilon kMeans_DOWN__1_kMeans_loop_use);
    }
}
Code example #6
File: kmeans.cpp Project: 410pfeliciano/opencv
double cv::kmeans( InputArray _data, int K,
                   InputOutputArray _bestLabels,
                   TermCriteria criteria, int attempts,
                   int flags, OutputArray _centers )
{
    const int SPP_TRIALS = 3;
    Mat data0 = _data.getMat();
    bool isrow = data0.rows == 1;
    int N = isrow ? data0.cols : data0.rows;
    int dims = (isrow ? 1 : data0.cols)*data0.channels();
    int type = data0.depth();

    attempts = std::max(attempts, 1);
    CV_Assert( data0.dims <= 2 && type == CV_32F && K > 0 );
    CV_Assert( N >= K );

    Mat data(N, dims, CV_32F, data0.ptr(), isrow ? dims * sizeof(float) : static_cast<size_t>(data0.step));

    _bestLabels.create(N, 1, CV_32S, -1, true);

    Mat _labels, best_labels = _bestLabels.getMat();
    if( flags & CV_KMEANS_USE_INITIAL_LABELS )
    {
        CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
                   best_labels.cols*best_labels.rows == N &&
                   best_labels.type() == CV_32S &&
                   best_labels.isContinuous());
        best_labels.copyTo(_labels);
    }
    else
    {
        if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
                best_labels.cols*best_labels.rows == N &&
                best_labels.type() == CV_32S &&
                best_labels.isContinuous()))
            best_labels.create(N, 1, CV_32S);
        _labels.create(best_labels.size(), best_labels.type());
    }
    int* labels = _labels.ptr<int>();

    Mat centers(K, dims, type), old_centers(K, dims, type), temp(1, dims, type);
    std::vector<int> counters(K);
    std::vector<Vec2f> _box(dims);
    Vec2f* box = &_box[0];
    double best_compactness = DBL_MAX, compactness = 0;
    RNG& rng = theRNG();
    int a, iter, i, j, k;

    if( criteria.type & TermCriteria::EPS )
        criteria.epsilon = std::max(criteria.epsilon, 0.);
    else
        criteria.epsilon = FLT_EPSILON;
    criteria.epsilon *= criteria.epsilon;

    if( criteria.type & TermCriteria::COUNT )
        criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
    else
        criteria.maxCount = 100;

    if( K == 1 )
    {
        attempts = 1;
        criteria.maxCount = 2;
    }

    const float* sample = data.ptr<float>(0);
    for( j = 0; j < dims; j++ )
        box[j] = Vec2f(sample[j], sample[j]);

    for( i = 1; i < N; i++ )
    {
        sample = data.ptr<float>(i);
        for( j = 0; j < dims; j++ )
        {
            float v = sample[j];
            box[j][0] = std::min(box[j][0], v);
            box[j][1] = std::max(box[j][1], v);
        }
    }

    for( a = 0; a < attempts; a++ )
    {
        double max_center_shift = DBL_MAX;
        for( iter = 0;; )
        {
            swap(centers, old_centers);

            if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
            {
                if( flags & KMEANS_PP_CENTERS )
                    generateCentersPP(data, centers, K, rng, SPP_TRIALS);
                else
                {
                    for( k = 0; k < K; k++ )
                        generateRandomCenter(_box, centers.ptr<float>(k), rng);
                }
            }
            else
            {
                if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
                {
                    for( i = 0; i < N; i++ )
                        CV_Assert( (unsigned)labels[i] < (unsigned)K );
                }

                // compute centers
                centers = Scalar(0);
                for( k = 0; k < K; k++ )
                    counters[k] = 0;

                for( i = 0; i < N; i++ )
                {
                    sample = data.ptr<float>(i);
                    k = labels[i];
                    float* center = centers.ptr<float>(k);
                    j=0;
#if CV_ENABLE_UNROLLED
                    for(; j <= dims - 4; j += 4 )
                    {
                        float t0 = center[j] + sample[j];
                        float t1 = center[j+1] + sample[j+1];

                        center[j] = t0;
                        center[j+1] = t1;

                        t0 = center[j+2] + sample[j+2];
                        t1 = center[j+3] + sample[j+3];

                        center[j+2] = t0;
                        center[j+3] = t1;
                    }
#endif
                    for( ; j < dims; j++ )
                        center[j] += sample[j];
                    counters[k]++;
                }

                if( iter > 0 )
                    max_center_shift = 0;

                for( k = 0; k < K; k++ )
                {
                    if( counters[k] != 0 )
                        continue;

                    // if some cluster appeared to be empty then:
                    //   1. find the biggest cluster
                    //   2. find the farthest from the center point in the biggest cluster
                    //   3. exclude the farthest point from the biggest cluster and form a new 1-point cluster.
                    int max_k = 0;
                    for( int k1 = 1; k1 < K; k1++ )
                    {
                        if( counters[max_k] < counters[k1] )
                            max_k = k1;
                    }

                    double max_dist = 0;
                    int farthest_i = -1;
                    float* new_center = centers.ptr<float>(k);
                    float* old_center = centers.ptr<float>(max_k);
                    float* _old_center = temp.ptr<float>(); // normalized
                    float scale = 1.f/counters[max_k];
                    for( j = 0; j < dims; j++ )
                        _old_center[j] = old_center[j]*scale;

                    for( i = 0; i < N; i++ )
                    {
                        if( labels[i] != max_k )
                            continue;
                        sample = data.ptr<float>(i);
                        double dist = normL2Sqr(sample, _old_center, dims);

                        if( max_dist <= dist )
                        {
                            max_dist = dist;
                            farthest_i = i;
                        }
                    }

                    counters[max_k]--;
                    counters[k]++;
                    labels[farthest_i] = k;
                    sample = data.ptr<float>(farthest_i);

                    for( j = 0; j < dims; j++ )
                    {
                        old_center[j] -= sample[j];
                        new_center[j] += sample[j];
                    }
                }

                for( k = 0; k < K; k++ )
                {
                    float* center = centers.ptr<float>(k);
                    CV_Assert( counters[k] != 0 );

                    float scale = 1.f/counters[k];
                    for( j = 0; j < dims; j++ )
                        center[j] *= scale;

                    if( iter > 0 )
                    {
                        double dist = 0;
                        const float* old_center = old_centers.ptr<float>(k);
                        for( j = 0; j < dims; j++ )
                        {
                            double t = center[j] - old_center[j];
                            dist += t*t;
                        }
                        max_center_shift = std::max(max_center_shift, dist);
                    }
                }
            }

            if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
                break;

            // assign labels
            Mat dists(1, N, CV_64F);
            double* dist = dists.ptr<double>(0);
            parallel_for_(Range(0, N),
                          KMeansDistanceComputer(dist, labels, data, centers));
            compactness = 0;
            for( i = 0; i < N; i++ )
            {
                compactness += dist[i];
            }
        }

        if( compactness < best_compactness )
        {
            best_compactness = compactness;
            if( _centers.needed() )
                centers.copyTo(_centers);
            _labels.copyTo(best_labels);
        }
    }

    return best_compactness;
}
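
A minimal usage sketch for the public cv::kmeans API above (sample data, K, and the termination criteria are illustrative; assumes OpenCV 3.x headers):

#include <opencv2/core.hpp>

int main() {
    cv::Mat points(100, 2, CV_32F);                      // 100 two-dimensional samples
    cv::randu(points, cv::Scalar(0), cv::Scalar(100));   // random demo data
    cv::Mat labels, centers;
    double compactness = cv::kmeans(points, 3, labels,
            cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
            3 /* attempts */, cv::KMEANS_PP_CENTERS, centers);
    // labels: N x 1 CV_32S, one cluster index per sample.
    // centers: K x dims CV_32F; compactness is the sum of squared distances.
    return 0;
}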
Code example #7
File: randomNetUtils.cpp Project: pront/nemo-mpi
/* Used to demonstrate the optimal network for MPI simulations */
nemo::Network* constructSemiRandom(unsigned ncount, unsigned scount, unsigned dmax, bool stdp, unsigned workers, float ratio) {
	rng_t rng;
	/* Neuron parameters and weights are partially randomised */
	urng_t randomParameter(rng, boost::uniform_real<double>(0, 1));
	uirng_t randomDelay(rng, boost::uniform_int<>(1, dmax));

	nemo::Network* net = new nemo::Network();

	/* Firstly, add ncount neurons to the network.
	 * Properties: 80% excitatory, 20% inhibitory.
	 */
	for ( unsigned nidx = 0; nidx < ncount; ++nidx ) {
		if ( nidx < (ncount * 4) / 5 ) { // excitatory
			addExcitatoryNeuron(net, nidx, randomParameter);
		}
		else { // inhibitory
			addInhibitoryNeuron(net, nidx, randomParameter);
		}
	}

	/* Initialize partition counters */
	float avgNeurons = ceil(((float) ncount) / ((float) workers));
	std::vector<unsigned> counters(workers, avgNeurons);
	counters.back() = ncount - (workers - 1) * avgNeurons;

	std::vector<std::pair<nidx_t, nidx_t> > ranges;

	/* Make index ranges for each partition */
	for ( unsigned r = 0; r < workers; r++ ) {
		unsigned start = r * avgNeurons;
		std::pair<unsigned, unsigned> range(start, start + counters[r] - 1);
		ranges.push_back(range);
	}

	for ( unsigned i = 0; i < workers; i++ ) {
		uirng_t randomLocalNeuron(rng, boost::uniform_int<>(ranges[i].first, ranges[i].second));
		unsigned partitionSynapses = counters[i] * scount;
		unsigned globalSynapses = ratio * partitionSynapses;
		unsigned localSynapses = partitionSynapses - globalSynapses;

//		std::cout << "partitionSynapses: " << partitionSynapses << std::endl;
//		std::cout << "localSynapses: " << localSynapses << std::endl;
//		std::cout << "globalSynapses: " << globalSynapses << std::endl;
//		std::cout << std::endl;

		for ( unsigned j = 0; j < localSynapses; j++ ) {
			unsigned source;
			unsigned target;
			while (true) {
				source = randomLocalNeuron();
				target = randomLocalNeuron();
				if ( source != target )
					break;
			}

			if ( source < (ncount * 4) / 5 ) // source neuron is excitatory
				net->addSynapse(source, target, randomDelay(), 0.5f * float(randomParameter()), stdp);
			else
				net->addSynapse(source, target, 1U, float(-randomParameter()), stdp);
		}

		for ( unsigned j = 0; j < globalSynapses; j++ ) {

			uirng_t randomWorker(rng, boost::uniform_int<>(0, workers - 1));
			unsigned randomNeighbour;
			while (true) {
				randomNeighbour = randomWorker();
				if ( randomNeighbour != i )
					break;
			}

			uirng_t randomGlobalNeuron(rng,
					boost::uniform_int<>(ranges[randomNeighbour].first, ranges[randomNeighbour].second));

			unsigned source = randomLocalNeuron();
			if ( source < (ncount * 4) / 5 ) // source neuron is excitatory
				net->addSynapse(source, randomGlobalNeuron(), randomDelay(), 0.5f * float(randomParameter()), stdp);
			else
				net->addSynapse(source, randomGlobalNeuron(), 1U, float(-randomParameter()), stdp);

		}
	}

//	std::cout << "net->neuronCount: " << net->neuronCount() << std::endl;
//	std::cout << "net->synapseCount: " << net->synapseCount() << std::endl;
	return net;
}
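
Given the signature above, a call might look like the following (all parameter values are illustrative):

// Hypothetical parameters: 1000 neurons, 100 synapses per neuron, max delay 20,
// STDP enabled, 4 MPI workers, 10% of synapses crossing partition boundaries.
nemo::Network* net = constructSemiRandom(1000, 100, 20, true, 4, 0.1f);
// ... configure and run the simulation ...
delete net;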