int UltimateTicTacToeMontecarloAI::select(Nodes const& nodes, int const current) const
{
  Node const& node = nodes.at(current);
  if(node.children.empty())
    return current;

  return select(nodes, pickBestChild(node, nodes, true)); // descend along the child with the best UCB value
}
qreal UltimateTicTacToeMontecarloAI::nodeUCBValue(Node const& node, Nodes const& nodes) const {
  if(node.parent != -1)
  {
    return node.v + c * qSqrt(qLn(nodes.at(node.parent).n) / node.n);
  } else {
    return 0;
  }
}
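For reference, the bonus term above is the standard UCB1 exploration rule; writing N for the parent's visit count and n_i for the child's visit count,

  UCB1(i) = value_i + c * sqrt(ln(N) / n_i)

Note that this implementation plugs the node's accumulated value node.v directly into the exploitation term, whereas the non-UCB branch of pickBestChild below uses the mean v / n.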
Example #3
TEST(floodfill, one) {
  typedef DAG::Node<long long> PFNode;
  typedef std::map<long long, PFNode> Nodes;

  Nodes myNodes;
  long long id1, id2;
  id1 = 1;
  id2 = 2;
  DAG::FloodFill<long long> FFill;
  myNodes.emplace(id1, PFNode(id1));
  myNodes.emplace(id2, PFNode(id2));
  myNodes[id1].addChild(myNodes[id2]);

  ASSERT_EQ(myNodes.at(id1).parents().size(), 0UL);  // 0UL keeps the comparison unsigned, avoiding a compiler warning
  ASSERT_EQ(myNodes.at(id1).children().size(), 1UL);
  // ASSERT_EQ(myNodes.at(id1).children()[0],id2);

  for (const auto& nodevector : FFill.traverse(myNodes)) {
    ASSERT_EQ(nodevector.size(), 2UL);
  }
}
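/* Picks the child with the best score: the UCB1 value when ucb is true,
 * otherwise the mean value v / n. Returns -1 if the node has no children. */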
int UltimateTicTacToeMontecarloAI::pickBestChild(Node const& node, Nodes const& nodes, bool const ucb) const
{
  bool first = true;
  qreal bestChildValue = 0;
  int bestChildIndex = -1;

  for(int const childIndex : node.children)
  {
    Node const& child = nodes.at(childIndex);
    qreal childValue = ucb ? nodeUCBValue(child, nodes) : child.v / static_cast<qreal>(child.n);
    if(first || childValue >= bestChildValue) {
      bestChildIndex = childIndex;
      bestChildValue = childValue;
      first = false;
    }
  }

  return bestChildIndex;
}
/**
 * Takes a subgraph of the given graph (all nodes in the graph with the given label),
 * partitions this subgraph into even smaller subgraphs (using something similar to k-means),
 * and gives each small subgraph a unique label, starting at the given minimum label.
 * A usage sketch follows the function body.
 *
 * @param graph                            graph whose labels are read and rewritten
 * @param label_of_connected_component     label identifying the subgraph to partition
 * @param partition_size                   target number of nodes per partition
 * @param min_label_for_partition_labeling first label assigned to the generated partitions
 * @return the number of generated partitions
 */
std::size_t
partition_connected_component(UniGraph * graph, std::size_t label_of_connected_component, std::size_t partition_size, std::size_t min_label_for_partition_labeling)
{
    typedef std::size_t Node;
    typedef std::size_t Label;
    typedef std::vector<Node> Nodes;

    Nodes nodes;
    for (Node node = 0; node < graph->num_nodes(); ++node)
        if (graph->get_label(node) == label_of_connected_component)
            nodes.push_back(node);

    const std::size_t num_partitions = (nodes.size() + partition_size - 1) / partition_size; // division and rounding up

    /********* k-means clustering *******/

    const std::size_t num_kmeans_iterations = 100;

    Nodes centroids;

    /* Draw centroids randomly. */
    std::default_random_engine generator;
    std::uniform_int_distribution<std::size_t> distribution(0, nodes.size() - 1);
    for (std::size_t partition = 0; partition < num_partitions; ++partition) {
        /* Redraw until the candidate differs from all previously chosen centroids. */
        Node centroid = nodes.at(distribution(generator));
        while (std::find(centroids.begin(), centroids.end(), centroid) != centroids.end())
            centroid = nodes.at(distribution(generator));
        centroids.push_back(centroid);
    }

    for (std::size_t kmeans_iteration = 0; kmeans_iteration < num_kmeans_iterations; ++kmeans_iteration) {
        const Label unvisited = std::numeric_limits<Label>::max();
        for (Node const & node : nodes)
            graph->set_label(node, unvisited);

        /* Put centroids into queues. */
        std::vector<Nodes> queues(num_partitions);
        for (std::size_t i = 0; i < num_partitions; ++i)
            queues.at(i).push_back(centroids.at(i));

        /* Grow regions starting from centroids */
        while (std::any_of(queues.begin(), queues.end(), [](Nodes const & queue){return !queue.empty();})) {
#pragma omp parallel for
            for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
                Nodes & old_queue = queues.at(queue_id);
                std::unordered_set<Node> new_queue;
                for (Node node : old_queue)
                    graph->set_label(node, min_label_for_partition_labeling + queue_id); // there is a race condition for partition boundary nodes but we don't care
                for (Node node : old_queue) {
                    /* Copy all unvisited (and not yet inserted) neighbors into new queue. */
                    for (Node neighbor : graph->get_adj_nodes(node))
                        if (graph->get_label(neighbor) == unvisited)
                            new_queue.insert(neighbor);
                }

                old_queue.clear();
                old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
            }
        }

        /* If we are in the final iteration we stop here to keep the graph labels
         * (they would be removed in the following region shrinking step). */
        if (kmeans_iteration == num_kmeans_iterations - 1)
            break;

        /* Put partition boundary nodes into queues. */
        for (Node const node : nodes) {
            Label const cur_label = graph->get_label(node);
            std::size_t const cur_queue = cur_label - min_label_for_partition_labeling;
            Nodes const & neighbors = graph->get_adj_nodes(node);
            /* Each node, where any of its neighbors has a different label, is a boundary node. */
            if (std::any_of(neighbors.begin(), neighbors.end(), [graph, cur_label]
                (Node const neighbor) { return graph->get_label(neighbor) != cur_label; } ))
                queues.at(cur_queue).push_back(node);
        }

        /* Shrink regions starting from boundaries to obtain new centroids. */
#pragma omp parallel for
        for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
            Nodes & old_queue = queues.at(queue_id);
            while (!old_queue.empty()){
                std::unordered_set<Node> new_queue;
                for (Node node : old_queue)
                    graph->set_label(node, unvisited);
                for (Node node : old_queue) {
                    /* Copy all neighbors that have not yet been marked (and have not yet been inserted) into new queue. */
                    for (Node neighbor : graph->get_adj_nodes(node))
                        if (graph->get_label(neighbor) == min_label_for_partition_labeling + queue_id)
                            new_queue.insert(neighbor);
                }

                /* If the new queue is empty we are (almost) finished and use a random node from the old queue as new centroid. */
                if (new_queue.empty()) {
                    std::uniform_int_distribution<std::size_t> distribution(0, old_queue.size() - 1);
                    centroids.at(queue_id) = old_queue.at(distribution(generator));
                }

                /* Replace old queue with new one. */
                old_queue.clear();
                old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
            }
        }
    }

    return num_partitions;
}
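A minimal usage sketch, assuming the graph's connected components were labeled 1..num_components beforehand; partition_all, num_components and that labeling scheme are illustrative assumptions rather than part of the code above:

// Hypothetical driver: repartition every connected component of the graph.
// Assumes components carry labels 1..num_components and that labels above
// that range are free to be reused for the newly generated partitions.
std::size_t partition_all(UniGraph * graph, std::size_t num_components, std::size_t partition_size)
{
    std::size_t next_free_label = num_components + 1;
    for (std::size_t label = 1; label <= num_components; ++label) {
        std::size_t const generated = partition_connected_component(
            graph, label, partition_size, next_free_label);
        next_free_label += generated; /* keep partition labels unique across components */
    }
    return next_free_label - (num_components + 1); /* total number of partitions created */
}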
Example #6
static SV *node_to_sv(pTHX_ Node *node)
{
	SV *ret = NULL;
	if (!node) return ret;
	if (TYPE_match(node, BranchNode)) {
		BranchNode *branch = dynamic_cast<BranchNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, branch->tk);
		add_key(hash, "left", branch->left);
		add_key(hash, "right", branch->right);
		add_key(hash, "next", branch->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Branch");
	} else if (TYPE_match(node, FunctionCallNode)) {
		FunctionCallNode *call = dynamic_cast<FunctionCallNode *>(node);
		Nodes *args = call->args;
		size_t argsize = args->size();
		AV *array = new_Array();
		for (size_t i = 0; i < argsize; i++) {
			SV *arg = node_to_sv(aTHX_ args->at(i));
			if (!arg) continue;
			av_push(array, set(arg));
		}
		HV *hash = (HV*)new_Hash();
		add_key(hash, "next", call->next);
		add_token(hash, call->tk);
		(void)hv_stores(hash, "args", set(new_Ref(array)));
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::FunctionCall");
	} else if (TYPE_match(node, ArrayNode)) {
		ArrayNode *array = dynamic_cast<ArrayNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, array->tk);
		add_key(hash, "next", array->next);
		add_key(hash, "idx", array->idx);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Array");
	} else if (TYPE_match(node, HashNode)) {
		HashNode *h = dynamic_cast<HashNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, h->tk);
		add_key(hash, "next", h->next);
		add_key(hash, "key", h->key);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Hash");
	} else if (TYPE_match(node, DereferenceNode)) {
		DereferenceNode *dref = dynamic_cast<DereferenceNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, dref->tk);
		add_key(hash, "next", dref->next);
		add_key(hash, "expr", dref->expr);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Dereference");
	} else if (TYPE_match(node, FunctionNode)) {
		FunctionNode *f = dynamic_cast<FunctionNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, f->tk);
		add_key(hash, "next", f->next);
		add_key(hash, "body", f->body);
		add_key(hash, "prototype", f->prototype);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Function");
	} else if (TYPE_match(node, BlockNode)) {
		BlockNode *b = dynamic_cast<BlockNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, b->tk);
		add_key(hash, "next", b->next);
		add_key(hash, "body", b->body);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Block");
	} else if (TYPE_match(node, ReturnNode)) {
		ReturnNode *r = dynamic_cast<ReturnNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, r->tk);
		add_key(hash, "next", r->next);
		add_key(hash, "body", r->body);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Return");
	} else if (TYPE_match(node, SingleTermOperatorNode)) {
		SingleTermOperatorNode *s = dynamic_cast<SingleTermOperatorNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, s->tk);
		add_key(hash, "next", s->next);
		add_key(hash, "expr", s->expr);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::SingleTermOperator");
	} else if (TYPE_match(node, DoubleTermOperatorNode)) {
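		/* no conversion implemented for this node type; ret stays NULL */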
	} else if (TYPE_match(node, LeafNode)) {
		LeafNode *leaf = dynamic_cast<LeafNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, leaf->tk);
		add_key(hash, "next", leaf->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Leaf");
	} else if (TYPE_match(node, ListNode)) {
		ListNode *list = dynamic_cast<ListNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, list->tk);
		add_key(hash, "data", list->data);
		add_key(hash, "next", list->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::List");
	} else if (TYPE_match(node, ArrayRefNode)) {
		ArrayRefNode *ref = dynamic_cast<ArrayRefNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, ref->tk);
		add_key(hash, "data", ref->data);
		add_key(hash, "next", ref->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::ArrayRef");
	} else if (TYPE_match(node, HashRefNode)) {
		HashRefNode *ref = dynamic_cast<HashRefNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, ref->tk);
		add_key(hash, "data", ref->data);
		add_key(hash, "next", ref->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::HashRef");
	} else if (TYPE_match(node, IfStmtNode)) {
		IfStmtNode *stmt = dynamic_cast<IfStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "expr", stmt->expr);
		add_key(hash, "true_stmt", stmt->true_stmt);
		add_key(hash, "false_stmt", stmt->false_stmt);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::IfStmt");
	} else if (TYPE_match(node, ElseStmtNode)) {
		ElseStmtNode *stmt = dynamic_cast<ElseStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "stmt", stmt->stmt);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::ElseStmt");
	} else if (TYPE_match(node, DoStmtNode)) {
		DoStmtNode *stmt = dynamic_cast<DoStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "stmt", stmt->stmt);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::DoStmt");
	} else if (TYPE_match(node, ForStmtNode)) {
		ForStmtNode *stmt = dynamic_cast<ForStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "init", stmt->init);
		add_key(hash, "cond", stmt->cond);
		add_key(hash, "progress", stmt->progress);
		add_key(hash, "true_stmt", stmt->true_stmt);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::ForStmt");
	} else if (TYPE_match(node, ForeachStmtNode)) {
		ForeachStmtNode *stmt = dynamic_cast<ForeachStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "itr", stmt->itr);
		add_key(hash, "cond", stmt->cond);
		add_key(hash, "true_stmt", stmt->true_stmt);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::ForeachStmt");
	} else if (TYPE_match(node, WhileStmtNode)) {
		WhileStmtNode *stmt = dynamic_cast<WhileStmtNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, stmt->tk);
		add_key(hash, "next", stmt->next);
		add_key(hash, "true_stmt", stmt->true_stmt);
		add_key(hash, "expr", stmt->expr);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::WhileStmt");
	} else if (TYPE_match(node, ModuleNode)) {
		ModuleNode *mod = dynamic_cast<ModuleNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, mod->tk);
		add_key(hash, "next", mod->next);
		add_key(hash, "args", mod->args);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Module");
	} else if (TYPE_match(node, PackageNode)) {
		PackageNode *pkg = dynamic_cast<PackageNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, pkg->tk);
		add_key(hash, "next", pkg->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Package");
	} else if (TYPE_match(node, RegPrefixNode)) {
		RegPrefixNode *reg = dynamic_cast<RegPrefixNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, reg->tk);
		add_key(hash, "next", reg->next);
		add_key(hash, "option", reg->option);
		add_key(hash, "expr", reg->exp);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::RegPrefix");
	} else if (TYPE_match(node, RegReplaceNode)) {
		RegReplaceNode *reg = dynamic_cast<RegReplaceNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, reg->tk);
		add_key(hash, "next", reg->next);
		add_key(hash, "from", reg->from);
		add_key(hash, "to", reg->to);
		add_key(hash, "option", reg->option);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::RegReplace");
	} else if (TYPE_match(node, RegexpNode)) {
		RegexpNode *reg = dynamic_cast<RegexpNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, reg->tk);
		add_key(hash, "next", reg->next);
		add_key(hash, "option", reg->option);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Regexp");
	} else if (TYPE_match(node, LabelNode)) {
		LabelNode *label = dynamic_cast<LabelNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, label->tk);
		add_key(hash, "next", label->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Label");
	} else if (TYPE_match(node, HandleNode)) {
		HandleNode *fh = dynamic_cast<HandleNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, fh->tk);
		add_key(hash, "expr", fh->expr);
		add_key(hash, "next", fh->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::Handle");
	} else if (TYPE_match(node, HandleReadNode)) {
		HandleReadNode *fh = dynamic_cast<HandleReadNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, fh->tk);
		add_key(hash, "next", fh->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::HandleRead");
	} else if (TYPE_match(node, ThreeTermOperatorNode)) {
		ThreeTermOperatorNode *term = dynamic_cast<ThreeTermOperatorNode *>(node);
		HV *hash = (HV*)new_Hash();
		add_token(hash, term->tk);
		add_key(hash, "cond", term->cond);
		add_key(hash, "true_expr", term->true_expr);
		add_key(hash, "false_expr", term->false_expr);
		add_key(hash, "next", term->next);
		ret = bless(aTHX_ hash, "Compiler::Parser::Node::ThreeTermOperator");
	} else {
		assert(0 && "node type is not found");
	}
	return ret;
}
void ribi::pvdb::QtPvdbClusterDialog::Test() noexcept
{
  {
    static bool is_tested = false;
    if (is_tested) return;
    is_tested = true;
  }
  typedef std::vector<boost::shared_ptr<ribi::cmap::Edge> > Edges;
  typedef std::vector<boost::shared_ptr<ribi::cmap::Node> > Nodes;

  //Regular tests
  {
    const std::vector<boost::shared_ptr<pvdb::File> > v = pvdb::File::GetTests();
    std::for_each(v.begin(),v.end(),
      [](const boost::shared_ptr<pvdb::File> & file)
      {
        const bool had_cluster = file->GetCluster().get();
        const bool had_concept_map = file->GetConceptMap().get();
        boost::shared_ptr<QtPvdbClusterDialog> d(new QtPvdbClusterDialog(file));

        if (!had_cluster && !had_concept_map)
        {
          assert(file->GetCluster());
          assert(!file->GetConceptMap());
          assert(d->ui->button_add->isEnabled());
        }
        if ( had_cluster && !had_concept_map)
        {
          assert(file->GetCluster());
          assert(!file->GetConceptMap());
          assert(d->ui->button_add->isEnabled());
        }
        if (!had_cluster &&  had_concept_map)
        {
          assert(!file->GetCluster());
          assert( file->GetConceptMap());
          assert(!d->ui->button_add->isEnabled());
        }
        if ( had_cluster &&  had_concept_map)
        {
          assert( file->GetCluster());
          assert( file->GetConceptMap());
          assert(!d->ui->button_add->isEnabled());
        }
        //Test cluster copying, if there
        if (file->GetCluster())
        {
          assert(file->GetCluster() && "the cluster dialog used an existing or created a cluster");
          assert(file->GetCluster() == d->GetWidget()->GetCluster());
          const boost::shared_ptr<pvdb::Cluster> before = pvdb::ClusterFactory::DeepCopy(file->GetCluster());
          assert(before);
          assert(before != file->GetCluster());
          assert(operator==(*file->GetCluster(),*before));
          d->GetWidget()->Add("Modification!");
          //assert(!IsEqual(*file->GetCluster(),*before)); //Does not work, must obtain the cluster from the widget
          assert(!operator==(*d->GetWidget()->GetCluster(),*before)); //Widget updates the cluster
        }
      }
    );
  }
  {
    const std::vector<boost::shared_ptr<pvdb::File> > v = pvdb::File::GetTests();
    std::for_each(v.begin(),v.end(),
      [](const boost::shared_ptr<pvdb::File> & file)
      {
        const bool had_cluster = file->GetCluster().get(); //.get() needed for crosscompiler
        const bool had_concept_map = file->GetConceptMap().get(); //.get() needed for crosscompiler

        boost::shared_ptr<QtPvdbClusterDialog> d(new QtPvdbClusterDialog(file));

        if (!had_cluster && !had_concept_map)
        {
          assert(file->GetCluster());
          assert(!file->GetConceptMap());
          assert(d->ui->button_add->isEnabled());
        }
        if ( had_cluster && !had_concept_map)
        {
          assert(file->GetCluster());
          assert(!file->GetConceptMap());
          assert(d->ui->button_add->isEnabled());
        }
        if (!had_cluster &&  had_concept_map)
        {
          assert(!file->GetCluster());
          assert( file->GetConceptMap());
          assert(!d->ui->button_add->isEnabled());
        }
        if ( had_cluster &&  had_concept_map)
        {
          assert( file->GetCluster());
          assert( file->GetConceptMap());
          assert(!d->ui->button_add->isEnabled());
        }
        if (file->GetCluster())
        {

          const boost::shared_ptr<pvdb::Cluster> before = pvdb::ClusterFactory::DeepCopy(file->GetCluster());
          assert(before);
          assert(before != file->GetCluster());
          assert(operator==(*file->GetCluster(),*before));
          d->ui->edit->setText("modification");
          if (d->ui->button_add->isEnabled())
          {
            d->on_button_add_clicked();
            assert(!operator==(*before,*d->GetWidget()->GetCluster()));
          }
        }
      }
    );
  }
  //QtPvdbClusterDialog must be enabled if there is no concept map
  {
    const std::string question = "TESTQUESTION";
    const boost::shared_ptr<pvdb::File> file(new pvdb::File);
    file->SetQuestion(question);
    assert(file->GetQuestion() == question);

    const boost::shared_ptr<pvdb::Cluster> cluster = pvdb::ClusterFactory::GetTest( {0,1,2} );

    file->SetCluster(cluster);

    assert( file->GetCluster());
    assert(!file->GetConceptMap());

    const QtPvdbClusterDialog d(file);
    assert(d.GetWidget()->isEnabled()
      && "QtClusterWidget is enabled only when there is no ConceptMap");
  }

  //QtPvdbClusterDialog must be disabled if the concept map is already filled with nodes
  {
    using namespace cmap;
    const std::string question = "TESTQUESTION";
    const boost::shared_ptr<pvdb::File> file(new pvdb::File);
    file->SetQuestion(question);

    const boost::shared_ptr<pvdb::Cluster> cluster = pvdb::ClusterFactory::GetTest( { 0,1,2 } );

    file->SetCluster(cluster);

    const int index_1 = 1;
    assert(index_1 < static_cast<int>(ConceptFactory().GetTests().size()));
    const int index_2 = 2;
    assert(index_2 < static_cast<int>(ConceptFactory().GetTests().size()));

    const boost::shared_ptr<Concept> concept_d(ConceptFactory().Create("Concept F"));
    const boost::shared_ptr<Concept> concept_e(ConceptFactory().GetTests().at(index_1));
    const boost::shared_ptr<Concept> concept_f(ConceptFactory().GetTests().at(index_2));
    const boost::shared_ptr<Node> node_a(CenterNodeFactory().CreateFromStrings(question));
    const boost::shared_ptr<Node> node_b(cmap::NodeFactory().GetTest(index_1));
    const boost::shared_ptr<Node> node_c(cmap::NodeFactory().GetTest(index_2));
    const Nodes nodes = { node_a, node_b, node_c };
    const boost::shared_ptr<Edge> edge_a(cmap::EdgeFactory::Create(concept_d,1.2,3.4,nodes.at(0),false,nodes.at(1),true));
    const boost::shared_ptr<Edge> edge_b(cmap::EdgeFactory::Create(concept_e,2.3,4.5,nodes.at(1),false,nodes.at(2),true));
    const boost::shared_ptr<Edge> edge_c(cmap::EdgeFactory::Create(concept_f,3.4,5.6,nodes.at(2),false,nodes.at(0),true));
    const Edges edges = { edge_a, edge_b, edge_c };

    const boost::shared_ptr<ribi::cmap::ConceptMap> concept_map(
      ribi::cmap::ConceptMapFactory::Create(nodes,edges));
    assert(concept_map);
    file->SetConceptMap(concept_map);

    assert(file->GetQuestion() == question);
    assert(file->GetCluster());
    const QtPvdbClusterDialog d(file);
    assert(d.GetWidget());
    assert(!d.GetWidget()->isEnabled()
      && "QtClusterWidget is disabled when there is a filled ConceptMap");
  }
}
int UltimateTicTacToeMontecarloAI::realThink(const UltimateTicTacToeMontecarloAI::Board &board, const int previousMove, const int player) const
{
  //printBoard(board);
  if(maxIterations == 0) {
    Moves options = movementOptions(board, previousMove);
    return options.at(qrand() % options.size());
  }

  //qint64 now = QDateTime::currentMSecsSinceEpoch();
  //qDebug() << "c: " << c << ", maxIterations: " << maxIterations << ", maxChildren: " <<maxChildren;
  Nodes nodes;
  nodes.reserve(maxIterations * maxChildren);
  nodes.append(Node { 0, 1, board, previousMove, -1, Node::Children() });

  int i;
  for(i = 0; i < maxIterations; ++i)
  {
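    // One MCTS iteration: select a leaf, evaluate its game state, and either
    // stop (win), backpropagate a penalty (loss/tie), or expand, simulate and
    // backpropagate the simulation score (unresolved).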
    int leafIndex = select(nodes, 0); // start the selection at the root node
    Node const& leaf = nodes.at(leafIndex);

    GameState leafState = gameState(leaf.board, player);
    if(leafState == GameState::WIN)
    {
/*      qDebug() << "---";
      printBoard(leaf.board);
*/
      break;
    }
    else if(leafState == GameState::LOSE)
    {
      backpropagate(leafIndex, nodes, -10);
    }
    else if(leafState == GameState::TIE)
    {
      backpropagate(leafIndex, nodes, -5);
    }
    else if(leafState == GameState::UNRESOLVED)
    {
      int nodeIndex = expand(leafIndex, nodes, player);

      Node const& node = nodes.at(nodeIndex);
      int score = simulate(node.board,  node.previousMove, player);

      backpropagate(nodeIndex, nodes, score);
    }
  }

  //qDebug() << "Found solution in " << i + 1 << " iterations";
  Node const& root = nodes.at(0);
  int bestChildIndex = pickBestChild(root, nodes, false);
  Node const& bestChild = nodes.at(bestChildIndex);

  //qDebug() << "AI took " << (QDateTime::currentMSecsSinceEpoch() - now) << " ms";

  /*for(int childIndex : root.children)
  {
    Node const& child = nodes.at(childIndex);
    qDebug() << child.previousMove << ":" << child.v << child.n;
  }*/
  //qDebug() << bestChild.previousMove / 9 << bestChild.previousMove %9;
  return bestChild.previousMove;
}