Example No. 1
    // Exploration phase: expand nodes in best-first (f) order from the start
    // state until the termination condition fires or a goal node is expanded.
    // Returns the goal node, or the best frontier node to commit toward.
    const Node* explore(const State& startState, TerminationChecker& terminationChecker) {
        ++iterationCounter;
        clearOpenList();
        openList.reorder(fComparator); // order the open list by f for the exploration phase

        Planner::incrementGeneratedNodeCount();
        Node*& startNode = nodes[startState];

        if (startNode == nullptr) {
            // First visit to this state: allocate a node with g = 0 and the domain heuristic.
            startNode = nodePool->construct(Node{nullptr, startState, Action(), 0, domain.heuristic(startState), true});
        } else {
            // Reuse the cached node, but reset its per-episode search state.
            startNode->g = 0;
            startNode->action = Action();
            startNode->predecessors.clear();
            startNode->parent = nullptr;
        }

        startNode->iteration = iterationCounter;
        addToOpenList(*startNode);

        while (!terminationChecker.reachedTermination() && openList.isNotEmpty()) {
            Node* const currentNode = popOpenList();

            if (domain.isGoal(currentNode->state)) {
                return currentNode;
            }

            terminationChecker.notifyExpansion();
            expandNode(currentNode);
        }

        // Lookahead budget exhausted: return the best node left on the frontier.
        return openList.top();
    }
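
Both exploration examples reorder the open list with fComparator but never show how it is defined. The sketch below is one plausible shape for that comparator and for the Node fields these snippets rely on; the field layout, the three-way-comparison signature, and the tie-breaking toward larger g are assumptions, not definitions taken from this codebase.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch only: an assumed Node layout and f-ordering comparator.
    using Cost = std::int64_t;

    template <typename State, typename Action>
    struct Node {
        struct Edge {
            Node* predecessor; // node the incoming edge starts from
            Action action;     // action taken along the edge
            Cost actionCost;   // cost of that action
        };

        Node* parent;           // parent on the current best path from the start
        State state;
        Action action;          // action that generated this node from its parent
        Cost g;                 // cost accumulated in the current search episode
        Cost h;                 // (learned) heuristic estimate of the cost to a goal
        bool open;              // true while the node sits on the open list
        std::size_t iteration;  // episode in which the node was last touched
        std::vector<Edge> predecessors; // incoming edges, used by the learning phase

        Cost f() const { return g + h; }
    };

    // Exploration order: smaller f first, ties broken toward larger g,
    // i.e. toward nodes deeper in the local search space.
    template <typename State, typename Action>
    int fComparator(const Node<State, Action>& lhs, const Node<State, Action>& rhs) {
        if (lhs.f() < rhs.f()) return -1;
        if (lhs.f() > rhs.f()) return 1;
        if (lhs.g > rhs.g) return -1;
        if (lhs.g < rhs.g) return 1;
        return 0;
    }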
Example No. 2
    // Safety-aware variant of the exploration phase: it additionally checks the
    // safety of the start node, and the commented-out block below sketches logic
    // for steering expansion toward nodes that can reach safety.
    Node* explore(const State& startState, TerminationChecker& terminationChecker) {
        ++iterationCounter;
        clearOpenList();
        openList.reorder(fComparator);

        Planner::incrementGeneratedNodeCount();
        Node*& startNode = nodes[startState];

        if (startNode == nullptr) {
            startNode = nodePool->construct(Node{nullptr, startState, Action(), 0, domain.heuristic(startState), true});
        } else {
            startNode->g = 0;
            startNode->action = Action();
            startNode->predecessors.clear();
            startNode->parent = nullptr;
        }

        startNode->iteration = iterationCounter;
        addToOpenList(*startNode);

        Node* currentNode = startNode;

        checkSafeNode(currentNode);

        while (!terminationChecker.reachedTermination() && openList.isNotEmpty() && !domain.isGoal(currentNode->state)) {
            //            if (domain.safetyPredicate(currentNode->state)) { // try to find nodes which lead to safety
            //                currentNode = popOpenList();
            //                terminationChecker.notifyExpansion();
            //                expandNode(currentNode);
            //            }
            //
            //            if (currentNode == startNode) { // if we can't just do LSS-LRTA*
            //                while (!terminationChecker.reachedTermination() && !domain.isGoal(currentNode->state)) {
            //                    currentNode = popOpenList();
            //                    terminationChecker.notifyExpansion();
            //                    expandNode(currentNode);
            //                }
            //            }
            currentNode = popOpenList();
            if (domain.isGoal(currentNode->state)) {
                return currentNode;
            }

            terminationChecker.notifyExpansion();
            expandNode(currentNode);
        }

        return openList.top();
    }
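
Taken together, explore returns either a goal node or the most promising frontier node, and learn (Example No. 3) backs the resulting heuristic values up toward the start. One way a caller could tie the phases into a single real-time search episode is sketched below; the driver function, its name, and the path-extraction loop are assumptions about the surrounding planner, not code from this repository.

    #include <algorithm>
    #include <vector>

    // Sketch only: a possible per-episode driver around explore() and learn().
    // Planner, State, and TerminationChecker stand for whatever types the
    // surrounding codebase actually provides.
    template <typename Planner, typename State, typename TerminationChecker>
    auto runEpisode(Planner& planner, const State& agentState, TerminationChecker& terminationChecker) {
        // 1. Exploration: grow the local search space around the agent until
        //    the expansion/time budget runs out or a goal node is expanded.
        const auto* targetNode = planner.explore(agentState, terminationChecker);

        // 2. Learning: back up heuristic values through the local search space
        //    so later episodes do not fall back into the same local minimum.
        planner.learn(terminationChecker);

        // 3. Commitment: walk parent pointers from the chosen target node back
        //    to the start to recover the sequence of actions to execute.
        std::vector<decltype(targetNode->action)> actions;
        for (const auto* node = targetNode; node != nullptr && node->parent != nullptr; node = node->parent) {
            actions.push_back(node->action);
        }
        std::reverse(actions.begin(), actions.end());
        return actions;
    }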
Example No. 3
    // Learning phase: back up heuristic values from the expanded frontier
    // through the recorded predecessor edges until the learning budget is spent.
    void learn(const TerminationChecker& terminationChecker) {
        ++iterationCounter;

        // Reorder the open list based on the heuristic values
        openList.reorder(hComparator);

        sweepBackSafety();

        while (!terminationChecker.reachedTermination() && openList.isNotEmpty()) {
            auto currentNode = popOpenList();
            currentNode->iteration = iterationCounter;

            Cost currentHeuristicValue = currentNode->h;

            // Propagate the learned heuristic value to each predecessor
            for (auto predecessor : currentNode->predecessors) {
                Node* predecessorNode = predecessor.predecessor;

                if (predecessorNode->iteration == iterationCounter && !predecessorNode->open) {
                    // This node was already learned and closed in the current iteration
                    continue;
                    // TODO Review this. This could be incorrect if the action costs are not uniform
                }

                if (!predecessorNode->open) {
                    // This node is not on the open list yet because it has not been reached in the current learning pass

                    predecessorNode->h = currentHeuristicValue + predecessor.actionCost;
                    assert(predecessorNode->iteration == iterationCounter - 1);
                    predecessorNode->iteration = iterationCounter;

                    addToOpenList(*predecessorNode);
                } else if (predecessorNode->h > currentHeuristicValue + predecessor.actionCost) {
                    // This node was visited in this learning phase, but the current path is better than the previous one
                    predecessorNode->h = currentHeuristicValue + predecessor.actionCost;
                    openList.update(*predecessorNode);
                }
            }
        }
    }
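
The learning phase reorders the open list with hComparator, which turns the loop into a Dijkstra-style backup of heuristic values from the frontier through the recorded predecessor edges. A plausible definition, under the same assumed Node layout as the sketch after Example No. 1, is below.

    // Sketch only: h-ordering comparator for the learning phase, with the same
    // assumed Node layout and three-way signature as the exploration sketch.
    template <typename State, typename Action>
    int hComparator(const Node<State, Action>& lhs, const Node<State, Action>& rhs) {
        if (lhs.h < rhs.h) return -1;
        if (lhs.h > rhs.h) return 1;
        return 0;
    }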