Example #1
 // amortized log(container size)
 void insert_line(T a, T b) {
     auto it = this->insert({ a, b, false });
     while (set_boundary(it, next(it))) this->erase(next(it));
     if (covered(it)) set_boundary(prev(it), it = this->erase(it));
     while (it != this->begin() && covered(prev(it))) {
         this->erase(prev(it));
         set_boundary(prev(it), it);
     }
 }
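The snippet above reads like the insertion step of a "line container" (convex-hull-trick) structure built on a sorted set, with set_boundary and covered playing the roles usually called isect. Those helpers and the surrounding container are not shown; for comparison, here is a minimal, self-contained sketch of the common std::multiset-based variant that maintains the maximum of inserted lines k*x + m (a reference sketch, not the code the example above actually belongs to):

#include <cassert>
#include <climits>
#include <set>
using ll = long long;

struct Line {
    mutable ll k, m, p;              // slope, intercept, x up to which this line is best
    bool operator<(const Line& o) const { return k < o.k; }
    bool operator<(ll x) const { return p < x; }
};

// Maintains the upper envelope of inserted lines; query(x) returns max(k*x + m).
struct LineContainer : std::multiset<Line, std::less<>> {
    static const ll inf = LLONG_MAX;
    static ll fdiv(ll a, ll b) {     // floored division
        return a / b - ((a ^ b) < 0 && a % b);
    }
    bool isect(iterator x, iterator y) {   // set boundary of x; report whether y is covered
        if (y == end()) { x->p = inf; return false; }
        if (x->k == y->k) x->p = x->m > y->m ? inf : -inf;
        else x->p = fdiv(y->m - x->m, x->k - y->k);
        return x->p >= y->p;
    }
    void add(ll k, ll m) {           // amortized O(log n), like insert_line above
        auto z = insert({k, m, 0}), y = z++, x = y;
        while (isect(y, z)) z = erase(z);
        if (x != begin() && isect(--x, y)) isect(x, y = erase(y));
        while ((y = x) != begin() && (--x)->p >= y->p)
            isect(x, erase(y));
    }
    ll query(ll x) {
        assert(!empty());
        auto l = *lower_bound(x);
        return l.k * x + l.m;
    }
};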
Example #2
bool
extension::covered (const vec<str> &ev)
{
  bhash<str> eh;
  for (const str *ep = ev.base (); ep < ev.lim (); ep++)
    eh.insert (*ep);
  return covered (eh);
}
Example #3
bool intern openwindow( UI_WINDOW *wptr )
/***************************************/
{
    wptr->dirty = wptr->area;
    insert( wptr, wptr->priority );
    if( wptr->prev == NULL ) {
        return( false );
    } else {
        return( covered( wptr->area, wptr->prev ) );
    }
}
Example #4
 int numberOfPatterns(int m, int n) {
     vector<vector<vector<int>>> covered(10, vector<vector<int>>(10, vector<int>()));
     for(int i=1; i<=9; i++)
         for(int j=1; j<=9; j++)
             calc_keys_covered(covered, i, j);
     int ret = 0;
     vector<vector<int>> res;
     res.push_back(vector<int>());
     int num;
     for(int k=1; k<=n; k++)
     {
         num = numofPatterns(res,k,covered);
         if(k>=m) ret += num;
     }
     return ret;
 }
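This one is LeetCode 351 (Android Unlock Patterns). The helpers calc_keys_covered and numofPatterns are not shown; a plausible sketch of the first, assuming covered[i][j] holds the key (if any) that must already be visited before moving straight from key i to key j on the 3x3 pad:

#include <vector>

// Hypothetical helper, matching the assumed meaning of covered[i][j] above.
void calc_keys_covered(std::vector<std::vector<std::vector<int>>> &covered, int i, int j) {
    int ri = (i - 1) / 3, ci = (i - 1) % 3;   // row/column of key i on the 3x3 pad
    int rj = (j - 1) / 3, cj = (j - 1) % 3;
    // A key lies exactly between i and j only if the midpoint falls on the grid.
    if (i != j && (ri + rj) % 2 == 0 && (ci + cj) % 2 == 0) {
        int mid = ((ri + rj) / 2) * 3 + (ci + cj) / 2 + 1;
        covered[i][j].push_back(mid);         // e.g. covered[1][9] = {5}, covered[1][3] = {2}
    }
}

numofPatterns would then, presumably, extend each partial pattern in res by one key at a time, counting only moves whose intermediate key (if any) is already part of the pattern.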
Example #5
static bool covered( SAREA area, UI_WINDOW *wptr )
/************************************************/
{
    int             i;
    bool            flag;
    SAREA           areas[ 5 ];

    dividearea( area, wptr->area, areas );
    flag = ( areas[ 0 ].height > 0 );
    if( wptr->prev != NULL ) {
        for( i = 1; i < 5; ++i ) {
            if( areas[ i ].height > 0 ) {
                flag |= covered( areas[ i ], wptr->prev );
            }
        }
    }
    return( flag );
}
Example #6
	bool overlap(const rectangle& ir, const size& valid_input_area, const rectangle & dr, const size& valid_dst_area, rectangle& op_ir, rectangle& op_dr)
	{
		if(overlap(ir, rectangle(0, 0, valid_input_area.width, valid_input_area.height), op_ir) == false)
			return false;

		rectangle good_dr;
		if(overlap(dr, rectangle(0, 0, valid_dst_area.width, valid_dst_area.height), good_dr) == false)
			return false;

		zoom(ir, op_ir, dr, op_dr);

		if(false == covered(op_dr, good_dr))
		{
			op_dr = good_dr;
			zoom(dr, good_dr, ir, op_ir);
		}
		return true;
	}
Example #7
	bool overlap(const rectangle& ir, const size& valid_input_area, const rectangle & dr, const size& valid_dst_area, rectangle& op_ir, rectangle& op_dr)
	{
		rectangle valid_r{ valid_input_area };
		if (overlap(ir, valid_r, op_ir) == false)
			return false;

		valid_r = valid_dst_area;
		rectangle good_dr;
		if (overlap(dr, valid_r, good_dr) == false)
			return false;

		zoom(ir, op_ir, dr, op_dr);

		if (covered(op_dr, good_dr))
		{
			overlap({ op_dr }, good_dr, op_dr);
		}
		else
		{
			op_dr = good_dr;
			zoom(dr, good_dr, ir, op_ir);
		}
		return true;
	}
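Both overloads above lean on two further helpers: a rectangle/rectangle overload of overlap that yields the intersection, and a two-argument covered(a, b) containment test. A rough, hypothetical sketch of what such helpers could look like (the rect type, field names, and argument order are assumptions, not the library's actual definitions):

#include <algorithm>

struct rect { int x, y; unsigned width, height; };

// Intersection of a and b; returns false (and leaves out untouched) when they are disjoint.
bool overlap(const rect &a, const rect &b, rect &out) {
    int left   = std::max(a.x, b.x);
    int top    = std::max(a.y, b.y);
    int right  = std::min(a.x + int(a.width),  b.x + int(b.width));
    int bottom = std::min(a.y + int(a.height), b.y + int(b.height));
    if (left >= right || top >= bottom)
        return false;
    out = { left, top, unsigned(right - left), unsigned(bottom - top) };
    return true;
}

// One plausible reading of the two-argument covered used above:
// the first rectangle lies entirely inside the second.
bool covered(const rect &inner, const rect &outer) {
    return inner.x >= outer.x && inner.y >= outer.y &&
           inner.x + int(inner.width)  <= outer.x + int(outer.width) &&
           inner.y + int(inner.height) <= outer.y + int(outer.height);
}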
Example #8
static void addSome1Dmats4dim(FHESecKey& sKey, long i, long bound, long keyID)
{
  const FHEcontext &context = sKey.getContext();
  long m = context.zMStar.getM();
  computeParams(context,m,i); // defines vars: native, ord, gi, g2md, giminv, g2mdminv

  long baby, giant;
  std::tie(baby,giant) = computeSteps(ord, bound, native);

  for (long j=1,val=gi; j<=baby; j++) { // Add matrices for baby steps
    sKey.GenKeySWmatrix(1, val, keyID, keyID);
    if (!native) {
      long val2 = MulModPrecon(val,g2md,m,g2mdminv);
      sKey.GenKeySWmatrix(1, val2, keyID, keyID);
    }
    val = MulModPrecon(val, gi, m, giminv); // val *= g mod m (= g^{j+1})
   }

  long gb = PowerMod(gi,baby,m); // g^baby
  NTL::mulmod_precon_t gbminv = PrepMulModPrecon(gb, m);  
  for (long j=2,val=gb; j < giant; j++) { // Add matrices for giant steps
    val = MulModPrecon(val, gb, m, gbminv); // val = g^{(j+1)*baby}
    sKey.GenKeySWmatrix(1, val, keyID, keyID);
  }

  if (!native) {
    sKey.GenKeySWmatrix(1, context.zMStar.genToPow(i, -ord), keyID, keyID);
  }

  // VJS: experimental feature...because the replication code
  // uses rotations by -1, -2, -4, -8, we add a few
  // of these as well...only the small ones are important,
  // and we only need them if SameOrd(i)...
  // Note: we do indeed get a nontrivial speed-up

  if (native && i<context.zMStar.numOfGens()) {
    for (long k = 1; k < giant; k = 2*k) {
      long j = ord - k;
      long val = PowerMod(gi, j, m); // val = g^j
      sKey.GenKeySWmatrix(1, val, keyID, keyID);
    }
  }

#if 0 
MAUTO

  // build the tree for this dimension, the internal nodes are 1 and
  // (subset of) gi^{giant}, gi^{2*giant}, ..., gi^{baby*giant}. We

  MAUTO sKey.resetTree(i,keyID); // remove existing tree, if any 

  // keep a list of all the elements that are covered by the tree so far,
  // initialized to only the root (=1).
  std::unordered_set<long> covered({1});

  // Make a list of the automorphisms for this dimension
  std::vector<long> autos;
  for (long j=1,val=gi; j<ord; j++) {
    // Do we have matrices for val and/or val/gi^{di}?
    if (!native) {
      long val2 = MulModPrecon(val, g2md, m, g2mdminv);
      if (sKey.haveKeySWmatrix(1,val2,keyID,keyID)) {
        autos.push_back(val2);
      }
    }
    if (sKey.haveKeySWmatrix(1,val,keyID,keyID)) {
      autos.push_back(val);
    }
    val = MulModPrecon(val, gi, m, giminv); // g^{j+1}
  }

  // Insert internal nodes and their children to tree
  for (long j=0,fromVal=1; j<giant; j++) {
    NTL::mulmod_precon_t fromminv = PrepMulModPrecon(fromVal, m);      
    vector<long> children;
    for (long k: autos) {
      long toVal = MulModPrecon(k, fromVal, m, fromminv);
      if (covered.count(toVal)==0) { // toVal not covered yet
        covered.insert(toVal);
        children.push_back(toVal);
      }
    }
    if (!children.empty()) { // insert fromVal with its children
      sKey.add2tree(i, fromVal, children, keyID);
    }
    fromVal = MulModPrecon(fromVal, gb, m, gbminv); // g^{(j+1)*baby}
  }

  // Sanity-check, did we cover everything?
  long toCover = native? ord: (2*ord-1);
  if (covered.size()<toCover)
    cerr << "**Warning: order-"<<ord<<" dimension, covered "<<covered.size()
         << " of "<<toCover<<endl;
#endif
}
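computeSteps itself is not shown. The two loops above only rely on a baby-step/giant-step split of the dimension's order, so a rough stand-in (purely illustrative; HElib's real computeSteps also takes bound and native into account in ways not reproduced here) could look like:

#include <cmath>
#include <utility>

// Illustrative split: choose baby ~ sqrt(ord) so that every exponent e in
// [0, ord) can be written as e = j + k*baby with 0 <= j < baby, 0 <= k < giant.
static std::pair<long, long> computeStepsSketch(long ord, long /*bound*/, bool /*native*/)
{
  long baby = std::lround(std::ceil(std::sqrt(double(ord))));
  if (baby < 1) baby = 1;
  long giant = (ord + baby - 1) / baby;   // ceil(ord / baby)
  return {baby, giant};
}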
Example #9
bool SegmentTulipDepth::run(Communicator *comm, const Options &options, ShapeGraph &map, bool simple_version) {

    AttributeTable &attributes = map.getAttributeTable();

    std::string stepdepth_col_text = "Angular Step Depth";
    int stepdepth_col = attributes.insertOrResetColumn(stepdepth_col_text.c_str());

    // The original code set tulip_bins to 1024, then divided by two and added one
    // in order to duplicate the previous code (which used a semicircle of tulip bins)
    size_t tulip_bins = 513;

    std::vector<bool> covered(map.getConnections().size());
    for (size_t i = 0; i < map.getConnections().size(); i++) {
       covered[i] = false;
    }
    std::vector<std::vector<SegmentData> > bins(tulip_bins);

    int opencount = 0;
    for (auto& sel: map.getSelSet()) {
       int row = depthmapX::getMapAtIndex(map.getAllShapes(), sel)->first;
       if (row != -1) {
          bins[0].push_back(SegmentData(0,row,SegmentRef(),0,0.0,0));
          opencount++;
       }
    }
    int depthlevel = 0;
    auto binIter = bins.begin();
    int currentbin = 0;
    while (opencount) {
       while (binIter->empty()) {
          depthlevel++;
          binIter++;
          currentbin++;
          if (binIter == bins.end()) {
             binIter = bins.begin();
          }
       }
       SegmentData lineindex;
       if (binIter->size() > 1) {
          // it is slightly slower to delete from an arbitrary place in the bin,
          // but it is necessary to use random paths to even out the number of times through equal paths
          int curr = pafrand() % binIter->size();
          auto currIter = binIter->begin() + curr;
          lineindex = *currIter;
          binIter->erase(currIter);
          // note: do not clear choice values here!
       }
       else {
          lineindex = binIter->front();
          binIter->pop_back();
       }
       opencount--;
       if (!covered[lineindex.ref]) {
          covered[lineindex.ref] = true;
          Connector& line = map.getConnections()[lineindex.ref];
          // convert depth from tulip_bins normalised to standard angle
          // (note the -1)
          double depth_to_line = depthlevel / ((tulip_bins - 1) * 0.5);
          map.getAttributeRowFromShapeIndex(lineindex.ref).setValue(stepdepth_col,depth_to_line);
          int extradepth;
          if (lineindex.dir != -1) {
             for (auto& segconn: line.m_forward_segconns) {
                if (!covered[segconn.first.ref]) {
                   extradepth = (int) floor(segconn.second * tulip_bins * 0.5);
                   auto currIter = binIter;
                   bins[(currentbin + tulip_bins + extradepth) % tulip_bins].push_back(
                       SegmentData(segconn.first,lineindex.ref,lineindex.segdepth+1,0.0,0));
                   opencount++;
                }
             }
          }
          if (lineindex.dir != 1) {
             for (auto& segconn: line.m_back_segconns) {
                if (!covered[segconn.first.ref]) {
                   extradepth = (int) floor(segconn.second * tulip_bins * 0.5);
                   bins[(currentbin + tulip_bins + extradepth) % tulip_bins].push_back(
                       SegmentData(segconn.first,lineindex.ref,lineindex.segdepth+1,0.0,0));
                   opencount++;
                 }
             }
          }
       }
    }

    map.setDisplayedAttribute(-2); // <- override if it's already showing
    map.setDisplayedAttribute(stepdepth_col);

    return true;
}
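The normalisation near the end of the loop, depthlevel / ((tulip_bins - 1) * 0.5), just rescales the integer bin depth; with tulip_bins = 513 that is depthlevel / 256. A tiny standalone check of the conversion (illustrative only):

#include <cstdio>

int main() {
    const unsigned tulip_bins = 513;
    int levels[] = {0, 128, 256, 512};
    for (int depthlevel : levels)
        std::printf("depthlevel %3d -> angular step depth %g\n",
                    depthlevel, depthlevel / ((tulip_bins - 1) * 0.5));
    return 0;
}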
Example #10
bool SegmentAngular::run(Communicator *comm, const Options &options, ShapeGraph &map, bool simple_version) {

    if (map.getMapType() != ShapeMap::SEGMENTMAP) {
        return false;
    }

    AttributeTable &attributes = map.getAttributeTable();

    time_t atime = 0;
    if (comm) {
        qtimer(atime, 0);
        comm->CommPostMessage(Communicator::NUM_RECORDS, map.getConnections().size());
    }

    // note: radius must be sorted lowest to highest, but if -1 occurs ("radius n") it needs to be last...
    // ...to ensure no mess ups, we'll re-sort here:
    bool radius_n = false;
    std::vector<double> radii;
    for (double radius : options.radius_set) {
        if (radius < 0) {
            radius_n = true;
        } else {
            radii.push_back(radius);
        }
    }
    if (radius_n) {
        radii.push_back(-1.0);
    }

    std::vector<int> depth_col, count_col, total_col;
    // first enter table values
    for (int radius : radii) {
        std::string radius_text = makeRadiusText(Options::RADIUS_ANGULAR, radius);
        std::string depth_col_text = std::string("Angular Mean Depth") + radius_text;
        attributes.insertOrResetColumn(depth_col_text.c_str());
        std::string count_col_text = std::string("Angular Node Count") + radius_text;
        attributes.insertOrResetColumn(count_col_text.c_str());
        std::string total_col_text = std::string("Angular Total Depth") + radius_text;
        attributes.insertOrResetColumn(total_col_text.c_str());
    }

    for (int radius : radii) {
        std::string radius_text = makeRadiusText(Options::RADIUS_ANGULAR, radius);
        std::string depth_col_text = std::string("Angular Mean Depth") + radius_text;
        depth_col.push_back(attributes.getColumnIndex(depth_col_text.c_str()));
        std::string count_col_text = std::string("Angular Node Count") + radius_text;
        count_col.push_back(attributes.getColumnIndex(count_col_text.c_str()));
        std::string total_col_text = std::string("Angular Total Depth") + radius_text;
        total_col.push_back(attributes.getColumnIndex(total_col_text.c_str()));
    }

    std::vector<bool> covered(map.getShapeCount());
    size_t i = 0;
    for (auto & iter : attributes){
        for (size_t j = 0; j < map.getShapeCount(); j++) {
            covered[j] = false;
        }
        std::vector<std::pair<float, SegmentData>> anglebins;
        anglebins.push_back(std::make_pair(0.0f, SegmentData(0, i, SegmentRef(), 0, 0.0, 0)));

        std::vector<double> total_depth;
        std::vector<int> node_count;
        for (size_t r = 0; r < radii.size(); r++) {
            total_depth.push_back(0.0);
            node_count.push_back(0);
        }
        // node_count includes this one, but will be added in next algo:
        while (anglebins.size()) {
            auto iter = anglebins.begin();
            SegmentData lineindex = iter->second;
            if (!covered[lineindex.ref]) {
                covered[lineindex.ref] = true;
                double depth_to_line = iter->first;
                total_depth[lineindex.coverage] += depth_to_line;
                node_count[lineindex.coverage] += 1;
                anglebins.erase(iter);
                Connector &line = map.getConnections()[lineindex.ref];
                if (lineindex.dir != -1) {
                    for (auto &segconn : line.m_forward_segconns) {
                        if (!covered[segconn.first.ref]) {
                            double angle = depth_to_line + segconn.second;
                            int rbin = lineindex.coverage;
                            while (rbin != radii.size() && radii[rbin] != -1 && angle > radii[rbin]) {
                                rbin++;
                            }
                            if (rbin != radii.size()) {
                                depthmapX::insert_sorted(
                                    anglebins, std::make_pair(float(angle),
                                                              SegmentData(segconn.first, SegmentRef(), 0, 0.0, rbin)));
                            }
                        }
                    }
                }
                if (lineindex.dir != 1) {
                    for (auto &segconn : line.m_back_segconns) {
                        if (!covered[segconn.first.ref]) {
                            double angle = depth_to_line + segconn.second;
                            int rbin = lineindex.coverage;
                            while (rbin != radii.size() && radii[rbin] != -1 && angle > radii[rbin]) {
                                rbin++;
                            }
                            if (rbin != radii.size()) {
                                depthmapX::insert_sorted(
                                    anglebins, std::make_pair(float(angle),
                                                              SegmentData(segconn.first, SegmentRef(), 0, 0.0, rbin)));
                            }
                        }
                    }
                }
            } else {
                anglebins.erase(iter);
            }
        }
        AttributeRow &row = iter.getRow();
        // set the attributes for this node:
        int curs_node_count = 0;
        double curs_total_depth = 0.0;
        for (size_t r = 0; r < radii.size(); r++) {
            curs_node_count += node_count[r];
            curs_total_depth += total_depth[r];
            row.setValue(count_col[r], float(curs_node_count));
            if (curs_node_count > 1) {
                // note -- node_count includes this one -- mean depth as per p.108 Social Logic of Space
                double mean_depth = curs_total_depth / double(curs_node_count - 1);
                row.setValue(depth_col[r], float(mean_depth));
                row.setValue(total_col[r], float(curs_total_depth));
            } else {
                row.setValue(depth_col[r], -1);
                row.setValue(total_col[r], -1);
            }
        }
        //
        if (comm) {
            if (qtimer(atime, 500)) {
                if (comm->IsCancelled()) {
                    throw Communicator::CancelledException();
                }
                comm->CommPostMessage(Communicator::CURRENT_RECORD, i);
            }
        }
        i++;
    }

    map.setDisplayedAttribute(-2); // <- override if it's already showing
    map.setDisplayedAttribute(depth_col.back());

    return true;
}
Example #11
	sequence_in_t C_method(const unique_ptr<DFSM>& fsm, int extraStates) {
		RETURN_IF_UNREDUCED(fsm, "FSMtesting::C_method", sequence_in_t());
		auto E = getAdaptiveDistinguishingSet(fsm);
		if (E.empty()) {
			return sequence_in_t();
		}
		auto N = fsm->getNumberOfStates();
		auto P = fsm->getNumberOfInputs();
		
		/* // Example from simao2009checking
		E.clear();
		E[0].push_back(0);
		E[0].push_back(1);
		E[0].push_back(0);
		E[1].push_back(0);
		E[1].push_back(1);
		E[1].push_back(0);
		E[2].push_back(0);
		E[2].push_back(1);
		E[3].push_back(0);
		E[3].push_back(1);
		E[4].push_back(0);
		E[4].push_back(1);
		//*/
		vector<vector<bool>> verifiedTransition(N);
		vector<input_t> verifiedState(N, P);
		vector<shared_ptr<ver_seq_t>> verSeq(N);
		for (state_t i = 0; i < N; i++) {
			verifiedTransition[i].resize(P, false);
			verSeq[i] = make_shared<ver_seq_t>();
		}
		if (fsm->isOutputState()) {
			for (state_t i = 0; i < N; i++) {
				sequence_in_t seq;
				for (const auto& input : E[i]) {
					if (input == STOUT_INPUT) continue;
					seq.push_back(input);
				}
				E[i].swap(seq);
			}
		}
		output_t outputState = (fsm->isOutputState()) ? fsm->getOutput(0, STOUT_INPUT) : DEFAULT_OUTPUT;
		output_t outputTransition;
		vector<unique_ptr<TestNodeC>> cs;
		cs.emplace_back(make_unique<TestNodeC>(STOUT_INPUT, DEFAULT_OUTPUT, 0, outputState));
		state_t currState = 0;
		for (const auto& input : E[0]) {
			auto nextState = fsm->getNextState(currState, input);
			outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
			outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, input) : DEFAULT_OUTPUT;
			cs.emplace_back(make_unique<TestNodeC>(input, outputTransition, nextState, outputState));
			currState = nextState;
		}
		vector<vector<seq_len_t>> confirmedNodes(N);
		confirmedNodes[0].push_back(0);
		queue<seq_len_t> newlyConfirmed;
		seq_len_t counter = N * fsm->getNumberOfInputs();
		seq_len_t currIdx = 1;
		seq_len_t lastConfIdx = 0;
		currState = 0;

		while (counter > 0) {
			//getCS(CS, fsm->isOutputState());
			//printf("%u/%u %u %s\n", currIdx, cs.size(), lastConfIdx, FSMmodel::getInSequenceAsString(CS).c_str());
			if (cs.back()->confirmed) {
				currIdx = seq_len_t(cs.size());
				lastConfIdx = currIdx - 1;
			}
			if (currIdx < cs.size()) {
				if (!cs[currIdx]->confirmed) {
					auto nextState = cs[currIdx]->state;
					auto nextInput = E[nextState].begin();
					if (equalSeqPart(currIdx + 1, nextInput, E[nextState].end(), cs)) {
						currState = cs.back()->state;
						for (; nextInput != E[cs[currIdx]->state].end(); nextInput++) {
							nextState = fsm->getNextState(currState, *nextInput);
							outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
							outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, *nextInput) : DEFAULT_OUTPUT;
							cs.emplace_back(make_unique<TestNodeC>(*nextInput, outputTransition, nextState, outputState));
							currState = nextState;
						}
						cs[currIdx]->confirmed = true;
						newlyConfirmed.emplace(currIdx);
						update(lastConfIdx, cs, newlyConfirmed, verifiedTransition, verifiedState, verSeq, confirmedNodes, counter);
						processNewlyConfirmed(cs, newlyConfirmed, verifiedTransition, verifiedState, verSeq, confirmedNodes, counter,
							currIdx, lastConfIdx);
					}
				}
				else {
					lastConfIdx = currIdx;
				}
				currIdx++;
			}
			else if (verifiedState[cs.back()->state] > 0) {
				currState = cs.back()->state;
				for (input_t input = 0; input < P; input++) {
					if (!verifiedTransition[currState][input]) {
						auto nextState = fsm->getNextState(currState, input);
						outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
						outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, input) : DEFAULT_OUTPUT;
						cs.emplace_back(make_unique<TestNodeC>(input, outputTransition, nextState, outputState));
						cs.back()->confirmed = true;
						newlyConfirmed.emplace(currIdx); //cs.size()-1

						sequence_in_t seqE(E[nextState]);
						// output-confirmed
						if (!E[currState].empty() && input == E[currState].front()) {
							sequence_in_t suf(E[currState]);
							suf.pop_front();
							auto outSuf = fsm->getOutputAlongPath(nextState, suf);
							auto outE = fsm->getOutputAlongPath(nextState, E[nextState]);
							//printf("(%d,%d,%d) %s/%s %s\n", currState, input, nextState,
							//      FSMmodel::getInSequenceAsString(suf).c_str(),
							//    FSMmodel::getOutSequenceAsString(outSuf).c_str(),
							//  FSMmodel::getOutSequenceAsString(outE).c_str());
							seq_len_t lenE = 0; //outE.size();
							for (state_t i = 0; i < N; i++) {
								if (i != nextState) {
									auto outSufI = fsm->getOutputAlongPath(i, suf);
									auto osl = equalLength(outSuf.begin(), outSufI.begin(), seq_len_t(outSuf.size()));
									if (osl != outSuf.size()) {
										bool outConfirmed = false;
										auto sufIt = suf.begin();
										osl++;
										while (osl-- > 0) sufIt++;
										for (auto& cnIdx : confirmedNodes[i]) {
											auto sufBeginIt = suf.begin();
											if (equalSeqPart(cnIdx + 1, sufBeginIt, sufIt, cs)) {
												outConfirmed = true;
												break;
											}
										}

										if (outConfirmed) {
											continue;
											/*
											outConfirmed = false;
											for (auto cnIdx : confirmedNodes[nextState]) {
											auto sufBeginIt = suf.begin();
											if (equalSeqPart(cnIdx, sufBeginIt, sufIt)) {
											outConfirmed = true;
											break;
											}
											}
											if (outConfirmed) {
											continue;
											}
											*/
										}
									}
									auto outI = fsm->getOutputAlongPath(i, E[nextState]);
									auto oel = 1 + equalLength(outE.begin(), outI.begin(), seq_len_t(outI.size()));
									//printf("%s/%s x %s %d %d-%d\n", 
									//      FSMmodel::getInSequenceAsString(E[nextState]).c_str(),
									//    FSMmodel::getOutSequenceAsString(outE).c_str(),
									//  FSMmodel::getOutSequenceAsString(outI).c_str(),
									//oel, lenE, outE.size());
									if (oel > lenE) {
										lenE = oel;
										if (lenE == outE.size()) {// entire E is needed
											break;
										}
									}
								}
							}
							// adjust E
							for (; lenE < outE.size(); lenE++) {
								seqE.pop_back();
							}
						}
						currState = nextState;
						for (const auto& input : seqE) {
							nextState = fsm->getNextState(currState, input);
							outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
							outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, input) : DEFAULT_OUTPUT;
							cs.emplace_back(make_unique<TestNodeC>(input, outputTransition, nextState, outputState));
							currState = nextState;
						}
						update(currIdx - 1, cs, newlyConfirmed, verifiedTransition, verifiedState, verSeq, confirmedNodes, counter);
						processNewlyConfirmed(cs, newlyConfirmed, verifiedTransition, verifiedState, verSeq, confirmedNodes, counter,
							currIdx, lastConfIdx);
						break;
					}
				}
			}
			else {// find unverified transition
				vector<bool> covered(N, false);
				list<pair<state_t, sequence_in_t>> fifo;
				currState = cs.back()->state;
				covered[currState] = true;
				fifo.emplace_back(currState, sequence_in_t());
				while (!fifo.empty()) {
					auto current = move(fifo.front());
					fifo.pop_front();
					for (input_t input = 0; input < P; input++) {
						auto nextState = fsm->getNextState(current.first, input);
						if (nextState == WRONG_STATE) continue;
						if (verifiedState[nextState] > 0) {
							for (auto nextInput = current.second.begin(); nextInput != current.second.end(); nextInput++) {
								nextState = fsm->getNextState(currState, *nextInput);
								outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
								outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, *nextInput) : DEFAULT_OUTPUT;
								cs.emplace_back(make_unique<TestNodeC>(*nextInput, outputTransition, nextState, outputState));
								cs.back()->confirmed = true;
								currState = nextState;
							}
							nextState = fsm->getNextState(currState, input);
							outputState = (fsm->isOutputState()) ? fsm->getOutput(nextState, STOUT_INPUT) : DEFAULT_OUTPUT;
							outputTransition = (fsm->isOutputTransition()) ? fsm->getOutput(currState, input) : DEFAULT_OUTPUT;
							lastConfIdx = seq_len_t(cs.size());
							cs.emplace_back(make_unique<TestNodeC>(input, outputTransition, nextState, outputState));
							cs.back()->confirmed = true;
							currIdx = seq_len_t(cs.size());
							fifo.clear();
							break;
						}
						if (!covered[nextState]) {
							covered[nextState] = true;
							sequence_in_t newPath(current.second);
							newPath.push_back(input);
							fifo.emplace_back(nextState, move(newPath));
						}
					}
				}

			}
		}
		return getCS(fsm->isOutputState(), cs);
	}
Example #12
void STGGeneric::makeChildren(SphereTree *st, int node, int level, const SurfaceRep &surRep) const{
  //  get the error of the parent
  Sphere parS = st->nodes.index(node);
  double parErr = eval->evalSphere(parS);

  //  get minimum bounding sphere for points to give to reducer
  Sphere boundingSphere;
  SFWhite::makeSphere(&boundingSphere, *surRep.getSurPts());

  //  generate the set of child spheres
  Array<Sphere> initChildren, children;
  reducer->getSpheres(&initChildren, st->degree, surRep, &boundingSphere, parErr);

  //  do sphere refit - local optimisation
  if (useRefit){
    OUTPUTINFO("Refitting\n");
    SOPerSphere perSphere;
    perSphere.numIter = 3;
    perSphere.eval = eval;
    perSphere.optimise(&initChildren, surRep);
    }

  //  apply optimiser if required
  if (optimiser && (maxOptLevel < 0 || level <= maxOptLevel))
    optimiser->optimise(&initChildren, surRep, -1, &parS, level-1);

  //  remove redundant spheres
  RELargest reLargest;
  if (!reLargest.reduceSpheres(&children, initChildren, surRep))
    children.clone(initChildren);

  int numChildren = children.getSize();
  if (numChildren == 0)
    return;

  //  get the points that this node covers
  const Array<Surface::Point> *surPts = surRep.getSurPts();
  int numPts = surPts->getSize();

  //  info for areas to be covered by each sphere
  Array<Array<Surface::Point> > subPts(numChildren);
  Array<bool> covered(numPts);
  covered.clear();

  //  make the divisions between the children
  SurfaceDivision surDiv;
  surDiv.setup(children, surPts);

  //  do the children
  int firstChild = st->getFirstChild(node);
  for (int i = 0; i < numChildren; i++){
    //  get sphere
    Sphere s = children.index(i);

    //  list the points in the sphere
    Array<int> listPoints;
    surRep.listContainedPoints(&listPoints, NULL, s, NULL);
    int numList = listPoints.getSize();

    //  filter points
    Array<Surface::Point> *filterPts = &subPts.index(i);
    filterPts->resize(0);
    for (int j = 0; j < numList; j++){
      //  get point
      int pI = listPoints.index(j);
      Surface::Point p = surPts->index(pI);

      //  check if it's in the region
      if (surDiv.pointInRegion(p.p, i)){
        covered.index(pI) = true;     //  covered is indexed by position in surPts
        filterPts->addItem() = p;
        }
      }
    }

  //  count/cover uncovered points
  for (int i = 0; i < numPts; i++){
    if (!covered.index(i)){
      //  get the point
      Point3D p = surPts->index(i).p;

      //  find the closest sphere
      int closestJ = -1;
      float closestD = FLT_MAX;   //  accept the nearest sphere even when the point lies outside all of them
      for (int j = 0; j < numChildren;  j++){
        Sphere s = children.index(j);
        float d = p.distance(s.c) - s.r;
        if (d < closestD){
          closestJ = j;
          closestD = d;
          }
        }

      subPts.index(closestJ).addItem() = surPts->index(i);
      }
    }

  //  store spheres & recurse to children 
  int childNum = firstChild;
  for (int i = 0; i < numChildren; i++){
    if (subPts.index(i).getSize() > 1){
      //  recreate the sphere
      Sphere s = children.index(i);

      //  add sphere to tree
      st->nodes.index(childNum).c = s.c;
      st->nodes.index(childNum).r = s.r;

      if (level < st->levels-1 && numChildren > 1){
        const Array<Surface::Point> *pts = &subPts.index(i);

        //  make cells to have 10 pts each, most will have a lot more
        int numCells = pts->getSize() / PTS_PER_CELL;
        int gridDim = ceil(pow(numCells, 1.0 / 3.0));
        OUTPUTINFO("numCells = %d, gridDim = %d\n", numCells, gridDim);

        //  make children by recursion
        SurfaceRep subRep;
        subRep.setup(*pts, gridDim);

        makeChildren(st, childNum, level+1, subRep);
        }

      childNum++;
      }
    }

  //  NULL out the rest of the spheres
  for (int i = childNum; i < st->degree; i++)
    st->initNode(firstChild+i, level+1);
}
Example #13
//  generate breadth first
void STGGeneric::constructTree(SphereTree *st) const{
  CHECK_DEBUG0(st != NULL && st->degree > 1 && st->levels >= 1);
  CHECK_DEBUG0(reducer != NULL);
  CHECK_DEBUG0(eval != NULL);
  CHECK_DEBUG0(surfacePoints != NULL);

  //  NULL out the entire tree
  st->initNode(0);

  //  make cells to have 10 pts each, most will have a lot more
  int numCells = surfacePoints->getSize() / PTS_PER_CELL;
  int gridDim = ceil(pow(numCells, 1.0 / 3.0));
  OUTPUTINFO("numCells = %d, gridDim = %d\n", numCells, gridDim);

  SurfaceRep surRep;
  surRep.setup(*surfacePoints, gridDim);

  //  bounding sphere for root - should use vertices
  SFWhite::makeSphere(&st->nodes.index(0), *surfacePoints);

  //  list of points to be covered by each node
  unsigned long start, num;
  st->getRow(&start, &num, st->levels-1);
  Array<Array<int>/**/> pointsInSpheres;
  pointsInSpheres.resize(st->nodes.getSize() - num);

  //  initialise the list for the first node
  int numPts = surfacePoints->getSize();
  Array<int> *list0 = &pointsInSpheres.index(0);
  list0->resize(numPts);
  for (int i = 0; i < numPts; i++)
    list0->index(i) = i;

  //  process the remaining levels
  int numLeaves = 0;
  for (int level = 0; level < st->levels-1; level++){
    //  get the positions of the nodes
    unsigned long start, num;
    st->getRow(&start, &num, level);

    //  update samples etc. for this level
    reducer->setupForLevel(level, st->degree, &surRep);

    //  get the errors for all the spheres in that level
    int numActualSpheres = 0;
    double averageError = 0;
    Array<double> sphereErrors(num);
    for (int i = 0; i < num; i++){
      Sphere s = st->nodes.index(start+i);
      if (s.r >= 0){
        double err = eval->evalSphere(s);
        sphereErrors.index(i) = err;
        averageError += err;
        numActualSpheres++;
        }
      else 
        sphereErrors.index(i) = -1;
      }
    averageError /= numActualSpheres;
    if (level != 0 && numActualSpheres <= 1){
      numLeaves++;
      continue;     //  there is only one sphere here - will never improve
      }

    //  process each node to make its children
    int maxNode = -1;
    double maxR = -1;
    int levelChildren = 0;
    for (int nodeI = 0; nodeI < num; nodeI++){
      //OUTPUTINFO("Level = %d, node = %d\n", level, nodeI);
      printf("Level = %d, node = %d\n", level, nodeI);

      int node = nodeI + start;
      if (st->nodes.index(node).r <= 0){
          OUTPUTINFO("R = %f\n", st->nodes.index(node).r);
          st->initNode(node);
          continue;
          }

/*
      //  hack to do largest sphere at each run - gives guide to good params
      double r = st->nodes.index(node).r;
      if (r > maxR){
        maxR = r;
        maxNode = node;
        }
      }

      nodeI = maxNode - start;
      int node = maxNode;{
      //  end hack
*/

      //  make the set of surface points to be covered by this sphere
      Array<int> *selPtsI = &pointsInSpheres.index(node);
      int numSelPts = selPtsI->getSize();

      if (numSelPts <= 0)
        break;

      Array<Surface::Point> selPts(numSelPts);
      for (int i = 0; i < numSelPts; i++)
        selPts.index(i) = surfacePoints->index(selPtsI->index(i));

      //  get filter sphere
      Sphere s;
      SFWhite::makeSphere(&s, selPts);

      //  make cells to have 10 pts each, most will have a lot more
      int numCells = numSelPts / PTS_PER_CELL;
      int gridDim = ceil(pow(numCells, 1.0 / 3.0));
      OUTPUTINFO("%d Points\n", numSelPts);
      OUTPUTINFO("numCells = %d, gridDim = %d\n", numCells, gridDim);

      //  make new SurfaceRepresentation
      SurfaceRep subRep;
      subRep.setup(selPts, gridDim);

      //  compute error for this sphere
      double err = sphereErrors.index(nodeI);
      if (err > averageError)
        err = averageError;  //  improve the bad ones a bit more

      //  generate the children nodes
      Array<Sphere> initChildren, children;
      reducer->getSpheres(&initChildren, st->degree, subRep, &s, err);

      //  apply optimiser if required
      if (optimiser && (maxOptLevel < 0 || level <= maxOptLevel)){
printf("RUNNING OPTIMISER...\n");
        optimiser->optimise(&initChildren, subRep, -1, &s, level);
printf("DONE OPTIMISING...\n");
        }

      //  do sphere refit - local optimisation
      if (useRefit){
        OUTPUTINFO("Refitting\n");
        SOPerSphere perSphere;
        perSphere.numIter = 3;
        perSphere.eval = eval;
        perSphere.optimise(&initChildren, subRep);
        }

      //  remove redundent spheres
      RELargest reLargest;
      if (!reLargest.reduceSpheres(&children, initChildren, subRep))
        children.clone(initChildren);

      //  setup the node's sub-division (make the regions to be covered by children)
      //subDivs.index(node).setup(children, &selPts);
      SurfaceDivision surDiv;
      surDiv.setup(children, &selPts);

      //  list of points that are covered
      Array<bool> covered(numSelPts);
      covered.clear();

      //  create the new nodes and the points they are to cover
      int numChildren = children.getSize();
      int firstChild = st->getFirstChild(node);
      levelChildren += numChildren;
      for (int i = 0; i < numChildren; i++){
        int childNum = firstChild + i;

        //  get sphere
        const Sphere& s = children.index(i);

        //  add sphere to tree
        st->nodes.index(childNum).c = s.c;
        st->nodes.index(childNum).r = s.r;

        if (level < st->levels-2){
          //  get the points in this sphere
          Array<int> pointsInS;
          subRep.listContainedPoints(&pointsInS, NULL, s);
          int numInS = pointsInS.getSize();

          //  populate list of points in sphere
          Array<int> *pointsToCover = &pointsInSpheres.index(childNum);
          pointsToCover->resize(0);   //  just in case
          for (int j = 0; j < numInS; j++){
            int pI = pointsInS.index(j);
            if (surDiv.pointInRegion(selPts.index(pI).p, i)){
              pointsToCover->addItem() = selPtsI->index(pI);
              covered.index(pI) = true;
              }
            }
          }
        }

      //  assign uncovered points
      if (numChildren > 0 && level < st->levels-2){
        for (int i = 0; i < numSelPts; i++){
          if (!covered.index(i)){
            //  get point
            const Point3D& pt = selPts.index(i).p;

            //  find the sphere
            int minJ = -1;
            float minD = FLT_MAX;
            for (int j = 0; j < numChildren; j++){
              const Sphere& s = children.index(j);
              float d = pt.distance(s.c);// - s.r;
              if (d < minD){
                minD = d;
                minJ = j;
                }
              }

            //  add the point to the sphere's list
            pointsInSpheres.index(firstChild+minJ).addItem() = selPtsI->index(i);
            }
          }
        }

      //  save after each child set
      //st->saveSphereTree("saved.sph");
      }

    //  save after each level
    //st->saveSphereTree("saved.sph");

    //  see if we need to add another level
    if (level == st->levels - 2 && minLeaves > 0 && numLeaves + levelChildren < minLeaves){
      //  grow the tree
      OUTPUTINFO("Growing the tree : %d-->%d\n", st->levels, st->levels+1);
      OUTPUTINFO("New Nodes : %d-->", st->nodes.getSize());
      st->growTree(st->levels+1);
      OUTPUTINFO("%d\n", st->nodes.getSize());
      }
    }
}