static bool SlowRekey(IntSet* s) {
  IntSet tmp;
  if (!tmp.init())
    return false;
  for (IntSet::Range r = s->all(); !r.empty(); r.popFront()) {
    if (NewKeyFunction::shouldBeRemoved(r.front()))
      continue;
    uint32_t hi = NewKeyFunction::rekey(r.front());
    if (tmp.has(hi))
      return false;
    if (!tmp.putNew(hi))
      return false;
  }
  s->clear();
  for (IntSet::Range r = tmp.all(); !r.empty(); r.popFront()) {
    if (!s->putNew(r.front()))
      return false;
  }
  return true;
}
/// Initialize \a s with iterator \a i
static void init(IntSet& s, I& i) {
  Support::DynamicArray<IntSet::Range,Heap> d(heap);
  int n = 0;
  unsigned int size = 0;
  while (i()) {
    d[n].min = i.min(); d[n].max = i.max();
    size += i.width();
    ++n; ++i;
  }
  if (n > 0) {
    IntSet::IntSetObject* o = IntSet::IntSetObject::allocate(n);
    for (int j=n; j--; )
      o->r[j] = d[j];
    o->size = size;
    s.object(o);
  }
}
int main() {
  srand(137872);
  IntSet* s = new BsVectorIntSet();
  for (int i = 0; i != 20; i++) {
    s->insert(rand() % 30);
  }
  for (int i = 0; i != 30; i++) {
    cout << i << ": " << (s->contains(i) ? "Yes" : "No") << endl;
    if (i % 2 == 0) {
      s->remove(i);
    }
  }
  cout << endl;
  for (int i = 0; i != 30; i++) {
    cout << i << ": " << (s->contains(i) ? "Yes" : "No") << endl;
  }
  clock_t start = clock();
  for (int i = 0; i != 1000000; i++) {
    s->insert(rand());
  }
  for (int i = 0; i != 1000000; i++) {
    s->remove(rand());
  }
  cout << "Done inserting" << endl;
  cout << "Time: " << ((clock() - start) / (double) CLOCKS_PER_SEC) << "s" << endl;
  cout << "Querying" << endl;
  start = clock();
  for (int i = 0; i != 1000000; i++) {
    s->contains(i);
  }
  cout << "Done querying" << endl;
  cout << "Time: " << ((clock() - start) / (double) CLOCKS_PER_SEC) << "s" << endl;
  delete s;
}
forceinline
ConstSetView::ConstSetView(Space& home, const IntSet& dom) {
  size = dom.ranges();
  domSize = 0;
  if (size > 0) {
    ranges = home.alloc<int>(2*size);
    IntSetRanges dr(dom);
    for (int i=0; dr(); ++dr, i+=2) {
      int min = dr.min(); int max = dr.max();
      ranges[i] = min; ranges[i+1] = max;
      domSize += static_cast<unsigned int>(max-min+1);
    }
  } else {
    ranges = NULL;
  }
}
IntSet IntSet::intersect(const IntSet& otherIntSet) const
// This function creates a temporary IntSet object to collect the
// intersection of values from this IntSet and otherIntSet; any values
// not in the intersection are removed before the result is returned.
{
    IntSet tempArray(capacity);     // temp IntSet object holding the intersection vals
    int strikeCount = 0;            // counts how many values in otherIntSet
                                    // do NOT match the current IntSet value

    // copy this IntSet into tempArray
    for (int i = 0; i < used; i++)
    {
        tempArray.data[i] = data[i];
        tempArray.used++;
    }

    // now remove the contents of tempArray if they're not contained
    // in the intersection of this IntSet and otherIntSet
    for (int i = 0; i < used; i++)
    {
        strikeCount = 0;            // reset for each value of this IntSet
                                    // (resetting only after a removal lets stale
                                    // counts leak into the next iteration)
        for (int i2 = 0; i2 < otherIntSet.used; i2++)
        {
            if (data[i] != otherIntSet.data[i2])
                strikeCount++;
        }
        if (strikeCount == otherIntSet.size())
        {
            // data[i] was not found in otherIntSet, so it is not part of
            // the intersection and must be removed from tempArray
            bool removed = tempArray.remove(data[i]);
            assert(removed);        // keep the removal outside assert() so it
            (void) removed;         // still happens when NDEBUG is defined
        }
    }

    return tempArray;               // return the intersection values
}
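// Minimal usage sketch for intersect() above. The capacity constructor and
// size() are taken from the snippet, but add() is a hypothetical insertion
// member used for illustration only; the expected result is shown for clarity.
#include <cassert>

void intersectExample()
{
    IntSet a(10), b(10);            // assumed capacity constructor, as used above
    a.add(1); a.add(2); a.add(3);   // hypothetical add() member
    b.add(2); b.add(3); b.add(4);

    IntSet c = a.intersect(b);
    assert(c.size() == 2);          // expected intersection: {2, 3}
}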
void init( const ObjModel* model, const IntSet& vidxs)
{
    _kddata = new kdtree::KDTreeArray;
    const int n = (int)vidxs.size();
    _kddata->resize( boost::extents[n][3]);
    _vmap = new std::vector<int>(n);
    int i = 0;
    for ( int vidx : vidxs)
    {
        const cv::Vec3f& v = model->vtx(vidx);
        (*_kddata)[i][0] = v[0];
        (*_kddata)[i][1] = v[1];
        (*_kddata)[i][2] = v[2];
        _vmap->at(i++) = vidx;
    }   // end foreach
    _kdtree = new kdtree::KDTree( *_kddata);
}   // end init
int maxCities(int n, vector<int> a, vector<int> b, vector<int> len) {
    if (n <= 2) {
        return n;
    }
    // all-pairs shortest paths (Floyd-Warshall); the 0x3f byte-fill gives a
    // large "infinity" that still cannot overflow when two entries are added
    int d[50][50];
    memset(d, 0x3f, sizeof(d));
    for (int i = 0; i < (int)a.size(); ++i) {
        d[a[i]-1][b[i]-1] = len[i];
        d[b[i]-1][a[i]-1] = len[i];
    }
    for (int k = 0; k < n; ++k) {
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < n; ++j) {
                d[i][j] = min(d[i][j], d[i][k] + d[k][j]);
            }
        }
    }
    int ans = 2;
    for (int i = 0; i < n; ++i) {
        for (int j = i+1; j < n; ++j) {
            int r = d[i][j];
            if (r < 1e6) {
                // greedily grow a set of cities that are pairwise at distance r
                IntSet s;
                s.insert(i);
                s.insert(j);
                for (int k = 0; k < n; ++k) {
                    if (i != k && j != k) {
                        IntSet::const_iterator it;
                        for (it = s.begin(); it != s.end(); ++it) {
                            if (d[k][*it] != r) {
                                break;
                            }
                        }
                        if (it == s.end()) {
                            s.insert(k);
                        }
                    }
                }
                ans = max(ans, (int)s.size());
            }
        }
    }
    return ans;
}
static bool SetsAreEqual(IntSet& am, IntSet& bm) {
  bool equal = true;
  if (am.count() != bm.count()) {
    equal = false;
    fprintf(stderr, "A.count() == %u and B.count() == %u\n", am.count(), bm.count());
  }
  for (auto iter = am.iter(); !iter.done(); iter.next()) {
    if (!bm.has(iter.get())) {
      equal = false;
      fprintf(stderr, "B does not have %x which is in A\n", iter.get());
    }
  }
  for (auto iter = bm.iter(); !iter.done(); iter.next()) {
    if (!am.has(iter.get())) {
      equal = false;
      fprintf(stderr, "A does not have %x which is in B\n", iter.get());
    }
  }
  return equal;
}
bool Init() {
  int nRandom = 0;
  for (int i = 0; i < g_nMax; ++i) {
    nRandom = i;
    g_nSummary += nRandom;
    g_dInts.push_back(nRandom);
    g_hmInts.insert(std::make_pair(nRandom, nRandom));
    g_hsInts.insert(nRandom);
    g_lInts.push_back(nRandom);
    g_mInts.insert(std::make_pair(nRandom, nRandom));
    g_mmInts.insert(std::make_pair(nRandom, nRandom));
    g_msInts.insert(nRandom);
    g_sInts.insert(nRandom);
    g_vInts.push_back(nRandom);
  }
  return true;
}
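// Hedged sketch of the container globals that Init() above appears to fill.
// The types are inferred from the Hungarian-style prefixes (d = deque, hm = hash
// map, hs = hash set, l = list, m = map, mm = multimap, ms = multiset, s = set,
// v = vector); the actual definitions, and the value of g_nMax, are not part of
// the snippet, so everything below is an assumption for illustration only.
#include <deque>
#include <list>
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>

const int g_nMax = 100000;                 // placeholder value
long long g_nSummary = 0;                  // wide type so the running sum cannot overflow
std::deque<int>              g_dInts;
std::unordered_map<int, int> g_hmInts;
std::unordered_set<int>      g_hsInts;
std::list<int>               g_lInts;
std::map<int, int>           g_mInts;
std::multimap<int, int>      g_mmInts;
std::multiset<int>           g_msInts;
std::set<int>                g_sInts;
std::vector<int>             g_vInts;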
void getSmallK(const vector<int>& data, IntSet& s, int k) {
  // Collect the k smallest values of data in s. This assumes IntSet keeps its
  // elements in descending order (e.g. a multiset with std::greater<int>),
  // so *s.begin() is the largest of the values currently kept.
  if ((int)data.size() < k || k < 1)
    return;
  for (size_t i = 0; i < data.size(); ++i) {
    if ((int)s.size() < k) {
      s.insert(data[i]);
    } else if (data[i] < *s.begin()) {
      s.erase(s.begin());
      s.insert(data[i]);
    }
  }
}
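// Minimal sketch of how the corrected getSmallK() above can be used. It assumes
// IntSet is a max-ordered multiset, e.g. std::multiset<int, std::greater<int>>,
// so that *s.begin() is the largest of the k values kept; that typedef is an
// assumption for illustration and not taken from the original source.
#include <functional>
#include <set>
#include <vector>

typedef std::multiset<int, std::greater<int> > IntSet;

void smallKExample()
{
    std::vector<int> data = {4, 5, 1, 6, 2, 7, 3, 8};
    IntSet s;
    getSmallK(data, s, 4);          // s now holds {4, 3, 2, 1}, largest first
}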
static bool SetsAreEqual(IntSet& am, IntSet& bm) {
  bool equal = true;
  if (am.count() != bm.count()) {
    equal = false;
    fprintf(stderr, "A.count() == %u and B.count() == %u\n", am.count(), bm.count());
  }
  for (IntSet::Range r = am.all(); !r.empty(); r.popFront()) {
    if (!bm.has(r.front())) {
      equal = false;
      fprintf(stderr, "B does not have %x which is in A\n", r.front());
    }
  }
  for (IntSet::Range r = bm.all(); !r.empty(); r.popFront()) {
    if (!am.has(r.front())) {
      equal = false;
      fprintf(stderr, "A does not have %x which is in B\n", r.front());
    }
  }
  return equal;
}
int getFortunate(vector<int> a, vector<int> b, vector<int> c) {
  IntSet ab;
  IntSet Z;
  VI::const_iterator ia, ib, ic;
  for (ia = a.begin(); ia != a.end(); ++ia) {
    for (ib = b.begin(); ib != b.end(); ++ib) {
      ab.insert(*ia + *ib);
    }
  }
  for (ic = c.begin(); ic != c.end(); ++ic) {
    IntSet::const_iterator s;
    for (s = ab.begin(); s != ab.end(); ++s) {
      Z.insert(*ic + *s);
    }
  }
  int r = 0;
  IntSet::const_iterator z;
  for (z = Z.begin(); z != Z.end(); ++z) {
    if (isFortunate(*z)) {
      ++r;
    }
  }
  return r;
}
bool EquivalencyGroup::IsPerfectMatch() const {
  return (m_LinesLeft.Count() == 1) && (m_LinesRight.Count() == 1);
}
inline void DiscrepancyCorrection::
apply_multiplicative(const Variables& vars, RealVector& approx_fns)
{
  for (ISIter it=surrogateFnIndices.begin(); it!=surrogateFnIndices.end(); ++it)
    approx_fns[*it] *= multCorrections[*it].value(vars);
}
/** * Note: This method assumes that there really are some preferred nodes and that those nodes * are probably active. So do not use it as a replacement for the version without preferred nodes! * * @param allowNonPreferredNodes true to enable use of non-preferred nodes if not enough * preferred nodes are active to satisfy numNodes * @param outNodes might cotain less than numNodes if not enough nodes are known */ void NodeStoreServers::chooseStorageNodesWithPref(unsigned numNodes, UInt16List* preferredNodes, bool allowNonPreferredNodes, UInt16Vector* outNodes) { SafeMutexLock mutexLock(&mutex); // L O C K if(!localNode && !activeNodes.size() ) { // there's nothing we can do without any storage targets mutexLock.unlock(); // U N L O C K return; } NodeReferencer localNodeRefer(localNode, false); /* don't move this into if-brackets, because it would be removed from stack then and we need to access it again further below.*/ // temporary insertion of localNode to include it in the possible storage targets if(localNode) activeNodes.insert(NodeMapVal(localNode->getNumID(), &localNodeRefer) ); unsigned nodesSize = activeNodes.size(); // max number of nodes is limited by the number of known active nodes if(numNodes > nodesSize) numNodes = nodesSize; // Stage 1: add all the preferred nodes that are actually available to the outNodes /* note: we use a separate map for the outNodes here to quickly find out (in stage 2) whether we already added a certain node from the preferred nodes (in stage 1) */ IntSet outNodesSet; UInt16ListIter preferredIter; NodeMapIter activeNodesIter; // (will be re-used in stage 2) moveIterToRandomElem<UInt16List, UInt16ListIter>(*preferredNodes, preferredIter); // walk over all the preferred nodes and add them to outNodes when they are available // (note: iterTmp is just used to avoid calling preferredNodes->size() ) for(UInt16ListIter iterTmp = preferredNodes->begin(); (iterTmp != preferredNodes->end() ) && numNodes; iterTmp++) { activeNodesIter = activeNodes.find(*preferredIter); if(activeNodesIter != activeNodes.end() ) { // this preferred node is active => add to outNodes and to outNodesSet outNodes->push_back(*preferredIter); outNodesSet.insert(*preferredIter); numNodes--; } moveIterToNextRingElem<UInt16List, UInt16ListIter>(*preferredNodes, preferredIter); } // Stage 2: add the remaining requested number of nodes from the active nodes /* if numNodes is greater than 0 then we have some requested nodes left, that could not be taken from the preferred nodes */ /* we keep it simple here, because usually there will be enough preferred nodes available, so that this case is quite unlikely */ if(allowNonPreferredNodes && numNodes) { IntSetIter outNodesSetIter; moveIterToRandomElem<NodeMap, NodeMapIter>(activeNodes, activeNodesIter); // while we haven't found the number of requested nodes while(numNodes) { outNodesSetIter = outNodesSet.find(activeNodesIter->first); if(outNodesSetIter == outNodesSet.end() ) { outNodes->push_back(activeNodesIter->first); outNodesSet.insert(activeNodesIter->first); numNodes--; } moveIterToNextRingElem<NodeMap, NodeMapIter>(activeNodes, activeNodesIter); } } if(localNode) // remove local node activeNodes.erase(localNode->getNumID() ); mutexLock.unlock(); // U N L O C K }
void cljp_coarsening(Mat depends_on, IS *pCoarse) { const int debug = 0; //create a vector of the weights. Vec w; MatGetVecs(depends_on, PETSC_NULL, &w); VecZeroEntries(w); //Get my local matrix size PetscInt start; PetscInt end; MatGetOwnershipRange(depends_on, &start, &end); //TODO: replace with something that doesn't require re-creating the matrix structure. //Initialize all the weights { Mat influences; MatTranspose(depends_on, &influences); { RawGraph influences_raw(influences); assert(influences_raw.local_nrows() == end-start); //Initialize the weight vector with \norm{S^T_i} + \sigma(i) PetscScalar *local_weights; VecGetArray(w, &local_weights); for (int local_row=0; local_row < influences_raw.local_nrows(); local_row++) { local_weights[local_row] = influences_raw.ia(local_row+1)-influences_raw.ia(local_row) + frand(); } VecRestoreArray(w, &local_weights); } MatDestroy(influences); } //VecView(w, PETSC_VIEWER_STDOUT_WORLD); //-------------------------------------------------------------- //Prepare the scatters needed for the independent set algorithm. IS all_local_nodes; describe_partition(depends_on, &all_local_nodes); NonlocalCollection nonlocal(depends_on, all_local_nodes); ISDestroy(all_local_nodes); //while we are here, get the matrix + graph nodes that we need. Mat extended_depend_mat; get_matrix_rows(depends_on, nonlocal.nodes, &extended_depend_mat); // Vec used only for display purposes enum NodeType {UNKNOWN=-1, FINE, COARSE}; Vec node_type; VecDuplicate(w, &node_type); VecSet(node_type, UNKNOWN); Vec w_nonlocal; VecDuplicate(nonlocal.vec, &w_nonlocal); Vec node_type_nonlocal; VecDuplicate(w_nonlocal, &node_type_nonlocal); VecSet(node_type_nonlocal, UNKNOWN); Vec is_not_independent; VecDuplicate(w, &is_not_independent); Vec is_not_independent_nonlocal; VecDuplicate(w_nonlocal, &is_not_independent_nonlocal); VecScatterBegin(nonlocal.scatter, w, w_nonlocal, INSERT_VALUES, SCATTER_FORWARD); VecScatterEnd(nonlocal.scatter, w, w_nonlocal, INSERT_VALUES, SCATTER_FORWARD); Vec w_update_nonlocal; VecDuplicate(w_nonlocal, &w_update_nonlocal); //get ready to find all the coarse and fine points typedef std::set<PetscInt> IntSet; IntSet unknown; //initialize the unknown set with all points that are local to this processor. for (int ii=start; ii<end; ii++) { unknown.insert(ii); } //we use MPI_INT here because we need to allreduce it with MPI_LAND int all_points_partitioned=0; int inc = 0; { RawGraph dep_nonlocal_raw(extended_depend_mat); //while not done while(!all_points_partitioned) { //Start: non-local weights, non-local coarse points if (debug) { LTRACE(); char fname[] = "weightsXXX"; char selection_graph[] = "selectionXXX"; sprintf(fname, "weights%03d", inc); sprintf(selection_graph, "selection%03d", inc); inc++; /* PetscViewer view; PetscViewerBinaryMatlabOpen(PETSC_COMM_WORLD, fname, &view); PetscViewerBinaryMatlabOutputVecDA(view, "z", w, user->da); PetscViewerBinaryMatlabDestroy(view); PetscViewerBinaryMatlabOpen(PETSC_COMM_WORLD, selection_graph, &view); PetscViewerBinaryMatlabOutputVecDA(view, "z", node_type, user->da); PetscViewerBinaryMatlabDestroy(view); //*/ } //Pre: non-local weights, non-local coarse points //find the independent set. //By using ADD_VALUES in a scattter, we can perform //a boolean OR across procesors. 
//is_not_independent[*] = false VecSet(is_not_independent_nonlocal, 0); //for all unknown points P { RawVector node_type_nonlocal_raw(node_type_nonlocal); RawVector w_nonlocal_raw(w_nonlocal); RawVector is_not_independent_nonlocal_raw(is_not_independent_nonlocal); FOREACH(P, unknown) { //get weight(P) PetscScalar weight_P = w_nonlocal_raw.at(nonlocal.map[*P]); //for all dependencies K of P (K st P->K) for (PetscInt ii=0; ii<dep_nonlocal_raw.nnz_in_row(nonlocal.map[*P]); ii++) { PetscInt K = dep_nonlocal_raw.col(nonlocal.map[*P], ii); //skip if K is fine/coarse /* Notice that we don't have to consider the independent set we've been generating here. By construction, if K is in the independent set, then P cannot be in the independent set. */ if (node_type_nonlocal_raw.at(nonlocal.map[K]) != UNKNOWN) { continue; } //skip if P->K is marked if (dep_nonlocal_raw.is_marked(nonlocal.map[*P], ii)) { continue; } //get weight(K) PetscScalar weight_K = w_nonlocal_raw.at(nonlocal.map[K]); if (weight_K <= weight_P) { //is_not_independent(K) = true is_not_independent_nonlocal_raw.at(nonlocal.map[K]) = 1; } else { // (weight(P) < weight_K) is_not_independent_nonlocal_raw.at(nonlocal.map[*P]) = 1; } } } } if (debug) {LTRACE();} //VecView(is_not_independent_nonlocal, PETSC_VIEWER_STDOUT_WORLD); //reconstruct is_not_independent vector with a ADD_VALUES, which //performs boolean OR VecSet(is_not_independent, 0); VecScatterBegin(nonlocal.scatter, is_not_independent_nonlocal, is_not_independent, ADD_VALUES, SCATTER_REVERSE); VecScatterEnd(nonlocal.scatter, is_not_independent_nonlocal, is_not_independent, ADD_VALUES, SCATTER_REVERSE); IntSet new_coarse_points; { RawVector is_not_independent_raw(is_not_independent); //for all unknown points P FOREACH(P, unknown) { //if (!is_not_independent(P)) if (is_not_independent_raw.at(*P) == 0) { new_coarse_points.insert(*P); if (debug) {SHOWVAR(*P, d);} } } } //Post: new coarse points (independent set) if (debug) {LTRACE();} //Pre: independent set { RawVector node_type_raw(node_type); // for each independent point FOREACH(I, new_coarse_points) { //mark that point as coarse node_type_raw.at(*I) = COARSE; unknown.erase(*I); } } //Post: updated coarse local if (debug) {LTRACE();} //Pre: updated coarse local //scatter changes to other processors VecScatterBegin(nonlocal.scatter, node_type, node_type_nonlocal, INSERT_VALUES, SCATTER_FORWARD); VecScatterEnd(nonlocal.scatter, node_type, node_type_nonlocal, INSERT_VALUES, SCATTER_FORWARD); //Post: updated coarse non-local if (debug) {LTRACE();} //Pre: updated coarse non-local, new local coarse points VecSet(w_update_nonlocal, 0); { RawVector node_type_nonlocal_raw(node_type_nonlocal); RawVector w_update_nonlocal_raw(w_update_nonlocal); //for all new coarse points C FOREACH(C, new_coarse_points) { //for all K st C->K for(PetscInt ii=0; ii<dep_nonlocal_raw.nnz_in_row(nonlocal.map[*C]); ii++) { //mark (C->K) dep_nonlocal_raw.mark(nonlocal.map[*C], ii); PetscInt K = dep_nonlocal_raw.col(nonlocal.map[*C], ii); //if K is unknown if (node_type_nonlocal_raw.at(nonlocal.map[K]) == UNKNOWN) { //measure(K)-- w_update_nonlocal_raw.at(nonlocal.map[K]) -= 1; } } } //for all unknown points I FOREACH(I, unknown) { IntSet common_coarse; //for all (J->K) for (PetscInt kk=0; kk<dep_nonlocal_raw.nnz_in_row(nonlocal.map[*I]); kk++) { if (!dep_nonlocal_raw.is_marked(nonlocal.map[*I], kk)) { //if K is coarse PetscInt K = dep_nonlocal_raw.col(nonlocal.map[*I], kk); if (node_type_nonlocal_raw.at(nonlocal.map[K]) == COARSE) { //mark K as common coarse 
common_coarse.insert(K); //mark (J->K) if unmarked dep_nonlocal_raw.mark(nonlocal.map[*I], kk); } } } //for all unmarked (I->J) for (PetscInt jj=0; jj<dep_nonlocal_raw.nnz_in_row(nonlocal.map[*I]); jj++) { if (!dep_nonlocal_raw.is_marked(nonlocal.map[*I], jj)) { //for all (J->K), marked or no PetscInt J = dep_nonlocal_raw.col(nonlocal.map[*I], jj); for(PetscInt kk=0; kk<dep_nonlocal_raw.nnz_in_row(nonlocal.map[J]); kk++) { //if K is in layer or ghost layer and common-coarse PetscInt K = dep_nonlocal_raw.col(nonlocal.map[J], kk); if (is_member(K, common_coarse)) { //mark (I->J) dep_nonlocal_raw.mark(nonlocal.map[*I], jj); //measure(J)-- w_update_nonlocal_raw.at(nonlocal.map[J]) -= 1; } } } } } }
void GameArbiter::process_command(Message *command) { switch (command->type) { case UnitMove: { auto cmd = dynamic_cast<UnitMoveMessage *>(command); int stack_id = cmd->data1; IntSet units = cmd->data2; Path& path = cmd->data3; int target_id = cmd->data4; UnitStack::pointer stack = game->stacks.get(stack_id); if (units.empty() || !stack->has_units(units) || path.empty()) { throw DataError() << "Invalid UnitMove message"; } /* Check that the move is allowed; shorten it if necessary */ Point end_pos = path.back(); UnitStack::pointer end_stack = game->level.tiles[end_pos].stack; int end_stack_id = end_stack ? end_stack->id : 0; if (end_stack_id != target_id) { path.pop_back(); target_id = 0; } MovementModel movement(game); UnitStack::pointer selected_stack = stack->copy_subset(units); unsigned int allowed_steps = movement.check_path(*selected_stack, path); bool truncated = allowed_steps < path.size(); int attack_target_id = target_id; if (truncated) target_id = 0; path.resize(allowed_steps); if (!path.empty()) { end_pos = path.back(); /* Generate updates. */ Faction::pointer faction = stack->owner; bool move = units.size() == stack->units.size() && target_id == 0; bool split = units.size() < stack->units.size() && target_id == 0; bool merge = units.size() == stack->units.size() && target_id != 0; UnitStack::pointer target = game->stacks.find(target_id); if (move) target_id = stack_id; if (split) target_id = game->get_free_stack_id(); // Send the moves for (auto iter = path.begin(); iter != path.end(); iter++) { emit(create_message(MoveUnits, stack_id, units, *iter)); } // If the stack is splitting to a new empty position, create a stack there if (split) { emit(create_message(CreateStack, target_id, end_pos, faction->id)); } emit(create_message(TransferUnits, stack_id, units, path, target_id)); // If the whole stack merged with an existing one, destroy it if (merge) { emit(create_message(DestroyStack, stack_id)); } } else { end_pos = stack->position; } UnitStack::pointer attack_target = game->stacks.find(attack_target_id); bool attack = attack_target && (attack_target->owner != stack->owner); if (attack) { BOOST_LOG_TRIVIAL(debug) << "Attack!"; Point target_point = attack_target->position; Point attacking_point = end_pos; Battle battle(game, target_point, attacking_point); battle.run(); emit(create_message(DoBattle, end_stack_id, target_point, battle.moves)); } } break; case FactionReady: { auto cmd = dynamic_cast<FactionReadyMessage *>(command); int faction_id = cmd->data1; bool ready = cmd->data2; if (game->mark_faction_ready(faction_id, ready)) { emit(create_message(FactionReady, faction_id, ready)); } if (game->all_factions_ready()) { emit(create_message(TurnEnd)); // process turn end spawn_units(); game->turn_number++; emit(create_message(TurnBegin, game->turn_number)); } } break; case Chat: { auto chat_msg = dynamic_cast<ChatMessage *>(command); emit(create_message(Chat, chat_msg->data)); } break; case SetLevelData: case CreateStructure: case DestroyStructure: { emit(command->shared_from_this()); } break; default: break; } }
void sequence(Home home, const IntVarArgs& x, const IntSet &s, int q, int l, int u, IntConLevel) { Limits::check(s.min(),"Int::sequence"); Limits::check(s.max(),"Int::sequence"); if (x.size() == 0) throw TooFewArguments("Int::sequence"); Limits::check(q,"Int::sequence"); Limits::check(l,"Int::sequence"); Limits::check(u,"Int::sequence"); if (x.same(home)) throw ArgumentSame("Int::sequence"); if ((q < 1) || (q > x.size())) throw OutOfLimits("Int::sequence"); if (home.failed()) return; // Normalize l and u l=std::max(0,l); u=std::min(q,u); // Lower bound of values taken can never exceed upper bound if (u < l) { home.fail(); return; } // Already subsumed as any number of values taken is okay if ((0 == l) && (q == u)) return; // All variables must take a value in s if (l == q) { for (int i=x.size(); i--; ) { IntView xv(x[i]); IntSetRanges ris(s); GECODE_ME_FAIL(xv.inter_r(home,ris,false)); } return; } // No variable can take a value in s if (0 == u) { for (int i=x.size(); i--; ) { IntView xv(x[i]); IntSetRanges ris(s); GECODE_ME_FAIL(xv.minus_r(home,ris,false)); } return; } ViewArray<IntView> xv(home,x); if (s.size() == 1) { GECODE_ES_FAIL( (Sequence::Sequence<IntView,int>::post (home,xv,s.min(),q,l,u))); } else { GECODE_ES_FAIL( (Sequence::Sequence<IntView,IntSet>::post (home,xv,s,q,l,u))); } }
void NoisyCnaEnumerate::collapse(const StlIntVector& mapNewCharToOldChar, const StlIntVector& mapOldCharToNewChar, RootedCladisticNoisyAncestryGraph& G) { typedef std::set<IntPair> IntPairSet; int k = _M.k(); const auto& intervals = _M.intervals(); for (const IntSet& interval : intervals) { IntSet remappedInterval; for (int c : interval) { int cc = mapOldCharToNewChar[c]; if (cc != -1) remappedInterval.insert(cc); } if (remappedInterval.size() > 1) { // get the copy states IntPairSet XY; const StateTree& S = G.S(*remappedInterval.begin()); for (int i = 0; i < k; ++i) { if (S.isPresent(i)) { const auto& xyz = _M.stateToTriple(i); // skip state 1,1 if (xyz._x != 1 || xyz._y != 1) { XY.insert(IntPair(xyz._x, xyz._y)); } } } for (const IntPair& xy : XY) { assert(xy.first != 1 || xy.second != 1); // collect all char-state pairs correspond to CNAs IntPairSet toCollapse; for (int c : remappedInterval) { const StateTree& S_c = G.S(c); for (int i = 0; i < k; ++i) { if (S_c.isPresent(i) && _M.stateToTriple(i)._x == xy.first && _M.stateToTriple(i)._y == xy.second) { int pi_i = S_c.parent(i); assert(0 <= pi_i && pi_i < k); if (_M.stateToTriple(pi_i)._x != xy.first || _M.stateToTriple(pi_i)._y != xy.second) { // we got a CNA state toCollapse.insert(IntPair(c, i)); } } } } G.collapse(toCollapse); } } } }
static void init(IntSet& s, const IntSet& i) {
  s.object(i.object());
}
IntSet::ModIterator setModIter(IntSet& set) {
  return set.modIter();
}
static void init(IntSet& s, const IntArgs& i) {
  if (i.size() > 0)
    s.init(&i[0], i.size());
}
bool solve(int N, int M, IIVec &walls, int &flavors, IntVec &pillars) { IntSet V[2000]; // vertexes in a room IntSet F[2000]; // flavors in a room IntVec C[2001]; // vertex connection int i, j; for (i = 0; i < N; ++i) { V[0].insert(i); } int rooms = 1; for (i = 0; i < N; ++i) { C[i].push_back((i-1+N)%N); C[i].push_back((i+1)%N); } for (i = 0; i < (int)walls.size(); ++i) { int s = min(walls[i].first, walls[i].second) - 1; int e = max(walls[i].first, walls[i].second) - 1; for (j = 0; j < rooms; ++j) { if (V[j].count(s) > 0 && V[j].count(e) > 0) { break; } } if (j >= rooms) { // error return false; } IntSet a, b; a.insert(s); a.insert(e); b.insert(s); b.insert(e); bool f = false; IntSet::const_iterator it; for (it = V[j].begin(); it != V[j].end(); ++it) { if (!f) { a.insert(*it); } else { b.insert(*it); } if (*it == s || *it == e) { f = !f; } } V[j] = a; V[rooms++] = b; C[s].push_back(e); C[e].push_back(s); } flavors = N; for (i = 0; i < rooms; ++i) { flavors = min((int)V[i].size(), flavors); } // fill the first room { int flavor = 1; IntSet::const_iterator it; for (it = V[0].begin(); it != V[0].end(); ++it) { int pillar = *it; if (flavor <= flavors) { set_flavor(rooms, V, F, pillars, pillar, flavor); } else { // fill with different color IntSet s; for (i = 0; i < (int)C[pillar].size(); ++i) { s.insert(pillars[C[pillar][i]]); } for (i = 1; ; ++i) { if (s.count(i) <= 0) { set_flavor(rooms, V, F, pillars, pillar, i); break; } } } ++flavor; } } int filled_rooms, room; for (filled_rooms = 1; filled_rooms < N; ++filled_rooms) { bool found = false; for (room = 0; room < rooms; ++room) { int c = 0; IntSet::const_iterator it; for (it = V[room].begin(); it != V[room].end(); ++it) { if (pillars[*it] > 0) { ++c; } } if (c >= 2 && c < (int)V[room].size()) { found = true; break; } } if (!found) { break; } fill_room(rooms, flavors, V, F, C, pillars, room); } for (i = 0; i < rooms; ++i) { if ((int)F[i].size() < flavors) { return false; } } for (i = 0; i < N; ++i) { if (pillars[i] <= 0 || pillars[i] > flavors) { return false; } } return true; }
int main(int argc, char** argv) { int limit = -1; int timeLimit = -1; int threads = 2; int state_tree_limit = -1; int random_seed = 0; int lowerbound = 0; bool polyclonal = false; bool perfectData = false; std::string purityString; std::string cliqueFile; int offset = 0; int verbosityLevel = 1; std::string whiteListString; lemon::ArgParser ap(argc, argv); ap.boolOption("-version", "Show version number") .refOption("v", "Verbosity level (default: 1)", verbosityLevel) .boolOption("cladistic", "Cladistic character mode") .refOption("p", "Polyclonal", polyclonal) .refOption("purity", "Purity values (used for fixing trunk)", purityString) .refOption("clique", "Clique file", cliqueFile) .refOption("perfect", "Perfect data mode", perfectData) .refOption("t", "Number of threads (default: 2)", threads) .refOption("l", "Maximum number of trees to enumerate (default: -1)", limit) .refOption("ll", "Time limit in seconds (default: -1)", timeLimit) .refOption("s", "Number of cliques to consider (default: -1)", state_tree_limit) .refOption("o", "Clique offset (default: 0)", offset) .refOption("r", "Seed for random number generator", random_seed) .refOption("lb", "Lower bound on #characters in enumerated trees (default: 0)", lowerbound) .refOption("w", "Characters that must be present in the solution trees", whiteListString) .other("input_1", "Input file") .other("input_2", "Interval file relating SNVs affected by the same CNA"); ap.parse(); g_rng = std::mt19937(random_seed); g_verbosity = static_cast<VerbosityLevel>(verbosityLevel); if (ap.given("-version")) { std::cout << "Version number: " << SPRUCE_VERSION << std::endl; return 0; } if (ap.files().size() == 0) { std::cerr << "Error: missing input file" << std::endl; return 1; } std::ifstream inFile(ap.files()[0].c_str()); if (!inFile.good()) { std::cerr << "Unable to open '" << ap.files()[0].c_str() << "' for reading" << std::endl; return 1; } IntSet whiteList; if (!whiteListString.empty()) { StringVector s; boost::split(s, whiteListString, boost::is_any_of(", ;")); for (const std::string& str : s) { whiteList.insert(boost::lexical_cast<int>(str)); } } bool readCliqueFile = false; bool writeCliqueFile = false; if (cliqueFile != "" && boost::filesystem::exists(cliqueFile)) { readCliqueFile = true; } else if (cliqueFile != "") { writeCliqueFile = true; } SolutionSet sols; enumerate(limit, timeLimit, threads, state_tree_limit, inFile, (ap.files().size() > 1 ? ap.files()[1] : ""), lowerbound, !polyclonal, purityString, writeCliqueFile, readCliqueFile, cliqueFile, offset, whiteList, sols); std::cout << sols; return 0; }
IntSet IntSet::unionWith(const IntSet& otherIntSet) const
// This function creates a temporary IntSet object to collect the union of
// values from this IntSet and otherIntSet; the sequence of values is
// maintained while duplicates are removed; if the union requires more
// capacity, resize() is called.
{
    int totUnionVals = 0;           // counts # of vals in the union
    IntSet tempArray(capacity);     // temp IntSet object
    int tempIndex = 0;              // index where the union values from
                                    // otherIntSet should begin to be inserted
    bool tempFlag = false;          // flag used to mark dupes

    // outer loop copies IntSet values into tempArray; inner loop counts
    // duplicate values so we can compute # values needed in the final union
    for (int i = 0; i < used; i++)
    {
        tempArray.data[i] = data[i];
        tempArray.used++;
        tempIndex++;
        for (int i2 = 0; i2 < otherIntSet.used; i2++)
        {
            if (data[i] == otherIntSet.data[i2])
                totUnionVals++;
        }
    }

    // compute total # of vals needed in the final union and resize if needed
    totUnionVals = (used + otherIntSet.used) - totUnionVals;
    if (tempArray.capacity < totUnionVals)
        tempArray.resize(totUnionVals);

    // now insert the content from otherIntSet into tempArray, without dupes
    int otherUsed = otherIntSet.size();     // sentinel for outer loop
    for (int i2 = 0; i2 < otherUsed; i2++)
    {
        for (int i = 0; i < tempArray.used; i++)
        {
            if (otherIntSet.data[i2] == tempArray.data[i])
                tempFlag = true;            // flag the dupes
        }
        if (!tempFlag)                      // save non-dupes to tempArray
        {
            tempArray.data[tempIndex] = otherIntSet.data[i2];
            tempArray.used++;
            tempIndex++;
        }
        tempFlag = false;                   // reset tempFlag for next pass
    }

    return tempArray;
}
SEXP cpp_sampleGlm(SEXP r_interface) { // ---------------------------------------------------------------------------------- // extract arguments // ---------------------------------------------------------------------------------- r_interface = CDR(r_interface); List rcpp_model(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_data(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_fpInfos(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_ucInfos(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_fixInfos(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_distribution(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_searchConfig(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_options(CAR(r_interface)); r_interface = CDR(r_interface); List rcpp_marginalz(CAR(r_interface)); // ---------------------------------------------------------------------------------- // unpack the R objects // ---------------------------------------------------------------------------------- // data: const NumericMatrix n_x = rcpp_data["x"]; const AMatrix x(n_x.begin(), n_x.nrow(), n_x.ncol()); const NumericMatrix n_xCentered = rcpp_data["xCentered"]; const AMatrix xCentered(n_xCentered.begin(), n_xCentered.nrow(), n_xCentered.ncol()); const NumericVector n_y = rcpp_data["y"]; const AVector y(n_y.begin(), n_y.size()); const IntVector censInd = as<IntVector>(rcpp_data["censInd"]); // FP configuration: // vector of maximum fp degrees const PosIntVector fpmaxs = as<PosIntVector>(rcpp_fpInfos["fpmaxs"]); // corresponding vector of fp column indices const PosIntVector fppos = rcpp_fpInfos["fppos"]; // corresponding vector of power set cardinalities const PosIntVector fpcards = rcpp_fpInfos["fpcards"]; // names of fp terms const StrVector fpnames = rcpp_fpInfos["fpnames"]; // UC configuration: const PosIntVector ucIndices = rcpp_ucInfos["ucIndices"]; List rcpp_ucColList = rcpp_ucInfos["ucColList"]; std::vector<PosIntVector> ucColList; for (R_len_t i = 0; i != rcpp_ucColList.length(); ++i) { ucColList.push_back(as<PosIntVector>(rcpp_ucColList[i])); } // fixed covariate configuration: const PosIntVector fixIndices = rcpp_fixInfos["fixIndices"]; List rcpp_fixColList = rcpp_fixInfos["fixColList"]; std::vector<PosIntVector> fixColList; for (R_len_t i = 0; i != rcpp_fixColList.length(); ++i) { fixColList.push_back(as<PosIntVector>(rcpp_fixColList[i])); } // distributions info: const double nullModelLogMargLik = as<double>(rcpp_distribution["nullModelLogMargLik"]); const double nullModelDeviance = as<double>(rcpp_distribution["nullModelDeviance"]); S4 rcpp_gPrior = rcpp_distribution["gPrior"]; List rcpp_family = rcpp_distribution["family"]; const bool tbf = as<bool>(rcpp_distribution["tbf"]); const bool doGlm = as<bool>(rcpp_distribution["doGlm"]); const double empiricalMean = as<double>(rcpp_distribution["yMean"]); const bool empiricalgPrior = as<bool>(rcpp_distribution["empiricalgPrior"]); // model search configuration: const bool useFixedc = as<bool>(rcpp_searchConfig["useFixedc"]); // options: const bool estimateMargLik = as<bool>(rcpp_options["estimateMargLik"]); const bool verbose = as<bool>(rcpp_options["verbose"]); const bool debug = as<bool>(rcpp_options["debug"]); const bool isNullModel = as<bool>(rcpp_options["isNullModel"]); const bool useFixedZ = as<bool>(rcpp_options["useFixedZ"]); const double fixedZ = as<double>(rcpp_options["fixedZ"]); #ifdef _OPENMP const bool useOpenMP = as<bool>(rcpp_options["useOpenMP"]); #endif S4 rcpp_mcmc = 
rcpp_options["mcmc"]; const PosInt iterations = rcpp_mcmc.slot("iterations"); const PosInt burnin = rcpp_mcmc.slot("burnin"); const PosInt step = rcpp_mcmc.slot("step"); // z density stuff: const RFunction logMarginalZdens(as<SEXP>(rcpp_marginalz["logDens"])); const RFunction marginalZgen(as<SEXP>(rcpp_marginalz["gen"])); // ---------------------------------------------------------------------------------- // further process arguments // ---------------------------------------------------------------------------------- // data: // only the intercept is always included, that is fixed, in the model IntSet fixedCols; fixedCols.insert(1); // totalnumber is set to 0 because we do not care about it. const DataValues data(x, xCentered, y, censInd, 0, fixedCols); // FP configuration: const FpInfo fpInfo(fpcards, fppos, fpmaxs, fpnames, x); // UC configuration: // determine sizes of the UC groups, and the total size == maximum size reached together by all // UC groups. PosIntVector ucSizes; PosInt maxUcDim = 0; for (std::vector<PosIntVector>::const_iterator cols = ucColList.begin(); cols != ucColList.end(); ++cols) { PosInt thisSize = cols->size(); maxUcDim += thisSize; ucSizes.push_back(thisSize); } const UcInfo ucInfo(ucSizes, maxUcDim, ucIndices, ucColList); // fix configuration: // determine sizes of the fix groups, and the total size == maximum size reached together by all // UC groups. PosIntVector fixSizes; PosInt maxFixDim = 0; for (std::vector<PosIntVector>::const_iterator cols = fixColList.begin(); cols != fixColList.end(); ++cols) { PosInt thisSize = cols->size(); maxFixDim += thisSize; fixSizes.push_back(thisSize); } const FixInfo fixInfo(fixSizes, maxFixDim, fixIndices, fixColList); // model configuration: GlmModelConfig config(rcpp_family, nullModelLogMargLik, nullModelDeviance, exp(fixedZ), rcpp_gPrior, data.response, debug, useFixedc, empiricalMean, empiricalgPrior); // model config/info: const Model thisModel(ModelPar(rcpp_model["configuration"], fpInfo), GlmModelInfo(as<List>(rcpp_model["information"]))); // the options const Options options(estimateMargLik, verbose, debug, isNullModel, useFixedZ, tbf, doGlm, iterations, burnin, step); // marginal z stuff const MarginalZ marginalZ(logMarginalZdens, marginalZgen); // use only one thread if we do not want to use openMP. #ifdef _OPENMP if(! useOpenMP) { omp_set_num_threads(1); } else { omp_set_num_threads(omp_get_num_procs()); } #endif // ---------------------------------------------------------------------------------- // prepare the sampling // ---------------------------------------------------------------------------------- Fitter fitter; int nCoefs; if(options.doGlm) { // construct IWLS object, which can be used for all IWLS stuff, // and also contains the design matrix etc fitter.iwlsObject = new Iwls(thisModel.par, data, fpInfo, ucInfo, fixInfo, config, config.linPredStart, options.useFixedZ, EPS, options.debug, options.tbf); nCoefs = fitter.iwlsObject->nCoefs; // check that we have the same answer about the null model as R //assert(fitter.iwlsObject->isNullModel == options.isNullModel); if(fitter.iwlsObject->isNullModel != options.isNullModel){ Rcpp::stop("sampleGlm.cpp:cpp_sampleGlm: isNullModel != options.isNullModel"); } } else { AMatrix design = getDesignMatrix(thisModel.par, data, fpInfo, ucInfo, fixInfo, false); fitter.coxfitObject = new Coxfit(data.response, data.censInd, design, config.weights, config.offsets, 1); // the number of coefficients (here it does not include the intercept!!) 
nCoefs = design.n_cols; // check that we do not have a null model here: // assert(nCoefs > 0); if(nCoefs <= 0){ Rcpp::stop("sampleGlm.cpp:cpp_sampleGlm: nCoefs <= 0"); } } // allocate sample container Samples samples(nCoefs, options.nSamples); // count how many proposals we have accepted: PosInt nAccepted(0); // at what z do we start? double startZ = useFixedZ ? fixedZ : thisModel.info.zMode; // start container with current things Mcmc now(marginalZ, data.nObs, nCoefs); if(doGlm) { // get the mode for beta given the mode of the approximated marginal posterior as z // if TBF approach is used, this will be the only time the IWLS is used, // because we only need the MLE and the Cholesky factor of its // precision matrix estimate, which do not depend on z. PosInt iwlsIterations = fitter.iwlsObject->startWithNewLinPred(40, // this is the corresponding g exp(startZ), // and the start value for the linear predictor is taken from the Glm model config config.linPredStart); // echo debug-level message? if(options.debug) { Rprintf("\ncpp_sampleGlm: Initial IWLS for high density point finished after %d iterations", iwlsIterations); } // this is the current proposal info: now.proposalInfo = fitter.iwlsObject->getResults(); // and this is the current parameters sample: now.sample = Parameter(now.proposalInfo.coefs, startZ); if(options.tbf) { // we will not compute this in the TBF case: now.logUnPosterior = R_NaReal; // start to compute the variance of the intercept parameter: // here the inverse cholesky factor of the precision matrix will // be stored. First, it's the identity matrix. AMatrix inverseQfactor = arma::eye(now.proposalInfo.qFactor.n_rows, now.proposalInfo.qFactor.n_cols); // do the inversion trs(false, false, now.proposalInfo.qFactor, inverseQfactor); // now we can compute the variance of the intercept estimate: const AVector firstCol = inverseQfactor.col(0); const double interceptVar = arma::dot(firstCol, firstCol); // ok, now alter the qFactor appropriately to reflect the // independence assumption between the intercept estimate // and the other coefficients estimates now.proposalInfo.qFactor.col(0) = arma::zeros<AVector>(now.proposalInfo.qFactor.n_rows); now.proposalInfo.qFactor(0, 0) = sqrt(1.0 / interceptVar); } else { // compute the (unnormalized) log posterior of the proposal now.logUnPosterior = fitter.iwlsObject->computeLogUnPosteriorDens(now.sample); } } else { PosInt coxfitIterations = fitter.coxfitObject->fit(); CoxfitResults coxResults = fitter.coxfitObject->finalizeAndGetResults(); fitter.coxfitObject->checkResults(); // echo debug-level message? 
if(options.debug) { Rprintf("\ncpp_sampleGlm: Cox fit finished after %d iterations", coxfitIterations); } // we will not compute this in the TBF case: now.logUnPosterior = R_NaReal; // compute the Cholesky factorization of the covariance matrix int info = potrf(false, coxResults.imat); // check that all went well if(info != 0) { std::ostringstream stream; stream << "dpotrf(coxResults.imat) got error code " << info << "in sampleGlm"; throw std::domain_error(stream.str().c_str()); } // compute the precision matrix, using the Cholesky factorization // of the covariance matrix now.proposalInfo.qFactor = arma::eye(now.proposalInfo.qFactor.n_rows, now.proposalInfo.qFactor.n_cols); info = potrs(false, coxResults.imat, now.proposalInfo.qFactor); // check that all went well if(info != 0) { std::ostringstream stream; stream << "dpotrs(coxResults.imat, now.proposalInfo.qFactor) got error code " << info << "in sampleGlm"; throw std::domain_error(stream.str().c_str()); } // compute the Cholesky factorization of the precision matrix info = potrf(false, now.proposalInfo.qFactor); // check that all went well if(info != 0) { std::ostringstream stream; stream << "dpotrf(now.proposalInfo.qFactor) got error code " << info << "in sampleGlm"; throw std::domain_error(stream.str().c_str()); } // the MLE of the coefficients now.proposalInfo.coefs = coxResults.coefs; } // so the parameter object "now" is then also the high density point // required for the marginal likelihood estimate: const Mcmc highDensityPoint(now); // we accept this starting value, so initialize "old" with the same ones Mcmc old(now); // ---------------------------------------------------------------------------------- // start sampling // ---------------------------------------------------------------------------------- // echo debug-level message? if(options.debug) { if(tbf) { Rprintf("\ncpp_sampleGlm: Starting MC simulation"); } else { Rprintf("\ncpp_sampleGlm: Starting MCMC loop"); } } // i_iter starts at 1 !! for(PosInt i_iter = 1; i_iter <= options.iterations; ++i_iter) { // echo debug-level message? if(options.debug) { Rprintf("\ncpp_sampleGlm: Starting iteration no. %d", i_iter); } // ---------------------------------------------------------------------------------- // store the proposal // ---------------------------------------------------------------------------------- // sample one new log covariance factor z (other arguments than 1 are not useful // with the current setup of the RFunction wrapper class) now.sample.z = marginalZ.gen(1); if(options.tbf) { if(options.isNullModel) { // note that we do not encounter this in the Cox case // assert(options.doGlm); if(!options.doGlm){ Rcpp::stop("sampleGlm.cpp:cpp_sampleGlm: options.doGlm should be TRUE"); } // draw the proposal coefs, which is here just the intercept now.sample.coefs = drawNormalVector(now.proposalInfo.coefs, now.proposalInfo.qFactor); } else { // here we have at least one non-intercept coefficient // get vector from N(0, I) AVector w = drawNormalVariates(now.proposalInfo.coefs.n_elem, 0.0, 1.0); // then solve L' * ret = w, and overwrite w with the result: trs(false, true, now.proposalInfo.qFactor, w); // compute the shrinkage factor t = g / (g + 1) const double g = exp(now.sample.z); //Previously used g directly, but if g=inf we need to use the limit // const double shrinkFactor = g / (g + 1.0); const double shrinkFactor = isinf(g) ? 1 : g / (g + 1.0); // scale the variance of the non-intercept coefficients // with this factor. 
// In the Cox case: no intercept present, so scale everything int startCoef = options.doGlm ? 1 : 0; w.rows(startCoef, w.n_rows - 1) *= sqrt(shrinkFactor); // also scale the mean of the non-intercept coefficients // appropriately: // In the Cox case: no intercept present, so scale everything now.sample.coefs = now.proposalInfo.coefs; now.sample.coefs.rows(startCoef, now.sample.coefs.n_rows - 1) *= shrinkFactor; // so altogether we have: now.sample.coefs += w; } ++nAccepted; } else // the generalized hyper-g prior case { // do 1 IWLS step, starting from the last linear predictor and the new z // (here the return value is not very interesting, as it must be 1) fitter.iwlsObject->startWithNewCoefs(1, exp(now.sample.z), now.sample.coefs); // get the results now.proposalInfo = fitter.iwlsObject->getResults(); // draw the proposal coefs: now.sample.coefs = drawNormalVector(now.proposalInfo.coefs, now.proposalInfo.qFactor); // compute the (unnormalized) log posterior of the proposal now.logUnPosterior = fitter.iwlsObject->computeLogUnPosteriorDens(now.sample); // ---------------------------------------------------------------------------------- // get the reverse jump normal density // ---------------------------------------------------------------------------------- // copy the old Mcmc object Mcmc reverse(old); // do again 1 IWLS step, starting from the sampled linear predictor and the old z fitter.iwlsObject->startWithNewCoefs(1, exp(reverse.sample.z), now.sample.coefs); // get the results for the reverse jump Gaussian: // only the proposal has changed in contrast to the old container, // the sample stays the same! reverse.proposalInfo = fitter.iwlsObject->getResults(); // ---------------------------------------------------------------------------------- // compute the proposal density ratio // ---------------------------------------------------------------------------------- // first the log of the numerator, i.e. log(f(old | new)): double logProposalRatioNumerator = reverse.computeLogProposalDens(); // second the log of the denominator, i.e. log(f(new | old)): double logProposalRatioDenominator = now.computeLogProposalDens(); // so the log proposal density ratio is double logProposalRatio = logProposalRatioNumerator - logProposalRatioDenominator; // ---------------------------------------------------------------------------------- // compute the posterior density ratio // ---------------------------------------------------------------------------------- double logPosteriorRatio = now.logUnPosterior - old.logUnPosterior; // ---------------------------------------------------------------------------------- // accept or reject proposal // ---------------------------------------------------------------------------------- double acceptanceProb = exp(logPosteriorRatio + logProposalRatio); if(unif() < acceptanceProb) { old = now; ++nAccepted; } else { now = old; } } // ---------------------------------------------------------------------------------- // store the sample? // ---------------------------------------------------------------------------------- // if the burnin was passed and we are at a multiple of step beyond that, then store // the sample. if((i_iter > options.burnin) && (((i_iter - options.burnin) % options.step) == 0)) { // echo debug-level message if(options.debug) { Rprintf("\ncpp_sampleGlm: Storing samples of iteration no. 
%d", i_iter); } // store the current parameter sample samples.storeParameters(now.sample); // ---------------------------------------------------------------------------------- // compute marginal likelihood terms // ---------------------------------------------------------------------------------- // compute marginal likelihood terms and save them? // (Note that the tbf bool is just for safety here, // the R function sampleGlm will set estimateMargLik to FALSE // when tbf is TRUE.) if(options.estimateMargLik && (! options.tbf)) { // echo debug-level message? if(options.debug) { Rprintf("\ncpp_sampleGlm: Compute marginal likelihood estimation terms"); } // ---------------------------------------------------------------------------------- // compute next term for the denominator // ---------------------------------------------------------------------------------- // draw from the high density point proposal distribution Mcmc denominator(highDensityPoint); denominator.sample.z = marginalZ.gen(1); fitter.iwlsObject->startWithNewLinPred(1, exp(denominator.sample.z), highDensityPoint.proposalInfo.linPred); denominator.proposalInfo = fitter.iwlsObject->getResults(); denominator.sample.coefs = drawNormalVector(denominator.proposalInfo.coefs, denominator.proposalInfo.qFactor); // get posterior density of the sample denominator.logUnPosterior = fitter.iwlsObject->computeLogUnPosteriorDens(denominator.sample); // get the proposal density at the sample double denominator_logProposalDensity = denominator.computeLogProposalDens(); // then the reverse stuff: // first we copy again the high density point Mcmc revDenom(highDensityPoint); // but choose the new sampled coefficients as starting point fitter.iwlsObject->startWithNewCoefs(1, exp(revDenom.sample.z), denominator.sample.coefs); revDenom.proposalInfo = fitter.iwlsObject->getResults(); // so the reverse proposal density is double revDenom_logProposalDensity = revDenom.computeLogProposalDens(); // so altogether the next term for the denominator is the following acceptance probability double denominatorTerm = denominator.logUnPosterior - highDensityPoint.logUnPosterior + revDenom_logProposalDensity - denominator_logProposalDensity; denominatorTerm = exp(fmin(0.0, denominatorTerm)); // ---------------------------------------------------------------------------------- // compute next term for the numerator // ---------------------------------------------------------------------------------- // compute the proposal density of the current sample starting from the high density point Mcmc numerator(now); fitter.iwlsObject->startWithNewLinPred(1, exp(numerator.sample.z), highDensityPoint.proposalInfo.linPred); numerator.proposalInfo = fitter.iwlsObject->getResults(); double numerator_logProposalDensity = numerator.computeLogProposalDens(); // then compute the reverse proposal density of the high density point when we start from the current // sample Mcmc revNum(highDensityPoint); fitter.iwlsObject->startWithNewCoefs(1, exp(revNum.sample.z), now.sample.coefs); revNum.proposalInfo = fitter.iwlsObject->getResults(); double revNum_logProposalDensity = revNum.computeLogProposalDens(); // so altogether the next term for the numerator is the following guy: double numeratorTerm = exp(fmin(revNum_logProposalDensity, highDensityPoint.logUnPosterior - now.logUnPosterior + numerator_logProposalDensity)); // ---------------------------------------------------------------------------------- // finally store both terms // 
---------------------------------------------------------------------------------- samples.storeMargLikTerms(numeratorTerm, denominatorTerm); } } // ---------------------------------------------------------------------------------- // echo progress? // ---------------------------------------------------------------------------------- // echo debug-level message? if(options.debug) { Rprintf("\ncpp_sampleGlm: Finished iteration no. %d", i_iter); } if((i_iter % std::max(static_cast<int>(options.iterations / 100), 1) == 0) && options.verbose) { // display computation progress at each percent Rprintf("-"); } // end echo progress } // end MCMC loop // echo debug-level message? if(options.debug) { if(tbf) { Rprintf("\ncpp_sampleGlm: Finished MC simulation"); } else { Rprintf("\ncpp_sampleGlm: Finished MCMC loop"); } } // ---------------------------------------------------------------------------------- // build up return list for R and return that. // ---------------------------------------------------------------------------------- return List::create(_["samples"] = samples.convert2list(), _["nAccepted"] = nAccepted, _["highDensityPointLogUnPosterior"] = highDensityPoint.logUnPosterior); } // end cpp_sampleGlm
void sequence(Home home, const BoolVarArgs& x, const IntSet& s, int q, int l, int u, IntConLevel) { if ((s.min() < 0) || (s.max() > 1)) throw NotZeroOne("Int::sequence"); if (x.size() == 0) throw TooFewArguments("Int::sequence"); Limits::check(q,"Int::sequence"); Limits::check(l,"Int::sequence"); Limits::check(u,"Int::sequence"); if (x.same(home)) throw ArgumentSame("Int::sequence"); if ((q < 1) || (q > x.size())) throw OutOfLimits("Int::sequence"); if (home.failed()) return; // Normalize l and u l=std::max(0,l); u=std::min(q,u); // Lower bound of values taken can never exceed upper bound if (u < l) { home.fail(); return; } // Already subsumed as any number of values taken is okay if ((0 == l) && (q == u)) return; // Check whether the set is {0,1}, then the number of values taken is q if ((s.min() == 0) && (s.max() == 1)) { if ((l > 0) || (u < q)) home.failed(); return; } assert(s.min() == s.max()); // All variables must take a value in s if (l == q) { if (s.min() == 0) { for (int i=x.size(); i--; ) { BoolView xv(x[i]); GECODE_ME_FAIL(xv.zero(home)); } } else { assert(s.min() == 1); for (int i=x.size(); i--; ) { BoolView xv(x[i]); GECODE_ME_FAIL(xv.one(home)); } } return; } // No variable can take a value in s if (0 == u) { if (s.min() == 0) { for (int i=x.size(); i--; ) { BoolView xv(x[i]); GECODE_ME_FAIL(xv.one(home)); } } else { assert(s.min() == 1); for (int i=x.size(); i--; ) { BoolView xv(x[i]); GECODE_ME_FAIL(xv.zero(home)); } } return; } ViewArray<BoolView> xv(home,x); GECODE_ES_FAIL( (Sequence::Sequence<BoolView,int>::post (home,xv,s.min(),q,l,u))); }
void ConvexDecomp(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr& cloud, const Eigen::MatrixXf& dirs, float thresh, /*optional outputs: */ std::vector<IntVec>* indices, std::vector< IntVec >* hull_indices) { int k_neighbs = 5; pcl::KdTreeFLANN<pcl::PointXYZ>::Ptr tree(new pcl::KdTreeFLANN<pcl::PointXYZ>(true)); tree->setEpsilon(0); tree->setInputCloud(cloud); int n_pts = cloud->size(); int n_dirs = dirs.rows(); DEBUG_PRINT("npts, ndirs %i %i\n", n_pts, n_dirs); MatrixXf dirs4(n_dirs, 4); dirs4.leftCols(3) = dirs; dirs4.col(3).setZero(); MatrixXf pt2supports = Map< const Matrix<float, Dynamic, Dynamic, RowMajor > >(reinterpret_cast<const float*>(cloud->points.data()), n_pts, 4) * dirs4.transpose(); const int UNLABELED = -1; IntVec pt2label(n_pts, UNLABELED); IntSet alldirs; for(int i = 0; i < n_dirs; ++i) alldirs.insert(i); int i_seed = 0; int i_label = 0; // each loop cycle, add a new cluster while(true) { // find first unlabeled point while(true) { if(i_seed == n_pts) return; if(pt2label[i_seed] == UNLABELED) break; ++i_seed; } pt2label[i_seed] = i_label; map<int, IntSet> pt2dirs; pt2dirs[i_seed] = alldirs; vector<SupInfo> dir2supinfo(n_dirs); for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { float seedsup = pt2supports(i_seed, i_dir); dir2supinfo[i_dir].inds.push_back(i_seed); dir2supinfo[i_dir].sups.push_back(seedsup); dir2supinfo[i_dir].best = seedsup; } DEBUG_PRINT("seed: %i\n", i_seed); IntSet exclude_frontier; exclude_frontier.insert(i_seed); queue<int> frontier; BOOST_FOREACH(const int & i_nb, getNeighbors(*tree, i_seed, k_neighbs, 2 * thresh)) { if(pt2label[i_nb] == UNLABELED && exclude_frontier.find(i_nb) == exclude_frontier.end()) { DEBUG_PRINT("adding %i to frontier\n", i_nb); frontier.push(i_nb); exclude_frontier.insert(i_nb); } } while(!frontier.empty()) { #if 0 // for serious debugging vector<int> clu; BOOST_FOREACH(Int2IntSet::value_type & pt_dir, pt2dirs) { clu.push_back(pt_dir.first); } MatrixXd sup_pd(clu.size(), n_dirs); for(int i = 0; i < clu.size(); ++i) { for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { sup_pd(i, i_dir) = pt2supports(clu[i], i_dir); } } for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { IntSet nearext; for(int i = 0; i < clu.size(); ++i) { if(sup_pd.col(i_dir).maxCoeff() - sup_pd(i, i_dir) < thresh) { nearext.insert(clu[i]); } } assert(toSet(dir2supinfo[i_dir].inds) == nearext); } printf("ok!\n"); #endif int i_cur = frontier.front(); frontier.pop(); // printf("cur: %i\n", i_cur); DEBUG_PRINT("pt2dirs %s", Str(pt2dirs).c_str()); bool reject = false; Int2Int pt2decrement; for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { float cursup = pt2supports(i_cur, i_dir); SupInfo& si = dir2supinfo[i_dir]; if(cursup > si.best) { for(int i = 0; i < si.inds.size(); ++i) { float sup = si.sups[i]; int i_pt = si.inds[i]; if(cursup - sup > thresh) { pt2decrement[i_pt] = pt2decrement[i_pt] + 1; DEBUG_PRINT("decrementing %i (dir %i)\n", i_pt, i_dir); } } } } DEBUG_PRINT("pt2dec: %s", Str(pt2decrement).c_str()); BOOST_FOREACH(const Int2Int::value_type & pt_dec, pt2decrement) { if(pt_dec.second == pt2dirs[pt_dec.first].size()) { reject = true; break; } } DEBUG_PRINT("reject? 
%i\n", reject); if(!reject) { pt2label[i_cur] = i_label; pt2dirs[i_cur] = IntSet(); for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { float cursup = pt2supports(i_cur, i_dir); if(cursup > dir2supinfo[i_dir].best - thresh) pt2dirs[i_cur].insert(i_dir); } for(int i_dir = 0; i_dir < n_dirs; ++i_dir) { float cursup = pt2supports(i_cur, i_dir); SupInfo& si = dir2supinfo[i_dir]; if(cursup > si.best) { IntVec filtinds; FloatVec filtsups; for(int i = 0; i < si.inds.size(); ++i) { float sup = si.sups[i]; int i_pt = si.inds[i]; if(cursup - sup > thresh) { pt2dirs[i_pt].erase(i_dir); } else { filtinds.push_back(i_pt); filtsups.push_back(sup); } } si.inds = filtinds; si.sups = filtsups; si.inds.push_back(i_cur); si.sups.push_back(cursup); si.best = cursup; } else if(cursup > si.best - thresh) { si.inds.push_back(i_cur); si.sups.push_back(cursup); } } BOOST_FOREACH(const int & i_nb, getNeighbors(*tree, i_cur, k_neighbs, 2 * thresh)) { if(pt2label[i_nb] == UNLABELED && exclude_frontier.find(i_nb) == exclude_frontier.end()) { DEBUG_PRINT("adding %i to frontier\n", i_nb); frontier.push(i_nb); exclude_frontier.insert(i_nb); } } } // if !reject else { } } // while frontier nonempty if(indices != NULL)