Example #1
 //constructor for a pre-fabricated set of words, primarily for using the error version
 //in a polymorphic manner.
 Dictionary ( set<string> PreFabSet ) :
       wordSet( PreFabSet ), constructionWasSuccesfull( true ) {
    if(wordSet.size()==0) constructionWasSuccesfull = false;
    DetermineLongestWord();
 }
Example #2
void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
{
    LOCK(cs_nTimeOffset);
    // Ignore duplicates
    static set<CNetAddr> setKnown;
    if (setKnown.size() == BITCOIN_TIMEDATA_MAX_SAMPLES)
        return;
    if (!setKnown.insert(ip).second)
        return;

    // Add data
    static CMedianFilter<int64_t> vTimeOffsets(BITCOIN_TIMEDATA_MAX_SAMPLES, 0);
    vTimeOffsets.input(nOffsetSample);
    LogPrint("net","added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);

    // There is a known issue here (see issue #4521):
    //
    // - The structure vTimeOffsets contains up to 200 elements, after which
    // any new element added to it will not increase its size, replacing the
    // oldest element.
    //
    // - The condition to update nTimeOffset includes checking whether the
    // number of elements in vTimeOffsets is odd, which will never happen after
    // there are 200 elements.
    //
    // But in this case the 'bug' is protective against some attacks, and may
    // actually explain why we've never seen attacks which manipulate the
    // clock offset.
    //
    // So we should hold off on fixing this and clean it up as part of
    // a timing cleanup that strengthens it in a number of other ways.
    //
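    // Worked illustration (restating the numbers above, nothing new): each accepted
    // sample grows vTimeOffsets by one, so its size alternates between odd and even
    // and the update below runs on roughly every other call. Once setKnown holds
    // BITCOIN_TIMEDATA_MAX_SAMPLES (200) addresses, AddTimeData returns at the top,
    // the filter stays frozen at 200 elements (an even count), and the
    // "% 2 == 1" branch can never be taken again.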
    if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1)
    {
        int64_t nMedian = vTimeOffsets.median();
        std::vector<int64_t> vSorted = vTimeOffsets.sorted();
        // Only let other nodes change our time by so much
        if (abs64(nMedian) <= std::max<int64_t>(0, GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT)))
        {
            nTimeOffset = nMedian;
        }
        else
        {
            nTimeOffset = 0;

            static bool fDone;
            if (!fDone)
            {
                // If nobody has a time different than ours but within 5 minutes of ours, give a warning
                bool fMatch = false;
                BOOST_FOREACH(int64_t nOffset, vSorted)
                    if (nOffset != 0 && abs64(nOffset) < 5 * 60)
                        fMatch = true;

                if (!fMatch)
                {
                    fDone = true;
                    string strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), _(PACKAGE_NAME));
                    strMiscWarning = strMessage;
                    uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING);
                }
            }
        }
Example #3
vector<int> HDBScan::do_labelling(vector<CondensedTree*>& tree, set<int>& clusters,
                                  boost::unordered_map<int, int>& cluster_label_map,
                                  bool allow_single_cluster,
                                  bool match_reference_implementation)
{
    int root_cluster = tree[0]->parent;  //root_cluster = parent_array.min()
    int parent_array_max = tree[0]->parent;
    for (int i=1; i<tree.size(); i++) {
        if (tree[i]->parent < root_cluster) {
            root_cluster = tree[i]->parent;
        }
        if (tree[i]->parent > parent_array_max) {
            parent_array_max = tree[i]->parent;
        }
    }
    vector<int> result(root_cluster);
    
    TreeUnionFind union_find(parent_array_max + 1);
    
    for (int n=0; n<tree.size(); n++) {
        int child = tree[n]->child;
        int parent = tree[n]->parent;
        if (clusters.find(child) == clusters.end()) {
            union_find.union_(parent, child);
        }
    }
    
    for (int n=0; n<root_cluster; n++) {
        int cluster = union_find.find(n);
        if ( cluster < root_cluster) {
            result[n] = -1;
            
        } else if (cluster == root_cluster) {
            result[n] = -1;
            if (clusters.size()==1 && allow_single_cluster) {
                double c_lambda = -1;
                double p_lambda = -1;
                for (int j=0; j<tree.size(); j++) {
                    if (tree[j]->child == n) {
                        c_lambda = tree[j]->lambda_val;
                    }
                    if (tree[j]->parent == cluster) {
                        if (tree[j]->lambda_val > p_lambda) {
                            p_lambda = tree[j]->lambda_val;
                        }
                    }
                }
                if (c_lambda >= p_lambda && p_lambda > -1) {
                    result[n] = cluster_label_map[cluster];
                }
            }
            
        } else {
            if (match_reference_implementation) {
                double point_lambda=-1, cluster_lambda=-1;
                for (int j=0; j<tree.size(); j++) {
                    if (tree[j]->child == n) {
                        point_lambda = tree[j]->lambda_val;
                        break;
                    }
                }
                for (int j=0; j<tree.size(); j++) {
                    if (tree[j]->child == cluster) {
                        cluster_lambda = tree[j]->lambda_val;
                        break;
                    }
                }
                if (point_lambda > cluster_lambda && cluster_lambda > -1) {
                    result[n] = cluster_label_map[cluster];
                } else {
                    result[n] = -1;
                }
            } else {
                result[n] = cluster_label_map[cluster];
            }
        }
    }
    return result;
}
/*
* @function CreatTree: recursively builds and prints the decision tree via DFS
* @param: treeHead  the decision tree being generated
* @param: statTree  the statistics tree; it is updated in place, and since the data is updated by DFS it does not have to be rebuilt on every call
* @param: infos  the data records
* @param: readLine  the rows of infos to be tallied in the current call, supplied by the caller
* @param: readClumNum  flags marking whether each attribute column is still available (0) or locked (1)
* @param: deep  the depth of the decision tree, used for printing
* @return void
*/
void DecisionTree::CreatTree(TreeNode* treeHead, vector<attributes*>& statTree, vector<vector<string>>& infos, 
							 set<int>& readLine, vector<int>& readClumNum, int deep)
{
	// there are rows left to tally
	if (readLine.size() != 0)
	{
		string treeLine = "";
		for (int i = 0; i < deep; i++)
		{
			treeLine += "--";
		}
		// clear the other attribute subtrees before recursing
		resetStatTree(statTree, readClumNum);
		// tally the data referenced by readLine (which attributes and which rows to count),
		// fill statTree (a single statTree is shared, hence the reference), and return the number of target records
		int deciNum = statister(getInfos(), statTree, readLine, readClumNum);
		int lineNum = readLine.size();
		int attr_node = compuDecisiNote(statTree, deciNum, lineNum, readClumNum); // copied into a local variable
		// this column is now locked
		readClumNum[attr_node] = 1;
		// create the root node
		TreeNode* treeNote = new TreeNode();
		treeNote->m_sAttribute = statTree[attr_node]->attriName;
		treeNote->m_iDeciNum = deciNum;
		treeNote->m_iUnDecinum = lineNum - deciNum;
		if (treeHead == nullptr)
		{
			treeHead = treeNote; // tree root
		}
		else
		{
			treeHead->m_vChildren.push_back(treeNote); // child node
		}
		cout << "node-"<< treeLine << ">" << statTree[attr_node]->attriName << " " << deciNum << " " << lineNum - deciNum << endl;
		
		// recurse through the child branches
		for(map<string, attrItem*>::iterator map_iterator = statTree[attr_node]->attriItem.begin();
			map_iterator != statTree[attr_node]->attriItem.end(); ++map_iterator)
		{
			// print the branch
			int sum = map_iterator->second->itemNum[0];
			int deci_Num = map_iterator->second->itemNum[1];
			cout << "branch--"<< treeLine << ">" << map_iterator->first << endl;
			// recursively compute and create
			if (deci_Num != 0 && sum != deci_Num )
			{
				// collect the rows that remain valid
				set<int> newReadLineNum = map_iterator->second->itemLine;
				//DFS
				CreatTree(treeNote, statTree, infos, newReadLineNum, readClumNum, deep + 1);
			}
			else
			{
				// create a leaf node
				TreeNode* treeEnd = new TreeNode();
				treeEnd->m_sAttribute = statTree[attr_node]->attriName;
				treeEnd->m_iDeciNum = deci_Num;
				treeEnd->m_iUnDecinum = sum - deci_Num;
				treeNote->m_vChildren.push_back(treeEnd);
				// print the leaf
				if (deci_Num == 0)
				{
					cout << "leaf---"<< treeLine << ">no" << " " << sum << endl;
				}
				else
				{
					cout << "leaf---"<< treeLine << ">yes" << " " << deci_Num <<endl;
				}
			}
		}
		// restore the availability of this attribute column
		readClumNum[attr_node] = 0;
	}
}
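
// A minimal sketch of the initial call to CreatTree (assumed usage, inferred only
// from the body above: a null treeHead becomes the root, readClumNum uses 0 for
// "column available" and 1 for "locked", and deep starts at 0 for printing):
//
//   set<int> allRows;
//   for (int r = 0; r < (int)infos.size(); ++r) allRows.insert(r);
//   vector<int> colFlags(numAttributes, 0);   // numAttributes is hypothetical here
//   DecisionTree tree;
//   tree.CreatTree(nullptr, statTree, infos, allRows, colFlags, 0);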
    void buildBottomUpPhases2And3( bool dupsAllowed,
                                   IndexDescriptor* idx,
                                   BSONObjExternalSorter& sorter,
                                   bool dropDups,
                                   set<DiskLoc>& dupsToDrop,
                                   CurOp* op,
                                   SortPhaseOne* phase1,
                                   ProgressMeterHolder& pm,
                                   Timer& t,
                                   bool mayInterrupt ) {
        BtreeBuilder<V> btBuilder(dupsAllowed, idx->getOnDisk());
        BSONObj keyLast;
        auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
        // verifies that pm and op refer to the same ProgressMeter
        verify(pm == op->setMessage("index: (2/3) btree bottom up",
                                    "Index: (2/3) BTree Bottom Up Progress",
                                    phase1->nkeys,
                                    10));
        while( i->more() ) {
            RARELY killCurrentOp.checkForInterrupt( !mayInterrupt );
            ExternalSortDatum d = i->next();

            try {
                if ( !dupsAllowed && dropDups ) {
                    LastError::Disabled led( lastError.get() );
                    btBuilder.addKey(d.first, d.second);
                }
                else {
                    btBuilder.addKey(d.first, d.second);                    
                }
            }
            catch( AssertionException& e ) {
                if ( dupsAllowed ) {
                    // unknown exception??
                    throw;
                }

                if( e.interrupted() ) {
                    killCurrentOp.checkForInterrupt();
                }

                if ( ! dropDups )
                    throw;

                /* we could queue these on disk, but normally there are very few dups, so instead we
                    keep in ram and have a limit.
                */
                dupsToDrop.insert(d.second);
                uassert( 10092 , "too many dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
            }
            pm.hit();
        }
        pm.finished();
        op->setMessage("index: (3/3) btree-middle", "Index: (3/3) BTree Middle Progress");
        LOG(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
        btBuilder.commit( mayInterrupt );
        if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
            warning() << "not all entries were added to the index, probably some "
                         "keys were too large" << endl;
        }
    }
 size_t inner_vertex_cnt() const {
     return inner_vertices_.size();
 }
Example #7
int main( int argc, const char** argv )
{
  // splash

  printf("hello, trail!\n");

  // initialize for LINEAR trail approximation (quadrilateral)

  trailEdgeRow.push_back(IMAGE_ROW_FAR);
  trailEdgeRow.push_back(IMAGE_ROW_NEAR);

  add_all_images_from_file("imagedirs.txt");

  // create initial, ordered indices 

  int i;

  for (i = 0; i < dir_image_filename.size(); i++)      
    Random_idx.push_back(i);
  printf("%i total images\n", (int) Random_idx.size());

  // shuffle indices (this should be optional)

  struct timeval tp;
  
  gettimeofday(&tp, NULL); 
  //  srand48(tp.tv_sec);
  srand(tp.tv_sec);
  random_shuffle(Random_idx.begin(), Random_idx.end());

  Nonrandom_idx.resize(Random_idx.size());
  for (i = 0; i < Random_idx.size(); i++)
    Nonrandom_idx[Random_idx[i]] = i;

  loadBad();
  printf("%i bad\n", (int) Bad_idx_set.size());

  Vert.resize(Random_idx.size());
  ClosestVert_dist.resize(Random_idx.size());   

  num_saved_verts = loadVert();
  printf("%i with verts\n", num_saved_verts);

  set_current_index(ZERO_INDEX);

  // display

  char c;

  do {
    
    // load image

    current_imname = dir_image_filename[current_index];

    current_im = imread(current_imname.c_str());
    draw_im = current_im.clone();

    // show image 

    draw_overlay();
    //draw_output_window();
    //draw_training_images();


    imshow("trailGT", draw_im);
    if (!callbacks_set) {
      setMouseCallback("trailGT", onMouse);
    }

    c = waitKey(0);

    onKeyPress(c);

  } while (c != (int) 'q');

  return 0;
}
Example #8
#define debug(x) cerr << #x << " = " << (x) << " (L" << __LINE__ << ")" << " " << __FILE__ << endl;
#define check(x) cerr << #x << " = "; REP(q,(x).size()) cerr << (x)[q] << " "; cerr << endl;
#define checkn(x,n) cerr << #x << " = "; for(int i=0;i<(x).size()&&i<(n);++i) cerr << (x)[i] << " "; cerr << endl;
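// Usage sketch for the helper macros above (assumes REP(i, n) expands to a loop
// over 0..n-1, as it is used in main below):
//   vector<int> v; v.push_back(3); v.push_back(1); v.push_back(4);
//   check(v);      // prints: v = 3 1 4
//   checkn(v, 2);  // prints: v = 3 1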

int main() {
  int P; cin >> P;
  vector<int> pages(P);
  REP(i, P) {
    int temp;
    scanf("%d", &temp);
    pages[i] = temp;
  }

  set<int> all;
  REP(i, P) all.insert(pages[i]);
  int n = all.size();

  int s = 0, t = 0, num = 0;
  map<int, int> count;
  int res = P;
  for (;;) {
    while (t < P && num < n) {
      if (count[pages[t]] == 0) {
        num++;
      }
      count[pages[t]]++;
      t++;
    }
    if (num < n) break;
    res = min(res, t - s);
    count[pages[s]]--;
Example #9
int _tmain(int argc, _TCHAR* argv[])
{
	long pmax = 10000000;
	vector<long> primes = sieve_of_eratosthenes(pmax);

	long long sum = 0;

	for (vector<long>::iterator it=primes.begin(); it!= primes.end(); it++)
	{
		long p = *it;

		// every 1 digit prime is a relative
		if (p<10) {
			relatives.insert(*it);
			continue;
		}

		bool condition_met = false;

		// condition 1
		// change all digits possible
		int len = findn(*it);
		for (long exponent=1; exponent < p ; exponent *= 10) {
			long limit = p/exponent % 10;
			long newp = p;
			for (int n=0; n<limit; n++) {
				newp -= exponent;
				if (relatives.find(newp) != relatives.end() && findn(newp) + 1 >= len) {
					relatives.insert(*it);
					exponent=p;
					n=limit;
					condition_met=true;
				}
			}
		}

		// condition 2
		// remove leftmost digit
		if ( condition_met == false ) {
			long newp = p % getexp(p);
			if (relatives.find(newp) != relatives.end() && findn(newp) + 1 >= len) {
				relatives.insert(*it);
				condition_met=true;
			}
		}

		if (condition_met) {
			if ( no_relatives.size() > 0 ) {
				set<long> erased = recheck_non_relatives(p);
				set<long> erased2;
				set<long> new_erased;

				while (erased.size() > 0) {
					erased2.clear();
					new_erased.clear();
					for (set<long>::iterator sit=erased.begin(); sit != erased.end(); sit++) {
						erased2 = recheck_non_relatives(*sit);
						for (set<long>::iterator sit2=erased2.begin(); sit2 != erased2.end(); sit2++) {
							new_erased.insert(*sit2);
						}
					}
					erased = new_erased;
				}
			}
		}
		else {
			no_relatives.insert(*it);
			sum += *it;
			cout << *it << endl;
		}
	}

	
	cout << "--------------" << endl << sum << endl;

	system("PAUSE");
	return 0;
}
Example #10
int main()
{
	freopen("/home/qitaishui/code/out.txt","r",stdin);
	freopen("/home/qitaishui/code/out2.txt","w",stdout);
	int ca = 0;
	while(~scanf("%d", &n))
	{
		for(int i = 0; i < n; i++)
			d[i].input();

		for(int i = 0; i < n; i++)
			root[i] = i;

		for(int i = 0; i < n; i++)
			for(int j = 0; j < i; j++)
			if(check(d[i], d[j]))
			{
				int x = find(i), y = find(j);
				if(x != y)
					root[x] = y;
			}

		for(int i = 0; i < n; i++)
			find(i);

		for(int i = 0; i < n; i++)
		{
			int x1, y1, x2, y2;
			int j = find(i);
			// std::min/std::max take two arguments, so fold the four values pairwise
			x1 = min(min(d[i].x1, d[i].x2), min(d[j].x1, d[j].x2));
			x2 = max(max(d[i].x1, d[i].x2), max(d[j].x1, d[j].x2));
			y1 = min(min(d[i].y1, d[i].y2), min(d[j].y1, d[j].y2));
			y2 = max(max(d[i].y1, d[i].y2), max(d[j].y1, d[j].y2));

			if((ll)(d[j].y2 - d[j].y1) * (d[j].x2 - d[j].x1) >= 0)
				d[j] = Data(x1, y1, x2, y2);
			else
				d[j] = Data(x1, y2, x2, y1);
		}

		int cnt = 0;
		for(int i = 0; i < n; i++)
		if(i == find(i))
			d[cnt++] = d[i];

		n = cnt;
		all.clear();
		tset.clear();
		ll ans = 0;

		for(int i = 0; i < n; i++)
		{
			ans += 1 + gcd(abs(d[i].x1 - d[i].x2), abs(d[i].y1 - d[i].y2));
			tset.clear();
			// 'j' is undefined at this point in the original snippet; an inner loop
			// over the other rectangles is assumed here so the code compiles
			for(int j = 0; j < n; j++)
			{
				if(i != j)
					cal(d[i], d[j]);
			}
			ans -= (ll)tset.size();
		}

		ans += (ll)all.size();
		printf("%d %d\n", ++ca,(int)ans);
	}
	return 0;
}
Example #11
uint64_t Config::lca_from_ids(unordered_map<uint64_t,unsigned int> & node2depth, set<uint64_t> & ids) {

	if(ids.size() == 1) {
		return *(ids.begin());
	}
	size_t num_ids = ids.size();
	uint64_t * leafs = (uint64_t *) calloc(num_ids,sizeof(uint64_t));
	unsigned int shallowest_depth = 100000;
	unsigned int index = 0;
	for(auto it = ids.begin() ; it != ids.end(); ++it) {
		uint64_t id = *it;	

		if(nodes->count(id)==0) {
			if(verbose) cerr << "Warning: Taxon ID " << id << " in database is not contained in taxonomic tree.\n";
			num_ids--;
			continue;
		}

		// check if this id was already seen, then skip it
		leafs[index++] = id;

		//if id is already in the depth map then do not add it.
		if(node2depth.count(id)==0) {
			unsigned int depth = 1;
			while(nodes->count(id)>0 && id != nodes->at(id)) {
				depth++;
				id = nodes->at(id);	
			}
			node2depth.insert(pair<uint64_t,unsigned int>(*it,depth));
			//cerr << "Inserting to depth map: " << *it <<" -> " << depth << endl;
			if(depth < shallowest_depth) { shallowest_depth = depth; }
		}
		else if(node2depth.at(*it) < shallowest_depth) {
			shallowest_depth = node2depth.at(*it);
		}
	}

	if(num_ids<=0) {
		free(leafs);
		return 0;
	}

	//cerr << "shallowest depth = " << shallowest_depth << endl;

	// bring all IDs up to the same depth
	/*for (auto it=leafs.begin(); it != leafs.end(); ++it) {
		//cerr << "Bringing leaf " << *it << " to depth " << shallowest_depth <<" from depth " << node2depth->at(*it) << endl;
		for(int i = node2depth.at(*it) - shallowest_depth; i > 0; i--) {
			*it = nodes->at(*it);
		}
	}*/
	for(size_t index = 0; index < num_ids; ++index) {
		for(int i = node2depth.at(leafs[index]) - shallowest_depth; i > 0; i--) {
			leafs[index]	= nodes->at(leafs[index]);
		}
	}

	while(true) {
		//foreach element in the list, check if id is the same, otherwise go one level up in tree, i.e. one more iteration 
		uint64_t first = leafs[0];
		bool found = true;
		//for (auto it=leafs.begin(); it != leafs.end(); ++it) {
		for(size_t index = 0; index < num_ids; ++index) {
			if(first != leafs[index]) {
				found = false;
			}
			leafs[index] = nodes->at(leafs[index]);
		}
		if(found) {
			free(leafs);
			return first;
		}
	}
	free(leafs);

}
Example #12
bool compare_length (set<int> &first, set<int> &second)
{
	return (first.size() < second.size());
}
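
// Minimal usage sketch (the container name is assumed, not from the original
// source): order a collection of id-sets from smallest to largest.
//   vector< set<int> > groups;                           // filled elsewhere
//   sort(groups.begin(), groups.end(), compare_length);  // shortest set first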
Example #13
File: vd.cpp Project: cerdogan/Job
/* ******************************************************************************************** */
void vd () {

	// Process the site and circle events 
	while(!eventQueue.empty()) {

		// avl.draw();
		getchar2();

		// Check if it is a site event
		Event* event = eventQueue.top();
		eventQueue.pop();
		SiteEvent* siteEvent = dynamic_cast <SiteEvent*> (event);
		if(siteEvent != NULL) {

			printf("\n--- site --------------------------------------------------\n");
			
			// Update the sweep line location
			sweepLine = siteEvent->point(1) - 0.0001;
			printf("sweepLine: %lf\n", sweepLine);
			printf("new site: (%lf, %lf)\n", siteEvent->point(0), siteEvent->point(1));
			//avl.draw();
			getchar2();

			// Locate the existing arc information
			pair <bool, AVL<TreeNode*>::Node*> searchRes = 
				avl.search_candidateLoc(new TreeNode(siteEvent->pi, -1, true));

			// The tree is empty. Temporarily add the site information as a dummy node
			if(searchRes.second == NULL) {
				avl.insert(new TreeNode(siteEvent->pi, -1, true));
				printf("Tree empty!\n");
				continue;
			}

			// The tree still doesn't have a break point, but just a dummy site node information
			TreeNode* parentNode = searchRes.second->value;
			if(parentNode->dummy) {
				avl.remove(parentNode);
				avl.insert(new TreeNode(parentNode->p0i, siteEvent->pi));
				avl.insert(new TreeNode(siteEvent->pi, parentNode->p0i));
				printf("Tree dummy!\n");
				continue;
			}
			
			// Determine the site by comparing it with the found node value
			int prevSiteIdx = 0;
			if(parentNode->value() < siteEvent->point(0)) prevSiteIdx = parentNode->p1i;
			else prevSiteIdx = parentNode->p0i;
			printf("Previous site idx: (%d)\n", prevSiteIdx);
			
			// Create the new break points
			TreeNode* newNode1 = new TreeNode(siteEvent->pi, prevSiteIdx);
			TreeNode* newNode2 = new TreeNode(prevSiteIdx, siteEvent->pi);
 			avl.insert(newNode1);
 			avl.insert(newNode2);

			// Check for "false alarms" for circle events
			set <pair<CircleEvent*, Vector2d>, Vector2dComp>::iterator it =  allCircles.begin();
			printf("# parent circles: %d\n", parentNode->circleEvents.size());
//			for(size_t c_i = 0; c_i < parentNode->circleEvents.size(); c_i++) {
			for(; it != allCircles.end(); it++) {
//				CircleEvent* ce = parentNode->circleEvents[c_i];
				CircleEvent* ce = it->first;
				printf("\tTriplet (%d,%d,%d)\n", ce->points(0), ce->points(1), ce->points(2)); 
				if((ce->center - siteEvent->point).norm() < ce->radius) {
					printf("\tRemoving triplet: (%d,%d,%d)\n", ce->points(0),ce->points(1),ce->points(2));
					ce->falseAlarm = true;
				}
			}
			
			// Get the leaf information to check for circles
			vector <pair<int, AVL<TreeNode*>::Node*> > leafParents;
			avl.traversal_leaves(leafParents);
			printf("Traversal: {");
			vector <pair<int, TreeNode*> > sites;
			for(int i = 0; i < leafParents.size(); i++) {
				TreeNode* node = leafParents[i].second->value;
				int type = leafParents[i].first;
				if(type == 2) {
					printf("(%d,%d), ", node->p0i, node->p1i);
					sites.push_back(make_pair(node->p0i, node));
					sites.push_back(make_pair(node->p1i, node));
				}
				if(type == 0) {
					printf("%d, ", node->p0i);
					sites.push_back(make_pair(node->p0i, node));
				}
				if(type == 1) {
					printf("%d, ", node->p1i);
					sites.push_back(make_pair(node->p1i, node));
				}
			}
			printf("\b\b}\n");

			// Check for circles in triplets
			for(int s_i = 0; s_i < sites.size()-2; s_i++) {

				// Skip newly generated centers
				int i0 = sites[s_i].first, i1 = sites[s_i+1].first, i2 = sites[s_i+2].first;
				if(i0 == i2) continue;

				// If the bottom point of the fit circle can be tangent to the sweep line,
				// add it to the queue
				Vector2d center = fitCircle(data[i0], data[i1], data[i2]);
				double radius = (data[i0]-center).norm();
				double temp_y = center(1) - radius;
				printf("idx: %d, center: (%lf, %lf), temp_y: %lf\n", s_i, center(0), center(1), temp_y);
				printf("radius: %lf, sweepLine: %lf\n", radius, sweepLine);
				if(temp_y < sweepLine) { 

					if (allCircles.find(make_pair((CircleEvent*) NULL, center)) != allCircles.end()) {
						printf("\tTriplet (%d,%d,%d), (%lf, %lf) already exists.\n", i0, i1, i2, center(0), center(1));
						printf("all circles #: %lu\n", allCircles.size());
						continue;
					}

					// Create the circle event
					CircleEvent* ce = new CircleEvent();
					ce->point = Vector2d(center(0), temp_y);
					ce->points = Vector3i(i0,i1,i2);
					ce->center = center;
					ce->radius = radius;
					eventQueue.push(ce);
					allCircles.insert(make_pair(ce, ce->center));
					printf("\tAdding triplet: (%d,%d,%d), (%lf, %lf)\n", i0, i1, i2, center(0), center(1));

					// Register the circle event with the involved arcs
					sites[s_i].second->circleEvents.push_back(ce);
					sites[s_i+1].second->circleEvents.push_back(ce);
					sites[s_i+2].second->circleEvents.push_back(ce);
				}
				else printf("\tCircle already passed, not adding!\n");
			}

		}

		else {

			printf("\n--- circle ------------------------------------------------\n");

			// Update the sweepline
			CircleEvent* ce = dynamic_cast <CircleEvent*> (event);
			printf("circle event: point: (%lf, %lf), center:, (%lf, %lf), points: %d, %d, %d\n", ce->point(0), ce->point(1), ce->center(0), ce->center(1), 
				ce->points(0), ce->points(1), ce->points(2));
			sweepLine = ce->point(1) + 0.00001;
			printf("sweepLine: %lf\n", sweepLine);
			// avl.draw();
			getchar2();

			// Check if false alarm
			if(ce->falseAlarm || ce->falseAlarmCircle) {
				printf("\tFalse alarm!\n");
				continue;
			}

			// Get the arc that is disappearing due to the circle
			pair <bool, AVL<TreeNode*>::Node*> searchRes = 
				avl.search_candidateLoc(new TreeNode(ce->point, Vector2d(), true));
			assert(searchRes.second != NULL && "Could not find the above arc");
			TreeNode* node1 = searchRes.second->value;
			AVL<TreeNode*>::Node* searchNode = searchRes.second;
			printf("node1: (%d,%d)\n", node1->p0i, node1->p1i);

			// Fix node1 if next one is better
			AVL<TreeNode*>::Node* temp = avl.next(searchNode);
			AVL<TreeNode*>::Node* temp2 = avl.prev(searchNode);
			if(temp != NULL) printf("temp: '%s'\n", print(temp->value).c_str());
			if(temp != NULL) printf("temp2: '%s'\n", print(temp2->value).c_str());
			double diff1 = (node1->value() - ce->point(0));
			double diff2 = (temp == NULL) ? 1000.0 : (temp->value->value() - ce->point(0));
			double diff3 = (temp2 == NULL) ? 1000.0 : (temp2->value->value() - ce->point(0));
			printf("\t%lf vs %lf\n", diff1, diff2);
			if(fabs(diff2) < fabs(diff1)) {
				node1 = temp->value;
				searchNode = temp;
				printf("\t\tupdating node1 with temp\n");
			}
			printf("\t%lf vs %lf\n", diff1, diff3);
			if(fabs(diff3) < fabs(diff1)) {
				node1 = temp2->value;
				searchNode = temp2;
				printf("\t\tupdating node1 with temp2\n");
			}
			printf("node1: (%d,%d)\n", node1->p0i, node1->p1i);

			// Skip if the node can't be found
			double diff = (node1->value() - ce->point(0));
			if(fabs(diff) > 0.05) {
				printf("Skipping a circle event (node1) because it is behind the beach line\n");
				continue;
			}

			// Determine the other node
			AVL<TreeNode*>::Node* opt1 = avl.next(searchNode);
			if(opt1 != NULL) printf("opt1: '%s'\n", print(opt1->value).c_str());
			AVL<TreeNode*>::Node* opt2 = avl.prev(searchNode);
			if(opt2 != NULL) printf("opt2: '%s'\n", print(opt2->value).c_str());
			TreeNode* node2;
			if(opt1 == NULL) node2 = opt2->value;
			else if(opt2 == NULL) node2 = opt1->value;
			else {
				double diff1 = (node1->value() - opt1->value->value());	
				double diff2 = (node1->value() - opt2->value->value());	
				printf("diff1: %lf, diff2: %lf\n", diff1, diff2);
				if(fabs(diff1) < fabs(diff2)) node2 = opt1->value;
				else node2 = opt2->value;
			}

			// Skip if the node can't be found
			diff = (node2->value() - ce->point(0));
			if(fabs(diff) > 0.05) {
				printf("Skipping a circle event (node2) because it is behind the beach line\n");
				continue;
			}

			printf("node1: (%d,%d)\n", node1->p0i, node1->p1i);
			printf("node2: (%d,%d)\n", node2->p0i, node2->p1i);

			// Remove any potential circles that were going to use one of the break points for 
			// convergence that just got merged into a voronoi vertex.
			set <pair<CircleEvent*, Vector2d>, Vector2dComp>::iterator it =  allCircles.begin();
			int si0 = ce->points(0), si1 = ce->points(1), si2 = ce->points(2); 
			for(; it != allCircles.end(); it++) {
				CircleEvent* ce = it->first;
				int i0 = ce->points(0), i1 = ce->points(1), i2 = ce->points(2); 
				bool remove = false;
				if((i0 == si0 && i1 == si1) || (i1 == si0 && i2 == si1) || 
					(i0 == si1 && i1 == si0) || (i1 == si1 && i2 == si0)) remove = true; 
				if((i0 == si1 && i1 == si2) || (i1 == si1 && i2 == si2) || 
					(i0 == si2 && i1 == si1) || (i1 == si2 && i2 == si1)) remove = true; 
				
				if(remove) {
					printf("\tRemoving triplet: (%d,%d,%d)\n", i0, i1, i2);
					ce->falseAlarmCircle = true;
				}
			}
	
			// Remove the potential circle events from these nodes
			for(int ce_i = 0; ce_i < node1->circleEvents.size(); ce_i++) {
				CircleEvent* ce = node1->circleEvents[ce_i];
				if(ce->points[0] == node1->p0i && ce->points[1] == node1->p1i)
					ce->falseAlarmCircle = true;
				if(ce->points[1] == node1->p0i && ce->points[2] == node1->p1i)
					ce->falseAlarmCircle = true;
			}
			for(int ce_i = 0; ce_i < node2->circleEvents.size(); ce_i++) {
				CircleEvent* ce = node2->circleEvents[ce_i];
				if(ce->points[0] == node2->p0i && ce->points[1] == node2->p1i)
					ce->falseAlarmCircle = true;
				if(ce->points[1] == node2->p0i && ce->points[2] == node2->p1i)
					ce->falseAlarmCircle = true;
			}

			// Remove the arc from the tree
			printf("Before removes\n");
			getchar2();
			avl.remove(node1);
			// avl.draw();
			printf("Drawn after remove 1\n");
			getchar2();
			avl.remove(node2);
			// avl.draw();
			printf("Drawn after remove 2\n");
			getchar2();

			// Add the new break point 
			TreeNode* newNode;
			if(node1->p0i == node2->p1i)
				newNode = new TreeNode(node2->p0i, node1->p1i);
			else if(node1->p1i == node2->p0i)
				newNode = new TreeNode(node1->p0i, node2->p1i);
			else assert(false && "Unknown new break point creation");
			
 			avl.insert(newNode);
			printf("Inserted new node: '%s'\n", print(newNode).c_str());

			// Set the second points of the completed voronoi edges
			node1->edge1->p1 = ce->center;
			node1->edge2->p0 = ce->center;
			node2->edge1->p1 = ce->center;
			node2->edge2->p0 = ce->center;

			// Find angles around the cell center to place them ccw
			HalfEdge* e1 = node1->edge1, *e2 = node2->edge2;
			int site_idx = (node1->p0i == node2->p1i) ? node1->p0i : node1->p1i;
			Vector2d site = data[site_idx];
			Vector2d v1 = (0.5 * (e1->p0 + e1->p1) - site).normalized();
			Vector2d v2 = (0.5 * (e2->p0 + e2->p1) - site).normalized();
			double angle1 = atan2(v1(1), v1(0)) + (v1(1) < 0 ? 2*M_PI : 0);
			double angle2 = atan2(v2(1), v2(0)) + (v2(1) < 0 ? 2*M_PI : 0);
			if((angle1 < angle2) && fabs(angle1-angle2) > M_PI) angle1 += 2*M_PI;
			else if((angle2 < angle1) && fabs(angle2-angle1) > M_PI) angle2 += 2*M_PI;
			if(angle1 > angle2) {
				e1->prev = e2;
				e2->next = e1;
			}
			else {
				e2->prev = e1;
				e1->next = e2;
			}

			// Get the leaf information to check for circles again
			vector <pair<int, AVL<TreeNode*>::Node*> > leafParents;
			avl.traversal_leaves(leafParents);
			printf("Traversal: {");
			vector <pair<int, TreeNode*> > sites;
			for(int i = 0; i < leafParents.size(); i++) {
				TreeNode* node = leafParents[i].second->value;
				int type = leafParents[i].first;
				if(type == 2) {
					printf("(%d,%d), ", node->p0i, node->p1i);
					sites.push_back(make_pair(node->p0i, node));
					sites.push_back(make_pair(node->p1i, node));
				}
				if(type == 0) {
					printf("%d, ", node->p0i);
					sites.push_back(make_pair(node->p0i, node));
				}
				if(type == 1) {
					printf("%d, ", node->p1i);
					sites.push_back(make_pair(node->p1i, node));
				}
			}
			printf("\b\b}\n");

			// Check for circles in triplets
			for(int s_i = 0; s_i < sites.size()-2; s_i++) {

				// Skip newly generated centers
				int i0 = sites[s_i].first, i1 = sites[s_i+1].first, i2 = sites[s_i+2].first;
				if(i0 == i2) continue;

				// If the bottom point of the fit circle can be tangent to the sweep line,
				// add it to the queue
				Vector2d center = fitCircle(data[i0], data[i1], data[i2]);
				double temp_y = center(1) - (data[i0]-center).norm();
				printf("idx: %d, center: (%lf, %lf), temp_y: %lf\n", s_i, center(0), center(1), temp_y);
				if(temp_y < sweepLine) { 

					// Check if it existed before
					set <pair<CircleEvent*, Vector2d>, Vector2dComp>::iterator it =
						allCircles.find(make_pair((CircleEvent*) NULL, center));
					if(it != allCircles.end() && it->first->falseAlarmCircle){

						printf("\tTurning on an old false alarm for triplet: %d, %d, %d.\n", it->first->points(0), it->first->points(1), it->first->points(2));
						it->first->falseAlarmCircle = false;
						getchar2();
						continue;
					}
					else if(it == allCircles.end()) {

						// Create the circle event
						CircleEvent* ce = new CircleEvent();
						ce->point = Vector2d(center(0), temp_y);
						ce->points = Vector3i(i0,i1,i2);
						ce->center = center;
						eventQueue.push(ce);
						allCircles.insert(make_pair(ce, ce->center));
						printf("\tAdding triplet: (%d, %d, %d)\n", i0, i1, i2);

						// Register the circle event with the involved arcs
						sites[s_i].second->circleEvents.push_back(ce);
						sites[s_i+1].second->circleEvents.push_back(ce);
						sites[s_i+2].second->circleEvents.push_back(ce);
					}
				}

			}

			getchar2();
		}

		
	}
	
}
Example #14
 //returns number of words
 long int NumWords () {
    return wordSet.size();
 }
Example #15
int main(int argc, char **argv) {

    stringstream nullStream;
    nullStream.clear(ios::failbit);

    const char *dev = NULL;
    char errbuf[PCAP_ERRBUF_SIZE];
    pcap_t *handle;

    struct bpf_program fp;
    bpf_u_int32 mask;
    bpf_u_int32 net;

    bool source = false;
    bool replay = false;
    bool diaglog = false;
    const char *file = 0;

    vector< const char * > args;
    for( int i = 1; i < argc; ++i )
        args.push_back( argv[ i ] );

    try {
        for( unsigned i = 0; i < args.size(); ++i ) {
            const char *arg = args[ i ];
            if ( arg == string( "--help" ) ) {
                usage();
                return 0;
            }
            else if ( arg == string( "--forward" ) ) {
                forwardAddress = args[ ++i ];
            }
            else if ( arg == string( "--source" ) ) {
                uassert( 10266 ,  "can't use --source twice" , source == false );
                uassert( 10267 ,  "source needs more args" , args.size() > i + 2);
                source = true;
                replay = ( args[ ++i ] == string( "FILE" ) );
                diaglog = ( args[ i ] == string( "DIAGLOG" ) );
                if ( replay || diaglog )
                    file = args[ ++i ];
                else
                    dev = args[ ++i ];
            }
            else if ( arg == string( "--objcheck" ) ) {
                objcheck = true;
                outPtr = &nullStream;
            }
            else {
                serverPorts.insert( atoi( args[ i ] ) );
            }
        }
    }
    catch ( ... ) {
        usage();
        return -1;
    }

    if ( !serverPorts.size() )
        serverPorts.insert( 27017 );

    if ( diaglog ) {
        processDiagLog( file );
        return 0;
    }
    else if ( replay ) {
        handle = pcap_open_offline(file, errbuf);
        if ( ! handle ) {
            cerr << "error opening capture file!" << endl;
            return -1;
        }
    }
    else {
        if ( !dev ) {
            dev = pcap_lookupdev(errbuf);
            if ( ! dev ) {
                cerr << "error finding device: " << errbuf << endl;
                return -1;
            }
            cout << "found device: " << dev << endl;
        }
        if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
            cerr << "can't get netmask: " << errbuf << endl;
            return -1;
        }
        handle = pcap_open_live(dev, SNAP_LEN, 1, 1000, errbuf);
        if ( ! handle ) {
            cerr << "error opening device: " << errbuf << endl;
            return -1;
        }
    }

    switch ( pcap_datalink( handle ) ) {
    case DLT_EN10MB:
        captureHeaderSize = 14;
        break;
    case DLT_NULL:
        captureHeaderSize = 4;
        break;
    default:
        cerr << "don't know how to handle datalink type: " << pcap_datalink( handle ) << endl;
    }

    assert( pcap_compile(handle, &fp, const_cast< char * >( "tcp" ) , 0, net) != -1 );
    assert( pcap_setfilter(handle, &fp) != -1 );

    cout << "sniffing... ";
    for ( set<int>::iterator i = serverPorts.begin(); i != serverPorts.end(); i++ )
        cout << *i << " ";
    cout << endl;

    pcap_loop(handle, 0 , got_packet, NULL);

    pcap_freecode(&fp);
    pcap_close(handle);

    return 0;
}
Example #16
void CompNovoIdentificationCID::reducePermuts_(set<String> & permuts, const PeakSpectrum & CID_spec, double prefix, double suffix)
{
    if (permuts.size() < max_subscore_number_)
    {
        return;
    }

    vector<Permut> score_permuts;

    Size i(0);
    for (set<String>::const_iterator it = permuts.begin(); it != permuts.end(); ++it, ++i)
    {
#ifdef REDUCE_PERMUTS_DEBUG
        if (i % 1000 == 0)
        {
            cerr << (double)i / permuts.size() * 100 << "%" << endl;
        }
#endif

        PeakSpectrum CID_sim_spec;
        getCIDSpectrumLight_(CID_sim_spec, *it, prefix, suffix);
        //getCIDSpectrum_(CID_sim_spec, *it, 1, prefix, suffix);

        double score = zhang_(CID_sim_spec, CID_spec);

        if (boost::math::isnan(score))
        {
            score = 0;
        }

        score /= it->size();

        if (boost::math::isnan(score))
        {
            score = 0;
        }


#ifdef REDUCE_PERMUTS_DEBUG
        cerr << "Subscoring: " << *it << " " << cid_score << " (CID=";
        /*      for (PeakSpectrum::ConstIterator pit = CID_sim_spec.begin(); pit != CID_sim_spec.end(); ++pit)
                {
                cerr << pit->getPosition()[0] << "|" << pit->getIntensity() << "; ";
                }*/
        cerr << endl;
#endif

        Permut new_permut(it, score);
        score_permuts.push_back(new_permut);
    }

    sort(score_permuts.begin(), score_permuts.end(), Internal::PermutScoreComparator);

    set<String> new_permuts;
    Size count(0);
    for (vector<Permut>::const_iterator it = score_permuts.begin(); it != score_permuts.end() && count < max_subscore_number_; ++it, ++count)
    {
        new_permuts.insert(*it->getPermut());
#ifdef REDUCE_PERMUTS_DEBUG
        cerr << "Subscore winner: " << it->getPermut() << " " << it->getScore() << endl;
#endif
    }

    permuts = new_permuts;
    return;
}
Example #17
	segment_t get_max() const
	{
		assert(s.size() > 0);
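		// *(--s.end()) is the last element of the ordered set, i.e. the largest
		// segment; *s.rbegin() would be an equivalent, slightly clearer spelling.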
		segment_t seg = *(--s.end());
		return seg;
	}
Example #18
// divide and conquer algorithm of the sequencing
void CompNovoIdentificationCID::getDecompositionsDAC_(set<String> & sequences, Size left, Size right, double peptide_weight, const PeakSpectrum & CID_spec, Map<double, CompNovoIonScoringCID::IonScore> & ion_scores)
{
    static double oxonium_mass = EmpiricalFormula("H2O+").getMonoWeight();
    double offset_suffix(CID_spec[left].getPosition()[0] - oxonium_mass);
    double offset_prefix(peptide_weight - CID_spec[right].getPosition()[0]);

#ifdef DAC_DEBUG
    static Int depth_(0);
    ++depth_;
    String tabs_(depth_, '\t');
    cerr << tabs_ << "void getDecompositionsDAC(sequences[" << sequences.size() << "], " << left << ", " << right << ") ";
    cerr << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " diff=";
#endif

    double diff = CID_spec[right].getPosition()[0] - CID_spec[left].getPosition()[0];

#ifdef DAC_DEBUG
    cerr << diff << endl;
    cerr << "offset_prefix=" << offset_prefix << ", offset_suffix=" << offset_suffix << endl;
#endif

    if (subspec_to_sequences_.has(left) && subspec_to_sequences_[left].has(right))
    {
        sequences = subspec_to_sequences_[left][right];

#ifdef DAC_DEBUG
        depth_--;
        cerr << tabs_ << "from cache DAC: " << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " " << sequences.size() << " " << left << " " << right << endl;
#endif
        return;
    }

    // no further solutions possible?
    if (diff < min_aa_weight_)
    {
#ifdef DAC_DEBUG
        depth_--;
#endif
        return;
    }

    // no further division needed?
    if (diff <= max_decomp_weight_)
    {
        vector<MassDecomposition> decomps;

        // if we are at the C-terminus use precursor_mass_tolerance_
        if (offset_prefix < precursor_mass_tolerance_)
        {
            Param decomp_param(mass_decomp_algorithm_.getParameters());
            decomp_param.setValue("tolerance", precursor_mass_tolerance_);
            mass_decomp_algorithm_.setParameters(decomp_param);
            getDecompositions_(decomps, diff);
            decomp_param.setValue("tolerance", fragment_mass_tolerance_);
            mass_decomp_algorithm_.setParameters(decomp_param);
        }
        else
        {
            getDecompositions_(decomps, diff);
        }
        //filterDecomps_(decomps);

#ifdef DAC_DEBUG
        cerr << tabs_ << "Found " << decomps.size() << " decomps" << endl;
        cerr << tabs_ << "Permuting...";
#endif

        //static Map<String, set<String> > permute_cache;
        for (vector<MassDecomposition>::const_iterator it = decomps.begin(); it != decomps.end(); ++it)
        {
#ifdef DAC_DEBUG
            cerr << it->toString() << endl;
#endif

            String exp_string = it->toExpandedString();
            if (!permute_cache_.has(exp_string))
            {
                permute_("", exp_string, sequences);
                permute_cache_[exp_string] = sequences;
            }
            else
            {
                sequences = permute_cache_[exp_string];
            }
        }

#ifdef DAC_DEBUG
        cerr << tabs_ << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " " << peptide_weight << endl;
        if (sequences.size() > max_subscore_number_)
        {
            cerr << tabs_ << "Reducing #sequences from " << sequences.size() << " to " << max_subscore_number_ << "(prefix=" << offset_prefix  << ", suffix=" << offset_suffix << ")...";
        }
#endif

        // C-terminus
        if (offset_suffix <= precursor_mass_tolerance_)
        {
            filterPermuts_(sequences);
        }

        // reduce the sequences
        reducePermuts_(sequences, CID_spec, offset_prefix, offset_suffix);
#ifdef DAC_DEBUG
        cerr << "Writing to cache " << left << " " << right << endl;
#endif
        subspec_to_sequences_[left][right] = sequences;

#ifdef DAC_DEBUG
        cerr << "ended" << endl;
        cerr << tabs_ << "DAC: " << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " " << sequences.size() << endl;
        depth_--;
#endif

        return;
    }

    // select suitable pivot peaks
    vector<Size> pivots;

    if (offset_suffix < precursor_mass_tolerance_ && offset_prefix < precursor_mass_tolerance_)
    {
        selectPivotIons_(pivots, left, right, ion_scores, CID_spec, peptide_weight, true);
    }
    else
    {
        selectPivotIons_(pivots, left, right, ion_scores, CID_spec, peptide_weight, false);
    }

    // run divide step
#ifdef DAC_DEBUG
    cerr << tabs_ << "Selected " << pivots.size() << " pivot ions: ";
    for (vector<Size>::const_iterator it = pivots.begin(); it != pivots.end(); ++it)
    {
        cerr << *it << "(" << CID_spec[*it].getPosition()[0] << ") ";
    }
    cerr << endl;
#endif

    for (vector<Size>::const_iterator it = pivots.begin(); it != pivots.end(); ++it)
    {
        set<String> seq1, seq2, new_sequences;

        // the smaller the 'gap' the greater the chance of not finding anything
        // so we compute the smaller gap first
        double diff1(CID_spec[*it].getPosition()[0] - CID_spec[left].getPosition()[0]);
        double diff2(CID_spec[right].getPosition()[0] - CID_spec[*it].getPosition()[0]);

        if (diff1 < diff2)
        {
            getDecompositionsDAC_(seq1, left, *it, peptide_weight, CID_spec, ion_scores);
            if (seq1.empty())
            {
#ifdef DAC_DEBUG
                cerr << tabs_ << "first call produced 0 candidates (" << diff1 << ")" << endl;
#endif
                continue;
            }

            getDecompositionsDAC_(seq2, *it, right, peptide_weight, CID_spec, ion_scores);
        }
        else
        {
            getDecompositionsDAC_(seq2, *it, right, peptide_weight, CID_spec, ion_scores);
            if (seq2.empty())
            {
#ifdef DAC_DEBUG
                cerr << tabs_ << "second call produced 0 candidates (" << diff2 << ")" << endl;
#endif
                continue;
            }

            getDecompositionsDAC_(seq1, left, *it, peptide_weight, CID_spec, ion_scores);
        }

#ifdef DAC_DEBUG
        cerr << tabs_ << "Found " << seq1.size() << " solutions (1) " << diff1 << endl;
        cerr << tabs_ << "Found " << seq2.size() << " solutions (2) " << diff2 << endl;
        cerr << tabs_ << "inserting " << seq1.size() * seq2.size()  << " sequences" << endl;
#endif

        // C-terminus
        if (offset_suffix <= fragment_mass_tolerance_)
        {
            filterPermuts_(seq1);
        }

        // test if we found enough sequence candidates
        if (seq1.empty() || seq2.empty())
        {
            continue;
        }

        for (set<String>::const_iterator it1 = seq1.begin(); it1 != seq1.end(); ++it1)
        {
            for (set<String>::const_iterator it2 = seq2.begin(); it2 != seq2.end(); ++it2)
            {
                new_sequences.insert(*it2 + *it1);
            }
        }

        if (seq1.size() * seq2.size() > max_subscore_number_ /* && (offset_prefix > fragment_mass_tolerance_ || offset_suffix > fragment_mass_tolerance_)*/)
        {
#ifdef DAC_DEBUG
            cerr << tabs_ << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " " << peptide_weight << endl;
            cerr << tabs_ << "Reducing #sequences from " << new_sequences.size() << " to " << max_subscore_number_ << "(prefix=" << offset_prefix  << ", suffix=" << offset_suffix << ")...";
#endif
            if (offset_prefix > precursor_mass_tolerance_ || offset_suffix > precursor_mass_tolerance_)
            {
                reducePermuts_(new_sequences, CID_spec, offset_prefix, offset_suffix);
            }

#ifdef DAC_DEBUG
            for (set<String>::const_iterator it1 = new_sequences.begin(); it1 != new_sequences.end(); ++it1)
            {
                cerr << tabs_ << *it1 << endl;
            }
            cerr << endl;
#endif
        }

        for (set<String>::const_iterator sit = new_sequences.begin(); sit != new_sequences.end(); ++sit)
        {
            sequences.insert(*sit);
        }
    }
#ifdef DAC_DEBUG
    cerr << tabs_ << "Found sequences for " << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << endl;
    for (set<String>::const_iterator sit = sequences.begin(); sit != sequences.end(); ++sit)
    {
        cerr << tabs_ << *sit << endl;
    }
#endif

    // reduce the permuts once again to reduce complexity
    if (offset_prefix > precursor_mass_tolerance_ || offset_suffix > precursor_mass_tolerance_)
    {
        reducePermuts_(sequences, CID_spec, offset_prefix, offset_suffix);
    }

#ifdef DAC_DEBUG
    cerr << "Writing to cache " << left << " " << right << endl;
#endif

    subspec_to_sequences_[left][right] = sequences;

#ifdef DAC_DEBUG
    depth_--;
    cerr << tabs_ << "DAC: " << CID_spec[left].getPosition()[0] << " " << CID_spec[right].getPosition()[0] << " " << sequences.size() << endl;
#endif
    return;

}
Example #19
		if(a == c) x.insert(a);
		if(b == d) y.insert(b);

		/* processa */
		foreach(xc, x) {
			bool ok = false;
			foreach(yc, y) {
				point p(*xc, *yc);
				if(!inpoly(quadra, p)) {
					int xi, yi;
					if(nodesx.count(*xc) == 0) {
						nodesx[*xc] = xi = n++;
						cap[SRC][xi] = 1;
					} else xi = nodesx[*xc];
					if(nodesy.count(*yc) == 0) {
						nodesy[*yc] = yi = n++;
						cap[yi][SINK] = 1;
					} else yi = nodesy[*yc];
					cap[xi][yi] = 1;
				}
			}
		}
		flow = maxflow(SRC, SINK);

		/* output */
		printf("%d\n", x.size() + y.size() - flow);
	}

	return 0;
}
Example #20
int MasterProcess(map<int, Subbasin*>& subbasinMap, set<int>& groupSet, string& projectPath, string& outputFile)
{
	//cout << "Enter master process.\n";
	MPI_Request request;
	MPI_Status status;
	int nSlaves = groupSet.size();
	//cout << "nSlaves " << nSlaves << endl;	
	map< int, vector<int> > groupMap;
	for(set<int>::iterator it = groupSet.begin(); it != groupSet.end(); ++it)
		groupMap[*it] = vector<int>();

	// get the subbasin id list of different groups
	int idOutlet = -1;
	for(map<int,Subbasin*>::iterator it = subbasinMap.begin(); it != subbasinMap.end(); ++it) 
	{
		groupMap[it->second->group].push_back(it->second->id);
		if(it->second->downStream == NULL)
			idOutlet = it->second->id;
	}
	// get the maximum length of the task assignment message
	size_t maxTaskLen = 0;
	for(set<int>::iterator it = groupSet.begin(); it != groupSet.end(); ++it)
	{
		if(groupMap[*it].size() > maxTaskLen)
			maxTaskLen = groupMap[*it].size();
	}

	int nTaskAll = maxTaskLen * groupMap.size();
	int *pSendTask = new int[nTaskAll]; // id of subbasins
	int *pSendRank = new int[nTaskAll]; // distance to the most upstream subbasin
	int *pSendDis = new int[nTaskAll];  // distance to the outlet subbasin
	int *pSendDownStream = new int[nTaskAll]; // id of downstream subbasins
	int *pSendUpNums = new int[nTaskAll];     // number of upstream subbasins
	int *pSendUpStream = new int[nTaskAll * MAX_UPSTREAM]; // ids of upstream subbasins

	int *pSendGroupId = new int[groupMap.size()]; // id of the group

	int iGroup = 0;
	for(set<int>::iterator it = groupSet.begin(); it != groupSet.end(); ++it)
	{
		pSendGroupId[iGroup] = *it;
		vector<int> &vec = groupMap[*it];
		int groupIndex = iGroup * maxTaskLen;
		for(size_t i = 0; i < vec.size(); ++i)
		{
			int id = vec[i];
			pSendTask[groupIndex + i] = id;
			pSendRank[groupIndex + i] = subbasinMap[id]->rank;
			pSendDis[groupIndex + i] = subbasinMap[id]->disToOutlet;
			if(subbasinMap[id]->downStream != NULL)
				pSendDownStream[groupIndex + i] = subbasinMap[id]->downStream->id;
			else
				pSendDownStream[groupIndex + i] = -1;

			int nUps = subbasinMap[id]->upStreams.size();
			pSendUpNums[groupIndex + i] = nUps;
			if(nUps > MAX_UPSTREAM)
			{
				cout << "The number of upstreams exceeds MAX_UPSTREAM.\n";
				exit(-1);
			}
			for(int j = 0; j < nUps; ++j)
			{
				pSendUpStream[MAX_UPSTREAM*(groupIndex+i) + j] = subbasinMap[id]->upStreams[j]->id;
			}
			for(int j = nUps; j < MAX_UPSTREAM; ++j)
			{
				pSendUpStream[MAX_UPSTREAM*(groupIndex+i) + j] = -1;
			}
		}
		for(size_t i = vec.size(); i < maxTaskLen; ++i)
		{
			pSendTask[groupIndex + i] = -1;
			pSendRank[groupIndex + i] = -1;
			pSendDis[groupIndex + i] = -1;
			pSendDownStream[groupIndex + i] = -1;
		}
		iGroup++;
	}

	// send the information to slave0
	//cout << "Sending tasks...\n";
	//cout << "MASTER " << nTaskAll << endl;
	MPI_Send(&nTaskAll, 1, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendGroupId, nSlaves, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendTask, nTaskAll, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendRank, nTaskAll, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendDis, nTaskAll, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendDownStream, nTaskAll, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendUpNums, nTaskAll, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	MPI_Send(pSendUpStream, nTaskAll*MAX_UPSTREAM, MPI_INT, SLAVE0_RANK, WORK_TAG, MPI_COMM_WORLD);
	
	//cout << "Tasks are dispatched.\n";
	
	// loop to receive information from slave process
	bool finished = false;
	float buf[MSG_LEN];
	map<int, int> waitingMap;
	ofstream fOutput(outputFile.c_str());
	while(!finished)
	{
		MPI_Irecv(&buf, MSG_LEN, MPI_FLOAT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &request);
		MPI_Wait(&request, &status);
		//cout << "master info:" << int(buf[1]) << " " << buf[2] << endl;
		// deal with different types of message
		int msgCode = buf[0];
		if(msgCode == 1)// outlet flowout of subbasins, no need to reply
		{
			int id = int(buf[1]); // subbasin id
			//cout << "master: " << id << endl;
			subbasinMap[id]->qOutlet = buf[2];
			subbasinMap[id]->calculated = true;
			time_t t = int(buf[3]);

#ifdef DEBUG_OUTPUT
			cout << "subbasins>> in: " << id << "  all: ";
			for (map<int, Subbasin*>::iterator it = subbasinMap.begin(); it != subbasinMap.end(); it++)
			{
				if (it->second->calculated)
					cout << it->first << " ";
			}
			cout << endl;
#endif
			// check waiting list
			int found = false;
			map<int,int>::iterator it;
			for(it = waitingMap.begin(); it != waitingMap.end(); ++it)
			{
				int gid = it->first;
				int sRank = it->second;
				vector<int>& subs = groupMap[gid];
				for (size_t i = 0; i < subs.size(); i++)
				{
					if(subbasinMap[id]->downStream->id == subs[i])
					{
						// send message to the slave process
						int msgLen = 2;
						MPI_Isend(&msgLen, 1, MPI_INT, sRank, WORK_TAG, MPI_COMM_WORLD, &request);
						float pData[2];
						pData[0] = (float)id;
						pData[1] = subbasinMap[id]->qOutlet;
						MPI_Wait(&request, &status);
						MPI_Isend(pData, msgLen, MPI_FLOAT, sRank, WORK_TAG, MPI_COMM_WORLD, &request);
						MPI_Wait(&request, &status);
#ifdef DEBUG_OUTPUT
						cout << "active >> " << pData[0] << "->" << sRank << endl;
#endif
						found = true;
						
						// delete the current group from waiting group
						waitingMap.erase(it);
						subbasinMap[id]->calculated = false;

						break;
					}
				}

				if(found)
					break;
			}
			

			if(id == idOutlet)
				fOutput << utils::ConvertToString2(&t) << "\t" << setprecision(8) << subbasinMap[id]->qOutlet + deepGw << "\n";
		}
		else if(msgCode == 2) //a slave process is asking for information of the newly calculated upstream subbasins
		{
			map<int, float> transMap; // used to contain flowout of the newly calculated basins
			int gid = int(buf[1]);
			int sRank = int(buf[2]);
			vector<int>& subs = groupMap[gid];
			// loop subbasins in the group
			for (size_t i = 0; i < subs.size(); i++)
			{
				int id = subs[i];
				// for not most upstream basins
				if(subbasinMap[id]->rank > 1)
				{
					// find if their upstream basins are newly calculated
					vector<Subbasin*>& ups = subbasinMap[id]->upStreams;
					for(size_t j = 0; j < ups.size(); j++)
					{
						if(ups[j]->calculated)
						{
							transMap[ups[j]->id] = ups[j]->qOutlet;
							ups[j]->calculated = false;
						}
					}
				}
			}

			if(transMap.empty())
			{
				waitingMap[gid] = sRank;
			}
			else
			{
				// tell the slave process the message length containing new information
				int msgLen = transMap.size() * 2;
				MPI_Isend(&msgLen, 1, MPI_INT, sRank, WORK_TAG, MPI_COMM_WORLD, &request);
				float *pData = new float[msgLen];
				int counter = 0;
				for(map<int, float>::iterator it = transMap.begin(); it != transMap.end(); it++)
				{
					pData[2*counter] = (float)it->first;
					pData[2*counter+1] = it->second;

					counter++;
				}
				MPI_Wait(&request, &status);
				MPI_Isend(pData, msgLen, MPI_FLOAT, sRank, WORK_TAG, MPI_COMM_WORLD, &request);
				MPI_Wait(&request, &status);

#ifdef DEBUG_OUTPUT
				//if(sRank == 1) cout << "master send to rank  " << sRank << " size:" << transMap.size() << " ";
				cout << "positive >> ";
				for(int i = 0; i < msgLen; i += 2)
					cout << pData[i] << "->" << sRank << " ";
				cout << endl;
#endif
				delete[] pData; // allocated with new[], so release with delete[]
			}
		}
		else if(msgCode == 0) // reset all qOutlet information
		{
			for(map<int,Subbasin*>::iterator it = subbasinMap.begin(); it != subbasinMap.end(); ++it)
			{
				it->second->calculated = false;
				it->second->qOutlet = 0.f;
			}
#ifdef DEBUG_OUTPUT
			cout << "master: newround" << endl;
#endif
		}
		else if(msgCode == 9)
		{
			finished = true;
			//cout << "Exit from the master process.\n";
		}
	}
	fOutput.close();
	
	for(map<int,Subbasin*>::iterator it = subbasinMap.begin(); it != subbasinMap.end(); ++it) 
	{
		delete it->second;
	}
	delete[] pSendTask;
	delete[] pSendRank;
	delete[] pSendDis;
	delete[] pSendDownStream;
	delete[] pSendUpNums;
	delete[] pSendUpStream;

	return 0;
}
Example #21
void draw_overlay()
{
  stringstream ss;

  if (do_overlay) {
    
    // which image is this?

    ss << current_index << ": " << current_imname;
    string str = ss.str();

    putText(draw_im, str, Point(5, 10), FONT_HERSHEY_SIMPLEX, fontScale, Scalar::all(255), 1, 8);

    // isolation stats

    if (!bad_current_index && !vert_current_index) {
      ss.str("");
      ss << "max dist = " << max_closest_vert_dist << ", this dist = " << ClosestVert_dist[current_index];
      putText(draw_im, ss.str(), Point(5, 25), FONT_HERSHEY_SIMPLEX, fontScale, Scalar(255, 255, 255), 1, 8);
    }

    // save status

    int num_unsaved = Vert_idx_set.size() - num_saved_verts;

    if (num_unsaved > 0) {
      ss.str("");
      if (num_unsaved == 1)
	ss << num_unsaved << " image with unsaved verts [" << Vert_idx_set.size() << "]";
      else
	ss << num_unsaved << " images with unsaved verts [" << Vert_idx_set.size() << "]";
      str = ss.str();
      putText(draw_im, str, Point(5, 315), FONT_HERSHEY_SIMPLEX, 1.5 * fontScale, Scalar(0, 0, 255), 1, 8);
    }

    // show crop rectangle:

    if (do_show_crop_rect) {

      int center_x = draw_im.cols / 2;
      int xl = center_x - output_crop_width/2;
      int xr = center_x + output_crop_width/2;
      int yt = output_crop_top_y;
      int yb = output_crop_top_y + output_crop_height;

      rectangle(draw_im,  
		Point(xl, yt),
		Point(xr, yb),
		Scalar(255, 255, 255), 2);
      
    }

    // are we in "random next image" mode?

    if (do_random) 
      putText(draw_im, "R", Point(5, 40), FONT_HERSHEY_SIMPLEX, fontScale, Scalar(0, 0, 255), 1, 8);

    // are we in "bad image" marking mode?

    if (do_bad) 
      putText(draw_im, "B", Point(15, 40), FONT_HERSHEY_SIMPLEX, fontScale, Scalar(0, 0, 255), 1, 8);

    // are we in "verts only" mode?

    if (do_verts) 
      putText(draw_im, "V", Point(25, 40), FONT_HERSHEY_SIMPLEX, fontScale, Scalar(0, 0, 255), 1, 8);

    // is this a "bad" image?

    if (bad_current_index) {
      setChannel(draw_im, 2, 255);
      return;
    }

    // is this an image for which we have ground-truth trail edges?

    else if (vert_current_index) 
      setChannel(draw_im, 0, 200);
   
    // horizontal lines for trail edge rows

    for (int i = 0; i < trailEdgeRow.size(); i++) 
      line(draw_im, Point(0, trailEdgeRow[i]), Point(current_im.cols - 1, trailEdgeRow[i]), Scalar(0, 128, 128), 1);

    // trail edge vertices

    int r, g;

    if (erasing) {
      r = 255; g = 0;
    }
    else {
      r = 0; g = 255;
    }

    for (int i = 0; i < Vert[current_index].size(); i += 2) {

      // only draw line segment if we have a PAIR of verts (this will NOT be the case while a new segment is being drawn) 

      if (i + 1 < Vert[current_index].size())
	line(draw_im, Vert[current_index][i], Vert[current_index][i + 1], Scalar(0, g, r), 2);
    }

  }
}
Example #22
0
void Projector::projectFunctionOntoBasis(FieldContainer<double> &basisCoefficients, FunctionPtr fxn, 
                                         BasisPtr basis, BasisCachePtr basisCache, IPPtr ip, VarPtr v,
                                         set<int> fieldIndicesToSkip) {
  CellTopoPtr cellTopo = basis->domainTopology();
  DofOrderingPtr dofOrderPtr = Teuchos::rcp(new DofOrdering());
  
  if (! fxn.get()) {
    TEUCHOS_TEST_FOR_EXCEPTION(true, std::invalid_argument, "fxn cannot be null!");
  }
  
  int cardinality = basis->getCardinality();
  int numCells = basisCache->getPhysicalCubaturePoints().dimension(0);
  int numDofs = cardinality - fieldIndicesToSkip.size();
  if (numDofs==0) {
    // we're skipping all the fields, so just initialize basisCoefficients to 0 and return
    basisCoefficients.resize(numCells,cardinality);
    basisCoefficients.initialize(0);
    return;
  }
  
  FieldContainer<double> gramMatrix(numCells,cardinality,cardinality);
  FieldContainer<double> ipVector(numCells,cardinality);

  // fake a DofOrdering
  DofOrderingPtr dofOrdering = Teuchos::rcp( new DofOrdering );
  if (! basisCache->isSideCache()) {
    dofOrdering->addEntry(v->ID(), basis, v->rank());
  } else {
    dofOrdering->addEntry(v->ID(), basis, v->rank(), basisCache->getSideIndex());
  }
  
  ip->computeInnerProductMatrix(gramMatrix, dofOrdering, basisCache);
  ip->computeInnerProductVector(ipVector, v, fxn, dofOrdering, basisCache);
  
//  cout << "physical points for projection:\n" << basisCache->getPhysicalCubaturePoints();
//  cout << "gramMatrix:\n" << gramMatrix;
//  cout << "ipVector:\n" << ipVector;
  
  map<int,int> oldToNewIndices;
  if (fieldIndicesToSkip.size() > 0) {
    // the code to do with fieldIndicesToSkip might not be terribly efficient...
    // (but it's not likely to be called too frequently)
    int i_indices_skipped = 0;
    for (int i=0; i<cardinality; i++) {
      int new_index;
      if (fieldIndicesToSkip.find(i) != fieldIndicesToSkip.end()) {
        i_indices_skipped++;
        new_index = -1;
      } else {
        new_index = i - i_indices_skipped;
      }
      oldToNewIndices[i] = new_index;
    }
    
    FieldContainer<double> gramMatrixFiltered(numCells,numDofs,numDofs);
    FieldContainer<double> ipVectorFiltered(numCells,numDofs);
    // now filter out the values that we're to skip
    
    for (int cellIndex=0; cellIndex<numCells; cellIndex++) {
      for (int i=0; i<cardinality; i++) {
        int i_filtered = oldToNewIndices[i];
        if (i_filtered == -1) {
          continue;
        }
        ipVectorFiltered(cellIndex,i_filtered) = ipVector(cellIndex,i);
        
        for (int j=0; j<cardinality; j++) {
          int j_filtered = oldToNewIndices[j];
          if (j_filtered == -1) {
            continue;
          }
          gramMatrixFiltered(cellIndex,i_filtered,j_filtered) = gramMatrix(cellIndex,i,j);
        }
      }
    }
//    cout << "gramMatrixFiltered:\n" << gramMatrixFiltered;
//    cout << "ipVectorFiltered:\n" << ipVectorFiltered;
    gramMatrix = gramMatrixFiltered;
    ipVector = ipVectorFiltered;
  }
  
  for (int cellIndex=0; cellIndex<numCells; cellIndex++){
    
    // TODO: rewrite to take advantage of SerialDenseWrapper...
    Epetra_SerialDenseSolver solver;
    
    Epetra_SerialDenseMatrix A(Copy,
                               &gramMatrix(cellIndex,0,0),
                               gramMatrix.dimension(2), 
                               gramMatrix.dimension(2),  
                               gramMatrix.dimension(1)); // stride -- fc stores in row-major order (a.o.t. SDM)
    
    Epetra_SerialDenseVector b(Copy,
                               &ipVector(cellIndex,0),
                               ipVector.dimension(1));
    
    Epetra_SerialDenseVector x(gramMatrix.dimension(1));
    
    solver.SetMatrix(A);
    int info = solver.SetVectors(x,b);
    if (info!=0){
      cout << "projectFunctionOntoBasis: failed to SetVectors with error " << info << endl;
    }
    
    bool equilibrated = false;
    if (solver.ShouldEquilibrate()){
      solver.EquilibrateMatrix();
      solver.EquilibrateRHS();      
      equilibrated = true;
    }   
    
    info = solver.Solve();
    if (info!=0){
      cout << "projectFunctionOntoBasis: failed to solve with error " << info << endl;
    }
    
    if (equilibrated) {
      int successLocal = solver.UnequilibrateLHS();
      if (successLocal != 0) {
        cout << "projection: unequilibration FAILED with error: " << successLocal << endl;
      }
    }
    
    basisCoefficients.resize(numCells,cardinality);
    for (int i=0;i<cardinality;i++) {
      if (fieldIndicesToSkip.size()==0) {
        basisCoefficients(cellIndex,i) = x(i);
      } else {
        int i_filtered = oldToNewIndices[i];
        if (i_filtered==-1) {
          basisCoefficients(cellIndex,i) = 0.0;
        } else {
          basisCoefficients(cellIndex,i) = x(i_filtered);
        }
      }
    }
    
  }
}
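The fieldIndicesToSkip handling above hinges on one small remapping step: each old basis index maps either to -1 (skipped) or to a compacted index in the filtered Gram matrix and vector. A self-contained sketch of just that remapping, under the assumption that skipped indices are simply dropped from the filtered containers:

// Sketch: build the old-index -> filtered-index map used when some basis
// fields are skipped. Skipped indices map to -1; the rest are shifted left.
#include <map>
#include <set>

std::map<int, int> buildOldToNewIndices(int cardinality, const std::set<int>& fieldIndicesToSkip)
{
  std::map<int, int> oldToNewIndices;
  int numSkipped = 0;
  for (int i = 0; i < cardinality; i++) {
    if (fieldIndicesToSkip.find(i) != fieldIndicesToSkip.end()) {
      numSkipped++;
      oldToNewIndices[i] = -1;               // this row/column is filtered out
    } else {
      oldToNewIndices[i] = i - numSkipped;   // compacted position
    }
  }
  return oldToNewIndices;
}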
Example #23
0
void RefinementHistory::hUnrefine(const set<GlobalIndexType> &cellIDs) {
  if (cellIDs.size() == 0) return;
  Refinement ref = make_pair(H_UNREFINEMENT, cellIDs);
  _refinements.push_back(ref);
}
Example #24
0
void myGraph::subPathes(set<int> &preNodeSet, vector<vector<int>> &Edge_t, set<int>& nodeSet, vector<int> &nodeRanks)
{
	float maxlength=-100000;
	int maxid=-1;
	vector<vector<vector<int>>> maxPath;	
	bool haveHead=false;
	set<int> headSet, endSet;
		
	for(int i=0; i<Edge_t.size(); i++)
	{
		headSet.insert(Edge_t[i][0]); endSet.insert(Edge_t[i][1]);
	}		
	for(set<int>::iterator it=nodeSet.begin(); it!=nodeSet.end(); it++)			
	{    
		int start=*it;
		if(headSet.find(start)!=headSet.end() && endSet.find(start)==endSet.end()) //a top node
		{
			int length;
			vector<vector<vector<int>>> path = getShortestPath(start, length, Edge_t); //edgeSet, nodeSet); 					
			if(length > maxlength)
			{
				maxlength = length;
				maxid = start;
				maxPath=path;
			}
			haveHead=true;
		}
	}	
	if(!haveHead)
	for(set<int>::iterator it=nodeSet.begin(); it!=nodeSet.end(); it++)			
	{    
		int start=*it;
		{
			int length;
			vector<vector<vector<int>>> path = getShortestPath(start, length, Edge_t); //edgeSet[sid], nodeSet); 					
			if(length > maxlength)
			{
				maxlength = length;
				maxid = start;
				maxPath=path;
			}
			haveHead=true;
		}
	}	
	// initial rank:
	// get the rank by redoing the shortest-path search on the minimal spanning tree
	//Graph initG;
	vector<vector<int>> Edge_i=maxPath[0];
	for(int k=0; k<Edge_i.size(); k++)
	{
		//if(Edge[k][0]==etemp[0] || Edge[k][1]==etemp[1])
		//add_edge(maxPath[k][0], maxPath[k][1], 1, initG);
		Edge_i[k].push_back(1);
	}
	int length;
	vector<vector<vector<int>>> initPath = getShortestPath(maxid, length, Edge_i);//, edgeSet[sid], nodeSet); 
	// initPath should carry the rank (distance) info
	vector<int> nodeRanks_t(nodeRanks.size(),20000);

	nodeRanks_t[maxid]=0;
	for(int k=0; k<initPath[0].size(); k++)
	{
		int v=initPath[0][k][1], d=initPath[0][k][initPath[0][k].size()-1];
		nodeRanks_t[v]=d;
	}						
	if(initPath[0].size()!=Edge_i.size())
	{
		// error: edge counts of the recomputed path and the spanning tree disagree; not handled here
	}

	//reduce the searched graph
	Edge_t=maxPath[1];
	for(int i=0; i<maxPath[0].size(); i++)
	{
		preNodeSet.insert(maxPath[0][i][0]);  preNodeSet.insert(maxPath[0][i][1]);
	}
	if(preNodeSet.size()==nodeSet.size())
	{
		nodeSet.clear();
	}
	else
	{
		for(int i=0; i<Edge_t.size(); i++)
	    {
		  nodeSet.insert(Edge_t[i][0]);  nodeSet.insert(Edge_t[i][1]);
	    }
	}
	//reassign rank level to nodeRanks_t
	int markid=-1, markLevel=20000, dL=20000;
	for(int i=0; i<nodeRanks_t.size(); i++)
	{
	   if(nodeRanks_t[i]!=20000 && nodeRanks[i]!=20000 && nodeRanks_t[i]!=nodeRanks[i])
	   {
	       markid=i;  
		   dL = nodeRanks[i]-nodeRanks_t[i];
		   markLevel=nodeRanks_t[i];
	   }
	}
	if(markid>=0)
	for(int i=0; i<nodeRanks_t.size(); i++)
	{
	   if(nodeRanks_t[i]!=20000)
	   {
	       nodeRanks_t[i]=nodeRanks_t[i] + dL;
	   }
	}
	int negFlag=0;
	for(int i=0; i<nodeRanks.size(); i++)
	{
	   if(nodeRanks[i]==20000  && nodeRanks_t[i]!=20000)
	   {
	       nodeRanks[i]=nodeRanks_t[i];
		   if(nodeRanks_t[i]<negFlag)
			{
				negFlag=nodeRanks_t[i];
		   }
	   }
	}	
	if( negFlag<0)
	{
	    negFlag = -negFlag;
		for(int i=0; i<nodeRanks.size(); i++)
		{
			if(nodeRanks[i]!=20000)
			   nodeRanks[i]= nodeRanks[i]+negFlag;
		}
	}
}
Example #25
0
bool filesystem_destroy(){
	while( file_set.size() ) bbCloseFile( *file_set.begin() );
	gx_runtime->closeFileSystem( gx_filesys );
	return true;
}
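filesystem_destroy above drains file_set by repeatedly closing whatever element happens to be first; the loop only terminates because bbCloseFile is expected to erase the closed handle from the set. A generic, hedged sketch of that drain-while-closing idiom (the names here are illustrative, not the engine's actual API):

// Sketch of the drain-while-closing idiom: each close call erases its handle
// from the registry, so the loop makes progress and ends when the set is empty.
#include <set>

static std::set<int> open_handles;   // hypothetical handle registry

void close_handle(int h){
	// ... release the underlying resource here ...
	open_handles.erase(h);
}

void close_all_handles(){
	while( open_handles.size() ) close_handle( *open_handles.begin() );
}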
Example #26
0
//--------------------------------------
bool ofGetMousePressed(int button){ //by default any button
	if(button==-1) return pressedMouseButtons.size();
	return pressedMouseButtons.find(button)!=pressedMouseButtons.end();
}
Example #27
0
 size_t count(Expr expr) {
   indexVars.clear();
   expr.accept(this);
   return indexVars.size();
 }
Example #28
0
//--------------------------------------
bool ofGetKeyPressed(int key){
	if(key==-1) return pressedKeys.size();
	return pressedKeys.find(key)!=pressedKeys.end();
}
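Examples #26 and #28 both answer an "is anything pressed?" query against a set of currently pressed ids: -1 means "any", otherwise membership in the set is checked. A minimal sketch of the bookkeeping such queries assume (the event hooks and the set itself are assumptions for illustration, not the framework's actual internals):

// Sketch: maintain the pressed-key set that an ofGetKeyPressed-style query reads.
#include <set>

static std::set<int> pressedKeys;

void onKeyPressed(int key){
	pressedKeys.insert(key);
}

void onKeyReleased(int key){
	pressedKeys.erase(key);
}

bool isKeyPressed(int key){
	if(key==-1) return pressedKeys.size();   // any key currently down?
	return pressedKeys.find(key)!=pressedKeys.end();
}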
Example #29
0
bool CGroupModel::insertNewMember(uint32_t nGroupId, set<uint32_t>& setUsers)
{
    bool bRet = false;
    uint32_t nUserCnt = (uint32_t)setUsers.size();
    if(nGroupId != INVALID_VALUE &&  nUserCnt > 0)
    {
        CDBManager* pDBManager = CDBManager::getInstance();
        CDBConn* pDBConn = pDBManager->GetDBConn("teamtalk_slave");
        if (pDBConn)
        {
            uint32_t nCreated = (uint32_t)time(NULL);
            // fetch the users that are already in the group
            string strClause;
            bool bFirst = true;
            for (auto it=setUsers.begin(); it!=setUsers.end(); ++it)
            {
                if(bFirst)
                {
                    bFirst = false;
                    strClause = int2string(*it);
                }
                else
                {
                    strClause += ("," + int2string(*it));
                }
            }
            string strSql = "select userId from IMGroupMember where groupId=" + int2string(nGroupId) + " and userId in (" + strClause + ")";
            CResultSet* pResult = pDBConn->ExecuteQuery(strSql.c_str());
            set<uint32_t> setHasUser;
            if(pResult)
            {
                while (pResult->Next()) {
                    setHasUser.insert(pResult->GetInt("userId"));
                }
                delete pResult;
            }
            else
            {
                log("no result for sql:%s", strSql.c_str());
            }
            pDBManager->RelDBConn(pDBConn);
            
            pDBConn = pDBManager->GetDBConn("teamtalk_master");
            if (pDBConn)
            {
                CacheManager* pCacheManager = CacheManager::getInstance();
                CacheConn* pCacheConn = pCacheManager->GetCacheConn("group_member");
                if (pCacheConn)
                {
                    // reset the status of users already in the group
                    if (!setHasUser.empty())
                    {
                        strClause.clear();
                        bFirst = true;
                        for (auto it=setHasUser.begin(); it!=setHasUser.end(); ++it) {
                            if(bFirst)
                            {
                                bFirst = false;
                                strClause = int2string(*it);
                            }
                            else
                            {
                                strClause += ("," + int2string(*it));
                            }
                        }
                        
                        strSql = "update IMGroupMember set status=0, updated="+int2string(nCreated)+" where groupId=" + int2string(nGroupId) + " and userId in (" + strClause + ")";
                        pDBConn->ExecuteUpdate(strSql.c_str());
                    }
                    strSql = "insert into IMGroupMember(`groupId`, `userId`, `status`, `created`, `updated`) values\
                    (?,?,?,?,?)";
                    
                    // insert the new members
                    auto it = setUsers.begin();
                    uint32_t nStatus = 0;
                    uint32_t nIncMemberCnt = 0;
                    for (;it != setUsers.end();)
                    {
                        uint32_t nUserId = *it;
                        if(setHasUser.find(nUserId) == setHasUser.end())
                        {
                            CPrepareStatement* pStmt = new CPrepareStatement();
                            if (pStmt->Init(pDBConn->GetMysql(), strSql))
                            {
                                uint32_t index = 0;
                                pStmt->SetParam(index++, nGroupId);
                                pStmt->SetParam(index++, nUserId);
                                pStmt->SetParam(index++, nStatus);
                                pStmt->SetParam(index++, nCreated);
                                pStmt->SetParam(index++, nCreated);
                                pStmt->ExecuteUpdate();
                                ++nIncMemberCnt;
                                delete pStmt;
                            }
                            else
                            {
                                setUsers.erase(it++);
                                delete pStmt;
                                continue;
                            }
                        }
                        ++it;
                    }
                    if(nIncMemberCnt != 0)
                    {
                        strSql = "update IMGroup set userCnt=userCnt+" + int2string(nIncMemberCnt) + " where id="+int2string(nGroupId);
                        pDBConn->ExecuteUpdate(strSql.c_str());
                    }
                    
                    // mirror the membership to redis
                    string strKey = "group_member_"+int2string(nGroupId);
                    for(auto it = setUsers.begin(); it!=setUsers.end(); ++it)
                    {
                        pCacheConn->hset(strKey, int2string(*it), int2string(nCreated));
                    }
                    pCacheManager->RelCacheConn(pCacheConn);
                    bRet = true;
                }
                else
                {
                    log("no cache connection");
                }
                pDBManager->RelDBConn(pDBConn);
            }
            else
            {
                // assumed failure handling, mirroring the cache-connection branch above
                log("no db connection");
            }
        }
    }
    return bRet;
}
Example #30
0
 double computeTightness()
 {
     tight = (double)penalty * abs((int)permitted.size() - (int)x->getDomainSize()) / x->getDomainSize();
     return tight;
 }
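As a quick hedged check of the formula above (the numbers are made up for illustration): with penalty = 10, a variable whose domain has 8 values, and 6 of them currently permitted, tight = 10 * |6 - 8| / 8 = 2.5; if every domain value were permitted, the measure would drop to 0.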