Code example #1
A::operator std::size_t () const {
    std::size_t retval = 0;
    accumulate(retval, b);
    #ifndef BOOST_NO_INT64_T
    accumulate(retval, f);
    accumulate(retval, g);
    #endif
    accumulate(retval, l);
    accumulate(retval, m);
    accumulate(retval, n);
    accumulate(retval, o);
    accumulate(retval, p);
    accumulate(retval, q);
    #ifndef BOOST_NO_CWCHAR
    accumulate(retval, r);
    #endif
    accumulate(retval, c);
    accumulate(retval, s);
    accumulate(retval, t);
    accumulate(retval, u);
    accumulate(retval, v);
    return retval;
}
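The accumulate helper that this conversion operator calls is not shown in the snippet. A minimal self-contained sketch of an overload with the same shape, assuming it simply folds each member into the running value (hypothetical, not the original project's helper):

#include <cstddef>

// hypothetical helper: fold one arithmetic member into the running std::size_t
template <class T>
void accumulate(std::size_t& accumulator, const T& value)
{
    // simple hash-combine style mixing; the real project may combine differently
    accumulator = accumulator * 31 + static_cast<std::size_t>(value);
}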
Code example #2
// The template parameters below are assumed from the argument names; the original snippet omits the declaration.
template<class ExecutionPolicy, class Range, class BinaryOperator>
__host__ __device__
agency::experimental::range_value_t<Range>
  accumulate_nonempty(ExecutionPolicy policy, Range&& rng, BinaryOperator binary_op)
{
  return accumulate(policy, agency::experimental::drop(std::forward<Range>(rng), 1), std::forward<Range>(rng)[0], binary_op);
}
Code example #3
File: SpParHelper.cpp  Project: Aguomath/CombBLAS_15
void SpParHelper::MemoryEfficientPSort(pair<KEY,VAL> * array, IT length, IT * dist, const MPI_Comm & comm)
{
	int nprocs, myrank;
	MPI_Comm_size(comm, &nprocs);
	MPI_Comm_rank(comm, &myrank);
	int nsize = nprocs / 2;	// new size
	if(nprocs < 1000)
	{
		bool excluded =  false;
		if(dist[myrank] == 0)	excluded = true;

		int nreals = 0; 
		for(int i=0; i< nprocs; ++i)	
			if(dist[i] != 0) ++nreals;

		if(nreals == nprocs)	// general case
		{
			long * dist_in = new long[nprocs];
			for(int i=0; i< nprocs; ++i)    dist_in[i] = (long) dist[i];
			vpsort::parallel_sort (array, array+length,  dist_in, comm);
			delete [] dist_in;
		}
		else
		{
			long * dist_in = new long[nreals];
			int * dist_out = new int[nprocs-nreals];	// ranks to exclude
			int indin = 0;
			int indout = 0;
			for(int i=0; i< nprocs; ++i)	
			{
				if(dist[i] == 0)
					dist_out[indout++] = i;
				else
					dist_in[indin++] = (long) dist[i];	
			}
		
			#ifdef DEBUG	
			ostringstream outs;
			outs << "To exclude indices: ";
			copy(dist_out, dist_out+indout, ostream_iterator<int>(outs, " ")); outs << endl;
			SpParHelper::Print(outs.str());
			#endif

			MPI_Group sort_group, real_group;
			MPI_Comm_group(comm, &sort_group);
			MPI_Group_excl(sort_group, indout, dist_out, &real_group);
			MPI_Group_free(&sort_group);

			// The Create() function should be executed by all processes in comm, 
			// even if they do not belong to the new group (those processes get MPI_COMM_NULL back in real_comm)
			// MPI::Intracomm MPI::Intracomm::Create(const MPI::Group& group) const;
			MPI_Comm real_comm;
			MPI_Comm_create(comm, real_group, &real_comm);
			if(!excluded)
			{
				vpsort::parallel_sort (array, array+length,  dist_in, real_comm);
				MPI_Comm_free(&real_comm);
			}
			MPI_Group_free(&real_group);
			delete [] dist_in;
			delete [] dist_out;
		}
	}
	else
	{
		IT gl_median = accumulate(dist, dist+nsize, static_cast<IT>(0));	// global rank of the first element of the median processor
		sort(array, array+length);	// re-sort because we might have swapped data in previous iterations
		int color = (myrank < nsize)? 0: 1;
		
		pair<KEY,VAL> * low = array;
		pair<KEY,VAL> * upp = array;
		GlobalSelect(gl_median, low, upp, array, length, comm);
		BipartiteSwap(low, array, length, nsize, color, comm);

		if(color == 1)	dist = dist + nsize;	// adjust for the second half of processors

		// recursive call; two implicit 'spawn's where half of the processors execute with different parameters
		// MPI::Intracomm MPI::Intracomm::Split(int color, int key) const;

		MPI_Comm halfcomm;
		MPI_Comm_split(comm, color, myrank, &halfcomm);	// split into two communicators
		MemoryEfficientPSort(array, length, dist, halfcomm);
	}
}
Code example #4
#include <iostream>
#include <iterator>
#include <numeric>
int main(){
	using in=std::istream_iterator<int>;
	// unqualified accumulate resolves to std::accumulate via argument-dependent lookup on std::istream_iterator
	std::cout << accumulate(in{std::cin},in{},0);
}
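The program above sums whitespace-separated integers read from standard input until end-of-file. For reference, a self-contained variant of the same idiom that reads from a string instead of std::cin (hypothetical test harness, not part of the original snippet):

#include <iostream>
#include <iterator>
#include <numeric>
#include <sstream>

int main(){
	std::istringstream input("1 2 3 4");
	using in = std::istream_iterator<int>;
	std::cout << std::accumulate(in{input}, in{}, 0) << '\n';   // prints 10
}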
Code example #5
File: forecast_machine.cpp  Project: thk686/rEDM
void ForecastMachine::simplex_prediction(const size_t start, const size_t end)
{
    size_t curr_pred, effective_nn, num_ties;
    double min_distance, tie_distance;
    vec weights;
    std::vector<size_t> nearest_neighbors;
    double tie_adj_factor;
    double total_weight;
    std::vector<size_t> temp_lib;
    
    for(size_t k = start; k < end; ++k)
    {
        curr_pred = which_pred[k];
        
        // find nearest neighbors
        if(CROSS_VALIDATION)
        {
            temp_lib = which_lib;
            adjust_lib(curr_pred);
            nearest_neighbors = find_nearest_neighbors(distances.innerVector(curr_pred));
            which_lib = temp_lib;
        }
        else
        {
            nearest_neighbors = find_nearest_neighbors(distances.innerVector(curr_pred));
        }
        effective_nn = nearest_neighbors.size();
        
        if(effective_nn == 0)
        {
            predicted[curr_pred] = qnan;
            LOG_WARNING("no nearest neighbors found; using NA for forecast");
            continue;
        }
        
        // compute weights
        min_distance = distances.coeff(curr_pred, nearest_neighbors[0]);
        weights.assign(effective_nn, min_weight);
        if(min_distance == 0)
        {
            for(size_t k = 0; k < effective_nn; ++k)
            {
                if(distances.coeff(curr_pred, nearest_neighbors[k]) == min_distance)
                    weights[k] = 1;
                else
                    break;
            }
        }
        else
        {
            for(size_t k = 0; k < effective_nn; ++k)
            {
                weights[k] = fmax(exp(-distances.coeff(curr_pred, nearest_neighbors[k]) / min_distance),
                                 min_weight);
            }
        }
        
        // identify ties and adjust weights
        if(effective_nn > nn) // ties exist
        {
            tie_distance = distances.coeff(curr_pred, nearest_neighbors.back());
            
            // count ties
            num_ties = 0;
            for(auto& neighbor_index: nearest_neighbors)
                if(distances.coeff(curr_pred, neighbor_index) == tie_distance)
                    num_ties++;
            
            tie_adj_factor = double(num_ties + nn - effective_nn) / double(num_ties);
            
            // adjust weights
            for(size_t k = 0; k < nearest_neighbors.size(); ++k)
                if(distances.coeff(curr_pred, nearest_neighbors[k]) == tie_distance)
                    weights[k] *= tie_adj_factor;
        }
        
        // make prediction
        total_weight = accumulate(weights.begin(), weights.end(), 0.0);
        predicted[curr_pred] = 0;
        for(size_t k = 0; k < effective_nn; ++k)
            predicted[curr_pred] += weights[k] * targets[nearest_neighbors[k]];
        predicted[curr_pred] = predicted[curr_pred] / total_weight;
        
        //compute variance
        predicted_var[curr_pred] = 0;
        for(size_t k = 0; k < effective_nn; ++k)
            predicted_var[curr_pred] += weights[k] * pow(targets[nearest_neighbors[k]] - predicted[curr_pred], 2);
        predicted_var[curr_pred] = predicted_var[curr_pred] / total_weight;
    }
    return;
}
Code example #6
// This snippet relies on globals defined elsewhere in the original file (assumed here from usage):
// a DP table along the lines of map<int, int> dp[21][501][2], a modulus constant MOD,
// libcomp::string::AhoCorasick, and using namespace std.
int main(){
	ios_base::sync_with_stdio(false);
	while(true){
		int N, M, K;
		cin >> N >> M >> K;
		if(N == 0 && M == 0 && K == 0){ break; }
		vector<int> next_words[501];
		map<string, int> word_map;
		vector<string> words(1);
		word_map.insert(pair<string, int>("", 0));
		for(int i = 0; i < N; ++i){
			string from, to;
			cin >> from >> to;
			int from_index = 0, to_index = 0;
			if(word_map.find(from) != word_map.end()){
				from_index = word_map[from];
			}else{
				from_index = word_map.size();
				word_map[from] = from_index;
				words.push_back(from);
				next_words[0].push_back(from_index);
			}
			if(word_map.find(to) != word_map.end()){
				to_index = word_map[to];
			}else{
				to_index = word_map.size();
				word_map[to] = to_index;
				words.push_back(to);
				next_words[0].push_back(to_index);
			}
			next_words[from_index].push_back(to_index);
		}
		vector<string> season_words(K);
		for(int i = 0; i < K; ++i){ cin >> season_words[i]; }
		libcomp::string::AhoCorasick ac(season_words);
		int state_num = ac.size();
		for(int i = 0; i < 21; ++i){
			for(int j = 0; j < 501; ++j){
				for(int k = 0; k < 2; ++k){ dp[i][j][k].clear(); }
			}
		}
		dp[0][0][0][0] = 1;
		vector<int> result(K);
		for(int i = 0; i < M; ++i){
			for(int j = 0; j < words.size(); ++j){
				for(int k = 0; k < 2; ++k){
					for(
						map<int, int>::iterator it = dp[i % 21][j][k].begin();
						it != dp[i % 21][j][k].end(); ++it)
					{
						for(int n = 0; n < next_words[j].size(); ++n){
							int next = next_words[j][n];
							string &word = words[next];
							if(i + word.size() > M){ continue; }
							fill(result.begin(), result.end(), 0);
							int s = ac.match(word, result, it->first);
							int m = k + accumulate(result.begin(), result.end(), 0);
							if(m >= 2){ continue; }
							dp[(i + word.size()) % 21][next][m][s] += it->second;
							dp[(i + word.size()) % 21][next][m][s] %= MOD;
						}
					}
					dp[i % 21][j][k].clear();
				}
			}
		}
		int answer = 0;
		for(int i = 0; i < words.size(); ++i){
			for(int j = 0; j < state_num; ++j){
				answer += dp[M % 21][i][1][j];
				answer %= MOD;
			}
		}
		cout << answer << endl;
	}
	return 0;
}
Code example #7
IWorkloadVector* ISMPWHWorkload::compute()
{
	IMatrixPolynomial l_int = this->m_ptrData->getIdleDistributions();
	IMatrixPolynomial v_int = this->m_ptrData->getPhaseDistributions();
	
	const int h = v_int.degree();
	const int m = Ub(l_int[0],1)-Lb(l_int[0],1)+1;
	
	ivector ret(0, this->getUpTo());
	
	// Both SMP/G/1 and GI/G/1
	
	// Compute V(1)
	const imatrix v1 = v_int.sum();
	Logging::log(Logging::Debug, "v1", v1);
	
	const imatrix I = IMatrixUtils::identity(1,m);
	
	// E(N) = (I-V(1))^-1
    const ivector e_von_n = IMatrixUtils::sumRows(IMatrixUtils::invert(I-v1));
    Logging::log(Logging::Debug, "E(N)", e_von_n);
    
    // l_i = sum_i l_i l_ij, l_ij = sum_k l_ij(k)
    ivector l_i = computeLI(l_int);
    Logging::log(Logging::Debug, "l_i", l_i);
    
    // sum_i l_i E(N_i)
    idotprecision nenner(0.0);
    for ( int i = 1; i <= m; i++ )
    	accumulate(nenner, l_i[i], e_von_n[i]);
        
	imatrix e_von_n_i_w_i(0, getUpTo(), 1,m);
	const imatrix inverse = IMatrixUtils::invert(I-v_int[0]);
	Logging::log(Logging::Debug, "(I-V[0])^-1", inverse);
    e_von_n_i_w_i[0] = IMatrixUtils::sumRows(inverse);
    idotprecision zaehler;
    zaehler = 0.0;

    for ( int i = 1; i <= m; i++ )
    	accumulate(zaehler, l_i[i], e_von_n_i_w_i[0][i]);
    

    /* GI/G/1 optimization */
	if(m == 1) // GI/G/1 case
	{
	    ret[0] = (I-v1)[1][1] / (I-v_int[0])[1][1]; 
	}
	else // SMP/G/1 case
	{
	    ret[0] = rnd(zaehler) / rnd(nenner);
	}
	
	int minimum = 0;
	for(int n = 1; n<=getUpTo(); ++n)
    {
       // Compute w(n)
        minimum = std::min<int>( h, n );
        
        ivector rechte_seite(1,m);
        for ( int i = 1; i <= m; i++ )
           rechte_seite[i] = 0.0;

        for ( int k = 1; k <= minimum; k++ )
           rechte_seite += v_int[k] * e_von_n_i_w_i[n-k];
        
        e_von_n_i_w_i[n] = inverse * rechte_seite;
                                
        zaehler = 0.0;

        for ( int i = 1; i <= m; i++ )
        	accumulate(zaehler, l_i[i], e_von_n_i_w_i[n][i]);
		ret[n] = rnd(zaehler) / rnd(nenner);
    }
	
	Logging::log(Logging::Debug, "Workload vector", ret);
	
	return new IWorkloadVector(ret);
}
Code example #8
float TactileValueArray::getRangeLambda() const {
	return accumulate([](const TactileValue &self) {return self.getRangeLambda();}, Sum, true);
}
Code example #9
float TactileValueArray::getReleaseDecay() const {
	return accumulate([](const TactileValue &self) {return self.getReleaseDecay();}, Sum, true);
}
Code example #10
File: SiteContainerBuilder.cpp  Project: kgori/bpp
/*
Folds vector of VSC into single VSC using merge_vscs()
*/
shared_ptr<VectorSiteContainer> SiteContainerBuilder::concatenate_alignments(vector<shared_ptr<VectorSiteContainer>> vec_of_vsc) {
    auto first_vsc = make_shared<VectorSiteContainer>(*vec_of_vsc[0]);
    return accumulate(vec_of_vsc.begin()+1, vec_of_vsc.end(), first_vsc, merge_vscs);
//    return merge_vscs(first_vsc, vec_of_vsc[1]);
}
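merge_vscs itself is not shown; concatenate_alignments relies on the generic std::accumulate overload that folds a range with a binary function, seeded here with a copy of the first container. A minimal self-contained illustration of the same fold shape, using strings as a stand-in for the VectorSiteContainer pointers:

#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// stand-in for merge_vscs: combines the running result with the next element
static std::string merge(std::string acc, const std::string& next) {
    return acc + next;
}

int main() {
    std::vector<std::string> parts{"AC", "GT", "TT"};
    // seed with the first element, then fold the rest, as concatenate_alignments does
    std::string all = std::accumulate(parts.begin() + 1, parts.end(), parts[0], merge);
    std::cout << all << '\n';   // ACGTTT
}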
Code example #11
File: randomforest.cpp  Project: Cryomics-Lab/mothur
int RandomForest::populateDecisionTrees() {
    try {
        
        vector<double> errorRateImprovements;
        
        for (int i = 0; i < numDecisionTrees; i++) {
          
            if (m->control_pressed) { return 0; }
            if (((i+1) % 100) == 0) {  m->mothurOut("Creating " + toString(i+1) + " (th) Decision tree\n");  }
          
            // TODO: need to first fix if we are going to use pointer based system or anything else
            DecisionTree* decisionTree = new DecisionTree(dataSet, globalDiscardedFeatureIndices, OptimumFeatureSubsetSelector(optimumFeatureSubsetSelectionCriteria), treeSplitCriterion, featureStandardDeviationThreshold);
          
            if (m->debug && doPruning) {
                m->mothurOut("Before pruning\n");
                decisionTree->printTree(decisionTree->rootNode, "ROOT");
            }
            
            int numCorrect;
            double treeErrorRate;
            
            decisionTree->calcTreeErrorRate(numCorrect, treeErrorRate);
            double prePrunedErrorRate = treeErrorRate;
            
            if (m->debug) {
                m->mothurOut("treeErrorRate: " + toString(treeErrorRate) + " numCorrect: " + toString(numCorrect) + "\n");
            }
            
            if (doPruning) {
                decisionTree->pruneTree(pruneAggressiveness);
                if (m->debug) {
                    m->mothurOut("After pruning\n");
                    decisionTree->printTree(decisionTree->rootNode, "ROOT");
                }
                decisionTree->calcTreeErrorRate(numCorrect, treeErrorRate);
            }
            double postPrunedErrorRate = treeErrorRate;
            
          
            decisionTree->calcTreeVariableImportanceAndError(numCorrect, treeErrorRate);
            double errorRateImprovement = (prePrunedErrorRate - postPrunedErrorRate) / prePrunedErrorRate;

            if (m->debug) {
                m->mothurOut("treeErrorRate: " + toString(treeErrorRate) + " numCorrect: " + toString(numCorrect) + "\n");
                if (doPruning) {
                    m->mothurOut("errorRateImprovement: " + toString(errorRateImprovement) + "\n");
                }
            }
            
            
            if (discardHighErrorTrees) {
                if (treeErrorRate < highErrorTreeDiscardThreshold) {
                    updateGlobalOutOfBagEstimates(decisionTree);
                    decisionTree->purgeDataSetsFromTree();
                    decisionTrees.push_back(decisionTree);
                    if (doPruning) {
                        errorRateImprovements.push_back(errorRateImprovement);
                    }
                } else {
                    delete decisionTree;
                }
            } else {
                updateGlobalOutOfBagEstimates(decisionTree);
                decisionTree->purgeDataSetsFromTree();
                decisionTrees.push_back(decisionTree);
                if (doPruning) {
                    errorRateImprovements.push_back(errorRateImprovement);
                }
            }          
        }
        
        double avgErrorRateImprovement = -1.0;
        if (errorRateImprovements.size() > 0) {
            avgErrorRateImprovement = accumulate(errorRateImprovements.begin(), errorRateImprovements.end(), 0.0);
//            cout << "Total " << avgErrorRateImprovement << " size " << errorRateImprovements.size() << endl;
            avgErrorRateImprovement /= errorRateImprovements.size();
        }
        
        if (m->debug && doPruning) {
            m->mothurOut("avgErrorRateImprovement:" + toString(avgErrorRateImprovement) + "\n");
        }
        // m->mothurOut("globalOutOfBagEstimates = " + toStringVectorMap(globalOutOfBagEstimates)+ "\n");

        
        return 0;
    }
    catch(exception& e) {
        m->errorOut(e, "RandomForest", "populateDecisionTrees");
        exit(1);
    }  
}
Code example #12
File: cpchop.cpp  Project: berland/opm-upscaling
int main(int argc, char** argv)
try
{
    if (argc == 1) {
        std::cout << "Usage: cpchop gridfilename=filename.grdecl [subsamples=10] [ilen=5] [jlen=5] " << std::endl;
        std::cout << "       [zlen=5] [imin=] [imax=] [jmin=] [jmax=] [upscale=true] [bc=fixed]" << std::endl;
        std::cout << "       [resettoorigin=true] [seed=111] [z_tolerance=0.0] [minperm=1e-9] " << std::endl;
        std::cout << "       [dips=false] [azimuthdisplacement=] [satnumvolumes=false] [mincellvolume=1e-9]" << std::endl;
        std::cout << "       [filebase=] [resultfile=] [endpoints=false] [cappres=false]" << std::endl;
        std::cout << "       [rock_list=] [anisotropicrocks=false]" << std::endl;
        exit(1);
    }
    Opm::parameter::ParameterGroup param(argc, argv);
    std::string gridfilename = param.get<std::string>("gridfilename");
    Opm::CornerPointChopper ch(gridfilename);

    // The cells with i coordinate in [imin, imax) are included, similar for j.
    // The z limits may be changed inside the chopper to match actual min/max z.
    const int* dims = ch.dimensions();
    int imin = param.getDefault("imin", 0);
    int imax = param.getDefault("imax", dims[0]);
    int jmin = param.getDefault("jmin", 0);
    int jmax = param.getDefault("jmax", dims[1]);
    double zmin = param.getDefault("zmin", ch.zLimits().first);
    double zmax = param.getDefault("zmax", ch.zLimits().second);
    int subsamples = param.getDefault("subsamples", 1);
    int ilen = param.getDefault("ilen", imax - imin);
    int jlen = param.getDefault("jlen", jmax - jmin);
    double zlen = param.getDefault("zlen", zmax - zmin);
    bool upscale = param.getDefault("upscale", true);
    std::string bc = param.getDefault<std::string>("bc", "fixed");
    bool resettoorigin = param.getDefault("resettoorigin", true);
    boost::mt19937::result_type userseed = param.getDefault("seed", 0);

    int outputprecision = param.getDefault("outputprecision", 8);
    std::string filebase = param.getDefault<std::string>("filebase", "");
    std::string resultfile = param.getDefault<std::string>("resultfile", "");

    double minperm = param.getDefault("minperm", 1e-9);
    double minpermSI = Opm::unit::convert::from(minperm, Opm::prefix::milli*Opm::unit::darcy);

    // Following two options are for dip upscaling (slope of cell top and bottom edges)
    bool dips = param.getDefault("dips", false);  // whether to do dip averaging
    double azimuthdisplacement = param.getDefault("azimuthdisplacement", 0.0);  // possibility to add/subtract a value to/from azimuth for dip plane.
    double mincellvolume = param.getDefault("mincellvolume", 1e-9); // ignore smaller cells for dip calculations

    bool satnumvolumes = param.getDefault("satnumvolumes", false); // whether to count volumes pr. satnum

    // upscaling of endpoints and capillary pressure
    // Conversion factor, multiply mD numbers with this to get m² numbers
    const double milliDarcyToSqMetre = 9.869233e-16;
    // Input for surfaceTension is dynes/cm, SI units are Joules/square metre
    const double surfaceTension = param.getDefault("surfaceTension", 11.0) * 1e-3; // multiply with 10^-3 to obtain SI units

    bool endpoints = param.getDefault("endpoints", false); // whether to upscale saturation endpoints
    bool cappres = param.getDefault("cappres", false); // whether to upscale capillary pressure
    if (cappres) {
        endpoints = true;
    }
    std::string rock_list = param.getDefault<std::string>("rock_list", "no_list");
    bool anisorocks = param.getDefault("anisotropicrocks", false);
    std::vector<std::vector<double> > rocksatendpoints_;
    std::vector<std::vector<double> > jfuncendpoints_; // Used if isotropic rock input
    int nsatpoints = 5; // number of saturation points in upscaled capillary pressure function per subsample
    double saturationThreshold = 0.00001;

    // For isotropic input rocks:
    std::vector<Opm::MonotCubicInterpolator> InvJfunctions; // Holds the inverse of the loaded J-functions.
    // For anisotropic input rocks:
    std::vector<Opm::MonotCubicInterpolator> SwPcfunctions; // Holds Sw(Pc) for each rocktype.

    // Read rock data from files specified in rock_list
    if (endpoints) {
        if (!rock_list.compare("no_list")) {
            std::cout << "Can't do endponts without rock list (" << rock_list << ")" << std::endl;
            throw std::exception();
        }
        // Code copied from ReservoirPropertyCommon.hpp for file reading
        std::ifstream rl(rock_list.c_str());
        if (!rl) {
            OPM_THROW(std::runtime_error, "Could not open file " << rock_list);
        }
        int num_rocks = -1;
        rl >> num_rocks;
        assert(num_rocks >= 1);
        rocksatendpoints_.resize(num_rocks, std::vector<double>(2, 0.0)); // two endpoints per rock, filled below
        jfuncendpoints_.resize(num_rocks, std::vector<double>(2, 0.0));   // two endpoints per rock, filled below
        // Loop through rock files defined in rock_list and store the data we need
        for (int i = 0; i < num_rocks; ++i) {
            std::string spec;
            while (spec.empty()) {
                std::getline(rl, spec);
            }
            // Read the contents of the i'th rock
            std::istringstream specstream(spec);
            std::string rockname;
            specstream >> rockname;
            std::string rockfilename = rockname;

            std::ifstream rock_stream(rockfilename.c_str());
            if (!rock_stream) {
                OPM_THROW(std::runtime_error, "Could not open file " << rockfilename);
            }

            if (! anisorocks) { //Isotropic input rocks (Sw Krw Kro J)
                Opm::MonotCubicInterpolator Jtmp;
                try {
                    Jtmp = Opm::MonotCubicInterpolator(rockname, 1, 4);
                }
                catch (const char * errormessage) {
                    std::cerr << "Error: " << errormessage << std::endl;
                    std::cerr << "Check filename" << std::endl;
                    exit(1);
                }

                // Invert J-function, now we get saturation as a function of pressure:
                if (Jtmp.isStrictlyMonotone()) {
                    InvJfunctions.push_back(Opm::MonotCubicInterpolator(Jtmp.get_fVector(), Jtmp.get_xVector()));
                }
                else {
                    std::cerr << "Error: Jfunction " << i+1 << " in rock file " << rockname << " was not invertible." << std::endl;
                    exit(1);
                }

                jfuncendpoints_[i][0] = Jtmp.getMinimumX().second;
                jfuncendpoints_[i][1] = Jtmp.getMaximumX().second;
                rocksatendpoints_[i][0] = Jtmp.getMinimumX().first;
                rocksatendpoints_[i][1] = Jtmp.getMaximumX().first;
                if (rocksatendpoints_[i][0] < 0 || rocksatendpoints_[i][0] > 1) {
                    OPM_THROW(std::runtime_error, "Minimum rock saturation (" << rocksatendpoints_[i][0] << ") not sane for rock "
                              << rockfilename << "." << std::endl << "Did you forget to specify anisotropicrocks=true ?");
                }
            }
            else { //Anisotropic input rocks (Pc Sw Krxx Kryy Krzz)
                Opm::MonotCubicInterpolator Pctmp;
                try {
                    Pctmp = Opm::MonotCubicInterpolator(rockname, 2, 1);
                }
                catch (const char * errormessage) {
                    std::cerr << "Error: " << errormessage << std::endl;
                    std::cerr << "Check filename and columns 1 and 2 (Pc and Sw)" << std::endl;
                    exit(1);
                }
                if (cappres) {
                    // Invert Pc(Sw) curve into Sw(Pc):
                    if (Pctmp.isStrictlyMonotone()) {
                        SwPcfunctions.push_back(Opm::MonotCubicInterpolator(Pctmp.get_fVector(), Pctmp.get_xVector()));
                    }
                    else {
                        std::cerr << "Error: Pc(Sw) curve " << i+1 << " in rock file " << rockname << " was not invertible." << std::endl;
                        exit(1);
                    }
                }
                rocksatendpoints_[i][0] = Pctmp.getMinimumX().first;
                rocksatendpoints_[i][1] = Pctmp.getMaximumX().first;
            }
        }
    }

    double z_tolerance = param.getDefault("z_tolerance", 0.0);
    double residual_tolerance = param.getDefault("residual_tolerance", 1e-8);
    int linsolver_verbosity = param.getDefault("linsolver_verbosity", 0);
    int linsolver_type = param.getDefault("linsolver_type", 1);

    //  Guarantee initialization
    double Pcmax = -DBL_MAX, Pcmin = DBL_MAX;

    // Check that we do not have any user input
    // that goes outside the coordinates described in
    // the cornerpoint file (runtime-exception will be thrown in case of error)
    ch.verifyInscribedShoebox(imin, ilen, imax,
                              jmin, jlen, jmax,
                              zmin, zlen, zmax);

    // Random number generator from boost.
    boost::mt19937 gen;

    // Seed the random number generators with the current time, unless specified on command line
    // Warning: Current code does not allow 0 for the seed!!
    boost::mt19937::result_type autoseed = time(NULL);
    if (userseed == 0) {
        gen.seed(autoseed);
    }
    else {
        gen.seed(userseed);
    }

    Opm::SinglePhaseUpscaler::BoundaryConditionType bctype = Opm::SinglePhaseUpscaler::Fixed;
    bool isFixed, isPeriodic;
    isFixed = isPeriodic = false;
    if (upscale) {
        if (bc == "fixed") {
            isFixed = true;
            bctype = Opm::SinglePhaseUpscaler::Fixed;
        }
        else if (bc == "periodic") {
            isPeriodic = true;
            bctype = Opm::SinglePhaseUpscaler::Periodic;
        }
        else {
            std::cout << "Boundary condition type (bc=" << bc << ") not allowed." << std::endl;
            std::cout << "Only bc=fixed or bc=periodic implemented." << std::endl;
            throw std::exception();
        }
    }

    // Check for unused parameters (potential typos).
    if (param.anyUnused()) {
        std::cout << "*****     WARNING: Unused parameters:     *****\n";
        param.displayUsage();
    }

    // Note that end is included in interval for uniform_int.
    boost::uniform_int<> disti(imin, imax - ilen);
    boost::uniform_int<> distj(jmin, jmax - jlen);
    boost::uniform_real<> distz(zmin, std::max(zmax - zlen, zmin));
    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > ri(gen, disti);
    boost::variate_generator<boost::mt19937&, boost::uniform_int<> > rj(gen, distj);
    boost::variate_generator<boost::mt19937&, boost::uniform_real<> > rz(gen, distz);

    // Storage for results
    std::vector<double> porosities;
    std::vector<double> permxs;
    std::vector<double> permys;
    std::vector<double> permzs;
    std::vector<double> permyzs;
    std::vector<double> permxzs;
    std::vector<double> permxys;
    std::vector<double> minsws, maxsws;
    std::vector<std::vector<double> > pcvalues;
    std::vector<double> dipangs, azimuths;

    // Initialize a matrix for subsample satnum volumes.
    // Outer index is subsample index, inner index is SATNUM-value
    std::vector<std::vector<double> > rockvolumes;
    int maxSatnum = 0; // This value is determined from the chopped cells.

    int finished_subsamples = 0; // keep explicit count of successful subsamples
    for (int sample = 1; sample <= subsamples; ++sample) {
        int istart = ri();
        int jstart = rj();
        double zstart = rz();
        ch.chop(istart, istart + ilen, jstart, jstart + jlen, zstart, zstart + zlen, resettoorigin);
        std::string subsampledgrdecl = filebase;

        // Output grdecl-data to file if a filebase is supplied.
        if (filebase != "") {
            std::ostringstream oss;
            if ((size_t) subsamples > 1) { // Only add number to filename if more than one sample is asked for
                oss << 'R' << std::setw(4) << std::setfill('0') << sample;
                subsampledgrdecl += oss.str();
            }
            subsampledgrdecl += ".grdecl";
            ch.writeGrdecl(subsampledgrdecl);
        }

        try { /* The upscaling may fail to converge on icky grids, lets just pass by those */
            if (upscale) {
                Opm::EclipseGridParser subparser = ch.subparser();
                subparser.convertToSI();
                Opm::SinglePhaseUpscaler upscaler;

                upscaler.init(subparser, bctype, minpermSI, z_tolerance,
                              residual_tolerance, linsolver_verbosity, linsolver_type, false);

                Opm::SinglePhaseUpscaler::permtensor_t upscaled_K = upscaler.upscaleSinglePhase();
                upscaled_K *= (1.0/(Opm::prefix::milli*Opm::unit::darcy));


                porosities.push_back(upscaler.upscalePorosity());
                permxs.push_back(upscaled_K(0,0));
                permys.push_back(upscaled_K(1,1));
                permzs.push_back(upscaled_K(2,2));
                permyzs.push_back(upscaled_K(1,2));
                permxzs.push_back(upscaled_K(0,2));
                permxys.push_back(upscaled_K(0,1));

            }

            if (endpoints) {
                // Calculate minimum and maximum water volume in each cell
                // Create single-phase upscaling object to get poro and perm values from the grid
                Opm::EclipseGridParser subparser = ch.subparser();
                std::vector<double>  perms = subparser.getFloatingPointValue("PERMX");
                subparser.convertToSI();
                Opm::SinglePhaseUpscaler upscaler;
                upscaler.init(subparser, bctype, minpermSI, z_tolerance,
                              residual_tolerance, linsolver_verbosity, linsolver_type, false);
                std::vector<int>   satnums = subparser.getIntegerValue("SATNUM");
                std::vector<double>  poros = subparser.getFloatingPointValue("PORO");
                std::vector<double> cellVolumes, cellPoreVolumes;
                cellVolumes.resize(satnums.size(), 0.0);
                cellPoreVolumes.resize(satnums.size(), 0.0);
                int tesselatedCells = 0;
                //double maxSinglePhasePerm = 0;
                double Swirvolume = 0;
                double Sworvolume = 0;
                const std::vector<int>& ecl_idx = upscaler.grid().globalCell();
                Dune::CpGrid::Codim<0>::LeafIterator c = upscaler.grid().leafbegin<0>();
                for (; c != upscaler.grid().leafend<0>(); ++c) {
                    unsigned int cell_idx = ecl_idx[c->index()];
                    if (satnums[cell_idx] > 0) { // Satnum zero is "no rock"
                        cellVolumes[cell_idx] = c->geometry().volume();
                        cellPoreVolumes[cell_idx] = cellVolumes[cell_idx] * poros[cell_idx];
                        double Pcmincandidate = 0.0, Pcmaxcandidate = 0.0, minSw, maxSw;
                        if (!anisorocks) {
                            if (cappres) {
                                Pcmincandidate = jfuncendpoints_[int(satnums[cell_idx])-1][1]
                                                 / sqrt(perms[cell_idx] * milliDarcyToSqMetre/poros[cell_idx]) * surfaceTension;
                                Pcmaxcandidate = jfuncendpoints_[int(satnums[cell_idx])-1][0]
                                                 / sqrt(perms[cell_idx] * milliDarcyToSqMetre/poros[cell_idx]) * surfaceTension;
                            }
                            minSw = rocksatendpoints_[int(satnums[cell_idx])-1][0];
                            maxSw = rocksatendpoints_[int(satnums[cell_idx])-1][1];
                        }
                        else { // anisotropic input, we do not do J-function scaling
                            if (cappres) {
                                Pcmincandidate = SwPcfunctions[int(satnums[cell_idx])-1].getMinimumX().first;
                                Pcmaxcandidate = SwPcfunctions[int(satnums[cell_idx])-1].getMaximumX().first;
                            }
                            minSw = rocksatendpoints_[int(satnums[cell_idx])-1][0];
                            maxSw = rocksatendpoints_[int(satnums[cell_idx])-1][1];
                        }
                        if (cappres) {
                            Pcmin = std::min(Pcmincandidate, Pcmin);
                            Pcmax = std::max(Pcmaxcandidate, Pcmax);
                        }
                        Swirvolume += minSw * cellPoreVolumes[cell_idx];
                        Sworvolume += maxSw * cellPoreVolumes[cell_idx];
                    }
                    ++tesselatedCells; // keep count.
                }

                // If upscale=false, we still (may) want to have porosities together with endpoints
                if (!upscale) {
                    porosities.push_back(upscaler.upscalePorosity());
                }

                // Total porevolume and total volume -> upscaled porosity:
                double poreVolume = std::accumulate(cellPoreVolumes.begin(),
                                                    cellPoreVolumes.end(),
                                                    0.0);
                double Swir = Swirvolume/poreVolume;
                double Swor = Sworvolume/poreVolume;
                minsws.push_back(Swir);
                maxsws.push_back(Swor);
                if (cappres) {
                    // Upscale capillary pressure function
                    Opm::MonotCubicInterpolator WaterSaturationVsCapPressure;
                    double largestSaturationInterval = Swor-Swir;
                    double Ptestvalue = Pcmax;
                    while (largestSaturationInterval > (Swor-Swir)/double(nsatpoints)) {
                        if (Pcmax == Pcmin) {
                            // This is a dummy situation, we go through once and then
                            // we are finished (this will be triggered by zero permeability)
                            Ptestvalue = Pcmin;
                            largestSaturationInterval = 0;
                        }
                        else if (WaterSaturationVsCapPressure.getSize() == 0) {
                            /* No data values previously computed */
                            Ptestvalue = Pcmax;
                        }
                        else if (WaterSaturationVsCapPressure.getSize() == 1) {
                            /* If only one point has been computed, it was for Pcmax. So now
                               do Pcmin */
                            Ptestvalue = Pcmin;
                        }
                        else {
                            /* Search for largest saturation interval in which there are no
                               computed saturation points (and estimate the capillary pressure
                               that will fall in the center of this saturation interval)
                            */
                            std::pair<double,double> SatDiff = WaterSaturationVsCapPressure.getMissingX();
                            Ptestvalue = SatDiff.first;
                            largestSaturationInterval = SatDiff.second;
                        }
                        // Check for saneness of Ptestvalue:
                        if (std::isnan(Ptestvalue) || std::isinf(Ptestvalue)) {
                            std::cerr << "ERROR: Ptestvalue was inf or nan" << std::endl;
                            break; // Jump out of while-loop, just print out the results
                            // up to now and exit the program
                        }

                        double waterVolume = 0.0;
                        for (unsigned int i = 0; i < ecl_idx.size(); ++i) {
                            unsigned int cell_idx = ecl_idx[i];
                            double waterSaturationCell = 0.0;
                            if (satnums[cell_idx] > 0) { // handle "no rock" cells with satnum zero
                                double PtestvalueCell;

                                PtestvalueCell = Ptestvalue;

                                if (!anisorocks) {
                                    double Jvalue = sqrt(perms[cell_idx] * milliDarcyToSqMetre /poros[cell_idx]) * PtestvalueCell / surfaceTension;
                                    waterSaturationCell
                                        = InvJfunctions[int(satnums[cell_idx])-1].evaluate(Jvalue);
                                }
                                else { // anisotropic_input, then we do not do J-function-scaling
                                    waterSaturationCell = SwPcfunctions[int(satnums[cell_idx])-1].evaluate(PtestvalueCell);
                                }
                            }
                            waterVolume += waterSaturationCell  * cellPoreVolumes[cell_idx];
                        }
                        WaterSaturationVsCapPressure.addPair(Ptestvalue, waterVolume/poreVolume);
                    }
                    WaterSaturationVsCapPressure.chopFlatEndpoints(saturationThreshold);
                    std::vector<double> wattest = WaterSaturationVsCapPressure.get_fVector();
                    std::vector<double> cprtest = WaterSaturationVsCapPressure.get_xVector();
                    Opm::MonotCubicInterpolator CapPressureVsWaterSaturation(WaterSaturationVsCapPressure.get_fVector(),
                            WaterSaturationVsCapPressure.get_xVector());
                    std::vector<double> pcs;
                    for (int satp=0; satp<nsatpoints; ++satp) {
                        pcs.push_back(CapPressureVsWaterSaturation.evaluate(Swir+(Swor-Swir)/(nsatpoints-1)*satp));
                    }
                    pcvalues.push_back(pcs);
                }

            }


            if (dips) {
                Opm::EclipseGridParser subparser = ch.subparser();
                std::vector<int>  griddims = subparser.getSPECGRID().dimensions;
                std::vector<double> xdips_subsample, ydips_subsample;

                Opm::EclipseGridInspector gridinspector(subparser);
                for (int k=0; k < griddims[2]; ++k) {
                    for (int j=0; j < griddims[1]; ++j) {
                        for (int i=0; i < griddims[0]; ++i) {
                            if (gridinspector.cellVolumeVerticalPillars(i, j, k) > mincellvolume) {
                                std::pair<double,double> xydip = gridinspector.cellDips(i, j, k);
                                xdips_subsample.push_back(xydip.first);
                                ydips_subsample.push_back(xydip.second);
                            }
                        }
                    }
                }


                //  double azimuth = atan(xydip.first/xydip.second);
                //              double dip = acos(1.0/sqrt(pow(xydip.first,2.0)+pow(xydip.second,2.0)+1.0));
                //	dips_subsample.push_back( xydip.first );
                //	azims_subsample.push_back(atan(xydip.first/xydip.second));

                // Average xdips and ydips
                double xdipaverage = accumulate(xdips_subsample.begin(), xdips_subsample.end(), 0.0)/xdips_subsample.size();
                double ydipaverage = accumulate(ydips_subsample.begin(), ydips_subsample.end(), 0.0)/ydips_subsample.size();

                // Convert to dip and azimuth
                double azimuth = atan(xdipaverage/ydipaverage)+azimuthdisplacement;
                double dip = acos(1.0/sqrt(pow(xdipaverage,2.0)+pow(ydipaverage,2.0)+1.0));
                dipangs.push_back(dip);
                azimuths.push_back(azimuth);
            }

            if (satnumvolumes) {
                Opm::EclipseGridParser subparser = ch.subparser();
                Opm::EclipseGridInspector subinspector(subparser);
                std::vector<int>  griddims = subparser.getSPECGRID().dimensions;
                int number_of_subsamplecells = griddims[0] * griddims[1] * griddims[2];

                // If SATNUM is non-existent in input grid, this will fail:
                std::vector<int> satnums = subparser.getIntegerValue("SATNUM");

                std::vector<double> rockvolumessubsample;
                for (int cell_idx=0; cell_idx < number_of_subsamplecells; ++cell_idx) {
                    maxSatnum = std::max(maxSatnum, int(satnums[cell_idx]));
                    rockvolumessubsample.resize(maxSatnum); // Ensure long enough vector
                    rockvolumessubsample[int(satnums[cell_idx])-1] += subinspector.cellVolumeVerticalPillars(cell_idx);
                }

                // Normalize volumes to obtain relative volumes:
                double subsamplevolume = std::accumulate(rockvolumessubsample.begin(),
                                         rockvolumessubsample.end(), 0.0);
                std::vector<double> rockvolumessubsample_normalized;
                for (size_t satnum_idx = 0; satnum_idx < rockvolumessubsample.size(); ++satnum_idx) {
                    rockvolumessubsample_normalized.push_back(rockvolumessubsample[satnum_idx]/subsamplevolume);
                }
                rockvolumes.push_back(rockvolumessubsample_normalized);
            }

            finished_subsamples++;
        }
        catch (...) {
            std::cerr << "Warning: Upscaling chopped subsample nr. " << sample << " failed, proceeding to next subsample\n";
        }

    }


    // Make stream of output data, to be outputted to screen and optionally to file
    std::stringstream outputtmp;

    outputtmp << "################################################################################################" << std::endl;
    outputtmp << "# Results from property analysis on subsamples" << std::endl;
    outputtmp << "#" << std::endl;
    time_t now = time(NULL);
    outputtmp << "# Finished: " << asctime(localtime(&now));

    utsname hostname;
    uname(&hostname);
    outputtmp << "# Hostname: " << hostname.nodename << std::endl;
    outputtmp << "#" << std::endl;
    outputtmp << "# Options used:" << std::endl;
    outputtmp << "#     gridfilename: " << gridfilename << std::endl;
    outputtmp << "#   i; min,len,max: " << imin << " " << ilen << " " << imax << std::endl;
    outputtmp << "#   j; min,len,max: " << jmin << " " << jlen << " " << jmax << std::endl;
    outputtmp << "#   z; min,len,max: " << zmin << " " << zlen << " " << zmax << std::endl;
    outputtmp << "#       subsamples: " << subsamples << std::endl;
    if (userseed == 0) {
        outputtmp << "#      (auto) seed: " << autoseed << std::endl;
    }
    else {
        outputtmp << "#    (manual) seed: " << userseed << std::endl;
    }
    outputtmp << "################################################################################################" << std::endl;
    outputtmp << "# id";
    if (upscale) {
        if (isPeriodic) {
            outputtmp << "          porosity                 permx                   permy                   permz                   permyz                  permxz                  permxy";
        }
        else if (isFixed) {
            outputtmp << "          porosity                 permx                   permy                   permz";
        }
    }
    if (endpoints) {
        if (!upscale) {
            outputtmp << "          porosity";
        }
        outputtmp << "                  Swir                    Swor";
        if (cappres) {
            outputtmp << "                  Pc(Swir)                Pc2                     Pc3                     Pc4                     Pc(Swor)";
        }
    }
    if (dips) {
        outputtmp << "                  dip                     azim(displacement:" << azimuthdisplacement << ")";
    }
    if (satnumvolumes) {
        for (int satnumidx = 0; satnumidx < maxSatnum; ++satnumidx) {
            outputtmp << "               satnum_" << satnumidx+1;
        }
    }
    outputtmp << std::endl;

    const int fieldwidth = outputprecision + 8;
    for (int sample = 1; sample <= finished_subsamples; ++sample) {
        outputtmp << sample << '\t';
        if (upscale) {
            outputtmp <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << porosities[sample-1] << '\t' <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permxs[sample-1] << '\t' <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permys[sample-1] << '\t' <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permzs[sample-1] << '\t';
            if (isPeriodic) {
                outputtmp <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permyzs[sample-1] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permxzs[sample-1] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << permxys[sample-1] << '\t';
            }
        }
        if (endpoints) {
            if (!upscale) {
                outputtmp <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << porosities[sample-1] << '\t';
            }
            outputtmp <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << minsws[sample-1] << '\t' <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << maxsws[sample-1];
            if (cappres) {
                outputtmp <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << pcvalues[sample-1][0] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << pcvalues[sample-1][1] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << pcvalues[sample-1][2] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << pcvalues[sample-1][3] << '\t' <<
                          std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << pcvalues[sample-1][4];
            }
        }
        if (dips) {
            outputtmp <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << dipangs[sample-1] << '\t' <<
                      std::showpoint << std::setw(fieldwidth) << std::setprecision(outputprecision) << azimuths[sample-1];
        }
        if (satnumvolumes) {
            rockvolumes[sample-1].resize(maxSatnum, 0.0);
            for (int satnumidx = 0; satnumidx < maxSatnum; ++satnumidx) {
                outputtmp <<
                          std::showpoint << std::setw(fieldwidth) <<
                          std::setprecision(outputprecision) << rockvolumes[sample-1][satnumidx] << '\t';
            }
        }
        outputtmp <<  std::endl;
    }

    if (resultfile != "") {
        std::cout << "Writing results to " << resultfile << std::endl;
        std::ofstream outfile;
        outfile.open(resultfile.c_str(), std::ios::out | std::ios::trunc);
        outfile << outputtmp.str();
        outfile.close();
    }



    std::cout << outputtmp.str();
}
catch (const std::exception &e) {
    std::cerr << "Program threw an exception: " << e.what() << "\n";
    throw;
}
Code example #13
File: 11-6.cpp  Project: jervisfm/ExampleCode
#include <iterator>   // std::distance
#include <numeric>    // std::accumulate

template<class Iter_T>
double computeMean(Iter_T first, Iter_T last) {
  return static_cast<double>(std::accumulate(first, last, 0.0))
    / std::distance(first, last);
}
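A minimal usage sketch for the function above (hypothetical driver, not part of the original example):

#include <iostream>
#include <vector>

int main() {
    std::vector<int> v{1, 2, 3, 4};
    std::cout << computeMean(v.begin(), v.end()) << '\n';   // prints 2.5
}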
Code example #14
 // assumes <vector>, <numeric> (iota, accumulate) and <functional> (bit_xor), plus using namespace std
 int findMissing(vector<int> &nums) {
     vector<int> expected(nums.size()); 
     iota(expected.begin(), expected.end(), 1);  // Costs extra space O(n)
     return accumulate (nums.cbegin(), nums.cend(), 0, bit_xor<int>()) ^
            accumulate (expected.cbegin(), expected.cend(), 0, bit_xor<int>());
 }
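The extra expected vector can be avoided entirely; a sketch of the same xor argument done in place, assuming (as the snippet appears to) that nums holds n distinct values from 0..n with exactly one missing:

#include <cstddef>
#include <vector>

int findMissingInPlace(const std::vector<int>& nums) {
    int x = 0;
    for (std::size_t i = 0; i < nums.size(); ++i)
        x ^= nums[i] ^ static_cast<int>(i + 1);  // xor of all values with 1..n cancels everything except the missing value
    return x;
}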
Code example #15
 // assumes <vector>, <numeric> (accumulate) and <functional> (bit_xor), plus using namespace std
 vector<int> singleNumber(vector<int>& nums) 
 {
     int a = 0, b = 0, s = accumulate(nums.begin(), nums.end(), 0, bit_xor<int>());
     for (auto n : nums) n & s & -s ? a ^= n: b ^= n;
     return vector<int>{a, b};
 }
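Here s is the xor of the two values that occur exactly once, so s & -s isolates its lowest set bit; each element is then folded into a or b depending on that bit, which separates the two singletons. A hypothetical driver (not part of the original snippet):

#include <iostream>
#include <vector>

int main() {
    std::vector<int> nums{1, 2, 1, 3, 2, 5};
    std::vector<int> r = singleNumber(nums);
    std::cout << r[0] << ' ' << r[1] << '\n';   // 3 and 5, in either order
}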
Code example #16
float TactileValueArray::accumulate (TactileValue::Mode mode, AccMode acc_mode, bool bMean) {
	return accumulate([mode](const TactileValue &self) {return self.value(mode);}, acc_mode, bMean);
}
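getRangeLambda, getReleaseDecay and the value-mode overload above all funnel into the same lambda-taking accumulate member, whose body is not shown. A self-contained sketch of the kind of fold it presumably performs; the enum values, the mean handling and the free-function form are assumptions, not the tactile library's actual code:

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

enum AccMode { Sum, Max, Min };  // assumed meaning of the mode flags

// generic sketch: apply a getter to every element and combine according to acc_mode
template <class T>
float accumulateValues(const std::vector<T>& vals,
                       const std::function<float(const T&)>& getter,
                       AccMode acc_mode, bool bMean) {
    if (vals.empty()) return 0.f;
    float acc = getter(vals.front());
    for (std::size_t i = 1; i < vals.size(); ++i) {
        float v = getter(vals[i]);
        if (acc_mode == Sum)      acc += v;
        else if (acc_mode == Max) acc = std::max(acc, v);
        else                      acc = std::min(acc, v);
    }
    if (bMean && acc_mode == Sum) acc /= static_cast<float>(vals.size());
    return acc;
}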
Code example #17
File: SIFT.hpp  Project: pmoulon/DO-CV
    //! Computes the SIFT descriptor for keypoint \f$(x,y,\sigma,\theta)\f$.
    SIFTDescriptor operator()(float x, float y, float sigma, float theta,
                              const Image<Vector2f>& gradPolar) const
    {
      const float pi = static_cast<float>(M_PI);
      /*
        The oriented keypoint is denoted by $k = (x,y,\sigma,\theta)$.
        SIFT describes keypoint $k$ in a similarity-invariant manner.

        To do so, we consider a square image patch which:
        - is centered in $(x,y)$
        - has an orientation angle $\theta$ w.r.t. the image frame coordinates:
        => to ensure rotation invariance
        - has a side length proportional to the scale $\sigma$:
        => to ensure scale invariance
        This square patch is denoted by $P(x,y,\sigma,\theta) = P(k)$.

        The square patch $P(x,y,\sigma,\theta)$ is itself divided into NxN
        smaller square patches $(P_{i,j})_{1 \leq i \leq N, 1 \leq j \leq N}$.

        Notice that we omit the variables $(x,y,\sigma,\theta)$ which the
        patches $P_{i,j}$ actually depend on.

        $N$ corresponds to the template argument 'int N' which should be 4 as 
        stated in the paper [Lowe, IJCV 2004].

        In the image, each small square patch $P_{i,j}$ has a side length $l$ 
        proportional to the scale $\sigma$ of the keypoint, i.e., 
        $l = \lambda \sigma$.
      */
      const float lambda = bin_scale_unit_length_;
      const float l = lambda*sigma;
      /*
        It is important to note that $\lambda$ is some 'universal' constant 
        used for all SIFT descriptors to ensure the scale-invariance of the 
        descriptor.
      */

      /*
        Now in each image square patch $P_{i,j}$, we build a histogram of 
        gradient orientations $\mathbf{h}_{i,j} \in \mathbb{R}^d$, which 
        quantizes the gradient orientations into $O$ principal orientations.
        $O$ corresponds to the template argument 'int O'.

        Let us initialize the SIFT descriptor consisting of the NxN histograms 
        $\mathbf{h}_{i,j}$, each in $\mathbf{R}^O$ as follows.
      */
      SIFTDescriptor h(SIFTDescriptor::Zero());

      /*
       In the rescaled and oriented coordinate frame bound to the patch $P(k)$, 
       - keypoint $k$ is located at (0,0)
       - centers $C_{i,j}$ of patch $P_{i,j}$ are located at
         $[ -(N+1)/2 + i, -(N+1)/2 + j ]$
      
         For example for $N=4$, they are at:
         (-1.5,-1.5) (-0.5,-1.5) (0.5,-1.5) (1.5,-1.5)
         (-1.5,-0.5) (-0.5,-0.5) (0.5,-0.5) (1.5,-0.5)
         (-1.5, 0.5) (-0.5, 0.5) (0.5, 0.5) (1.5, 0.5)
         (-1.5, 1.5) (-0.5, 1.5) (0.5, 1.5) (1.5, 1.5)
      
       Gradients in $[x_i-1, x_i+1] \times [y_i-1, y_i+1]$ contribute
       to histogram $\mathbf{h}_{i,j}$, namely gradients in the square patch
       $Q_{i,j}$
       - centered in $C_{i,j}$ as square patch $P_{i,j}$,
       - with side length $2$.
       That is because we want to do trilinear interpolation in order to make 
       SIFT robust to small shift in rotation, translation.

       Therefore, to compute the SIFT descriptor we need to scan all the pixels 
       on a larger circular image patch with radius $r$:
      */
      const float r = sqrt(2.f) * l * (N+1)/2.f;
      /*
       In the above formula, notice:
       - the factor $\sqrt{2}$ because diagonal corners of the furthest patches 
         $P_{i,j}$ from the center $(x,y)$ must be in the circular patch.
       - the factor $(N+1)/2$ because we have to include the gradients in larger
         patches $Q_{i,j}$ for each $P_{i,j}$.
       It is recommended to make a drawing to convince oneself.
      */

      // To build the SIFT descriptor, we do the following procedure:
      // - we work in the image reference frame;
      // - we scan in the convolved image $G_\sigma$ the position $(x+u, y+v)$
      //   where $(u,v) \in [-r,r]^2$;
      // - we retrieve its coordinates in the oriented frame of the patch 
      //   $P(x,y,\sigma,\theta)$ with inverse transform $T = 1/l R_\theta^T$
      Matrix2f T;
      T << cos(theta), sin(theta),
          -sin(theta), cos(theta);
      T /= l;
      // Loop to perform interpolation
      const int rounded_r = intRound(r);
      const float rounded_x = intRound(x);
      const float rounded_y = intRound(y);
      for (int v = -rounded_r; v <= rounded_r; ++v)
      {
        for (int u = -rounded_r; u <= rounded_r; ++u)
        {
          // Compute the coordinates in the rescaled and oriented coordinate 
          // frame bound to patch $P(k)$.
          Vector2f pos( T*Vector2f(u,v) );
          // subpixel correction?
          /*pos.x() -= (x - rounded_x);
          pos.y() -= (y - rounded_y);*/

          if ( rounded_x+u < 0 || rounded_x+u >= gradPolar.width()  ||
               rounded_y+v < 0 || rounded_y+v >= gradPolar.height() )
            continue;

          // Compute the Gaussian weight which gives more emphasis to gradient 
          // closer to the center.
          float weight = exp(-pos.squaredNorm()/(2.f*pow(N/2.f, 2)));
          float mag = gradPolar(rounded_x+u, rounded_y+v)(0);
          float ori = gradPolar(rounded_x+u, rounded_y+v)(1) - theta;
          ori = ori < 0.f ? ori+2.f*pi : ori;
          ori *= float(O)/(2.f*pi);

          // The coordinate frame is centered in the patch center, thus:
          // $(x,y)$ is in $[-(N+1)/2, (N+1)/2]^2$.
          //
          // Change the coordinate frame so that $(x,y)$ is in $[-1, N]^2$. Thus,
          // translate by $[ (N-1)/2, (N-1)/2 ]$.
          pos.array() += N/2.f - 0.5f;
          if (pos.minCoeff() <= -1.f  || pos.maxCoeff() >= static_cast<float>(N))
            continue;
          // In the translated coordinate frame, note that for $N=4$ the centers
          // are now located at:
          //   (0,0) (1,0) (2,0) (3,0)
          //   (0,1) (1,1) (2,1) (3,1)
          //   (0,2) (1,2) (2,2) (3,2)
          //   (0,3) (1,3) (2,3) (3,3)
          //

          // Update the SIFT descriptor using trilinear interpolation.
          accumulate(h, pos, ori, weight, mag);
        }
      }
    
      h.normalize();

      h = (h * 512.f).cwiseMin(Matrix<float, Dim, 1>::Ones()*255.f);
      return h;
    }
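The trilinear accumulate call above is likewise not shown. A self-contained sketch of that step with a simplified signature (hypothetical, not DO-CV's actual helper): the contribution weight*mag is split over the two nearest bins in x, in y and in orientation, proportionally to the distance from each bin center; h is assumed to be the N*N*O descriptor flattened as (j, i, o).

#include <cmath>

template <int N, int O>
void accumulateTrilinear(float* h, float px, float py, float ori, float weight, float mag)
{
  const int x0 = static_cast<int>(std::floor(px));
  const int y0 = static_cast<int>(std::floor(py));
  const int o0 = static_cast<int>(std::floor(ori));
  const float fx = px - x0, fy = py - y0, fo = ori - o0;
  for (int dy = 0; dy < 2; ++dy)
    for (int dx = 0; dx < 2; ++dx)
      for (int dO = 0; dO < 2; ++dO)
      {
        const int xi = x0 + dx, yi = y0 + dy, oi = (o0 + dO) % O;
        if (xi < 0 || xi >= N || yi < 0 || yi >= N)
          continue;  // spatial contributions outside the NxN grid are dropped
        const float w = weight * mag
                      * (dx ? fx : 1.f - fx)
                      * (dy ? fy : 1.f - fy)
                      * (dO ? fo : 1.f - fo);
        h[(yi * N + xi) * O + oi] += w;
      }
}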
Code example #18
File: quake.c  Project: akkendrick/gravity-fest
     void
     fail_loop(
                int     f_index         /* 0-based fault strand identifier */
               )

/*  RETURN VALUE:  n/a */
/*  DESCRIPTION:   */
/*
** Routine fail_loop
** fail_loop is the topmost controlling routine that, for a specific split-node
** strand declared 'failed', applies varying amounts of slip with the
** objective of minimizing global strain energy
**/
/*   EOP   */
/*---------------------------------------------------------------------------*/

     {
      int  reform , nextslot , lastslot ;
      real  slip_delta , energy1 , energy2 , energy3 , total_slip ;
      real  kink , min_slip , concav ;
      
     
      clear_stiff() ;
      clear_real(force.full_rhs,loc_sys.neq) ; /* we elect not to do equil correction during fail_loop() */
      elgrp_loop( FORMS_QUAKE ) ;  reform = YES ;

      slip_delta = PT5 ;
      total_slip = ZERO ;
      (fltgrp_ptr+f_index)->q_amount = ONE - slip_delta ;
      total_slip += (fltgrp_ptr+f_index)->q_amount ;
      elgrp_loop( RHS_QUAKE ) ;  /* first slip (guess-delta) */
               

             /* Finish forming preconditioner */
     if(reform == YES && PCG == fe_sys.solver)
		{ 
		 int ii;
		 for (ii = 0; ii < loc_sys.neq; ii++) 
			{
			 *(pcg.precond+ii) = ONE / *(pcg.precond+ii);
			 if(*(pcg.precond+ii)==ZERO) printf("bad precond value at ieq=%d\n",ii) ;
			 if( isnan(*(pcg.precond+ii) ) ) printf("NaN precond value at ieq=%d\n",ii) ;
			}
		} 
    
      clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
                   
      if (PCG == fe_sys.solver)
		{
		 solver( ITER ) ;
		}
      else
		{
		 solver( FACBACK ) ;
		}
		
      accumulate() ;
      global.elas_energy = ZERO ;
      elgrp_loop( Q_STRESS ) ;
      energy1 = global.elas_energy ;

      clear_real(force.full_rhs,loc_sys.neq) ;

  /*  first exploratory slip complete...  */
  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
  slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;

      (fltgrp_ptr+f_index)->q_amount = slip_delta ;
      total_slip += (fltgrp_ptr+f_index)->q_amount ;
      elgrp_loop( RHS_QUAKE ) ;  /* 2nd slip (+delta) */
      clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
                   
      if (PCG == fe_sys.solver)
		{
		 solver( ITER ) ;
		}
      else
		{
		 solver( BACK ) ;
		}
		
      accumulate() ;
      global.elas_energy = ZERO ;
      elgrp_loop( Q_STRESS ) ;
      energy2 = global.elas_energy ;
      clear_real(force.full_rhs,loc_sys.neq) ;

  /*  2nd exploratory slip complete...  */
  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
  slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;

      (fltgrp_ptr+f_index)->q_amount = slip_delta ;
      total_slip += (fltgrp_ptr+f_index)->q_amount ;
      elgrp_loop( RHS_QUAKE ) ;  /* 3rd slip (+delta) */
      clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
                   
      if (PCG == fe_sys.solver)
		{
		 solver( ITER ) ;
		}
      else
		{
		 solver( BACK ) ;
		}
		
      accumulate() ;
      global.elas_energy = ZERO ;
      elgrp_loop( Q_STRESS ) ;
      energy3 = global.elas_energy ;
      clear_real(force.full_rhs,loc_sys.neq) ;

  /*  3rd exploratory slip complete... now estimate position of energy minimum...  */
  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
  slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;
  
      kink = (energy1 - energy2) / (energy2 - energy3) ;
      min_slip = PT5 - PT25*(THREE*kink - ONE)/(ONE - kink) ;
      concav = (energy2 - energy1) / (0.75 - min_slip) ;
      
      if( concav <= ZERO )
         {
          printf("\nWARNING - no energy minimum found for fault slip; aborting\n") ;
		  (fltgrp_ptr+f_index)->q_amount = -THREE*slip_delta;
		  total_slip += (fltgrp_ptr+f_index)->q_amount ;
		  elgrp_loop( RHS_QUAKE ) ;  /* return to zero total slip (-3*delta) */
		  clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
					   
		  if (PCG == fe_sys.solver)
			{
			 solver( ITER ) ;
			}
		  else
			{
			 solver( BACK ) ;
			}
			
		  accumulate() ;
		  global.elas_energy = ZERO ;
		  elgrp_loop( Q_STRESS ) ;
		  clear_real(force.full_rhs,loc_sys.neq) ;
		  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
                 slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;
          printf("\nNet slip results:\n                time   strand   net slip   energy\n") ;
          printf("EVENT*** %g   %d   %g   %g\n\n", time_data.time , f_index+1 , total_slip , global.elas_energy) ;
         }
    
      else if( min_slip <= ZERO )
         {
          printf("\nWARNING - non-positive slip found for fault; aborting\n") ;
		  (fltgrp_ptr+f_index)->q_amount = -THREE*slip_delta;
		  total_slip += (fltgrp_ptr+f_index)->q_amount ;
		  elgrp_loop( RHS_QUAKE ) ;  /* return to zero total slip (-3*delta) */
		  clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
					   
		  if (PCG == fe_sys.solver)
			{
			 solver( ITER ) ;
			}
		  else
			{
			 solver( BACK ) ;
			}
			
		  accumulate() ;
		  global.elas_energy = ZERO ;
		  elgrp_loop( Q_STRESS ) ;
		  clear_real(force.full_rhs,loc_sys.neq) ;
		  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
                 slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;
          printf("\nNet slip results:\n                time   strand   net slip   energy\n") ;
          printf("EVENT** %g   %d   %g   %g\n\n", time_data.time , f_index+1 , total_slip , global.elas_energy) ;
         }


      else if( min_slip > ONE )
         {
          printf("\n Slip maxed out for fault; applying max\n") ;
		  (fltgrp_ptr+f_index)->q_amount = -slip_delta;
		  total_slip += (fltgrp_ptr+f_index)->q_amount ;
		  elgrp_loop( RHS_QUAKE ) ;  /* return to max nominal slip (-delta) */
		  clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
					   
		  if (PCG == fe_sys.solver)
			{
			 solver( ITER ) ;
			}
		  else
			{
			 solver( BACK ) ;
			}
			
		  accumulate() ;
		  global.elas_energy = ZERO ;
		  elgrp_loop( Q_STRESS ) ;
		  clear_real(force.full_rhs,loc_sys.neq) ;
		  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
                 slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;
          printf("\nNet slip results:\n                time   strand   net slip   energy\n") ;
          printf("EVENT** %g   %d   %g   %g\n\n", time_data.time , f_index+1 , total_slip , global.elas_energy) ;
         }


      else
         {
		  (fltgrp_ptr+f_index)->q_amount = min_slip-THREE*slip_delta;
		  total_slip += (fltgrp_ptr+f_index)->q_amount ;
		  elgrp_loop( RHS_QUAKE ) ;  /* slip by amount to minimize energy (quadratic fit) */
		  clear_real(global.del_displ, loc_sys.numnp*fe_sys.ndof) ;
					   
		  if (PCG == fe_sys.solver)
			{
			 solver( ITER ) ;
			}
		  else
			{
			 solver( BACK ) ;
			}
			
		  accumulate() ;
		  global.elas_energy = ZERO ;
		  elgrp_loop( Q_STRESS ) ;
		  clear_real(force.full_rhs,loc_sys.neq) ;
		  printf("\nslip delta=%g  applied=%g  slip total= %g  strain energy=%g\n\n",
                 slip_delta,(fltgrp_ptr+f_index)->q_amount,total_slip,global.elas_energy) ;
          printf("\nNet slip results:\n                time   strand   net slip   energy\n") ;
          printf("EVENT* %g   %d   %g   %g\n\n", time_data.time , f_index+1 , total_slip , global.elas_energy) ;
         }
         
     }
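For reference, the kink / min_slip lines above amount to a three-point quadratic (parabola-vertex) estimate of the slip that minimizes strain energy. A stand-alone illustration, assuming slip_delta stays at 0.5 so that energy1, energy2 and energy3 are sampled at total slips 0.5, 1.0 and 1.5, as in the routine above:

#include <stdio.h>

/* Illustration only: fit a parabola through (0.5, e1), (1.0, e2), (1.5, e3)
 * and return the slip at its vertex.  With these sample points the standard
 * vertex formula agrees with the kink / min_slip expressions in fail_loop(). */
static double parabola_min_slip(double e1, double e2, double e3)
{
    double h = 0.5;   /* spacing between the exploratory slip samples */
    return 1.0 + h * (e1 - e3) / (2.0 * (e1 - 2.0 * e2 + e3));
}

int main(void)
{
    double e1 = 1.0, e2 = 0.5, e3 = 1.5;   /* made-up strain energies */
    double kink = (e1 - e2) / (e2 - e3);
    double min_slip = 0.5 - 0.25 * (3.0 * kink - 1.0) / (1.0 - kink);
    printf("vertex formula: %g   kink formula: %g\n",
           parabola_min_slip(e1, e2, e3), min_slip);   /* both print 0.916667 */
    return 0;
}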
Code example #19
0
// Read Sensor data
bool AP_Compass_HMC5843::read()
{
    if (!_initialised) {
        // someone has tried to enable a compass for the first time
        // mid-flight .... we can't do that yet (especially as we won't
        // have the right orientation!)
        return false;
    }
    if (!healthy) {
        if (hal.scheduler->millis() < _retry_time) {
            return false;
        }
        if (!re_initialise()) {
            _retry_time = hal.scheduler->millis() + 1000;
			hal.i2c->setHighSpeed(false);
            return false;
        }
    }

	if (_accum_count == 0) {
	   accumulate();
	   if (!healthy || _accum_count == 0) {
		  // try again in 1 second, and set I2c clock speed slower
		  _retry_time = hal.scheduler->millis() + 1000;
		  hal.i2c->setHighSpeed(false);
		  return false;
	   }
	}

	mag_x = _mag_x_accum * calibration[0] / _accum_count;
	mag_y = _mag_y_accum * calibration[1] / _accum_count;
	mag_z = _mag_z_accum * calibration[2] / _accum_count;
	_accum_count = 0;
	_mag_x_accum = _mag_y_accum = _mag_z_accum = 0;

    last_update = hal.scheduler->micros(); // record time of update

    // rotate to the desired orientation
    Vector3f rot_mag = Vector3f(mag_x,mag_y,mag_z);
    if (product_id == AP_COMPASS_TYPE_HMC5883L) {
        rot_mag.rotate(ROTATION_YAW_90);
    }

    // apply default board orientation for this compass type. This is
    // a noop on most boards
    rot_mag.rotate(MAG_BOARD_ORIENTATION);

    // add user selectable orientation
    rot_mag.rotate((enum Rotation)_orientation.get());

    // add in board orientation from AHRS
    rot_mag.rotate(_board_orientation);

    rot_mag += _offset.get();

    // apply motor compensation
    if(_motor_comp_type != AP_COMPASS_MOT_COMP_DISABLED && _thr_or_curr != 0.0f) {
        _motor_offset = _motor_compensation.get() * _thr_or_curr;
        rot_mag += _motor_offset;
    }else{
        _motor_offset.x = 0;
        _motor_offset.y = 0;
        _motor_offset.z = 0;
    }

    mag_x = rot_mag.x;
    mag_y = rot_mag.y;
    mag_z = rot_mag.z;
    healthy = true;

    return true;
}
Code example #20
0
TEST(MapFilterReduce, Reduce1)
{
  vector<double> v{ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
  auto res = accumulate(v.begin(), v.end(), 0.0, adder());
  ASSERT_THAT(res, DoubleEq(21.0));
}
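The adder functor used here is not shown in this excerpt; a minimal, hypothetical definition that makes the accumulate call compile and the assertion hold could look like this (the project's real adder may differ):

#include <cassert>
#include <numeric>
#include <vector>

// Hypothetical definition; the adder actually used by the test lives elsewhere.
struct adder
{
    double operator()(double acc, double x) const { return acc + x; }
};

int main()
{
    std::vector<double> v{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
    assert(std::accumulate(v.begin(), v.end(), 0.0, adder()) == 21.0);
    return 0;
}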
Code example #21
0
File: OCRer.cpp  Project: peaceleven/pad
// @param img: grayscaled and in RGB format (e.g. loaded from file).
void OCRer::process_image(Mat img) {
	if (_motion) {
		_n_imgs = 0;
		_motion = false;
	}


	if (_n_imgs == 0) {
		_average_img = img;

	} else {
		// detect motion by comparing with _last_img
		vector<uchar> status;
		vector<float> err;
		calcImageDisplacement(_last_img, img, &status, &err);

		int n = 0;
		int l = status.size();
		for (int i = 0; i < l; i++) {
			bool matched = (bool) status[i];
			if (matched) {
				n++;
			}
		}

		float fn  = (float) n;
		float fnl = (float) _last_n_features;
		float p_motion = fabs( (fn - fnl)  / fnl );          // "percentage" of motion

		_last_n_features = n;
		if (p_motion > MOTION_P_THRESHOLD)
			_motion = true;

		else if (_n_imgs < MAX_N_IMGS) {
			float alpha = 1.0f / (float) (_n_imgs + 1);
			Mat sum = Mat::zeros(img.size(), CV_32F);       // has to be CV_32F or CV_64F

			accumulate(img, sum);
			accumulate(_average_img, sum);
			sum.convertTo(_average_img, _average_img.type(), alpha);
		}
		else {
			return ;
		}
	}
	_last_img = img;
	_n_imgs++;


	_tessapi->SetImage((const unsigned char*) _average_img.data,
	                   _average_img.cols, _average_img.rows,
	                   _average_img.channels(), _average_img.step);
	_tessapi->Recognize(NULL);

	ResultIterator *ri = _tessapi->GetIterator();

	/*
	ChoiceIterator* ci;
	if (ri != NULL) {
		while ((ri->Next(RIL_SYMBOL))) {
			const char* symbol = ri->GetUTF8Text(RIL_SYMBOL);

			if (symbol != 0) {
				float conf = ri->Confidence(RIL_SYMBOL);
				std::cout << "\tnext symbol: " << symbol << "\tconf: " << conf << endl;

				const ResultIterator itr = *ri;
				ci = new ChoiceIterator(itr);

				do {
					std::cout << "\t\t" << ci->GetUTF8Text() << " conf: " << ci->Confidence() << endl;
				} while(ci->Next());

				delete ci;
			}

			delete[] symbol;
		}
	}
	*/

	if (ri != NULL) {
		// int l, t, r, b;
		while (ri->Next(RIL_WORD)) {
			// ri->BoundingBox(RIL_WORD, &l, &t, &r, &b);
			// cout << "rect = " << l << ", " << r << ", " << t << ", " << b << endl;

			char *ocr_text = ri->GetUTF8Text(RIL_WORD);

			if (heuristic(ocr_text)) {
				if (ri->WordIsFromDictionary()) {
					_dict_words.insert(string(ocr_text));
					// cout << "conf = " << ri->Confidence(RIL_WORD) << endl;

				} else {
					_live_words.insert(string(ocr_text));
				}
			}

			delete[] ocr_text;
		}

		delete ri;
	}
}
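OpenCV also provides cv::accumulateWeighted for maintaining a running average of frames; a minimal sketch of that alternative (illustrative only, not taken from OCRer.cpp and not equivalent line-for-line to the update above):

#include <opencv2/imgproc.hpp>

// Illustrative running-average update with cv::accumulateWeighted:
// average = (1 - alpha) * average + alpha * img.
void update_average(const cv::Mat& img, cv::Mat& average, int n_imgs)
{
    if (average.empty())
        average = cv::Mat::zeros(img.size(), CV_32FC(img.channels()));
    double alpha = 1.0 / (n_imgs + 1);   // weight given to the newest frame
    cv::accumulateWeighted(img, average, alpha);
}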
Code example #22
0
 /** accumulate data over walkers
  * @param W MCWalkerConfiguration
  * @param wgtnorm weight
  */
 void CompositeEstimatorSet::accumulate(MCWalkerConfiguration& W, RealType wgtnorm) 
 {
   accumulate(W,W.begin(),W.end(),wgtnorm);
 }
Code example #23
0
File: suite.cpp  Project: Zamthos/Server
	// Counts all tests in this and all its embedded suites.
	//
	int
	Suite::total_tests() const
	{
		return accumulate(_suites.begin(), _suites.end(),
						  _tests.size(), SubSuiteTests());
	}
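SubSuiteTests is not shown in this excerpt. Given how accumulate is invoked, a binary functor of roughly this shape would fit, assuming _suites holds pointers to the embedded Suite objects (a hypothetical sketch, not the project's actual definition):

#include <cstddef>

// Hypothetical sketch only; the real SubSuiteTests is defined elsewhere.
struct SubSuiteTests
{
    template <typename SuitePtr>
    std::size_t operator()(std::size_t count, const SuitePtr& suite) const
    {
        return count + suite->total_tests();   // recurse into the embedded suite
    }
};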
Code example #24
0
File: 10.3.cpp  Project: peterocean/cplusplus-primer
int accumulate_test(const std::vector<int> &vec)
{
	return accumulate(vec.cbegin(),vec.cend(),0);
}
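A small usage example for accumulate_test (values chosen for illustration; the declaration mirrors the definition above):

#include <cassert>
#include <vector>

int accumulate_test(const std::vector<int> &vec);   // defined above (10.3.cpp)

int main()
{
    std::vector<int> v{1, 2, 3, 4};
    assert(accumulate_test(v) == 10);   // 0 + 1 + 2 + 3 + 4
    return 0;
}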
Code example #25
0
File: filter.cpp  Project: THTBSE/Stability
// Find center of mass of a bunch of points
point point_center_of_mass(const vector<point> &pts)
{
	point com = accumulate(pts.begin(), pts.end(), point());
	return com / (float) pts.size();
}
Code example #26
0
File: phase_space.cpp  Project: fhoefling/halmd
void phase_space<modules_type>::test()
{
    float_type const epsilon = std::numeric_limits<float_type>::epsilon();

    auto& input_position = input_position_sample->data();
    auto& input_velocity = input_velocity_sample->data();
    auto& input_species = input_species_sample->data();

    // prepare input sample
    BOOST_CHECK_EQUAL(input_position.size(), accumulate(npart.begin(), npart.end(), 0u));
    BOOST_CHECK_EQUAL(input_velocity.size(), accumulate(npart.begin(), npart.end(), 0u));
    for (unsigned int i = 0, n = 0; i < npart.size(); ++i) { // iterate over particle species
        for (unsigned int j = 0; j < npart[i]; ++n, ++j) { // iterate over particles
            vector_type& r = input_position[n];
            vector_type& v = input_velocity[n];
            unsigned int& type = input_species[n];
            r[0] = float_type(j) + float_type(1) / (i + 1); //< a large, non-integer value
            r[1] = 0;
            r[dimension - 1] = - static_cast<float_type>(j);
            v[0] = static_cast<float_type>(i);
            v[1] = 0;
            v[dimension - 1] = float_type(1) / (j + 1);
            type = i;
        }
    }

    // copy input sample to particle
    std::shared_ptr<particle_group_type> particle_group = std::make_shared<particle_group_type>(particle);
    {
        auto phase_space = phase_space_type(particle, particle_group, box);
        phase_space.set("position", input_position_sample);
        phase_space.set("velocity", input_velocity_sample);
        phase_space.set("species", input_species_sample);
        phase_space.set("mass", input_mass_sample);
    }

    // randomly permute particles in memory, do it three times since permutations are
    // not commutative
    shuffle(particle, random);
    shuffle(particle, random);
    shuffle(particle, random);

    // compare output and input, copy GPU sample to host before
    typename modules_type::samples_type result(phase_space_type(particle, particle_group, box));
    auto const& result_position = result.position->data();
    auto const& result_velocity = result.velocity->data();
    auto const& result_species = result.species->data();

    BOOST_CHECK_EQUAL(result_position.size(), accumulate(npart.begin(), npart.end(), 0u));
    for (unsigned int i = 0, n = 0; i < npart.size(); ++i) { // iterate over particle species
        for (unsigned int j = 0; j < npart[i]; ++n, ++j) { // iterate over particles
            // compare positions with a tolerance due to mapping to and from the periodic box
            for (unsigned int k = 0; k < dimension; ++k) {
                BOOST_CHECK_CLOSE_FRACTION(result_position[n][k], input_position[n][k], 10 * epsilon);
            }
        }
    }
    // compare velocities directly as they should not have been modified
    BOOST_CHECK_EQUAL_COLLECTIONS(
        result_velocity.begin(), result_velocity.end()
      , input_velocity.begin(), input_velocity.end()
    );
    // compare particle species
    BOOST_CHECK_EQUAL_COLLECTIONS(
        result_species.begin(), result_species.end()
      , input_species.begin(), input_species.end()
    );
}
Code example #27
0
File: SpParHelper.cpp  Project: Aguomath/CombBLAS_15
void SpParHelper::BipartiteSwap(pair<KEY,VAL> * low, pair<KEY,VAL> * array, IT length, int nfirsthalf, int color, const MPI_Comm & comm)
{
	int nprocs, myrank;
	MPI_Comm_size(comm, &nprocs);
	MPI_Comm_rank(comm, &myrank);

	IT * firsthalves = new IT[nprocs];
	IT * secondhalves = new IT[nprocs];	
	firsthalves[myrank] = low-array;
	secondhalves[myrank] = length - (low-array);

	MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), firsthalves, 1, MPIType<IT>(), comm);
	MPI_Allgather(MPI_IN_PLACE, 0, MPIType<IT>(), secondhalves, 1, MPIType<IT>(), comm);
	
	int * sendcnt = new int[nprocs]();	// zero initialize
	int totrecvcnt = 0; 

	pair<KEY,VAL> * bufbegin = NULL;
	if(color == 0)	// first processor half, only send second half of data
	{
		bufbegin = low;
		totrecvcnt = length - (low-array);
		IT beg_oftransfer = accumulate(secondhalves, secondhalves+myrank, static_cast<IT>(0));
		IT spaceafter = firsthalves[nfirsthalf];
		int i=nfirsthalf+1;
		while(i < nprocs && spaceafter < beg_oftransfer)
		{
			spaceafter += firsthalves[i++];		// post-increment
		}
		IT end_oftransfer = beg_oftransfer + secondhalves[myrank];	// global index (within second half) of the end of my data
		IT beg_pour = beg_oftransfer;
		IT end_pour = min(end_oftransfer, spaceafter);
		sendcnt[i-1] = end_pour - beg_pour;
		while( i < nprocs && spaceafter < end_oftransfer )	// find other recipients until I run out of data
		{
			beg_pour = end_pour;
			spaceafter += firsthalves[i];
			end_pour = min(end_oftransfer, spaceafter);
			sendcnt[i++] = end_pour - beg_pour;	// post-increment
		}
	}
	else if(color == 1)	// second processor half, only send first half of data
	{
		bufbegin = array;
		totrecvcnt = low-array;
		// global index (within the second processor half) of the beginning of my data
		IT beg_oftransfer = accumulate(firsthalves+nfirsthalf, firsthalves+myrank, static_cast<IT>(0));
		IT spaceafter = secondhalves[0];
		int i=1;
		while( i< nfirsthalf && spaceafter < beg_oftransfer)
		{
			//spacebefore = spaceafter;
			spaceafter += secondhalves[i++];	// post-increment
		}
		IT end_oftransfer = beg_oftransfer + firsthalves[myrank];	// global index (within second half) of the end of my data
		IT beg_pour = beg_oftransfer;
		IT end_pour = min(end_oftransfer, spaceafter);
		sendcnt[i-1] = end_pour - beg_pour;
		while( i < nfirsthalf && spaceafter < end_oftransfer )	// find other recipients until I run out of data
		{
			beg_pour = end_pour;
			spaceafter += secondhalves[i];
			end_pour = min(end_oftransfer, spaceafter);
			sendcnt[i++] = end_pour - beg_pour;	// post-increment
		}
	}
	DeleteAll(firsthalves, secondhalves);
	int * recvcnt = new int[nprocs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, comm);   // get the recv counts
	// Alltoall is actually unnecessary, because sendcnt = recvcnt
	// If I have n_mine > n_yours data to send, then I can send you only n_yours 
	// as this is your space, and you'll send me identical amount.
	// Then I can only receive n_mine - n_yours from the third processor and
	// that processor can only send n_mine - n_yours to me back. 
	// The proof follows from induction

	MPI_Datatype MPI_valueType;
	MPI_Type_contiguous(sizeof(pair<KEY,VAL>), MPI_CHAR, &MPI_valueType);
	MPI_Type_commit(&MPI_valueType);

	pair<KEY,VAL> * receives = new pair<KEY,VAL>[totrecvcnt];
	int * sdpls = new int[nprocs]();	// send displacements (zero initialized)
	int * rdpls = new int[nprocs](); 
	partial_sum(sendcnt, sendcnt+nprocs-1, sdpls+1);
	partial_sum(recvcnt, recvcnt+nprocs-1, rdpls+1);

	MPI_Alltoallv(bufbegin, sendcnt, sdpls, MPI_valueType, receives, recvcnt, rdpls, MPI_valueType, comm);  // sparse swap
	
	DeleteAll(sendcnt, recvcnt, sdpls, rdpls);
	copy(receives, receives+totrecvcnt, bufbegin);
	delete [] receives;
}
Code example #28
0
File: clipper_stats.cpp  Project: cctbx/clipper
void Generic_ordinal::add_pass_2( const ftype& value )
{
  accumulate( value );
}
Code example #29
0
 /**
  * @param A: Array of integers.
  * @return The single number.
  */
 int singleNumber(vector<int> &A) {
     return accumulate(A.cbegin(), A.cend(), 0, std::bit_xor<int>());
 }
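The XOR fold works because x ^ x == 0 and 0 ^ x == x, so values that appear twice cancel and only the unique element survives. A small usage sketch (values chosen for illustration):

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    std::vector<int> A{4, 7, 4, 9, 9};   // every value except 7 appears twice
    int single = std::accumulate(A.cbegin(), A.cend(), 0, std::bit_xor<int>());
    assert(single == 7);
    return 0;
}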
Code example #30
0
// coverage starts with zeros and its size equals the number of routes
// break through : spawn
void Place(const vector<Creep>& creeps, 
           vector<Creep>& after_simulation_creeps,
           Count& money) {
    
    bool no_hp_miss = true;
    for (auto c : route_miss_hp) {
        if (c > 0) {
            no_hp_miss = false;
            break;
        }
    }
    if (no_hp_miss) {
        return;
    }
    
    vector<double> buf_coverage(current_coverage.size());
    auto sum = [](double c_0, double c_1) {
        return c_0 + c_1;
    };
    auto can_wound = [](const Position& creep, const Position& tower, const Position& base) {
        Indent c = creep - base;
        Indent t = tower - base;
        int d = (c.row*c.row + c.col*c.col) - (t.col*t.col + t.row*t.row);
        return d >= 4;
    };
    auto& ts = tower_manager_->towers();
    
    auto func = [&](const Item& i_0, const Item& i_1) {
        double d_0 = Score(i_0);
        double d_1 = Score(i_1);
        return d_0 > d_1;
        //return i_0.miss_hp_coverage < i_1.miss_hp_coverage; 
    };
    set<Item, decltype(func)> best_items(func);
    const int BEST_MAX_COUNT = 10;
    for (const Position& p : tower_manager_->open_tower_positions()) {
        for (Index i = 0; i < ts.size(); ++i) {
            Item item;
            item.tower = i;
            item.position = p;
            auto coverage = ComputeCoverage(*tower_manager_, item);
            item.count = int(accumulate(coverage.begin(), coverage.end(), 0));
            transform(coverage.begin(), coverage.end(), coverage.begin(), [&](double d) {
                if (d > 1) {
                    d = (d-1)/2 + 1;
                }
                return d*ts[i].dmg;
            });
            if (item.count == 0) {
                continue;
            }
            bool stupid = true;
            // set another coverage to see how good it is against all those current creeps
            item.miss_hp_coverage = 0;
            for (auto i = 0; i < coverage.size(); ++i) {
                if (route_miss_hp[i] > 0 && coverage[i] > 0.5) {
                    item.miss_hp_coverage += double(1.) * std::min<Count>(route_miss_hp[i], coverage[i]) / route_miss_hp[i];
                    stupid = false;
                    //break;
                }
                //coverage[i] /= count;
            }
            item.miss_hp_coverage = int(item.miss_hp_coverage);
            if (stupid) continue;
            stupid = true;
            for (auto& b : break_through) {
                auto bb = next_->base_loc_for_spawn(b.spawn_loc);
                if (coverage[b.spawn_loc] > 0 && can_wound(b.cur_loc, p, bb)) {
                    stupid = false;
                    break;
                }
            }
            if (stupid) continue;
            transform(coverage.begin(), coverage.end(), 
                      current_coverage.begin(), 
                      buf_coverage.begin(), sum);
            item.min_total_coverage = MinTotalCoverage(buf_coverage);
            
            if (best_items.size() < BEST_MAX_COUNT) {
                best_items.insert(item);
            }else if (best_items.begin()->miss_hp_coverage < item.miss_hp_coverage) {
                best_items.erase(best_items.begin());
                best_items.insert(item);
            }
        }
    }
    vector<Item> v{best_items.begin(), best_items.end()};
    pair<Item, bool> p = ChooseItem(v, money);
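    // Note: the next four lines are a no-op (the increment and decrement cancel);
    // they look like a leftover breakpoint anchor for debugging this position.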
    if (p.first.position.col == 11 && p.first.position.row == 10) {
        p.first.position.col++;
        p.first.position.col--;
    }
    if (p.second) {
        tower_manager_->PlaceTower(p.first);
        money -= tower_manager_->towers()[p.first.tower].cost;
        auto coverage = ComputeCoverage(*tower_manager_, p.first);
        transform(coverage.begin(), coverage.end(), 
                  current_coverage.begin(), 
                  current_coverage.begin(), sum);
        
    }
    
}