int main(int argc, char *argv[]) { int n = 10000; /* The size of the set; for {1, 2, 3, 4} it's 4 */ int k = 2; /* The size of the subsets; for {1, 2}, {1, 3}, ... it's 2 */ int comb[16]; /* comb[i] is the index of the i-th element in the combination */ /* Setup comb for the initial combination */ int i; for (i = 0; i < k; ++i) { comb[i] = i; } /* Print the first combination */ //printc(comb, k); /* Generate and print all the other combinations */ while (next_comb(comb, k, n)) { //printc(comb, k); } printf("\n all done\n"); return 0; }
/*!
 * Rebuilds the network matrices for the current ablation set and recomputes
 * the equilibrium state (eqS, eqV, vmean).
 *
 * Guarded by has_gone so at most one rebuild happens per rhs call.  On every
 * pass after the first, advances to the next ablation combination (writing
 * out the previous run's data and recording the new ablation indices); on
 * exhaustion it reports a fatal error.  It then prunes all connections
 * touching ablated neurons, forms the graph Laplacian of the gap-junction
 * matrix, and solves the linear system C * eqV = b for the equilibrium
 * membrane potentials.
 */
void c_elegans::update(){
    if(has_gone){
        has_gone=false;//prevent more than one update happening without call to rhs
        if(!first_round){
            // Advance the ablation combination; a true return here signals
            // that all combinations for this index have been used up.
            if(next_comb(abl_neur, num_neur)){
                holder->write_dat();
                char ind_str[20];//won't ever have a 20 digit index
                //handy buffer to overflow for those hacking this.
                sprintf(ind_str, "%d", cur_ind);
                err(std::string("Combinations exhausted in index ") + ind_str,
                    "c_elegans::update","rhs/c_elegans.cpp", FATAL_ERROR);
            }
            // Record which neurons are ablated for this run.
            auto dat_inds = std::shared_ptr<writer>(new writer(true));
            dat_inds->add_data(data::create("Ablations", abl_neur.data(), abl_neur.size()),
                    writer::OTHER);
            holder->add_writer(dat_inds);
        }
        else{
            // First call: postprocess() already set up abl_neur; just clear the flag.
            first_round=false;
        }
        Matrix<double, Dynamic, Dynamic> ag_dense(num_neur, num_neur);
        //insert code to zero it out later
        // Work on copies of the full connectivity so pruning is reversible
        // across ablation runs.
        auto ag_m = ag_full;
        AEchem_trans = AEchem_trans_full;
        // Predicate for Eigen's prune(): drop any entry whose row or column
        // belongs to an ablated neuron.
        auto fncval = [this](int i, int j, double val)->bool{
            for(int kk = 0; kk < (int)this->abl_neur.size(); kk++){
                if((int)this->abl_neur[kk] == i || (int)this->abl_neur[kk] == j){
                    return false;
                }
            }
            return true;
        };
        AEchem_trans.prune(fncval);
        ag_m.prune(fncval);
        // Densify the pruned gap-junction matrix (sparse * identity).
        ag_dense = ag_m * Matrix<double, num_neur, num_neur, ColMajor>::Identity();
        dmd(ag_dense);
        // Column sums become the diagonal of the degree matrix below.
        auto sumvals = ag_dense.colwise().sum();
        sparse_type temp(num_neur, num_neur);
        //generate the sparse diagonal matrix to build the lapacian
        std::vector<Triplet<double, int> > temp_tr;
        for(int i = 0; i < (int)num_neur; i++){
            temp_tr.push_back(Triplet<double, int>(i, i, sumvals[i]));
        }
        temp.setFromTriplets(temp_tr.begin(), temp_tr.end());
        // Graph Laplacian L = D - A of the pruned gap-junction network.
        laplacian = temp - ag_m;
        //initialize the Echem array: inhibitory reversal potential for
        //GABAergic neurons, excitatory otherwise
        for(size_t i = 0; i < num_neur; i++){
            if(GABAergic[i]){
                Echem[i] = EchemInh;
            }
            else{
                Echem[i] = EchemEx;
            }
        }
        //Initialize the sig array (synaptic activation variables)
        for(size_t i = 0; i < num_neur; i++){
            sig[i] = 0.5;
        }
        // Equilibrium synaptic activation from rise/decay rates ar, ad.
        eqS = sig * (ar/(ar*sig + ad));
        //more initialization of temporary dense matrices
        Matrix<double, Dynamic, Dynamic> ldense(num_neur,num_neur);
        ldense = laplacian*Matrix<double, num_neur, num_neur>::Identity();
        Matrix<double, Dynamic, Dynamic> aedense(num_neur, num_neur);
        aedense= AEchem_trans*Matrix<double, num_neur, num_neur>::Identity();
        Matrix<double, Dynamic, Dynamic> C(num_neur, num_neur);
        //create the C matrix: membrane leak plus electrical coupling
        C= memG*Matrix<double, num_neur, num_neur>::Identity() + gelec*ldense;
        //initialize matrix to modify diagonal part of C
        Matrix<double, num_neur, 1> tmp =(gchem * aedense * eqS.matrix());
        for(size_t i = 0; i < num_neur; i++){
            C(i, i) += tmp(i);
        }
        // External current stimulus; indices 276 and 278 are hard-coded —
        // presumably specific sensory neurons. TODO confirm which neurons
        // these indices map to and lift amp/indices into the input file.
        Matrix<double, num_neur, 1> Ivals;
        Ivals.setZero();
        double amp=2e4;
        Ivals[276]=amp;
        Ivals[278]=amp;
        Matrix<double, num_neur, 1> b;
        //create B vector: chemical drive plus leak and injected current
        b= gchem*aedense*(eqS * Echem).matrix();
        for(size_t i = 0; i < num_neur; i++){
            b[i] += (memG*memV + Ivals[i]);
        }
        //calculate eqV by solving C * eqV = b (direct inverse of the dense
        //num_neur x num_neur system)
        eqV.matrix() = C.inverse()*b;
        // Mean voltage offset via the logistic inverse of sig.
        vmean = eqV+(1.0/beta) * (1.0/sig - 1).log();
        // Zero out all equilibrium quantities for the ablated neurons.
        for(auto val : abl_neur){
            eqV[val] = vmean [val] = eqS[val] = 0;
        };
    }
}
/*!
 * This function does the processing for the c_elegans class.
 *
 * It initializes the various matrices and reads values from the input files,
 * fast-forwards the ablation combination to the configured starting index,
 * registers a writer for the initial ablation set, wires a placeholder
 * variable into the controller, and finally triggers the first update().
 *
 * @param in Input parameter store; retrieve() failures are handled by the
 *           input class itself (behavior not visible here).
 */
void c_elegans::postprocess(input& in){
    rhs_type::postprocess(in);
    // System dimension is membrane voltage + synaptic state per neuron.
    if(dimension != num_neur*2){
        err("Dimension must be 558, which is double the number of neurons",
                "", "", FATAL_ERROR);
    }
    // Model parameters from the input file.
    in.retrieve(beta, "beta", this);
    in.retrieve(tau, "tau", this);
    in.retrieve(gelec, "gelec", this);
    in.retrieve(gchem, "gchem", this);
    in.retrieve(memV, "memV", this);
    in.retrieve(memG, "memG", this);
    in.retrieve(EchemEx, "EchemEx", this);
    in.retrieve(EchemInh, "EchemInh", this);
    in.retrieve(ar, "ar", this);
    in.retrieve(ad, "ad", this);
    // Connectivity matrices are loaded from files named in the input.
    std::string ag_fname, a_fname;
    in.retrieve(ag_fname, "ag_mat", this);
    in.retrieve(a_fname, "a_mat", this);
    sparse_type a_m(num_neur, num_neur);
    ag_full.resize(num_neur, num_neur);
    laplacian.resize(num_neur, num_neur);
    read_mat(ag_fname, ag_full);
    read_mat(a_fname, a_m);
    //create transposed sparse matrix AEchem
    AEchem_trans_full.resize(num_neur, num_neur);
    AEchem_trans_full = a_m.transpose();
    AEchem_trans.resize(num_neur, num_neur);
    //do any needed fake iterations, must make more general at some point
    size_t num_comb;
    int iterations;
    in.retrieve(num_comb, "num_comb", this);
    in.retrieve(iterations, "iterations", this);
    in.retrieve(cur_ind, "!start_ind", this);
    // Ablation set starts as all zeros, then advances to the first
    // combination (skipped for the single-ablation case — presumably because
    // the zeroed set is already the intended start; TODO confirm).
    abl_neur.resize(num_comb);
    for(auto& val : abl_neur){
        val = 0;
    }
    if(abl_neur.size() != 1){
        next_comb(abl_neur, num_neur);
    }
    // Fast-forward cur_ind * iterations combinations so parallel/restarted
    // runs resume at the right ablation set.
    for(int i = 0; i < cur_ind; i++){
        for(int j = 0; j < iterations; j++){
            if(next_comb(abl_neur, num_neur)){
                char ind_str[20];//won't ever have a 20 digit index
                //handy buffer to overflow for those hacking this.
                sprintf(ind_str, "%d", (int)cur_ind);
                err(std::string("Combinations exhausted in index ") + ind_str,
                    "c_elegans::postprocess","rhs/c_elegans.cpp", FATAL_ERROR);
            }
        }
    }
    // Register a writer recording the initial ablation indices.
    auto dat_inds = std::shared_ptr<writer>(new writer(true));
    dat_inds->add_data(data::create("Ablations", abl_neur.data(), abl_neur.size()),
            writer::OTHER);
    holder->add_writer(dat_inds);
    //write first ablation data
    //set up dummy connection to toroidal controller for now
    controller* cont;
    in.retrieve(cont, "controller", this);
    auto val = std::make_shared<variable>();
    val->setname("c_elegans_quickfix");
    val->holder = holder;
    val->parse("0.1");
    in.insert_item(val);
    cont->addvar(val);
    in.retrieve(dummy, val->name(), this);
    has_gone=true; //is true at first to allow update of zero index to occur
    first_round=true;
    update();
}