/***********************************************************
 * Generate a uniform random number in [0, 1).
 ***********************************************************/
double RandomNum(bool reseed) {
  static bool firstTime = true;
  static std::default_random_engine gen;
  static std::uniform_real_distribution<double> unif(0.0, 1.0);

  if (firstTime || reseed) {
    std::random_device rd;
    gen.seed(rd());
    firstTime = false;
  }  
  return unif(gen);
}
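A minimal usage sketch, assuming the function above is visible in a translation unit that includes <random> and <iostream>:

#include <iostream>

double RandomNum(bool reseed);  // defined above

int main() {
  for (int i = 0; i < 3; ++i)
    std::cout << RandomNum(false) << '\n';  // draws in [0, 1)
  std::cout << RandomNum(true) << '\n';     // force a reseed, then draw
  return 0;
}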
Example #2
File: Ball.cpp Project: jhpy1024/sdlPong
Vector2 Ball::genRandomVelocity() const
{
	Vector2 vec;

	// Seed once: re-seeding with time(nullptr) on every call would repeat
	// the same velocity for calls made within the same second.
	static std::default_random_engine engine(
			static_cast<unsigned>(time(nullptr)));
	static std::bernoulli_distribution distribution;

	vec.setX(distribution(engine) ? 1 : -1);
	vec.setY(distribution(engine) ? 1 : -1);

	return vec;
}
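If repeatable start times are a concern (runs launched within the same second would produce the same velocity sequence), a nondeterministic seed is a common alternative; a one-line sketch, assuming <random> is included:

	static std::default_random_engine engine{std::random_device{}()};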
Example #3
namespace math {

#ifdef HAVE_RANDOM
std::default_random_engine generator_;
#endif

void InitRandom() {
  Clock clock;
  TimeStamp now;
  clock.CurrentTime(&now);

  uint64_t seed = (static_cast<uint64_t>(now.MicroSeconds()) << 32) +
                   static_cast<uint64_t>(getpid());
#ifdef HAVE_RANDOM
  generator_.seed(seed);
#elif defined(_WIN32)
  srand(static_cast<unsigned int>(seed));  // srand() takes unsigned int; high bits are dropped
#else
  srandom(static_cast<unsigned int>(seed));
#endif
}

int Random(int lower, int upper) {
#ifdef HAVE_RANDOM
  std::uniform_int_distribution<int> distribution(lower, upper);
  return distribution(generator_);
#elif defined(_WIN32)
  // rand() % span is slightly biased when span does not divide RAND_MAX + 1;
  // see the unbiased sketch after this example.
  return (lower +
          (rand() % (upper - lower + 1)));  // NOLINT(runtime/threadsafe_fn)
#else
  return (lower + (random() % (upper - lower + 1)));
#endif
}
}  // namespace math
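The rand()/random() fallbacks above compute lower + r % span, which is slightly biased whenever span does not evenly divide RAND_MAX + 1. A sketch of a rejection-sampling fallback that removes the bias (the function name is hypothetical; assumes 0 < span <= RAND_MAX):

int UnbiasedRandom(int lower, int upper) {
  const unsigned span = static_cast<unsigned>(upper - lower + 1);
  // limit is the largest multiple of span not exceeding RAND_MAX;
  // draws at or above it are rejected.
  const unsigned limit = (static_cast<unsigned>(RAND_MAX) / span) * span;
  unsigned r;
  do {
    r = static_cast<unsigned>(rand());  // NOLINT(runtime/threadsafe_fn)
  } while (r >= limit);
  return lower + static_cast<int>(r % span);
}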
Example #4
	namespace Random {
		std::default_random_engine gen(time(0));

		sf::Color getRandomColor() {
			// gen() % 256 is slightly biased, but negligibly so for 8-bit channels.
			return sf::Color(gen()%256, gen()%256, gen()%256);
		}

		// Uniform draw in [min, max); note the EXCLUSIVE upper bound.
		unsigned long int getUnsignedRandom(unsigned long int min=0,
				unsigned long int max=gen.max()) {
			if (max <= min) return min; // avoid modulo by zero on an empty range
			return min + (gen() % (max-min));
		}

		// zero=true: returns -1, 0 or 1; zero=false: returns -1 or 1.
		short int getRandomSign(bool zero=true) {
			if(zero) return (-1 + getUnsignedRandom(0, 3));
			else return (getUnsignedRandom(0, 11)%2==0)?-1:1;
		}

		long int getRandom(long int min=0, long int max=gen.max()/2-1) {
			return getUnsignedRandom(0, math::abs(max-min))+min;
		}

		// Draws from [x1, x2) or [x3, x4), each with probability 1/2.
		long int getBoundRandom(long int x1, long int x2, long int x3, long int x4) {
			return getRandomSign(false)<0?getRandom(x1, x2):getRandom(x3, x4);
		}

		bool getRandomBool(void) {return getUnsignedRandom(0, 11)%2;}
	}
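For comparison, a bias-free variant with an INCLUSIVE upper bound, as it might look inside the Random namespace above (the name is hypothetical):

		unsigned long int getUnsignedRandomInclusive(unsigned long int min,
				unsigned long int max) {
			std::uniform_int_distribution<unsigned long int> dist(min, max);
			return dist(gen);  // uniform over [min, max], no modulo bias
		}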
Example #5
std::string EasterEggFactory::GetRandomEggTextureFilepath()
{
    // MAKE SURE THAT THE RANDOM NUMBER GENERATOR HAS BEEN INITIALIZED.
    static bool randomNumberGeneratorInitialized = false;
    static std::default_random_engine randomNumberGenerator;
    if (!randomNumberGeneratorInitialized)
    {
        // Initialize the random number generator with a seed based on the current time.
        unsigned long seed = static_cast<unsigned long>(std::chrono::system_clock::now().time_since_epoch().count());
        randomNumberGenerator.seed(seed);

        randomNumberGeneratorInitialized = true;
    }

    // SELECT A RANDOM EGG TEXTURE.
    // A random index is selected, restricted to the valid range of indices
    // into the egg texture array.
    unsigned int randomEggTextureIndex = ( randomNumberGenerator() % EGG_TEXTURE_FILEPATHS.size() );
    return EGG_TEXTURE_FILEPATHS[randomEggTextureIndex];
}
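The engine() % size draw above carries a slight modulo bias toward low indices; since <random> is already in use, a distribution over the index range avoids it. A sketch under the same assumptions (EGG_TEXTURE_FILEPATHS non-empty, as above):

    std::uniform_int_distribution<size_t> indexDistribution(0, EGG_TEXTURE_FILEPATHS.size() - 1);
    size_t randomEggTextureIndex = indexDistribution(randomNumberGenerator);
    return EGG_TEXTURE_FILEPATHS[randomEggTextureIndex];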
Example #6
File: vertex.cpp Project: DumexVN/RanAgg
Vertex::Vertex()
{
    myWeight = 1;
    parent = 0;
    isDraggedAlong = false;
    isAbsorbed = false;
    noOfChild = 0;
    ExtraWeight = 0;
    myRealCommunity = -1;
    // Note: msec() is only the millisecond-of-second component (0-999), so
    // vertices constructed within the same millisecond share a seed.
    gen.seed(QTime::currentTime().msec());
}
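Since msec() spans only 0-999, a wider clock yields distinct seeds far more often; a one-line sketch assuming Qt's QDateTime is available in this project:

    gen.seed(static_cast<unsigned>(QDateTime::currentMSecsSinceEpoch()));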
Example #7
File: main.cpp Project: Jettriangle/Deque
TEST_P(DequeTest, TestCorrectOperations) {
	std::uniform_int_distribution <int> getOperation(0, 5);
	generator.seed(5);
	std::deque <int> stldeq;
	mydeque <int> mydeq;
	for(size_t i = 0; i < GetParam(); ++i) {
		Operations thisOper = static_cast<Operations>  (getOperation(generator));	
		switch(thisOper) {
		case PUSH_BACK: {
		    int tmpelem = dist(generator);
		    mydeq.push_back(tmpelem);
		    stldeq.push_back(tmpelem);
		}
		break;
		case PUSH_FRONT: {
		    int tmpelem = dist(generator);
		    mydeq.push_front(tmpelem);   // was push_back: the front case must use push_front
		    stldeq.push_front(tmpelem);
		}
		break;
		case POP_BACK: {
		    if(stldeq.size()) {
			mydeq.pop_back();
			stldeq.pop_back();
		    }
		}
		break;
		case POP_FRONT: {
		    if(stldeq.size()) {
			mydeq.pop_front();
			stldeq.pop_front();
		    }
		}
		break;
		case SIZE_EQUAL: {
		    int size_stl = stldeq.end() - stldeq.begin();
		    int size_my = mydeq.end() - mydeq.begin();
		    ASSERT_EQ(size_stl, size_my);
		}
		break;
		case EQUAL: {
		    ASSERT_TRUE(std::equal(mydeq.begin(), mydeq.end(), stldeq.begin()));

		}
		break;
		default: ASSERT_TRUE(false);
		break;
		}
	}
	SUCCEED();
}
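TEST_P needs a matching instantiation to run; a sketch with googletest, assuming DequeTest derives from ::testing::TestWithParam<size_t> (the instantiation name and operation counts are arbitrary; older googletest releases spell the macro INSTANTIATE_TEST_CASE_P):

INSTANTIATE_TEST_SUITE_P(OperationCounts, DequeTest,
                         ::testing::Values(100, 1000, 10000));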
Example #8
File: main.cpp Project: ale-git/yarp
    void doInit()
    {
        mutex.lock();

        printf("Connecting with game server\n");
        Network::connect(port.getName(), SERVER_NAME);

        myLife=6;

        randengine.seed(0);  // fixed seed: every run reproduces the same sequence

        mutex.unlock();
    }
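A sketch of the same initialization with RAII locking, assuming a std::mutex in place of the project's mutex type:

    void doInit()
    {
        std::lock_guard<std::mutex> guard(mutex);  // released on every exit path

        printf("Connecting with game server\n");
        Network::connect(port.getName(), SERVER_NAME);

        myLife = 6;

        randengine.seed(0);
    }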
Example #10
int randomInteger(int max, int min) {
    if (max == min)
        return max;

    orAssertGreaterThan(max, min);

    if (!engineIsSeeded) {
        // Narrow the clock count explicitly; seed() takes the engine's result_type.
        engine.seed(static_cast<unsigned>(
            std::chrono::system_clock::now().time_since_epoch().count()));
        engineIsSeeded = true;
    }

    // Cache one distribution per range so repeated calls reuse it.
    int range = max - min;
    auto elem = distributions.find(range);
    if (elem == distributions.end()) {
        distributions[range] = std::uniform_int_distribution<int>(0, range);
        return distributions[range](engine) + min;
    } else {
        return elem->second(engine) + min;  // clearer than std::get<1>(*elem)
    }
}
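Usage sketch; engine, engineIsSeeded and distributions are the file-scope statics this snippet assumes, and note the (max, min) argument order:

int roll = randomInteger(6, 1);  // uniform over [1, 6]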
Example #11
int main(int argc, char** argv) {
  // Google tools initialization
  google::InitGoogleLogging(argv[0]);
  google::SetUsageMessage(
      "A Connect Four game based on Minimax with Alpha-Beta pruning.");
  google::ParseCommandLineFlags(&argc, &argv, true);
  // Random seed
  PRNG.seed(FLAGS_seed);
  // Show used options
  LOG(INFO) << "-o " << FLAGS_o;
  LOG(INFO) << "-rows " << FLAGS_rows;
  LOG(INFO) << "-cols " << FLAGS_cols;
  LOG(INFO) << "-seed " << FLAGS_seed;
  LOG(INFO) << "-ai " << FLAGS_ai;
  LOG(INFO) << "-max_depth " << FLAGS_max_depth;
  LOG(INFO) << "-wh " << FLAGS_wh;
  LOG(INFO) << "-random " << FLAGS_random;
  // Play!
  Game game;
  game.Play();
  return 0;
}
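For context, PRNG is a file-scope engine in the original project; a minimal declaration consistent with the PRNG.seed(FLAGS_seed) call above (an assumption, not the project's actual code):

std::default_random_engine PRNG;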
Example #12
int main(int argc, char* argv[])
{
    fstream myfile, gradfile;
    gradfile.open("gradsRNN.txt", ios::trunc | ios::out);

    double dt = 1.0;
    double tau = 30.0;

    int PHASE=LEARNING;
    int RANDW = 0;
    if (argc > 1)
        for (int nn=1; nn < argc; nn++)
        {
            if (strcmp(argv[nn], "test") == 0) { PHASE = TESTING; cout << "Test mode. " << endl; }
            if (strcmp(argv[nn], "RANDW") == 0) { RANDW = 1; } // Randomize weights. Only useful for 'test' mode.
            if (strcmp(argv[nn], "METHOD") == 0) { METHOD = argv[nn+1]; }
            if (strcmp(argv[nn], "MODULTYPE") == 0) { MODULTYPE = argv[nn+1]; }
            if (strcmp(argv[nn], "SQUARING") == 0) { SQUARING = atoi(argv[nn+1]); }
            if (strcmp(argv[nn], "DEBUG") == 0) { DEBUG = atoi(argv[nn+1]); }
            if (strcmp(argv[nn], "G") == 0) { G = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "ETA") == 0) { ETA = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "TAU") == 0) { tau = atof(argv[nn+1]); }
            //if (strcmp(argv[nn], "INPUTMULT") == 0) { INPUTMULT = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "ALPHAMODUL") == 0) { ALPHAMODUL = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "PROBAMODUL") == 0) { PROBAMODUL = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "ALPHATRACE") == 0) { ALPHATRACE = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "ALPHATRACEEXC") == 0) { ALPHATRACEEXC = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "RNGSEED") == 0) { RNGSEED = atof(argv[nn+1]); }
            if (strcmp(argv[nn], "MAXDW") == 0) { MAXDW = atof(argv[nn+1]); }
        }

    string SUFFIX = "_G" + to_string(G) + "_MAXDW" + to_string(MAXDW) + "_ETA" + to_string(ETA) + "_ALPHAMODUL" + to_string(ALPHAMODUL) + "_PROBAMODUL" + to_string(PROBAMODUL) + "_SQUARING" +to_string(SQUARING) + "_MODULTYPE-" + MODULTYPE +   
        "_ALPHATRACE" + to_string(ALPHATRACE) + "_METHOD-" + METHOD +  "_ATRACEEXC" + to_string(ALPHATRACEEXC) + "_TAU" + to_string(tau) + 
        //"_INPUTMULT" +to_string(INPUTMULT) + 
        "_RNGSEED" + 
        to_string(RNGSEED);
    cout << SUFFIX << endl;

    myrng.seed(RNGSEED);
    srand(RNGSEED);


    int trialtype;


    int NBTRIALS = 507;
    int TRIALTIME = 300;
    int TIMEINPUT = 100;
    int TIMEMODUL = 210;
    int TIMERESP = 200;

    VectorXi modulmarker(NBNEUR); modulmarker.setZero();

    if (PHASE == TESTING) 
    {
        NBTRIALS = 20*NBPATTERNS;
        //NBTRIALS = 40*NBPATTERNS;
        //TRIALTIME = 1500;
    }



    MatrixXd dJ(NBNEUR, NBNEUR); dJ.setZero();
    MatrixXd win(NBNEUR, NBIN); win.setRandom(); //win.row(0).setZero(); // Input weights are uniformly chosen between -1 and 1, except possibly for output cell (not even necessary). No plasticity for input weights.

    
    win *= .2;



    MatrixXd J(NBNEUR, NBNEUR);


    cout << Uniform(myrng) << endl;
    randJ(J); // Randomize recurrent weight matrix, according to the Sompolinsky method (Gaussian(0,1), divided by sqrt(ProbaConn*N) and multiplied by G - see definition of randJ() below).

    // If in the TESTING mode, read the weights from a previously saved file:
    if (PHASE == TESTING){
        if (RANDW == 0){
        //readWeights(J, "J.dat");
        //readWeights(win, "win.dat");
        readWeights(J, "J" + SUFFIX + ".dat");
        readWeights(win, "win" + SUFFIX + ".dat"); // win doesn't change over time.
        }
        else cout << "Using random weights." << endl;
    }


//cout << J(0,0) << " " << win(1,1) << endl;

    VectorXd meanabserrs(NBTRIALS); meanabserrs.setZero();
    VectorXd lateral_input;
    VectorXd total_exc(NBNEUR), delta_total_exc(NBNEUR), delta_total_exc_sq(NBNEUR), total_exc_prev(NBNEUR);
    VectorXd delta_r(NBNEUR), delta_r_sq(NBNEUR), r_trace(NBNEUR), r_trace2(NBNEUR);
    r_trace.fill(0); r_trace2.fill(0);
    VectorXd delta_x(NBNEUR), delta_x_sq(NBNEUR), x_trace(NBNEUR), x_trace2(NBNEUR), delta_x_cu(NBNEUR);
    total_exc.fill(0); total_exc_prev.fill(0); x_trace.fill(0); x_trace2.fill(0);
    VectorXd modul(NBNEUR); modul.setZero();
    VectorXd modul_trace(NBNEUR); modul_trace.setZero();
    MatrixXd rs(NBNEUR, TRIALTIME); rs.setZero();
    MatrixXd hebb(NBNEUR, NBNEUR);  
    VectorXd x(NBNEUR), r(NBNEUR), rprev(NBNEUR), dxdt(NBNEUR), k(NBNEUR), 
             input(NBIN), deltax(NBNEUR);
    VectorXd         etraceDELTAX(NBNEUR), etraceDELTAXCUALT(NBNEUR), etraceDELTAXCU(NBNEUR), etraceNODEPERT(NBNEUR), etraceDELTAXOP(NBNEUR), etraceDELTAX31(NBNEUR), etraceDELTAXSIGNSQRT(NBNEUR), etraceDELTAXSIGNSQ(NBNEUR), etraceEH(NBNEUR);
    VectorXd         gradDELTAX(NBNEUR), gradDELTAXCUALT(NBNEUR), gradDELTAXCU(NBNEUR), gradNODEPERT(NBNEUR), gradDELTAXOP(NBNEUR), gradBP(NBNEUR), gradDELTAX31(NBNEUR), gradDELTAXSIGNSQRT(NBNEUR), gradDELTAXSIGNSQ(NBNEUR), gradEH(NBNEUR);
    x.fill(0); r.fill(0);


    double modul0;
    double rew, rew_trace, delta_rew;
    double tgtresp;
    double predictederr;

    VectorXd err(TRIALTIME); 
    VectorXd meanabserrtrace(NBPATTERNS);
    double meanabserr;

    MatrixXd dJtmp, Jprev, Jr;


    // Auxiliary variables for speeding things up a little bit.
    double hebbmat[NBNEUR][NBNEUR];
    double rprevmat[NBNEUR], dx2[NBNEUR];
    double xmat[NBNEUR], xtracemat[NBNEUR];

    double dtdivtau = dt / tau;



    meanabserrtrace.setZero();




    for (int numtrial=0; numtrial < NBTRIALS; numtrial++)
    {



        // We use native-C array hebbmat for fast computations within the loop, then transfer it back to Eigen matrix hebb for plasticity computations
        hebb.setZero();
        for (int n1=0; n1 < NBNEUR; n1++)
            for (int n2=0; n2 < NBNEUR; n2++)
                hebbmat[n1][n2] = 0;
        r.setZero();
        x.setZero();

        modul0 = 0;

        if (numtrial %2 == 0)
            input.setRandom();
        
        tgtresp  = input.sum() > 0 ? 1.0 : -1.0;

        etraceDELTAX.setZero();
        etraceEH.setZero();
        etraceDELTAXCU.setZero();
        etraceDELTAXSIGNSQ.setZero();
        etraceDELTAXSIGNSQRT.setZero();
        etraceDELTAXCUALT.setZero();
        etraceDELTAXOP.setZero();
        etraceDELTAX31.setZero();
        etraceNODEPERT.setZero();

        rew = 0;
        rew_trace = 0;

        // Let's start the trial:
        for (int numiter=0; numiter < TRIALTIME;  numiter++)
        {

            rprev = r;
            lateral_input =  J * r;

            if (numiter < TIMEINPUT)
                total_exc =  lateral_input + win * input ;
            else
                total_exc =  lateral_input ;




            // Exploratory perturbations
/*            modul.setZero();
            if (MODULTYPE == "UNIFORM")
            {
                // Apply a modulation to the entire network with probability PROBAMODUL - Not used for these simulations.
                if ( (Uniform(myrng) < PROBAMODUL)
                        && (numiter> 3)
                   )
                {
                    randVec(modul);
                    modul *= ALPHAMODUL;
                    total_exc +=   modul;
                    modulmarker.fill(1);
                }
                else
                    modulmarker.setZero();
            }
            else if (MODULTYPE == "DECOUPLED")
            {
                // Perturb each neuron independently with probability PROBAMODUL
                modulmarker.setZero();
                for (int nn=0; nn < NBNEUR; nn++)
                    if ( (Uniform(myrng) < PROBAMODUL)
                            && (numiter> 3)
                       )
                    {
                        modulmarker(nn) = 1;
                        modul(nn) = (-1.0 + 2.0 * Uniform(myrng));
                    }
                modul *= ALPHAMODUL;
                total_exc +=   modul;
            }
            else { throw std::runtime_error("Which modulation type?"); }
*/
            
            if (numtrial % 2 == 1)
            {
                if (numiter == TIMEMODUL)
                {
                    modul0 =  Uniform(myrng) < .5 ? ALPHAMODUL : -ALPHAMODUL;  // Fixed-magnitude modulations: they seem to align much better with BackProp gradients
                    //modul0 =  (1.0 - 2.0 * Uniform(myrng))  * ALPHAMODUL ;  
                    
                    total_exc(0) += modul0;

                    etraceNODEPERT = r * modul0;
                }
            }

            
            
            // Compute network activations: forward-Euler step of
            // tau * dx/dt = -x + total_exc, with dtdivtau = dt / tau.
            x += dtdivtau * (-x + total_exc);

            x(1) = 1.0; x(10) = 1.0; x(11) = -1.0; //x(12) = 1.0;  // Biases


            // Actual responses = tanh(activations)
            for (int nn=0; nn < NBNEUR; nn++)
            {
                r(nn) = tanh(x(nn));
            }


            rs.col(numiter) = r;


            // Okay, now for the actual plasticity.

            // First, compute the fluctuations of neural activity (detrending / high-pass filtering)
            // NOTE: Remember that x is the current excitation of the neuron (i.e. what is passed to tanh to produce the actual output r) - NOT the input!
            delta_x =  x  - x_trace ;
            //delta_x_sq = delta_x.array() * delta_x.array().abs();
            //delta_x_cu = delta_x.array() * delta_x.array() * delta_x.array();
            x_trace = ALPHATRACEEXC * x_trace + (1.0 - ALPHATRACEEXC) * x;

            // This is for the full exploratory-hebbian rule, which requires a continuous, real-time reward signal (and its running average, with same time constant as that of x)
            rew = -abs(r(0) - tgtresp);
            delta_rew =  rew  - rew_trace ;
            //delta_x_sq = delta_x.array() * delta_x.array().abs();
            //delta_x_cu = delta_x.array() * delta_x.array() * delta_x.array();
            rew_trace = ALPHATRACEEXC * rew_trace + (1.0 - ALPHATRACEEXC) * rew;


            if (numiter > TIMERESP)
            {

                    // Note that r = r(t-1) = the current lateral input to each neuron! 

                    // Eligibility trace as the accumulated product of inputs times fluctuations in output (i.e. like the Exploratory Hebbian rule, without the continuous real-time reward signal) (should not work)
                    etraceDELTAX += rprev * delta_x(0);
             
                    // For Exploratory-Hebbian rule, the eligibility trace is essentially the gradient:
                    etraceEH += rprev * delta_x(0) * delta_rew;
                    
                    // Eligibility trace computed according to our rule (accumulated product of inputs times fluctuations, passed through a supralinear function - here, the cubic function)
                    etraceDELTAXCU.array() +=  (rprev * delta_x(0)).array().cube();
                    

                    // Slight variant: cube applied only to the fluctuations, not to the total product 
                    double deltaxcu =  delta_x(0) * delta_x(0) * delta_x(0);
                    etraceDELTAXCUALT += deltaxcu * rprev;

                    // Signed-square nonlinearity (supralinear, so should work)
                    etraceDELTAXSIGNSQ.array() +=  (rprev * delta_x(0)).array() * (rprev * delta_x(0)).array().abs();
                    
                    // Signed-square-root nonlinearity (sublinear, so should NOT work)
                    etraceDELTAXSIGNSQRT.array() +=  ((r * delta_x(0)).array() > 0).select( (r * delta_x(0)).array().abs().sqrt(), -(r * delta_x(0)).array().abs().sqrt() ) ;

                    
                    // Product of inputs times fluctuation, but only at the time of the modulation (should work, illustrating that the problem lies in the post-perturbation relaxation effects)
                    if (numiter == TIMEMODUL)
                    {
                        etraceDELTAXOP += rprev * delta_x(0);
                    }
                    
                    // Same thing, but accumulated for a few ms after the modulation
                    if ((numiter >= TIMEMODUL) && (numiter < TIMEMODUL + 11))
                    //if (numiter >= TIMEMODUL)
                        etraceDELTAX31 += rprev * delta_x(0);
                    

            }


        }  // Trial finished!


        // Compute error for this trial

        int EVALTIME = TRIALTIME - TIMERESP; 

        err = rs.row(0).array() - tgtresp;
        err.head(TIMERESP).setZero(); // Error is only computed over the response period, i.e. from TIMERESP onward

        meanabserr =  err.cwiseAbs().sum() / (double)EVALTIME;

        if (numtrial % 2 == 0)
            predictederr = meanabserr;
        
        // How do we compute the predicted error in the absence of perturbation (i.e. R0)? Normally we simply use a running average over previous Rs for this particular trial type, but here there is no trial type!
        // The solution is to run each trial twice, once with and once without the perturbation.

        if (numtrial % 2 == 1)
        {
            gradDELTAX = -ETA * (meanabserr -   predictederr) * etraceDELTAX;
            gradEH = ETA * etraceEH ;  // The reward/error signal is already included in the e-trace for the EH rule
            gradDELTAXOP = -ETA * (meanabserr - predictederr) * etraceDELTAXOP;
            gradDELTAX31 = -ETA * (meanabserr - predictederr) * etraceDELTAX31;
            gradDELTAXCU = -ETA * (meanabserr - predictederr) * etraceDELTAXCU;
            gradDELTAXCUALT = -ETA * (meanabserr - predictederr) * etraceDELTAXCUALT;
            gradDELTAXSIGNSQ = -ETA * (meanabserr - predictederr) * etraceDELTAXSIGNSQ;
            gradDELTAXSIGNSQRT = -ETA * (meanabserr - predictederr) * etraceDELTAXSIGNSQRT;
            gradNODEPERT = -ETA * (meanabserr - predictederr) * etraceNODEPERT;
            // For backpropagation, the gradient is simply error * inputs. We
            // only consider the gradient around the time of the modulation to
            // ensure a fair comparison with other measures. We use TIMEMODUL-1
            // as the closest approximation not affected by the modulation
            // itself.
            gradBP = -ETA * err(TIMEMODUL -1)  * rs.col(TIMEMODUL-1);
        }

        // We re-transfer the values back from the C arrays to the Eigen matrix
        /*
        for (int n1=0; n1 < NBNEUR; n1++)
            for (int n2=0; n2 < NBNEUR; n2++)
                hebb(n1, n2) = hebbmat[n1][n2];


        // Compute the actual weight change, based on eligibility trace and the relative error for this trial:

        if ((PHASE == LEARNING) && (numtrial> 100)
           )
        {
            // Note that the weight change is the summed Hebbian increments, multiplied by the relative error, AND the mean of recent errors for this trial type - this last multiplication may help to stabilize learning.
            dJ = (  -  ETA * meanabserrtrace(trialtype) * (hebb.array() * (meanabserr - meanabserrtrace(trialtype)))).transpose().cwiseMin(MAXDW).cwiseMax(-MAXDW);
            J +=  dJ;

        }


        meanabserrtrace(trialtype) = ALPHATRACE * meanabserrtrace(trialtype) + (1.0 - ALPHATRACE) * meanabserr; 
        meanabserrs(numtrial) = meanabserr;
        */


        // Display stuff, save files.
        VectorXd etrace = etraceDELTAX;

        if (numtrial % 2 == 1)
        {
            // Note that the weight change is the summed Hebbian increments, multiplied by the relative error, AND the mean of recent errors for this trial type - this last multiplication may help to stabilize learning.
            //dwff = (  -  ETA * meanabserrtrace(trialtype) * (etrace.array() * (meanabserr - meanabserrtrace(trialtype)))).cwiseMin(MAXDW).cwiseMax(-MAXDW);
            //wff +=  dwff;

            int numsyn = (int)floor(Uniform(myrng) * NBNEUR);
            gradfile << gradBP(numsyn) << " " << gradDELTAX(numsyn) << " " << gradDELTAXOP(numsyn) << " " << gradDELTAX31(numsyn) << " " << gradDELTAXCU(numsyn) << " " <<gradDELTAXCUALT(numsyn) << " " << gradDELTAXSIGNSQ(numsyn) << 
                " " << gradDELTAXSIGNSQRT(numsyn) << " " << gradEH(numsyn)  << " " <<gradNODEPERT(numsyn) <<endl;
        }


            if (numtrial % 100 <  8)
            {    
                cout << numtrial ; // << "- trial type: " << trialtype;
                //cout << ", responses : " << zout;
                //cout << ", time-avg responses for each pattern: " << zouttrace ;
                //cout << ", sub(abs(wout)): "  << wout.cwiseAbs().sum() ;
                //cout << ", hebb(0,1:3): " << hebb.col(0).head(4).transpose();
                cout << ", meanabserr: " << meanabserr;
                //cout << ", wout(0,1:3): " << wout.row(0).head(5) ; 
                cout << ", r: " << r.head(5).transpose();
                cout << ", modul: " << modul0;
                cout << ", input: " << input.head(4).transpose();
                //cout << ", wff: " << wff.transpose();
                //cout << ", dwff: " << dwff.transpose();
                cout<<endl;
/*                cout << ", gradCU: " << gradDELTAXCU.transpose();
                cout<<endl;
                cout << ", gradCUalt: " << gradDELTAXCUALT.transpose();
                cout<<endl;
                cout << ", gradDX: " << gradDELTAX.transpose();
                cout<<endl;
                cout << ", gradOP: " << gradDELTAXOP.transpose();
                cout<<endl;
                cout << ", gradNP: " << gradNODEPERT.transpose();
                cout << endl;
                cout << ", gradBP: " << gradBP.transpose();
                cout << endl;
        cout << gradNODEPERT.dot(gradBP) / (gradNODEPERT.norm() * gradBP.norm());
        cout << " " << gradDELTAX.dot(gradBP) / (gradDELTAX.norm() * gradBP.norm());
        cout << " " << gradDELTAXOP.dot(gradBP) / (gradDELTAXOP.norm() * gradBP.norm());
        cout << " " << gradDELTAXCU.dot(gradBP) / (gradDELTAXCU.norm() * gradBP.norm());*/
                cout << endl;
            }


    }

    cout << "Done learning ..." << endl;


    cout << J.mean() << " " << J.cwiseAbs().sum() << " " << J.maxCoeff() << endl;
    //cout << wout.mean() << " " << wout.cwiseAbs().sum() << " " << wout.maxCoeff() << endl;


    cout << endl;
    return 0;
}
Example #13
	RandomModule() {
		random_engine.seed(std::random_device{}());  // seed from std::random_device (usually nondeterministic)
	}
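std::random_device{}() supplies a single 32-bit word, while the engine's state can be larger (std::default_random_engine is mt19937 on some platforms); a sketch that seeds the full state through std::seed_seq:

	RandomModule() {
		std::random_device rd;
		std::seed_seq seq{rd(), rd(), rd(), rd()};  // more seed material than one word
		random_engine.seed(seq);                    // seeds the whole engine state
	}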
Example #16
	void set_seed(int s) {
		rng.seed(s);
	}
Example #17
int main(int argc, char* argv[])
{
    fstream myfile;

    int PHASE=LEARNING;
    if (argc > 1)
       for (int nn=1; nn < argc; nn++)
       {
           if (strcmp(argv[nn], "test") == 0) { PHASE = TESTING; cout << "Test mode. " << endl; }
           if (strcmp(argv[nn], "METHOD") == 0) { METHOD = argv[nn+1]; }
           if (strcmp(argv[nn], "SUBW") == 0) { SUBW = atoi(argv[nn+1]); }
           if (strcmp(argv[nn], "G") == 0) { G = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "ALPHABIAS") == 0) { ALPHABIAS = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "ETA") == 0) { ETA = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "STIMVAL") == 0) { STIMVAL = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "ALPHAMODUL") == 0) { ALPHAMODUL = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "NBNEUR") == 0) { NBNEUR = atoi(argv[nn+1]); }
           if (strcmp(argv[nn], "PROBAMODUL") == 0) { PROBAMODUL = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "PROBAHEBB") == 0) { PROBAHEBB = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "ALPHATRACE") == 0) { ALPHATRACE = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "RNGSEED") == 0) { RNGSEED = atof(argv[nn+1]); }
           if (strcmp(argv[nn], "MAXDW") == 0) { MAXDW = atof(argv[nn+1]); }
       }

    string SUFFIX = "_G" + to_string(G) + "_MAXDW" + to_string(MAXDW) + "_ETA" + to_string(ETA) + "_ALPHAMODUL" + to_string(ALPHAMODUL) + "_PROBAMODUL" + to_string(PROBAMODUL) + "_SUBW" +to_string(SUBW) + "_ALPHATRACE" + to_string(ALPHATRACE) + "_METHOD-" + METHOD + "_ALPHABIAS" + to_string(ALPHABIAS) + "_PROBAHEBB" + to_string(PROBAHEBB) + "_NBNEUR" + to_string(NBNEUR) + "_RNGSEED" + to_string(RNGSEED);
    cout << SUFFIX << endl;

    myrng.seed(RNGSEED);

    double dt = 1.0;
    double tau = 10.0;
    int trialtype;

    int NBTRIALS = 100017; 
    int TRIALTIME = 700 ; //500; // 1100
    int STARTSTIM1 = 1, TIMESTIM1 = 500; // 200
    //int STARTSTIM2 = 400, TIMESTIM2 = 0; 

    if (PHASE == TESTING) 
        NBTRIALS = 40*NBPATTERNS;


    MatrixXd patterns[NBPATTERNS];
    MatrixXd tgtresps[NBPATTERNS];


    // Remember that input channel 0 is reserved for the unimplemented 'go' signal
    

    MatrixXd dJ(NBOUT, NBNEUR); dJ.setZero();
    MatrixXd win(NBNEUR, NBIN); randMat(win); //win.setRandom();// win.row(0).setZero(); // Uniformly between -1 and 1, except possibly for output cell (not even necessary).
    cout << win.col(0).head(5) << endl;
    MatrixXd J(NBNEUR, NBNEUR);
    
    randJ(J);
    
    if (PHASE == TESTING){
        readWeights(J, "J.dat");
        readWeights(win, "win.dat");
    }



    VectorXd meanerrs(NBTRIALS); meanerrs.setZero();
    VectorXd lateral_input;
    VectorXd dxthistrial(NBNEUR);
    MatrixXd rs(NBNEUR, TRIALTIME); rs.setZero();
    MatrixXd hebb(NBNEUR, NBNEUR);  
    VectorXd x(NBNEUR), r(NBNEUR), rprev(NBNEUR), dxdt(NBNEUR), k(NBNEUR), 
             input(NBIN), deltax(NBNEUR); 
    x.fill(0); r.fill(0);
    
    VectorXd err(TRIALTIME); 
    VectorXd meanerrtrace(NBPATTERNS);
    double meanerr;

    MatrixXd dJtmp, Jprev, Jr;

    double dtdivtau = dt / tau;



    meanerrtrace.setZero();



    for (int numtrial=0; numtrial < NBTRIALS; numtrial++)
    {

        if (PHASE == LEARNING)
            //trialtype = (int)(numtrial/2) % NBPATTERNS;
            trialtype = numtrial % NBPATTERNS;
        else 
            trialtype = numtrial % NBPATTERNS;

        
        hebb.setZero();
        dJ.setZero();
        //input = patterns.col(trialtype);
        input.setZero();
        
        x.fill(0.0); 
        //x.setRandom(); x *= .05; 
        x(1)=1.0; x(10)=1.0; x(11)=-1.0; //x(12) = 1.0; 
        x += dtdivtau * win * input;
        for (int nn=0; nn < NBNEUR; nn++)
            r(nn) = tanh(x(nn));

        

        randVec(dxthistrial);
        dxthistrial *= ALPHABIAS;

        double tgtresp;
        double biasmodality1, biasmodality2;

        if (trialtype == 0){
            input(3) = 1.0; input(4) = 0.0;
            biasmodality1 = 1.0; 
            tgtresp = .98;
            biasmodality2 = Uniform(myrng) < .5 ?  1 : -1;
        }
        if (trialtype == 1){
            input(3) = 1.0; input(4) = 0.0;
            biasmodality1 = -1.0; 
            tgtresp = -.98;
            biasmodality2 = Uniform(myrng) < .5 ?  1 : -1;
        }
        if (trialtype == 2){
            input(3) = 0.0; input(4) = 1.0;
            biasmodality2 = 1.0; 
            tgtresp = .98;
            biasmodality1 = Uniform(myrng) < .5 ?  1 : -1;
        }
        if (trialtype == 3){
            input(3) = 0.0; input(4) = 1.0;
            biasmodality2 = -1.0; 
            tgtresp = -.98;
            biasmodality1 = Uniform(myrng) < .5 ?  1 : -1;
        }

        biasmodality1 *= .2;
        biasmodality2 *= .2;

        for (int numiter=0; numiter < TRIALTIME;  numiter++)
        {

            input(0) = 0;
            input(1) = 0; input(2) = 0;
            if (numiter >= STARTSTIM1 && numiter < STARTSTIM1 + TIMESTIM1)
            {
                input(1) = .5 * Gauss(myrng) + biasmodality1;
                input(2) = .5 * Gauss(myrng) + biasmodality2;
            }
            rprev = r;
            lateral_input =  J * r;
        
            // Forward-Euler step: tau * dx/dt = -x + recurrent input + stimulus + per-trial bias.
            deltax = dtdivtau * ( -x + lateral_input /*+ wfb * zout */ + STIMVAL * win * input + dxthistrial );
            x += deltax;
        
            x(1) = 1.0; x(10) = 1.0; x(11) = -1.0; //x(12) = 1.0;
            
            //if (numtrial % 2 == 1)
            { 
                if ((PHASE == LEARNING) && (Uniform(myrng) < PROBAHEBB))
                {
                    if (METHOD == "DXTRIAL")
                        hebb += r * dxthistrial.transpose();
                    else if (METHOD == "X")
                        hebb += r * x.transpose();
                    else if (METHOD == "DELTAX")
                        hebb += r * deltax.transpose();
                    else if (METHOD == "LATINPUT")
                        hebb += r * (dtdivtau * lateral_input.transpose()); // Works, with instabilities, at sufficiently low ETA
                    else { cout << "Which method??" << endl; return -1; }
                }
            }

            for (int nn=0; nn < NBNEUR; nn++)
            {
                /*if (x(nn) > 0)
                    r(nn) = tanh(x(nn));
                else
                    r(nn) = .1 * tanh(10.0*x(nn));*/
                r(nn) = tanh(x(nn));
            }
           

            rs.col(numiter) = r;

            
        }
       
       int EVALTIME = 300; 

        err = rs.row(0).array() - tgtresp;
        err.head(TRIALTIME - EVALTIME).setZero();

        //meanerr = 2.0   * err.cwiseAbs().sum();
        meanerr =  err.cwiseAbs().sum() / (double)EVALTIME;

        if ((PHASE == LEARNING) && (numtrial> 100)
                // && (numtrial %2 == 1)
           )
            {
             
                
                // dJ = -.0001 * meanerr.sum() * (hebb.array() * (meanerr.sum() - meanerrtrace.col(trialtype).sum())).transpose().cwiseMin(5e-4).cwiseMax(-5e-4); << Version that worked
                //dJ = (  -.0000001 * meanerr * (hebb.array() * (meanerr - meanerrtrace(trialtype)))).transpose().cwiseMin(1e-6).cwiseMax(-1e-6); 
                //dJ = (  -.000005 * meanerr * (hebb.array() * (meanerr - meanerrtrace(trialtype)))).transpose().cwiseMin(5e-5).cwiseMax(-5e-5);
                //dJ = G * (  -  ETA * meanerr * (hebb.array() * (meanerr - meanerrtrace(trialtype)))).transpose().cwiseMin(MAXDW).cwiseMax(-MAXDW);
                
                //dJ = (  -  ETA * meanerrtrace(trialtype) * meanerrtrace(trialtype) * (hebb.array() * (meanerr - meanerrtrace(trialtype)))).transpose().cwiseMin(MAXDW).cwiseMax(-MAXDW);
                dJ = (  -  ETA * meanerrtrace(trialtype) * (hebb.array() * (meanerr - meanerrtrace(trialtype)))).transpose().cwiseMin(MAXDW).cwiseMax(-MAXDW);

                J +=  dJ;


            }


        meanerrtrace(trialtype) = ALPHATRACE * meanerrtrace(trialtype) + (1.0 - ALPHATRACE) * meanerr; 
        //meanerrtrace(trialtype) = meanerr; 
        meanerrs(numtrial) = meanerr;
       

        if (PHASE == LEARNING)
        {
            if (numtrial % 3000 < 8) 
            {
                //myfile.open("rs"+std::to_string((numtrial/2)%4)+".txt", ios::trunc | ios::out);  myfile << endl << rs.transpose() << endl; myfile.close();
                //myfile.open("rs"+std::to_string(trialtype)+".txt", ios::trunc | ios::out);  myfile << endl << rs.transpose() << endl; myfile.close();
                
                //myfile.open("rs"+std::to_string(numtrial % 3000)+".txt", ios::trunc | ios::out);  myfile << endl << rs.transpose() << endl; myfile.close();
            }
            if (numtrial % 3000 == 0)
            {
                //myfile.open("J_"+std::to_string(numtrial)+".txt", ios::trunc | ios::out);  myfile << J << endl; myfile.close();
                //saveWeights(J, "J_"+std::to_string(numtrial)+".dat");
                myfile.open("J" + SUFFIX + ".txt", ios::trunc | ios::out);  myfile << J << endl; myfile.close();
                myfile.open("win" + SUFFIX + ".txt", ios::trunc | ios::out);  myfile << win << endl; myfile.close();
                saveWeights(J, "J" + SUFFIX + ".dat");
                saveWeights(win, "win" + SUFFIX + ".dat"); // win doesn't change over time.
                
                myfile.open("errs" + SUFFIX + ".txt", ios::trunc | ios::out);  myfile << endl << meanerrs.head(numtrial) << endl; myfile.close();

            }


            if (numtrial % (NBPATTERNS * 200) <  2*NBPATTERNS)
            {    
                cout << numtrial << "- trial type: " << trialtype;
                //cout << ", responses : " << zout;
                //cout << ", time-avg responses for each pattern: " << zouttrace ;
                //cout << ", sub(abs(wout)): "  << wout.cwiseAbs().sum() ;
                //cout << ", hebb(0,1:3): " << hebb.col(0).head(4).transpose();
                cout << ", meanerr: " << meanerr;
                //cout << ", wout(0,1:3): " << wout.row(0).head(5) ; 
                cout << ", r(0,1:6): " << r.transpose().head(6) ; 
                cout << ", dJ(0,1:4): " << dJ.row(0).head(4)  ;
                cout << endl;
            }
        }
        else if (PHASE == TESTING) {
            cout << numtrial << "- trial type: " << trialtype;
            cout << " r[0]: " << r(0);
            cout << endl;
            myfile.open("rs_long_type"+std::to_string(trialtype)+"_"+std::to_string(int(numtrial/NBPATTERNS))+".txt", ios::trunc | ios::out);  myfile << endl << rs.transpose() << endl; myfile.close();
        }


    }

    cout << "Done learning ..." << endl;


    cout << J.mean() << " " << J.cwiseAbs().sum() << " " << J.maxCoeff() << endl;
    //cout << wout.mean() << " " << wout.cwiseAbs().sum() << " " << wout.maxCoeff() << endl;


    cout << endl;
    return 0;
}