void Toolbox::train(DataSet &X, bool bInitWeights)
{
    if (bInitWeights) {
        initModel(X);
        initWeights(X);
    }
    // Start training
    pOptimizer->optimize(pModel, &X, pEvaluator, pGradient);
}
void Network::buildWeightData()
{
    if (weights == NULL)
        initWeights();

    // Construct the weightIndicies array, each element of which contains a
    // vector of pre-synaptic connections for a given unit.
    vector<UnitId>* weightIndicies = new vector<UnitId>[units.size()];
    numConns = 0;

    UnitId k;
    UnitId* indx;
    // NOTE: the original declared conn_indicies but never initialized it
    // before dereferencing. It is assumed here that the sparse weight
    // matrix can hand back its list of (pre, post) index pairs via a
    // hypothetical getIndicies() accessor.
    vector<ssm_index*>* conn_indicies = weights->getIndicies();
    for (k = 0; k < conn_indicies->size(); k++) {
        indx = (*conn_indicies)[k];
        int pre = indx[0];
        int post = indx[1];
        float wval = weights->get(pre, post);
        printf("buildWeightData: pre=%d, post=%d, wval=%f\n", pre, post, wval);

        // Add to weight index
        weightIndicies[post].push_back((UnitId) pre);
        numConns++;
    }
    delete conn_indicies;

    // Construct weightArr and weightIndexRange (array delete to match new[])
    if (weightArr != NULL)
        delete[] weightArr;
    weightArr = new float[numConns];
    if (weightIndex != NULL)
        delete[] weightIndex;
    weightIndex = new UnitId[numConns];
    if (weightIndexRange != NULL)
        delete[] weightIndexRange;
    weightIndexRange = new unsigned int*[numNonExUnits];
    for (k = 0; k < numNonExUnits; k++)
        weightIndexRange[k] = NULL;

    unsigned int cindx, gpuIndx, nwts, m;
    cindx = 0;
    for (k = 0; k < units.size(); k++) {
        nwts = weightIndicies[k].size();
        if (nwts > 0) {
            gpuIndx = kernelIndexArr[k];
            weightIndexRange[gpuIndx] = new unsigned int[2];
            weightIndexRange[gpuIndx][0] = cindx;
            weightIndexRange[gpuIndx][1] = cindx + nwts - 1;
            for (m = 0; m < nwts; m++) {
                weightIndex[cindx] = weightIndicies[k][m];
                // Look up the weight by pre-synaptic unit id, not by the
                // loop counter (the original called weights->get(m, k)).
                weightArr[cindx] = weights->get(weightIndicies[k][m], k);
                cindx++;
            }
        }
    }

    // Throw out weightIndicies
    delete[] weightIndicies;
}
void Neuron::init(int num)
{
    isOrNeuron = false;
    threshold = 1.0;
    this->num_inputs = (short)num;
    setValue(0.);
    inputs = NULL;
    initInputs();
    initWeights();
}
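/*
 * Neuron::initWeights() is called above but not defined anywhere in this
 * listing. A minimal sketch, assuming the class keeps one weight per input
 * in a hypothetical `weights` array member of length num_inputs:
 */
void Neuron::initWeights()
{
    weights = new double[num_inputs];
    for (short i = 0; i < num_inputs; i++)
        weights[i] = ((double)rand() / RAND_MAX) - 0.5; // small random value in [-0.5, 0.5)
}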
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    double *netout = malloc(sizeof(double) * NUM_O); // what the network thinks
    double *hidout = malloc(sizeof(double) * NUM_H); // results from hidden layer
    sumsh = malloc(sizeof(double) * NUM_H); // weighted summations for hidden layer
    sumso = malloc(sizeof(double) * NUM_O); // weighted summations for output layer

    srand(time(NULL) * getpid());

    /* initialize the weights; small, random values */
    wih = initWeights(NUM_I, NUM_H); // weights between input and hidden layer
    who = initWeights(NUM_H, NUM_O); // weights between hidden and output layer

    /* train this thing. */
    printf("%s\n", trainNet(netout, hidout)
        ? "Epoch limit exceeded!"
        : "Neural network trained and ready to go.");

    free(netout); // the original leaked these two buffers
    free(hidout);
    free(sumsh);
    free(sumso);
    free(wih);
    free(who);
    return 0;
}
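/*
 * The initWeights() used above evidently returns a freshly malloc'd,
 * flattened rows-by-cols weight matrix (it is later passed to free()).
 * A minimal sketch under that assumption, filling it with the small random
 * values the comment promises:
 */
double *initWeights(int rows, int cols)
{
    double *w = malloc(sizeof(double) * rows * cols);
    if (w == NULL)
        return NULL;
    for (int i = 0; i < rows * cols; i++)
        w[i] = ((double)rand() / RAND_MAX - 0.5) * 0.1; // in [-0.05, 0.05)
    return w;
}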
Network *createNetwork(int inpCount, int hidCount, int outCount)
{
    // Calculate size of INPUT Layer
    int inpNodeSize  = sizeof(Node); // Input layer has 0 weights
    int inpLayerSize = sizeof(Layer) + (inpCount * inpNodeSize);

    // Calculate size of HIDDEN Layer
    int hidWeightsCount = inpCount;
    int hidNodeSize  = sizeof(Node) + (hidWeightsCount * sizeof(double));
    int hidLayerSize = sizeof(Layer) + (hidCount * hidNodeSize);

    // Calculate size of OUTPUT Layer
    int outWeightsCount = hidCount;
    int outNodeSize  = sizeof(Node) + (outWeightsCount * sizeof(double));
    int outLayerSize = sizeof(Layer) + (outCount * outNodeSize);

    // Allocate one memory block for the whole network
    Network *nn = (Network*)malloc(sizeof(Network) + inpLayerSize + hidLayerSize + outLayerSize);

    // Set/remember byte sizes of each component of the network
    nn->inpNodeSize  = inpNodeSize;
    nn->inpLayerSize = inpLayerSize;
    nn->hidNodeSize  = hidNodeSize;
    nn->hidLayerSize = hidLayerSize;
    nn->outNodeSize  = outNodeSize;
    nn->outLayerSize = outLayerSize;

    // Initialize the network by creating the INPUT, HIDDEN and OUTPUT layers inside of it
    initNetwork(nn, inpCount, hidCount, outCount);

    // Set defaults
    setNetworkDefaults(nn);

    // Init connection weights with random values
    initWeights(nn, HIDDEN);
    initWeights(nn, OUTPUT);

    return nn;
}
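/*
 * A minimal sketch of the initWeights(nn, layerType) calls above. Node and
 * layer counts are recovered from the byte sizes stored in createNetwork();
 * getLayer() and the flexible `weights` member of Node are illustrative
 * assumptions about the rest of the program:
 */
void initWeights(Network *nn, LayerType ltype)
{
    Layer *layer    = getLayer(nn, ltype); // hypothetical accessor into the block
    int nodeSize    = (ltype == HIDDEN) ? nn->hidNodeSize  : nn->outNodeSize;
    int layerSize   = (ltype == HIDDEN) ? nn->hidLayerSize : nn->outLayerSize;
    int nodeCount   = (layerSize - (int)sizeof(Layer)) / nodeSize;
    int weightCount = (nodeSize - (int)sizeof(Node)) / (int)sizeof(double);

    for (int n = 0; n < nodeCount; n++) {
        // Nodes are variable-sized, so walk the block by byte offset
        Node *node = (Node *)((char *)layer + sizeof(Layer) + n * nodeSize);
        for (int w = 0; w < weightCount; w++)
            node->weights[w] = rand() / (double)RAND_MAX; // random in [0,1]
    }
}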
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    // seed random number function
    srand(time(NULL));

    // initialize the weights
    initWeights();

    // load in the data
    initData();

    // train the network
    for (int j = 0; j <= numEpochs; j++) {
        for (int i = 0; i < numPatterns; i++) {
            // select a pattern at random
            patNum = rand() % numPatterns;

            // calculate the current network output
            // and error for this pattern
            calcNet();

            // change network weights
            WeightChangesHO();
            WeightChangesIH();
        }

        // display the overall network error
        // after each epoch
        calcOverallError();
        printf("epoch = %d RMS Error = %f\n", j, RMSerror);
    }

    // training has finished
    // display the results
    displayResults();

    system("PAUSE");
    return 0;
}
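/*
 * A minimal sketch of the initWeights() used above, assuming the usual
 * global weight arrays for this two-layer net (weightsIH, weightsHO,
 * numInputs and numHidden are illustrative names for globals defined
 * elsewhere in the program) and small random starting values:
 */
void initWeights(void)
{
    for (int j = 0; j < numHidden; j++) {
        weightsHO[j] = (rand() / (double)RAND_MAX - 0.5) / 2.0; // hidden-to-output
        for (int i = 0; i < numInputs; i++)
            weightsIH[i][j] = (rand() / (double)RAND_MAX - 0.5) / 5.0; // input-to-hidden
    }
}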
CNeuralNet::CNeuralNet(uint inputLayerSize, uint hiddenLayerSize, uint outputLayerSize,
                       double lRate, double mse_cutoff)
    : m_inputLayerSize(inputLayerSize),
      m_hiddenLayerSize(hiddenLayerSize),
      m_outputLayerSize(outputLayerSize),
      m_lRate(lRate),
      m_mse_cutoff(mse_cutoff) // initializer list
{
    // Size the output-activation buffer. The original declared a local
    // std::vector here that was discarded at the end of the constructor;
    // resizing the member (assumed to be m_outputActivation) is what was
    // presumably intended.
    m_outputActivation.resize(m_outputLayerSize);

    // initialize the two neuron layers
    SNeuronLayer hiddenLayer(m_hiddenLayerSize, m_inputLayerSize);
    SNeuronLayer outputLayer(m_outputLayerSize, m_hiddenLayerSize);

    // push the layers onto a vector
    m_vecLayer.push_back(hiddenLayer);
    m_vecLayer.push_back(outputLayer);

    // initialize random weights for neurons
    initWeights();
    //std::cout << "Constructor complete" << std::endl;
}
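/*
 * A minimal sketch of CNeuralNet::initWeights(). It assumes SNeuronLayer
 * exposes a vector of neurons, each holding a vector of input weights
 * (the member names m_vecNeurons and m_vecWeights are illustrative):
 */
void CNeuralNet::initWeights()
{
    for (auto &layer : m_vecLayer)
        for (auto &neuron : layer.m_vecNeurons)
            for (auto &w : neuron.m_vecWeights)
                w = 2.0 * rand() / RAND_MAX - 1.0; // random weight in [-1, 1]
}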
/** Create a new BPN object with the specified number of input, hidden
    and output neurons.
    @param input The number of neurons in the input layer.
    @param hidden A vector indicating the number of neurons in each hidden layer.
    @param output The number of neurons in the output layer.
    @param alpha The learning rate.
    @param m The momentum rate.
    @param type The neural network type.
    @param ifs Optional parameter to define the input field shape, default is IFS_SQUARE. */
BPN::BPN(int input, vector<int>& hidden, int output, float alpha, float m,
         int type, INPUTFIELDSHAPE ifs /*=IFS_SQUARE*/)
    : NeuralNetwork(), id(type), learningRate(alpha), momentum(m),
      epochsCompleted(0), errorCode(NONE), lastPatternTest(0),
      patternsCompleted(0), saveVersion(2), dynamicLearningRate(false),
      dynamicMomentum(false), inputFieldShape(ifs)
{
    if (type < FIRSTBPNTYPE || type > LASTBPNTYPE) {
        setErrorCode(BPN_ERROR);
        throw "Unknown BPN type specified for BPN constructor!";
    }

    // height of matrix is number of neurons in layer
    // width is number of neurons in next layer
    // +1 to each height for the bias neuron weight
    int h = hidden.size();
    int i = 0; // declared outside the loop: hidden[i-1] is needed again below
    for (i = 0; i < h; i++) {
        if (i == 0)
            weights.push_back(Matrix<float>(hidden[i], input));
        else
            weights.push_back(Matrix<float>(hidden[i], hidden[i-1]));
        biasWeights.push_back(Matrix<float>(hidden[i], 1));
        activationFunction.push_back(SIGMOID);
    }

    // weights to output layer
    if (h == 0)
        weights.push_back(Matrix<float>(output, input));
    else
        weights.push_back(Matrix<float>(output, hidden[i-1]));
    biasWeights.push_back(Matrix<float>(output, 1));
    activationFunction.push_back(SIGMOID);

    // resize to number of layers
    weightsSize = weights.size();
    // net.resize(weightsSize);

    initWeights();
}
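/*
 * A minimal sketch of BPN::initWeights() as called at the end of the
 * constructor, assuming Matrix<float> exposes height()/width() and an
 * element accessor (setValue() is an illustrative name):
 */
void BPN::initWeights()
{
    // Small random weights in [-0.5, 0.5) for every layer matrix and its bias column
    for (int i = 0; i < weightsSize; i++) {
        for (int r = 0; r < weights[i].height(); r++)
            for (int c = 0; c < weights[i].width(); c++)
                weights[i].setValue(r, c, (float)rand() / RAND_MAX - 0.5f);
        for (int r = 0; r < biasWeights[i].height(); r++)
            biasWeights[i].setValue(r, 0, (float)rand() / RAND_MAX - 0.5f);
    }
}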
void Network::connect(UnitId u1, UnitId u2, float weight)
{
    if (weights == NULL)
        initWeights();
    weights->set(u1, u2, weight);
}
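/*
 * Network::initWeights() is the lazy allocator guarding both connect() here
 * and buildWeightData() earlier. A minimal sketch, assuming `weights` points
 * to a sparse unit-by-unit matrix (SparseWeightMatrix is an illustrative
 * class name for whatever backs get()/set()):
 */
void Network::initWeights()
{
    if (weights == NULL)
        weights = new SparseWeightMatrix(units.size(), units.size());
}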
/// <summary>
/// Initialise the map with the desired dimensions. It is assumed that the
/// input to the classifier will be a two dimensional image, but if you wish
/// to have only one dimensional input just set InputsHeight to 1.
/// </summary>
/// <param name="InputsWidth">Width of the input image to be classified</param>
/// <param name="InputsHeight">Height of the input image to be classified</param>
/// <param name="mapWidth">Width of the topological map</param>
/// <param name="mapHeight">Height of the topological map</param>
Ttopmap::Ttopmap(int inputsWidth, int inputsHeight, int mapWidth, int mapHeight)
{
    int x, y, xx, yy, i;

    max_hits = 0;
    inputs_width = inputsWidth;
    inputs_height = inputsHeight;
    inputs = new unsigned char *[inputs_width];
    for (x = 0; x < inputs_width; x++)
        inputs[x] = new unsigned char[inputs_height];

    map_width = mapWidth;
    map_height = mapHeight;
    unit = new float ***[map_width];
    image = new unsigned char ***[map_width];
    outputs = new unsigned char *[map_width];
    hits = new int *[map_width];
    classification = new unsigned char *[map_width];
    classificationMulti = new int **[map_width];
    availability = new bool *[map_width];
    for (x = 0; x < map_width; x++) {
        unit[x] = new float **[map_height];
        image[x] = new unsigned char **[map_height];
        outputs[x] = new unsigned char[map_height];
        hits[x] = new int[map_height];
        classification[x] = new unsigned char[map_height];
        availability[x] = new bool[map_height];
        classificationMulti[x] = new int *[map_height];
        for (y = 0; y < map_height; y++) {
            hits[x][y] = 0;
            unit[x][y] = new float *[inputs_width];
            image[x][y] = new unsigned char *[inputs_width];
            classificationMulti[x][y] = new int[10];
            classification[x][y] = 0;
            availability[x][y] = true;
            for (i = 0; i < 10; i++)
                classificationMulti[x][y][i] = 0;
            for (xx = 0; xx < inputs_width; xx++) {
                unit[x][y][xx] = new float[inputs_height];
                image[x][y][xx] = new unsigned char[inputs_height];
                for (yy = 0; yy < inputs_height; yy++) {
                    unit[x][y][xx][yy] = 0;
                    image[x][y][xx][yy] = 0;
                }
            }
        }
    }

    learningRate = 0.5f;
    RadiusExcite = 1;
    randomness = 0.01f;
    Threshold = 0.0f;

    initWeights(0, 50);
}
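/*
 * A minimal sketch of the initWeights(0, 50) call above, filling each map
 * unit's weight plane with random values drawn from the given range (the
 * exact distribution and scaling are assumptions, not the original code):
 */
void Ttopmap::initWeights(int min, int max)
{
    for (int x = 0; x < map_width; x++)
        for (int y = 0; y < map_height; y++)
            for (int xx = 0; xx < inputs_width; xx++)
                for (int yy = 0; yy < inputs_height; yy++)
                    unit[x][y][xx][yy] = (float)(min + rand() % (max - min + 1));
}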
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>

int main()
{
    srand(time(NULL));

    Neuron neuron;
    initWeights(&neuron);
    neuron.eta = 0.5;

    FILE* file = fopen("C:\\Users\\Michael\\Source\\Repos\\Computational_Intelligence\\exercise_2\\testInput10A.txt", "r");
    if (file == NULL)
        return EXIT_FAILURE;

    // read training data ("x1,x2,t")
    char line[60];
    double trainingData[1000][DIM];
    double testData[1000][N];
    int i = 0;
    int trainingDataSize = 0;
    int testDataSize = 0;
    int trainingParsed = 0;
    while (1) {
        // fgets returns NULL at EOF (EOF in VS cmd line: enter ctrl+z enter)
        if (trainingParsed != 1 && fgets(line, 60, file)) {
            if (strcmp(line, "0,0,0\n") == 0) {
                trainingParsed = 1;
                i = 0;
            } else {
                // pass the buffer itself, not its address (the original's
                // &line has type char(*)[60], not char*)
                memcpy(trainingData[i], parseTrainingInputLine(line), DIM * sizeof(double));
                trainingDataSize++;
                i++;
            }
        } else if (trainingParsed == 1 && fgets(line, 60, file)) {
            memcpy(testData[i], parseTestInputLine(line), (DIM - 1) * sizeof(double));
            testDataSize++;
            i++;
        } else {
            break;
        }
    }

    int trainingCycles = 0;
    double MSE = 1;
    double eSum = 0;
    while (MSE > 0.0001) {
        for (int j = 0; j < trainingDataSize; j++)
            eSum += pow(updateWeights(&neuron, trainingData[j]), 2); // squared error
        MSE = eSum * (0.5 / (NUM_NEURONS * trainingDataSize)); // mean square error (see BILHR doc by Peer)
        printf("MSE: %f\n", MSE);
        //printNeuron(&neuron);
        eSum = 0;
        trainingCycles++;
    }

    // run the trained neuron on the test data ("x1,x2")
    //neuron.w[0] = 0.2;
    for (int i = 0; i < testDataSize; i++) { // was <=, which read one element past the data
        double res = activate(&neuron, testData[i]);
        if (res == 1)
            printf("+%f\n", res);
        else
            printf("%f\n", res);
    }

    return EXIT_SUCCESS;
}
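/*
 * A minimal sketch of initWeights(&neuron) as used above, assuming the
 * Neuron struct carries a weight array `w` of length DIM (the commented-out
 * neuron.w[0] line suggests such a member, with w[0] acting as the bias):
 */
void initWeights(Neuron* n)
{
    for (int i = 0; i < DIM; i++)
        n->w[i] = (double)rand() / RAND_MAX - 0.5; // small random start
}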
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

int main()
{
    //int XOR[4][3] = {{0,0,0,},{0,1,1,},{1,0,1,},{1,1,0,}}; // XOR data
    clock_t t;   // auxiliary variable for computing running time
    float sec;   // auxiliary variable for computing seconds
    double error1, error2;
    int sumData = 0, tPoints = 0;
    char trash[50000];

    srand(time(NULL));      // seed for random numbers
    initWeights(-0.1, 0.1); // initialize weights

    FILE * trainingSet = fopen("trainingData.txt", "r");
    FILE * con1 = fopen("con1.txt", "w");
    FILE * con2 = fopen("con2.txt", "w");

    while (fgets(trash, 50000, trainingSet) != NULL) // count training points
        tPoints++;

    t = clock(); // start timer
    //printf("\nEpoch: Output Des.out. (Error)\n");
    //printf("--------------------------------\n");

    int epoch; // declared outside the loop so it can be logged after the break
    for (epoch = 0; epoch <= epochs; epoch++) {
        error1 = 0, error2 = 0;
        rewind(trainingSet);
        for (int p = 0; p < tPoints; p++) { // for every pattern
            for (int img = 1; img <= IN; img++) {
                fscanf(trainingSet, "%lf", &y(img));
                y(img) = inputCode(y(img));
            }
            fscanf(trainingSet, "%lf %lf", &dv(0), &dv(1));
            dv(0) = outputCode(dv(0));
            dv(1) = outputCode(dv(1));

            forwardRun();    // train
            backwardRun();   // train
            weightsUpdate(); // train

            double J1 = fabs(dv(0) - y(FO)); // compute the error
            double J2 = fabs(dv(1) - y(LO)); // compute the error
            error1 += J1;
            error2 += J2;
            sumData++;

            if (epoch % 100 == 0) { // every 100 epochs print the error
                if (p == 0 && epoch != 0) {
                    printf("\n");
                    printf("\n%f %f\n", error1, error2);
                }
                //forwardRun(); // runs network
                //if (p % 20 == 0) printf("%5d: %f %f (%.6f) ::: %f %f (%.6f)\n", epoch, y(FO), dv(0), J1, y(LO), dv(1), J2);
            }
        }

        fprintf(con1, "%f ", error1 / tPoints);
        fprintf(con2, "%f ", error2 / tPoints);
        if ((error1 / tPoints < 0.005) && (error2 / tPoints < 0.005)) {
            printf("%5d: %f %f\n", epoch, error1 / sumData, error2 / sumData);
            break;
        }
    }

    fprintf(con1, "\n%d ", epoch);
    fprintf(con2, "\n%d ", epoch);
    fclose(con1);
    fclose(con2);
    fclose(trainingSet);

    t = clock() - t;                   // stop timer
    sec = ((float)t) / CLOCKS_PER_SEC; // conversion to seconds
    //printf("--------------------------------\n%.3f sec\n\n", sec);
    weightsToFileHelper();
    //printf("%.3f ", sec);
    printf("%d", tPoints);
    return 0;
}
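/*
 * A minimal sketch of initWeights(-0.1, 0.1) as called above, assuming the
 * weights live in a global array addressed through the same style of macro
 * as y() and dv() (the w() macro and NUM_WEIGHTS total are illustrative):
 */
void initWeights(double low, double high)
{
    for (int i = 0; i < NUM_WEIGHTS; i++)
        w(i) = low + (high - low) * rand() / (double)RAND_MAX; // uniform in [low, high]
}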
void initFIR(filter * f)
{
    f->len = FILTER_LENGTH;
    initWeights(f);
    initHistory(f);
}
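/*
 * Minimal sketches of the two helpers used above, assuming the filter
 * struct carries tap and sample buffers of length len (the field names
 * `weights` and `history` are illustrative). A fresh FIR filter, e.g. one
 * about to be adapted by LMS, typically starts fully zeroed:
 */
void initWeights(filter * f)
{
    for (int i = 0; i < f->len; i++)
        f->weights[i] = 0.0; // taps start at zero
}

void initHistory(filter * f)
{
    for (int i = 0; i < f->len; i++)
        f->history[i] = 0.0; // no input samples seen yet
}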