int main()
{
    int N;
    cin >> N;
    vector<string> vertex_names = input_string(N);
    vector<vector<int>> adjacency_matrix = input_matrix(N);
    
    Graph g(adjacency_matrix, vertex_names);
    cout << "Created graph object g" << endl;
    
    Graph h(g);
    cout << "Copy constructor h<-g" << endl;
    
    Graph i(move(h));
    cout << "Move constructor i<-h" << endl;
    
    Graph j(adjacency_matrix, vertex_names);
    j = i;
    cout << "Copy assignment j<-i" << endl;
    
    Graph k(adjacency_matrix, vertex_names);
    k = move(j);
    cout << "Move assignment k<-j" << endl << endl;

    
    cout << "BFS : Belgaum -> Chennai : ";
    disp(g.begin_bfs("Belgaum"), g.end_bfs("Chennai"));
    
    cout << "DFS : Chennai -> *No destination : ";
    disp(g.begin_dfs("Chennai"), g.end_dfs());
    
    cout << "Topo sort : Belgaum -> Chennai : ";   
    vector<string> test= g.topo_sort(g.begin_dfs("Belgaum"), g.end_dfs("Chennai"));
    disp(test.begin(), test.end());
    
    
    cout << "Compare graph : (g('Belgaum') == i('Belgaum') -> " << comp_graphs(g.begin_dfs("Belgaum"), 
                                                          g.end_dfs(),
                                                          g.begin_dfs("Belgaum"),
                                                          g.end_dfs()
                                                         ) << endl ;

    cout << "Compare graph : (g('Belgaum') == i('Bangalore') -> " << comp_graphs(g.begin_dfs("Belgaum"), 
                                                          g.end_dfs(),
                                                          g.begin_dfs("Bangalore"),
                                                          g.end_dfs()
                                                         ) << endl << endl ;
   
    cout << "Dijkstra matrix " << endl;
    cin >> N;
    vertex_names = input_string(N);
    adjacency_matrix = input_matrix(N);
    Graph obj(adjacency_matrix, vertex_names);
    obj.djikstra("Mumbai", N);
    return 0;
}
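
The input_string and input_matrix helpers called by this main are not part of the snippet. A minimal sketch of what they presumably do, given how main uses them (the bodies are an assumption; <iostream>, <vector>, <string> and using namespace std are taken as given, as in the example):

vector<string> input_string(int n)
{
    // read n vertex names from standard input
    vector<string> names(n);
    for (auto& name : names)
        cin >> name;
    return names;
}

vector<vector<int>> input_matrix(int n)
{
    // read an n x n adjacency matrix from standard input
    vector<vector<int>> m(n, vector<int>(n));
    for (auto& row : m)
        for (auto& cell : row)
            cin >> cell;
    return m;
}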
Example #2
int main(void)
{
	int mat1[MAX][MAX];
	int mat2[MAX][MAX];
	int mat3[MAX][MAX];

	input_matrix(mat1, MAX);
	input_matrix(mat2, MAX);

	multiply_matrix(mat1, mat2, mat3, MAX);

	printf("\n Answer : ");
	print_matrix(mat3, MAX);

	return 0;
}
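
multiply_matrix is not shown here; a plausible implementation is the classic triple loop (a sketch, assuming MAX is the dimension macro the example already uses):

void multiply_matrix(int a[MAX][MAX], int b[MAX][MAX], int c[MAX][MAX], int n)
{
    /* c = a * b for n x n matrices */
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
        {
            c[i][j] = 0;
            for (int k = 0; k < n; k++)
                c[i][j] += a[i][k] * b[k][j];
        }
}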
Example #3
int main(void)
{
    Matrix *matrix = input_matrix();
    char *target = getinput(100, '\n', "\nInput the target word: ");

    search(matrix, matrix->rows, matrix->cols, target);

    free_matrix(matrix);

    return 0;
}
Example #4
int main()
{
   int N, size;
   double *array1, *array2, *array3;
   printf("Please input the size of the matrix:");
   scanf("%d", &N);
   size = N * N;
   
   array1 = (double *) malloc(size * sizeof(double));
   array2 = (double *) malloc(size * sizeof(double));
   array3 = (double *) malloc(size * sizeof(double)); /* an N x N product has N*N entries */

   input_matrix(size, N, array1, array2);
   output_matrix(size, N, array1, array2);
   product_matrix(size, N, array1, array2, array3);

   free(array1);
   free(array2);
   free(array3);
   return 0;
}
Example #5
int main()
{
  FILE *fin,*fout;
  double **a,*b;
  int i;
  // open files
  a =dmatrix(1,N,1,N);
  b =dvec(1,N);

  fin = fopen("input.dat", "r");
  if (fin == NULL)
    {
      printf("Can't find file\n");
      exit(1);
    }
  fout = fopen("output.dat", "w");
  if (fout == NULL)
    {
      printf("Can't make file\n");
      exit(1);
    }
  input_matrix(a,'A',fin,fout);
  input_vec(b,'b',fin,fout);
  //  printf("%lf",a[1][1]);
  b =simple_gauss(a,b);

  //output results//
  fprintf(fout,"Ax=bの計算結果は次の通り\n");
  for(i = 1;i <= N; i++)
    {
      fprintf(fout,"%f\n",b[i]);
    }
  
  fclose(fin); fclose(fout);

  //  free_dmatrix(a,1,N,1,N);free_dvec(b,1);
  return(0);
}
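
simple_gauss is not shown either. Given the 1-indexed dmatrix/dvec allocations and the output loop above, it presumably performs Gaussian elimination and returns the solution in b; a sketch under those assumptions (no pivoting, N is the dimension macro the example already uses):

double *simple_gauss(double **a, double *b)
{
  /* forward elimination */
  for (int k = 1; k <= N - 1; k++)
    for (int i = k + 1; i <= N; i++)
      {
        double m = a[i][k] / a[k][k];
        for (int j = k; j <= N; j++)
          a[i][j] -= m * a[k][j];
        b[i] -= m * b[k];
      }
  /* back substitution; the solution overwrites b */
  for (int i = N; i >= 1; i--)
    {
      for (int j = i + 1; j <= N; j++)
        b[i] -= a[i][j] * b[j];
      b[i] /= a[i][i];
    }
  return b;
}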
//#define VERBOSE
void Compute_Simple_XOR_network_version_5(int num_iterations)
{
	Timer timer;

	// TRAINING SET FOR EXCLUSIVE OR GATE
	vector<vector2d > training;
	training.push_back(vector2d{ { 0.f, 0.f } });
	training.push_back(vector2d{ { 0.f, 1.f } });
	training.push_back(vector2d{ { 1.f, 0.f } });
	training.push_back(vector2d{ { 1.f, 1.f } });

	float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };

	int input_data_size = 1;
	int num_inputs = 2;
	int num_hidden = 2;
	int num_outputs = 1;

	// ==========================================
	matrix input_matrix( 1, num_inputs + 1 );

	matrix w_m_1_2_(num_inputs + 1, num_hidden + 1);
	
	matrix hidden_layer_(num_hidden+1, 1);
	
	matrix w_m_2_3_(num_hidden + 1, num_outputs);
	
	matrix out_(num_outputs, num_outputs);

	matrix del_3_2_(num_outputs, num_outputs);
	matrix del_2_1_(1, num_hidden + 1);
	
	/*for (int i = 0; i < num_inputs+1; i++)
	{
		for (int j = 0; j < num_hidden + 1; j++)
		{
			w_m_1_2_(i, j) = RandomFloat(-1.2, 1.2);
		}
	}
	*/
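	// RandomFloat above is not part of the snippet; a typical uniform helper
	// (an assumption, requires <cstdlib>) would be:
	//   float RandomFloat(float lo, float hi)
	//   {
	//       return lo + (hi - lo) * (float)rand() / (float)RAND_MAX;
	//   }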
	w_m_1_2_(0, 0) = 0.5f;
	w_m_1_2_(0, 1) = 0.9f;
	w_m_1_2_(0, 2) = 0.0f;

	w_m_1_2_(1, 0) = 0.4f;
	w_m_1_2_(1, 1) = 1.0f;
	w_m_1_2_(1, 2) = 0.0f;

	w_m_1_2_(2, 0) = 0.8f;// theta 1
	w_m_1_2_(2, 1) = -0.1f;//// theta 2
	w_m_1_2_(2, 2) = 1.0f;
	
	/*
	for (int i = 0; i < num_hidden + 1; i++)
	{
		w_m_2_3_(i, 0) = RandomFloat(-1.2, 1.2);
	}
	*/
	w_m_2_3_(0, 0) = -1.2f;
	w_m_2_3_(1, 0) = 1.1f;
	w_m_2_3_(2, 0) = 0.3f; // theta for output

	float output_error = 0.0f;

	matrix w_m_delta_1_(3, 3);
	matrix w_m_delta_2_(3, 1);


	float alpha = 0.1f;
	float beta = 0.95f;

	float sum_squared_errors = 0.0f;


	timer.Start();
	
	//	Sleep(2000);

	float last_sum_squared_errors = 0.0f;
	int positive_error_delta_count = 0;
	int negative_error_delta_count = 0;
	int alternation_count = 0;

	for (int p = 0; p < num_iterations; p++)
	{
		sum_squared_errors = 0.0f;
		for (int q = 0; q < 4; q++)
		{
			input_matrix(0, 0) = training[q].v[0];
			input_matrix(0, 1) = training[q].v[1];
			input_matrix(0, 2) = -1.0f; // bias is always -1


			hidden_layer_ = input_matrix * w_m_1_2_; // theta biases are folded into w_m_1_2_


			sigmoid(hidden_layer_, hidden_layer_);

			// OVERWRITE 3rd INPUT
			hidden_layer_(0, 2) = -1.0f;

			out_ = hidden_layer_ * w_m_2_3_;

			sigmoid(out_, out_);

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				hidden_layer_.print();
				cout<<endl;
				out_.print();
				cout<<endl;
			}
#endif
			output_error = desired_output[q] - out_(0, 0);

			sum_squared_errors += output_error * output_error;


			// back propagate

			anti_sigmoid(del_3_2_, out_);

			del_3_2_ = del_3_2_ * output_error;



			anti_sigmoid(del_2_1_, hidden_layer_);

			// put the vector on the diagonal for next operation ...
			matrix ident_22(3, 3);
			for (int i = 0; i < 3; i++)
			{
				for (int h = 0; h < 3; h++)
				{
					if (i == h) ident_22(i, h) = del_2_1_(0, i);
					else ident_22(i, h) = 0.0f;
				}
			}

			
			del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0);


			// weight deltas

			w_m_delta_2_.transpose();

			w_m_delta_2_ = w_m_delta_2_ * beta + del_3_2_ * hidden_layer_ * alpha;

			w_m_delta_2_.transpose();


#ifdef VERBOSE
			if (p % 250 == 0)
			{
				del_2_1_.print();
				cout << endl;
				del_3_2_.print();
				cout << endl;
			}
#endif
			w_m_delta_1_.transpose();

			w_m_delta_1_ = w_m_delta_1_ * beta + del_2_1_ * input_matrix * alpha;

			w_m_delta_1_.transpose();

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				w_m_delta_1_.print();
				cout << endl;
				w_m_delta_2_.print();
				cout << endl;
			}
#endif
			// update weights

			w_m_1_2_ = w_m_1_2_ + w_m_delta_1_;


			w_m_2_3_ = w_m_2_3_ + w_m_delta_2_;

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				w_m_1_2_.print();
				cout << endl;
				w_m_2_3_.print();
				cout << endl;
			}
#endif
		}
		if (sum_squared_errors > last_sum_squared_errors*1.04) alpha *= 0.7;
		if (sum_squared_errors < last_sum_squared_errors) alpha *= 1.05;
		// calculate the change in sum_squared_errors
		float delta_sum_square_errors = sum_squared_errors - last_sum_squared_errors;
		last_sum_squared_errors = sum_squared_errors;
		if (delta_sum_square_errors > 0.0f)
		{
			if (positive_error_delta_count == 0) {
				alternation_count++;
			}
			else{
				alternation_count = 0;
			}
			positive_error_delta_count++;
			negative_error_delta_count = 0;
		}
		else
		{
			if (negative_error_delta_count == 0) {
				alternation_count++;
			}
			else{
				alternation_count = 0;
			}
			negative_error_delta_count++;
			positive_error_delta_count = 0;
		}

		// determine change in learning rate
		if (positive_error_delta_count >= 2 || negative_error_delta_count >= 2)
		{
			alpha += 0.1;
			if (alpha > 1.0f) alpha = 1.0f;
		}
		else if (alternation_count >= 2)
		{
			alpha -= 0.1;
			if (alpha < 0.0f) alpha = 0.01;
		}

		//cout << sum_squared_errors << endl;
		if (sum_squared_errors < 0.001)
		{
			timer.Update();
			timer.Stop();
			cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.001" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
			break;
		}
	}
		
}
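
The sigmoid and anti_sigmoid helpers used throughout these trainers are not shown. Judging from the call sites (elementwise over a matrix, with anti_sigmoid applied to already-activated values, cf. out_ * (1 - out_) in version_3), they presumably look like the sketch below; the rows()/cols() accessors on matrix are an assumption:

float sigmoid(float x)
{
	return 1.0f / (1.0f + expf(-x)); // logistic activation, needs <cmath>
}

void sigmoid(matrix& dst, const matrix& src)
{
	// elementwise logistic activation
	for (int i = 0; i < src.rows(); i++)
		for (int j = 0; j < src.cols(); j++)
			dst(i, j) = sigmoid(src(i, j));
}

void anti_sigmoid(matrix& dst, const matrix& src)
{
	// derivative of the sigmoid expressed through its output:
	// if y = sigmoid(x), then dy/dx = y * (1 - y)
	for (int i = 0; i < src.rows(); i++)
		for (int j = 0; j < src.cols(); j++)
			dst(i, j) = src(i, j) * (1.0f - src(i, j));
}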
void Compute_Simple_XOR_network_version_4(int num_iterations)
{
	Timer timer;

	// TRAINING SET FOR EXCLUSIVE OR GATE
	vector<vector2d > training;
	training.push_back(vector2d{ { 0.f, 0.f } });
	training.push_back(vector2d{ { 0.f, 1.f } });
	training.push_back(vector2d{ { 1.f, 0.f } });
	training.push_back(vector2d{ { 1.f, 1.f } });

	float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };

	// ==========================================
	matrix input_matrix(1, 3);
	matrix w_m_1_2_(3, 3);
	matrix hidden_layer_(3, 1);
	matrix w_m_2_3_(3, 1);
	matrix out_(1, 1);

	matrix del_3_2_(1, 1);
	matrix del_2_1_(1, 3);

	w_m_1_2_(0, 0) = 0.5f;
	w_m_1_2_(0, 1) = 0.9f;
	w_m_1_2_(0, 2) = 0.0f;

	w_m_1_2_(1, 0) = 0.4f;
	w_m_1_2_(1, 1) = 1.0f;
	w_m_1_2_(1, 2) = 0.0f;

	w_m_1_2_(2, 0) = 0.8f;// theta 1
	w_m_1_2_(2, 1) = -0.1f;//// theta 2
	w_m_1_2_(2, 2) = 1.0f;

	w_m_2_3_(0, 0) = -1.2f;
	w_m_2_3_(1, 0) = 1.1f;
	w_m_2_3_(2, 0) = 0.3f; // theta for output 

	float output_error = 0.0f;

	matrix w_m_delta_1_(3, 3);
	matrix w_m_delta_2_(3, 1);


	float alpha = 0.3f;

	float sum_squared_errors = 0.0f;

	timer.Start();
	//	Sleep(2000);

	float last_sum_squared_errors = 0.0f;
	int positive_error_delta_count = 0;
	int negative_error_delta_count = 0;

	for (int p = 0; p < num_iterations; p++)
	{
		sum_squared_errors = 0.0f;
		
		for (int q = 0; q < 4; q++)
		{
			input_matrix(0, 0) = training[q].v[0];
			input_matrix(0, 1) = training[q].v[1];
			input_matrix(0, 2) = -1.0f; // bias is always -1


			hidden_layer_ = input_matrix * w_m_1_2_; // theta biases are folded into w_m_1_2_


			sigmoid(hidden_layer_, hidden_layer_);

			// OVERWRITE 3rd INPUT
			hidden_layer_(0, 2) = -1.0f;

			out_ = hidden_layer_ * w_m_2_3_;

			out_(0, 0) = sigmoid(out_(0, 0));

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				hidden_layer_.print();
				cout<<endl;
				out_.print();
				cout<<endl;
			}
#endif
			output_error = desired_output[q] - out_(0, 0);

			sum_squared_errors += output_error * output_error;


			// back propagate

			anti_sigmoid(del_3_2_, out_);

			del_3_2_ = del_3_2_ * output_error;



			anti_sigmoid(del_2_1_, hidden_layer_);

			// put the vector on the diagonal for next operation ...
			matrix ident_22(3, 3);
			for (int i = 0; i < 3; i++)
			{
				for (int h = 0; h < 3; h++)
				{
					if (i == h) ident_22(i, h) = del_2_1_(0, i);
					else ident_22(i, h) = 0.0f;
				}
			}

			del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0);


			// weight deltas

			w_m_delta_2_ = hidden_layer_ * alpha * del_3_2_(0, 0);

			w_m_delta_2_.transpose();


#ifdef VERBOSE
			if (p % 250 == 0)
			{ 
				del_2_1_.print();
				cout<<endl;
				del_3_2_.print();
				cout << endl;
			}
#endif
		
			w_m_delta_1_ = del_2_1_ * input_matrix * alpha;

			w_m_delta_1_.transpose();

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				w_m_delta_1_.print();
				cout<<endl;
				w_m_delta_2_.print();
				cout << endl;
			}
#endif
			// update weights

			w_m_1_2_ = w_m_1_2_ + w_m_delta_1_;


			w_m_2_3_ = w_m_2_3_ + w_m_delta_2_;

#ifdef VERBOSE
			if (p % 250 == 0)
			{
				w_m_1_2_.print();
				cout<<endl;
				w_m_2_3_.print();
				cout << endl;
			}
#endif
		}

		//cout << sum_squared_errors << endl;
		if (sum_squared_errors < 0.03)
		{
			timer.Update();
			timer.Stop();
			cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.03" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
			break;
		}
	}
}
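
A note on the ident_22 construction used in these trainers: placing the anti_sigmoid values on the diagonal turns a matrix product into an elementwise (Hadamard) product, so each hidden delta comes out as sigma'(h_i) * w_i * delta_out. The same values could be computed directly, without the temporary diagonal matrix (a hypothetical drop-in, same assumed matrix interface):

// after anti_sigmoid(del_2_1_, hidden_layer_):
for (int i = 0; i < 3; i++)
	del_2_1_(0, i) *= w_m_2_3_(i, 0) * del_3_2_(0, 0);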
void Compute_Simple_XOR_network_version_3(int num_iterations)
{
	Timer timer;

	// TRAINING SET FOR EXCLUSIVE OR GATE
	vector<vector2d > training;
	training.push_back(vector2d{ { 0.f, 0.f } });
	training.push_back(vector2d{ { 0.f, 1.f } });
	training.push_back(vector2d{ { 1.f, 0.f } });
	training.push_back(vector2d{ { 1.f, 1.f } });

	float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };

	// ==========================================
	matrix input_matrix(1, 2);
	matrix w_m_1_2_(2, 2);
	matrix hidden_layer_(2, 1);
	matrix w_m_2_3_(2, 1);
	matrix out_(1, 1);

	matrix del_3_2_(1, 1);
	matrix del_2_1_(1, 2);
	matrix theta_1_(1, 2);
	matrix theta_2_(1, 1);
	
	w_m_1_2_(0, 0) = 0.5f;
	w_m_1_2_(0, 1) = 0.9f;
	
	w_m_1_2_(1, 0) = 0.4f;
	w_m_1_2_(1, 1) = 1.0f;


	w_m_2_3_(0, 0) = -1.2f;
	w_m_2_3_(1, 0) = 1.1f;

	theta_1_(0, 0) = 0.8f;
	theta_1_(0, 1) = -0.1f;
	theta_2_(0, 0) = 0.3f;


	float output_error = 0.0f;

	matrix w_m_delta_1_(3, 2);
	matrix w_m_delta_2_(3, 1);
	

	float alpha = 0.3f;

	float sum_squared_errors = 0.0f;

	timer.Start();
	//	Sleep(2000);


	for (int p = 0; p < num_iterations; p++)
	{
		sum_squared_errors = 0.0f;
		for (int q = 0; q < 4; q++)
		{
			input_matrix(0, 0) = training[q].v[0];
			input_matrix(0, 1) = training[q].v[1];


			// theta's must be in the weight matrix
			hidden_layer_ = input_matrix * w_m_1_2_ - theta_1_;

			// computes the elementwise sigmoid of the sum 
			sigmoid(hidden_layer_, hidden_layer_);

			out_ = hidden_layer_ * w_m_2_3_ - theta_2_;
			out_(0, 0) = sigmoid(out_(0, 0));
	
//#define VERBOSE
#ifdef VERBOSE
		if (p % 250 == 0)
			{
				cout << "hidden layer: " << endl;
				hidden_layer_.print();
				cout << "output: " << endl;
				out_.print();
				cout << endl;
			}
#endif
			output_error = desired_output[q] - out_(0, 0);

			sum_squared_errors += output_error * output_error;

			// back propagate
			del_3_2_(0, 0) = out_(0, 0) * (1 - out_(0, 0)) * output_error;

			// reference values for sanity-checking the del_2_1_ computation below
			float correct_val_1 = hidden_layer_(0, 0)*(1 - hidden_layer_(0, 0)) * w_m_2_3_(0, 0) * del_3_2_(0, 0);
			float correct_val_2 = hidden_layer_(0, 1)*(1 - hidden_layer_(0, 1)) * w_m_2_3_(1, 0) * del_3_2_(0, 0);
			
			// computes the elementwise differentiation of the sigmoid function
			anti_sigmoid( del_2_1_, hidden_layer_ );
			
			// the del_2_1_ vector is expanded to inhabit the diagonal of the identity
			// matrix for the next matrix operation
			matrix ident_22(2, 2);
			for (int i = 0; i < 2; i++)
			{
				for (int h = 0; h < 2; h++)
				{
					if (i == h) ident_22(i, h) = del_2_1_(0, i);
					else ident_22(i, h) = 0.0f;
				}
			}

			del_2_1_ = ident_22 * w_m_2_3_ * del_3_2_(0, 0);

			del_2_1_.transpose();

			w_m_delta_2_(0, 0) = alpha * hidden_layer_(0, 0) * del_3_2_(0, 0);
			w_m_delta_2_(1, 0) = alpha * hidden_layer_(0, 1) * del_3_2_(0, 0);
			w_m_delta_2_(2, 0) = alpha * (-1.0f) * del_3_2_(0, 0);
#ifdef VERBOSE
			if (p % 250 == 0)
			{
				cout << "deltas: " << endl;
				del_2_1_.print();
				cout  << endl;
				del_3_2_.print();
				cout << endl;
			}
#endif

#undef VERBOSE
			// these elementwise updates could be bundled into a single matrix
			// operation; that is left to the next function
			w_m_delta_1_(0, 0) = alpha * input_matrix(0, 0) * del_2_1_(0, 0);
			w_m_delta_1_(1, 0) = alpha * input_matrix(0, 1) * del_2_1_(0, 0);
			w_m_delta_1_(2, 0) = alpha * (-1.0f) * del_2_1_(0, 0);

			w_m_delta_1_(0, 1) = alpha * input_matrix(0, 0) * del_2_1_(0, 1);
			w_m_delta_1_(1, 1) = alpha * input_matrix(0, 1) * del_2_1_(0, 1);
			w_m_delta_1_(2, 1) = alpha * (-1.0f) * del_2_1_(0, 1);

//			weight_mat_2_delta[2] = alpha * (-1) * deltas[2];
#ifdef VERBOSE
			if (p % 250 == 0)
			{
				w_m_delta_1_.print(); // untested
			}
#endif
			// update weights

			for (int i = 0; i < 2; i++)
			{
				for (int j = 0; j < 2; j++)
				{
					w_m_1_2_(i, j) = w_m_1_2_(i, j) + w_m_delta_1_(i, j);// weight_mat_1[i][j] = weight_mat_1[i][j] + weight_mat_1_delta[i][j];
				}
			}
			// The loop above is an elementwise matrix addition, but w_m_1_2_ is not
			// the same size as w_m_delta_1_, because w_m_delta_1_ carries an extra
			// row for the theta bias values. That extra row is the right place to
			// store the biases; folding them in is left to the next function, which
			// generalizes the matrix operations to an arbitrary number of inputs,
			// outputs, hidden-layer neurons, and hidden layers.


			w_m_2_3_(0, 0) = w_m_2_3_(0, 0) + w_m_delta_2_(0, 0);
			w_m_2_3_(1, 0) = w_m_2_3_(1, 0) + w_m_delta_2_(1, 0); //weight_mat_2[1] = weight_mat_2[1] + weight_mat_2_delta[1];

			theta_1_(0, 0) = theta_1_(0, 0) + w_m_delta_1_(2, 0);
			theta_1_(0, 1) = theta_1_(0, 1) + w_m_delta_1_(2, 1);

			theta_2_(0, 0) = theta_2_(0, 0) + w_m_delta_2_(2, 0);
		}
		//cout << sum_squared_errors << endl;
		if (sum_squared_errors < 0.03)
		{
			timer.Update();
			timer.Stop();
			cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.03" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
			break;
		}
	}
}
void Compute_Simple_XOR_network_version_7(int num_iterations)
{
	Timer timer;

	// TRAINING SET FOR EXCLUSIVE OR GATE
	vector<vector2d > training;
	training.push_back(vector2d{ { 0.f, 0.f } });
	training.push_back(vector2d{ { 0.f, 1.f } });
	training.push_back(vector2d{ { 1.f, 0.f } });
	training.push_back(vector2d{ { 1.f, 1.f } });

	float desired_output[4] = { 0.f, 1.f, 1.f, 0.f };

	int input_data_size = 1;
	int num_inputs = 2;
	int num_hidden = 2;
	int num_outputs = 1;

	// ==========================================
	matrix input_matrix(1, num_inputs);

	
	float output_error = 0.0f;

	

	float alpha = 0.1f;
	float beta = 0.95f;

	float sum_squared_errors = 0.0f;

	Layer hidden(num_hidden, num_inputs);
	hidden.InitSampleRandomValues_1();

	Layer output(num_outputs, num_hidden);
	output.InitSampleRandomValues_2();

	timer.Start();

	//	Sleep(2000);

	float last_sum_squared_errors = 0.0f;
	int positive_error_delta_count = 0;
	int negative_error_delta_count = 0;
	int alternation_count = 0;

	for (int p = 0; p < num_iterations; p++)
	{
		sum_squared_errors = 0.0f;
		for (int q = 0; q < 4; q++)
		{
			input_matrix(0, 0) = training[q].v[0];
			input_matrix(0, 1) = training[q].v[1];
			//input_matrix(0, 2) = -1.0f; // bias is always -1

			hidden.FeedForward(input_matrix);

			output.FeedForward(hidden.nuerons_);



#ifdef VERBOSE
			//if (p % 250 == 0)
			{

				hidden.PrintNeurons();
				output.PrintNeurons();
			}
#endif
			output_error = desired_output[q] - output.nuerons_(0, 0);

			sum_squared_errors += output_error * output_error;

			// back propagate
			matrix unit(1, 1);
			unit(0, 0) = 1.0f;
			
			matrix output_error_matrix(1, 1);
			output_error_matrix(0, 0) = output_error;


			output.BackPropogate(unit, output_error_matrix);
			
			hidden.BackPropogate(output.weights_, output.deltas_);

#ifdef VERBOSE
			//if (p % 250 == 0)
			{
				cout << "Deltas" << endl;
				

				hidden.PrintDeltas();
				output.PrintDeltas();
			}
#endif
			// weight deltas

			output.ComputeWeightDeltas(hidden.nuerons_, alpha, beta);

			hidden.ComputeWeightDeltas(input_matrix, alpha, beta);



#ifdef VERBOSE
			//if (p % 250 == 0)
			{
				cout << "Weight Deltas" << endl;
               

				hidden.PrintDeltaWeights();
				output.PrintDeltaWeights();
			}
#endif

			output.UpdateWeights();

			hidden.UpdateWeights();

#ifdef VERBOSE
			//if (p % 250 == 0)
			{

				hidden.PrintWeights();
				output.PrintWeights();
			}
#endif
		}
		if (sum_squared_errors > last_sum_squared_errors*1.04) alpha *= 0.7;
		if (sum_squared_errors < last_sum_squared_errors) alpha *= 1.05;
		// calculate the change in sum_squared_errors
		float delta_sum_square_errors = sum_squared_errors - last_sum_squared_errors;
		last_sum_squared_errors = sum_squared_errors;
		if (delta_sum_square_errors > 0.0f)
		{
			if (positive_error_delta_count == 0) {
				alternation_count++;
			}
			else{
				alternation_count = 0;
			}
			positive_error_delta_count++;
			negative_error_delta_count = 0;
		}
		else
		{
			if (negative_error_delta_count == 0) {
				alternation_count++;
			}
			else{
				alternation_count = 0;
			}
			negative_error_delta_count++;
			positive_error_delta_count = 0;
		}

		// determine change in learning rate
		if (positive_error_delta_count >= 2 || negative_error_delta_count >= 2)
		{
			alpha += 0.1;
			if (alpha > 1.0f) alpha = 1.0f;
		}
		else if (alternation_count >= 2)
		{
			alpha -= 0.1;
			if (alpha < 0.0f) alpha = 0.01;
		}

		//cout << sum_squared_errors << endl;
		if (sum_squared_errors < 0.001)
		{
			timer.Update();
			timer.Stop();
			cout << "Finished on iteration: " << p << ", with sum squared errors less than 0.001" << endl << "Total calculation performed in " << timer.GetTimeDelta() << " seconds" << endl;
			break;
		}
	}

}
Example #10
    /**
     * \brief Overridable default destructor
     */
    virtual ~LinearStateTransitionModel() { }

    virtual State expected_state(const State& state,
                                 const Input& input) const
    {
        return dynamics_matrix_ * state + input_matrix_ * input;
    }

    virtual Real log_probability(const State& state,
                                 const State& cond_state,
                                 const Input& cond_input) const
    {
        density_.mean(expected_state(cond_state, cond_input));

        return density_.log_probability(state);
    }

public: /* factory functions */
    virtual InputMatrix create_input_matrix() const
    {
        auto B = input_matrix();
        B.setIdentity();
        return B;
    }
Example #11
int main(int argc, char *argv[]) {
  CommandLine cl;
  MathRandom<MathMersenneTwister> rng;
  Error error;
  ya_check_debug();

  YA_MatD input_matrix;
  YA_MatD output_matrix;

  // Parse the command line
  HandleArgs(cl,argc,argv,&error);

  string outputfile="";
  if (cl.argsize(' ')>0) {
    load(cl.argstring(' ',0),input_matrix);
    if (cl.argsize(' ')>1)
      outputfile=cl.argstring(' ',1);
  } else
    read(cin,input_matrix);

  // Select rows
  if (cl['r']) {
    output_matrix=input_matrix(YA_RowI(cl.argstring('r',0)),":");
    input_matrix=output_matrix;
  }
  
  // Select cols
  if (cl['c']) {
    output_matrix=input_matrix(":",YA_RowI(cl.argstring('c',0)));
    input_matrix=output_matrix;
  }

  // Reorder rows using modulus
  else if (cl['z']) {
    ya_sizet mod=cl.argint('z',0);
    if (mod==0)
      error.generate_error(0,"vm_slice","Cannot specify a mod_num of 0.");
    if (input_matrix.rows()%mod!=0) {
      error.buffer() << "When using -z, the number of rows in the matrix "
                     << "must be evenly divisible by the mod_num.";
      error.addbuf(0,"vm_slice");
    }
    YA_VecI row_order(input_matrix.rows());
    ya_sizet offset=input_matrix.rows()/mod;
    for (ya_sizet i=0; i<input_matrix.rows(); i++) {
      div_t index=div(int(i),int(mod));
      row_order(i)=index.quot+index.rem*offset;
    }
    output_matrix=input_matrix(row_order,":");
  } else    
    output_matrix=input_matrix;

  ya_sizet file_format=YA_DEFAULT_IO;
  if (cl['t'])
    file_format=YA_PRETTY_IO;
  if (cl['b'])
    file_format=YA_BINARY_IO;

  // Random subset
  if (cl['s']) {
    double percent=cl.argdouble('s',0);
    if (percent>1)
      error.generate_error(0,"mat_convert",
        "Random percentage must be between 0 and 1");
    YA_RowI rand_perm(randperm(output_matrix.rows(),rng));
    output_matrix=copy(output_matrix(rand_perm,":"));
    ya_sizet cut_frac=ya_sizet(percent*output_matrix.rows());
    if (cl.argstring('s',1)!="NO_OUTPUT")
      save(cl.argstring('s',1),
           output_matrix(vmcount(cut_frac,":",output_matrix.rows()-1),":"),
           file_format);
    output_matrix=copy(output_matrix(vmcount(cut_frac),":"));
  }

  if (cl['q'])
    ip_transpose(output_matrix);

  if (outputfile=="")
    write(cout,output_matrix,file_format);
  else
    save(outputfile,output_matrix,file_format);
  return 0;
}
Example #12
int main(void)
{
    int retval = 0;
    int choice = 0;
    short *prandom_data;
#ifdef PATCHED_1
    char m_result_data[MAX_ROWS * MAX_COLS * sizeof(int)];
#else
    /* unpatched: allocated one int short of a full MAX_ROWS x MAX_COLS result */
    char m_result_data[((MAX_ROWS * MAX_COLS) - 1) * sizeof(int)];
#endif

    prandom_data = create_random_shorts();
    matrix_t *m;
    matrix_t *m1, *m2;
    matrix_t *m_result;
    m1 = create_matrix(SHORT, NULL);
    m2 = create_matrix(SHORT, NULL);
    m_result = create_matrix(INT, m_result_data);

    char *input = malloc(2048);
    printf("Matrix math is fun!\n");
    printf("-------------------\n");
    while (1)
    {
        choice = select_menu_choice(input, LINE_SIZE);
        switch(choice)
        {
        case 1:
            printf("Inputting Matrix Values:\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            if (input_matrix(m, input, LINE_SIZE) == ERROR)
                goto cgc_exit;
            break;
        case 2:
            printf("Print Matrices:\n");
            print_matrices(m1, m2, m_result);
            break;
        case 3:
            printf("Adding Matrices:\n");
            add_matrices(m1, m2, m_result);
            break;
        case 4:
            printf("Subtracting Matrices:\n");
            subtract_matrices(m1, m2, m_result);
            break;
        case 5:
            printf("Multiplying Matrices:\n");
            multiply_matrices(m1, m2, m_result);
            break;
        case 6:
            printf("Swap Rows in a  Matrix:\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            retval = swap_matrix_row_col(m, SWAP_ROW, input, LINE_SIZE);
            if (retval == ERROR)
                goto cgc_exit;
            if (retval == SUCCESS)
                print_matrix("Swapped Rows", m);
            break;
        case 7:
            printf("Swap Columns in a  Matrix:\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            retval = swap_matrix_row_col(m, SWAP_COL, input, LINE_SIZE);
            if (retval == ERROR)
                goto cgc_exit;
            if (retval == SUCCESS)
                print_matrix("Swapped Columns", m);
            break;
        case 8:
            printf("Transpose a Matrix:\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            transpose_matrix(m);
            break;
        case 9:
            printf("Perform Reduced Row Echelon Form on Matrix\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            rref_matrix(m, m_result);
            break;
        case 10:
            printf("Create a Random Matrix:\n");
            m = choose_matrix(m1, m2, input, LINE_SIZE);
            if (!m)
                goto cgc_exit;
            if (random_matrix(m, input, LINE_SIZE, prandom_data) == ERROR)
                goto cgc_exit;
            break;
        case 11:
            goto cgc_exit;
        default:
            printf("Bad Selection\n");
        }
    }

cgc_exit:
    printf("Exiting...\n");
    return 0;
}