Example #1
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdint.h>
/* Assumed: BoolMatrix and the MATRIX_* macros come from the project's matrix header. */

int main(int argc, char *argv[]) {
  
  if (argc != 2) {
    printf("%s <matrix size>\n", argv[0]);
    exit(1);
  }

  uint32_t n = atoi(argv[1]);
  srand(time(0));


  BoolMatrix m;
  INIT_MATRIX(m, n, n);
  MATRIX_SET_MAIN_DIAGONAL_PART(m, n);

  uint32_t i, j;
  for (i = 0; i < n/2; i++) {
    uint32_t x, y;
    x = rand()%n;
    y = rand()%n;
    MATRIX_SET_BOTH(m, x, y);
  }

  printf("  |");
  for (i = 0; i < n; i++)
    printf("%2d", i);

  printf("\n--+");
  for (i = 0; i < n; i++)
    printf("--");

  for (i = 0; i < n; i++) {
    printf("\n%2d|", i);
    for (j = 0; j < n; j++)
      printf("%2d", MATRIX_AT(m, j, i));
  }
  printf("\n\n");

  MATRIX_CALC_PATHS_WITHOUT(m, n);

  printf("  |");
  for (i = 0; i < n; i++)
    printf("%2d", i);

  printf("\n--+");
  for (i = 0; i < n; i++)
    printf("--");

  for (i = 0; i < n; i++) {
    printf("\n%2d|", i);
    for (j = 0; j < n; j++)
      printf("%2d", MATRIX_AT(m, j, i));
  }
  printf("\n\n");

  return 0;
}
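These examples use MATRIX_AT in two shapes: a two-index form over a matrix object (Examples 1, 5, 6, 7, 10) and a four-argument form over a flat float array with an explicit row stride (the SRNN/SVM snippets). Neither macro definition appears in the snippets; the sketch below shows what row-major definitions could plausibly look like. The names, the data/tda fields, and the layout are assumptions, not the projects' actual headers.

/* Hypothetical sketches only; both assume row-major storage. */
/* Four-argument form over a flat array, as in the SRNN/SVM snippets:
   element (i, j) of a matrix whose rows are 'stride' floats apart. */
#define MATRIX_AT_FLAT(m, stride, i, j) ((m)[(size_t)(i) * (stride) + (j)])

/* Two-index form over a struct that records its own dimensions, as in the
   Matrix/gsl-style snippets; 'data' and 'tda' are assumed field names. */
#define MATRIX_AT_OBJ(m, i, j) ((m)->data[(size_t)(i) * (m)->tda + (j)])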
Example #2
File: SRNN.cpp Project: MrAiki/SRNN
/* Predict: predict the next step of the input sequence */
void SRNN::predict(float* input)
{
  float *norm_input = new float[this->dim_signal];

  // Normalize the input signal
  for (int n=0; n < dim_signal; n++) {
    norm_input[n] = 
      normalize_signal(input[n],
          MATRIX_AT(this->sample_maxmin,2,n,0),
          MATRIX_AT(this->sample_maxmin,2,n,1));
  }

  // output signal
  float* out_signal = new float[dim_signal];
  // value of network in input->hidden layer 
  float* in_mid_net = new float[num_mid_neuron];
  // value of network in hidden->output layer 
  float* mid_out_net = new float[dim_signal];

  /* Calculate the output signal */
  // Get the input signal
  memcpy(expand_in_signal, norm_input, sizeof(float) * dim_signal);
  // Input-layer signal: pass the linear sum with the hidden-layer context through the sigmoid function.
  for (int d = 0; d < num_mid_neuron; d++) {
    expand_in_signal[dim_signal + d] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d] + expand_mid_signal[d]);
  }
  // Bias fixed at 1.
  expand_in_signal[dim_signal + num_mid_neuron] = 1;

  // Weighted sum: input -> hidden layer
  multiply_mat_vec(Win_mid, expand_in_signal, in_mid_net, num_mid_neuron, dim_signal + num_mid_neuron + 1);
  // Hidden-layer output signal
  sigmoid_vec(in_mid_net, expand_mid_signal, num_mid_neuron);
  expand_mid_signal[num_mid_neuron] = 1;

  // Weighted sum: hidden -> output layer
  multiply_mat_vec(Wmid_out, expand_mid_signal, mid_out_net, dim_signal, num_mid_neuron + 1);
  // Output-layer output signal
  sigmoid_vec(mid_out_net, out_signal, dim_signal);

  // Expand the output signal back to its original range.
  for (int n = 0; n < dim_signal; n++) {
    predict_signal[n] = expand_signal(out_signal[n],sample_maxmin[n * 2],sample_maxmin[n * 2 + 1]);
  }
  
  delete [] norm_input; delete [] out_signal;
  delete [] in_mid_net; delete [] mid_out_net;

}
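predict() relies on two helpers not shown in these snippets, normalize_signal and expand_signal. Judging from the calls, where sample_maxmin stores the per-dimension maximum at column 0 and the minimum at column 1, they are plausibly min-max scaling and its inverse. A minimal sketch under that assumption; the bodies are guesses, not SRNN.cpp's code:

// Hypothetical sketches, assuming plain min-max scaling into [0, 1].
static inline float normalize_signal(float x, float max, float min) {
  return (x - min) / (max - min);   // squash into [0, 1]
}

static inline float expand_signal(float y, float max, float min) {
  return y * (max - min) + min;     // inverse: back to the original range
}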
Example #3
File: SVM.cpp Project: MrAiki/SVM
// Compute the classification probability for unseen data
float SVM::predict_probability(float* data)
{
    float net, probability;
    float* optimal_w = new float[dim_signal];   // optimal primal weights (not the dual coefficients)
    float sigmoid_param;                        // temperature parameter of the sigmoid function
    float norm_w;                               // 2-norm of the weight vector

    net = SVM::predict_net(data);

    // Compute the optimal weights
    for (int n = 0; n < dim_signal; n++ ) {
        optimal_w[n] = 0;
        for (int l = 0; l < n_sample; l++ ) {
            optimal_w[n] += alpha[l] * label[l] * MATRIX_AT(sample, dim_signal, l, n);
        }
    }
    norm_w = two_norm(optimal_w, dim_signal);
    sigmoid_param = 1 / ( norm_w * logf( (1 - epsilon) / epsilon ) );
    
    probability = sigmoid_func(net/sigmoid_param);
    
    delete [] optimal_w;
    
    // Truncation: if within epsilon of 0 or 1, truncate to exactly 0 or 1.
    if ( probability > (1 - epsilon) ) {
        return float(1);
    } else if ( probability < epsilon ) {
        return float(0);
    }
    
    return probability;
    
}
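predict_probability maps the raw network value onto (0, 1) with a sigmoid whose slope is derived from the weight norm. sigmoid_func and two_norm are not shown either; plausible definitions (assumptions, not the project's code):

#include <cmath>

// Hypothetical sketches of the helpers referenced above.
static inline float sigmoid_func(float x) {
  return 1.0f / (1.0f + std::exp(-x));  // standard logistic function
}

static inline float two_norm(const float* v, int n) {
  float s = 0.0f;
  for (int i = 0; i < n; i++) s += v[i] * v[i];
  return std::sqrt(s);                  // Euclidean (L2) norm
}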
Example #4
File: SVM.cpp Project: MrAiki/SVM
// Compute the network value for unseen data
float SVM::predict_net(float* data)
{
  // Verify that learning has finished
  if (status != SVM_LEARN_SUCCESS && status != SVM_SET_ALPHA) {
    fprintf(stderr, "Learning is not completed yet.\n");
    //exit(1);
    return SVM_NOT_LEARN;
  }

  float* norm_data = new float[dim_signal];

  // Normalize the signal
  for (int i = 0; i < dim_signal; i++) {
    norm_data[i] = ( data[i] - sample_min[i] ) / ( sample_max[i] - sample_min[i] );
  }

  // Compute the network value
  float net = 0;
  for (int l=0; l < n_sample; l++) {
    // **Samples whose coefficient is positive are the support vectors**
    if(alpha[l] > 0) {
      net += label[l] * alpha[l]
              * kernel_function(&(MATRIX_AT(sample,dim_signal,l,0)), norm_data, dim_signal);
    }
  }

  return net;

}
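kernel_function(x, y, dim) appears throughout the SVM snippets but is never shown. A common choice compatible with these call sites is a Gaussian (RBF) kernel; the sketch below is an assumption, including the bandwidth parameter gamma:

#include <cmath>

// Hypothetical sketch: Gaussian (RBF) kernel between two dim-dimensional vectors.
static float kernel_function(const float* x, const float* y, int dim) {
  const float gamma = 1.0f;   // assumed bandwidth, not from the project
  float d2 = 0.0f;
  for (int i = 0; i < dim; i++) {
    float d = x[i] - y[i];
    d2 += d * d;              // squared Euclidean distance
  }
  return std::exp(-gamma * d2);
}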
Example #5
/**
	This method sets the current matrix to the 3x3 outer product of the vectors a and b.
*/
void Matrix::setToOuterproduct(const Vector3d& a, const Vector3d& b){
	resizeTo(3,3);
	MATRIX_AT(this->matrix, 0, 0) = a.x * b.x;
	MATRIX_AT(this->matrix, 0, 1) = a.x * b.y;
	MATRIX_AT(this->matrix, 0, 2) = a.x * b.z;

	MATRIX_AT(this->matrix, 1, 0) = a.y * b.x;
	MATRIX_AT(this->matrix, 1, 1) = a.y * b.y;
	MATRIX_AT(this->matrix, 1, 2) = a.y * b.z;

	MATRIX_AT(this->matrix, 2, 0) = a.z * b.x;
	MATRIX_AT(this->matrix, 2, 1) = a.z * b.y;
	MATRIX_AT(this->matrix, 2, 2) = a.z * b.z;
}
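In index form, the assignments above are just the rank-1 outer product: M_ij = a_i * b_j, i.e. M = a b^T.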
Example #6
/**
	A potentially faster routine for 3x3 matrix by Vector3d multiplication.
*/
void Matrix::postMultiplyVector(const Vector3d& v, Vector3d& result){
	if (this->matrix->size1 != 3 || this->matrix->size2 != 3)
		throwError("Can only use the * operator between a 3x3 matrix and a vector3d object!");
	result.x = MATRIX_AT(this->matrix, 0, 0) * v.x + MATRIX_AT(this->matrix, 0, 1) * v.y + MATRIX_AT(this->matrix, 0, 2) * v.z;
	result.y = MATRIX_AT(this->matrix, 1, 0) * v.x + MATRIX_AT(this->matrix, 1, 1) * v.y + MATRIX_AT(this->matrix, 1, 2) * v.z;
	result.z = MATRIX_AT(this->matrix, 2, 0) * v.x + MATRIX_AT(this->matrix, 2, 1) * v.y + MATRIX_AT(this->matrix, 2, 2) * v.z;
}
Example #7
/**
	This operator gives a quick way of multiplying 3x3 matrices by vectors; used for dynamics, for instance.
*/
Vector3d Matrix::operator * (const Vector3d &other){
	Vector3d result;
	if (this->matrix->size1 != 3 || this->matrix->size2 != 3)
		throwError("Can only use the * operator between a 3x3 matrix and a vector3d object!");
	result.x = MATRIX_AT(this->matrix, 0, 0) * other.x + MATRIX_AT(this->matrix, 0, 1) * other.y + MATRIX_AT(this->matrix, 0, 2) * other.z;
	result.y = MATRIX_AT(this->matrix, 1, 0) * other.x + MATRIX_AT(this->matrix, 1, 1) * other.y + MATRIX_AT(this->matrix, 1, 2) * other.z;
	result.z = MATRIX_AT(this->matrix, 2, 0) * other.x + MATRIX_AT(this->matrix, 2, 1) * other.y + MATRIX_AT(this->matrix, 2, 2) * other.z;

	return result;
}
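A short usage sketch tying Examples 5 through 7 together; the Vector3d constructor signature is an assumption:

// Hypothetical usage sketch (constructor signatures are assumed).
Vector3d a(1.0, 2.0, 3.0), b(0.5, 0.5, 0.5), r;
Matrix M;
M.setToOuterproduct(a, b);   // M = a * b^T, a rank-1 3x3 matrix
M.postMultiplyVector(b, r);  // r = M * b, written into the out-parameter
Vector3d r2 = M * b;         // the same product via the operator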
Example #8
File: SVM.cpp Project: MrAiki/SVM
// Learning by the steepest-gradient method (sorry lol)
int SVM::learning(void)
{

  int iteration;              // learning iteration count

  float* diff_alpha;          // gradient of the dual problem
  float* pre_diff_alpha;      // previous gradient of the dual problem (used for the momentum term)
  float* pre_alpha;           // previous dual coefficient vector (used for the convergence test)
  register float diff_sum;    // running subtotal for the gradient computation
  register float kernel_val;  // kernel value including the C2 term

  //float plus_sum, minus_sum;  // coefficient sums over the positive and negative examples

  // Allocate the arrays (vectors)
  diff_alpha     = new float[n_sample];
  pre_diff_alpha = new float[n_sample];
  pre_alpha      = new float[n_sample];

  // Zero-fill so the diagnostic printf on the first iteration reads defined values
  memset(diff_alpha, 0, sizeof(float) * n_sample);
  memset(pre_alpha, 0, sizeof(float) * n_sample);

  status = SVM_NOT_LEARN;     // mark learning as not yet completed
  iteration  = 0;             // initialize the iteration count

  // Initialize the dual coefficients at random
  for (int i = 0; i < n_sample; i++ ) {
    // Coefficients of missing data are set to 0 and left unused
    if ( label[i] == 0 ) {
      alpha[i] = 0;
      continue;
    }
    alpha[i] = uniform_rand(1.0) + 1.0;
  }

  // Learning loop
  while ( iteration < maxIteration ) {

    printf("ite: %d diff_norm : %f alpha_dist : %f \r\n", iteration, two_norm(diff_alpha, n_sample), vec_dist(alpha, pre_alpha, n_sample));
    // Record the previous update values
    memcpy(pre_alpha, alpha, sizeof(float) * n_sample);
    if ( iteration >= 1 ) {
      memcpy(pre_diff_alpha, diff_alpha, sizeof(float) * n_sample);
    } else {
      // First iteration: initialize with zero fill
      memset(diff_alpha, 0, sizeof(float) * n_sample);
      memset(pre_diff_alpha, 0, sizeof(float) * n_sample);
    }

    // Compute the gradient
    for (int i=0; i < n_sample; i++) {
      diff_sum = 0;
      for (int j=0; j < n_sample;j++) {
        // Kernel value taking C2 into account
        kernel_val = kernel_function(&(MATRIX_AT(sample,dim_signal,i,0)), &(MATRIX_AT(sample,dim_signal,j,0)), dim_signal);
        // kernel_val = MATRIX_AT(grammat,n_sample,i,j); // via Gram matrix
        if (i == j) { 
          kernel_val += (1/C2);
        }
        diff_sum += alpha[j] * label[j] * kernel_val; 
      }
      diff_sum *= label[i];
      diff_alpha[i] = 1 - diff_sum;
    }

    // Update the dual variables
    for (int i=0; i < n_sample; i++) {
      if ( label[i] == 0 ) {
        continue;
      }
      //printf("alpha[%d] : %f -> ", i, alpha[i]);
      alpha[i] = pre_alpha[i] 
                  + eta * diff_alpha[i]
                  + learn_alpha * pre_diff_alpha[i];
      //printf("%f \dim_signal", alpha[i]);

      // Check for NaN/Inf
      if ( isnan(alpha[i]) || isinf(alpha[i]) ) {
        fprintf(stderr, "Detected NaN or Inf dual coefficient: pre_alpha[%d]=%f -> alpha[%d]=%f\n", i, pre_alpha[i], i, alpha[i]);
        return SVM_DETECT_BAD_VAL;
      }

    }

    // Coefficient constraint 1: make the dual coefficient sums of the positive
    //                           and negative examples equal, by shifting toward the sample mean.
    float norm_sum = 0;
    for (int i = 0; i < n_sample; i++ ) {
      norm_sum += (label[i] * alpha[i]);
    }
    norm_sum /= n_sample;

    for (int i = 0; i < n_sample; i++ ) {
      if ( label[i] == 0 ) {
        continue;
      }
      alpha[i] -= (norm_sum / label[i]);
    }

    // Coefficient constraint 2: the dual coefficients must be non-negative
    for (int i = 0; i < n_sample; i++ ) {
      if ( alpha[i] < 0 ) {
        alpha[i] = 0;
      } else if ( alpha[i] > C1 ) {
        // With soft-margin parameter C1, the coefficients are capped at C1.
        alpha[i] = C1;
      }  
    }

    // Convergence test: this is a convex program, so at convergence
    //                   the global optimum is guaranteed.
    if ( (vec_dist(alpha, pre_alpha, n_sample) < epsilon)
        || (two_norm(diff_alpha, n_sample) < epsilon) ) {
      // Learning completed normally
      status = SVM_LEARN_SUCCESS;
      break;
    }

    // Increment the learning iteration count
    iteration++;
  }

  if (iteration >= maxIteration) {
    fprintf(stderr, "Learning is not convergenced. (iteration count > maxIteration) \r\n");
    status = SVM_NOT_CONVERGENCED;
  } else if ( status != SVM_LEARN_SUCCESS ) {
    status = SVM_NOT_LEARN;
  }
  
  // Free the arrays
  delete [] diff_alpha;
  delete [] pre_diff_alpha;
  delete [] pre_alpha;
  
  return status;

}
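The gradient computed above is exactly that of the soft-margin SVM dual, with the C2 term folded into the kernel's diagonal. Writing y_i for label[i] and K for kernel_function,

    W(alpha)      = sum_i alpha_i - (1/2) sum_i sum_j alpha_i alpha_j y_i y_j (K(x_i, x_j) + delta_ij/C2)
    dW/dalpha_i   = 1 - y_i sum_j alpha_j y_j (K(x_i, x_j) + delta_ij/C2)

which is what diff_alpha[i] = 1 - diff_sum evaluates. The update then climbs this gradient with step eta plus a momentum term (learn_alpha), and the two constraint blocks (approximately) project back onto sum_i y_i alpha_i = 0 and 0 <= alpha_i <= C1.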
Example #9
File: SVM.cpp Project: MrAiki/SVM
SVM::SVM(int dim_sample, int n_sample, float* sample_data, int* sample_label)
{
  this->dim_signal = dim_sample;
  this->n_sample = n_sample;

  // Allocate each array (vector)
  alpha   = new float[n_sample];
  // grammat = new float[n_sample * n_sample];
  label   = new int[n_sample];
  sample  = new float[dim_signal * n_sample];
  sample_max = new float[dim_signal];
  sample_min = new float[dim_signal];

  // Copy the samples
  memcpy(this->sample, sample_data,
          sizeof(float) * dim_signal * n_sample);
  memcpy(this->label, sample_label,
          sizeof(int) * n_sample);

  // Max/min values for normalization
  for (int i = 0; i < dim_signal; i++) {
    float value;
    sample_max[i] = -FLT_MAX;  // must start below any sample; a memset to 0 breaks all-negative dimensions
    sample_min[i] = FLT_MAX;
    for (int j = 0; j < n_sample; j++) {
      value = MATRIX_AT(this->sample, dim_signal, j, i);
      if ( value > sample_max[i] ) {
        sample_max[i] = value;
      }
      if ( value < sample_min[i] ) {
        //printf("min[%d] : %f -> ", i, sample_min[i]);
        sample_min[i] = value;
        //printf("min[%d] : %f \n", i, value);
      }
    }
  }

  // Normalize the signals: critically important
  for (int i = 0; i < dim_signal; i++) {
    float max,min;
    max = sample_max[i];
    min = sample_min[i];
    for (int j = 0; j < n_sample; j++) {
      //printf("[%d,%d] %f -> ", i, j, MATRIX_AT(this->sample, dim_signal, j, i));
      MATRIX_AT(this->sample, dim_signal, j, i) = ( MATRIX_AT(this->sample, dim_signal, j, i) - min ) / (max - min);
      //printf("%f\dim_signal", MATRIX_AT(this->sample, dim_signal, j, i));
    }
  }

  /* // Gram matrix computation: dropped due to memory constraints
  for (int i = 0; i < n_sample; i++) {
    for (int j = i; j < n_sample; j++) {
      MATRIX_AT(grammat,n_sample,i,j) = kernel_function(&(MATRIX_AT(this->sample,dim_signal,i,0)), &(MATRIX_AT(this->sample,dim_signal,j,0)), dim_signal);
      // The Gram matrix is symmetric
      if ( i != j ) {
        MATRIX_AT(grammat,n_sample,j,i) = MATRIX_AT(grammat,n_sample,i,j);
      }
    }
  }
  */

  // Learning-related settings; rules of thumb, as usual
  this->maxIteration = 5000;
  this->epsilon      = float(0.00001);
  this->eta          = float(0.05);
  this->learn_alpha  = float(0.8) * this->eta;
  this->status       = SVM_NOT_LEARN;

  // Soft-margin coefficients. Setting both to FLT_MAX (almost) recovers the hard margin.
  // When configuring, set only one of the two.
  C1 = FLT_MAX;
  C2 = 5;

  srand((unsigned int)time(NULL));

}
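Putting the SVM snippets together, a typical call sequence would be construct, learn, then query. The layout below (row-major samples, one per row; labels in {-1, +1} with 0 marking missing data) is inferred from the constructor and learning(); the numbers are made up:

// Hypothetical usage sketch.
const int dim = 2, n = 4;
float samples[dim * n] = { 0.f, 0.f,   0.f, 1.f,   1.f, 0.f,   1.f, 1.f };
int   labels[n]        = { -1, -1, +1, +1 };  // 0 would mark a missing sample

SVM svm(dim, n, samples, labels);
if (svm.learning() == SVM_LEARN_SUCCESS) {
  float query[dim] = { 0.9f, 0.2f };
  float p = svm.predict_probability(query);   // probability of the positive class
}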
Example #10
/**
	This method computes the inverse of the matrix a and writes it over the current matrix.
*/
void Matrix::setToInverseOf(const Matrix &a, double t){
		if (a.matrix->size1 != a.matrix->size2)
			throwError("Cannot invert a matrix that is not square");
	
		int DIM = (int)a.matrix->size1;
		this->resizeTo(DIM, DIM);

//we'll write some messy code and try to get efficient inverses by means of determinants for 1x1, 2x2 and 3x3 matrices. We'll also safeguard 
//against very small determinants if desired...
		if (DIM == 1){
			double a00 = MATRIX_AT(a.matrix, 0, 0);
			if (fabs(a00)<t)
				a00 = t * fabs(a00)/a00;
			MATRIX_AT(this->matrix, 0, 0) = 1/a00;
			return;
		}

		if (DIM == 2){
			double a11 = MATRIX_AT(a.matrix, 0, 0);
			double a12 = MATRIX_AT(a.matrix, 0, 1);
			double a21 = MATRIX_AT(a.matrix, 1, 0);
			double a22 = MATRIX_AT(a.matrix, 1, 1);
			
			double det = a11*a22-a12*a21;
			if (fabs(det)<t)
				det = t * fabs(det)/det;

			MATRIX_AT(this->matrix, 0, 0) = a22 / det;
			MATRIX_AT(this->matrix, 0, 1) = -a12 / det;
			MATRIX_AT(this->matrix, 1, 0) = -a21 / det;
			MATRIX_AT(this->matrix, 1, 1) = a11 / det;
			return;
		}

		if (DIM == 3){
			double a11 = MATRIX_AT(a.matrix, 0, 0);
			double a12 = MATRIX_AT(a.matrix, 0, 1);
			double a13 = MATRIX_AT(a.matrix, 0, 2);

			double a21 = MATRIX_AT(a.matrix, 1, 0);
			double a22 = MATRIX_AT(a.matrix, 1, 1);
			double a23 = MATRIX_AT(a.matrix, 1, 2);

			double a31 = MATRIX_AT(a.matrix, 2, 0);
			double a32 = MATRIX_AT(a.matrix, 2, 1);
			double a33 = MATRIX_AT(a.matrix, 2, 2);
			
			double det = a11*(a33*a22-a32*a23)-a21*(a33*a12-a32*a13)+a31*(a23*a12-a22*a13);

			if (fabs(det)<t)
				det = t * fabs(det)/det;

			MATRIX_AT(this->matrix, 0, 0) = (a33*a22-a32*a23)/det;
			MATRIX_AT(this->matrix, 0, 1) = -(a33*a12-a32*a13)/det;
			MATRIX_AT(this->matrix, 0, 2) = (a23*a12-a22*a13)/det;

			MATRIX_AT(this->matrix, 1, 0) = -(a33*a21-a31*a23)/det;
			MATRIX_AT(this->matrix, 1, 1) = (a33*a11-a31*a13)/det;
			MATRIX_AT(this->matrix, 1, 2) = -(a23*a11-a21*a13)/det;


			MATRIX_AT(this->matrix, 2, 0) = (a32*a21-a31*a22)/det;
			MATRIX_AT(this->matrix, 2, 1) = -(a32*a11-a31*a12)/det;
			MATRIX_AT(this->matrix, 2, 2) = (a22*a11-a21*a12)/det;

			return;
		}

//ok, it's already messy, so if the dimensions are bigger than 3, we'll do it by row reduction


		double val, val2;
		int i, j, k, ind;
		
		//make a working copy of the source matrix a
		Matrix tmp = a;

		this->loadIdentity();
    
		for (i = 0; i != DIM; i++) {
			
			val = MATRIX_AT(tmp.matrix, i, i);			/* find pivot */
			ind = i;
			for (j = i + 1; j != DIM; j++) {
				if (fabs(MATRIX_AT(tmp.matrix, j, i)) > fabs(val)) {
					ind = j;
					val = MATRIX_AT(tmp.matrix, j, i);
				}
			}
            
			if (ind != i) {			
				for (j = 0; j != DIM; j++) {
					val2 = MATRIX_AT(this->matrix, i, j);
					MATRIX_AT(this->matrix,i,j) = MATRIX_AT(this->matrix, ind, j);
					MATRIX_AT(this->matrix,ind,j) = val2;           /* swap rows */
					val2 = MATRIX_AT(tmp.matrix, i, j);
					MATRIX_AT(tmp.matrix,i,j) = MATRIX_AT(tmp.matrix, ind, j);
					MATRIX_AT(tmp.matrix, ind,j) = val2;
				}
			}

			//safeguard against zeros if need be...
			if (fabs(val)<t)
				val = t * fabs(val)/val;

			if (IS_ZERO(val))
				throwError("Matrix is singular.");
            
			for (j = 0; j != DIM; j++) {
				MATRIX_AT(tmp.matrix, i, j) /= val;
				MATRIX_AT(this->matrix, i, j) /= val;
			}
        
			for (j = 0; j != DIM; j++) {		
				if (j == i)
					continue;                       /* eliminate column */
				val = MATRIX_AT(tmp.matrix, j, i);
				for (k = 0; k != DIM; k++) {
					MATRIX_AT(tmp.matrix, j,k) = MATRIX_AT(tmp.matrix, j, k) - MATRIX_AT(tmp.matrix, i, k)  * val;
					MATRIX_AT(this->matrix, j,k) = MATRIX_AT(this->matrix, j, k) - MATRIX_AT(this->matrix, i, k)  * val;
				}
			}
		}

		//and done
}
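For reference, the 1x1/2x2/3x3 branches above are adjugate-over-determinant formulas; for the 2x2 case,

    A^{-1} = (1/det A) * [  a22  -a12 ]
                         [ -a21   a11 ],   det A = a11*a22 - a12*a21,

and the threshold t clamps |det| away from zero (preserving its sign) before dividing. Dimensions above 3 fall through to Gauss-Jordan elimination with partial pivoting, which is what the row-reduction loop implements.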
Example #11
File: SRNN.cpp Project: MrAiki/SRNN
/* Learning by backpropagation. Local minima? No idea what you're talking about. */
float SRNN::learning(void)
{
  int iteration = 0; // learning iteration count
  int seq = 0;       // index of the sequence element currently being trained [0,...,len_seqence-1]
  int end_flag = 0;  // termination flag; once set, the current sequence is run to its end and then learning stops.
  // Sizes of the weight matrices
  int row_in_mid = num_mid_neuron;
  int col_in_mid = dim_signal + num_mid_neuron + 1;
  int row_mid_out = dim_signal;
  int col_mid_out = num_mid_neuron + 1;

  // Allocate the matrices
  // Update amounts for the weight matrices
  float* dWin_mid  = new float[row_in_mid * col_in_mid];
  float* dWmid_out = new float[row_mid_out * col_mid_out];
  // Previous update amounts: used for the momentum term.
  float* prevdWin_mid  = new float[row_in_mid * col_in_mid];
  float* prevdWmid_out = new float[row_mid_out * col_mid_out];
  float* norm_sample   = new float[len_seqence * dim_signal]; // normalized sample signal; training actually uses the normalized signal.

  // Initialize the weight matrices
  for (int i=0; i < row_in_mid; i++)
    for (int j=0; j < col_in_mid; j++)
      MATRIX_AT(Win_mid,col_in_mid,i,j) = uniform_rand(width_initW);

  for (int i=0; i < row_mid_out; i++)
    for (int j=0; j < col_mid_out; j++)
      MATRIX_AT(Wmid_out,col_mid_out,i,j) = uniform_rand(width_initW);

  // Normalize the signals: in practice, a critically important step
  for (int seq=0; seq < len_seqence; seq++) {
    for (int n=0; n < dim_signal; n++) {
      MATRIX_AT(norm_sample,dim_signal,seq,n) = 
            normalize_signal(MATRIX_AT(this->sample,dim_signal,seq,n),
                             MATRIX_AT(this->sample_maxmin,2,n,0),
                             MATRIX_AT(this->sample_maxmin,2,n,1));
      // printf("%f ", MATRIX_AT(norm_sample,dim_signal,seq,n));
    }
    // printf("\r\n");
  }

  // Output-layer signal
  float* out_signal = new float[dim_signal];

  // Weighted sum from the input layer to the hidden layer
  float* in_mid_net = new float[num_mid_neuron];
  // Weighted sum from the hidden layer to the output layer
  float* mid_out_net = new float[dim_signal];

  // Error signal
  float* sigma = new float[dim_signal];

  // Previous squared error: used for the convergence test (valid from the second iteration).
  float prevError;

  /* Learning loop */
  while (1) {

    // Check whether the termination condition holds
    if (!end_flag) {
      end_flag = !(iteration < this->maxIteration 
                   && (iteration <= this->len_seqence 
                       || this->squareError > this->goalError)
                  );
    }

    // printf("ite:%d err:%f \r\n", iteration, squareError);

    // If we have reached the end of the sequence, restart from the beginning.
    if (seq == len_seqence && !end_flag) {
      seq = 0;
    }

    // Save the previous update amounts / squared error
    if (iteration >= 1) {
      memcpy(prevdWin_mid, dWin_mid, sizeof(float) * row_in_mid * col_in_mid);
      memcpy(prevdWmid_out, dWmid_out, sizeof(float) * row_mid_out * col_mid_out);
      prevError = squareError;
    } else {
      // First iteration: zero fill
      memset(prevdWin_mid, 0, sizeof(float) * row_in_mid * col_in_mid);
      memset(prevdWmid_out, 0, sizeof(float) * row_mid_out * col_mid_out);
    }
    
    /* Learning step 1: compute the network's output signal */

    // Fetch the input values
    memcpy(expand_in_signal, &(norm_sample[seq * dim_signal]), sizeof(float) * dim_signal);
    // SRNN-specific: a copy of the hidden layer is appended to the input layer and fed back in.
    if (iteration == 0) {
      // Zero-fill on the first iteration
      memset(&(expand_in_signal[dim_signal]), 0, sizeof(float) * num_mid_neuron);
    } else {
      // Context layer = the previous output of the context layer:
      // pass the linear sum with the previous hidden-layer signal through the sigmoid.
      for (int d = 0; d < num_mid_neuron; d++) {
        expand_in_signal[dim_signal + d] = sigmoid_func(alpha_context * expand_in_signal[dim_signal + d] + expand_mid_signal[d]);
      }
    }
    // The bias term is fixed at 1.
    expand_in_signal[dim_signal + num_mid_neuron] = 1;

    // Weighted sum: input -> hidden layer
    multiply_mat_vec(Win_mid,
                     expand_in_signal,
                     in_mid_net,
                     num_mid_neuron,
                     dim_signal + num_mid_neuron + 1);
    // Hidden-layer output signal
    sigmoid_vec(in_mid_net,
                expand_mid_signal,
                num_mid_neuron);
    expand_mid_signal[num_mid_neuron] = 1;
    // Weighted sum: hidden -> output layer
    multiply_mat_vec(Wmid_out,
                     expand_mid_signal,
                     mid_out_net,
                     dim_signal,
                     num_mid_neuron + 1);
    // Output-layer output signal
    sigmoid_vec(mid_out_net,
                out_signal,
                dim_signal);

    
    for (int i = 0; i < dim_signal; i++) {
      predict_signal[i] = expand_signal(out_signal[i],
                                        MATRIX_AT(sample_maxmin,2,i,0),
                                        MATRIX_AT(sample_maxmin,2,i,1));
    }
    printf("predict : %f %f %f \r\n", predict_signal[0], predict_signal[1], predict_signal[2]);
    
    // print_mat(Wmid_out, row_mid_out, col_mid_out);
    
    // Compute the squared error at this point
    squareError = 0;
    // Note: the error is measured against the NEXT element of the sequence!
    // ==> i.e., the network is being trained to predict the next step.
    for (int n = 0;n < dim_signal;n++) {
      if (seq < len_seqence - 1) {
        squareError += powf((out_signal[n] - MATRIX_AT(norm_sample,dim_signal,(seq + 1),n)),2);
      } else {
        squareError += powf((out_signal[n] - MATRIX_AT(norm_sample,dim_signal,0,n)),2);
      }
    } 
    squareError /= dim_signal;

    /* End of learning */
    // If the termination flag is set and we are at the last element of the sequence, stop.
    if (end_flag && (seq == (len_seqence-1))) {
      // Set the prediction results.
      for (int i = 0; i < dim_signal; i++) {
        predict_signal[i] = expand_signal(out_signal[i],
                                          MATRIX_AT(sample_maxmin,2,i,0),
                                          MATRIX_AT(sample_maxmin,2,i,1));
        //printf("%f ", predict_signal[i]);
      }
      break;
    }

    // If judged converged, raise the termination flag.
    // (guarded with iteration >= 1: prevError is undefined on the first iteration)
    if (iteration >= 1 && fabsf(squareError - prevError) < epsilon) {
      end_flag = 1;
    }

    /* Learning step 2: backpropagation of the error */
    // Compute the error signal
    for (int n = 0; n < dim_signal; n++) {
      if (seq < len_seqence - 1) {
        sigma[n] = (out_signal[n] - MATRIX_AT(norm_sample,dim_signal,seq+1,n)) * out_signal[n] * (1 - out_signal[n]);
      } else {
        /* Take the error between the last and the first element (it is usually large) */
        sigma[n] = (out_signal[n] - MATRIX_AT(norm_sample, dim_signal,0,n)) * out_signal[n] * (1 - out_signal[n]);
      }
    }
    // printf("Sigma : %f %f %f \r\n", sigma[0], sigma[1], sigma[2]);

    // Compute the weight-update amounts for the hidden -> output weights
    for (int n = 0; n < dim_signal; n++) {
      for (int j = 0; j < num_mid_neuron + 1; j++) {
        // Stride fixed to col_mid_out so this write matches the
        // MATRIX_AT(dWmid_out,col_mid_out,i,j) read in the update step below.
        MATRIX_AT(dWmid_out,col_mid_out,n,j) = sigma[n] * expand_mid_signal[j];
      }
    }

    // Compute the weight-update amounts for the input -> hidden weights
    register float sum_sigma;
    for (int i = 0; i < num_mid_neuron; i++) {
      // Propagate the error signal backwards.
      sum_sigma = 0;
      for (int k = 0; k < dim_signal; k++) {
        sum_sigma += sigma[k] * MATRIX_AT(Wmid_out,num_mid_neuron + 1,k,i);
      }
      // Stride and index order fixed to match the
      // MATRIX_AT(dWin_mid,col_in_mid,i,j) read in the update step below.
      for (int j = 0; j < col_in_mid; j++) {
        MATRIX_AT(dWin_mid,col_in_mid,i,j)
                          = sum_sigma * expand_mid_signal[i] *
                            (1 - expand_mid_signal[i]) *
                            expand_in_signal[j];
      }
    }

    // Update the weights
    for (int i = 0; i < row_in_mid; i++) {
      for (int j = 0; j < col_in_mid; j++) {
        //printf("[%f -> ", MATRIX_AT(Win_mid,col_in_mid,i,j));
        MATRIX_AT(Win_mid,col_in_mid,i,j) = 
              MATRIX_AT(Win_mid,col_in_mid,i,j) - 
              this->learnRate * MATRIX_AT(dWin_mid,col_in_mid,i,j) -
              this->alpha * MATRIX_AT(prevdWin_mid,col_in_mid,i,j);
        // printf("%f] ", MATRIX_AT(Win_mid,col_in_mid,i,j));
        // printf("dW : %f , prevdW : %f ", MATRIX_AT(dWin_mid,col_in_mid,i,j), MATRIX_AT(prevdWin_mid,col_in_mid,i,j));
      }
      //printf("\r\n");
    }
    for (int i = 0; i < row_mid_out; i++) {
      for (int j = 0; j < col_mid_out; j++) {
        MATRIX_AT(Wmid_out,col_mid_out,i,j)= 
              MATRIX_AT(Wmid_out,col_mid_out,i,j) - 
              this->learnRate * MATRIX_AT(dWmid_out,col_mid_out,i,j) - 
              this->alpha * MATRIX_AT(prevdWmid_out,col_mid_out,i,j);
      }
    }

    // Increment the loop count / sequence index
    iteration += 1;
    seq += 1;

  }
  
  delete [] dWin_mid; delete [] dWmid_out;
  delete [] prevdWin_mid; delete [] prevdWmid_out;
  delete [] norm_sample; delete [] out_signal;
  delete [] in_mid_net; delete [] mid_out_net;
  delete [] sigma;

  return squareError;
}
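The error signal in step 2 is the textbook output-layer delta for sigmoid units under a squared-error loss. Writing o_n for out_signal[n] and t_n for the normalized next sequence element,

    sigma_n = (o_n - t_n) * o_n * (1 - o_n)

and each weight then moves against the gradient with momentum,

    W <- W - learnRate * dW - alpha * prev_dW,

which is what the two update loops at the end implement; the hidden-layer deltas likewise carry the extra h_i * (1 - h_i) sigmoid-derivative factor visible in the dWin_mid computation.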