Code example #1
0
File: main.c  Project: fei3189/neural_network
/*
 * Autoencoder demo: train a network with a single nencode-unit hidden
 * layer to reproduce ninput = 2^nencode one-hot vectors, i.e. learn an
 * nencode-bit binary encoding of the identity matrix.
 */
int main() {
    size_t i, j;
    size_t nencode = 3;                    /* Width of the hidden (encoding) layer */
    /* Number of one-hot vectors. Use a bit shift instead of pow(): pow()
     * returns double, and truncating it to size_t can round down on some
     * platforms (e.g. 7.9999... -> 7). */
    size_t ninput = (size_t)1 << nencode;
    size_t iter_times = 50000;             /* Complex network requires more iterations */
    double rate = 0.5, momentum = 0.5;     /* Learning rate and momentum */
    size_t hidden[1] = { nencode };        /* One hidden layer of nencode units */
    char *ch = "sigmoid";
    char *chs[1] = {ch};

    /* Build the identity training set: input == output == I(ninput). */
    double **input = alloc_2d(ninput, ninput), **output = alloc_2d(ninput, ninput);
    for (i = 0; i < ninput; ++i) {
        for (j = 0; j < ninput; ++j) {
            if (i == j)
                input[i][j] = output[i][j] = 1;
            else
                input[i][j] = output[i][j] = 0;
        }
    }

    /* Set the configuration of the neural network */
    struct nn_config config = {
        .dim_h = hidden,
        .ahfunc = chs,
        .n_hidden = 1,
        .dim_i = ninput,
        .dim_o = ninput,
        .aofunc = ch
    };

    struct neural_network* nn = create_nn(&config);
    train(nn, input, output, ninput, rate, momentum, iter_times);

    double *res = alloc_1d(ninput);

    for (i = 0; i < ninput; ++i) {
        /* Print the one-hot input vector */
        for (j = 0; j < ninput; ++j)
            printf("%d ", (int)input[i][j]);
        printf("=> ");
        /* Hidden-layer activations: the learned nencode-wide code */
        predict_hidden(nn, input[i], res, 0);
        for (j = 0; j < nencode; ++j) {
            printf("%.3f ", res[j]);    /* Actual value */
/*          printf("%d ", (int)(res[j]+0.5));    0-1 rounded value */
        }
        /* Full forward pass: should reconstruct the input vector */
        predict(nn, input[i], res);
        printf("=> ");
        for (j = 0; j < ninput; ++j)
            printf("%.3f ", res[j]);
        printf("\n");
    }
    free_2d(input, ninput);
    free_2d(output, ninput);
    free_1d(res);
    destroy_nn(nn);
    return 0;
}
Code example #2
0
File: start.cpp  Project: Jie211/solver_rebuild
/*
 * Entry point for the CSR solver path: parse options, load the matrix in
 * CSR form (val/col/ptr) plus vectors b and x, build the transpose
 * (Tval/Tcol/Tptr), and dispatch to the selected outer solver.
 *
 * Returns 0 on success, -1 on any error. Failures after allocation go
 * through a single cleanup path so the buffers are released — the
 * original returned -1 directly from get_mat_data/outer_selecter
 * failures and leaked all nine buffers.
 */
int csr_start(int argc, char *argv[])
{
  int rc = -1;               /* Result; set to 0 only on full success */
  int error;
  struct Parameter para;

  int N, NNZ;                /* Matrix dimension and number of non-zeros */
  double *bvec, *xvec, *val;
  int *col, *ptr;
  double *Tval;
  int *Tcol, *Tptr;

  /* NOTE(review): one-shot smoke test of the CUDA 1-D allocator; looks
   * like leftover debug code — kept to preserve behavior. */
  double *test;
  malloc_cuda_1d(10, test);
  free_cuda_1d(test);

  init_ver(&para);

  /* Option parsing and validation. Nothing is allocated yet, so plain
   * early returns are safe in this section. */
  error = get_opt(argc, argv, &para);
  if(error_handle(error, (char*)"error in get_cmd")!=0)
    return -1;

  error = check_opt(&para);
  if(error_handle(error, (char*)"error in check_cmd")!=0)
    return -1;

  error = find_mat(&para);
  if(error_handle(error, (char*)"error in find_mat")!=0)
    return -1;

  show_opt(&para);

  error = set_openmp_thread(para.i_thread);
  if(error_handle(error, (char*)"error in set_openmp_thread")!=0)
    return -1;

  /* Matrix header provides N and NNZ, which size every buffer below. */
  error = get_mat_head(&para, &N, &NNZ);
  if(error_handle(error, (char*)"error in get_mat_head")!=0)
    return -1;

  if( para.f_cuda != false )
  {
    error_log((char*)"Cuda not done now");
    return -1;
  }

  /* Host-side buffers: CSR matrix A, its transpose, and vectors b, x. */
  bvec = malloc_1d(N);
  xvec = malloc_1d(N);

  val = malloc_1d(NNZ);
  col = malloc_1i(NNZ);
  ptr = malloc_1i(N+1);

  Tval = malloc_1d(NNZ);
  Tcol = malloc_1i(NNZ);
  Tptr = malloc_1i(N+1);

  /* From here on, every failure must go through cleanup. */
  error = get_mat_data(&para, col, ptr, val, bvec, xvec, N, NNZ);
  if(error_handle(error, (char*)"error in get_mat_data")!=0)
    goto cleanup;

  /* A^T */
  Transpose_d(val, col, ptr, Tval, Tcol, Tptr, N, NNZ);

  error = outer_selecter(&para, bvec, xvec, val, col, ptr, Tval, Tcol, Tptr, N, NNZ);
  if(error_handle(error, (char*)"error in outer_selecter")!=0)
    goto cleanup;

  show_opt(&para);
  rc = 0;

cleanup:
  /* para.f_cuda == false is guaranteed here (the CUDA branch returned
   * early above), so the original's second f_cuda check was dead code. */
  free_1d(bvec);
  free_1d(xvec);

  free_1d(val);
  free_1i(col);
  free_1i(ptr);

  free_1d(Tval);
  free_1i(Tcol);
  free_1i(Tptr);

  return rc;
}