void iter2_conjgrad(void* _conf,
		const struct operator_s* normaleq_op,
		unsigned int D,
		const struct operator_p_s** prox_ops,
		const struct linop_s** ops,
		const struct operator_p_s* xupdate_op,
		long size, float* image, const float* image_adj,
		const float* image_truth,
		void* obj_eval_data,
		float (*obj_eval)(const void*, const float*))
{

	assert(0 == D);
	assert(NULL == prox_ops);
	assert(NULL == ops);
	UNUSED(xupdate_op);

	struct iter_conjgrad_conf* conf = _conf;

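	/* Norm of the adjoint right-hand side: it scales the stopping
	   tolerance, and a (near-)zero value means there is nothing to
	   solve, so the CG call is skipped. */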
	float eps = md_norm(1, MD_DIMS(size), image_adj);

	if (checkeps(eps))
		goto cleanup;

	conjgrad(conf->maxiter, conf->l2lambda, eps * conf->tol, size, (void*)normaleq_op,
			select_vecops(image_adj), operator_iter, image, image_adj,
			image_truth, obj_eval_data, obj_eval);

cleanup:
	;
}
Example #2
void iter2_conjgrad(iter_conf* _conf,
		const struct operator_s* normaleq_op,
		unsigned int D,
		const struct operator_p_s* prox_ops[D],
		const struct linop_s* ops[D],
		const float* biases[D],
		const struct operator_p_s* xupdate_op,
		long size, float* image, const float* image_adj,
		struct iter_monitor_s* monitor)
{
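	/* Plain conjugate gradients handles only the quadratic data term, so
	   no proximal operators, extra linear operators, or biases may be
	   supplied here. */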
	assert(0 == D);
	assert(NULL == prox_ops);
	assert(NULL == ops);
	assert(NULL == biases);
	UNUSED(xupdate_op);

	auto conf = CAST_DOWN(iter_conjgrad_conf, _conf);

	float eps = md_norm(1, MD_DIMS(size), image_adj);

	if (checkeps(eps))
		goto cleanup;

	conjgrad(conf->maxiter, conf->l2lambda, eps * conf->tol, size, select_vecops(image_adj),
			OPERATOR2ITOP(normaleq_op), image, image_adj, monitor);

cleanup:
	;
}
Example #3
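/* One inner step of the iteratively regularized Gauss-Newton method:
   solve the regularized normal equations (N + alpha I) dst = src with CG,
   starting from zero. */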
static void inverse(void* _data, float alpha, float* dst, const float* src)
{
	struct irgnm_s* data = _data;

	md_clear(1, MD_DIMS(data->size), dst, FL_SIZE);

	float eps = md_norm(1, MD_DIMS(data->size), src);
	conjgrad(100, alpha, 0.1f * eps, data->size, (void*)data, select_vecops(src),
			normal, dst, src, NULL, NULL, NULL);
}
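
/* SENSE reconstruction: form the adjoint image A^H y, then solve the normal
   equations A^H A x = A^H y with CG; the complex image is passed to the
   real-valued solver as interleaved floats, hence the 2 * size below. */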
static void sense_reco(struct sense_data* data, complex float* imgs, const complex float* kspace)
{
	complex float* adj = md_alloc(DIMS, data->imgs_dims, CFL_SIZE);

	md_clear(DIMS, data->imgs_dims, imgs, CFL_SIZE);

	sense_adjoint(data, adj, kspace);

	long size = md_calc_size(DIMS, data->imgs_dims);

	conjgrad(100, data->alpha, 1.E-3, 2 * size, data, &cpu_iter_ops, sense_normal,
			(float*)imgs, (const float*)adj, NULL, NULL, NULL);

	md_free(adj);
}
Example #5
/* Assumed context from the original file (not shown in this snippet): the
   Madagascar (RSF) API, the linear operator matmult_lop(), and the
   normal-equations callback normal() that is handed to conjgrad(). */
#include <stdio.h>
#include <stdlib.h>
#include <rsf.h>

static float **a;               /* 5x4 matrix applied by matmult_lop() */

int main (void)
{
    float x[4], y[5], d[4];
    int i, j;

    a = sf_floatalloc2(4,5);

    a[0][0] = 1.; a[0][1] = 1.; a[0][2] = 1.; a[0][3] = 0.;
    a[1][0] = 1.; a[1][1] = 2.; a[1][2] = 0.; a[1][3] = 0.;
    a[2][0] = 1.; a[2][1] = 3.; a[2][2] = 1.; a[2][3] = 0.;
    a[3][0] = 1.; a[3][1] = 4.; a[3][2] = 0.; a[3][3] = 1.;
    a[4][0] = 1.; a[4][1] = 5.; a[4][2] = 1.; a[4][3] = 1.;

    y[0]=3.; y[1]=3.; y[2]=5.; y[3]=7.; y[4] = 9.;

    printf ("y = \n");
    for (i=0; i < 5; i ++) {
	printf (" %10.2f",y[i]);
    }
    printf ("\n");
    printf ("a = \n");
    for (j=0; j < 5; j ++) {
	for (i=0; i < 4; i ++) {
	    printf (" %10.2f",a[j][i]);
	}
	printf("\n");
    }
    printf("\n");

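    /* Apply the adjoint, d = A'y (adj = true, add = false), then solve the
       normal equations A'A x = A'y; the trailing conjgrad() arguments are
       assumed to be the iteration count (7) and the tolerance (0.0). */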
    matmult_lop(true,false,4,5,d,y);

    conjgrad(normal, 4, d, NULL, x, 7, 0.0);

    printf ("x = ");
    for (i=0; i < 4; i ++) {
	printf (" %12.8f",x[i]);
    }
    printf ("\n");

    exit(0);
}
Example #6
File: Mcr.c Project: huahbo/src
int main(int argc, char* argv[])
{
    int nr, nc, nx, niter;
    float *x, tol;
    sf_file row_in, col_in, row_out, col_out;

    sf_init(argc,argv);
    row_in = sf_input("in");
    col_in = sf_input("col_in");

    row_out = sf_output("out");
    col_out = sf_output("col_out");

    if (!sf_histint(row_in,"n1",&nr)) sf_error("No n1= in input");
    if (!sf_histint(col_in,"n1",&nc)) sf_error("No n1= in input");

    nx = nr+nc;
    x = sf_floatalloc(nx);

    /* read input - B'd */
    sf_floatread(x,nr,row_in);
    sf_floatread(x+nr,nc,col_in);

    if (!sf_getint("niter",&niter)) niter=10; /* number of iterations */
    if (!sf_getfloat("tol",&tol)) tol=0.0f;   /* CG tolerance */

    /* Run PCG */
    cr_init(nr,nc);
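    /* x serves simultaneously as right-hand side, initial guess, and
       solution; this conjgrad() variant is assumed to allow that aliasing. */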
    conjgrad(cr_apply,nx,x,x,x,niter,tol);
    
    /* write output */
    sf_floatwrite(x,nr,row_out);
    sf_floatwrite(x+nr,nc,col_out);

    exit(0);
}
Example #7
File: main.c Project: fenech/2d
int main(int argc, char **argv)
{
    double fret;            /* Frank energy */
    double start, end;      /* for timing */
    double **grid;          /* 2D grid */
    int **lock;             /* locked cells */
    double **fgrid;         /* fullgrid */
    int **flock;            /* locked cells (fullgrid) */
    char fname[128] = "log";
    char gname[128] = "grid";
    char suffix[128];
    int iter = 0;
    long maxiter;
    int flag, rank, np;     /* MPI variables */
    float t0;               /* starting "temperature" */
    FILE *log_fp = NULL;    /* opened on the root rank only */
    t_par par[2];
    int sep, ba;

    MPI_Init(&argc, &argv);
    MPI_Initialized(&flag);
    if (flag != 1) MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    if (argc < 11 || argc > 12) {
        if (rank == 0) printf("Usage: <%s> <x length> <y length> <monte carlo steps> <temp> <major axis> <minor axis> <align> <theta> <separation> <boundary angle> [id]\n", argv[0]);
        MPI_Finalize();
        exit(1);
    }

    nx = atoi(argv[1]);
    ny = atoi(argv[2]);
    maxiter = atoi(argv[3]);
    t0 = atof(argv[4]);
    par[0].major = par[1].major = atoi(argv[5]);
    par[0].minor = par[1].minor = atoi(argv[6]);
    if (strcmp(argv[7], "para") != 0 && strcmp(argv[7], "perp") != 0) {
        if (rank == 0) printf("Alignment must be para or perp\n");
        MPI_Finalize();
        exit(1);
    }
    strcpy(par[0].align, argv[7]);
    strcpy(par[1].align, argv[7]);
    par[0].theta = 0;
    par[1].theta = atof(argv[8]);
    sep = atoi(argv[9]);
    ba = atoi(argv[10]);

    if (argc == 12) id = atoi(argv[11]);
    else id = 1;

    par[0].cy = par[1].cy = ny / 2;
    par[0].cx = nx / 2 - par[0].major - sep / 2;
    par[1].cx = nx / 2 + par[1].major + sep / 2 - 1;

    sprintf(suffix, "r%dx%d_t%.0f_s%d_a%d_%d_%s",
            par[0].major, par[0].minor, par[1].theta, sep, ba, id, par[0].align);
    par[1].theta = PI * par[1].theta / 180.0;

    random_key key;

    int success = initialise(&grid, &lock, par, sep, ba, suffix, &key);
    int all_succeeded;
    MPI_Allreduce(&success, &all_succeeded, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (all_succeeded != np) {
        MPI_Finalize();
        return 0;
    }

    strcat(gname, suffix);
    print(grid, gname);

    fret = func(grid, lock, 0);
    if (rank == 0) {
        printf("Initial Frank Energy: %f\n", fret);
        strcat(fname, suffix);
        log_fp = fopen(fname, "w");
    }

    start = MPI_Wtime();
    monte(grid, lock, maxiter, t0, log_fp, suffix, key);
    end = MPI_Wtime();

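    /* Gather the distributed grid and lock arrays onto the root rank, which
       then polishes the result with conjugate gradients. */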
    grid2root(grid, &fgrid);
    lock2root(lock, &flock);
    prncont(fgrid, "testgrid");

    fret = func(grid, lock, 0);
    if (rank == 0) conjgrad(fgrid, flock, 10000, &fret, log_fp, suffix, par, sep);

    if (rank == 0) {
        printf("End Frank Energy:     %f\n", fret);
        printf("No. iterations:       %d\n", iter);
        printf("Time taken:           %f\n", end - start);
    }

    MPI_Finalize();
    return 0;
}
Example #8
void LayerNet::anx_dd ( TrainingSet *tptr , struct LearnParams *lptr )
{
   int itry, n_escape, n_retry, bad_count, new_record, refined ;
   long seed ;
   double err, prev_err, best_err, start_of_loop_error, best_inner_error ;
   double initial_accuracy, final_accuracy ;
   char msg[80] ;
   LayerNet *worknet, *worknet2, *bestnet ;

   n_escape = n_retry = 0 ;

/*
   Allocate scratch memory
*/

   MEMTEXT ( "ANX_DD::learn new worknet, bestnet" ) ;
   worknet = new LayerNet ( model , outmod , outlin , nin , nhid1 , nhid2 ,
                            nout , 0 , 0 ) ;
   bestnet = new LayerNet ( model , outmod , outlin , nin , nhid1 , nhid2 ,
                            nout , 0 , 1 ) ;

   if ((worknet == NULL)  ||  (! worknet->ok)
    || (bestnet == NULL)  ||  (! bestnet->ok)) {
      memory_message ( "to learn" ) ;
      if (worknet != NULL)
         delete worknet ;
      if (bestnet != NULL)
         delete bestnet ;
      errtype = 0 ;
      return ;
      }

   if ((lptr->method == METHOD_AN2_CJ)  ||  (lptr->method == METHOD_AN2_LM)) {
      worknet2 = new LayerNet ( model , outmod , outlin , nin , nhid1 , nhid2 ,
                                nout , 0 , 0 ) ;
      if ((worknet2 == NULL)  ||  (! worknet2->ok)) {
         if (worknet2 != NULL)
            delete worknet2 ;
         delete worknet ;
         delete bestnet ;
         memory_message ( "to learn" ) ;
         errtype = 0 ;
         return ;
         }
      }
   else
      worknet2 = NULL ;

/*
   Start by annealing around the starting weights.  These will be zero if the
   net was just created.  If it was restored or partially trained already,
   they will be meaningful.  Anneal1 guarantees that it will not return all
   zero weights if there is at least one hidden layer, even if that means
   that the error exceeds the amount that could be attained by all zeros.
*/

   best_err = best_inner_error = 1.e30 ;
   if ((lptr->method == METHOD_AN1_CJ)  ||  (lptr->method == METHOD_AN1_LM))
      anneal1 ( tptr , lptr , worknet , 1 , 0 ) ;
   else if ((lptr->method == METHOD_AN2_CJ) || (lptr->method == METHOD_AN2_LM))
      anneal2 ( tptr , lptr , worknet , worknet2 , 1 ) ;

/*
   Do direct descent optimization, finding local minimum.
   Then anneal to break out of it.  If successful, loop back up to
   do direct descent again.  Otherwise restart totally random.
*/

   bad_count = 0 ;         // Handles flat local mins
   refined = 0 ;           // Did we ever refine to high resolution?  Not yet.
   new_record = 0 ;        // Refine every time a new inner error record set
   initial_accuracy = pow ( 10.0 , -lptr->cj_acc ) ;
   final_accuracy = initial_accuracy * pow ( 10.0 , -lptr->cj_refine ) ;
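   // The accuracies are negative powers of ten: cj_acc sets the initial
   // convergence tolerance, and cj_refine tightens it for the refinement pass.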

   for (itry=1 ; ; itry++) {

      if (neterr < best_err) {   // Keep track of best
         copy_weights ( bestnet , this ) ;
         best_err = neterr ;
         }

      sprintf ( msg , "Try %d  (best=%lf):", itry, best_err ) ;
      normal_message ( msg ) ;

      if (neterr <= lptr->quit_err)
         break ;

      start_of_loop_error = neterr ;
      if ((lptr->method == METHOD_AN1_CJ)  ||  (lptr->method == METHOD_AN2_CJ))
         err = conjgrad ( tptr , 32767 , initial_accuracy ,
                          lptr->quit_err , lptr->cj_progress ) ;
      else if ((lptr->method==METHOD_AN1_LM) || (lptr->method==METHOD_AN2_LM))
         err = lev_marq ( tptr , 32767 , initial_accuracy ,
                          lptr->quit_err , lptr->cj_progress ) ;
      neterr = fabs ( err ) ; // err<0 if user pressed ESCape

      sprintf ( msg , "  Gradient err=%lf", neterr ) ;
      progress_message ( msg ) ;

      if (neterr < best_err) {   // Keep track of best
         copy_weights ( bestnet , this ) ;
         best_err = neterr ;
         }

      if (err <= lptr->quit_err) { // err<0 if user pressed ESCape
         if (err < -1.e29)         // or insufficient memory
            printf ( "\nInsufficient memory for gradient learning." ) ;
         break ;
         }

      seed = flrand() - (long) (itry * 97) ;   // Ensure a new seed for anneal
      sflrand ( seed ) ;

      prev_err = neterr ;  // So we can see if anneal helped

      if ((lptr->method == METHOD_AN1_CJ)  ||  (lptr->method == METHOD_AN1_LM))
         anneal1 ( tptr , lptr , worknet , 0 , itry ) ;
      else if ((lptr->method==METHOD_AN2_CJ) || (lptr->method==METHOD_AN2_LM))
         anneal2 ( tptr , lptr , worknet , worknet2 , 0 ) ;

      sprintf ( msg , "  Anneal err=%lf", neterr ) ;
      progress_message ( msg ) ;

      if (neterr < best_err) {  // Keep track of best
         copy_weights ( bestnet , this ) ;
         best_err = neterr ;
         }

      if (best_err <= lptr->quit_err)
         break ;

      if (neterr < best_inner_error) {  // Keep track of best inner for refine
         best_inner_error = neterr ;
         new_record = 1 ;               // Tells us to refine
         }

      if ((prev_err - neterr) > 1.e-7) { // Did we break out of local min?
         if ((start_of_loop_error - neterr) < 1.e-3)
            ++bad_count ;  // Avoid many unprofitable iters
         else
            bad_count = 0 ;
         if (bad_count < 4) {
            ++n_escape ;          // Count escapes from local min
            continue ;            // Escaped, so gradient learn again
            }
         }

/*
   After the first few tries, and after each improvement thereafter, refine
   to high resolution
*/

      if ((itry-n_escape >= lptr->cj_pretries)  &&  (new_record || ! refined)) {
         if (! refined) {   // If refining the best of the pretries
            copy_weights ( this , bestnet ) ;  // Get that net
            neterr = best_err ;
            }
         refined = 1 ;     // Only force refine once
         new_record = 0 ;  // Reset new inner error record flag
         progress_message ( "  REFINING" ) ;
         if ((lptr->method == METHOD_AN1_CJ) || (lptr->method == METHOD_AN2_CJ))
            err = conjgrad ( tptr , 0 , final_accuracy ,
                             lptr->quit_err , lptr->cj_progress ) ;
         else if ((lptr->method==METHOD_AN1_LM)|| (lptr->method==METHOD_AN2_LM))
            err = lev_marq ( tptr , 0 , final_accuracy ,
                             lptr->quit_err , lptr->cj_progress ) ;
         neterr = fabs ( err ) ; // err<0 if user pressed ESCape
         sprintf ( msg , "  Attained err=%lf", neterr ) ;
         progress_message ( msg ) ;
         if (neterr < best_err) {  // Keep track of best
            copy_weights ( bestnet , this ) ;
            best_err = neterr ;
            }
         }

      if (++n_retry > lptr->retries)
         break ;

      progress_message ( "  RESTART" ) ;
      zero_weights () ;  // Failed to break out, so retry random
      seed = flrand() - (long) (itry * 773) ;   // Ensure a new seed for anneal
      sflrand ( seed ) ;
      if ((lptr->method == METHOD_AN1_CJ)  ||  (lptr->method == METHOD_AN1_LM))
         anneal1 ( tptr , lptr , worknet , 1 , itry ) ;
      else if ((lptr->method==METHOD_AN2_CJ) || (lptr->method==METHOD_AN2_LM))
         anneal2 ( tptr , lptr , worknet , worknet2 , 1 ) ;
      }

FINISH:
   copy_weights ( this , bestnet ) ;
   neterr = best_err ;
   MEMTEXT ( "ANX_DD::learn delete worknet, bestnet" ) ;
   if (worknet2 != NULL)      // only allocated for the AN2 methods
      delete worknet2 ;
   delete worknet ;
   delete bestnet ;
   sprintf ( msg , "%d successful escapes, %d retries", n_escape, n_retry ) ;
   normal_message ( msg ) ;

   return ;
}
Example #9
/* Fit the curve to the given points */
static void mcv_fit(mcv *p,
	int verb,		/* Verbosity level, 0 = none */
	int order,		/* Number of curve orders, 1..MCV_MAXORDER */
	mcvco *d,		/* Array holding scattered initialisation data */
	int ndp,		/* Number of data points */
	double smooth	/* Degree of smoothing, 1.0 = normal */			
) {
	int i;
	double *sa;		/* Search area */
	double *pms;	/* Parameters to optimise */
	double min, max;

	p->verb = verb;
	p->smooth = smooth;
	p->luord = order+2;		/* Add two for offset and scale */

	if (p->pms != NULL)
		free(p->pms);
	if ((p->pms = (double *)calloc(p->luord, sizeof(double))) == NULL)
		error ("Malloc failed");
	if ((pms = (double *)calloc(p->luord, sizeof(double))) == NULL)
		error ("Malloc failed");
	if ((sa = (double *)calloc(p->luord, sizeof(double))) == NULL)
		error ("Malloc failed");
	if ((p->dv = (double *)calloc(p->luord, sizeof(double))) == NULL)
		error ("Malloc failed");

#ifdef DEBUG
	printf("mcv_fit with %d points (noos = %d)\n",ndp,p->noos);
#endif
	/* Establish the range of data values */
	min = 1e38;			/* Locate min, and make that offset */
	max = -1e38;			/* Locate max */
	for (i = 0; i < ndp; i++) {
		if (d[i].v < min)
			min = d[i].v;
		if (d[i].v > max)
			max = d[i].v;
#ifdef DEBUG
		printf("point %d is %f %f\n",i,d[i].p,d[i].v);
#endif
	}

	if (p->noos) {
		p->pms[0] = min = 0.0;
		p->pms[1] = max = 1.0;
	} else {
		/* Set offset and scale to reasonable values */
		p->pms[0] = min;
		p->pms[1] = max - min;
	}
	p->dra = max - min;
	if (p->dra <= 1e-12)
		error("Mcv max - min %e too small",p->dra);

	/* Use powell (or conjgrad below) to minimise the sum of the squared */
	/* errors of the curve fit at the input points, plus a parameter */
	/* damping factor. */
	p->d = d;
	p->ndp = ndp;

	for (i = 0; i < p->luord; i++)
		sa[i] = 0.2;

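	/* NEVER is assumed to be undefined, so the conjugate-gradient branch
	   below is compiled; it uses the analytic derivative mcv_dopt_func in
	   place of powell's derivative-free search. */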
#ifdef NEVER
	if (powell(&p->resid, p->luord-p->noos, p->pms+p->noos, sa+p->noos, POWTOL, MAXITS,
	                                          mcv_opt_func, (void *)p, NULL, NULL) != 0)
		error ("Mcv fit powell failed");
#else
	if (conjgrad(&p->resid, p->luord-p->noos, p->pms+p->noos, sa+p->noos, POWTOL, MAXITS,
	                              mcv_opt_func, mcv_dopt_func, (void *)p, NULL, NULL) != 0) {
#ifndef NEVER
	fprintf(stderr,"Mcv fit conjgrad failed with %d points:\n",ndp);
	for (i = 0; i < ndp; i++) {
		fprintf(stderr,"  %d: %f -> %f\n",i,d->p, d->v);
	}
#endif
		error ("Mcv fit conjgrad failed");
	}
#endif

	free(p->dv);
	p->dv = NULL;
	free(sa);
	free(pms);
}