void
Albany::SolutionMaxValueResponseFunction::
evaluateTangent(const double alpha, 
		const double beta,
		const double omega,
		const double current_time,
		bool sum_derivs,
		const Epetra_Vector* xdot,
		const Epetra_Vector* xdotdot,
		const Epetra_Vector& x,
		const Teuchos::Array<ParamVec>& p,
		ParamVec* deriv_p,
		const Epetra_MultiVector* Vxdot,
		const Epetra_MultiVector* Vxdotdot,
		const Epetra_MultiVector* Vx,
		const Epetra_MultiVector* Vp,
		Epetra_Vector* g,
		Epetra_MultiVector* gx,
		Epetra_MultiVector* gp)
{

  if (gx != NULL || gp != NULL)
    evaluateGradient(current_time, xdot, xdotdot, x, p, deriv_p, g, gx, NULL, NULL, gp);

  if (gx != NULL && Vx != NULL) {
    Epetra_MultiVector dgdx(*gx); //is this needed?
    gx->Multiply('T', 'N', alpha, dgdx, *Vx, 0.0);
  }
}
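
The final Multiply call forms the tangent as the transpose of the response gradient times the directions in Vx, i.e. gx = alpha * dgdx^T * Vx. A minimal, framework-free sketch of that contraction, using hypothetical column-major arrays in place of the distributed Epetra multivectors:

/* Hypothetical sketch of the dense contraction gx = alpha * dgdx^T * Vx.
 * n = solution length, m = columns of dgdx, nv = columns of Vx.
 * All arrays are column-major: A[row + col*nrows]. */
void tangent_contract(double alpha, int n, int m, int nv,
                      const double *dgdx, const double *Vx, double *gx)
{
    int i, j, k;
    for (j = 0; j < nv; j++) {
        for (i = 0; i < m; i++) {
            double sum = 0.0;
            for (k = 0; k < n; k++)
                sum += dgdx[k + i*n] * Vx[k + j*n];
            gx[i + j*m] = alpha * sum;   /* (dgdx^T * Vx)(i,j), scaled by alpha */
        }
    }
}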
Example #2
// Return a new configuration which is p+q*gradient(p)
struct configuration *
gradientOffset(struct configuration *p, double q)
{
    struct functionDefinition *fd = p->functionDefinition;
    struct configuration *r;
    int i;

    r = makeConfiguration(fd);
    evaluateGradient(p); BAILR(p);
    for (i=fd->dimension-1; i>=0; i--) {
	r->coordinate[i] = p->coordinate[i] + q * p->gradient[i];
    }
    r->parameter = q;
    if (fd->constraints != NULL) {
        (*fd->constraints)(r);
    }
    return r;
}
Example #3
// Starting with an initial configuration p, find the configuration
// which minimizes the value of the function (as defined by fd).  The
// number of iterations used is returned in iteration.
static struct configuration *
minimize_one_tolerance(struct configuration *initial_p,
                       int *iteration,
                       int iterationLimit)
{
    struct functionDefinition *fd;
    double fp;
    double dgg;
    double gg;
    double gamma;
    struct configuration *p = NULL;
    struct configuration *q = NULL;
    int i;

    Enter(initial_p);
    NULLPTRR(initial_p, NULL);
    NULLPTRR(iteration, initial_p);
    fd = initial_p->functionDefinition;
    NULLPTRR(fd, initial_p);
    if (fd->termination == NULL) {
        fd->termination = defaultTermination;
    }
    SetConfiguration(&p, initial_p);
    fp = evaluate(p);
    BAILR(initial_p);
    for ((*iteration)=0; (*iteration) < iterationLimit && !Interrupted; (*iteration)++) {
	SetConfiguration(&q, NULL);
	q = linearMinimize(p, fd->tolerance, fd->linear_algorithm);
        // If linearMinimize made some progress, but threw an
        // exception, then we want the best result, which is q.  If it
        // threw an exception and returned NULL, the best we can do
        // at this point is p.  Beyond this point, we can bail with q.
	BAILR(q == NULL ? p : q);
        if ((fd->termination)(fd, p, q)) {
	    SetConfiguration(&p, NULL);
	    Leave(minimize_one_tolerance, initial_p, (q == initial_p) ? 0 :1);
	    return q;
	}
	evaluateGradient(p); // should have been evaluated by linearMinimize already
	BAILR(q);
	evaluateGradient(q);
	BAILR(q);
	if (fd->algorithm != SteepestDescent) {
	    dgg = gg = 0.0;
	    if (fd->algorithm == PolakRibiereConjugateGradient) {
		for (i=fd->dimension-1; i>=0; i--) {
		    gg += p->gradient[i] * p->gradient[i];
		    // following line implements Polak-Ribiere
		    dgg += (q->gradient[i] + p->gradient[i]) * q->gradient[i] ;
		}
	    } else { // fd->algorithm == FletcherReevesConjugateGradient
		// NOTE: Polak-Ribiere may handle non-quadratic minima better
		// than Fletcher-Reeves
		for (i=fd->dimension-1; i>=0; i--) {
		    gg += p->gradient[i] * p->gradient[i];
		    // following line implements Fletcher-Reeves
		    dgg += q->gradient[i] * q->gradient[i] ;
		}
	    }
	    if (gg == 0.0) {
		// rather than divide by zero below, note that the gradient
		// is zero, so we must be done.
		DPRINT(D_MINIMIZE, "gg==0 in minimize_one_tolerance\n");
		SetConfiguration(&p, NULL);
		Leave(minimize_one_tolerance, initial_p, 1);
		return q;
	    }
	    gamma = dgg / gg;
	    DPRINT3(D_MINIMIZE, "gamma[%e] = %e / %e\n", gamma, dgg, gg);
	    for (i=fd->dimension-1; i>=0; i--) {
		q->gradient[i] += gamma * p->gradient[i];
	    }
	}
	fp = evaluate(q); // previous value of function
	BAILR(q);
	SetConfiguration(&p, q);
    }
    if (Interrupted) {
        message(fd, "minimization interrupted");
    } else {
        message(fd, "reached iteration limit");
    }
    SetConfiguration(&p, NULL);
    Leave(minimize_one_tolerance, initial_p, 1);
    return q;
}
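
For reference, the gamma computed above is the conjugate-gradient scaling factor. A minimal sketch of the textbook Fletcher-Reeves and Polak-Ribiere formulas, using hypothetical plain arrays instead of the configuration structs (note the listing adds the two gradients in its Polak-Ribiere line rather than subtracting them; whether that is equivalent depends on the sign convention evaluateGradient uses for the stored gradient):

/* Hypothetical helper: textbook conjugate-gradient scaling factor.
 * g_old, g_new are the gradients at the previous and current points. */
double cg_gamma(const double *g_old, const double *g_new, int n, int polak_ribiere)
{
    double gg = 0.0;    /* g_old . g_old */
    double dgg = 0.0;   /* numerator */
    int i;

    for (i = 0; i < n; i++) {
        gg += g_old[i] * g_old[i];
        if (polak_ribiere)
            dgg += (g_new[i] - g_old[i]) * g_new[i];   /* Polak-Ribiere */
        else
            dgg += g_new[i] * g_new[i];                /* Fletcher-Reeves */
    }
    return (gg == 0.0) ? 0.0 : dgg / gg;  /* zero old gradient: already at a minimum */
}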
Example #4
// Given a configuration p, find three configurations (a, b, c) such
// that f(b) < f(a) and f(b) < f(c), where a, b, and c are collinear in
// configuration space, with b between a and c.  This ensures that a
// local minimum exists between a and c.
//
// If the function is monotonic to parameterLimit, we could exit with
// b and c having the same parameter value (but being distinct
// configuration objects).  It looks like this won't confuse brent(),
// but it would be nice to be sure...
static void
bracketMinimum(struct configuration **ap,
               struct configuration **bp,
               struct configuration **cp,
               struct configuration *p)
{
    struct configuration *a = NULL;
    struct configuration *b = NULL;
    struct configuration *c = NULL;
    struct configuration *u = NULL;
    double nx;
    double r;
    double q;
    double denom;
    double ulimit;
    double parameterLimit;

    Enter(p);
    SetConfiguration(&a, p);
    a->parameter = 0.0;
    evaluateGradient(p); // this lets (*dfunc)() set initial_parameter_guess
    BAIL();
    parameterLimit = fabs(p->functionDefinition->parameter_limit);
    // when we step GOLDEN_RATIO beyond b, we don't want to exceed parameterLimit.
    b = gradientOffset(p,
                       signClamp(p->functionDefinition->initial_parameter_guess,
                                 parameterLimit / (GOLDEN_RATIO + 1.0)));
    BAIL();
    if (evaluate(b) > evaluate(a)) {
	// swap a and b, so b is downhill of a
	u = a;
	a = b;
	b = u;
	u = NULL;
    }
    nx = b->parameter + GOLDEN_RATIO * (b->parameter - a->parameter);
    c = gradientOffset(p, nx); BAIL();
    while (evaluate(b) > evaluate(c) && !Interrupted && !EXCEPTION) {

	// u is the extreme point for a parabola passing through a, b, and c:
	r = (b->parameter - a->parameter) * (evaluate(b) - evaluate(c));
	q = (b->parameter - c->parameter) * (evaluate(b) - evaluate(a));
	denom = q - r;
	if (denom < 0.0) {
	    if (denom > -DONT_DIVIDE_BY_ZERO) {
		denom = -DONT_DIVIDE_BY_ZERO;
	    }
	} else {
	    if (denom < DONT_DIVIDE_BY_ZERO) {
		denom = DONT_DIVIDE_BY_ZERO;
	    }
	}
	nx = b->parameter -
	    ((b->parameter - c->parameter) * q - (b->parameter - a->parameter) * r) /
	    (2.0 * denom);
        nx = signClamp(nx, parameterLimit);
	SetConfiguration(&u, NULL);
	u = gradientOffset(p, nx); BAIL();

	// a, b, and c are in order, ulimit is far past c
	ulimit = b->parameter + PARABOLIC_BRACKET_LIMIT * (c->parameter - b->parameter);
        ulimit = signClamp(ulimit, parameterLimit);

	if ((b->parameter-u->parameter) * (u->parameter-c->parameter) > 0.0) {
	    // u is between b and c, also f(c) < f(b) and f(b) < f(a)
	    if (evaluate(u) < evaluate(c)) { // success: (b, u, c) brackets
		*ap = b;
		*bp = u;
		*cp = c;
		SetConfiguration(&a, NULL);
		Leave(bracketMinimum, p, (p == *ap || p == *bp || p == *cp) ? 2 : 3);
		return;
	    }
	    if (evaluate(u) > evaluate(b)) { // success: (a, b, u) brackets
		*ap = a;
		*bp = b;
		*cp = u;
		SetConfiguration(&c, NULL);
		Leave(bracketMinimum, p, (p == *ap || p == *bp || p == *cp) ? 2 : 3);
		return;
	    }
	    // b, u, c monotonically decrease, u is useless.
	    // try the default golden ratio extension for u:
	    nx = c->parameter + GOLDEN_RATIO * (c->parameter - b->parameter);
            nx = signClamp(nx, parameterLimit);
	    SetConfiguration(&u, NULL);
	    u = gradientOffset(p, nx);
	} else if ((c->parameter-u->parameter) * (u->parameter-ulimit) > 0.0) {
	    // u is between c and ulimit
	    if (evaluate(u) < evaluate(c)) {
		// we're still going down, keep going
		SetConfiguration(&b, c);
		SetConfiguration(&c, u);
		SetConfiguration(&u, NULL);
		nx = c->parameter + GOLDEN_RATIO * (c->parameter - b->parameter);
                nx = signClamp(nx, parameterLimit);
		u = gradientOffset(p, nx);
	    }
	} else if ((u->parameter-ulimit) * (ulimit-c->parameter) >= 0.0) {
	    // u is past ulimit, rein it in
	    nx = ulimit;
	    SetConfiguration(&u, NULL);
	    u = gradientOffset(p, nx);
	    // XXX we did an extra gradientOffset of the old ux that we're
	    // discarding.  would be nice to avoid that.
	} else {
	    // u must be before b
	    // since (a b c) are monotonic decreasing, u should be a
	    // maximum, so we reject it.
	    nx = c->parameter + GOLDEN_RATIO * (c->parameter - b->parameter);
            nx = signClamp(nx, parameterLimit);
	    SetConfiguration(&u, NULL);
	    u = gradientOffset(p, nx);
	}
	SetConfiguration(&a, b);
	SetConfiguration(&b, c);
	SetConfiguration(&c, u);
	SetConfiguration(&u, NULL);
    }
    if (Interrupted) {
        SetConfiguration(&a, NULL);
	evaluateGradient(p);
	BAIL();
	parameterLimit = fabs(p->functionDefinition->parameter_limit);
	*bp = gradientOffset(p,
			     signClamp(p->functionDefinition->initial_parameter_guess,
				       parameterLimit / (GOLDEN_RATIO + 1.0)));
        SetConfiguration(&c, NULL);
        SetConfiguration(&u, NULL);
        Leave(bracketMinimum, p, 0);
        return;
    }
    if (EXCEPTION) {
        // We haven't succeeded in bracketing, so we leave the results
        // as all NULL's.  Caller needs to check for this.
        SetConfiguration(&a, NULL);
        SetConfiguration(&b, NULL);
        SetConfiguration(&c, NULL);
        SetConfiguration(&u, NULL);
        Leave(bracketMinimum, p, 0);
        return;
    }
        
    // success: (a, b, c) brackets
    *ap = a;
    *bp = b;
    *cp = c;
    SetConfiguration(&u, NULL);
    Leave(bracketMinimum, p, (p == *ap || p == *bp || p == *cp) ? 2 : 3);
}
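
The trial point u chosen inside the loop above is the vertex of the parabola through (a, f(a)), (b, f(b)), and (c, f(c)), with the denominator clamped away from zero before dividing; this is the standard parabolic extrapolation used when bracketing a minimum. A minimal sketch of that step in isolation, with a hypothetical TINY constant standing in for DONT_DIVIDE_BY_ZERO:

#include <math.h>

#define TINY 1.0e-20   /* hypothetical stand-in for DONT_DIVIDE_BY_ZERO */

/* Vertex (extremum) of the parabola through (xa, fa), (xb, fb), (xc, fc). */
static double parabola_vertex(double xa, double fa,
                              double xb, double fb,
                              double xc, double fc)
{
    double r = (xb - xa) * (fb - fc);
    double q = (xb - xc) * (fb - fa);
    double denom = 2.0 * (q - r);

    /* Keep the sign but never divide by (nearly) zero. */
    if (fabs(denom) < TINY)
        denom = (denom < 0.0) ? -TINY : TINY;
    return xb - ((xb - xc) * q - (xb - xa) * r) / denom;
}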