// Example #1
 bool WalkMovement::isDirectionPossible(Ogre::Vector3 &direction) const
 {
     // Remember the requested direction, then strip every component walking
     // cannot serve: sideways (x), vertical (y), and positive z.
     const Vector3 requested(direction);
     direction.x = 0;
     direction.y = 0;
     if (direction.z > 0)
         direction.z = 0;
     // NOTE(review): this reports "possible" for a pure positive-z request,
     // yet that very component is zeroed above — looks inconsistent; confirm
     // the sign convention against the other Movement classes.
     return requested.x == 0 && requested.y == 0 && requested.z > 0;
 }
bool StrafeMovement::isDirectionPossible(Ogre::Vector3 &direction) const
{
    // Strafing only moves along the x axis; remember the request, then
    // discard the forward (z) and vertical (y) components in place.
    const Vector3 requested(direction);
    direction.y = 0;
    direction.z = 0;
    // Possible iff the caller asked for a purely sideways move.
    return requested.y == 0 && requested.z == 0;
}
// Example #3
	/// Riemannian nonlinear conjugate gradients on the TT manifold.
	/// Minimizes ||A*x - b||^2 if an operator is given (_Ap != nullptr),
	/// otherwise ||x - b||^2.
	///
	/// @param _Ap optional operator A; may be nullptr (pure projection problem).
	/// @param _x initial guess; overwritten with the final iterate.
	/// @param _b right-hand side.
	/// @param _numSteps maximum number of steps (0 = unlimited).
	/// @param _convergenceEpsilon relative residual / stagnation tolerance.
	/// @param _perfData receives per-step diagnostics.
	/// @return the final residual norm.
	value_t GeometricCGVariant::solve(const TTOperator *_Ap, TTTensor &_x, const TTTensor &_b, size_t _numSteps, value_t _convergenceEpsilon, PerformanceData &_perfData) const {
		// BUGFIX: the previous `const TTOperator &_A = *_Ap;` dereferenced _Ap
		// unconditionally, which is UB when _Ap == nullptr — a case this
		// function explicitly supports. _Ap is now only dereferenced inside
		// null-checked branches.
		static const Index i,j;
		size_t stepCount=0;
		TTTensor residual;
		TTTangentVector gradient;
		value_t gradientNorm = 1.0;
		value_t lastResidual=1e100;
		value_t currResidual=1e100;
		value_t normB = frob_norm(_b);
		
		if (_Ap != nullptr) {
			_perfData << "Conjugated Gradients for ||A*x - b||^2, x.dimensions: " << _x.dimensions << '\n'
					<< "A.ranks: " << _Ap->ranks() << '\n';
			if (assumeSymmetricPositiveDefiniteOperator) {
				_perfData << " with symmetric positive definite Operator A\n";
			}
		} else {
			_perfData << "Conjugated Gradients for ||x - b||^2, x.dimensions: " << _x.dimensions << '\n';
		}
		_perfData << "x.ranks: " << _x.ranks() << '\n'
					<< "b.ranks: " << _b.ranks() << '\n'
					<< "maximum number of steps: " << _numSteps << '\n'
					<< "convergence epsilon: " << _convergenceEpsilon << '\n';
		_perfData.start();
		
		// Residual of the current iterate; also reused by the line search.
		auto calculateResidual = [&]()->value_t {
			if (_Ap != nullptr) {
				residual(i&0) = _b(i&0) - (*_Ap)(i/2,j/2)*_x(j&0);
			} else {
				residual = _b - _x;
			}
			return frob_norm(residual);//normB;
		};
		// Riemannian gradient: projection of the (possibly A^T-premultiplied)
		// residual onto the tangent space at _x.
		auto updateGradient = [&]() {
			if (assumeSymmetricPositiveDefiniteOperator || (_Ap == nullptr)) {
				gradient = TTTangentVector(_x, residual);
			} else {
				TTTensor grad;
				grad(i&0) = (*_Ap)(j/2,i/2) * residual(j&0); // grad = A^T * (b - Ax)
				gradient = TTTangentVector(_x, grad);
			}
			gradientNorm = gradient.frob_norm();
		};
		
		currResidual = calculateResidual();
		_perfData.add(stepCount, currResidual, _x);
		
		updateGradient();
		TTTangentVector direction = gradient;
		value_t alpha = 1;
		while ((_numSteps == 0 || stepCount < _numSteps)
			&& currResidual/normB > _convergenceEpsilon
			&& std::abs(lastResidual-currResidual)/normB > _convergenceEpsilon
			&& std::abs(1-currResidual/lastResidual)/normB > _convergenceEpsilon) 
		{
			stepCount += 1;
			size_t stepFlags = 0;
			
			// check the derivative along the current direction
			value_t derivative = gradient.scalar_product(direction) / direction.frob_norm();
			
			// if movement in the given direction would increase the residual rather than decrease it, perform one steepest descent step instead
			if (derivative <= 0) {
				direction = gradient;
				derivative = gradient.frob_norm();
				alpha = 1;
				stepFlags |= 1;
			}
			
			// BUGFIX: remember the previous step's residual. It was never
			// updated before, so the stagnation checks in the loop condition
			// always compared against the initial 1e100 sentinel and could
			// never terminate the iteration.
			lastResidual = currResidual;
			line_search(_x, alpha, direction, derivative, currResidual, retraction, calculateResidual, 0.8);
			
			_perfData.add(stepCount, currResidual, _x, stepFlags);
			
// 			direction(i&0) = residual(i&0) + beta * direction(i&0);
			// Transport the previous direction into the tangent space of the
			// updated _x before combining it with the new gradient.
			TTTangentVector oldDirection(direction);
			vectorTransport(_x, oldDirection);
			value_t oldGradNorm = gradientNorm;
			updateGradient();
			
			double beta = gradientNorm / oldGradNorm ;// Fletcher-Reeves update
			direction = gradient;
			direction += oldDirection * beta;
		}
		
		return currResidual;
	}
 bool StepRecognitionMovement::isDirectionPossible(Ogre::Vector3 &direction) const
 {
     // Step recognition takes no directional input: consume the entire
     // request, whatever it was.
     const Vector3 requested(direction);
     direction = Vector3::ZERO;
     // Only a zero vector is "possible", i.e. no explicit direction at all.
     return requested == Vector3::ZERO;
 }