// Takes the scalar multiplication of a dVector v with a scalar k
dVector RKF45::scalar_multiplication(double k, const dVector& v) const {
  dVector output;
  for (dVector::const_iterator it = v.begin(); it != v.end(); ++it) {
    output.push_back(k * (*it));
  }
  return output;
}
// Orthogonalizes u against v (one Gram-Schmidt step): returns u minus its projection onto v.
dVector ortogonalizacao(const dVector u, const dVector v) {
  double escalar = produtoEscalar(u, v) / produtoEscalar(v, v);
  dVector vetor(u.size());
  for (size_t i = 0; i < u.size(); i++)
    vetor[i] = u[i] - escalar * v[i];
  return vetor;
}
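// ortogonalizacao() relies on a dot-product helper, produtoEscalar, that is
// defined elsewhere. The function below is only a minimal sketch of what such
// a helper might look like, assuming dVector behaves like std::vector<double>;
// it is an illustrative assumption, not the original definition.
double produtoEscalar_sketch(const dVector& u, const dVector& v) {
  double soma = 0.0; // accumulates the dot product of u and v
  for (size_t i = 0; i < u.size() && i < v.size(); i++)
    soma += u[i] * v[i];
  return soma;
}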
void Toolbox::calculateGlobalMeanAndStd(DataSet& X, dVector& mean, dVector& stdDev)
{
	calculateGlobalMean(X, mean);
	int nbElements = 0;
	// Calculate standard deviation
	stdDev.set(0);
	for(int i = 0; i < (int)X.size(); i++)
	{
		double* pData = X.at(i)->getPrecomputedFeatures()->get();
		int Width = X.at(i)->getPrecomputedFeatures()->getWidth();
		int Height = X.at(i)->getPrecomputedFeatures()->getHeight();
		for(int col = 0; col < Width; col++)
		{
			double* pStdDev = stdDev.get();
			double* pMean = mean.get();
			for(int row = 0; row < Height; row++)
			{
				*pStdDev += (*pData - *pMean) * (*pData - *pMean);
				pStdDev++;
				pData++;
				pMean++;
			}
		}
		nbElements += Width;
	}
	stdDev.multiply(1.0 / (double)nbElements);
	stdDev.eltSqrt();
}
// Adds the dVector b to the dVector a. a is modified in-place.
void RKF45::sum_in_place(dVector& a, const dVector& b) const {
  assert(a.size() == b.size()
         && "To sum two vectors, they must be the same size.");
  for (unsigned int i = 0; i < a.size(); i++) {
    a[i] += b[i];
  }
}
// Calculates the 2-norm of the dVector v
double RKF45::norm(const dVector& v) const {
  double output = 0;
  for (dVector::const_iterator it = v.begin(); it != v.end(); ++it) {
    output += (*it) * (*it);
  }
  return sqrt(output);
}
void BLASInterface::Madd(dVector& v, const dVector& x, double a)
{
	integer n = x.n;
	integer vinc = v.stride;
	integer xinc = x.stride;
	daxpy_(&n, &a, x.getStart(), &xinc, v.getStart(), &vinc);
}
// Adds k*b to the dVector a. k is a scalar. b is a dVector.
void RKF45::linear_combination_in_place(dVector& a, double k, const dVector& b) const {
  assert(a.size() == b.size()
         && "To sum two vectors, they must be the same size.");
  for (unsigned int i = 0; i < a.size(); i++) {
    a[i] += k * b[i];
  }
}
double BLASInterface::Dot(const dVector& x, const dVector& y)
{
	Assert(x.n == y.n);
	integer n = x.n;
	integer xinc = x.stride;
	integer yinc = y.stride;
	return ddot_(&n, x.getStart(), &xinc, y.getStart(), &yinc);
}
// Takes the norm of the difference between two dVectors.
double RKF45::norm_difference(const dVector& a, const dVector& b) const {
  assert(a.size() == b.size()
         && "To take the norm of the difference, the vectors must be the same size.");
  double output = 0;
  for (unsigned int i = 0; i < a.size(); i++) {
    output += (a[i] - b[i]) * (a[i] - b[i]);
  }
  return sqrt(output);
}
// Adds two dVectors a and b and outputs a new dVector that is their
// sum. Assumes that the vectors are the same size. If they're not,
// raises an error.
dVector RKF45::sum(const dVector& a, const dVector& b) const {
  assert(a.size() == b.size()
         && "To sum two vectors, they must be the same size.");
  dVector output;
  int size = a.size();
  output.resize(size);
  for (int i = 0; i < size; i++) {
    output[i] = a[i] + b[i];
  }
  return output;
}
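// Usage sketch (not part of the original source): the RKF45 helpers above are
// the building blocks for assembling stage arguments of the form
// y + h * coeff * k. The free function below is a hypothetical illustration of
// that pattern, assuming dVector behaves like std::vector<double>; the actual
// integrator presumably combines sum(), scalar_multiplication() and
// linear_combination_in_place() directly.
dVector rkf45_stage_argument_sketch(const dVector& y, double h, double coeff, const dVector& k) {
  dVector arg = y; // start from the current state
  for (size_t i = 0; i < arg.size() && i < k.size(); i++) {
    arg[i] += h * coeff * k[i]; // same update as linear_combination_in_place
  }
  return arg;
}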
double GradientDD::computeGradient(dVector& vecGradrient, Model* m, DataSequence*)
{
	dVector tmpVec;
	vecGradrient = *(m->getWeights());
	vecGradrient.add(mu);
	tmpVec = vecGradrient;
	tmpVec.transpose();
	tmpVec.multiply(vecGradrient);
	double f = exp(-0.5 * tmpVec[0]);
	vecGradrient.multiply(f);
	return f;
}
void Toolbox::calculateGlobalMean(DataSet& X, dVector& mean)
{
	dVector seqSum;
	int nbElements = 0;
	// Calculate mean
	for(int i = 0; i < (int)X.size(); i++)
	{
		X.at(i)->getPrecomputedFeatures()->rowSum(seqSum);
		mean.add(seqSum);
		nbElements += X.at(i)->getPrecomputedFeatures()->getWidth();
	}
	mean.multiply(1.0 / (double)nbElements);
}
void dCustomBallAndSocket::SubmitConstraintTwistLimits(const dMatrix& matrix0, const dMatrix& matrix1, const dVector& relOmega, dFloat timestep)
{
	dFloat jointOmega = relOmega.DotProduct3(matrix0.m_front);
	dFloat twistAngle = m_twistAngle.GetAngle() + jointOmega * timestep;
	if (twistAngle < m_minTwistAngle) {
		NewtonUserJointAddAngularRow(m_joint, 0.0f, &matrix0.m_front[0]);
		NewtonUserJointSetRowStiffness(m_joint, m_stiffness);
		NewtonUserJointSetRowMinimumFriction(m_joint, -m_twistFriction);
		const dFloat invtimestep = 1.0f / timestep;
		const dFloat speed = 0.5f * (m_minTwistAngle - m_twistAngle.GetAngle()) * invtimestep;
		const dFloat stopAccel = NewtonUserJointCalculateRowZeroAccelaration(m_joint) + speed * invtimestep;
		NewtonUserJointSetRowAcceleration(m_joint, stopAccel);
	} else if (twistAngle > m_maxTwistAngle) {
		NewtonUserJointAddAngularRow(m_joint, 0.0f, &matrix0.m_front[0]);
		NewtonUserJointSetRowStiffness(m_joint, m_stiffness);
		NewtonUserJointSetRowMaximumFriction(m_joint, m_twistFriction);
		const dFloat invtimestep = 1.0f / timestep;
		const dFloat speed = 0.5f * (m_maxTwistAngle - m_twistAngle.GetAngle()) * invtimestep;
		const dFloat stopAccel = NewtonUserJointCalculateRowZeroAccelaration(m_joint) + speed * invtimestep;
		NewtonUserJointSetRowAcceleration(m_joint, stopAccel);
	} else if (m_twistFriction > 0.0f) {
		NewtonUserJointAddAngularRow(m_joint, 0, &matrix0.m_front[0]);
		NewtonUserJointSetRowStiffness(m_joint, m_stiffness);
		dFloat accel = NewtonUserJointCalculateRowZeroAccelaration(m_joint);
		NewtonUserJointSetRowAcceleration(m_joint, accel);
		NewtonUserJointSetRowMinimumFriction(m_joint, -m_twistFriction);
		NewtonUserJointSetRowMaximumFriction(m_joint, m_twistFriction);
	}
}
// ----------------------------------------------------------------------
double derivative(int i, const dVector& v, double lattice_spacing) {
  // A local "num points" variable. Used for simplicity.
  int num_points = get_num_points(v);
  assert(0 <= i && i < (int)v.size()
         && "The ith element must be in the vector");
  if (i % num_points == 0) {
    // We're at the lower boundary of the grid.
    if (DEBUGGING) {
      cout << "\tUsing forward difference." << endl;
    }
    return forward_difference(i, v, lattice_spacing);
  }
  else if (i % num_points == num_points - 1) {
    // We're at the upper boundary of the grid.
    if (DEBUGGING) {
      cout << "\tUsing backward difference." << endl;
    }
    return backward_difference(i, v, lattice_spacing);
  }
  else {
    // We're in the middle of the grid.
    if (DEBUGGING) {
      cout << "\tUsing centered difference." << endl;
    }
    return centered_difference(i, v, lattice_spacing);
  }
}
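// The difference helpers called above (forward_difference, backward_difference,
// centered_difference) are defined elsewhere. Purely as an illustration, a
// second-order centered difference on a uniform grid could look like the
// sketch below; this assumes dVector behaves like std::vector<double> and is
// not the original implementation.
double centered_difference_sketch(int i, const dVector& v, double lattice_spacing) {
  // (v[i+1] - v[i-1]) / (2h); valid only away from the grid boundaries.
  return (v[i + 1] - v[i - 1]) / (2.0 * lattice_spacing);
}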
void dComplentaritySolver::dBodyState::IntegrateForce (dFloat timestep, const dVector& force, const dVector& torque)
{
	dVector accel (force.Scale (m_invMass));
	dVector alpha (m_invInertia.RotateVector(torque));
	m_veloc += accel.Scale (timestep);
	m_omega += alpha.Scale (timestep);
}
dVector CustomPlayerController::CalculateDesiredVelocity (dFloat forwardSpeed, dFloat lateralSpeed, dFloat verticalSpeed, const dVector& gravity, dFloat timestep) const
{
	dMatrix matrix;
	NewtonBodyGetMatrix(m_body, &matrix[0][0]);
	dVector updir (matrix.RotateVector(m_upVector));
	dVector frontDir (matrix.RotateVector(m_frontVector));
	dVector rightDir (frontDir * updir);

	dVector veloc (0.0f, 0.0f, 0.0f, 0.0f);
	if ((verticalSpeed <= 0.0f) && (m_groundPlane % m_groundPlane) > 0.0f) {
		// player is supported by a ground plane, apply the player input velocity
		if ((m_groundPlane % updir) >= m_maxSlope) {
			// player is on a legal slope, he is in full control of his movement
			dVector bodyVeloc;
			NewtonBodyGetVelocity(m_body, &bodyVeloc[0]);
			veloc = updir.Scale(bodyVeloc % updir) + gravity.Scale (timestep) +
			        frontDir.Scale (forwardSpeed) + rightDir.Scale (lateralSpeed) + updir.Scale(verticalSpeed);
			veloc += (m_groundVelocity - updir.Scale (updir % m_groundVelocity));

			dFloat speedLimitMag2 = forwardSpeed * forwardSpeed + lateralSpeed * lateralSpeed +
			                        verticalSpeed * verticalSpeed + m_groundVelocity % m_groundVelocity + 0.1f;
			dFloat speedMag2 = veloc % veloc;
			if (speedMag2 > speedLimitMag2) {
				veloc = veloc.Scale (dSqrt (speedLimitMag2 / speedMag2));
			}

			dFloat normalVeloc = m_groundPlane % (veloc - m_groundVelocity);
			if (normalVeloc < 0.0f) {
				veloc -= m_groundPlane.Scale (normalVeloc);
			}
		} else {
			// player is on an illegal ramp, he slides downhill and loses control of his movement
			NewtonBodyGetVelocity(m_body, &veloc[0]);
			veloc += updir.Scale(verticalSpeed);
			veloc += gravity.Scale (timestep);
			dFloat normalVeloc = m_groundPlane % (veloc - m_groundVelocity);
			if (normalVeloc < 0.0f) {
				veloc -= m_groundPlane.Scale (normalVeloc);
			}
		}
	} else {
		// player is in free fall, only apply the gravity
		NewtonBodyGetVelocity(m_body, &veloc[0]);
		veloc += updir.Scale(verticalSpeed);
		veloc += gravity.Scale (timestep);
	}
	return veloc;
}
void Quaternion::set_v(const dVector& v)
{
	if(v.size() == 3)
	{
		v_ = v;
	}
	else
	{
		assert(0 && "Quaternion::set_v: input has a wrong size.");
	}
}
void OutputASCIIShade(ostream& out, const dVector& x, double scale)
{
	if(scale == 0) scale = x.maxAbsElement();
	out << scale << " x ";
	if(scale == 0) scale = 1;
	out << '[';
	for(int i = 0; i < x.n; i++)
		out << ASCIIShade(x(i) / scale);
	out << ']';
}
// Compute gradient
void UnconstrainedOptimizer::G()
{
	dVector dgrad(n);
	memcpy(vecGradient.get(), x, n * sizeof(double));
	currentModel->setWeights(vecGradient);
	if(currentModel->getDebugLevel() >= 2)
		std::cout << "Compute gradient..." << std::endl;
	currentGradient->computeGradient(dgrad, currentModel, currentDataset);
	memcpy(g, dgrad.get(), n * sizeof(double));
}
void InitCamera (const dVector& eyePoint, const dVector& dir)
{
	gCameraEyepoint = eyePoint;
	gCurrCameraDir = dir.Scale (1.0f / sqrt (dir % dir));

	gRollAngle = dAsin (gCurrCameraDir.m_y);
	gPrevRollAngle = gRollAngle;

	gYawAngle = dAtan2 (-gCurrCameraDir.m_z, gCurrCameraDir.m_x);
	gPrevYawAngle = gYawAngle;
}
Quaternion::Quaternion(const double angle, const dVector& axis)
{
	if(axis.size() != 3)
	{
		assert(0 && "Quaternion::Quaternion, size of axis != 3");
		return;
	}
	// make sure axis is a unit vector
	v_ = sin(angle/2) * axis / Norm2(axis);
	s_ = cos(angle/2);
}
void Madd(const dMatrix& A, const dVector& x, dVector& y, double alpha, double beta, bool transpose)
{
	if(A.isRowMajor()) {
		Assert(A.jstride == 1);
		transpose = !transpose;
	}
	else {
		Assert(A.istride == 1);
		//Assert(IsCompliant(A));
	}
	//Assert(IsCompliant(A));
	Assert(x.n == A.n);
	Assert(y.n == A.m);
	integer m = A.m;
	integer n = A.n;
	integer lda = A.m;
	integer xinc = x.stride;
	integer yinc = y.stride;
	char trans = (transpose ? 'T' : 'N');
	dgemv_(&trans, &m, &n, &alpha, A.getStart(), &lda, x.getStart(), &xinc, &beta, y.getStart(), &yinc);
}
dQuaternion dQuaternion::IntegrateOmega (const dVector& omega, dFloat timestep) const
{
	// this is correct
	dQuaternion rotation (*this);
	dFloat omegaMag2 = omega % omega;
	const dFloat errAngle = 0.0125f * 3.141592f / 180.0f;
	const dFloat errAngle2 = errAngle * errAngle;
	if (omegaMag2 > errAngle2) {
		dFloat invOmegaMag = 1.0f / dSqrt (omegaMag2);
		dVector omegaAxis (omega.Scale (invOmegaMag));
		dFloat omegaAngle = invOmegaMag * omegaMag2 * timestep;
		dQuaternion deltaRotation (omegaAxis, omegaAngle);
		rotation = rotation * deltaRotation;
		rotation.Scale(1.0f / dSqrt (rotation.DotProduct (rotation)));
	}
	return rotation;
}
// Compute error function
void UnconstrainedOptimizer::F()
{
	memcpy(vecGradient.get(), x, n * sizeof(double));
	currentModel->setWeights(vecGradient);
	if(currentModel->getDebugLevel() >= 2)
		std::cout << "Compute error..." << std::endl;
	f = currentEvaluator->computeError(currentDataset, currentModel);
	if(currentModel->getDebugLevel() >= 3)
	{
		// printf(" Iteration # = %i  Nb error eval = %i  Nb gradient eval = %i\n\n", cnls, cnf, cng);
		// printf("F = %-0.10lg\n", f);
		std::cout << " Iteration # = " << cnls << " Nb error eval = " << cnf
		          << " Nb gradient eval = " << cng << std::endl << std::endl;
		std::cout << "F = " << f << std::endl;
		std::cout.flush();
	}
}
static void PhysicsApplyPickForce (const NewtonBody* body, dFloat timestep, int threadIndex)
{
	dFloat mass;
	dFloat Ixx;
	dFloat Iyy;
	dFloat Izz;
	dVector com;
	dVector veloc;
	dVector omega;
	dMatrix matrix;

	// apply the body forces
	if (chainForceCallback) {
		chainForceCallback (body, timestep, threadIndex);
	}

	// add the mouse pick penalty force and torque
	NewtonBodyGetVelocity(body, &veloc[0]);
	NewtonBodyGetOmega(body, &omega[0]);
	NewtonBodyGetMassMatrix (body, &mass, &Ixx, &Iyy, &Izz);

	dVector force (pickedForce.Scale (mass * MOUSE_PICK_STIFFNESS));
	dVector dampForce (veloc.Scale (MOUSE_PICK_DAMP * mass));
	force -= dampForce;

	NewtonBodyGetMatrix(body, &matrix[0][0]);
	NewtonBodyGetCentreOfMass (body, &com[0]);

	// calculate local point relative to center of mass
	dVector point (matrix.RotateVector (attachmentPoint - com));
	dVector torque (point * force);
	dVector torqueDamp (omega.Scale (mass * 0.1f));

	NewtonBodyAddForce (body, &force.m_x);
	NewtonBodyAddTorque (body, &torque.m_x);

	// make sure the body is unfrozen, if it is picked
	NewtonBodySetFreezeState (body, 0);
}
// ----------------------------------------------------------------------
// Iterative Function
// ----------------------------------------------------------------------
dVector f(double t, const dVector& y, const dVector& optional_args) {
  // t is not used. But it is required by the integrator.

  // The optional arguments vector contains the lattice spacing and c^2.
  double h = optional_args[0];  // h = lattice spacing
  double c2 = optional_args[1]; // c2 = c^2.

  // For convenience, get the number of points in the lattice
  int num_points = get_num_points(y);

  // The output dVector, initialized to zeros.
  dVector output(y.size(), 0);

  // The order of elements in v is s,r,u. So we have the following system:
  // (d/dt) [s,r,u] = [c^2 (dr/dx), (ds/dx), s]

  // Fill the output array
  for (int i = 0; i < num_points; i++) {
    // Fill the (du/dt) terms with s.
    // ES: A nicer notation would allow you to write e.g.
    // "output[i].*s" or "output.*s[i]" (see "member pointers"), or
    // "output[i][s]", or maybe even "s[i]" where "s" is a local
    // variable defined before the loop.
    output[get_linear_index_u(i, output)] = get_ith_s(i, y);

    // Fill the (dr/dt) terms with (ds/dx).
    // ES: A nicer notation would allow you to write e.g.
    // "diff(h,y,i,s)" or "diff(h,y,s,i)" instead.
    output[get_linear_index_r(i, output)] = derivative_for_s(i, y, h);

    // Fill the (ds/dt) terms with c^2 (dr/dx).
    output[get_linear_index_s(i, output)] = c2 * derivative_for_r(i, y, h);
  }

  // (du/dt) at the boundary is forced to be zero. Impose this.
  // ES: What about the boundary conditions for the other fields?
  output[get_linear_index_s(0, output)] = 0;
  output[get_linear_index_s(num_points - 1, output)] = 0;

  // That's it. Return the array!
  return output;
}
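// The index helpers used by f() (get_num_points, get_linear_index_s/r/u,
// get_ith_s) are defined elsewhere. Given the stated "s, r, u" ordering, one
// plausible blocked layout (all s values, then all r values, then all u
// values) is sketched below purely for illustration; these are assumptions,
// not the original definitions.
int get_num_points_sketch(const dVector& y)            { return (int)y.size() / 3; }
int get_linear_index_s_sketch(int i, const dVector& y) { return i; }
int get_linear_index_r_sketch(int i, const dVector& y) { return get_num_points_sketch(y) + i; }
int get_linear_index_u_sketch(int i, const dVector& y) { return 2 * get_num_points_sketch(y) + i; }
double get_ith_s_sketch(int i, const dVector& y)       { return y[get_linear_index_s_sketch(i, y)]; }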
double GradientHCRF::computeGradient(dVector& vecGradient, Model* m, DataSequence* X)
{
	int nbFeatures = pFeatureGen->getNumberOfFeatures();
	int NumSeqLabels = m->getNumberOfSequenceLabels();
	// Get adjacency matrix
	uMatrix adjMat;
	m->getAdjacencyMatrix(adjMat, X);
	if(vecGradient.getLength() != nbFeatures)
		vecGradient.create(nbFeatures);
	dVector Partition;
	Partition.resize(1, NumSeqLabels);
	std::vector<Beliefs> ConditionalBeliefs(NumSeqLabels);

	// Step 1: Run inference in each network to compute marginals conditioned on Y
	for(int i = 0; i < NumSeqLabels; i++)
	{
		pInfEngine->computeBeliefs(ConditionalBeliefs[i], pFeatureGen, X, m, true, i);
		Partition[i] = ConditionalBeliefs[i].partition;
	}
	double f_value = Partition.logSumExp() - Partition[X->getSequenceLabel()];

	// Step 2: Compute expected values for feature nodes conditioned on Y
#if !defined(_VEC_FEATURES) && !defined(_OPENMP)
	featureVector* vecFeatures;
#endif
#if defined(_OPENMP)
	int ThreadID = omp_get_thread_num();
	if (ThreadID >= nbThreadsMP)
		ThreadID = 0;
#else
	int ThreadID = 0;
#endif
	double value;
	dMatrix CEValues;
	CEValues.resize(nbFeatures, NumSeqLabels);
	// Loop over nodes to compute features and update the gradient
	for(int j = 0; j < NumSeqLabels; j++) { // For every label
		for(int i = 0; i < X->length(); i++) { // For every node
#if defined(_VEC_FEATURES) || defined(_OPENMP)
			pFeatureGen->getFeatures(vecFeaturesMP[ThreadID], X, m, i, -1, j);
			// Loop over features
			feature* pFeature = vecFeaturesMP[ThreadID].getPtr();
			for(int k = 0; k < vecFeaturesMP[ThreadID].size(); k++, pFeature++)
#else
			vecFeatures = pFeatureGen->getFeatures(X, m, i, -1, j);
			// Loop over features
			feature* pFeature = vecFeatures->getPtr();
			for(int k = 0; k < vecFeatures->size(); k++, pFeature++)
#endif
			{
				// p(s_i=s|x,Y) * f_k(i,s,x,y)
				value = ConditionalBeliefs[j].belStates[i][pFeature->nodeState] * pFeature->value;
				CEValues.setValue(j, pFeature->globalId, CEValues(j, pFeature->globalId) + value); // one row for each Y
			} // end for every feature
		} // end for every node
	} // end for every sequence label

	// Step 3: Compute expected values for edge features conditioned on Y
	// Loop over edges to compute features and update the gradient
	for(int j = 0; j < NumSeqLabels; j++) {
		int edgeIndex = 0;
		for(int row = 0; row < X->length(); row++) { // Loop over all rows (the previous node index)
			for(int col = row; col < X->length(); col++) { // Loop over all columns (the current node index)
				if(adjMat(row, col) == 1) {
					// Get edge features
#if defined(_VEC_FEATURES) || defined(_OPENMP)
					pFeatureGen->getFeatures(vecFeaturesMP[ThreadID], X, m, col, row, j);
					// Loop over features
					feature* pFeature = vecFeaturesMP[ThreadID].getPtr();
					for(int k = 0; k < vecFeaturesMP[ThreadID].size(); k++, pFeature++)
#else
					vecFeatures = pFeatureGen->getFeatures(X, m, col, row, j);
					// Loop over features
					feature* pFeature = vecFeatures->getPtr();
					for(int k = 0; k < vecFeatures->size(); k++, pFeature++)
#endif
					{
						// p(y_i=s1,y_j=s2|x,Y) * f_k(i,j,s1,s2,x,y)
						value = ConditionalBeliefs[j].belEdges[edgeIndex](pFeature->prevNodeState, pFeature->nodeState) * pFeature->value;
						CEValues.setValue(j, pFeature->globalId, CEValues(j, pFeature->globalId) + value);
					}
					edgeIndex++;
				}
			}
		}
	}

	// Step 4: Compute joint expected values
	dVector JointEValues;
	JointEValues.resize(1, nbFeatures);
	JointEValues.set(0);
	dVector rowJ;
	rowJ.resize(1, nbFeatures);
	dVector GradientVector;
	double sumZLog = Partition.logSumExp();
	for (int j = 0; j < NumSeqLabels; j++)
	{
		CEValues.getRow(j, rowJ);
		rowJ.multiply(exp(Partition.getValue(j) - sumZLog));
		JointEValues.add(rowJ);
	}

	// Step 5: Compute gradient as Exi[i,*,*] - Exi[*,*,*], that is the difference
	// between expected values conditioned on sequence labels and joint expected values
	CEValues.getRow(X->getSequenceLabel(), rowJ); // rowJ = expected value conditioned on sequence label Y
	// [Negation moved to Gradient::ComputeGradient by LP]
	// rowJ.negate();
	JointEValues.negate();
	rowJ.add(JointEValues);
	vecGradient.add(rowJ);
	return f_value;
}
void CustomPathFollow::SetPathTarget (const dVector& posit, const dVector& tangent)
{
	m_pointOnPath = posit;
	m_pathTangent = tangent.Scale (1.0f / dSqrt (tangent % tangent));
}
double GradientCRF::computeGradient(dVector& vecGradient, Model* m, DataSequence* X)
{
	// Compute beliefs
	Beliefs bel;
	pInfEngine->computeBeliefs(bel, pFeatureGen, X, m, false);
	double phi = pFeatureGen->evaluateLabels(X, m);
	double partition = bel.partition;

	// Get adjacency matrix
	uMatrix adjMat;
	m->getAdjacencyMatrix(adjMat, X);

	// Check the size of vecGradient
	int nbFeatures = pFeatureGen->getNumberOfFeatures();
	if(vecGradient.getLength() != nbFeatures)
		vecGradient.create(nbFeatures);

#if !defined(_VEC_FEATURES) && !defined(_OPENMP)
	featureVector* vecFeatures;
#endif
#if defined(_OPENMP)
	int ThreadID = omp_get_thread_num();
	if (ThreadID >= nbThreadsMP)
		ThreadID = 0;
#else
	int ThreadID = 0;
#endif

	// Loop over nodes to compute features and update the gradient
	for(int i = 0; i < X->length(); i++)
	{
		// Read the label for this state
		int s = X->getStateLabels(i);

		// Get node features
#if defined(_VEC_FEATURES) || defined(_OPENMP)
		pFeatureGen->getFeatures(vecFeaturesMP[ThreadID], X, m, i, -1);
		// Loop over features
		feature* pFeature = vecFeaturesMP[ThreadID].getPtr();
		for(int j = 0; j < vecFeaturesMP[ThreadID].size(); j++, pFeature++)
#else
		vecFeatures = pFeatureGen->getFeatures(X, m, i, -1);
		// Loop over features
		feature* pFeature = vecFeatures->getPtr();
		for(int j = 0; j < vecFeatures->size(); j++, pFeature++)
#endif
		{
			// If the feature has the same state label as the label from the
			// data sequence, then add it to the gradient
			if(pFeature->nodeState == s)
				vecGradient[pFeature->id] += pFeature->value;
			// p(y_i=s|x)*f_k(i,s,x) is subtracted from the gradient
			vecGradient[pFeature->id] -= bel.belStates[i][pFeature->nodeState] * pFeature->value;
		}
	}

	// Loop over edges to compute features and update the gradient
	int edgeIndex = 0;
	for(int row = 0; row < X->length(); row++) // Loop over all rows (the previous node index)
	{
		for(int col = row; col < X->length(); col++) // Loop over all columns (the current node index)
		{
			if(adjMat(row, col) == 1)
			{
				int s1 = X->getStateLabels(row);
				int s2 = X->getStateLabels(col);

				// Get edge features
#if defined(_VEC_FEATURES) || defined(_OPENMP)
				pFeatureGen->getFeatures(vecFeaturesMP[ThreadID], X, m, col, row);
				// Loop over features
				feature* pFeature = vecFeaturesMP[ThreadID].getPtr();
				for(int j = 0; j < vecFeaturesMP[ThreadID].size(); j++, pFeature++)
#else
				vecFeatures = pFeatureGen->getFeatures(X, m, col, row);
				// Loop over features
				feature* pFeature = vecFeatures->getPtr();
				for(int j = 0; j < vecFeatures->size(); j++, pFeature++)
#endif
				{
					// ++ Forward edge ++
					// If the edge feature has the same state labels as the labels from
					// the data sequence, then add it to the gradient
					if(pFeature->nodeState == s2 && pFeature->prevNodeState == s1)
						vecGradient[pFeature->id] += pFeature->value;
					// p(y_i=s1,y_j=s2|x)*f_k(i,j,s1,s2,x) is subtracted from the gradient
					vecGradient[pFeature->id] -= bel.belEdges[edgeIndex](pFeature->prevNodeState, pFeature->nodeState) * pFeature->value;
				}
				edgeIndex++;
			}
		}
	}

	// Return -log instead of log() [Moved to Gradient::ComputeGradient by LP]
	// vecGradient.negate();
	return partition - phi;
}
// Returns the element-wise difference u - v.
dVector subtracao(dVector u, dVector v) {
  dVector retorno(v.size());
  for (size_t i = 0; i < v.size(); i++)
    retorno[i] = u[i] - v[i];
  return retorno;
}