/// Cross-entropy cost of the network over a batch, plus L2 regularization.
/// @param input  one training example per row
/// @param output expected activations (same row count as input)
/// @param theta  per-layer weight matrices
/// @return regularized logistic cost
double NeuralNetwork::computeCost(const arma::mat& input, const arma::mat& output, const std::vector<arma::mat>& theta) {
    const unsigned int numExamples = input.n_rows;

    // Forward pass: hypothesis for every example in the batch.
    const arma::mat hypothesis = feedForward(input, theta);
    const arma::mat complement = 1 - hypothesis;

    // Cross-entropy: -(1/m) * sum( y .* log(h) + (1 - y) .* log(1 - h) ).
    const double dataCost = (-1.0 / numExamples) *
        arma::accu(output % logarithm(hypothesis) + (1 - output) % logarithm(complement));

    // L2 penalty scaled by the regularization factor.
    return dataCost + (m_regFactor / (2.0 * numExamples)) * computeRegTerm(theta);
}
// Derivative of a power expression p1 = u^v with respect to p2.
// Uses the identity: d/dx (u^v) = u^v * ( (v/u) * du/dx + log(u) * dv/dx ).
// Operates on the global expression stack: cadr(p1) = u, caddr(p1) = v.
// NOTE(review): statement order is load-bearing — each push/apply pair
// builds one factor of the sum before the final multiply by u^v.
void dpower(void) {
    push(caddr(p1)); // v/u
    push(cadr(p1));
    divide();

    push(cadr(p1)); // du/dx
    push(p2);
    derivative();

    multiply();     // (v/u) * du/dx

    push(cadr(p1)); // log u
    logarithm();

    push(caddr(p1)); // dv/dx
    push(p2);
    derivative();

    multiply();     // log(u) * dv/dx

    add();          // (v/u) du/dx + log(u) dv/dx

    push(p1); // u^v
    multiply();     // final result: u^v * (...)
}
/// Writes into `out` the logarithm of the relative rotation taking x to y,
/// i.e. log(x^{-1} * y), with x and y stored as 4d quaternion coefficients.
void ExpMapQuaternion::pseudoLog_(RefVec out, const ConstRefVec& x, const ConstRefVec& y) {
    const toConstQuat qX(x.data());
    const toConstQuat qY(y.data());

    // Relative rotation, written through a Map into local coefficient storage.
    Eigen::Vector4d relCoeffs;
    toQuat relative(relCoeffs.data());
    relative = qX.inverse() * qY; // TODO double-check that formula

    logarithm(out, relCoeffs);
}
/// Logarithm of this value in base `other`, ignoring signaling flags.
/// @param other     the logarithm base
/// @param roundMode rounding mode applied by the underlying library
/// @return log_other(*this)
Decimal128 Decimal128::logarithm(const Decimal128& other, RoundingMode roundMode) const {
    std::uint32_t throwAwayFlag = 0;
    // Fast paths: the library provides dedicated base-2 and base-10 logs.
    if (other.isEqual(Decimal128(2))) {
        BID_UINT128 current = decimal128ToLibraryType(_value);
        current = bid128_log2(current, roundMode, &throwAwayFlag);
        return Decimal128{libraryTypeToValue(current)};
    }
    if (other.isEqual(Decimal128(10))) {
        BID_UINT128 current = decimal128ToLibraryType(_value);
        current = bid128_log10(current, roundMode, &throwAwayFlag);
        return Decimal128{libraryTypeToValue(current)};
    }
    // General base. BUGFIX: forward roundMode — it was previously dropped,
    // silently using the overload's default rounding mode even though both
    // special-case branches above honored the caller's choice.
    return logarithm(other, &throwAwayFlag, roundMode);
}
/// Thin wrapper: delegates straight to logarithm().
MC *new_logarithm(MC *ptr1) {
    return logarithm(ptr1);
}
/// Logarithm of this value in base `other`, reporting signaling flags.
/// @param other          the logarithm base
/// @param signalingFlags out-parameter accumulating library status flags
/// @param roundMode      rounding mode applied by the underlying library
/// @return log_other(*this)
Decimal128 Decimal128::logarithm(const Decimal128& other,
                                 std::uint32_t* signalingFlags,
                                 RoundingMode roundMode) const {
    // Change of base: log_other(x) = ln(x) / ln(other).
    // BUGFIX: previously divided by `other` itself instead of by its natural
    // log, yielding ln(x)/base — wrong for every base (the sibling overload's
    // base-2/base-10 fast paths confirm this function means "log base other").
    return logarithm(signalingFlags, roundMode)
        .divide(other.logarithm(signalingFlags, roundMode));
}
/// Natural logarithm of this value, ignoring signaling flags.
/// @param roundMode rounding mode applied by the underlying library
/// @return ln(*this)
Decimal128 Decimal128::logarithm(RoundingMode roundMode) const {
    std::uint32_t throwAwayFlag = 0;
    // BUGFIX: forward roundMode — it was previously dropped, so any
    // non-default rounding mode requested by the caller was silently ignored.
    return logarithm(&throwAwayFlag, roundMode);
}
/// Log map at the identity: writes the quaternion logarithm of x into `out`.
void ExpMapQuaternion::pseudoLog0_(RefVec out, const ConstRefVec& x) {
    // Nothing to compose with — this is just the plain logarithm of x.
    logarithm(out, x);
}