btScalar getObjectCollisionPenalty(const scene_support_vertex_properties &support_graph_vertex)
{
	// TODO: Find a good parameter for the penetration depth
	const double &total_penetration_depth = support_graph_vertex.penetration_distance_;
	return logisticFunction(-1.5, 1., 3., total_penetration_depth);
	// return 1.;
}
Example #2
// loss function for the whole batch (Negative LogLikelihood for Bernoulli probability distribution)
double lossFunction(DataSet data_set) {
    double sum = 0.0;
    for (size_t i = 0; i < data_set.num_data_points; i++) {
        double probability_of_positive = logisticFunction(data_set.parameter_vector, &data_set.data_points[i * data_set.num_features], data_set.num_features);
        FeatureType label_i = (FeatureType) data_set.labels[i];
        sum += ((label_i * log(probability_of_positive)) + ((1 - label_i) * log(1 - probability_of_positive))) / data_set.num_data_points;
    }
    return -sum; // negative of the sum
}
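A minimal usage sketch for lossFunction, assuming FeatureType and LabelType are float, that DataSet stores its points row-major in a flat data_points array (as the indexing above suggests), and that the parameter vector has already been set; all literal values here are illustrative only:

// Hypothetical driver showing how lossFunction might be called.
float points[4]  = {0.2f, 1.0f,   // point 0
                    0.9f, 0.1f};  // point 1
float labels[2]  = {1.0f, 0.0f};
float theta[2]   = {0.5f, -0.3f}; // illustrative parameter vector

DataSet data_set;
data_set.num_data_points  = 2;
data_set.num_features     = 2;
data_set.data_points      = points;
data_set.labels           = labels;
data_set.parameter_vector = theta;

double nll = lossFunction(data_set); // average negative log-likelihood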
Example #3
// computes gradient for a single datapoint
static void gradientForSinglePoint(
    FeatureType* parameter_vector,
    FeatureType* data_point,
    LabelType label,
    size_t num_features,
    FeatureType* gradient) {

    //gradient for logistic function: x * (pi(theta, x) - y)
    double probability_of_positive =
        logisticFunction(parameter_vector, data_point, num_features);

    memset(gradient, 0, num_features * sizeof(FeatureType));

    addVectors(gradient,
               data_point,
               num_features,
               (probability_of_positive - label));
}
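addVectors is not shown on this page; from the call above and the batch-gradient code below, it appears to perform an in-place scaled vector add. A plausible sketch under that assumption (not the original implementation):

// Assumed semantics: a[i] += scale * b[i] for i in [0, size).
// This matches both uses: accumulating x * (pi - y) into the gradient,
// and forming (PI - Y) by adding the labels scaled by -1 to the probabilities.
static void addVectors(FeatureType* a,
                       FeatureType* b,
                       size_t size,
                       FeatureType scale) {
    for (size_t i = 0; i < size; i++)
        a[i] += scale * b[i];
}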
Example #4
// computes gradient for the whole training set
static void gradientForWholeBatch(
    DataSet training_set,
    FeatureType* gradient) {

    memset(gradient, 0, training_set.num_features * sizeof(FeatureType));

    float* probabilities_of_positive = new float[training_set.num_data_points];

    // computes logistic function for each data point in the training set
    size_t idx = 0;
    for (size_t i = 0; i < training_set.num_data_points; i++) {

        idx = i * training_set.num_features;

        probabilities_of_positive[i] =  logisticFunction(
                                                training_set.parameter_vector,
                                                &training_set.data_points[idx],
                                                training_set.num_features);
    }

    // computes difference between
    // predicted probability and actual label: (PI - Y)
    addVectors(probabilities_of_positive,
               training_set.labels,
               training_set.num_data_points,
               -1);

    // finishes computation of gradient: (1/n) * X^T * (PI(theta, X) - Y)
    float factor = 1.0f / training_set.num_data_points;
    matrixVectorMultiply(training_set.data_points,
                         probabilities_of_positive,
                         factor,
                         training_set.num_data_points,
                         training_set.num_features,
                         gradient);

    delete[] probabilities_of_positive;
}
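A sketch of how gradientForWholeBatch might drive a plain batch gradient-descent loop; the learning rate, epoch count, and update loop are illustrative assumptions, not part of the original code:

// Hypothetical training loop: repeatedly compute the batch gradient and
// step the parameter vector against it.
static void trainByGradientDescent(DataSet training_set,
                                   double learning_rate, // e.g. 0.1 (assumed)
                                   size_t num_epochs) {  // e.g. 100 (assumed)
    FeatureType* gradient = new FeatureType[training_set.num_features];
    for (size_t epoch = 0; epoch < num_epochs; epoch++) {
        gradientForWholeBatch(training_set, gradient);
        for (size_t j = 0; j < training_set.num_features; j++)
            training_set.parameter_vector[j] -= learning_rate * gradient[j];
    }
    delete[] gradient;
}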
Example #5
Vector3d JF12Field::getRegularField(const Vector3d& pos) const {
	Vector3d b(0.);

	double r = sqrt(pos.x * pos.x + pos.y * pos.y); // in-plane radius
	double d = pos.getR(); // distance to galactic center
	if ((d < 1 * kpc) or (d > 20 * kpc))
		return b; // 0 field for d < 1 kpc or d > 20 kpc

	double phi = pos.getPhi(); // azimuth
	double sinPhi = sin(phi);
	double cosPhi = cos(phi);

	double lfDisk = logisticFunction(pos.z, hDisk, wDisk);

	// disk field
	if (r > 3 * kpc) {
		double bMag;
		if (r < 5 * kpc) {
			// molecular ring
			bMag = bRing * (5 * kpc / r) * (1 - lfDisk);
			b.x += -bMag * sinPhi;
			b.y += bMag * cosPhi;

		} else {
			// spiral region
			double r_negx = r * exp(-(phi - M_PI) / tan90MinusPitch);
			if (r_negx > rArms[7])
				r_negx = r * exp(-(phi + M_PI) / tan90MinusPitch);
			if (r_negx > rArms[7])
				r_negx = r * exp(-(phi + 3 * M_PI) / tan90MinusPitch);

			for (int i = 7; i >= 0; i--)
				if (r_negx < rArms[i])
					bMag = bDisk[i];

			bMag *= (5 * kpc / r) * (1 - lfDisk);
			b.x += bMag * (sinPitch * cosPhi - cosPitch * sinPhi);
			b.y += bMag * (sinPitch * sinPhi + cosPitch * cosPhi);
		}
	}

	// toroidal halo field
	double bMagH = exp(-fabs(pos.z) / z0) * lfDisk;
	if (pos.z >= 0)
		bMagH *= bNorth * (1 - logisticFunction(r, rNorth, wHalo));
	else
		bMagH *= bSouth * (1 - logisticFunction(r, rSouth, wHalo));
	b.x += -bMagH * sinPhi;
	b.y += bMagH * cosPhi;

	// poloidal halo field
	double bMagX;
	double sinThetaX, cosThetaX;
	double rp;
	double rc = rXc + fabs(pos.z) / tanThetaX0;
	if (r < rc) {
		// varying elevation region
		rp = r * rXc / rc;
		bMagX = bX * exp(-1 * rp / rX) * pow(rp / r, 2.);
		double thetaX = atan2(fabs(pos.z), (r - rp));
		if (pos.z == 0)
			thetaX = M_PI / 2.;
		sinThetaX = sin(thetaX);
		cosThetaX = cos(thetaX);
	} else {
		// constant elevation region
		rp = r - fabs(pos.z) / tanThetaX0;
		bMagX = bX * exp(-rp / rX) * (rp / r);
		sinThetaX = sinThetaX0;
		cosThetaX = cosThetaX0;
	}
	double zsign = pos.z < 0 ? -1 : 1;
	b.x += zsign * bMagX * cosThetaX * cosPhi;
	b.y += zsign * bMagX * cosThetaX * sinPhi;
	b.z += bMagX * sinThetaX;

	return b;
}
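The three-argument logisticFunction(x, x0, w) used throughout getRegularField is not shown here. In the JF12 model it acts as a smooth transition profile; a sketch consistent with that usage (the exact form is an assumption based on the published model, not code from this page):

// Assumed form: logistic transition in |x| around x0 with width w,
// tending to 0 well inside x0 and to 1 well outside it.
double logisticFunction(const double& x, const double& x0, const double& w) {
	return 1. / (1. + exp(-2. * (fabs(x) - x0) / w));
}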
Example #6
// computes logistic function for a given parameter vector (parameter_vector) and a data point (data_point_i)
double logisticFunction(FeatureType* parameter_vector, FeatureType* data_point_i, const size_t num_features) {
    return logisticFunction(dotProduct(parameter_vector, data_point_i, num_features));
}
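The overload above delegates to a single-argument logisticFunction and a dotProduct helper, neither of which appears on this page. Plausible sketches under that assumption:

// Assumed standard sigmoid: maps the linear score to a probability in (0, 1).
double logisticFunction(double exponent) {
    return 1.0 / (1.0 + exp(-exponent));
}

// Assumed dense dot product of two vectors of length size.
double dotProduct(FeatureType* a, FeatureType* b, const size_t size) {
    double result = 0.0;
    for (size_t i = 0; i < size; i++)
        result += a[i] * b[i];
    return result;
}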
btScalar getObjectSupportContribution(const scene_support_vertex_properties &support_graph_vertex)
{
	const double &support_value = support_graph_vertex.support_contributions_;
	return logisticFunction(0.5, 1., 0., support_value);
}
inline double stabilityPenaltyFormula(const double &a_t, const double &a_r, const double &w_t, const double &w_r)
{
	// use reversed logistic function for the stability penalty
	return (logisticFunction(-40., 1., 0.10, a_t*w_t)) * (logisticFunction(-40., 1., 0.10, a_r*w_r));
}
btScalar dataProbabilityScale(const btScalar &hypothesis_confidence, const btScalar &max_confidence)
{
	// Scaling for hypothesis confidence
	return logisticFunction(10, 1, 0.36, hypothesis_confidence/max_confidence);
}
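The four-argument logisticFunction called by the penalty and scaling helpers above is also not shown. A sketch of a generalized logistic that would match those call sites; the parameter order (steepness, maximum, midpoint, input) and the exact form are assumptions inferred from the calls, not the original definition:

// Assumed generalized logistic: max_value / (1 + exp(-k * (x - midpoint))).
// A negative k (as in getObjectCollisionPenalty and stabilityPenaltyFormula)
// makes the curve decrease with x, i.e. a "reversed" logistic.
btScalar logisticFunction(const btScalar &k, const btScalar &max_value,
	const btScalar &midpoint, const btScalar &x)
{
	return max_value / (1 + exp(-k * (x - midpoint)));
}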