Code Example #1
File: logreg.cpp  Project: drknexus/buckshot
// Yuan, Chang, Hsieh and Lin: A Comparison of Optimization Methods and Software for Large-scale L1-regularized Linear Classification; p. 14
double shoot_cdn(int x_i, double lambda) {
    // Compute d (equation 29): the solution to the quadratic approximation
    // of the objective for weight x_i.
    if (!active[x_i]) return 0.0;
    
    double violation = 0.0;
    double xv = logregprob->x[x_i];
    
    double Ld0, Ldd0;
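    // logreg_cdn_derivandH fills Ld0 = L'(0) and Ldd0 = L''(0): the first and
    // second derivatives of the smooth loss along coordinate x_i.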
    logreg_cdn_derivandH(lambda, x_i, Ld0, Ldd0);    
    
    double Gp = (Ld0+1);
    double Gn = (Ld0-1);

    // Shrinking (practically copied from LibLinear)
    if (xv == 0) {
        if (Gp < 0) {
            violation = -Gp;
        } else if (Gn > 0) {
            violation = Gn;
        } else if (Gp > Gmax_old/logregprob->ny && Gn < -Gmax_old/logregprob->ny) {
            // Shrink: remove x_i from the active set.
            active[x_i] = false;
            return 0.0;
        }
    } else if (xv > 0) {
        violation = fabs(Gp);
    } else {
        violation = fabs(Gn);
    }
    
    // TODO: should use a reduction here! Or lock.
    //if (Gmax < violation)
    //    Gmax = violation;
    logregprob->Gmax.max(0, violation);

    // Newton direction d: closed-form minimizer of the second-order model
    // plus the L1 term (soft-threshold solution; eq. 29).
    double rhs = Ldd0 * xv;
    double d;
    if (Gp <= rhs) {
        d = -(Gp/Ldd0);
    } else if (Gn >= rhs) {
        d = -(Gn/Ldd0); 
    } else {
        d = -xv;
    }
    
    if (std::abs(d) < 1e-12) {
        return 0.0;
    }
    // Small optimization: clamp the step to [-10, 10].
    d = std::min(std::max(d, -10.0), 10.0);
   
    // Backtracking line search (with Armijo-type of condition) (eq. 30)
    int iter = 0;
    const int max_num_linesearch = 20;
    double gamma = 1.0; // Note: Yuan, Chang et al. use lambda instead of gamma
    // delta bounds the directional derivative of the objective at the current point (eq. 30).
    double delta = Ld0 * d + std::abs(xv + d) - std::abs(xv);
    double rhs_c = cdn_sigma * delta;
    do {
        double change_in_obj = g_xi(d, x_i, lambda); // objective change for step d on coordinate x_i
        if (change_in_obj <= gamma * rhs_c) {
            // Found ok.
            logregprob->x[x_i] += d;
            // Update dot products (Ax)
            sparse_array &col = logregprob->A_cols[x_i];
            #pragma omp parallel for
            for(int i=0; i<col.length(); i++) {
                logregprob->expAx.mul(col.idxs[i], exp(d * col.values[i]));
            }
            return std::abs(d);
        }
        gamma *= 0.5;
        d *= 0.5;
    } while(++iter < max_num_linesearch); 
    recompute_expAx();
    return 0.0;
}
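For reference, the three-way branch above is the closed-form minimizer of the one-variable second-order model of the objective plus the L1 term (eq. 29 in the cited paper; the code normalizes the L1 weight to 1, which matches Gp = Ld0 + 1 and Gn = Ld0 - 1):

\[
d = \begin{cases}
-\dfrac{L'(0)+1}{L''(0)} & \text{if } L'(0)+1 \le L''(0)\,x_i,\\[4pt]
-\dfrac{L'(0)-1}{L''(0)} & \text{if } L'(0)-1 \ge L''(0)\,x_i,\\[4pt]
-x_i & \text{otherwise.}
\end{cases}
\]

The backtracking loop then accepts the step \(\beta^k d\) (with \(\beta = 1/2\); \(\gamma = \beta^k\) in the code) once the sufficient-decrease condition of eq. 30 holds:

\[
f(x + \beta^k d\, e_i) - f(x) \;\le\; \sigma\,\beta^k \bigl( L'(0)\,d + |x_i + d| - |x_i| \bigr),
\]

where \(\sigma\) is cdn_sigma and \(L\) is the smooth loss restricted to coordinate x_i.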
Code Example #2
// Calculate local stiffness matrix. Use reduced integration with hourglass stabilization.
void ElementQuadrangleLin::set_K_isoparametric()
{	
	double shear = E / (2 + 2 * nu);                  // shear modulus (mu)
	double lame = E * nu / ((1 + nu) * (1 - 2 * nu)); // Lame's first parameter (lambda)
	// Plane-strain constitutive matrix in Voigt notation (xx, yy, xy).
	Eigen::Matrix3d C;
	C << 2 * shear + lame, lame, 0.,
		lame, 2 * shear + lame, 0.,
		0., 0., shear;
	
	Eigen::Vector4d x, y;
	int i;
	for (i = 0; i < 4; i++)
	{
		// Node numbers are 1-based in the input.
		x(i) = domain->nodes[nodes[i] - 1].x;
		y(i) = domain->nodes[nodes[i] - 1].y;
	}
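	// 2x2 Gauss points plus the element centre as a 5th point; the centre
	// Jacobian (index 4) drives the one-point reduced integration.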
	const double gp = sqrt(1. / 3.);
	double gps[5][2] = { {-gp,-gp},{ gp,-gp },{ gp,gp },{ -gp,gp },{0.,0.} };
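	// Shape-function derivatives at the centre (g_xi = dN/dxi, g_eta = dN/deta)
	// and the hourglass base vector h = 0.25 * xi_a * eta_a.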
	Eigen::Vector4d g_xi(-0.25, 0.25, 0.25, -0.25);
	Eigen::Vector4d g_eta(-0.25, -0.25, 0.25, 0.25);
	Eigen::Vector4d h(0.25, -0.25, 0.25, -0.25);
	// Jacobian of the isoparametric mapping at every integration point.
	Eigen::Matrix2d J[5];
	Eigen::Matrix2d J_inv[5];
	for (i = 0; i < 5; i++)
	{
		double xi = gps[i][0];
		double eta = gps[i][1];
		J[i](0, 0) = x.dot(g_xi + eta*h);
		J[i](0, 1) = x.dot(g_eta + xi*h);
		J[i](1, 0) = y.dot(g_xi + eta*h);
		J[i](1, 1) = y.dot(g_eta + xi*h);
		J_inv[i] = J[i].inverse();
	}
	Eigen::MatrixXd xieta(4, 2);
	xieta << g_xi, g_eta;
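	// Cartesian shape-function derivatives at the centre:
	// [dN/dx, dN/dy] = [dN/dxi, dN/deta] * J0^{-1}.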
	Eigen::MatrixXd b(4, 2);
	b = xieta*J_inv[4];
	// B_0^T (8x3): strain-displacement matrix of the one-point (centre)
	// integration; DOF ordering is [u1..u4, v1..v4].
	Eigen::MatrixXd B_0T(8, 3);
	Eigen::Vector4d zers = Eigen::Vector4d::Zero();
	B_0T << b.block<4, 1>(0, 0), zers, b.block<4, 1>(0, 1),
		zers, b.block<4, 1>(0, 1), b.block<4, 1>(0, 0);
	Eigen::MatrixXd xy(4,2);
	xy << x,y;
	Eigen::Matrix4d m_gamma;
	Eigen::Vector4d gamma;
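	// gamma = (I - b * xy^T) h: the hourglass stabilization vector
	// (Flanagan-Belytschko form).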
	m_gamma = Eigen::Matrix4d::Identity() - b*xy.transpose();
	gamma = m_gamma * h;
	// Deviatoric stabilization strain operator built from the centre Jacobian.
	Eigen::MatrixXd j_0_dev(3, 4);
	Eigen::Matrix2d j0iT = J_inv[4].transpose();
	j_0_dev << 2 * j0iT.block<1, 2>(0, 0), -1 * j0iT.block<1, 2>(1, 0),
		-1 * j0iT.block<1, 2>(0, 0), 2 * j0iT.block<1, 2>(1, 0),
		3 * j0iT.block<1, 2>(0, 0), 3 * j0iT.block<1, 2>(1, 0);
	j_0_dev *= 1. / 3.;
	// Hourglass strain interpolation at each Gauss point.
	Eigen::MatrixXd L_hg[4];
	for (i = 0; i < 4; i++)
	{
		double xi = gps[i][0];
		double eta = gps[i][1];
		L_hg[i].resize(4,2);
		L_hg[i] << eta, 0.0, 
					xi, 0.0, 
					0.0, eta, 
					0.0, xi;
	}
	// gamma expanded to both displacement components, then transposed to 2x8.
	Eigen::MatrixXd M_hg(8, 2);
	M_hg << gamma, zers, zers, gamma;
	M_hg.transposeInPlace(); // 2x8
	Eigen::MatrixXd B_red[4];
	Eigen::MatrixXd K_red = Eigen::MatrixXd::Zero(8,8);
	Eigen::MatrixXd B[4];
	Eigen::MatrixXd Bc[4];
	volume = 4 * J[4].determinant() * thickness; // element area (4 * det J_0) times thickness
	for (i = 0; i < 4; i++)
	{
		B_red[i].resize(3, 8);
		B[i].resize(3, 8);
		Bc[i].resize(3, 8);
		B_red[i] = j_0_dev * L_hg[i] * M_hg; // 3x4 * 4x2 * 2x8 = 3x8
		K_red += B_red[i].transpose()*C*B_red[i]; // 8x3 * 3x3 * 3x8 = 8x8
		Bc[i] = B_0T.transpose() + B_red[i]; // 3x8 + 3x8 = 3x8
		// Reorder columns from [u1..u4, v1..v4] to interleaved [u1, v1, ..., u4, v4].
		B[i] << Bc[i].block<3, 1>(0, 0), Bc[i].block<3, 1>(0, 4),
			Bc[i].block<3, 1>(0, 1), Bc[i].block<3, 1>(0, 5),
			Bc[i].block<3, 1>(0, 2), Bc[i].block<3, 1>(0, 6),
			Bc[i].block<3, 1>(0, 3), Bc[i].block<3, 1>(0, 7); // 3x8
	}
	K_red *= volume / 4.0;
	Eigen::MatrixXd K(8, 8);
	Eigen::MatrixXd Kc(8, 8);
	Eigen::MatrixXd Kc2(8, 8);
	Kc = K_red + volume*B_0T*C*B_0T.transpose(); // 8x8 + 8x3 * 3x3 * 3x8
	// Apply the same DOF reordering to the columns, then the rows, of K.
	Kc2 << Kc.block<8, 1>(0, 0), Kc.block<8, 1>(0, 4), Kc.block<8, 1>(0, 1), Kc.block<8, 1>(0, 5),
		Kc.block<8, 1>(0, 2), Kc.block<8, 1>(0, 6), Kc.block<8, 1>(0, 3), Kc.block<8, 1>(0, 7);
	K << Kc2.block<1, 8>(0, 0), Kc2.block<1, 8>(4, 0), Kc2.block<1, 8>(1, 0), Kc2.block<1, 8>(5, 0),
		Kc2.block<1, 8>(2, 0), Kc2.block<1, 8>(6, 0), Kc2.block<1, 8>(3, 0), Kc2.block<1, 8>(7, 0);
	// Store the reordered stiffness and strain-displacement matrices in the element.
	K_loc = K;
	B_matrices = B;
}
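Reading off the assembly above, the element stiffness this routine computes is the one-point (reduced) integration term plus the hourglass stabilization term, with B_0 the centre strain-displacement matrix and B_hg the per-Gauss-point stabilization matrices (B_red[i] in the code):

\[
K \;=\; V\, B_0^{T} C\, B_0 \;+\; \frac{V}{4} \sum_{g=1}^{4} B_{\mathrm{hg}}(\xi_g,\eta_g)^{T}\, C\, B_{\mathrm{hg}}(\xi_g,\eta_g),
\qquad V = 4\,\det J_0\, t,
\]

followed by the row/column permutation that converts the DOF ordering from [u1..u4, v1..v4] to the interleaved [u1, v1, ..., u4, v4] stored in K_loc.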