void Tikhonov ( Orientation orientation, const SparseMatrix<F>& A, const Matrix<F>& B, const SparseMatrix<F>& G, Matrix<F>& X, const LeastSquaresCtrl<Base<F>>& ctrl ) { DEBUG_CSE // Explicitly form W := op(A) // ========================== SparseMatrix<F> W; if( orientation == NORMAL ) W = A; else if( orientation == TRANSPOSE ) Transpose( A, W ); else Adjoint( A, W ); const Int m = W.Height(); const Int n = W.Width(); const Int numRHS = B.Width(); // Embed into a higher-dimensional problem via appending regularization // ==================================================================== SparseMatrix<F> WEmb; if( m >= n ) VCat( W, G, WEmb ); else HCat( W, G, WEmb ); Matrix<F> BEmb; Zeros( BEmb, WEmb.Height(), numRHS ); if( m >= n ) { auto BEmbT = BEmb( IR(0,m), IR(0,numRHS) ); BEmbT = B; } else BEmb = B; // Solve the higher-dimensional problem // ==================================== Matrix<F> XEmb; LeastSquares( NORMAL, WEmb, BEmb, XEmb, ctrl ); // Extract the solution // ==================== if( m >= n ) X = XEmb; else X = XEmb( IR(0,n), IR(0,numRHS) ); }
/*
   See Waggoner and Zha, "A Gibbs sampler for structural vector
   autoregressions", JEDC 2003, for a description of the notation.

   We take the square root of a symmetric and positive definite X to be any
   matrix Y such that Y*Y'=X.  Note that this is not the usual definition
   because we do not require Y to be symmetric and positive definite.
*/
// Precomputes, for each of the n_vars equations, the per-equation matrices
// (Simulate_SqrtH, Simulate_P, Simulate_SqrtS, Simulate_USqrtS) used by the
// Gibbs sampler, from the data/prior cross-moment matrices.  Throws
// dw_exception when there are no observations.  Sets simulation_info_set on
// success.
void SBVAR_symmetric_linear::SetSimulationInfo(void)
{
  // Cross moments are built from the data, so simulation is impossible
  // without at least one observation.
  if (NumberObservations() == 0)
    throw dw_exception("SetSimulationInfo(): cannot simulate if no observations");

  TDenseMatrix all_YY, all_XY, all_XX;
  if (flat_prior)
    {
      // Flat prior: use the raw data cross moments directly.
      all_YY=YY;
      all_XY=XY;
      all_XX=XX;
    }
  else
    {
      // Informative prior: vertically stack the lambda-scaled data on top of
      // the lambda_bar-scaled prior dummy observations, then rebuild the
      // cross-moment matrices from the stacked system.
      TDenseMatrix all_Y, all_X;
      all_Y=VCat(sqrt(lambda)*Data(),sqrt(lambda_bar)*prior_Y);
      all_X=VCat(sqrt(lambda)*PredeterminedData(),sqrt(lambda_bar)*prior_X);
      all_YY=Transpose(all_Y)*all_Y;
      all_XY=Transpose(all_X)*all_Y;
      all_XX=Transpose(all_X)*all_X;
    }

  // One entry per equation/variable.
  Simulate_SqrtH.resize(n_vars);
  Simulate_P.resize(n_vars);
  Simulate_SqrtS.resize(n_vars);
  Simulate_USqrtS.resize(n_vars);

  for (int i=n_vars-1; i >= 0; i--)
    {
      // invH = V[i]' * all_XX * V[i]: the (inverse) posterior precision of
      // equation i's coefficients, projected onto the restriction space V[i].
      TDenseMatrix invH=Transpose(V[i])*(all_XX*V[i]);
      // SqrtH is the inverse of the upper-triangular Cholesky factor of
      // invH; presumably SqrtH*SqrtH' = inverse(invH) in the square-root
      // sense described above — depends on this library's Cholesky
      // convention (TODO confirm against TDenseMatrix docs).
      Simulate_SqrtH[i]=Inverse(Cholesky(invH,CHOLESKY_UPPER_TRIANGULAR),SOLVE_UPPER_TRIANGULAR);
      // P = inverse(invH) * V[i]' * all_XY * U[i], computed via the two
      // triangular factors to avoid forming the inverse explicitly.
      Simulate_P[i]=Simulate_SqrtH[i]*(Transpose(Simulate_SqrtH[i])*(Transpose(V[i])*(all_XY*U[i])));
      // SqrtS: sqrt(lambda_T) times the inverse upper-triangular Cholesky
      // factor of the Schur complement U[i]'*all_YY*U[i] - P'*invH*P.
      Simulate_SqrtS[i]=sqrt(lambda_T)*Inverse(Cholesky(Transpose(U[i])*(all_YY*U[i]) - Transpose(Simulate_P[i])*(invH*Simulate_P[i]),CHOLESKY_UPPER_TRIANGULAR),SOLVE_UPPER_TRIANGULAR);
      // Cache U[i]*SqrtS since the sampler uses this product repeatedly.
      Simulate_USqrtS[i]=U[i]*Simulate_SqrtS[i];
    }

  simulation_info_set=true;
}
void Tikhonov ( Orientation orientation, const DistSparseMatrix<F>& A, const DistMultiVec<F>& B, const DistSparseMatrix<F>& G, DistMultiVec<F>& X, const LeastSquaresCtrl<Base<F>>& ctrl ) { DEBUG_CSE mpi::Comm comm = A.Comm(); // Explicitly form W := op(A) // ========================== DistSparseMatrix<F> W(comm); if( orientation == NORMAL ) W = A; else if( orientation == TRANSPOSE ) Transpose( A, W ); else Adjoint( A, W ); const Int m = W.Height(); const Int n = W.Width(); const Int numRHS = B.Width(); // Embed into a higher-dimensional problem via appending regularization // ==================================================================== DistSparseMatrix<F> WEmb(comm); if( m >= n ) VCat( W, G, WEmb ); else HCat( W, G, WEmb ); DistMultiVec<F> BEmb(comm); Zeros( BEmb, WEmb.Height(), numRHS ); if( m >= n ) { // BEmb := [B; 0] // -------------- const Int mLocB = B.LocalHeight(); BEmb.Reserve( mLocB*numRHS ); for( Int iLoc=0; iLoc<mLocB; ++iLoc ) { const Int i = B.GlobalRow(iLoc); for( Int j=0; j<numRHS; ++j ) BEmb.QueueUpdate( i, j, B.GetLocal(iLoc,j) ); } BEmb.ProcessQueues(); } else BEmb = B; // Solve the higher-dimensional problem // ==================================== DistMultiVec<F> XEmb(comm); LeastSquares( NORMAL, WEmb, BEmb, XEmb, ctrl ); // Extract the solution // ==================== if( m >= n ) X = XEmb; else GetSubmatrix( XEmb, IR(0,n), IR(0,numRHS), X ); }