void Vertsplit::evalAdj(const std::vector<pv_MX>& adjSeed, const std::vector<pv_MX>& adjSens) {
  int nadj = adjSeed.size();
  int nx = offset_.size()-1;

  // Get row offsets
  vector<int> row_offset;
  row_offset.reserve(offset_.size());
  row_offset.push_back(0);
  for (std::vector<Sparsity>::const_iterator it=output_sparsity_.begin();
       it!=output_sparsity_.end(); ++it) {
    row_offset.push_back(row_offset.back() + it->size1());
  }

  for (int d=0; d<nadj; ++d) {
    if (adjSens[d][0]!=0) {
      vector<MX> v;
      for (int i=0; i<nx; ++i) {
        MX* x_i = adjSeed[d][i];
        if (x_i!=0) {
          v.push_back(*x_i);
          *x_i = MX();
        } else {
          v.push_back(MX(output_sparsity_[i].shape()));
        }
      }
      adjSens[d][0]->addToSum(vertcat(v));
    }
  }
}
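A note on the rule applied above (standard reverse-mode calculus, not spelled out in the source): if $(y_1,\dots,y_{n_x}) = \operatorname{vertsplit}(x)$, the adjoint propagation is

\[ \bar{x} \mathrel{+}= \operatorname{vertcat}(\bar{y}_1, \dots, \bar{y}_{n_x}), \]

with a structurally zero block of the matching shape substituted for every missing seed, which is exactly what the loop over v builds before the addToSum call.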
int write_traj(FILE *log,t_commrec *cr, char *traj,t_nsborder *nsb,
               int step,real t,real lambda,t_nrnb nrnb[],
               int natoms,rvec *xx,rvec *vv,rvec *ff,matrix box)
{
  static int fp=-1;

  if ((fp == -1) && MASTER(cr)) {
#ifdef DEBUG
    fprintf(log,"Going to open trajectory file: %s\n",traj);
#endif
    fp = open_trn(traj,"w");
  }
#define MX(xvf) moveit(log,cr->left,cr->right,#xvf,xvf,nsb)
  if (cr->nnodes > 1) {
    MX(xx);
    MX(vv);
    MX(ff);
  }
  if ((xx || vv || ff) && MASTER(cr)) {
    fwrite_trn(fp,step,t,lambda,box,natoms,xx,vv,ff);
    gmx_fio_flush(fp);
  }
  return fp;
}
void DrawWindowContent() {
  int w = nWWidth;
  int h = nWHeight;
  WSetColor(DARKGRAY);
  WFillRectangle(0, 0, w, h);
  WSetColor(BLACK);
  // axis
  WDrawLine(0, h / 2, w, h / 2);
  WDrawLine(w / 2, 0, w / 2, h);
  // arrows
  WDrawLine(w / 2, 0, w / 2 - 5, 5);
  WDrawLine(w / 2, 0, w / 2 + 5, 5);
  WDrawLine(w, h / 2, w - 5, h / 2 - 5);
  WDrawLine(w, h / 2, w - 5, h / 2 + 5);
  WDrawString("Press Q to quit, F1...F4 to change function", 10, 20);
  WSetColor(GREEN);
  for (int i = 0; i < N; i++)
    WDrawLine(MX(x[i]), MY(f(x[i])), MX(x[i + 1]), MY(f(x[i + 1])));
  WSetColor(BLUE);
  double stp = 0.0001;
  for (double x = a; x < b; x += stp)
    WDrawLine(MX(x), MY(f(x)), MX(x + stp), MY(f(x + stp)));
  // WSetColor (RED);
  // for (double x = a; x < b; x+=stp)
  //   WDrawLine(MX(x), MY(F(x)), MX(x+stp), MY(F(x+stp)));
}
void apply_translations(void) {
  unsigned int i;

#define MX(a) (bit_count (conversions_mask & (a)))
  // Each MX(...) counts how many of the given conversion bits are set,
  // so a result > 1 means mutually exclusive conv flags were combined.
  if ((MX (C_ASCII | C_EBCDIC | C_IBM) > 1)
      || (MX (C_BLOCK | C_UNBLOCK) > 1)
      || (MX (C_LCASE | C_UCASE) > 1)
      || (MX (C_UNBLOCK | C_SYNC) > 1)) {
    log_info("\
only one conv in {ascii,ebcdic,ibm}, {lcase,ucase}, {block,unblock}, {unblock,sync}");
  }
}
void Inverse::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                         MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                         bool output_given) {
  const MX& X = *input[0];
  MX& inv_X = *output[0];
  if (!output_given) {
    inv_X = inv(X);
  }

  // Forward sensitivities
  int nfwd = fwdSens.size();
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = -mul(inv_X, mul(*fwdSeed[d][0], inv_X));
  }

  // Adjoint sensitivities
  int nadj = adjSeed.size();
  if (nadj>0) {
    MX trans_inv_X = inv_X.T();
    for (int d=0; d<nadj; ++d) {
      adjSens[d][0]->addToSum(-mul(trans_inv_X, mul(*adjSeed[d][0], trans_inv_X)));
      *adjSeed[d][0] = MX();
    }
  }
}
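The loops above implement the textbook differential of the matrix inverse (a standard identity, not stated in the source): for $Y = X^{-1}$,

\[ \dot{Y} = -X^{-1}\,\dot{X}\,X^{-1}, \qquad \bar{X} \mathrel{+}= -X^{-\top}\,\bar{Y}\,X^{-\top}, \]

so the forward pass sandwiches each seed between two copies of inv_X, and the adjoint pass does the same with trans_inv_X.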
void UnaryMX::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                         MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                         bool output_given) {
  // Evaluate function
  MX f, dummy; // Function value, dummy second argument
  if (output_given) {
    f = *output[0];
  } else {
    casadi_math<MX>::fun(op_, *input[0], dummy, f);
  }

  // Number of forward and adjoint directions
  int nfwd = fwdSens.size();
  int nadj = adjSeed.size();
  if (nfwd>0 || nadj>0) {
    // Get partial derivatives
    MX pd[2];
    casadi_math<MX>::der(op_, *input[0], dummy, f, pd);

    // Propagate forward seeds
    for (int d=0; d<nfwd; ++d) {
      *fwdSens[d][0] = pd[0]*(*fwdSeed[d][0]);
    }

    // Propagate adjoint seeds
    for (int d=0; d<nadj; ++d) {
      MX s = *adjSeed[d][0];
      *adjSeed[d][0] = MX();
      *adjSens[d][0] += pd[0]*s;
    }
  }

  // Perform the assignment (which may be inplace, hence delayed)
  if (!output_given) {
    *output[0] = f;
  }
}
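For a unary elementwise operation $y = f(x)$ this is the plain chain rule, with pd[0] holding $f'(x)$:

\[ \dot{y} = f'(x)\,\dot{x}, \qquad \bar{x} \mathrel{+}= f'(x)\,\bar{y}. \]

The seed is copied into s and cleared before accumulating, presumably because adjSeed and adjSens may refer to the same node (compare the explicit aliasing check in Multiplication::evaluateMX below).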
void Determinant::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                             MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                             bool output_given) {
  int nfwd = fwdSens.size();
  int nadj = adjSeed.size();

  // Non-differentiated output
  const MX& X = *input[0];
  MX& det_X = *output[0];
  if (!output_given) {
    det_X = det(X);
  }

  // Quick return
  if (nfwd==0 && nadj==0) return;

  // Create only once
  MX trans_inv_X = inv(X).T();

  // Forward sensitivities
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = det_X * inner_prod(trans_inv_X, *fwdSeed[d][0]);
  }

  // Adjoint sensitivities
  for (int d=0; d<nadj; ++d) {
    adjSens[d][0]->addToSum((*adjSeed[d][0]*det_X) * trans_inv_X);
    *adjSeed[d][0] = MX();
  }
}
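Both directions follow Jacobi's formula (standard, not derived in the source):

\[ d\det(X) = \det(X)\,\operatorname{tr}\!\left(X^{-1}\,dX\right) = \det(X)\,\langle X^{-\top},\, dX\rangle_F, \]

so the forward sensitivity is the Frobenius inner product of trans_inv_X with the seed, scaled by $\det(X)$, and the adjoint is the scalar seed times $\det(X)\,X^{-\top}$.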
AnyScalar pow(const AnyScalar& x, int i) {
  if (x.is_double()) return pow(x.as_double(), i);
  if (x.is_SX()) return pow(x.as_SX(), SX(i));
  if (x.is_MX()) return pow(x.as_MX(), MX(i));
  tensor_assert(false);
  return 0;
}
void Reshape::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                         MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                         bool output_given) {
  // Quick return if inplace
  if (input[0]==output[0]) return;

  if (!output_given) {
    *output[0] = reshape(*input[0], shape());
  }

  // Forward sensitivities
  int nfwd = fwdSens.size();
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = reshape(*fwdSeed[d][0], shape());
  }

  // Adjoint sensitivities: reshape the seed back to the dependency's shape
  int nadj = adjSeed.size();
  for (int d=0; d<nadj; ++d) {
    MX& aseed = *adjSeed[d][0];
    MX& asens = *adjSens[d][0];
    asens.addToSum(reshape(aseed, dep().shape()));
    aseed = MX();
  }
}
Function Switch::get_reverse(casadi_int nadj, const std::string& name,
                             const std::vector<std::string>& inames,
                             const std::vector<std::string>& onames,
                             const Dict& opts) const {
  // Derivative of each case
  vector<Function> der(f_.size());
  for (casadi_int k=0; k<f_.size(); ++k) {
    if (!f_[k].is_null()) der[k] = f_[k].reverse(nadj);
  }

  // Default case
  Function der_def;
  if (!f_def_.is_null()) der_def = f_def_.reverse(nadj);

  // New Switch for derivatives
  Function sw = Function::conditional("switch_" + name, der, der_def);

  // Get expressions for the derivative switch
  vector<MX> arg = sw.mx_in();
  vector<MX> res = sw(arg);

  // No derivatives with respect to index
  res.insert(res.begin(), MX(1, nadj));

  // Create wrapper
  return Function(name, arg, res, inames, onames, opts);
}
/// Convert scalar to matrix
inline static MX toMatrix(const MX& x, const Sparsity& sp) {
  if (x.size()==sp.size()) {
    return x;
  } else {
    return MX(sp, x);
  }
}
Function Switch::get_forward(casadi_int nfwd, const std::string& name,
                             const std::vector<std::string>& inames,
                             const std::vector<std::string>& onames,
                             const Dict& opts) const {
  // Derivative of each case
  vector<Function> der(f_.size());
  for (casadi_int k=0; k<f_.size(); ++k) {
    if (!f_[k].is_null()) der[k] = f_[k].forward(nfwd);
  }

  // Default case
  Function der_def;
  if (!f_def_.is_null()) der_def = f_def_.forward(nfwd);

  // New Switch for derivatives
  Function sw = Function::conditional("switch_" + name, der, der_def);

  // Get expressions for the derivative switch
  vector<MX> arg = sw.mx_in();
  vector<MX> res = sw(arg);

  // Ignore seed for ind
  arg.insert(arg.begin() + n_in_ + n_out_, MX(1, nfwd));

  // Create wrapper
  return Function(name, arg, res, inames, onames, opts);
}
// calculate per cell scores
static void mkscores(void) {
  int i, j, k, l, m;
  int *p;
  float f;

  for (i=0; i<nc; i++)
    for (j=0; j<nl; j++)
      cells[i].score[j] = 0.0;
  for (i=0; i<nw; i++) {
    m = words[i].length;
    p = words[i].flist;
    l = words[i].flistlen;
    // if(p==NULL) {p=dwds[words[i].length];l=dcount[words[i].length];} // default feasible word list
    if (words[i].fe) continue;
    if (afunique && words[i].commit>=0) { // avoid zero score if we've committed
      if (l==1)
        for (k=0; k<m; k++)
          words[i].c[k]->score[chartol[(int)lts[p[0]].s[k]]] += 1.0;
      else
        assert(l==0);
    } else {
      for (j=0; j<l; j++)
        if (!(afunique && isused(p[j]))) { // for each remaining feasible word
          f = ansp[lts[p[j]].ans]->score;
          for (k=0; k<m; k++)
            words[i].c[k]->score[chartol[(int)lts[p[j]].s[k]]] += f; // add in its score to this cell's score
        }
    }
  }
  for (i=0; i<ne; i++)
    for (j=0; j<nl; j++)
      entries[i].score[j] = 1.0;
  for (i=0; i<nc; i++) {
    // f=(float)lcount[cells[i].w->length];
    // if(f!=0.0) f=1.0/f;
    f = 1.0;
    for (j=0; j<nl; j++)
      cells[i].e->score[j] *= f*cells[i].score[j]; // copy scores to entries, scaled by total word count at this length
  }
  for (i=0; i<ne; i++) {
    f = -BIGF;
    for (j=0; j<nl; j++)
      f = MX(f, entries[i].score[j]);
    entries[i].crux = f; // crux at an entry is the greatest score over all possible letters
  }
}
/** \fn pre-open */
bool ADM_ffMpeg2Encoder::configureContext(void)
{
  switch (Settings.params.mode) {
    case COMPRESS_2PASS:
    case COMPRESS_2PASS_BITRATE:
      if (false==setupPass()) {
        printf("[ffmpeg] Multipass setup failed\n");
        return false;
      }
      break;
    case COMPRESS_SAME:
    case COMPRESS_CQ:
      _context->flags |= CODEC_FLAG_QSCALE;
      _context->bit_rate = 0;
      break;
    case COMPRESS_CBR:
      _context->bit_rate = Settings.params.bitrate*1000; // kb->b
      break;
    default:
      return false;
  }
  presetContext(&Settings);
  // Override some parameters specific to this codec
  // Set matrix if any...
#define MX(a,b,c) case a: _context->intra_matrix=b,_context->inter_matrix=c;break;
  switch (Mp2Settings.matrix) {
    MX(MPEG2_MATRIX_DEFAULT,NULL,NULL);
    MX(MPEG2_MATRIX_TMPGENC,tmpgenc_intra,tmpgenc_inter);
    MX(MPEG2_MATRIX_ANIME,anime_intra,anime_inter);
    MX(MPEG2_MATRIX_KVCD,kvcd_intra,kvcd_inter);
    default:
      ADM_error("unknown matrix type : %d\n",(int)Mp2Settings.matrix);
      ADM_assert(0);
      break;
  }
  _context->rc_buffer_size = Mp2Settings.lavcSettings.bufferSize*8*1024;
  _context->rc_buffer_size_header = Mp2Settings.lavcSettings.bufferSize*8*1024;
  _context->rc_initial_buffer_occupancy = _context->rc_buffer_size;
  _context->rc_max_rate = Mp2Settings.lavcSettings.maxBitrate*1000;
  _context->rc_max_rate_header = Mp2Settings.lavcSettings.maxBitrate*1000;
  // /Override some parameters specific to this codec
  return true;
}
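To make the matrix switch easier to read: each MX(...) line is a case label in disguise. Expanding the second one mechanically, per the #define above, gives:

  case MPEG2_MATRIX_TMPGENC:
    _context->intra_matrix=tmpgenc_intra,_context->inter_matrix=tmpgenc_inter;
    break;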
void Inverse::evalAdj(const std::vector<pv_MX>& adjSeed, const std::vector<pv_MX>& adjSens) {
  MX inv_X = shared_from_this<MX>();
  MX trans_inv_X = inv_X.T();
  for (int d=0; d<adjSeed.size(); ++d) {
    adjSens[d][0]->addToSum(-mul(trans_inv_X, mul(*adjSeed[d][0], trans_inv_X)));
    *adjSeed[d][0] = MX();
  }
}
void mark_node_id(DI *di, int node_index, int deep, size_t begin, size_t len) {
  di->all_node[node_index].id = node_index;
  di->all_node[node_index].deep = deep;
  di->all_node[node_index].begin = begin;
  di->all_node[node_index].len = len;
  di->the_deep = MX(di->the_deep, deep+1);
}
void Determinant::evalAdj(const std::vector<pv_MX>& adjSeed, const std::vector<pv_MX>& adjSens) {
  const MX& X = dep();
  MX det_X = shared_from_this<MX>();
  MX trans_inv_X = inv(X).T();
  for (int d=0; d<adjSeed.size(); ++d) {
    adjSens[d][0]->addToSum((*adjSeed[d][0]*det_X) * trans_inv_X);
    *adjSeed[d][0] = MX();
  }
}
MX SymbolicMX::join_primitives(std::vector<MX>::const_iterator& it) const {
  MX ret = *it++;
  if (ret.size()==size()) {
    return ret;
  } else {
    casadi_assert(ret.is_empty(true));
    return MX(size());
  }
}
// calculate M and B
static inline void calcMB() {
  // todo: the order of EZ(i, j+1) and EZ(i,j) may be reversed?
  for (int i=1; i<N_PX-1; i++) {
    for (int j=1; j<N_PY-1; j++) {
      double complex nowMx = MX(i,j); // keep the old value before updating
      Mx[ind(i,j)] = CMX(i,j)*MX(i,j) - CMXEZ(i,j)*(EZ(i,j+1) - EZ(i,j));
      Bx[ind(i,j)] = CBX(i,j)*BX(i,j) + CBXMX1(i,j)*MX(i,j) - CBXMX0(i,j)*nowMx;
    }
  }
  // todo: likewise for the order of EZ(i+1, j) and EZ(i,j)
  for (int i=1; i<N_PX-1; i++) {
    for (int j=1; j<N_PY-1; j++) {
      double complex nowMy = MY(i,j); // keep the old value before updating
      My[ind(i,j)] = CMY(i,j)*MY(i,j) - CMYEZ(i,j)*(-EZ(i+1,j) + EZ(i,j));
      By[ind(i,j)] = CBY(i,j)*BY(i,j) + CBYMY1(i,j)*MY(i,j) - CBYMY0(i,j)*nowMy;
    }
  }
}
Function SwitchInternal::getDerReverse(const std::string& name, int nadj, Dict& opts) {
  // Derivative of each case
  vector<Function> der(f_.size());
  for (int k=0; k<f_.size(); ++k) {
    if (!f_[k].isNull()) der[k] = f_[k].derReverse(nadj);
  }

  // Default case
  Function der_def;
  if (!f_def_.isNull()) der_def = f_def_.derReverse(nadj);

  // New Switch for derivatives
  stringstream ss;
  ss << "adj" << nadj << "_" << name_;
  Switch sw(ss.str(), der, der_def);

  // Construct wrapper inputs and arguments for calling sw
  vector<MX> arg = symbolicInput();
  vector<MX> res = symbolicOutput();
  vector<vector<MX> > seed = symbolicAdjSeed(nadj, res);
  vector<MX> w_in = arg;
  w_in.insert(w_in.end(), res.begin(), res.end());

  // Arguments for calling sw being constructed
  vector<MX> v;
  v.push_back(arg.at(0)); // index
  for (int d=0; d<nadj; ++d) {
    // Add to wrapper input
    w_in.insert(w_in.end(), seed[d].begin(), seed[d].end());

    // Add to sw argument vector
    v.insert(v.end(), arg.begin()+1, arg.end());
    v.insert(v.end(), res.begin(), res.end());
    v.insert(v.end(), seed[d].begin(), seed[d].end());
  }

  // Construct wrapper outputs
  casadi_assert(v.size()==sw.nIn());
  v = sw(v);
  vector<MX> w_out;
  MX ind_sens = MX(1, 1); // no dependency on index
  vector<MX>::const_iterator v_it = v.begin(), v_it_next;
  for (int d=0; d<nadj; ++d) {
    w_out.push_back(ind_sens);
    v_it_next = v_it + (nIn()-1);
    w_out.insert(w_out.end(), v_it, v_it_next);
    v_it = v_it_next;
  }

  // Create wrapper
  return MXFunction(name, w_in, w_out, opts);
}
void ClothoidPath::SetOffset(const CarModel& cm, double k, double t,
                             PathPt* l3, const PathPt* l2, const PathPt* l4)
{
  double marg = cm.WIDTH / 2 + 0.02; //1.0;//1.1
  double wl = -MN(m_maxL, l3->Wl()) + marg;
  double wr = MN(m_maxR, l3->Wr()) - marg;
  double buf = MN(1.5, 100 * fabs(k)); // a = v*v/r;

  if( k >= 0 ) // 0.00001 )
  {
    if( t < wl )
      t = wl;
    else if( t > wr - l3->rBuf - buf )
    {
      if( l3->offs > wr - l3->rBuf - buf )
        t = MN(t, l3->offs);
      else
        t = wr - l3->rBuf - buf;
      t = MN(t, wr);
    }
  }
  else //if( k < -0.00001 )
  {
    if( t > wr )
      t = wr;
    else if( t < wl + l3->lBuf + buf )
    {
      if( l3->offs < wl + l3->lBuf + buf )
        t = MX(t, l3->offs);
      else
        t = wl + l3->lBuf + buf;
      t = MX(t, wl);
    }
  }

  l3->offs = t;
  l3->pt = l3->CalcPt();
  l3->k = Utils::CalcCurvatureXY(l2->pt, l3->pt, l4->pt);
}
void write_xtc_traj(FILE *log,t_commrec *cr, char *xtc_traj,t_nsborder *nsb,t_mdatoms *md,
                    int step,real t,rvec *xx,matrix box,real prec)
{
  static bool bFirst=TRUE;
  static rvec *x_sel;
  static int natoms;
  int i,j;

  if ((bFirst) && MASTER(cr)) {
#ifdef DEBUG
    fprintf(log,"Going to open compressed trajectory file: %s\n",xtc_traj);
#endif
    xd = open_xtc(xtc_traj,"w"); /* xd: file-scope xtc handle, defined outside this snippet */

    /* Count the number of atoms in the selection */
    natoms = 0;
    for (i=0; (i<md->nr); i++)
      if (md->cXTC[i] == 0)
        natoms++;
    if (log)
      fprintf(log,"There are %d atoms in your xtc output selection\n",natoms);
    if (natoms != md->nr)
      snew(x_sel,natoms);
    bFirst = FALSE;
  }
  if (cr->nnodes > 1) {
    MX(xx);
  }
  if ((xx) && MASTER(cr)) {
    if (natoms == md->nr)
      x_sel = xx;
    else {
      /* We need to copy everything into a temp array */
      for (i=j=0; (i<md->nr); i++) {
        if (md->cXTC[i] == 0) {
          copy_rvec(xx[i],x_sel[j]);
          j++;
        }
      }
    }
    if (write_xtc(xd,natoms,step,t,box,x_sel,prec) == 0)
      fatal_error(0,"XTC error");
  }
}
void DrawWindowContent() {
  int w = nWWidth;
  int h = nWHeight;
  WSetColor(DARKGRAY);
  WFillRectangle(0, 0, w, h);
  WSetColor(BLACK);
  // axis
  WDrawLine(0, h / 2, w, h / 2);
  WDrawLine(w / 2, 0, w / 2, h);
  // arrows
  WDrawLine(w / 2, 0, w / 2 - 5, 5);
  WDrawLine(w / 2, 0, w / 2 + 5, 5);
  WDrawLine(w, h / 2, w - 5, h / 2 - 5);
  WDrawLine(w, h / 2, w - 5, h / 2 + 5);
  WSetColor(RED);
  WDrawString("Q=quit, F1..F4 -- change scale, F5/F6 -- change node count", 10, 20);

  double diff = 0;
  for (int i = 1; i <= N - 1; i++) {
    int k = i;
    if (i == 1) k = 2;
    if (i == N - 1) k = N - 2; // edge conditions
    int ss = int((X(i) - a) / (b - a) * w);
    int se = int((X(i + 1) - a) / (b - a) * w);
    for (int j = ss; j <= se; j++) {
      double t1 = (double(j) / double(w)) * (b - a) + a;
      double t2 = (double(j + 1) / double(w)) * (b - a) + a;
      WSetColor(GREEN);
      WDrawLine(MX(t1), MY(Par(k, t1)), MX(t2), MY(Par(k, t2)));
      WSetColor(BLUE);
      WDrawLine(MX(t1), MY(f(t1)), MX(t2), MY(f(t2)));
      if (diff < fabs(f(t1) - Par(k, t1)))
        diff = fabs(f(t1) - Par(k, t1));
    }
  }
  WSetColor(RED);
  for (int i = 1; i <= N; i++)
    WDrawLine(MX(X(i)), h / 2 - 3, MX(X(i)), h / 2 + 3);
  char str[256];
  WSetColor(RED);
  sprintf(str, "Difference: %1.20lf", diff);
  WDrawString(str, 10, 40);
}
void InnerProd::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                           MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                           bool output_given) {
  if (!output_given) {
    *output[0] = (*input[0])->getInnerProd(*input[1]);
  }

  // Forward sensitivities
  int nfwd = fwdSens.size();
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = (*input[0])->getInnerProd(*fwdSeed[d][1])
                   + (*fwdSeed[d][0])->getInnerProd(*input[1]);
  }

  // Adjoint sensitivities
  int nadj = adjSeed.size();
  for (int d=0; d<nadj; ++d) {
    adjSens[d][0]->addToSum(*adjSeed[d][0] * *input[1]);
    adjSens[d][1]->addToSum(*adjSeed[d][0] * *input[0]);
    *adjSeed[d][0] = MX();
  }
}
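This is the bilinear product rule for the inner product $z = \langle x, y\rangle$ (standard calculus, stated here for reference):

\[ \dot{z} = \langle \dot{x}, y\rangle + \langle x, \dot{y}\rangle, \qquad \bar{x} \mathrel{+}= \bar{z}\,y, \quad \bar{y} \mathrel{+}= \bar{z}\,x, \]

where $\bar{z}$ is the scalar adjoint seed broadcast over the matrix arguments.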
template<bool TrX, bool TrY>
void Multiplication<TrX,TrY>::evaluateMX(const MXPtrV& input, MXPtrV& output,
                                         const MXPtrVV& fwdSeed, MXPtrVV& fwdSens,
                                         const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                                         bool output_given) {
  if (!output_given)
    *output[0] = *input[0] + mul(tr<TrX>(*input[1]), tr<TrY>(*input[2]), (*input[0]).sparsity());

  // Forward sensitivities
  int nfwd = fwdSens.size();
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = *fwdSeed[d][0]
      + mul(tr<TrX>(*input[1]), tr<TrY>(*fwdSeed[d][2]), (*input[0]).sparsity())
      + mul(tr<TrX>(*fwdSeed[d][1]), tr<TrY>(*input[2]), (*input[0]).sparsity());
  }

  // Adjoint sensitivities
  int nadj = adjSeed.size();
  for (int d=0; d<nadj; ++d) {
    adjSens[d][1]->addToSum(tr<TrX>(mul(*adjSeed[d][0], tr<!TrY>(*input[2]),
                                        tr<TrX>(*input[1]).sparsity())));
    adjSens[d][2]->addToSum(tr<TrY>(mul(tr<!TrX>(*input[1]), *adjSeed[d][0],
                                        tr<TrY>(*input[2]).sparsity())));
    if (adjSeed[d][0]!=adjSens[d][0]) {
      adjSens[d][0]->addToSum(*adjSeed[d][0]);
      *adjSeed[d][0] = MX();
    }
  }
}
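The adjoint lines implement the standard reverse rule for a multiply-accumulate $Z = Z_0 + XY$ (written here for the non-transposed case; the tr<> helpers and the flipped !TrX/!TrY arguments handle the transposed variants):

\[ \bar{Z}_0 \mathrel{+}= \bar{Z}, \qquad \bar{X} \mathrel{+}= \bar{Z}\,Y^{\top}, \qquad \bar{Y} \mathrel{+}= X^{\top}\bar{Z}. \]

The aliasing check before accumulating into adjSens[d][0] avoids a self-add when the seed and the sensitivity point to the same node.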
void Diagcat::evaluateMX(const MXPtrV& input, MXPtrV& output, const MXPtrVV& fwdSeed,
                         MXPtrVV& fwdSens, const MXPtrVV& adjSeed, MXPtrVV& adjSens,
                         bool output_given) {
  int nfwd = fwdSens.size();
  int nadj = adjSeed.size();

  // Non-differentiated output
  if (!output_given) {
    *output[0] = diagcat(getVector(input));
  }

  // Forward sensitivities
  for (int d=0; d<nfwd; ++d) {
    *fwdSens[d][0] = diagcat(getVector(fwdSeed[d]));
  }

  // Quick return?
  if (nadj==0) return;

  // Get offsets for each row and column
  vector<int> offset1(ndep()+1, 0);
  vector<int> offset2(ndep()+1, 0);
  for (int i=0; i<ndep(); ++i) {
    int ncol = dep(i).sparsity().size2();
    int nrow = dep(i).sparsity().size1();
    offset2[i+1] = offset2[i] + ncol;
    offset1[i+1] = offset1[i] + nrow;
  }

  // Adjoint sensitivities: split the seed along the diagonal blocks
  for (int d=0; d<nadj; ++d) {
    MX& aseed = *adjSeed[d][0];
    vector<MX> s = diagsplit(aseed, offset1, offset2);
    aseed = MX();
    for (int i=0; i<ndep(); ++i) {
      adjSens[d][i]->addToSum(s[i]);
    }
  }
}
void OrthBoundingBox::AllDirectionDerivative(const Variable *x, Vector **dgfs,
                                             integer UpperBound, double threshold,
                                             integer &idxgf)
{
  integer *maxidx = new integer[d * 2];
  integer *minidx = maxidx + d;
  double *maxv = new double[d * 2];
  double *minv = maxv + d;
  integer idx = 0;
  bool findmax = true;
  double volume = 1;
  const double *xptr = x->ObtainReadData();
  double *XE = new double[d * n];
  Matrix MX(xptr, d, d), ME(E, d, n), MXE(XE, d, n);
  Matrix::DGEMM(1, MX, false, ME, false, 0, MXE);
  //ForDebug::Print("XE:", XE, d, n);//---
  RecursiveDirDeri(x, maxidx, minidx, maxv, minv, idx, volume, findmax, idxgf,
                   dgfs, UpperBound, XE, threshold);
  delete[] XE;
  delete[] maxidx;
  delete[] maxv;
}
MX GenericCall::projectArg(const MX& x, const Sparsity& sp, int i) {
  if (x.size()==sp.size()) {
    // Insert sparsity projection nodes if needed
    return project(x, sp);
  } else {
    // Different dimensions
    if (x.is_empty() || sp.is_empty()) { // NOTE: Too permissive?
      // Replace nulls with zeros of the right dimension
      return MX::zeros(sp);
    } else if (x.is_scalar()) {
      // Scalar argument means set all
      return MX(sp, x);
    } else if (x.size1()==sp.size2() && x.size2()==sp.size1() && sp.is_vector()) {
      // Transposed vector
      return projectArg(x.T(), sp, i);
    } else {
      // Mismatching dimensions
      casadi_error("Cannot create function call node: Dimension mismatch for argument "
                   << i << ". Argument has shape " << x.size()
                   << " but function input has shape " << sp.size());
    }
  }
}
void SimpleIndefDleInternal::init() {
  DleInternal::init();

  casadi_assert_message(!pos_def_,
    "pos_def option set to True: Solver only handles the indefinite case.");

  n_ = A_.size1();

  MX As = MX::sym("A", A_);
  MX Vs = MX::sym("V", V_);
  MX Vss = (Vs + Vs.T())/2;

  MX A_total = DMatrix::eye(n_*n_) - kron(As, As);
  MX Pf = solve(A_total, vec(Vss), getOption("linear_solver"));
  MX P = reshape(Pf, n_, n_);

  f_ = MXFunction(dleIn("a", As, "v", Vs),
                  dleOut("p", MX(P(output().sparsity()))));
  f_.init();

  casadi_assert(getNumOutputs()==f_.getNumOutputs());
  for (int i=0; i<getNumInputs(); ++i) {
    casadi_assert_message(input(i).sparsity()==f_.input(i).sparsity(),
      "Sparsity mismatch for input " << i << ":" << input(i).dimString()
      << " <-> " << f_.input(i).dimString() << ".");
  }
  for (int i=0; i<getNumOutputs(); ++i) {
    casadi_assert_message(output(i).sparsity()==f_.output(i).sparsity(),
      "Sparsity mismatch for output " << i << ":" << output(i).dimString()
      << " <-> " << f_.output(i).dimString() << ".");
  }
}
MXFunction vec(const FX& a_) {
  FX a = a_;

  // Pass null if input is null
  if (a.isNull()) return MXFunction();

  // Get the MX inputs, only used for shape
  const std::vector<MX>& symbolicInputMX = a.symbolicInput();

  // Vector-valued MX with the shape of vec(symbolicInputMX[i])
  std::vector<MX> symbolicInputMX_vec(a.getNumInputs());
  // The same inputs reshaped back to their original shape
  std::vector<MX> symbolicInputMX_vec_reshape(a.getNumInputs());

  // Apply the vec-transformation to the inputs
  for (int i=0; i<symbolicInputMX.size(); ++i) {
    std::stringstream s;
    s << "X_flat_" << i;
    symbolicInputMX_vec[i] = MX(s.str(), vec(symbolicInputMX[i].sparsity()));
    symbolicInputMX_vec_reshape[i] =
      trans(reshape(symbolicInputMX_vec[i], trans(symbolicInputMX[i].sparsity())));
  }

  // Call the original function with the vecced inputs
  std::vector<MX> symbolicOutputMX = a.call(symbolicInputMX_vec_reshape);

  // Apply the vec-transformation to the outputs
  for (int i=0; i<symbolicOutputMX.size(); ++i)
    symbolicOutputMX[i] = vec(symbolicOutputMX[i]);

  // Make a new function with the vecced inputs/outputs
  MXFunction ret(symbolicInputMX_vec, symbolicOutputMX);

  // Initialize it if a was initialized
  if (a.isInit()) ret.init();
  return ret;
}