static int load_history(void)
{
    if ((decomp(history_fn, &history, sizeof(history), 1) != 1) || (history.id != CURRENT_ID)) {
        memset(&history, 0, sizeof(history));

        history_v0_t v0;
        if ((decomp(history_fn, &v0, sizeof(v0), 1) != 1) || (v0.id != ID_V0)) {
            syslog(LOG_INFO, "Unable to load history, clearing...");
            history.id = CURRENT_ID;
            return 0;
        }
        else {
            // --- temp conversion ---
            // V0 -> V1
            history.id = CURRENT_ID;
            memcpy(history.daily, v0.daily, sizeof(history.daily));
            history.dailyp = v0.dailyp;
            memcpy(history.monthly, v0.monthly, sizeof(v0.monthly));    // v0 is just shorter
            history.monthlyp = v0.monthlyp;
        }
    }
    else {
        _dprintf("history loaded d=%d m=%d\n", history.dailyp, history.monthlyp);
    }
    return 1;
}
static int load_history(const char *fname)
{
    history_t hist;

    _dprintf("%s: fname=%s\n", __FUNCTION__, fname);

    if ((decomp(fname, &hist, sizeof(hist), 1) != 1) || (hist.id != CURRENT_ID)) {
        history_v0_t v0;
        if ((decomp(fname, &v0, sizeof(v0), 1) != 1) || (v0.id != ID_V0)) {
            _dprintf("%s: load failed\n", __FUNCTION__);
            return 0;
        }
        else {
            // --- temp conversion ---
            clear_history();

            // V0 -> V1
            history.id = CURRENT_ID;
            memcpy(history.daily, v0.daily, sizeof(history.daily));
            history.dailyp = v0.dailyp;
            memcpy(history.monthly, v0.monthly, sizeof(v0.monthly));    // v0 is just shorter
            history.monthlyp = v0.monthlyp;
        }
    }
    else {
        memcpy(&history, &hist, sizeof(history));
    }

    _dprintf("%s: dailyp=%d monthlyp=%d\n", __FUNCTION__, history.dailyp, history.monthlyp);
    return 1;
}
bool solvetest(const LinAlg::Matrix<real_type,n,n>& m, const LinAlg::Vector<real_type,n>& v)
{
    // compute a decomposition
    LinAlg::MatrixFactors<real_type,n,n,LinAlg::LUTag> decomp(m);

    // crude condition estimation
    real_type nrm = 0;
    real_type rnrm = 0;
    for (unsigned i = 0; i < rows(v); ++i) {
        nrm = max(nrm, norm1(m*Vector::unit(i, rows(v))));
        rnrm = max(rnrm, norm1(decomp.solve(Vector::unit(i, rows(v)))));
    }
    real_type cond = nrm*rnrm;

    // determine allowed tolerance from the condition number
    real_type rtol = sqrt(rows(v))*cond*10*Limits<real_type>::epsilon();
    real_type atol = sqrt(rows(v))*1e-5*Limits<real_type>::epsilon();

    // Check ...
    if (!equal(v, decomp.solve(m*v), rtol, atol)) {
        std::cerr << "Matrix solve test failed\n";
        std::cerr << "  condition number: " << cond << "\n";
        std::cerr << "  norm of the difference: " << norm(v - decomp.solve(m*v)) << std::endl;
        return false;
    }
    return true;
}
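A note on the "crude condition estimation" in the loop above (my reading, not stated in the source): since $A e_i$ is the $i$-th column of $A$ and `decomp.solve(e_i)` is the $i$-th column of $A^{-1}$, the two running maxima are the induced 1-norms, and `cond` is the 1-norm condition number that then scales the relative tolerance `rtol`:

\[
\|A\|_1 = \max_i \|A e_i\|_1, \qquad
\|A^{-1}\|_1 = \max_i \|A^{-1} e_i\|_1, \qquad
\kappa_1(A) = \|A\|_1 \, \|A^{-1}\|_1 .
\]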
python::object RGroupDecomp(python::object cores, python::object mols,
                            bool asSmiles = false, bool asRows = true,
                            const RGroupDecompositionParameters &options =
                                RGroupDecompositionParameters()) {
  RGroupDecompositionHelper decomp(cores, options);
  python::list unmatched;

  python::stl_input_iterator<ROMOL_SPTR> iter(mols), end;
  unsigned int idx = 0;
  while (iter != end) {
    if (!*iter) throw_value_error("reaction called with None reactants");
    if (decomp.Add(*(*iter)) == -1) {
      unmatched.append(idx);
    }
    ++iter;
    ++idx;
  }

  decomp.Process();
  if (asRows) {
    return make_tuple(decomp.GetRGroupsAsRows(asSmiles), unmatched);
  } else {
    return make_tuple(decomp.GetRGroupsAsColumn(asSmiles), unmatched);
  }
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    int i, n = 0;
    int N, *pM, *pD;
    double *pfM, *pfD;
    int nM, nD;

    if (nrhs != 2) {
        mexErrMsgTxt("Usage: moddecomp n M.\n");
    }

    N   = (int)*(mxGetPr(prhs[0]));
    nM  = mxGetN(prhs[1]);
    pfM = mxGetPr(prhs[1]);

    pM = mxCalloc(nM, sizeof(int));
    for (i = 0; i < nM; i++)
        pM[i] = (int)pfM[i];
    pD = mxCalloc(nM, sizeof(int));

    decomp(N, pM, pD, 0);

    plhs[0] = mxCreateDoubleMatrix(1, nM, mxREAL);
    pfD = mxGetPr(plhs[0]);
    for (i = 0; i < nM; i++)
        pfD[i] = (double)pD[i];

    mxFree(pM);
    mxFree(pD);
}
inline void GenEigsSolver<eT, SelectionRule, OpType>::retrieve_ritzpair()
{
    arma_extra_debug_sigprint();

    UpperHessenbergEigen<eT> decomp(fac_H);

    Col< std::complex<eT> > evals = decomp.eigenvalues();
    Mat< std::complex<eT> > evecs = decomp.eigenvectors();

    SortEigenvalue< std::complex<eT>, SelectionRule > sorting(evals.memptr(), evals.n_elem);
    std::vector<uword> ind = sorting.index();

    // Copy the ritz values and vectors to ritz_val and ritz_vec, respectively
    for(uword i = 0; i < ncv; i++)
    {
        ritz_val(i) = evals(ind[i]);
        ritz_est(i) = evecs(ncv - 1, ind[i]);
    }
    for(uword i = 0; i < nev; i++)
    {
        ritz_vec.col(i) = evecs.col(ind[i]);
    }
}
void DLL_EXPORT decompress(char *oldExeName, int exeSection, int dataOffset, char *newExeName, char *compressDll)
{
    // decompress compressed dbpro exe
    void* (*decomp)(void*, int);
    FILE *oldExe = fopen(oldExeName, "rb");     //compressed exe
    FILE *newExe = fopen(newExeName, "wb");     //uncompressed exe
    HANDLE lib = LoadLibrary(compressDll);      //compress.dll from compressed exe
    SIZE_T decompSize;
    int dataSize;
    long time;
    float seconds;
    void *buffer;

    decomp = (void* (*)(void*, int)) GetProcAddress(lib, "decompress_block");

    //get compressed data size
    fseek(oldExe, 0, SEEK_END);
    dataSize = ftell(oldExe) - dataOffset;
    fseek(oldExe, 0, SEEK_SET);

    //write exe section
    buffer = malloc(exeSection);
    fread(buffer, exeSection, 1, oldExe);
    fwrite(buffer, exeSection, 1, newExe);
    free(buffer);

    //load compressed data into buffer
    fseek(oldExe, dataOffset, SEEK_SET);
    buffer = malloc(dataSize);
    fread(buffer, dataSize, 1, oldExe);

    //decompress data
    time = GetTickCount();
    void *data = decomp(buffer, dataSize);
    time = GetTickCount() - time;
    seconds = time / 1000.0;
    free(buffer);

    data = GlobalLock((HGLOBAL) data);
    decompSize = GlobalSize((HGLOBAL) data);

    //write decompressed data
    fwrite(data, decompSize, 1, newExe);
    GlobalUnlock((HGLOBAL) data);

    //write extra data
    fwrite(extraData, 16, 1, newExe);

    //write exeSection size
    fwrite(&exeSection, 4, 1, newExe);

    //show decompression stats message box
    char msg[255];
    msg[0] = 0;
    sprintf(msg, "Decompress complete in %.2f seconds", seconds);
    MessageBox(GetActiveWindow(), msg, "dark_explorer", 0);

    FreeLibrary(lib);
    fclose(oldExe);
    fclose(newExe);
}
void decomp(int q, int *pM, int *pD, int n)
{
    int d, r;
    if (q > 0) {
        d = pM[n];
        r = q % d;
        q = (q - r) / d;
        pD[n] = r;
        decomp(q, pM, pD, ++n);
    }
}
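A small usage sketch of the mixed-radix expansion above (my own illustration; the caller sizes pM and pD, as in the drivers elsewhere in this collection): with radices {2, 3, 2} and q = 11, the recursion stores the remainders 11 % 2 = 1, 5 % 3 = 2, 1 % 2 = 1 and stops once the quotient reaches 0, so 11 = 1 + 2*(2 + 3*1).

/* hypothetical driver for the routine above */
int pM[3] = {2, 3, 2};      /* radices */
int pD[3] = {0, 0, 0};      /* receives the digits */
decomp(11, pM, pD, 0);      /* pD becomes {1, 2, 1} */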
Mat mapLogMat(const Mat& Crxy, config_SystemParameter *param)
{
    CV_Assert(Crxy.rows == param->numFeature && Crxy.cols == param->numFeature);

    SVD decomp(Crxy);
    cv::log(decomp.w, decomp.w);
    // static Mat::diag(Mat) overload: builds a diagonal matrix from the log-singular values
    Mat W = decomp.w.diag(decomp.w);
    Mat logA = decomp.u*W*decomp.vt;
    return logA;
}
Foam::labelList Foam::ptscotchDecomp::decompose
(
    const polyMesh& mesh,
    const pointField& points,
    const scalarField& pointWeights
)
{
    if (points.size() != mesh.nCells())
    {
        FatalErrorIn
        (
            "ptscotchDecomp::decompose(const pointField&, const scalarField&)"
        )   << "Can use this decomposition method only for the whole mesh"
            << endl
            << "and supply one coordinate (cellCentre) for every cell." << endl
            << "The number of coordinates " << points.size() << endl
            << "The number of cells in the mesh " << mesh.nCells()
            << exit(FatalError);
    }

//    // For running sequential ...
//    if (Pstream::nProcs() <= 1)
//    {
//        return scotchDecomp(decompositionDict_, mesh_)
//            .decompose(points, pointWeights);
//    }

    // Make Metis CSR (Compressed Storage Format) storage
    //   adjncy      : contains neighbours (= edges in graph)
    //   xadj(celli) : start of information in adjncy for celli
    CompactListList<label> cellCells;
    calcCellCells(mesh, identity(mesh.nCells()), mesh.nCells(), cellCells);

    // Decompose using default weights
    List<int> finalDecomp;
    decomposeZeroDomains
    (
        mesh.time().path()/mesh.name(),
        cellCells.m(),
        cellCells.offsets(),
        pointWeights,
        finalDecomp
    );

    // Copy back to labelList
    labelList decomp(finalDecomp.size());
    forAll(decomp, i)
    {
        decomp[i] = finalDecomp[i];
    }
    return decomp;
}
void EKF<X, Y, A, B, C, D>::update(G observation_func, H observation_jacobian, C observation, D noise_covariance) {
    X prev_state = this->mean;
    Y prev_covariance = this->covariance;

    e::Matrix3d jacobian = observation_jacobian(prev_state);
    this->residual = observation - observation_func(prev_state);
    this->residual_covariance = (jacobian * (prev_covariance * jacobian.transpose())) + noise_covariance;

    e::FullPivHouseholderQR<D> decomp(this->residual_covariance);  // Should we do M-P pseudoinverse here instead?
    // e::ColPivHouseholderQR<D> decomp(this->residual_covariance);  // Should we do M-P pseudoinverse here instead?
    e::Matrix3d inverse = decomp.inverse();

    e::Matrix3d temp_gain = (prev_covariance * (jacobian.transpose() * inverse));
    if (s::isfinite(temp_gain.norm()))
        this->gain = temp_gain;  // TODO Fixes numerical instability, but this could be better.

    this->mean = prev_state + (gain * this->residual);
    this->covariance = (e::Matrix3d::Identity() - (gain * jacobian)) * prev_covariance;
};
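For reference (standard EKF notation, not taken from this codebase), the correction step implemented above is

\[
y = z - h(\hat{x}), \qquad
S = H P H^{\top} + R, \qquad
K = P H^{\top} S^{-1}, \qquad
\hat{x} \leftarrow \hat{x} + K y, \qquad
P \leftarrow (I - K H)\,P,
\]

where $H$ is the observation Jacobian, $R$ the observation noise covariance, and $K$ the gain that the isfinite check guards against a badly conditioned $S$.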
Foam::labelList Foam::ptscotchDecomp::decompose
(
    const labelListList& globalCellCells,
    const pointField& cellCentres,
    const scalarField& cWeights
)
{
    if (cellCentres.size() != globalCellCells.size())
    {
        FatalErrorIn
        (
            "ptscotchDecomp::decompose(const pointField&, const labelListList&)"
        )   << "Inconsistent number of cells (" << globalCellCells.size()
            << ") and number of cell centres (" << cellCentres.size()
            << ")." << exit(FatalError);
    }

//    // For running sequential ...
//    if (Pstream::nProcs() <= 1)
//    {
//        return scotchDecomp(decompositionDict_, mesh)
//            .decompose(globalCellCells, cellCentres, cWeights);
//    }

    // Make Metis CSR (Compressed Storage Format) storage
    //   adjncy      : contains neighbours (= edges in graph)
    //   xadj(celli) : start of information in adjncy for celli
    CompactListList<label> cellCells(globalCellCells);

    // Decompose using weights
    List<int> finalDecomp;
    decomposeZeroDomains
    (
        "ptscotch",
        cellCells.m(),
        cellCells.offsets(),
        cWeights,
        finalDecomp
    );

    // Copy back to labelList
    labelList decomp(finalDecomp.size());
    forAll(decomp, i)
    {
        decomp[i] = finalDecomp[i];
    }
    return decomp;
}
void decomp(void)
{
    save();

    p2 = pop();
    p1 = pop();

    // is the entire expression constant?
    if (find(p1, p2) == 0) {
        push(p1);
        //push(p1);    // may need later for pushing both +a, -a
        //negate();
        restore();
        return;
    }

    // sum?
    if (isadd(p1)) {
        decomp_sum();
        restore();
        return;
    }

    // product?
    if (car(p1) == symbol(MULTIPLY)) {
        decomp_product();
        restore();
        return;
    }

    // naive decomp if not sum or product
    p3 = cdr(p1);
    while (iscons(p3)) {
        push(car(p3));
        push(p2);
        decomp();
        p3 = cdr(p3);
    }
    restore();
}
void eval_decomp(void)
{
    int h = tos;
    push(symbol(NIL));
    push(cadr(p1));
    eval();
    push(caddr(p1));
    eval();
    p1 = pop();
    if (p1 == symbol(NIL))
        guess();
    else
        push(p1);
    decomp();
    list(tos - h);
}
/*!
 * Run
 * \brief Generates the WSPD
 * \param Wsp Vector of WSPs storing the output
 * \param num_threads Number of threads to use (requires OPENMP)
 */
void run(std::vector<WSP<Point> > &Wsp, int num_threads = 1)
{
    int i;
    int tid = 0;
    std::vector<size_type> Wsp_size;
    std::vector<std::vector<WSP<Point> > > wsp;
    wsp.resize(num_threads);

#ifdef _OPENMP
    omp_set_num_threads(num_threads);
#pragma omp parallel private(i, tid) shared(wsp)
    {
        tid = omp_get_thread_num();
#pragma omp for schedule(static)
#endif
        for (i = 1; i < (int) cqtree.size()/2; ++i) {
            decomp(&cqtree[i], wsp[tid]);
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp single
        {
#endif
            Wsp_size.resize(num_threads);
            Wsp_size[0] = 0;
            for (i = 1; i < num_threads; ++i) {
                Wsp_size[i] = wsp[i-1].size() + Wsp_size[i-1];
            }
            Wsp.resize(Wsp_size[i-1] + wsp[i-1].size());
#ifdef _OPENMP
        } //End single section
#pragma omp for schedule(static,1)
#endif
        for (i = 0; i < num_threads; ++i) {
            memcpy(&(Wsp[Wsp_size[i]]), &(wsp[i][0]), sizeof(WSP<Point>)*wsp[i].size());
        }
#ifdef _OPENMP
    } //End parallel section
#endif
};
Foam::labelList Foam::scotchDecomp::decompose
(
    const labelListList& globalCellCells,
    const pointField& cc,
    const scalarField& cWeights
)
{
    if (cc.size() != globalCellCells.size())
    {
        FatalErrorIn
        (
            "scotchDecomp::decompose"
            "(const labelListList&, const pointField&, const scalarField&)"
        )   << "Inconsistent number of cells (" << globalCellCells.size()
            << ") and number of cell centres (" << cc.size()
            << ")." << exit(FatalError);
    }

    // Make Metis CSR (Compressed Storage Format) storage
    //   adjncy      : contains neighbours (= edges in graph)
    //   xadj(celli) : start of information in adjncy for celli
    List<int> adjncy;
    List<int> xadj;
    calcCSR(globalCellCells, adjncy, xadj);

    // Decompose using weights
    List<int> finalDecomp;
    decompose(adjncy, xadj, cWeights, finalDecomp);

    // Copy back to labelList
    labelList decomp(finalDecomp.size());
    forAll(decomp, i)
    {
        decomp[i] = finalDecomp[i];
    }
    return decomp;
}
/**
 * \fn tLUdcmp( double** mat , unsigned n )
 *
 * \brief Creates an instance of this object.
 *
 * \param mat A ( n x n ) matrix.
 * \param n The number of rows and columns of matrix mat.
 */
tLUdcmp::tLUdcmp(double** mat, unsigned n)
{
    m_nele = n;
    m_matA = allocate(m_nele);

    for (unsigned i = 0; i < m_nele; i++) {
        for (unsigned j = 0; j < m_nele; j++) {
            m_matA[i][j] = mat[i][j];
        }
    }

    m_perm.resize(m_nele);
    m_sign = 1;

    decomp();

    return;
}
int main(int argc, char **argv)
{
    int i, N;
    int *pM, *pD, nM;

    N = (int)atof(argv[1]);
    nM = argc - 2;
    pM = calloc(nM, sizeof(int));
    pD = calloc(nM, sizeof(int));
    for (i = 0; i < nM; i++)
        pM[i] = (int)atof(argv[i+2]);

    decomp(N, pM, pD, 0);

    free(pM);
    free(pD);
    return 0;
}
Foam::labelList Foam::metisDecomp::decompose
(
    const pointField& points,
    const scalarField& pointWeights
)
{
    if (points.size() != mesh_.nCells())
    {
        FatalErrorIn
        (
            "metisDecomp::decompose(const pointField&,const scalarField&)"
        )   << "Can use this decomposition method only for the whole mesh"
            << endl
            << "and supply one coordinate (cellCentre) for every cell." << endl
            << "The number of coordinates " << points.size() << endl
            << "The number of cells in the mesh " << mesh_.nCells()
            << exit(FatalError);
    }

    List<int> adjncy;
    List<int> xadj;
    scotchDecomp::calcCSR
    (
        mesh_,
        adjncy,
        xadj
    );

    // Decompose using default weights
    List<int> finalDecomp;
    decompose(adjncy, xadj, pointWeights, finalDecomp);

    // Copy back to labelList
    labelList decomp(finalDecomp.size());
    forAll(decomp, i)
    {
        decomp[i] = finalDecomp[i];
    }
    return decomp;
}
void testSingle(std::string const & s0)
{
    uint64_t const l0 = s0.size();
    libmaus2::autoarray::AutoArray<char> C(s0.size(), false);

    for ( uint64_t bs = 1; bs <= 16; ++bs )
        for ( uint64_t b_0 = 0; b_0 <= l0; ++b_0 )
            for ( uint64_t b_1 = 0; b_1 <= l0-b_0; ++b_1 )
            {
                std::ostringstream o_0;
                libmaus2::lz::ZlibCompressorObjectFactory scompfact;
                libmaus2::lz::SimpleCompressedOutputStream<std::ostream> lzout_0(o_0, scompfact, bs);

                lzout_0.write(s0.c_str(), b_0);
                std::pair<uint64_t,uint64_t> start_0 = lzout_0.getOffset();
                lzout_0.write(s0.c_str()+b_0, b_1);
                // std::pair<uint64_t,uint64_t> end_0 = lzout_0.getOffset();
                lzout_0.write(s0.c_str()+b_0+b_1, s0.size()-(b_0+b_1));
                lzout_0.flush();

                std::string const u0 = s0.substr(b_0, b_1);

                std::istringstream i_0(o_0.str());
                libmaus2::lz::ZlibDecompressorObjectFactory decompfact;
                libmaus2::lz::SimpleCompressedInputStream<std::istream> decomp(i_0, decompfact, start_0);

                uint64_t j = 0;
                for ( uint64_t i = 0; i < u0.size(); ++i )
                {
                    int const c = decomp.get();
                    assert ( c == u0[j++] );
                }
                // uint64_t const r = decomp.read(C.begin(),l0-(b_0+b_1));
                assert ( decomp.get() == std::istream::traits_type::eof() );
            }
}
Foam::labelList Foam::scotchDecomp::decompose
(
    const pointField& points,
    const scalarField& pointWeights
)
{
    if (points.size() != mesh_.nCells())
    {
        FatalErrorIn
        (
            "scotchDecomp::decompose(const pointField&, const scalarField&)"
        )   << "Can use this decomposition method only for the whole mesh"
            << endl
            << "and supply one coordinate (cellCentre) for every cell." << endl
            << "The number of coordinates " << points.size() << endl
            << "The number of cells in the mesh " << mesh_.nCells()
            << exit(FatalError);
    }

    // Make Metis CSR (Compressed Storage Format) storage
    //   adjncy      : contains neighbours (= edges in graph)
    //   xadj(celli) : start of information in adjncy for celli
    List<int> adjncy;
    List<int> xadj;
    calcCSR(mesh_, adjncy, xadj);

    // Decompose using default weights
    List<int> finalDecomp;
    decompose(adjncy, xadj, pointWeights, finalDecomp);

    // Copy back to labelList
    labelList decomp(finalDecomp.size());
    forAll(decomp, i)
    {
        decomp[i] = finalDecomp[i];
    }
    return decomp;
}
void MassDecompositionAlgorithm::getDecompositions(vector<MassDecomposition> & decomps, double mass)
{
    double tolerance((double) param_.getValue("tolerance"));

    ims::RealMassDecomposer::decompositions_type decompositions = decomposer_->getDecompositions(mass, tolerance);

    for (ims::RealMassDecomposer::decompositions_type::const_iterator pos = decompositions.begin(); pos != decompositions.end(); ++pos)
    {
        String d;
        for (ims::IMSAlphabet::size_type i = 0; i < alphabet_->size(); ++i)
        {
            if ((*pos)[i] > 0)
            {
                d += alphabet_->getName(i) + String((*pos)[i]) + " ";
            }
        }
        d.trim();
        MassDecomposition decomp(d);
        decomps.push_back(decomp);
    }
    return;
}
void decomp_product(void)
{
    int h;

    // decomp factors involving x
    p3 = cdr(p1);
    while (iscons(p3)) {
        if (find(car(p3), p2)) {
            push(car(p3));
            push(p2);
            decomp();
        }
        p3 = cdr(p3);
    }

    // multiply together all constant factors
    h = tos;
    p3 = cdr(p1);
    while (iscons(p3)) {
        if (find(car(p3), p2) == 0)
            push(car(p3));
        p3 = cdr(p3);
    }
    if (tos - h) {
        multiply_all(tos - h);
        //p3 = pop();    // may need later for pushing both +a, -a
        //push(p3);
        //push(p3);
        //negate();
    }
}
void decomp_sum(void)
{
    int h;

    // decomp terms involving x
    p3 = cdr(p1);
    while (iscons(p3)) {
        if (find(car(p3), p2)) {
            push(car(p3));
            push(p2);
            decomp();
        }
        p3 = cdr(p3);
    }

    // add together all constant terms
    h = tos;
    p3 = cdr(p1);
    while (iscons(p3)) {
        if (find(car(p3), p2) == 0)
            push(car(p3));
        p3 = cdr(p3);
    }
    if (tos - h) {
        add_all(tos - h);
        p3 = pop();
        push(p3);
        push(p3);
        negate();    // need both +a, -a for some integrals
    }
}
qryproc()
{
    register QTREE  *root;
    register QTREE  *q;
    register int    i;
    register int    mode;
    register int    result_num;
    register int    retr_uniq;
    extern long     AAccuread;
    extern long     AAccuwrite;
    extern long     AAccusread;
    extern int      derror();
    extern QTREE    *trbuild();
    extern QTREE    *readqry();

#   ifdef xDTM
    if (AAtTf(76, 1))
        timtrace(23, 0);
#   endif
#   ifdef xDTR1
    if (AAtTf(50, 0))
        AAccuread = AAccusread = AAccuwrite = 0;
#   endif

    /* initialize query buffer */
    initbuf(Qbuf, QBUFSIZ, QBUFFULL, derror);

    /* init various variables in decomp for start of this query */
    startdecomp();

    /* Read in query, range table and mode */
    root = readqry();
    mode = Qmode;

    /* Initialize relation descriptors */
    initdesc(mode);

    /* re-build the tree */
    root = trbuild(root);
    if (!root)
        derror(STACKFULL);

    /* locate pointers to QLEND and TREE nodes */
    for (q = root->right; q->sym.type != QLEND; q = q->right)
        continue;
    Qle = q;
    for (q = root->left; q->sym.type != TREE; q = q->left)
        continue;
    Tr = q;

    /* map the complete tree */
    mapvar(root, 0);

    /* set logical locks */
    lockit(root, Resultvar);

    /* If there is no result variable then this must be a retrieve to the terminal */
    Qry_mode = Resultvar < 0 ? (int) mdRETTERM : mode;

    /* if the mode is retrieve_unique, then make a result rel */
    retr_uniq = mode == (int) mdRET_UNI;

    if (retr_uniq) {
        mk_unique(root);
        mode = (int) mdRETR;
    }

    /* get id of result relation */
    if (Resultvar < 0)
        result_num = NORESULT;
    else
        result_num = Rangev[Resultvar].relnum;

    /* evaluate aggregates in query */
    aggregate(root);

    /* decompose and process aggregate free query */
    decomp(root, mode, result_num);

    /* If this is a retrieve unique, then retrieve results */
    if (retr_uniq)
        pr_unique(root, Resultvar);

    if (mode != (int) mdRETR)
        i = ACK;
    else
        i = NOACK;
    i = endovqp(i);

    /* call update processor if batch mode */
    if (i == UPDATE) {
        initp();
        call_dbu(mdUPDATE, -1);
    }

    /*
    ** send eop back to parser to indicate completion
    ** if UPDATE then return block comes from dbu else
    ** return block comes from decomp
    */
    writeback(i == UPDATE ? -1 : 1);

#   ifdef xDTM
    if (AAtTf(76, 1))
        timtrace(24, 0);
#   endif
#   ifdef xDTR1
    AAtTfp(50, 1, "DECOMP read %ld pages,%ld catalog pages,wrote %ld pages\n",
        AAccuread, AAccusread, AAccuwrite);
#   endif

    /* clean decomp */
    reinit();

    /* return */
}
//void crosearch( int opt_value){
//float crosearch( int opt_value){
int crosearch(){
    // initialization
    currentPopSize = 1;
    MoleColl = 0.9;
    initialKE = 1000;
    // the tolerance
    be = 50;// sprint
    be = 5;
    buffer = 500;
    KElossRate = 0.5;
    al = 100000; //sprint
    al = 20000; //sprint
    //al = 10000; //sprint
    //al = 500000; //medium
    local_num = 0;
    generation_num = 500000000;
    // generation_num = 1;
    optiValueGlo = 0;

    //static FILE *decomp_fout = fopen("decomp_show.txt", "w+");

    struct Molecule MoleculePop[popSizeMax] = {0};
    // create a larger holder
    //char optiHolder[10][NURSE_NUM][DAY_NUM] = {0}; // for h distance
    //optiSolution[NURSE_NUM][DAY_NUM] = {0};

    // ================ initial every mole =================
    for(int moleidx = 0; moleidx < currentPopSize; moleidx++) // initalize the small size :10
    {
        initMole(&MoleculePop[moleidx]);
        //solutionShow(MoleculePop[moleidx].mole);
        printf("\n");
    }
    memset(optiSolution, 0, sizeof(optiSolution));// reset opti

    // =================== search ===================
    int optiFit = 10000;
    int optiFitForLog = 10000;
    start = clock();

    //for (int generationIndex = 0; generationIndex <0 ; generationIndex++){
    for (int generationIndex = 0; generationIndex < generation_num; generationIndex++){
    //for (int generationIndex = 0; generationIndex < 1; generationIndex++){
        double b = (double)rand()/RAND_MAX;
        if( b > MoleColl || currentPopSize == 1){// =================== uni reaction ======================
            int selectedIdx = rand()% (currentPopSize);
            if (MoleculePop[selectedIdx].NumHit - MoleculePop[selectedIdx].MinHit > al ){
                //for(int moleidx = 0; moleidx < currentPopSize; moleidx++) // initalize the small size :10
                //    resetEnergy(&MoleculePop[moleidx]);
                //solutionShow(MoleculePop[moleidx].mole);
                //printf("\n");
                //int holder = 1;
                decomp(selectedIdx, MoleculePop);
            }
            else
                onwall(selectedIdx, MoleculePop);
        }
        else{//=================== inter reaction ===================
            int selectedIdx1 = rand()% (currentPopSize);
            int selectedIdx2 = rand()% (currentPopSize);
            while ( selectedIdx2 == selectedIdx1 )
                selectedIdx2 = rand()% (currentPopSize);
#ifdef intell_change
            // use this condition to let this reaction as effective as the onwall
            if(MoleculePop[selectedIdx1].NumHit - MoleculePop[selectedIdx1].MinHit > (al/100) ||
               MoleculePop[selectedIdx2].NumHit - MoleculePop[selectedIdx2].MinHit > (al/100)){
#endif
#ifndef intell_change
            if(MoleculePop[selectedIdx1].KE <= be && MoleculePop[selectedIdx2].KE <= be){
#endif
            //if(MoleculePop[selectedIdx1].KE > be && MoleculePop[selectedIdx2].KE > be){
                //printf("KE are %d %d\n",MoleculePop[selectedIdx1].KE,MoleculePop[selectedIdx2].KE);
                //intcolli(selectedIdx1,selectedIdx2);
                //if(findMinEp() < 65)
                synthe(selectedIdx1,selectedIdx2, MoleculePop);
            }
            else
                intcolli(selectedIdx1,selectedIdx2,MoleculePop);
        }

        // logging the pop
        //if( findMinEp(MoleculePop) < optiFitForLog || currentPopSize >1){
        ////if( findMinEp() < optiFitForLog ){
        //    //printf("what the \n");
        //    //showPop(MoleculePop);
        //
        //    optiFitForLog = findMinEp(MoleculePop);
        //    if(currentPopSize >1){
        //        optiFitForLog = 10000;
        //    }
        //}

        optiValueGlo = findMinEp(MoleculePop);
        if ( findMinEp(MoleculePop) < optiFit ){
            optiFit = findMinEp(MoleculePop);
            //printf("min is %d \n", optiFit);
            printf("min is %d buffer is %d wall %d decom %d syn %d int %d\n", optiFit, buffer,
                   onwallNum, decompNum, synthNum, intcollNum);
            //showPenaltySpec();
            solCopy(optiSolution, MoleculePop[findMinIdx(MoleculePop)].MinStruct);
            solutionStandardOutputFile(optiSolution);

            //finish = clock();
            //duration = (double)(finish - start) / CLOCKS_PER_SEC;
            //printf( " %1.2f sec\n", duration );

            // for evaluator test
            //if( optiFit == opt_value){
            //    //solutionOutput(optiSolution);
            //    printf("%d \n", evaluate(optiSolution));
            //    solutionRosterShowScreen(optiSolution);
            //    showPenaltySpec();
            //    solutionStandardOutputFile(optiSolution);
            //
            //    finish = clock();
            //    duration = (double)(finish - start) / CLOCKS_PER_SEC;
            //    printf( "%f seconds\n", duration );
            //
            //    printf("\n%s",instAttri.instanceName);
            //    return duration;
            //    break;
            //}
        }

        finish = clock();
        duration = (double)(finish - start) / CLOCKS_PER_SEC;
        if ( duration > 8.3){
            printf("min is %d \n", optiFit);
            return optiFit;
            //break;
        }

#ifdef debug_getchar
        //if( findMinEp(MoleculePop) < 80 ){
            getchar();
        //}
#endif
    } // search of generations

    //fclose(decomp_fout);
}

void experiment(){
    char instance_name[200][30] = {
        "holder",
        "instance\\sprint01.txt","instance\\sprint02.txt","instance\\sprint03.txt","instance\\sprint04.txt",
        "instance\\sprint05.txt","instance\\sprint06.txt","instance\\sprint07.txt","instance\\sprint08.txt",
        "instance\\sprint09.txt","instance\\sprint10.txt",
        "instance\\sprint_late01.txt","instance\\sprint_late02.txt","instance\\sprint_late03.txt",
        "instance\\sprint_late04.txt","instance\\sprint_late05.txt","instance\\sprint_late06.txt",
        "instance\\sprint_late07.txt","instance\\sprint_late08.txt","instance\\sprint_late09.txt",
        "instance\\sprint_late10.txt",
        "instance\\sprint_hidden01.txt","instance\\sprint_hidden02.txt","instance\\sprint_hidden03.txt","instance\\sprint_hidden04.txt",
        "instance\\sprint_hidden05.txt",
        "instance\\medium01.txt","instance\\medium02.txt","instance\\medium03.txt","instance\\medium04.txt",
        "instance\\medium05.txt",
        "instance\\medium_late01.txt","instance\\medium_late02.txt","instance\\medium_late03.txt","instance\\medium_late04.txt",
        "instance\\medium_late05.txt",
        "instance\\medium_hidden01.txt","instance\\medium_hidden02.txt","instance\\medium_hidden03.txt","instance\\medium_hidden04.txt",
        "instance\\medium_hidden05.txt",
    };

    int opti_array[50] = {
        0,
        56,58,51,59,58,54,56,56,55,52,    // sprint 1 ~ 10; difficult: #4,#7,#10
        37,42,48,73,44,42,42,17,17,43,    // sprint late 11 ~ 20; easy:18,19, almost diff
        33,32,62,67,59,                   // sprint hidden 21-25: tend to trap into the near-optimal
        240,240,236,237,303,              // medium 26 ~ 30
        158,18,29,35,107,                 // medium late 31 ~ 35
        130,221,36,80,122,                // medium hidden 36 ~ 40
    };

    //float time_found = 0;

    // single test
    int sprint_id = 13;
    Load(instance_name[sprint_id]);
    printf("value is %d", crosearch());
    getchar();

    //FILE *file_result = fopen("test_result.txt", "w+");
    //for (int instance_id = 1; instance_id <= 25; instance_id++){
    //    int run_num = 5;
    //    int max_value = 0;
    //    int min_value = 1000;
    //    float total_value = 0;
    //    //float avg_value = 0;
    //    Load(instance_name[instance_id]);
    //    for(int i = 0; i < run_num; i++){
    //        int t = crosearch();
    //        //printf("value is %d", t);
    //        if( t < min_value) min_value = t;
    //        if( t > max_value) max_value = t;
    //        total_value += t;
    //    }
    //    fprintf(file_result,"%s: opti %d max %d, min %d, avg %1.2f\n",instance_name[instance_id],
    //        opti_array[instance_id],max_value,min_value,total_value/run_num);
    //    //printf("%s: max %d, min %d, avg %1.2f\n",instance_name[instance_id],max_value,
    //    //    min_value,total_value/run_num);
    //}
    //
    //fclose(file_result);
    //getchar();

    // bat test
    //int run_num = 20;
    //float min_time = 100000;
    //float max_time = 0;
    //
    ////FILE *experiment_fout = fopen("experiment.txt", "w");
    //FILE *experiment_fout = fopen("test_dec_neigh_sp03_nurseShakingdecomp.txt", "w");
    //
    //for( int sprint_id = 3 ; sprint_id < 4 ; sprint_id++){
    //
    //    time_found = 0;
    //
    //    Load(instance_name[sprint_id]);
    //
    //    for(int i = 0; i < run_num; i++){
    //        int t = crosearch(opti_array[sprint_id]);
    //        if( t < min_time) min_time = t;
    //        if( t > max_time) max_time = t;
    //        time_found += t;
    //    }
    //    //printf("mean is %f", time_found/time_test);
    //    fprintf(experiment_fout,"inst id is %s, run num is %d, mean time is %3.1f, min is %3.1f, max is %3.1f\n",
    //        instance_name[sprint_id],run_num,time_found/run_num, min_time, max_time);
    //}
    //fclose(experiment_fout);
}
void test_solver(BfmSolver solver)
{
  g5dParams parms;

  int Ls = 16;
  double M5 = 1.8;
  double mq = 0.0001;
  double wilson_lo = 0.05;
  double wilson_hi = 6.8;
  double shamir_lo = 0.025;
  double shamir_hi = 1.7;
  double ht_scale = 1.7;
  double hw_scale = 1.0;

  if ( solver != DWF ) {
    exit(0);
    Printf("Should be testing HtCayleyTanh aka DWF\n");
  }
  parms.pDWF(mq,M5,Ls);

  multi1d<LatticeColorMatrix> u(4);
  HotSt(u);
  // ArchivGauge_t Header ;
  // readArchiv(Header,u,"ckpoint_lat.3000");

  multi1d<LatticeFermion> src(Ls);

  /* Rudy calculate some eigenvectors */

  BfmWrapperParams BWP;
  BWP.BfmInverter  = BfmInv_CG;
  BWP.BfmMatrix    = BfmMat_M;
  BWP.BfmPrecision = Bfm64bit;
  BWP.MaxIter      = 10000;
  BWP.RsdTarget.resize(1);
  BWP.RsdTarget[0] = 1.0e-9;
  BWP.Delta = 1.0e-4;
  BWP.BAP = parms;
  BfmWrapper bfm(BWP);

  bfmarg bfma;
#if defined(QDP_USE_OMP_THREADS)
  bfma.Threads(omp_get_max_threads());
#else
  bfma.Threads(16);
#endif
  bfma.Verbose(0);

  //Physics parameters
  bfmActionParams *bfmap = (bfmActionParams *) &bfma;
  *bfmap = bfm.invParam.BAP;

  // Algorithm & code control
  bfma.time_report_iter = -100;
  bfma.max_iter = bfm.invParam.MaxIter;
  bfma.residual = toDouble(bfm.invParam.RsdTarget[0]);

  int lx = QDP::Layout::subgridLattSize()[0];
  int ly = QDP::Layout::subgridLattSize()[1];
  int lz = QDP::Layout::subgridLattSize()[2];
  int lt = QDP::Layout::subgridLattSize()[3];

  //Geometry
  bfma.node_latt[0] = lx;
  bfma.node_latt[1] = ly;
  bfma.node_latt[2] = lz;
  bfma.node_latt[3] = lt;

  multi1d<int> procs = QDP::Layout::logicalSize();
  for(int mu=0;mu<4;mu++){
    if (procs[mu]>1) bfma.local_comm[mu] = 0;
    else             bfma.local_comm[mu] = 1;
  }

  // Bfm object
  bfm_qdp<double> bfm_eig;
  bfm_eig.init(bfma);

  //Gauge field import
  bfm_eig.importGauge(u);

  //Subspace
#define NumberGaussian (1)
  Fermion_t subspace[NumberGaussian];
  Fermion_t check;
  Fermion_t mp;
  Fermion_t mmp;
  Fermion_t tmp_t;

  check = bfm_eig.allocFermion();
  mp    = bfm_eig.allocFermion();
  mmp   = bfm_eig.allocFermion();
  tmp_t = bfm_eig.allocFermion();
  bfm_eig.importFermion(src,check,1);

  QDPIO::cout << "Ls = "<<Ls<<endl;

  for(int g=0;g<NumberGaussian;g++){
    for(int s=0;s<Ls;s++){
      gaussian(src[s]);
    }
    subspace[g]=bfm_eig.allocFermion();
    bfm_eig.importFermion(src,subspace[g],1);  // Half parity gaussian
    if ( g==0) {
      bfm_eig.importFermion(src,check,1);
    }
    for(int s=0;s<Ls;s++){
      src[s]=zero;
    }
    bfm_eig.exportFermion(src,subspace[g],1);
    QDPIO::cout << "Subspace norm " << norm2(src)<<endl;
  }
  for(int s=0;s<Ls;s++){
    gaussian(src[s]);
  }
  QDPIO::cout << "Got here " << endl;

  // Handle< LinearOperatorArray<T> > linop =GetLinOp(u, parms);
  int block[5];
  for(int i=0;i<5;i++) block[i]=4;

  QDPIO::cout << "Initialised dirac op"<<endl;

  BfmLittleDiracOperator ldop(Ls,NumberGaussian,block,subspace,&bfm_eig);

  int ns = ldop.SubspaceDimension();
  QDPIO::cout << "subspace dimension is "<< ns<<endl;
  ns = ldop.SubspaceLocalDimension();
  QDPIO::cout << "subspace dimension per node is "<< ns<<endl;

  std::vector<std::complex<double> > decomp(ns);
  ldop.ProjectToSubspace(check,decomp);
  if (QMP_is_primary_node()){
    FILE * fp = fopen("coeff.dat","w");
    for(int s=0;s<ns;s++){
      fprintf(fp,"coeff %d %le %le\n",s,real(decomp[s]),imag(decomp[s]));
    }
    fclose(fp);
  }
  for(int s=0;s<ns;s++){
    QDPIO::cout << "coeff "<<s<<" " << real(decomp[s]) << " " << imag(decomp[s])<<endl;
  }
  ldop.PromoteFromSubspace(decomp,mp);

  double n;
#pragma omp parallel
  {
    omp_set_num_threads(bfm_eig.nthread);
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      bfm_eig.axpy(check,mp,check,-1);
      n = bfm_eig.norm(check);
    }
  }
  QDPIO::cout << "project/promote n2diff "<< n<<endl;

  QMP_barrier();
  QDPIO::cout << "Computing little dirac matrix"<<endl;
  ldop.ComputeLittleMatrixColored();
  QDPIO::cout << "Done"<<endl;

  std::vector<std::complex<double> > Aphi(ns);

  // phi^dag DdagD phi = |Dphi|^2 with phi a subspace vector
  // should be equal to Project/Apply/Promote + inner product
#pragma omp parallel
  {
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      bfm_eig.Mprec(subspace[0],mp,tmp_t,0);
    }
  }
  QDPIO::cout << "Applied BFM matrix "<<endl;

  double n2;
#pragma omp parallel
  {
    omp_set_num_threads(bfm_eig.nthread);
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      n2 = bfm_eig.norm(mp);
    }
  }
  QDPIO::cout << "Applied BFM matrix "<<n2<<endl;

  ldop.ProjectToSubspace(subspace[0],decomp);
  QDPIO::cout << "Projected to subspace "<<endl;
  ldop.Apply(decomp,Aphi);
  QDPIO::cout << "Applied A "<<endl;
  ldop.PromoteFromSubspace(Aphi,check);
  QDPIO::cout << "Promoted "<<endl;

  complex<double> inn;
#pragma omp parallel
  {
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      inn = bfm_eig.inner(subspace[0],check);
    }
  }
  QDPIO::cout << "phi^dag Ddag D phi check " << n2 << " " <<real(inn) << imag(inn) <<endl;

  std::vector<std::complex<double> > AinvAphi(ns);
  ldop.ProjectToSubspace(subspace[0],decomp);
  ldop.Apply(decomp,Aphi);
  for(int s=0;s<ns;s++){
    QDPIO::cout << "Aphi "<<s<<" " << real(Aphi[s]) <<" " << imag(Aphi[s])<<endl;
  }
  ldop.PromoteFromSubspace(Aphi,check);

#pragma omp parallel
  {
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      bfm_eig.Mprec(subspace[0],mp,tmp_t,0);
      bfm_eig.Mprec(mp,mmp,tmp_t,1);
    }
  }
  ldop.ProjectToSubspace(mmp,decomp);
  ldop.PromoteFromSubspace(decomp,mmp);
#pragma omp parallel
  {
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      bfm_eig.axpy(check,mmp,check,-1.0);
      n2 = bfm_eig.norm(check);
    }
  }
  QDPIO::cout << "PMdagMP check n2diff "<< n2<<endl;

  QMP_barrier();
  QDPIO::cout << "Applying inverse"<<endl;
  ldop.ApplyInverse(Aphi,AinvAphi);
  QMP_barrier();
  for(int s=0;s<ns;s++){
    QDPIO::cout << "AinvAphi "<<s<<" " << real(AinvAphi[s]) << " " << imag(AinvAphi[s])<<endl;
  }
  ldop.PromoteFromSubspace(AinvAphi,check);
#pragma omp parallel
  {
#pragma omp for
    for(int t=0;t<bfm_eig.nthread;t++) {
      bfm_eig.axpy(check,subspace[0],check,-1.0);
      n2 = bfm_eig.norm(check);
    }
  }
  QDPIO::cout << "AinvA check n2diff "<< n2<<endl;
}
bool FakedSeeding::fitXProjection(PrSeedTrack * track)
{
    float mat[6];
    float rhs[3];
    //std::fill(rhs,rhs+3,0);
    std::vector<Hit> Hits = track->hits();

    for(int loop = 0; 3 > loop; ++loop)
    {
        std::fill(mat, mat+6, 0.);
        std::fill(rhs, rhs+3, 0.);
        for( int i = 0; i < Hits.size(); i++ )
        {
            const float w = Hits[i].w2();  //squared
            //std::cout<<"W\t"<<w<<std::endl;
            const float dz = Hits[i].GetZ() - m_zReference;
            float deta = 0;
            deta = dz*dz*(1-m_dRatio*dz);
            Hit *hit = new Hit(Hits[i].GetX(), Hits[i].GetY(), Hits[i].GetZ());
            float dist = track->distance(hit);
            //always()<<"Loop \t"<<loop<<"\n Distance From Hit \t"<<dist<<endmsg;
            // if(loop>0)
            //     dist = track.distance( *itH ); //try the effect
            mat[0] += w;
            mat[1] += w * dz;
            mat[2] += w * dz * dz;
            mat[3] += w * deta;
            mat[4] += w * dz * deta;
            mat[5] += w * deta * deta;
            //right hand side
            rhs[0] += w * dist;
            rhs[1] += w * dist * dz;
            rhs[2] += w * dist * deta;
        }
        ROOT::Math::CholeskyDecomp<float,3> decomp(mat);
        if(!decomp)
        {
            //std::cout<<"Failed to decompose matrix"<<std::endl;
            return false;
        }
        decomp.Solve(rhs);
        track->updateParameters(rhs[0], rhs[1], rhs[2], 0., 0.);
    }

    float chi2_track = 0.;
    float maxChi2 = 0.;
    float maxDistance = 0.;
    // int i = 0; unused

    //compute Chi2 of the fitted track and the single Hit chi2
    for ( int i = 0; i < Hits.size(); i++ )
    {
        Hit *hit = new Hit(Hits[i].GetX(), Hits[i].GetY(), Hits[i].GetZ());
        hit->SetW2(Hits[i].w2());
        float distance = track->distance(hit);
        float chi2_onHit = track->chi2( hit );  //\frac{dist^{2}}{\sigma^{2}}
        if (chi2_onHit > maxChi2)
        {
            maxChi2 = chi2_onHit;
        }
        track_distance->Fill(distance);
        track_pullHits->Fill(chi2_onHit);
        chi2_track += track->chi2( hit );
        track_pullHitsVsP->Fill(chi2_onHit, (float)P);
        //All Hits in 3 sigma? to check externally too
    }

    float constanteC = (track->b() * m_zReference - track->a())/(track->c());
    //backward extrapolation
    float X0 = track->a() - track->b()*m_zReference + track->c()*m_ConstC;
    //track_chi2PerDoFVs
    track->setChi2(chi2_track, 3);
    XBackProjVsChi2->Fill(track->chi2(), X0);
    track_chi2->Fill(track->chi2());
    track_chi2PerDoF->Fill(track->chi2()/(3.));
    //std::cout<<"Delta Chi2 (should be 0) \t"<<chi2_track-track->chi2()<<std::endl;
    //if(std::abs(maxDistance) < 2 && std::sqrt(maxChi2) < 4) return true;
    return true;
}
void daily_bgc (bgc_struct BGCM, bgc_grid * grid, const double t, const double naddfrac, int first_balance)
{
    siteconst_struct   *sitec;
    metvar_struct      *metv;
    co2control_struct  *co2;
    ndepcontrol_struct *ndepctrl;
    control_struct     *ctrl;
    epconst_struct     *epc;
    epvar_struct       *epv;
    psn_struct         *psn_sun, *psn_shade;
    wstate_struct      *ws;
    wflux_struct       *wf;
    cstate_struct      *cs;
    cflux_struct       *cf;
    nstate_struct      *ns;
    nflux_struct       *nf;
    ntemp_struct       *nt;
    phenology_struct   *phen;
    summary_struct     *summary;
    struct tm          *timestamp;
    time_t             *rawtime;

    /* miscelaneous variables for program control in main */
    int simyr, yday, metyr, metday;
    int annual_alloc;
    int outv;
    int i, nmetdays;
    double tair_avg, tdiff;
    int dayout;
    double daily_ndep, daily_nfix, ndep_scalar, ndep_diff, ndep;
    int ind_simyr;

    sitec = &grid->sitec;
    metv = &grid->metv;
    co2 = &BGCM->co2;
    ndepctrl = &BGCM->ndepctrl;
    ctrl = &BGCM->ctrl;
    epc = &grid->epc;
    epv = &grid->epv;
    ws = &grid->ws;
    wf = &grid->wf;
    cs = &grid->cs;
    cf = &grid->cf;
    ns = &grid->ns;
    nf = &grid->nf;
    nt = &grid->nt;
    phen = &grid->phen;
    psn_sun = &grid->psn_sun;
    psn_shade = &grid->psn_shade;
    summary = &grid->summary;

    rawtime = (time_t *) malloc (sizeof (time_t));
    *rawtime = (int)t;
    timestamp = gmtime (rawtime);

    /* Get co2 and ndep */
    if (ctrl->spinup == 1)      /* Spinup mode */
    {
        metv->co2 = co2->co2ppm;
        daily_ndep = ndepctrl->ndep / 365.0;
        daily_nfix = ndepctrl->nfix / 365.0;
    }
    else                        /* Model mode */
    {
        /* atmospheric CO2 and Ndep handling */
        if (!(co2->varco2))
        {
            /* constant CO2, constant Ndep */
            metv->co2 = co2->co2ppm;
            daily_ndep = ndepctrl->ndep / 365.0;
            daily_nfix = ndepctrl->nfix / 365.0;
        }
        else
        {
            /* When varco2 = 1, use file for co2 */
            if (co2->varco2 == 1)
                metv->co2 = get_co2 (BGCM->Forcing[CO2_TS][0], t);
            if (metv->co2 < -999)
            {
                printf ("Error finding CO2 value on %4.4d-%2.2d-%2.2d\n", timestamp->tm_year + 1900, timestamp->tm_mon + 1, timestamp->tm_mday);
                exit (1);
            }

            /* When varco2 = 2, use the constant CO2 value, but can vary Ndep */
            if (co2->varco2 == 2)
                metv->co2 = co2->co2ppm;

            if (ndepctrl->varndep == 0)
            {
                /* Increasing CO2, constant Ndep */
                daily_ndep = ndepctrl->ndep / 365.0;
                daily_nfix = ndepctrl->nfix / 365.0;
            }
            else
            {
                daily_ndep = get_ndep (BGCM->Forcing[NDEP_TS][0], t);
                daily_nfix = ndepctrl->nfix / 365.0;
                if (daily_ndep < -999)
                {
                    printf ("Error finding NDEP %4.4d-%2.2d-%2.2d\n", timestamp->tm_year + 1900, timestamp->tm_mon + 1, timestamp->tm_mday);
                    exit (1);
                }
                else
                {
                    daily_ndep = daily_ndep / 365.0;
                }
            }
        }
    }

    precision_control (ws, cs, ns);

    /* zero all the daily flux variables */
    make_zero_flux_struct (wf, cf, nf);

    /* phenology fluxes */
    phenology (epc, metv, phen, epv, cs, cf, ns, nf);

    /* test for the annual allocation day */
    if (phen->remdays_litfall == 1)
        annual_alloc = 1;
    else
        annual_alloc = 0;

    /* Calculate leaf area index, sun and shade fractions, and specific
     * leaf area for sun and shade canopy fractions, then calculate
     * canopy radiation interception and transmission */
    radtrans (cs, epc, metv, epv, sitec->sw_alb);

    /* update the ann max LAI for annual diagnostic output */
    if (epv->proj_lai > epv->ytd_maxplai)
        epv->ytd_maxplai = epv->proj_lai;

    /* soil water potential */
    epv->vwc = metv->swc;
    soilpsi (sitec, epv->vwc, &epv->psi);

    /* daily maintenance respiration */
    maint_resp (cs, ns, epc, metv, cf, epv);

    /* begin canopy bio-physical process simulation */
    if (cs->leafc && metv->dayl)
    {
        /* conductance */
        canopy_et (metv, epc, epv, wf);
    }

    /* Do photosynthesis only when it is part of the current growth season, as
     * defined by the remdays_curgrowth flag. This keeps the occurrence of
     * new growth consistent with the treatment of litterfall and
     * allocation */
    //printf ("leafc %lf dormant %lf, dayl %lf, soilc = %lf\n", cs->leafc, epv->dormant_flag, metv->dayl, summary->soilc);
    if (cs->leafc && !epv->dormant_flag && metv->dayl)
        total_photosynthesis (metv, epc, epv, cf, psn_sun, psn_shade);
    else
        epv->assim_sun = epv->assim_shade = 0.0;

    nf->ndep_to_sminn = daily_ndep;
    nf->nfix_to_sminn = daily_nfix;

    /* daily litter and soil decomp and nitrogen fluxes */
    decomp (metv->tsoil, epc, epv, cs, cf, ns, nf, nt);

    /* Daily allocation gets called whether or not this is a current growth
     * day, because the competition between decomp immobilization fluxes and
     * plant growth N demand is resolved here. On days with no growth, no
     * allocation occurs, but immobilization fluxes are updated normally */
    daily_allocation (cf, cs, nf, ns, epc, epv, nt, naddfrac, ctrl->spinup);

    /* reassess the annual turnover rates for livewood --> deadwood, and for
     * evergreen leaf and fine root litterfall. This happens once each year,
     * on the annual_alloc day (the last litterfall day) */
    if (annual_alloc)
        annual_rates (epc, epv);

    /* daily growth respiration */
    growth_resp (epc, cf);

    /* daily update of carbon state variables */
    daily_carbon_state_update (cf, cs, annual_alloc, epc->woody, epc->evergreen);

    /* daily update of nitrogen state variables */
    daily_nitrogen_state_update (nf, ns, annual_alloc, epc->woody, epc->evergreen);

    /* Calculate N leaching loss. This is a special state variable update
     * routine, done after the other fluxes and states are reconciled in order
     * to avoid negative sminn under heavy leaching potential */
    //nleaching(ns, nf, ws, wf);

    /* Calculate daily mortality fluxes and update state variables */
    /* This is done last, with a special state update procedure, to insure
     * that pools don't go negative due to mortality fluxes conflicting with
     * other proportional fluxes */
    mortality (epc, cs, cf, ns, nf);

    /* Test for carbon balance */
    check_carbon_balance (cs, &epv->old_c_balance, first_balance);

    /* Test for nitrogen balance */
    check_nitrogen_balance (ns, &epv->old_n_balance, first_balance);

    /* Calculate carbon summary variables */
    csummary (cf, cs, summary);
}
bool Compression::Decompression(void *src, unsigned int lenSrc, void *dst, unsigned int lenDst, bool bCheckVersion)
{
    if(bCheckVersion)
    {
        if(GameControl::Get()->GetGameFileVersion() < 19)
        {
            ind.ir(src, lenSrc);
            outd.iw(dst, lenDst);
            decomp();
        }
        else if(GameControl::Get()->GetGameFileVersion() < 20)
        {
            //Fast lzo decompression
            lzo1x_decompress((unsigned char *)src, lenSrc, (unsigned char *)dst, &lenDst, NULL);
        }
        else
        {
            //Ucl
            if(!bUclOk) return false;
            return ucl_nrv2b_decompress_8((const unsigned char *)src, lenSrc, (unsigned char *)dst, (unsigned int *)&lenDst, NULL) == UCL_E_OK;
        }
    }
    else
    {
        //Ucl
        if(!bUclOk) return false;
        return ucl_nrv2b_decompress_8((const unsigned char *)src, lenSrc, (unsigned char *)dst, (unsigned int *)&lenDst, NULL) == UCL_E_OK;
    }

    return true;
}