/**
 * Instantiates a PLP frontend with energy based VAD and VADGate
 * components.
 */
Tracter::Component<float>*
Tracter::PLPVADGraphFactory::Create(Component<float>* iComponent)
{
    Component<float>* p = iComponent;
    p = new ZeroFilter(p);
    p = new Frame(p);
    p = new Periodogram(p);
    p = new MelFilter(p);
    p = new LPCepstrum(p);
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);

    /* VAD */
    Component<float>* v = iComponent;
    v = new Frame(v);
    v = new Energy(v);
    Modulation* m = new Modulation(v);
    if (!GetEnv("MinimaVAD", 0))
    {
        // Old VAD
        NoiseVAD* mv = new NoiseVAD(m, v);
        v = new VADGate(p, mv);
    }
    else
    {
        // New minima-based VAD
        Component<float>* n = new Minima(v);
        Component<BoolType>* b = new Comparator(m, n);
        b = new TimedLatch(b);
        v = new Gate(p, b);
    }
    return v;
}
void frameFieldBackgroundMesh2D::exportCrossField(const std::string &filename)
{
    FILE *f = Fopen(filename.c_str(), "w");
    if (!f) {
        Msg::Error("Could not open file '%s'", filename.c_str());
        return;
    }
    fprintf(f, "View \"Cross Field\"{\n");
    std::vector<double> deltas(2);
    deltas[0] = 0.;
    deltas[1] = M_PI;

    for (std::vector<MVertex*>::iterator it = beginvertices();
         it != endvertices(); it++) {
        MVertex *v = *it;
        double angle_current = angle(v);
        GPoint p = get_GPoint_from_MVertex(v);
        for (int i = 0; i < 2; i++) {
            Pair<SVector3, SVector3> dirs = compute_crossfield_directions(
                v->x(), v->y(), angle_current + deltas[i]);
            fprintf(f, "VP(%g,%g,%g) {%g,%g,%g};\n", p.x(), p.y(), p.z(),
                    dirs.first()[0], dirs.first()[1], dirs.first()[2]);
            fprintf(f, "VP(%g,%g,%g) {%g,%g,%g};\n", p.x(), p.y(), p.z(),
                    dirs.second()[0], dirs.second()[1], dirs.second()[2]);
        }
    }
    fprintf(f, "};\n");
    fclose(f);
}
/**
 * Instantiates a basic MFCC frontend with speech/sil detection.
 */
Tracter::Component<float>*
Tracter::BasicSpeechDetGraphFactory::Create(Component<float>* iComponent)
{
    Component<float>* p = iComponent;
    p = new ZeroFilter(p);
    p = new Frame(p);
    p = new Periodogram(p);
    p = new MelFilter(p);
    p = new Cepstrum(p);
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);

    // Minima-based VAD
    Component<float>* v = iComponent;
    v = new Frame(v);
    v = new Energy(v);
    Modulation* m = new Modulation(v);
    Component<float>* n = new Minima(v);
    Component<BoolType>* b = new Comparator(m, n);
    b = new TimedLatch(b);
    Component<float>* f = new BoolToFloat(b);

    // Concatenation
    Concatenate* c = new Concatenate();
#ifdef HAVE_TORCH3
    p = new MLP(p);
#endif
    c->Add(p);
    c->Add(f);
    return c;
}
void Subsampling::backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout,
                                bool backpropToPrevious)
{
    const int N = a.rows();
    yd.conservativeResize(N, Eigen::NoChange);
    e.conservativeResize(N, Eigen::NoChange);

    // Derivatives of the activation function
    activationFunctionDerivative(act, y, yd);
    deltas = yd.cwiseProduct(*ein);

    e.setZero();
    for (int fmo = 0; fmo < fm; fmo++) {
        Wd[fmo].setZero();
        if (bias)
            Wbd[fmo].setZero();
    }

    for (int n = 0; n < N; n++) {
        int outputIdx = 0;
        for (int fmo = 0; fmo < fm; fmo++) {
            for (int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++) {
                int rowBase = fmo * fmInSize + ri * inCols;
                for (int ci = 0, co = 0; ci < maxCol; ci += kernelCols, co++, outputIdx++) {
                    const double d = deltas(n, outputIdx);
                    for (int kr = 0; kr < kernelRows; kr++) {
                        for (int kc = 0, inputIdx = rowBase + ci; kc < kernelCols; kc++, inputIdx++) {
                            e(n, inputIdx) += W[fmo](ro, co) * d;
                            Wd[fmo](ro, co) += d * (*x)(n, inputIdx);
                        }
                    }
                    if (bias)
                        Wbd[fmo](ro, co) += d;
                }
            }
        }
    }

    if (regularization.l1Penalty > 0.0) {
        for (int fmo = 0; fmo < fm; fmo++)
            Wd[fmo].array() += regularization.l1Penalty * W[fmo].array() / W[fmo].array().abs();
    }
    if (regularization.l2Penalty > 0.0) {
        for (int fmo = 0; fmo < fm; fmo++)
            Wd[fmo] += regularization.l2Penalty * W[fmo];
    }
    eout = &e;
}
/**
 * Instantiates an SPTK based mcep frontend.
 */
Tracter::Component<float>*
Tracter::MCepGraphFactory::Create(Component<float>* iComponent)
{
    Component<float>* p = iComponent;
    p = new Frame(p);
    p = new Periodogram(p);
    p = new MCep(p);
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);
    return p;
}
/**
 * Does nothing other than add CMVN and deltas if necessary. Requires
 * a feature level source.
 */
Tracter::Component<float>*
Tracter::CMVNGraphFactory::Create(Component<float>* iComponent)
{
    Component<float>* p = iComponent;
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);

    // Doesn't really belong, but it's easy to "comment out" behind the option
    if (GetEnv("LinearTransform", false))
        p = new LinearTransform(p);

    return p;
}
/**
 * Instantiates a PLP frontend.
 */
Tracter::Component<float>*
Tracter::PLPGraphFactory::Create(Component<float>* iComponent)
{
    Component<float>* p = iComponent;
    p = new ZeroFilter(p);
    p = new Frame(p);
    p = new Periodogram(p);
    p = new MelFilter(p);
    p = new LPCepstrum(p);
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);
    return p;
}
/**
 * Calculate the multiple scattering correction factor and weight for the
 * given \f$\mu r\f$ value.
 * @param irp Index of the current \f$\mu r\f$ point (assumed zero based)
 * @param muR Single \f$\mu r\f$ slice value
 * @param abs Absorption and self-attenuation factor (\f$A_s\f$ in Mayers' paper)
 * @return A pair of (factor, weight)
 */
std::pair<double, double>
MayersSampleCorrectionStrategy::calculateMS(const size_t irp, const double muR,
                                            const double abs) {
  // Radial coordinate raised to power 1/3 to ensure uniform density of points
  // across the circle, following discussion with W.G. Marshall (ISIS)
  const double radDistPower = 1. / 3.;
  const double muH = muR * (m_pars.cylHeight / m_pars.cylRadius);
  const double cosaz = cos(m_pars.azimuth);
  seedRNG(irp);

  // Take an average over a number of sets of second scatters
  std::vector<double> deltas(m_pars.msNRuns, 0.0);
  for (size_t j = 0; j < m_pars.msNRuns; ++j) {
    double sum = 0.0;
    for (size_t i = 0; i < m_pars.msNEvents; ++i) {
      // Random (r, theta, z) for the two scatter points
      const double r1 = pow(m_rng->nextValue(), radDistPower) * muR;
      const double r2 = pow(m_rng->nextValue(), radDistPower) * muR;
      const double z1 = m_rng->nextValue() * muH;
      const double z2 = m_rng->nextValue() * muH;
      const double th1 = m_rng->nextValue() * TWOPI;
      const double th2 = m_rng->nextValue() * TWOPI;

      double fact1 = pow(muR, 2) - std::pow(r1 * sin(th1), 2);
      if (fact1 < 0.0)
        fact1 = 0.0;
      // Path into the first point
      const double mul1 = sqrt(fact1) + r1 * cos(th1);

      double fact2 = pow(muR, 2) - pow(r2 * sin(m_pars.twoTheta - th2), 2);
      if (fact2 < 0.0)
        fact2 = 0.0;
      // Path out from the final point
      const double mul2 =
          (sqrt(fact2) - r2 * cos(m_pars.twoTheta - th2)) / cosaz;

      // Path between points 1 & 2
      const double mul12 =
          sqrt(pow(r1 * cos(th1) - r2 * cos(th2), 2) +
               pow(r1 * sin(th1) - r2 * sin(th2), 2) + pow(z1 - z2, 2));
      if (mul12 < 0.01)
        continue;
      sum += exp(-(mul1 + mul2 + mul12)) / pow(mul12, 2);
    }
    const double beta =
        pow(M_PI * muR * muR * muH, 2) * sum / to<double>(m_pars.msNEvents);
    const double delta = 0.25 * beta / (M_PI * abs * muH);
    deltas[j] = delta;
  }
  auto stats =
      getStatistics(deltas, StatOptions::Mean | StatOptions::CorrectedStdDev);
  return std::make_pair(stats.mean, stats.mean / stats.standard_deviation);
}
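// A minimal, self-contained sketch (not Mantid's getStatistics API) of how the
// per-run delta estimates above reduce to the returned (factor, weight) pair:
// the factor is the mean of the runs and the weight is the mean divided by the
// corrected (n - 1) standard deviation. The delta values are invented.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Invented per-run estimates standing in for `deltas` above
    std::vector<double> deltas = {0.021, 0.019, 0.024, 0.020, 0.022};
    double mean = 0.0;
    for (double d : deltas)
        mean += d;
    mean /= deltas.size();
    double sumsq = 0.0;
    for (double d : deltas)
        sumsq += (d - mean) * (d - mean);
    // Corrected (n - 1) standard deviation
    const double stddev = std::sqrt(sumsq / (deltas.size() - 1));
    // factor = mean, weight = mean / stddev, as in the return statement above
    std::printf("factor=%g weight=%g\n", mean, mean / stddev);
    return 0;
}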
/** * Instantiates a "basic MFCC" frontend with SNR spectral features. */ Tracter::Component<float>* Tracter::SNRGraphFactory::Create(Component<float>* iComponent) { Component<float>* p = iComponent; p = new ZeroFilter(p); p = new Frame(p); p = new Periodogram(p); Component<float>* m = new Minima(p); m = new TransverseFilter(m); p = new SNRSpectrum(p, m); p = new MelFilter(p); p = new Cepstrum(p); p = normaliseMean(p); p = deltas(p); p = normaliseVariance(p); return p; }
void ConvolutionalLayer::backPropagate(vector<mat>& errors, const vector<mat>& fins,
                                       const vector<mat>& fouts, float learning_rate)
{
    size_t nInputs = getNumInputMaps(),
           nOutputs = getNumOutputMaps();

    size_t batch_size = fins[0].getCols();

    // In the following code, the iteration indices i and j stand for:
    //   i : index of input feature maps,  i = 0 ~ nInputs - 1
    //   j : index of output feature maps, j = 0 ~ nOutputs - 1
    vector<mat> deltas(nOutputs);
    for (size_t j = 0; j < nOutputs; ++j)
        deltas[j] = fouts[j] & (1.0f - fouts[j]) & errors[j];

    this->feedBackward(errors, deltas);

    assert(learning_rate > 0);
    float lr = learning_rate / batch_size;

    // iImgs represents the input images.
    // oImgs represents the output images (before sigmoid or any other
    // activation function).
    vector<vector<mat> > iImgs(nInputs), oImgs(nOutputs);

    for (size_t i = 0; i < nInputs; ++i)
        iImgs[i] = reshapeVectors2Images(fins[i], _input_img_size);

    for (size_t j = 0; j < nOutputs; ++j)
        oImgs[j] = reshapeVectors2Images(deltas[j], this->get_output_img_size());

    // Update kernels with the learning rate
    for (size_t k = 0; k < batch_size; ++k) {
        for (size_t j = 0; j < nOutputs; ++j) {
            for (size_t i = 0; i < nInputs; ++i)
                _kernels[i][j] -= convn(rot180(iImgs[i][k]), oImgs[j][k], "valid_shm") * lr;
            _bias[j] -= sum_all(oImgs[j][k]) * lr;
        }
    }
}
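// The `fouts[j] & (1.0f - fouts[j]) & errors[j]` line above is the elementwise
// sigmoid derivative, delta = y * (1 - y) * error (the `&` operator belongs to
// the library's mat type). A hedged, standalone sketch of the same computation
// on plain std::vector<float>, with invented values:
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> fout = {0.9f, 0.2f, 0.5f};   // sigmoid outputs (invented)
    std::vector<float> err  = {0.1f, -0.3f, 0.05f}; // backpropagated errors (invented)
    std::vector<float> delta(fout.size());
    for (std::size_t i = 0; i < fout.size(); ++i)
        delta[i] = fout[i] * (1.0f - fout[i]) * err[i]; // sigma'(z) = y * (1 - y)
    for (float d : delta)
        std::printf("%g ", d);  // prints "0.009 -0.048 0.0125"
    std::printf("\n");
    return 0;
}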
/**
 * Instantiates a basic MFCC frontend with MLPVAD and VADGate
 * components.
 */
Tracter::Component<float>*
Tracter::BasicMLPVADGraphFactory::Create(Component<float>* iComponent)
{
    /* Basic signal processing chain */
    Component<float>* p = iComponent;
    p = new ZeroFilter(p);
    p = new Frame(p);
    p = new Periodogram(p);
    p = new MelFilter(p);
    p = new Cepstrum(p);
    p = normaliseMean(p);
    p = deltas(p);
    p = normaliseVariance(p);

    /* VAD - works on the "basic" features */
    Component<float>* v = new MLP(p);
    MLPVAD* mv = new MLPVAD(v);
    p = new VADGate(p, mv);

    return p;
}
void MaxPooling::backpropagate(Eigen::MatrixXd* ein, Eigen::MatrixXd*& eout)
{
    const int N = y.rows();
    e.conservativeResize(N, Eigen::NoChange);
    deltas = (*ein);
    e.setZero();

    #pragma omp parallel for
    for (int n = 0; n < N; n++) {
        int outputIdx = 0;
        int inputIdx = 0;
        for (int fmo = 0; fmo < fm; fmo++) {
            for (int ri = 0, ro = 0; ri < maxRow; ri += kernelRows, ro++) {
                int rowBase = fmo * fmInSize + ri * inCols;
                for (int ci = 0, co = 0; ci < maxCol; ci += kernelCols, co++, outputIdx++) {
                    // Find the input that achieved the maximum in this window...
                    double m = -std::numeric_limits<double>::max();
                    int idx = -1;
                    for (int kr = 0; kr < kernelRows; kr++) {
                        inputIdx = rowBase + ci;
                        for (int kc = 0; kc < kernelCols; kc++, inputIdx++) {
                            if ((*x)(n, inputIdx) > m) {
                                m = (*x)(n, inputIdx);
                                idx = inputIdx;
                            }
                        }
                    }
                    // ...and route the incoming delta only to that input
                    e(n, idx) = deltas(n, outputIdx);
                }
            }
        }
    }
    eout = &e;
}
void VideoRegionsConfigDialog::initControls()
{
    HWND hwnd = m_ctrlThis.getWindow();

    m_videoClasses.setWindow(GetDlgItem(hwnd, IDC_VIDEO_CLASS_NAMES));
    m_videoRects.setWindow(GetDlgItem(hwnd, IDC_VIDEO_RECTS));
    m_videoRecognitionInterval.setWindow(GetDlgItem(hwnd, IDC_VIDEO_RECOGNITION_INTERVAL));
    m_videoRecognitionIntervalSpin.setWindow(GetDlgItem(hwnd, IDC_VIDEO_RECOGNITION_INTERVAL_SPIN));

    int limitersTmp[] = {50, 200};
    int deltasTmp[] = {5, 10};
    std::vector<int> limitters(limitersTmp, limitersTmp + sizeof(limitersTmp) / sizeof(int));
    std::vector<int> deltas(deltasTmp, deltasTmp + sizeof(deltasTmp) / sizeof(int));

    m_videoRecognitionIntervalSpin.setBuddy(&m_videoRecognitionInterval);
    m_videoRecognitionIntervalSpin.setAccel(0, 1);
    m_videoRecognitionIntervalSpin.setRange32(0, INT_MAX);
    m_videoRecognitionIntervalSpin.setAutoAccelerationParams(&limitters, &deltas, 50);
    m_videoRecognitionIntervalSpin.enableAutoAcceleration(true);
}
std::vector<double> NeuralLayer::computeDeltas(const std::vector<double>& error,
                                               const std::vector<std::vector<double> >& nextWeights)
{
    // On the first call the error vector is used as-is; on later calls its
    // first element is skipped (it does not correspond to a weighted output).
    static unsigned int first = 1;
    int nextLayerOuts;
    if (first)
        nextLayerOuts = error.size();
    else
        nextLayerOuts = error.size() - 1;

    // delta_j = sum_i error_i * w_ji, including this layer's bias node (+1)
    std::vector<double> deltas(m_numNodes + 1);
    for (int j = 0; j < m_numNodes + 1; ++j) {
        for (int i = 0; i < nextLayerOuts; ++i) {
            if (first)
                deltas[j] += error[i] * nextWeights[j][i];
            else
                deltas[j] += error[i + 1] * nextWeights[j][i];
        }
    }
    first = 0;
    return deltas;
}
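// The double loop above is effectively the matrix-vector product
// delta = W * error over the next layer's weights (W indexed as
// [thisNode][nextOutput]). A self-contained sketch with invented weights and
// errors, taking the first-call branch where no element is skipped:
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // Invented 2-node layer (+1 bias row) feeding a 2-output next layer
    std::vector<std::vector<double> > w = {{0.1, 0.2},    // node 0 -> outputs
                                           {0.3, -0.1},   // node 1 -> outputs
                                           {0.05, 0.4}};  // bias   -> outputs
    std::vector<double> error = {1.0, -2.0};              // next layer's errors
    std::vector<double> deltas(w.size(), 0.0);
    for (std::size_t j = 0; j < w.size(); ++j)
        for (std::size_t i = 0; i < error.size(); ++i)
            deltas[j] += error[i] * w[j][i];              // delta_j = sum_i e_i * w_ji
    for (double d : deltas)
        std::printf("%g ", d);                            // prints "-0.3 0.5 -0.75"
    std::printf("\n");
    return 0;
}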
int main()
{
    DenseVector sings;
    GeMat deltas(3, 2);
    std::vector<GeMat> _deltas;
    Function x2f(x2, sings);
    Function onef(one, sings);
    Function x3f(x3, sings);
    Function cosf(mycos, sings);
    Function sinf(mysin, sings);
    Function expf(myexp, sings);
    Basis basis(4);
    basis.template enforceBoundaryCondition<lawa::DirichletBC>();
    IndexSet indexset;
    Coeff1D coeff;

    std::vector<Function> fvec;
    int rank = 2;
    int dim = 64;
    for (int i = 1; i <= 32; ++i) {
        fvec.push_back(cosf);
        fvec.push_back(onef);
        fvec.push_back(x2f);
        fvec.push_back(onef);
    }

    SepCoeff coeffs(rank, dim);
    IndexSetVec indexsetvec(dim);
    lawa::SeparableFunctionD<T> F(fvec, rank, dim);
    MatInt derivs(rank, dim);
    for (int i = 1; i <= rank; ++i) {
        for (int j = 1; j <= dim; ++j) {
            derivs(i, j) = 0;
            _deltas.push_back(deltas);
        }
    }
    lawa::SeparableRHSD<T, Basis> Fint(basis, F, _deltas, derivs);

    getFullIndexSet(basis, indexset, 2);
    std::cout << "The index set size is\n" << indexset.size() << std::endl;
    for (int l = 0; (unsigned)l < indexsetvec.size(); ++l) {
        indexsetvec[l] = indexset;
    }

    /* Map */
    lawa::Mapwavind<Index1D> map(dim);
    map.rehash(50);

    genCoefficients(coeffs, Fint, indexsetvec);
    lawa::HTCoefficients<T, Basis> f(dim, basis, map);
    lawa::HTCoefficients<T, Basis> u(dim, basis, map);
    lawa::HTCoefficients<T, Basis> r(dim, basis, map);

    Laplace1D LaplaceBil(basis);
    RefLaplace1D RefLaplaceBil(basis.refinementbasis);
    Identity1D IdentityBil(basis);
    RefIdentity1D RefIdentityBil(basis.refinementbasis);
    LOp_Lapl1D lapl(basis, basis, RefLaplaceBil, LaplaceBil);

    Sepop A(lapl, dim, dim);

    lawa::Sepdiagscal<Basis> S(dim, basis);
    setScaling(S, 0.5);

    lawa::HTAWGM_Params params;
    params.maxit_pcg = 100;
    params.maxit_awgm = 100;
    params.tol_awgm = 1e-08;
    params.delta1_pcg = 1e-01;
    params.delta2_pcg = 1e-01;
    params.delta3_pcg = 1e-01;
    params.alpha = 0.95;
    params.recompr = 1e-02;
    params.gamma = 0.1;
    params.theta = 1e-08;

    std::cout << "HTAWGM params =\n";
    std::cout << params << std::endl;

    unsigned its;
    double res;

    its = htawgm(A, S, u, Fint, indexsetvec, res, params);

    std::cout << "htawgm took " << its << " iterations to reach "
              << res << " accuracy" << std::endl;
    std::cout << "Final scaling set to\n" << S << std::endl;

    return 0;
}
Timeline::Timeline(const RealVector& points):
    time_points(points),
    deltas(points.extent(blitz::firstDim) - 1)
{
    for (int i = 0; i < points.extent(blitz::firstDim) - 1; i++)
        deltas(i) = points(i + 1) - points(i);
}
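// For comparison, a minimal sketch of the same consecutive-difference
// computation using the standard library on a plain std::vector rather than
// Blitz++ arrays; the time points are invented.
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
    std::vector<double> points = {0.0, 0.5, 1.5, 3.0};
    std::vector<double> deltas(points.size());
    // adjacent_difference writes points[0] first, then the differences
    std::adjacent_difference(points.begin(), points.end(), deltas.begin());
    deltas.erase(deltas.begin());  // drop the leading copy of points[0]
    for (double d : deltas)
        std::printf("%g ", d);     // prints "0.5 1 1.5"
    std::printf("\n");
    return 0;
}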
int main(int argc, char **argv)
{
#ifdef QUESO_HAVE_LIBMESH
    unsigned int i;
    unsigned int j;
    const unsigned int num_pairs = 5;
    const unsigned int num_samples = 1e4;
    const double alpha = 3.0;
    const double beta = 1.0;

    QUESO::EnvOptionsValues opts;
    opts.m_seed = -1;

    MPI_Init(&argc, &argv);
    QUESO::FullEnvironment env(MPI_COMM_WORLD, "", "", &opts);

#ifdef LIBMESH_DEFAULT_SINGLE_PRECISION
    // SLEPc farts with libMesh::Real==float
    libmesh_example_assert(false, "--disable-singleprecision");
#endif

    // Need an artificial block here because libMesh needs to call
    // PetscFinalize before we call MPI_Finalize
#ifdef LIBMESH_HAVE_SLEPC
    {
        libMesh::LibMeshInit init(argc, argv);
        libMesh::Mesh mesh(init.comm());
        libMesh::MeshTools::Generation::build_square(mesh,
            20, 20, 0.0, 1.0, 0.0, 1.0, libMeshEnums::QUAD4);

        QUESO::FunctionOperatorBuilder fobuilder;
        fobuilder.order = "FIRST";
        fobuilder.family = "LAGRANGE";
        fobuilder.num_req_eigenpairs = num_pairs;

        QUESO::LibMeshFunction mean(fobuilder, mesh);
        QUESO::LibMeshNegativeLaplacianOperator precision(fobuilder, mesh);
        QUESO::InfiniteDimensionalGaussian mu(env, mean, precision, alpha, beta);

        // Vectors to hold the running statistics of all KL coefficients
        std::vector<double> means(num_pairs, 0.0);
        std::vector<double> sumsqs(num_pairs, 0.0);
        std::vector<double> deltas(num_pairs, 0.0);

        double draw;
        for (i = 1; i < num_samples + 1; i++) {
            mu.draw();
            for (j = 0; j < num_pairs; j++) {
                // Welford's online update of the mean and sum of squares
                draw = mu.get_kl_coefficient(j);
                deltas[j] = draw - means[j];
                means[j] += (double) deltas[j] / i;
                sumsqs[j] += deltas[j] * (draw - means[j]);
            }
            // std::cerr << "MEAN IS: " << means[0] << std::endl;
        }

        std::vector<double> vars(num_pairs, 0.0);
        for (j = 0; j < num_pairs; j++) {
            vars[j] = sumsqs[j] / (num_samples - 1);
        }

        double sigma;
        double sigmasq;
        double mean_min;
        double mean_max;
        for (j = 0; j < num_pairs; j++) {
            // Mean is N(0, (lambda_j^{- alpha / 2} * beta)^2 / n), so sigma
            // must be recomputed for each eigenvalue
            sigma = beta / std::pow(precision.get_eigenvalue(j), alpha / 2.0);
            mean_min = -3.0 * sigma / std::sqrt(num_samples);
            mean_max = 3.0 * sigma / std::sqrt(num_samples);
            if (means[j] < mean_min || means[j] > mean_max) {
                std::cerr << "mean kl test failed" << std::endl;
                return 1;
            }
        }

        double var_min;
        double var_max;
        // vars[j] should be approximately ~ N(sigma^2, 2 sigma^4 / (num_samples - 1))
        for (j = 0; j < num_pairs; j++) {
            sigma = beta / std::pow(precision.get_eigenvalue(j), alpha / 2.0);
            sigmasq = sigma * sigma;
            var_min = sigmasq - 3.0 * sigmasq * std::sqrt(2.0 / (num_samples - 1));
            var_max = sigmasq + 3.0 * sigmasq * std::sqrt(2.0 / (num_samples - 1));
            if (vars[j] < var_min || vars[j] > var_max) {
                std::cerr << "variance kl test failed" << std::endl;
                return 1;
            }
        }
    }
#endif  // LIBMESH_HAVE_SLEPC

    MPI_Finalize();
    return 0;
#else
    return 77;
#endif
}
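// The inner sampling loop above is Welford's online algorithm for the running
// mean and sum of squared deviations. A self-contained sketch of the same
// update on invented samples:
#include <cstdio>
#include <vector>

int main() {
    // Invented samples; expected mean = 0.28, unbiased variance = 2.072
    std::vector<double> samples = {1.2, -0.7, 0.3, 2.1, -1.5};
    double mean = 0.0, sumsq = 0.0;
    int n = 0;
    for (double x : samples) {
        ++n;
        double delta = x - mean;      // same role as deltas[j] above
        mean += delta / n;            // running mean update
        sumsq += delta * (x - mean);  // running sum of squared deviations
    }
    double var = sumsq / (n - 1);     // corrected (sample) variance
    std::printf("mean=%g var=%g\n", mean, var);
    return 0;
}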
StatusWith<std::vector<BSONObj>> FTDCDecompressor::uncompress(ConstDataRange buf) {
    ConstDataRangeCursor compressedDataRange(buf);

    // Read the length of the uncompressed buffer
    auto swUncompressedLength = compressedDataRange.readAndAdvance<LittleEndian<std::uint32_t>>();
    if (!swUncompressedLength.isOK()) {
        return {swUncompressedLength.getStatus()};
    }

    // Now uncompress the data; limit the size of the buffer we need for zlib
    auto uncompressedLength = swUncompressedLength.getValue();

    if (uncompressedLength > 10000000) {
        return Status(ErrorCodes::InvalidLength, "Metrics chunk has exceeded the allowable size.");
    }

    auto statusUncompress = _compressor.uncompress(compressedDataRange, uncompressedLength);
    if (!statusUncompress.isOK()) {
        return {statusUncompress.getStatus()};
    }

    ConstDataRangeCursor cdc = statusUncompress.getValue();

    // The document is not part of any checksum so we must validate it is correct
    auto swRef = cdc.readAndAdvance<Validated<BSONObj>>();
    if (!swRef.isOK()) {
        return {swRef.getStatus()};
    }

    BSONObj ref = swRef.getValue();

    // Read the count of metrics
    auto swMetricsCount = cdc.readAndAdvance<LittleEndian<std::uint32_t>>();
    if (!swMetricsCount.isOK()) {
        return {swMetricsCount.getStatus()};
    }

    std::uint32_t metricsCount = swMetricsCount.getValue();

    // Read the count of samples
    auto swSampleCount = cdc.readAndAdvance<LittleEndian<std::uint32_t>>();
    if (!swSampleCount.isOK()) {
        return {swSampleCount.getStatus()};
    }

    std::uint32_t sampleCount = swSampleCount.getValue();

    // Limit the size of the buffer we need for metrics and samples
    if (metricsCount * sampleCount > 1000000) {
        return Status(ErrorCodes::InvalidLength,
                      "Metrics Count and Sample Count have exceeded the allowable range.");
    }

    std::vector<std::uint64_t> metrics;
    metrics.reserve(metricsCount);

    // We pass the reference document as both the reference document and current
    // document as we only want the array of metrics.
    (void)FTDCBSONUtil::extractMetricsFromDocument(ref, ref, &metrics);

    if (metrics.size() != metricsCount) {
        return {ErrorCodes::BadValue,
                "The metrics in the reference document and metrics count do not match"};
    }

    std::vector<BSONObj> docs;

    // Allocate space for the reference document + samples
    docs.reserve(1 + sampleCount);

    // We must always return the reference document
    docs.emplace_back(ref.getOwned());

    if (sampleCount == 0) {
        return {docs};
    }

    // Read the samples
    std::vector<std::uint64_t> deltas(metricsCount * sampleCount);

    // Decompress the deltas; a zero delta is followed by a count of additional zeroes
    std::uint64_t zeroesCount = 0;

    auto cdrc = ConstDataRangeCursor(cdc);

    for (std::uint32_t i = 0; i < metricsCount; i++) {
        for (std::uint32_t j = 0; j < sampleCount; j++) {
            if (zeroesCount) {
                deltas[FTDCCompressor::getArrayOffset(sampleCount, j, i)] = 0;
                zeroesCount--;
                continue;
            }

            auto swDelta = cdrc.readAndAdvance<FTDCVarInt>();
            if (!swDelta.isOK()) {
                return swDelta.getStatus();
            }

            if (swDelta.getValue() == 0) {
                auto swZero = cdrc.readAndAdvance<FTDCVarInt>();
                if (!swZero.isOK()) {
                    return swZero.getStatus();
                }
                zeroesCount = swZero.getValue();
            }

            deltas[FTDCCompressor::getArrayOffset(sampleCount, j, i)] = swDelta.getValue();
        }
    }

    // Inflate the deltas: seed each metric's first sample with its reference value...
    for (std::uint32_t i = 0; i < metricsCount; i++) {
        deltas[FTDCCompressor::getArrayOffset(sampleCount, 0, i)] += metrics[i];
    }

    // ...then take the running sum along each metric's samples
    for (std::uint32_t i = 0; i < metricsCount; i++) {
        for (std::uint32_t j = 1; j < sampleCount; j++) {
            deltas[FTDCCompressor::getArrayOffset(sampleCount, j, i)] +=
                deltas[FTDCCompressor::getArrayOffset(sampleCount, j - 1, i)];
        }
    }

    // Rebuild one BSON document per sample from the inflated metric values
    for (std::uint32_t i = 0; i < sampleCount; ++i) {
        for (std::uint32_t j = 0; j < metricsCount; ++j) {
            metrics[j] = deltas[j * sampleCount + i];
        }
        docs.emplace_back(FTDCBSONUtil::constructDocumentFromMetrics(ref, metrics).getValue());
    }

    return {docs};
}
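// A self-contained sketch of the inflation step above on a tiny invented
// chunk: each metric's first delta is seeded with its reference-document
// value, and a running sum along the row reconstructs the samples. Plain
// row-major indexing here stands in for FTDCCompressor::getArrayOffset.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const std::uint32_t metricsCount = 2, sampleCount = 3;
    // Reference-document values, one per metric (invented)
    std::vector<std::uint64_t> metrics = {100, 7};
    // Decoded deltas, one row per metric (invented)
    std::vector<std::uint64_t> deltas = {1, 0, 2,    // metric 0
                                         3, 3, 0};   // metric 1
    for (std::uint32_t i = 0; i < metricsCount; ++i) {
        deltas[i * sampleCount] += metrics[i];  // seed with the reference value
        for (std::uint32_t j = 1; j < sampleCount; ++j)
            deltas[i * sampleCount + j] += deltas[i * sampleCount + j - 1];
    }
    // Prints "101 101 103" and "10 13 13"
    for (std::uint32_t i = 0; i < metricsCount; ++i) {
        for (std::uint32_t j = 0; j < sampleCount; ++j)
            std::printf("%llu ", (unsigned long long)deltas[i * sampleCount + j]);
        std::printf("\n");
    }
    return 0;
}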
string getDBStructure()
{
	return "----------------------------------------------------------------------\n\
--\n\
-- MPKG package system\n\
-- Database creation script\n\
-- $Id: dbstruct.cpp,v 1.3 2007/11/02 20:19:45 i27249 Exp $\n\
--\n\
----------------------------------------------------------------------\n\
\n\
create table packages (\n\
	package_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	package_name TEXT NOT NULL,\n\
	package_version TEXT NOT NULL,\n\
	package_arch TEXT NOT NULL,\n\
	package_build TEXT NULL,\n\
	package_compressed_size TEXT NOT NULL,\n\
	package_installed_size TEXT NOT NULL,\n\
	package_short_description TEXT NULL,\n\
	package_description TEXT NULL,\n\
	package_changelog TEXT NULL,\n\
	package_packager TEXT NULL,\n\
	package_packager_email TEXT NULL,\n\
	package_installed INTEGER NOT NULL,\n\
	package_configexist INTEGER NOT NULL,\n\
	package_action INTEGER NOT NULL,\n\
	package_md5 TEXT NOT NULL,\n\
	package_filename TEXT NOT NULL,\n\
	package_betarelease TEXT NOT NULL,\n\
	package_installed_by_dependency INTEGER NOT NULL DEFAULT '0',\n\
	package_type INTEGER NOT NULL DEFAULT '0',\n\
	package_add_date INTEGER NOT NULL DEFAULT '0',\n\
	package_build_date INTEGER NOT NULL DEFAULT '0',\n\
	package_repository_tags TEXT NULL,\n\
	package_distro_version TEXT NULL,\n\
	package_provides TEXT NULL,\n\
	package_conflicts TEXT NULL\n\
);\n\
create index ppname on packages (package_id, package_name, package_version, package_action, package_installed, package_md5);\n\
\n\
create table files (\n\
	file_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	file_name TEXT NOT NULL,\n\
	file_type INTEGER NOT NULL,\n\
	packages_package_id INTEGER NOT NULL\n\
);\n\
create index pname on files (file_name, packages_package_id);\n\
\n\
create table conflicts (\n\
	conflict_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	conflict_file_name TEXT NOT NULL,\n\
	backup_file TEXT NOT NULL,\n\
	conflicted_package_id INTEGER NOT NULL\n\
);\n\
\n\
create table locations (\n\
	location_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	packages_package_id INTEGER NOT NULL,\n\
	server_url TEXT NOT NULL,\n\
	location_path TEXT NOT NULL\n\
);\n\
create index locpid on locations(packages_package_id, location_path, server_url);\n\
\n\
create table tags (\n\
	tags_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	tags_name TEXT NOT NULL\n\
);\n\
create index ptag on tags (tags_id, tags_name);\n\
\n\
create table tags_links (\n\
	tags_link_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	packages_package_id INTEGER NOT NULL,\n\
	tags_tag_id INTEGER NOT NULL\n\
);\n\
create index ptaglink on tags_links (packages_package_id, tags_tag_id);\n\
\n\
create table dependencies (\n\
	dependency_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	packages_package_id INTEGER NOT NULL,\n\
	dependency_condition INTEGER NOT NULL DEFAULT '1',\n\
	dependency_type INTEGER NOT NULL DEFAULT '1',\n\
	dependency_package_name TEXT NOT NULL,\n\
	dependency_package_version TEXT NULL,\n\
	dependency_build_only INTEGER NOT NULL DEFAULT '0'\n\
);\n\
\n\
create index pdeps on dependencies (packages_package_id, dependency_id, dependency_package_name, dependency_package_version, dependency_condition);\n\
\n\
create table history (\n\
	history_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	history_event INTEGER NOT NULL,\n\
	history_data TEXT NULL\n\
);\n\
\n\
create table deltas (\n\
	delta_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
	packages_package_id INTEGER NOT NULL,\n\
	delta_url TEXT NOT NULL,\n\
	delta_md5 TEXT NOT NULL,\n\
	delta_orig_filename TEXT NOT NULL,\n\
	delta_orig_md5 TEXT NOT NULL,\n\
	delta_size TEXT NULL\n\
);\n\
\n\
-- INTERNATIONAL SUPPORT\n\
\n\
--create table descriptions (\n\
--	description_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
--	packages_package_id INTEGER NOT NULL,\n\
--	description_language TEXT NOT NULL,\n\
--	description_text TEXT NOT NULL,\n\
--	short_description_text TEXT NOT NULL\n\
--);\n\
\n\
--create table changelogs (\n\
--	changelog_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
--	packages_package_id INTEGER NOT NULL,\n\
--	changelog_language TEXT NOT NULL,\n\
--	changelog_text TEXT NOT NULL\n\
--);\n\
\n\
-- RATING SYSTEM - SUPPORT FOR FUTURE\n\
--create table ratings (\n\
--	rating_id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n\
--	rating_value INTEGER NOT NULL,\n\
--	packages_package_name TEXT NOT NULL\n\
--);\n\
";
}
static void llSharesToLength(int totalLength, const std::vector<LiteralLength*>& lls,
                             const char* f_at) {
  int lengthRemaining = totalLength;
  int totalShareCount = 0;
  std::vector<LiteralLength*> shareLLs;
  for (LiteralLength* ll : lls) {
    if (ll->shares) {
      totalShareCount += ll->value;
      shareLLs.push_back(ll);
    } else {
      lengthRemaining -= ll->value;
    }
  }
  if (lengthRemaining < 0) {
    throw DSLException(f_at, "Sum of length of fixed-length content exceeds available length.");
  }
  if (totalShareCount == 0) {
    if (lengthRemaining > 0) {
      throw DSLException(f_at, "No share-length content to distribute remaining length to.");
    }
    // Distributing 0 length amongst 0 total shares is fine: all resulting share lengths are 0.
    for (LiteralLength* ll : lls) {
      if (ll->shares) {
        ll->value = 0;
        ll->shares = false;
      }
    }
    return;
  }
  // Initially, distribute from the total length so that each length is the floor of its target
  // value based on uniform shares.
  float avgShareLength = lengthRemaining / (float)totalShareCount;
  std::vector<float> deltas(shareLLs.size());
  for (int i = 0; i < shareLLs.size(); ++i) {
    float targetLength = shareLLs[i]->value * avgShareLength;
    int length = floorf(targetLength);
    shareLLs[i]->value = length;
    shareLLs[i]->shares = false;
    deltas[i] = targetLength - length;
    lengthRemaining -= length;
  }
  // lengthRemaining should be less than shareLLs.size(), but do this anyway in case of
  // numerical error.
  while (lengthRemaining >= shareLLs.size()) {
    for (int i = 0; i < shareLLs.size(); ++i) {
      shareLLs[i]->value++;
      deltas[i] -= 1.f;
    }
    lengthRemaining -= shareLLs.size();
  }
  // Distribute the remaining length to the lengths with the largest deltas.
  int n = deltas.size() - 1 - lengthRemaining;
  std::vector<float> deltasCopy(deltas);
  std::nth_element(deltasCopy.begin(), deltasCopy.begin() + n, deltasCopy.end());
  float deltaThreshold = deltasCopy[n];
  for (int i = 0; i < shareLLs.size() && lengthRemaining > 0; ++i) {
    if (deltas[i] > deltaThreshold) {
      shareLLs[i]->value++;
      --lengthRemaining;
    }
  }
  for (int i = 0; i < shareLLs.size() && lengthRemaining > 0; ++i) {
    if (deltas[i] == deltaThreshold) {
      shareLLs[i]->value++;
      --lengthRemaining;
    }
  }
  assert(lengthRemaining == 0);
}
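// A self-contained sketch of the same largest-remainder idea on plain integers
// (no LiteralLength or DSLException): floor each target length, then hand the
// leftover units to the entries with the largest fractional parts. The share
// counts and total length are invented for illustration.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    const int total = 10;
    std::vector<int> shares = {1, 2, 4};  // 7 shares over length 10
    int shareSum = 0;
    for (int s : shares)
        shareSum += s;

    std::vector<int> lengths(shares.size());
    std::vector<float> fracs(shares.size());
    int remaining = total;
    for (std::size_t i = 0; i < shares.size(); ++i) {
        float target = shares[i] * (float)total / shareSum;
        lengths[i] = (int)std::floor(target);  // floor of the target length
        fracs[i] = target - lengths[i];        // fractional part = priority
        remaining -= lengths[i];
    }
    // Hand out the leftover units, largest fractional part first
    while (remaining > 0) {
        std::size_t best = std::max_element(fracs.begin(), fracs.end()) - fracs.begin();
        lengths[best]++;
        fracs[best] = -1.f;  // don't pick the same entry twice
        --remaining;
    }
    for (int len : lengths)
        std::printf("%d ", len);  // prints "1 3 6"
    std::printf("\n");
    return 0;
}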