bool Walk::find1Way(Cluster c) {
    const Cluster tab[4] = { Cluster(_vm, -1, 0), Cluster(_vm, 1, 0),
                             Cluster(_vm, 0, -1), Cluster(_vm, 0, 1)};
    const int tabLen = 4;

    if (c._pt == _target)
        // Found destination
        return true;

    if (_level >= _findLevel)
        // Nesting limit
        return false;

    // Look for barriers
    if (c.chkBar())
        return false;

    if (c.cell())
        // Location is occupied
        return false;

    // Loop through each direction
    Cluster start = c;
    for (int i = 0; i < tabLen; i++) {
        // Reset to starting position
        c = start;

        do {
            c._pt += tab[i]._pt;
            if (!c.isValid())
                // Break to check next direction
                break;

            // Recursively check for further paths
            ++_level;
            ++start.cell();
            bool foundPath = find1Way(c);
            --start.cell();
            --_level;

            if (foundPath) {
                // Set route point
                _trace[_level]->_pt = start._pt;
                return true;
            }
        } while (!c.chkBar() && !c.cell());
    }
    return false;
}
void Swendsen::Step() {
    RandomizeBonds();
    Cluster();
    FlipClusters();
}
Cluster CGEEngine::XZ(int16 x, int16 y) {
    if (y < kMapTop)
        y = kMapTop;

    if (y > kMapTop + kMapHig - kMapGridZ)
        y = kMapTop + kMapHig - kMapGridZ;

    return Cluster(this, x / kMapGridX, (y - kMapTop) / kMapGridZ);
}
Prediction::Prediction(int _numCluster, int _dim_feature) {
    num_cluster = _numCluster;
    dim_feature = _dim_feature;
    svm = RankingSVM(dim_feature);
    curRounds = -1;

    for (int i = 0; i < num_cluster; i++) {
        clusters.push_back(Cluster(num_cluster, dim_feature));
    }
    rougeStat = Stat();
}
Training::Training(int _numCluster, int _dim_feature) {
    // change to read config file to get num_cluster and dim_feature
    num_cluster = _numCluster;
    dim_feature = _dim_feature;
    SetVWParameter(0, 0.5, 1); // default setting
    pairDiffThreshold = 0;
    // featureFiles = vector<string>(num_cluster);

    for (int i = 0; i < num_cluster; i++) {
        clusters.push_back(Cluster(num_cluster, dim_feature));
        clusters[i].clusterId = i;
    }
}
void Walk::findWay(Sprite *spr) {
    if (!spr || spr == this)
        return;

    int x = spr->_x;
    int z = spr->_z;
    if (spr->_flags._east)
        x += spr->_w + _w / 2 - kWalkSide;
    else
        x -= _w / 2 - kWalkSide;

    findWay(Cluster(_vm, (x / kMapGridX),
                    ((z < kMapZCnt - kDistMax) ? (z + 1) : (z - 1))));
}
int main(int argc, char** argv) {
    pcl::PCDReader reader;
    pcl::PointCloud<PointT>::Ptr cloud(new pcl::PointCloud<PointT>);
    reader.read("inputCloud0.pcd", *cloud);

    std::vector<Box2DPoint> my_points = Cluster(cloud);

    for (std::vector<Box2DPoint>::const_iterator it = my_points.begin(); it != my_points.end(); ++it) {
        std::cout << it->x_left_down << " " << it->y_left_down << std::endl;
        std::cout << it->x_right_up << " " << it->y_right_up << "\n" << std::endl;
    }
    return 0;
}
void Walk::findWay(Cluster c) {
    if (c._pt == _here._pt)
        return;

    for (_findLevel = 1; _findLevel <= kMaxFindLevel; _findLevel++) {
        _target = _here._pt;
        int16 x = c._pt.x;
        int16 z = c._pt.y;

        if (find1Way(Cluster(_vm, x, z)))
            break;
    }

    _tracePtr = (_findLevel > kMaxFindLevel) ? -1 : (_findLevel - 1);
    if (_tracePtr < 0)
        noWay();
    _time = 1;
}
Resultado::Resultado(size_t k, size_t t, size_t n, size_t m, size_t q, size_t s, int id, double J, double CR) {
    for (size_t i = 0; i < k; i++) {
        this->cluster.push_back(Cluster(t, q));
    }
    this->m = m;
    this->s = s;

    this->U.resize(n);
    for (size_t i = 0; i < n; i++) {
        this->U[i].resize(k);
    }

    this->coeficiente.resize(k);
    for (size_t i = 0; i < k; i++) {
        this->coeficiente[i].resize(t);
    }

    init(id, J, CR);
    overallPrototype.resize(t);
}
vector<Float_t> Track::getLateralDeflectionFromExtrapolatedPosition(Int_t layer) {
    Cluster *clusterLastLayer = nullptr;
    Cluster *clusterLastLastLayer = nullptr;
    Cluster *clusterThisLayer = nullptr;
    Cluster extrapolatedClusterThisLayer;
    Int_t idxThis, idxLast, idxLastLast;
    vector<Float_t> deflection;

    idxThis = getClusterFromLayer(layer);
    idxLast = getClusterFromLayer(layer - 1);
    idxLastLast = getClusterFromLayer(layer - 2);

    if (idxThis < 0 || idxLast < 0) {
        deflection.push_back(0);
        deflection.push_back(0);
        return deflection;
    }

    clusterThisLayer = At(idxThis);
    clusterLastLayer = At(idxLast);

    if (idxLastLast < 0) {
        // No cluster two layers back: fall back to the raw difference
        deflection.push_back(clusterThisLayer->getXmm() - clusterLastLayer->getXmm());
        deflection.push_back(clusterThisLayer->getYmm() - clusterLastLayer->getYmm());
        return deflection;
    }

    clusterLastLastLayer = At(idxLastLast);

    // Extrapolate from the last two clusters and compare with the actual position
    Cluster slope(clusterLastLayer->getX() - clusterLastLastLayer->getX(),
                  clusterLastLayer->getY() - clusterLastLastLayer->getY());
    extrapolatedClusterThisLayer = Cluster(clusterLastLayer->getX() + slope.getX(),
                                           clusterLastLayer->getY() + slope.getY());

    deflection.push_back(clusterThisLayer->getXmm() - extrapolatedClusterThisLayer.getXmm());
    deflection.push_back(clusterThisLayer->getYmm() - extrapolatedClusterThisLayer.getYmm());
    return deflection;
}
void Stats::cluster(const char *name, int margin) {
    sort(points.begin(), points.end(), pointDegCompare);
    cout << "points sorted " << points.begin()->degree << endl;
    cout << "margin " << margin << endl;

    ofstream oFile;
    char fname[1024];
    sprintf(fname, "%s_margin_%d", name, margin);
    oFile.open(fname, fstream::out);
    // FILE *oFile = fopen(fname, "w");

    vector<Cluster> cluster;
    for (std::vector<RandVar>::iterator itPoint = points.begin(); itPoint != points.end(); ++itPoint) {
        double x1 = itPoint->dvr - itPoint->conf * margin;
        double x2 = itPoint->dvr + itPoint->conf * margin;
        int assignedTo = -1;
        int assignedCount = 0;

        // Count the existing clusters whose [min, max] interval overlaps [x1, x2]
        for (unsigned int i = 0; i < cluster.size(); i++) {
            double y1 = cluster[i].min;
            double y2 = cluster[i].max;
            if ((x1 <= y2) && (y1 <= x2)) {
                assignedTo = i;
                assignedCount++;
            }
        }

        if (assignedCount == 1) {
            oFile << itPoint->id << "\t" << itPoint->degree << "\t" << assignedTo << "\t"
                  << itPoint->meanInvVisit << "\t" << itPoint->dvr << "\t"
                  << itPoint->conf * margin << "\t0" << endl;
            oFile.flush();
        } else if (assignedCount == 0) {
            // No overlapping cluster: open a new one
            assignedTo = cluster.size();
            cluster.push_back(Cluster(assignedTo, x1, x2));
            oFile << itPoint->id << "\t" << itPoint->degree << "\t" << assignedTo << "\t"
                  << itPoint->meanInvVisit << "\t" << itPoint->dvr << "\t"
                  << itPoint->conf * margin << "\t1" << endl;
            oFile.flush();
        }
    }
    oFile.close();
}
void RefinedStart::Cluster(const MatType& data,
                           const size_t clusters,
                           arma::Row<size_t>& assignments) const {
    // Perform the Bradley-Fayyad refined start algorithm, and get initial
    // centroids back.
    arma::mat centroids;
    Cluster(data, clusters, centroids);

    // Turn the final centroids into assignments.
    assignments.set_size(data.n_cols);
    for (size_t i = 0; i < data.n_cols; ++i) {
        // Find the closest centroid to this point.
        double minDistance = std::numeric_limits<double>::infinity();
        size_t closestCluster = clusters;

        for (size_t j = 0; j < clusters; ++j) {
            // This is restricted to the L2 distance, and unfortunately it would take
            // a lot of refactoring and redesign to make this more general... we would
            // probably need to have KMeans take a template template parameter for the
            // initial partition policy.  It's not clear how to best do this.
            const double distance = metric::EuclideanDistance::Evaluate(data.col(i), centroids.col(j));

            if (distance < minDistance) {
                minDistance = distance;
                closestCluster = j;
            }
        }

        // Assign the point to its closest cluster.
        assignments[i] = closestCluster;
    }
}
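A short usage sketch for the overload above, under the assumption that this is mlpack's RefinedStart initialization policy (the member signature and the metric::EuclideanDistance call match mlpack's kmeans code); the random data here is purely illustrative:

#include <mlpack/core.hpp>
#include <mlpack/methods/kmeans/refined_start.hpp>

using namespace mlpack::kmeans;

int main() {
    // Cluster 1000 random 5-dimensional points (one point per column)
    // into 4 clusters, letting RefinedStart pick the initial centroids
    // and assign each point to the nearest one.
    arma::mat data(5, 1000, arma::fill::randu);
    arma::Row<size_t> assignments;

    RefinedStart refinedStart;  // default sampling parameters
    refinedStart.Cluster(data, 4, assignments);
    return 0;
}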
static std::vector<u_int32_t> Cluster(const std::vector<Datatype, Allocator>& data,
                                      std::function<double(const Datatype&, const Datatype&)>& distance_fn,
                                      std::function<Datatype(const std::vector<Datatype, Allocator>&)>& average_fn,
                                      const u_int32_t num_clusters,
                                      const bool do_preliminary_clustering = false) {
    assert(data.size() > 0);
    assert(num_clusters > 0);

    if (num_clusters == 1) {
        std::cerr << "[K-means clustering] Provided num_clusters = 1, returning default labels for cluster 0" << std::endl;
        return std::vector<u_int32_t>(data.size(), 0u);
    }

    // Prepare an RNG for cluster initialization
    auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
    std::mt19937_64 prng(seed);
    std::uniform_int_distribution<size_t> initialization_distribution(0u, data.size() - 1);

    // Initialize cluster centers
    std::vector<Datatype, Allocator> cluster_centers;

    // Make sure we have enough datapoints to do meaningful preliminary clustering
    bool enable_preliminary_clustering = do_preliminary_clustering;
    if (enable_preliminary_clustering) {
        const size_t subset_size = (size_t)ceil((double)data.size() * 0.1);
        if (subset_size >= (num_clusters * 5)) {
            enable_preliminary_clustering = true;
            std::cerr << "[K-means clustering] Preliminary clustering enabled, using subset of " << subset_size << " datapoints from " << data.size() << " total" << std::endl;
        } else {
            enable_preliminary_clustering = false;
            std::cerr << "[K-means clustering] Preliminary clustering disabled as input data is too small w.r.t. number of clusters" << std::endl;
        }
    }

    if (enable_preliminary_clustering) {
        // Select a random 10% of the input data
        const size_t subset_size = (size_t)ceil((double)data.size() * 0.1);

        // This makes sure we don't get duplicates
        std::map<size_t, u_int8_t> index_map;
        while (index_map.size() < subset_size) {
            const size_t random_index = initialization_distribution(prng);
            index_map[random_index] = 1u;
        }

        std::vector<Datatype, Allocator> random_subset;
        random_subset.reserve(subset_size);
        for (auto itr = index_map.begin(); itr != index_map.end(); ++itr) {
            if (itr->second == 1u) {
                const size_t random_index = itr->first;
                const Datatype& random_element = data[random_index];
                random_subset.push_back(random_element);
            }
        }
        assert(random_subset.size() == subset_size);

        // Run clustering on the subset
        std::vector<u_int32_t> random_subset_labels = Cluster(random_subset, distance_fn, average_fn, num_clusters, false);

        // Now we use the centers of the clusters to form the cluster centers
        cluster_centers = ComputeClusterCenters(random_subset, random_subset_labels, average_fn, num_clusters);
    } else {
        // This makes sure we don't get duplicates
        std::map<size_t, u_int8_t> index_map;
        while (index_map.size() < num_clusters) {
            const size_t random_index = initialization_distribution(prng);
            index_map[random_index] = 1u;
        }

        cluster_centers.reserve(num_clusters);
        for (auto itr = index_map.begin(); itr != index_map.end(); ++itr) {
            if (itr->second == 1u) {
                const size_t random_index = itr->first;
                const Datatype& random_element = data[random_index];
                cluster_centers.push_back(random_element);
            }
        }
        assert(cluster_centers.size() == num_clusters);
    }
    assert(cluster_centers.size() == num_clusters);

    // Run the first iteration of clustering
    std::vector<u_int32_t> cluster_labels = PerformSingleClusteringIteration(data, distance_fn, cluster_centers);

    bool converged = false;
    u_int32_t iteration = 1u;
    while (!converged) {
        // Update cluster centers
        cluster_centers = ComputeClusterCenters(data, cluster_labels, average_fn, num_clusters);

        // Cluster with the new centers
        std::vector<u_int32_t> new_cluster_labels = PerformSingleClusteringIteration(data, distance_fn, cluster_centers);

        // Check for convergence
        converged = CheckForConvergence(cluster_labels, new_cluster_labels);
        cluster_labels = new_cluster_labels;
        iteration++;
    }
    std::cerr << "[K-means clustering] Clustering converged after " << iteration << " iterations" << std::endl;
    return cluster_labels;
}
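A minimal caller sketch for the templated k-means routine above, clustering scalar doubles in the same translation unit. The lambdas are wrapped in named std::function objects because the routine takes them by non-const reference; the u_int32_t label type follows the snippet, and the data values are made up for illustration.

#include <cmath>
#include <functional>
#include <sys/types.h>
#include <vector>

int main() {
    // Two obvious groups of 1-D points (illustrative data only)
    std::vector<double> data = {0.10, 0.15, 0.20, 4.90, 5.00, 5.20};

    // Distance and averaging callbacks; named lvalues, since Cluster()
    // takes its std::function arguments by non-const reference
    std::function<double(const double&, const double&)> distance_fn =
        [](const double& a, const double& b) { return std::fabs(a - b); };
    std::function<double(const std::vector<double>&)> average_fn =
        [](const std::vector<double>& points) {
            double sum = 0.0;
            for (const double& p : points)
                sum += p;
            return sum / (double)points.size();
        };

    // Labels are parallel to 'data': one cluster id (0 or 1) per point
    std::vector<u_int32_t> labels = Cluster(data, distance_fn, average_fn, 2u);
    return 0;
}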
/**
 * Build/Create the cluster
 *
 * @return Cluster object
 */
static Cluster build() {
    return Cluster();
}
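A one-line usage sketch, assuming build() is a static factory exposed by the Cluster class itself (its enclosing class is not shown in the snippet):

Cluster cluster = Cluster::build();  // hypothetical call site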
LineLayout* VirtualFont::createLineLayout(const TextLine &line, boost::iterator_range<vector<TextRun>::const_iterator> range) {
    auto layout = new LineLayout(this, line.langHint, line.overallDirection);

    int averageCount = 0;
    map<hb_codepoint_t, Cluster> clusterMap;
    auto buffer = hb_buffer_create();

    for (auto &run : range) {
        clusterMap.clear();

        for (auto &font : getFontSet(run.language)) {
            if (font->reload()) {
                layout->maxHeight = std::max(layout->maxHeight, font->metrics.height);
                layout->maxAscent = std::max(layout->maxAscent, font->metrics.ascent);
                layout->maxDescent = std::max(layout->maxDescent, font->metrics.descent);
                layout->maxLineThickness = std::max(layout->maxLineThickness, font->metrics.lineThickness);
                layout->maxUnderlineOffset = std::max(layout->maxUnderlineOffset, font->metrics.underlineOffset);
                layout->averageStrikethroughOffset += font->metrics.strikethroughOffset;
                averageCount++;

                run.apply(line.text, buffer);
                hb_shape(font->hbFont, buffer, nullptr, 0);

                auto glyphCount = hb_buffer_get_length(buffer);
                auto glyphInfos = hb_buffer_get_glyph_infos(buffer, nullptr);
                auto glyphPositions = hb_buffer_get_glyph_positions(buffer, nullptr);

                bool hasMissingGlyphs = false;

                for (unsigned int i = 0; i < glyphCount; i++) {
                    auto codepoint = glyphInfos[i].codepoint;
                    auto cluster = glyphInfos[i].cluster;

                    auto it = clusterMap.find(cluster);
                    bool clusterFound = (it != clusterMap.end());

                    if (codepoint) {
                        if (clusterFound && (it->second.font != font)) {
                            continue; // CLUSTER FOUND, WITH ANOTHER FONT (E.G. SPACE)
                        } else {
                            auto offset = Vec2f(glyphPositions[i].x_offset, -glyphPositions[i].y_offset) * font->scale;
                            float advance = glyphPositions[i].x_advance * font->scale.x;

                            if (!properties.useMipmap) {
                                offset.x = snap(offset.x);
                                offset.y = snap(offset.y);
                                advance = snap(advance);
                            }

                            if (clusterFound) {
                                it->second.addShape(codepoint, offset, advance);
                            } else {
                                clusterMap.insert(make_pair(cluster, Cluster(font, run.tag, codepoint, offset, advance)));
                            }
                        }
                    } else if (!clusterFound) {
                        hasMissingGlyphs = true;
                    }
                }

                if (!hasMissingGlyphs) {
                    break; // NO NEED TO PROCEED TO THE NEXT FONT IN THE LIST
                }
            }
        }

        if (run.direction == HB_DIRECTION_RTL) {
            for (auto it = clusterMap.rbegin(); it != clusterMap.rend(); ++it) {
                layout->addCluster(it->second);
            }
        } else {
            for (auto it = clusterMap.begin(); it != clusterMap.end(); ++it) {
                layout->addCluster(it->second);
            }
        }
    }

    layout->averageStrikethroughOffset /= averageCount;
    hb_buffer_destroy(buffer);
    return layout;
}
PartialVolumeAnalysisClusteringCalculator::HelperStructPerformClusteringRetval*
PartialVolumeAnalysisClusteringCalculator::PerformClustering(mitk::Image::ConstPointer image, const MitkHistType *histogram, int classIdent, HelperStructPerformClusteringRetval* precResult) const {
    HelperStructPerformClusteringRetval *retval = new HelperStructPerformClusteringRetval();

    if (precResult == 0) {
        retval->hist = new HistType();
        retval->hist->InitByMitkHistogram(histogram);

        ParamsType params;
        params.Initialize(Cluster(*(retval->hist)));
        ClusterResultType result = CalculateCurves(params, retval->hist->xVals);
        Normalize(params, &result);

        retval->params = new ParamsType();
        retval->params->Initialize(&params);
        retval->result = new ClusterResultType(10);
        retval->result->Initialize(&result);
    } else {
        retval->params = new ParamsType();
        retval->params->Initialize(precResult->params);
        retval->result = new ClusterResultType(10);
        retval->result->Initialize(precResult->result);
    }

    VecType totalProbs = retval->result->combiVals;
    VecType pvProbs = retval->result->mixedVals[0];
    VecType fiberProbs;
    VecType nonFiberProbs;
    VecType interestingProbs;
    double p_fiber;
    double p_nonFiber;
    double p_interesting;

    // if (retval->params->means[0] < retval->params->means[1]) {
    fiberProbs = retval->result->vals[1];
    nonFiberProbs = retval->result->vals[0];
    p_fiber = retval->params->ps[1];
    p_nonFiber = retval->params->ps[0];
    // } else {
    //     fiberProbs = retval->result->vals[0];
    //     nonFiberProbs = retval->result->vals[1];
    //     p_fiber = retval->params->ps[0];
    //     p_nonFiber = retval->params->ps[1];
    // }

    switch (classIdent) {
    case 0:
        interestingProbs = nonFiberProbs;
        p_interesting = p_nonFiber;
        break;
    case 1:
        interestingProbs = pvProbs;
        p_interesting = 1 - p_fiber - p_nonFiber;
        break;
    case 2:
    default:
        interestingProbs = fiberProbs;
        p_interesting = p_fiber;
        break;
    }

    double sum = histogram->GetTotalFrequency();

    // initialize two histograms for class and total probabilities
    MitkHistType::MeasurementVectorType min(1);
    MitkHistType::MeasurementVectorType max(1);
    min.Fill(histogram->GetDimensionMins(0)[0]);
    max.Fill(histogram->GetDimensionMaxs(0)[histogram->GetDimensionMaxs(0).size() - 1]);

    MitkHistType::Pointer interestingHist = MitkHistType::New();
    interestingHist->SetMeasurementVectorSize(1);
    interestingHist->Initialize(histogram->GetSize(), min, max);
    MitkHistType::Iterator newIt = interestingHist->Begin();
    MitkHistType::Iterator newEnd = interestingHist->End();

    MitkHistType::Pointer totalHist = MitkHistType::New();
    totalHist->SetMeasurementVectorSize(1);
    totalHist->Initialize(histogram->GetSize(), min, max);
    MitkHistType::Iterator totalIt = totalHist->Begin();

    int i = 0;
    while (newIt != newEnd) {
        newIt.SetFrequency(interestingProbs(i) * sum);
        totalIt.SetFrequency(totalProbs(i) * sum);
        ++newIt;
        ++totalIt;
        ++i;
    }

    mitk::Image::Pointer outImage1 = mitk::Image::New();
    mitk::Image::Pointer outImage2 = mitk::Image::New();

    HelperStructClusteringResults clusterResults;
    clusterResults.interestingHist = interestingHist;
    clusterResults.totalHist = totalHist;
    clusterResults.p_interesting = p_interesting;

    AccessFixedDimensionByItk_3(image.GetPointer(), InternalGenerateProbabilityImage, 3, clusterResults, outImage1, outImage2);

    retval->clusteredImage = outImage1;
    retval->displayImage = outImage2;

    return retval;
}
void JetPythiaAnalysis::Loop() {
    if (fChain == 0)
        return;

    Long64_t nentries = fChain->GetEntriesFast();
    Long64_t nbytes = 0, nb = 0;

    // Define the jet-reconstruction algorithm and pass it the required parameter
    double R = 0.7;
    fastjet::JetDefinition jet_def(fastjet::kt_algorithm, R);

    // Define a histogram for the transverse momentum of the jets
    TH1F *h_JetPt = new TH1F("JetsPt", "Jet transverse momentum", 50, 0, 100);

    // Start the loop over simulated events
    for (Long64_t jentry = 0; jentry < nentries; jentry++) {
        Long64_t ientry = LoadTree(jentry);
        if (ientry < 0)
            break;
        nb = fChain->GetEntry(jentry);
        nbytes += nb;

        int np = 0;
        int max_part = particles_; // maximum number of particles

        // Define a particle container to pass to FastJet
        std::vector<fastjet::PseudoJet> particles;

        // The analysis itself starts here
        while (np < max_part) {
            // Keep only stable particles: those with no daughters
            if ((particles_fDaughter[np][0] == -1) && (particles_fDaughter[np][1] == -1)) {
                bool isDetectable = true;

                // Skip particles that, although stable, would not be directly detectable:
                // 12 = nu_e ; 14 = nu_mu ; 16 = nu_tau ; 10000022 = SUSY LSP / neutralino
                if (abs(particles_fPdgCode[np]) == 12 ||
                    abs(particles_fPdgCode[np]) == 14 ||
                    abs(particles_fPdgCode[np]) == 16 ||
                    abs(particles_fPdgCode[np]) == 10000022)
                    isDetectable = false;

                // Fill the container with the stable, detectable particles
                if (isDetectable)
                    particles.push_back(fastjet::PseudoJet(particles_fPx[np], particles_fPy[np], particles_fPz[np], particles_fE[np]));
            }
            ++np;
        }

        if (particles.size() <= 0)
            continue;

        // Now run FastJet: perform the jet identification on the particles
        fastjet::ClusterSequence Cluster(particles, jet_def);

        // The Cluster object holds the jets: store them in a jet container
        std::vector<fastjet::PseudoJet> jets = Cluster.inclusive_jets();

        // Loop over the reconstructed jets, extract the transverse momentum
        // and fill the histogram
        for (unsigned i = 0; i < jets.size(); i++) {
            h_JetPt->Fill(jets[i].perp());
        }

        // Clear the jet and particle containers in preparation for the next event
        jets.clear();
        particles.clear();
    } // end of the event loop

    // Draw the transverse-momentum distribution
    h_JetPt->Draw();
} // end of Loop()
int main(int argc, char **argv) {
    if (argc <= 1) {
        std::cerr << "Please provide video filename" << std::endl;
        return 1;
    }

    std::string video_filename = argv[1];

    cv::VideoCapture vc;
    vc.open(video_filename);

    cv::Mat image;
    for (int i_frame = 0; vc.read(image); ++i_frame) {
        int width = image.cols;
        int height = image.rows;
        int threshold = 200;

        Timer timer;

        std::vector<Cluster> rects;
        std::vector<int> rect_idx;
        cv::Mat blob_map(height, width, CV_32SC1, cv::Scalar(-1));

        // Single-pass connected-component labelling of bright pixels
        for (int y = 1; y < height - 1; ++y) {
            for (int x = 1; x < width - 1; ++x) {
                const cv::Vec3b& c = image.at<cv::Vec3b>(y, x);
                if (c[0] < threshold || c[1] < threshold || c[2] < threshold)
                    continue;

                int b_left = blob_map.at<int>(y, x - 1);
                int b_up = blob_map.at<int>(y - 1, x);
                int& b = blob_map.at<int>(y, x);

                if (b_left >= 0) {
                    if (b_up >= 0 && rect_idx[b_left] != rect_idx[b_up]) {
                        // Fuse the two clusters meeting at this pixel
                        Cluster& r1 = rects[rect_idx[b_left]];
                        Cluster& r2 = rects[rect_idx[b_up]];
                        r1.fuseInto(r2);
                        r2.invalidate();
                        b = b_left;
                        rect_idx[b_up] = rect_idx[b_left];
                    } else {
                        b = b_left;
                    }
                } else if (b_up >= 0) {
                    b = b_up;
                } else {
                    // Start a new cluster at this pixel
                    b = rects.size();
                    rect_idx.push_back(b);
                    rects.push_back(Cluster(x, y));
                }

                Cluster& r = rects[rect_idx[b]];
                r.addPoint(x, y);

                // Pre-mark the neighbour to the right and several rows below
                // (note: the writes below are not guarded against the bottom border)
                blob_map.at<int>(y, x + 1) = b;
                blob_map.at<int>(y + 1, x) = b;
                blob_map.at<int>(y + 2, x) = b;
                blob_map.at<int>(y + 3, x) = b;
                blob_map.at<int>(y + 4, x) = b;
                blob_map.at<int>(y + 5, x) = b;
                blob_map.at<int>(y + 6, x) = b;
            }
        }

        timer.stop();
        std::cout << i_frame << " " << timer.getElapsedTimeInMilliSec() << " ms" << std::endl;

        // Draw the bounding boxes of the surviving clusters
        for (unsigned int j = 0; j < rects.size(); ++j) {
            const Cluster& r = rects[j];
            if (!r.valid())
                continue;
            if (r.height() > 4 * r.width())
                cv::rectangle(image, cv::Rect(r.x_min, r.y_min, r.x_max - r.x_min, r.y_max - r.y_min), cv::Scalar(0, 0, 255), 2);
            else
                cv::rectangle(image, cv::Rect(r.x_min, r.y_min, r.x_max - r.x_min, r.y_max - r.y_min), cv::Scalar(255, 0, 0));
        }

        cv::imshow("video", image);
        cv::waitKey(30);
    }

    vc.release();
    return 0;
}
//---------------------------------------------------------------------
void SubMesh::generateExtremes(size_t count)
{
    extremityPoints.clear();

    if (count == 0)
        return;

    /* Currently this uses just one criterion: the points must be
     * as far as possible from each other. This at least ensures
     * that the extreme points characterise the submesh in as much
     * detail as possible.
     */

    VertexData *vert = useSharedVertices ? parent->sharedVertexData : vertexData;
    const VertexElement *poselem = vert->vertexDeclaration->findElementBySemantic(VES_POSITION);
    HardwareVertexBufferSharedPtr vbuf = vert->vertexBufferBinding->getBuffer(poselem->getSource());
    uint8 *vdata = (uint8 *)vbuf->lock(HardwareBuffer::HBL_READ_ONLY);
    size_t vsz = vbuf->getVertexSize();

    vector<Cluster>::type boxes;
    boxes.reserve(count);

    // First of all, find min and max bounding box of the submesh
    boxes.push_back(Cluster());

    if (indexData->indexCount > 0)
    {
        uint elsz = indexData->indexBuffer->getType() == HardwareIndexBuffer::IT_32BIT ? 4 : 2;
        uint8 *idata = (uint8 *)indexData->indexBuffer->lock(
            indexData->indexStart * elsz, indexData->indexCount * elsz,
            HardwareIndexBuffer::HBL_READ_ONLY);

        for (size_t i = 0; i < indexData->indexCount; i++)
        {
            int idx = (elsz == 2) ? ((uint16 *)idata)[i] : ((uint32 *)idata)[i];
            boxes[0].mIndices.insert(idx);
        }
        indexData->indexBuffer->unlock();
    }
    else
    {
        // just insert all indexes
        for (size_t i = vertexData->vertexStart; i < vertexData->vertexCount; i++)
        {
            boxes[0].mIndices.insert(static_cast<int>(i));
        }
    }

    boxes[0].computeBBox(poselem, vdata, vsz);

    // Remember the geometrical center of the submesh
    Vector3 center = (boxes[0].mMax + boxes[0].mMin) * 0.5;

    // Ok, now loop until we have as many boxes as we need extremes
    while (boxes.size() < count)
    {
        // Find the largest box with more than one vertex :)
        Cluster *split_box = NULL;
        Real split_volume = -1;
        for (vector<Cluster>::type::iterator b = boxes.begin(); b != boxes.end(); ++b)
        {
            if (b->empty())
                continue;
            Real v = b->volume();
            if (v > split_volume)
            {
                split_volume = v;
                split_box = &*b;
            }
        }

        // If we have nothing to split, break
        if (!split_box)
            break;

        // Find the coordinate axis to split the box into two
        int split_axis = 0;
        Real split_length = split_box->mMax.x - split_box->mMin.x;
        for (int i = 1; i < 3; i++)
        {
            Real l = split_box->mMax[i] - split_box->mMin[i];
            if (l > split_length)
            {
                split_length = l;
                split_axis = i;
            }
        }

        // Now split the box into halves
        boxes.push_back(split_box->split(split_axis, poselem, vdata, vsz));
    }

    // Fine, now from every cluster choose the vertex that is most
    // distant from the geometrical center and from other extremes.
    for (vector<Cluster>::type::const_iterator b = boxes.begin(); b != boxes.end(); ++b)
    {
        Real rating = 0;
        Vector3 best_vertex;

        for (set<uint32>::type::const_iterator i = b->mIndices.begin(); i != b->mIndices.end(); ++i)
        {
            float *v;
            poselem->baseVertexPointerToElement(vdata + *i * vsz, &v);

            Vector3 vv(v[0], v[1], v[2]);
            Real r = (vv - center).squaredLength();

            for (vector<Vector3>::type::const_iterator e = extremityPoints.begin(); e != extremityPoints.end(); ++e)
                r += (*e - vv).squaredLength();

            if (r > rating)
            {
                rating = r;
                best_vertex = vv;
            }
        }

        if (rating > 0)
            extremityPoints.push_back(best_vertex);
    }

    vbuf->unlock();
}
void Context::CreateNewCluster(const StreamVideo::VideoFrame* pvf_stop) {
#if 0
    odbgstream os;
    os << "\nCreateNewCluster: pvf_stop=";

    if (pvf_stop == 0)
        os << "NULL";
    else
        os << pvf_stop->GetTimecode();

    os << endl;
#endif

    assert(m_pVideo);

    const StreamVideo::frames_t& vframes = m_pVideo->GetFrames();
    assert(!vframes.empty());

    clusters_t& cc = m_clusters;
    //const Cluster* const pPrevCluster = cc.empty() ? 0 : &cc.back();

    cc.push_back(Cluster());
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();

    {
        const StreamVideo::VideoFrame* const pvf = vframes.front();
        assert(pvf);
        assert(pvf != pvf_stop);

        const ULONG vt = pvf->GetTimecode();

        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty())
            c.m_timecode = vt;
        else {
            const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
            const StreamAudio::AudioFrame* const paf = aframes.front();
            const ULONG at = paf->GetTimecode();

            c.m_timecode = (at <= vt) ? at : vt;
        }
    }

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);  //patch size later, during close
#elif 0
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#else
    m_file.Serialize4UInt(0x1FFFFFFF);
#endif

    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode);

    const __int64 off = c.m_pos - m_segment_pos - 12;
    assert(off >= 0);

#if 0  //TODO: disabled until we're sure this is allowed per the WebM std
    m_file.WriteID1(0xA7);  //Position ID
    m_file.Write1UInt(8);   //payload size is 8 bytes
    m_file.Serialize8UInt(off);  //payload

    if (pPrevCluster) {
        const __int64 size = c.m_pos - pPrevCluster->m_pos;
        assert(size > 0);

        m_file.WriteID1(0xAB);  //PrevSize ID
        m_file.Write1UInt(8);   //payload size is 8 bytes
        m_file.Serialize8UInt(size);  //payload
    }
#endif

    ULONG cFrames = 0;
    LONG vtc_prev = -1;

    StreamVideo::frames_t& rframes = m_pVideo->GetKeyFrames();

    while (!vframes.empty()) {
        typedef StreamVideo::frames_t::const_iterator video_iter_t;

        video_iter_t video_iter = vframes.begin();
        const video_iter_t video_iter_end = vframes.end();

        const StreamVideo::VideoFrame* const pvf = *video_iter++;
        assert(pvf);

        if (pvf == pvf_stop)
            break;

        const StreamVideo::VideoFrame* const pvf_next = (video_iter == video_iter_end) ? 0 : *video_iter;

        //const bool bLastVideo = (pvf_next == pvf_stop);

        const ULONG vt = pvf->GetTimecode();
        assert(vt >= c.m_timecode);
        assert((pvf_stop == 0) || (vt < pvf_stop->GetTimecode()));

        if ((m_pAudio == 0) || m_pAudio->GetFrames().empty()) {
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;
            continue;
        }

        const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
        typedef StreamAudio::frames_t::const_iterator audio_iter_t;

        audio_iter_t i = aframes.begin();
        const audio_iter_t j = aframes.end();

        const StreamAudio::AudioFrame* const paf = *i++;  //1st audio frame
        assert(paf);

        const ULONG at = paf->GetTimecode();
        assert(at >= c.m_timecode);

        if (vt < at) {
            if (!rframes.empty() && (pvf == rframes.front()))
                rframes.pop_front();

            const ULONG vtc = pvf->GetTimecode();

            WriteVideoFrame(c, cFrames, pvf_stop, pvf_next, vtc_prev);

            vtc_prev = vtc;
            continue;
        }

        //At this point, we have (at least) one audio frame,
        //and (at least) one video frame.  They could have an
        //equal timecode, or the audio might be smaller than
        //the video.  Our desire is that the largest audio
        //frame less than the pvf_stop go on the next cluster,
        //which means any video frames greater than the audio
        //frame will also go on the next cluster.

        if (pvf_stop == 0) {  //means write all extant frames
            //We know that this audio frame is less or equal to
            //the video frame, so write it now.
            WriteAudioFrame(c, cFrames);
            continue;
        }

        //At this point, we still have an audio frame and a
        //video frame, neither of which has been written yet.

        const ULONG vt_stop = pvf_stop->GetTimecode();

        if (at >= vt_stop)  //weird
            break;

        if (i == j)  //weird
            break;

        const StreamAudio::AudioFrame* const paf_stop = *i;  //2nd audio frame
        assert(paf_stop);

        const ULONG at_stop = paf_stop->GetTimecode();

        if (at_stop >= vt_stop)
            break;

        WriteAudioFrame(c, cFrames);  //write 1st audio frame
    }

    const __int64 pos = m_file.GetPosition();

    const __int64 size_ = pos - c.m_pos - 8;
    assert(size_ <= ULONG_MAX);

    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);
    m_file.SetPosition(pos);
}
void Context::CreateNewClusterAudioOnly() {
    assert(m_pAudio);

    const StreamAudio::frames_t& aframes = m_pAudio->GetFrames();
    assert(!aframes.empty());

    const StreamAudio::AudioFrame* const paf_first = aframes.front();
    assert(paf_first);

    const StreamAudio::AudioFrame& af_first = *paf_first;
    const ULONG af_first_time = af_first.GetTimecode();

    clusters_t& cc = m_clusters;
    assert(cc.empty() || (af_first_time > cc.back().m_timecode));

    //const Cluster* const pPrevCluster = cc.empty() ? 0 : &cc.back();

    cc.push_back(Cluster());
    Cluster& c = cc.back();

    c.m_pos = m_file.GetPosition();
    c.m_timecode = af_first_time;

    m_file.WriteID4(0x1F43B675);  //Cluster ID

#if 0
    m_file.Write4UInt(0);  //patch size later, during close
#else
    m_file.SetPosition(4, STREAM_SEEK_CUR);
#endif

    m_file.WriteID1(0xE7);
    m_file.Write1UInt(4);
    m_file.Serialize4UInt(c.m_timecode);

    const __int64 off = c.m_pos - m_segment_pos - 12;
    assert(off >= 0);

#if 0  //disable this until we're sure it's allowed per the WebM std
    m_file.WriteID1(0xA7);  //Position ID
    m_file.Write1UInt(8);   //payload size is 8 bytes
    m_file.Serialize8UInt(off);  //payload

    if (pPrevCluster) {
        const __int64 size = c.m_pos - pPrevCluster->m_pos;
        assert(size > 0);

        m_file.WriteID1(0xAB);  //PrevSize ID
        m_file.Write1UInt(8);   //payload size is 8 bytes
        m_file.Serialize8UInt(size);  //payload
    }
#endif

    ULONG cFrames = 0;

    //TODO: must write cues for audio

    while (!aframes.empty()) {
        const StreamAudio::AudioFrame* const paf = aframes.front();
        assert(paf);

        const ULONG t = paf->GetTimecode();
        assert(t >= c.m_timecode);

        const LONG dt = LONG(t) - LONG(c.m_timecode);

        if (dt > 1000)
            break;

        WriteAudioFrame(c, cFrames);
    }

    const __int64 pos = m_file.GetPosition();

    const __int64 size_ = pos - c.m_pos - 8;
    assert(size_ <= ULONG_MAX);

    const ULONG size = static_cast<ULONG>(size_);

    m_file.SetPosition(c.m_pos + 4);
    m_file.Write4UInt(size);
    m_file.SetPosition(pos);
}
FLODCluster FLODCluster::operator-(const FLODCluster& Other) const {
    FLODCluster Cluster(*this);
    Cluster.SubtractCluster(Other);
    // Return the subtracted copy; returning *this would discard the result
    return Cluster;
}
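A hypothetical caller of the operator above; Combined and Other stand in for FLODCluster instances built elsewhere, and both operands are left unchanged:

FLODCluster Remainder = Combined - Other;  // Remainder holds the subtracted copy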