void Foam::isoCutFace::calcSubFaceCentreAndArea() { const label nPoints = subFacePoints_.size(); // If the face is a triangle, do a direct calculation for efficiency // and to avoid round-off error-related problems if (nPoints == 3) { subFaceCentre_ = sum(subFacePoints_)/scalar(3); subFaceArea_ = 0.5 *( (subFacePoints_[1] - subFacePoints_[0]) ^(subFacePoints_[2] - subFacePoints_[0]) ); } else if (nPoints > 0) { vector sumN(Zero); scalar sumA(0.0); vector sumAc(Zero); const point fCentre = sum(subFacePoints_)/scalar(nPoints); for (label pi = 0; pi < nPoints; pi++) { const point& nextPoint = subFacePoints_[subFacePoints_.fcIndex(pi)]; vector c = subFacePoints_[pi] + nextPoint + fCentre; vector n = (nextPoint - subFacePoints_[pi])^(fCentre - subFacePoints_[pi]); scalar a = magSqr(n); sumN += n; sumA += a; sumAc += a*c; } // This is to deal with zero-area faces. Mark very small faces // to be detected in e.g., processorPolyPatch. if (sumA < ROOTVSMALL) { subFaceCentre_ = fCentre; subFaceArea_ = vector::zero; } else { subFaceCentre_ = (1.0/3.0)*sumAc/sumA; subFaceArea_ = 0.5*sumN; } } subFaceCentreAndAreaIsCalculated_ = true; }
/*
 * Benchmark driver: runs three array-summation strategies (sumA/sumB/sumC)
 * over the same 4x128 test array, each against a freshly created simulated
 * cache, and prints the resulting cache statistics for comparison.
 *
 * NOTE(review): each cache_new() call overwrites the previous `cache`
 * handle without an intervening release call -- presumably the cache module
 * reclaims it (or the leak is accepted for this short-lived benchmark);
 * confirm against the cache API.
 */
int main() {
    /* Populate the test matrix once, before any tracked accesses. */
    fillArray(test_array, 4, 128);

    /* Fresh cache per strategy so hit/miss counters are directly comparable. */
    cache = cache_new(CACHE_SIZE_IN_BLOCKS, block_size, 1, CACHE_REPLACEMENTPOLICY_LRU);
    print_stats(sumA(test_array, 4, 128));

    cache = cache_new(CACHE_SIZE_IN_BLOCKS, block_size, 1, CACHE_REPLACEMENTPOLICY_LRU);
    print_stats(sumB(test_array, 4, 128));

    cache = cache_new(CACHE_SIZE_IN_BLOCKS, block_size, 1, CACHE_REPLACEMENTPOLICY_LRU);
    print_stats(sumC(test_array, 4, 128));

    return 0;  /* explicit success status */
}
/// R-MAT Generator. The model is based on the recursive descent into a 2x2
/// matrix [A,B; C, 1-(A+B+C)].
/// See: R-MAT Generator: A Recursive Model for Graph Mining.
/// D. Chakrabarti, Y. Zhan and C. Faloutsos, in SIAM Data Mining 2004.
/// URL: http://www.cs.cmu.edu/~deepay/mywww/papers/siam04.pdf
/// @param Nodes  number of nodes to create (node ids 0..Nodes-1)
/// @param Edges  number of distinct, non-self-loop directed edges to generate
/// @param A,B,C  quadrant probabilities; the 4th quadrant is 1-(A+B+C)
/// @param Rnd    random source (also used to add +-50% noise per depth level)
/// @return       the generated directed graph
PNGraph GenRMat(const int& Nodes, const int& Edges, const double& A, const double& B, const double& C, TRnd& Rnd) {
  PNGraph GraphPt = TNGraph::New();
  TNGraph& Graph = *GraphPt;
  Graph.Reserve(Nodes, Edges);
  IAssert(A+B+C < 1.0);
  int rngX, rngY, offX, offY;
  int Depth=0, Collisions=0, Cnt=0, PctDone=0;
  const int EdgeGap = Edges / 100 + 1; // print progress roughly every 1% of edges
  // sum of parameters (probabilities): per-depth cumulative thresholds.
  // Each recursion depth gets its own noise-perturbed copy of (A,B,C,D),
  // renormalised so the four quadrant probabilities sum to 1 at that depth.
  TVec<double> sumA(128, 0), sumAB(128, 0), sumAC(128, 0), sumABC(128, 0); // up to 2^128 vertices ~ 3.4e38
  for (int i = 0; i < 128; i++) {
    const double a = A * (Rnd.GetUniDev() + 0.5);
    const double b = B * (Rnd.GetUniDev() + 0.5);
    const double c = C * (Rnd.GetUniDev() + 0.5);
    const double d = (1.0 - (A+B+C)) * (Rnd.GetUniDev() + 0.5);
    const double abcd = a+b+c+d;
    sumA.Add(a / abcd);          // P(quadrant A)
    sumAB.Add((a+b) / abcd);     // P(A or B) = marginal "top half"
    sumAC.Add((a+c) / abcd);     // P(A or C) = marginal "left half"
    sumABC.Add((a+b+c) / abcd);  // P(not D)
  }
  // nodes
  for (int node = 0; node < Nodes; node++) {
    IAssert(Graph.AddNode(-1) == node);
  }
  // edges
  for (int edge = 0; edge < Edges; ) {
    rngX = Nodes; rngY = Nodes; offX = 0; offY = 0; Depth = 0;
    // recurse the matrix: halve the (rngX x rngY) range each step until a
    // single (source, destination) cell remains at (offX, offY)
    while (rngX > 1 || rngY > 1) {
      const double RndProb = Rnd.GetUniDev();
      if (rngX>1 && rngY>1) {
        if (RndProb < sumA[Depth]) { rngX/=2; rngY/=2; }                                // quadrant A
        else if (RndProb < sumAB[Depth]) { offX+=rngX/2; rngX-=rngX/2; rngY/=2; }       // quadrant B
        else if (RndProb < sumABC[Depth]) { offY+=rngY/2; rngX/=2; rngY-=rngY/2; }      // quadrant C
        else { offX+=rngX/2; offY+=rngY/2; rngX-=rngX/2; rngY-=rngY/2; }                // quadrant D
      } else if (rngX>1) { // row vector: only X left to resolve; use the
        // marginal left-half probability (a+c)
        if (RndProb < sumAC[Depth]) { rngX/=2; rngY/=2; }
        else { offX+=rngX/2; rngX-=rngX/2; rngY/=2; }
      } else if (rngY>1) { // column vector: only Y left; marginal top-half
        // probability (a+b)
        if (RndProb < sumAB[Depth]) { rngX/=2; rngY/=2; }
        else { offY+=rngY/2; rngX/=2; rngY-=rngY/2; }
      } else { Fail; // unreachable: loop condition guarantees rngX>1 or rngY>1
      }
      Depth++;
    }
    // add edge (self-loops and duplicate edges are rejected and re-drawn)
    const int NId1 = offX;
    const int NId2 = offY;
    if (NId1 != NId2 && ! Graph.IsEdge(NId1, NId2)) {
      Graph.AddEdge(NId1, NId2);
      if (++Cnt > EdgeGap) { Cnt=0; printf("\r %d%% edges", ++PctDone); }
      edge++;
    } else {
      Collisions++;
    }
  }
  printf("\r RMat: nodes:%d, edges:%d, Iterations:%d, Collisions:%d (%.1f%%).\n", Nodes, Edges, Edges+Collisions, Collisions, 100*Collisions/double(Edges+Collisions));
  Graph.Defrag(); // compact adjacency vectors after all insertions
  return GraphPt;
}
// for obtaining a fast empirical distribution of mean differences between two sets of columns for randomly sampled 'clusters' SEXP emp_diffs(SEXP matrix_, SEXP nrow_, SEXP const colsA_, SEXP const colsB_, SEXP nsample_, SEXP niter_){ SEXP diffs = NULL; try{ srand(time(NULL)); PROTECT(nrow_ = AS_INTEGER(nrow_)); int const nrow = *INTEGER_POINTER(nrow_); UNPROTECT(1); PROTECT(colsA_); int * const colsA = INTEGER_POINTER(colsA_); int const ncolA = LENGTH(colsA_); PROTECT(colsB_); int * const colsB = INTEGER_POINTER(colsB_); int const ncolB = LENGTH(colsB_); PROTECT(nsample_ = AS_INTEGER(nsample_)); int const nsample = *INTEGER_POINTER(nsample_); UNPROTECT(1); PROTECT(niter_ = AS_INTEGER(niter_)); int const niter = *INTEGER_POINTER(niter_); UNPROTECT(1); PROTECT(matrix_ = AS_NUMERIC(matrix_)); const double * const matrix = NUMERIC_POINTER(matrix_); PROTECT(diffs = NEW_NUMERIC(niter)); double * const diffsp = NUMERIC_POINTER(diffs); t_float val(0), diff(0), sumA(0), sumB(0); int row(0), i(0), j(0); for(int iter(0); iter<niter; ++iter){ // compute mean over nsample rows for column indices colsA // R matrices are filled BY COLUMN diff=0; for(i=0; i<nsample; ++i){ row = rand() % nrow; sumA=0; sumB=0; for(j=0; j<ncolA; ++j){ // R is 1-indexed val = matrix[row+nrow*(colsA[j]-1)]; if(ISNA(val)) continue; sumA += val; } for(j=0; j<ncolB; ++j){ val = matrix[row+nrow*(colsB[j]-1)]; if(ISNA(val)) continue; sumB += val; } diff += sumB/ncolB - sumA/ncolA; } diffsp[iter] = diff/nsample; } UNPROTECT(1); // matrix_ UNPROTECT(1); // colsA_ UNPROTECT(1); // colsB_ UNPROTECT(1); // diffs } catch (const std::bad_alloc&) { Rf_error( "Memory overflow."); } catch(const std::exception& e){ Rf_error( e.what() ); } catch(const nan_error&){ Rf_error("NaN dissimilarity value."); } catch(...){ Rf_error( "C++ exception (unknown reason)." ); } return diffs; }
TEST(AggregateTests, PlainSumCountDistinctTest) {
  /*
   * SELECT SUM(a), COUNT(b), COUNT(DISTINCT b) from table
   *
   * Feeds two populated tile groups through a plain (no GROUP BY) aggregate
   * executor via a mocked child and checks the single output row.
   */
  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;
  // Create a table and wrap it in logical tiles
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();
  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuple_count, false));
  // 2 * tuple_count rows => two tile groups to aggregate over.
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(), 2 * tuple_count, false, true, true);
  txn_manager.CommitTransaction();
  // Wrap each tile group separately; the mock child returns them in order.
  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0), txn_id));
  std::unique_ptr<executor::LogicalTile> source_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(1), txn_id));
  // (1-5) Setup plan node
  // 1) Set up group-by columns (empty => plain aggregation, one output row)
  std::vector<oid_t> group_by_columns;
  // 2) Set up project info: map output columns 0..2 directly from the
  //    aggregate result tuple (source tuple index 1, columns 0..2).
  planner::ProjectInfo::DirectMapList direct_map_list = {
      {0, {1, 0}}, {1, {1, 1}}, {2, {1, 2}}};
  auto proj_info = new planner::ProjectInfo(planner::ProjectInfo::TargetList(),
                                            std::move(direct_map_list));
  // 3) Set up unique aggregates
  planner::AggregatePlan::AggTerm sumA(EXPRESSION_TYPE_AGGREGATE_SUM,
                                       expression::TupleValueFactory(0, 0),
                                       false);  // distinct flag off
  planner::AggregatePlan::AggTerm countB(EXPRESSION_TYPE_AGGREGATE_COUNT,
                                         expression::TupleValueFactory(0, 1),
                                         false);  // distinct flag off
  planner::AggregatePlan::AggTerm countDistinctB(
      EXPRESSION_TYPE_AGGREGATE_COUNT, expression::TupleValueFactory(0, 1),
      true);  // Flag distinct
  std::vector<planner::AggregatePlan::AggTerm> agg_terms;
  agg_terms.push_back(sumA);
  agg_terms.push_back(countB);
  agg_terms.push_back(countDistinctB);
  // 4) Set up predicate (empty)
  expression::AbstractExpression* predicate = nullptr;
  // 5) Create output table schema: reuse source column types; column 1's
  //    type is used for both COUNT outputs (indices {0, 1, 1}).
  auto data_table_schema = data_table.get()->GetSchema();
  std::vector<oid_t> set = {0, 1, 1};
  std::vector<catalog::Column> columns;
  for (auto column_index : set) {
    columns.push_back(data_table_schema->GetColumn(column_index));
  }
  auto output_table_schema = new catalog::Schema(columns);
  // OK) Create the plan node
  // NOTE(review): proj_info and output_table_schema are raw `new`ed --
  // presumably ownership passes to the plan node; confirm AggregatePlan
  // deletes them.
  planner::AggregatePlan node(proj_info, predicate, std::move(agg_terms),
                              std::move(group_by_columns), output_table_schema,
                              AGGREGATE_TYPE_PLAIN);
  // Create and set up executor
  auto txn2 = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn2));
  executor::AggregateExecutor executor(&node, context.get());
  MockExecutor child_executor;
  executor.AddChild(&child_executor);
  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));
  // Two successful pulls (one per tile), then end-of-input.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  // release(): the executor takes ownership of the returned tiles.
  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()))
      .WillOnce(Return(source_logical_tile2.release()));
  EXPECT_TRUE(executor.Init());
  EXPECT_TRUE(executor.Execute());
  txn_manager.CommitTransaction();
  /* Verify result */
  std::unique_ptr<executor::LogicalTile> result_tile(executor.GetOutput());
  EXPECT_TRUE(result_tile.get() != nullptr);
  // Expected SUM(a) == 50 and COUNT(b) == 10 follow from PopulateTable's
  // fill pattern -- confirm against ExecutorTestsUtil if it changes.
  EXPECT_TRUE(result_tile->GetValue(0, 0)
                  .OpEquals(ValueFactory::GetIntegerValue(50))
                  .IsTrue());
  EXPECT_TRUE(result_tile->GetValue(0, 1)
                  .OpEquals(ValueFactory::GetIntegerValue(10))
                  .IsTrue());
  // COUNT(DISTINCT b): only bounded (<= 3), presumably because the exact
  // number of distinct values depends on the (randomized) populate flags.
  EXPECT_TRUE(result_tile->GetValue(0, 2)
                  .OpLessThanOrEqual(ValueFactory::GetIntegerValue(3))
                  .IsTrue());
}