/**
 * @brief Convenience function to pass a single logical tile through an
 *        executor which has only one child.
 *
 * Wires a mock child under @p executor that yields @p source_logical_tile
 * exactly once, runs Init/Execute, and checks the executor produces one
 * non-null output tile and then reports exhaustion.
 *
 * @param executor Executor to pass logical tile through.
 * @param source_logical_tile Logical tile to pass through executor
 *        (ownership transfers to the executor tree).
 *
 * @return Pointer to processed logical tile; caller takes ownership.
 */
executor::LogicalTile *ExecutorTestsUtil::ExecuteTile(
    executor::AbstractExecutor *executor,
    executor::LogicalTile *source_logical_tile) {
  MockExecutor child_executor;
  executor->AddChild(&child_executor);

  // Uneventful init...
  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));
  EXPECT_TRUE(executor->Init());

  // Where the main work takes place...
  // The child produces exactly one tile, then reports exhaustion.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile));

  EXPECT_TRUE(executor->Execute());
  std::unique_ptr<executor::LogicalTile> result_logical_tile(
      executor->GetOutput());
  EXPECT_THAT(result_logical_tile, NotNull());

  // A second Execute() must report exhaustion.  EXPECT_FALSE states the
  // intent directly (the old EXPECT_THAT(..., false) abused a plain value
  // as a matcher and produced a poorer failure message).
  EXPECT_FALSE(executor->Execute());

  return result_logical_tile.release();
}
// Projects a single column out of a freshly populated one-tile-group table
// and verifies (via RunTest) that the projection executor emits exactly one
// output tile.
TEST_F(ProjectionTests, BasicTest) {
  MockExecutor child_executor;
  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));
  // Child yields exactly one tile, then reports exhaustion.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  size_t tile_size = 5;

  // Create a table and wrap it in logical tile
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tile_size));
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(), tile_size, false,
                                   false, false);
  txn_manager.CommitTransaction();

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0)));

  // Ownership of the wrapped tile passes to the executor tree via release().
  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()));

  // Create the plan node
  planner::ProjectInfo::TargetList target_list;
  planner::ProjectInfo::DirectMapList direct_map_list;

  /////////////////////////////////////////////////////////
  // PROJECTION 0
  /////////////////////////////////////////////////////////

  // construct schema: the output carries only the table's first column
  std::vector<catalog::Column> columns;
  auto orig_schema = data_table.get()->GetSchema();
  columns.push_back(orig_schema->GetColumn(0));
  std::shared_ptr<const catalog::Schema> schema(new catalog::Schema(columns));

  // direct map: output column 0 <- (input tile 0, column 0); no computed
  // target expressions, so target_list stays empty
  planner::ProjectInfo::DirectMap direct_map =
      std::make_pair(0, std::make_pair(0, 0));
  direct_map_list.push_back(direct_map);

  std::unique_ptr<const planner::ProjectInfo> project_info(
      new planner::ProjectInfo(std::move(target_list),
                               std::move(direct_map_list)));

  planner::ProjectionPlan node(std::move(project_info), schema);

  // Create and set up executor
  executor::ProjectionExecutor executor(&node, nullptr);
  executor.AddChild(&child_executor);

  // Expect exactly one result tile out of the projection
  RunTest(executor, 1);
}
// Insert a logical tile into a table TEST_F(MutateTests, InsertTest) { auto &txn_manager = concurrency::OptimisticTxnManager::GetInstance(); // We are going to insert a tile group into a table in this test std::unique_ptr<storage::DataTable> source_data_table( ExecutorTestsUtil::CreateAndPopulateTable()); std::unique_ptr<storage::DataTable> dest_data_table( ExecutorTestsUtil::CreateTable()); const std::vector<storage::Tuple *> tuples; EXPECT_EQ(source_data_table->GetTileGroupCount(), 3); EXPECT_EQ(dest_data_table->GetTileGroupCount(), 1); auto txn = txn_manager.BeginTransaction(); std::unique_ptr<executor::ExecutorContext> context( new executor::ExecutorContext(txn)); planner::InsertPlan node(dest_data_table.get(), nullptr); executor::InsertExecutor executor(&node, context.get()); MockExecutor child_executor; executor.AddChild(&child_executor); // Uneventful init... EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true)); // Will return one tile. EXPECT_CALL(child_executor, DExecute()) .WillOnce(Return(true)) .WillOnce(Return(false)); // Construct input logical tile auto physical_tile_group = source_data_table->GetTileGroup(0); auto tile_count = physical_tile_group->GetTileCount(); std::vector<std::shared_ptr<storage::Tile> > physical_tile_refs; for (oid_t tile_itr = 0; tile_itr < tile_count; tile_itr++) physical_tile_refs.push_back( physical_tile_group->GetTileReference(tile_itr)); std::unique_ptr<executor::LogicalTile> source_logical_tile( executor::LogicalTileFactory::WrapTiles(physical_tile_refs)); EXPECT_CALL(child_executor, GetOutput()) .WillOnce(Return(source_logical_tile.release())); EXPECT_TRUE(executor.Init()); EXPECT_TRUE(executor.Execute()); EXPECT_FALSE(executor.Execute()); txn_manager.CommitTransaction(); // We have inserted all the tuples in this logical tile EXPECT_EQ(dest_data_table->GetTileGroupCount(), 1); }
/*
 * Shared start-up for every fssync-debug subcommand: bring the volume
 * package up just far enough to use FSSYNC, then record the -reason and
 * -programtype command-line options into the per-command state.
 *
 * Always returns 0; hard failures exit the process instead.
 */
static int common_prolog(struct cmd_syndesc * as, struct fssync_state * state)
{
    struct cmd_item *ti;
    VolumePackageOptions opts;

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
        Exit(1);
    }
#endif

    VOptDefaults(debugUtility, &opts);
    if (VInitVolumePackage2(debugUtility, &opts)) {
        /* VInitVolumePackage2 can fail on e.g. partition attachment errors,
         * but we don't really care, since all we're doing is trying to use
         * FSSYNC */
        fprintf(stderr, "errors encountered initializing volume package, but "
                "trying to continue anyway\n");
    }
    DInit(1);

    if ((ti = as->parms[COMMON_PARMS_OFFSET].items)) {  /* -reason */
        state->reason = atoi(ti->data);
    } else {
        /* no -reason given: use the catch-all FSSYNC reason code */
        state->reason = FSYNC_WHATEVER;
    }

    if ((ti = as->parms[COMMON_PARMS_OFFSET+1].items)) {        /* -programtype */
        if (!strcmp(ti->data, "fileServer")) {
            programType = fileServer;
        } else if (!strcmp(ti->data, "volumeUtility")) {
            programType = volumeUtility;
        } else if (!strcmp(ti->data, "salvager")) {
            programType = salvager;
        } else if (!strcmp(ti->data, "salvageServer")) {
            programType = salvageServer;
        } else if (!strcmp(ti->data, "volumeServer")) {
            programType = volumeServer;
        } else if (!strcmp(ti->data, "volumeSalvager")) {
            programType = volumeSalvager;
        } else {
            /* unknown keyword: fall back to a raw numeric ProgramType */
            programType = (ProgramType) atoi(ti->data);
        }
    }

    /* connect to the fileserver's FSSYNC channel */
    VConnectFS();

    return 0;
}
/**
 * @brief Initializes the executor.
 *
 * Performs the initialization work common to all executors: every child in
 * the execution tree is initialized recursively first, then the
 * subclass-specific DInit() hook runs for this node. Initialization stops
 * at the first failure, which is logged with the offending plan's info.
 *
 * @return true on success, false otherwise.
 */
bool AbstractExecutor::Init() {
  // Children first: a node cannot start before its inputs are ready.
  for (auto child : children_) {
    if (!child->Init()) {
      LOG_ERROR("Initialization failed in child executor with plan id : %s\n",
                child->node_->GetInfo().c_str());
      return false;
    }
  }

  // All children are up; run this executor's own initialization hook.
  if (!DInit()) {
    LOG_ERROR("Initialization failed in executor with plan id : %s\n",
              node_->GetInfo().c_str());
    return false;
  }

  return true;
}
/*
 * Entry point for the directory test tool.  The first argument selects an
 * operation by its second character (e.g. "-l" lists, "-s" salvages);
 * remaining arguments are passed straight to the chosen helper.
 */
int main(int argc, char **argv)
{
    char op;

    DInit(600);

    /* skip the program name */
    argc--;
    argv++;
    if (argc == 0)
        Usage();

    /* grab the option letter and advance to its operands */
    op = (*argv++)[1];
    if (op == 'l')
        ListDir(*argv);
    else if (op == 'c')
        CheckDir(*argv);
    else if (op == 's')
        SalvageDir(*argv, argv[1]);
    else if (op == 'f')
        CRTest(*argv, argv[1], atoi(argv[2]));
    else if (op == 'd')
        DelTest(*argv, argv[1]);
    else if (op == 'r')
        LookupDir(*argv, argv[1]);
    else if (op == 'a')
        AddEntry(*argv, argv[1]);
    else
        Usage();

    exit(0);
}
/*
 * Shared start-up for every subcommand: initialize the volume package
 * (without connecting to the fileserver at init time), then record the
 * -reason and -programtype command-line options into the command state.
 *
 * Always returns 0; hard failures exit the process instead.
 */
static int common_prolog(struct cmd_syndesc * as, struct state * state)
{
    register struct cmd_item *ti;

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
        Exit(1);
    }
#endif

    VInitVolumePackage(debugUtility, 1, 1, DONT_CONNECT_FS, 0);
    DInit(1);

    if ((ti = as->parms[COMMON_PARMS_OFFSET].items)) {  /* -reason */
        state->reason = atoi(ti->data);
    }
    if ((ti = as->parms[COMMON_PARMS_OFFSET+1].items)) {        /* -programtype */
        if (!strcmp(ti->data, "fileServer")) {
            programType = fileServer;
        } else if (!strcmp(ti->data, "volumeUtility")) {
            programType = volumeUtility;
        } else if (!strcmp(ti->data, "salvager")) {
            programType = salvager;
        } else if (!strcmp(ti->data, "salvageServer")) {
            programType = salvageServer;
        } else {
            /* unknown keyword: treat the argument as a raw numeric value */
            programType = (ProgramType) atoi(ti->data);
        }
    }

    /* connect to the fileserver's FSSYNC channel */
    VConnectFS();

    return 0;
}
// Brings the engine up: logs system information, registers the window
// class, creates the render window (windowed, fullscreen, or embedded in
// hwndParent), and initializes the timer, input, graphics and sound
// subsystems.  Returns false (after _PostError / System_Shutdown) if any
// step fails.
bool CALL HGE_Impl::System_Initiate()
{
	OSVERSIONINFO	os_ver;
	SYSTEMTIME		tm;
	MEMORYSTATUS	mem_st;
	WNDCLASS		winclass;
	int				width, height;

	// Log system info
	System_Log("HGE Started..\n");

	System_Log("HGE version: %X.%X", HGE_VERSION>>8, HGE_VERSION & 0xFF);
	GetLocalTime(&tm);
	System_Log("Date: %02d.%02d.%d, %02d:%02d:%02d\n", tm.wDay, tm.wMonth, tm.wYear, tm.wHour, tm.wMinute, tm.wSecond);

	System_Log("Application: %s",szWinTitle);
	os_ver.dwOSVersionInfoSize=sizeof(os_ver);
	GetVersionEx(&os_ver);
	System_Log("OS: Windows %ld.%ld.%ld",os_ver.dwMajorVersion,os_ver.dwMinorVersion,os_ver.dwBuildNumber);

	GlobalMemoryStatus(&mem_st);
	System_Log("Memory: %ldK total, %ldK free\n",mem_st.dwTotalPhys/1024L,mem_st.dwAvailPhys/1024L);

	// Register window class
	winclass.style = CS_DBLCLKS | CS_OWNDC | CS_HREDRAW | CS_VREDRAW;
	winclass.lpfnWndProc = WindowProc;
	winclass.cbClsExtra = 0;
	winclass.cbWndExtra = 0;
	winclass.hInstance = hInstance;
	winclass.hCursor = LoadCursor(NULL, IDC_ARROW);
	winclass.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);
	winclass.lpszMenuName = NULL;
	winclass.lpszClassName = WINDOW_CLASS_NAME;
	// Use the application icon if one was supplied, else the stock icon.
	if(szIcon) winclass.hIcon = LoadIcon(hInstance, szIcon);
	else winclass.hIcon = LoadIcon(NULL, IDI_APPLICATION);

	if (!RegisterClass(&winclass)) {
		_PostError("Can't register window class");
		return false;
	}

	// Create window
	// Windowed rect: requested client size plus fixed frame and caption,
	// centered on the desktop.
	width=nScreenWidth + GetSystemMetrics(SM_CXFIXEDFRAME)*2;
	height=nScreenHeight + GetSystemMetrics(SM_CYFIXEDFRAME)*2 + GetSystemMetrics(SM_CYCAPTION);

	rectW.left=(GetSystemMetrics(SM_CXSCREEN)-width)/2;
	rectW.top=(GetSystemMetrics(SM_CYSCREEN)-height)/2;
	rectW.right=rectW.left+width;
	rectW.bottom=rectW.top+height;
	styleW=WS_POPUP|WS_CAPTION|WS_SYSMENU|WS_MINIMIZEBOX|WS_VISIBLE; //WS_OVERLAPPED | WS_SYSMENU | WS_MINIMIZEBOX;

	// Fullscreen rect covers the whole screen.
	rectFS.left=0;
	rectFS.top=0;
	rectFS.right=nScreenWidth;
	rectFS.bottom=nScreenHeight;
	styleFS=WS_POPUP|WS_VISIBLE; //WS_POPUP

	// Embedded in a parent window: force windowed mode with child style.
	if(hwndParent)
	{
		rectW.left=0;
		rectW.top=0;
		rectW.right=nScreenWidth;
		rectW.bottom=nScreenHeight;
		styleW=WS_CHILD|WS_VISIBLE;
		bWindowed=true;
	}

	if(bWindowed)
		hwnd = CreateWindowEx(0, WINDOW_CLASS_NAME, szWinTitle, styleW,
				rectW.left, rectW.top, rectW.right-rectW.left, rectW.bottom-rectW.top,
				hwndParent, NULL, hInstance, NULL);
	else
		// Fullscreen window is created zero-sized; presumably the display
		// mode switch sizes it later — confirm in _GfxInit.
		hwnd = CreateWindowEx(WS_EX_TOPMOST, WINDOW_CLASS_NAME, szWinTitle, styleFS,
				0, 0, 0, 0,
				NULL, NULL, hInstance, NULL);
	if (!hwnd)
	{
		_PostError("Can't create window");
		return false;
	}

	ShowWindow(hwnd, SW_SHOW);

	// Init subsystems
	timeBeginPeriod(1);		// request 1 ms resolution for timeGetTime()
	Random_Seed();
	_InitPowerStatus();
	_InputInit();
	if(!_GfxInit()) { System_Shutdown(); return false; }
	if(!_SoundInit()) { System_Shutdown(); return false; }

	System_Log("Init done.\n");

	// Reset timing state for the main loop.
	fTime=0.0f;
	t0=t0fps=timeGetTime();
	dt=cfps=0;
	nFPS=0;

	// Show splash
#ifdef DEMO
	bool			(*func)();
	bool			(*rfunc)();
	HWND			hwndTmp;

	if(pHGE->bDMO)
	{
		Sleep(200);
		// Save the user's frame/render callbacks, run the demo splash
		// loop with DFrame, then restore them.
		func=(bool(*)())pHGE->System_GetStateFunc(HGE_FRAMEFUNC);
		rfunc=(bool(*)())pHGE->System_GetStateFunc(HGE_RENDERFUNC);
		hwndTmp=hwndParent; hwndParent=0;
		pHGE->System_SetStateFunc(HGE_FRAMEFUNC, DFrame);
		pHGE->System_SetStateFunc(HGE_RENDERFUNC, 0);
		DInit();
		pHGE->System_Start();
		DDone();
		hwndParent=hwndTmp;
		pHGE->System_SetStateFunc(HGE_FRAMEFUNC, func);
		pHGE->System_SetStateFunc(HGE_RENDERFUNC, rfunc);
	}
#endif

	// Done
	return true;
}
/*
 * Command handler for voldump: parses the partition / volume id / output
 * file / verbosity / from-time options, attaches the disk partitions,
 * resolves the requested partition, and hands the named volume header off
 * to HandleVolume() for dumping.
 *
 * Returns 0 on success, a ktime error code on a bad -time value; other
 * argument errors exit the process.
 */
static int handleit(struct cmd_syndesc *as, void *arock)
{
    register struct cmd_item *ti;
    int err = 0;
    int volumeId = 0;
    char *partName = 0;
    char *fileName = NULL;
    struct DiskPartition64 *partP = NULL;
    char name1[128];
    char tmpPartName[20];
    int fromtime = 0;
    afs_int32 code;

#ifndef AFS_NT40_ENV
#if 0
    /* root check deliberately disabled */
    if (geteuid() != 0) {
        fprintf(stderr, "voldump must be run as root; sorry\n");
        exit(1);
    }
#endif
#endif

    if ((ti = as->parms[0].items))      /* partition name */
        partName = ti->data;
    if ((ti = as->parms[1].items))      /* volume id */
        volumeId = atoi(ti->data);
    if ((ti = as->parms[2].items))      /* output file */
        fileName = ti->data;
    if ((ti = as->parms[3].items))      /* verbose flag */
        verbose = 1;
    if (as->parms[4].items && strcmp(as->parms[4].items->data, "0")) {
        /* incremental dump: parse the from-time; "0" means full dump */
        code = ktime_DateToInt32(as->parms[4].items->data, &fromtime);
        if (code) {
            fprintf(STDERR, "failed to parse date '%s' (error=%d))\n",
                    as->parms[4].items->data, code);
            return code;
        }
    }

    DInit(10);

    err = VAttachPartitions();
    if (err) {
        fprintf(stderr, "%d partitions had errors during attach.\n", err);
    }

    if (partName) {
        if (strlen(partName) == 1) {
            /* single letter shorthand: expand 'x' to "/vicepx" */
            if (partName[0] >= 'a' && partName[0] <= 'z') {
                strcpy(tmpPartName, "/vicepa");
                tmpPartName[6] = partName[0];
                partP = VGetPartition(tmpPartName, 0);
            }
        } else {
            partP = VGetPartition(partName, 0);
        }
        if (!partP) {
            fprintf(stderr,
                    "%s is not an AFS partition name on this server.\n",
                    partName);
            exit(1);
        }
    }

    if (!volumeId) {
        fprintf(stderr, "Must specify volume id!\n");
        exit(1);
    }

    if (!partP) {
        fprintf(stderr, "must specify vice partition.\n");
        exit(1);
    }

    /* Build the on-disk volume header file name from the volume id. */
    (void)afs_snprintf(name1, sizeof name1, VFORMAT, (unsigned long)volumeId);
    HandleVolume(partP, name1, fileName, fromtime);
    return 0;
}
// Drives one join test: builds a 3-tile-group left table and a 2-tile-group
// right table, feeds them through mock scan executors into the requested
// join executor (nested-loop, merge, or hash), and — for BASIC_TEST — checks
// the expected tuple counts per join type.
void ExecuteJoinTest(PlanNodeType join_algorithm, PelotonJoinType join_type,
                     oid_t join_test_type) {
  //===--------------------------------------------------------------------===//
  // Mock table scan executors
  //===--------------------------------------------------------------------===//

  MockExecutor left_table_scan_executor, right_table_scan_executor;

  // Create a table and wrap it in logical tile
  size_t tile_group_size = TESTS_TUPLES_PER_TILEGROUP;
  size_t left_table_tile_group_count = 3;
  size_t right_table_tile_group_count = 2;

  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();

  // Left table has 3 tile groups
  std::unique_ptr<storage::DataTable> left_table(
      ExecutorTestsUtil::CreateTable(tile_group_size));
  ExecutorTestsUtil::PopulateTable(
      txn, left_table.get(), tile_group_size * left_table_tile_group_count,
      false, false, false);

  // Right table has 2 tile groups
  std::unique_ptr<storage::DataTable> right_table(
      ExecutorTestsUtil::CreateTable(tile_group_size));
  ExecutorTestsUtil::PopulateTable(
      txn, right_table.get(), tile_group_size * right_table_tile_group_count,
      false, false, false);

  txn_manager.CommitTransaction();

  //std::cout << (*left_table);
  //std::cout << (*right_table);

  // Wrap the input tables with logical tiles
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(0),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(1),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> left_table_logical_tile3(
      executor::LogicalTileFactory::WrapTileGroup(left_table->GetTileGroup(2),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> right_table_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(
          right_table->GetTileGroup(0), txn_id));
  std::unique_ptr<executor::LogicalTile> right_table_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(
          right_table->GetTileGroup(1), txn_id));

  // Left scan executor returns logical tiles from the left table
  EXPECT_CALL(left_table_scan_executor, DInit()).WillOnce(Return(true));

  //===--------------------------------------------------------------------===//
  // Setup left table
  //===--------------------------------------------------------------------===//
  if(join_test_type == BASIC_TEST) {
    // Three tiles, then exhaustion; ownership passes via release().
    EXPECT_CALL(left_table_scan_executor, DExecute())
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(false));

    EXPECT_CALL(left_table_scan_executor, GetOutput())
        .WillOnce(Return(left_table_logical_tile1.release()))
        .WillOnce(Return(left_table_logical_tile2.release()))
        .WillOnce(Return(left_table_logical_tile3.release()));
  }

  // Right scan executor returns logical tiles from the right table
  EXPECT_CALL(right_table_scan_executor, DInit()).WillOnce(Return(true));

  //===--------------------------------------------------------------------===//
  // Setup right table
  //===--------------------------------------------------------------------===//
  if(join_test_type == BASIC_TEST) {
    // Two tiles, then exhaustion.
    EXPECT_CALL(right_table_scan_executor, DExecute())
        .WillOnce(Return(true))
        .WillOnce(Return(true))
        .WillOnce(Return(false));

    EXPECT_CALL(right_table_scan_executor, GetOutput())
        .WillOnce(Return(right_table_logical_tile1.release()))
        .WillOnce(Return(right_table_logical_tile2.release()));
  }

  //===--------------------------------------------------------------------===//
  // Setup join plan nodes and executors and run them
  //===--------------------------------------------------------------------===//

  oid_t result_tuple_count = 0;
  oid_t tuples_with_null = 0;
  auto projection = JoinTestsUtil::CreateProjection();

  // Construct predicate
  expression::AbstractExpression *predicate =
      JoinTestsUtil::CreateJoinPredicate();

  // Differ based on join algorithm
  switch (join_algorithm) {
    case PLAN_NODE_TYPE_NESTLOOP: {
      // Create nested loop join plan node.
      planner::NestedLoopJoinPlan nested_loop_join_node(join_type, predicate,
                                                        projection);

      // Run the nested loop join executor
      executor::NestedLoopJoinExecutor nested_loop_join_executor(
          &nested_loop_join_node, nullptr);

      // Construct the executor tree
      nested_loop_join_executor.AddChild(&left_table_scan_executor);
      nested_loop_join_executor.AddChild(&right_table_scan_executor);

      // Run the nested loop join executor
      EXPECT_TRUE(nested_loop_join_executor.Init());
      while (nested_loop_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            nested_loop_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          //std::cout << (*result_logical_tile);
        }
      }
    } break;

    case PLAN_NODE_TYPE_MERGEJOIN: {
      // Create join clauses
      std::vector<planner::MergeJoinPlan::JoinClause> join_clauses;
      join_clauses = CreateJoinClauses();

      // Create merge join plan node
      planner::MergeJoinPlan merge_join_node(join_type, predicate, projection,
                                             join_clauses);

      // Construct the merge join executor
      executor::MergeJoinExecutor merge_join_executor(&merge_join_node,
                                                      nullptr);

      // Construct the executor tree
      merge_join_executor.AddChild(&left_table_scan_executor);
      merge_join_executor.AddChild(&right_table_scan_executor);

      // Run the merge join executor
      EXPECT_TRUE(merge_join_executor.Init());
      while (merge_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            merge_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          //std::cout << (*result_logical_tile);
        }
      }
    } break;

    case PLAN_NODE_TYPE_HASHJOIN: {
      // Create hash plan node
      // Hash on the right table's second column (attr index 1).
      expression::AbstractExpression *right_table_attr_1 =
          new expression::TupleValueExpression(1, 1);

      std::vector<std::unique_ptr<const expression::AbstractExpression> >
          hash_keys;
      hash_keys.emplace_back(right_table_attr_1);

      // Create hash plan node
      planner::HashPlan hash_plan_node(hash_keys);

      // Construct the hash executor
      executor::HashExecutor hash_executor(&hash_plan_node, nullptr);

      // Create hash join plan node.
      planner::HashJoinPlan hash_join_plan_node(join_type, predicate,
                                                projection);

      // Construct the hash join executor
      executor::HashJoinExecutor hash_join_executor(&hash_join_plan_node,
                                                    nullptr);

      // Construct the executor tree: the right scan feeds the hash build
      // side, which feeds the probe-side join.
      hash_join_executor.AddChild(&left_table_scan_executor);
      hash_join_executor.AddChild(&hash_executor);

      hash_executor.AddChild(&right_table_scan_executor);

      // Run the hash_join_executor
      EXPECT_TRUE(hash_join_executor.Init());
      while (hash_join_executor.Execute() == true) {
        std::unique_ptr<executor::LogicalTile> result_logical_tile(
            hash_join_executor.GetOutput());

        if (result_logical_tile != nullptr) {
          result_tuple_count += result_logical_tile->GetTupleCount();
          tuples_with_null +=
              CountTuplesWithNullFields(result_logical_tile.get());
          // std::cout << (*result_logical_tile);
        }
      }
    } break;

    default:
      throw Exception("Unsupported join algorithm : " +
                      std::to_string(join_algorithm));
      break;
  }

  //===--------------------------------------------------------------------===//
  // Execute test
  //===--------------------------------------------------------------------===//

  if(join_test_type == BASIC_TEST) {
    // Check output
    switch (join_type) {
      case JOIN_TYPE_INNER:
        EXPECT_EQ(result_tuple_count, 10);
        EXPECT_EQ(tuples_with_null, 0);
        break;

      case JOIN_TYPE_LEFT:
        EXPECT_EQ(result_tuple_count, 15);
        EXPECT_EQ(tuples_with_null, 5);
        break;

      case JOIN_TYPE_RIGHT:
        EXPECT_EQ(result_tuple_count, 10);
        EXPECT_EQ(tuples_with_null, 0);
        break;

      case JOIN_TYPE_OUTER:
        EXPECT_EQ(result_tuple_count, 15);
        EXPECT_EQ(tuples_with_null, 5);
        break;

      default:
        throw Exception("Unsupported join type : " +
                        std::to_string(join_type));
        break;
    }
  }
}
/*
 * Main command handler for the salvager.  Parses all command-line options
 * into the global salvage flags, configures logging (file or syslog),
 * initializes the volume package, takes the salvage lock, and then salvages
 * either every attached partition, one partition, or a single volume
 * depending on -partition / -volumeid.
 *
 * Returns 0; fatal errors exit the process via Exit().
 */
static int handleit(struct cmd_syndesc *as, void *arock)
{
    struct CmdLine *cmdline = (struct CmdLine*)arock;
    struct cmd_item *ti;
    char pname[100], *temp;
    afs_int32 seenpart = 0, seenvol = 0;
    VolumeId vid = 0;
    ProgramType pt;
#ifdef FAST_RESTART
    afs_int32 seenany = 0;
#endif
    char *filename = NULL;
    struct logOptions logopts;
    VolumePackageOptions opts;
    struct DiskPartition64 *partP;

    memset(&logopts, 0, sizeof(logopts));

#ifdef AFS_SGI_VNODE_GLUE
    if (afs_init_kernel_config(-1) < 0) {
        printf
            ("Can't determine NUMA configuration, not starting salvager.\n");
        exit(1);
    }
#endif

#ifdef FAST_RESTART
    /* With fast restart, refuse to do anything unless at least one option
     * was given explicitly. */
    {
        afs_int32 i;
        for (i = 0; i < CMD_MAXPARMS; i++) {
            if (as->parms[i].items) {
                seenany = 1;
                break;
            }
        }
    }
    if (!seenany) {
        printf
            ("Exiting immediately without salvage. "
             "Look into the FileLog to find volumes which really need to be salvaged!\n");
        Exit(0);
    }
#endif /* FAST_RESTART */

    if ((ti = as->parms[0].items)) {    /* -partition */
        seenpart = 1;
        strncpy(pname, ti->data, 100);
    }
    if ((ti = as->parms[1].items)) {    /* -volumeid */
        char *end;
        unsigned long vid_l;
        if (!seenpart) {
            printf
                ("You must also specify '-partition' option with the '-volumeid' option\n");
            exit(-1);
        }
        seenvol = 1;
        vid_l = strtoul(ti->data, &end, 10);
        /* reject non-numeric, overflowing, or out-of-range volume ids */
        if (vid_l >= MAX_AFS_UINT32 || vid_l == ULONG_MAX || *end != '\0') {
            Log("salvage: invalid volume id specified; salvage aborted\n");
            Exit(1);
        }
        vid = (VolumeId)vid_l;
    }
    if (as->parms[2].items)     /* -debug */
        debug = 1;
    if (as->parms[3].items)     /* -nowrite */
        Testing = 1;
    if (as->parms[4].items)     /* -inodes */
        ListInodeOption = 1;
    if (as->parms[5].items || as->parms[21].items)      /* -force, -f */
        ForceSalvage = 1;
    if (as->parms[6].items)     /* -oktozap */
        OKToZap = 1;
    if (as->parms[7].items)     /* -rootinodes */
        ShowRootFiles = 1;
    if (as->parms[8].items)     /* -RebuildDirs */
        RebuildDirs = 1;
    if (as->parms[9].items)     /* -ForceReads */
        forceR = 1;
    if ((ti = as->parms[10].items)) {   /* -Parallel # */
        temp = ti->data;
        /* "all<N>" means one salvage per disk; the rest is the count */
        if (strncmp(temp, "all", 3) == 0) {
            PartsPerDisk = 1;
            temp += 3;
        }
        if (strlen(temp) != 0) {
            Parallel = atoi(temp);
            if (Parallel < 1)
                Parallel = 1;
            if (Parallel > MAXPARALLEL) {
                printf("Setting parallel salvages to maximum of %d \n",
                       MAXPARALLEL);
                Parallel = MAXPARALLEL;
            }
        }
    }
    if ((ti = as->parms[11].items)) {   /* -tmpdir */
        DIR *dirp;
        tmpdir = ti->data;
        /* verify the directory is usable before committing to it */
        dirp = opendir(tmpdir);
        if (!dirp) {
            printf
                ("Can't open temporary placeholder dir %s; using current partition \n",
                 tmpdir);
            tmpdir = NULL;
        } else
            closedir(dirp);
    }
    if ((ti = as->parms[12].items))     /* -showlog */
        ShowLog = 1;
    if ((ti = as->parms[13].items)) {   /* -showsuid */
        Testing = 1;
        ShowSuid = 1;
        Showmode = 1;
    }
    if ((ti = as->parms[14].items)) {   /* -showmounts */
        Testing = 1;
        Showmode = 1;
        ShowMounts = 1;
    }
    if ((ti = as->parms[15].items)) {   /* -orphans */
        if (Testing)
            orphans = ORPH_IGNORE;
        else if (strcmp(ti->data, "remove") == 0
                 || strcmp(ti->data, "r") == 0)
            orphans = ORPH_REMOVE;
        else if (strcmp(ti->data, "attach") == 0
                 || strcmp(ti->data, "a") == 0)
            orphans = ORPH_ATTACH;
    }

    if ((ti = as->parms[16].items)) {   /* -syslog */
        /* -syslog conflicts with both -showlog and -datelogs */
        if (ShowLog) {
            fprintf(stderr, "Invalid options: -syslog and -showlog are exclusive.\n");
            Exit(1);
        }
        if ((ti = as->parms[18].items)) {       /* -datelogs */
            fprintf(stderr, "Invalid option: -syslog and -datelogs are exclusive.\n");
            Exit(1);
        }
#ifndef HAVE_SYSLOG
        /* Do not silently ignore. */
        fprintf(stderr, "Invalid option: -syslog is not available on this platform.\n");
        Exit(1);
#else
        logopts.lopt_dest = logDest_syslog;
        logopts.lopt_tag = "salvager";
        if ((ti = as->parms[17].items)) /* -syslogfacility */
            logopts.lopt_facility = atoi(ti->data);
        else
            logopts.lopt_facility = LOG_DAEMON; /* default value */
#endif
    } else {
        /* default: log to a file, optionally timestamped via -datelogs */
        logopts.lopt_dest = logDest_file;
        if ((ti = as->parms[18].items)) {       /* -datelogs */
            int code = TimeStampLogFile(&filename);
            if (code != 0) {
                fprintf(stderr, "Failed to format log file name for -datelogs; code=%d\n", code);
                Exit(code);
            }
            logopts.lopt_filename = filename;
        } else {
            logopts.lopt_filename = AFSDIR_SERVER_SLVGLOG_FILEPATH;
        }
    }
    OpenLog(&logopts);
    SetupLogSignals();

    free(filename);     /* Free string created by -datelogs, if one. */

    Log("%s\n", cml_version_number);
    LogCommandLine(cmdline->argc, cmdline->argv, "SALVAGER", SalvageVersion,
                   "STARTING AFS", Log);

#ifdef FAST_RESTART
    if (ti = as->parms[19].items) {     /* -DontSalvage */
        char *msg = "Exiting immediately without salvage. Look into the FileLog to find volumes which really need to be salvaged!";
        Log("%s\n", msg);
        printf("%s\n", msg);
        Exit(0);
    }
#endif

    /* Note: if seenvol we initialize this as a standard volume utility: this has the
     * implication that the file server may be running; negotations have to be made with
     * the file server in this case to take the read write volume and associated read-only
     * volumes off line before salvaging */
#ifdef AFS_NT40_ENV
    if (seenvol) {
        if (afs_winsockInit() < 0) {
            ReportErrorEventAlt(AFSEVT_SVR_WINSOCK_INIT_FAILED, 0,
                                AFSDIR_SALVAGER_FILE, 0);
            Log("Failed to initailize winsock, exiting.\n");
            Exit(1);
        }
    }
#endif
    if (seenvol) {
        pt = volumeSalvager;
    } else {
        pt = salvager;
    }
    VOptDefaults(pt, &opts);
    if (VInitVolumePackage2(pt, &opts)) {
        Log("errors encountered initializing volume package; salvage aborted\n");
        Exit(1);
    }

    /* defer lock until we init volume package */
    if (get_salvage_lock) {
        if (seenvol && AskDAFS())       /* support forceDAFS */
            ObtainSharedSalvageLock();
        else
            ObtainSalvageLock();
    }

    /*
     * Ok to defer this as Exit will clean up and no real work is done
     * init'ing volume package
     */
    if (seenvol) {
        /* single-volume salvage: make sure this binary matches the running
         * fileserver's attach model (DAFS vs non-DAFS) */
        char *msg = NULL;
#ifdef AFS_DEMAND_ATTACH_FS
        if (!AskDAFS()) {
            msg = "The DAFS dasalvager cannot be run with a non-DAFS fileserver. Please use 'salvager'.";
        }
        if (!msg && !as->parms[20].items) {
            msg = "The standalone salvager cannot be run concurrently with a Demand Attach Fileserver. Please use 'salvageserver -client <partition> <volume id>' to manually schedule volume salvages with the salvageserver (new versions of 'bos salvage' automatically do this for you). Or, if you insist on using the standalone salvager, add the -forceDAFS flag to your salvager command line.";
        }
#else
        if (AskDAFS()) {
            msg = "The non-DAFS salvager cannot be run with a Demand Attach Fileserver. Please use 'salvageserver -client <partition> <volume id>' to manually schedule volume salvages with the salvageserver (new versions of 'bos salvage' automatically do this for you). Or, if you insist on using the standalone salvager, run dasalvager with the -forceDAFS flag.";
        }
#endif

        if (msg) {
            Log("%s\n", msg);
            printf("%s\n", msg);
            Exit(1);
        }
    }

    DInit(10);
#ifdef AFS_NT40_ENV
    /* child salvage job: inherit the partition from the parent if none given */
    if (myjob.cj_number != NOT_CHILD) {
        if (!seenpart) {
            seenpart = 1;
            (void)strcpy(pname, myjob.cj_part);
        }
    }
#endif
    if (seenpart == 0) {
        /* no -partition: salvage every attached partition in parallel */
        for (partP = DiskPartitionList; partP; partP = partP->next) {
            SalvageFileSysParallel(partP);
        }
        SalvageFileSysParallel(0);
    } else {
        partP = VGetPartition(pname, 0);
        if (!partP) {
            Log("salvage: Unknown or unmounted partition %s; salvage aborted\n", pname);
            Exit(1);
        }
        if (!seenvol)
            SalvageFileSys(partP, 0);
        else {
            /* Salvage individual volume */
            SalvageFileSys(partP, vid);
        }
    }
    return (0);
}
// Exercises the hash-aggregate executor with no aggregate terms — i.e. a
// pure DISTINCT over all four columns, with the output reordered to
// (d, a, b, c) via the projection's direct map.
TEST(AggregateTests, HashDistinctTest) {
  /*
   * SELECT d, a, b, c FROM table GROUP BY a, b, c, d;
   */
  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;

  // Create a table and wrap it in logical tiles
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();
  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuple_count, false));
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(), 2 * tuple_count,
                                   false, true, true);  // let it be random
  txn_manager.CommitTransaction();

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> source_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(1),
                                                  txn_id));

  // (1-5) Setup plan node

  // 1) Set up group-by columns
  std::vector<oid_t> group_by_columns = {0, 1, 2, 3};

  // 2) Set up project info: direct map reorders input (a,b,c,d) -> (d,a,b,c)
  planner::ProjectInfo::DirectMapList direct_map_list = {
      {0, {0, 3}}, {1, {0, 0}}, {2, {0, 1}}, {3, {0, 2}}};

  auto proj_info = new planner::ProjectInfo(planner::ProjectInfo::TargetList(),
                                            std::move(direct_map_list));

  // 3) Set up unique aggregates (empty)
  std::vector<planner::AggregatePlan::AggTerm> agg_terms;

  // 4) Set up predicate (empty)
  expression::AbstractExpression* predicate = nullptr;

  // 5) Create output table schema: same columns, reordered to match the map
  auto data_table_schema = data_table.get()->GetSchema();
  std::vector<oid_t> set = {3, 0, 1, 2};
  std::vector<catalog::Column> columns;
  for (auto column_index : set) {
    columns.push_back(data_table_schema->GetColumn(column_index));
  }
  auto output_table_schema = new catalog::Schema(columns);

  // OK) Create the plan node
  planner::AggregatePlan node(proj_info, predicate, std::move(agg_terms),
                              std::move(group_by_columns),
                              output_table_schema, AGGREGATE_TYPE_HASH);

  // Create and set up executor
  auto txn2 = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn2));
  executor::AggregateExecutor executor(&node, context.get());

  MockExecutor child_executor;
  executor.AddChild(&child_executor);

  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));

  // Child yields two tiles, then reports exhaustion.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  // Ownership of the tiles passes to the executor tree via release().
  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()))
      .WillOnce(Return(source_logical_tile2.release()));

  EXPECT_TRUE(executor.Init());
  EXPECT_TRUE(executor.Execute());
  txn_manager.CommitTransaction();

  /* Verify result */
  std::unique_ptr<executor::LogicalTile> result_tile(executor.GetOutput());
  EXPECT_TRUE(result_tile.get() != nullptr);

  // Touch each output tuple to make sure the tile's values are readable.
  for (auto tuple_id : *result_tile) {
    int colA =
        ValuePeeker::PeekAsInteger(result_tile->GetValue(tuple_id, 1));
    (void)colA;
  }
}
int main(int argc, char **argv) { afs_int32 code; struct rx_securityClass **securityClasses; afs_int32 numClasses; struct rx_service *service; struct ktc_encryptionKey tkey; int rxpackets = 100; int rxJumbograms = 0; /* default is to send and receive jumbograms. */ int rxMaxMTU = -1; int bufSize = 0; /* temp variable to read in udp socket buf size */ afs_uint32 host = ntohl(INADDR_ANY); char *auditFileName = NULL; VolumePackageOptions opts; #ifdef AFS_AIX32_ENV /* * The following signal action for AIX is necessary so that in case of a * crash (i.e. core is generated) we can include the user's data section * in the core dump. Unfortunately, by default, only a partial core is * generated which, in many cases, isn't too useful. */ struct sigaction nsa; sigemptyset(&nsa.sa_mask); nsa.sa_handler = SIG_DFL; nsa.sa_flags = SA_FULLDUMP; sigaction(SIGABRT, &nsa, NULL); sigaction(SIGSEGV, &nsa, NULL); #endif osi_audit_init(); osi_audit(VS_StartEvent, 0, AUD_END); /* Initialize dirpaths */ if (!(initAFSDirPath() & AFSDIR_SERVER_PATHS_OK)) { #ifdef AFS_NT40_ENV ReportErrorEventAlt(AFSEVT_SVR_NO_INSTALL_DIR, 0, argv[0], 0); #endif fprintf(stderr, "%s: Unable to obtain AFS server directory.\n", argv[0]); exit(2); } TTsleep = TTrun = 0; /* parse cmd line */ for (code = 1; code < argc; code++) { if (strcmp(argv[code], "-log") == 0) { /* set extra logging flag */ DoLogging = 1; } else if (strcmp(argv[code], "-help") == 0) { goto usage; } else if (strcmp(argv[code], "-rxbind") == 0) { rxBind = 1; } else if (strcmp(argv[code], "-allow-dotted-principals") == 0) { rxkadDisableDotCheck = 1; } else if (strcmp(argv[code], "-d") == 0) { if ((code + 1) >= argc) { fprintf(stderr, "missing argument for -d\n"); return -1; } debuglevel = atoi(argv[++code]); LogLevel = debuglevel; } else if (strcmp(argv[code], "-p") == 0) { lwps = atoi(argv[++code]); if (lwps > MAXLWP) { printf("Warning: '-p %d' is too big; using %d instead\n", lwps, MAXLWP); lwps = MAXLWP; } } else if (strcmp(argv[code], 
"-auditlog") == 0) { auditFileName = argv[++code]; } else if (strcmp(argv[code], "-audit-interface") == 0) { char *interface = argv[++code]; if (osi_audit_interface(interface)) { printf("Invalid audit interface '%s'\n", interface); return -1; } } else if (strcmp(argv[code], "-nojumbo") == 0) { rxJumbograms = 0; } else if (strcmp(argv[code], "-jumbo") == 0) { rxJumbograms = 1; } else if (!strcmp(argv[code], "-rxmaxmtu")) { if ((code + 1) >= argc) { fprintf(stderr, "missing argument for -rxmaxmtu\n"); exit(1); } rxMaxMTU = atoi(argv[++code]); if ((rxMaxMTU < RX_MIN_PACKET_SIZE) || (rxMaxMTU > RX_MAX_PACKET_DATA_SIZE)) { printf("rxMaxMTU %d invalid; must be between %d-%" AFS_SIZET_FMT "\n", rxMaxMTU, RX_MIN_PACKET_SIZE, RX_MAX_PACKET_DATA_SIZE); exit(1); } } else if (strcmp(argv[code], "-sleep") == 0) { sscanf(argv[++code], "%d/%d", &TTsleep, &TTrun); if ((TTsleep < 0) || (TTrun <= 0)) { printf("Warning: '-sleep %d/%d' is incorrect; ignoring\n", TTsleep, TTrun); TTsleep = TTrun = 0; } } else if (strcmp(argv[code], "-mbpersleep") == 0) { sscanf(argv[++code], "%d", &MBperSecSleep); if (MBperSecSleep < 0) MBperSecSleep = 0; } else if (strcmp(argv[code], "-udpsize") == 0) { if ((code + 1) >= argc) { printf("You have to specify -udpsize <integer value>\n"); exit(1); } sscanf(argv[++code], "%d", &bufSize); if (bufSize < rx_GetMinUdpBufSize()) printf ("Warning:udpsize %d is less than minimum %d; ignoring\n", bufSize, rx_GetMinUdpBufSize()); else udpBufSize = bufSize; } else if (strcmp(argv[code], "-enable_peer_stats") == 0) { rx_enablePeerRPCStats(); } else if (strcmp(argv[code], "-enable_process_stats") == 0) { rx_enableProcessRPCStats(); } else if (strcmp(argv[code], "-preserve-vol-stats") == 0) { DoPreserveVolumeStats = 1; } else if (strcmp(argv[code], "-sync") == 0) { if ((code + 1) >= argc) { printf("You have to specify -sync <sync_behavior>\n"); exit(1); } ih_PkgDefaults(); if (ih_SetSyncBehavior(argv[++code])) { printf("Invalid -sync value %s\n", argv[code]); exit(1); 
} } #ifndef AFS_NT40_ENV else if (strcmp(argv[code], "-syslog") == 0) { /* set syslog logging flag */ serverLogSyslog = 1; } else if (strncmp(argv[code], "-syslog=", 8) == 0) { serverLogSyslog = 1; serverLogSyslogFacility = atoi(argv[code] + 8); } #endif #ifdef AFS_PTHREAD_ENV else if (strcmp(argv[code], "-convert") == 0) convertToOsd = 1; else if (strcmp(argv[code], "-libafsosd") == 0) libafsosd = 1; #endif else { printf("volserver: unrecognized flag '%s'\n", argv[code]); usage: #ifndef AFS_NT40_ENV printf("Usage: volserver [-log] [-p <number of processes>] " "[-auditlog <log path>] [-d <debug level>] " "[-nojumbo] [-jumbo] [-rxmaxmtu <bytes>] [-rxbind] [-allow-dotted-principals] " "[-udpsize <size of socket buffer in bytes>] " "[-syslog[=FACILITY]] -mbpersleep <MB / 1 sec sleep>" "%s" "[-enable_peer_stats] [-enable_process_stats] " "[-sync <always | delayed | onclose | never>] " #ifdef AFS_PTHREAD_ENV , libafsosd ? "[-convert] ":"", #endif "[-help]\n"); #else printf("Usage: volserver [-log] [-p <number of processes>] " "[-auditlog <log path>] [-d <debug level>] " "[-nojumbo] [-jumbo] [-rxmaxmtu <bytes>] [-rxbind] [-allow-dotted-principals] " "[-udpsize <size of socket buffer in bytes>] " "[-enable_peer_stats] [-enable_process_stats] " "[-sync <always | delayed | onclose | never>] " "[-help]\n"); #endif VS_EXIT(1); } } if (auditFileName) { osi_audit_file(auditFileName); osi_audit(VS_StartEvent, 0, AUD_END); } #ifdef AFS_SGI_VNODE_GLUE if (afs_init_kernel_config(-1) < 0) { printf ("Can't determine NUMA configuration, not starting volserver.\n"); exit(1); } #endif InitErrTabs(); #ifdef AFS_PTHREAD_ENV SetLogThreadNumProgram( threadNum ); #endif #ifdef AFS_NT40_ENV if (afs_winsockInit() < 0) { ReportErrorEventAlt(AFSEVT_SVR_WINSOCK_INIT_FAILED, 0, argv[0], 0); printf("Volume server unable to start winsock, exiting.\n"); exit(1); } #endif /* Open VolserLog and map stdout, stderr into it; VInitVolumePackage2 can log, so we need to do this here */ 
OpenLog(AFSDIR_SERVER_VOLSERLOG_FILEPATH); VOptDefaults(volumeServer, &opts); #ifdef AFS_PTHREAD_ENV if (libafsosd) { extern struct vol_data_v0 vol_data_v0; extern struct volser_data_v0 volser_data_v0; struct init_volser_inputs input = { &vol_data_v0, &volser_data_v0 }; struct init_volser_outputs output = { &osdvol, &osdvolser }; code = load_libafsosd("init_volser_afsosd", &input, &output); if (code) { ViceLog(0, ("Loading libafsosd.so failed with code %d, aborting\n", code)); return -1; } } #endif if (VInitVolumePackage2(volumeServer, &opts)) { Log("Shutting down: errors encountered initializing volume package\n"); exit(1); } /* For nuke() */ Lock_Init(&localLock); DInit(40); #ifndef AFS_PTHREAD_ENV vol_PollProc = IOMGR_Poll; /* tell vol pkg to poll io system periodically */ #endif #ifndef AFS_NT40_ENV rxi_syscallp = volser_syscall; #endif rx_nPackets = rxpackets; /* set the max number of packets */ if (udpBufSize) rx_SetUdpBufSize(udpBufSize); /* set the UDP buffer size for receive */ if (rxBind) { afs_int32 ccode; if (AFSDIR_SERVER_NETRESTRICT_FILEPATH || AFSDIR_SERVER_NETINFO_FILEPATH) { char reason[1024]; ccode = parseNetFiles(SHostAddrs, NULL, NULL, ADDRSPERSITE, reason, AFSDIR_SERVER_NETINFO_FILEPATH, AFSDIR_SERVER_NETRESTRICT_FILEPATH); } else { ccode = rx_getAllAddr(SHostAddrs, ADDRSPERSITE); } if (ccode == 1) host = SHostAddrs[0]; } code = rx_InitHost(host, (int)htons(AFSCONF_VOLUMEPORT)); if (code) { fprintf(stderr, "rx init failed on socket AFSCONF_VOLUMEPORT %u\n", AFSCONF_VOLUMEPORT); VS_EXIT(1); } if (!rxJumbograms) { /* Don't allow 3.4 vos clients to send jumbograms and we don't send. 
*/ rx_SetNoJumbo(); } if (rxMaxMTU != -1) { rx_SetMaxMTU(rxMaxMTU); } rx_GetIFInfo(); #ifndef AFS_PTHREAD_ENV rx_SetRxDeadTime(420); #endif memset(busyFlags, 0, sizeof(busyFlags)); SetupLogSignals(); { #ifdef AFS_PTHREAD_ENV pthread_t tid; pthread_attr_t tattr; osi_Assert(pthread_attr_init(&tattr) == 0); osi_Assert(pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED) == 0); osi_Assert(pthread_create(&tid, &tattr, BKGLoop, NULL) == 0); #else PROCESS pid; LWP_CreateProcess(BKGLoop, 16*1024, 3, 0, "vol bkg daemon", &pid); LWP_CreateProcess(BKGSleep,16*1024, 3, 0, "vol slp daemon", &pid); #endif } /* Create a single security object, in this case the null security object, for unauthenticated connections, which will be used to control security on connections made to this server */ tdir = afsconf_Open(AFSDIR_SERVER_ETC_DIRPATH); if (!tdir) { Abort("volser: could not open conf files in %s\n", AFSDIR_SERVER_ETC_DIRPATH); VS_EXIT(1); } afsconf_GetKey(tdir, 999, &tkey); afsconf_BuildServerSecurityObjects(tdir, 0, &securityClasses, &numClasses); if (securityClasses[0] == NULL) Abort("rxnull_NewServerSecurityObject"); service = rx_NewServiceHost(host, 0, VOLSERVICE_ID, "VOLSER", securityClasses, numClasses, AFSVolExecuteRequest); if (service == (struct rx_service *)0) Abort("rx_NewService"); rx_SetBeforeProc(service, MyBeforeProc); rx_SetAfterProc(service, MyAfterProc); rx_SetIdleDeadTime(service, 0); /* never timeout */ if (lwps < 4) lwps = 4; rx_SetMaxProcs(service, lwps); #if defined(AFS_XBSD_ENV) rx_SetStackSize(service, (128 * 1024)); #elif defined(AFS_SGI_ENV) rx_SetStackSize(service, (48 * 1024)); #else rx_SetStackSize(service, (32 * 1024)); #endif if (rxkadDisableDotCheck) { rx_SetSecurityConfiguration(service, RXS_CONFIG_FLAGS, (void *)RXS_CONFIG_FLAGS_DISABLE_DOTCHECK); } service = rx_NewService(0, RX_STATS_SERVICE_ID, "rpcstats", securityClasses, numClasses, RXSTATS_ExecuteRequest); if (service == (struct rx_service *)0) Abort("rx_NewService"); 
rx_SetMinProcs(service, 2); rx_SetMaxProcs(service, 4); #ifdef AFS_PTHREAD_ENV if (libafsosd) { service = rx_NewService(0, 7, "afsosd", securityClasses, numClasses, (osdvolser->op_AFSVOLOSD_ExecuteRequest)); if (!service) { ViceLog(0, ("Failed to initialize afsosd rpc service.\n")); exit(-1); } rx_SetBeforeProc(service, MyBeforeProc); rx_SetAfterProc(service, MyAfterProc); rx_SetIdleDeadTime(service, 0); /* never timeout */ rx_SetMinProcs(service, 2); if (lwps < 4) lwps = 4; rx_SetMaxProcs(service, lwps); #if defined(AFS_XBSD_ENV) rx_SetStackSize(service, (128 * 1024)); #elif defined(AFS_SGI_ENV) rx_SetStackSize(service, (48 * 1024)); #else rx_SetStackSize(service, (32 * 1024)); #endif } #endif /* AFS_PTHREAD_ENV */ LogCommandLine(argc, argv, "Volserver", VolserVersion, "Starting AFS", Log); FT_GetTimeOfDay(&statisticStart, 0); if (afsconf_GetLatestKey(tdir, NULL, NULL) == 0) { LogDesWarning(); } if (TTsleep) { Log("Will sleep %d second%s every %d second%s\n", TTsleep, (TTsleep > 1) ? "s" : "", TTrun + TTsleep, (TTrun + TTsleep > 1) ? "s" : ""); } /* allow super users to manage RX statistics */ /* allow super users to manage RX statistics */ rx_SetRxStatUserOk(vol_rxstat_userok); rx_StartServer(1); /* Donate this process to the server process pool */ osi_audit(VS_FinishEvent, (-1), AUD_END); Abort("StartServer returned?"); return 0; /* not reached */ }
// Exercises the plain (no GROUP BY) aggregate executor with a mix of
// distinct and non-distinct aggregates:
//
//   SELECT SUM(a), COUNT(b), COUNT(DISTINCT b) FROM table
//
// Two mocked child tiles feed the executor; the result is a single row whose
// SUM and COUNT are exact, and whose DISTINCT count is bounded (data is
// partly random, so only an upper bound of 3 is asserted).
TEST(AggregateTests, PlainSumCountDistinctTest) {
  /*
   * SELECT SUM(a), COUNT(b), COUNT(DISTINCT b) from table
   */
  const int tuple_count = TESTS_TUPLES_PER_TILEGROUP;

  // Create a table and wrap it in logical tiles (2 tile groups' worth).
  auto &txn_manager = concurrency::TransactionManager::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  auto txn_id = txn->GetTransactionId();
  std::unique_ptr<storage::DataTable> data_table(
      ExecutorTestsUtil::CreateTable(tuple_count, false));
  ExecutorTestsUtil::PopulateTable(txn, data_table.get(), 2 * tuple_count,
                                   false, true, true);
  txn_manager.CommitTransaction();

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0),
                                                  txn_id));
  std::unique_ptr<executor::LogicalTile> source_logical_tile2(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(1),
                                                  txn_id));

  // (1-5) Setup plan node

  // 1) Set up group-by columns (none — plain aggregation)
  std::vector<oid_t> group_by_columns;

  // 2) Set up project info: output columns come from tile 1 (the aggregate
  // results), in aggregate-term order.
  planner::ProjectInfo::DirectMapList direct_map_list = {
      {0, {1, 0}}, {1, {1, 1}}, {2, {1, 2}}};

  // NOTE(review): raw `new` — presumably AggregatePlan takes ownership of
  // proj_info; confirm against the plan node's destructor.
  auto proj_info = new planner::ProjectInfo(planner::ProjectInfo::TargetList(),
                                            std::move(direct_map_list));

  // 3) Set up unique aggregates: SUM(a), COUNT(b), COUNT(DISTINCT b).
  std::vector<planner::AggregatePlan::AggTerm> agg_terms;
  planner::AggregatePlan::AggTerm sumA(EXPRESSION_TYPE_AGGREGATE_SUM,
                                       expression::TupleValueFactory(0, 0),
                                       false);
  planner::AggregatePlan::AggTerm countB(EXPRESSION_TYPE_AGGREGATE_COUNT,
                                         expression::TupleValueFactory(0, 1),
                                         false);  // Flag distinct
  planner::AggregatePlan::AggTerm countDistinctB(
      EXPRESSION_TYPE_AGGREGATE_COUNT, expression::TupleValueFactory(0, 1),
      true);  // Flag distinct
  agg_terms.push_back(sumA);
  agg_terms.push_back(countB);
  agg_terms.push_back(countDistinctB);

  // 4) Set up predicate (empty)
  expression::AbstractExpression* predicate = nullptr;

  // 5) Create output table schema: column 1 appears twice because both
  // COUNT(b) and COUNT(DISTINCT b) are typed from source column b.
  auto data_table_schema = data_table.get()->GetSchema();
  std::vector<oid_t> set = {0, 1, 1};
  std::vector<catalog::Column> columns;
  for (auto column_index : set) {
    columns.push_back(data_table_schema->GetColumn(column_index));
  }

  auto output_table_schema = new catalog::Schema(columns);

  // OK) Create the plan node
  planner::AggregatePlan node(proj_info, predicate, std::move(agg_terms),
                              std::move(group_by_columns), output_table_schema,
                              AGGREGATE_TYPE_PLAIN);

  // Create and set up executor under a fresh transaction.
  auto txn2 = txn_manager.BeginTransaction();
  std::unique_ptr<executor::ExecutorContext> context(
      new executor::ExecutorContext(txn2));

  executor::AggregateExecutor executor(&node, context.get());
  MockExecutor child_executor;
  executor.AddChild(&child_executor);

  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));

  // Child yields two tiles then signals exhaustion.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  // Ownership of the source tiles transfers to the executor via release().
  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()))
      .WillOnce(Return(source_logical_tile2.release()));

  EXPECT_TRUE(executor.Init());

  EXPECT_TRUE(executor.Execute());

  txn_manager.CommitTransaction();

  /* Verify result: SUM(a) == 50, COUNT(b) == 10, COUNT(DISTINCT b) <= 3.
   * The expected SUM/COUNT follow from how PopulateTable fills column a/b;
   * the distinct count is only bounded because b's values are randomized. */
  std::unique_ptr<executor::LogicalTile> result_tile(executor.GetOutput());
  EXPECT_TRUE(result_tile.get() != nullptr);
  EXPECT_TRUE(result_tile->GetValue(0, 0)
                  .OpEquals(ValueFactory::GetIntegerValue(50))
                  .IsTrue());
  EXPECT_TRUE(result_tile->GetValue(0, 1)
                  .OpEquals(ValueFactory::GetIntegerValue(10))
                  .IsTrue());
  EXPECT_TRUE(result_tile->GetValue(0, 2)
                  .OpLessThanOrEqual(ValueFactory::GetIntegerValue(3))
                  .IsTrue());
}
// Exercises the projection executor with one direct-mapped column plus one
// computed target: output column 1 is the expression (col0 + 20) evaluated
// per tuple, while output column 0 passes col0 through unchanged.
TEST_F(ProjectionTests, BasicTargetTest) {
  MockExecutor child_executor;
  EXPECT_CALL(child_executor, DInit()).WillOnce(Return(true));

  // Child supplies a single tile, then reports exhaustion.
  EXPECT_CALL(child_executor, DExecute())
      .WillOnce(Return(true))
      .WillOnce(Return(false));

  size_t tile_size = 5;

  // Create a table and wrap it in logical tile
  auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
  auto txn = txn_manager.BeginTransaction();
  std::unique_ptr<storage::DataTable> data_table(
      TestingExecutorUtil::CreateTable(tile_size));
  TestingExecutorUtil::PopulateTable(data_table.get(), tile_size, false, false,
                                     false, txn);
  txn_manager.CommitTransaction(txn);

  std::unique_ptr<executor::LogicalTile> source_logical_tile1(
      executor::LogicalTileFactory::WrapTileGroup(data_table->GetTileGroup(0)));

  // Ownership of the tile transfers to the executor via release().
  EXPECT_CALL(child_executor, GetOutput())
      .WillOnce(Return(source_logical_tile1.release()));

  // Create the plan node
  TargetList target_list;
  DirectMapList direct_map_list;

  /////////////////////////////////////////////////////////
  // PROJECTION 0, TARGET 0 + 20
  /////////////////////////////////////////////////////////

  // construct schema: two output columns, both typed from source column 0.
  std::vector<catalog::Column> columns;
  auto orig_schema = data_table.get()->GetSchema();
  columns.push_back(orig_schema->GetColumn(0));
  columns.push_back(orig_schema->GetColumn(0));
  std::shared_ptr<const catalog::Schema> schema(new catalog::Schema(columns));

  // direct map: output col 0 <- (input tile 0, input col 0)
  DirectMap direct_map = std::make_pair(0, std::make_pair(0, 0));
  direct_map_list.push_back(direct_map);

  // target list: output col 1 <- expression (col0 + 20).  The expression
  // tree takes ownership of both operand expressions.
  auto const_val = new expression::ConstantValueExpression(
      type::ValueFactory::GetIntegerValue(20));
  auto tuple_value_expr =
      expression::ExpressionUtil::TupleValueFactory(type::Type::INTEGER, 0, 0);
  expression::AbstractExpression *expr =
      expression::ExpressionUtil::OperatorFactory(ExpressionType::OPERATOR_PLUS,
                                                  type::Type::INTEGER,
                                                  tuple_value_expr, const_val);

  Target target = std::make_pair(1, expr);
  target_list.push_back(target);

  std::unique_ptr<const planner::ProjectInfo> project_info(
      new planner::ProjectInfo(std::move(target_list),
                               std::move(direct_map_list)));

  planner::ProjectionPlan node(std::move(project_info), schema);

  // Create and set up executor
  executor::ProjectionExecutor executor(&node, nullptr);
  executor.AddChild(&child_executor);

  RunTest(executor, 1);
}
/**
 * Main loop of the demand-attach online salvage server.
 *
 * Sets up logging, acquires the shared salvage lock (so a stand-alone
 * salvager cannot run concurrently), initializes the volume package and the
 * work/log-cleanup queues, spawns the reaper, log-cleanup and log-scanning
 * helper threads, and then loops forever: each SALVSYNC work item is handed
 * to a forked child process (at most Parallel concurrent children), with the
 * worker count coordinated through worker_lock / worker_cv.
 *
 * NOTE(review): pthread_* and other calls with side effects are wrapped in
 * assert(); if this file were ever built with NDEBUG those calls would be
 * compiled out entirely — presumably this is never built that way, but it is
 * worth confirming (newer OpenAFS code uses opr_Verify for this reason).
 *
 * This function never returns.
 */
static void
SalvageServer(void)
{
    int pid, ret;
    struct SalvageQueueNode * node;
    pthread_t tid;
    pthread_attr_t attrs;
    int slot;
    VolumePackageOptions opts;

    /* All entries to the log will be appended.  Useful if there are
     * multiple salvagers appending to the log.
     */
    CheckLogFile((char *)AFSDIR_SERVER_SALSRVLOG_FILEPATH);
#ifndef AFS_NT40_ENV
#ifdef AFS_LINUX20_ENV
    fcntl(fileno(logFile), F_SETFL, O_APPEND);	/* Isn't this redundant? */
#else
    fcntl(fileno(logFile), F_SETFL, FAPPEND);	/* Isn't this redundant? */
#endif
#endif
    setlinebuf(logFile);

    fprintf(logFile, "%s\n", cml_version_number);
    Log("Starting OpenAFS Online Salvage Server %s (%s)\n", SalvageVersion,
	commandLine);

    /* Get and hold a lock for the duration of the salvage to make sure
     * that no other salvage runs at the same time.  The routine
     * VInitVolumePackage2 (called below) makes sure that a file server or
     * other volume utilities don't interfere with the salvage.
     */

    /* even demand attach online salvager
     * still needs this because we don't want
     * a stand-alone salvager to conflict with
     * the salvager daemon */
    ObtainSharedSalvageLock();

    /* One slot per allowed concurrent child; slot value is the child's pid
     * (0 = free). */
    child_slot = (int *) malloc(Parallel * sizeof(int));
    assert(child_slot != NULL);
    memset(child_slot, 0, Parallel * sizeof(int));

    /* initialize things */
    VOptDefaults(salvageServer, &opts);
    if (VInitVolumePackage2(salvageServer, &opts)) {
	Log("Shutting down: errors encountered initializing volume package\n");
	Exit(1);
    }
    DInit(10);
    queue_Init(&pending_q);
    queue_Init(&log_cleanup_queue);
    assert(pthread_mutex_init(&worker_lock, NULL) == 0);
    assert(pthread_cond_init(&worker_cv, NULL) == 0);
    assert(pthread_cond_init(&log_cleanup_queue.queue_change_cv, NULL) == 0);
    assert(pthread_attr_init(&attrs) == 0);

    /* start up the reaper and log cleaner threads */
    assert(pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED) == 0);
    assert(pthread_create(&tid, &attrs, &SalvageChildReaperThread, NULL) == 0);
    assert(pthread_create(&tid, &attrs, &SalvageLogCleanupThread, NULL) == 0);
    assert(pthread_create(&tid, &attrs, &SalvageLogScanningThread, NULL) == 0);

    /* loop forever serving requests */
    while (1) {
	node = SALVSYNC_getWork();
	assert(node != NULL);

	Log("dispatching child to salvage volume %u...\n",
	    node->command.sop.parent);

	VOL_LOCK;
	/* find a slot: the reaper thread guarantees one is free whenever
	 * current_workers < Parallel (see the wait below) */
	for (slot = 0; slot < Parallel; slot++) {
	    if (!child_slot[slot])
		break;
	}
	assert (slot < Parallel);

    do_fork:
	pid = Fork();
	if (pid == 0) {
	    /* child: run the salvage and exit with its status */
	    VOL_UNLOCK;
	    ret = DoSalvageVolume(node, slot);
	    Exit(ret);
	} else if (pid < 0) {
	    /* fork failed (likely transient resource exhaustion): retry */
	    Log("failed to fork child worker process\n");
	    sleep(1);
	    goto do_fork;
	} else {
	    /* parent: record the child and update the worker count */
	    child_slot[slot] = pid;
	    node->pid = pid;
	    VOL_UNLOCK;

	    assert(pthread_mutex_lock(&worker_lock) == 0);
	    current_workers++;

	    /* let the reaper thread know another worker was spawned */
	    assert(pthread_cond_broadcast(&worker_cv) == 0);

	    /* if we're overquota, wait for the reaper */
	    while (current_workers >= Parallel) {
		assert(pthread_cond_wait(&worker_cv, &worker_lock) == 0);
	    }
	    assert(pthread_mutex_unlock(&worker_lock) == 0);
	}
    }
}
static int handleit(struct cmd_syndesc *as, void *arock) { struct cmd_item *ti; int err = 0; afs_uint32 volumeId = 0; char *partName = 0; struct DiskPartition64 *partP = NULL; #ifndef AFS_NT40_ENV if (geteuid() != 0) { printf("vol-info must be run as root; sorry\n"); exit(1); } #endif if (as->parms[0].items) online = 1; else online = 0; if (as->parms[1].items) DumpVnodes = 1; else DumpVnodes = 0; if (as->parms[2].items) DumpDate = 1; else DumpDate = 0; if (as->parms[3].items) DumpInodeNumber = 1; else DumpInodeNumber = 0; if (as->parms[4].items) InodeTimes = 1; else InodeTimes = 0; if ((ti = as->parms[5].items)) partName = ti->data; if ((ti = as->parms[6].items)) volumeId = strtoul(ti->data, NULL, 10); if (as->parms[7].items) dheader = 1; else dheader = 0; if (as->parms[8].items) { dsizeOnly = 1; dheader = 1; DumpVnodes = 1; } else dsizeOnly = 0; if (as->parms[9].items) { fixheader = 1; } else fixheader = 0; if (as->parms[10].items) { saveinodes = 1; dheader = 1; DumpVnodes = 1; } else saveinodes = 0; if (as->parms[11].items) { orphaned = 1; DumpVnodes = 1; } else #if defined(AFS_NAMEI_ENV) if (as->parms[12].items) { PrintFileNames = 1; DumpVnodes = 1; } else #endif orphaned = 0; DInit(10); err = VAttachPartitions(); if (err) { printf("%d partitions had errors during attach.\n", err); } if (partName) { partP = VGetPartition(partName, 0); if (!partP) { printf("%s is not an AFS partition name on this server.\n", partName); exit(1); } } if (!volumeId) { if (!partP) { HandleAllPart(); } else { HandlePart(partP); } } else { char name1[128]; if (!partP) { partP = FindCurrentPartition(); if (!partP) { printf("Current partition is not a vice partition.\n"); exit(1); } } (void)afs_snprintf(name1, sizeof name1, VFORMAT, afs_printable_uint32_lu(volumeId)); if (dsizeOnly && !saveinodes) printf ("Volume-Id\t Volsize Auxsize Inodesize AVolsize SizeDiff (VolName)\n"); HandleVolume(partP, name1); } return 0; }
/**
 * Volume server entry point (modern variant: argument parsing is delegated
 * to ParseArgs, logging goes through logopts, and soft signals / opr_Verify
 * are used under AFS_PTHREAD_ENV).
 *
 * Startup sequence: initialize auditing and server dirpaths, parse the
 * command line, initialize the volume package, bring up Rx bound to
 * AFSCONF_VOLUMEPORT (optionally restricted via NetInfo/NetRestrict when
 * -rxbind was given), register the VOLSER and rpcstats services, then donate
 * this process to the Rx server pool.  Never returns normally.
 */
int
main(int argc, char **argv)
{
    afs_int32 code;
    struct rx_securityClass **securityClasses;
    afs_int32 numClasses;
    struct rx_service *service;
    int rxpackets = 100;
    char hoststr[16];
    afs_uint32 host = ntohl(INADDR_ANY);
    VolumePackageOptions opts;

#ifdef AFS_AIX32_ENV
    /*
     * The following signal action for AIX is necessary so that in case of a
     * crash (i.e. core is generated) we can include the user's data section
     * in the core dump. Unfortunately, by default, only a partial core is
     * generated which, in many cases, isn't too useful.
     */
    struct sigaction nsa;

    sigemptyset(&nsa.sa_mask);
    nsa.sa_handler = SIG_DFL;
    nsa.sa_flags = SA_FULLDUMP;
    sigaction(SIGABRT, &nsa, NULL);
    sigaction(SIGSEGV, &nsa, NULL);
#endif
    osi_audit_init();
    osi_audit(VS_StartEvent, 0, AUD_END);

    /* Initialize dirpaths */
    if (!(initAFSDirPath() & AFSDIR_SERVER_PATHS_OK)) {
#ifdef AFS_NT40_ENV
	ReportErrorEventAlt(AFSEVT_SVR_NO_INSTALL_DIR, 0, argv[0], 0);
#endif
	fprintf(stderr, "%s: Unable to obtain AFS server directory.\n",
		argv[0]);
	exit(2);
    }

    /* Default config dir; ParseArgs may override related globals. */
    configDir = strdup(AFSDIR_SERVER_ETC_DIRPATH);

    if (ParseArgs(argc, argv)) {
	exit(1);
    }

    /* auditFileName is a global set by ParseArgs. */
    if (auditFileName) {
	osi_audit_file(auditFileName);
	osi_audit(VS_StartEvent, 0, AUD_END);
    }
#ifdef AFS_SGI_VNODE_GLUE
    if (afs_init_kernel_config(-1) < 0) {
	printf
	    ("Can't determine NUMA configuration, not starting volserver.\n");
	exit(1);
    }
#endif
    InitErrTabs();

#ifdef AFS_PTHREAD_ENV
    SetLogThreadNumProgram( rx_GetThreadNum );
#endif

#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0) {
	ReportErrorEventAlt(AFSEVT_SVR_WINSOCK_INIT_FAILED, 0, argv[0], 0);
	printf("Volume server unable to start winsock, exiting.\n");
	exit(1);
    }
#endif
    /* VInitVolumePackage2 can log, so open the server log first. */
    OpenLog(&logopts);

    VOptDefaults(volumeServer, &opts);
    if (VInitVolumePackage2(volumeServer, &opts)) {
	Log("Shutting down: errors encountered initializing volume package\n");
	exit(1);
    }
    /* For nuke() */
    Lock_Init(&localLock);
    DInit(40);
#ifndef AFS_PTHREAD_ENV
    vol_PollProc = IOMGR_Poll;	/* tell vol pkg to poll io system periodically */
#endif
#if !defined( AFS_NT40_ENV ) && !defined(AFS_DARWIN160_ENV)
    rxi_syscallp = volser_syscall;
#endif
    rx_nPackets = rxpackets;	/* set the max number of packets */
    if (udpBufSize)
	rx_SetUdpBufSize(udpBufSize);	/* set the UDP buffer size for receive */
    if (rxBind) {
	/* Restrict the bind address per NetInfo/NetRestrict if present. */
	afs_int32 ccode;
	if (AFSDIR_SERVER_NETRESTRICT_FILEPATH ||
	    AFSDIR_SERVER_NETINFO_FILEPATH) {
	    char reason[1024];
	    ccode = afsconf_ParseNetFiles(SHostAddrs, NULL, NULL,
					  ADDRSPERSITE, reason,
					  AFSDIR_SERVER_NETINFO_FILEPATH,
					  AFSDIR_SERVER_NETRESTRICT_FILEPATH);
	} else {
	    ccode = rx_getAllAddr(SHostAddrs, ADDRSPERSITE);
	}
	if (ccode == 1)
	    host = SHostAddrs[0];
    }

    Log("Volserver binding rx to %s:%d\n",
	afs_inet_ntoa_r(host, hoststr), AFSCONF_VOLUMEPORT);
    code = rx_InitHost(host, (int)htons(AFSCONF_VOLUMEPORT));
    if (code) {
	fprintf(stderr, "rx init failed on socket AFSCONF_VOLUMEPORT %u\n",
		AFSCONF_VOLUMEPORT);
	VS_EXIT(1);
    }
    if (!rxJumbograms) {
	/* Don't allow 3.4 vos clients to send jumbograms and we don't send. */
	rx_SetNoJumbo();
    }
    if (rxMaxMTU != -1) {
	if (rx_SetMaxMTU(rxMaxMTU) != 0) {
	    fprintf(stderr, "rxMaxMTU %d is invalid\n", rxMaxMTU);
	    VS_EXIT(1);
	}
    }
    rx_GetIFInfo();
    rx_SetRxDeadTime(420);
    memset(busyFlags, 0, sizeof(busyFlags));

#ifdef AFS_PTHREAD_ENV
    opr_softsig_Init();
    SetupLogSoftSignals();
#else
    SetupLogSignals();
#endif

    /* Start the background daemon thread (or LWP). */
    {
#ifdef AFS_PTHREAD_ENV
	pthread_t tid;
	pthread_attr_t tattr;
	opr_Verify(pthread_attr_init(&tattr) == 0);
	opr_Verify(pthread_attr_setdetachstate(&tattr,
					       PTHREAD_CREATE_DETACHED) == 0);
	opr_Verify(pthread_create(&tid, &tattr, BKGLoop, NULL) == 0);
#else
	PROCESS pid;
	LWP_CreateProcess(BKGLoop, 16*1024, 3, 0, "vol bkg daemon", &pid);
#endif
    }

    /* Create a single security object, in this case the null security object,
     * for unauthenticated connections, which will be used to control security
     * on connections made to this server */
    tdir = afsconf_Open(configDir);
    if (!tdir) {
	Abort("volser: could not open conf files in %s\n", configDir);
	AFS_UNREACHED(VS_EXIT(1));
    }
    /* initialize audit user check */
    osi_audit_set_user_check(tdir, vol_IsLocalRealmMatch);
    afsconf_BuildServerSecurityObjects(tdir, &securityClasses, &numClasses);
    if (securityClasses[0] == NULL)
	Abort("rxnull_NewServerSecurityObject");
    service =
	rx_NewServiceHost(host, 0, VOLSERVICE_ID, "VOLSER", securityClasses,
			  numClasses, AFSVolExecuteRequest);
    if (service == (struct rx_service *)0)
	Abort("rx_NewService");
    rx_SetBeforeProc(service, MyBeforeProc);
    rx_SetAfterProc(service, MyAfterProc);
    rx_SetIdleDeadTime(service, 0);	/* never timeout */
    if (lwps < 4)
	lwps = 4;
    rx_SetMaxProcs(service, lwps);
#if defined(AFS_XBSD_ENV)
    rx_SetStackSize(service, (128 * 1024));
#elif defined(AFS_SGI_ENV)
    rx_SetStackSize(service, (48 * 1024));
#else
    rx_SetStackSize(service, (32 * 1024));
#endif
    if (rxkadDisableDotCheck) {
	code = rx_SetSecurityConfiguration(service, RXS_CONFIG_FLAGS,
					   (void *)RXS_CONFIG_FLAGS_DISABLE_DOTCHECK);
	if (code) {
	    fprintf(stderr, "volser: failed to allow dotted principals: code %d\n", code);
	    VS_EXIT(1);
	}
    }

    service =
	rx_NewService(0, RX_STATS_SERVICE_ID, "rpcstats", securityClasses,
		      numClasses, RXSTATS_ExecuteRequest);
    if (service == (struct rx_service *)0)
	Abort("rx_NewService");
    rx_SetMinProcs(service, 2);
    rx_SetMaxProcs(service, 4);

    LogCommandLine(argc, argv, "Volserver", VolserVersion, "Starting AFS",
		   Log);
    if (afsconf_GetLatestKey(tdir, NULL, NULL) == 0) {
	LogDesWarning();
    }

    /* allow super users to manage RX statistics */
    rx_SetRxStatUserOk(vol_rxstat_userok);

    rx_StartServer(1);		/* Donate this process to the server process pool */

    osi_audit(VS_FinishEvent, (-1), AUD_END);
    Abort("StartServer returned?");
    AFS_UNREACHED(return 0);
}