#include <stdlib.h>
#include <time.h>

int
main(void)
{
	srand(time(NULL));			/* seed the RNG so each run differs */

	data  **zee = init(MAX);	/* allocate the cluster table */
	int		i;

	/* run 5000 clustering iterations, then print the result */
	for (i = 0; i < 5000; i++)
		cluster(zee);

	print_cluster(zee);

	return 0;
}
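/*
 * For reference, a sketch of the interface the driver above relies on.
 * These signatures are inferred from the call sites and are hypothetical;
 * the real declarations presumably live in the project's header. Note that
 * this single-argument print_cluster() is distinct from the two-argument
 * helper used by the test below.
 */
#define MAX 1024						/* hypothetical table capacity */
typedef struct data data;				/* opaque record type being clustered */
data  **init(int max);					/* allocate and seed the cluster table */
void	cluster(data **table);			/* perform one clustering pass */
void	print_cluster(data **table);	/* print the resulting clusters */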
/*
 * Testing distribute_work_2_gp_segments
 */
static void
test__distribute_work_to_gp_segments(TestInputData *input)
{
	List	  **segs_allocated_data = NULL;
	List	   *input_fragments_list = NIL;
	char	  **array_of_segs = NULL;
	bool	   *array_of_primaries;
	int			total_segs;
	bool		cluster_size_not_exceeded = input->m_num_hosts_in_cluster <= 65025;

	assert_true(cluster_size_not_exceeded);

	/*
	 * 1. Initialize the test input parameters
	 *
	 * We are testing an N-host cluster. The size of the cluster is set in
	 * this section - section 1. Basic test assumptions:
	 * a. There is one datanode on each host in the cluster.
	 * b. There are HAWQ segments on each host in the cluster.
	 * c. There is an equal number of HAWQ segments on each host - hardcoded
	 *    in this section.
	 */
	int num_hosts_in_cluster = input->m_num_hosts_in_cluster;		/* cluster size mustn't exceed 65025 - see function create_cluster() */
	int num_data_fragments = input->m_num_data_fragments;			/* number of fragments in the data we intend to allocate between the HAWQ segments */
	int num_active_data_nodes = input->m_num_active_data_nodes;	/* number of datanodes that hold the queried data - there is one datanode on each cluster host, so there are <num_hosts_in_cluster> datanodes */
	int num_of_fragment_replicas = input->m_num_of_fragment_replicas;
	int num_segments_on_host = input->m_num_segments_on_host;		/* number of HAWQ segments on each cluster host - we assume all cluster hosts have HAWQ segments installed */
	int num_working_segs = input->m_num_working_segs;				/* the subset of HAWQ segments that will do the processing - not all the HAWQ segments in the cluster are involved */
	bool enable_print_input_cluster = input->m_enable_print_input_cluster;
	bool enable_print_input_fragments = input->m_enable_print_input_fragments;
	bool enable_print_input_segments = input->m_enable_print_input_segments;
	bool enable_print_allocated_fragments = input->m_enable_print_allocated_fragments;

	/* 2. Create the cluster */
	char **cluster = create_cluster(num_hosts_in_cluster);

	if (enable_print_input_cluster)
		print_cluster(cluster, num_hosts_in_cluster);

	/* 3. Input - data fragments */
	input_fragments_list = spread_fragments_in_cluster(num_data_fragments,		/* number of fragments in the data we are about to allocate */
													   num_active_data_nodes,	/* hosts */
													   num_of_fragment_replicas,	/* replicas */
													   cluster,					/* the whole cluster */
													   num_hosts_in_cluster);	/* the number of hosts in the cluster */

	if (enable_print_input_fragments)
		print_fragment_list(input_fragments_list);

	/* 4. Input - HAWQ segments */
	total_segs = num_hosts_in_cluster * num_segments_on_host;
	array_of_segs = create_array_of_segs(cluster, num_hosts_in_cluster, num_segments_on_host);
	array_of_primaries = create_array_of_primaries(total_segs);

	buildCdbComponentDatabases(total_segs, array_of_segs, array_of_primaries);

	if (enable_print_input_segments)
		print_segments_list();

	/* 5. The actual unit test of distribute_work_2_gp_segments() */
	segs_allocated_data = distribute_work_2_gp_segments(input_fragments_list, total_segs, num_working_segs);

	if (enable_print_allocated_fragments)
		print_allocated_fragments(segs_allocated_data, total_segs);

	/* 6. Validations - verify that the expected output was obtained */
	validate_total_fragments_allocated(segs_allocated_data, total_segs, num_data_fragments);
	validate_max_load_per_segment(segs_allocated_data, total_segs, num_working_segs, num_data_fragments);
	validate_all_working_segments_engagement(segs_allocated_data, total_segs, num_working_segs, num_data_fragments, num_hosts_in_cluster);

	/* 7. Cleanup */
	restoreCdbComponentDatabases();
	clean_cluster(cluster, num_hosts_in_cluster);
	clean_array_of_segs(array_of_segs, total_segs);
	clean_allocated_fragments(segs_allocated_data, total_segs);
	pfree(array_of_primaries);
}
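/*
 * A minimal invocation sketch for test__distribute_work_to_gp_segments().
 * The TestInputData layout is inferred from the field accesses above; the
 * concrete values (10 hosts, 100 fragments, 3 replicas, 4 segments per
 * host, 8 working segments) are illustrative assumptions, not values taken
 * from the real test suite.
 */
static void
test__distribute_work_to_gp_segments__small_cluster_example(void)
{
	TestInputData input;

	input.m_num_hosts_in_cluster = 10;		/* must not exceed 65025 */
	input.m_num_data_fragments = 100;
	input.m_num_active_data_nodes = 10;		/* one datanode per host */
	input.m_num_of_fragment_replicas = 3;
	input.m_num_segments_on_host = 4;		/* total_segs becomes 40 */
	input.m_num_working_segs = 8;			/* only a subset does the processing */
	input.m_enable_print_input_cluster = false;
	input.m_enable_print_input_fragments = false;
	input.m_enable_print_input_segments = false;
	input.m_enable_print_allocated_fragments = false;

	test__distribute_work_to_gp_segments(&input);
}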