/*
 * Test clustering of segments to hosts.
 * Environment: 10 segments over 3 hosts, all primary.
 */
void
test__do_segment_clustering_by_host__10SegmentsOn3Hosts(void **state)
{
	List *groups = NIL;
	ListCell *cell = NULL;
	GpHost *gphost = NULL;

	char *array_of_segs[10] =
		{"1.2.3.1", "1.2.3.1", "1.2.3.1", "1.2.3.1",
		 "1.2.3.2", "1.2.3.2", "1.2.3.2",
		 "1.2.3.3", "1.2.3.3", "1.2.3.3"};
	int number_of_segments = 10;
	/* sanity */
	assert_true(number_of_segments == (sizeof(array_of_segs) / sizeof(array_of_segs[0])));

	/* build QueryResource */
	buildQueryResource(number_of_segments, array_of_segs);
	/* sanity check for the built QueryResource */
	assert_int_equal(resource->numSegments, number_of_segments);
	assert_string_equal(((Segment*)list_nth(resource->segments, 4))->hostip, array_of_segs[4]);
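	/* mock: queue 'resource' as the return value for the next call to GetActiveQueryResource() */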
	will_return(GetActiveQueryResource, resource);

	/* test do_segment_clustering_by_host */
	groups = do_segment_clustering_by_host();

	assert_int_equal(list_length(groups), 3);

	cell = list_nth_cell(groups, 0);
	gphost = (GpHost*)lfirst(cell);
	assert_string_equal(gphost->ip, array_of_segs[0]);
	assert_int_equal(list_length(gphost->segs), 4);
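	/*
	 * check_segment_info() is a helper defined elsewhere in this file; from
	 * its call sites it is assumed to assert that entry i of the host's
	 * segment list carries the given host ip and segment index.
	 */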
	for (int i = 0; i < 4; ++i)
	{
		check_segment_info(gphost->segs, i, "1.2.3.1", i);
	}

	cell = list_nth_cell(groups, 1);
	gphost = (GpHost*)lfirst(cell);
	assert_string_equal(gphost->ip, "1.2.3.2");
	assert_int_equal(list_length(gphost->segs), 3);
	for (int i = 0; i < 3; ++i)
	{
		check_segment_info(gphost->segs, i, "1.2.3.2", i+4);
	}

	cell = list_nth_cell(groups, 2);
	gphost = (GpHost*)lfirst(cell);
	assert_string_equal(gphost->ip, "1.2.3.3");
	assert_int_equal(list_length(gphost->segs), 3);
	for (int i = 0; i < 3; ++i)
	{
		check_segment_info(gphost->segs, i, "1.2.3.3", i+7);
	}

	freeQueryResource();
}
/*
 * Test distribute_work_2_gp_segments: distributing data fragments among the
 * working Hawq segments of a simulated cluster.
 */
static void test__distribute_work_to_gp_segments(TestInputData *input)
{
	List **segs_allocated_data = NULL;
	List *input_fragments_list = NIL;
	char **array_of_segs = NULL;
	bool *array_of_primaries = NULL;
	int total_segs;
	bool cluster_size_not_exceeded = (input->m_num_hosts_in_cluster <= 65025);

	assert_true(cluster_size_not_exceeded);
	/*
	 * 1. Initialize the test input parameters.
	 * We are testing an N-host cluster; the size of the cluster is set here
	 * in section 1. Basic test assumptions:
	 * a. There is one datanode on each host in the cluster.
	 * b. There are Hawq segments on each host in the cluster.
	 * c. Each host has the same number of Hawq segments, hardcoded in this
	 *    section.
	 */
	int num_hosts_in_cluster = input->m_num_hosts_in_cluster; /* cluster size mustn't exceed 65025 - see function create_cluster() */
	int num_data_fragments = input->m_num_data_fragments; /* number of fragments in the data we intend to allocate among the Hawq segments */
	int num_active_data_nodes = input->m_num_active_data_nodes; /* number of datanodes that hold the 'queried' data - there is one datanode on each cluster host, so there are <num_hosts_in_cluster> datanodes */
	int num_of_fragment_replicas = input->m_num_of_fragment_replicas;
	int num_segments_on_host = input->m_num_segments_on_host; /* number of Hawq segments on each cluster host - we assume all cluster hosts have Hawq segments installed */
	int num_working_segs = input->m_num_working_segs; /* the subset of Hawq segments that will do the processing - not all the Hawq segments in the cluster are involved */
	bool enable_print_input_cluster = input->m_enable_print_input_cluster;
	bool enable_print_input_fragments = input->m_enable_print_input_fragments;
	bool enable_print_input_segments = input->m_enable_print_input_segments;
	bool enable_print_allocated_fragments = input->m_enable_print_allocated_fragments;
		
	/* 2. Create the cluster */
	char **cluster = create_cluster(num_hosts_in_cluster);
	
	if (enable_print_input_cluster)
		print_cluster(cluster, num_hosts_in_cluster);
	 	
	/* 3. Input - data fragments */
	input_fragments_list = spread_fragments_in_cluster(num_data_fragments, /* number of fragments in the data we are about to allocate */
													   num_active_data_nodes, /* hosts */
													   num_of_fragment_replicas, /* replicas */
													   cluster, /* the whole cluster */
													   num_hosts_in_cluster/* the number of hosts in the cluster */);
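	/*
	 * spread_fragments_in_cluster() is a test helper defined elsewhere in
	 * this file; it is assumed to return a List of fragments, each one
	 * replicated on num_of_fragment_replicas of the active datanodes.
	 */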
	if (enable_print_input_fragments)
		print_fragment_list(input_fragments_list); 
	
	/* 4. Input - hawq segments */
	total_segs = num_hosts_in_cluster * num_segments_on_host;
	array_of_segs = create_array_of_segs(cluster, num_hosts_in_cluster, num_segments_on_host);	
	array_of_primaries = create_array_of_primaries(total_segs);
		
	buildCdbComponentDatabases(total_segs, array_of_segs, array_of_primaries);	
	if (enable_print_input_segments)
		print_segments_list();

	/* 5. Build QueryResource */
	buildQueryResource(total_segs, array_of_segs);
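	/*
	 * Two return values are queued because distribute_work_2_gp_segments()
	 * presumably calls GetActiveQueryResource() twice; each will_return()
	 * satisfies exactly one call (cmockery semantics).
	 */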
	will_return(GetActiveQueryResource, resource);
	will_return(GetActiveQueryResource, resource);

	/* 6. The actual unit test of distribute_work_2_gp_segments() */
	segs_allocated_data = distribute_work_2_gp_segments(input_fragments_list, total_segs, num_working_segs);
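	/*
	 * segs_allocated_data is assumed to be indexed by segment: entry i holds
	 * the List of fragments allocated to segment i, as suggested by the
	 * print and validation helpers below.
	 */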
	if (enable_print_allocated_fragments)
		print_allocated_fragments(segs_allocated_data, total_segs);
	
	/* 7. The validations - verifying that the expected output was obtained */
	validate_total_fragments_allocated(segs_allocated_data, total_segs, num_data_fragments);
	validate_max_load_per_segment(segs_allocated_data, total_segs, num_working_segs, num_data_fragments);
	validate_all_working_segments_engagement(segs_allocated_data, total_segs, num_working_segs, num_data_fragments, num_hosts_in_cluster);
	
	/* 8. Cleanup */
	freeQueryResource();
	restoreCdbComponentDatabases();
	clean_cluster(cluster, num_hosts_in_cluster);
	clean_array_of_segs(array_of_segs, total_segs);
	clean_allocated_fragments(segs_allocated_data, total_segs);
	pfree(array_of_primaries);
}
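
/*
 * Illustrative driver (a hedged sketch, not registered with the test
 * runner): shows how a concrete scenario could be fed to
 * test__distribute_work_to_gp_segments(). The field names follow the m_*
 * members read above; the real TestInputData definition lives elsewhere in
 * this file, and the chosen numbers are arbitrary examples.
 */
static void
example__distribute_work__3_hosts_sketch(void)
{
	TestInputData input = {0};

	input.m_num_hosts_in_cluster = 3;     /* 3-host cluster */
	input.m_num_data_fragments = 10;      /* 10 fragments to allocate */
	input.m_num_active_data_nodes = 3;    /* one datanode per host */
	input.m_num_of_fragment_replicas = 2;
	input.m_num_segments_on_host = 4;     /* 3 * 4 = 12 segments in total */
	input.m_num_working_segs = 6;         /* only a subset does the work */
	input.m_enable_print_input_cluster = false;
	input.m_enable_print_input_fragments = false;
	input.m_enable_print_input_segments = false;
	input.m_enable_print_allocated_fragments = false;

	test__distribute_work_to_gp_segments(&input);
}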