/** * This application groups 'num_points' row-vectors (which are randomly * generated) into 'num_means' clusters through an iterative algorithm - the * k-means algorith */ int main(int argc, char **argv) { int **points; int **means; int *clusters; int i; parse_args(argc, argv); points = (int **)malloc(sizeof(int *) * num_points); for (i=0; i<num_points; i++) { points[i] = (int *)malloc(sizeof(int) * dim); } dprintf("Generating points\n"); generate_points(points, num_points); means = (int **)malloc(sizeof(int *) * num_means); for (i=0; i<num_means; i++) { means[i] = (int *)malloc(sizeof(int) * dim); } dprintf("Generating means\n"); generate_points(means, num_means); clusters = (int *)malloc(sizeof(int) * num_points); memset(clusters, -1, sizeof(int) * num_points); modified = true; dprintf("\n\nStarting iterative algorithm\n"); while (modified) { modified = false; dprintf("."); find_clusters(points, means, clusters); calc_means(points, means, clusters); } dprintf("\n\nFinal Means:\n"); dump_matrix(means, num_means, dim); dprintf("Cleaning up\n"); for (i=0; i<num_means; i++) { free(means[i]); } free(means); for (i=0; i<num_points; i++) { free(points[i]); } free(points); return 0; }
int main() { // declare input set point_t inputs[] = { {2.1, 4.3}, {3.2618187593136714, 5.248776670511476}, {4.229518090705041, 6.394882252046046}, {4.970144154070997, 7.699287493626686}, {5.4584758171066685, 9.117572439213015}, {5.677883530716649, 10.601439096028392}, {5.620895628364498, 12.10035616477686}, {5.289452764715364, 13.563279822435065}, {4.694841828968918, 14.940391958424991}, {3.857311583384793, 16.18479667061946}, {2.8053831159695455, 17.25411724904363}, {1.574878589031866, 18.11193926390795}, {0.20770135840910497, 18.729050615340313}, {-1.2495909950582316, 19.08443631641118}, {-2.7473721281023433, 19.165994133308747}, {-4.234636900627881, 18.97094671239639}, {-5.66073829458397, 18.505936159659683}, {-6.9771121374195335, 17.786797851753388}, {-8.138930896733212, 16.83802118124192}, {-9.10663022812459, 15.691915599707357}, {-9.847256291490558, 14.387510358126725} }; point_t* points = calloc(21, sizeof(point_t)); // print calculated points generate_points(inputs, points, 0, 21); }
/*
 * One-time initialization: configure the screen, then build the transform
 * matrices, the point set, and the texture, in that order.
 * NOTE(review): the four callees are defined elsewhere; the ordering here is
 * presumably significant (screen before draw resources) — confirm before
 * reordering.
 */
void setup()
{
    /* Set up the framebuffer with the configured width/height/pixel mode. */
    PbScreenSetup( SCR_W, SCR_H, SCR_PSM );
    build_matrices();
    generate_points();
    load_texture();
}
int main(int argc, char **argv) { int i; parse_args(argc, argv); // Create the matrix to store the points matrix = (int **)malloc(sizeof(int *) * num_rows); for (i=0; i<num_rows; i++) { matrix[i] = (int *)malloc(sizeof(int) * num_cols); } //Generate random values for all the points in the matrix generate_points(matrix, num_rows, num_cols); // Print the points // dump_points(matrix, num_rows, num_cols); // Allocate Memory to store the mean and the covariance matrix mean = (int *)malloc(sizeof(int) * num_rows); cov = (int **)malloc(sizeof(int *) * num_rows); for (i=0; i<num_rows; i++) { cov[i] = (int *)malloc(sizeof(int) * num_rows); // printf ("cov[%d] = %p\n", i, cov[i]); } // Compute the mean and the covariance CHECK_ERROR((num_procs = sysconf(_SC_NPROCESSORS_ONLN)) <= 0); // goes faster with more threads // maknum_procs = 48; printf("The number of processors is %d\n", num_procs); _pthread_mean(); printf ("now spawning cov threads.\n"); fflush (stdout); _pthread_cov(); dump_points(cov, num_rows, num_rows); #if 0 for (i=0; i<num_rows; i++) { free(cov[i]); free(matrix[i]); } free(mean); free(cov); free(matrix); #endif return 0; }
/**
 * @brief Initializes the main application class.
 *
 * Sets the drawing surface to "not yet created", marks the app as running,
 * and generates the initial point set.
 */
ConvexHullApp::ConvexHullApp()
{
    /* Do SDL initialization. */
    surf = NULL;      // no surface yet; presumably created during later SDL setup — confirm
    running = true;   // main-loop flag; presumably cleared elsewhere on quit — confirm

    /* Generate a list of points. */
    generate_points();
}
// Default-construct a live-point set: 2 dimensions, 10 points, with the
// coordinate matrix and likelihood vector zeroed before generation.
LivePoints(){
    mndim = 2;                        // dimensionality of each point
    msize = 10;                       // number of live points
    mpoints.setZero(mndim,msize);     // mndim x msize coordinate storage
    mlikelihoods.setZero(msize);      // one likelihood per point
    generate_points();                // fills the point set (defined elsewhere)
}
// Generates the buffer end-cap points at 'ultimate_point' for the segment
// (penultimate_point -> ultimate_point). When the left/right buffer
// distances are equal the cap is centered on the point itself; otherwise the
// cap center is shifted along the cap direction by half the distance
// difference and drawn with the average radius.
inline void apply(Point const& penultimate_point,
        Point const& perp_left_point,
        Point const& ultimate_point,
        Point const& ,                      // unused fourth point
        buffer_side_selector side,
        DistanceStrategy const& distance,
        RangeOut& range_out) const
{
    typedef typename coordinate_type<Point>::type coordinate_type;

    // Promote to at least double so the trigonometry below keeps precision.
    typedef typename geometry::select_most_precise
        <
            coordinate_type,
            double
        >::type promoted_type;

    // Direction of the cap, derived from the perpendicular and the end point.
    promoted_type const alpha
        = calculate_angle<promoted_type>(perp_left_point, ultimate_point);

    promoted_type const dist_left
        = distance.apply(penultimate_point, ultimate_point, buffer_side_left);
    promoted_type const dist_right
        = distance.apply(penultimate_point, ultimate_point, buffer_side_right);

    if (geometry::math::equals(dist_left, dist_right))
    {
        // Symmetric buffer: cap centered on the segment end point.
        generate_points(ultimate_point, alpha, dist_left, range_out);
    }
    else
    {
        // Asymmetric buffer: shift the cap center by half the difference
        // (sign flipped on the right side) and use the mean radius.
        promoted_type const two = 2.0;
        promoted_type dist_half_diff = (dist_left - dist_right) / two;

        if (side == buffer_side_right)
        {
            dist_half_diff = -dist_half_diff;
        }

        Point shifted_point;
        set<0>(shifted_point, get<0>(ultimate_point) + dist_half_diff * cos(alpha));
        set<1>(shifted_point, get<1>(ultimate_point) + dist_half_diff * sin(alpha));

        generate_points(shifted_point, alpha,
            (dist_left + dist_right) / two, range_out);
    }
}
int main(int argc, char **argv) { int i; parse_args(argc, argv); // Create the matrix to store the points matrix = (int **)malloc(sizeof(int *) * num_rows); for (i=0; i<num_rows; i++) { matrix[i] = (int *)malloc(sizeof(int) * num_cols); } //Generate random values for all the points in the matrix generate_points(matrix, num_rows, num_cols); // Print the points #ifndef FAULTINJECTION dump_points(matrix, num_rows, num_cols); #endif // Allocate Memory to store the mean and the covariance matrix mean = (int *)malloc(sizeof(int) * num_rows); cov = (int **)malloc(sizeof(int *) * num_rows); for (i=0; i<num_rows; i++) { cov[i] = (int *)malloc(sizeof(int) * num_rows); } // Compute the mean and the covariance pthread_mean(); pthread_cov(); dump_points(cov, num_rows, num_rows); for (i=0; i<num_rows; i++) { free(cov[i]); free(matrix[i]); } free(mean); free(cov); free(matrix); return 0; }
/*
 * Pthread k-means driver: randomly generates points and initial means
 * (file-level globals), then alternates a parallel assignment pass
 * (find_clusters) and a parallel update pass (calc_means), one thread per
 * online processor, until the shared 'modified' flag stays false.
 */
int main(int argc, char **argv)
{
    int num_procs, curr_point;
    int i;
    pthread_t *pid;
    pthread_attr_t attr;
    thread_arg *arg;
    int num_per_thread, excess;   /* even split plus remainder distribution */

    parse_args(argc, argv);

    /* Allocate and randomly fill the input points. */
    points = (int **)malloc(sizeof(int *) * num_points);
    for (i=0; i<num_points; i++) {
        points[i] = (int *)malloc(sizeof(int) * dim);
    }
    dprintf("Generating points\n");
    generate_points(points, num_points);

    /* Allocate and randomly fill the initial means. */
    means = (int **)malloc(sizeof(int *) * num_means);
    for (i=0; i<num_means; i++) {
        means[i] = (int *)malloc(sizeof(int) * dim);
    }
    dprintf("Generating means\n");
    generate_points(means, num_means);

    /* -1 is all-one bytes, so memset yields a valid "unassigned" marker. */
    clusters = (int *)malloc(sizeof(int) * num_points);
    memset(clusters, -1, sizeof(int) * num_points);

    pthread_attr_init(&attr);
    pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
    /* One worker thread per online processor. */
    CHECK_ERROR((num_procs = sysconf(_SC_NPROCESSORS_ONLN)) <= 0);
    CHECK_ERROR( (pid = (pthread_t *)malloc(sizeof(pthread_t) * num_procs)) == NULL);

    modified = true;
    printf("Starting iterative algorithm\n");

    /* Create the threads to process the distances between the various points
     * and repeat until 'modified' is no longer set by any worker. */
    int num_threads;
    while (modified) {
        /* --- Assignment pass: split the points across num_procs threads. --- */
        num_per_thread = num_points / num_procs;
        excess = num_points % num_procs;
        modified = false;
        dprintf(".");
        curr_point = 0;
        num_threads = 0;
        while (curr_point < num_points) {
            /* NOTE(review): 'arg' is heap-allocated per thread and never freed
             * here — presumably the worker frees it; verify find_clusters. */
            CHECK_ERROR((arg = (thread_arg *)malloc(sizeof(thread_arg))) == NULL);
            arg->start_idx = curr_point;
            arg->num_pts = num_per_thread;
            /* Spread the remainder one extra point per early thread. */
            if (excess > 0) {
                arg->num_pts++;
                excess--;
            }
            CHECK_ERROR((pthread_create(&(pid[num_threads++]), &attr,
                                        find_clusters, (void *)(arg))) != 0);
            curr_point += arg->num_pts;
        }
        assert (num_threads == num_procs);
        for (i = 0; i < num_threads; i++) {
            pthread_join(pid[i], NULL);
        }

        /* --- Update pass: split the means across num_procs threads. --- */
        num_per_thread = num_means / num_procs;
        excess = num_means % num_procs;
        curr_point = 0;
        num_threads = 0;
        while (curr_point < num_means) {
            CHECK_ERROR((arg = (thread_arg *)malloc(sizeof(thread_arg))) == NULL);
            arg->start_idx = curr_point;
            /* Scratch accumulator for the per-dimension sums.
             * NOTE(review): like 'arg', presumably freed by calc_means. */
            arg->sum = (int *)malloc(dim * sizeof(int));
            arg->num_pts = num_per_thread;
            if (excess > 0) {
                arg->num_pts++;
                excess--;
            }
            CHECK_ERROR((pthread_create(&(pid[num_threads++]), &attr,
                                        calc_means, (void *)(arg))) != 0);
            curr_point += arg->num_pts;
        }
        assert (num_threads == num_procs);
        for (i = 0; i < num_threads; i++) {
            pthread_join(pid[i], NULL);
        }
    }

    dprintf("\n\nFinal means:\n");
    dump_points(means, num_means);

    /* Cleanup. NOTE(review): 'pid' is never freed — harmless at exit. */
    for (i = 0; i < num_points; i++) free(points[i]);
    free(points);
    for (i = 0; i < num_means; i++) {
        free(means[i]);
    }
    free(means);
    free(clusters);
    return 0;
}
/*
 * MapReduce PCA driver: generates a random num_rows x num_cols matrix, runs
 * one MapReduce pass to compute the per-row means and a second pass to
 * compute the (upper-triangular) covariance matrix, then prints the
 * covariance checksum and frees the results. Timing is reported when TIMING
 * is defined.
 */
int main(int argc, char **argv)
{
    final_data_t pca_mean_vals;     /* output of the mean pass */
    final_data_t pca_cov_vals;      /* output of the covariance pass */
    map_reduce_args_t map_reduce_args;
    int i;
    struct timeval begin, end;
#ifdef TIMING
    unsigned int library_time = 0;
#endif

    get_time (&begin);

    parse_args(argc, argv);

    // Allocate space for the matrix (one flat block, row-major)
    pca_data.matrix = (int *)malloc(sizeof(int) * num_rows * num_cols);

    //Generate random values for all the points in the matrix
    generate_points(pca_data.matrix, num_rows, num_cols);

    // Print the points
    //dump_points(pca_data.matrix, num_rows, num_cols);

    /* Create the structure to store the mean value */
    pca_data.unit_size = sizeof(int) * num_cols; // size of one row
    pca_data.next_start_row = pca_data.next_cov_row = 0;
    pca_data.mean = NULL;

    CHECK_ERROR (map_reduce_init ());

    // Setup scheduler args for computing the mean
    memset(&map_reduce_args, 0, sizeof(map_reduce_args_t));
    map_reduce_args.task_data = &pca_data;
    map_reduce_args.map = pca_mean_map;
    map_reduce_args.reduce = NULL; // use identity reduce
    map_reduce_args.splitter = pca_mean_splitter;
    map_reduce_args.locator = pca_mean_locator;
    map_reduce_args.key_cmp = mymeancmp;
    map_reduce_args.unit_size = pca_data.unit_size;
    map_reduce_args.partition = NULL; // use default
    map_reduce_args.result = &pca_mean_vals;
    map_reduce_args.data_size = num_rows * num_cols * sizeof(int);
    /* Tuning knobs come from the environment (defaults shown in trailing
     * comments). */
    map_reduce_args.L1_cache_size = atoi(GETENV("MR_L1CACHESIZE"));//1024 * 1024 * 16;
    map_reduce_args.num_map_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_reduce_threads = atoi(GETENV("MR_NUMTHREADS"));//16;
    map_reduce_args.num_merge_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_procs = atoi(GETENV("MR_NUMPROCS"));//16;
    map_reduce_args.key_match_factor = (float)atof(GETENV("MR_KEYMATCHFACTOR"));//2;

    printf("PCA Mean: Calling MapReduce Scheduler\n");

    get_time (&end);
#ifdef TIMING
    fprintf (stderr, "initialize: %u\n", time_diff (&end, &begin));
#endif

    get_time (&begin);
    CHECK_ERROR(map_reduce(&map_reduce_args) < 0);
    get_time (&end);
#ifdef TIMING
    library_time += time_diff (&end, &begin);
#endif
    get_time (&begin);

    printf("PCA Mean: MapReduce Completed\n");
    assert (pca_mean_vals.length == num_rows);
    //dprintf("Mean vector:\n");

    /* Re-point the task data at the mean results for the covariance pass. */
    pca_data.unit_size = sizeof(int) * num_cols * 2; // size of two rows
    pca_data.next_start_row = pca_data.next_cov_row = 0;
    pca_data.mean = pca_mean_vals.data; // array of keys and values - the keys have been freed tho

    // Setup Scheduler args for computing the covariance
    memset(&map_reduce_args, 0, sizeof(map_reduce_args_t));
    map_reduce_args.task_data = &pca_data;
    map_reduce_args.map = pca_cov_map;
    map_reduce_args.reduce = NULL; // use identity reduce
    map_reduce_args.splitter = pca_cov_splitter;
    map_reduce_args.locator = pca_cov_locator;
    map_reduce_args.key_cmp = mycovcmp;
    map_reduce_args.unit_size = pca_data.unit_size;
    map_reduce_args.partition = NULL; // use default
    map_reduce_args.result = &pca_cov_vals;
    // data size is number of elements that need to be calculated in a cov matrix
    // (upper triangle incl. diagonal) multiplied by the size of two rows each
    map_reduce_args.data_size = ((((num_rows * num_rows) - num_rows)/2) + num_rows) * pca_data.unit_size;
    map_reduce_args.L1_cache_size = atoi(GETENV("MR_L1CACHESIZE"));//1024 * 1024 * 16;
    map_reduce_args.num_map_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_reduce_threads = atoi(GETENV("MR_NUMTHREADS"));//16;
    map_reduce_args.num_merge_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_procs = atoi(GETENV("MR_NUMPROCS"));//16;
    /* NOTE(review): the mean pass converts this with atof but here atoi
     * truncates the float factor — possibly unintended; confirm. */
    map_reduce_args.key_match_factor = atoi(GETENV("MR_KEYMATCHFACTOR"));//2;
    map_reduce_args.use_one_queue_per_task = true;

    printf("PCA Cov: Calling MapReduce Scheduler\n");

    get_time (&end);
#ifdef TIMING
    fprintf (stderr, "inter library: %u\n", time_diff (&end, &begin));
#endif

    get_time (&begin);
    CHECK_ERROR(map_reduce(&map_reduce_args) < 0);
    get_time (&end);
#ifdef TIMING
    library_time += time_diff (&end, &begin);
    fprintf (stderr, "library: %u\n", library_time);
#endif
    get_time (&begin);

    CHECK_ERROR (map_reduce_finalize ());

    printf("PCA Cov: MapReduce Completed\n");
    assert(pca_cov_vals.length == ((((num_rows * num_rows) - num_rows)/2) + num_rows));

    // Free the allocated structures while accumulating a checksum of the
    // covariance values (stored directly in the 'val' pointers).
    int cnt = 0;
    intptr_t sum = 0;
    dprintf("\n\nCovariance sum: ");
    for (i = 0; i <pca_cov_vals.length; i++)
    {
        sum += (intptr_t)(pca_cov_vals.data[i].val);
        //dprintf("%5d ", );
        cnt++;
        /* Each triangular "row" is one shorter than the last, so num_rows is
         * deliberately decremented as the walk proceeds. */
        if (cnt == num_rows)
        {
            //dprintf("\n");
            num_rows--;
            cnt = 0;
        }
        free(pca_cov_vals.data[i].key);
    }
    dprintf ("%" PRIdPTR "\n", sum);

    free (pca_cov_vals.data);
    free (pca_mean_vals.data);
    free (pca_data.matrix);

    get_time (&end);
#ifdef TIMING
    fprintf (stderr, "finalize: %u\n", time_diff (&end, &begin));
#endif
    return 0;
}
/*
 * MapReduce k-means driver: generates random points and initial means, then
 * repeatedly runs the MapReduce scheduler — each pass reassigns points and
 * emits updated means — until no pass sets the shared 'modified' flag.
 * Timing is reported when TIMING is defined.
 */
int main(int argc, char **argv)
{
    final_data_t kmeans_vals;       /* per-iteration scheduler output */
    map_reduce_args_t map_reduce_args;
    int i;
    int *means;                     /* flat backing store for the initial means */
    bool first_run;
    struct timeval begin, end;
#ifdef TIMING
    unsigned int library_time = 0;
    unsigned int inter_library_time = 0;
#endif

    get_time (&begin);

    parse_args(argc, argv);

    // get points (one flat num_points x dim block)
    kmeans_data.points = (int *)malloc(sizeof(int) * num_points * dim);
    generate_points(kmeans_data.points, num_points);

    // get means: each keyval's val points into the flat 'means' block
    kmeans_data.means = (keyval_t *)malloc(sizeof(keyval_t) * num_means);
    means = malloc(sizeof(int) * dim * num_means);
    for (i=0; i<num_means; i++)
    {
        kmeans_data.means[i].val = &means[i * dim];
        kmeans_data.means[i].key = malloc(sizeof(void *));
    }
    generate_means(kmeans_data.means, num_means);

    kmeans_data.next_point = 0;
    kmeans_data.unit_size = sizeof(int) * dim;

    /* -1 is all-one bytes, so memset yields a valid "unassigned" marker. */
    kmeans_data.clusters = (int *)malloc(sizeof(int) * num_points);
    memset(kmeans_data.clusters, -1, sizeof(int) * num_points);

    modified = true;

    CHECK_ERROR (map_reduce_init ());

    // Setup map reduce args
    memset(&map_reduce_args, 0, sizeof(map_reduce_args_t));
    map_reduce_args.task_data = &kmeans_data;
    map_reduce_args.map = kmeans_map;
    map_reduce_args.reduce = kmeans_reduce;
    map_reduce_args.splitter = kmeans_splitter;
    map_reduce_args.locator = kmeans_locator;
    map_reduce_args.key_cmp = mykeycmp;
    map_reduce_args.unit_size = kmeans_data.unit_size;
    map_reduce_args.partition = NULL; // use default
    map_reduce_args.result = &kmeans_vals;
    map_reduce_args.data_size = (num_points + num_means) * dim * sizeof(int);
    /* Tuning knobs come from the environment (defaults in trailing comments). */
    map_reduce_args.L1_cache_size = atoi(GETENV("MR_L1CACHESIZE"));//1024 * 8;
    map_reduce_args.num_map_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_reduce_threads = atoi(GETENV("MR_NUMTHREADS"));//16;
    map_reduce_args.num_merge_threads = atoi(GETENV("MR_NUMTHREADS"));//8;
    map_reduce_args.num_procs = atoi(GETENV("MR_NUMPROCS"));//16;
    map_reduce_args.key_match_factor = (float)atof(GETENV("MR_KEYMATCHFACTOR"));//2;
    map_reduce_args.use_one_queue_per_task = true;

    printf("KMeans: Calling MapReduce Scheduler\n");

    get_time (&end);
#ifdef TIMING
    fprintf (stderr, "initialize: %u\n", time_diff (&end, &begin));
#endif

    first_run = true;
    while (modified == true)
    {
        modified = false;
        kmeans_data.next_point = 0;
        //dprintf(".");

        get_time (&begin);
        CHECK_ERROR (map_reduce (&map_reduce_args) < 0);
        get_time (&end);
#ifdef TIMING
        library_time += time_diff (&end, &begin);
#endif
        get_time (&begin);

        /* Fold the emitted means back into kmeans_data.means. On the first
         * run the old 'val' still points into the flat 'means' block, so it
         * must NOT be freed individually — hence the first_run guard. */
        for (i = 0; i < kmeans_vals.length; i++)
        {
            int mean_idx = *((int *)(kmeans_vals.data[i].key));
            if (first_run == false)
                free(kmeans_data.means[mean_idx].val);
            kmeans_data.means[mean_idx] = kmeans_vals.data[i];
        }
        if (kmeans_vals.length > 0)
            free(kmeans_vals.data);

        get_time (&end);
#ifdef TIMING
        inter_library_time += time_diff (&end, &begin);
#endif
        first_run = false;
    }

#ifdef TIMING
    fprintf (stderr, "library: %u\n", library_time);
    fprintf (stderr, "inter library: %u\n", inter_library_time);
#endif
    get_time (&begin);

    CHECK_ERROR (map_reduce_finalize ());

    dprintf("\n");
    printf("KMeans: MapReduce Completed\n");

    dprintf("\n\nFinal means:\n");
    dump_means(kmeans_data.means, num_means);

    /* NOTE(review): freeing each means[i].val assumes every mean was replaced
     * by at least one iteration (vals now individually allocated) — confirm
     * this holds when a cluster receives no points. */
    free(kmeans_data.points);
    for (i = 0; i < num_means; i++)
    {
        free(kmeans_data.means[i].key);
        free(kmeans_data.means[i].val);
    }
    free (kmeans_data.means);
    free (means);
    free(kmeans_data.clusters);

    get_time (&end);
#ifdef TIMING
    fprintf (stderr, "finalize: %u\n", time_diff (&end, &begin));
#endif
    return 0;
}
/** * This application groups 'num_points' row-vectors (which are randomly * generated) into 'num_means' clusters through an iterative algorithm - the * k-means algorith */ int main(int argc, char **argv) { int **points; int **means; int *clusters; int i; struct timeval begin, end; parse_args(argc, argv); points = (int **)malloc(sizeof(int *) * num_points); for (i=0; i<num_points; i++) { points[i] = (int *)malloc(sizeof(int) * dim); } dprintf("Generating points\n"); generate_points(points, num_points); means = (int **)malloc(sizeof(int *) * num_means); for (i=0; i<num_means; i++) { means[i] = (int *)malloc(sizeof(int) * dim); } dprintf("Generating means\n"); generate_points(means, num_means); clusters = (int *)malloc(sizeof(int) * num_points); memset(clusters, -1, sizeof(int) * num_points); modified = true; dprintf("\n\nStarting iterative algorithm\n"); get_time (&begin); while (modified) { modified = false; dprintf("."); find_clusters(points, means, clusters); calc_means(points, means, clusters); } get_time (&end); #ifdef TIMING fprintf (stderr, "library: %u\n", time_diff (&end, &begin)); #endif dprintf("\n\nFinal Means:\n"); dump_matrix(means, num_means, dim); dprintf("Cleaning up\n"); for (i=0; i<num_means; i++) { free(means[i]); } free(means); for (i=0; i<num_points; i++) { free(points[i]); } free(points); return 0; }
int main(int argc, char **argv) { int num_points; // number of vectors int num_means; // number of clusters int dim; // Dimension of each vector int grid_size; // size of each dimension of vector space int num_procs, curr_point; int i; pthread_t pid[256]; pthread_attr_t attr; int num_per_thread, excess; num_points = DEF_NUM_POINTS; num_means = DEF_NUM_MEANS; dim = DEF_DIM; grid_size = DEF_GRID_SIZE; { int c; extern char *optarg; extern int optind; while ((c = getopt(argc, argv, "d:c:p:s:")) != EOF) { switch (c) { case 'd': dim = atoi(optarg); break; case 'c': num_means = atoi(optarg); break; case 'p': num_points = atoi(optarg); break; case 's': grid_size = atoi(optarg); break; case '?': printf("Usage: %s -d <vector dimension> -c <num clusters> -p <num points> -s <grid size>\n", argv[0]); exit(1); } } if (dim <= 0 || num_means <= 0 || num_points <= 0 || grid_size <= 0) { printf("Illegal argument value. All values must be numeric and greater than 0\n"); exit(1); } } printf("Dimension = %d\n", dim); printf("Number of clusters = %d\n", num_means); printf("Number of points = %d\n", num_points); printf("Size of each dimension = %d\n", grid_size); int ** points = (int **)malloc(sizeof(int *) * num_points); for (i=0; i<num_points; i++) { points[i] = (int *)malloc(sizeof(int) * dim); } dprintf("Generating points\n"); generate_points(points, num_points, dim, grid_size); int **means; means = (int **)malloc(sizeof(int *) * num_means); for (i=0; i<num_means; i++) { // means[i] = (int *)malloc(sizeof(int) * dim); means[i] = (int *)malloc(128); } dprintf("Generating means\n"); generate_points(means, num_means, dim, grid_size); int * clusters = (int *)malloc(sizeof(int) * num_points); memset(clusters, -1, sizeof(int) * num_points); pthread_attr_init(&attr); pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); CHECK_ERROR((num_procs = sysconf(_SC_NPROCESSORS_ONLN)) <= 0); // FIX ME //CHECK_ERROR((num_procs = 4 * sysconf(_SC_NPROCESSORS_ONLN)) <= 0); // FIX ME // CHECK_ERROR( (pid = 
(pthread_t *)malloc(sizeof(pthread_t) * num_procs)) == NULL); int modified = true; printf("Starting iterative algorithm!!!!!!\n"); /* Create the threads to process the distances between the various points and repeat until modified is no longer valid */ int num_threads; thread_arg arg[256]; while (modified) { num_per_thread = num_points / num_procs; excess = num_points % num_procs; modified = false; dprintf("."); curr_point = 0; num_threads = 0; while (curr_point < num_points) { // CHECK_ERROR((arg = (thread_arg *)malloc(sizeof(thread_arg))) == NULL); arg[num_threads].start_idx = curr_point; arg[num_threads].num_pts = num_per_thread; arg[num_threads].dim = dim; arg[num_threads].num_means = num_means; arg[num_threads].num_points = num_points; arg[num_threads].means = means; arg[num_threads].points = points; arg[num_threads].clusters = clusters; if (excess > 0) { arg[num_threads].num_pts++; excess--; } curr_point += arg[num_threads].num_pts; num_threads++; } // printf("in this run, num_threads is %d, num_per_thread is %d\n", num_threads, num_per_thread); for (i = 0; i < num_threads; i++) { CHECK_ERROR((pthread_create(&(pid[i]), &attr, find_clusters, (void *)(&arg[i]))) != 0); // EDB - with hierarchical commit we would not have had to // "localize" num_threads. 
} assert (num_threads == num_procs); for (i = 0; i < num_threads; i++) { int m; pthread_join(pid[i], (void *) &m); modified |= m; } num_per_thread = num_means / num_procs; excess = num_means % num_procs; curr_point = 0; num_threads = 0; assert (dim <= MAX_DIM); // printf("in this run again, num_threads is %d, num_per_thread is %d\n", num_threads, num_per_thread); while (curr_point < num_means) { // CHECK_ERROR((arg = (thread_arg *)malloc(sizeof(thread_arg))) == NULL); arg[num_threads].start_idx = curr_point; // arg[num_threads].sum = (int *)malloc(dim * sizeof(int)); arg[num_threads].num_pts = num_per_thread; if (excess > 0) { arg[num_threads].num_pts++; excess--; } curr_point += arg[num_threads].num_pts; num_threads++; } for (i = 0; i < num_threads; i++) { CHECK_ERROR((pthread_create(&(pid[i]), &attr, calc_means, (void *)(&arg[i]))) != 0); } // printf ("num threads = %d\n", num_threads); // printf ("num procs = %d\n", num_procs); assert (num_threads == num_procs); for (i = 0; i < num_threads; i++) { pthread_join(pid[i], NULL); // free (arg[i].sum); } } dprintf("\n\nFinal means:\n"); //dump_points(means, num_means, dim); for (i = 0; i < num_points; i++) free(points[i]); free(points); for (i = 0; i < num_means; i++) { free(means[i]); } free(means); free(clusters); return 0; }
// -----------------------------------------------------------------------------
// Builds per-feature face lists and bounding boxes for the surface mesh,
// asks the subclass for a set of sample points (generate_points), determines
// which feature polyhedron each point falls inside (serially or via TBB),
// and hands the resulting feature ids to the subclass (assign_points).
// -----------------------------------------------------------------------------
void SampleSurfaceMesh::execute()
{
  setErrorCondition(0);
  dataCheck();
  if(getErrorCondition() < 0) { return; }

  DataContainer::Pointer sm = getDataContainerArray()->getDataContainer(m_SurfaceMeshFaceLabelsArrayPath.getDataContainerName());

  SIMPL_RANDOMNG_NEW()

#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
  tbb::task_scheduler_init init;
  bool doParallel = true;
#endif

  TriangleGeom::Pointer triangleGeom = sm->getGeometryAs<TriangleGeom>();

  // pull down faces
  int64_t numFaces = m_SurfaceMeshFaceLabelsPtr.lock()->getNumberOfTuples();

  // create array to hold bounding vertices for each face
  // (ll/ur are reused scratch corners; faceBBs stores 2 corners per face)
  FloatArrayType::Pointer llPtr = FloatArrayType::CreateArray(3, "_INTERNAL_USE_ONLY_Lower_Left");
  FloatArrayType::Pointer urPtr = FloatArrayType::CreateArray(3, "_INTERNAL_USE_ONLY_Upper_Right");
  float* ll = llPtr->getPointer(0);
  float* ur = urPtr->getPointer(0);
  VertexGeom::Pointer faceBBs = VertexGeom::CreateGeometry(2 * numFaces, "_INTERNAL_USE_ONLY_faceBBs");

  // walk through faces to see how many features there are
  // (each face stores the two feature labels it borders)
  int32_t g1 = 0, g2 = 0;
  int32_t maxFeatureId = 0;
  for (int64_t i = 0; i < numFaces; i++)
  {
    g1 = m_SurfaceMeshFaceLabels[2 * i];
    g2 = m_SurfaceMeshFaceLabels[2 * i + 1];
    if (g1 > maxFeatureId) { maxFeatureId = g1; }
    if (g2 > maxFeatureId) { maxFeatureId = g2; }
  }

  // add one to account for feature 0
  int32_t numFeatures = maxFeatureId + 1;

  // create a dynamic list array to hold face lists
  Int32Int32DynamicListArray::Pointer faceLists = Int32Int32DynamicListArray::New();
  std::vector<int32_t> linkCount(numFeatures, 0);

  // fill out lists with number of references to cells
  typedef boost::shared_array<int32_t> SharedInt32Array_t;
  SharedInt32Array_t linkLocPtr(new int32_t[numFaces]);
  int32_t* linkLoc = linkLocPtr.get();

  ::memset(linkLoc, 0, numFaces * sizeof(int32_t));

  // traverse data to determine number of faces belonging to each feature
  // (label 0 / negative labels are skipped: not real features)
  for (int64_t i = 0; i < numFaces; i++)
  {
    g1 = m_SurfaceMeshFaceLabels[2 * i];
    g2 = m_SurfaceMeshFaceLabels[2 * i + 1];
    if (g1 > 0) { linkCount[g1]++; }
    if (g2 > 0) { linkCount[g2]++; }
  }

  // now allocate storage for the faces
  faceLists->allocateLists(linkCount);

  // traverse data again to get the faces belonging to each feature
  // (linkLoc tracks the next free slot in each feature's list)
  for (int64_t i = 0; i < numFaces; i++)
  {
    g1 = m_SurfaceMeshFaceLabels[2 * i];
    g2 = m_SurfaceMeshFaceLabels[2 * i + 1];
    if (g1 > 0) { faceLists->insertCellReference(g1, (linkLoc[g1])++, i); }
    if (g2 > 0) { faceLists->insertCellReference(g2, (linkLoc[g2])++, i); }
    // find bounding box for each face while we are here
    GeometryMath::FindBoundingBoxOfFace(triangleGeom, i, ll, ur);
    faceBBs->setCoords(2 * i, ll);
    faceBBs->setCoords(2 * i + 1, ur);
  }

  // generate the list of sampling points from subclass
  VertexGeom::Pointer points = generate_points();
  if(getErrorCondition() < 0 || NULL == points.get()) { return; }
  int64_t numPoints = points->getNumberOfVertices();

  // create array to hold which polyhedron (feature) each point falls in
  Int32ArrayType::Pointer iArray = Int32ArrayType::NullPointer();
  iArray = Int32ArrayType::CreateArray(numPoints, "_INTERNAL_USE_ONLY_polyhedronIds");
  iArray->initializeWithZeros();
  int32_t* polyIds = iArray->getPointer(0);

#ifdef SIMPLib_USE_PARALLEL_ALGORITHMS
  if (doParallel == true)
  {
    // one parallel task per feature; each checks its points against its faces
    tbb::parallel_for(tbb::blocked_range<size_t>(0, numFeatures),
                      SampleSurfaceMeshImpl(triangleGeom, faceLists, faceBBs, points, polyIds), tbb::auto_partitioner());
  }
  else
#endif
  {
    SampleSurfaceMeshImpl serial(triangleGeom, faceLists, faceBBs, points, polyIds);
    serial.checkPoints(0, numFeatures);
  }

  // hand the per-point feature ids back to the subclass
  assign_points(iArray);

  notifyStatusMessage(getHumanLabel(), "Complete");
}
int main(int argc, char **argv) { int i, j, iter; int k; int min_index; double min_dist; double *delta; double *delt_perc; struct Point *centroids; struct Point *data; FILE *file; srand(time(NULL)); printf("K-Means Test\n"); printf("============\n"); if (argc > 1) k = atoi(argv[1]); //assume first argument is desired k value else k = 3; centroids = malloc(k * sizeof(struct Point)); data = malloc(DATA_POINTS * sizeof(struct Point)); delta = malloc(k * sizeof(double)); delt_perc = malloc(k * sizeof(double)); generate_points(k, centroids, 0, 0, 0, 0); generate_points(DATA_POINTS, data, 0, 0, 0, 0); //label centroids printf("Initial Centroids\n"); printf("=================\n"); for(i = 0; i < k; i++) { printf("{%f, %f}\n", centroids[i].x, centroids[i].y); centroids[i].label = i; } iter = 0; do { char *gnuplot_commands[RULES]; gnuplot_commands[1] = "unset key"; char *plot = malloc(sizeof(char) *(k + 2) * strlen(" 'data_clust.dat' ,") + 20); strcat(plot, "plot "); //label all sample data from initial centroids for(i = 0; i < DATA_POINTS; i++) { min_dist = distance(&data[i], ¢roids[0]); min_index = 0; int j; for(j = 1; j < k; j++) { double dist = distance(&data[i], ¢roids[j]); if (dist < min_dist) { min_dist = dist; min_index = j; } data[i].label = min_index; } } //write out data points for this iteration for(i = 0; i < k; i++) { char fname[strlen("data_clust.dat") + k]; sprintf(fname, "data_clust%d.dat", i); file = fopen(fname, "w"); strcat(plot, "'"); strcat(plot, fname); strcat(plot, "', "); if(file == NULL) { perror("Failed to open data file"); return -1; } for(j = 0; j < DATA_POINTS; j++) { if(data[j].label == centroids[i].label) fprintf(file, "%f %f\n", data[j].x, data[j].y); } fclose(file); } char cenName[strlen("cent.dat") + k]; sprintf(cenName, "cent_%d.dat", i); file = fopen(cenName, "w"); strcat(plot, "'"); strcat(plot, cenName); strcat(plot, "' "); if(file == NULL) { perror("Failed to open init_cent.dat"); return -1; } //write out centroids for(i = 0; i < k; 
i++) { fprintf(file,"%f %f\n", centroids[i].x, centroids[i].y); } fclose(file); //plot clusters and centroids gnuplot_commands[0] = malloc(sizeof(char) * strlen("set title 'K-Means Clustering Iter' ") + iter + 1); sprintf(gnuplot_commands[0], "set title 'K-Means Clustering Iter %d'", iter); gnuplot_commands[2] = plot; //print commands printf("\n"); for(i = 0; i < RULES; i++) { printf("%s\n", gnuplot_commands[i]); } FILE *gnuplotPipe = popen("gnuplot -persistent", "w"); for(i = 0; i < RULES; i++) fprintf(gnuplotPipe, "%s \n", gnuplot_commands[i]); fclose(gnuplotPipe); //calculate new centroids based on initial clustering printf("New Centroids\n"); printf("=============\n"); for(i = 0; i < k; i++) { struct Point * mean = get_centroid_mean(¢roids[i], data, DATA_POINTS); double diff = distance(¢roids[i], mean); double change = diff / ((diff + delta[i])/2); delta[i] = diff; delt_perc[i] = change; centroids[i].x = mean->x; centroids[i].y = mean->y; free(mean); printf("{%f, %f}\n", centroids[i].x, centroids[i].y); } printf("\n"); // free(plot); iter++; } while(average(delt_perc, k) > THRESHOLD); free(centroids); free(data); free(delta); free(delt_perc); return 0; }