/* TAP-based test driver for the XML loader: parse/lookup checks on a
 * known-good document, then loader behaviour across assorted inputs. */
int main(void)
{
	XmlNode *root, *node;

	/* This is how many tests you plan to run */
	plan_tests(12);

	/* Parse a known-good document and probe node/attribute lookup. */
	ok1(root = xml_load("./test/test.xml1"));
	ok1(!xml_find(root, "Doesn't Exist"));
	ok1(node = xml_find(root, "one"));
	ok1(xml_find(node, "two"));
	ok1(!xml_attr(node, "foobar"));
	ok1(!xml_attr(node, "Doesn't Exist"));
	ok1(xml_attr(node, "barfoo"));
	xml_free(root);

	/* Simple thing we expect to succeed */
	ok1(!test_load("does not exist"));  /* A file that doesn't exist */
	ok1(test_load("./test/test.xml1")); /* A basic xml file. */
	ok1(test_load("./test/test.xml2")); /* Very small well-formed xml file. */
	ok1(test_load("./test/test.xml3")); /* Smallest well-formed xml file. */
	ok1(test_load("./test/test.xml4")); /* A single unclosed tag. */

	/* Same, with an explicit description of the test. */
	// ok(some_test(), "%s with no args should return 1", "some_test")
	/* How to print out messages for debugging. */
	// diag("Address of some_test is %p", &some_test)
	/* Conditional tests must be explicitly skipped. */

	/* This exits depending on whether all tests passed */
	return exit_status();
}
/* Driver for the peak load test suite: feed each capture-format fixture
 * through test_load() and report progress on the console. */
int main(void)
{
	pout("peak load test suite... ");

	/* One run per capture format.
	 * NOTE(review): each call passes the *_len table and
	 * lengthof(*_len) — assumes the length array (not the file buffer)
	 * is what the third argument counts; confirm against test_load(). */
	test_load(pcap_file, pcap_len, lengthof(pcap_len));
	test_load(pcapng_file, pcapng_len, lengthof(pcapng_len));
	test_load(erf_file, erf_len, lengthof(erf_len));

	pout("ok\n");
	return (0);
}
static gint test_file (const gchar *filename) { GBookmarkFile *bookmark_file; gboolean success; g_return_val_if_fail (filename != NULL, 1); g_print ("checking GBookmarkFile...\n"); bookmark_file = g_bookmark_file_new (); g_assert (bookmark_file != NULL); success = test_load (bookmark_file, filename); if (success) { success = test_query (bookmark_file); success = test_modify (bookmark_file); } g_bookmark_file_free (bookmark_file); g_print ("ok\n"); return (success == TRUE ? 0 : 1); }
/* Load-test driver for the mongoc client pool.
 *
 * argv[1] (optional): MongoDB connection string; defaults to a local
 *                     server with a 500 ms socket timeout.
 * argv[2] (optional): iteration count, clamped to at least 1.
 */
int
main (int argc, char *argv[])
{
   mongoc_client_pool_t *pool;
   mongoc_client_t *client;
   mongoc_uri_t *uri;
   unsigned iterations = 10000;

   /* Pick the connection string: CLI argument or local default. */
   if (argc > 1) {
      uri = mongoc_uri_new (argv[1]);
      if (!uri) {
         fprintf (stderr, "Failed to parse uri: %s\n", argv[1]);
         return 1;
      }
   } else {
      uri = mongoc_uri_new ("mongodb://127.0.0.1:27017/?sockettimeoutms=500");
   }

   /* Optional iteration count; never allow zero iterations. */
   if (argc > 2) {
      iterations = MAX (atoi (argv[2]), 1);
   }

   pool = mongoc_client_pool_new (uri);
   client = mongoc_client_pool_pop (pool);

   test_load (client, iterations);

   /* Return the client before tearing the pool down. */
   mongoc_client_pool_push (pool, client);
   mongoc_uri_destroy (uri);
   mongoc_client_pool_destroy (pool);

   return 0;
}
/* Verify that a string value can be read back from the loaded config:
 * "sendmail_path" must resolve to the fixture's "/XXX/SENDMAIL". */
void
test_get_string (void)
{
    /* Load the fixture configuration first (traced for cutter). */
    cut_trace(test_load());

    cut_assert_equal_string("/XXX/SENDMAIL",
                            mz_config_get_string(config, "sendmail_path"));
}
// Test dispatcher: with no arguments run save-then-load; with one
// argument run the named phase ("save", "load", or "load_register").
int main(int argc, char** argv)
{
    // Default: exercise the full save/load round trip.
    if (argc == 1) {
        test_save();
        test_load(false);
        return 0;
    }

    if (argc == 2) {
        const std::string phase(argv[1]);
        if (phase == "save")
            test_save();
        if (phase == "load")
            test_load(false);
        if (phase == "load_register")
            test_load(true);
    }
    return 0;
}
/* Verify set-then-get of a string key: "new_value" must be absent in
 * the freshly loaded config, and readable after being stored. */
void
test_set_string (void)
{
    /* Load the fixture configuration first (traced for cutter). */
    cut_trace(test_load());

    /* The key must not exist before we set it. */
    cut_assert_null(mz_config_get_string(config, "new_value"));

    mz_config_set_string(config, "new_value", "12345678X");
    cut_assert_equal_string("12345678X",
                            mz_config_get_string(config, "new_value"));
}
/* Run every test group in order; each group aborts the process on
 * failure, so reaching the final message means all of them passed. */
int main(void)
{
    test_context();
    test_load();
    test_flowgraph1();
    test_flowgraph2();

    fprintf(stderr, "\nAll tests passed!\n");
    return 0;
}
/* Top-level resource loader.  Test builds (MOD_TEST) route everything
 * through the test loader; production builds load each module in turn. */
void load(char *appPath, char *resPath, long handle)
{
#ifdef MOD_TEST
	test_load(appPath, resPath, handle);
#else
	/* Module load order matters here; keep these in sequence. */
	benc_load(appPath, resPath, handle);
	robk_load(appPath, resPath, handle);
	vinh_load(appPath, resPath, handle);
	game_load(appPath, resPath, handle);
#endif
}
int main() { int tx_port = 0; char *source = "test/test_defs.b"; sys_init(0); tx_server(source, "bin/state", &tx_port); vol_init(0, "bin/volume"); char *code = sys_load(source); env = env_new(source, code); mem_free(code); int len = 0; char **files = sys_list("test/data", &len); vars = vars_new(len); rvars = vars_new(len); for (int i = 0; i < len; ++i) { vars_add(rvars, files[i], 0, NULL); vars_add(vars, files[i], 0, NULL); } vars_add(vars, "___param", 0, NULL); test_vars(); test_load(); test_param(); test_clone(); test_eq(); test_store(); test_select(); test_rename(); test_extend(); test_join(); test_project(); test_semidiff(); test_summary(); test_union(); test_compound(); test_call(); tx_free(); env_free(env); mem_free(files); vars_free(vars); vars_free(rvars); return 0; }
/* Seed the shared arrays (pa with even values, pb with odd) and then
 * run the atomic-operation test suites. */
int main (void)
{
  for (int idx = 0; idx < MAX; ++idx)
    {
      pa[idx] = idx * 2;
      pb[idx] = idx * 2 + 1;
    }

  test_load ();
  test_store ();
  test_exch ();
  test_cas ();

  return 0;
}
// Runs every MatrixTest sub-test in sequence.  Each test appends its
// outcome to `message`; nothing here short-circuits, so all sub-tests
// run even if earlier ones record failures.
void MatrixTest::run_test_case(void)
{
   message += "Running matrix test case...\n";

   // Constructor and destructor methods

   test_constructor();
   test_destructor();

   // Assignment operators methods

   test_assignment_operator();

   // Reference operator methods

   test_reference_operator();

   // Arithmetic operators

   test_sum_operator();
   test_rest_operator();
   test_multiplication_operator();
   test_division_operator();

   // Arithmetic and assignment operators

   test_sum_assignment_operator();
   test_rest_assignment_operator();
   test_multiplication_assignment_operator();
   test_division_assignment_operator();

   // Equality and relational operators

   test_equal_to_operator();
   test_not_equal_to_operator();
   test_greater_than_operator();
   test_less_than_operator();
   test_greater_than_or_equal_to_operator();
   test_less_than_or_equal_to_operator();

   // Output operators

   test_output_operator();

   // Get methods

   test_get_rows_number();
   test_get_columns_number();

   test_arrange_row();
   test_arrange_column();

   test_arrange_submatrix();

   // Set methods

   test_set();

   test_set_rows_number();
   test_set_columns_number();

   test_set_row();
   test_set_column();

   // Diagonal methods

   test_get_diagonal();
   test_set_diagonal();
   test_sum_diagonal();

   // Resize methods

   test_append_row();
   test_append_column();

   test_insert_row();
   test_insert_column();

   test_subtract_row();
   test_subtract_column();

   test_sort_less_rows();
   test_sort_greater_rows();

   // Initialization methods

   test_initialize();
   test_randomize_uniform();
   test_randomize_normal();

   test_set_to_identity();

   // Mathematical methods

   test_calculate_sum();
   test_calculate_rows_sum();

   test_dot_vector();
   test_dot_matrix();

   test_calculate_eigenvalues();
   test_calculate_eigenvectors();

   test_direct();

   test_calculate_minimum_maximum();
   test_calculate_mean_standard_deviation();
   test_calculate_statistics();
   test_calculate_histogram();

   test_calculate_covariance_matrix();

   test_calculate_minimal_indices();
   test_calculate_maximal_indices();

   test_calculate_minimal_maximal_indices();

   test_calculate_sum_squared_error();
   test_calculate_mean_squared_error();
   test_calculate_root_mean_squared_error();

   test_calculate_determinant();
   test_calculate_transpose();
   test_calculate_cofactor();
   test_calculate_inverse();

   test_is_symmetric();
   test_is_antisymmetric();

   // Scaling methods

   test_scale_mean_standard_deviation();
   test_scale_rows_mean_standard_deviation();
   test_scale_columns_mean_standard_deviation();
   test_scale_rows_columns_mean_standard_deviation();

   test_scale_minimum_maximum();
   test_scale_rows_minimum_maximum();
   test_scale_columns_minimum_maximum();
   test_scale_rows_columns_minimum_maximum();

   // Unscaling methods

   test_unscale_mean_standard_deviation();
   test_unscale_rows_mean_standard_deviation();
   test_unscale_columns_mean_standard_deviation();
   test_unscale_rows_columns_mean_standard_deviation();

   test_unscale_minimum_maximum();
   test_unscale_rows_minimum_maximum();
   test_unscale_columns_minimum_maximum();
   test_unscale_rows_columns_minimum_maximum();

   test_convert_angular_variables_degrees();
   test_convert_angular_variables_radians();

   // Serialization methods

   test_print();

   test_load();

   test_save();

   test_parse();

   message += "End of matrix test case.\n";
}
/* Entry point for the rhythmbox-metadata helper process.
 *
 * Parses debug/bug-report flags from argv, then starts a GDBus server
 * on the given (or default) address, prints that address to stdout for
 * the parent process, and runs the main loop until shut down.
 *
 * Returns 0 on clean shutdown, -1 on D-Bus setup failure, or the
 * result of the --load / --saveable-types bug-report modes. */
int
main (int argc, char **argv)
{
	ServiceData svc = {0,};
	GError *error = NULL;
	const char *address = NULL;
	char *guid;

#ifdef ENABLE_NLS
	/* initialize i18n */
	bindtextdomain (GETTEXT_PACKAGE, GNOMELOCALEDIR);
	bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8");
	textdomain (GETTEXT_PACKAGE);
#endif

	g_type_init ();
	gst_init (NULL, NULL);
	g_set_prgname ("rhythmbox-metadata");

	/* Debug flags: argv is shifted past consumed options so the checks
	 * below always look at argv[1]. */
	if (argv[1] != NULL && strcmp(argv[1], "--debug") == 0) {
		argv++;
		rb_debug_init (TRUE);
	} else if (argv[1] != NULL && strcmp (argv[1], "--debug-match") == 0) {
		rb_debug_init_match (argv[2]);
		argv += 2;
	} else {
		rb_debug_init (FALSE);
	}

	/* bug report modes: run one operation and exit immediately */
	if (argv[1] != NULL && strcmp(argv[1], "--load") == 0) {
		return test_load (argv[2]);
	}
	if (argv[1] != NULL && strcmp(argv[1], "--saveable-types") == 0) {
		return test_saveable_types ();
	}

	if (argv[1] != NULL && strcmp (argv[1], "--external") == 0) {
		argv++;
		svc.external = TRUE;
	}
	/* Remaining argument (if any) is the D-Bus listen address. */
	if (argv[1] == NULL) {
		address = "unix:tmpdir=/tmp";
	} else {
		address = argv[1];
	}

	rb_debug ("initializing metadata service; pid = %d; address = %s", getpid (), address);
	svc.metadata = rb_metadata_new ();
	svc.loop = g_main_loop_new (NULL, TRUE);

	/* create the server */
	guid = g_dbus_generate_guid ();
	svc.server = g_dbus_server_new_sync (address, G_DBUS_SERVER_FLAGS_NONE, guid, NULL, NULL, &error);
	g_free (guid);
	if (error != NULL) {
		/* NOTE(review): error is not g_error_free()d here — harmless
		 * since the process exits, but worth confirming. */
		g_warning ("D-Bus server init failed: %s", error->message);
		return -1;
	}

	/* set up interface info */
	svc.node_info = g_dbus_node_info_new_for_xml (rb_metadata_iface_xml, &error);
	if (error != NULL) {
		g_warning ("D-Bus server init failed: %s", error->message);
		return -1;
	}

	g_signal_connect (svc.server, "new-connection", G_CALLBACK (new_connection_cb), &svc);
	g_dbus_server_start (svc.server);

	/* write the server address back to the parent process */
	{
		const char *addr;
		addr = g_dbus_server_get_client_address (svc.server);
		rb_debug ("D-BUS server listening on address %s", addr);
		printf ("%s\n", addr);
		fflush (stdout);
	}

	/* run main loop until we get bored; the shotgun timeout kills the
	 * process after a period of inactivity unless running --external */
	if (!svc.external)
		g_timeout_add_seconds (ATTENTION_SPAN / 2, (GSourceFunc) electromagnetic_shotgun, &svc);

	g_main_loop_run (svc.loop);

	/* orderly teardown of the connection, service, and server */
	if (svc.connection) {
		g_dbus_connection_close_sync (svc.connection, NULL, NULL);
		g_object_unref (svc.connection);
	}

	g_object_unref (svc.metadata);
	g_main_loop_unref (svc.loop);

	g_dbus_server_stop (svc.server);
	g_object_unref (svc.server);

	gst_deinit ();

	return 0;
}
/* This binary runs only the load test suite. */
int main()
{
    test_load();

    return 0;
}
/* Stencil benchmark driver: runs an iterated 13-point integer stencil
 * on CPU and/or GPU, optionally decomposed across MPI ranks, and
 * checks each step against a single-node control solution computed on
 * the root rank. */
int main(int argc, char* argv[])
{
	// Parse test command line arguments, perform early
	// initializations.
	const char *name, *mode;
	int n, nt, sx, sy, ss, rank, szcomm;
#ifdef CUDA
	struct cudaDeviceProp props;
#endif
	test_parse(argc, argv, &name, &mode,
		&n, &nt, &sx, &sy, &ss, &rank, &szcomm
#ifdef CUDA
		, &props
#endif
		);

#ifdef CUDA
	int cpu = !strcmp(mode, "CPU");
	int gpu = !strcmp(mode, "GPU");
#else
	// Without CUDA the CPU path is the only option.
	int cpu = 1;
	int gpu = 0;
#endif

	// Create test configuration.
	struct test_config_t* t = test_init(
		name, mode, n, nt, sx, sy, ss, rank, szcomm,
		xmin, ymin, zmin, xmax, ymax, zmax,
		bx, by, bs, ex, ey, es
#ifdef CUDA
		, &props
#endif
		);

	// Create another test configuration to check correctness.
	// This control configuration is undecomposed (1x1x1, rank 0) and,
	// under MPI, lives only on the root node.
	struct test_config_t* t_check = NULL;
#ifdef MPI
	if (t->rank == MPI_ROOT_NODE)
#endif
	{
		t_check = test_init(
			name, mode, n, nt, 1, 1, 1, 0, 1,
			xmin, ymin, zmin, xmax, ymax, zmax,
			bx, by, bs, ex, ey, es
#ifdef CUDA
			, &props
#endif
			);
	}

	// Generate the initial data distribution and load it
	// onto compute nodes.
	integer* array = (integer*)malloc(t->cpu.parent->grid->extsize * sizeof(integer));
	genirand(t->cpu.parent->grid->extsize, array);
	test_load(t, n, sx, sy, ss, sizeof(integer), (char*)array);
#ifdef MPI
	if (t->rank == MPI_ROOT_NODE)
#endif
	{
		size_t nxysb = n * n * n * sizeof(integer);

		// Copy the data array.
		memcpy(t_check->cpu.arrays[0], array, nxysb);

		// Duplicate initial distribution to the second level array.
		memcpy(t_check->cpu.arrays[1], t_check->cpu.arrays[0], nxysb);
	}
	free(array);

#ifdef VERBOSE
	printf("step 0\n");
	printf("step 1\n");
#endif

	// The time iterations loop, CPU and GPU versions.
	// Starts at 2 because arrays[0] and arrays[1] hold steps 0 and 1.
	for (int it = 2; it < t->nt; it++)
	{
		// Run one iteration of the stencil, measuring its time.
		// In case of MPI, the time of iteration is measured together
		// with the time of data sync.
		struct timespec start, stop;
#ifdef MPI
		if (t->rank == MPI_ROOT_NODE)
#endif
		{
			stenfw_get_time(&start);
		}

#ifdef MPI
		struct grid_domain_t* subdomains = t->cpu.subdomains;

		int nsubdomains = t->cpu.nsubdomains;

		// Copy the current iteration data into boundary slices
		// and compute stencil in them.
		// Boundary slices themselves are subdomains with respect
		// to each MPI decomposition domains.
		{
			// Set subdomain data copying callbacks:
			// use simple memcpy in this case.
			for (int i = 0; i < nsubdomains; i++)
			{
				struct grid_domain_t* sub = subdomains + i;
				sub->scatter_memcpy = &grid_subcpy;
				sub->gather_memcpy = &grid_subcpy;
			}

			// Scatter domain edges for separate computation.
			grid_scatter(subdomains, &t->cpu, 0, LAYOUT_MODE_CUSTOM);

			// Process edges subdomains.
			for (int i = 0; i < nsubdomains; i++)
			{
				struct grid_domain_t* sub = subdomains + i;

				int nx = sub->grid[0].bx + sub->grid[0].nx + sub->grid[0].ex;
				int ny = sub->grid[0].by + sub->grid[0].ny + sub->grid[0].ey;
				int ns = sub->grid[0].bs + sub->grid[0].ns + sub->grid[0].es;

				isum13pt_cpu(nx, ny, ns,
					(integer(*)[ny][nx])sub->arrays[0],
					(integer(*)[ny][nx])sub->arrays[1],
					(integer(*)[ny][nx])sub->arrays[2]);
			}
		}

		// Start sharing boundary slices between linked subdomains.
		// Two requests (send + recv) per subdomain.
		MPI_Request* reqs = (MPI_Request*)malloc(sizeof(MPI_Request) * 2 * nsubdomains);
		for (int i = 0; i < nsubdomains; i++)
		{
			struct grid_domain_t* subdomain = subdomains + i;
			struct grid_domain_t* neighbor = *(subdomain->links.dense[0]);

			assert(neighbor->grid[1].extsize == subdomain->grid[0].extsize);

			int szelem = sizeof(integer);

			// Destination (neighbor) slice extents; nx is in bytes.
			size_t dnx = neighbor->grid[1].nx * szelem;
			size_t dny = neighbor->grid[1].ny;
			size_t dns = neighbor->grid[1].ns;

			// Source (local) slice extents and boundary widths.
			size_t snx = subdomain->grid[0].nx * szelem;
			size_t sbx = subdomain->grid[0].bx * szelem;
			size_t sex = subdomain->grid[0].ex * szelem;
			size_t sny = subdomain->grid[0].ny, sns = subdomain->grid[0].ns;
			size_t sby = subdomain->grid[0].by, sbs = subdomain->grid[0].bs;
			size_t sey = subdomain->grid[0].ey, ses = subdomain->grid[0].es;

			size_t soffset = sbx + (sbx + snx + sex) *
				(sby + sbs * (sby + sny + sey));

			// Describe the outgoing staging buffer (arrays[1]).
			struct grid_domain_t obuf;
			memset(&obuf, 0, sizeof(struct grid_domain_t));
			obuf.arrays = subdomain->arrays + 1;
			obuf.narrays = 1;
			obuf.offset = 0;
			obuf.grid[0].nx = dnx;
			obuf.grid[0].ny = dny;
			obuf.grid[0].ns = dns;
			obuf.grid->size = dnx * dny * dns;

			// Describe the source view into the computed slice.
			struct grid_domain_t scpy = *subdomain;
			scpy.arrays = subdomain->arrays + 2;
			scpy.narrays = 1;
			scpy.offset = soffset;
			scpy.grid[0].nx = sbx + snx + sex;
			scpy.grid[0].ny = sby + sny + sey;
			scpy.grid[0].ns = sbs + sns + ses;

			// Copy data to the temporary buffer.
			grid_subcpy(dnx, dny, dns, &obuf, &scpy);

			// Exchange temporary buffers with the subdomain neighbour.
			int subdomain_rank = grid_rank1d(subdomain->parent->parent, subdomain->parent->grid);
			int neighbor_rank = grid_rank1d(neighbor->parent->parent, neighbor->parent->grid);
			MPI_SAFE_CALL(MPI_Isend(subdomain->arrays[1], obuf.grid->size,
				MPI_BYTE, neighbor_rank, 0, MPI_COMM_WORLD, &reqs[2 * i]));
			MPI_SAFE_CALL(MPI_Irecv(subdomain->arrays[0], obuf.grid->size,
				MPI_BYTE, neighbor_rank, 0, MPI_COMM_WORLD, &reqs[2 * i + 1]));
#ifdef VERBOSE
			printf("sharing: send %d->%d\n", subdomain_rank, neighbor_rank);
			printf("sharing: recv %d->%d\n", neighbor_rank, subdomain_rank);
#endif
		}
#endif // MPI

		// Compute inner grid points of the subdomain
		// (overlapped with the MPI boundary exchange above).
		int nx = t->cpu.grid->bx + t->cpu.grid->nx + t->cpu.grid->ex;
		int ny = t->cpu.grid->by + t->cpu.grid->ny + t->cpu.grid->ey;
		int ns = t->cpu.grid->bs + t->cpu.grid->ns + t->cpu.grid->es;

		if (cpu)
		{
			isum13pt_cpu(nx, ny, ns,
				(integer(*)[ny][nx])t->cpu.arrays[0],
				(integer(*)[ny][nx])t->cpu.arrays[1],
				(integer(*)[ny][nx])t->cpu.arrays[2]);
		}
#ifdef CUDA
		if (gpu)
		{
			isum13pt_gpu(nx, ny, ns,
				(integer*)t->gpu.arrays[0],
				(integer*)t->gpu.arrays[1],
				(integer*)t->gpu.arrays[2]);
#ifdef VISUALIZE
#ifndef CUDA_MAPPED
			// If GPU is not using mapped host memory, then need to fetch
			// the current iteration solution explicitly.
			// TODO: in case of MPI/CUDA/!MAPPED this copy must go AFTER
			// boundaries gathering.
			CUDA_SAFE_CALL(cudaMemcpy(t->cpu.arrays[2], t->gpu.arrays[2],
				t->gpu.grid->extsize * sizeof(real), cudaMemcpyDeviceToHost));
#endif // CUDA_MAPPED
#endif
		}
#endif // CUDA

#ifdef MPI
		// Wait for boundaries sharing completion.
		MPI_Status* statuses = (MPI_Status*)malloc(2 * nsubdomains * sizeof(MPI_Status));
		MPI_SAFE_CALL(MPI_Waitall(2 * nsubdomains, reqs, statuses));
		for (int i = 0; i < 2 * nsubdomains; i++)
			MPI_SAFE_CALL(statuses[i].MPI_ERROR);
		free(statuses);
		free(reqs);

		// Unpack the received neighbour slices back into each
		// subdomain and advance the subdomain's time levels.
		for (int i = 0; i < nsubdomains; i++)
		{
			struct grid_domain_t* subdomain = subdomains + i;

			int szelem = sizeof(integer);

			size_t dnx = subdomain->grid[1].nx * szelem;
			size_t dbx = subdomain->grid[1].bx * szelem;
			size_t dex = subdomain->grid[1].ex * szelem;
			size_t dny = subdomain->grid[1].ny, dns = subdomain->grid[1].ns;
			size_t dby = subdomain->grid[1].by, dbs = subdomain->grid[1].bs;
			size_t dey = subdomain->grid[1].ey, des = subdomain->grid[1].es;

			size_t doffset = dbx + (dbx + dnx + dex) *
				(dby + dbs * (dby + dny + dey));

			struct grid_domain_t dcpy = *subdomain;
			dcpy.arrays = subdomain->arrays + 2;
			dcpy.narrays = 1;
			dcpy.offset = doffset;
			dcpy.grid[0].nx = dbx + dnx + dex;
			dcpy.grid[0].ny = dby + dny + dey;
			dcpy.grid[0].ns = dbs + dns + des;

			struct grid_domain_t ibuf;
			memset(&ibuf, 0, sizeof(struct grid_domain_t));
			ibuf.arrays = subdomain->arrays;
			ibuf.narrays = 1;
			ibuf.offset = 0;
			ibuf.grid[0].nx = dnx;
			ibuf.grid[0].ny = dny;
			ibuf.grid[0].ns = dns;

			// Copy data to temporary buffer.
			grid_subcpy(dnx, dny, dns, &dcpy, &ibuf);

			// Swap pointers to make the last iteration in the bottom.
			char* w = subdomain->arrays[0];
			subdomain->arrays[0] = subdomain->arrays[2];
			subdomain->arrays[2] = w;
		}

		// Gather boundaries for the next time step. Insert the
		// separately computed boundaries back into the subdomains
		// for the next time step.
		struct grid_domain_t target = t->cpu;
		target.narrays = 1;
		target.arrays = t->cpu.arrays + 2;

		grid_gather(&target, subdomains, 1, LAYOUT_MODE_CUSTOM);

		if (t->rank != MPI_ROOT_NODE)
		{
#ifdef VERBOSE
			printf("step %d\n", it);
#endif
		}
		else
#endif // MPI
		{
			stenfw_get_time(&stop);
			printf("step %d time = ", it);
			stenfw_print_time_diff(start, stop);
			printf(" sec\n");
		}

#ifdef MPI
		if (t->rank == MPI_ROOT_NODE)
#endif
		{
			// Compute inner grid points of the control solution subdomain.
			int nx = t_check->cpu.grid->bx + t_check->cpu.grid->nx + t_check->cpu.grid->ex;
			int ny = t_check->cpu.grid->by + t_check->cpu.grid->ny + t_check->cpu.grid->ey;
			int ns = t_check->cpu.grid->bs + t_check->cpu.grid->ns + t_check->cpu.grid->es;

			isum13pt_cpu(nx, ny, ns,
				(integer(*)[ny][nx])t_check->cpu.arrays[0],
				(integer(*)[ny][nx])t_check->cpu.arrays[1],
				(integer(*)[ny][nx])t_check->cpu.arrays[2]);
		}

		// Print the stats of difference between the solution and
		// the control solution.
		test_write_imaxabsdiff(t, t_check, 2, it);

		// Swap pointers to rewrite the oldest iteration with
		// the next one.
		char* w = t->cpu.arrays[0];
		t->cpu.arrays[0] = t->cpu.arrays[1];
		t->cpu.arrays[1] = t->cpu.arrays[2];
		t->cpu.arrays[2] = w;

#ifdef CUDA
		if (gpu)
		{
			// Also swap the corresponding GPU arrays pointers.
			w = t->gpu.arrays[0];
			t->gpu.arrays[0] = t->gpu.arrays[1];
			t->gpu.arrays[1] = t->gpu.arrays[2];
			t->gpu.arrays[2] = w;
		}
#endif

#ifdef MPI
		if (t->rank == MPI_ROOT_NODE)
#endif
		{
			// Swap pointers to rewrite the oldest control solution
			// iteration with the next one.
			char* w = t_check->cpu.arrays[0];
			t_check->cpu.arrays[0] = t_check->cpu.arrays[1];
			t_check->cpu.arrays[1] = t_check->cpu.arrays[2];
			t_check->cpu.arrays[2] = w;
		}
	}

	// Dispose the test configurations.
#ifdef MPI
	if (t->rank == MPI_ROOT_NODE)
#endif
	{
		test_dispose(t_check);
	}
	test_dispose(t);

	return 0;
}
// Runs every VectorTest sub-test in sequence.  Each test appends its
// outcome to `message`; nothing here short-circuits, so all sub-tests
// run even if earlier ones record failures.
void VectorTest::run_test_case(void)
{
   message += "Running vector test case...\n";

   // Constructor and destructor methods

   test_constructor();
   test_destructor();

   // Arithmetic operators

   test_sum_operator();
   test_rest_operator();
   test_multiplication_operator();
   test_division_operator();

   // Operation and assignment operators

   test_sum_assignment_operator();
   test_rest_assignment_operator();
   test_multiplication_assignment_operator();
   test_division_assignment_operator();

   // Equality and relational operators

   test_equal_to_operator();
   test_not_equal_to_operator();
   test_greater_than_operator();
   test_greater_than_or_equal_to_operator();
   test_less_than_operator();
   test_less_than_or_equal_to_operator();

   // Output operator

   test_output_operator();

   // Get methods

   test_get_display();

   // Set methods

   test_set();
   test_set_display();

   // Resize methods

   test_resize();

   test_tuck_in();
   test_take_out();

   test_remove_element();

   test_get_assembly();

   // Initialization methods

   test_initialize();
   test_initialize_sequential();
   test_randomize_uniform();
   test_randomize_normal();

   // Checking methods

   test_contains();
   test_is_in();
   test_is_constant();
   test_is_crescent();
   test_is_decrescent();

   // Mathematical methods

   test_dot_vector();
   test_dot_matrix();

   test_calculate_sum();
   test_calculate_partial_sum();
   test_calculate_product();

   test_calculate_mean();
   test_calculate_standard_deviation();
   test_calculate_covariance();

   test_calculate_mean_standard_deviation();

   test_calculate_minimum();
   test_calculate_maximum();

   test_calculate_minimum_maximum();

   test_calculate_minimum_missing_values();
   test_calculate_maximum_missing_values();

   test_calculate_minimum_maximum_missing_values();

   test_calculate_explained_variance();

   test_calculate_histogram();

   test_calculate_bin();
   test_calculate_frequency();
   test_calculate_total_frequencies();

   test_calculate_minimal_index();
   test_calculate_maximal_index();

   test_calculate_minimal_indices();
   test_calculate_maximal_indices();

   test_calculate_minimal_maximal_index();

   test_calculate_cumulative_index();
   test_calculate_closest_index();

   test_calculate_norm();
   test_calculate_normalized();

   test_calculate_sum_squared_error();
   test_calculate_mean_squared_error();
   test_calculate_root_mean_squared_error();

   test_apply_absolute_value();

   test_calculate_lower_bounded();
   test_calculate_upper_bounded();

   test_calculate_lower_upper_bounded();

   test_apply_lower_bound();
   test_apply_upper_bound();
   test_apply_lower_upper_bounds();

   test_calculate_less_rank();
   test_calculate_greater_rank();

   test_calculate_linear_correlation();
   test_calculate_linear_correlation_missing_values();

   test_calculate_linear_regression_parameters();

   // Scaling and unscaling

   test_scale_minimum_maximum();
   test_scale_mean_standard_deviation();

   // Parsing methods

   test_parse();

   // Serialization methods

   test_save();

   test_load();

   message += "End vector test case\n";
}
int main (int argc, char **argv) { ServiceData svc = {0,}; DBusError dbus_error = {0,}; const char *address = NULL; #ifdef ENABLE_NLS /* initialize i18n */ bindtextdomain (GETTEXT_PACKAGE, GNOMELOCALEDIR); bind_textdomain_codeset (GETTEXT_PACKAGE, "UTF-8"); textdomain (GETTEXT_PACKAGE); #endif g_type_init (); gst_init (NULL, NULL); g_set_prgname ("rhythmbox-metadata"); if (argv[1] != NULL && strcmp(argv[1], "--debug") == 0) { argv++; rb_debug_init (TRUE); } else if (argv[1] != NULL && strcmp (argv[1], "--debug-match") == 0) { rb_debug_init_match (argv[2]); argv += 2; } else { rb_debug_init (FALSE); } /* bug report modes */ if (argv[1] != NULL && strcmp(argv[1], "--load") == 0) { return test_load (argv[2]); } if (argv[1] != NULL && strcmp(argv[1], "--saveable-types") == 0) { return test_saveable_types (); } if (argv[1] != NULL && strcmp (argv[1], "--external") == 0) { argv++; svc.external = TRUE; } if (argv[1] == NULL) { address = "unix:tmpdir=/tmp"; } else { address = argv[1]; } rb_debug ("initializing metadata service; pid = %d; address = %s", getpid (), address); svc.metadata = rb_metadata_new (); /* set up D-BUS server */ svc.server = dbus_server_listen (address, &dbus_error); if (!svc.server) { rb_debug ("D-BUS server init failed: %s", dbus_error.message); return -1; } dbus_server_set_new_connection_function (svc.server, _new_connection, (gpointer) &svc, NULL); /* write the server address back to the parent process */ { char *addr; addr = dbus_server_get_address (svc.server); rb_debug ("D-BUS server listening on address %s", addr); printf ("%s\n", addr); fflush (stdout); free (addr); } /* run main loop until we get bored */ svc.loop = g_main_loop_new (NULL, TRUE); dbus_server_setup_with_g_main (svc.server, g_main_loop_get_context (svc.loop)); if (!svc.external) g_timeout_add_seconds (ATTENTION_SPAN / 2, (GSourceFunc) electromagnetic_shotgun, &svc); g_main_loop_run (svc.loop); if (svc.connection) { dbus_connection_close (svc.connection); dbus_connection_unref 
(svc.connection); } g_object_unref (svc.metadata); g_main_loop_unref (svc.loop); dbus_server_disconnect (svc.server); dbus_server_unref (svc.server); gst_deinit (); return 0; }