/** * Put out requests to have FFT operations done, and measure how * long it takes to get the results back. */ int main(int argc, char *argv[]) { struct tuple *s, *t, *u; int i, j, iters = 0; int r1[PARALLEL], r2[PARALLEL], r3[PARALLEL]; double x[N], y[N], mult; struct timeval T0, T1; int rndsock; struct context ctx; if (get_server_portnumber(&ctx)) { if (argc < 3) { /* help message */ fprintf(stderr, "Usage: %s <server> <portnumber>\n", argv[0]); exit(1); } strcpy(ctx.peername, argv[1]); ctx.portnumber = atoi(argv[2]); } rndsock = open("/dev/urandom", O_RDONLY); mult = 1.0 / pow(2.0, 31); for (i = 0; i < N; i++) { x[i] = mult * random_int(); y[i] = mult * random_int(); } s = make_tuple("siiis#s#", "fft", 0, 0, 0, x, N * sizeof(double), y, N * sizeof(double)); t = make_tuple("siii??", "fft done", 0, 0, 0); gettimeofday(&T0, NULL); while (1) { for (j = 0; j < PARALLEL; j++) { r1[j] = random_int(); r2[j] = random_int(); r3[j] = random_int(); s->elements[1].data.i = r1[j]; s->elements[2].data.i = r2[j]; s->elements[3].data.i = r3[j]; if (put_tuple(s, &ctx)) { perror("put_tuple failed"); exit(1); } } for (j = 0; j < PARALLEL; j++) { t->elements[1].data.i = r1[j]; t->elements[2].data.i = r2[j]; t->elements[3].data.i = r3[j]; u = get_tuple(t, &ctx); if (u == NULL) { perror("get_tuple failed"); exit(1); } } gettimeofday(&T1, NULL); iters += PARALLEL; printf("%f\n", TIMEVAL_DIFF(T0, T1) / iters); } close(rndsock); destroy_tuple(s); destroy_tuple(t); destroy_tuple(u); return 0; }
/* ---------- * cleanupThread_main * * Periodically calls the stored procedure to remove old events and log data and * vacuums those tables. * ---------- */ void * cleanupThread_main( /* @unused@ */ void *dummy) { SlonConn *conn; SlonDString query_baseclean; SlonDString query2; SlonDString query_pertbl; PGconn *dbconn; PGresult *res; PGresult *res2; struct timeval tv_start; struct timeval tv_end; int t; int vac_count = 0; int vac_enable = SLON_VACUUM_FREQUENCY; char *vacuum_action; int ntuples; slon_log(SLON_CONFIG, "cleanupThread: thread starts\n"); /* * Want the vacuum time bias to be between 0 and 100 seconds, hence * between 0 and 100000 */ if (vac_bias == 0) { vac_bias = rand() % (SLON_CLEANUP_SLEEP * 166); } slon_log(SLON_CONFIG, "cleanupThread: bias = %d\n", vac_bias); /* * Connect to the local database */ if ((conn = slon_connectdb(rtcfg_conninfo, "local_cleanup")) == NULL) { #ifndef WIN32 (void) kill(getpid(), SIGTERM); pthread_exit(NULL); #else exit(0); #endif /* slon_retry(); */ } dbconn = conn->dbconn; monitor_state("local_cleanup", 0, conn->conn_pid, "thread main loop", 0, "n/a"); /* * Build the query string for calling the cleanupEvent() stored procedure */ dstring_init(&query_baseclean); slon_mkquery(&query_baseclean, "begin;" "lock table %s.sl_config_lock;" "select %s.cleanupEvent('%s'::interval);" "commit;", rtcfg_namespace, rtcfg_namespace, cleanup_interval ); dstring_init(&query2); /* * Loop until shutdown time arrived * * Note the introduction of vac_bias and an up-to-100s random "fuzz"; this * reduces the likelihood that having multiple slons hitting the same * cluster will run into conflicts due to trying to vacuum common tables * * such as pg_listener concurrently */ while (sched_wait_time(conn, SCHED_WAIT_SOCK_READ, SLON_CLEANUP_SLEEP * 1000 + vac_bias + (rand() % (SLON_CLEANUP_SLEEP * 166))) == SCHED_STATUS_OK) { /* * Call the stored procedure cleanupEvent() */ monitor_state("local_cleanup", 0, conn->conn_pid, "cleanupEvent", 0, "n/a"); 
gettimeofday(&tv_start, NULL); res = PQexec(dbconn, dstring_data(&query_baseclean)); if (PQresultStatus(res) != PGRES_COMMAND_OK) { slon_log(SLON_FATAL, "cleanupThread: \"%s\" - %s", dstring_data(&query_baseclean), PQresultErrorMessage(res)); PQclear(res); slon_retry(); break; } PQclear(res); gettimeofday(&tv_end, NULL); slon_log(SLON_INFO, "cleanupThread: %8.3f seconds for cleanupEvent()\n", TIMEVAL_DIFF(&tv_start, &tv_end)); /* * Detain the usual suspects (vacuum event and log data) */ if (vac_frequency != 0) { vac_enable = vac_frequency; } if (++vac_count >= vac_enable) { unsigned long latest_xid; vac_count = 0; latest_xid = get_earliest_xid(dbconn); vacuum_action = ""; if (earliest_xid == latest_xid) { slon_log(SLON_INFO, "cleanupThread: xid %d still active - analyze instead\n", earliest_xid); } else { if (vac_enable == vac_frequency) { vacuum_action = "vacuum "; } } earliest_xid = latest_xid; /* * Build the query string for vacuuming replication runtime data * and event tables */ gettimeofday(&tv_start, NULL); slon_mkquery(&query2, "select nspname, relname from %s.TablesToVacuum();", rtcfg_namespace); res = PQexec(dbconn, dstring_data(&query2)); /* * for each table... and we should set up the query to return not * only the table name, but also a boolean to support what's in * the SELECT below; that'll nicely simplify this process... 
*/ if (PQresultStatus(res) != PGRES_TUPLES_OK) /* query error */ { slon_log(SLON_ERROR, "cleanupThread: \"%s\" - %s", dstring_data(&query2), PQresultErrorMessage(res)); } ntuples = PQntuples(res); slon_log(SLON_DEBUG1, "cleanupThread: number of tables to clean: %d\n", ntuples); monitor_state("local_cleanup", 0, conn->conn_pid, "vacuumTables", 0, "n/a"); for (t = 0; t < ntuples; t++) { char *tab_nspname = PQgetvalue(res, t, 0); char *tab_relname = PQgetvalue(res, t, 1); ExecStatusType vrc; slon_log(SLON_DEBUG1, "cleanupThread: %s analyze \"%s\".%s;\n", vacuum_action, tab_nspname, tab_relname); dstring_init(&query_pertbl); slon_mkquery(&query_pertbl, "%s analyze \"%s\".%s;", vacuum_action, tab_nspname, tab_relname); res2 = PQexec(dbconn, dstring_data(&query_pertbl)); vrc = PQresultStatus(res2); if (vrc == PGRES_FATAL_ERROR) { slon_log(SLON_ERROR, "cleanupThread: \"%s\" - %s\n", dstring_data(&query_pertbl), PQresultErrorMessage(res2)); /* * slon_retry(); break; */ } else { if (vrc == PGRES_NONFATAL_ERROR) { slon_log(SLON_WARN, "cleanupThread: \"%s\" - %s\n", dstring_data(&query_pertbl), PQresultErrorMessage(res2)); } } PQclear(res2); dstring_reset(&query_pertbl); } gettimeofday(&tv_end, NULL); slon_log(SLON_INFO, "cleanupThread: %8.3f seconds for vacuuming\n", TIMEVAL_DIFF(&tv_start, &tv_end)); /* * Free Resources */ dstring_free(&query_pertbl); PQclear(res); monitor_state("local_cleanup", 0, conn->conn_pid, "thread main loop", 0, "n/a"); } } /* * Free Resources */ dstring_free(&query_baseclean); dstring_free(&query2); /* * Disconnect from the database */ slon_disconnectdb(conn); /* * Terminate this thread */ slon_log(SLON_DEBUG1, "cleanupThread: thread done\n"); pthread_exit(NULL); }
/**
 * Handle a request for an Actuator
 * @param context - FCGI context
 * @param params - Parameters passed
 */
void Actuator_Handler(FCGIContext * context, char * params)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	double current_time = TIMEVAL_DIFF(now, *Control_GetStartTime());

	// Parameter defaults (all parameters are optional except id/name)
	int id = 0;
	char * name = "";
	char * set = "";
	double start_time = 0;
	double end_time = current_time;
	// BUGFIX: was left uninitialized; values[FORMAT] hands this pointer to
	// Data_GetFormat, so give it a defined default like the other strings.
	char * fmt_str = "";

	// key/value pairs
	FCGIValue values[] = {
		{"id", &id, FCGI_INT_T},
		{"name", &name, FCGI_STRING_T},
		{"set", &set, FCGI_STRING_T},
		{"start_time", &start_time, FCGI_DOUBLE_T},
		{"end_time", &end_time, FCGI_DOUBLE_T},
		{"format", &fmt_str, FCGI_STRING_T}
	};

	// enum to avoid the use of magic numbers
	typedef enum {
		ID,
		NAME,
		SET,
		START_TIME,
		END_TIME,
		FORMAT
	} ActuatorParams;

	// Fill values appropriately
	if (!FCGI_ParseRequest(context, params, values, sizeof(values)/sizeof(FCGIValue)))
	{
		// Error occured; FCGI_RejectJSON already called
		return;
	}

	// Get the Actuator identified — exactly one of "name" or "id" is required
	Actuator * a = NULL;
	if (FCGI_RECEIVED(values[NAME].flags))
	{
		if (FCGI_RECEIVED(values[ID].flags))
		{
			FCGI_RejectJSON(context, "Can't supply both id and name");
			return;
		}
		a = Actuator_Identify(name);
		if (a == NULL)
		{
			FCGI_RejectJSON(context, "Unknown actuator name");
			return;
		}
	}
	else if (!FCGI_RECEIVED(values[ID].flags))
	{
		FCGI_RejectJSON(context, "No id or name supplied");
		return;
	}
	else if (id < 0 || id >= g_num_actuators)
	{
		FCGI_RejectJSON(context, "Invalid Actuator id");
		return;
	}
	else
	{
		a = &(g_actuators[id]);
	}

	DataFormat format = Data_GetFormat(&(values[FORMAT]));

	if (FCGI_RECEIVED(values[SET].flags))
	{
		// Need to set default values (since we don't require them all)
		ActuatorControl c = {0.0, 0.0, 0.0, 0};
		// sscanf returns the number of fields successfully read...
		int n = sscanf(set, "%lf,%lf,%lf,%d",
		               &(c.start), &(c.stepwait), &(c.stepsize), &(c.steps));
		// Set provided values in order
		if (n != 4)
		{
			// If the user doesn't provide all 4 values, the Actuator will get set *once* using the first of the provided values
			// (see Actuator_Loop)
			// Not really a problem if n = 1, but maybe generate a warning for 2 <= n < 4 ?
			Log(LOGDEBUG, "Only provided %d values (expect %d) for Actuator setting", n, 4);
		}
		// SANITY CHECKS
		if (c.stepwait < 0 || c.steps < 0 ||
		    (a->sanity != NULL && !a->sanity(a->user_id, c.start)))
		{
			FCGI_RejectJSON(context, "Bad Actuator setting");
			return;
		}
		Actuator_SetControl(a, &c);
	}

	// Begin response
	Actuator_BeginResponse(context, a, format);
	if (format == JSON)
		FCGI_JSONPair("set", set);

	// Print Data
	Data_Handler(&(a->data_file), &(values[START_TIME]), &(values[END_TIME]),
	             format, current_time);

	// Finish response
	Actuator_EndResponse(context, a, format);
}
/**
 * Handle a request to the sensor module
 * @param context - The context to work in
 * @param params - Parameters passed
 */
void Sensor_Handler(FCGIContext *context, char * params)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	double current_time = TIMEVAL_DIFF(now, *Control_GetStartTime());

	// Parameter defaults (all parameters are optional except id/name)
	int id = 0;
	const char * name = "";
	double start_time = 0;
	double end_time = current_time;
	// BUGFIX: was left uninitialized; values[FORMAT] hands this pointer to
	// Data_GetFormat, so give it a defined default like the other strings
	// (matches the same fix in Actuator_Handler).
	const char * fmt_str = "";
	double sample_s = 0;

	// key/value pairs
	FCGIValue values[] = {
		{"id", &id, FCGI_INT_T},
		{"name", &name, FCGI_STRING_T},
		{"format", &fmt_str, FCGI_STRING_T},
		{"start_time", &start_time, FCGI_DOUBLE_T},
		{"end_time", &end_time, FCGI_DOUBLE_T},
		{"sample_s", &sample_s, FCGI_DOUBLE_T}
	};

	// enum to avoid the use of magic numbers
	typedef enum {
		ID,
		NAME,
		FORMAT,
		START_TIME,
		END_TIME,
		SAMPLE_S
	} SensorParams;

	// Fill values appropriately
	if (!FCGI_ParseRequest(context, params, values, sizeof(values)/sizeof(FCGIValue)))
	{
		// Error occured; FCGI_RejectJSON already called
		return;
	}

	// Identify the sensor — exactly one of "name" or "id" is required
	Sensor * s = NULL;
	if (FCGI_RECEIVED(values[NAME].flags))
	{
		if (FCGI_RECEIVED(values[ID].flags))
		{
			FCGI_RejectJSON(context, "Can't supply both sensor id and name");
			return;
		}
		s = Sensor_Identify(name);
		if (s == NULL)
		{
			FCGI_RejectJSON(context, "Unknown sensor name");
			return;
		}
	}
	else if (!FCGI_RECEIVED(values[ID].flags))
	{
		FCGI_RejectJSON(context, "No sensor id or name supplied");
		return;
	}
	else if (id < 0 || id >= g_num_sensors)
	{
		FCGI_RejectJSON(context, "Invalid sensor id");
		return;
	}
	else
	{
		s = &(g_sensors[id]);
	}

	// Adjust sample rate if necessary
	if (FCGI_RECEIVED(values[SAMPLE_S].flags))
	{
		if (sample_s < 0)
		{
			FCGI_RejectJSON(context, "Negative sampling speed!");
			return;
		}
		DOUBLE_TO_TIMEVAL(sample_s, &(s->sample_time));
	}

	DataFormat format = Data_GetFormat(&(values[FORMAT]));

	// Begin response
	Sensor_BeginResponse(context, s, format);

	// Print Data
	Data_Handler(&(s->data_file), &(values[START_TIME]), &(values[END_TIME]),
	             format, current_time);

	// Finish response
	Sensor_EndResponse(context, s, format);
}
static void * run_clip_permutation(void *args) { CLIPSTATS_TREES *trees; JOB_COUNTER *jobcounter; RANDGEN_STATE randgen; WORKER *worker; gzFile fp=NULL; randgen_init(&randgen); worker = (WORKER *)args; jobcounter = worker->jobcounter; fp = gzopen(worker->readpool_input_path, "r"); if (fp == NULL) { fprintf(stderr, "Failed to open input file %s\n", worker->readpool_input_path); return NULL; } trees = clipstatstrees_new(); #define BEGIN_JOBCOUNTER_EXCLUSIVE pthread_mutex_lock(&jobcounter->lock); #define END_JOBCOUNTER_EXCLUSIVE pthread_mutex_unlock(&jobcounter->lock); #define LOCK_AND_UPDATE_STATUS(worker, newstatus) do { \ pthread_mutex_lock(&(worker)->jobcounter->lock); \ (worker)->status = newstatus; \ update_progression((worker)); \ pthread_mutex_unlock(&(worker)->jobcounter->lock); \ } while (0) for (;;) { READ_QUEUE_SET *read_queue_set=NULL; struct timeval tv; /* Increase the job counter, exit the loop if all works done */ BEGIN_JOBCOUNTER_EXCLUSIVE if (jobcounter->queued >= jobcounter->total) { END_JOBCOUNTER_EXCLUSIVE break; } jobcounter->queued++; gettimeofday(&worker->last_started, NULL); worker->status = WORKER_STATUS_READ_QUEUE_SHUFFLING; update_progression(worker); END_JOBCOUNTER_EXCLUSIVE /* Generate shuffled read queue from error profile */ read_queue_set = generate_read_queue_set(&randgen, worker->error_profile); if (read_queue_set == NULL) { LOCK_AND_UPDATE_STATUS(worker, WORKER_STATUS_ERROR); fclose(fp); return NULL; } /* Run simulated sequencing and get profiles */ LOCK_AND_UPDATE_STATUS(worker, WORKER_STATUS_SIMULATING); gzrewind(fp); simulate_sequencing(fp, read_queue_set, trees); free_read_queue_set(read_queue_set); /* Increase the job counter for jobs done */ BEGIN_JOBCOUNTER_EXCLUSIVE jobcounter->done++; worker->status = WORKER_STATUS_NOT_RUNNING; gettimeofday(&tv, NULL); worker->last_consumed = TIMEVAL_DIFF(worker->last_started, tv); update_progression(worker); END_JOBCOUNTER_EXCLUSIVE }
/* Assume the job counter mutex is locked inside this function.
 *
 * Prints a one-line progress bar (one status character per worker thread)
 * and, once at least one job has completed, an estimated finish time based
 * on the mean job duration observed so far. */
static void
update_progression(WORKER *worker)
{
	JOB_COUNTER *jc;
	double total_job_time;
	int i, ran_threads;

	jc = worker->jobcounter;
	ran_threads = 0;
	total_job_time = 0.;

	if (worker->show_progression)
		printf("\r%5.1f%% done [", (100. * jc->done) / jc->total);

	for (i = 0; i < jc->nthreads; i++) {
		if (worker->show_progression)
			printf("%c", worker->workers[i].status);
		/* last_consumed < 0 means this thread hasn't finished a job yet. */
		if (worker->workers[i].last_consumed >= 0.) {
			total_job_time += worker->workers[i].last_consumed;
			ran_threads++;
		}
	}

	if (worker->show_progression)
		printf("]");

	if (ran_threads >= 1) {
		double mean_job_time, eta_waiting_jobs, eta_running_jobs, eta;
		struct timeval tv;

		gettimeofday(&tv, NULL);
		mean_job_time = total_job_time / ran_threads;
		/* Not-yet-queued jobs run in ceil(waiting / nthreads) waves. */
		eta_waiting_jobs = ceil(((double)(jc->total - jc->queued)) /
					jc->nthreads) * mean_job_time;
		/* Currently-running jobs finish when the slowest one does. */
		eta_running_jobs = 0.;
		for (i = 0; i < jc->nthreads; i++)
			if (WORKER_STATUS_IS_RUNNING(worker->workers[i].status)) {
				double remaining;

				remaining = mean_job_time - TIMEVAL_DIFF(
					worker->workers[i].last_started, tv);
				if (remaining < 0.)
					remaining = 0.;
				if (remaining > eta_running_jobs)
					eta_running_jobs = remaining;
			}
		eta = eta_running_jobs + eta_waiting_jobs;

		if (eta > 0) {
			time_t eft_timestamp;
			struct tm eft_tm;
			char eft_asc[BUFSIZ];

			time(&eft_timestamp);
			eft_timestamp += eta;
			/* BUGFIX: localtime() returns a pointer to a shared static
			 * buffer and is not thread-safe; this function runs in
			 * multiple worker threads, so use localtime_r() with a
			 * caller-provided struct tm instead. */
			if (localtime_r(&eft_timestamp, &eft_tm) != NULL) {
				strftime(eft_asc, BUFSIZ-1, "%b %d %H:%M:%S", &eft_tm);
				if (worker->show_progression)
					printf(" Est.Fin. %s ", eft_asc);
			}
		}
	}

	if (worker->show_progression)
		fflush(stdout);
}