/**
 * @brief Function for application main entry.
 */
int main(void)
{
    unsigned role  = role_get();
    unsigned group = role >> ROLE_GR_SHIFT;

    rtc_initialize(rtc_dummy_handler);
    uart_init();
    stat_init(group);

    radio_configure(&g_report_packet, sizeof(g_report_packet),
                    group ? GR1_CH : GR0_CH);
    receiver_on(on_packet_received);
    receive_start();

    while (true) {
        __WFI();
        if (g_stat_request) {
            g_stat_request = 0;
            stat_dump();
        }
    }
}
void wff2obdd_init()
{
    stat_init();
    init_stopwatch("compilation time",
                   "propositional Wff to OBDD conversion time: %d s %d.%d ms",
                   "compilation");
    init_int_counter("cache hits", "cache hits: %d", "internals");
    init_int_counter("nodes", "OBDD nodes: %d", "output");
}
static int malloc_stat(int sz)
{
    if (!dbg_stat_init)
        stat_init();
    return malloc_sizes[DBG_INDEX(sz)] += 1;
}
stats *
rstatproc_stats_1()
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s1);
}

statsswtch *
rstatproc_stats_2()
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s2);
}

statstime *
rstatproc_stats_3()
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s3);
}

statstime *
rstatproc_stats_3_svc(void *arg, struct svc_req *rqstp)
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s3);
}

statsswtch *
rstatproc_stats_2_svc(void *arg, struct svc_req *rqstp)
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s2);
}

stats *
rstatproc_stats_1_svc(void *argp, struct svc_req *rqstp)
{
    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    return (&stats_all.s1);
}

u_int *
rstatproc_havedisk_3_svc(void *argp, struct svc_req *rqstp)
{
    static u_int have;

    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    have = haveadisk();
    return (&have);
}

u_int *
rstatproc_havedisk_3()
{
    static u_int have;

    if (!stat_is_init)
        stat_init();
    sincelastreq = 0;
    have = havedisk();
    return (&have);
}
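/*
 * All of the rstatproc handlers above guard stat_init() behind the
 * stat_is_init flag. A hedged sketch only: the body below (including the
 * placeholder struct all_stats) is an assumption for illustration, not the
 * actual rstatd implementation of stat_init().
 */
#include <string.h>

struct all_stats { long counters[16]; };   /* placeholder for the real stats_all */
static struct all_stats stats_all;
static int sincelastreq;
static int stat_is_init;

static void stat_init(void)
{
    memset(&stats_all, 0, sizeof(stats_all));  /* zero the shared counters */
    sincelastreq = 0;
    stat_is_init = 1;                          /* callers' guard now skips re-init */
}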
int gpuvm_init(unsigned ndevs, void **devs, int flags)
{
    // check arguments
    if (ndevs == 0) {
        fprintf(stderr, "gpuvm_init: zero devices not allowed\n");
        return GPUVM_EARG;
    }
    if (flags & ~(GPUVM_API | GPUVM_STAT | GPUVM_WRITER_SIG_BLOCK | GPUVM_UNLINK_NO_SYNC_BACK) ||
        !(flags & GPUVM_API)) {
        fprintf(stderr, "gpuvm_init: invalid flags\n");
        return GPUVM_EARG;
    }

    // check state
    if (ndevs_g) {
        fprintf(stderr, "gpuvm_init: GPUVM already initialized\n");
        return GPUVM_ETWICE;
    }
    ndevs_g = ndevs;

    // initialize auxiliary structures
    int err = 0;
    err = salloc_init();
    if (err)
        return err;

    // initialize devices
    devs_g = (void **)smalloc(ndevs * sizeof(void *));
    if (!devs_g)
        return GPUVM_ESALLOC;
    if (flags & GPUVM_OPENCL) {
        if (!devs) {
            fprintf(stderr, "gpuvm_init: null pointer to devices not allowed\n");
            return GPUVM_ENULL;
        }
        memcpy(devs_g, devs, ndevs * sizeof(void *));
    } else if (flags & GPUVM_CUDA) {
        // ignore devs, just zero out devs_g
        memset(devs_g, 0, ndevs * sizeof(void *));
    }

    // continue with initialization
    (err = sync_init()) || (err = devapi_init(flags)) ||
        (err = handler_init()) || (err = stat_init(flags)) ||
        (err = tsem_init()) || (err = wthreads_init());
    if (err)
        return err;
    return 0;
}  // gpuvm_init
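/*
 * The tail of gpuvm_init() uses a short-circuit chain,
 * (err = a()) || (err = b()) || ..., which stops at the first initializer
 * that returns a nonzero error code, leaving err set to it. A standalone
 * illustration of the idiom (step_a/step_b/step_c are made-up names, not
 * part of GPUVM):
 */
#include <stdio.h>

static int step_a(void) { return 0; }    /* succeeds */
static int step_b(void) { return -5; }   /* fails: chain stops here */
static int step_c(void) { return 0; }    /* never evaluated */

int main(void)
{
    int err = 0;
    (err = step_a()) || (err = step_b()) || (err = step_c());
    printf("err = %d\n", err);           /* prints "err = -5" */
    return 0;
}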
/** wrapper function to initialize the resolver at startup */
int resolv_init(void)
{
    int res = -1;

    _resolv_init();
    reinit_proto_prefs(NULL, NULL);

    /* init counter API only at startup
     * This function must be called before DNS cache init method (if available)
     */
    res = stat_init();
    return res;
}

/* wrapper function to initialize the resolver at startup */
int resolv_init(void)
{
    int res = -1;

    _resolv_init();

#ifdef USE_NAPTR
    init_naptr_proto_prefs();
#endif

    /* init counter API only at startup
     * This function must be called before DNS cache init method (if available)
     */
    res = stat_init();
    return res;
}
void gotcha_init(diagnostic_problem problem,
                 const_tv_dnf_hierarchy input,
                 const_node root)
{
    stat_init();

    gotcha_set_input(problem, input, root);

    init_int_counter("inconsistent", "inconsistent states: %d", "search tree");
    init_int_counter("forward inconsistent", "unit check inconsistent states: %d", "search tree");
    init_int_counter("pushed", "nodes pushed: %d", "opened queue");
    init_int_counter("max", "maximum size: %d nodes", "opened queue");
    init_stopwatch("search time", "search time: %d s %d.%d ms", "dynamics");
    init_stopwatch("preprocessing time", "preprocessing time: %d s %d.%d ms", "dynamics");
    init_stopwatch("observation filtering time", "observation filtering time: %d s %d.%d ms", "dynamics");
}
int main(int argc, char **argv)
{
    Stat_T data;

    stat_init(&data);
    while (--argc) {
        double ftmp;

        ftmp = atof(*(++argv));
        stat_add(ftmp, &data);
    }

    puts("\nBefore \"Olympic\" filtering\n");
    printf("Minimum datum = %g\n", stat_min(&data));
    printf("Maximum datum = %g\n", stat_max(&data));
    printf("Number of samples = %d\n", stat_count(&data));
    printf("Arithmetic mean = %g\n", stat_mean(&data));
    printf("Geometric mean = %g\n", stat_gmean(&data));
    printf("Harmonic mean = %g\n", stat_hmean(&data));
    printf("Standard deviation (N) = %g\n", stat_stddevP(&data));
    printf("Standard deviation (N-1) = %g\n", stat_stddevS(&data));
    printf("Variance = %g\n", stat_var(&data));
    printf("Population coeff. of var. = %g%%\n", stat_varcoeffP(&data));
    printf("Sample coeff. of var. = %g%%\n", stat_varcoeffS(&data));

    puts("\nAfter \"Olympic\" filtering\n");
    printf("stat_olympic() returned %s\n", stat_olympic(&data) ? "ERROR" : "SUCCESS");
    printf("Minimum datum = %g\n", stat_min(&data));
    printf("Maximum datum = %g\n", stat_max(&data));
    printf("Number of samples = %d\n", stat_count(&data));
    printf("Arithmetic mean = %g\n", stat_mean(&data));
    printf("Geometric mean = %g\n", stat_gmean(&data));
    printf("Harmonic mean = %g\n", stat_hmean(&data));
    printf("Standard deviation (N) = %g\n", stat_stddevP(&data));
    printf("Standard deviation (N-1) = %g\n", stat_stddevS(&data));
    printf("Variance = %g\n", stat_var(&data));
    printf("Population coeff. of var. = %g%%\n", stat_varcoeffP(&data));
    printf("Sample coeff. of var. = %g%%\n", stat_varcoeffS(&data));

    return EXIT_SUCCESS;
}
/*
 * private static native void initNative();
 *
 * We rely on this function rather than lazy initialization because
 * the lazy approach may have a race if multiple callers try to
 * init at the same time.
 */
JNIEXPORT void JNICALL
Java_org_apache_hadoop_io_nativeio_NativeIO_initNative(
    JNIEnv *env, jclass clazz)
{
    stat_init(env, clazz);
    PASS_EXCEPTIONS_GOTO(env, error);
    nioe_init(env);
    PASS_EXCEPTIONS_GOTO(env, error);
    fd_init(env);
    PASS_EXCEPTIONS_GOTO(env, error);
    errno_enum_init(env);
    PASS_EXCEPTIONS_GOTO(env, error);
    return;
error:
    // these are all idempotent and safe to call even if the
    // class wasn't initialized yet
    stat_deinit(env);
    nioe_deinit(env);
    fd_deinit(env);
    errno_enum_deinit(env);
}
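/*
 * The comment above explains why initNative() initializes eagerly: naive lazy
 * initialization can race when several threads call it at once. For contrast
 * only, a race-free lazy variant is usually built on pthread_once; this is an
 * illustrative sketch, not how Hadoop's NativeIO actually does it, and
 * ensure_stat_init/stat_init_once are made-up names.
 */
#include <pthread.h>

static pthread_once_t stat_once = PTHREAD_ONCE_INIT;

static void stat_init_once(void)
{
    /* one-time setup would go here (cache IDs, allocate counters, ...) */
}

static void ensure_stat_init(void)
{
    /* pthread_once guarantees stat_init_once runs exactly once, even when
       called concurrently from multiple threads */
    pthread_once(&stat_once, stat_init_once);
}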
int main(int argc, char *argv[])
{
    char mode = '\0';
    int option = 0;

    srand(time(NULL));
    stat_init();

    // Ctrl+C: stop the program
    signal(SIGINT, stoprun);
    // Ctrl+Z: quick review of current state
    signal(SIGTSTP, stat_quick_print);

    while ((option = getopt(argc, argv, "ce:")) != -1) {
        switch (option) {
        case 'c':
            mode = 'c';
            break;
        case 'e':
            ebn0 = atof(optarg);
            break;
        default:
            print_usage();
            exit(1);
        }
    }

    switch (mode) {
    case 'c':
        continuous_test(ebn0);
        break;
    default:
        printf("No mode has been selected!\n");
        print_usage();
        exit(0);
    }

    return 0;
}
CMStat::CMStat()
{
    stat_init();
}
void stat_init_global()
{
    stat_init(GLOBAL_STATS);
    stat_init(THREAD_STATS);
}
/* --------------------------------
 * init_postgres
 *     Initialize POSTGRES.
 *
 * The database can be specified by name, using the in_dbname parameter, or by
 * OID, using the dboid parameter. In the latter case, the actual database
 * name can be returned to the caller in out_dbname. If out_dbname isn't
 * NULL, it must point to a buffer of size NAMEDATALEN.
 *
 * In bootstrap mode no parameters are used. The autovacuum launcher process
 * doesn't use any parameters either, because it only goes far enough to be
 * able to read pg_database; it doesn't connect to any particular database.
 * In walsender mode only username is used.
 *
 * As of PostgreSQL 8.2, we expect init_process() was already called, so we
 * already have a struct proc entry ... but it's not completely filled in yet.
 *
 * Note:
 *     Be very careful with the order of calls in the init_postgres function.
 * --------------------------------
 */
void
init_postgres(const char *in_dbname, oid_t dboid, const char *username,
              char *out_dbname)
{
    bool bootstrap = BOOTSTRAP_MODE();
    bool am_superuser;
    char *fullpath;
    char dbname[NAMEDATALEN];

    elog(DEBUG3, "init_postgres");

    /*
     * Add my struct proc struct to the ProcArray.
     *
     * Once I have done this, I am visible to other backends!
     */
    init_proc_phase2();

    /*
     * Initialize my entry in the shared-invalidation manager's array of
     * per-backend data.
     *
     * Sets up current_bid, a unique backend identifier.
     */
    current_bid = INVALID_BKNID;
    sci_bkn_init(false);

    if (current_bid > MAX_NR_BACKENDS || current_bid <= 0)
        elog(FATAL, "bad backend ID: %d", current_bid);

    /* Now that we have a bid_t, we can participate in ProcSignal */
    signal_init(current_bid);

    /*
     * bufmgr needs another initialization call too
     */
    init_buffer_pool_bkn();

    /*
     * Initialize local process's access to XLOG.
     */
    if (child)
    {
        /*
         * The postmaster already started the XLOG machinery, but we need to
         * call init_xlog_access(), if the system isn't in hot-standby mode.
         * This is handled by calling recovery_in_progres and ignoring the
         * result.
         */
        (void) recovery_in_progres();
    }
    else
    {
        /*
         * We are either a bootstrap process or a standalone backend. Either
         * way, start up the XLOG machinery, and register to have it closed
         * down at exit.
         */
        startup_xlog();
        on_shmem_exit(shutdown_xlog, 0);
    }

    /*
     * Initialize the relation cache and the system catalog caches. Note that
     * no catalog access happens here; we only set up the hashtable structure.
     * We must do this before starting a transaction because transaction abort
     * would try to touch these hashtables.
     */
    relcache_init_phase1();
    init_catcache_phase1();
    init_plan_cache();

    /* Initialize portal manager */
    start_portal();

    /* Initialize stats collection --- must happen before first xact */
    if (!bootstrap)
        stat_init();

    /*
     * Load relcache entries for the shared system catalogs. This must
     * create at least entries for pg_database and catalogs used for
     * authentication.
     */
    relcache_init_phase2();

    /*
     * Set up process-exit callback to do pre-shutdown cleanup. This has to
     * be after we've initialized all the low-level modules like the buffer
     * manager, because during shutdown this has to run before the low-level
     * modules start to close down. On the other hand, we want it in place
     * before we begin our first transaction --- if we fail during the
     * initialization transaction, as is entirely possible, we need the
     * AbortTransaction call to clean up.
     */
    on_shmem_exit(ShutdownPostgres, 0);

    /* The autovacuum launcher is done here */
    if (is_avl_proc())
        return;

    /*
     * Start a new transaction here before first access to db, and get a
     * snapshot. We don't have a use for the snapshot itself, but we're
     * interested in the secondary effect that it sets recent_global_xmin. (This
     * is critical for anything that reads heap pages, because HOT may decide
     * to prune them even if the process doesn't attempt to modify any
     * tuples.)
     */
    if (!bootstrap)
    {
        /* statement_timestamp must be set for timeouts to work correctly */
        set_current_stmt_start();
        start_xact_cmd();
        (void) get_xact_snap();
    }

    /*
     * Perform client authentication if necessary, then figure out our
     * postgres user ID, and see if we are a superuser.
     *
     * In standalone mode and in autovacuum worker processes, we use a fixed
     * ID, otherwise we figure it out from the authenticated user name.
     */
    if (bootstrap || is_avw_proc())
    {
        init_session_uidStandalone();
        am_superuser = true;
    }
    else if (!child)
    {
        init_session_uidStandalone();
        am_superuser = true;
        if (!ThereIsAtLeastOneRole())
            ereport(WARNING, (
                errcode(E_UNDEFINED_OBJECT),
                errmsg("no roles are defined in this database system"),
                errhint("You should immediately run CREATE USER \"%s\" SUPERUSER;.",
                        username)));
    }
    else
    {
        /* normal multiuser case */
        ASSERT(proc_port != NULL);
        PerformAuthentication(proc_port);
        init_session_uid(username);
        am_superuser = superuser();
    }

    /*
     * If we're trying to shut down, only superusers can connect, and new
     * replication connections are not allowed.
     */
    if ((!am_superuser || am_walsender) &&
        proc_port != NULL &&
        proc_port->canAcceptConnections == CAC_WAITBACKUP)
    {
        if (am_walsender)
            ereport(FATAL, (
                errcode(E_INSUFFICIENT_PRIVILEGE),
                errmsg("new replication connections are not allowed during database shutdown")));
        else
            ereport(FATAL, (
                errcode(E_INSUFFICIENT_PRIVILEGE),
                errmsg("must be superuser to connect during database shutdown")));
    }

    /*
     * Binary upgrades only allowed super-user connections
     */
    if (is_binary_upgrade && !am_superuser)
    {
        ereport(FATAL, (
            errcode(E_INSUFFICIENT_PRIVILEGE),
            errmsg("must be superuser to connect in binary upgrade mode")));
    }

    /*
     * The last few connection slots are reserved for superusers. Although
     * replication connections currently require superuser privileges, we
     * don't allow them to consume the reserved slots, which are intended for
     * interactive use.
     */
    if ((!am_superuser || am_walsender) &&
        reserved_bknds > 0 &&
        !have_nfree_procs(reserved_bknds))
        ereport(FATAL, (
            errcode(E_TOO_MANY_CONNECTIONS),
            errmsg("remaining connection slots are reserved for non-replication superuser connections")));

    /*
     * If walsender, we don't want to connect to any particular database. Just
     * finish the backend startup by processing any options from the startup
     * packet, and we're done.
     */
    if (am_walsender)
    {
        ASSERT(!bootstrap);

        /* must have authenticated as a replication role */
        if (!is_authenticated_user_replication_role())
            ereport(FATAL, (
                errcode(E_INSUFFICIENT_PRIVILEGE),
                errmsg("must be replication role to start walsender")));

        /* process any options passed in the startup packet */
        if (proc_port != NULL)
            process_startup_options(proc_port, am_superuser);

        /* Apply post_auth_delay as soon as we've read all options */
        if (post_auth_delay > 0)
            pg_usleep(post_auth_delay * 1000000L);

        /* initialize client encoding */
        init_client_encoding();

        /* report this backend in the struct backend_status array */
        stat_backend_start();

        /* close the transaction we started above */
        commit_xact_cmd();

        return;
    }

    /*
     * Set up the global variables holding database id and default tablespace.
     * But note we won't actually try to touch the database just yet.
     *
     * We take a shortcut in the bootstrap case, otherwise we have to look up
     * the db's entry in pg_database.
     */
    if (bootstrap)
    {
        current_db_id = TemplateDbOid;
        current_tbs_id = DEFAULT_TBS_OID;
    }
    else if (in_dbname != NULL)
    {
        struct heap_tuple *tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTuple(in_dbname);
        if (!HT_VALID(tuple))
            ereport(FATAL, (
                errcode(E_UNDEFINED_DATABASE),
                errmsg("database \"%s\" does not exist", in_dbname)));
        dbform = (Form_pg_database) GET_STRUCT(tuple);
        current_db_id = HEAPTUP_OID(tuple);
        current_tbs_id = dbform->dattablespace;
        /* take database name from the caller, just for paranoia */
        strlcpy(dbname, in_dbname, sizeof(dbname));
    }
    else
    {
        /* caller specified database by OID */
        struct heap_tuple *tuple;
        Form_pg_database dbform;

        tuple = GetDatabaseTupleByOid(dboid);
        if (!HT_VALID(tuple))
        {
            ereport(FATAL, (
                errcode(E_UNDEFINED_DATABASE),
                errmsg("database %u does not exist", dboid)));
        }
        dbform = (Form_pg_database) GET_STRUCT(tuple);
        current_db_id = HEAPTUP_OID(tuple);
        current_tbs_id = dbform->dattablespace;
        ASSERT(current_db_id == dboid);
        strlcpy(dbname, NAME_TO_STR(dbform->datname), sizeof(dbname));
        /* pass the database name back to the caller */
        if (out_dbname)
            strcpy(out_dbname, dbname);
    }

    /* Now we can mark our struct proc entry with the database ID */
    /* (We assume this is an atomic store so no lock is needed) */
    current_proc->databaseId = current_db_id;

    /*
     * Now, take a writer's lock on the database we are trying to connect to.
     * If there is a concurrently running DROP DATABASE on that database, this
     * will block us until it finishes (and has committed its update of
     * pg_database).
     *
     * Note that the lock is not held long, only until the end of this startup
     * transaction. This is OK since we are already advertising our use of
     * the database in the struct proc array; anyone trying a DROP DATABASE after
     * this point will see us there.
     *
     * Note: use of ROW_EXCL_LOCK here is reasonable because we envision
     * our session as being a concurrent writer of the database. If we had a
     * way of declaring a session as being guaranteed-read-only, we could use
     * ACCESS_SHR_LOCK for such sessions and thereby not conflict against
     * CREATE DATABASE.
     */
    if (!bootstrap)
        lock_sobj(DatabaseRelationId, current_db_id, 0, ROW_EXCL_LOCK);

    /*
     * Recheck pg_database to make sure the target database hasn't gone away.
     * If there was a concurrent DROP DATABASE, this ensures we will die
     * cleanly without creating a mess.
     */
    if (!bootstrap)
    {
        struct heap_tuple *tuple;

        tuple = GetDatabaseTuple(dbname);
        if (!HT_VALID(tuple) ||
            current_db_id != HEAPTUP_OID(tuple) ||
            current_tbs_id != ((Form_pg_database) GET_STRUCT(tuple))->dattablespace)
            ereport(FATAL, (
                errcode(E_UNDEFINED_DATABASE),
                errmsg("database \"%s\" does not exist", dbname),
                errdetail("It seems to have just been dropped or renamed.")));
    }

    /*
     * Now we should be able to access the database directory safely. Verify
     * it's there and looks reasonable.
     */
    fullpath = get_db_path(current_db_id, current_tbs_id);

    if (!bootstrap)
    {
        if (access(fullpath, F_OK) == -1)
        {
            if (errno == ENOENT)
            {
                ereport(FATAL, (
                    errcode(E_UNDEFINED_DATABASE),
                    errmsg("database \"%s\" does not exist", dbname),
                    errdetail("The database subdirectory \"%s\" is missing.", fullpath)));
            }
            else
            {
                ereport(FATAL, (
                    errcode_file_access(),
                    errmsg("could not access directory \"%s\": %m", fullpath)));
            }
        }

        check_version(fullpath);
    }

    set_db_path(fullpath);

    /*
     * It's now possible to do real access to the system catalogs.
     *
     * Load relcache entries for the system catalogs. This must create at
     * least the minimum set of "nailed-in" cache entries.
     */
    relcache_init_phase3();

    /* set up ACL framework (so CheckMyDatabase can check permissions) */
    initialize_acl();

    /*
     * Re-read the pg_database row for our database, check permissions and set
     * up database-specific GUC settings. We can't do this until all the
     * database-access infrastructure is up. (Also, it wants to know if the
     * user is a superuser, so the above stuff has to happen first.)
     */
    if (!bootstrap)
        CheckMyDatabase(dbname, am_superuser);

    /*
     * Now process any command-line switches and any additional GUC variable
     * settings passed in the startup packet. We couldn't do this before
     * because we didn't know if client is a superuser.
     */
    if (proc_port != NULL)
        process_startup_options(proc_port, am_superuser);

    /* Process pg_db_role_setting options */
    process_settings(current_db_id, get_session_uid());

    /* Apply post_auth_delay as soon as we've read all options */
    if (post_auth_delay > 0)
        pg_usleep(post_auth_delay * 1000000L);

    /*
     * Initialize various default states that can't be set up until we've
     * selected the active user and gotten the right GUC settings.
     */

    /* set default namespace search path */
    init_search_path();

    /* initialize client encoding */
    init_client_encoding();

    /* report this backend in the struct backend_status array */
    if (!bootstrap)
        stat_backend_start();

    /* close the transaction we started above */
    if (!bootstrap)
        commit_xact_cmd();
}
int __lxstat64(STAT_ARGS(stat64))
{
    stat_init();
    return real___lxstat64(ver, path, buf);
}

int __lxstat(int ver, const char *path, struct stat *buf)
{
    stat_init();
    return real___lxstat(ver, path, buf);
}

int __fxstat64(FSTAT_ARGS(stat64))
{
    stat_init();
    return real___fxstat64(ver, filedes, buf);
}

int __xstat(STAT_ARGS(stat))
{
    stat_init();
    return real___xstat(ver, path, buf);
}
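/*
 * The __xstat/__lxstat wrappers above follow the usual LD_PRELOAD
 * interposition pattern: call stat_init() to make sure the "real" libc entry
 * points have been resolved, then forward the call. A plausible shape for
 * such a stat_init(), resolving the next definition with dlsym(RTLD_NEXT, ...),
 * is sketched below; it is an assumption, not this project's actual code.
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <sys/stat.h>

static int (*real___xstat)(int ver, const char *path, struct stat *buf);

static void stat_init(void)
{
    if (real___xstat == NULL) {
        /* look past this shared object for the libc implementation */
        real___xstat = (int (*)(int, const char *, struct stat *))
                       dlsym(RTLD_NEXT, "__xstat");
    }
}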
void stat_init_thread()
{
    stat_init(THREAD_STATS);
}
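/*
 * stat_init_global()/stat_init_thread() above select which counter set to
 * initialize by passing a scope constant into stat_init(). One hypothetical
 * shape for such an API (the enum, the 64-slot arrays, and the __thread
 * storage below are all assumptions for illustration):
 */
#include <string.h>

enum stat_scope { GLOBAL_STATS, THREAD_STATS };

static long global_counters[64];
static __thread long thread_counters[64];

static void stat_init(enum stat_scope scope)
{
    /* pick the counter block for the requested scope and zero it */
    long *c = (scope == GLOBAL_STATS) ? global_counters : thread_counters;
    memset(c, 0, 64 * sizeof(long));
}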
/***** INITIALIZE ********************************************************/
char *fmmv_initialize(FmmvHandle **fh, struct FmmvOptions *options,
                      struct FmmvStatistics *statistics)
{
    int err;
    struct FmmvOptions _options;
    FmmvHandle *FMMV = *fh;

    if (!options) {
        _options = fmmvGetDefaultOptions();
        options = &_options;
    }

    FMMV->statistics.PAPIeventSet = options->PAPIeventSet;
    stat_init(FMMV);
    stat_start(FMMV, STAT_TOTAL);
    stat_start(FMMV, STAT_INITIALIZE);

    if (!FMMV) goto _err;

    /* TODO: check options */
    FMMV->beta = options->beta;
    FMMV->pM = options->pM;
    FMMV->pL = options->pL;
    FMMV->s_eps = options->s;
    FMMV->ws = options->ws;
    FMMV->splitThreshold = options->splitThreshold;
    FMMV->splitTargetThreshold = options->splitTargetThreshold;
    FMMV->maxLevel = options->levels;
    FMMV->directEvalThreshold = options->directEvalThreshold;
    FMMV->periodicBoundaryConditions = options->periodicBoundaryConditions;
    FMMV->extrinsicCorrection = options->extrinsicCorrection;
    FMMV->scale = (_FLOAT_) options->scale;
    FMMV->useHilbertOrder = options->useHilbertOrder;
    FMMV->directEvalAccuracy = options->directEvalAccuracy;
    FMMV->useFarfieldNearfieldThreads = options->useFarfieldNearfieldThreads;
    FMMV->reducedScheme = options->reducedScheme;
    FMMV->lambda = 0;

    /* TODO: handle additional options */
    /*
    if (options->x) { // TODO: no global variables!!!
        for (i=0; i<FMMV->s_eps; i++) {
            user_defined_x[i] = (_FLOAT_) options->x[i];
            user_defined_w[i] = (_FLOAT_) options->w[i];
            user_defined_M[i] = (_FLOAT_) options->M[i];
        }
        FMMV->lambda = user_defined_x;
        FMMV->w = user_defined_w;
        FMMV->M = user_defined_M;
    }
    */

    /* TODO: search for best values: */
    if (FMMV->splitThreshold == -1)
        FMMV->splitThreshold = 200;       /* TODO: find precision-, dipole_grad-, etc.- dependent better value */
    if (FMMV->splitTargetThreshold == -1)
        FMMV->splitTargetThreshold = 200; /* TODO: find precision-, dipole_grad-, etc.- dependent better value */
    if (FMMV->splitThreshold == 0)
        FMMV->splitTargetThreshold = 0;   /* non adaptive FMM! */

#if (FMM_PRECISION==0)
    if (FMMV->maxLevel == -1)
        FMMV->maxLevel = 22;
#elif (FMM_PRECISION==1)
    if (FMMV->maxLevel == -1)
        FMMV->maxLevel = 51;
#endif
    if (FMMV->directEvalThreshold == -1)
        FMMV->directEvalThreshold = 200;  /* TODO: find precision-, dipole_grad-, etc.- dependent better value */

    FMMV->allocatedMemory = 0;
    FMMV->maxAllocatedMemory = 0;
    FMMV->noOfDirectInteractions = 0;

    init_all(FMMV);

    stat_start(FMMV, STAT_BUILD_TREE);
    FMMV->perm = (int *) FMMV_MALLOC(FMMV, FMMV->NParticles * sizeof(int));
    if (FMMV->perm == 0) goto _err;
    if (FMMV->targets) {
        FMMV->permTargets = (int *) FMMV_MALLOC(FMMV, FMMV->NTargets * sizeof(int));
        if (FMMV->permTargets == 0) goto _err;
        err = buildTree_ST(FMMV);
        if (err) goto _err;
    }
    else {
        FMMV->permTargets = FMMV->perm;
        err = buildTree(FMMV);
        if (err) goto _err;
    }
    stat_stop(FMMV, STAT_BUILD_TREE);

    if (FMMV->targets) {
        FMMV->maxTargetLevel = FMMV->maxLevel;
        for (; FMMV->firstSourceBoxOfLevel[FMMV->maxLevel] == 0; FMMV->maxLevel--)
            ;
        for (; FMMV->firstTargetBoxOfLevel[FMMV->maxTargetLevel] == 0; FMMV->maxTargetLevel--)
            ;
        genTreeStatistics_ST(FMMV);
    }
    else {
        for (; FMMV->firstSourceBoxOfLevel[FMMV->maxLevel] == 0; FMMV->maxLevel--)
            ;
        genTreeStatistics(FMMV);
        FMMV->statistics.noOfTargets = -1;
        FMMV->statistics.noOfTargetLevels = -1;
        FMMV->statistics.noOfTargetBoxes = -1;
        FMMV->statistics.noOfTargetLeafBoxes = -1;
        FMMV->statistics.averageNoOfTargetsPerLeafBox = -1;
    }

    ida_allocate(FMMV);
    copy_particles(FMMV);

    stat_stop(FMMV, STAT_INITIALIZE);

    if (statistics) {
        *statistics = FMMV->statistics;
        statistics->maxAllocatedMemory = FMMV->maxAllocatedMemory;
        statistics->noOfDirectInteractions = FMMV->noOfDirectInteractions;
        statistics->PAPIeventSet = FMMV->statistics.PAPIeventSet;
    }

    *fh = (void *) FMMV;
    return 0;

_err:
    return errbuf;
}
void kb_init(kb_t *kb, cmd_ln_t *config)
{
    kbcore_t *kbcore;
    mdef_t *mdef;
    dict_t *dict;
    dict2pid_t *d2p;
    int32 cisencnt;

    /* STRUCTURE: Initialize the kb structure to zero, just in case */
    memset(kb, 0, sizeof(*kb));

    kb->kbcore = kbcore_init(config);
    if (kb->kbcore == NULL)
        E_FATAL("Initialization of kb failed\n");

    kbcore = kb->kbcore;
    mdef = kbcore_mdef(kbcore);
    dict = kbcore_dict(kbcore);
    d2p = kbcore_dict2pid(kbcore);

    err_set_debug_level(cmd_ln_int32_r(config, "-debug"));

    /* STRUCTURE INITIALIZATION: Initialize the beam data structure */
    if (cmd_ln_exists_r(config, "-ptranskip")) {
        kb->beam = beam_init(cmd_ln_float64_r(config, "-beam"),
                             cmd_ln_float64_r(config, "-pbeam"),
                             cmd_ln_float64_r(config, "-wbeam"),
                             cmd_ln_float64_r(config, "-wend_beam"),
                             cmd_ln_int32_r(config, "-ptranskip"),
                             mdef_n_ciphone(mdef),
                             kbcore->logmath);

        /* REPORT: Report the parameters in the beam data structure */
        if (REPORT_KB)
            beam_report(kb->beam);
    }

    /* STRUCTURE INITIALIZATION: Initialize the fast GMM computation data structure */
    if (cmd_ln_exists_r(config, "-ci_pbeam")) {
        kb->fastgmm = fast_gmm_init(cmd_ln_int32_r(config, "-ds"),
                                    cmd_ln_int32_r(config, "-cond_ds"),
                                    cmd_ln_int32_r(config, "-dist_ds"),
                                    cmd_ln_int32_r(config, "-gs4gs"),
                                    cmd_ln_int32_r(config, "-svq4svq"),
                                    cmd_ln_float64_r(config, "-subvqbeam"),
                                    cmd_ln_float64_r(config, "-ci_pbeam"),
                                    cmd_ln_float64_r(config, "-tighten_factor"),
                                    cmd_ln_int32_r(config, "-maxcdsenpf"),
                                    mdef->n_ci_sen,
                                    kbcore->logmath);

        /* REPORT: Report the parameters in the fast_gmm_t data structure */
        if (REPORT_KB)
            fast_gmm_report(kb->fastgmm);
    }

    /* STRUCTURE INITIALIZATION: Initialize the phoneme lookahead data structure */
    if (cmd_ln_exists_r(config, "-pl_beam")) {
        kb->pl = pl_init(cmd_ln_int32_r(config, "-pheurtype"),
                         cmd_ln_float64_r(config, "-pl_beam"),
                         mdef_n_ciphone(mdef),
                         kbcore->logmath);

        /* REPORT: Report the parameters in the pl_t data structure */
        if (REPORT_KB)
            pl_report(kb->pl);
    }

    /* STRUCTURE INITIALIZATION: Initialize the acoustic score data structure */
    {
        int32 pl_window = 1;

        if (cmd_ln_exists_r(config, "-pl_window"))
            pl_window = cmd_ln_int32_r(config, "-pl_window");

        for (cisencnt = 0; cisencnt == mdef->cd2cisen[cisencnt]; cisencnt++)
            ;
        kb->ascr = ascr_init(kbcore_n_mgau(kbcore),
                             kb->kbcore->dict2pid->n_comstate,
                             mdef_n_sseq(mdef),
                             dict2pid_n_comsseq(d2p),
                             pl_window,
                             cisencnt);

        if (REPORT_KB)
            ascr_report(kb->ascr);
    }

    /* Initialize the front end if -adcin is specified */
    if (cmd_ln_exists_r(config, "-adcin") && cmd_ln_boolean_r(config, "-adcin")) {
        if ((kb->fe = fe_init_auto_r(config)) == NULL) {
            E_FATAL("fe_init_auto_r() failed\n");
        }
    }

    /* STRUCTURE INITIALIZATION: The feature vector */
    if ((kb->feat = feat_array_alloc(kbcore_fcb(kbcore), S3_MAX_FRAMES)) == NULL)
        E_FATAL("feat_array_alloc() failed\n");

    /* STRUCTURE INITIALIZATION: The statistics for the search */
    kb->stat = stat_init();

    /* STRUCTURE INITIALIZATION: The adaptation routines of the search */
    kb->adapt_am = adapt_am_init();
    if (cmd_ln_str_r(config, "-mllr")) {
        kb_setmllr(cmd_ln_str_r(config, "-mllr"),
                   cmd_ln_str_r(config, "-cb2mllr"), kb);
    }

    /* CHECK: make sure that when -cond_ds is specified, a Gaussian map is also specified */
    if (cmd_ln_int32_r(config, "-cond_ds") > 0 && kb->kbcore->gs == NULL)
        E_FATAL("Conditional Down Sampling requires the use of a Gaussian Selection map\n");

    /* MEMORY ALLOCATION: Word best score and exit */
    /* Open hypseg file if specified */
    kb->matchsegfp = kb->matchfp = NULL;
    kb->matchsegfp = file_open(cmd_ln_str_r(config, "-hypseg"));
    kb->matchfp = file_open(cmd_ln_str_r(config, "-hyp"));

    if (cmd_ln_exists_r(config, "-hmmdump"))
        kb->hmmdumpfp = cmd_ln_int32_r(config, "-hmmdump") ? stderr : NULL;

    /* STRUCTURE INITIALIZATION: The search data structure, done only after kb
       is initialized; kb acts as a clipboard. */
    if (cmd_ln_exists_r(config, "-op_mode")) {
        /* -op_mode, if set (i.e. not -1), takes precedence over -mode. */
        if (cmd_ln_int32_r(config, "-op_mode") != -1)
            kb->op_mode = cmd_ln_int32_r(config, "-op_mode");
        else
            kb->op_mode = srch_mode_str_to_index(cmd_ln_str_r(config, "-mode"));

        E_INFO("SEARCH MODE INDEX %d\n", kb->op_mode);

        if ((kb->srch = (srch_t *) srch_init(kb, kb->op_mode)) == NULL) {
            E_FATAL("Search initialization failed. Forced exit\n");
        }

        if (REPORT_KB) {
            srch_report(kb->srch);
        }
    }
}
static int show_stats_output(struct imsg *imsg)
{
    struct stats *stats;

    if (imsg->hdr.type != IMSG_STATS)
        errx(1, "show_stats_output: bad hdr type (%d)", imsg->hdr.type);
    if (IMSG_DATA_SIZE(imsg) != sizeof(*stats))
        errx(1, "show_stats_output: bad data size");

    stats = imsg->data;
    stat_init(stats->counters, STATS_MAX);

    stat_print(STATS_CONTROL_SESSION, STAT_COUNT);
    stat_print(STATS_CONTROL_SESSION, STAT_ACTIVE);
    stat_print(STATS_CONTROL_SESSION, STAT_MAXACTIVE);

    stat_print(STATS_MDA_SESSION, STAT_COUNT);
    stat_print(STATS_MDA_SESSION, STAT_ACTIVE);
    stat_print(STATS_MDA_SESSION, STAT_MAXACTIVE);

    stat_print(STATS_MTA_SESSION, STAT_COUNT);
    stat_print(STATS_MTA_SESSION, STAT_ACTIVE);
    stat_print(STATS_MTA_SESSION, STAT_MAXACTIVE);

    stat_print(STATS_LKA_SESSION, STAT_COUNT);
    stat_print(STATS_LKA_SESSION, STAT_ACTIVE);
    stat_print(STATS_LKA_SESSION, STAT_MAXACTIVE);
    stat_print(STATS_LKA_SESSION_MX, STAT_COUNT);
    stat_print(STATS_LKA_SESSION_HOST, STAT_COUNT);
    stat_print(STATS_LKA_SESSION_CNAME, STAT_COUNT);
    stat_print(STATS_LKA_FAILURE, STAT_COUNT);

    printf("parent.uptime=%lld\n",
        (long long int) (time(NULL) - stats->parent.start));

    stat_print(STATS_QUEUE_LOCAL, STAT_COUNT);
    stat_print(STATS_QUEUE_REMOTE, STAT_COUNT);

    stat_print(STATS_SCHEDULER, STAT_COUNT);
    stat_print(STATS_SCHEDULER, STAT_ACTIVE);
    stat_print(STATS_SCHEDULER, STAT_MAXACTIVE);

    stat_print(STATS_SCHEDULER_BOUNCES, STAT_COUNT);
    stat_print(STATS_SCHEDULER_BOUNCES, STAT_ACTIVE);
    stat_print(STATS_SCHEDULER_BOUNCES, STAT_MAXACTIVE);

    stat_print(STATS_RAMQUEUE_HOST, STAT_ACTIVE);
    stat_print(STATS_RAMQUEUE_BATCH, STAT_ACTIVE);
    stat_print(STATS_RAMQUEUE_MESSAGE, STAT_ACTIVE);
    stat_print(STATS_RAMQUEUE_ENVELOPE, STAT_ACTIVE);
    stat_print(STATS_RAMQUEUE_HOST, STAT_MAXACTIVE);
    stat_print(STATS_RAMQUEUE_BATCH, STAT_MAXACTIVE);
    stat_print(STATS_RAMQUEUE_MESSAGE, STAT_MAXACTIVE);
    stat_print(STATS_RAMQUEUE_ENVELOPE, STAT_MAXACTIVE);

    printf("smtp.errors.delays=%zd\n", stats->smtp.delays);
    printf("smtp.errors.linetoolong=%zd\n", stats->smtp.linetoolong);
    printf("smtp.errors.read_eof=%zd\n", stats->smtp.read_eof);
    printf("smtp.errors.read_system=%zd\n", stats->smtp.read_error);
    printf("smtp.errors.read_timeout=%zd\n", stats->smtp.read_timeout);
    printf("smtp.errors.tempfail=%zd\n", stats->smtp.tempfail);
    printf("smtp.errors.toofast=%zd\n", stats->smtp.toofast);
    printf("smtp.errors.write_eof=%zd\n", stats->smtp.write_eof);
    printf("smtp.errors.write_system=%zd\n", stats->smtp.write_error);
    printf("smtp.errors.write_timeout=%zd\n", stats->smtp.write_timeout);

    stat_print(STATS_SMTP_SESSION, STAT_COUNT);
    stat_print(STATS_SMTP_SESSION_INET4, STAT_COUNT);
    stat_print(STATS_SMTP_SESSION_INET6, STAT_COUNT);

    printf("smtp.sessions.aborted=%zd\n",
        stats->smtp.read_eof + stats->smtp.read_error +
        stats->smtp.write_eof + stats->smtp.write_error);

    stat_print(STATS_SMTP_SESSION, STAT_ACTIVE);
    stat_print(STATS_SMTP_SESSION, STAT_MAXACTIVE);

    printf("smtp.sessions.timeout=%zd\n",
        stats->smtp.read_timeout + stats->smtp.write_timeout);

    stat_print(STATS_SMTP_SMTPS, STAT_COUNT);
    stat_print(STATS_SMTP_SMTPS, STAT_ACTIVE);
    stat_print(STATS_SMTP_SMTPS, STAT_MAXACTIVE);

    stat_print(STATS_SMTP_STARTTLS, STAT_COUNT);
    stat_print(STATS_SMTP_STARTTLS, STAT_ACTIVE);
    stat_print(STATS_SMTP_STARTTLS, STAT_MAXACTIVE);

    return (1);
}
int capt_capt(opt_t *opt)
{
    int frame_size;
    unsigned int frame_num;
    unsigned char *frame_buf_yuv, *frame_buf_rgb, *jpeg_buf;
    struct timeval start;
    const int fps = opt->fps_opt;
    const int wait_flag = (opt->fps_opt < opt->fps_dev);
    const int w = opt->width, h = opt->height;
    int q = 75;                 /* jpeg compress quality */
    int len;
    char mh[PIPE_HEADER_LEN];   /* EMON system Message Header */
    unsigned int ts;            /* timestamp in Message Header */

    frame_size = mchip_hsize() * mchip_vsize() * 3;
    frame_buf_yuv = (unsigned char *) malloc(frame_size);
    frame_buf_rgb = (unsigned char *) malloc(frame_size);
    jpeg_buf = (unsigned char *) malloc(frame_size);
    /* allocate enough memory for jpeg_buf */
    if (frame_buf_yuv == NULL || frame_buf_rgb == NULL || jpeg_buf == NULL) {
        e_printf("cannot malloc for frame_buf or jpeg_buf\n");
        return -1;
    }

    stat_init(&STAT);
    ts = rand() * opt->freq;

    mchip_continuous_start();
    gettimeofday(&start, NULL);
    STAT.start = start;         /* copy struct timeval */
    d2_printf("\njpegcapt: %ld.%06ld: wait_flag=%d",
              start.tv_sec, start.tv_usec, wait_flag);

    for (frame_num = 0;
         opt->max_frame == 0 || frame_num < opt->max_frame;
         frame_num++, ts += opt->freq) {
        struct timeval c, b, a;   /* capture, before(encoding), after */
        int d1, d2;

        if (debug_level > 0 && (frame_num % opt->stat_freq) == 0) {
            stat_print(&STAT, frame_num);
        }

        if (wait_proper_time(&start, fps, frame_num, 1) < 0) {
            STAT.skip_count++;
            continue;             /* skip capture because it's too late */
        }
        STAT.capt_count++;

        gettimeofday(&c, NULL);
        mchip_continuous_read(frame_buf_yuv, mchip_hsize() * mchip_vsize() * 2);
        yuv_convert(frame_buf_yuv, frame_buf_rgb, mchip_hsize(), mchip_vsize());
        gettimeofday(&b, NULL);
        len = jpeg_encode(frame_buf_rgb, jpeg_buf, w, h, q);
        gettimeofday(&a, NULL);

        d1 = timeval_diff_usec(&b, &c);
        d2 = timeval_diff_usec(&a, &b);
        timeval_add_usec(&STAT.capt_total, d1);
        timeval_add_usec(&STAT.jpgenc_total, d2);
        d3_printf("\n frame=%d, ts=%d, jpg_len=%d, q=%d"
                  ", t1=%d, t2=%d", frame_num, ts, len, q, d1, d2);

        if (len > opt->dsize) {
            q *= 0.75;
            continue;             /* skip this picture */
        } else if (len < opt->dsize * 0.9 && q < 90) {
            q++;
        }

        bzero(&mh, PIPE_HEADER_LEN);
        pipe_set_version(&mh, 1);
        pipe_set_marker(&mh, 1);
        pipe_set_length(&mh, len);
        pipe_set_timestamp(&mh, ts);

        if (pipe_blocked_write_block(STDOUT, &mh, jpeg_buf) == PIPE_ERROR) {
            d1_printf("\npipe_blocked_write_block error!!"
                      "len=%d, ts=%ud", len, ts);
        } else {
            STAT.out_count++;
        }
    }

    if (debug_level > 0) {
        stat_print(&STAT, frame_num);
    }
    return 0;
}