void pipe_setup( char *cluster, char *proc, char * /*capability*/ ) { static char host[1024]; UsePipes = TRUE; dprintf( D_ALWAYS, "Job = %s.%s\n", cluster, proc ); if( Spool ) { free( Spool ); } Spool = param( "SPOOL" ); if( Spool == NULL ) { EXCEPT( "Spool directory not specified in config file" ); } snprintf( host, 1024, "%s", get_local_hostname().Value() ); ExecutingHost = host; open_named_pipe( "/tmp/syscall_req", O_RDONLY, REQ_SOCK ); dprintf( D_ALWAYS, "Shadow: REQ_SOCK connected, fd = %d\n", REQ_SOCK ); open_named_pipe( "/tmp/syscall_rpl", O_WRONLY, RPL_SOCK ); dprintf( D_ALWAYS, "Shadow: RPL_SOCK connected, fd = %d\n", RPL_SOCK ); open_named_pipe( "/tmp/log", O_RDONLY, CLIENT_LOG ); dprintf( D_ALWAYS, "Shadow: CLIENT_LOG connected, fd = %d\n", CLIENT_LOG ); sock_RSC1 = RSC_ShadowInit( RSC_SOCK, CLIENT_LOG ); start_job( cluster, proc ); }
/*
 * NOTE: suppress_lock must be held.
 */
/* Emit the "Last message ... repeated N times" summary for the message
 * currently being suppressed, then drop the suppression state.
 *
 * A fresh internal LogMessage is synthesized: it inherits priority, HOST
 * and PROGRAM from the suppressed message so the summary is routed and
 * attributed the same way.  Ownership of the new message passes to
 * log_queue_push_tail().
 */
static void log_writer_last_msg_flush(LogWriter *self)
{
  LogMessage *m;
  LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
  gchar buf[1024];
  gssize len;
  const gchar *p;

  msg_debug("Suppress timer elapsed, emitting suppression summary", NULL);

  m = log_msg_new_empty();
  /* use the receive time as the message stamp for the summary */
  m->timestamps[LM_TS_STAMP] = m->timestamps[LM_TS_RECVD];
  m->pri = self->last_msg->pri;
  m->flags = LF_INTERNAL | LF_LOCAL;

  /* copy HOST and PROGRAM from the suppressed message so the summary is
   * attributed to the same sender */
  p = log_msg_get_value(self->last_msg, LM_V_HOST, &len);
  log_msg_set_value(m, LM_V_HOST, p, len);
  p = log_msg_get_value(self->last_msg, LM_V_PROGRAM, &len);
  log_msg_set_value(m, LM_V_PROGRAM, p, len);

  /* only the first 20 chars of the original message are quoted */
  len = g_snprintf(buf, sizeof(buf), "Last message '%.20s' repeated %d times, suppressed by syslog-ng on %s",
                   log_msg_get_value(self->last_msg, LM_V_MESSAGE, NULL),
                   self->last_msg_count,
                   get_local_hostname(NULL));
  log_msg_set_value(m, LM_V_MESSAGE, buf, len);

  /* internally generated message, no ack expected */
  path_options.ack_needed = FALSE;

  log_queue_push_tail(self->queue, m, &path_options);
  log_writer_last_msg_release(self);
}
// Return our hostname in a static data buffer. const char * my_hostname() { // if( ! hostnames_initialized ) { // init_hostnames(); // } // return hostname; static MyString __my_hostname; __my_hostname = get_local_hostname(); return __my_hostname.Value(); }
// Handle the starter process having exited.
//
// status: waitpid-style exit status of the starter (tested with
// WIFSIGNALED below).
//
// Writes a terminal history record for the job, then tears down
// everything associated with the starter: the kill timer, any leftover
// processes in its family, and its execute directory.
void Starter::exited(int status)
{
	ClassAd *jobAd = NULL;
	bool jobAdNeedsFree = true;
	if (s_claim && s_claim->ad()) {
		// real jobs in the startd have claims and job ads, boinc and perhaps others won't
		jobAd = s_claim->ad();
		jobAdNeedsFree = false;
	} else {
		// Dummy up an ad
		int now = (int) time(0);
		jobAd = new ClassAd();
		jobAd->SetMyTypeName("Job");
		jobAd->SetTargetTypeName("Machine");
		jobAd->Assign(ATTR_CLUSTER_ID, now);
		jobAd->Assign(ATTR_PROC_ID, 1);
		jobAd->Assign(ATTR_OWNER, "boinc");
		jobAd->Assign(ATTR_Q_DATE, (int)s_birthdate);
		jobAd->Assign(ATTR_JOB_PRIO, 0);
		jobAd->Assign(ATTR_IMAGE_SIZE, 0);
		jobAd->Assign(ATTR_JOB_CMD, "boinc");
		// global job id: host#cluster#proc#qdate (cluster and qdate both
		// synthesized from "now" above)
		MyString gjid;
		gjid.formatstr("%s#%d#%d#%d", get_local_hostname().Value(), now, 1, now);
		jobAd->Assign(ATTR_GLOBAL_JOB_ID, gjid);
	}

	// First, patch up the ad a little bit
	jobAd->Assign(ATTR_COMPLETION_DATE, (int)time(0));
	int runtime = time(0) - s_birthdate;
	jobAd->Assign(ATTR_JOB_REMOTE_WALL_CLOCK, runtime);
	// a starter killed by a signal is recorded as REMOVED, otherwise COMPLETED
	int jobStatus = COMPLETED;
	if (WIFSIGNALED(status)) {
		jobStatus = REMOVED;
	}
	jobAd->Assign(ATTR_JOB_STATUS, jobStatus);
	// record the terminal state before the ad may be deleted below
	AppendHistory(jobAd);
	WritePerJobHistoryFile(jobAd, true /* use gjid for filename*/);
	if (jobAdNeedsFree) {
		delete jobAd;
	}

	// Make sure our time isn't going to go off.
	cancelKillTimer();

	// Just for good measure, try to kill what's left of our whole
	// pid family.
	if (daemonCore->Kill_Family(s_pid) == FALSE) {
		dprintf(D_ALWAYS, "error killing process family of starter with pid %u\n", s_pid);
	}

	// Now, delete any files lying around.
	ASSERT( executeDir() );
	cleanup_execute_dir( s_pid, executeDir() );

#if defined(LINUX)
	if( param_boolean( "GLEXEC_STARTER", false ) ) {
		cleanupAfterGlexec();
	}
#endif
}
// Schedd daemon-core entry point: parse the daemon's command line, wait
// for user credentials to be ready (non-Windows), then bring up the
// scheduler modules and the job queue.
//
// argc/argv: daemon-core style arguments; only "-n <name>" is accepted.
void main_init(int argc, char* argv[])
{
	char** ptr;
	MyString job_queue_name;

	// Parse "-n <name>"; anything else prints usage and exits.
	int argc_count = 1;
	for(ptr = argv + 1, argc_count = 1; argc_count<argc && *ptr; ptr++,argc_count++)
	{
		if(ptr[0][0] != '-')
		{
			usage(argv[0]);
		}
		switch(ptr[0][1])
		{
		  case 'n':
			// free any previously-set name so repeated -n flags don't leak
			if (Name) {
				free(Name);
			}
			Name = build_valid_daemon_name( *(++ptr) );
			break;
		  default:
			usage(argv[0]);
		}
	}

	// Tell Attrlist to publish the server time
	AttrList_setPublishServerTime( true );

	// Initialize DaemonCore's use of ProcFamily. We do this so that we
	// launch a ProcD if necessary so that any Starters that we launch
	// for Local Universe jobs can share a single ProcD, instead of
	// each creating their own
	daemonCore->Proc_Family_Init();

#if defined(HAVE_DLOPEN)
	// Load and early-initialize plugins before the job queue log is read.
	ClassAdLogPluginManager::Load();
	ScheddPluginManager::Load();
	ScheddPluginManager::EarlyInitialize();
	ClassAdLogPluginManager::EarlyInitialize();
#endif

/* schedd doesn't care about other daemons. only that it has the ability
 * to run jobs. so the following code is for now not needed.

	// ZKM HACK TO MAKE SURE SCHEDD HAS USER CREDENTIALS
	//
	// if we are using the credd and credmon, we need to init them before
	// doing anything!
	char* p = param("SEC_CREDENTIAL_DIRECTORY");
	if(p) {
		free(p);
		dprintf(D_ALWAYS, "SCHEDD: INITIALIZING USER CREDS\n");
		Daemon *my_credd;

		// we will abort if we can't locate the credd, so let's try a
		// few times. locate() caches the result so we have to destroy
		// the object and make a new one each time.
		int retries = 20;
		bool success = false;
		do {
			// allocate a credd
			my_credd = new Daemon(DT_CREDD);
			if(my_credd) {
				// call locate
				bool loc_rc = my_credd->locate();
				if(loc_rc) {
					// get a connected relisock
					CondorError errstack;
					ReliSock* r = (ReliSock*)my_credd->startCommand(
						CREDD_REFRESH_ALL, Stream::reli_sock, 20, &errstack);
					if ( r ) {
						// ask the credd to get us some fresh user creds
						ClassAd ad;
						putClassAd(r, ad);
						r->end_of_message();
						r->decode();
						getClassAd(r, ad);
						r->end_of_message();
						dprintf(D_SECURITY | D_FULLDEBUG, "SCHEDD: received ad from CREDD:\n");
						dPrintAd(D_SECURITY | D_FULLDEBUG, ad);
						MyString result;
						ad.LookupString("Result", result);
						if(result == "success") {
							success = true;
						} else {
							dprintf(D_FULLDEBUG, "SCHEDD: warning, creddmon returned failure.\n");
						}
						// clean up.
						delete r;
					} else {
						dprintf(D_FULLDEBUG, "SCHEDD: warning, startCommand failed, %s\n", errstack.getFullText(true).c_str());
					}
				} else {
					dprintf(D_FULLDEBUG, "SCHEDD: warning, locate failed.\n");
				}

				// clean up.
				delete my_credd;
			} else {
				dprintf(D_FULLDEBUG, "SCHEDD: warning, new Daemon(DT_CREDD) failed.\n");
			}

			// if something went wrong, sleep and retry (finit number of times)
			if(!success) {
				dprintf(D_FULLDEBUG, "SCHEDD: sleeping and trying again %i times.\n", retries);
				sleep(1);
				retries--;
			}
		} while ((retries > 0) && (success == false));

		// except if fail
		if (!success) {
			EXCEPT("FAILED TO INITIALIZE USER CREDS (locate failed)");
		}
	}
	// END ZKM HACK
*/

#ifndef WIN32
	// if using the SEC_CREDENTIAL_DIRECTORY, confirm we are "up-to-date".
	// at the moment, we take an "all-or-nothing" approach. ultimately, this
	// should be per-user, and the SchedD should start normally and run jobs
	// for users who DO have valid credentials, and simply holding on to jobs
	// in idle state for users who do NOT have valid credentials.
	//
	char* p = param("SEC_CREDENTIAL_DIRECTORY");
	if(p) {
		free(p);
		bool success = false;
		// poll every 10 seconds for up to 10 minutes (60 tries)
		int retries = 60;
		do {
			// look for existence of file that says everything is up-to-date.
			success = credmon_poll(NULL, false, false);
			if(!success) {
				dprintf(D_ALWAYS, "SCHEDD: User credentials not up-to-date. Start-up delayed. Waiting 10 seconds and trying %i more times.\n", retries);
				sleep(10);
				retries--;
			}
		} while ((!success) && (retries > 0));

		// we tried, we give up.
		if(!success) {
			EXCEPT("User credentials unavailable after 10 minutes");
		}
	}
	// User creds good to go, let's start this thing up!
#endif // WIN32

	// Initialize all the modules
	scheduler.Init();
	scheduler.Register();

	// Initialize the job queue
	char *job_queue_param_name = param("JOB_QUEUE_LOG");
	if (job_queue_param_name == NULL) {
		// the default place for the job_queue.log is in spool
		job_queue_name.formatstr( "%s/job_queue.log", Spool);
	} else {
		job_queue_name = job_queue_param_name; // convert char * to MyString
		free(job_queue_param_name);
	}

	// Make a backup of the job queue?
	if ( param_boolean_crufty("SCHEDD_BACKUP_SPOOL", false) ) {
		MyString hostname;
		hostname = get_local_hostname();
		MyString job_queue_backup;
		// backup name embeds the host and current epoch time for uniqueness
		job_queue_backup.formatstr( "%s/job_queue.bak.%s.%ld", Spool, hostname.Value(), (long)time(NULL) );
		if ( copy_file( job_queue_name.Value(), job_queue_backup.Value() ) ) {
			// copy_file returns non-zero on failure; startup continues anyway
			dprintf( D_ALWAYS, "Failed to backup spool to '%s'\n", job_queue_backup.Value() );
		} else {
			dprintf( D_FULLDEBUG, "Spool backed up to '%s'\n", job_queue_backup.Value() );
		}
	}

	int max_historical_logs = param_integer( "MAX_JOB_QUEUE_LOG_ROTATIONS", DEFAULT_MAX_JOB_QUEUE_LOG_ROTATIONS );

	InitJobQueue(job_queue_name.Value(),max_historical_logs);
	PostInitJobQueue();

	// Initialize the dedicated scheduler stuff
	dedicated_scheduler.initialize();

	// Do a timeout now at startup to get the ball rolling...
	scheduler.timeout();

#if defined(HAVE_DLOPEN)
	ScheddPluginManager::Initialize();
	ClassAdLogPluginManager::Initialize();
#endif

	daemonCore->InstallAuditingCallback( AuditLogNewConnection );
}
gboolean log_macro_expand(GString *result, gint id, gboolean escape, LogTemplateOptions *opts, gint tz, gint32 seq_num, const gchar *context_id, LogMessage *msg) { static LogTemplateOptions default_opts = { TRUE, TS_FMT_BSD, 0, { NULL, NULL }, { NULL, NULL } }; if (!opts) opts = &default_opts; switch (id) { case M_FACILITY: { /* facility */ const char *n; n = syslog_name_lookup_name_by_value(msg->pri & LOG_FACMASK, sl_facilities); if (n) { g_string_append(result, n); } else { format_uint32_padded(result, 0, 0, 16, (msg->pri & LOG_FACMASK) >> 3); } break; } case M_FACILITY_NUM: { format_uint32_padded(result, 0, 0, 10, (msg->pri & LOG_FACMASK) >> 3); break; } case M_LEVEL: { /* level */ const char *n; n = syslog_name_lookup_name_by_value(msg->pri & LOG_PRIMASK, sl_levels); if (n) { g_string_append(result, n); } else { format_uint32_padded(result, 0, 0, 10, msg->pri & LOG_PRIMASK); } break; } case M_LEVEL_NUM: { format_uint32_padded(result, 0, 0, 10, msg->pri & LOG_PRIMASK); break; } case M_TAG: { format_uint32_padded(result, 2, '0', 16, msg->pri); break; } case M_TAGS: { log_msg_print_tags(msg, result); break; } case M_BSDTAG: { format_uint32_padded(result, 0, 0, 10, (msg->pri & LOG_PRIMASK)); g_string_append_c(result, (((msg->pri & LOG_FACMASK) >> 3) + 'A')); break; } case M_PRI: { format_uint32_padded(result, 0, 0, 10, msg->pri); break; } case M_HOST: { if (msg->flags & LF_CHAINED_HOSTNAME) { /* host */ const gchar *p1, *p2; int remaining, length; gssize host_len; const gchar *host = log_msg_get_value(msg, LM_V_HOST, &host_len); p1 = memchr(host, '@', host_len); if (p1) p1++; else p1 = host; remaining = host_len - (p1 - host); p2 = memchr(p1, '/', remaining); length = p2 ? 
p2 - p1 : host_len - (p1 - host); result_append(result, p1, length, escape); } else { result_append_value(result, msg, LM_V_HOST, escape); } break; } case M_SDATA: if (escape) { GString *sdstr = g_string_sized_new(0); log_msg_append_format_sdata(msg, sdstr, seq_num); result_append(result, sdstr->str, sdstr->len, TRUE); g_string_free(sdstr, TRUE); } else { log_msg_append_format_sdata(msg, result, seq_num); } break; case M_MSGHDR: if ((msg->flags & LF_LEGACY_MSGHDR)) { /* fast path for now, as most messages come from legacy devices */ result_append_value(result, msg, LM_V_LEGACY_MSGHDR, escape); } else { /* message, complete with program name and pid */ gssize len; len = result->len; result_append_value(result, msg, LM_V_PROGRAM, escape); if (len != result->len) { const gchar *pid = log_msg_get_value(msg, LM_V_PID, &len); if (len > 0) { result_append(result, "[", 1, FALSE); result_append(result, pid, len, escape); result_append(result, "]", 1, FALSE); } result_append(result, ": ", 2, FALSE); } } break; case M_MESSAGE: if (cfg_is_config_version_older(configuration, 0x0300)) log_macro_expand(result, M_MSGHDR, escape, opts, tz, seq_num, context_id, msg); result_append_value(result, msg, LM_V_MESSAGE, escape); break; case M_SOURCE_IP: { gchar *ip; if (msg->saddr && (g_sockaddr_inet_check(msg->saddr) || #if ENABLE_IPV6 g_sockaddr_inet6_check(msg->saddr)) #else 0) #endif ) { gchar buf[MAX_SOCKADDR_STRING]; g_sockaddr_format(msg->saddr, buf, sizeof(buf), GSA_ADDRESS_ONLY); ip = buf; } else { ip = "127.0.0.1"; } result_append(result, ip, strlen(ip), escape); break; } case M_SEQNUM: { if (seq_num) { format_uint32_padded(result, 0, 0, 10, seq_num); } break; } case M_CONTEXT_ID: { if (context_id) { result_append(result, context_id, strlen(context_id), escape); } break; } case M_LOGHOST: { gsize hname_len; const gchar *hname = get_local_hostname(&hname_len); result_append(result, hname, hname_len, escape); break; } case M_SYSUPTIME: { GTimeVal ct; g_get_current_time(&ct); 
format_uint64_padded(result, 0, 0, 10, g_time_val_diff(&ct, &app_uptime) / 1000 / 10); break; } default: { /* year, month, day */ struct tm *tm, tm_storage; gchar buf[64]; gint length; time_t t; LogStamp *stamp, sstamp; glong zone_ofs; guint tmp_hour; if (id >= M_TIME_FIRST && id <= M_TIME_LAST) { stamp = &msg->timestamps[LM_TS_STAMP]; } else if (id >= M_TIME_FIRST + M_RECVD_OFS && id <= M_TIME_LAST + M_RECVD_OFS) { id -= M_RECVD_OFS; stamp = &msg->timestamps[LM_TS_RECVD]; } else if (id >= M_TIME_FIRST + M_STAMP_OFS && id <= M_TIME_LAST + M_STAMP_OFS) { id -= M_STAMP_OFS; stamp = &msg->timestamps[LM_TS_STAMP]; } else if (id >= M_TIME_FIRST + M_CSTAMP_OFS && id <= M_TIME_LAST + M_CSTAMP_OFS) { GTimeVal tv; id -= M_CSTAMP_OFS; cached_g_current_time(&tv); sstamp.tv_sec = tv.tv_sec; sstamp.tv_usec = tv.tv_usec; sstamp.zone_offset = -1; stamp = &sstamp; } else { g_assert_not_reached(); break; } /* try to use the following zone values in order: * destination specific timezone, if one is specified * message specific timezone, if one is specified * local timezone */ zone_ofs = (opts->time_zone_info[tz] != NULL ? 
time_zone_info_get_offset(opts->time_zone_info[tz], stamp->tv_sec) : stamp->zone_offset); if (zone_ofs == -1) zone_ofs = stamp->zone_offset; t = stamp->tv_sec + zone_ofs; cached_gmtime(&t, &tm_storage); tm = &tm_storage; switch (id) { case M_WEEK_DAY_ABBREV: g_string_append_len(result, weekday_names_abbrev[tm->tm_wday], 3); break; case M_WEEK_DAY_NAME: g_string_append(result, weekday_names[tm->tm_wday]); break; case M_WEEK_DAY: format_uint32_padded(result, 0, 0, 10, tm->tm_wday + 1); break; case M_WEEK: format_uint32_padded(result, 2, '0', 10, (tm->tm_yday - (tm->tm_wday - 1 + 7) % 7 + 7) / 7); break; case M_YEAR: format_uint32_padded(result, 4, '0', 10, tm->tm_year + 1900); break; case M_YEAR_DAY: format_uint32_padded(result, 3, '0', 10, tm->tm_yday + 1); break; case M_MONTH: format_uint32_padded(result, 2, '0', 10, tm->tm_mon + 1); break; case M_MONTH_WEEK: format_uint32_padded(result, 0, 0, 10, ((tm->tm_mday / 7) + ((tm->tm_wday > 0) && ((tm->tm_mday % 7) >= tm->tm_wday)))); break; case M_MONTH_ABBREV: g_string_append_len(result, month_names_abbrev[tm->tm_mon], 3); break; case M_MONTH_NAME: g_string_append(result, month_names[tm->tm_mon]); break; case M_DAY: format_uint32_padded(result, 2, '0', 10, tm->tm_mday); break; case M_HOUR: format_uint32_padded(result, 2, '0', 10, tm->tm_hour); break; case M_HOUR12: if (tm->tm_hour < 12) tmp_hour = tm->tm_hour; else tmp_hour = tm->tm_hour - 12; if (tmp_hour == 0) tmp_hour = 12; format_uint32_padded(result, 2, '0', 10, tmp_hour); break; case M_MIN: format_uint32_padded(result, 2, '0', 10, tm->tm_min); break; case M_SEC: format_uint32_padded(result, 2, '0', 10, tm->tm_sec); break; case M_MSEC: format_uint32_padded(result, 3, '0', 10, stamp->tv_usec/1000); break; case M_USEC: format_uint32_padded(result, 6, '0', 10, stamp->tv_usec); break; case M_AMPM: g_string_append(result, tm->tm_hour < 12 ? "AM" : "PM"); break; case M_DATE: case M_STAMP: case M_ISODATE: case M_FULLDATE: case M_UNIXTIME: { gint format = id == M_DATE ? 
TS_FMT_BSD : id == M_ISODATE ? TS_FMT_ISO : id == M_FULLDATE ? TS_FMT_FULL : id == M_UNIXTIME ? TS_FMT_UNIX : opts->ts_format; log_stamp_append_format(stamp, result, format, zone_ofs, opts->frac_digits); break; } case M_TZ: case M_TZOFFSET: length = format_zone_info(buf, sizeof(buf), zone_ofs); g_string_append_len(result, buf, length); break; } break; } }
void main_init(int argc, char* argv[]) { char** ptr; MyString job_queue_name; int argc_count = 1; for(ptr = argv + 1, argc_count = 1; argc_count<argc && *ptr; ptr++,argc_count++) { if(ptr[0][0] != '-') { usage(argv[0]); } switch(ptr[0][1]) { case 'n': Name = build_valid_daemon_name( *(++ptr) ); break; default: usage(argv[0]); } } // Tell Attrlist to publish the server time AttrList_setPublishServerTime( true ); // Initialize DaemonCore's use of ProcFamily. We do this so that we // launch a ProcD if necessary so that any Starters that we launch // for Local Universe jobs can share a single ProcD, instead of // each creating their own daemonCore->Proc_Family_Init(); #if defined(WANT_CONTRIB) && defined(WITH_MANAGEMENT) #if defined(HAVE_DLOPEN) // Intialization of the plugin manager, i.e. loading all // plugins, should be performed before the job queue log is // read so plugins have a chance to learn about all jobs // already in the queue ClassAdLogPluginManager::Load(); // Load all ScheddPlugins. In reality this doesn't do much // since initializing any plugin manager loads plugins for all // plugin manager. ScheddPluginManager::Load(); // Tell all ScheddPlugins to initialze themselves ScheddPluginManager::EarlyInitialize(); // Tell all plugins to initialize themselves ClassAdLogPluginManager::EarlyInitialize(); #endif #endif // Initialize all the modules scheduler.Init(); scheduler.Register(); // Initialize the job queue char *job_queue_param_name = param("JOB_QUEUE_LOG"); if (job_queue_param_name == NULL) { // the default place for the job_queue.log is in spool job_queue_name.sprintf( "%s/job_queue.log", Spool); } else { job_queue_name = job_queue_param_name; // convert char * to MyString free(job_queue_param_name); } // Make a backup of the job queue? 
if ( param_boolean_crufty("SCHEDD_BACKUP_SPOOL", false) ) { MyString hostname; UtcTime now(true); hostname = get_local_hostname(); MyString job_queue_backup; job_queue_backup.sprintf( "%s/job_queue.bak.%s.%ld", Spool, hostname.Value(), now.seconds() ); if ( copy_file( job_queue_name.Value(), job_queue_backup.Value() ) ) { dprintf( D_ALWAYS, "Failed to backup spool to '%s'\n", job_queue_backup.Value() ); } else { dprintf( D_FULLDEBUG, "Spool backed up to '%s'\n", job_queue_backup.Value() ); } } int max_historical_logs = param_integer( "MAX_JOB_QUEUE_LOG_ROTATIONS", DEFAULT_MAX_JOB_QUEUE_LOG_ROTATIONS ); InitJobQueue(job_queue_name.Value(),max_historical_logs); mark_jobs_idle(); // The below must happen _after_ InitJobQueue is called. if ( scheduler.autocluster.config() ) { // clear out auto cluster id attributes WalkJobQueue( (int(*)(ClassAd *))clear_autocluster_id ); } // // Update the SchedDInterval attributes in jobs if they // have it defined. This will be for JobDeferral and // CronTab jobs // WalkJobQueue( (int(*)(ClassAd *))::updateSchedDInterval ); // Initialize the dedicated scheduler stuff dedicated_scheduler.initialize(); // Do a timeout now at startup to get the ball rolling... scheduler.timeout(); #if defined(WANT_CONTRIB) && defined(WITH_MANAGEMENT) #if defined(HAVE_DLOPEN) // Tell all ScheddPlugins to initialze themselves ScheddPluginManager::Initialize(); // Tell all plugins to initialize themselves ClassAdLogPluginManager::Initialize(); #endif #endif }
int main(int argc, char *argv[]) { configuration_t configuration; int i, j; struct timeval tm; struct timezone tz; measurement_t *measurement; struct timeval next, wait; int subject_id, flow_id; unsigned long long packets, bytes; double mbps; char command[MAX_COMMAND+1]; char hostname_interface[MAX_HOSTNAME_INTERFACE+1]; /* DiMAPI connect string as "hostname:interface,..." */ struct timeval tv_start, tv_stop; /* to measure how fast mapi_read_result() responds */ int tv_diff_pkt, tv_diff_byte; /* time used by mapi_read_results() */ int tv_diff_threshold; /* 1 if threshold was reached */ mapi_results_t *pkt_counter_res; mapi_results_t *byte_counter_res; unsigned long long pkt_counter; unsigned long long byte_counter; int scope_size; double pkt_sec; /* seconds from previous packet result */ double byte_sec; /* seconds from previous byte result */ mapi_flow_info_t info; mapi_device_info_t dinfo; openlog("abw", LOG_PID, LOG_LOCAL0); syslog(LOG_DEBUG, "starting abw"); memset((void *)&configuration, 0, (size_t)(sizeof(configuration))); /* Create global configuration */ if ((configuration.global=malloc(sizeof(global_t)))==NULL) { fprintf(stderr, "%s: malloc() failed\n", __func__); return -1; } memset(configuration.global, 0, sizeof(global_t)); /* Create first subject, scope, parameters and measurement so that they can be filled-in by command-line options */ /* if ((configuration.subject=new_subject())==NULL) { fprintf(stderr, "%s: new_subject() failed\n", __func__); return -1; } if ((configuration.scope=new_scope())==NULL) { fprintf(stderr, "%s: new_subject() failed\n", __func__); return -1; } if ((configuration.parameters=new_parameters())==NULL) { fprintf(stderr, "%s: new_parameters() failed\n", __func__); return -1; } if ((configuration.measurement=new_measurement())==NULL) { fprintf(stderr, "%s: new_measurement() failed\n", __func__); return -1; } */ /* Read command line */ if (read_command_line(argc, argv, &configuration)<0) { fprintf(stderr, "%s: read_command_line() 
failed\n", __func__); return -1; } /* Read configuration file */ if (configuration.global->conf_filename) { if (read_conf_file(&configuration)<0) { fprintf(stderr, "%s: read_conf_file() failed\n", __func__); return -1; } } /* Fill-in local hostname */ if (get_local_hostname(&(configuration.global->hostname))<0) { fprintf(stderr, "%s: get_local_hostname() failed\n", __func__); return -1; } /* Check if specified values are within acceptable limits */ if (check_conf(&configuration)<0) { fprintf(stderr, "%s: check_conf() failed\n", __func__); exit(-1); } /* Print configuration */ if (debug) print_conf(&configuration); if (daemonize) { printf("Switching to daemon\n"); if (continue_as_daemon()<0) { fprintf(stderr, "%s: continue_as_daemon() failed\n", __func__); return -1; } printf("Continuing as daemon\n"); } /* * Create RRD files */ /* Go over all measurements */ measurement=configuration.measurement; while (measurement) { int parameters_id; char *filename; parameters_id = measurement->parameters_id; /* Go over all protocols */ j=0; while (protocols[j].protocol) { if ((filename= abw_rrd_create_filename(measurement->scope, parameters_id, protocols[j].protocol))==NULL) { fprintf(stderr, "%s: rrd_create_filename() failed\n", __func__); return -1; } if (abw_rrd_create_file(filename)<0) { fprintf(stderr, "%s: abw_rrd_create_file() failed\n", __func__); return -1; } j++; } /* Go over all protocols */ /* Go over all tracked protocols */ j=0; while (tracked_protocols[j].protocol) { if ((filename= abw_rrd_create_filename(measurement->scope, parameters_id, tracked_protocols[j].protocol))==NULL) { fprintf(stderr, "%s: rrd_create_filename() failed\n", __func__); return -1; } if (abw_rrd_create_file(filename)<0) { fprintf(stderr, "%s: abw_rrd_create_file() failed\n", __func__); return -1; } j++; } /* Go over all tracked protocols */ /* Create RRD file for "all" protocol (all traffic together) */ if ((filename= abw_rrd_create_filename(measurement->scope, parameters_id, "all"))==NULL) 
{ fprintf(stderr, "%s: rrd_create_filename() failed\n", __func__); return -1; } if (abw_rrd_create_file(filename)<0) { fprintf(stderr, "%s: abw_rrd_create_file() failed\n", __func__); return -1; } measurement=measurement->next; } /* while (measurement) */ /* * Create MAPI flows */ flow_id=0; /* Go over all measurements */ measurement=configuration.measurement; while (measurement) { /* Go over all monitored protocols */ i=0; while (measurement->protocols_array[i] && i<MAX_PROTOCOLS) { int parameters_id; char *protocol; /* Create data structure to maintain MAPI information */ if (flow_id>=MAX_FLOWS) { fprintf(stderr, "%s: more than %d flows requested\n", __func__, MAX_FLOWS); return -1; } if ((flow[flow_id]=new_flow())==NULL) { fprintf(stderr, "%s: new_flow() failed\n", __func__); return -1; } flow[flow_id]->measurement=measurement; flow[flow_id]->protocol=measurement->protocols_array[i]; parameters_id = measurement->parameters_id; protocol = measurement->protocols_array[i]; if ((flow[flow_id]->rrd_filename= abw_rrd_create_filename(measurement->scope, parameters_id, protocol))==NULL) { fprintf(stderr, "%s: rrd_create_filename() failed\n", __func__); return -1; } /* * If scope has only one subject and if hostname is "localhost" or * equal to local hostname, then use MAPI connect string (not DiMAPI) */ if (!(measurement->scope->subject[1]) && (!strcmp(measurement->scope->subject[0]->hostname, "localhost") || !strcmp(measurement->scope->subject[0]->hostname, configuration.global->hostname))) strcpy(hostname_interface, measurement->scope->subject[0]->interface); /* * Prepare DiMAPI connect string as hostname:interface, ... 
*/ else { j=0; hostname_interface[0]='\0'; while (measurement->scope->subject[j] && j<MAX_SUBJECTS) { /* Append comma "," */ if (hostname_interface[0]) { if (strlen(hostname_interface)+1>=MAX_HOSTNAME_INTERFACE) { fprintf(stderr, "%s: DiMAPI connect string is longer than %d characters\n", __func__, MAX_HOSTNAME_INTERFACE); return -1; } strcat(hostname_interface, ","); } /* Append next hostname:interface */ if (strlen(hostname_interface) + strlen(measurement->scope->subject[j]->hostname) + strlen(measurement->scope->subject[j]->interface) >= MAX_HOSTNAME_INTERFACE) { fprintf(stderr, "%s: DiMAPI connect string is longer than %d characters\n", __func__, MAX_HOSTNAME_INTERFACE); return -1; } sprintf(hostname_interface + strlen(hostname_interface), "%s:%s", measurement->scope->subject[j]->hostname, measurement->scope->subject[j]->interface); j++; } /* while (measurement->scope->subject[j] && j<MAX_SUBJECTS) */ } /* Creating DiMAPI connect string */ /* Create a new MAPI flow */ if (debug) printf("%s: mapi_create_flow(%s)\n", __func__, hostname_interface); if ((flow[flow_id]->fd=mapi_create_flow(hostname_interface))<0) { fprintf(stderr, "%s: mapi_create_flow(%s) failed\n", __func__, hostname_interface); fprintf(stderr, "%s: Do you run mapid daemon on the machine where you connect to?\n", __func__); fprintf(stderr, "%s: Do you run mapicommd daemon on the machine where you connect to? (if you are connecting to a non-local machine or to multiple machines)\n", __func__); return -1; } /* If this is a MAPI flow (not DiMAPI flow), then set MPLS and VLAN flags according to mapi.conf. 
Otherwise the flags were set in abw.conf */ if (!strchr(hostname_interface, ':')) { if (debug) printf("%s: MAPI flow on \"%s\", setting MPLS and VLAN flags from mapi.conf\n", __func__, hostname_interface); if ((mapi_get_flow_info(flow[flow_id]->fd, &info)) < 0){ fprintf(stderr, "%s: mapi_get_flow_info() failed\n", __func__); return -1; } if ((mapi_get_device_info(info.devid, &dinfo)) < 0) { fprintf(stderr, "%s: mapi_get_device_info() failed\n", __func__); return -1; } measurement->scope->mpls = dinfo.mpls; measurement->scope->vlan = dinfo.vlan; } else if (debug) printf("%s: DiMAPI flow on \"%s\", setting MPLS and VLAN flags from abw.conf\n", __func__, hostname_interface); /* Prepare header filter for this protocol */ if ((flow[flow_id]->tracked_protocol= protocol_filter(measurement->parameters->header_filter, flow[flow_id]->protocol, measurement->scope->mpls, measurement->scope->vlan, &(flow[flow_id]->header_filter)))<0) { fprintf(stderr, "%s: protocol_filter() failed\n", __func__); return -1; } if (debug) printf("measurement->parameters->header_filter: %s, flow[flow_id]->protocol: %s, flow[flow_id]->header_filter: %s, track_function: %s\n", (measurement->parameters->header_filter)?measurement->parameters->header_filter:"NULL", flow[flow_id]->protocol, (flow[flow_id]->header_filter)?flow[flow_id]->header_filter:"NULL", (flow[flow_id]->tracked_protocol)?tracked_protocols[flow[flow_id]->tracked_protocol-1].track_function:"none"); /* Filter based on input port, we can use port number in the first subject of the scope, because all subjects in a scope must have the same port number */ if (measurement->scope->subject[0]->port >= 0) { if ((flow[flow_id]->interface_fid=mapi_apply_function(flow[flow_id]->fd, "INTERFACE", measurement->scope->subject[0]->port))<0) { fprintf(stderr, "%s: INTERFACE failed\n", __func__); return -1; } } /* Note that BPF_FILTER uses compiled header filter that selects packets of the given protocol */ /* BPF_FILTER is applied if a) header_filter 
was specified in [parameters] section or b) protocol other than "all" and other than some that requires tracking was specified in [parameters] section or c) MPLS is used on links in this [scope] */ if (flow[flow_id]->header_filter) { if (debug) printf("%s: mapi_apply_function(%d, BPF_FILTER, \"%s\")\n", __func__, flow[flow_id]->fd, flow[flow_id]->header_filter); if ((flow[flow_id]->bpf_filter_fid= mapi_apply_function(flow[flow_id]->fd, "BPF_FILTER", flow[flow_id]->header_filter))<0) { fprintf(stderr, "%s: BPF_FILTER (\"%s\") failed\n", __func__, flow[flow_id]->header_filter); return -1; } } /* Track application protocol, BPF_FILTER could have been applied before */ if (flow[flow_id]->tracked_protocol) { if (debug) printf("%s: mapi_apply_function(%d, %s)\n", __func__, flow[flow_id]->fd, tracked_protocols[flow[flow_id]->tracked_protocol-1]. track_function); if ((flow[flow_id]->track_function_fid= mapi_apply_function(flow[flow_id]->fd, tracked_protocols[flow[flow_id]->tracked_protocol-1]. track_function))<0) { fprintf(stderr, "%s: tracking (%s) failed\n", __func__, tracked_protocols[flow[flow_id]->tracked_protocol-1]. 
track_function); return -1; } } /* Sampling */ if (measurement->parameters->sau_mode == 'd' && (unsigned int)(measurement->parameters->sau_threshold) != 1) { if ((flow[flow_id]->sample_fid= mapi_apply_function(flow[flow_id]->fd, "SAMPLE", measurement->parameters->sau_threshold, PERIODIC))<0) { fprintf(stderr, "%s: SAMPLE (PERIODIC, %.02f) failed\n", __func__, measurement->parameters->sau_threshold); return -1; } } else if (measurement->parameters->sau_mode == 'p' && (unsigned int)(measurement->parameters->sau_threshold) != 1) { if ((flow[flow_id]->sample_fid= mapi_apply_function(flow[flow_id]->fd, "SAMPLE", (measurement->parameters->sau_threshold)*100, PROBABILISTIC))<0) { fprintf(stderr, "%s: SAMPLE (PROBABILISTIC, %.02f) failed\n", __func__, (measurement->parameters->sau_threshold)*100); return -1; } } /* Payload searching */ if (measurement->parameters->payload_strings[0]) { if ((flow[flow_id]->str_search_fid= mapi_apply_function(flow[flow_id]->fd, "STR_SEARCH", measurement->parameters->payload_strings[0], 0, 0))<0) { fprintf(stderr, "%s: STR_SEARCH (%s) failed\n", __func__, measurement->parameters->payload_strings[0]); return -1; } } /* Counting packets and bytes */ if ((flow[flow_id]->pkt_counter_fid= mapi_apply_function(flow[flow_id]->fd, "PKT_COUNTER"))<0) { fprintf(stderr, "%s: PKT_COUNTER failed\n", __func__); return -1; } /* Simultaneous use of PKT_COUNTER and BYTE_COUNTER does not work with DAG4.3GE. 
Temporary hack: always use stflib version */ if ((flow[flow_id]->byte_counter_fid= mapi_apply_function(flow[flow_id]->fd, "stdflib:BYTE_COUNTER"))<0) { fprintf(stderr, "%s: BYTE_COUNTER failed\n", __func__); return -1; } /* Connect to flow */ if (!configuration.global->no_measure) { if (mapi_connect(flow[flow_id]->fd)<0) { fprintf(stderr, "%s: mapi_connect() (%s) failed\n", __func__, hostname_interface); return -1; } if ((scope_size=mapi_get_scope_size(flow[flow_id]->fd)) != flow[flow_id]->measurement->scope->subject_no) { fprintf(stderr, "%s: mapi_get_scope_size() returned %d for %d subjects\n", __func__, scope_size, flow[flow_id]->measurement->scope->subject_no); return -1; } } i++; flow_id++; } /* while (measurement->protocols_array[i] && i<MAX_PROTOCOLS) */ measurement=measurement->next; } /* while (measurement) */ if (configuration.global->no_measure || !configuration.measurement) return 0; /* Periodically get results from MAPI flows */ while (1) { if (gettimeofday(&tm, &tz)<0) { fprintf(stderr, "%s: gettimeofday() failed\n", __func__); return -1; } flow_id=0; while (flow[flow_id] && flow_id<MAX_FLOWS) { int scope_packets, scope_bytes; if (!configuration.global->no_stdout) { printf("%d %u.%u", flow[flow_id]->measurement->scope->id, (unsigned int)(tm.tv_sec), (unsigned int)(tm.tv_usec)); if (!configuration.global->stdout_simple) printf(" %s\n", flow[flow_id]->protocol); } gettimeofday(&tv_start, NULL); if ((pkt_counter_res=mapi_read_results(flow[flow_id]->fd, flow[flow_id]->pkt_counter_fid))==NULL) { fprintf(stderr, "%s: mapi_read_results() for flow %d failed\n", __func__, flow_id); return -1; } gettimeofday(&tv_stop, NULL); tv_diff_pkt=timestamp_diff(&tv_start, &tv_stop); gettimeofday(&tv_start, NULL); if ((byte_counter_res=mapi_read_results(flow[flow_id]->fd, flow[flow_id]->byte_counter_fid))==NULL) { fprintf(stderr, "%s: mapi_read_results() for flow %d failed\n", __func__, flow_id); return -1; } gettimeofday(&tv_stop, NULL); 
tv_diff_byte=timestamp_diff(&tv_start, &tv_stop); if (tv_diff_pkt>=TV_DIFF_THRESHOLD || tv_diff_byte>=TV_DIFF_THRESHOLD) tv_diff_threshold=1; else tv_diff_threshold=0; if (tv_diff_pkt>=TV_DIFF_THRESHOLD) syslog(LOG_DEBUG, "mapi_read_result() for PKT_COUNTER takes %d us for measurement ID %d and protocol %s (threshold %d us reached)", tv_diff_pkt, flow[flow_id]->measurement->id, flow[flow_id]->protocol, TV_DIFF_THRESHOLD); if (tv_diff_byte>=TV_DIFF_THRESHOLD) syslog(LOG_DEBUG, "mapi_read_result() for BYTE_COUNTER takes %d us for measurement ID %d and protocol %s (threshold %d us reached)", tv_diff_byte, flow[flow_id]->measurement->id, flow[flow_id]->protocol, TV_DIFF_THRESHOLD); scope_size = flow[flow_id]->measurement->scope->subject_no; scope_packets=0; scope_bytes=0; for (subject_id=0; subject_id<scope_size; subject_id++) { pkt_counter= *((unsigned long long*)(pkt_counter_res[subject_id].res)); byte_counter= *((unsigned long long*)(byte_counter_res[subject_id].res)); packets=pkt_counter - flow[flow_id]->pkt_counter[subject_id]; bytes=byte_counter - flow[flow_id]->byte_counter[subject_id]; mbps=(double)bytes*8/1000000; flow[flow_id]->pkt_counter[subject_id]=pkt_counter; flow[flow_id]->byte_counter[subject_id]=byte_counter; /* Determine seconds from previous result */ if (flow[flow_id]->pkt_ts[subject_id]) pkt_sec=(double)(pkt_counter_res[subject_id].ts - flow[flow_id]->pkt_ts[subject_id])/1000000; else pkt_sec= flow[flow_id]->measurement->parameters->interval.tv_sec + (double)(flow[flow_id]->measurement->parameters->interval.tv_usec)/1000000; if (flow[flow_id]->byte_ts[subject_id]) byte_sec=(double)(byte_counter_res[subject_id].ts - flow[flow_id]->byte_ts[subject_id])/1000000; else byte_sec= flow[flow_id]->measurement->parameters->interval.tv_sec + (double)(flow[flow_id]->measurement->parameters->interval.tv_usec)/1000000; scope_packets+=(packets/pkt_sec); scope_bytes+=(bytes/byte_sec); flow[flow_id]->pkt_ts[subject_id]= pkt_counter_res[subject_id].ts; 
flow[flow_id]->byte_ts[subject_id]= byte_counter_res[subject_id].ts; if (tv_diff_threshold) { syslog(LOG_DEBUG, "%s:%s: %.02f seconds from previous result", flow[flow_id]->measurement->scope->subject[subject_id]->hostname, flow[flow_id]->measurement->scope->subject[subject_id]->interface, byte_sec); } /* Print result */ if (!configuration.global->no_stdout) { if (configuration.global->stdout_simple) printf(" %0.2f %0.2f %0.2f", packets/pkt_sec, bytes/byte_sec, mbps/byte_sec); else printf(" %0.2f packets/s, %0.2f bytes/s, %0.2f Mb/s, time %uus/%uus, interval %0.2fs/%0.2fs\n", packets/pkt_sec, bytes/byte_sec, mbps/byte_sec, tv_diff_pkt, tv_diff_byte, pkt_sec, byte_sec); } } /* for (subject_id=0; subject_id++; subject_id<scope_size) */ if (!configuration.global->no_stdout) printf("\n"); /* If interval is at least 1 second, then store results to RRD file */ if (flow[flow_id]->measurement->parameters->interval.tv_sec) { sprintf(command, "rrdtool update %s %u:%lu:%lu:%.6f", flow[flow_id]->rrd_filename, (unsigned int)(tm.tv_sec), (unsigned long)(scope_packets), (unsigned long)(scope_bytes), (double)scope_bytes*8/1000000); if (configuration.global->debug > 1) syslog(LOG_DEBUG, "system(%s)", command); if (tm.tv_sec == flow[flow_id]->rrd_ts) syslog(LOG_ERR, "duplicate RRD timestamp %u for scope %d\n", (unsigned int)(tm.tv_sec), flow[flow_id]->measurement->scope->id); else flow[flow_id]->rrd_ts=tm.tv_sec; if (debug) printf("%s: system(%s)\n", __func__, command); if (system(command)<0) { fprintf(stderr, "%s: command(%s) failed\n", __func__, command); return -1; } } flow_id++; } /* while (flow[flow_id] && flow_id<MAX_FLOWS) */ abw_next_timestamp(&(configuration.measurement->parameters->interval), &next, &wait); if (!configuration.global->no_stdout && !configuration.global->stdout_simple) { printf("next.tv_sec: %d, next.tv_usec: %d, wait.tv_sec: %d, wait.tv_usec: %d\n", (int)(next.tv_sec), (int)(next.tv_usec), (int)(wait.tv_sec), (int)(wait.tv_usec)); 
printf("===============================================================================\n"); } usleep(wait.tv_sec * 1000000 + wait.tv_usec); } /* while (1) */ return 0; } /* main() */
// Command handler: receive a (domain, password) pair over a reliable
// socket and store it as the pool password credential for
// POOL_PASSWORD_USERNAME@<domain>; a NULL password means "delete the
// stored credential" instead.  Replies to the peer with the integer
// result of the store/delete operation.  The void* context and the
// int command id are unused.
void store_pool_cred_handler(void *, int /*i*/, Stream *s)
{
	int result;
	char *pw = NULL;       // password received off the wire (NULL => delete)
	char *domain = NULL;   // domain received off the wire
	MyString username = POOL_PASSWORD_USERNAME "@";

	// Refuse UDP outright: the pool password may only arrive over a
	// reliable (connection-oriented) stream.
	if (s->type() != Stream::reli_sock) {
		dprintf(D_ALWAYS, "ERROR: pool password set attempt via UDP\n");
		return;
	}

	// if we're the CREDD_HOST, make sure any password setting is done locally
	// (since knowing what the pool password is on the CREDD_HOST means being
	// able to fetch users' passwords)
	char *credd_host = param("CREDD_HOST");
	if (credd_host) {
		MyString my_fqdn_str = get_local_fqdn();
		MyString my_hostname_str = get_local_hostname();
		MyString my_ip_str = get_local_ipaddr().to_ip_string();

		// figure out if we're on the CREDD_HOST
		// (CREDD_HOST may be configured as an FQDN, a short hostname,
		// or a literal IP, so compare against all three; hostname
		// comparisons are case-insensitive, the IP compare is exact)
		bool on_credd_host = (strcasecmp(my_fqdn_str.Value(), credd_host) == MATCH);
		on_credd_host = on_credd_host || (strcasecmp(my_hostname_str.Value(), credd_host) == MATCH);
		on_credd_host = on_credd_host || (strcmp(my_ip_str.Value(), credd_host) == MATCH);

		if (on_credd_host) {
				// we're the CREDD_HOST; make sure the source address matches ours
				// (i.e. the request originated from this machine)
			const char *addr = ((ReliSock*)s)->peer_ip_str();
			if (!addr || strcmp(my_ip_str.Value(), addr)) {
				dprintf(D_ALWAYS, "ERROR: attempt to set pool password remotely\n");
				free(credd_host);
				return;
			}
		}
		free(credd_host);
	}

	// Read the request: domain string, then password string, then EOM.
	// NOTE(review): s->code(char*&) appears to allocate the buffers that
	// are released with free() at spch_cleanup -- assumes a
	// malloc-compatible allocator; confirm against the Stream API.
	s->decode();
	if (!s->code(domain) || !s->code(pw) || !s->end_of_message()) {
		dprintf(D_ALWAYS, "store_pool_cred: failed to receive all parameters\n");
		goto spch_cleanup;
	}
	if (domain == NULL) {
		dprintf(D_ALWAYS, "store_pool_cred_handler: domain is NULL\n");
		goto spch_cleanup;
	}

	// construct the full pool username: POOL_PASSWORD_USERNAME "@" + domain
	username += domain;

	// do the real work: a present password is stored (ADD_MODE), an
	// absent one deletes the existing credential (DELETE_MODE)
	if (pw) {
		result = store_cred_service(username.Value(), pw, ADD_MODE);
		// scrub the plaintext password from memory as soon as it is stored
		SecureZeroMemory(pw, strlen(pw));
	} else {
		result = store_cred_service(username.Value(), NULL, DELETE_MODE);
	}

	// Send the operation's result code back to the requester.
	s->encode();
	if (!s->code(result)) {
		dprintf(D_ALWAYS, "store_pool_cred: Failed to send result.\n");
		goto spch_cleanup;
	}
	if (!s->end_of_message()) {
		dprintf(D_ALWAYS, "store_pool_cred: Failed to send end of message.\n");
	}

	// Common exit path: release the wire-allocated buffers (reached both
	// on success and on every early protocol failure above).
spch_cleanup:
	if (pw) free(pw);
	if (domain) free(domain);
}