/* Print some basic latency/rate information to assist in debugging. */
static void
acpi_hpet_test(struct acpi_hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct timeval b0, b1, b2;
	struct timespec ts;

	microuptime(&b0);
	microuptime(&b0);
	microuptime(&b1);
	u1 = bus_space_read_4(acpi_hpet_bst, acpi_hpet_bsh, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++) {
		u2 = bus_space_read_4(acpi_hpet_bst, acpi_hpet_bsh,
		    HPET_MAIN_COUNTER);
	}
	microuptime(&b2);
	u2 = bus_space_read_4(acpi_hpet_bst, acpi_hpet_bsh, HPET_MAIN_COUNTER);

	timevalsub(&b2, &b1);
	timevalsub(&b1, &b0);
	timevalsub(&b2, &b1);
	TIMEVAL_TO_TIMESPEC(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}
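Nearly all of the snippets collected here rely on the 4.4BSD-style two-argument timevalsub(), which subtracts in place and renormalizes tv_usec into [0, 1000000). A minimal standalone sketch of those semantics (the kernel's real helper lives in <sys/time.h>/kern_time.c; this userland copy is for illustration only; the timed snippets further down use a different, three-argument convention, sketched after the first mchgdate()):

#include <stdio.h>
#include <sys/time.h>

/* In-place t1 -= t2, normalizing tv_usec into [0, 1000000). */
static void
timevalsub(struct timeval *t1, const struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
}

int
main(void)
{
	struct timeval a = { 5, 100000 }, b = { 2, 900000 };

	timevalsub(&a, &b);	/* 5.100000 - 2.900000 */
	printf("%ld.%06ld\n", (long)a.tv_sec, (long)a.tv_usec);	/* 2.200000 */
	return (0);
}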
/*
 * does not implement security features of kern_time.c:settime()
 */
void
afs_osi_SetTime(osi_timeval_t *atv)
{
#ifdef AFS_FBSD50_ENV
	printf("afs attempted to set clock; use \"afsd -nosettime\"\n");
#else
	struct timespec ts;
	struct timeval tv, delta;
	int s;

	AFS_GUNLOCK();
	s = splclock();
	microtime(&tv);
	delta = *atv;
	timevalsub(&delta, &tv);
	ts.tv_sec = atv->tv_sec;
	ts.tv_nsec = atv->tv_usec * 1000;
	set_timecounter(&ts);
	(void)splsoftclock();
	lease_updatetime(delta.tv_sec);
	splx(s);
	resettodr();
	AFS_GLOCK();
#endif
}
/*
 * bfq_slow_tdio(): decide whether a tdio is slow
 *
 * This function decides whether a tdio is slow by the speed
 * estimated from the current time slice start time: if the
 * tdio is not fast enough to consume its budget (or 2/3 of
 * its budget) within the time slice, it is judged slow.
 *
 * Called by bfq_expire()
 *
 * lock:
 *	THREAD_IO_LOCK is expected to be held.
 * refcount:
 *	none
 */
static int
bfq_slow_tdio(struct bfq_disk_ctx *bfq_diskctx, struct bfq_thread_io *bfq_tdio)
{
	/*
	 * A tdio is considered slow if it cannot finish its budget
	 * at its current average speed.
	 */
	uint64_t usec_elapsed, service_received, speed;
	int expect;
	struct timeval tv = bfq_tdio->last_request_done_time;

	timevalsub(&tv, &bfq_tdio->service_start_time);
	usec_elapsed = (uint64_t)(1000000 * (uint64_t)tv.tv_sec + tv.tv_usec);

	/* discard absurd values */
	if (usec_elapsed < 20000)
		return 0;

	service_received = (uint64_t)bfq_tdio->service_received <<
	    BFQ_FIXPOINT_SHIFT;
	speed = service_received / usec_elapsed;
	expect = (speed * BFQ_SLICE_TIMEOUT * (1000 * 1000 / hz)) >>
	    BFQ_FIXPOINT_SHIFT;
	if (expect < 0) {
		dsched_debug(BFQ_DEBUG_NORMAL,
		    "BFQ: overflow on calculating slow_tdio\n");
		return 0;
	}

	if (expect < bfq_tdio->budget * 2 / 3) {
		dsched_debug(BFQ_DEBUG_NORMAL,
		    "BFQ: %p is judged slow\n", bfq_tdio);
		return 1;
	}

	return 0;
}
/*
 * bfq_update_peak_rate(): update the peak disk speed by sampling
 * the throughput within a time slice.
 *
 * lock:
 *	BFQ_LOCK is expected to be held
 * refcount:
 *	none
 *
 * Caller: bfq_expire()
 */
static void
bfq_update_peak_rate(struct bfq_disk_ctx *bfq_diskctx,
    struct bfq_thread_io *bfq_tdio)
{
	struct timeval tv = bfq_tdio->last_request_done_time;
	uint64_t usec, service_received, peak_rate;

	timevalsub(&tv, &bfq_tdio->service_start_time);
	usec = (uint64_t)(1000000 * (uint64_t)tv.tv_sec + tv.tv_usec);

	/* discard absurd values */
	if (usec < 2000 || usec > (BFQ_SLICE_TIMEOUT * (1000 / hz) * 1000)) {
		dsched_debug(BFQ_DEBUG_NORMAL,
		    "BFQ: absurd interval for peak rate\n");
		return;
	}

	service_received = (uint64_t)bfq_tdio->service_received <<
	    BFQ_FIXPOINT_SHIFT;
	peak_rate = service_received / usec;
	bfq_diskctx->bfq_peak_rate =
	    (peak_rate + 7 * bfq_diskctx->bfq_peak_rate) / 8;
	bfq_diskctx->bfq_peak_rate_samples++;

	/* update the max_budget according to the peak rate */
	if (bfq_diskctx->bfq_peak_rate_samples > BFQ_VALID_MIN_SAMPLES) {
		bfq_diskctx->bfq_peak_rate_samples = BFQ_VALID_MIN_SAMPLES;
		/*
		 * if the auto max budget adjustment is disabled,
		 * bfq_max_budget will always be BFQ_DEFAULT_MAX_BUDGET
		 */
		if (bfq_diskctx->bfq_flag & BFQ_FLAG_AUTO_MAX_BUDGET) {
			bfq_diskctx->bfq_max_budget =
			    (uint32_t)((BFQ_SLICE_TIMEOUT * (1000 / hz) *
			    bfq_diskctx->bfq_peak_rate * 1000) >>
			    BFQ_FIXPOINT_SHIFT);
			dsched_debug(BFQ_DEBUG_NORMAL,
			    "max budget updated to %d\n",
			    bfq_diskctx->bfq_max_budget);
		}
	}
}
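Both BFQ functions above estimate throughput in fixed point: service bytes are shifted left by BFQ_FIXPOINT_SHIFT before dividing by elapsed microseconds, and shifted back after multiplying by the slice length. A standalone sketch of that arithmetic (the constant values here are assumptions for illustration, not DragonFly's actual definitions):

#include <stdio.h>
#include <stdint.h>

#define BFQ_FIXPOINT_SHIFT	8	/* assumed value, for illustration */

int
main(void)
{
	uint64_t usec_elapsed = 50000;			/* 50 ms observed */
	uint64_t service_received = 4 * 1024 * 1024;	/* 4 MiB served */
	uint64_t slice_usec = 100000;			/* hypothetical slice */

	/* speed in (bytes << SHIFT) per microsecond, as in bfq_slow_tdio() */
	uint64_t speed = (service_received << BFQ_FIXPOINT_SHIFT) /
	    usec_elapsed;

	/* bytes the tdio is expected to serve in one full slice */
	uint64_t expect = (speed * slice_usec) >> BFQ_FIXPOINT_SHIFT;

	printf("speed=%llu fp-bytes/us, expect=%llu bytes/slice\n",
	    (unsigned long long)speed, (unsigned long long)expect);
	return (0);
}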
static void
perf_stop(struct perf *perf)
{
	gettimeofday(&perf->stop, NULL);

	ptop = perf->next;
	if (ptop) {
		ptop->as_req += perf->as_req;
		ptop->tgs_req += perf->tgs_req;
	}

	timevalsub(&perf->stop, &perf->start);

	printf("time: %lu.%06lu\n",
	    (unsigned long)perf->stop.tv_sec,
	    (unsigned long)perf->stop.tv_usec);

#define USEC_PER_SEC 1000000

	if (perf->as_req) {
		double as_ps = 0.0;
		as_ps = (perf->as_req * USEC_PER_SEC) /
		    (double)((perf->stop.tv_sec * USEC_PER_SEC) +
		    perf->stop.tv_usec);
		printf("as-req/s %.2lf (total %lu requests)\n",
		    as_ps, perf->as_req);
	}

	if (perf->tgs_req) {
		double tgs_ps = 0.0;
		tgs_ps = (perf->tgs_req * USEC_PER_SEC) /
		    (double)((perf->stop.tv_sec * USEC_PER_SEC) +
		    perf->stop.tv_usec);
		printf("tgs-req/s %.2lf (total %lu requests)\n",
		    tgs_ps, perf->tgs_req);
	}
}
/*
 * change the system date on the master
 */
static void
mchgdate(struct tsp *msg)
{
	char tname[MAXHOSTNAMELEN];
	char olddate[32];
	struct timeval otime, ntime;

	strlcpy(tname, msg->tsp_name, sizeof(tname));

	xmit(TSP_DATEACK, msg->tsp_seq, &from);

	strlcpy(olddate, date(), sizeof(olddate));

	/* adjust time for residence on the queue */
	gettimeofday(&otime, 0);
	adj_msg_time(msg, &otime);

	timevalsub(&ntime, &msg->tsp_time, &otime);
	if (ntime.tv_sec < MAXADJ && ntime.tv_sec > -MAXADJ) {
		/*
		 * do not change the clock if we can adjust it
		 */
		dictate = 3;
		synch(tvtomsround(ntime));
	} else {
		logwtmp("|", "date", "");
		settimeofday(&msg->tsp_time, 0);
		logwtmp("{", "date", "");
		spreadtime();
	}

	syslog(LOG_NOTICE, "date changed by %s from %s", tname, olddate);
}
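Unlike the kernel helper sketched earlier, timed's private timevalsub() takes three arguments: the difference is written to the first argument rather than computed in place. A hedged sketch of that convention (named timevalsub3 here to avoid clashing with the system macro; the normalization logic is assumed to match the two-argument form):

#include <stdio.h>
#include <sys/time.h>

/* timed-style three-argument subtract: *res = *a - *b, normalized. */
static void
timevalsub3(struct timeval *res, const struct timeval *a,
    const struct timeval *b)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_usec = a->tv_usec - b->tv_usec;
	if (res->tv_usec < 0) {
		res->tv_sec--;
		res->tv_usec += 1000000;
	}
}

int
main(void)
{
	struct timeval msg_time = { 100, 0 }, otime = { 98, 750000 }, ntime;

	timevalsub3(&ntime, &msg_time, &otime);
	printf("%ld.%06ld\n",
	    (long)ntime.tv_sec, (long)ntime.tv_usec);	/* 1.250000 */
	return (0);
}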
static void
endtesting(void)
{
	gettimeofday(&time2, NULL);
	timevalsub(&time2, &time1);
	printf("timing: %ld.%06ld\n",
	    (long)time2.tv_sec, (long)time2.tv_usec);
}
static int
pmtimer_suspend(device_t dev)
{
	microtime(&diff_time);
	inittodr(0);
	microtime(&suspend_time);
	timevalsub(&diff_time, &suspend_time);
	return (0);
}
static int
dumpheader(struct ktr_header *kth)
{
	static char unknown[64];
	static struct timeval prevtime, temp;
	const char *type;
	int col;

	switch (kth->ktr_type) {
	case KTR_SYSCALL:
		type = "CALL";
		break;
	case KTR_SYSRET:
		type = "RET ";
		break;
	case KTR_NAMEI:
		type = "NAMI";
		break;
	case KTR_GENIO:
		type = "GIO ";
		break;
	case KTR_PSIG:
		type = "PSIG";
		break;
	case KTR_CSW:
		type = "CSW ";
		break;
	case KTR_USER:
		type = "USER";
		break;
	default:
		(void)sprintf(unknown, "UNKNOWN(%d)", kth->ktr_type);
		type = unknown;
	}

	if (kth->ktr_tid || (kth->ktr_flags & KTRH_THREADED) || fixedformat)
		col = printf("%5d:%-4d", kth->ktr_pid, kth->ktr_tid);
	else
		col = printf("%5d", kth->ktr_pid);
	col += printf(" %-8.*s ", MAXCOMLEN, kth->ktr_comm);
	if (timestamp) {
		if (timestamp == 2) {
			temp = kth->ktr_time;
			timevalsub(&kth->ktr_time, &prevtime);
			prevtime = temp;
		}
		col += printf("%ld.%06ld ",
		    kth->ktr_time.tv_sec, kth->ktr_time.tv_usec);
	}
	col += printf("%s ", type);
	return col;
}
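With timestamp == 2 (kdump's relative-timestamp mode) each record is printed as an offset from the previous one: the absolute stamp is saved in temp, the delta is computed in place, and prevtime is updated. A small userland sketch of the same pattern using the standard timersub() macro (a BSD extension in <sys/time.h>; glibc exposes it under _DEFAULT_SOURCE):

#include <stdio.h>
#include <sys/time.h>

/* kdump -r style: print each event's offset from the previous one. */
static struct timeval prevtime;

static void
print_relative(struct timeval stamp)
{
	struct timeval delta;

	timersub(&stamp, &prevtime, &delta);	/* delta = stamp - prevtime */
	prevtime = stamp;			/* remember the absolute stamp */
	printf("%ld.%06ld\n", (long)delta.tv_sec, (long)delta.tv_usec);
}

int
main(void)
{
	struct timeval t1 = { 10, 500000 }, t2 = { 12, 250000 };

	print_relative(t1);	/* 10.500000 (prevtime starts at zero) */
	print_relative(t2);	/* 1.750000 */
	return (0);
}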
static void
time_encryption(krb5_context context, size_t size, krb5_enctype etype,
    int iterations)
{
	struct timeval tv1, tv2;
	krb5_error_code ret;
	krb5_keyblock key;
	krb5_crypto crypto;
	krb5_data data;
	char *etype_name;
	void *buf;
	int i;

	ret = krb5_generate_random_keyblock(context, etype, &key);
	if (ret)
		krb5_err(context, 1, ret, "krb5_generate_random_keyblock");

	ret = krb5_enctype_to_string(context, etype, &etype_name);
	if (ret)
		krb5_err(context, 1, ret, "krb5_enctype_to_string");

	buf = malloc(size);
	if (buf == NULL)
		krb5_errx(context, 1, "out of memory");
	memset(buf, 0, size);

	ret = krb5_crypto_init(context, &key, 0, &crypto);
	if (ret)
		krb5_err(context, 1, ret, "krb5_crypto_init");

	gettimeofday(&tv1, NULL);

	for (i = 0; i < iterations; i++) {
		ret = krb5_encrypt(context, crypto, 0, buf, size, &data);
		if (ret)
			krb5_err(context, 1, ret, "encrypt: %d", i);
		krb5_data_free(&data);
	}

	gettimeofday(&tv2, NULL);

	timevalsub(&tv2, &tv1);

	printf("%s size: %7lu iterations: %d time: %3ld.%06ld\n",
	    etype_name, (unsigned long)size, iterations,
	    (long)tv2.tv_sec, (long)tv2.tv_usec);

	free(buf);
	free(etype_name);

	krb5_crypto_destroy(context, crypto);
	krb5_free_keyblock_contents(context, &key);
}
static int
pmtimer_suspend(device_t dev)
{
	int pl;

	pl = splsoftclock();
	microtime(&diff_time);
	inittodr(0);
	microtime(&suspend_time);
	timevalsub(&diff_time, &suspend_time);
	splx(pl);
	return (0);
}
static int
throttle_io_will_be_throttled_internal(int lowpri_window_msecs,
    void *throttle_info)
{
	struct _throttle_io_info_t *info = throttle_info;
	struct timeval elapsed;
	int elapsed_msecs;

	microuptime(&elapsed);
	timevalsub(&elapsed, &info->last_normal_IO_timestamp);
	elapsed_msecs = elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000;

	if (lowpri_window_msecs == -1)	// use the max waiting time
		lowpri_window_msecs = lowpri_max_waiting_msecs;

	return elapsed_msecs < lowpri_window_msecs;
}
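The throttle check reduces the elapsed timeval to whole milliseconds before comparing it with the window. A userland sketch of the same conversion (gettimeofday() stands in for the kernel's microuptime() here):

#include <stdio.h>
#include <sys/time.h>

/* Convert an elapsed struct timeval to whole milliseconds, as the
 * throttle check above does. */
static int
tv_to_msecs(struct timeval elapsed)
{
	return (elapsed.tv_sec * 1000 + elapsed.tv_usec / 1000);
}

int
main(void)
{
	struct timeval start, now, elapsed;

	gettimeofday(&start, NULL);
	/* ... some work happens here ... */
	gettimeofday(&now, NULL);
	timersub(&now, &start, &elapsed);	/* elapsed = now - start */
	printf("%d ms\n", tv_to_msecs(elapsed));
	return (0);
}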
static void
racctd(void)
{
	struct thread *td;
	struct proc *p;
	struct timeval wallclock;
	uint64_t runtime;

	for (;;) {
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			if (p->p_state != PRS_NORMAL)
				continue;
			if (p->p_flag & P_SYSTEM)
				continue;

			microuptime(&wallclock);
			timevalsub(&wallclock, &p->p_stats->p_start);
			PROC_LOCK(p);
			PROC_SLOCK(p);
			FOREACH_THREAD_IN_PROC(p, td) {
				ruxagg(p, td);
				thread_lock(td);
				thread_unlock(td);
			}
			runtime = cputick2usec(p->p_rux.rux_runtime);
			PROC_SUNLOCK(p);
#ifdef notyet
			KASSERT(runtime >= p->p_prev_runtime,
			    ("runtime < p_prev_runtime"));
#else
			if (runtime < p->p_prev_runtime)
				runtime = p->p_prev_runtime;
#endif
			p->p_prev_runtime = runtime;
			mtx_lock(&racct_lock);
			racct_set_locked(p, RACCT_CPU, runtime);
			racct_set_locked(p, RACCT_WALLCLOCK,
			    wallclock.tv_sec * 1000000 + wallclock.tv_usec);
			mtx_unlock(&racct_lock);
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		pause("-", hz);
	}
}
/*
 * bfq_bio_done(): .bio_done callback of the bfq policy
 *
 * Called after a bio is done, (by request_polling_biodone of dsched).
 * This function judges whether a thread has used up its time slice, and
 * if so, it will set the maybe_timeout flag in the bfq_tdio structure. Any
 * further action of that thread or the bfq scheduler will cause the
 * thread to be expired. (in bfq_queue() or in bfq_dequeue())
 *
 * This function requires the bfq_tdio pointer of the thread that pushes
 * bp to be stored by dsched_set_bio_priv() earlier. Currently it is
 * stored when bfq_queue() is called.
 *
 * lock: none. This function CANNOT be blocked by any lock
 *
 * refcount:
 *	the corresponding tdio's refcount should decrease by 1 after
 *	this function call. The counterpart increasing is in bfq_queue().
 *	For each bio pushed down, we increase the refcount of the pushing
 *	tdio.
 */
static void
bfq_bio_done(struct bio *bp)
{
	struct disk *dp = dsched_get_bio_dp(bp);
	struct bfq_thread_io *bfq_tdio = dsched_get_bio_priv(bp);
	struct bfq_disk_ctx *bfq_diskctx = dsched_get_disk_priv(dp);
	struct timeval tv;
	int ticks_expired;

	KKASSERT(bfq_tdio);

	dsched_thread_io_ref(&bfq_tdio->head);

	atomic_add_int(&bfq_tdio->bio_completed, 1);

	/* the tdio has already expired */
	if (bfq_tdio != bfq_diskctx->bfq_active_tdio)
		goto rtn;
	atomic_add_int(&bfq_tdio->service_received, BIO_SIZE(bp));

	/* current time */
	getmicrotime(&tv);
	bfq_tdio->last_request_done_time = tv;
	timevalsub(&tv, &bfq_tdio->service_start_time);
	ticks_expired = tvtohz_high(&tv);

	/* the thread has run out of its time slice */
	if ((ticks_expired != 0x7fffffff) &&
	    (ticks_expired >= BFQ_SLICE_TIMEOUT)) {
		/*
		 * we cannot block here, so just set a flag
		 */
#if 0
		bfq_tdio->maybe_timeout = 1;
#endif
		if (atomic_cmpset_int(&bfq_tdio->maybe_timeout, 0, 1)) {
			bfq_update_avg_time_slice(bfq_diskctx, tv);
			dsched_debug(BFQ_DEBUG_VERBOSE,
			    "BFQ: %p may time out\n", bfq_tdio);
		}
	}
rtn:
	dsched_thread_io_unref(&bfq_tdio->head); /* ref'ed in this function */
	dsched_thread_io_unref(&bfq_tdio->head); /* ref'ed in queue() */
}
isc_result_t
isc_mutex_unlock_profile(isc_mutex_t *mp, const char *file, int line)
{
	struct timeval unlock_t;

	UNUSED(file);
	UNUSED(line);

	if (mp->stats->cur_locker != NULL) {
		gettimeofday(&unlock_t, NULL);
		timevalsub(&unlock_t, &mp->stats->lock_t);
		timevaladd(&mp->stats->locked_total, &unlock_t);
		timevaladd(&mp->stats->cur_locker->locked_total, &unlock_t);
		mp->stats->cur_locker = NULL;
	}

	return ((pthread_mutex_unlock((&mp->mutex)) == 0) ?
	    ISC_R_SUCCESS : ISC_R_UNEXPECTED);
}
static int
pmtimer_resume(device_t dev)
{
	int pl;
	u_int second, minute, hour;
	struct timeval resume_time, tmp_time;

	/* modified for adjkerntz */
	pl = splsoftclock();
	timer_restore();		/* restore all the timers */
	inittodr(0);			/* adjust time to RTC */
	microtime(&resume_time);
	getmicrotime(&tmp_time);
	timevaladd(&tmp_time, &diff_time);

#ifdef FIXME
	/* XXX THIS DOESN'T WORK!!! */
	time = tmp_time;
#endif

#ifdef PMTIMER_FIXUP_CALLTODO
	/* Calculate the delta time suspended */
	timevalsub(&resume_time, &suspend_time);
	/* Fixup the calltodo list with the delta time. */
	adjust_timeout_calltodo(&resume_time);
#endif /* PMTIMER_FIXUP_CALLTODO */
	splx(pl);
#ifndef PMTIMER_FIXUP_CALLTODO
	second = resume_time.tv_sec - suspend_time.tv_sec;
#else /* PMTIMER_FIXUP_CALLTODO */
	/*
	 * We've already calculated resume_time to be the delta between
	 * the suspend and the resume.
	 */
	second = resume_time.tv_sec;
#endif /* PMTIMER_FIXUP_CALLTODO */
	hour = second / 3600;
	second %= 3600;
	minute = second / 60;
	second %= 60;
	log(LOG_NOTICE, "wakeup from sleeping state (slept %02d:%02d:%02d)\n",
	    hour, minute, second);
	return (0);
}
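The wakeup message decomposes the slept interval into hours, minutes, and seconds with successive division and remainder. The same arithmetic, runnable on its own (the 3725-second value is just an example):

#include <stdio.h>

/* Split an elapsed second count into hh:mm:ss, as the wakeup log does. */
int
main(void)
{
	unsigned int second = 3725, minute, hour;	/* hypothetical sleep */

	hour = second / 3600;
	second %= 3600;
	minute = second / 60;
	second %= 60;
	printf("slept %02u:%02u:%02u\n", hour, minute, second); /* 01:02:05 */
	return (0);
}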
isc_result_t
isc_mutex_lock_profile(isc_mutex_t *mp, const char *file, int line)
{
	struct timeval prelock_t;
	struct timeval postlock_t;
	isc_mutexlocker_t *locker = NULL;
	int i;

	gettimeofday(&prelock_t, NULL);

	if (pthread_mutex_lock(&mp->mutex) != 0)
		return (ISC_R_UNEXPECTED);

	gettimeofday(&postlock_t, NULL);
	mp->stats->lock_t = postlock_t;

	timevalsub(&postlock_t, &prelock_t);

	mp->stats->count++;
	timevaladd(&mp->stats->wait_total, &postlock_t);

	for (i = 0; i < ISC_MUTEX_MAX_LOCKERS; i++) {
		if (mp->stats->lockers[i].file == NULL) {
			locker = &mp->stats->lockers[i];
			locker->file = file;
			locker->line = line;
			break;
		} else if (mp->stats->lockers[i].file == file &&
			   mp->stats->lockers[i].line == line) {
			locker = &mp->stats->lockers[i];
			break;
		}
	}

	if (locker != NULL) {
		locker->count++;
		timevaladd(&locker->wait_total, &postlock_t);
	}

	mp->stats->cur_locker = locker;

	return (ISC_R_SUCCESS);
}
/*
 * change the system date on the master
 */
static void
mchgdate(struct tsp *msg)
{
	char tname[MAXHOSTNAMELEN];
	char olddate[32];
	struct timeval otime, ntime, tmptv;
	struct utmpx utx;

	(void)strcpy(tname, msg->tsp_name);

	xmit(TSP_DATEACK, msg->tsp_seq, &from);

	(void)strcpy(olddate, date());

	/* adjust time for residence on the queue */
	(void)gettimeofday(&otime, NULL);
	adj_msg_time(msg, &otime);

	tmptv.tv_sec = msg->tsp_time.tv_sec;
	tmptv.tv_usec = msg->tsp_time.tv_usec;
	timevalsub(&ntime, &tmptv, &otime);
	if (ntime.tv_sec < MAXADJ && ntime.tv_sec > -MAXADJ) {
		/*
		 * do not change the clock if we can adjust it
		 */
		dictate = 3;
		synch(tvtomsround(ntime));
	} else {
		utx.ut_type = OLD_TIME;
		(void)gettimeofday(&utx.ut_tv, NULL);
		pututxline(&utx);
		(void)settimeofday(&tmptv, 0);
		utx.ut_type = NEW_TIME;
		(void)gettimeofday(&utx.ut_tv, NULL);
		pututxline(&utx);
		spreadtime();
	}

	syslog(LOG_NOTICE, "date changed by %s from %s", tname, olddate);
}
static uint16_t
pit_update_counter(struct counter *c, int latch)
{
	struct timeval tv2;
	uint16_t lval;
	uint64_t delta_nsecs, delta_ticks;

	/* cannot latch a new value until the old one has been consumed */
	if (latch && c->olbyte != 0)
		return (0);

	if (c->initial == 0 || c->initial == 1) {
		/*
		 * XXX the program that runs the VM can be stopped and
		 * restarted at any time. This means that state that was
		 * created by the guest is destroyed between invocations
		 * of the program.
		 *
		 * If the counter's initial value is not programmed we
		 * assume a value that would be set to generate 100
		 * interrupts per second.
		 */
		c->initial = TIMER_DIV(PIT_8254_FREQ, 100);
		gettimeofday(&c->tv, NULL);
	}

	(void)gettimeofday(&tv2, NULL);
	timevalsub(&tv2, &c->tv);

	delta_nsecs = tv2.tv_sec * 1000000000 + tv2.tv_usec * 1000;
	delta_ticks = delta_nsecs / nsecs_per_tick;

	lval = c->initial - delta_ticks % c->initial;

	if (latch) {
		c->olbyte = 2;
		c->ol[1] = lval;	/* LSB */
		c->ol[0] = lval >> 8;	/* MSB */
	}

	return (lval);
}
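The latched value is the programmed initial count minus the elapsed ticks modulo that count, so the emulated counter appears to wrap just like real 8254 hardware. A standalone sketch of that computation (nsecs_per_tick is derived here by integer division, an approximation for illustration):

#include <stdio.h>
#include <stdint.h>

#define PIT_8254_FREQ	1193182		/* 8254 input clock, Hz */

int
main(void)
{
	uint64_t nsecs_per_tick = 1000000000 / PIT_8254_FREQ;	/* ~838 ns */
	uint16_t initial = PIT_8254_FREQ / 100;	/* 100 interrupts/sec */
	uint64_t delta_nsecs = 3000000;		/* e.g. 3 ms since c->tv */
	uint64_t delta_ticks = delta_nsecs / nsecs_per_tick;

	/* counter counts down from initial and wraps */
	uint16_t lval = initial - delta_ticks % initial;

	printf("counter reads %u of %u\n", lval, initial);
	return (0);
}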
int
nanosleep(const struct timespec *ts, struct timespec *rts)
{
	struct timeval timeout, endtime, now;
	int rval;

	timeout.tv_sec = ts->tv_sec;
	timeout.tv_usec = ts->tv_nsec / 1000;
	if (rts != NULL) {
		gettimeofday(&endtime, NULL);
		timevaladd(&endtime, &timeout);
	}
	rval = select(0, NULL, NULL, NULL, &timeout);
	if (rts != NULL && rval == -1 && errno == EINTR) {
		gettimeofday(&now, NULL);
		timevalsub(&endtime, &now);
		rts->tv_sec = endtime.tv_sec;
		rts->tv_nsec = endtime.tv_usec * 1000;
	}
	return rval;
}
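A usage sketch for this select()-based emulation: on EINTR the remaining time is endtime minus now, handed back through rts just as the real nanosleep(2) does (this example simply calls whatever nanosleep is linked in):

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec req = { 1, 500000000 };	/* sleep for 1.5 s */
	struct timespec rem;

	if (nanosleep(&req, &rem) == -1)
		printf("interrupted, %ld.%09ld s left\n",
		    (long)rem.tv_sec, rem.tv_nsec);
	return (0);
}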
static void
time_s2k(krb5_context context, krb5_enctype etype, const char *password,
    krb5_salt salt, int iterations)
{
	struct timeval tv1, tv2;
	krb5_error_code ret;
	krb5_keyblock key;
	krb5_data opaque;
	char *etype_name;
	int i;

	ret = krb5_enctype_to_string(context, etype, &etype_name);
	if (ret)
		krb5_err(context, 1, ret, "krb5_enctype_to_string");

	opaque.data = NULL;
	opaque.length = 0;

	gettimeofday(&tv1, NULL);

	for (i = 0; i < iterations; i++) {
		ret = krb5_string_to_key_salt_opaque(context, etype, password,
		    salt, opaque, &key);
		if (ret)
			krb5_err(context, 1, ret,
			    "krb5_string_to_key_data_salt_opaque");
		krb5_free_keyblock_contents(context, &key);
	}

	gettimeofday(&tv2, NULL);
	timevalsub(&tv2, &tv1);

	printf("%s string2key %d iterations time: %3ld.%06ld\n",
	    etype_name, iterations, (long)tv2.tv_sec, (long)tv2.tv_usec);

	free(etype_name);
}
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread *tth = arg;
	struct timeval t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}
/*
 * Measures the differences between machines' clocks using
 * ICMP timestamp messages.
 *	maxmsec		wait this many msec at most
 *	wmsec		msec to wait for an answer
 *	print		print complaints on stderr
 */
int					/* status val defined in globals.h */
measure(u_long maxmsec, u_long wmsec, char *hname, struct sockaddr_in *addr,
    int print)
{
	int length;
	int measure_status;
	int rcvcount, trials;
	int cc, count;
	fd_set ready;
	long sendtime, recvtime, histime1, histime2;
	long idelta, odelta, total;
	long min_idelta, min_odelta;
	struct timeval tdone, tcur, ttrans, twait, tout;
	u_char packet[PACKET_IN], opacket[64];
	register struct icmp *icp = (struct icmp *)packet;
	register struct icmp *oicp = (struct icmp *)opacket;
	struct ip *ip = (struct ip *)packet;

	min_idelta = min_odelta = 0x7fffffff;
	measure_status = HOSTDOWN;
	measure_delta = HOSTDOWN;
	trials = 0;
	errno = 0;

	/* open raw socket used to measure time differences */
	if (sock_raw < 0) {
		sock_raw = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
		if (sock_raw < 0) {
			syslog(LOG_ERR, "opening raw socket: %m");
			goto quit;
		}
	}

	/*
	 * empty the icmp input queue
	 */
	FD_ZERO(&ready);
	for (;;) {
		tout.tv_sec = tout.tv_usec = 0;
		FD_SET(sock_raw, &ready);
		if (select(sock_raw + 1, &ready, 0, 0, &tout)) {
			length = sizeof(struct sockaddr_in);
			cc = recvfrom(sock_raw, (char *)packet, PACKET_IN, 0,
			    0, &length);
			if (cc < 0)
				goto quit;
			continue;
		}
		break;
	}

	/*
	 * Choose the smallest transmission time in each of the two
	 * directions. Use these two latter quantities to compute the delta
	 * between the two clocks.
	 */

	oicp->icmp_type = ICMP_TSTAMP;
	oicp->icmp_code = 0;
	oicp->icmp_id = getpid();
	oicp->icmp_rtime = 0;
	oicp->icmp_ttime = 0;
	oicp->icmp_seq = seqno;

	FD_ZERO(&ready);

	(void)gettimeofday(&tdone, NULL);
	mstotvround(&tout, maxmsec);
	timevaladd(&tdone, &tout);	/* when we give up */

	mstotvround(&twait, wmsec);

	rcvcount = 0;
	while (rcvcount < MSGS) {
		(void)gettimeofday(&tcur, NULL);

		/*
		 * keep sending until we have sent the max
		 */
		if (trials < TRIALS) {
			trials++;
			oicp->icmp_otime = htonl((tcur.tv_sec % SECDAY) * 1000
			    + tcur.tv_usec / 1000);
			oicp->icmp_cksum = 0;
			oicp->icmp_cksum = in_cksum((u_short *)oicp,
			    sizeof(*oicp));

			count = sendto(sock_raw, opacket, sizeof(*oicp), 0,
			    (struct sockaddr *)addr, sizeof(struct sockaddr));
			if (count < 0) {
				if (measure_status == HOSTDOWN)
					measure_status = UNREACHABLE;
				goto quit;
			}
			++oicp->icmp_seq;

			ttrans = tcur;
			timevaladd(&ttrans, &twait);
		} else {
			ttrans = tdone;
		}

		while (rcvcount < trials) {
			timevalsub(&tout, &ttrans, &tcur);
			if (tout.tv_sec < 0)
				tout.tv_sec = 0;

			FD_SET(sock_raw, &ready);
			count = select(sock_raw + 1, &ready, (fd_set *)0,
			    (fd_set *)0, &tout);
			(void)gettimeofday(&tcur, NULL);
			if (count <= 0)
				break;

			length = sizeof(struct sockaddr_in);
			cc = recvfrom(sock_raw, (char *)packet, PACKET_IN, 0,
			    0, &length);
			if (cc < 0)
				goto quit;

			/*
			 * got something.  See if it is ours
			 */
			icp = (struct icmp *)(packet + (ip->ip_hl << 2));
			if (cc < sizeof(*ip)
			    || icp->icmp_type != ICMP_TSTAMPREPLY
			    || icp->icmp_id != oicp->icmp_id
			    || icp->icmp_seq < seqno
			    || icp->icmp_seq >= oicp->icmp_seq)
				continue;

			sendtime = ntohl(icp->icmp_otime);
			recvtime = ((tcur.tv_sec % SECDAY) * 1000 +
			    tcur.tv_usec / 1000);

			total = recvtime - sendtime;
			if (total < 0)	/* do not hassle midnight */
				continue;

			rcvcount++;
			histime1 = ntohl(icp->icmp_rtime);
			histime2 = ntohl(icp->icmp_ttime);
			/*
			 * a host using a time format different from
			 * msec. since midnight UT (as per RFC792) should
			 * set the high order bit of the 32-bit time
			 * value it transmits.
			 */
			if ((histime1 & 0x80000000) != 0) {
				measure_status = NONSTDTIME;
				goto quit;
			}
			measure_status = GOOD;

			idelta = recvtime - histime2;
			odelta = histime1 - sendtime;

			/* do not be confused by midnight */
			if (idelta < -MSEC_DAY/2)
				idelta += MSEC_DAY;
			else if (idelta > MSEC_DAY/2)
				idelta -= MSEC_DAY;

			if (odelta < -MSEC_DAY/2)
				odelta += MSEC_DAY;
			else if (odelta > MSEC_DAY/2)
				odelta -= MSEC_DAY;

			/*
			 * save the quantization error so that we can get a
			 * measurement finer than our system clock.
			 */
			if (total < MIN_ROUND) {
				measure_delta = (odelta - idelta)/2;
				goto quit;
			}

			if (idelta < min_idelta)
				min_idelta = idelta;
			if (odelta < min_odelta)
				min_odelta = odelta;

			measure_delta = (min_odelta - min_idelta)/2;
		}

		if (tcur.tv_sec > tdone.tv_sec
		    || (tcur.tv_sec == tdone.tv_sec
			&& tcur.tv_usec >= tdone.tv_usec))
			break;
	}

quit:
	seqno += TRIALS;	/* allocate our sequence numbers */

	/*
	 * If no answer is received for TRIALS consecutive times,
	 * the machine is assumed to be down
	 */
	if (measure_status == GOOD) {
		if (trace) {
			fprintf(fd,
			    "measured delta %4d, %d trials to %-15s %s\n",
			    measure_delta, trials,
			    inet_ntoa(addr->sin_addr), hname);
		}
	} else if (print) {
		if (errno != 0)
			warn("measure %s", hname);
	} else {
		if (errno != 0) {
			syslog(LOG_ERR, "measure %s: %m", hname);
		} else {
			syslog(LOG_ERR, "measure: %s did not respond", hname);
		}
		if (trace) {
			fprintf(fd, "measure: %s failed after %d trials\n",
			    hname, trials);
			(void)fflush(fd);
		}
	}

	return (measure_status);
}
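Because the ICMP timestamps are milliseconds since midnight UT, a measurement taken across midnight can produce a delta that is off by a full day; measure() folds each delta back into [-MSEC_DAY/2, MSEC_DAY/2]. The folding step in isolation:

#include <stdio.h>

#define MSEC_DAY	(24 * 60 * 60 * 1000)	/* ms-since-midnight wraps here */

/* Fold a one-way delay measured in ms-since-midnight back into
 * [-MSEC_DAY/2, MSEC_DAY/2], as measure() does around midnight. */
static long
fold_delta(long delta)
{
	if (delta < -MSEC_DAY/2)
		delta += MSEC_DAY;
	else if (delta > MSEC_DAY/2)
		delta -= MSEC_DAY;
	return (delta);
}

int
main(void)
{
	/* reply sent at 23:59:59.900, received at 00:00:00.100 */
	long recvtime = 100, histime2 = MSEC_DAY - 100;

	printf("%ld ms\n", fold_delta(recvtime - histime2));	/* 200 */
	return (0);
}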
int
main(int argc, char **argv)
{
	ENGINE *engine = NULL;
	int i, j, idx = 0;
	RSA *rsa;

	setprogname(argv[0]);

	/*
	if (getarg(args, sizeof(args) / sizeof(args[0]), argc, argv, &idx))
		usage(1);

	if (help_flag)
		usage(0);

	if (version_flag) {
		print_version(NULL);
		exit(0);
	}
	*/

	while (1) {
		int c = getopt_long(argc, argv, "hq", args, &idx);

		if (c == -1)
			break;

		switch (c) {
		case 'q':
			verbose = 0;
			break;
		case 'h':
			usage(0);
			break;
		case '?':
		default:
			usage(-1);
			break;
		}
	}

	/*
	argc -= idx;
	argv += idx;
	*/

	if (verbose)
		printf("[TEST] RSA\n");

	/* OpenSSL_add_all_algorithms(); */
#ifdef OPENSSL
	ENGINE_load_openssl();
#endif
	ENGINE_load_builtin_engines();

	/*
	if (argc == 0) {
		engine = ENGINE_by_id("builtin");
	} else {
		engine = ENGINE_by_id(argv[0]);
		if (engine == NULL)
			engine = ENGINE_by_dso(argv[0], id_flag);
	}
	if (engine == NULL) {
		fprintf(stderr, "ENGINE_by_dso failed\n");
		return 76;
	}

	if (ENGINE_get_RSA(engine) == NULL)
		return 77;

	printf("rsa %s\n", ENGINE_get_RSA(engine)->name);
	*/

	if (time_keygen) {
		struct timeval tv1, tv2;
		BIGNUM *e;

		rsa = RSA_new_method(engine);
		if (!key_blinding)
			rsa->flags |= RSA_FLAG_NO_BLINDING;

		e = BN_new();
		BN_set_word(e, 0x10001);

		printf("running keygen with %d loops\n", loops);

		gettimeofday(&tv1, NULL);

		for (i = 0; i < loops; i++) {
			rsa = RSA_new_method(engine);
			if (RSA_generate_key_ex(rsa, 1024, e, NULL) != 1) {
				RSA_free(rsa);
				fprintf(stderr, "RSA_generate_key_ex");
				fail++;
				return 1;
			}
			RSA_free(rsa);
		}

		gettimeofday(&tv2, NULL);
		timevalsub(&tv2, &tv1);

		printf("time %lu.%06lu\n",
		    (unsigned long)tv2.tv_sec,
		    (unsigned long)tv2.tv_usec);

		BN_free(e);
		/* ENGINE_finish(engine); */

		return 0;
	}

	/*
	if (time_key) {
		const int size = 20;
		struct timeval tv1, tv2;
		unsigned char *p;

		if (strcmp(time_key, "generate") == 0) {
			BIGNUM *e;

			rsa = RSA_new_method(engine);
			if (!key_blinding)
				rsa->flags |= RSA_FLAG_NO_BLINDING;

			e = BN_new();
			BN_set_word(e, 0x10001);

			if (RSA_generate_key_ex(rsa, 1024, e, NULL) != 1) {
				fprintf(stderr, "RSA_generate_key_ex");
				fail++;
				return (1);
			}
		} else {
			rsa = read_key(engine, time_key);
		}

		p = emalloc(loops * size);

		CCRandomCopyBytes(kCCRandomDefault, p, loops * size);

		gettimeofday(&tv1, NULL);
		for (i = 0; i < loops; i++)
			check_rsa(p + (i * size), size, rsa,
			    RSA_PKCS1_PADDING);
		gettimeofday(&tv2, NULL);
		timevalsub(&tv2, &tv1);

		printf("time %lu.%06lu\n",
		    (unsigned long)tv2.tv_sec,
		    (unsigned long)tv2.tv_usec);

		RSA_free(rsa);
		ENGINE_finish(engine);

		return 0;
	}
	*/

	if (rsa_key) {
		rsa = read_key(engine, rsa_key);

		/*
		 * Assuming that you use the RSA key in the distribution,
		 * this test will generate a signature that has a leading
		 * zero, and thus a checksum that is 127 bytes instead of
		 * 128 bytes (the size of the key).
		 */
		{
			const unsigned char sha1[20] = {
				0x6d, 0x33, 0xf9, 0x40, 0x75, 0x5b, 0x4e, 0xc5,
				0x90, 0x35, 0x48, 0xab, 0x75, 0x02, 0x09, 0x76,
				0x9a, 0xb4, 0x7d, 0x6b
			};
			check_rsa(sha1, sizeof(sha1), rsa, RSA_PKCS1_PADDING);
		}

		for (i = 0; i < 128; i++) {
			unsigned char sha1[20];
			CCRandomCopyBytes(kCCRandomDefault, sha1,
			    sizeof(sha1));
			check_rsa(sha1, sizeof(sha1), rsa, RSA_PKCS1_PADDING);
		}
		for (i = 0; i < 128; i++) {
			unsigned char des3[21];
			CCRandomCopyBytes(kCCRandomDefault, des3,
			    sizeof(des3));
			check_rsa(des3, sizeof(des3), rsa, RSA_PKCS1_PADDING);
		}
		for (i = 0; i < 128; i++) {
			unsigned char aes[32];
			CCRandomCopyBytes(kCCRandomDefault, aes, sizeof(aes));
			check_rsa(aes, sizeof(aes), rsa, RSA_PKCS1_PADDING);
		}

		RSA_free(rsa);
	}

	if (verbose) {
		printf("[BEGIN] RSA loops\n");
		printf("Running %d loops\n", loops);
	}
	total++;

	for (i = 0; i < loops; i++) {
		BN_GENCB cb;
		BIGNUM *e;
		unsigned int n;

		rsa = RSA_new_method(engine);
		if (!key_blinding)
			rsa->flags |= RSA_FLAG_NO_BLINDING;

		e = BN_new();
		BN_set_word(e, 0x10001);

		BN_GENCB_set(&cb, cb_func, NULL);

		CCRandomCopyBytes(kCCRandomDefault, &n, sizeof(n));
		n &= 0x1ff;
		n += 1024;

		if (RSA_generate_key_ex(rsa, n, e, &cb) != 1) {
			fprintf(stderr, "RSA_generate_key_ex");
			fail++;
			return 1;
		}

		BN_free(e);

		for (j = 0; j < 8; j++) {
			unsigned char sha1[20];
			CCRandomCopyBytes(kCCRandomDefault, sha1,
			    sizeof(sha1));
			check_rsa(sha1, sizeof(sha1), rsa, RSA_PKCS1_PADDING);
		}

		RSA_free(rsa);
	}
	if (verbose)
		printf("[PASS] RSA loops\n");
	pass++;

	if (verbose) {
		printf("[SUMMARY]\n");
		printf("total: %d\n", total);
		printf("passed: %d\n", pass);
		printf("failed: %d\n", fail);
	}

	/* ENGINE_finish(engine); */

	return (fail);
}
int
ripd(int argc, char *argv[])
{
	int n, nfd, tflags = 0, ch;
	struct timeval *tvp, waittime;
	struct itimerval itval;
	struct rip *query = msg;
	fd_set ibits;
	sigset_t sigset, osigset;

	while ((ch = getopt(argc, argv, "sqtdg")) != EOF) {
		switch (ch) {
		case 's':
			supplier = 1;
			break;
		case 'q':
			supplier = 0;
			break;
		case 't':
			tflags++;
			break;
		case 'd':
			debug++;
			setlogmask(LOG_UPTO(LOG_DEBUG));
			break;
		case 'g':
			gateway = 1;
			break;
		default:
			fprintf(stderr, "%% Incomplete command\r\n");
			return (1);
		}
	}

	getkversion();

	sock = getsocket();
	assert(sock >= 0);

	openlog("ripd", LOG_PID | LOG_ODELAY, LOG_DAEMON);

	if (debug == 0 && tflags == 0) {
		switch (fork()) {
		case -1:
			perror("fork");
			exit(1);
		case 0:
			break;		/* child */
		default:
			exit(0);	/* parent */
		}
		close(0);
		close(1);
		close(2);
		setsid();
		setlogmask(LOG_UPTO(LOG_WARNING));
	} else {
		setlogmask(LOG_UPTO(LOG_DEBUG));
	}

	/* pid */
	pidfile = fopen("/var/run/ripd.pid", "w");
	if (pidfile == NULL) {
		fprintf(stderr, "%% Error write pid\n");
		return (-1);
	} else
		fprintf(pidfile, "%d", (int)getpid());
	fclose(pidfile);

	/*
	 * Any extra argument is considered
	 * a tracing log file.
	 *
	 * Note: because traceon() redirects stderr, anything planning to
	 * crash on startup should do so before this point.
	 */
	if (argc > optind) {
		traceon(argv[optind]);
	}
	while (tflags-- > 0) {
		bumploglevel();
	}

	gettimeofday(&now, NULL);

	/*
	 * Collect an initial view of the world by
	 * checking the interface configuration and the gateway kludge
	 * file.  Then, send a request packet on all
	 * directly connected networks to find out what
	 * everyone else thinks.
	 */
	rtinit();
	ifinit();
	gwkludge();
	if (gateway > 0) {
		rtdefault();
	}
	if (supplier < 0) {
		supplier = 0;
	}

	query->rip_cmd = RIPCMD_REQUEST;
	query->rip_vers = RIPVERSION;
	if (sizeof(query->rip_nets[0].rip_dst.sa_family) > 1) {	/* XXX */
		query->rip_nets[0].rip_dst.sa_family =
		    htons((u_short)AF_UNSPEC);
	} else {
		/* unreachable code (at least on most platforms) */
		query->rip_nets[0].rip_dst.sa_family = AF_UNSPEC;
	}
	query->rip_nets[0].rip_metric = htonl((u_long)HOPCNT_INFINITY);
	toall(sndmsg, 0, NULL);
	signal(SIGALRM, timer);
	signal(SIGHUP, hup);
	signal(SIGTERM, hup);
	signal(SIGINT, rtdeleteall);
	signal(SIGUSR1, sigtrace);
	signal(SIGUSR2, sigtrace);

	itval.it_interval.tv_sec = TIMER_RATE;
	itval.it_value.tv_sec = TIMER_RATE;
	itval.it_interval.tv_usec = 0;
	itval.it_value.tv_usec = 0;

	srandom(time(NULL) ^ getpid());

	if (setitimer(ITIMER_REAL, &itval, (struct itimerval *)NULL) < 0) {
		syslog(LOG_ERR, "setitimer: %m\n");
	}

	FD_ZERO(&ibits);
	nfd = sock + 1;			/* 1 + max(fd's) */
	for (;;) {
		FD_SET(sock, &ibits);

		/*
		 * If we need a dynamic update that was held off,
		 * needupdate will be set, and nextbcast is the time
		 * by which we want select to return.  Compute time
		 * until dynamic update should be sent, and select only
		 * until then.  If we have already passed nextbcast,
		 * just poll.
		 */
		if (needupdate) {
			waittime = nextbcast;
			timevalsub(&waittime, &now);
			if (waittime.tv_sec < 0) {
				waittime.tv_sec = 0;
				waittime.tv_usec = 0;
			}
			if (traceactions)
				fprintf(ftrace,
				    "select until dynamic update %ld/%ld sec/usec\n",
				    (long)waittime.tv_sec,
				    (long)waittime.tv_usec);
			tvp = &waittime;
		} else {
			tvp = (struct timeval *)NULL;
		}

		n = select(nfd, &ibits, 0, 0, tvp);
		if (n <= 0) {
			/*
			 * Need delayed dynamic update if select returned
			 * nothing and we timed out.  Otherwise, ignore
			 * errors (e.g. EINTR).
			 */
			if (n < 0) {
				if (errno == EINTR)
					continue;
				syslog(LOG_ERR, "select: %m");
			}
			sigemptyset(&sigset);
			sigaddset(&sigset, SIGALRM);
			sigprocmask(SIG_BLOCK, &sigset, &osigset);
			if (n == 0 && needupdate) {
				if (traceactions)
					fprintf(ftrace,
					    "send delayed dynamic update\n");
				(void) gettimeofday(&now,
				    (struct timezone *)NULL);
				toall(supply, RTS_CHANGED,
				    (struct interface *)NULL);
				lastbcast = now;
				needupdate = 0;
				nextbcast.tv_sec = 0;
			}
			sigprocmask(SIG_SETMASK, &osigset, NULL);
			continue;
		}

		gettimeofday(&now, (struct timezone *)NULL);
		sigemptyset(&sigset);
		sigaddset(&sigset, SIGALRM);
		sigprocmask(SIG_BLOCK, &sigset, &osigset);

		if (FD_ISSET(sock, &ibits)) {
			process(sock);
		}

		/* handle ICMP redirects */

		sigprocmask(SIG_SETMASK, &osigset, NULL);
	}
}
int
slave()
{
	int tries;
	long electiontime, refusetime, looktime, looptime, adjtime;
	u_short seq;
	long fastelection;
#define FASTTOUT 3
	struct in_addr cadr;
	struct timeval otime;
	struct sockaddr_in taddr;
	char tname[MAXHOSTNAMELEN];
	struct tsp *msg, to;
	struct timeval ntime, wait, tmptv;
	time_t tsp_time_sec;
	struct tsp *answer;
	int timeout();
	char olddate[32];
	char newdate[32];
	struct netinfo *ntp;
	struct hosttbl *htp;
	struct utmpx utx;

	old_slavenet = 0;
	seq = 0;
	refusetime = 0;
	adjtime = 0;

	(void)gettimeofday(&ntime, NULL);
	electiontime = ntime.tv_sec + delay2;
	fastelection = ntime.tv_sec + FASTTOUT;
	if (justquit)
		looktime = electiontime;
	else
		looktime = fastelection;
	looptime = fastelection;

	if (slavenet)
		xmit(TSP_SLAVEUP, 0, &slavenet->dest_addr);

	if (status & MASTER) {
		for (ntp = nettab; ntp != NULL; ntp = ntp->next) {
			if (ntp->status == MASTER)
				masterup(ntp);
		}
	}

loop:
	get_goodgroup(0);
	(void)gettimeofday(&ntime, NULL);
	if (ntime.tv_sec > electiontime) {
		if (trace)
			fprintf(fd, "election timer expired\n");
		longjmp(jmpenv, 1);
	}

	if (ntime.tv_sec >= looktime) {
		if (trace)
			fprintf(fd, "Looking for nets to master\n");

		if (Mflag && nignorednets > 0) {
			for (ntp = nettab; ntp != NULL; ntp = ntp->next) {
				if (ntp->status == IGNORE
				    || ntp->status == NOMASTER) {
					lookformaster(ntp);
					if (ntp->status == MASTER) {
						masterup(ntp);
					} else if (ntp->status == MASTER) {
						ntp->status = NOMASTER;
					}
				}
				if (ntp->status == MASTER
				    && --ntp->quit_count < 0)
					ntp->quit_count = 0;
			}
			makeslave(slavenet);	/* prune extras */
			setstatus();
		}
		(void)gettimeofday(&ntime, NULL);
		looktime = ntime.tv_sec + delay2;
	}
	if (ntime.tv_sec >= looptime) {
		if (trace)
			fprintf(fd, "Looking for loops\n");
		for (ntp = nettab; ntp != NULL; ntp = ntp->next) {
			if (ntp->status == MASTER) {
				to.tsp_type = TSP_LOOP;
				to.tsp_vers = TSPVERSION;
				to.tsp_seq = sequence++;
				to.tsp_hopcnt = MAX_HOPCNT;
				(void)strcpy(to.tsp_name, hostname);
				bytenetorder(&to);
				if (sendto(sock, (char *)&to,
				    sizeof(struct tsp), 0,
				    (struct sockaddr *)&ntp->dest_addr,
				    sizeof(ntp->dest_addr)) < 0) {
					trace_sendto_err(ntp->dest_addr.sin_addr);
				}
			}
		}
		(void)gettimeofday(&ntime, NULL);
		looptime = ntime.tv_sec + delay2;
	}

	wait.tv_sec = min(electiontime, min(looktime, looptime))
	    - ntime.tv_sec;
	if (wait.tv_sec < 0)
		wait.tv_sec = 0;
	wait.tv_sec += FASTTOUT;
	wait.tv_usec = 0;
	msg = readmsg(TSP_ANY, ANYADDR, &wait, 0);

	if (msg != NULL) {
		/*
		 * filter stuff not for us
		 */
		switch (msg->tsp_type) {
		case TSP_SETDATE:
		case TSP_TRACEOFF:
		case TSP_TRACEON:
			/*
			 * XXX check to see they are from ourself
			 */
			break;

		case TSP_TEST:
		case TSP_MSITE:
			break;

		case TSP_MASTERUP:
			if (!fromnet) {
				if (trace) {
					fprintf(fd, "slave ignored: ");
					print(msg, &from);
				}
				goto loop;
			}
			break;

		default:
			if (!fromnet
			    || fromnet->status == IGNORE
			    || fromnet->status == NOMASTER) {
				if (trace) {
					fprintf(fd, "slave ignored: ");
					print(msg, &from);
				}
				goto loop;
			}
			break;
		}

		/*
		 * now process the message
		 */
		switch (msg->tsp_type) {
		case TSP_ADJTIME:
			if (fromnet != slavenet)
				break;
			if (!good_host_name(msg->tsp_name)) {
				syslog(LOG_NOTICE,
				    "attempted time adjustment by %s",
				    msg->tsp_name);
				suppress(&from, msg->tsp_name, fromnet);
				break;
			}
			/*
			 * Speed up loop detection in case we have a loop.
			 * Otherwise the clocks can race until the loop
			 * is found.
			 */
			(void)gettimeofday(&otime, NULL);
			if (adjtime < otime.tv_sec)
				looptime -= (looptime - otime.tv_sec)/2 + 1;

			setmaster(msg);
			if (seq != msg->tsp_seq) {
				seq = msg->tsp_seq;
				synch(tvtomsround(msg->tsp_time));
			}
			(void)gettimeofday(&ntime, NULL);
			electiontime = ntime.tv_sec + delay2;
			fastelection = ntime.tv_sec + FASTTOUT;
			adjtime = ntime.tv_sec + SAMPLEINTVL*2;
			break;

		case TSP_SETTIME:
			if (fromnet != slavenet)
				break;
			if (seq == msg->tsp_seq)
				break;
			seq = msg->tsp_seq;

			/* adjust time for residence on the queue */
			(void)gettimeofday(&otime, NULL);
			adj_msg_time(msg, &otime);

			/*
			 * the following line is necessary due to syslog
			 * calling ctime() which clobbers the static buffer
			 */
			(void)strcpy(olddate, date());
			tsp_time_sec = msg->tsp_time.tv_sec;
			(void)strcpy(newdate, ctime(&tsp_time_sec));

			if (!good_host_name(msg->tsp_name)) {
				syslog(LOG_NOTICE,
				    "attempted time setting by untrusted %s to %s",
				    msg->tsp_name, newdate);
				suppress(&from, msg->tsp_name, fromnet);
				break;
			}

			setmaster(msg);
			tmptv.tv_sec = msg->tsp_time.tv_sec;
			tmptv.tv_usec = msg->tsp_time.tv_usec;
			timevalsub(&ntime, &tmptv, &otime);
			if (ntime.tv_sec < MAXADJ && ntime.tv_sec > -MAXADJ) {
				/*
				 * do not change the clock if we can adjust it
				 */
				synch(tvtomsround(ntime));
			} else {
				utx.ut_type = OLD_TIME;
				gettimeofday(&utx.ut_tv, NULL);
				pututxline(&utx);
				(void)settimeofday(&tmptv, 0);
				utx.ut_type = NEW_TIME;
				gettimeofday(&utx.ut_tv, NULL);
				pututxline(&utx);
				syslog(LOG_NOTICE,
				    "date changed by %s from %s",
				    msg->tsp_name, olddate);
				if (status & MASTER)
					spreadtime();
			}
			(void)gettimeofday(&ntime, NULL);
			electiontime = ntime.tv_sec + delay2;
			fastelection = ntime.tv_sec + FASTTOUT;

			/*
			 * This patches a bad protocol bug.  Imagine a system
			 * with several networks, where there are a pair of
			 * redundant gateways between a pair of networks,
			 * each running timed.  Assume that we start with a
			 * third machine mastering one of the networks, and
			 * one of the gateways mastering the other.  Imagine
			 * that the third machine goes away and the
			 * non-master gateway decides to replace it.  If
			 * things are timed just 'right,' we will have each
			 * gateway mastering one network for a little while.
			 * If a SETTIME message gets into the network at that
			 * time, perhaps from the newly masterful gateway as
			 * it was taking control, the SETTIME will loop
			 * forever.  Each time a gateway receives it on its
			 * slave side, it will call spreadtime to forward it
			 * on its mastered network.  We are now in a
			 * permanent loop, since the SETTIME msgs will keep
			 * any clock in the network from advancing.
			 * Normally, the 'LOOP' stuff will detect and correct
			 * the situation.  However, with the clocks stopped,
			 * the 'looptime' timer cannot expire.  While they
			 * are in this state, the masters will try to
			 * saturate the network with SETTIME packets.
			 */
			looptime = ntime.tv_sec +
			    (looptime - otime.tv_sec)/2 - 1;
			break;

		case TSP_MASTERUP:
			if (slavenet && fromnet != slavenet)
				break;
			if (!good_host_name(msg->tsp_name)) {
				suppress(&from, msg->tsp_name, fromnet);
				if (electiontime > fastelection)
					electiontime = fastelection;
				break;
			}
			makeslave(fromnet);
			setmaster(msg);
			setstatus();
			answerdelay();
			xmit(TSP_SLAVEUP, 0, &from);
			(void)gettimeofday(&ntime, NULL);
			electiontime = ntime.tv_sec + delay2;
			fastelection = ntime.tv_sec + FASTTOUT;
			refusetime = 0;
			break;

		case TSP_MASTERREQ:
			if (fromnet->status != SLAVE)
				break;
			(void)gettimeofday(&ntime, NULL);
			electiontime = ntime.tv_sec + delay2;
			break;

		case TSP_SETDATE:
			tsp_time_sec = msg->tsp_time.tv_sec;
			(void)strcpy(newdate, ctime(&tsp_time_sec));
			schgdate(msg, newdate);
			break;

		case TSP_SETDATEREQ:
			if (fromnet->status != MASTER)
				break;
			tsp_time_sec = msg->tsp_time.tv_sec;
			(void)strcpy(newdate, ctime(&tsp_time_sec));
			htp = findhost(msg->tsp_name);
			if (0 == htp) {
				syslog(LOG_WARNING,
				    "DATEREQ from uncontrolled machine");
				break;
			}
			if (!htp->good) {
				syslog(LOG_WARNING,
				    "attempted date change by untrusted %s to %s",
				    htp->name, newdate);
				spreadtime();
				break;
			}
			schgdate(msg, newdate);
			break;

		case TSP_TRACEON:
			traceon();
			break;

		case TSP_TRACEOFF:
			traceoff("Tracing ended at %s\n");
			break;

		case TSP_SLAVEUP:
			newslave(msg);
			break;

		case TSP_ELECTION:
			if (fromnet->status == SLAVE) {
				(void)gettimeofday(&ntime, NULL);
				electiontime = ntime.tv_sec + delay2;
				fastelection = ntime.tv_sec + FASTTOUT;
				seq = 0;
				if (!good_host_name(msg->tsp_name)) {
					syslog(LOG_NOTICE,
					    "suppress election of %s",
					    msg->tsp_name);
					to.tsp_type = TSP_QUIT;
					electiontime = fastelection;
				} else if (cadr.s_addr != from.sin_addr.s_addr
				    && ntime.tv_sec < refusetime) {
					/*
					 * if the candidate has to repeat
					 * itself, the old code would refuse
					 * it the second time.  That would
					 * prevent elections.
					 */
					to.tsp_type = TSP_REFUSE;
				} else {
					cadr.s_addr = from.sin_addr.s_addr;
					to.tsp_type = TSP_ACCEPT;
					refusetime = ntime.tv_sec + 30;
				}
				taddr = from;
				(void)strcpy(tname, msg->tsp_name);
				(void)strcpy(to.tsp_name, hostname);
				answerdelay();
				if (!acksend(&to, &taddr, tname,
				    TSP_ACK, 0, 0))
					syslog(LOG_WARNING,
					    "no answer from candidate %s\n",
					    tname);

			} else {	/* fromnet->status == MASTER */
				htp = addmach(msg->tsp_name, &from, fromnet);
				to.tsp_type = TSP_QUIT;
				(void)strcpy(to.tsp_name, hostname);
				if (!acksend(&to, &htp->addr, htp->name,
				    TSP_ACK, 0, htp->noanswer)) {
					syslog(LOG_ERR,
					    "no reply from %s to ELECTION-QUIT",
					    htp->name);
					(void)remmach(htp);
				}
			}
			break;

		case TSP_CONFLICT:
			if (fromnet->status != MASTER)
				break;
			/*
			 * After a network partition, there can be
			 * more than one master: the first slave to
			 * come up will notify here the situation.
			 */
			(void)strcpy(to.tsp_name, hostname);

			/*
			 * The other master often gets into the same state,
			 * with boring results.
			 */
			ntp = fromnet;	/* (acksend() can leave fromnet=0 */
			for (tries = 0; tries < 3; tries++) {
				to.tsp_type = TSP_RESOLVE;
				answer = acksend(&to, &ntp->dest_addr,
				    ANYADDR, TSP_MASTERACK, ntp, 0);
				if (answer == NULL)
					break;
				htp = addmach(answer->tsp_name, &from, ntp);
				to.tsp_type = TSP_QUIT;
				answer = acksend(&to, &htp->addr, htp->name,
				    TSP_ACK, 0, htp->noanswer);
				if (!answer) {
					syslog(LOG_WARNING,
					    "conflict error: no reply from %s to QUIT",
					    htp->name);
					(void)remmach(htp);
				}
			}
			masterup(ntp);
			break;

		case TSP_MSITE:
			if (!slavenet)
				break;
			taddr = from;
			to.tsp_type = TSP_MSITEREQ;
			to.tsp_vers = TSPVERSION;
			to.tsp_seq = 0;
			(void)strcpy(to.tsp_name, hostname);
			answer = acksend(&to, &slavenet->dest_addr,
			    ANYADDR, TSP_ACK, slavenet, 0);
			if (answer != NULL
			    && good_host_name(answer->tsp_name)) {
				setmaster(answer);
				to.tsp_type = TSP_ACK;
				(void)strcpy(to.tsp_name, answer->tsp_name);
				bytenetorder(&to);
				if (sendto(sock, (char *)&to,
				    sizeof(struct tsp), 0,
				    (struct sockaddr *)&taddr,
				    sizeof(taddr)) < 0) {
					trace_sendto_err(taddr.sin_addr);
				}
			}
			break;

		case TSP_MSITEREQ:
			break;

		case TSP_ACCEPT:
		case TSP_REFUSE:
		case TSP_RESOLVE:
			break;

		case TSP_QUIT:
			doquit(msg);		/* become a slave */
			break;

		case TSP_TEST:
			electiontime = 0;
			break;

		case TSP_LOOP:
			/* looking for loops of masters */
			if (!(status & MASTER))
				break;
			if (fromnet->status == SLAVE) {
				if (!strcmp(msg->tsp_name, hostname)) {
					/*
					 * Someone forwarded our message back
					 * to us.  There must be a loop.  Tell
					 * the master of this network to quit.
					 *
					 * The other master often gets into
					 * the same state, with boring
					 * results.
					 */
					ntp = fromnet;
					for (tries = 0; tries < 3; tries++) {
						to.tsp_type = TSP_RESOLVE;
						answer = acksend(&to,
						    &ntp->dest_addr, ANYADDR,
						    TSP_MASTERACK, ntp, 0);
						if (answer == NULL)
							break;
						taddr = from;
						(void)strcpy(tname,
						    answer->tsp_name);
						to.tsp_type = TSP_QUIT;
						(void)strcpy(to.tsp_name,
						    hostname);
						if (!acksend(&to, &taddr,
						    tname, TSP_ACK, 0, 1)) {
							syslog(LOG_ERR,
							    "no reply from %s to slave LOOP-QUIT",
							    tname);
						} else {
							electiontime = 0;
						}
					}
					(void)gettimeofday(&ntime, NULL);
					looptime = ntime.tv_sec + FASTTOUT;
				} else {
					if (msg->tsp_hopcnt-- < 1)
						break;
					bytenetorder(msg);
					for (ntp = nettab; ntp != 0;
					    ntp = ntp->next) {
						if (ntp->status == MASTER
						    && 0 > sendto(sock,
						    (char *)msg,
						    sizeof(struct tsp), 0,
						    (struct sockaddr *)&ntp->dest_addr,
						    sizeof(ntp->dest_addr)))
							trace_sendto_err(ntp->dest_addr.sin_addr);
					}
				}
			} else {	/* fromnet->status == MASTER */
				/*
				 * We should not have received this from a
				 * net we are master on.  There must be two
				 * masters, unless the packet was really
				 * from us.
				 */
				if (from.sin_addr.s_addr
				    == fromnet->my_addr.s_addr) {
					if (trace)
						fprintf(fd,
						    "discarding forwarded LOOP\n");
					break;
				}

				/*
				 * The other master often gets into the same
				 * state, with boring results.
				 */
				ntp = fromnet;
				for (tries = 0; tries < 3; tries++) {
					to.tsp_type = TSP_RESOLVE;
					answer = acksend(&to, &ntp->dest_addr,
					    ANYADDR, TSP_MASTERACK, ntp, 0);
					if (!answer)
						break;
					htp = addmach(answer->tsp_name,
					    &from, ntp);
					to.tsp_type = TSP_QUIT;
					(void)strcpy(to.tsp_name, hostname);
					if (!acksend(&to, &htp->addr,
					    htp->name, TSP_ACK, 0,
					    htp->noanswer)) {
						syslog(LOG_ERR,
						    "no reply from %s to master LOOP-QUIT",
						    htp->name);
						(void)remmach(htp);
					}
				}
				(void)gettimeofday(&ntime, NULL);
				looptime = ntime.tv_sec + FASTTOUT;
			}
			break;

		default:
			if (trace) {
				fprintf(fd, "garbage message: ");
				print(msg, &from);
			}
			break;
		}
	}
	goto loop;
}
int
main(int argc, char *argv[])
{
	int i, n;
	struct interface *ifp;
	int c;
	struct timeval waittime;
	int timeout;
	boolean_t daemon = _B_TRUE;	/* Fork off a detached daemon */
	FILE *pidfp;
	mode_t pidmode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); /* 0644 */

	rip6_port = htons(IPPORT_ROUTESERVER6);
	allrouters.sin6_family = AF_INET6;
	allrouters.sin6_port = rip6_port;
	allrouters.sin6_addr = allrouters_in6;

	while ((c = getopt(argc, argv, "nsqvTtdgPp:")) != EOF) {
		switch (c) {
		case 'n':
			install = _B_FALSE;
			break;
		case 's':
			supplier = _B_TRUE;
			break;
		case 'q':
			supplier = _B_FALSE;
			break;
		case 'v':
			tracing |= ACTION_BIT;
			break;
		case 'T':
			daemon = _B_FALSE;
			break;
		case 't':
			tracepackets = _B_TRUE;
			daemon = _B_FALSE;
			tracing |= (INPUT_BIT | OUTPUT_BIT);
			break;
		case 'd':
			break;
		case 'P':
			dopoison = _B_FALSE;
			break;
		case 'p':
			rip6_port = htons(atoi(optarg));
			allrouters.sin6_port = rip6_port;
			break;
		default:
			usage(argv[0]);
			/* NOTREACHED */
		}
	}

	/*
	 * Any extra argument is considered
	 * a tracing log file.
	 */
	if (optind < argc) {
		traceon(argv[optind]);
	} else if (tracing && !daemon) {
		traceonfp(stdout);
	} else if (tracing) {
		(void) fprintf(stderr, "Need logfile with -v\n");
		usage(argv[0]);
		/* NOTREACHED */
	}

	if (daemon) {
		int t;

		if (fork())
			exit(EXIT_SUCCESS);
		for (t = 0; t < 20; t++) {
			if (!tracing || (t != fileno(ftrace)))
				(void) close(t);
		}
		(void) open("/", 0);
		(void) dup2(0, 1);
		(void) dup2(0, 2);
		(void) setsid();
	}

	/* Store our process id, blow away any existing file if it exists. */
	if ((pidfp = fopen(PATH_PID, "w")) == NULL) {
		(void) fprintf(stderr, "%s: unable to open " PATH_PID ": %s\n",
		    argv[0], strerror(errno));
	} else {
		(void) fprintf(pidfp, "%ld\n", getpid());
		(void) fclose(pidfp);
		(void) chmod(PATH_PID, pidmode);
	}

	iocsoc = socket(AF_INET6, SOCK_DGRAM, 0);
	if (iocsoc < 0) {
		syslog(LOG_ERR, "main: socket: %m");
		exit(EXIT_FAILURE);
	}

	setup_rtsock();

	/*
	 * Allocate the buffer to hold the RIPng packet.  In reality, it will
	 * be smaller than IPV6_MAX_PACKET octets due to (at least) the IPv6
	 * and UDP headers but IPV6_MAX_PACKET is a convenient size.
	 */
	packet = (char *)malloc(IPV6_MAX_PACKET);
	if (packet == NULL) {
		syslog(LOG_ERR, "main: malloc: %m");
		exit(EXIT_FAILURE);
	}
	msg = (struct rip6 *)packet;

	/*
	 * Allocate the buffer to hold the ancillary data.  This data is used
	 * to insure that the incoming hop count of a RIPCMD6_RESPONSE message
	 * is IPV6_MAX_HOPS which indicates that it came from a direct
	 * neighbor (namely, no intervening router decremented it).
	 */
	control = (char *)malloc(IPV6_MAX_PACKET);
	if (control == NULL) {
		syslog(LOG_ERR, "main: malloc: %m");
		exit(EXIT_FAILURE);
	}

	openlog("in.ripngd", LOG_PID | LOG_CONS, LOG_DAEMON);

	(void) gettimeofday(&now, (struct timezone *)NULL);

	initifs();
	solicitall(&allrouters);

	if (supplier)
		supplyall(&allrouters, 0, (struct interface *)NULL, _B_TRUE);

	(void) sigset(SIGALRM, (void (*)(int))timer);
	(void) sigset(SIGHUP, (void (*)(int))initifs);
	(void) sigset(SIGTERM, (void (*)(int))term);
	(void) sigset(SIGUSR1, (void (*)(int))if_dump);
	(void) sigset(SIGUSR2, (void (*)(int))rtdump);

	/*
	 * Seed the pseudo-random number generator for GET_RANDOM().
	 */
	srandom((uint_t)gethostid());

	timer();

	for (;;) {
		if (needupdate) {
			waittime = nextmcast;
			timevalsub(&waittime, &now);
			if (waittime.tv_sec < 0) {
				timeout = 0;
			} else {
				timeout = TIME_TO_MSECS(waittime);
			}
			if (tracing & ACTION_BIT) {
				(void) fprintf(ftrace,
				    "poll until dynamic update in %d msec\n",
				    timeout);
				(void) fflush(ftrace);
			}
		} else {
			timeout = INFTIM;
		}

		if ((n = poll(poll_ifs, poll_ifs_num, timeout)) < 0) {
			if (errno == EINTR)
				continue;
			syslog(LOG_ERR, "main: poll: %m");
			exit(EXIT_FAILURE);
		}
		(void) sighold(SIGALRM);
		(void) sighold(SIGHUP);

		/*
		 * Poll timed out.
		 */
		if (n == 0) {
			if (needupdate) {
				TRACE_ACTION("send delayed dynamic update",
				    (struct rt_entry *)NULL);
				(void) gettimeofday(&now,
				    (struct timezone *)NULL);
				supplyall(&allrouters, RTS_CHANGED,
				    (struct interface *)NULL, _B_TRUE);
				lastmcast = now;
				needupdate = _B_FALSE;
				nextmcast.tv_sec = 0;
			}
			(void) sigrelse(SIGHUP);
			(void) sigrelse(SIGALRM);
			continue;
		}
		(void) gettimeofday(&now, (struct timezone *)NULL);

		for (i = 0; i < poll_ifs_num; i++) {
			/*
			 * This case should never happen.
			 */
			if (poll_ifs[i].revents & POLLERR) {
				syslog(LOG_ERR,
				    "main: poll returned a POLLERR event");
				continue;
			}
			if (poll_ifs[i].revents & POLLIN) {
				for (ifp = ifnet; ifp != NULL;
				    ifp = ifp->int_next) {
					if (poll_ifs[i].fd == ifp->int_sock)
						in_data(ifp);
				}
			}
		}
		(void) sigrelse(SIGHUP);
		(void) sigrelse(SIGALRM);
	}

	return (0);
}
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal *io_test = arg;
	struct cdevsw *csw;
	struct mtx *mtx;
	struct bio *bio;
	struct cdev *dev;
	void *buf;
	struct timeval t;
	uint64_t offset;
	uint32_t idx, io_completed = 0;
#if __FreeBSD_version >= 900017
	int ref;
#endif

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);

	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			csw = dev_refthread(dev, &ref);
#else
			csw = dev_refthread(dev);
#endif
		} else
			csw = dev->si_devsw;

		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			dev_relthread(dev, ref);
#else
			dev_relthread(dev);
#endif
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

	atomic_subtract_int(&io_test->td_active, 1);
	mb();

#if __FreeBSD_version >= 800000
	kthread_exit();
#else
	kthread_exit(0);
#endif
}
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
	ACPI_STATUS result;
	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
	int rv, tmo;
	struct timeval timeouttv, currenttv, timelefttv;
	AS_LOCK_DECL;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	if (cold)
		return_ACPI_STATUS (AE_OK);

#if 0
	if (as->as_units < Units && as->as_timeouts > 10) {
		kprintf("%s: semaphore %p too many timeouts, resetting\n",
		    __func__, as);
		AS_LOCK(as);
		as->as_units = as->as_maxunits;
		if (as->as_pendings)
			as->as_resetting = 1;
		as->as_timeouts = 0;
		wakeup(as);
		AS_UNLOCK(as);

		return_ACPI_STATUS (AE_TIME);
	}

	if (as->as_resetting)
		return_ACPI_STATUS (AE_TIME);
#endif

	/* a timeout of ACPI_WAIT_FOREVER means "forever" */
	if (Timeout == ACPI_WAIT_FOREVER) {
		tmo = 0;
		timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
		timeouttv.tv_usec = 0;
	} else {
		/* compute timeout using microseconds per tick */
		tmo = (Timeout * 1000) / (1000000 / hz);
		if (tmo <= 0)
			tmo = 1;
		timeouttv.tv_sec = Timeout / 1000;
		timeouttv.tv_usec = (Timeout % 1000) * 1000;
	}

	/* calculate timeout value in timeval */
	getmicrouptime(&currenttv);
	timevaladd(&timeouttv, &currenttv);

	AS_LOCK(as);
	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
	    "get %d units from semaphore %p (has %d), timeout %d\n",
	    Units, as, as->as_units, Timeout));
	for (;;) {
		if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
			result = AE_OK;
			break;
		}
		if (as->as_units >= Units) {
			as->as_units -= Units;
			result = AE_OK;
			break;
		}

		/* limit number of pending threads */
		if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
			result = AE_TIME;
			break;
		}

		/* if a timeout of zero is specified, return immediately */
		if (Timeout == 0) {
			result = AE_TIME;
			break;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
		    as, &as->as_spin, PCATCH, tmo));

		as->as_pendings++;

		if (acpi_semaphore_debug) {
			kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %jd\n",
			    __func__, (intmax_t)Timeout,
			    (intmax_t)as->as_pendings, as,
			    (intmax_t)AcpiOsGetThreadId());
		}

		rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);

		as->as_pendings--;

#if 0
		if (as->as_resetting) {
			/* semaphore reset, return immediately */
			if (as->as_pendings == 0) {
				as->as_resetting = 0;
			}
			result = AE_TIME;
			break;
		}
#endif

		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "ssleep(%d) returned %d\n", tmo, rv));
		if (rv == EWOULDBLOCK) {
			result = AE_TIME;
			break;
		}

		/* check if we have already waited long enough */
		timelefttv = timeouttv;
		getmicrouptime(&currenttv);
		timevalsub(&timelefttv, &currenttv);
		if (timelefttv.tv_sec < 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "await semaphore %p timeout\n", as));
			result = AE_TIME;
			break;
		}

		/* adjust timeout for the next sleep */
		tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
		    (1000000 / hz);
		if (tmo <= 0)
			tmo = 1;

		if (acpi_semaphore_debug) {
			kprintf("%s: Wakeup timeleft(%ju, %ju), tmo %ju, sem %p, thread %jd\n",
			    __func__,
			    (intmax_t)timelefttv.tv_sec,
			    (intmax_t)timelefttv.tv_usec,
			    (intmax_t)tmo, as,
			    (intmax_t)AcpiOsGetThreadId());
		}
	}

	if (acpi_semaphore_debug) {
		if (result == AE_TIME && Timeout > 0) {
			kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
			    __func__, Timeout, as->as_pendings, as);
		}
		if (ACPI_SUCCESS(result) &&
		    (as->as_timeouts > 0 || as->as_pendings > 0)) {
			kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %jd\n",
			    __func__, Units, as->as_units, as->as_pendings,
			    as, (intmax_t)AcpiOsGetThreadId());
		}
	}

	if (result == AE_TIME)
		as->as_timeouts++;
	else
		as->as_timeouts = 0;

	AS_UNLOCK(as);
	return_ACPI_STATUS (result);
#else
	return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}
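After every wakeup the wait loop re-derives the tick count for the next sleep from the time still remaining, clamping it to at least one tick. That conversion on its own (HZ here is a stand-in for the kernel's hz variable):

#include <stdio.h>
#include <sys/time.h>

#define HZ	100	/* hypothetical scheduler tick rate */

/* Recompute a sleep's tick count from the time still left, as the
 * semaphore wait loop above does after each wakeup. */
int
main(void)
{
	struct timeval timeleft = { 0, 250000 };	/* 250 ms remaining */
	int tmo;

	tmo = (timeleft.tv_sec * 1000000 + timeleft.tv_usec) /
	    (1000000 / HZ);
	if (tmo <= 0)
		tmo = 1;	/* never sleep for zero ticks */
	printf("re-arm for %d ticks\n", tmo);	/* 25 at HZ=100 */
	return (0);
}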
struct tsp *
readmsg(int type, char *machfrom, struct timeval *intvl,
    struct netinfo *netfrom)
{
	int length;
	fd_set ready;
	static struct tsplist *head = &msgslist;
	static struct tsplist *tail = &msgslist;
	static int msgcnt = 0;
	struct tsplist *prev;
	register struct netinfo *ntp;
	register struct tsplist *ptr;
	ssize_t n;

	if (trace) {
		fprintf(fd, "readmsg: looking for %s from %s, %s\n",
		    tsptype[type],
		    machfrom == NULL ? "ANY" : machfrom,
		    netfrom == NULL ? "ANYNET" : inet_ntoa(netfrom->net));
		if (head->p != 0) {
			length = 1;
			for (ptr = head->p; ptr != 0; ptr = ptr->p) {
				/* do not repeat the hundreds of messages */
				if (++length > 3) {
					if (ptr == tail) {
						fprintf(fd,
						    "\t ...%d skipped\n",
						    length);
					} else {
						continue;
					}
				}
				fprintf(fd, length > 1 ? "\t" : "queue:\t");
				print(&ptr->info, &ptr->addr);
			}
		}
	}

	ptr = head->p;
	prev = head;

	/*
	 * Look for the requested message scanning through the
	 * linked list. If found, return it and free the space
	 */
	while (ptr != NULL) {
		if (LOOKAT(ptr->info, type, machfrom, netfrom, ptr->addr)) {
again:
			msgin = ptr->info;
			from = ptr->addr;
			from_when = ptr->when;
			prev->p = ptr->p;
			if (ptr == tail)
				tail = prev;
			free((char *)ptr);

			fromnet = NULL;
			if (netfrom == NULL)
				for (ntp = nettab; ntp != NULL;
				    ntp = ntp->next) {
					if ((ntp->mask & from.sin_addr.s_addr)
					    == ntp->net.s_addr) {
						fromnet = ntp;
						break;
					}
				}
			else
				fromnet = netfrom;

			if (trace) {
				fprintf(fd, "readmsg: found ");
				print(&msgin, &from);
			}

			/*
			 * The protocol can get far behind.  When it does,
			 * it gets hopelessly confused.  So delete duplicate
			 * messages.
			 */
			for (ptr = prev; (ptr = ptr->p) != NULL; prev = ptr) {
				if (ptr->addr.sin_addr.s_addr
				    == from.sin_addr.s_addr
				    && ptr->info.tsp_type == msgin.tsp_type) {
					if (trace)
						fprintf(fd, "\tdup ");
					goto again;
				}
			}
			msgcnt--;
			return (&msgin);
		} else {
			prev = ptr;
			ptr = ptr->p;
		}
	}

	/*
	 * If the message was not in the linked list, it may still be
	 * coming from the network. Set the timer and wait
	 * on a select to read the next incoming message: if it is the
	 * right one, return it, otherwise insert it in the linked list.
	 */
	(void)gettimeofday(&rtout, NULL);
	timevaladd(&rtout, intvl);
	FD_ZERO(&ready);
	for (;;) {
		(void)gettimeofday(&rtime, NULL);
		timevalsub(&rwait, &rtout, &rtime);
		if (rwait.tv_sec < 0)
			rwait.tv_sec = rwait.tv_usec = 0;
		else if (rwait.tv_sec == 0
		    && rwait.tv_usec < 1000000/CLK_TCK)
			rwait.tv_usec = 1000000/CLK_TCK;

		if (trace) {
			fprintf(fd, "readmsg: wait %jd.%6ld at %s\n",
			    (intmax_t)rwait.tv_sec, rwait.tv_usec, date());
			/*
			 * Notice a full disk, as we flush trace info.
			 * It is better to flush periodically than at
			 * every line because the tracing consists of
			 * bursts of many lines.  Without care, tracing
			 * slows down the code enough to break the
			 * protocol.
			 */
			if (rwait.tv_sec != 0
			    && EOF == fflush(fd))
				traceoff("Tracing ended for cause at %s\n");
		}

		FD_SET(sock, &ready);
		if (!select(sock+1, &ready, (fd_set *)0, (fd_set *)0,
		    &rwait)) {
			if (rwait.tv_sec == 0 && rwait.tv_usec == 0)
				return (0);
			continue;
		}
		length = sizeof(from);
		if ((n = recvfrom(sock, (char *)&msgin, sizeof(struct tsp), 0,
		    (struct sockaddr *)&from, &length)) < 0) {
			syslog(LOG_ERR, "recvfrom: %m");
			exit(1);
		}
		/*
		 * The 4.3BSD protocol spec had a 32-byte tsp_name field, and
		 * this is still OS-dependent.  Demand that the packet is at
		 * least long enough to hold a 4.3BSD packet.
		 */
		if (n < (ssize_t)(sizeof(struct tsp) - MAXHOSTNAMELEN + 32)) {
			syslog(LOG_NOTICE,
			    "short packet (%zd/%zu bytes) from %s",
			    n, sizeof(struct tsp) - MAXHOSTNAMELEN + 32,
			    inet_ntoa(from.sin_addr));
			continue;
		}
		(void)gettimeofday(&from_when, NULL);
		bytehostorder(&msgin);

		if (msgin.tsp_vers > TSPVERSION) {
			if (trace) {
				fprintf(fd, "readmsg: version mismatch\n");
				/* should do a dump of the packet */
			}
			continue;
		}

		if (memchr(msgin.tsp_name, '\0',
		    sizeof msgin.tsp_name) == NULL) {
			syslog(LOG_NOTICE,
			    "hostname field not NUL terminated "
			    "in packet from %s", inet_ntoa(from.sin_addr));
			continue;
		}

		fromnet = NULL;
		for (ntp = nettab; ntp != NULL; ntp = ntp->next)
			if ((ntp->mask & from.sin_addr.s_addr)
			    == ntp->net.s_addr) {
				fromnet = ntp;
				break;
			}

		/*
		 * drop packets from nets we are ignoring permanently
		 */
		if (fromnet == NULL) {
			/*
			 * The following messages may originate on
			 * this host with an ignored network address
			 */
			if (msgin.tsp_type != TSP_TRACEON
			    && msgin.tsp_type != TSP_SETDATE
			    && msgin.tsp_type != TSP_MSITE
			    && msgin.tsp_type != TSP_TEST
			    && msgin.tsp_type != TSP_TRACEOFF) {
				if (trace) {
					fprintf(fd,
					    "readmsg: discard null net ");
					print(&msgin, &from);
				}
				continue;
			}
		}

		/*
		 * Throw away messages coming from this machine,
		 * unless they are of some particular type.
		 * This gets rid of broadcast messages and reduces
		 * master processing time.
		 */
		if (!strcmp(msgin.tsp_name, hostname)
		    && msgin.tsp_type != TSP_SETDATE
		    && msgin.tsp_type != TSP_TEST
		    && msgin.tsp_type != TSP_MSITE
		    && msgin.tsp_type != TSP_TRACEON
		    && msgin.tsp_type != TSP_TRACEOFF
		    && msgin.tsp_type != TSP_LOOP) {
			if (trace) {
				fprintf(fd, "readmsg: discard own ");
				print(&msgin, &from);
			}
			continue;
		}

		/*
		 * Send acknowledgements here; this is faster and
		 * avoids deadlocks that would occur if acks were
		 * sent from a higher level routine.  Different
		 * acknowledgements are necessary, depending on
		 * status.
		 */
		if (fromnet == NULL)		/* do not de-reference 0 */
			ignoreack();
		else if (fromnet->status == MASTER)
			masterack();
		else if (fromnet->status == SLAVE)
			slaveack();
		else
			ignoreack();

		if (LOOKAT(msgin, type, machfrom, netfrom, from)) {
			if (trace) {
				fprintf(fd, "readmsg: ");
				print(&msgin, &from);
			}
			return (&msgin);
		} else if (++msgcnt > NHOSTS*3) {
			/*
			 * The protocol gets hopelessly confused if it gets
			 * too far behind.  However, it seems able to recover
			 * from all cases of lost packets.  Therefore, if we
			 * are swamped, throw everything away.
			 */
			if (trace)
				fprintf(fd, "readmsg: discarding %d msgs\n",
				    msgcnt);
			msgcnt = 0;
			while ((ptr = head->p) != NULL) {
				head->p = ptr->p;
				free((char *)ptr);
			}
			tail = head;
		} else {
			tail->p = (struct tsplist *)
			    malloc(sizeof(struct tsplist));
			tail = tail->p;
			tail->p = NULL;
			tail->info = msgin;
			tail->addr = from;
			/* timestamp msgs so SETTIMEs are correct */
			tail->when = from_when;
		}
	}
}