Example #1
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
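The step works by rewriting the kernel's estimate of when it booted: UTC is maintained as boottime plus uptime, so setting boottime to the requested UTC minus the current uptime steps the wall clock without disturbing the uptime counters. A minimal sketch of that arithmetic in plain struct timespec terms (hypothetical helper, not the kernel routine above):

#include <time.h>

/*
 * Sketch: step UTC by adjusting the boot time estimate.
 * utc = boottime + uptime  =>  boottime = desired_utc - uptime.
 */
static struct timespec
step_boottime(struct timespec desired_utc, struct timespec uptime)
{
	struct timespec boot;

	boot.tv_sec = desired_utc.tv_sec - uptime.tv_sec;
	boot.tv_nsec = desired_utc.tv_nsec - uptime.tv_nsec;
	if (boot.tv_nsec < 0) {
		boot.tv_nsec += 1000000000L;	/* borrow one second */
		boot.tv_sec--;
	}
	return boot;
}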
Example #2
void
ffclock_microdifftime(ffcounter ffdelta, struct timeval *tvp)
{
	struct bintime bt;

	ffclock_difftime(ffdelta, &bt, NULL);
	bintime2timeval(&bt, tvp);
}
Example #3
void
ffclock_microuptime(struct timeval *tvp)
{
	struct bintime bt;

	ffclock_abstime(NULL, &bt, NULL, FFCLOCK_LERP | FFCLOCK_UPTIME);
	bintime2timeval(&bt, tvp);
}
Example #4
void
microtime(struct timeval *tvp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timeval(&bt, tvp);
}
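Every example on this page funnels through bintime2timeval(). A struct bintime holds whole seconds plus a 64-bit binary fraction of a second; the conversion keeps the seconds and scales the top 32 bits of the fraction to microseconds. A self-contained sketch of the idea, using stand-in types so it compiles outside the kernel (the real inline lives in sys/time.h):

#include <stdint.h>

struct bintime_sk {		/* stands in for struct bintime */
	int64_t  sec;		/* whole seconds */
	uint64_t frac;		/* fraction of a second, units of 2^-64 s */
};

struct timeval_sk {		/* stands in for struct timeval */
	int64_t sec;
	int64_t usec;
};

static inline void
bintime2timeval_sk(const struct bintime_sk *bt, struct timeval_sk *tv)
{
	tv->sec = bt->sec;
	/*
	 * usec = frac * 10^6 / 2^64.  Using only the top 32 fraction
	 * bits keeps the multiply within 64 bits while retaining
	 * microsecond precision.
	 */
	tv->usec = ((uint64_t)1000000 * (uint32_t)(bt->frac >> 32)) >> 32;
}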
Example #5
void
ffclock_getmicrotime(struct timeval *tvp)
{
	struct bintime bt;

	ffclock_abstime(NULL, &bt, NULL,
	    FFCLOCK_LERP | FFCLOCK_LEAPSEC | FFCLOCK_FAST);
	bintime2timeval(&bt, tvp);
}
Example #6
void
microuptime(struct timeval *tvp)
{
	struct bintime bt;

	nmicrouptime++;
	binuptime(&bt);
	bintime2timeval(&bt, tvp);
}
Example #7
void
getmicrouptime(struct timeval *tvp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timeval(&th->th_offset, tvp);
	} while (gen == 0 || gen != th->th_generation);
}
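The do/while loop is the reader half of a lockless generation protocol: tc_windup() (Example #11) zeroes th_generation while it rewrites a timehands slot and publishes a fresh non-zero generation afterwards, so a reader retries if it saw 0 (update in progress) or if the generation changed underneath it. A toy stand-alone version of both halves (hypothetical names; the memory barriers a modern SMP implementation would add are elided, as they are in these older snippets):

#include <stdint.h>

struct timehands_sk {			/* toy stand-in for struct timehands */
	volatile unsigned th_generation;
	uint64_t th_value;		/* the datum being published */
};

static struct timehands_sk ths;

/* Writer, cf. tc_windup(): invalidate, rewrite, revalidate. */
static void
publish(uint64_t v)
{
	static unsigned ogen;

	ths.th_generation = 0;		/* readers seeing 0 will retry */
	ths.th_value = v;
	if (++ogen == 0)
		ogen = 1;		/* 0 is reserved for "in progress" */
	ths.th_generation = ogen;
}

/* Reader, cf. getmicrouptime(): snapshot, then verify. */
static uint64_t
snapshot(void)
{
	unsigned gen;
	uint64_t v;

	do {
		gen = ths.th_generation;
		v = ths.th_value;
	} while (gen == 0 || gen != ths.th_generation);
	return v;
}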
Example #8
void
timebase(struct timeval *tv)
{
	void *p;
	struct bintime timebasebin;

	p = lookup("timebasebin");
	if (!p)
		return;
	snarf(p, &timebasebin, sizeof(timebasebin));
	bintime2timeval(&timebasebin, tv);
}
Example #9
int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
    struct bintime bt;
    int error;

    if (tz != NULL)
        return (ENOSYS);
    if (tk == NULL) {
        error = __vdso_gettimekeep(&tk);
        if (error != 0 || tk == NULL)
            return (ENOSYS);
    }
    if (tk->tk_ver != VDSO_TK_VER_CURR)
        return (ENOSYS);
    error = binuptime(&bt, tk, 1);
    if (error != 0)
        return (error);
    bintime2timeval(&bt, tv);
    return (0);
}
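Here tk is a file-static pointer to the shared timekeep area, cached on first use by __vdso_gettimekeep(), and every failure mode is reported as ENOSYS, which the caller is expected to read as "fall back to the ordinary syscall". A hypothetical caller-side wrapper, to show the intended usage (SYS_gettimeofday is the raw syscall number; this is an illustration, not the actual libc plumbing):

#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>

int __vdso_gettimeofday(struct timeval *, struct timezone *);

/* Hypothetical wrapper: try the fast userspace path, then the syscall. */
int
gettimeofday_fallback(struct timeval *tv)
{
	if (__vdso_gettimeofday(tv, NULL) == 0)
		return 0;
	return syscall(SYS_gettimeofday, tv, NULL);
}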
Example #10
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %ld.%09ld to %ld.%09ld\n",
		    (long)ts2.tv_sec, ts2.tv_nsec,
		    (long)ts->tv_sec, ts->tv_nsec);
	}
}
Example #11
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
#ifdef leapsecs
	time_t t;
#endif

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(&th->th_adjustment, &bt.sec);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}
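The 2199/512 trick near the end of tc_windup() is easy to verify: dividing th_adjustment by 1024, multiplying by 2199 and then doubling the final scale multiplies the adjustment by 2199/512 = 4.294921875, against the exact 2^32/10^9 = 4.294967296. That is an undercompensation of roughly 10.6 PPM of th_adjustment, and at the +/- 5000PPM limit about 0.05 PPM absolute, the figure quoted in the comment. A standalone check:

#include <stdio.h>

int
main(void)
{
	double exact = 4294967296.0 / 1e9;	/* 2^32 / 10^9 */
	double approx = 2199.0 / 512.0;		/* (a / 1024) * 2199, then * 2 */
	double rel = (exact - approx) / exact;

	printf("exact %.9f vs approx %.9f\n", exact, approx);
	printf("undercompensation: %.2f PPM of th_adjustment\n", rel * 1e6);
	printf("at 5000PPM adjustment: %.3f PPM absolute\n", rel * 5000.0);
	return 0;
}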
Example #12
struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
again:
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = (int) (size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			if (errno == ENOMEM) {
				goto again;
			}
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int) (size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			struct timeval tv;

			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);

			if (kl == NULL) {
				_kvm_syserr(kd, NULL,
					"kvm_getlwps() failed on process %u\n",
					kp->kp_proc.p_pid);
				if (nlwps == 0)
					return NULL;
				else
					continue;
			}

			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
			kp2p->p_ru = 0;
#else
			kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
#endif

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = 0;
			bintime2timeval(&kp->kp_proc.p_rtime, &tv);
			kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
			kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
			kp2p->p_cpticks = kl[0].l_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigpend.sp_set,
			    sizeof(ki_sigset_t));
			memset(&kp2p->p_sigmask, 0,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kl[0].l_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_priority;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
			kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size
			    / kd->nbpg;
			/* Adjust mapped size */
			kp2p->p_vm_msize =
			    (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
			    kp->kp_eproc.e_vm.vm_issize +
			    kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
Example #13
int
svr4_32_trap(int type, struct lwp *l)
{
	int n;
	struct proc *p = l->l_proc;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct timespec ts;
	struct timeval tv;
	struct timeval rtime, stime;
	uint64_t tm;

	if (p->p_emul != &emul_svr4_32)
		return 0;

	switch (type) {
	case T_SVR4_GETCC:
		uprintf("T_SVR4_GETCC\n");
		break;

	case T_SVR4_SETCC:
		uprintf("T_SVR4_SETCC\n");
		break;

	case T_SVR4_GETPSR:
		tf->tf_out[0] = TSTATECCR_TO_PSR(tf->tf_tstate);
		break;

	case T_SVR4_SETPSR:
		uprintf("T_SVR4_SETPSR\n");
		break;

	case T_SVR4_GETHRTIME:
		/*
		 * This is like gethrtime(3), returning the time expressed
		 * in nanoseconds since an arbitrary time in the past and
		 * guaranteed to be monotonically increasing, which we
		 * obtain from nanouptime(9).
		 */
		nanouptime(&ts);

		tm = ts.tv_nsec;
		tm += ts.tv_sec * (uint64_t)1000000000u;
		tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
		tf->tf_out[1] = tm & 0x00000000ffffffffUL;
		break;

	case T_SVR4_GETHRVTIME:
		/*
		 * This is like gethrvtime(3), returning the LWP's (now:
		 * proc's) virtual time expressed in nanoseconds. It is
		 * supposedly guaranteed to be monotonically increasing, but
		 * for now using the process's real time augmented with its
		 * current runtime is the best we can do.
		 */
		microtime(&tv);
		bintime2timeval(&l->l_rtime, &rtime);
		bintime2timeval(&l->l_stime, &stime);

		tm = (rtime.tv_sec + tv.tv_sec - stime.tv_sec) * 1000000ull;
		tm += rtime.tv_usec + tv.tv_usec;
		tm -= stime.tv_usec;
		tm *= 1000u;
		tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
		tf->tf_out[1] = tm & 0x00000000ffffffffUL;
		break;

	case T_SVR4_GETHRESTIME:
		/* I assume this is like gettimeofday(3) */
		nanotime(&ts);
		tf->tf_out[0] = ts.tv_sec;
		tf->tf_out[1] = ts.tv_nsec;
		break;

	default:
		return 0;
	}

	ADVANCE;
	return 1;
}
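Both GETHRTIME and GETHRVTIME widen a seconds/sub-seconds pair into a single 64-bit nanosecond count and hand it back split across two 32-bit out registers. The same arithmetic as a standalone sketch (hypothetical helper; tf_out[0] gets the high half, tf_out[1] the low half):

#include <stdint.h>
#include <time.h>

/*
 * Widen a timespec to nanoseconds and split the result into the two
 * 32-bit halves the trap handler stores in tf_out[0] and tf_out[1].
 */
static void
ts_to_hilo(const struct timespec *ts, uint32_t *hi, uint32_t *lo)
{
	uint64_t tm;

	tm = (uint64_t)ts->tv_sec * 1000000000u + (uint64_t)ts->tv_nsec;
	*hi = (uint32_t)(tm >> 32);
	*lo = (uint32_t)(tm & 0xffffffffu);
}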