Example #1
static su_time64_t mono64(void)
{
#if HAVE_CLOCK_GETTIME && CLOCK_MONOTONIC
  {
    struct timespec tv;

    if (clock_gettime(CLOCK_MONOTONIC, &tv) == 0)
      return (su_time64_t)tv.tv_sec * E9 + tv.tv_nsec;
  }
#endif

#if HAVE_NANOUPTIME
  {
    struct timespec tv;

    nanouptime(&tv);

    return (su_time64_t)tv.tv_sec * E9 + tv.tv_nsec;
  }
#elif HAVE_MICROUPTIME
  {
    struct timeval tv;

    microuptime(&tv);

    return (su_time64_t)tv.tv_sec * E9 + tv.tv_usec * 1000;
  }
#endif

  return now64();
}
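All three branches above normalize to a 64-bit nanosecond count, so a caller can time an interval by subtracting two readings. A minimal sketch of such a caller, relying only on the su_time64_t type and mono64() shown above (the work() callback is a hypothetical placeholder):

/* Sketch: measure how long a callback takes, in nanoseconds.
 * Depends only on mono64() returning a monotonic nanosecond count. */
static su_time64_t timed_call(void (*work)(void))
{
  su_time64_t start = mono64();

  work();

  return mono64() - start;  /* elapsed nanoseconds */
}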
Example #2
RTDECL(uint64_t) RTTimeNanoTS(void)
{
    struct timespec tsp;
    nanouptime(&tsp);
    return tsp.tv_sec * RT_NS_1SEC_64
         + tsp.tv_nsec;
}
Example #3
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	cpu_switchto(NULL, sched_chooseproc());
}
Example #4
RTDECL(uint64_t) RTTimeNanoTS(void)
{
    struct timespec tsp;
    nanouptime(&tsp);
    return tsp.tv_sec * UINT64_C(1000000000)
         + tsp.tv_nsec;
}
Example #5
/*
 * The CPU ends up here when it's ready to run
 * XXX should share some of this with init386 in machdep.c
 * for now it jumps into an infinite loop.
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_idt();
	lapic_enable();
	lapic_startclock();
	lapic_set_lvt();
	gdt_init_cpu(ci);

	lldt(0);

	npxinit(ci);

	cpu_init(ci);

	/* Re-initialise memory range handling on AP */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->initAP(&mem_range_softc);

	s = splhigh();		/* XXX prevent softints from running here.. */
	lapic_tpr = 0;
	enable_intr();
	if (mp_verbose)
		printf("%s: CPU at apid %ld running\n",
		    ci->ci_dev.dv_xname, ci->ci_cpuid);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
Example #6
/*
 * Start the real-time and statistics clocks. Leave stathz 0 since there
 * are no other timers available.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

#ifdef MULTIPROCESSOR
	if (!CPU_IS_PRIMARY(ci)) {
		s = splhigh();
		nanouptime(&ci->ci_schedstate.spc_runtime);
		splx(s);

		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}
#endif

	/* Start the clock. */
	s = splclock();
	ci->ci_cpu_counter_interval =
	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);
	ci->ci_clock_started++;
	splx(s);
}
Example #7
int
fuse_isvalid_attr(struct vnode *vp)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct timespec uptsp;

	nanouptime(&uptsp);
	return fuse_timespec_cmp(&uptsp, &fvdat->cached_attrs_valid, <=);
}
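fuse_timespec_cmp() is not shown here; it presumably follows the classic BSD timespeccmp() pattern, comparing seconds first and nanoseconds only on a tie. A sketch of that pattern (an assumption for illustration, not the actual FUSE macro):

/* Sketch of a timespec comparison macro in the BSD timespeccmp() style:
 * compare tv_sec first, and fall back to tv_nsec when the seconds match. */
#define TIMESPEC_CMP(a, b, CMP)				\
	(((a)->tv_sec == (b)->tv_sec) ?			\
	    ((a)->tv_nsec CMP (b)->tv_nsec) :		\
	    ((a)->tv_sec CMP (b)->tv_sec))

With that shape, the call above returns true as long as the current uptime is still at or before the cached attributes' expiry time.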
Example #8
/*
 * DTrace needs a high resolution time function which can
 * be called from a probe context and guaranteed not to have
 * instrumented with probes itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime()
{
	struct      timespec curtime;

	nanouptime(&curtime);

	return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);

}
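The multiplication above is performed in unsigned long arithmetic; that is fine on the LP64 kernels this code targets, but if unsigned long were 32 bits wide it would wrap after only a few seconds of uptime. A defensively widened variant for illustration (the _wide name is made up for this sketch):

/* Sketch: the same computation with the multiplication forced into 64-bit
 * arithmetic, so the result is correct regardless of the width of
 * unsigned long on the target. */
uint64_t
dtrace_gethrtime_wide(void)
{
	struct timespec curtime;

	nanouptime(&curtime);

	return ((uint64_t)curtime.tv_sec * 1000000000ULL + curtime.tv_nsec);
}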
Example #9
/* -----------------------------------------------------------------------------
called from l2tp_rfc when data are present
----------------------------------------------------------------------------- */
int l2tp_wan_input(struct ppp_link *link, mbuf_t m)
{
	struct timespec tv;	
    
	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);
	
    link->lk_ipackets++;
    link->lk_ibytes += mbuf_pkthdr_len(m);
	nanouptime(&tv);
	link->lk_last_recv = tv.tv_sec;
    ppp_link_input(link, m);	
    return 0;
}
Example #10
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL)
		return;

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0)
		return;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_DRV_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0)
			continue;

		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp, m->m_pkthdr.pkt_timestamp,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL)
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
Example #11
/*
 * MPSAFE
 */
int
kern_clock_gettime(clockid_t clock_id, struct timespec *ats)
{
	int error = 0;

	switch(clock_id) {
	case CLOCK_REALTIME:
		nanotime(ats);
		break;
	case CLOCK_MONOTONIC:
		nanouptime(ats);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
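Seen from userland, the switch above means CLOCK_MONOTONIC is backed by nanouptime() (time since boot) while CLOCK_REALTIME is backed by nanotime() (wall-clock time). A minimal portable caller that exercises both clock IDs:

#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec up, wall;

	/* CLOCK_MONOTONIC -> nanouptime(), CLOCK_REALTIME -> nanotime() */
	if (clock_gettime(CLOCK_MONOTONIC, &up) != 0 ||
	    clock_gettime(CLOCK_REALTIME, &wall) != 0)
		return 1;

	printf("uptime:    %lld.%09ld\n", (long long)up.tv_sec, up.tv_nsec);
	printf("real time: %lld.%09ld\n", (long long)wall.tv_sec, wall.tv_nsec);
	return 0;
}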
Example #12
/*
 * This sets the current real time of day.  Timespecs are in seconds and
 * nanoseconds.  We do not mess with gd_time_seconds and gd_cpuclock_base,
 * instead we adjust basetime so basetime + gd_* results in the current
 * time of day.  This way the gd_* fields are guaranteed to represent
 * a monotonically increasing 'uptime' value.
 *
 * When set_timeofday() is called from userland, the system call forces it
 * onto cpu #0 since only cpu #0 can update basetime_index.
 */
void
set_timeofday(struct timespec *ts)
{
	struct timespec *nbt;
	int ni;

	/*
	 * XXX SMP / non-atomic basetime updates
	 */
	crit_enter();
	ni = (basetime_index + 1) & BASETIME_ARYMASK;
	nbt = &basetime[ni];
	nanouptime(nbt);
	nbt->tv_sec = ts->tv_sec - nbt->tv_sec;
	nbt->tv_nsec = ts->tv_nsec - nbt->tv_nsec;
	if (nbt->tv_nsec < 0) {
	    nbt->tv_nsec += 1000000000;
	    --nbt->tv_sec;
	}

	/*
	 * Note that basetime diverges from boottime as the clock drift is
	 * compensated for, so we cannot do away with boottime.  When setting
	 * the absolute time of day the drift is 0 (for an instant) and we
	 * can simply assign boottime to basetime.  
	 *
	 * Note that nanouptime() is based on gd_time_seconds which is drift
 * compensated up to a point (it is guaranteed to remain monotonically
	 * increasing).  gd_time_seconds is thus our best uptime guess and
	 * suitable for use in the boottime calculation.  It is already taken
	 * into account in the basetime calculation above.
	 */
	boottime.tv_sec = nbt->tv_sec;
	ntp_delta = 0;

	/*
	 * We now have a new basetime, make sure all other cpus have it,
	 * then update the index.
	 */
	cpu_sfence();
	basetime_index = ni;

	crit_exit();
}
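The arithmetic above sets basetime to "requested wall time minus current uptime", borrowing a second when the nanosecond field goes negative, so that basetime plus uptime reproduces the requested time of day. A standalone sketch with made-up numbers to illustrate the borrow:

#include <stdio.h>

struct ts { long long sec; long nsec; };

/* Same subtraction-with-borrow that set_timeofday() performs on nbt. */
static struct ts
basetime_from(struct ts wall, struct ts uptime)
{
	struct ts bt = { wall.sec - uptime.sec, wall.nsec - uptime.nsec };

	if (bt.nsec < 0) {
		bt.nsec += 1000000000;
		--bt.sec;
	}
	return bt;
}

int
main(void)
{
	struct ts wall = { 1700000000, 250000000 };	/* requested time of day */
	struct ts up   = { 100, 750000000 };		/* current nanouptime() */
	struct ts bt   = basetime_from(wall, up);

	/* prints: basetime = 1699999899.500000000 */
	printf("basetime = %lld.%09ld\n", bt.sec, bt.nsec);
	return 0;
}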
Example #13
/* -----------------------------------------------------------------------------
This gets called at splnet from if_ppp.c at various times
when there is data ready to be sent
----------------------------------------------------------------------------- */
int l2tp_wan_output(struct ppp_link *link, mbuf_t m)
{
    struct l2tp_wan 	*wan = (struct l2tp_wan *)link;
    u_int32_t		len = mbuf_pkthdr_len(m);	// take it now, as output will change the mbuf
    int 		err;
	struct timespec tv;	
	
	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);
    
    if (err = l2tp_rfc_output(wan->rfc, m, 0)) {
        link->lk_oerrors++;
        return err;
    }

    link->lk_opackets++;
    link->lk_obytes += len;
	nanouptime(&tv);
	link->lk_last_xmit = tv.tv_sec;
    return 0;
}
Example #14
/* -----------------------------------------------------------------------------
This gets called at splnet from if_ppp.c at various times
when there is data ready to be sent
----------------------------------------------------------------------------- */
int pppoe_wan_output(struct ppp_link *link, mbuf_t m)
{
    struct pppoe_wan 	*wan = (struct pppoe_wan *)link;
    int			err;
	struct timespec tv;	
    
	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);
	
    if (err = pppoe_rfc_output(wan->rfc, m)) {
        link->lk_oerrors++;
        return err;
    }

    link->lk_opackets++;
    link->lk_obytes += mbuf_pkthdr_len(m);
    //getmicrotime(link->lk_last_xmit);
	nanouptime(&tv);
	link->lk_last_xmit = tv.tv_sec;
    return 0;
}
Example #15
static void
sfb_calc_hinterval(struct sfb *sp, u_int64_t *t)
{
	u_int64_t hinterval = 0;
	struct timespec now;

	if (t != NULL) {
		/*
		 * TODO [email protected]: use dq_avg to derive hinterval.
		 */
		hinterval = *t;
	}

	if (sfb_hinterval != 0)
		hinterval = sfb_hinterval;
	else if (t == NULL || hinterval == 0)
		hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC);

	net_nsectimer(&hinterval, &sp->sfb_hinterval);

	nanouptime(&now);
	net_timeradd(&now, &sp->sfb_hinterval, &sp->sfb_nextreset);
}
Example #16
xtime_t
gethrxtime (void)
{
# if HAVE_NANOUPTIME
  {
    struct timespec ts;
    nanouptime (&ts);
    return xtime_make (ts.tv_sec, ts.tv_nsec);
  }
# else

#  if defined CLOCK_MONOTONIC && HAVE_CLOCK_GETTIME
  {
    struct timespec ts;
    if (clock_gettime (CLOCK_MONOTONIC, &ts) == 0)
      return xtime_make (ts.tv_sec, ts.tv_nsec);
  }
#  endif

#  if HAVE_MICROUPTIME
  {
    struct timeval tv;
    microuptime (&tv);
    return xtime_make (tv.tv_sec, 1000 * tv.tv_usec);
  }

#  else
  /* No monotonically increasing clocks are available; fall back on a
     clock that might jump backwards, since it's the best we can do.  */
  {
    struct timespec ts;
    gettime (&ts);
    return xtime_make (ts.tv_sec, ts.tv_nsec);
  }
#  endif
# endif
}
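An xtime_t packs seconds and nanoseconds into a single value; assuming gnulib's usual XTIME_PRECISION of 1000000000, it simply counts nanoseconds and can be split back apart with a division and a remainder. A sketch under that assumption (print_hrxtime is a made-up helper name):

#include <stdio.h>
#include "xtime.h"

/* Sketch: print a gethrxtime () result as seconds.nanoseconds,
   assuming XTIME_PRECISION == 1000000000 so xtime_t counts nanoseconds.  */
static void
print_hrxtime (xtime_t t)
{
  printf ("%lld.%09lld\n",
          (long long) (t / XTIME_PRECISION),
          (long long) (t % XTIME_PRECISION));
}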
Example #17
static int
futex_copyin_timeout(int op, struct l_timespec *luts, int clockrt,
    struct timespec *ts)
{
	struct l_timespec lts;
	struct timespec kts;
	int error;

	error = copyin(luts, &lts, sizeof(lts));
	if (error)
		return (error);

	error = linux_to_native_timespec(ts, &lts);
	if (error)
		return (error);
	if (clockrt) {
		nanotime(&kts);
		timespecsub(ts, &kts);
	} else if (op == LINUX_FUTEX_WAIT_BITSET) {
		nanouptime(&kts);
		timespecsub(ts, &kts);
	}
	return (error);
}
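The two-argument timespecsub() used above is the older BSD in-place form: it subtracts the second timespec from the first and borrows a second when the nanoseconds go negative, turning the absolute deadline copied in from userland into a timeout relative to the chosen clock. A sketch of that form (an assumption about the macro, shown for illustration only):

/* Sketch of the classic two-argument BSD timespecsub(): *vsp -= *usp,
 * normalizing tv_nsec back into [0, 1000000000). */
#define TIMESPECSUB(vsp, usp)						\
	do {								\
		(vsp)->tv_sec -= (usp)->tv_sec;				\
		(vsp)->tv_nsec -= (usp)->tv_nsec;			\
		if ((vsp)->tv_nsec < 0) {				\
			(vsp)->tv_sec--;				\
			(vsp)->tv_nsec += 1000000000L;			\
		}							\
	} while (0)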
Example #18
int
svr4_32_trap(int type, struct lwp *l)
{
	int n;
	struct proc *p = l->l_proc;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct timespec ts;
	struct timeval tv;
	struct timeval rtime, stime;
	uint64_t tm;

	if (p->p_emul != &emul_svr4_32)
		return 0;

	switch (type) {
	case T_SVR4_GETCC:
		uprintf("T_SVR4_GETCC\n");
		break;

	case T_SVR4_SETCC:
		uprintf("T_SVR4_SETCC\n");
		break;

	case T_SVR4_GETPSR:
		tf->tf_out[0] = TSTATECCR_TO_PSR(tf->tf_tstate);
		break;

	case T_SVR4_SETPSR:
		uprintf("T_SVR4_SETPSR\n");
		break;

	case T_SVR4_GETHRTIME:
		/*
		 * This is like gethrtime(3), returning the time expressed
		 * in nanoseconds since an arbitrary time in the past and
		 * guaranteed to be monotonically increasing, which we
		 * obtain from nanouptime(9).
		 */
		nanouptime(&ts);

		tm = ts.tv_nsec;
		tm += ts.tv_sec * (uint64_t)1000000000u;
		tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
		tf->tf_out[1] = tm & 0x00000000ffffffffUL;
		break;

	case T_SVR4_GETHRVTIME:
		/*
	 * This is like gethrvtime(3), returning the LWP's (now:
		 * proc's) virtual time expressed in nanoseconds. It is
		 * supposedly guaranteed to be monotonically increasing, but
		 * for now using the process's real time augmented with its
		 * current runtime is the best we can do.
		 */
		microtime(&tv);
		bintime2timeval(&l->l_rtime, &rtime);
		bintime2timeval(&l->l_stime, &stime);

		tm = (rtime.tv_sec + tv.tv_sec - stime.tv_sec) * 1000000ull;
		tm += rtime.tv_usec + tv.tv_usec;
		tm -= stime.tv_usec;
		tm *= 1000u;
		tf->tf_out[0] = (tm >> 32) & 0x00000000ffffffffUL;
		tf->tf_out[1] = tm & 0x00000000ffffffffUL;
		break;

	case T_SVR4_GETHRESTIME:
		/* I assume this is like gettimeofday(3) */
		nanotime(&ts);
		tf->tf_out[0] = ts.tv_sec;
		tf->tf_out[1] = ts.tv_nsec;
		break;

	default:
		return 0;
	}

	ADVANCE;
	return 1;
}
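In the T_SVR4_GETHRTIME and T_SVR4_GETHRVTIME cases the 64-bit nanosecond value is returned split across two 32-bit trap-frame outputs (%o0 carries the high word, %o1 the low word). A sketch of how a 32-bit consumer would put the halves back together (illustrative only):

#include <stdint.h>

/* Sketch: recombine the two 32-bit register halves produced above
 * into the original 64-bit nanosecond count. */
static inline uint64_t
hrtime_from_outs(uint32_t out0_hi, uint32_t out1_lo)
{
	return ((uint64_t)out0_hi << 32) | out1_lo;
}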