Example #1
void
nanotime(struct timespec *tsp)
{
	struct bintime bt;

	bintime(&bt);
	bintime2timespec(&bt, tsp);
}
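Every example on this page ends by handing a struct bintime (a 64-bit seconds count plus a 64-bit binary fraction of a second) to bintime2timespec(). For reference, the conversion is a small inline; the following is a sketch of the FreeBSD <sys/time.h> definition, shown here only so the later examples are easier to follow:

static __inline void
bintime2timespec(const struct bintime *bt, struct timespec *ts)
{

	ts->tv_sec = bt->sec;
	ts->tv_nsec = ((uint64_t)1000000000 *
	    (uint32_t)(bt->frac >> 32)) >> 32;
}

Only the top 32 bits of the fraction are used, so the nanosecond field is truncated, never rounded up to a full second.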
Example #2
void
ffclock_getnanouptime(struct timespec *tsp)
{
	struct bintime bt;

	ffclock_abstime(NULL, &bt, NULL,
	    FFCLOCK_LERP | FFCLOCK_UPTIME | FFCLOCK_FAST);
	bintime2timespec(&bt, tsp);
}
Example #3
void
nanouptime(struct timespec *tsp)
{
	struct bintime bt;

	nnanouptime++;
	binuptime(&bt);
	bintime2timespec(&bt, tsp);
}
Example #4
void
getnanouptime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		bintime2timespec(&th->th_offset, tsp);
	} while (gen == 0 || gen != th->th_generation);
}
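The do/while loop above is the lock-free reader side of the timehands generation protocol: a generation of 0 means tc_windup() is in the middle of an update, and a changed generation means an update completed while the fields were being copied, so the read is retried. The wall-clock counterpart follows the same pattern; a sketch, assuming the same struct timehands fields used above:

void
getnanotime(struct timespec *tsp)
{
	struct timehands *th;
	u_int gen;

	do {
		th = timehands;
		gen = th->th_generation;
		*tsp = th->th_nanotime;
	} while (gen == 0 || gen != th->th_generation);
}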
Example #5
int
__vdso_clock_gettime(clockid_t clock_id, struct timespec *ts)
{
    struct bintime bt;
    int abs, error;

    if (tk == NULL) {
        error = _elf_aux_info(AT_TIMEKEEP, &tk, sizeof(tk));
        if (error != 0 || tk == NULL)
            return (ENOSYS);
    }
    if (tk->tk_ver != VDSO_TK_VER_CURR)
        return (ENOSYS);
    switch (clock_id) {
    case CLOCK_REALTIME:
    case CLOCK_REALTIME_PRECISE:
    case CLOCK_REALTIME_FAST:
    case CLOCK_SECOND:
        abs = 1;
        break;
    case CLOCK_MONOTONIC:
    case CLOCK_MONOTONIC_PRECISE:
    case CLOCK_MONOTONIC_FAST:
    case CLOCK_UPTIME:
    case CLOCK_UPTIME_PRECISE:
    case CLOCK_UPTIME_FAST:
        abs = 0;
        break;
    default:
        return (ENOSYS);
    }
    error = binuptime(&bt, tk, abs);
    if (error != 0)
        return (error);
    bintime2timespec(&bt, ts);
    if (clock_id == CLOCK_SECOND)
        ts->tv_nsec = 0;
    return (0);
}
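Userland normally reaches this function indirectly: FreeBSD's libc clock_gettime(2) tries the shared-page (vDSO) implementation first and only falls back to the system call when it fails (for example with ENOSYS). A minimal caller, for illustration:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec ts;

	/* May be served entirely in userspace by __vdso_clock_gettime(). */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return (1);
	}
	printf("monotonic: %jd.%09ld\n", (intmax_t)ts.tv_sec, ts.tv_nsec);
	return (0);
}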
Example #6
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec ts2;
	struct bintime bt, bt2;

	binuptime(&bt2);
	timespec2bintime(ts, &bt);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	if (timestepwarnings) {
		bintime2timespec(&bt2, &ts2);
		log(LOG_INFO, "Time stepped from %ld.%09ld to %ld.%09ld\n",
		    (long)ts2.tv_sec, ts2.tv_nsec,
		    (long)ts->tv_sec, ts->tv_nsec);
	}
}
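tc_setclock() recomputes the boot time with bintime_add() and bintime_sub(), which add or subtract the (sec, frac) pair and propagate the carry or borrow from the 64-bit fraction into the seconds field. A sketch of the <sys/time.h> inlines, included here for reference:

static __inline void
bintime_add(struct bintime *bt, const struct bintime *bt2)
{
	uint64_t u;

	u = bt->frac;
	bt->frac += bt2->frac;
	if (u > bt->frac)		/* fraction wrapped: carry a second */
		bt->sec++;
	bt->sec += bt2->sec;
}

static __inline void
bintime_sub(struct bintime *bt, const struct bintime *bt2)
{
	uint64_t u;

	u = bt->frac;
	bt->frac -= bt2->frac;
	if (u < bt->frac)		/* fraction wrapped: borrow a second */
		bt->sec--;
	bt->sec -= bt2->sec;
}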
Example #7
static void
compute_stats(struct ctl_lun_io_stats *cur_stats,
	      struct ctl_lun_io_stats *prev_stats, long double etime,
	      long double *mbsec, long double *kb_per_transfer,
	      long double *transfers_per_second, long double *ms_per_transfer,
	      long double *ms_per_dma, long double *dmas_per_second)
{
	uint64_t total_bytes = 0, total_operations = 0, total_dmas = 0;
	uint32_t port;
	struct bintime total_time_bt, total_dma_bt;
	struct timespec total_time_ts, total_dma_ts;
	int i;

	bzero(&total_time_bt, sizeof(total_time_bt));
	bzero(&total_dma_bt, sizeof(total_dma_bt));
	bzero(&total_time_ts, sizeof(total_time_ts));
	bzero(&total_dma_ts, sizeof(total_dma_ts));
	for (port = 0; port < CTL_MAX_PORTS; port++) {
		for (i = 0; i < CTL_STATS_NUM_TYPES; i++) {
			total_bytes += cur_stats->ports[port].bytes[i];
			total_operations +=
			    cur_stats->ports[port].operations[i];
			total_dmas += cur_stats->ports[port].num_dmas[i];
			bintime_add(&total_time_bt,
			    &cur_stats->ports[port].time[i]);
			bintime_add(&total_dma_bt,
			    &cur_stats->ports[port].dma_time[i]);
			if (prev_stats != NULL) {
				total_bytes -=
				    prev_stats->ports[port].bytes[i];
				total_operations -=
				    prev_stats->ports[port].operations[i];
				total_dmas -=
				    prev_stats->ports[port].num_dmas[i];
				bintime_sub(&total_time_bt,
				    &prev_stats->ports[port].time[i]);
				bintime_sub(&total_dma_bt,
				    &prev_stats->ports[port].dma_time[i]);
			}
		}
	}

	*mbsec = total_bytes;
	*mbsec /= 1024 * 1024;
	if (etime > 0.0)
		*mbsec /= etime;
	else
		*mbsec = 0;
	*kb_per_transfer = total_bytes;
	*kb_per_transfer /= 1024;
	if (total_operations > 0)
		*kb_per_transfer /= total_operations;
	else
		*kb_per_transfer = 0;
	*transfers_per_second = total_operations;
	*dmas_per_second = total_dmas;
	if (etime > 0.0) {
		*transfers_per_second /= etime;
		*dmas_per_second /= etime;
	} else {
		*transfers_per_second = 0;
		*dmas_per_second = 0;
	}

	bintime2timespec(&total_time_bt, &total_time_ts);
	bintime2timespec(&total_dma_bt, &total_dma_ts);
	if (total_operations > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_transfer = total_time_ts.tv_sec * 1000;
		*ms_per_transfer += total_time_ts.tv_nsec / 1000000;
		*ms_per_transfer /= total_operations;
	} else
		*ms_per_transfer = 0;

	if (total_dmas > 0) {
		/*
		 * Convert the timespec to milliseconds.
		 */
		*ms_per_dma = total_dma_ts.tv_sec * 1000;
		*ms_per_dma += total_dma_ts.tv_nsec / 1000000;
		*ms_per_dma /= total_dmas;
	} else
		*ms_per_dma = 0;
}
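compute_stats() converts the accumulated bintime totals first to a timespec and then to milliseconds. An equivalent direct conversion, shown only as a hypothetical helper (it is not part of ctlstat), treats the fraction as units of 1/2^64 second:

static long double
bintime_to_ms(const struct bintime *bt)
{

	/* bt->frac counts 1/2^64-second units; 2^64 = 18446744073709551616. */
	return ((long double)bt->sec * 1000.0L +
	    (long double)bt->frac * 1000.0L / 18446744073709551616.0L);
}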
Example #8
File: net.c  Project: DomChey/ptpd
ssize_t 
netRecvEvent(Octet * buf, TimeInternal * time, NetPath * netPath, int flags)
{
	ssize_t ret = 0;
	struct msghdr msg;
	struct iovec vec[1];
	struct sockaddr_in from_addr;

#ifdef PTPD_PCAP
	struct pcap_pkthdr *pkt_header;
	const u_char *pkt_data;
#endif

	union {
		struct cmsghdr cm;
		char	control[256];
	}     cmsg_un;

	struct cmsghdr *cmsg;

#if defined(SO_TIMESTAMPNS) || defined(SO_TIMESTAMPING)
	struct timespec * ts;
#elif defined(SO_BINTIME)
	struct bintime * bt;
	struct timespec ts;
#endif
	
#if defined(SO_TIMESTAMP)
	struct timeval * tv;
#endif
	Boolean timestampValid = FALSE;

#ifdef PTPD_PCAP
	if (netPath->pcapEvent == NULL) { /* Using sockets */
#endif
		vec[0].iov_base = buf;
		vec[0].iov_len = PACKET_SIZE;

		memset(&msg, 0, sizeof(msg));
		memset(&from_addr, 0, sizeof(from_addr));
		memset(buf, 0, PACKET_SIZE);
		memset(&cmsg_un, 0, sizeof(cmsg_un));

		msg.msg_name = (caddr_t)&from_addr;
		msg.msg_namelen = sizeof(from_addr);
		msg.msg_iov = vec;
		msg.msg_iovlen = 1;
		msg.msg_control = cmsg_un.control;
		msg.msg_controllen = sizeof(cmsg_un.control);
		msg.msg_flags = 0;

		ret = recvmsg(netPath->eventSock, &msg, flags | MSG_DONTWAIT);
		if (ret <= 0) {
			if (errno == EAGAIN || errno == EINTR)
				return 0;

			return ret;
		}
		if (msg.msg_flags & MSG_TRUNC) {
			ERROR("received truncated message\n");
			return 0;
		}
		/* get time stamp of packet */
		if (!time) {
			ERROR("null receive time stamp argument\n");
			return 0;
		}
		if (msg.msg_flags & MSG_CTRUNC) {
			ERROR("received truncated ancillary data\n");
			return 0;
		}

#if defined(HAVE_DECL_MSG_ERRQUEUE) && HAVE_DECL_MSG_ERRQUEUE
		if(!(flags & MSG_ERRQUEUE))
#endif
			netPath->lastRecvAddr = from_addr.sin_addr.s_addr;
		netPath->receivedPackets++;

		if (msg.msg_controllen <= 0) {
			ERROR("received short ancillary data (%ld/%ld)\n",
			      (long)msg.msg_controllen, (long)sizeof(cmsg_un.control));

			return 0;
		}

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_SOCKET) {
#if defined(SO_TIMESTAMPING) && defined(SO_TIMESTAMPNS)
				if(cmsg->cmsg_type == SO_TIMESTAMPING || 
				    cmsg->cmsg_type == SO_TIMESTAMPNS) {
					ts = (struct timespec *)CMSG_DATA(cmsg);
					time->seconds = ts->tv_sec;
					time->nanoseconds = ts->tv_nsec;
					timestampValid = TRUE;
					DBG("rcvevent: SO_TIMESTAMP%s %s time stamp: %us %dns\n", netPath->txTimestampFailure ?
					    "NS" : "ING",
					    (flags & MSG_ERRQUEUE) ? "(TX)" : "(RX)" , time->seconds, time->nanoseconds);
					break;
				}
#elif defined(SO_TIMESTAMPNS)
				if(cmsg->cmsg_type == SCM_TIMESTAMPNS) {
					ts = (struct timespec *)CMSG_DATA(cmsg);
					time->seconds = ts->tv_sec;
					time->nanoseconds = ts->tv_nsec;
					timestampValid = TRUE;
					DBGV("kernel NANO recv time stamp %us %dns\n", 
					     time->seconds, time->nanoseconds);
					break;
				}
#elif defined(SO_BINTIME)
				if(cmsg->cmsg_type == SCM_BINTIME) {
					bt = (struct bintime *)CMSG_DATA(cmsg);
					bintime2timespec(bt, &ts);
					time->seconds = ts.tv_sec;
					time->nanoseconds = ts.tv_nsec;
					timestampValid = TRUE;
					DBGV("kernel NANO recv time stamp %us %dns\n",
					     time->seconds, time->nanoseconds);
					break;
				}
#endif
			
#if defined(SO_TIMESTAMP)
				if(cmsg->cmsg_type == SCM_TIMESTAMP) {
					tv = (struct timeval *)CMSG_DATA(cmsg);
					time->seconds = tv->tv_sec;
					time->nanoseconds = tv->tv_usec * 1000;
					timestampValid = TRUE;
					DBGV("kernel MICRO recv time stamp %us %dns\n",
					     time->seconds, time->nanoseconds);
				}
#endif
			}
		}

		if (!timestampValid) {
			/*
			 * do not try to get by with recording the time here, better
			 * to fail because the time recorded could be well after the
			 * message receive, which would put a big spike in the
			 * offset signal sent to the clock servo
			 */
			DBG("netRecvEvent: no receive time stamp\n");
			return 0;
		}
#ifdef PTPD_PCAP
	}
#endif

#ifdef PTPD_PCAP
	else { /* Using PCAP */
		/* Discard packet on socket */
		if (netPath->eventSock >= 0) {
			recv(netPath->eventSock, buf, PACKET_SIZE, MSG_DONTWAIT);
		}
		
		if ((ret = pcap_next_ex(netPath->pcapEvent, &pkt_header, 
					&pkt_data)) < 1) {
			if (ret < 0)
				DBGV("netRecvEvent: pcap_next_ex failed %s\n",
				     pcap_geterr(netPath->pcapEvent));
			return 0;
		}

		/* Make sure this is IP (could dot1q get here?) */
		if (ntohs(*(u_short *)(pkt_data + 12)) != ETHERTYPE_IP)
			DBGV("PCAP payload received is not Ethernet: 0x%04x\n",
			    ntohs(*(u_short *)(pkt_data + 12)));
		/* Retrieve source IP from the payload - 14 eth + 12 IP */
		netPath->lastRecvAddr = *(Integer32 *)(pkt_data + 26);

		netPath->receivedPackets++;
		/* XXX Total cheat */
		memcpy(buf, pkt_data + netPath->headerOffset, 
		       pkt_header->caplen - netPath->headerOffset);
		time->seconds = pkt_header->ts.tv_sec;
		time->nanoseconds = pkt_header->ts.tv_usec * 1000;
		timestampValid = TRUE;
		DBGV("netRecvEvent: kernel PCAP recv time stamp %us %dns\n",
		     time->seconds, time->nanoseconds);
		fflush(NULL);
		ret = pkt_header->caplen - netPath->headerOffset;
	}
#endif
	return ret;
}
Example #9
ssize_t
netRecvEvent(Octet * buf, TimeInternal * time, NetPath * netPath)
{
    ssize_t ret;
    struct msghdr msg;
    struct iovec vec[1];
    struct sockaddr_in from_addr;
    union {
        struct cmsghdr cm;
        char	control[CMSG_SPACE(sizeof(struct timeval))];
    }     cmsg_un;
    struct cmsghdr *cmsg;
#if defined(linux)
    struct timeval *tv;
#else /* FreeBSD */
    struct timespec ts;
#endif /* FreeBSD or Linux */

    vec[0].iov_base = buf;
    vec[0].iov_len = PACKET_SIZE;

    memset(&msg, 0, sizeof(msg));
    memset(&from_addr, 0, sizeof(from_addr));
    memset(buf, 0, PACKET_SIZE);
    memset(&cmsg_un, 0, sizeof(cmsg_un));

    msg.msg_name = (caddr_t)&from_addr;
    msg.msg_namelen = sizeof(from_addr);
    msg.msg_iov = vec;
    msg.msg_iovlen = 1;
    msg.msg_control = cmsg_un.control;
    msg.msg_controllen = sizeof(cmsg_un.control);
    msg.msg_flags = 0;

    ret = recvmsg(netPath->eventSock, &msg, MSG_DONTWAIT);
    if (ret <= 0) {
        if (errno == EAGAIN || errno == EINTR)
            return 0;

        return ret;
    }
    if (msg.msg_flags & MSG_TRUNC) {
        ERROR("received truncated message\n");
        return 0;
    }
    /* get time stamp of packet */
    if (!time) {
        ERROR("null receive time stamp argument\n");
        return 0;
    }
    if (msg.msg_flags & MSG_CTRUNC) {
        ERROR("received truncated ancillary data\n");
        return 0;
    }
    if (msg.msg_controllen < sizeof(cmsg_un.control)) {
        ERROR("received short ancillary data (%d/%d)\n",
              msg.msg_controllen, (int)sizeof(cmsg_un.control));

        return 0;
    }

#if defined(linux)
    tv = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
            cmsg = CMSG_NXTHDR(&msg, cmsg))
        if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_TIMESTAMP)
            tv = (struct timeval *)CMSG_DATA(cmsg);

    if (tv) {
        time->seconds = tv->tv_sec;
        time->nanoseconds = tv->tv_usec * 1000;
        DBGV("kernel recv time stamp %us %dns\n", time->seconds, time->nanoseconds);
    } else {
        /*
         * do not try to get by with recording the time here, better
         * to fail because the time recorded could be well after the
         * message receive, which would put a big spike in the
         * offset signal sent to the clock servo
         */
        DBG("no recieve time stamp\n");
        return 0;
    }
#else /* FreeBSD */
    bzero(&ts, sizeof(ts));
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
            cmsg = CMSG_NXTHDR(&msg, cmsg))
        if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SCM_BINTIME)
            bintime2timespec((struct bintime *)CMSG_DATA(cmsg),
                             &ts);

    if (ts.tv_sec != 0) {
        time->seconds = ts.tv_sec;
        time->nanoseconds = ts.tv_nsec;
        DBGV("kernel recv time stamp %us %dns\n", time->seconds, time->nanoseconds);
    } else {
        /*
         * do not try to get by with recording the time here, better
         * to fail because the time recorded could be well after the
         * message receive, which would put a big spike in the
         * offset signal sent to the clock servo
         */
        DBG("no recieve time stamp\n");
        return 0;
    }
#endif /* FreeBSD or Linux */

    return ret;
}
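Both netRecvEvent() variants depend on the kernel attaching a timestamp control message to each received datagram, which only happens if the matching socket option was enabled when the socket was set up. A sketch of that setup with a hypothetical enableRxTimestamps() helper, assuming a FreeBSD kernel that offers SO_BINTIME and falling back to the portable SO_TIMESTAMP:

#include <sys/types.h>
#include <sys/socket.h>

static int
enableRxTimestamps(int sock)
{
	int on = 1;

#ifdef SO_BINTIME
	/* Kernel attaches SCM_BINTIME (a struct bintime) to each datagram. */
	if (setsockopt(sock, SOL_SOCKET, SO_BINTIME, &on, sizeof(on)) == 0)
		return (0);
#endif
	/* Fallback: SCM_TIMESTAMP carries a microsecond struct timeval. */
	return (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)));
}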
Example #10
/*
 * Initialize the next struct timehands in the ring and make
 * it the active timehands.  Along the way we might switch to a different
 * timecounter and/or do seconds processing in NTP.  Slightly magic.
 */
void
tc_windup(void)
{
	struct bintime bt;
	struct timehands *th, *tho;
	u_int64_t scale;
	u_int delta, ncount, ogen;
	int i;
#ifdef leapsecs
	time_t t;
#endif

	/*
	 * Make the next timehands a copy of the current one, but do not
	 * overwrite the generation or next pointer.  While we update
	 * the contents, the generation must be zero.
	 */
	tho = timehands;
	th = tho->th_next;
	ogen = th->th_generation;
	th->th_generation = 0;
	bcopy(tho, th, offsetof(struct timehands, th_generation));

	/*
	 * Capture a timecounter delta on the current timecounter and if
	 * changing timecounters, a counter value from the new timecounter.
	 * Update the offset fields accordingly.
	 */
	delta = tc_delta(th);
	if (th->th_counter != timecounter)
		ncount = timecounter->tc_get_timecount(timecounter);
	else
		ncount = 0;
	th->th_offset_count += delta;
	th->th_offset_count &= th->th_counter->tc_counter_mask;
	bintime_addx(&th->th_offset, th->th_scale * delta);

#ifdef notyet
	/*
	 * Hardware latching timecounters may not generate interrupts on
	 * PPS events, so instead we poll them.  There is a finite risk that
	 * the hardware might capture a count which is later than the one we
	 * got above, and therefore possibly in the next NTP second which might
	 * have a different rate than the current NTP second.  It doesn't
	 * matter in practice.
	 */
	if (tho->th_counter->tc_poll_pps)
		tho->th_counter->tc_poll_pps(tho->th_counter);
#endif

	/*
	 * Deal with NTP second processing.  The for loop normally
	 * iterates at most once, but in extreme situations it might
	 * keep NTP sane if timeouts are not run for several seconds.
	 * At boot, the time step can be large when the TOD hardware
	 * has been read, so on really large steps, we call
	 * ntp_update_second only twice.  We need to call it twice in
	 * case we missed a leap second.
	 */
	bt = th->th_offset;
	bintime_add(&bt, &boottimebin);
	i = bt.sec - tho->th_microtime.tv_sec;
	if (i > LARGE_STEP)
		i = 2;
	for (; i > 0; i--)
		ntp_update_second(&th->th_adjustment, &bt.sec);

	/* Update the UTC timestamps used by the get*() functions. */
	/* XXX shouldn't do this here.  Should force non-`get' versions. */
	bintime2timeval(&bt, &th->th_microtime);
	bintime2timespec(&bt, &th->th_nanotime);

	/* Now is a good time to change timecounters. */
	if (th->th_counter != timecounter) {
		th->th_counter = timecounter;
		th->th_offset_count = ncount;
	}

	/*-
	 * Recalculate the scaling factor.  We want the number of 1/2^64
	 * fractions of a second per period of the hardware counter, taking
	 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
	 * processing provides us with.
	 *
	 * The th_adjustment is nanoseconds per second with 32 bit binary
	 * fraction and we want 64 bit binary fraction of second:
	 *
	 *	 x = a * 2^32 / 10^9 = a * 4.294967296
	 *
	 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
	 * we can only multiply by about 850 without overflowing, but that
	 * leaves suitably precise fractions for multiply before divide.
	 *
	 * Divide before multiply with a fraction of 2199/512 results in a
	 * systematic undercompensation of 10PPM of th_adjustment.  On a
	 * 5000PPM adjustment this is a 0.05PPM error.  This is acceptable.
 	 *
	 * We happily sacrifice the lowest of the 64 bits of our result
	 * to the goddess of code clarity.
	 *
	 */
	scale = (u_int64_t)1 << 63;
	scale += (th->th_adjustment / 1024) * 2199;
	scale /= th->th_counter->tc_frequency;
	th->th_scale = scale * 2;

	/*
	 * Now that the struct timehands is again consistent, set the new
	 * generation number, making sure to not make it zero.
	 */
	if (++ogen == 0)
		ogen = 1;
	th->th_generation = ogen;

	/* Go live with the new struct timehands. */
	time_second = th->th_microtime.tv_sec;
	time_uptime = th->th_offset.sec;
	timehands = th;
}
Example #11
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
#ifdef MAXHE_TODO
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#else
				c_func(c_arg);
#endif // MAXHE_TODO
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
#ifdef MAXHE_TODO
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
#endif // MAXHE_TODO
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
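The scale recalculation near the end of tc_windup() in Example #10 converts the counter frequency into the number of 2^-64-second units the clock advances per hardware tick (th_scale), folding in the NTP adjustment via the 2199/512 approximation of 2^32/10^9 described in the comment. A standalone sketch of that arithmetic, using a hypothetical 1 GHz counter and zero adjustment:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t scale, freq = 1000000000;	/* hypothetical 1 GHz counter */
	int64_t adjustment = 0;			/* ns/s with 32-bit fraction */

	scale = (uint64_t)1 << 63;
	scale += (adjustment / 1024) * 2199;	/* ~ adjustment * 2^32 / 10^9 */
	scale /= freq;
	scale *= 2;
	/* Each counter tick advances the clock by scale / 2^64 seconds. */
	printf("scale = %ju (%.3f ns per tick)\n", (uintmax_t)scale,
	    (double)scale / 18446744073709551616.0 * 1e9);
	return (0);
}

With a 1 GHz counter this prints a scale close to 2^64 / 10^9, i.e. almost exactly one nanosecond per tick.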
Example #12
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	int c_flags;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	{
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	c_func(c_arg);
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);

	CC_LOCK(cc);
	KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
	cc->cc_curr = NULL;

	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
	    c->c_flags == CALLOUT_LOCAL_ALLOC,
	    ("corrupted callout"));
	if (c_flags & CALLOUT_LOCAL_ALLOC)
		callout_cc_del(c, cc);
}
Example #13
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
Example #14
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}
Example #15
void
pps_event(struct pps_state *pps, int event)
{
	struct bintime bt;
	struct timespec ts, *tsp, *osp;
	u_int tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	KASSERT(pps != NULL, ("NULL pps pointer in pps_event"));
	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation)
		return;

	/* Things would be easier with arrays. */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->kcmode & PPS_CAPTUREASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->kcmode & PPS_CAPTURECLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/*
	 * If the timecounter changed, we cannot compare the count values, so
	 * we have to drop the rest of the PPS-stuff until the next event.
	 */
	if (pps->ppstc != pps->capth->th_counter) {
		pps->ppstc = pps->capth->th_counter;
		*pcount = pps->capcount;
		pps->ppscount[2] = pps->capcount;
		return;
	}

	/* Convert the count to a timespec. */
	tcount = pps->capcount - pps->capth->th_offset_count;
	tcount &= pps->capth->th_counter->tc_counter_mask;
	bt = pps->capth->th_offset;
	bintime_addx(&bt, pps->capth->th_scale * tcount);
	bintime_add(&bt, &boottimebin);
	bintime2timespec(&bt, &ts);

	/* If the timecounter was wound up underneath us, bail out. */
	if (pps->capgen != pps->capth->th_generation)
		return;

	*pcount = pps->capcount;
	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		uint64_t scale;

		/*
		 * Feed the NTP PLL/FLL.
		 * The FLL wants to know how many (hardware) nanoseconds
		 * elapsed since the previous event.
		 */
		tcount = pps->capcount - pps->ppscount[2];
		pps->ppscount[2] = pps->capcount;
		tcount &= pps->capth->th_counter->tc_counter_mask;
		scale = (uint64_t)1 << 63;
		scale /= pps->capth->th_counter->tc_frequency;
		scale *= 2;
		bt.sec = 0;
		bt.frac = 0;
		bintime_addx(&bt, scale * tcount);
		bintime2timespec(&bt, &ts);
		hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
	}
#endif
}
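pps_event() and tc_windup() both use bintime_addx() to add a 64-bit fraction (scale * count) into a bintime. For completeness, a sketch of that helper as defined in <sys/time.h>:

static __inline void
bintime_addx(struct bintime *bt, uint64_t x)
{
	uint64_t u;

	u = bt->frac;
	bt->frac += x;
	if (u > bt->frac)		/* fraction wrapped: carry a second */
		bt->sec++;
}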