Example 1
/*
 * cleanup_after_child() runs in parent.
 */
static void
cleanup_after_child(
	blocking_child *	c
	)
{
	u_int	idx;

	DEBUG_INSIST(!c->reusable);
	free(c->thread_ref);
	c->thread_ref = NULL;
	c->thread_id = 0;
#ifdef USE_WORK_PIPE
	DEBUG_INSIST(-1 != c->resp_read_pipe);
	DEBUG_INSIST(-1 != c->resp_write_pipe);
	(*addremove_io_fd)(c->resp_read_pipe, c->ispipe, true);
	close(c->resp_write_pipe);
	close(c->resp_read_pipe);
	c->resp_write_pipe = -1;
	c->resp_read_pipe = -1;
#else
	DEBUG_INSIST(NULL != c->blocking_response_ready);
	(*addremove_io_semaphore)(c->blocking_response_ready, true);
#endif
	for (idx = 0; idx < c->workitems_alloc; idx++)
		c->workitems[idx] = NULL;
	c->next_workitem = 0;
	c->next_workeritem = 0;
	for (idx = 0; idx < c->responses_alloc; idx++)
		c->responses[idx] = NULL;
	c->next_response = 0;
	c->next_workresp = 0;
	c->reusable = true;
}
Example 2
/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}
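For context, a hypothetical registration sketch (not part of the original source) showing how worker_resp_cb could be wired into a libevent loop; base and c stand for the event_base and blocking_child set up elsewhere:

#include <event2/event.h>

static struct event *	resp_read_ev;

static void
register_worker_resp_cb(
	struct event_base *	base,
	blocking_child *	c
	)
{
	/* EV_PERSIST keeps the event armed after each callback */
	resp_read_ev = event_new(base, c->resp_read_pipe,
				 EV_READ | EV_PERSIST,
				 &worker_resp_cb, c);
	if (NULL != resp_read_ev)
		event_add(resp_read_ev, NULL);
}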
Example 3
/* --------------------------------------------------------------------
 * Create a worker thread. There are several differences between POSIX
 * and Windows, of course -- most notably the Windows thread is not a
 * detached thread, and we keep the handle around until we want to get
 * rid of the thread. The notification scheme also differs: Windows
 * makes use of semaphores in both directions, POSIX uses a pipe for
 * integration with 'select()' or the like.
 */
static void
start_blocking_thread_internal(
	blocking_child *	c
	)
#ifdef SYS_WINNT
{
	BOOL	resumed;

	c->thread_ref = NULL;
	(*addremove_io_semaphore)(c->responses_pending->shnd, FALSE);
	c->thr_table[0].thnd =
		(HANDLE)_beginthreadex(
			NULL,
			0,
			&blocking_thread,
			c,
			CREATE_SUSPENDED,
			NULL);

	if (NULL == c->thr_table[0].thnd) {
		msyslog(LOG_ERR, "start blocking thread failed: %m");
		exit(-1);
	}
	/* remember the thread priority is only within the process class */
	if (!SetThreadPriority(c->thr_table[0].thnd,
			       THREAD_PRIORITY_BELOW_NORMAL))
		msyslog(LOG_ERR, "Error lowering blocking thread priority: %m");

	resumed = ResumeThread(c->thr_table[0].thnd);
	DEBUG_INSIST(resumed);
	c->thread_ref = &c->thr_table[0];
}
#endif	/* SYS_WINNT */
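The POSIX branch is elided above; as a contrast to the Windows code, here is a minimal sketch of what a pthread-based variant could look like, assuming a hypothetical start routine blocking_thread_posix() with the void *(*)(void *) signature (the project's actual #else branch may differ):

#include <pthread.h>

static void
start_blocking_thread_internal_posix(
	blocking_child *	c
	)
{
	pthread_attr_t	attr;
	pthread_t	tid;
	int		rc;

	/* detached: no handle is kept, unlike the Windows branch */
	pthread_attr_init(&attr);
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	rc = pthread_create(&tid, &attr, &blocking_thread_posix, c);
	pthread_attr_destroy(&attr);
	if (0 != rc) {
		msyslog(LOG_ERR, "pthread_create() blocking child: %m");
		exit(1);
	}
}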
Example 4
/*
 * cleanup_after_child() runs in parent.
 */
static void
cleanup_after_child(
	blocking_child *	c
	)
{
	harvest_child_status(c);
	if (-1 != c->resp_read_pipe) {
		(*addremove_io_fd)(c->resp_read_pipe, c->ispipe, TRUE);
		close(c->resp_read_pipe);
		c->resp_read_pipe = -1;
	}
	c->resp_read_ctx = NULL;
	DEBUG_INSIST(-1 == c->req_read_pipe);
	DEBUG_INSIST(-1 == c->resp_write_pipe);
	c->reusable = TRUE;
}
Example 5
/*
 * general fractional timestamp formatting
 *
 * Many pieces of ntpd require a machine with two's complement
 * representation of signed integers, so we don't go through the whole
 * rigamarole of creating fully portable code here. But we have to stay
 * away from signed integer overflow, as this might cause trouble even
 * with two's complement representation.
 */
const char *
format_time_fraction(
	time_t	secs,
	long	frac,
	int	prec
	)
{
	char *		cp;
	u_int		prec_u;
	u_time		secs_u;
	u_int		u;
	long		fraclimit;
	int		notneg;	/* flag for non-negative value	*/
	ldiv_t		qr;

	DEBUG_REQUIRE(prec != 0);

	LIB_GETBUF(cp);
	secs_u = (u_time)secs;
	
	/* check if we need signed or unsigned mode */
	notneg = (prec < 0);
	prec_u = abs(prec);
	/* fraclimit = (long)pow(10, prec_u); */
	for (fraclimit = 10, u = 1; u < prec_u; u++) {
		DEBUG_INSIST(fraclimit < fraclimit * 10);
		fraclimit *= 10;
	}

	/*
	 * Since conversion to string uses lots of divisions anyway,
	 * there's no big extra penalty for normalisation. We do it for
	 * consistency.
	 */
	if (frac < 0 || frac >= fraclimit) {
		qr = ldiv(frac, fraclimit);
		if (qr.rem < 0) {
			qr.quot--;
			qr.rem += fraclimit;
		}
		secs_u += (time_t)qr.quot;
		frac = qr.rem;
	}

	/* Get the absolute value of the split representation time. */
	notneg = notneg || ((time_t)secs_u >= 0);
	if (!notneg) {
		secs_u = ~secs_u;
		if (0 == frac)
			secs_u++;
		else
			frac = fraclimit - frac;
	}

	/* finally format the data and return the result */
	snprintf(cp, LIB_BUFLENGTH, "%s%" UTIME_FORMAT ".%0*ld",
	    notneg? "" : "-", secs_u, prec_u, frac);
	
	return cp;
}
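A hypothetical usage sketch: format a struct timeval with six fractional digits, much as a tvtoa()-style wrapper would. Note the result points into libntp's rotating LIB_GETBUF() pool, so it is only valid until the pool wraps around:

#include <sys/time.h>

const char *
tv_format_example(
	const struct timeval *	ptv
	)
{
	/* a negative prec would instead request unsigned
	 * treatment of the seconds value */
	return format_time_fraction(ptv->tv_sec, ptv->tv_usec, 6);
}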
Example 6
/* --------------------------------------------------------------------
 * Create a worker thread (Windows variant; see the notes on Example 3).
 */
static void
start_blocking_thread_internal(
	blocking_child *	c
	)
#ifdef SYS_WINNT
{
	thr_ref	blocking_child_thread;
	u_int	blocking_thread_id;
	BOOL	resumed;

	(*addremove_io_semaphore)(c->blocking_response_ready, FALSE);
	blocking_child_thread =
		(HANDLE)_beginthreadex(
			NULL,
			0,
			&blocking_thread,
			c,
			CREATE_SUSPENDED,
			&blocking_thread_id);

	if (NULL == blocking_child_thread) {
		msyslog(LOG_ERR, "start blocking thread failed: %m");
		exit(-1);
	}
	c->thread_id = blocking_thread_id;
	c->thread_ref = blocking_child_thread;
	/* remember the thread priority is only within the process class */
	if (!SetThreadPriority(blocking_child_thread,
			       THREAD_PRIORITY_BELOW_NORMAL))
		msyslog(LOG_ERR, "Error lowering blocking thread priority: %m");

	resumed = ResumeThread(blocking_child_thread);
	DEBUG_INSIST(resumed);
}
#endif	/* SYS_WINNT */
Example 7
/*
 * mon_reclaim_entry - Remove an entry from the MRU list and from the
 *		       hash array, then zero-initialize it.  Indirectly
 *		       decrements mru_entries.
 *
 * The entry is prepared to be reused.  Before return, in
 * remove_from_hash(), mru_entries is decremented.  It is the caller's
 * responsibility to increment it again.
 */
static inline void
mon_reclaim_entry(
    mon_entry *m
)
{
    DEBUG_INSIST(NULL != m);

    UNLINK_DLIST(m, mru);
    remove_from_hash(m);
    ZERO(*m);
}
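A hedged sketch of the reuse pattern the comment describes; the fill and relink steps are hypothetical and the real caller in ntp_monitor.c differs in detail. The oldest entry is reclaimed from the tail of the MRU list, repopulated, relinked at the head, and the count that remove_from_hash() decremented is restored:

static void
mru_recycle_sketch(void)
{
    mon_entry *	mon;

    mon = TAIL_DLIST(mon_mru_list, mru);	/* oldest entry */
    mon_reclaim_entry(mon);			/* unlink, un-hash, zero */
    /* ... fill *mon for its new association ... */
    LINK_DLIST(mon_mru_list, mon, mru);		/* newest once more */
    mru_entries++;				/* restore the count */
}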
Example 8
/* --------------------------------------------------------------------
 * Light up a new worker.
 */
static void
start_blocking_thread(
	blocking_child *	c
	)
{

	DEBUG_INSIST(!c->reusable);

	prepare_child_sems(c);
	start_blocking_thread_internal(c);
}
Example 9
int
send_blocking_req_internal(
	blocking_child *	c,
	blocking_pipe_header *	hdr,
	void *			data
	)
{
	int octets;
	int rc;

	DEBUG_REQUIRE(hdr != NULL);
	DEBUG_REQUIRE(data != NULL);
	DEBUG_REQUIRE(BLOCKING_REQ_MAGIC == hdr->magic_sig);

	if (-1 == c->req_write_pipe) {
		fork_blocking_child(c);
		DEBUG_INSIST(-1 != c->req_write_pipe);
	}

	octets = sizeof(*hdr);
	rc = write(c->req_write_pipe, hdr, octets);

	if (rc == octets) {
		octets = hdr->octets - sizeof(*hdr);
		rc = write(c->req_write_pipe, data, octets);

		if (rc == octets)
			return 0;
	}

	if (rc < 0)
		msyslog(LOG_ERR,
			"send_blocking_req_internal: pipe write: %m");
	else
		msyslog(LOG_ERR,
			"send_blocking_req_internal: short write %d of %d",
			rc, octets);

	/* Fatal error.  Clean up the child process.  */
	req_child_exit(c);
	exit(1);	/* otherwise would be return -1 */
}
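For illustration, a hedged sketch of the matching read on the child's end of the pipe (the project's real reader lives elsewhere and may differ): pull the fixed-size header first, then the variable-length body it announces via hdr->octets:

#include <unistd.h>

static int
recv_blocking_req_sketch(
	int			fd,
	blocking_pipe_header *	hdr,
	void *			body,
	size_t			bodymax
	)
{
	ssize_t	rc;
	size_t	octets;

	rc = read(fd, hdr, sizeof(*hdr));
	if (rc != (ssize_t)sizeof(*hdr) ||
	    BLOCKING_REQ_MAGIC != hdr->magic_sig)
		return -1;
	octets = hdr->octets - sizeof(*hdr);
	if (octets > bodymax)
		return -1;
	rc = read(fd, body, octets);
	return (rc == (ssize_t)octets) ? 0 : -1;
}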
Example 10
/*
** xmt_timer_cb - fires when the next queued transmit is due: send the
** head of xmt_q, then re-arm the timer for the following entry.
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* next entry is due in this same second: pace it by gap */
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}
Example 11
/*
 * intres_timeout_req(s) is invoked in the parent to schedule an idle
 * timeout to fire in s seconds, if not reset earlier by a call to
 * intres_timeout_req(0), which clears any pending timeout.  When the
 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
 * parent).
 *
 * sntp and ntpd each provide implementations adapted to their timers.
 */
void
intres_timeout_req(
	u_int	seconds		/* 0 cancels */
	)
{
	struct timeval	tv_to;

	if (NULL == ev_worker_timeout) {
		ev_worker_timeout = event_new(base, -1,
					      EV_TIMEOUT | EV_PERSIST,
					      &worker_timeout, NULL);
		DEBUG_INSIST(NULL != ev_worker_timeout);
	} else {
		event_del(ev_worker_timeout);
	}
	if (0 == seconds)
		return;
	tv_to.tv_sec = seconds;
	tv_to.tv_usec = 0;
	event_add(ev_worker_timeout, &tv_to);
}
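Usage sketch (the 300 is illustrative): arm an idle timeout when work is handed to the worker, cancel it again once the response arrives:

static void
idle_timeout_usage_example(void)
{
	intres_timeout_req(300);	/* worker_idle_timer_fired() in 300s */
	/* ... the request completes ... */
	intres_timeout_req(0);		/* cancel the pending timeout */
}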
Example 12
static void
fork_blocking_child(
	blocking_child *	c
	)
{
	static int	atexit_installed;
	static int	blocking_pipes[4] = { -1, -1, -1, -1 };
	int		rc;
	int		was_pipe;
	int		is_pipe;
	int		saved_errno = 0;
	int		childpid;
	int		keep_fd;
	int		fd;

	/*
	 * parent and child communicate via a pair of pipes.
	 * 
	 * 0 child read request
	 * 1 parent write request
	 * 2 parent read response
	 * 3 child write response
	 */
	if (-1 == c->req_write_pipe) {
		rc = pipe_socketpair(&blocking_pipes[0], &was_pipe);
		if (0 != rc) {
			saved_errno = errno;
		} else {
			rc = pipe_socketpair(&blocking_pipes[2], &is_pipe);
			if (0 != rc) {
				saved_errno = errno;
				close(blocking_pipes[0]);
				close(blocking_pipes[1]);
			} else {
				INSIST(was_pipe == is_pipe);
			}
		}
		if (0 != rc) {
			errno = saved_errno;
			msyslog(LOG_ERR, "unable to create worker pipes: %m");
			exit(1);
		}

		/*
		 * Move the descriptors the parent will keep open out of the
		 * low descriptors preferred by C runtime buffered FILE *.
		 */
		c->req_write_pipe = move_fd(blocking_pipes[1]);
		c->resp_read_pipe = move_fd(blocking_pipes[2]);
		/*
		 * wake any worker child on orderly shutdown of the
		 * daemon so that it can notice the broken pipes and
		 * go away promptly.
		 */
		if (!atexit_installed) {
			atexit(&send_worker_home_atexit);
			atexit_installed = TRUE;
		}
	}

#ifdef HAVE_DROPROOT
	/* defer the fork until after root is dropped */
	if (droproot && !root_dropped)
		return;
#endif
	if (syslog_file != NULL)
		fflush(syslog_file);
	fflush(stdout);
	fflush(stderr);

	signal_no_reset(SIGCHLD, SIG_IGN);

	childpid = fork();
	if (-1 == childpid) {
		msyslog(LOG_ERR, "unable to fork worker: %m");
		exit(1);
	}

	if (childpid) {
		/* this is the parent */
		TRACE(1, ("forked worker child (pid %d)\n", childpid));
		c->pid = childpid;
		c->ispipe = is_pipe;

		/* close the child's pipe descriptors. */
		close(blocking_pipes[0]);
		close(blocking_pipes[3]);

		memset(blocking_pipes, -1, sizeof(blocking_pipes));

		/* wire into I/O loop */
		(*addremove_io_fd)(c->resp_read_pipe, is_pipe, FALSE);

		return;		/* parent returns */
	}

	/*
	 * The parent gets the child pid as the return value of fork().
	 * The child must work for it.
	 */
	c->pid = getpid();
	worker_process = TRUE;

	/*
	 * In the child, close all files except stdin, stdout, stderr,
	 * and the two child ends of the pipes.
	 */
	DEBUG_INSIST(-1 == c->req_read_pipe);
	DEBUG_INSIST(-1 == c->resp_write_pipe);
	c->req_read_pipe = blocking_pipes[0];
	c->resp_write_pipe = blocking_pipes[3];

	kill_asyncio(0);
	closelog();
	if (syslog_file != NULL) {
		fclose(syslog_file);
		syslog_file = NULL;
		syslogit = TRUE;
	}
	keep_fd = max(c->req_read_pipe, c->resp_write_pipe);
	for (fd = 3; fd < keep_fd; fd++)
		if (fd != c->req_read_pipe && 
		    fd != c->resp_write_pipe)
			close(fd);
	close_all_beyond(keep_fd);
	/*
	 * We get signals from refclock serial I/O on NetBSD in the
	 * worker if we do not reset SIGIO's handler to the default.
	 * It is not conditionalized for NetBSD alone because it is
	 * harmless on systems where it is not needed, and it covers
	 * unknown systems that behave like NetBSD.
	 * [Bug 1386]
	 */
#if defined(USE_SIGIO)
	signal_no_reset(SIGIO, SIG_DFL);
#elif defined(USE_SIGPOLL)
	signal_no_reset(SIGPOLL, SIG_DFL);
#endif
	signal_no_reset(SIGHUP, worker_sighup);
	init_logging("ntp_intres", 0, FALSE);
	setup_logfile(NULL);

	/*
	 * And now back to the portable code
	 */
	exit_worker(blocking_child_common(c));
}
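The move_fd() calls above keep long-lived descriptors away from the low numbers preferred by buffered FILE *. A hedged sketch of such a helper, assuming plain fcntl() semantics (the project's own implementation and floor value may differ):

#include <fcntl.h>
#include <unistd.h>

#define FD_FLOOR_SKETCH	20	/* hypothetical: reserve low fds for FILE * */

static int
move_fd_sketch(
	int	fd
	)
{
	int	newfd;

	newfd = fcntl(fd, F_DUPFD, FD_FLOOR_SKETCH);	/* lowest free fd >= floor */
	if (newfd < 0)
		return fd;		/* keep the original on failure */
	close(fd);
	return newfd;
}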
Example 13
/*
 * adj_host_clock - Called once every second to update the local clock.
 *
 * LOCKCLOCK: The only thing this routine does is increment the
 * sys_rootdisp variable.
 */
void
adj_host_clock(
	void
	)
{
	double	offset_adj;
	double	freq_adj;

	/*
	 * Update the dispersion since the last update. In contrast to
	 * NTPv3, NTPv4 does not declare unsynchronized after one day,
	 * since the dispersion check serves this function. Also,
	 * since the poll interval can exceed one day, the old test
	 * would be counterproductive. During the startup clamp period, the
	 * time constant is clamped at 2.
	 */
	sys_rootdisp += clock_phi;
#ifndef LOCKCLOCK
	if (!ntp_enable || mode_ntpdate)
		return;
	/*
	 * Determine the phase adjustment. The gain factor (denominator)
	 * increases with poll interval, so is dominated by the FLL
	 * above the Allan intercept. Note the reduced time constant at
	 * startup.
	 */
	if (state != EVNT_SYNC) {
		offset_adj = 0.;
	} else if (freq_cnt > 0) {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(1));
		freq_cnt--;
#ifdef KERNEL_PLL
	} else if (pll_control && kern_enable) {
		offset_adj = 0.;
#endif /* KERNEL_PLL */
	} else {
		offset_adj = clock_offset / (CLOCK_PLL * ULOGTOD(sys_poll));
	}

	/*
	 * If the kernel discipline is enabled the frequency correction
	 * drift_comp has already been engaged via ntp_adjtime() in
	 * set_freq().  Otherwise it is a component of the adj_systime()
	 * offset.
	 */
#ifdef KERNEL_PLL
	if (pll_control && kern_enable)
		freq_adj = 0.;
	else
#endif /* KERNEL_PLL */
		freq_adj = drift_comp;

	/* Bound absolute value of total adjustment to NTP_MAXFREQ. */
	if (offset_adj + freq_adj > NTP_MAXFREQ)
		offset_adj = NTP_MAXFREQ - freq_adj;
	else if (offset_adj + freq_adj < -NTP_MAXFREQ)
		offset_adj = -NTP_MAXFREQ - freq_adj;

	clock_offset -= offset_adj;
	/*
	 * Windows port adj_systime() must be called each second,
	 * even if the argument is zero, to ease emulation of
	 * adjtime() using Windows' slew API which controls the rate
	 * but does not automatically stop slewing when an offset
	 * has decayed to zero.
	 */
	DEBUG_INSIST(enable_panic_check == TRUE);
	enable_panic_check = FALSE;
	adj_systime(offset_adj + freq_adj);
	enable_panic_check = TRUE;
#endif /* LOCKCLOCK */
}
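A worked example of the phase gain above, assuming CLOCK_PLL is 16 and recalling that ULOGTOD(p) is 2^p: at sys_poll = 6 the denominator is 16 * 64 = 1024, so a residual clock_offset of 1.024 ms decays at one microsecond per second:

static double
pll_gain_example(void)
{
	double	clock_offset = 1.024e-3;	/* seconds */

	/* CLOCK_PLL * ULOGTOD(6) == 16 * 64 == 1024 */
	return clock_offset / 1024.;		/* 1.0e-6 s/s slew */
}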