/*
 * init_restrict - initialize the restriction data structures
 */
void
init_restrict(void)
{
	/*
	 * The restriction lists begin with a default entry with address
	 * and mask 0, which will match any entry.  The lists are kept
	 * sorted by descending address followed by descending mask:
	 *
	 *   address		mask
	 *   192.168.0.0	255.255.255.0	kod limited noquery nopeer
	 *   192.168.0.0	255.255.0.0	kod limited
	 *   0.0.0.0		0.0.0.0		kod limited noquery
	 *
	 * The first entry which matches an address is used.  With the
	 * example restrictions above, 192.168.0.0/24 matches the first
	 * entry, the rest of 192.168.0.0/16 matches the second, and
	 * everything else matches the third (default).
	 *
	 * Note this achieves the same result a little more efficiently
	 * than the documented behavior, which is to keep the lists
	 * sorted by ascending address followed by ascending mask, with
	 * the _last_ matching entry used.
	 *
	 * An additional wrinkle is we may have multiple entries with
	 * the same address and mask but differing match flags (mflags).
	 * At present there is only one, RESM_NTPONLY.  Entries with
	 * RESM_NTPONLY are sorted earlier so they take precedence over
	 * any otherwise similar entry without.  Again, this is the same
	 * behavior as the docs describe, just a reversed implementation.
	 */
	LINK_SLIST(restrictlist4, &restrict_def4, link);
	LINK_SLIST(restrictlist6, &restrict_def6, link);
	restrictcount = 2;
}
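/*
 * Illustrative only: a self-contained sketch of the first-match
 * lookup the sort order above enables.  res_entry, v4_addr, v4_mask,
 * and lookup_flags are hypothetical names, not ntpd's types.
 */
#include <stdint.h>

typedef struct res_entry res_entry;
struct res_entry {
	res_entry *	link;
	uint32_t	v4_addr;	/* host byte order */
	uint32_t	v4_mask;
	unsigned short	flags;
};

static unsigned short
lookup_flags(res_entry *listhead, uint32_t addr)
{
	res_entry *res;

	/* first match wins; the 0.0.0.0/0 tail entry matches anything */
	for (res = listhead; res != NULL; res = res->link)
		if ((addr & res->v4_mask) == res->v4_addr)
			return res->flags;

	return 0;	/* unreachable once the default entry is linked */
}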
/*
 * auth_moremem - get some more free key structures
 */
void
auth_moremem(
	int	keycount
	)
{
	symkey *	sk;
	int		i;
#ifdef DEBUG
	void *		base;
	symkey_alloc *	allocrec;
# define MOREMEM_EXTRA_ALLOC	(sizeof(*allocrec))
#else
# define MOREMEM_EXTRA_ALLOC	(0)
#endif

	i = (keycount > 0)
		? keycount
		: MEMINC;
	sk = emalloc_zero(i * sizeof(*sk) + MOREMEM_EXTRA_ALLOC);
#ifdef DEBUG
	base = sk;
#endif
	authnumfreekeys += i;

	for (; i > 0; i--, sk++) {
		LINK_SLIST(authfreekeys, sk, llink.f);
	}
#ifdef DEBUG
	allocrec = (void *)sk;
	allocrec->mem = base;
	LINK_SLIST(authallocs, allocrec, link);
#endif
}
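/*
 * For reference, the intrusive-list macros used throughout come from
 * include/ntp_lists.h.  LINK_SLIST is a simple head insertion,
 * essentially (reproduced from memory, not verbatim):
 *
 *	#define LINK_SLIST(listhead, pentry, nextlink)		\
 *	do {							\
 *		(pentry)->nextlink = (listhead);		\
 *		(listhead) = (pentry);				\
 *	} while (FALSE)
 *
 * which is why auth_moremem() can thread a freshly allocated array
 * onto authfreekeys one element at a time.
 */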
static void
create_buffers(int nbufs)
{
	register recvbuf_t *bufp;
	int i, abuf;

	abuf = nbufs + buffer_shortfall;
	buffer_shortfall = 0;

#ifndef DEBUG
	bufp = emalloc_zero(abuf * sizeof(*bufp));
#endif

	for (i = 0; i < abuf; i++) {
#ifdef DEBUG
		/*
		 * Allocate each buffer individually so they can be
		 * free()d during ntpd shutdown on DEBUG builds to
		 * keep them out of heap leak reports.
		 */
		bufp = emalloc_zero(sizeof(*bufp));
#endif
		LINK_SLIST(free_recv_list, bufp, link);
		bufp++;		/* dead on DEBUG builds; bufp is
				   reassigned each pass above */
		free_recvbufs++;
		total_recvbufs++;
	}
	lowater_adds++;
}
static void
free_res(
	restrict_u *	res,
	int		v6
	)
{
	restrict_u **	plisthead;
	restrict_u *	unlinked;

	restrictcount--;
	if (RES_LIMITED & res->flags)
		dec_res_limited();

	if (v6)
		plisthead = &restrictlist6;
	else
		plisthead = &restrictlist4;
	UNLINK_SLIST(unlinked, *plisthead, res, link, restrict_u);
	INSIST(unlinked == res);

	if (v6) {
		zero_mem(res, V6_SIZEOF_RESTRICT_U);
		plisthead = &resfree6;
	} else {
		zero_mem(res, V4_SIZEOF_RESTRICT_U);
		plisthead = &resfree4;
	}
	LINK_SLIST(*plisthead, res, link);
}
/*
 * allocsymkey - common code to allocate and link in symkey
 *
 * secret must be allocated with a free-compatible allocator.  It is
 * owned by the referring symkey structure, and will be free()d by
 * freesymkey().
 */
static void
allocsymkey(
	symkey **	bucket,
	keyid_t		id,
	u_short		flags,
	u_short		type,
	u_long		lifetime,
	u_short		secretsize,
	uint8_t *	secret
	)
{
	symkey *	sk;

	if (authnumfreekeys < 1)
		auth_moremem(-1);
	UNLINK_HEAD_SLIST(sk, authfreekeys, llink.f);
	DEBUG_ENSURE(sk != NULL);
	sk->keyid = id;
	sk->flags = flags;
	sk->type = type;
	sk->secretsize = secretsize;
	sk->secret = secret;
	sk->lifetime = lifetime;
	LINK_SLIST(*bucket, sk, hlink);
	LINK_TAIL_DLIST(key_listhead, sk, llink);
	authnumfreekeys--;
	authnumkeys++;
}
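/*
 * The free-list pop above uses LINK_SLIST's counterpart.  From
 * ntp_lists.h (again from memory, not verbatim), UNLINK_HEAD_SLIST
 * is approximately:
 *
 *	#define UNLINK_HEAD_SLIST(punlinked, listhead, nextlink)	\
 *	do {								\
 *		(punlinked) = (listhead);				\
 *		if (NULL != (punlinked)) {				\
 *			(listhead) = (punlinked)->nextlink;		\
 *			(punlinked)->nextlink = NULL;			\
 *		}							\
 *	} while (FALSE)
 *
 * leaving sk NULL only if the list was empty, which the preceding
 * auth_moremem() call rules out.
 */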
/*
 * auth_resize_hashtable
 *
 * Size hash table to average 4 or fewer entries per bucket initially,
 * within the bounds of at least 4 and no more than 15 bits for the
 * hash table index.  Populate the hash table.
 */
static void
auth_resize_hashtable(void)
{
	u_long		totalkeys;
	u_short		hashbits;
	u_short		hash;
	size_t		newalloc;
	symkey *	sk;

	totalkeys = authnumkeys + authnumfreekeys;
	hashbits = auth_log2(totalkeys / 4.0) + 1;
	hashbits = max(4, hashbits);
	hashbits = min(15, hashbits);

	authhashbuckets = 1 << hashbits;
	authhashmask = authhashbuckets - 1;
	newalloc = authhashbuckets * sizeof(key_hash[0]);

	key_hash = erealloc(key_hash, newalloc);
	memset(key_hash, '\0', newalloc);

	ITER_DLIST_BEGIN(key_listhead, sk, llink, symkey)
		hash = KEYHASH(sk->keyid);
		LINK_SLIST(key_hash[hash], sk, hlink);
	ITER_DLIST_END()
}
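/*
 * Sizing example (assuming auth_log2() returns the floor of the
 * base-2 logarithm): with 1000 total keys, 1000 / 4.0 is 250,
 * auth_log2(250) is 7, so hashbits becomes 8 and the table gets
 * 1 << 8 = 256 buckets, or about four keys per bucket on average.
 */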
static inline void
mon_free_entry(
	mon_entry *m
	)
{
	ZERO(*m);
	LINK_SLIST(mon_free, m, hash_next);
}
/*
 * free_peer - internal routine to free memory referred to by a struct
 *	       peer and return it to the peer free list.  If unlink is
 *	       nonzero, unlink from the various lists.
 */
static void
free_peer(
	struct peer *	p,
	int		unlink_peer
	)
{
	struct peer *	unlinked;
	int		hash;

	if (unlink_peer) {
		hash = NTP_HASH_ADDR(&p->srcadr);
		peer_hash_count[hash]--;

		UNLINK_SLIST(unlinked, peer_hash[hash], p, adr_link,
			     struct peer);
		if (NULL == unlinked) {
			peer_hash_count[hash]++;
			msyslog(LOG_ERR, "peer %s not in address table!",
				stoa(&p->srcadr));
		}

		/*
		 * Remove him from the association hash as well.
		 */
		hash = p->associd & NTP_HASH_MASK;
		assoc_hash_count[hash]--;

		UNLINK_SLIST(unlinked, assoc_hash[hash], p, aid_link,
			     struct peer);
		if (NULL == unlinked) {
			assoc_hash_count[hash]++;
			msyslog(LOG_ERR,
				"peer %s not in association ID table!",
				stoa(&p->srcadr));
		}

		/* Remove him from the overall list. */
		UNLINK_SLIST(unlinked, peer_list, p, p_link,
			     struct peer);
		if (NULL == unlinked)
			msyslog(LOG_ERR, "%s not in peer list!",
				stoa(&p->srcadr));
	}

	if (p->hostname != NULL)
		free(p->hostname);

	if (p->ident != NULL)
		free(p->ident);

	if (p->addrs != NULL)
		free(p->addrs);		/* from copy_addrinfo_list() */

	/* Add his corporeal form to peer free list */
	ZERO(*p);
	LINK_SLIST(peer_free, p, p_link);
	peer_free_count++;
}
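/*
 * UNLINK_SLIST, unlike UNLINK_HEAD_SLIST, removes a specific entry
 * wherever it sits and yields NULL in its result variable when the
 * entry was not on the list; the defensive msyslog() checks above
 * rely on that.  A behavioral sketch for struct peer's p_link (the
 * real macro in ntp_lists.h is generic over entry type and link
 * member; this helper is illustrative only):
 */
static struct peer *
unlink_peer_sketch(
	struct peer **	plisthead,
	struct peer *	ptounlink
	)
{
	struct peer **	pp;

	for (pp = plisthead; *pp != NULL; pp = &(*pp)->p_link)
		if (*pp == ptounlink) {
			*pp = ptounlink->p_link;
			ptounlink->p_link = NULL;
			return ptounlink;	/* found and unlinked */
		}

	return NULL;	/* absent; caller logs and repairs counts */
}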
/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb) {
		LOCK();
		rb->used--;
		if (rb->used != 0)
			msyslog(LOG_ERR,
				"******** freerecvbuff non-zero usage: %d *******",
				rb->used);
		LINK_SLIST(free_recv_list, rb, link);
		free_recvbufs++;
		UNLOCK();
	}
}
/*
 * getmorepeermem - add more peer structures to the free list
 */
static void
getmorepeermem(void)
{
	int		i;
	struct peer *	peers;

	peers = emalloc_zero(INC_PEER_ALLOC * sizeof(*peers));

	for (i = INC_PEER_ALLOC - 1; i >= 0; i--)
		LINK_SLIST(peer_free, &peers[i], p_link);

	total_peer_structs += INC_PEER_ALLOC;
	peer_free_count += INC_PEER_ALLOC;
}
/*
 * freerecvbuf - make a single recvbuf available for reuse
 */
void
freerecvbuf(recvbuf_t *rb)
{
	if (rb == NULL) {
		msyslog(LOG_ERR, "freerecvbuff received NULL buffer");
		return;
	}

	rb->used--;
	if (rb->used != 0)
		msyslog(LOG_ERR,
			"******** freerecvbuff non-zero usage: %d *******",
			rb->used);
	LINK_SLIST(free_recv_list, rb, link);
	free_recvbufs++;
}
/*
 * init_peer - initialize peer data structures and counters
 *
 * N.B. We use the random number routine in here.  It had better be
 * initialized prior to getting here.
 */
void
init_peer(void)
{
	int i;

	/*
	 * Initialize peer free list from static allocation.
	 */
	for (i = COUNTOF(init_peer_alloc) - 1; i >= 0; i--)
		LINK_SLIST(peer_free, &init_peer_alloc[i], p_link);

	total_peer_structs = COUNTOF(init_peer_alloc);
	peer_free_count = COUNTOF(init_peer_alloc);

	/*
	 * Initialize our first association ID
	 */
	do
		current_association_ID = ntp_random() & ASSOCID_MAX;
	while (!current_association_ID);
}
/*
 * freesymkey - common code to remove a symkey and recycle its entry.
 */
static void
freesymkey(
	symkey *	sk,
	symkey **	bucket
	)
{
	symkey *	unlinked;

	if (sk->secret != NULL) {
		memset(sk->secret, '\0', sk->secretsize);
		free(sk->secret);
	}
	UNLINK_SLIST(unlinked, *bucket, sk, hlink, symkey);
	DEBUG_ENSURE(sk == unlinked);
	UNLINK_DLIST(sk, llink);
	memset((char *)sk + offsetof(symkey, symkey_payload), '\0',
	       sizeof(*sk) - offsetof(symkey, symkey_payload));
	LINK_SLIST(authfreekeys, sk, llink.f);
	authnumkeys--;
	authnumfreekeys++;
}
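/*
 * The partial memset() above zeroes only the payload portion of the
 * symkey, on the assumption that the list-link members sit at the
 * front of the struct and symkey_payload names (or is #defined to)
 * the first non-link member, so the links are left alone.  A
 * self-contained sketch of the idiom with hypothetical names (entry,
 * entry_payload, scrub_payload):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct entry {
	struct entry *	link;		/* list plumbing, untouched */
#define entry_payload	keyid		/* first payload member */
	uint32_t	keyid;
	uint16_t	flags;
	uint8_t *	secret;
};

static void
scrub_payload(struct entry *e)
{
	/* wipe keyid through secret in one call, preserving link */
	memset((char *)e + offsetof(struct entry, entry_payload), 0,
	       sizeof(*e) - offsetof(struct entry, entry_payload));
}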
static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	int		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}
/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, possible KoD response.  This implies you cannot tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
	struct recvbuf *rbufp,
	u_short	flags
	)
{
	l_fp		interval_fp;
	struct pkt *	pkt;
	mon_entry *	mon;
	mon_entry *	oldest;
	int		oldest_age;
	u_int		hash;
	u_short		restrict_mask;
	u_char		mode;
	u_char		version;
	int		interval;
	int		head;		/* headway increment */
	int		leak;		/* new headway */
	int		limit;		/* average threshold */

	REQUIRE(rbufp != NULL);

	if (mon_enabled == MON_OFF)
		return ~(RES_LIMITED | RES_KOD) & flags;

	pkt = &rbufp->recv_pkt;
	hash = MON_HASH(&rbufp->recv_srcadr);
	mode = PKT_MODE(pkt->li_vn_mode);
	version = PKT_VERSION(pkt->li_vn_mode);
	mon = mon_hash[hash];

	/*
	 * We keep track of all traffic for a given IP in one entry,
	 * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
	 */
	for (; mon != NULL; mon = mon->hash_next)
		if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr))
			break;

	if (mon != NULL) {
		interval_fp = rbufp->recv_time;
		L_SUB(&interval_fp, &mon->last);
		/* add one-half second to round up */
		L_ADDUF(&interval_fp, 0x80000000);
		interval = interval_fp.l_i;
		mon->last = rbufp->recv_time;
		NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
		mon->count++;
		restrict_mask = flags;
		mon->vn_mode = VN_MODE(version, mode);

		/* Shuffle to the head of the MRU list. */
		UNLINK_DLIST(mon, mru);
		LINK_DLIST(mon_mru_list, mon, mru);

		/*
		 * At this point the most recent arrival is first in the
		 * MRU list.  Decrease the counter by the headway, but
		 * not less than zero.
		 */
		mon->leak -= interval;
		mon->leak = max(0, mon->leak);
		head = 1 << ntp_minpoll;
		leak = mon->leak + head;
		limit = NTP_SHIFT * head;

		DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
			    interval, leak, limit));

		/*
		 * If the minimum and average thresholds are not
		 * exceeded, douse the RES_LIMITED and RES_KOD bits and
		 * increase the counter by the headway increment.  Note
		 * that we give a 1-s grace for the minimum threshold
		 * and a 2-s grace for the headway increment.  If one or
		 * both thresholds are exceeded and the old counter is
		 * less than the average threshold, set the counter to
		 * the average threshold plus the increment and leave
		 * the RES_LIMITED and RES_KOD bits lit.  Otherwise,
		 * leave the counter alone and douse the RES_KOD bit.
		 * This rate-limits the KoDs to no less than the average
		 * headway.
		 */
		if (interval + 1 >= ntp_minpkt && leak < limit) {
			mon->leak = leak - 2;
			restrict_mask &= ~(RES_LIMITED | RES_KOD);
		} else if (mon->leak < limit)
			mon->leak = limit + head;
		else
			restrict_mask &= ~RES_KOD;
		mon->flags = restrict_mask;

		return mon->flags;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 *
	 * The following ntp.conf "mru" knobs come into play determining
	 * the depth (or count) of the MRU list:
	 * - mru_mindepth ("mru mindepth") is a floor beneath which
	 *   entries are kept without regard to their age.  The
	 *   default is 600, which matches the longtime implementation
	 *   limit on the total number of entries.
	 * - mru_maxage ("mru maxage") is a ceiling on the age in
	 *   seconds of entries.  Entries older than this are
	 *   reclaimed once mru_mindepth is exceeded.  64s default.
	 *   Note that entries older than this can easily survive,
	 *   as they are reclaimed only as needed.
	 * - mru_maxdepth ("mru maxdepth") is a hard limit on the
	 *   number of entries.
	 * - "mru maxmem" sets mru_maxdepth to the number of entries
	 *   which fit in the given number of kilobytes.  The default
	 *   is 1024, or 1 megabyte.
	 * - mru_initalloc ("mru initalloc") sets the count of the
	 *   initial allocation of MRU entries.
	 * - "mru initmem" sets mru_initalloc in units of kilobytes.
	 *   The default is 4.
	 * - mru_incalloc ("mru incalloc") sets the number of entries
	 *   to allocate on-demand each time the free list is empty.
	 * - "mru incmem" sets mru_incalloc in units of kilobytes.
	 *   The default is 4.
	 * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
	 * ntp.conf controls.  Similarly for "mru initalloc" and "mru
	 * initmem", and for "mru incalloc" and "mru incmem".
	 */
	if (mru_entries < mru_mindepth) {
		if (NULL == mon_free)
			mon_getmoremem();
		UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
	} else {
		oldest = TAIL_DLIST(mon_mru_list, mru);
		oldest_age = 0;		/* silence uninit warning */
		if (oldest != NULL) {
			interval_fp = rbufp->recv_time;
			L_SUB(&interval_fp, &oldest->last);
			/* add one-half second to round up */
			L_ADDUF(&interval_fp, 0x80000000);
			oldest_age = interval_fp.l_i;
		}
		/* note -1 is legal for mru_maxage (disables) */
		if (oldest != NULL && mru_maxage < oldest_age) {
			mon_reclaim_entry(oldest);
			mon = oldest;
		} else if (mon_free != NULL ||
			   mru_alloc < mru_maxdepth) {
			if (NULL == mon_free)
				mon_getmoremem();
			UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
		/* Preempt from the MRU list if old enough. */
		} else if (ntp_random() / (2. * FRAC) >
			   (double)oldest_age / mon_age) {
			return ~(RES_LIMITED | RES_KOD) & flags;
		} else {
			mon_reclaim_entry(oldest);
			mon = oldest;
		}
	}

	INSIST(mon != NULL);

	/*
	 * Got one, initialize it
	 */
	mru_entries++;
	mru_peakentries = max(mru_peakentries, mru_entries);
	mon->last = rbufp->recv_time;
	mon->first = mon->last;
	mon->count = 1;
	mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
	mon->leak = 0;
	memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
	mon->vn_mode = VN_MODE(version, mode);
	mon->lcladr = rbufp->dstadr;
	mon->cast_flags = (u_char)
		(((rbufp->dstadr->flags & INT_MCASTOPEN) &&
		  rbufp->fd == mon->lcladr->fd)
		     ? MDF_MCAST
		     : rbufp->fd == mon->lcladr->bfd
			   ? MDF_BCAST
			   : MDF_UCAST);

	/*
	 * Drop him into front of the hash table.  Also put him on top
	 * of the MRU list.
	 */
	LINK_SLIST(mon_hash[hash], mon, hash_next);
	LINK_DLIST(mon_mru_list, mon, mru);

	return mon->flags;
}
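/*
 * Worked example of the headway arithmetic above, using illustrative
 * values (check your build's defaults): with ntp_minpkt = 2,
 * ntp_minpoll = 6, and NTP_SHIFT = 8, head = 1 << 6 = 64 and
 * limit = 8 * 64 = 512.  A client arriving every 8 s drains 8 from
 * mon->leak per packet but adds back 64 - 2 = 62, a net +54, so the
 * average threshold of 512 is crossed after roughly nine packets,
 * after which RES_LIMITED (and possibly RES_KOD) stay lit until the
 * sender slows down enough for the bucket to drain.
 */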
/*
** queue_xmt
*/
void
queue_xmt(
	SOCKET			sock,
	struct dns_ctx *	dctx,
	sent_pkt *		spkt,
	u_int			xmt_delay
	)
{
	sockaddr_u *	dest;
	sent_pkt **	pkt_listp;
	sent_pkt *	match;
	xmt_ctx *	xctx;
	struct timeval	start_cb;
	struct timeval	delay;

	dest = &spkt->addr;
	if (IS_IPV6(dest))
		pkt_listp = &v6_pkts_list;
	else
		pkt_listp = &v4_pkts_list;

	/* reject attempts to add address already listed */
	for (match = *pkt_listp; match != NULL; match = match->link) {
		if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
			if (strcasecmp(spkt->dctx->name,
				       match->dctx->name))
				printf("%s %s duplicate address from %s ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name,
				       spkt->dctx->name);
			else
				printf("%s %s, duplicate address ignored.\n",
				       sptoa(&match->addr),
				       match->dctx->name);
			dec_pending_ntp(spkt->dctx->name, &spkt->addr);
			free(spkt);
			return;
		}
	}
	LINK_SLIST(*pkt_listp, spkt, link);

	xctx = emalloc_zero(sizeof(*xctx));
	xctx->sock = sock;
	xctx->spkt = spkt;
	gettimeofday_cached(base, &start_cb);
	xctx->sched = start_cb.tv_sec + (2 * xmt_delay);

	LINK_SORT_SLIST(xmt_q, xctx,
			(xctx->sched < L_S_S_CUR()->sched),
			link, xmt_ctx);
	if (xmt_q == xctx) {
		/*
		 * The new entry is the first scheduled.  The timer is
		 * either not active or is set for the second xmt
		 * context in xmt_q.
		 */
		if (NULL == ev_xmt_timer)
			ev_xmt_timer = event_new(base, INVALID_SOCKET,
						 EV_TIMEOUT,
						 &xmt_timer_cb, NULL);
		if (NULL == ev_xmt_timer) {
			msyslog(LOG_ERR,
				"queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
			exit(1);
		}
		ZERO(delay);
		if (xctx->sched > start_cb.tv_sec)
			delay.tv_sec = xctx->sched - start_cb.tv_sec;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
			  (u_int)delay.tv_usec));
	}
}
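/*
 * LINK_SORT_SLIST above keeps xmt_q ordered by ascending sched time:
 * it walks the list and inserts before the first entry for which the
 * caller's expression is true, with L_S_S_CUR() naming the entry at
 * the cursor.  A behavioral sketch for this particular use (an
 * illustrative helper, not the generic macro from ntp_lists.h):
 */
static void
xmt_q_insert_sorted(
	xmt_ctx **	q,
	xmt_ctx *	xctx
	)
{
	xmt_ctx **	pp;

	/* find the first queued context scheduled later than xctx */
	for (pp = q; *pp != NULL; pp = &(*pp)->link)
		if (xctx->sched < (*pp)->sched)
			break;
	xctx->link = *pp;
	*pp = xctx;
}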