/*
 * authistrustedip - determine if the IP is OK for the keyid
 *
 * Looks up the key's access list (using the one-entry cache when the
 * keyid matches) and reports whether the source address is permitted
 * to use it.  A key with no access list is usable from any address.
 * Returns TRUE or FALSE.
 */
int
authistrustedip(
	keyid_t		keyno,
	sockaddr_u *	sau
	)
{
	KeyAccT *	acl;
	KeyAccT *	node;
	symkey *	entry;

	if (keyno == cache_keyid) {
		/* fast path: most lookups hit the one-entry cache */
		acl = cache_keyacclist;
	} else {
		authkeyuncached++;
		/* walk the hash chain for this keyid */
		entry = key_hash[KEYHASH(keyno)];
		while (entry != NULL && keyno != entry->keyid)
			entry = entry->hlink;
		if (NULL == entry || !(KEY_TRUSTED & entry->flags)) {
			/* callers are expected to have verified trust first */
			INSIST(!"authistrustedip: keyid not found/trusted!");
			return FALSE;
		}
		acl = entry->keyacclist;
	}

	/* no access list means the key is good from any address */
	if (NULL == acl)
		return TRUE;

	for (node = acl; node != NULL; node = node->next)
		if (SOCK_EQ(&node->addr, sau))
			return TRUE;

	return FALSE;
}
/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, possible KoD response.  This implies you can not tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
	struct recvbuf *rbufp,		/* received packet buffer */
	u_short	flags			/* restriction flags from caller */
	)
{
	l_fp		interval_fp;	/* time since last packet, fixed-point */
	struct pkt *	pkt;
	mon_entry *	mon;		/* MRU entry for this source, if any */
	mon_entry *	oldest;		/* LRU candidate for reclamation */
	int		oldest_age;
	u_int		hash;
	u_short		restrict_mask;
	u_char		mode;
	u_char		version;
	int		interval;	/* seconds since last packet */
	int		head;		/* headway increment */
	int		leak;		/* new headway */
	int		limit;		/* average threshold */

	REQUIRE(rbufp != NULL);

	/* monitoring disabled: never rate-limit, just pass flags through */
	if (mon_enabled == MON_OFF)
		return ~(RES_LIMITED | RES_KOD) & flags;

	pkt = &rbufp->recv_pkt;
	hash = MON_HASH(&rbufp->recv_srcadr);
	mode = PKT_MODE(pkt->li_vn_mode);
	version = PKT_VERSION(pkt->li_vn_mode);
	mon = mon_hash[hash];

	/*
	 * We keep track of all traffic for a given IP in one entry,
	 * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
	 */
	for (; mon != NULL; mon = mon->hash_next)
		if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr))
			break;

	if (mon != NULL) {
		/* Existing entry: compute inter-arrival interval. */
		interval_fp = rbufp->recv_time;
		L_SUB(&interval_fp, &mon->last);
		/* add one-half second to round up */
		L_ADDUF(&interval_fp, 0x80000000);
		interval = interval_fp.l_i;
		mon->last = rbufp->recv_time;
		/* track the source's most recent port, not just address */
		NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
		mon->count++;
		restrict_mask = flags;
		mon->vn_mode = VN_MODE(version, mode);

		/* Shuffle to the head of the MRU list. */
		UNLINK_DLIST(mon, mru);
		LINK_DLIST(mon_mru_list, mon, mru);

		/*
		 * At this point the most recent arrival is first in the
		 * MRU list.  Decrease the counter by the headway, but
		 * not less than zero.
		 */
		mon->leak -= interval;
		mon->leak = max(0, mon->leak);
		head = 1 << ntp_minpoll;
		leak = mon->leak + head;
		limit = NTP_SHIFT * head;

		DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
			    interval, leak, limit));

		/*
		 * If the minimum and average thresholds are not
		 * exceeded, douse the RES_LIMITED and RES_KOD bits and
		 * increase the counter by the headway increment.  Note
		 * that we give a 1-s grace for the minimum threshold
		 * and a 2-s grace for the headway increment.  If one or
		 * both thresholds are exceeded and the old counter is
		 * less than the average threshold, set the counter to
		 * the average threshold plus the increment and leave
		 * the RES_LIMITED and RES_KOD bits lit.  Otherwise,
		 * leave the counter alone and douse the RES_KOD bit.
		 * This rate-limits the KoDs to no less than the average
		 * headway.
		 */
		if (interval + 1 >= ntp_minpkt && leak < limit) {
			mon->leak = leak - 2;
			restrict_mask &= ~(RES_LIMITED | RES_KOD);
		} else if (mon->leak < limit)
			mon->leak = limit + head;
		else
			restrict_mask &= ~RES_KOD;

		mon->flags = restrict_mask;

		return mon->flags;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 *
	 * The following ntp.conf "mru" knobs come into play determining
	 * the depth (or count) of the MRU list:
	 * - mru_mindepth ("mru mindepth") is a floor beneath which
	 *   entries are kept without regard to their age.  The
	 *   default is 600 which matches the longtime implementation
	 *   limit on the total number of entries.
	 * - mru_maxage ("mru maxage") is a ceiling on the age in
	 *   seconds of entries.  Entries older than this are
	 *   reclaimed once mon_mindepth is exceeded.  64s default.
	 *   Note that entries older than this can easily survive
	 *   as they are reclaimed only as needed.
	 * - mru_maxdepth ("mru maxdepth") is a hard limit on the
	 *   number of entries.
	 * - "mru maxmem" sets mru_maxdepth to the number of entries
	 *   which fit in the given number of kilobytes.  The default is
	 *   1024, or 1 megabyte.
	 * - mru_initalloc ("mru initalloc") sets the count of the
	 *   initial allocation of MRU entries.
	 * - "mru initmem" sets mru_initalloc in units of kilobytes.
	 *   The default is 4.
	 * - mru_incalloc ("mru incalloc") sets the number of entries to
	 *   allocate on-demand each time the free list is empty.
	 * - "mru incmem" sets mru_incalloc in units of kilobytes.
	 *   The default is 4.
	 * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
	 * ntp.conf controls.  Similarly for "mru initalloc" and "mru
	 * initmem", and for "mru incalloc" and "mru incmem".
	 */
	if (mru_entries < mru_mindepth) {
		/* below the floor: always allocate, never reclaim */
		if (NULL == mon_free)
			mon_getmoremem();
		UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
	} else {
		oldest = TAIL_DLIST(mon_mru_list, mru);
		oldest_age = 0;		/* silence uninit warning */
		if (oldest != NULL) {
			interval_fp = rbufp->recv_time;
			L_SUB(&interval_fp, &oldest->last);
			/* add one-half second to round up */
			L_ADDUF(&interval_fp, 0x80000000);
			oldest_age = interval_fp.l_i;
		}
		/* note -1 is legal for mru_maxage (disables) */
		if (oldest != NULL && mru_maxage < oldest_age) {
			mon_reclaim_entry(oldest);
			mon = oldest;
		} else if (mon_free != NULL || mru_alloc <
			   mru_maxdepth) {
			if (NULL == mon_free)
				mon_getmoremem();
			UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
		/* Preempt from the MRU list if old enough. */
		} else if (ntp_random() / (2. * FRAC) >
			   (double)oldest_age / mon_age) {
			/* lost the preemption lottery: drop this packet's
			   entry and just return doused flags */
			return ~(RES_LIMITED | RES_KOD) & flags;
		} else {
			mon_reclaim_entry(oldest);
			mon = oldest;
		}
	}

	INSIST(mon != NULL);

	/*
	 * Got one, initialize it
	 */
	mru_entries++;
	mru_peakentries = max(mru_peakentries, mru_entries);
	mon->last = rbufp->recv_time;
	mon->first = mon->last;
	mon->count = 1;
	mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
	mon->leak = 0;
	memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
	mon->vn_mode = VN_MODE(version, mode);
	mon->lcladr = rbufp->dstadr;
	/* classify how the packet arrived: multicast, broadcast, unicast */
	mon->cast_flags = (u_char)(((rbufp->dstadr->flags &
	    INT_MCASTOPEN) && rbufp->fd == mon->lcladr->fd)
	    ? MDF_MCAST
	    : rbufp->fd == mon->lcladr->bfd ? MDF_BCAST : MDF_UCAST);

	/*
	 * Drop him into front of the hash table. Also put him on top of
	 * the MRU list.
	 */
	LINK_SLIST(mon_hash[hash], mon, hash_next);
	LINK_DLIST(mon_mru_list, mon, mru);

	return mon->flags;
}
/*
 * socktohost - reverse-map an address to its DNS name, with a
 * forward-confirmation lookup.
 *
 * Returns the verified hostname when the name resolves back to the
 * given address, the numeric address string when no name exists, or
 * "address (name)" when the name fails the forward check (a likely
 * forged PTR record).  Results live in LIB_GETBUF-managed buffers.
 */
const char *
socktohost(
	const sockaddr_u *sock
	)
{
	const char	svc[] = "ntp";
	char *		namebuf;	/* purported DNS name */
	char *		mismatch;	/* "addr (name)" on forward failure */
	struct addrinfo	hints;
	struct addrinfo *results;
	struct addrinfo *cursor;
	sockaddr_u	resaddr;
	size_t		copylen;
	int		gai_err;

	/* Step 1: reverse lookup; fall back to the numeric form. */
	LIB_GETBUF(namebuf);
	if (getnameinfo(&sock->sa, SOCKLEN(sock), namebuf, LIB_BUFLENGTH,
			NULL, 0, NI_DGRAM | NI_NAMEREQD))
		return stoa(sock);	/* use address */

	TRACE(1, ("%s reversed to %s\n", stoa(sock), namebuf));

	/*
	 * Step 2: resolve the reversed name and require that the
	 * original address appears among the results.
	 */
	ZERO(hints);
	hints.ai_family = AF(sock);
	hints.ai_protocol = IPPROTO_UDP;
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_flags = 0;
	results = NULL;

	gai_err = getaddrinfo(namebuf, svc, &hints, &results);
	if (gai_err == EAI_NONAME
#ifdef EAI_NODATA
	    || gai_err == EAI_NODATA
#endif
	    ) {
		/* retry asking for the canonical name */
		hints.ai_flags = AI_CANONNAME;
#ifdef AI_ADDRCONFIG
		hints.ai_flags |= AI_ADDRCONFIG;
#endif
		gai_err = getaddrinfo(namebuf, svc, &hints, &results);
	}
#ifdef AI_ADDRCONFIG
	/* Some older implementations don't like AI_ADDRCONFIG. */
	if (gai_err == EAI_BADFLAGS) {
		hints.ai_flags &= ~AI_ADDRCONFIG;
		gai_err = getaddrinfo(namebuf, svc, &hints, &results);
	}
#endif
	if (gai_err)
		goto forward_fail;

	NTP_INSIST(results != NULL);

	for (cursor = results; cursor != NULL; cursor = cursor->ai_next) {
		/*
		 * Copy into a sockaddr_u rather than casting
		 * cursor->ai_addr, because sockaddr_u may have
		 * stricter alignment than sockaddr on platforms
		 * such as sparc.
		 */
		ZERO_SOCK(&resaddr);
		copylen = min(sizeof(resaddr), cursor->ai_addrlen);
		memcpy(&resaddr, cursor->ai_addr, copylen);
		if (SOCK_EQ(sock, &resaddr))
			break;
	}
	freeaddrinfo(results);

	if (cursor != NULL)
		return namebuf;	/* forward check passed */

forward_fail:
	TRACE(1, ("%s forward check lookup fail: %s\n", namebuf,
		  gai_strerror(gai_err)));
	LIB_GETBUF(mismatch);
	snprintf(mismatch, LIB_BUFLENGTH, "%s (%s)", stoa(sock), namebuf);

	return mismatch;
}
/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
	evutil_socket_t fd,
	short what,
	void *	ptr
	)
{
	sockaddr_u	src;		/* packet source address */
	sent_pkt **	list_head;	/* v4 or v6 outstanding-request list */
	sent_pkt *	match;		/* request matching the sender */
	int		pkt_len;
	int		status;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6) ? "6" : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ) ? " read" : "",
		  (what & EV_WRITE) ? " write" : "",
		  (what & EV_SIGNAL) ? " signal" : ""));

	if (!(EV_READ & what)) {
		/* nothing to read; a timeout expires pending queries */
		if (EV_TIMEOUT & what)
			timeout_queries();
		return;
	}

	/* Pull the datagram off the socket. */
	pkt_len = recvdata(fd, &src, &rbuf, sizeof(rbuf));
	if (pkt_len < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	/* Locate the outstanding request this reply answers. */
	list_head = (sock6 == fd)
			? &v6_pkts_list
			: &v4_pkts_list;
	match = *list_head;
	while (match != NULL && !SOCK_EQ(&src, &match->addr))
		match = match->link;
	if (NULL == match) {
		msyslog(LOG_WARNING, "Packet from unexpected source %s dropped",
			sptoa(&src));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", match->dctx->name,
		  sptoa(&src)));

	pkt_len = process_pkt(&r_pkt, &src, pkt_len, MODE_SERVER,
			      &match->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", pkt_len));

	/* If this is a Unicast packet, one down ... */
	if (!match->done && (CTX_UCST & match->dctx->flags)) {
		dec_pending_ntp(match->dctx->name, &match->addr);
		match->done = TRUE;
	}

	/* If the packet is good, set the time and we're all done */
	status = handle_pkt(pkt_len, &r_pkt, &match->addr, match->dctx->name);
	if (0 != status)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", status));
	check_exit_conditions();
}