void
tearDown(void)
{
	restrict_u *empty_restrict = malloc(sizeof(restrict_u));
	memset(empty_restrict, 0, sizeof(restrict_u));

	restrict_u *current;

	do {
		UNLINK_HEAD_SLIST(current, restrictlist4, link);
		if (current != NULL) {
			*current = *empty_restrict;
		}
	} while (current != NULL);

	do {
		UNLINK_HEAD_SLIST(current, restrictlist6, link);
		if (current != NULL) {
			*current = *empty_restrict;
		}
	} while (current != NULL);

	free(empty_restrict);
}
/*
 * allocsymkey - common code to allocate and link in symkey
 *
 * secret must be allocated with a free-compatible allocator.  It is
 * owned by the referring symkey structure, and will be free()d by
 * freesymkey().
 */
static void
allocsymkey(
	symkey **	bucket,
	keyid_t		id,
	u_short		flags,
	u_short		type,
	u_long		lifetime,
	u_short		secretsize,
	uint8_t *	secret
	)
{
	symkey *	sk;

	if (authnumfreekeys < 1)
		auth_moremem(-1);
	UNLINK_HEAD_SLIST(sk, authfreekeys, llink.f);
	DEBUG_ENSURE(sk != NULL);
	sk->keyid = id;
	sk->flags = flags;
	sk->type = type;
	sk->secretsize = secretsize;
	sk->secret = secret;
	sk->lifetime = lifetime;
	LINK_SLIST(*bucket, sk, hlink);
	LINK_TAIL_DLIST(key_listhead, sk, llink);
	authnumfreekeys--;
	authnumkeys++;
}
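For context, a sketch of a caller honoring the ownership rule stated in the comment above. The helper name example_store_key and the zero flags/type/lifetime values are hypothetical placeholders, not taken from ntpd:

/* Hypothetical caller sketch: the secret is copied into free()-able
 * storage because freesymkey() will free() it when the key goes away.
 * The zero flags/type/lifetime arguments are placeholders only. */
static void
example_store_key(
	symkey **	bucket,
	keyid_t		id,
	const uint8_t *	data,
	u_short		len
	)
{
	uint8_t *copy;

	copy = malloc(len);	/* must come from a free()-compatible allocator */
	if (copy == NULL)
		return;		/* real code would report the failure */
	memcpy(copy, data, len);
	allocsymkey(bucket, id, 0, 0, 0, len, copy);
}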
recvbuf_t *
get_free_recv_buffer(void)
{
	recvbuf_t *buffer;

	LOCK();
	UNLINK_HEAD_SLIST(buffer, free_recv_list, link.next);
	if (buffer != NULL) {
		free_recvbufs--;
		initialise_buffer(buffer);
		(buffer->used)++;
	} else
		buffer_shortfall++;
	UNLOCK();

	return (buffer);
}
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	while ((rbunlinked = ISC_LIST_HEAD(full_recv_list)) != NULL) {
		ISC_LIST_DEQUEUE_TYPE(full_recv_list, rbunlinked, link,
				      recvbuf_t);
		free(rbunlinked);
	}

	do {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link.next);
		if (rbunlinked != NULL)
			free(rbunlinked);
	} while (rbunlinked != NULL);
}
recvbuf_t *
get_free_recv_buffer(void)
{
	recvbuf_t *buffer;

	UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
	if (buffer != NULL) {
		free_recvbufs--;
		initialise_buffer(buffer);
		buffer->used++;
	} else {
		buffer_shortfall++;
	}

	return buffer;
}
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	do {
		UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
		if (rbunlinked != NULL)
			free(rbunlinked);
	} while (rbunlinked != NULL);

	do {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
		if (rbunlinked != NULL)
			free(rbunlinked);
	} while (rbunlinked != NULL);
}
static void
uninit_recvbuff(void)
{
	recvbuf_t *rbunlinked;

	for (;;) {
		UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}

	for (;;) {
		UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
		if (rbunlinked == NULL)
			break;
		free(rbunlinked);
	}
}
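The drain loops above all reduce to the same idiom. Below is a minimal stand-alone sketch of that idiom, assuming UNLINK_HEAD_SLIST() behaves as a head pop on an intrusive singly-linked list; the node type and the simplified macro here are illustrative stand-ins, not the ntp_lists.h originals:

#include <stdlib.h>

typedef struct node_tag {
	struct node_tag *link;		/* intrusive "next" pointer */
	int		 payload;
} node_t;

static node_t *example_list;

/* simplified stand-in for the ntp_lists.h macro: pop the head, or NULL */
#define UNLINK_HEAD_SLIST(punlinked, listhead, nextlink)	\
do {								\
	(punlinked) = (listhead);				\
	if ((punlinked) != NULL)				\
		(listhead) = (punlinked)->nextlink;		\
} while (0)

static void
drain_example_list(void)
{
	node_t *n;

	for (;;) {
		UNLINK_HEAD_SLIST(n, example_list, link);
		if (n == NULL)
			break;
		free(n);
	}
}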
/*
** xmt_timer_cb
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;
	}
	if (xmt_q->sched <= start_cb.tv_sec) {
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec, (long)delay.tv_sec));
	}
}
static restrict_u *
alloc_res6(void)
{
	const size_t	cb = V6_SIZEOF_RESTRICT_U;
	const size_t	count = INC_RESLIST6;
	restrict_u *	rl;
	restrict_u *	res;
	int		i;

	UNLINK_HEAD_SLIST(res, resfree6, link);
	if (res != NULL)
		return res;

	rl = emalloc_zero(count * cb);
	/* link all but the first onto free list */
	res = (void *)((char *)rl + (count - 1) * cb);
	for (i = count - 1; i > 0; i--) {
		LINK_SLIST(resfree6, res, link);
		res = (void *)((char *)res - cb);
	}
	INSIST(rl == res);
	/* allocate the first */
	return res;
}
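To make the carving loop above easier to follow, here is a worked trace under the illustrative assumption that INC_RESLIST6 is 4; the real value may differ:

/*
 * Illustrative trace of alloc_res6() on an empty free list, assuming
 * count == 4 (the real INC_RESLIST6 may differ), cb bytes per entry:
 *
 *   rl -> [e0][e1][e2][e3]               one emalloc_zero() block
 *
 *   i == 3: LINK_SLIST(resfree6, e3)     resfree6: e3
 *   i == 2: LINK_SLIST(resfree6, e2)     resfree6: e2 -> e3
 *   i == 1: LINK_SLIST(resfree6, e1)     resfree6: e1 -> e2 -> e3
 *
 * The loop ends with res == rl (e0), which is returned to the caller;
 * the remaining entries stay on resfree6 for later calls.
 */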
/*
 * ntp_monitor - record stats about this packet
 *
 * Returns supplied restriction flags, with RES_LIMITED and RES_KOD
 * cleared unless the packet should not be responded to normally
 * (RES_LIMITED) and possibly should trigger a KoD response (RES_KOD).
 * The returned flags are saved in the MRU entry, so that it reflects
 * whether the last packet from that source triggered rate limiting,
 * and if so, possible KoD response.  This implies you can not tell
 * whether a given address is eligible for rate limiting/KoD from the
 * monlist restrict bits, only whether or not the last packet triggered
 * such responses.  ntpdc -c reslist lets you see whether RES_LIMITED
 * or RES_KOD is lit for a particular address before ntp_monitor()'s
 * typical dousing.
 */
u_short
ntp_monitor(
	struct recvbuf *rbufp,
	u_short		flags
	)
{
	l_fp		interval_fp;
	struct pkt *	pkt;
	mon_entry *	mon;
	mon_entry *	oldest;
	int		oldest_age;
	u_int		hash;
	u_short		restrict_mask;
	u_char		mode;
	u_char		version;
	int		interval;
	int		head;		/* headway increment */
	int		leak;		/* new headway */
	int		limit;		/* average threshold */

	REQUIRE(rbufp != NULL);

	if (mon_enabled == MON_OFF)
		return ~(RES_LIMITED | RES_KOD) & flags;

	pkt = &rbufp->recv_pkt;
	hash = MON_HASH(&rbufp->recv_srcadr);
	mode = PKT_MODE(pkt->li_vn_mode);
	version = PKT_VERSION(pkt->li_vn_mode);
	mon = mon_hash[hash];

	/*
	 * We keep track of all traffic for a given IP in one entry,
	 * otherwise cron'ed ntpdate or similar evades RES_LIMITED.
	 */
	for (; mon != NULL; mon = mon->hash_next)
		if (SOCK_EQ(&mon->rmtadr, &rbufp->recv_srcadr))
			break;

	if (mon != NULL) {
		interval_fp = rbufp->recv_time;
		L_SUB(&interval_fp, &mon->last);
		/* add one-half second to round up */
		L_ADDUF(&interval_fp, 0x80000000);
		interval = interval_fp.l_i;
		mon->last = rbufp->recv_time;
		NSRCPORT(&mon->rmtadr) = NSRCPORT(&rbufp->recv_srcadr);
		mon->count++;
		restrict_mask = flags;
		mon->vn_mode = VN_MODE(version, mode);

		/* Shuffle to the head of the MRU list. */
		UNLINK_DLIST(mon, mru);
		LINK_DLIST(mon_mru_list, mon, mru);

		/*
		 * At this point the most recent arrival is first in the
		 * MRU list.  Decrease the counter by the headway, but
		 * not less than zero.
		 */
		mon->leak -= interval;
		mon->leak = max(0, mon->leak);
		head = 1 << ntp_minpoll;
		leak = mon->leak + head;
		limit = NTP_SHIFT * head;

		DPRINTF(2, ("MRU: interval %d headway %d limit %d\n",
			    interval, leak, limit));

		/*
		 * If the minimum and average thresholds are not
		 * exceeded, douse the RES_LIMITED and RES_KOD bits and
		 * increase the counter by the headway increment.  Note
		 * that we give a 1-s grace for the minimum threshold
		 * and a 2-s grace for the headway increment.  If one or
		 * both thresholds are exceeded and the old counter is
		 * less than the average threshold, set the counter to
		 * the average threshold plus the increment and leave
		 * the RES_LIMITED and RES_KOD bits lit.  Otherwise,
		 * leave the counter alone and douse the RES_KOD bit.
		 * This rate-limits the KoDs to no less than the average
		 * headway.
		 */
		if (interval + 1 >= ntp_minpkt && leak < limit) {
			mon->leak = leak - 2;
			restrict_mask &= ~(RES_LIMITED | RES_KOD);
		} else if (mon->leak < limit)
			mon->leak = limit + head;
		else
			restrict_mask &= ~RES_KOD;
		mon->flags = restrict_mask;

		return mon->flags;
	}

	/*
	 * If we got here, this is the first we've heard of this
	 * guy.  Get him some memory, either from the free list
	 * or from the tail of the MRU list.
	 *
	 * The following ntp.conf "mru" knobs come into play determining
	 * the depth (or count) of the MRU list:
	 * - mru_mindepth ("mru mindepth") is a floor beneath which
	 *   entries are kept without regard to their age.  The
	 *   default is 600 which matches the longtime implementation
	 *   limit on the total number of entries.
	 * - mru_maxage ("mru maxage") is a ceiling on the age in
	 *   seconds of entries.  Entries older than this are
	 *   reclaimed once mru_mindepth is exceeded.  64s default.
	 *   Note that entries older than this can easily survive
	 *   as they are reclaimed only as needed.
	 * - mru_maxdepth ("mru maxdepth") is a hard limit on the
	 *   number of entries.
	 * - "mru maxmem" sets mru_maxdepth to the number of entries
	 *   which fit in the given number of kilobytes.  The default is
	 *   1024, or 1 megabyte.
	 * - mru_initalloc ("mru initalloc") sets the count of the
	 *   initial allocation of MRU entries.
	 * - "mru initmem" sets mru_initalloc in units of kilobytes.
	 *   The default is 4.
	 * - mru_incalloc ("mru incalloc") sets the number of entries to
	 *   allocate on-demand each time the free list is empty.
	 * - "mru incmem" sets mru_incalloc in units of kilobytes.
	 *   The default is 4.
	 * Whichever of "mru maxmem" or "mru maxdepth" occurs last in
	 * ntp.conf controls.  Similarly for "mru initalloc" and "mru
	 * initmem", and for "mru incalloc" and "mru incmem".
	 */
	if (mru_entries < mru_mindepth) {
		if (NULL == mon_free)
			mon_getmoremem();
		UNLINK_HEAD_SLIST(mon, mon_free, hash_next);
	} else {
		oldest = TAIL_DLIST(mon_mru_list, mru);
		oldest_age = 0;		/* silence uninit warning */
		if (oldest != NULL) {
			interval_fp = rbufp->recv_time;
			L_SUB(&interval_fp, &oldest->last);
			/* add one-half second to round up */
			L_ADDUF(&interval_fp, 0x80000000);
			oldest_age = interval_fp.l_i;
		}
		/* note -1 is legal for mru_maxage (disables) */
		if (oldest != NULL && mru_maxage < oldest_age) {
			mon_reclaim_entry(oldest);
			mon = oldest;
		} else if (mon_free != NULL || mru_alloc < mru_maxdepth) {
			if (NULL == mon_free)
				mon_getmoremem();
			UNLINK_HEAD_SLIST(mon, mon_free, hash_next);

		/* Preempt from the MRU list if old enough. */
		} else if (ntp_random() / (2. * FRAC) >
			   (double)oldest_age / mon_age) {
			return ~(RES_LIMITED | RES_KOD) & flags;
		} else {
			mon_reclaim_entry(oldest);
			mon = oldest;
		}
	}

	INSIST(mon != NULL);

	/*
	 * Got one, initialize it
	 */
	mru_entries++;
	mru_peakentries = max(mru_peakentries, mru_entries);
	mon->last = rbufp->recv_time;
	mon->first = mon->last;
	mon->count = 1;
	mon->flags = ~(RES_LIMITED | RES_KOD) & flags;
	mon->leak = 0;
	memcpy(&mon->rmtadr, &rbufp->recv_srcadr, sizeof(mon->rmtadr));
	mon->vn_mode = VN_MODE(version, mode);
	mon->lcladr = rbufp->dstadr;
	mon->cast_flags = (u_char)(((rbufp->dstadr->flags & INT_MCASTOPEN) &&
				    rbufp->fd == mon->lcladr->fd)
				       ? MDF_MCAST
				       : rbufp->fd == mon->lcladr->bfd
					     ? MDF_BCAST
					     : MDF_UCAST);

	/*
	 * Drop him into front of the hash table.  Also put him on top of
	 * the MRU list.
	 */
	LINK_SLIST(mon_hash[hash], mon, hash_next);
	LINK_DLIST(mon_mru_list, mon, mru);

	return mon->flags;
}
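As a worked example of the headway arithmetic in ntp_monitor() above, with constants that are typical defaults but assumed here rather than taken from this excerpt:

/*
 * Illustration only: assumes ntp_minpoll == 6 and NTP_SHIFT == 8,
 * which are common defaults, not guaranteed by this excerpt.
 *
 *   head  = 1 << 6     =  64 s   headway increment
 *   limit = 8 * 64     = 512 s   average threshold
 *
 * A source polling every 64 s drains leak (by interval == 64) about as
 * fast as each accepted packet refills it (head - 2 == 62), so leak
 * stays near zero and RES_LIMITED is never set.  A source polling
 * every 8 s gains roughly 54 per packet, crosses the 512 threshold
 * around its tenth packet, and from then on sees RES_LIMITED on each
 * packet, with RES_KOD doused except when leak has decayed back below
 * the threshold.
 */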