static void Label(struct QuantizedValue *q, int updatecolor)
{
    // fill in max/min values for tree, etc.
    if (q)
    {
        Label(q->Children[0],updatecolor);
        Label(q->Children[1],updatecolor);
        if (! q->Children[0])           // leaf node?
        {
            if (updatecolor)
            {
                q->value=colorid++;
                for(int j=0;j<q->NSamples;j++)
                {
                    SAMPLE(q->Samples,j)->QNum=q->value;
                    SAMPLE(q->Samples,j)->qptr=q;
                }
            }
            for(int i=0;i<current_ndims;i++)
            {
                q->Mins[i]=q->Mean[i];
                q->Maxs[i]=q->Mean[i];
            }
        }
        else
            for(int i=0;i<current_ndims;i++)
            {
                q->Mins[i]=min(q->Children[0]->Mins[i],q->Children[1]->Mins[i]);
                q->Maxs[i]=max(q->Children[0]->Maxs[i],q->Children[1]->Maxs[i]);
            }
    }
}
/**
 * Cisco VLAN Trunking Protocol.
 */
void process_cisco_vtp(struct Ferret *ferret, struct NetFrame *frame, const unsigned char *px, unsigned length)
{
    struct vtp_summary {
        unsigned char version;
        unsigned char code;
        unsigned char followers;
        unsigned char domain_length;
        unsigned char domain[32];
        unsigned revision;
        unsigned updater;
        unsigned char timestamp[12];
        unsigned char md5[16];
    } vtp;
    unsigned offset=0;
    const unsigned char *domain_name;

    if (offset+4 > length) {
        FRAMERR(frame, "%s: truncated\n", "VTP");
        return;
    }

    vtp.version = px[offset++];
    SAMPLE(ferret,"Cisco", JOT_NUM("VTP version", vtp.version));
    if (vtp.version != 1) {
        FRAMERR(frame, "%s: unknown version %d\n", "VTP", vtp.version);
        return;
    }

    vtp.code = px[offset++];
    SAMPLE(ferret,"Cisco", JOT_NUM("VTP code", vtp.code));

    vtp.followers = px[offset++];
    SAMPLE(ferret,"Cisco", JOT_NUM("VTP followers", vtp.followers));

    vtp.domain_length = px[offset++];
    if (offset + vtp.domain_length > length) {
        FRAMERR(frame, "%s: truncated\n", "VTP");
        return;
    }
    domain_name = px+offset;
    offset += 32;

    if (offset + 8 > length) {
        FRAMERR(frame, "%s: truncated\n", "VTP");
        return;
    }
    vtp.revision = ex32be(px+offset);
    offset += 4;
    vtp.updater = ex32be(px+offset);
    offset += 4;

    JOTDOWN(ferret,
        JOT_MACADDR("ID-MAC", frame->src_mac),
        JOT_PRINT("Cisco VTP Domain", domain_name, vtp.domain_length),
        JOT_NUM("Revision", vtp.revision),
        JOT_IPv4("Updater", vtp.updater),
        0);
}
void sample_to_file(Sampler sampler, float sample_freq, float max, FILE* out) {
    int ii = 0;
    long samples = max * SAMPLE_FREQ;

    Filter filter = lowpass_make(sampler, 2000, sample_freq);

    for (ii = 0; ii < samples; ++ii) {
        float t = (float)ii / SAMPLE_FREQ;
        fprintf(out, "%f, %d, %d\n", t, SAMPLE(sampler, ii), SAMPLE(filter, ii));
    }
}
int ff_hevc_skip_flag_decode(HEVCContext *s, int x_cb, int y_cb)
{
    int inc = 0;

    if (x_cb > 0)
        inc = SAMPLE(s->cu.skip_flag, x_cb-1, y_cb);
    if (y_cb > 0)
        inc += SAMPLE(s->cu.skip_flag, x_cb, y_cb-1);

    return GET_CABAC(elem_offset[SKIP_FLAG] + inc);
}
static void UpdateStats(struct QuantizedValue *v)
{
    // first, find mean
    int32 Means[MAXDIMS];
    double Errors[MAXDIMS];
    double WorstError[MAXDIMS];
    int i,j;

    memset(Means,0,sizeof(Means));
    int N=0;
    for(i=0;i<v->NSamples;i++)
    {
        struct Sample *s=SAMPLE(v->Samples,i);
        N+=s->Count;
        for(j=0;j<current_ndims;j++)
        {
            uint8 val=s->Value[j];
            Means[j]+=val*s->Count;
        }
    }
    for(j=0;j<current_ndims;j++)
    {
        if (N) v->Mean[j]=(uint8) (Means[j]/N);
        Errors[j]=WorstError[j]=0.;
    }
    for(i=0;i<v->NSamples;i++)
    {
        struct Sample *s=SAMPLE(v->Samples,i);
        double c=s->Count;
        for(j=0;j<current_ndims;j++)
        {
            double diff=SQ(s->Value[j]-v->Mean[j]);
            Errors[j]+=c*diff;              // charles uses abs not sq()
            if (diff>WorstError[j]) WorstError[j]=diff;
        }
    }
    v->TotalError=0.;
    double ErrorScale=1.;                   // /sqrt((double) (N));
    for(j=0;j<current_ndims;j++)
    {
        v->ErrorMeasure[j]=(ErrorScale*Errors[j]*current_weights[j]);
        v->TotalError+=v->ErrorMeasure[j];
#if SPLIT_THEN_SORT
        v->ErrorMeasure[j]*=WorstError[j];
#endif
    }
    v->TotSamples=N;
}
void StructureAbstractValue::filter(const StructureSet& other)
{
    SAMPLE("StructureAbstractValue filter set");

    if (isTop()) {
        m_set = other;
        return;
    }

    if (isClobbered()) {
        // We have two choices here:
        //
        // Do nothing: It's legal to keep our set intact, which would essentially mean that for
        // now, our set would behave like TOP but after the next invalidation point it would be
        // a finite set again. This may be a good choice if 'other' is much bigger than our
        // m_set.
        //
        // Replace m_set with other and clear the clobber bit: This is also legal, and means that
        // we're no longer clobbered. This is usually better because it immediately gives us a
        // smaller set.
        //
        // This scenario should come up rarely. We usually don't do anything to an abstract value
        // after it is clobbered. But we apply some heuristics.

        if (other.size() > m_set.size() + clobberedSupremacyThreshold)
            return; // Keep the clobbered set.

        m_set = other;
        setClobbered(false);
        return;
    }

    m_set.filter(other);
}
void StructureAbstractValue::filter(const StructureAbstractValue& other)
{
    SAMPLE("StructureAbstractValue filter value");

    if (other.isTop())
        return;

    if (other.isClobbered()) {
        if (isTop())
            return;

        if (!isClobbered()) {
            // See justification in filter(const StructureSet&), above. An unclobbered set is
            // almost always better.
            if (m_set.size() > other.m_set.size() + clobberedSupremacyThreshold)
                *this = other; // Keep the clobbered set.
            return;
        }

        m_set.filter(other.m_set);
        return;
    }

    filter(other.m_set);
}
void process_cisco00000c(struct Ferret *ferret, struct NetFrame *frame, const unsigned char *px, unsigned length)
{
    unsigned offset=0;
    unsigned pid;

    if (offset+2 > length) {
        FRAMERR(frame, "%s: truncated\n", "cisco");
        return;
    }
    pid = ex16be(px);
    SAMPLE(ferret,"Cisco", JOT_NUM("0x00000c-pid", pid));
    offset += 2;

    switch (pid) {
    case 0x2000:
        parse_CDP(ferret, frame, px+offset, length-offset);
        break;
    case 0x010b:
        parse_PVSTP(ferret, frame, px+offset, length-offset);
        break;
    case 0x2003: /* Cisco VLAN Trunking Protocol */
        process_cisco_vtp(ferret, frame, px+offset, length-offset);
        break;
    case 0x2004: /* Cisco Dynamic Trunking Protocol */
        parse_dynamic_trunking_protocol(ferret, frame, px+offset, length-offset);
        break;
    default:
        FRAMERR(frame, "%s: unknown value: 0x%x\n", "cisco", pid);
    }
}
void StructureAbstractValue::clobber()
{
    SAMPLE("StructureAbstractValue clobber");

    // The premise of this approach to clobbering is that anytime we introduce
    // a watchable structure into an abstract value, we watchpoint it. You can assert
    // that this holds by calling assertIsWatched().

    if (isTop())
        return;

    setClobbered(true);

    if (m_set.isThin()) {
        if (!m_set.singleStructure())
            return;
        if (!m_set.singleStructure()->dfgShouldWatch())
            makeTopWhenThin();
        return;
    }

    StructureSet::OutOfLineList* list = m_set.structureList();
    for (unsigned i = list->m_length; i--;) {
        if (!list->list()[i]->dfgShouldWatch()) {
            makeTop();
            return;
        }
    }
}
bool StructureAbstractValue::contains(Structure* structure) const
{
    SAMPLE("StructureAbstractValue contains");

    if (isTop() || isClobbered())
        return true;

    return m_set.contains(structure);
}
bool StructureAbstractValue::merge(const StructureSet& other)
{
    SAMPLE("StructureAbstractValue merge set");

    if (isTop())
        return false;

    return mergeNotTop(other);
}
bool StructureAbstractValue::isSupersetOf(const StructureSet& other) const
{
    SAMPLE("StructureAbstractValue isSupersetOf set");

    if (isTop() || isClobbered())
        return true;

    return m_set.isSupersetOf(other);
}
bool StructureAbstractValue::overlaps(const StructureSet& other) const
{
    SAMPLE("StructureAbstractValue overlaps set");

    if (isTop() || isClobbered())
        return true;

    return m_set.overlaps(other);
}
bool StructureAbstractValue::overlaps(const StructureAbstractValue& other) const
{
    SAMPLE("StructureAbstractValue overlaps value");

    if (other.isTop() || other.isClobbered())
        return true;

    return overlaps(other.m_set);
}
void StructureAbstractValue::assertIsWatched(Graph& graph) const
{
    SAMPLE("StructureAbstractValue assertIsWatched");

    if (isTop())
        return;

    for (unsigned i = size(); i--;)
        graph.assertIsWatched(at(i));
}
bool StructureAbstractValue::equalsSlow(const StructureAbstractValue& other) const
{
    SAMPLE("StructureAbstractValue equalsSlow");

    ASSERT(m_set.m_pointer != other.m_set.m_pointer);
    ASSERT(!isTop());
    ASSERT(!other.isTop());

    return m_set == other.m_set
        && isClobbered() == other.isClobbered();
}
/* The audio function callback takes the following parameters:
   stream: A pointer to the audio buffer to be filled
   len:    The length (in bytes) of the audio buffer
*/
void fill_audio(void *udata, Uint8 *stream, int len)
{
    int nwords = len / 2;
    int ii;
    Sint16 *words = (Sint16*)stream;

    for(ii = 0; ii < nwords; ++ii) {
        words[ii] = SAMPLE(gfilter, ii + last_count);
    }

    last_count += nwords;
}
bool StructureAbstractValue::mergeNotTop(const StructureSet& other)
{
    SAMPLE("StructureAbstractValue merge not top");

    if (!m_set.merge(other))
        return false;

    if (m_set.size() > polymorphismLimit)
        makeTop();

    return true;
}
/*TODO: currently, nobody references this function*/
void dns_dynamic_update(struct Ferret *ferret, struct NetFrame *frame, const unsigned char *px, unsigned length, struct DNS *dns)
{
    unsigned i;

    for (i=0; i<dns->answer_count; i++) {
        char name[256];
        unsigned name_length;
        unsigned x;
        struct DNSRECORD *rec = &dns->answers[i];

        name_length = dns_extract_name(frame, px, length, rec->name_offset, name, sizeof(name));

        x = rec->clss<<16 | rec->type;
        SAMPLE(ferret,"DynDNS", JOT_NUM("Prereq", x));

        switch (rec->type) {
        case 0x0001: /*A*/
            switch (rec->clss) {
            case 0x0001: /*INTERNET*/
                {
                    unsigned ip_address = ex32be(px+rec->rdata_offset);

                    if (rec->rdata_length != 4)
                        FRAMERR(frame, "dns: data not 4-bytes long, was %d-bytes instead (class=%d, type=%d, name=%s)\n", rec->rdata_length, rec->clss, rec->type, name);

                    JOTDOWN(ferret,
                        JOT_IPv4("ID-IP", ip_address),
                        JOT_PRINT("name", name, name_length),
                        0);
                    JOTDOWN(ferret,
                        JOT_SZ("proto","NETBIOS"),
                        JOT_SZ("op","register"),
                        JOT_SRC("ip.src", frame),
                        JOT_PRINT("name", name, name_length),
                        JOT_IPv4("address", ip_address),
                        0);
                }
                break;
            default:
                FRAMERR(frame, "dns: unknown class=%d (type=%d, name=%s)\n", rec->clss, rec->type, name);
            }
            break;
        }
    }
}
void StructureAbstractValue::filterSlow(SpeculatedType type)
{
    SAMPLE("StructureAbstractValue filter type slow");

    if (!(type & SpecCell)) {
        clear();
        return;
    }

    ASSERT(!isTop());

    ConformsToType conformsToType(type);
    m_set.genericFilter(conformsToType);
}
void line_plot (FILE * plot,                  /* stream to gnuplot */
                int line_count,               /* how many lines */
                struct params * par,          /* command line parameters */
                int ibuf,                     /* index of last line */
                struct event_head * line_buf, /* line buffer */
                struct scan_params * command, /* frame parameters */
                int slot_size)
{
    int i, j, k, nl;
    struct event_head * plot_buf, * point;
    int16_t adc;

    nl = (line_count+1 >= par->L) ? par->L : line_count+1;

    fprintf (plot, "plot '-' tit '%d'", line_count);
    for ( j = 1 ; j < nl ; j++)
        fprintf (plot, ",'-' tit '%d'", line_count-j);
    fprintf (plot, "\n");

    for ( j = 0 ; j < nl ; j++ ) {
        plot_buf = (void *) line_buf
            + command->points_per_line * slot_size * ((ibuf + par->L - j) % par->L);
        if (par->v >= 1)
            printf ("plotting line %d %d at %p\n", ibuf, j, plot_buf);
        for ( i=0 ; i < command->points_per_line ; i++) {
            point = J_SLOT(plot_buf,i);
            for ( k = 0 ; k < command->samples_per_point ; k++ ) {
                adc = SAMPLE(point,k)[par->C];
                if (par->R) {
                    fprintf (plot, "%6g\n", scale_adc_data(adc));
                } else {
                    fprintf (plot, "%6d\n", adc);
                }
                if (par->C < 0)
                    break;
            }
        }
        fprintf (plot, "e\n");
    }
    fflush (plot);
}
bool StructureAbstractValue::add(Structure* structure)
{
    SAMPLE("StructureAbstractValue add");

    if (isTop())
        return false;

    if (!m_set.add(structure))
        return false;

    if (m_set.size() > polymorphismLimit)
        makeTop();

    return true;
}
/*
 * refclock_process_offset - update median filter
 *
 * This routine uses the given offset and timestamps to construct a new
 * entry in the median filter circular buffer. Samples that overflow the
 * filter are quietly discarded.
 */
void
refclock_process_offset(
    struct refclockproc *pp,
    l_fp offset,
    l_fp lastrec,
    double fudge
    )
{
    double doffset;

    pp->lastref = offset;
    pp->lastrec = lastrec;
    L_SUB(&offset, &lastrec);
    LFPTOD(&offset, doffset);
    SAMPLE(doffset + fudge);
}
void StructureAbstractValue::filterSlow(SpeculatedType type)
{
    SAMPLE("StructureAbstractValue filter type slow");

    if (!(type & SpecCell)) {
        clear();
        return;
    }

    ASSERT(!isTop());

    m_set.genericFilter(
        [&] (Structure* structure) {
            return !!(speculationFromStructure(structure) & type);
        });
}
/*
 * refclock_process_offset - update median filter
 *
 * This routine uses the given offset and timestamps to construct a new
 * entry in the median filter circular buffer. Samples that overflow the
 * filter are quietly discarded.
 */
void
refclock_process_offset(
    struct refclockproc *pp,    /* refclock structure pointer */
    l_fp lasttim,               /* last timecode timestamp */
    l_fp lastrec,               /* last receive timestamp */
    double fudge
    )
{
    l_fp lftemp;
    double doffset;

    pp->lastrec = lastrec;
    lftemp = lasttim;
    L_SUB(&lftemp, &lastrec);
    LFPTOD(&lftemp, doffset);
    SAMPLE(doffset + fudge);
}
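/*
 * For context on the two refclock_process_offset() variants above: SAMPLE() is the
 * median-filter insertion step. A minimal sketch of what such a ring-buffer insert
 * macro could look like follows; the field names (pp->filter, pp->coderecv,
 * pp->codeproc) and the MAXSTAGE depth are assumptions for illustration, not
 * necessarily the actual ntpd definition.
 */
#if 0   /* illustrative only */
#define SAMPLE(x)                                               \
    do {                                                        \
        pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;           \
        pp->filter[pp->coderecv] = (x);                         \
        if (pp->coderecv == pp->codeproc)                       \
            /* buffer full: quietly drop the oldest sample */   \
            pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;       \
    } while (0)
#endif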
void StructureAbstractValue::observeTransition(Structure* from, Structure* to)
{
    SAMPLE("StructureAbstractValue observeTransition");

    ASSERT(!from->dfgShouldWatch());

    if (isTop())
        return;

    if (!m_set.contains(from))
        return;

    if (!m_set.add(to))
        return;

    if (m_set.size() > polymorphismLimit)
        makeTop();
}
bool StructureAbstractValue::mergeSlow(const StructureAbstractValue& other)
{
    SAMPLE("StructureAbstractValue merge value slow");

    // It isn't immediately obvious that the code below is doing the right thing, so let's go
    // through it.
    //
    // This not clobbered, other not clobbered: Clearly, we don't want to make anything clobbered
    // since we just have two sets and we are merging them. mergeNotTop() can handle this just
    // fine.
    //
    // This clobbered, other clobbered: Clobbered means that we have a set of things, plus we
    // temporarily have the set of all things but the latter will go away once we hit the next
    // invalidation point. This allows us to merge two clobbered sets the natural way. For now
    // the set will still be TOP (and so we keep the clobbered bit set), but we know that after
    // invalidation, we will have the union of this and other.
    //
    // This clobbered, other not clobbered: It's safe to merge in other for both before and after
    // invalidation, so long as we leave the clobbered bit set. Before invalidation this has no
    // effect since the set will still appear to have all things in it. The way to think about
    // what invalidation would do is to imagine that we had a set A that was clobbered and a set B
    // that wasn't, and to consider the following two cases. Note that we expect A to be the
    // same at the end in both cases:
    //
    //     A.merge(B)          InvalidationPoint
    //     InvalidationPoint   A.merge(B)
    //
    // The fact that we expect A to be the same in both cases means that we want to merge other
    // into this but keep the clobbered bit.
    //
    // This not clobbered, other clobbered: This is just the converse of the previous case. We
    // want to merge other into this and set the clobbered bit.

    bool changed = false;

    if (!isClobbered() && other.isClobbered()) {
        setClobbered(true);
        changed = true;
    }

    changed |= mergeNotTop(other.m_set);

    return changed;
}
void process_isakmp(struct Ferret *ferret, struct NetFrame *frame, const unsigned char *px, unsigned length)
{
    unsigned type;

    return; /*TODO: add code later */

    if (length < 1) {
        FRAMERR_TRUNCATED(frame, "isakmp");
        return;
    }

    type = px[0];
    SAMPLE(ferret,"ISAKMP", JOT_NUM("type", type));

    switch (type) {
    case 0xFF: /* keep alive */
        break;
    default:
        FRAMERR_UNKNOWN_UNSIGNED(frame, "isakmp", type);
        break;
    }
}
int main(int argc, char** argv) {
    SETUP();

    int iter, repet;
    iter = 1 << 15;
    repet = 100;
    if (argc > 1) iter = atoi(argv[1]);
    if (argc > 2) repet = atoi(argv[2]);

    printf("pts,repet,avg,se\n");
    for (j = 2; j < iter; j *= 2) {
        r1 = r2 = 0;
        mcount = -1;
        for (k = 0; k < repet; k++) {
            SAMPLE(j);
            TEST(im, j);
            SAVE(j);
            TEST(if, j);

            total = 0;
            for (i = 0; i < j; i++) {
                if (1 / correct[i] != 0 && correct[i] == correct[i]) {
                    unsigned long long int error = ulp(out[i], correct[i]);
                    total += log(error + 1.0) / log(2);
                }
            }

            r1old = r1;
            r1 += (total / count - r1) / (k + 1);
            r2 += (total / count - r1old) * (total / count - r1);

            if (mcount == -1 || count < mcount) mcount = count;

            free(rands);
            free(out);
            free(correct);
        }
        printf("%i,%i,%g,%g\n", mcount, repet, r1, sqrt(r2 / (repet - 1.5)) / sqrt(repet));
    }
}
bool StructureAbstractValue::isSubsetOf(const StructureAbstractValue& other) const
{
    SAMPLE("StructureAbstractValue isSubsetOf value");

    if (isTop())
        return false;

    if (other.isTop())
        return true;

    if (isClobbered() == other.isClobbered())
        return m_set.isSubsetOf(other.m_set);

    // Here it gets tricky. If in doubt, return false!

    if (isClobbered())
        return false; // A clobbered set is never a subset of an unclobbered set.

    // An unclobbered set is currently a subset of a clobbered set, but it may not be so after
    // invalidation, so answer conservatively.
    return false;
}