/* Append all queued known answers of scheduler s to packet p and send it.
 *
 * If the answers do not all fit into one packet, intermediate packets are
 * flushed with the TC (truncated) bit set and a fresh packet is started.
 * Consumes p (it is always freed) and empties s->known_answers; each
 * known-answer record is unreffed and its list node freed. */
static void append_known_answers_and_send(AvahiQueryScheduler *s, AvahiDnsPacket *p) {
    AvahiKnownAnswer *ka;
    unsigned n;

    assert(s);
    assert(p);

    n = 0;

    while ((ka = s->known_answers)) {
        int too_large = 0;

        while (!avahi_dns_packet_append_record(p, ka->record, 0, 0)) {

            if (avahi_dns_packet_is_empty(p)) {
                /* The record is too large to fit into one packet, so
                   there's no point in sending it. Better is letting the
                   owner of the record send it as a response. This has the
                   advantage of a cache refresh. */
                too_large = 1;
                break;
            }

            /* Packet is full: mark it truncated, flush it, start a new one */
            avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_FLAGS, avahi_dns_packet_get_field(p, AVAHI_DNS_FIELD_FLAGS) | AVAHI_DNS_FLAG_TC);
            avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_ANCOUNT, n);
            avahi_interface_send_packet(s->interface, p, AVAHI_MDNS);
            avahi_dns_packet_free(p);

            if (!(p = avahi_dns_packet_new_query(s->interface->hardware->mtu))) {
                /* OOM: we cannot build a continuation packet. Drop the
                 * remaining known answers instead of dereferencing a
                 * NULL packet below; the list is consumed either way. */
                while ((ka = s->known_answers)) {
                    AVAHI_LLIST_REMOVE(AvahiKnownAnswer, known_answer, s->known_answers, ka);
                    avahi_record_unref(ka->record);
                    avahi_free(ka);
                }
                return;
            }

            n = 0;
        }

        AVAHI_LLIST_REMOVE(AvahiKnownAnswer, known_answer, s->known_answers, ka);
        avahi_record_unref(ka->record);
        avahi_free(ka);

        if (!too_large)
            n++;
    }

    avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_ANCOUNT, n);
    avahi_interface_send_packet(s->interface, p, AVAHI_MDNS);
    avahi_dns_packet_free(p);
}
/* Time-event callback for a query job: builds one mDNS query packet
 * containing this job plus as many other pending query jobs as fit,
 * then hands the packet over (together with the accumulated known
 * answers) to append_known_answers_and_send(), which also sends and
 * frees it. Jobs whose entries have merely expired from the history
 * are freed here instead. */
static void elapse_callback(AVAHI_GCC_UNUSED AvahiTimeEvent *e, void* data) {
    AvahiQueryJob *job = data;
    AvahiQueryScheduler *sched;
    AvahiDnsPacket *packet;
    unsigned qcount;
    int appended;

    assert(job);
    sched = job->scheduler;

    if (job->done) {
        /* This is only a history entry that ran out — drop it */
        job_free(sched, job);
        return;
    }

    assert(!sched->known_answers);

    packet = avahi_dns_packet_new_query(sched->interface->hardware->mtu);
    if (!packet)
        return; /* OOM */

    appended = packet_add_query_job(sched, packet, job);
    assert(appended); /* A single query must always fit in */
    qcount = 1;

    /* Opportunistically pack further pending queries into the same packet */
    for (;;) {
        if (!sched->jobs)
            break;
        if (!packet_add_query_job(sched, packet, sched->jobs))
            break;
        qcount++;
    }

    avahi_dns_packet_set_field(packet, AVAHI_DNS_FIELD_QDCOUNT, qcount);

    /* Attach known answers and send */
    append_known_answers_and_send(sched, packet);
}
/* Time-event callback for a probe job: builds a probe packet for pj,
 * fills it with further pending probes where possible, and sends it.
 *
 * Packet layout: probed keys go into the question section (QDCOUNT),
 * the proposed records into the authority section (NSCOUNT). If the
 * important probe alone does not fit into an MTU-sized packet, a
 * larger dedicated packet is built for it instead. */
static void elapse_callback(AVAHI_GCC_UNUSED AvahiTimeEvent *e, void* data) {
    AvahiProbeJob *pj = data, *next;
    AvahiProbeScheduler *s;
    AvahiDnsPacket *p;
    unsigned n;

    assert(pj);
    s = pj->scheduler;

    if (pj->done) {
        /* This is only an expired history entry — remove it */
        job_free(s, pj);
        return;
    }

    if (!(p = avahi_dns_packet_new_query(s->interface->hardware->mtu)))
        return; /* OOM */

    n = 1;

    /* Add the important probe (the one whose timer fired) */
    if (!packet_add_probe_query(s, p, pj)) {
        size_t size;
        AvahiKey *k;
        int b;

        avahi_dns_packet_free(p);

        /* The probe didn't fit in the packet, so let's allocate a
         * larger one, sized from the estimated key + record size */
        size = avahi_key_get_estimate_size(pj->record->key) + avahi_record_get_estimate_size(pj->record) + AVAHI_DNS_PACKET_HEADER_SIZE;
        if (size > AVAHI_DNS_PACKET_SIZE_MAX)
            size = AVAHI_DNS_PACKET_SIZE_MAX;

        if (!(p = avahi_dns_packet_new_query(size)))
            return; /* OOM */

        /* Probe queries ask for type ANY on the record's name */
        if (!(k = avahi_key_new(pj->record->key->name, pj->record->key->clazz, AVAHI_DNS_TYPE_ANY))) {
            avahi_dns_packet_free(p);
            return; /* OOM */
        }

        b = avahi_dns_packet_append_key(p, k, 0) && avahi_dns_packet_append_record(p, pj->record, 0, 0);
        avahi_key_unref(k);

        if (b) {
            /* One question, one authority record */
            avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_NSCOUNT, 1);
            avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_QDCOUNT, 1);
            avahi_interface_send_packet(s->interface, p);
        } else
            avahi_log_warn("Probe record too large, cannot send");

        avahi_dns_packet_free(p);
        job_mark_done(s, pj);

        return;
    }

    /* Try to fill up packet with more probes, if available.
     * NOTE(review): jobs already added appear to be flagged via
     * pj->chosen by packet_add_probe_query — confirm in its definition. */
    for (pj = s->jobs; pj; pj = pj->jobs_next) {

        if (pj->chosen)
            continue;

        if (!packet_add_probe_query(s, p, pj))
            break;

        n++;
    }

    avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_QDCOUNT, n);

    n = 0;

    /* Now add the chosen records to the authoritative section */
    for (pj = s->jobs; pj; pj = next) {
        next = pj->jobs_next;

        if (!pj->chosen)
            continue;

        if (!avahi_dns_packet_append_record(p, pj->record, 0, 0)) {
            /* avahi_log_warn("Bad probe size estimate!"); */

            /* Record didn't fit after all: unmark all following jobs so
             * they get probed in a later packet */
            for (; pj; pj = pj->jobs_next)
                pj->chosen = 0;

            break;
        }

        job_mark_done(s, pj);

        n ++;
    }

    avahi_dns_packet_set_field(p, AVAHI_DNS_FIELD_NSCOUNT, n);

    /* Send it now */
    avahi_interface_send_packet(s->interface, p);
    avahi_dns_packet_free(p);
}