Example #1
/**
 * Create canonical form of rrset in the scratch buffer.
 * @param region: temporary region.
 * @param buf: the buffer to use.
 * @param k: the rrset to insert.
 * @param sig: RRSIG rdata to include.
 * @param siglen: length of the RRSIG rdata, excluding the signature
 * 	field but including the signer name.
 * @param sortree: if *sortree is NULL, a new sorted rrset tree is built
 * 	and stored here; otherwise the existing tree is reused.
 * @return false on alloc error.
 */
static int
rrset_canonical(struct regional* region, sldns_buffer* buf, 
	struct ub_packed_rrset_key* k, uint8_t* sig, size_t siglen,
	struct rbtree_t** sortree)
{
	struct packed_rrset_data* d = (struct packed_rrset_data*)k->entry.data;
	uint8_t* can_owner = NULL;
	size_t can_owner_len = 0;
	struct canon_rr* walk;
	struct canon_rr* rrs;

	if(!*sortree) {
		*sortree = (struct rbtree_t*)regional_alloc(region, 
			sizeof(rbtree_t));
		if(!*sortree)
			return 0;
		if(d->count > RR_COUNT_MAX)
			return 0; /* integer overflow protection */
		rrs = regional_alloc(region, sizeof(struct canon_rr)*d->count);
		if(!rrs) {
			*sortree = NULL;
			return 0;
		}
		rbtree_init(*sortree, &canonical_tree_compare);
		canonical_sort(k, d, *sortree, rrs);
	}

	sldns_buffer_clear(buf);
	sldns_buffer_write(buf, sig, siglen);
	/* canonicalize the signer name; it starts after the fixed RRSIG
	 * fields: type covered (2), algorithm (1), labels (1), original
	 * TTL (4), expiration (4), inception (4), key tag (2) = 18 octets */
	query_dname_tolower(sldns_buffer_begin(buf)+18);
	RBTREE_FOR(walk, struct canon_rr*, (*sortree)) {
		/* see if there is enough space left in the buffer for the
		 * owner, type (2), class (2), TTL (4) and the rdata */
		if(sldns_buffer_remaining(buf) < can_owner_len + 2 + 2 + 4
			+ d->rr_len[walk->rr_idx]) {
			log_err("verify: failed to canonicalize, "
				"rrset too big");
			return 0;
		}
		/* determine canonical owner name */
		if(can_owner)
			sldns_buffer_write(buf, can_owner, can_owner_len);
		else	insert_can_owner(buf, k, sig, &can_owner, 
				&can_owner_len);
		sldns_buffer_write(buf, &k->rk.type, 2);
		sldns_buffer_write(buf, &k->rk.rrset_class, 2);
		/* TTL: use the RRSIG's original TTL field (sig offset 4) */
		sldns_buffer_write(buf, sig+4, 4);
		sldns_buffer_write(buf, d->rr_data[walk->rr_idx], 
			d->rr_len[walk->rr_idx]);
		canonicalize_rdata(buf, k, d->rr_len[walk->rr_idx]);
	}
	sldns_buffer_flip(buf);
	return 1;
}
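
A minimal caller sketch: the canonical form depends on each RRSIG (signer name and original TTL), so the buffer is rebuilt per signature while the sorted tree is built once and reused across calls. The verify_sigs wrapper and its parameters are hypothetical, not part of the original source; only rrset_canonical and its argument types come from the code above.

static int
verify_sigs(struct regional* region, sldns_buffer* buf,
	struct ub_packed_rrset_key* rrset,
	uint8_t** sigs, size_t* siglens, size_t num_sigs)
{
	struct rbtree_t* sortree = NULL; /* built on first call, reused after */
	size_t i;
	for(i = 0; i < num_sigs; i++) {
		if(!rrset_canonical(region, buf, rrset, sigs[i],
			siglens[i], &sortree))
			return 0; /* alloc error or rrset too big */
		/* buf now holds the data to hash: RRSIG rdata (minus the
		 * signature field) followed by the canonically sorted RRs */
	}
	return 1;
}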
Example #2
/* Called when we are expecting no more codepoints. */
void MVM_unicode_normalizer_eof(MVMThreadContext *tc, MVMNormalizer *n) {
    /* Perform canonical ordering and, if needed, canonical composition on
     * what remains. */
    canonical_sort(tc, n, n->buffer_start, n->buffer_end);
    if (MVM_NORMALIZE_COMPOSE(n->form)) {
        canonical_composition(tc, n, n->buffer_start, n->buffer_end);
        if (MVM_NORMALIZE_GRAPHEME(n->form))
            grapheme_composition(tc, n, n->buffer_start, n->buffer_end);
    }

    /* We've now normalized all that remains. */
    n->buffer_norm_end = n->buffer_end;
}
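
A short sketch of how this might be driven at end of input, assuming MoarVM's MVM_unicode_normalizer_available and MVM_unicode_normalizer_get_codepoint accessors from normalize.h; the drain_at_eof name and emit callback are hypothetical.

static void
drain_at_eof(MVMThreadContext *tc, MVMNormalizer *n,
             void (*emit)(MVMCodepoint cp)) {
    /* Flag end of input so the tail of the buffer gets normalized... */
    MVM_unicode_normalizer_eof(tc, n);
    /* ...then pull out every codepoint that is now ready. */
    while (MVM_unicode_normalizer_available(tc, n))
        emit(MVM_unicode_normalizer_get_codepoint(tc, n));
}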
Example #3
/* Called when the very fast case of normalization fails (that is, when we get
 * any two codepoints in a row where at least one is greater than the first
 * significant codepoint identified by a quick check for the target form). We
 * may find the quick check itself is enough; if not, we have to do real
 * work to compute the normalization. */
MVMint32 MVM_unicode_normalizer_process_codepoint_full(MVMThreadContext *tc, MVMNormalizer *n, MVMCodepoint in, MVMCodepoint *out) {
    /* Do a quickcheck on the codepoint we got in and get its CCC. */
    MVMint64 qc_in  = passes_quickcheck(tc, n, in);
    MVMint64 ccc_in = ccc(tc, in);

    /* Fast cases when we pass quick check and what we got in has CCC = 0. */
    if (qc_in && ccc_in == 0) {
        if (MVM_NORMALIZE_COMPOSE(n->form)) {
            /* We're composing. If the buffer holds exactly one codepoint,
             * and it too passes the quick check with a CCC of zero, we can
             * hand back that buffered codepoint - effectively replacing
             * what's in the buffer with the new codepoint coming in. */
            if (n->buffer_end - n->buffer_start == 1) {
                MVMCodepoint maybe_result = n->buffer[n->buffer_start];
                if (passes_quickcheck(tc, n, maybe_result) && ccc(tc, maybe_result) == 0) {
                    *out = n->buffer[n->buffer_start];
                    n->buffer[n->buffer_start] = in;
                    return 1;
                }
            }
        }
        else {
            /* We're only decomposing. There should probably be nothing in the
             * buffer in this case; if so we can simply return the codepoint. */
            if (n->buffer_start == n->buffer_end) {
                *out = in;
                return 1;
            }
        }
    }

    /* If we didn't pass quick check... */
    if (!qc_in) {
        /* If we're composing, then decompose the last thing placed in the
         * buffer, if any. We need to do this since it may have passed
         * quickcheck, but now that we have seen a character that does not
         * pass, we must make sure the prior passing one is decomposed too. */
        if (MVM_NORMALIZE_COMPOSE(n->form) && n->buffer_end != n->buffer_start) {
            MVMCodepoint decomp = n->buffer[n->buffer_end - 1];
            n->buffer_end--;
            decomp_codepoint_to_buffer(tc, n, decomp);
        }

        /* Decompose this new character into the buffer. We'll need to see
         * more before we can go any further. */
        decomp_codepoint_to_buffer(tc, n, in);
        return 0;
    }

    /* Since anything we have at this point does pass quick check, add it to
     * the buffer directly. */
    add_codepoint_to_buffer(tc, n, in);

    /* If the codepoint has a CCC that is non-zero, it's not a starter so we
     * should see more before normalizing. */
    if (ccc_in > 0)
        return 0;

    /* If the buffer holds nothing besides the codepoint we just added,
     * it's too early to hand anything back. */
    if (n->buffer_end - n->buffer_start <= 1)
        return 0;

    /* Perform canonical sorting on everything from the start of the buffer
     * up to but excluding the quick-check-passing thing we just added. */
    canonical_sort(tc, n, n->buffer_start, n->buffer_end - 1);

    /* Perform canonical composition and grapheme composition if needed. */
    if (MVM_NORMALIZE_COMPOSE(n->form)) {
        canonical_composition(tc, n, n->buffer_start, n->buffer_end - 1);
        if (MVM_NORMALIZE_GRAPHEME(n->form))
            grapheme_composition(tc, n, n->buffer_start, n->buffer_end - 1);
    }

    /* We've now normalized all except the latest, quick-check-passing
     * codepoint. */
    n->buffer_norm_end = n->buffer_end - 1;

    /* Hand back a codepoint, and flag how many more are available. */
    *out = n->buffer[n->buffer_start];
    return n->buffer_norm_end - n->buffer_start++;
}
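
A feed-loop sketch, assuming the inline fast-path wrapper MVM_unicode_normalizer_process_codepoint (which defers to the _full routine above when the very fast case fails) and the get_codepoint accessor; normalize_stream and emit are hypothetical names. The return value is the number of normalized codepoints now ready, with *out holding the first of them.

static void
normalize_stream(MVMThreadContext *tc, MVMNormalizer *n,
                 const MVMCodepoint *in, size_t len,
                 void (*emit)(MVMCodepoint cp)) {
    size_t i;
    for (i = 0; i < len; i++) {
        MVMCodepoint out;
        MVMint32 ready = MVM_unicode_normalizer_process_codepoint(tc, n,
            in[i], &out);
        if (ready) {
            emit(out); /* first ready codepoint comes back via *out */
            while (--ready > 0)
                emit(MVM_unicode_normalizer_get_codepoint(tc, n));
        }
    }
}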
Example #4
int rrset_canonical_equal(struct regional* region,
	struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
{
	struct rbtree_t sortree1, sortree2;
	struct canon_rr *rrs1, *rrs2, *p1, *p2;
	struct packed_rrset_data* d1=(struct packed_rrset_data*)k1->entry.data;
	struct packed_rrset_data* d2=(struct packed_rrset_data*)k2->entry.data;
	struct ub_packed_rrset_key fk;
	struct packed_rrset_data fd;
	size_t flen[2];
	uint8_t* fdata[2];

	/* basic compare */
	if(k1->rk.dname_len != k2->rk.dname_len ||
		k1->rk.flags != k2->rk.flags ||
		k1->rk.type != k2->rk.type ||
		k1->rk.rrset_class != k2->rk.rrset_class ||
		query_dname_compare(k1->rk.dname, k2->rk.dname) != 0)
		return 0;
	if(d1->ttl != d2->ttl ||
		d1->count != d2->count ||
		d1->rrsig_count != d2->rrsig_count ||
		d1->trust != d2->trust ||
		d1->security != d2->security)
		return 0;

	/* init a scratch rrset that holds one RR from each input, so
	 * canonical_compare() can compare them pairwise as indexes 0 and 1 */
	memset(&fk, 0, sizeof(fk));
	memset(&fd, 0, sizeof(fd));
	fk.entry.data = &fd;
	fd.count = 2;
	fd.rr_len = flen;
	fd.rr_data = fdata;
	rbtree_init(&sortree1, &canonical_tree_compare);
	rbtree_init(&sortree2, &canonical_tree_compare);
	if(d1->count > RR_COUNT_MAX || d2->count > RR_COUNT_MAX)
		return 1; /* integer overflow protection; treat as equal */
	rrs1 = regional_alloc(region, sizeof(struct canon_rr)*d1->count);
	rrs2 = regional_alloc(region, sizeof(struct canon_rr)*d2->count);
	if(!rrs1 || !rrs2) return 1; /* alloc failure; treat as equal */

	/* sort */
	canonical_sort(k1, d1, &sortree1, rrs1);
	canonical_sort(k2, d2, &sortree2, rrs2);

	/* compare canonical-sorted RRs for canonical-equality */
	if(sortree1.count != sortree2.count)
		return 0;
	p1 = (struct canon_rr*)rbtree_first(&sortree1);
	p2 = (struct canon_rr*)rbtree_first(&sortree2);
	while(p1 != (struct canon_rr*)RBTREE_NULL &&
		p2 != (struct canon_rr*)RBTREE_NULL) {
		flen[0] = d1->rr_len[p1->rr_idx];
		flen[1] = d2->rr_len[p2->rr_idx];
		fdata[0] = d1->rr_data[p1->rr_idx];
		fdata[1] = d2->rr_data[p2->rr_idx];

		if(canonical_compare(&fk, 0, 1) != 0)
			return 0;
		p1 = (struct canon_rr*)rbtree_next(&p1->node);
		p2 = (struct canon_rr*)rbtree_next(&p2->node);
	}
	return 1;
}
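
A usage sketch, assuming Unbound's util/regional.h allocator API (regional_create, regional_destroy); the rrsets_same wrapper is hypothetical. Note that the function reports equality (1) on overflow or allocation failure, so the sketch mirrors that fail-safe answer.

static int
rrsets_same(struct ub_packed_rrset_key* k1, struct ub_packed_rrset_key* k2)
{
	struct regional* region = regional_create();
	int eq;
	if(!region)
		return 1; /* mirror the function's fail-safe 'equal' answer */
	eq = rrset_canonical_equal(region, k1, k2);
	regional_destroy(region); /* releases the scratch canon_rr arrays */
	return eq;
}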