Пример #1
0
/*
 * Generate an LM-OTS public key: run every Winternitz chain to its end
 * and hash the chain tails together with the I/q/D_PBLC prefix.
 * Returns false on an unrecognized parameter set or an undersized
 * output buffer.
 */
bool lm_ots_generate_public_key(
    param_set_t lm_ots_type,
    const unsigned char *I, /* Public key identifier */
    merkle_index_t q,       /* Diversification string, 4 bytes value */
    struct seed_derive *seed,
    unsigned char *public_key, size_t public_key_len) {

    /* Look up the parameter set */
    unsigned h, n, w, p, ls;
    if (!lm_ots_look_up_parameter_set( lm_ots_type, &h, &n, &w, &p, &ls ))
        return false;

    /* The final hash written below is n bytes long; make sure the */
    /* caller's buffer can hold it.  public_key_len was previously */
    /* ignored, which risked a buffer overrun */
    if (public_key_len < n) return false;

    /* Start the hash that computes the final value */
    union hash_context public_ctx;
    hss_init_hash_context(h, &public_ctx);
    {
        unsigned char prehash_prefix[ PBLC_PREFIX_LEN ];
        memcpy( prehash_prefix + PBLC_I, I, I_LEN );
        put_bigendian( prehash_prefix + PBLC_Q, q, 4 );
        SET_D( prehash_prefix + PBLC_D, D_PBLC );
        hss_update_hash_context(h, &public_ctx, prehash_prefix,
                                PBLC_PREFIX_LEN );
    }

    /* Now generate the public key */
    /* This is where we spend the majority of the time during key gen and */
    /* signing operations; it would make sense to attempt to try to take */
    /* advantage of parallel (SIMD) hardware; even if we use it nowhere */
    /* else, we'd get a significant speed up */
    unsigned i, j;   /* unsigned: p and (1<<w)-1 are unsigned quantities */

    unsigned char buf[ ITER_MAX_LEN ];
    memcpy( buf + ITER_I, I, I_LEN );
    put_bigendian( buf + ITER_Q, q, 4 );
    union hash_context ctx;

    hss_seed_derive_set_j( seed, 0 );

    for (i=0; i<p; i++) {
        /* Derive this chain's secret starting value from the seed */
        hss_seed_derive( buf + ITER_PREV, seed, i < p-1 );
        put_bigendian( buf + ITER_K, i, 2 );
        /* We'll place j in the buffer below */
        /* Walk the Winternitz chain all 2^w - 1 steps to its end */
        for (j=0; j < (1u<<w) - 1; j++) {
            buf[ITER_J] = j;

            hss_hash_ctx( buf + ITER_PREV, h, &ctx, buf, ITER_LEN(n) );
        }
        /* Include the chain tail in the running public key hash */
        hss_update_hash_context(h, &public_ctx, buf + ITER_PREV, n );
    }

    /* And the result of the running hash is the public key */
    hss_finalize_hash_context( h, &public_ctx, public_key );

    /* buf held secret seed-derived values; don't leave them on the stack */
    hss_zeroize( buf, sizeof buf );
    hss_zeroize( &ctx, sizeof ctx );

    return true;
}
Пример #2
0
/*
 * Merge two sibling Merkle tree nodes into their parent:
 * dest = H( I || node_num || D_INTR || left_node || right_node )
 */
void hss_combine_internal_nodes( unsigned char *dest,
        const unsigned char *left_node, const unsigned char *right_node,
        int h, const unsigned char *I, unsigned hash_size,
        merkle_index_t node_num) {
    union hash_context ctx;
    unsigned char buffer[ INTR_MAX_LEN ];

    /* Lay down the fixed-format prefix: identifier, node index, */
    /* and the D_INTR diversification constant */
    SET_D( buffer + INTR_D, D_INTR );
    put_bigendian( buffer + INTR_R, node_num, 4 );
    memcpy( buffer + INTR_I, I, I_LEN );

    /* Follow it with the two children, left child first */
    memcpy( buffer + INTR_PK,             left_node,  hash_size );
    memcpy( buffer + INTR_PK + hash_size, right_node, hash_size );

    hss_hash_ctx( dest, h, &ctx, buffer, INTR_LEN(hash_size) );
}
Пример #3
0
/*
 * Compute the value of an internal node within a Merkle tree, by
 * generating every leaf beneath it and folding them together
 * bottom-up with a small stack of pending left siblings.
 * Returns hss_error_none on success.
 */
static enum hss_error_code hss_compute_internal_node( unsigned char *dest,
                            merkle_index_t node_num, 
                            const unsigned char *seed,
                            param_set_t lm_type,
                            param_set_t lm_ots_type,
                            unsigned h,
                            unsigned leaf_level,
                            const unsigned char *I) {
    unsigned hash_size = hss_hash_length(h);

    /* We store intermediate (pending left) nodes here; slot k holds the */
    /* unmatched left node k levels above the leaves */
    unsigned char stack[ MAX_HASH * MAX_MERKLE_HEIGHT];

    /* Find the leftmost leaf below node_num; r walks the node numbers, */
    /* q the corresponding leaf (OTS key) indices */
    merkle_index_t tree_size = (merkle_index_t)1 << leaf_level;
    merkle_index_t r = node_num;
    unsigned levels_to_bottom = 0;
    if (r == 0) return hss_error_internal;  /* Say no to infinite loops */
    while (r < tree_size) {
        r <<= 1;
        levels_to_bottom++;
    }
    merkle_index_t q = r - tree_size;

    merkle_index_t i;
    unsigned ots_len = lm_ots_get_public_key_len(lm_ots_type);
    unsigned char pub_key[ LEAF_MAX_LEN ];
    memcpy( pub_key + LEAF_I, I, I_LEN );
    SET_D( pub_key + LEAF_D, D_LEAF );

    struct seed_derive derive;
    if (!hss_seed_derive_init( &derive, lm_type, lm_ots_type,
                               I, seed)) {
        return hss_error_bad_param_set;
    }

    for (i=0;; i++, r++, q++) {
        /* Generate the next OTS public key */
        hss_seed_derive_set_q( &derive, q );
        if (!lm_ots_generate_public_key(lm_ots_type, I,
                   q, &derive, pub_key + LEAF_PK, ots_len)) {
            /* Release (and zeroize) the derived seed state on this */
            /* error path too; previously it was leaked here */
            hss_seed_derive_done( &derive );
            return hss_error_bad_param_set; /* The only reason the above */
                                            /* could fail */
        }

        /*
         * For the subtree which this leaf node forms the final piece, put the
         * destination to where we'll want it, either on the stack, or if this
         * is the final piece, to where the caller specified
         */
        unsigned char *current_buf;
        int stack_offset = trailing_1_bits( i );
        if (stack_offset == levels_to_bottom) {
            current_buf = dest;
        } else {
            current_buf = &stack[stack_offset * hash_size ];
        }

        /* Hash it to form the leaf node */
        put_bigendian( pub_key + LEAF_R, r, 4);
        union hash_context ctx;
        hss_hash_ctx( current_buf, h, &ctx, pub_key, LEAF_LEN(hash_size) );

        /* Work up the stack, combining right nodes with the left nodes */
        /* that we've already computed */
        unsigned sp;
        for (sp = 1; sp <= stack_offset; sp++) {
            hss_combine_internal_nodes( current_buf,
                            &stack[(sp-1) * hash_size], current_buf,
                            h, I, hash_size,
                            r >> sp );
        }

        /* We're not at a left branch, or at the target node */

        /* Because we've set current_buf to point to where we want to place */
        /* the result of this loop, we don't need to memcpy it */

        /* Check if this was the last leaf (and so we've just computed the */
        /* target node) */
        if (stack_offset == levels_to_bottom) {
            /* We're at the target node; the node we were asked to compute */
            /* We've already placed the value into dest, so we're all done */
            break;
        }
    }

    hss_seed_derive_done( &derive );

    return hss_error_none;
}
Пример #4
0
/*
 * Generate an LM-OTS signature over the message (or, if prehashed is
 * set, over the caller-supplied randomized hash).  The signature layout
 * is: 4-byte parameter set, n-byte randomizer C, then p chain values of
 * n bytes each.  Returns false on a bad parameter set or a too-small
 * signature buffer.
 */
bool lm_ots_generate_signature(
    param_set_t lm_ots_type,
    const unsigned char *I, /* Public key identifier */
    merkle_index_t q,       /* Diversification string, 4 bytes value */
    struct seed_derive *seed,
    const void *message, size_t message_len, bool prehashed,
    unsigned char *signature, size_t signature_len) {

    /* Look up the parameter set */
    unsigned h, n, w, p, ls;
    if (!lm_ots_look_up_parameter_set( lm_ots_type, &h, &n, &w, &p, &ls ))
        return false;

    /* Check if we have enough room */
    if (signature_len < 4 + n + p*n) return false;

    /* Export the parameter set to the signature */
    put_bigendian( signature, lm_ots_type, 4 );

    union hash_context ctx;
    /* Select the randomizer.  Note: we do this deterministically, because
     * upper levels of the HSS tree sometimes sign the same message with the
     * same index (between multiple reboots), hence we want to make sure that
     * the randomizer for a particular index is the same
     * Also, if we're prehashed, we assume the caller has already selected it,
     * and placed it into the signature */
    if (!prehashed) {
        lm_ots_generate_randomizer( signature+4, n, seed);
    }

    /* Compute the initial hash; Q gets the n-byte randomized hash plus */
    /* 2 bytes for the checksum appended below */
    unsigned char Q[MAX_HASH + 2];
    if (!prehashed) {
        hss_init_hash_context(h, &ctx);

        /* First, we hash the message prefix */
        unsigned char prefix[MESG_PREFIX_MAXLEN];
        memcpy( prefix + MESG_I, I, I_LEN );
        put_bigendian( prefix + MESG_Q, q, 4 );
        SET_D( prefix + MESG_D, D_MESG );
        memcpy( prefix + MESG_C, signature+4, n );
        hss_update_hash_context(h, &ctx, prefix, MESG_PREFIX_LEN(n) );

        /* Then, the message */
        hss_update_hash_context(h, &ctx, message, message_len );
        hss_finalize_hash_context( h, &ctx, Q );
    } else {
        /* The caller has already computed the randomized hash for us */
        memcpy( Q, message, n );
    }

    /* Append the checksum to the randomized hash */
    put_bigendian( &Q[n], lm_ots_compute_checksum(Q, n, w, ls), 2 );

    int i;
    unsigned char tmp[ITER_MAX_LEN];

    /* Preset the parts of tmp that don't change between iterations */
    memcpy( tmp + ITER_I, I, I_LEN );
    put_bigendian( tmp + ITER_Q, q, 4 );

    hss_seed_derive_set_j( seed, 0 );
    for (i=0; i<p; i++) {
        put_bigendian( tmp + ITER_K, i, 2 );
        /* Derive this chain's secret starting value from the seed */
        hss_seed_derive( tmp + ITER_PREV, seed, i<p-1 );
        unsigned a = lm_ots_coef( Q, i, w );
        unsigned j;
        /* Advance the Winternitz chain by the digit value a */
        for (j=0; j<a; j++) {
            tmp[ITER_J] = j;
            hss_hash_ctx( tmp + ITER_PREV, h, &ctx, tmp, ITER_LEN(n) );
        }
        memcpy( &signature[ 4 + n + n*i ], tmp + ITER_PREV, n );
    }

    /* tmp held secret seed-derived chain values; wipe them along with */
    /* the hash context */
    hss_zeroize( tmp, sizeof tmp );
    hss_zeroize( &ctx, sizeof ctx );

    return true;
}
Пример #5
0
/*
 * Check the blocks belonging to inode INO, whose inode has already
 * been loaded into SFI. ISDIR is a shortcut telling us if the inode
 * is a directory.
 *
 * Returns nonzero if SFI has been modified and needs to be written
 * back.
 */
static
int
check_inode_blocks(uint32_t ino, struct sfs_dinode *sfi, int isdir)
{
	struct ibstate ibs;
	uint32_t size, datablock;
	int changed;
	int i;

	/* File size rounded up to a whole number of blocks */
	size = SFS_ROUNDUP(sfi->sfi_size, SFS_BLOCKSIZE);

	ibs.ino = ino;
	ibs.fileblocks = size/SFS_BLOCKSIZE;
	ibs.volblocks = sb_totalblocks();
	ibs.pasteofcount = 0;
	ibs.usagetype = isdir ? B_DIRDATA : B_DATA;

	changed = 0;

	/* First, the direct block pointers */
	for (ibs.curfileblock=0; ibs.curfileblock<NUM_D; ibs.curfileblock++) {
		datablock = GET_D(sfi, ibs.curfileblock);
		if (datablock >= ibs.volblocks) {
			/* Pointer is off the end of the volume: clear it. */
			/* Note: warnx supplies its own newline, so the */
			/* format string must not end in \n */
			warnx("Inode %lu: direct block pointer for "
			      "block %lu outside of volume "
			      "(cleared)",
			      (unsigned long)ibs.ino,
			      (unsigned long)ibs.curfileblock);
			SET_D(sfi, ibs.curfileblock) = 0;
			changed = 1;
			/* We repaired the volume; record it in the exit */
			/* status, as the past-EOF path below does */
			setbadness(EXIT_RECOV);
		}
		else if (datablock > 0) {
			if (ibs.curfileblock < ibs.fileblocks) {
				/* Block belongs to the file; mark it used */
				bitmap_blockinuse(datablock, ibs.usagetype,
						  ibs.ino);
			}
			else {
				/* Block lies past EOF; free it */
				ibs.pasteofcount++;
				changed = 1;
				bitmap_blockfree(datablock);
				SET_D(sfi, ibs.curfileblock) = 0;
			}
		}
	}

	/* Then the single, double, and triple indirect blocks */
	for (i=0; i<NUM_I; i++) {
		check_indirect_block(&ibs, &SET_I(sfi, i), &changed, 1);
	}
	for (i=0; i<NUM_II; i++) {
		check_indirect_block(&ibs, &SET_II(sfi, i), &changed, 2);
	}
	for (i=0; i<NUM_III; i++) {
		check_indirect_block(&ibs, &SET_III(sfi, i), &changed, 3);
	}

	if (ibs.pasteofcount > 0) {
		warnx("Inode %lu: %u blocks after EOF (freed)",
		     (unsigned long) ibs.ino, ibs.pasteofcount);
		setbadness(EXIT_RECOV);
	}

	return changed;
}