Example No. 1
void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in)
{
	DEFINE(v1);
	DEFINE(v2);
	DEFINE(v3);
	unsigned long lines = bytes / (sizeof(unative_t)) / 4;

	preempt_disable();
	enable_kernel_altivec();

	do {
		LOAD(v1);
		LOAD(v2);
		LOAD(v3);
		XOR(v1, v2);
		XOR(v1, v3);
		STORE(v1);

		v1 += 4;
		v2 += 4;
		v3 += 4;
	} while (--lines > 0);

	preempt_enable();
}
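
The DEFINE/LOAD/XOR/STORE helpers are defined elsewhere in the same file (arch/powerpc/lib/xor_vmx.c) and are not part of the snippet. Below is a minimal sketch of what they could expand to, assuming unative_t is a 16-byte AltiVec vector type and the vec_xor() intrinsic; it is an illustration consistent with the loop above, not the verbatim kernel macros.

typedef __vector signed int unative_t;

/* Reinterpret the *_in pointer as a vector pointer and declare four
 * vector temporaries for a 4-way unrolled inner loop. */
#define DEFINE(V)				\
	unative_t *V = (unative_t *)V##_in;	\
	unative_t V##_0, V##_1, V##_2, V##_3

/* Load/store four consecutive vectors (4 x 16 bytes per iteration,
 * which is why lines = bytes / sizeof(unative_t) / 4 above). */
#define LOAD(V)			\
	do {			\
		V##_0 = V[0];	\
		V##_1 = V[1];	\
		V##_2 = V[2];	\
		V##_3 = V[3];	\
	} while (0)

#define STORE(V)		\
	do {			\
		V[0] = V##_0;	\
		V[1] = V##_1;	\
		V[2] = V##_2;	\
		V[3] = V##_3;	\
	} while (0)

/* XOR the second operand's vectors into the first's. */
#define XOR(V1, V2)					\
	do {						\
		V1##_0 = vec_xor(V1##_0, V2##_0);	\
		V1##_1 = vec_xor(V1##_1, V2##_1);	\
		V1##_2 = vec_xor(V1##_2, V2##_2);	\
		V1##_3 = vec_xor(V1##_3, V2##_3);	\
	} while (0)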
Example No. 2
static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
    struct scatterlist *dst, struct scatterlist *src,
    unsigned int nbytes)
{
    int ret;
    struct blkcipher_walk walk;
    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(
            crypto_blkcipher_tfm(desc->tfm));
    struct blkcipher_desc fallback_desc = {
        .tfm = ctx->fallback,
        .info = desc->info,
        .flags = desc->flags
    };

    if (in_interrupt()) {
        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
    } else {
        blkcipher_walk_init(&walk, dst, src, nbytes);
        ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
            pagefault_disable();
            enable_kernel_altivec();
            aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
                (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
            pagefault_enable();

            crypto_inc(walk.iv, AES_BLOCK_SIZE);
            nbytes &= AES_BLOCK_SIZE - 1;
            ret = blkcipher_walk_done(desc, &walk, nbytes);
        }
        if (walk.nbytes) {
            p8_aes_ctr_final(ctx, &walk);
            ret = blkcipher_walk_done(desc, &walk, 0);
        }
    }

    return ret;
}

struct crypto_alg p8_aes_ctr_alg = {
    .cra_name = "ctr(aes)",
    .cra_driver_name = "p8_aes_ctr",
    .cra_module = THIS_MODULE,
    .cra_priority = 1000,
    .cra_type = &crypto_blkcipher_type,
    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
    .cra_alignmask = 0,
    .cra_blocksize = 1,
    .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
    .cra_init = p8_aes_ctr_init,
    .cra_exit = p8_aes_ctr_exit,
    .cra_blkcipher = {
        .ivsize = 0,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .setkey = p8_aes_ctr_setkey,
        .encrypt = p8_aes_ctr_crypt,
        .decrypt = p8_aes_ctr_crypt,
    },
};
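
For context, p8_aes_ctr_alg still has to be registered with the crypto core before "ctr(aes)" requests can be routed to it. The sketch below shows a minimal module wrapper; the real vmx driver registers several algorithms from a shared init path, so the function names here are illustrative.

#include <linux/module.h>
#include <linux/crypto.h>

static int __init p8_aes_ctr_mod_init(void)
{
    /* Make the algorithm visible under "ctr(aes)" / "p8_aes_ctr"; the
     * high cra_priority lets it win over the generic implementation. */
    return crypto_register_alg(&p8_aes_ctr_alg);
}

static void __exit p8_aes_ctr_mod_exit(void)
{
    crypto_unregister_alg(&p8_aes_ctr_alg);
}

module_init(p8_aes_ctr_mod_init);
module_exit(p8_aes_ctr_mod_exit);

MODULE_LICENSE("GPL");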
Example No. 3
static int p8_ghash_update(struct shash_desc *desc,
        const u8 *src, unsigned int srclen)
{
    unsigned int len;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_update(&dctx->fallback_desc, src, srclen);
    } else {
        if (dctx->bytes) {
            if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
                memcpy(dctx->buffer + dctx->bytes, src, srclen);
                dctx->bytes += srclen;
                return 0;
            }
            memcpy(dctx->buffer + dctx->bytes, src,
                    GHASH_DIGEST_SIZE - dctx->bytes);
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            src += GHASH_DIGEST_SIZE - dctx->bytes;
            srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
            dctx->bytes = 0;
        }
        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
        if (len) {
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
            pagefault_enable();
            src += len;
            srclen -= len;
        }
        if (srclen) {
            memcpy(dctx->buffer, src, srclen);
            dctx->bytes = srclen;
        }
        return 0;
    }
}
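
p8_ghash_update buffers partial blocks in the per-request descriptor context and keeps a software fallback for interrupt context. The layouts it assumes look roughly like the sketch below; the field types, and in particular the htable size, are assumptions rather than the verbatim driver structs.

#define GHASH_BLOCK_SIZE    16
#define GHASH_DIGEST_SIZE   16
#define GHASH_KEY_LEN       16

/* Per-transform state: the key-derived hash table consumed by the
 * gcm_*_p8 assembler routines plus a generic ghash fallback. */
struct p8_ghash_ctx {
    u128 htable[16];
    struct crypto_shash *fallback;
};

/* Per-request state: running digest, partial-block buffer and its fill
 * level, and a descriptor for forwarding to the fallback. */
struct p8_ghash_desc_ctx {
    u64 shash[2];
    u8 buffer[GHASH_DIGEST_SIZE];
    int bytes;
    struct shash_desc fallback_desc;
};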
Example No. 4
int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}
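
enter_vmx_copy() leaves preemption disabled on success, so the accelerated memcpy has to call a matching exit helper when it is done. A sketch of that counterpart for the same kernel era (newer kernels also call disable_kernel_altivec() here):

void *exit_vmx_copy(void *dest)
{
	/* Balance the preempt_disable() taken in enter_vmx_copy().
	 * Returning dest lets the assembly memcpy tail-call this helper
	 * and still hand its destination pointer back to the caller. */
	preempt_enable();

	return dest;
}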
Example No. 5
static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
    unsigned int keylen)
{
    int ret;
    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

    pagefault_disable();
    enable_kernel_altivec();
    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
    pagefault_enable();

    ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
    return ret;
}
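
The setkey path expands the key once for the aes_p8_* assembler routines and also keys the software fallback used from interrupt context. The transform context it operates on is roughly the sketch below; struct aes_key is assumed to come from the driver's aesp8-ppc.h header.

struct p8_aes_ctr_ctx {
    struct crypto_blkcipher *fallback; /* generic ctr(aes) used in interrupt context */
    struct aes_key enc_key;            /* expanded key for aes_p8_ctr32_encrypt_blocks() */
};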
Example No. 6
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
    unsigned int keylen)
{
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));

    if (keylen != GHASH_KEY_LEN)
        return -EINVAL;

    pagefault_disable();
    enable_kernel_altivec();
    enable_kernel_fp();
    gcm_init_p8(ctx->htable, (const u64 *) key);
    pagefault_enable();
    return crypto_shash_setkey(ctx->fallback, key, keylen);
}
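
From the caller's side, the keylen check above means crypto_shash_setkey() must be passed exactly 16 key bytes. A hypothetical usage sketch; example_keyed_ghash and its minimal error handling are illustrative, while the crypto_*_shash calls are the standard shash API.

#include <crypto/hash.h>
#include <linux/err.h>

static int example_keyed_ghash(const u8 *key, unsigned int keylen)
{
    struct crypto_shash *tfm;
    int err;

    /* Resolves to p8_ghash when the vmx driver is loaded and wins the
     * priority comparison against ghash-generic. */
    tfm = crypto_alloc_shash("ghash", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    /* keylen must be GHASH_KEY_LEN (16), otherwise setkey returns -EINVAL. */
    err = crypto_shash_setkey(tfm, key, keylen);

    crypto_free_shash(tfm);
    return err;
}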
Example No. 7
int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	/* This acts as preempt_disable() as well, which makes the
	 * enable_kernel_altivec() below safe. We need to disable page
	 * faults as they can call schedule and thus make us lose the
	 * VMX context. So on page faults, we just fail, which causes
	 * a fallback to the normal non-VMX copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}
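
The matching exit path has to undo the pagefault_disable() once the user copy has finished. A sketch of the counterpart for the same kernel era, in which pagefault_disable() still bumped the preempt count:

int exit_vmx_usercopy(void)
{
	/* Re-enable page faults; in this kernel era that also drops the
	 * preempt count taken by pagefault_disable() above.  Returning 0
	 * matches the "bytes not copied" convention of the user-copy
	 * routines that call this helper on their success path. */
	pagefault_enable();

	return 0;
}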
Example No. 8
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
                struct blkcipher_walk *walk)
{
    u8 *ctrblk = walk->iv;
    u8 keystream[AES_BLOCK_SIZE];
    u8 *src = walk->src.virt.addr;
    u8 *dst = walk->dst.virt.addr;
    unsigned int nbytes = walk->nbytes;

    pagefault_disable();
    enable_kernel_altivec();
    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
    pagefault_enable();

    crypto_xor(keystream, src, nbytes);
    memcpy(dst, keystream, nbytes);
    crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
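
crypto_xor(keystream, src, nbytes) XORs the source into the destination buffer in place, so the partial final block ends up as E(counter) XOR src truncated to nbytes. Byte for byte it is equivalent to the sketch below (the real helper works on larger words when alignment allows); xor_bytes is an illustrative name, not a kernel function.

static inline void xor_bytes(u8 *keystream, const u8 *src, unsigned int nbytes)
{
    unsigned int i;

    /* keystream[i] ^= src[i] for the nbytes of the trailing block. */
    for (i = 0; i < nbytes; i++)
        keystream[i] ^= src[i];
}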
Example No. 9
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
{
    int i;
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    if (IN_INTERRUPT) {
        return crypto_shash_final(&dctx->fallback_desc, out);
    } else {
        if (dctx->bytes) {
            for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
                dctx->buffer[i] = 0;
            pagefault_disable();
            enable_kernel_altivec();
            enable_kernel_fp();
            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                    GHASH_DIGEST_SIZE);
            pagefault_enable();
            dctx->bytes = 0;
        }
        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
        return 0;
    }
}
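
For completeness, the init step that pairs with the update/final handlers above resets the per-request state and prepares the fallback descriptor. The following is a sketch consistent with those handlers, not the verbatim driver code.

static int p8_ghash_init(struct shash_desc *desc)
{
    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);

    /* Empty partial-block buffer and zeroed running digest. */
    dctx->bytes = 0;
    memset(dctx->shash, 0, GHASH_DIGEST_SIZE);

    /* Prepare the software fallback so interrupt-context callers can be
     * forwarded to it by p8_ghash_update()/p8_ghash_final(). */
    dctx->fallback_desc.tfm = ctx->fallback;
    dctx->fallback_desc.flags = desc->flags;
    return crypto_shash_init(&dctx->fallback_desc);
}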