Example #1
static void
hamsi_big(sph_hamsi_big_context *sc, const unsigned char *buf, size_t num)
{
	DECL_STATE_BIG
#if !SPH_64
	sph_u32 tmp;
#endif

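	/* Each block is 8 bytes (64 bits), so "num" blocks add
	   num << 6 bits to the message length counter; without a
	   64-bit type the carry is handled manually. */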
#if SPH_64
	sc->count += (sph_u64)num << 6;
#else
	tmp = SPH_T32((sph_u32)num << 6);
	sc->count_low = SPH_T32(sc->count_low + tmp);
	sc->count_high += (sph_u32)((num >> 13) >> 13);
	if (sc->count_low < tmp)
		sc->count_high ++;
#endif
	READ_STATE_BIG(sc);
	while (num -- > 0) {
		sph_u32 m0, m1, m2, m3, m4, m5, m6, m7;
		sph_u32 m8, m9, mA, mB, mC, mD, mE, mF;

		INPUT_BIG;
		P_BIG;
		T_BIG;
		buf += 8;
	}
	WRITE_STATE_BIG(sc);
}
Example #2
static void shavite_big_core(sph_shavite_big_context *sc, const void *data, size_t len)
{
	unsigned char *buf;
	size_t ptr;

	buf = sc->buf;
	ptr = sc->ptr;
	while (len > 0) {
		size_t clen;

		clen = (sizeof sc->buf) - ptr;
		if (clen > len)
			clen = len;
		memcpy(buf + ptr, data, clen);
		data = (const unsigned char *)data + clen;
		ptr += clen;
		len -= clen;
		if (ptr == sizeof sc->buf) {
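			/* A 128-byte block is full: add its 1024 bits to
			   the 128-bit counter, propagating carries through
			   count1..count3, then compress. */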
			if ((sc->count0 = SPH_T32(sc->count0 + 1024)) == 0) {
				sc->count1 = SPH_T32(sc->count1 + 1);
				if (sc->count1 == 0) {
					sc->count2 = SPH_T32(sc->count2 + 1);
					if (sc->count2 == 0) {
						sc->count3 = SPH_T32(
							sc->count3 + 1);
					}
				}
			}
			c512(sc, buf);
			ptr = 0;
		}
	}
	sc->ptr = ptr;
}
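The update function above shows the buffering idiom shared by every sphlib core in these examples: copy input into a fixed-size block buffer, compress whenever the buffer fills, and keep the fill level in ptr between calls. Below is a minimal self-contained sketch of just that idiom; toy_context, toy_update and toy_compress are illustrative names, not sphlib API.

#include <string.h>

typedef struct {
	unsigned char buf[64];   /* one message block */
	size_t ptr;              /* bytes currently buffered */
} toy_context;

static void
toy_compress(toy_context *tc, const unsigned char *block)
{
	/* A real core would mix the 64-byte block into the chaining
	   state here (and typically update a bit counter too). */
	(void)tc;
	(void)block;
}

static void
toy_update(toy_context *tc, const void *data, size_t len)
{
	while (len > 0) {
		size_t clen;

		clen = (sizeof tc->buf) - tc->ptr;
		if (clen > len)
			clen = len;
		memcpy(tc->buf + tc->ptr, data, clen);
		data = (const unsigned char *)data + clen;
		tc->ptr += clen;
		len -= clen;
		if (tc->ptr == sizeof tc->buf) {
			toy_compress(tc, tc->buf);
			tc->ptr = 0;
		}
	}
}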
Example #3
static void
hamsi_small(sph_hamsi_small_context *sc, const unsigned char *buf, size_t num)
{
	DECL_STATE_SMALL
#if !SPH_64
	sph_u32 tmp;
#endif

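	/* Each block is 4 bytes (32 bits), so "num" blocks add
	   num << 5 bits to the message length counter. */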
#if SPH_64
	sc->count += (sph_u64)num << 5;
#else
	tmp = SPH_T32((sph_u32)num << 5);
	sc->count_low = SPH_T32(sc->count_low + tmp);
	sc->count_high += (sph_u32)((num >> 13) >> 14);
	if (sc->count_low < tmp)
		sc->count_high ++;
#endif
	READ_STATE_SMALL(sc);
	while (num -- > 0) {
		sph_u32 m0, m1, m2, m3, m4, m5, m6, m7;

		INPUT_SMALL;
		P_SMALL;
		T_SMALL;
		buf += 4;
	}
	WRITE_STATE_SMALL(sc);
}
Example #4
File: jh.c Project: atcsecure/XC2
static void
jh_close(sph_jh_context *sc, unsigned ub, unsigned n,
	void *dst, size_t out_size_w32, const void *iv)
{
	unsigned z;
	unsigned char buf[128];
	size_t numz, u;
#if SPH_64
	sph_u64 l0, l1;
#else
	sph_u32 l0, l1, l2, l3;
#endif

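	/* JH padding: one 1 bit, enough 0 bits to leave room for the
	   length field, then the 128-bit big-endian message bit count
	   (numz + 17 bytes in total). */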
	z = 0x80 >> n;
	buf[0] = ((ub & -z) | z) & 0xFF;
	if (sc->ptr == 0 && n == 0) {
		numz = 47;
	} else {
		numz = 111 - sc->ptr;
	}
	memset(buf + 1, 0, numz);
#if SPH_64
	l0 = SPH_T64(sc->block_count << 9) + (sc->ptr << 3) + n;
	l1 = SPH_T64(sc->block_count >> 55);
	sph_enc64be(buf + numz + 1, l1);
	sph_enc64be(buf + numz + 9, l0);
#else
	l0 = SPH_T32(sc->block_count_low << 9) + (sc->ptr << 3) + n;
	l1 = SPH_T32(sc->block_count_low >> 23)
		+ SPH_T32(sc->block_count_high << 9);
	l2 = SPH_T32(sc->block_count_high >> 23);
	l3 = 0;
	sph_enc32be(buf + numz +  1, l3);
	sph_enc32be(buf + numz +  5, l2);
	sph_enc32be(buf + numz +  9, l1);
	sph_enc32be(buf + numz + 13, l0);
#endif
	jh_core(sc, buf, numz + 17);
#if SPH_JH_64
	for (u = 0; u < 8; u ++)
		enc64e(buf + (u << 3), sc->H.wide[u + 8]);
#else
	for (u = 0; u < 16; u ++)
		enc32e(buf + (u << 2), sc->H.narrow[u + 16]);
#endif
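	/* The digest is the trailing out_size_w32 words of the second
	   half of the state, just encoded into buf. */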
	memcpy(dst, buf + ((16 - out_size_w32) << 2), out_size_w32 << 2);
	jh_init(sc, iv);
}
Example #5
static void
blake32_close(sph_blake_small_context *sc,
	unsigned ub, unsigned n, void *dst, size_t out_size_w32)
{
	union {
		unsigned char buf[64];
		sph_u32 dummy;
	} u;
	size_t ptr, k;
	unsigned bit_len;
	unsigned z;
	sph_u32 th, tl;
	unsigned char *out;

	ptr = sc->ptr;
	bit_len = ((unsigned)ptr << 3) + n;
	z = 0x80 >> n;
	u.buf[ptr] = ((ub & -z) | z) & 0xFF;
	tl = sc->T0 + bit_len;
	th = sc->T1;
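	/* blake32() adds 512 to T0 for each full block it compresses;
	   adjust T0/T1 here so the counter seen by the final compression
	   matches the exact message bit length, or wraps to zero for a
	   block that holds only padding. */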
	if (ptr == 0 && n == 0) {
		sc->T0 = SPH_C32(0xFFFFFE00);
		sc->T1 = SPH_C32(0xFFFFFFFF);
	} else if (sc->T0 == 0) {
		sc->T0 = SPH_C32(0xFFFFFE00) + bit_len;
		sc->T1 = SPH_T32(sc->T1 - 1);
	} else {
		sc->T0 -= 512 - bit_len;
	}
	if (bit_len <= 446) {
		memset(u.buf + ptr + 1, 0, 55 - ptr);
		if (out_size_w32 == 8)
			u.buf[55] |= 1;
		sph_enc32be_aligned(u.buf + 56, th);
		sph_enc32be_aligned(u.buf + 60, tl);
		blake32(sc, u.buf + ptr, 64 - ptr);
	} else {
		memset(u.buf + ptr + 1, 0, 63 - ptr);
		blake32(sc, u.buf + ptr, 64 - ptr);
		sc->T0 = SPH_C32(0xFFFFFE00);
		sc->T1 = SPH_C32(0xFFFFFFFF);
		memset(u.buf, 0, 56);
		if (out_size_w32 == 8)
			u.buf[55] = 1;
		sph_enc32be_aligned(u.buf + 56, th);
		sph_enc32be_aligned(u.buf + 60, tl);
		blake32(sc, u.buf, 64);
	}
	out = dst;
	for (k = 0; k < out_size_w32; k ++)
		sph_enc32be(out + (k << 2), sc->H[k]);
}
Example #6
static void
blake32(sph_blake_small_context *sc, const void *data, size_t len)
{
	unsigned char *buf;
	size_t ptr;
	DECL_STATE32

	buf = sc->buf;
	ptr = sc->ptr;
	if (len < (sizeof sc->buf) - ptr) {
		memcpy(buf + ptr, data, len);
		ptr += len;
		sc->ptr = ptr;
		return;
	}

	READ_STATE32(sc);
	while (len > 0) {
		size_t clen;

		clen = (sizeof sc->buf) - ptr;
		if (clen > len)
			clen = len;
		memcpy(buf + ptr, data, clen);
		ptr += clen;
		data = (const unsigned char *)data + clen;
		len -= clen;
		if (ptr == sizeof sc->buf) {
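			/* Full 64-byte block: add 512 bits to the
			   T1:T0 counter (with carry) and compress. */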
			if ((T0 = SPH_T32(T0 + 512)) < 512)
				T1 = SPH_T32(T1 + 1);
			COMPRESS32;
			ptr = 0;
		}
	}
	WRITE_STATE32(sc);
	sc->ptr = ptr;
}
Example #7
static void
bmw32(sph_bmw_small_context *sc, const void *data, size_t len)
{
    unsigned char *buf;
    size_t ptr;
    sph_u32 htmp[16];
    sph_u32 *h1, *h2;
#if !SPH_64
    sph_u32 tmp;
#endif

#if SPH_64
    sc->bit_count += (sph_u64)len << 3;
#else
    tmp = sc->bit_count_low;
    sc->bit_count_low = SPH_T32(tmp + ((sph_u32)len << 3));
    if (sc->bit_count_low < tmp)
        sc->bit_count_high ++;
    sc->bit_count_high += len >> 29;
#endif
    buf = sc->buf;
    ptr = sc->ptr;
    h1 = sc->H;
    h2 = htmp;
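    /* compress_small() reads the chaining value from one buffer and
     * writes the new one into the other; swapping h1/h2 after each
     * block avoids a copy. */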
    while (len > 0) {
        size_t clen;

        clen = (sizeof sc->buf) - ptr;
        if (clen > len)
            clen = len;
        memcpy(buf + ptr, data, clen);
        data = (const unsigned char *)data + clen;
        len -= clen;
        ptr += clen;
        if (ptr == sizeof sc->buf) {
            sph_u32 *ht;

            compress_small(buf, h1, h2);
            ht = h1;
            h1 = h2;
            h2 = ht;
            ptr = 0;
        }
    }
    sc->ptr = ptr;
    if (h1 != sc->H)
        memcpy(sc->H, h1, sizeof sc->H);
}
Example #8
static void
jh_core(sph_jh_context *sc, const void *data, size_t len)
{
	unsigned char *buf;
	size_t ptr;
	DECL_STATE

	buf = sc->buf;
	ptr = sc->ptr;

	if (len < (sizeof sc->buf) - ptr) {
		memcpy(buf + ptr, data, len);
		ptr += len;
		sc->ptr = ptr;
		return;
	}

	READ_STATE(sc);
	while (len > 0) {
		size_t clen;

		clen = (sizeof sc->buf) - ptr;
		if (clen > len)
			clen = len;
		memcpy(buf + ptr, data, clen);
		ptr += clen;
		data = (const unsigned char *)data + clen;
		len -= clen;
		if (ptr == sizeof sc->buf) {
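			/* JH compression: XOR the block into the low half
			   of the state, apply the E8 permutation, then XOR
			   the block into the high half. */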
			INPUT_BUF1;
			E8;
			INPUT_BUF2;
#if SPH_64
			sc->block_count ++;
#else
			if ((sc->block_count_low = SPH_T32(
				sc->block_count_low + 1)) == 0)
				sc->block_count_high ++;
#endif
			ptr = 0;
		}
	}
	WRITE_STATE(sc);
	sc->ptr = ptr;
}
Example #9
static void
bmw32_close(sph_bmw_small_context *sc, unsigned ub, unsigned n,
            void *dst, size_t out_size_w32)
{
    unsigned char *buf, *out;
    size_t ptr, u, v;
    unsigned z;
    sph_u32 h1[16], h2[16], *h;

    buf = sc->buf;
    ptr = sc->ptr;
    z = 0x80 >> n;
    buf[ptr ++] = ((ub & -z) | z) & 0xFF;
    h = sc->H;
    if (ptr > (sizeof sc->buf) - 8) {
        memset(buf + ptr, 0, (sizeof sc->buf) - ptr);
        compress_small(buf, h, h1);
        ptr = 0;
        h = h1;
    }
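    /* Pad with zeros up to the 64-bit length field, compress, then
     * re-compress the result with the constant final_s value and keep
     * the trailing words as the digest. */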
    memset(buf + ptr, 0, (sizeof sc->buf) - 8 - ptr);
#if SPH_64
    sph_enc64le_aligned(buf + (sizeof sc->buf) - 8,
                        SPH_T64(sc->bit_count + n));
#else
    sph_enc32le_aligned(buf + (sizeof sc->buf) - 8,
                        sc->bit_count_low + n);
    sph_enc32le_aligned(buf + (sizeof sc->buf) - 4,
                        SPH_T32(sc->bit_count_high));
#endif
    compress_small(buf, h, h2);
    for (u = 0; u < 16; u ++)
        sph_enc32le_aligned(buf + 4 * u, h2[u]);
    compress_small(buf, final_s, h1);
    out = dst;
    for (u = 0, v = 16 - out_size_w32; u < out_size_w32; u ++, v ++)
        sph_enc32le(out + 4 * u, h1[v]);
}
Example #10
/*
 * This function assumes that "msg" is aligned for 32-bit access.
 */
static void
c512(sph_shavite_big_context *sc, const void *msg)
{
	sph_u32 p0, p1, p2, p3, p4, p5, p6, p7;
	sph_u32 p8, p9, pA, pB, pC, pD, pE, pF;
	sph_u32 x0, x1, x2, x3;
	sph_u32 rk00, rk01, rk02, rk03, rk04, rk05, rk06, rk07;
	sph_u32 rk08, rk09, rk0A, rk0B, rk0C, rk0D, rk0E, rk0F;
	sph_u32 rk10, rk11, rk12, rk13, rk14, rk15, rk16, rk17;
	sph_u32 rk18, rk19, rk1A, rk1B, rk1C, rk1D, rk1E, rk1F;
	int r;

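	/* SHAvite-512 compression: 14 rounds over two 256-bit state
	   halves, with round keys expanded on the fly from the 128-byte
	   message block. Round 0 follows; the loop below covers rounds
	   1-12, four per iteration. */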
	p0 = sc->h[0x0];
	p1 = sc->h[0x1];
	p2 = sc->h[0x2];
	p3 = sc->h[0x3];
	p4 = sc->h[0x4];
	p5 = sc->h[0x5];
	p6 = sc->h[0x6];
	p7 = sc->h[0x7];
	p8 = sc->h[0x8];
	p9 = sc->h[0x9];
	pA = sc->h[0xA];
	pB = sc->h[0xB];
	pC = sc->h[0xC];
	pD = sc->h[0xD];
	pE = sc->h[0xE];
	pF = sc->h[0xF];
	/* round 0 */
	rk00 = sph_dec32le_aligned((const unsigned char *)msg +   0);
	x0 = p4 ^ rk00;
	rk01 = sph_dec32le_aligned((const unsigned char *)msg +   4);
	x1 = p5 ^ rk01;
	rk02 = sph_dec32le_aligned((const unsigned char *)msg +   8);
	x2 = p6 ^ rk02;
	rk03 = sph_dec32le_aligned((const unsigned char *)msg +  12);
	x3 = p7 ^ rk03;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk04 = sph_dec32le_aligned((const unsigned char *)msg +  16);
	x0 ^= rk04;
	rk05 = sph_dec32le_aligned((const unsigned char *)msg +  20);
	x1 ^= rk05;
	rk06 = sph_dec32le_aligned((const unsigned char *)msg +  24);
	x2 ^= rk06;
	rk07 = sph_dec32le_aligned((const unsigned char *)msg +  28);
	x3 ^= rk07;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk08 = sph_dec32le_aligned((const unsigned char *)msg +  32);
	x0 ^= rk08;
	rk09 = sph_dec32le_aligned((const unsigned char *)msg +  36);
	x1 ^= rk09;
	rk0A = sph_dec32le_aligned((const unsigned char *)msg +  40);
	x2 ^= rk0A;
	rk0B = sph_dec32le_aligned((const unsigned char *)msg +  44);
	x3 ^= rk0B;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk0C = sph_dec32le_aligned((const unsigned char *)msg +  48);
	x0 ^= rk0C;
	rk0D = sph_dec32le_aligned((const unsigned char *)msg +  52);
	x1 ^= rk0D;
	rk0E = sph_dec32le_aligned((const unsigned char *)msg +  56);
	x2 ^= rk0E;
	rk0F = sph_dec32le_aligned((const unsigned char *)msg +  60);
	x3 ^= rk0F;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	rk10 = sph_dec32le_aligned((const unsigned char *)msg +  64);
	x0 = pC ^ rk10;
	rk11 = sph_dec32le_aligned((const unsigned char *)msg +  68);
	x1 = pD ^ rk11;
	rk12 = sph_dec32le_aligned((const unsigned char *)msg +  72);
	x2 = pE ^ rk12;
	rk13 = sph_dec32le_aligned((const unsigned char *)msg +  76);
	x3 = pF ^ rk13;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk14 = sph_dec32le_aligned((const unsigned char *)msg +  80);
	x0 ^= rk14;
	rk15 = sph_dec32le_aligned((const unsigned char *)msg +  84);
	x1 ^= rk15;
	rk16 = sph_dec32le_aligned((const unsigned char *)msg +  88);
	x2 ^= rk16;
	rk17 = sph_dec32le_aligned((const unsigned char *)msg +  92);
	x3 ^= rk17;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk18 = sph_dec32le_aligned((const unsigned char *)msg +  96);
	x0 ^= rk18;
	rk19 = sph_dec32le_aligned((const unsigned char *)msg + 100);
	x1 ^= rk19;
	rk1A = sph_dec32le_aligned((const unsigned char *)msg + 104);
	x2 ^= rk1A;
	rk1B = sph_dec32le_aligned((const unsigned char *)msg + 108);
	x3 ^= rk1B;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk1C = sph_dec32le_aligned((const unsigned char *)msg + 112);
	x0 ^= rk1C;
	rk1D = sph_dec32le_aligned((const unsigned char *)msg + 116);
	x1 ^= rk1D;
	rk1E = sph_dec32le_aligned((const unsigned char *)msg + 120);
	x2 ^= rk1E;
	rk1F = sph_dec32le_aligned((const unsigned char *)msg + 124);
	x3 ^= rk1F;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p8 ^= x0;
	p9 ^= x1;
	pA ^= x2;
	pB ^= x3;

	for (r = 0; r < 3; r ++) {
		/* round 1, 5, 9 */
		KEY_EXPAND_ELT(rk00, rk01, rk02, rk03);
		rk00 ^= rk1C;
		rk01 ^= rk1D;
		rk02 ^= rk1E;
		rk03 ^= rk1F;
		if (r == 0) {
			rk00 ^= sc->count0;
			rk01 ^= sc->count1;
			rk02 ^= sc->count2;
			rk03 ^= SPH_T32(~sc->count3);
		}
		x0 = p0 ^ rk00;
		x1 = p1 ^ rk01;
		x2 = p2 ^ rk02;
		x3 = p3 ^ rk03;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk04, rk05, rk06, rk07);
		rk04 ^= rk00;
		rk05 ^= rk01;
		rk06 ^= rk02;
		rk07 ^= rk03;
		if (r == 1) {
			rk04 ^= sc->count3;
			rk05 ^= sc->count2;
			rk06 ^= sc->count1;
			rk07 ^= SPH_T32(~sc->count0);
		}
		x0 ^= rk04;
		x1 ^= rk05;
		x2 ^= rk06;
		x3 ^= rk07;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B);
		rk08 ^= rk04;
		rk09 ^= rk05;
		rk0A ^= rk06;
		rk0B ^= rk07;
		x0 ^= rk08;
		x1 ^= rk09;
		x2 ^= rk0A;
		x3 ^= rk0B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F);
		rk0C ^= rk08;
		rk0D ^= rk09;
		rk0E ^= rk0A;
		rk0F ^= rk0B;
		x0 ^= rk0C;
		x1 ^= rk0D;
		x2 ^= rk0E;
		x3 ^= rk0F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		pC ^= x0;
		pD ^= x1;
		pE ^= x2;
		pF ^= x3;
		KEY_EXPAND_ELT(rk10, rk11, rk12, rk13);
		rk10 ^= rk0C;
		rk11 ^= rk0D;
		rk12 ^= rk0E;
		rk13 ^= rk0F;
		x0 = p8 ^ rk10;
		x1 = p9 ^ rk11;
		x2 = pA ^ rk12;
		x3 = pB ^ rk13;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk14, rk15, rk16, rk17);
		rk14 ^= rk10;
		rk15 ^= rk11;
		rk16 ^= rk12;
		rk17 ^= rk13;
		x0 ^= rk14;
		x1 ^= rk15;
		x2 ^= rk16;
		x3 ^= rk17;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B);
		rk18 ^= rk14;
		rk19 ^= rk15;
		rk1A ^= rk16;
		rk1B ^= rk17;
		x0 ^= rk18;
		x1 ^= rk19;
		x2 ^= rk1A;
		x3 ^= rk1B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F);
		rk1C ^= rk18;
		rk1D ^= rk19;
		rk1E ^= rk1A;
		rk1F ^= rk1B;
		if (r == 2) {
			rk1C ^= sc->count2;
			rk1D ^= sc->count3;
			rk1E ^= sc->count0;
			rk1F ^= SPH_T32(~sc->count1);
		}
		x0 ^= rk1C;
		x1 ^= rk1D;
		x2 ^= rk1E;
		x3 ^= rk1F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p4 ^= x0;
		p5 ^= x1;
		p6 ^= x2;
		p7 ^= x3;
		/* round 2, 6, 10 */
		rk00 ^= rk19;
		x0 = pC ^ rk00;
		rk01 ^= rk1A;
		x1 = pD ^ rk01;
		rk02 ^= rk1B;
		x2 = pE ^ rk02;
		rk03 ^= rk1C;
		x3 = pF ^ rk03;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk04 ^= rk1D;
		x0 ^= rk04;
		rk05 ^= rk1E;
		x1 ^= rk05;
		rk06 ^= rk1F;
		x2 ^= rk06;
		rk07 ^= rk00;
		x3 ^= rk07;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk08 ^= rk01;
		x0 ^= rk08;
		rk09 ^= rk02;
		x1 ^= rk09;
		rk0A ^= rk03;
		x2 ^= rk0A;
		rk0B ^= rk04;
		x3 ^= rk0B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk0C ^= rk05;
		x0 ^= rk0C;
		rk0D ^= rk06;
		x1 ^= rk0D;
		rk0E ^= rk07;
		x2 ^= rk0E;
		rk0F ^= rk08;
		x3 ^= rk0F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p8 ^= x0;
		p9 ^= x1;
		pA ^= x2;
		pB ^= x3;
		rk10 ^= rk09;
		x0 = p4 ^ rk10;
		rk11 ^= rk0A;
		x1 = p5 ^ rk11;
		rk12 ^= rk0B;
		x2 = p6 ^ rk12;
		rk13 ^= rk0C;
		x3 = p7 ^ rk13;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk14 ^= rk0D;
		x0 ^= rk14;
		rk15 ^= rk0E;
		x1 ^= rk15;
		rk16 ^= rk0F;
		x2 ^= rk16;
		rk17 ^= rk10;
		x3 ^= rk17;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk18 ^= rk11;
		x0 ^= rk18;
		rk19 ^= rk12;
		x1 ^= rk19;
		rk1A ^= rk13;
		x2 ^= rk1A;
		rk1B ^= rk14;
		x3 ^= rk1B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk1C ^= rk15;
		x0 ^= rk1C;
		rk1D ^= rk16;
		x1 ^= rk1D;
		rk1E ^= rk17;
		x2 ^= rk1E;
		rk1F ^= rk18;
		x3 ^= rk1F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p0 ^= x0;
		p1 ^= x1;
		p2 ^= x2;
		p3 ^= x3;
		/* round 3, 7, 11 */
		KEY_EXPAND_ELT(rk00, rk01, rk02, rk03);
		rk00 ^= rk1C;
		rk01 ^= rk1D;
		rk02 ^= rk1E;
		rk03 ^= rk1F;
		x0 = p8 ^ rk00;
		x1 = p9 ^ rk01;
		x2 = pA ^ rk02;
		x3 = pB ^ rk03;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk04, rk05, rk06, rk07);
		rk04 ^= rk00;
		rk05 ^= rk01;
		rk06 ^= rk02;
		rk07 ^= rk03;
		x0 ^= rk04;
		x1 ^= rk05;
		x2 ^= rk06;
		x3 ^= rk07;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B);
		rk08 ^= rk04;
		rk09 ^= rk05;
		rk0A ^= rk06;
		rk0B ^= rk07;
		x0 ^= rk08;
		x1 ^= rk09;
		x2 ^= rk0A;
		x3 ^= rk0B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F);
		rk0C ^= rk08;
		rk0D ^= rk09;
		rk0E ^= rk0A;
		rk0F ^= rk0B;
		x0 ^= rk0C;
		x1 ^= rk0D;
		x2 ^= rk0E;
		x3 ^= rk0F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p4 ^= x0;
		p5 ^= x1;
		p6 ^= x2;
		p7 ^= x3;
		KEY_EXPAND_ELT(rk10, rk11, rk12, rk13);
		rk10 ^= rk0C;
		rk11 ^= rk0D;
		rk12 ^= rk0E;
		rk13 ^= rk0F;
		x0 = p0 ^ rk10;
		x1 = p1 ^ rk11;
		x2 = p2 ^ rk12;
		x3 = p3 ^ rk13;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk14, rk15, rk16, rk17);
		rk14 ^= rk10;
		rk15 ^= rk11;
		rk16 ^= rk12;
		rk17 ^= rk13;
		x0 ^= rk14;
		x1 ^= rk15;
		x2 ^= rk16;
		x3 ^= rk17;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B);
		rk18 ^= rk14;
		rk19 ^= rk15;
		rk1A ^= rk16;
		rk1B ^= rk17;
		x0 ^= rk18;
		x1 ^= rk19;
		x2 ^= rk1A;
		x3 ^= rk1B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F);
		rk1C ^= rk18;
		rk1D ^= rk19;
		rk1E ^= rk1A;
		rk1F ^= rk1B;
		x0 ^= rk1C;
		x1 ^= rk1D;
		x2 ^= rk1E;
		x3 ^= rk1F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		pC ^= x0;
		pD ^= x1;
		pE ^= x2;
		pF ^= x3;
		/* round 4, 8, 12 */
		rk00 ^= rk19;
		x0 = p4 ^ rk00;
		rk01 ^= rk1A;
		x1 = p5 ^ rk01;
		rk02 ^= rk1B;
		x2 = p6 ^ rk02;
		rk03 ^= rk1C;
		x3 = p7 ^ rk03;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk04 ^= rk1D;
		x0 ^= rk04;
		rk05 ^= rk1E;
		x1 ^= rk05;
		rk06 ^= rk1F;
		x2 ^= rk06;
		rk07 ^= rk00;
		x3 ^= rk07;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk08 ^= rk01;
		x0 ^= rk08;
		rk09 ^= rk02;
		x1 ^= rk09;
		rk0A ^= rk03;
		x2 ^= rk0A;
		rk0B ^= rk04;
		x3 ^= rk0B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk0C ^= rk05;
		x0 ^= rk0C;
		rk0D ^= rk06;
		x1 ^= rk0D;
		rk0E ^= rk07;
		x2 ^= rk0E;
		rk0F ^= rk08;
		x3 ^= rk0F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p0 ^= x0;
		p1 ^= x1;
		p2 ^= x2;
		p3 ^= x3;
		rk10 ^= rk09;
		x0 = pC ^ rk10;
		rk11 ^= rk0A;
		x1 = pD ^ rk11;
		rk12 ^= rk0B;
		x2 = pE ^ rk12;
		rk13 ^= rk0C;
		x3 = pF ^ rk13;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk14 ^= rk0D;
		x0 ^= rk14;
		rk15 ^= rk0E;
		x1 ^= rk15;
		rk16 ^= rk0F;
		x2 ^= rk16;
		rk17 ^= rk10;
		x3 ^= rk17;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk18 ^= rk11;
		x0 ^= rk18;
		rk19 ^= rk12;
		x1 ^= rk19;
		rk1A ^= rk13;
		x2 ^= rk1A;
		rk1B ^= rk14;
		x3 ^= rk1B;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		rk1C ^= rk15;
		x0 ^= rk1C;
		rk1D ^= rk16;
		x1 ^= rk1D;
		rk1E ^= rk17;
		x2 ^= rk1E;
		rk1F ^= rk18;
		x3 ^= rk1F;
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p8 ^= x0;
		p9 ^= x1;
		pA ^= x2;
		pB ^= x3;
	}
	/* round 13 */
	KEY_EXPAND_ELT(rk00, rk01, rk02, rk03);
	rk00 ^= rk1C;
	rk01 ^= rk1D;
	rk02 ^= rk1E;
	rk03 ^= rk1F;
	x0 = p0 ^ rk00;
	x1 = p1 ^ rk01;
	x2 = p2 ^ rk02;
	x3 = p3 ^ rk03;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk04, rk05, rk06, rk07);
	rk04 ^= rk00;
	rk05 ^= rk01;
	rk06 ^= rk02;
	rk07 ^= rk03;
	x0 ^= rk04;
	x1 ^= rk05;
	x2 ^= rk06;
	x3 ^= rk07;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk08, rk09, rk0A, rk0B);
	rk08 ^= rk04;
	rk09 ^= rk05;
	rk0A ^= rk06;
	rk0B ^= rk07;
	x0 ^= rk08;
	x1 ^= rk09;
	x2 ^= rk0A;
	x3 ^= rk0B;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk0C, rk0D, rk0E, rk0F);
	rk0C ^= rk08;
	rk0D ^= rk09;
	rk0E ^= rk0A;
	rk0F ^= rk0B;
	x0 ^= rk0C;
	x1 ^= rk0D;
	x2 ^= rk0E;
	x3 ^= rk0F;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	pC ^= x0;
	pD ^= x1;
	pE ^= x2;
	pF ^= x3;
	KEY_EXPAND_ELT(rk10, rk11, rk12, rk13);
	rk10 ^= rk0C;
	rk11 ^= rk0D;
	rk12 ^= rk0E;
	rk13 ^= rk0F;
	x0 = p8 ^ rk10;
	x1 = p9 ^ rk11;
	x2 = pA ^ rk12;
	x3 = pB ^ rk13;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk14, rk15, rk16, rk17);
	rk14 ^= rk10;
	rk15 ^= rk11;
	rk16 ^= rk12;
	rk17 ^= rk13;
	x0 ^= rk14;
	x1 ^= rk15;
	x2 ^= rk16;
	x3 ^= rk17;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk18, rk19, rk1A, rk1B);
	rk18 ^= rk14 ^ sc->count1;
	rk19 ^= rk15 ^ sc->count0;
	rk1A ^= rk16 ^ sc->count3;
	rk1B ^= rk17 ^ SPH_T32(~sc->count2);
	x0 ^= rk18;
	x1 ^= rk19;
	x2 ^= rk1A;
	x3 ^= rk1B;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk1C, rk1D, rk1E, rk1F);
	rk1C ^= rk18;
	rk1D ^= rk19;
	rk1E ^= rk1A;
	rk1F ^= rk1B;
	x0 ^= rk1C;
	x1 ^= rk1D;
	x2 ^= rk1E;
	x3 ^= rk1F;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
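	/* Davies-Meyer feedforward: XOR the permuted state into h,
	   with the two 256-bit halves swapped. */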
	sc->h[0x0] ^= p8;
	sc->h[0x1] ^= p9;
	sc->h[0x2] ^= pA;
	sc->h[0x3] ^= pB;
	sc->h[0x4] ^= pC;
	sc->h[0x5] ^= pD;
	sc->h[0x6] ^= pE;
	sc->h[0x7] ^= pF;
	sc->h[0x8] ^= p0;
	sc->h[0x9] ^= p1;
	sc->h[0xA] ^= p2;
	sc->h[0xB] ^= p3;
	sc->h[0xC] ^= p4;
	sc->h[0xD] ^= p5;
	sc->h[0xE] ^= p6;
	sc->h[0xF] ^= p7;
}
Example #11
/*
 * This function assumes that "msg" is aligned for 32-bit access.
 */
static void
c512(sph_shavite_big_context *sc, const void *msg)
{
	sph_u32 p0, p1, p2, p3, p4, p5, p6, p7;
	sph_u32 p8, p9, pA, pB, pC, pD, pE, pF;
	sph_u32 rk[448];
	size_t u;
	int r, s;

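	/* Precompute the full 448-word round-key schedule: 32 words from
	   the message, then alternating AES-based and purely linear
	   expansion steps, with the bit counter injected at fixed
	   offsets (32, 164, 316, 440). */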
#if SPH_LITTLE_ENDIAN
	memcpy(rk, msg, 128);
#else
	for (u = 0; u < 32; u += 4) {
		rk[u + 0] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  0);
		rk[u + 1] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  4);
		rk[u + 2] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  8);
		rk[u + 3] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) + 12);
	}
#endif
	u = 32;
	for (;;) {
		for (s = 0; s < 4; s ++) {
			sph_u32 x0, x1, x2, x3;

			x0 = rk[u - 31];
			x1 = rk[u - 30];
			x2 = rk[u - 29];
			x3 = rk[u - 32];
			AES_ROUND_NOKEY(x0, x1, x2, x3);
			rk[u + 0] = x0 ^ rk[u - 4];
			rk[u + 1] = x1 ^ rk[u - 3];
			rk[u + 2] = x2 ^ rk[u - 2];
			rk[u + 3] = x3 ^ rk[u - 1];
			if (u == 32) {
				rk[ 32] ^= sc->count0;
				rk[ 33] ^= sc->count1;
				rk[ 34] ^= sc->count2;
				rk[ 35] ^= SPH_T32(~sc->count3);
			} else if (u == 440) {
				rk[440] ^= sc->count1;
				rk[441] ^= sc->count0;
				rk[442] ^= sc->count3;
				rk[443] ^= SPH_T32(~sc->count2);
			}
			u += 4;

			x0 = rk[u - 31];
			x1 = rk[u - 30];
			x2 = rk[u - 29];
			x3 = rk[u - 32];
			AES_ROUND_NOKEY(x0, x1, x2, x3);
			rk[u + 0] = x0 ^ rk[u - 4];
			rk[u + 1] = x1 ^ rk[u - 3];
			rk[u + 2] = x2 ^ rk[u - 2];
			rk[u + 3] = x3 ^ rk[u - 1];
			if (u == 164) {
				rk[164] ^= sc->count3;
				rk[165] ^= sc->count2;
				rk[166] ^= sc->count1;
				rk[167] ^= SPH_T32(~sc->count0);
			} else if (u == 316) {
				rk[316] ^= sc->count2;
				rk[317] ^= sc->count3;
				rk[318] ^= sc->count0;
				rk[319] ^= SPH_T32(~sc->count1);
			}
			u += 4;
		}
		if (u == 448)
			break;
		for (s = 0; s < 8; s ++) {
			rk[u + 0] = rk[u - 32] ^ rk[u - 7];
			rk[u + 1] = rk[u - 31] ^ rk[u - 6];
			rk[u + 2] = rk[u - 30] ^ rk[u - 5];
			rk[u + 3] = rk[u - 29] ^ rk[u - 4];
			u += 4;
		}
	}

	p0 = sc->h[0x0];
	p1 = sc->h[0x1];
	p2 = sc->h[0x2];
	p3 = sc->h[0x3];
	p4 = sc->h[0x4];
	p5 = sc->h[0x5];
	p6 = sc->h[0x6];
	p7 = sc->h[0x7];
	p8 = sc->h[0x8];
	p9 = sc->h[0x9];
	pA = sc->h[0xA];
	pB = sc->h[0xB];
	pC = sc->h[0xC];
	pD = sc->h[0xD];
	pE = sc->h[0xE];
	pF = sc->h[0xF];
	u = 0;
	for (r = 0; r < 14; r ++) {
#define C512_ELT(l0, l1, l2, l3, r0, r1, r2, r3)   do { \
		sph_u32 x0, x1, x2, x3; \
		x0 = r0 ^ rk[u ++]; \
		x1 = r1 ^ rk[u ++]; \
		x2 = r2 ^ rk[u ++]; \
		x3 = r3 ^ rk[u ++]; \
		AES_ROUND_NOKEY(x0, x1, x2, x3); \
		x0 ^= rk[u ++]; \
		x1 ^= rk[u ++]; \
		x2 ^= rk[u ++]; \
		x3 ^= rk[u ++]; \
		AES_ROUND_NOKEY(x0, x1, x2, x3); \
		x0 ^= rk[u ++]; \
		x1 ^= rk[u ++]; \
		x2 ^= rk[u ++]; \
		x3 ^= rk[u ++]; \
		AES_ROUND_NOKEY(x0, x1, x2, x3); \
		x0 ^= rk[u ++]; \
		x1 ^= rk[u ++]; \
		x2 ^= rk[u ++]; \
		x3 ^= rk[u ++]; \
		AES_ROUND_NOKEY(x0, x1, x2, x3); \
		l0 ^= x0; \
		l1 ^= x1; \
		l2 ^= x2; \
		l3 ^= x3; \
	} while (0)

#define WROT(a, b, c, d)   do { \
		sph_u32 t = d; \
		d = c; \
		c = b; \
		b = a; \
		a = t; \
	} while (0)

		C512_ELT(p0, p1, p2, p3, p4, p5, p6, p7);
		C512_ELT(p8, p9, pA, pB, pC, pD, pE, pF);

		WROT(p0, p4, p8, pC);
		WROT(p1, p5, p9, pD);
		WROT(p2, p6, pA, pE);
		WROT(p3, p7, pB, pF);

#undef C512_ELT
#undef WROT
	}
	sc->h[0x0] ^= p0;
	sc->h[0x1] ^= p1;
	sc->h[0x2] ^= p2;
	sc->h[0x3] ^= p3;
	sc->h[0x4] ^= p4;
	sc->h[0x5] ^= p5;
	sc->h[0x6] ^= p6;
	sc->h[0x7] ^= p7;
	sc->h[0x8] ^= p8;
	sc->h[0x9] ^= p9;
	sc->h[0xA] ^= pA;
	sc->h[0xB] ^= pB;
	sc->h[0xC] ^= pC;
	sc->h[0xD] ^= pD;
	sc->h[0xE] ^= pE;
	sc->h[0xF] ^= pF;
}
Example #12
/*
 * This function assumes that "msg" is aligned for 32-bit access.
 */
static void
c256(sph_shavite_small_context *sc, const void *msg)
{
	sph_u32 p0, p1, p2, p3, p4, p5, p6, p7;
	sph_u32 x0, x1, x2, x3;
	sph_u32 rk0, rk1, rk2, rk3, rk4, rk5, rk6, rk7;
	sph_u32 rk8, rk9, rkA, rkB, rkC, rkD, rkE, rkF;

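	/* SHAvite-256 compression: 12 rounds over two 128-bit state
	   halves; the bit counter is folded into the key expansion in
	   rounds 1, 4, 7 and 10. */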
	p0 = sc->h[0x0];
	p1 = sc->h[0x1];
	p2 = sc->h[0x2];
	p3 = sc->h[0x3];
	p4 = sc->h[0x4];
	p5 = sc->h[0x5];
	p6 = sc->h[0x6];
	p7 = sc->h[0x7];
	/* round 0 */
	rk0 = sph_dec32le_aligned((const unsigned char *)msg +  0);
	x0 = p4 ^ rk0;
	rk1 = sph_dec32le_aligned((const unsigned char *)msg +  4);
	x1 = p5 ^ rk1;
	rk2 = sph_dec32le_aligned((const unsigned char *)msg +  8);
	x2 = p6 ^ rk2;
	rk3 = sph_dec32le_aligned((const unsigned char *)msg + 12);
	x3 = p7 ^ rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk4 = sph_dec32le_aligned((const unsigned char *)msg + 16);
	x0 ^= rk4;
	rk5 = sph_dec32le_aligned((const unsigned char *)msg + 20);
	x1 ^= rk5;
	rk6 = sph_dec32le_aligned((const unsigned char *)msg + 24);
	x2 ^= rk6;
	rk7 = sph_dec32le_aligned((const unsigned char *)msg + 28);
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk8 = sph_dec32le_aligned((const unsigned char *)msg + 32);
	x0 ^= rk8;
	rk9 = sph_dec32le_aligned((const unsigned char *)msg + 36);
	x1 ^= rk9;
	rkA = sph_dec32le_aligned((const unsigned char *)msg + 40);
	x2 ^= rkA;
	rkB = sph_dec32le_aligned((const unsigned char *)msg + 44);
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 1 */
	rkC = sph_dec32le_aligned((const unsigned char *)msg + 48);
	x0 = p0 ^ rkC;
	rkD = sph_dec32le_aligned((const unsigned char *)msg + 52);
	x1 = p1 ^ rkD;
	rkE = sph_dec32le_aligned((const unsigned char *)msg + 56);
	x2 = p2 ^ rkE;
	rkF = sph_dec32le_aligned((const unsigned char *)msg + 60);
	x3 = p3 ^ rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk0, rk1, rk2, rk3);
	rk0 ^= rkC ^ sc->count0;
	rk1 ^= rkD ^ SPH_T32(~sc->count1);
	rk2 ^= rkE;
	rk3 ^= rkF;
	x0 ^= rk0;
	x1 ^= rk1;
	x2 ^= rk2;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk4, rk5, rk6, rk7);
	rk4 ^= rk0;
	rk5 ^= rk1;
	rk6 ^= rk2;
	rk7 ^= rk3;
	x0 ^= rk4;
	x1 ^= rk5;
	x2 ^= rk6;
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	/* round 2 */
	KEY_EXPAND_ELT(rk8, rk9, rkA, rkB);
	rk8 ^= rk4;
	rk9 ^= rk5;
	rkA ^= rk6;
	rkB ^= rk7;
	x0 = p4 ^ rk8;
	x1 = p5 ^ rk9;
	x2 = p6 ^ rkA;
	x3 = p7 ^ rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rkC, rkD, rkE, rkF);
	rkC ^= rk8;
	rkD ^= rk9;
	rkE ^= rkA;
	rkF ^= rkB;
	x0 ^= rkC;
	x1 ^= rkD;
	x2 ^= rkE;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk0 ^= rkD;
	x0 ^= rk0;
	rk1 ^= rkE;
	x1 ^= rk1;
	rk2 ^= rkF;
	x2 ^= rk2;
	rk3 ^= rk0;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 3 */
	rk4 ^= rk1;
	x0 = p0 ^ rk4;
	rk5 ^= rk2;
	x1 = p1 ^ rk5;
	rk6 ^= rk3;
	x2 = p2 ^ rk6;
	rk7 ^= rk4;
	x3 = p3 ^ rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk8 ^= rk5;
	x0 ^= rk8;
	rk9 ^= rk6;
	x1 ^= rk9;
	rkA ^= rk7;
	x2 ^= rkA;
	rkB ^= rk8;
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rkC ^= rk9;
	x0 ^= rkC;
	rkD ^= rkA;
	x1 ^= rkD;
	rkE ^= rkB;
	x2 ^= rkE;
	rkF ^= rkC;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	/* round 4 */
	KEY_EXPAND_ELT(rk0, rk1, rk2, rk3);
	rk0 ^= rkC;
	rk1 ^= rkD;
	rk2 ^= rkE;
	rk3 ^= rkF;
	x0 = p4 ^ rk0;
	x1 = p5 ^ rk1;
	x2 = p6 ^ rk2;
	x3 = p7 ^ rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk4, rk5, rk6, rk7);
	rk4 ^= rk0;
	rk5 ^= rk1;
	rk6 ^= rk2;
	rk7 ^= rk3;
	x0 ^= rk4;
	x1 ^= rk5;
	x2 ^= rk6;
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk8, rk9, rkA, rkB);
	rk8 ^= rk4;
	rk9 ^= rk5 ^ sc->count1;
	rkA ^= rk6 ^ SPH_T32(~sc->count0);
	rkB ^= rk7;
	x0 ^= rk8;
	x1 ^= rk9;
	x2 ^= rkA;
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 5 */
	KEY_EXPAND_ELT(rkC, rkD, rkE, rkF);
	rkC ^= rk8;
	rkD ^= rk9;
	rkE ^= rkA;
	rkF ^= rkB;
	x0 = p0 ^ rkC;
	x1 = p1 ^ rkD;
	x2 = p2 ^ rkE;
	x3 = p3 ^ rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk0 ^= rkD;
	x0 ^= rk0;
	rk1 ^= rkE;
	x1 ^= rk1;
	rk2 ^= rkF;
	x2 ^= rk2;
	rk3 ^= rk0;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk4 ^= rk1;
	x0 ^= rk4;
	rk5 ^= rk2;
	x1 ^= rk5;
	rk6 ^= rk3;
	x2 ^= rk6;
	rk7 ^= rk4;
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	/* round 6 */
	rk8 ^= rk5;
	x0 = p4 ^ rk8;
	rk9 ^= rk6;
	x1 = p5 ^ rk9;
	rkA ^= rk7;
	x2 = p6 ^ rkA;
	rkB ^= rk8;
	x3 = p7 ^ rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rkC ^= rk9;
	x0 ^= rkC;
	rkD ^= rkA;
	x1 ^= rkD;
	rkE ^= rkB;
	x2 ^= rkE;
	rkF ^= rkC;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk0, rk1, rk2, rk3);
	rk0 ^= rkC;
	rk1 ^= rkD;
	rk2 ^= rkE;
	rk3 ^= rkF;
	x0 ^= rk0;
	x1 ^= rk1;
	x2 ^= rk2;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 7 */
	KEY_EXPAND_ELT(rk4, rk5, rk6, rk7);
	rk4 ^= rk0;
	rk5 ^= rk1;
	rk6 ^= rk2 ^ sc->count1;
	rk7 ^= rk3 ^ SPH_T32(~sc->count0);
	x0 = p0 ^ rk4;
	x1 = p1 ^ rk5;
	x2 = p2 ^ rk6;
	x3 = p3 ^ rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk8, rk9, rkA, rkB);
	rk8 ^= rk4;
	rk9 ^= rk5;
	rkA ^= rk6;
	rkB ^= rk7;
	x0 ^= rk8;
	x1 ^= rk9;
	x2 ^= rkA;
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rkC, rkD, rkE, rkF);
	rkC ^= rk8;
	rkD ^= rk9;
	rkE ^= rkA;
	rkF ^= rkB;
	x0 ^= rkC;
	x1 ^= rkD;
	x2 ^= rkE;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	/* round 8 */
	rk0 ^= rkD;
	x0 = p4 ^ rk0;
	rk1 ^= rkE;
	x1 = p5 ^ rk1;
	rk2 ^= rkF;
	x2 = p6 ^ rk2;
	rk3 ^= rk0;
	x3 = p7 ^ rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk4 ^= rk1;
	x0 ^= rk4;
	rk5 ^= rk2;
	x1 ^= rk5;
	rk6 ^= rk3;
	x2 ^= rk6;
	rk7 ^= rk4;
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk8 ^= rk5;
	x0 ^= rk8;
	rk9 ^= rk6;
	x1 ^= rk9;
	rkA ^= rk7;
	x2 ^= rkA;
	rkB ^= rk8;
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 9 */
	rkC ^= rk9;
	x0 = p0 ^ rkC;
	rkD ^= rkA;
	x1 = p1 ^ rkD;
	rkE ^= rkB;
	x2 = p2 ^ rkE;
	rkF ^= rkC;
	x3 = p3 ^ rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk0, rk1, rk2, rk3);
	rk0 ^= rkC;
	rk1 ^= rkD;
	rk2 ^= rkE;
	rk3 ^= rkF;
	x0 ^= rk0;
	x1 ^= rk1;
	x2 ^= rk2;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rk4, rk5, rk6, rk7);
	rk4 ^= rk0;
	rk5 ^= rk1;
	rk6 ^= rk2;
	rk7 ^= rk3;
	x0 ^= rk4;
	x1 ^= rk5;
	x2 ^= rk6;
	x3 ^= rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	/* round 10 */
	KEY_EXPAND_ELT(rk8, rk9, rkA, rkB);
	rk8 ^= rk4;
	rk9 ^= rk5;
	rkA ^= rk6;
	rkB ^= rk7;
	x0 = p4 ^ rk8;
	x1 = p5 ^ rk9;
	x2 = p6 ^ rkA;
	x3 = p7 ^ rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	KEY_EXPAND_ELT(rkC, rkD, rkE, rkF);
	rkC ^= rk8 ^ sc->count0;
	rkD ^= rk9;
	rkE ^= rkA;
	rkF ^= rkB ^ SPH_T32(~sc->count1);
	x0 ^= rkC;
	x1 ^= rkD;
	x2 ^= rkE;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk0 ^= rkD;
	x0 ^= rk0;
	rk1 ^= rkE;
	x1 ^= rk1;
	rk2 ^= rkF;
	x2 ^= rk2;
	rk3 ^= rk0;
	x3 ^= rk3;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p0 ^= x0;
	p1 ^= x1;
	p2 ^= x2;
	p3 ^= x3;
	/* round 11 */
	rk4 ^= rk1;
	x0 = p0 ^ rk4;
	rk5 ^= rk2;
	x1 = p1 ^ rk5;
	rk6 ^= rk3;
	x2 = p2 ^ rk6;
	rk7 ^= rk4;
	x3 = p3 ^ rk7;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rk8 ^= rk5;
	x0 ^= rk8;
	rk9 ^= rk6;
	x1 ^= rk9;
	rkA ^= rk7;
	x2 ^= rkA;
	rkB ^= rk8;
	x3 ^= rkB;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	rkC ^= rk9;
	x0 ^= rkC;
	rkD ^= rkA;
	x1 ^= rkD;
	rkE ^= rkB;
	x2 ^= rkE;
	rkF ^= rkC;
	x3 ^= rkF;
	AES_ROUND_NOKEY(x0, x1, x2, x3);
	p4 ^= x0;
	p5 ^= x1;
	p6 ^= x2;
	p7 ^= x3;
	sc->h[0x0] ^= p0;
	sc->h[0x1] ^= p1;
	sc->h[0x2] ^= p2;
	sc->h[0x3] ^= p3;
	sc->h[0x4] ^= p4;
	sc->h[0x5] ^= p5;
	sc->h[0x6] ^= p6;
	sc->h[0x7] ^= p7;
}
Example #13
/*
 * This function assumes that "msg" is aligned for 32-bit access.
 */
static void
c256(sph_shavite_small_context *sc, const void *msg)
{
	sph_u32 p0, p1, p2, p3, p4, p5, p6, p7;
	sph_u32 rk[144];
	size_t u;
	int r, s;

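	/* Precompute the full 144-word round-key schedule, then run the
	   12 Feistel half-rounds (two per iteration of the loop at the
	   bottom). */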
#if SPH_LITTLE_ENDIAN
	memcpy(rk, msg, 64);
#else
	for (u = 0; u < 16; u += 4) {
		rk[u + 0] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  0);
		rk[u + 1] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  4);
		rk[u + 2] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) +  8);
		rk[u + 3] = sph_dec32le_aligned(
			(const unsigned char *)msg + (u << 2) + 12);
	}
#endif
	u = 16;
	for (r = 0; r < 4; r ++) {
		for (s = 0; s < 2; s ++) {
			sph_u32 x0, x1, x2, x3;

			x0 = rk[u - 15];
			x1 = rk[u - 14];
			x2 = rk[u - 13];
			x3 = rk[u - 16];
			AES_ROUND_NOKEY(x0, x1, x2, x3);
			rk[u + 0] = x0 ^ rk[u - 4];
			rk[u + 1] = x1 ^ rk[u - 3];
			rk[u + 2] = x2 ^ rk[u - 2];
			rk[u + 3] = x3 ^ rk[u - 1];
			if (u == 16) {
				rk[ 16] ^= sc->count0;
				rk[ 17] ^= SPH_T32(~sc->count1);
			} else if (u == 56) {
				rk[ 57] ^= sc->count1;
				rk[ 58] ^= SPH_T32(~sc->count0);
			}
			u += 4;

			x0 = rk[u - 15];
			x1 = rk[u - 14];
			x2 = rk[u - 13];
			x3 = rk[u - 16];
			AES_ROUND_NOKEY(x0, x1, x2, x3);
			rk[u + 0] = x0 ^ rk[u - 4];
			rk[u + 1] = x1 ^ rk[u - 3];
			rk[u + 2] = x2 ^ rk[u - 2];
			rk[u + 3] = x3 ^ rk[u - 1];
			if (u == 84) {
				rk[ 86] ^= sc->count1;
				rk[ 87] ^= SPH_T32(~sc->count0);
			} else if (u == 124) {
				rk[124] ^= sc->count0;
				rk[127] ^= SPH_T32(~sc->count1);
			}
			u += 4;
		}
		for (s = 0; s < 4; s ++) {
			rk[u + 0] = rk[u - 16] ^ rk[u - 3];
			rk[u + 1] = rk[u - 15] ^ rk[u - 2];
			rk[u + 2] = rk[u - 14] ^ rk[u - 1];
			rk[u + 3] = rk[u - 13] ^ rk[u - 0];
			u += 4;
		}
	}

	p0 = sc->h[0x0];
	p1 = sc->h[0x1];
	p2 = sc->h[0x2];
	p3 = sc->h[0x3];
	p4 = sc->h[0x4];
	p5 = sc->h[0x5];
	p6 = sc->h[0x6];
	p7 = sc->h[0x7];
	u = 0;
	for (r = 0; r < 6; r ++) {
		sph_u32 x0, x1, x2, x3;

		x0 = p4 ^ rk[u ++];
		x1 = p5 ^ rk[u ++];
		x2 = p6 ^ rk[u ++];
		x3 = p7 ^ rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		x0 ^= rk[u ++];
		x1 ^= rk[u ++];
		x2 ^= rk[u ++];
		x3 ^= rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		x0 ^= rk[u ++];
		x1 ^= rk[u ++];
		x2 ^= rk[u ++];
		x3 ^= rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p0 ^= x0;
		p1 ^= x1;
		p2 ^= x2;
		p3 ^= x3;

		x0 = p0 ^ rk[u ++];
		x1 = p1 ^ rk[u ++];
		x2 = p2 ^ rk[u ++];
		x3 = p3 ^ rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		x0 ^= rk[u ++];
		x1 ^= rk[u ++];
		x2 ^= rk[u ++];
		x3 ^= rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		x0 ^= rk[u ++];
		x1 ^= rk[u ++];
		x2 ^= rk[u ++];
		x3 ^= rk[u ++];
		AES_ROUND_NOKEY(x0, x1, x2, x3);
		p4 ^= x0;
		p5 ^= x1;
		p6 ^= x2;
		p7 ^= x3;
	}
	sc->h[0x0] ^= p0;
	sc->h[0x1] ^= p1;
	sc->h[0x2] ^= p2;
	sc->h[0x3] ^= p3;
	sc->h[0x4] ^= p4;
	sc->h[0x5] ^= p5;
	sc->h[0x6] ^= p6;
	sc->h[0x7] ^= p7;
}
Example #14
/*
 * Write out the HAVAL output, folding the 256-bit internal state down
 * to the requested digest length.
 */
static void
haval_out(sph_haval_context *sc, void *dst)
{
	DSTATE;
	unsigned char *buf;

	buf = dst;
	RSTATE;
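	/* For outputs shorter than 256 bits, the trailing state words
	   (s4..s7) are folded into the leading ones by the mix*
	   functions before little-endian encoding. */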
	switch (sc->olen) {
	case 4:
		sph_enc32le(buf,      SPH_T32(s0 + mix128(s7, s4, s5, s6, 24)));
		sph_enc32le(buf + 4,  SPH_T32(s1 + mix128(s6, s7, s4, s5, 16)));
		sph_enc32le(buf + 8,  SPH_T32(s2 + mix128(s5, s6, s7, s4, 8)));
		sph_enc32le(buf + 12, SPH_T32(s3 + mix128(s4, s5, s6, s7, 0)));
		break;
	case 5:
		sph_enc32le(buf,      SPH_T32(s0 + mix160_0(s5, s6, s7)));
		sph_enc32le(buf + 4,  SPH_T32(s1 + mix160_1(s5, s6, s7)));
		sph_enc32le(buf + 8,  SPH_T32(s2 + mix160_2(s5, s6, s7)));
		sph_enc32le(buf + 12, SPH_T32(s3 + mix160_3(s5, s6, s7)));
		sph_enc32le(buf + 16, SPH_T32(s4 + mix160_4(s5, s6, s7)));
		break;
	case 6:
		sph_enc32le(buf,      SPH_T32(s0 + mix192_0(s6, s7)));
		sph_enc32le(buf + 4,  SPH_T32(s1 + mix192_1(s6, s7)));
		sph_enc32le(buf + 8,  SPH_T32(s2 + mix192_2(s6, s7)));
		sph_enc32le(buf + 12, SPH_T32(s3 + mix192_3(s6, s7)));
		sph_enc32le(buf + 16, SPH_T32(s4 + mix192_4(s6, s7)));
		sph_enc32le(buf + 20, SPH_T32(s5 + mix192_5(s6, s7)));
		break;
	case 7:
		sph_enc32le(buf,      SPH_T32(s0 + ((s7 >> 27) & 0x1F)));
		sph_enc32le(buf + 4,  SPH_T32(s1 + ((s7 >> 22) & 0x1F)));
		sph_enc32le(buf + 8,  SPH_T32(s2 + ((s7 >> 18) & 0x0F)));
		sph_enc32le(buf + 12, SPH_T32(s3 + ((s7 >> 13) & 0x1F)));
		sph_enc32le(buf + 16, SPH_T32(s4 + ((s7 >>  9) & 0x0F)));
		sph_enc32le(buf + 20, SPH_T32(s5 + ((s7 >>  4) & 0x1F)));
		sph_enc32le(buf + 24, SPH_T32(s6 + ((s7      ) & 0x0F)));
		break;
	case 8:
		sph_enc32le(buf,      s0);
		sph_enc32le(buf + 4,  s1);
		sph_enc32le(buf + 8,  s2);
		sph_enc32le(buf + 12, s3);
		sph_enc32le(buf + 16, s4);
		sph_enc32le(buf + 20, s5);
		sph_enc32le(buf + 24, s6);
		sph_enc32le(buf + 28, s7);
		break;
	}
}