Example #1
/**
 * Performs an absorb operation for a single block (BLOCK_LEN_BLAKE2_SAFE_INT64
 * words of type uint64_t), using Blake2b's G function as the internal permutation
 *
 * @param state The current state of the sponge
 * @param in    The block to be absorbed (BLOCK_LEN_BLAKE2_SAFE_INT64 words)
 */
void absorbBlockBlake2Safe(uint64_t *state, const uint64_t *in)
{

//XORs the first BLOCK_LEN_BLAKE2_SAFE_INT64 words of "in" with the current state
#if defined __AVX2__

    __m256i state_v[2], in_v[2];

    state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
    in_v   [0] = _mm256_loadu_si256( (__m256i*)(&in[0]) );
    state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
    in_v   [1] = _mm256_loadu_si256( (__m256i*)(&in[4]) );

    _mm256_store_si256( (__m256i*)(&state[0]),
                          _mm256_xor_si256( state_v[0], in_v[0] ) );
    _mm256_store_si256( (__m256i*)(&state[4]),
                          _mm256_xor_si256( state_v[1], in_v[1] ) );

#elif defined __AVX__

    __m128i state_v[4], in_v[4];

    state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
    state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
    state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
    state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );

    in_v[0]    = _mm_loadu_si128( (__m128i*)(&in[0]) );
    in_v[1]    = _mm_loadu_si128( (__m128i*)(&in[2]) );
    in_v[2]    = _mm_loadu_si128( (__m128i*)(&in[4]) );
    in_v[3]    = _mm_loadu_si128( (__m128i*)(&in[6]) );

    _mm_store_si128( (__m128i*)(&state[0]),
                       _mm_xor_si128( state_v[0], in_v[0] ) );
    _mm_store_si128( (__m128i*)(&state[2]),
                       _mm_xor_si128( state_v[1], in_v[1] ) );
    _mm_store_si128( (__m128i*)(&state[4]),
                       _mm_xor_si128( state_v[2], in_v[2] ) );
    _mm_store_si128( (__m128i*)(&state[6]),
                        _mm_xor_si128( state_v[3], in_v[3] ) );

#else

   state[0] ^= in[0];
   state[1] ^= in[1];
   state[2] ^= in[2];
   state[3] ^= in[3];
   state[4] ^= in[4];
   state[5] ^= in[5];
   state[6] ^= in[6];
   state[7] ^= in[7];

#endif

//Applies the transformation f to the sponge's state
blake2bLyra(state);

}
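
/* A hypothetical usage sketch (not part of the original source): the aligned
 * loads/stores on "state" above require 32-byte alignment, while "in" may be
 * unaligned (hence loadu). The scalar fallback touches in[0..7], suggesting
 * BLOCK_LEN_BLAKE2_SAFE_INT64 == 8; later examples index state[12], so the
 * sponge state is assumed to be 16 uint64_t words. */
uint64_t state[16] __attribute__ ((aligned (32))) = {0};
uint64_t block[8] = {0};             /* fill with message words */
absorbBlockBlake2Safe(state, block);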
Example #2
static fstb_FORCEINLINE void	TransLut_store_avx2 (T *dst_ptr, __m256 val)
{
	_mm256_store_si256 (
		reinterpret_cast <__m256i *> (dst_ptr),
		_mm256_cvtps_epi32 (val)
	);
}
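
/* Hypothetical usage sketch: dst_ptr must be 32-byte aligned and point at
 * eight 32-bit integers (T is assumed to be int32_t here), and
 * _mm256_cvtps_epi32 rounds according to the current MXCSR mode,
 * round-to-nearest-even by default. */
alignas (32) int32_t out [8];
TransLut_store_avx2 (out, _mm256_set1_ps (2.5f));  // stores eight 2s (ties to even)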
Example #3
File: wm.c Project: krh/ksim
static char *
detile_xmajor(struct surface *s, __m256i alpha)
{
	int height = align_u64(s->height, 8);
	void *pixels;
	int tile_stride = s->stride / 512;
	int ret;

	ret = posix_memalign(&pixels, 32, s->stride * height);
	ksim_assert(ret == 0);

	ksim_assert((s->stride & 511) == 0);

	for (int y = 0; y < height; y++) {
		int tile_y = y / 8;
		int iy = y & 7;
		void *src = s->pixels + tile_y * tile_stride * 4096 + iy * 512;
		void *dst = pixels + y * s->stride;

		for (int x = 0; x < tile_stride; x++) {
			for (int c = 0; c < 512; c += 32) {
				__m256i m = _mm256_load_si256(src + x * 4096 + c);
				m = _mm256_or_si256(m, alpha);
				_mm256_store_si256(dst + x * 512 + c, m);
			}
		}
	}

	return pixels;
}
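
/* Address-math sketch derived from the loops above (an assumption about the
 * layout, not taken from ksim itself): an X-major tile is 4096 bytes arranged
 * as 8 rows of 512 bytes, so linear row y, tile column x, byte c reads from */
static inline size_t
xmajor_src_offset(size_t tile_stride, size_t y, size_t x, size_t c)
{
	return (y / 8) * tile_stride * 4096 + x * 4096 + (y & 7) * 512 + c;
}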
Example #4
void
mulrc16_shuffle_avx2(uint8_t *region, uint8_t constant, size_t length)
{
	uint8_t *end;
	register __m256i in, out, t1, t2, m1, m2, l, h;
	register __m128i bc;

	if (constant == 0) {
		memset(region, 0, length);
		return;
	}

	if (constant == 1)
		return;

	bc = _mm_load_si128((void *)tl[constant]);
	t1 = __builtin_ia32_vbroadcastsi256(bc);
	bc = _mm_load_si128((void *)th[constant]);
	t2 = __builtin_ia32_vbroadcastsi256(bc);
	m1 = _mm256_set1_epi8(0x0f);
	m2 = _mm256_set1_epi8(0xf0);

	for (end=region+length; region<end; region+=32) {
		in = _mm256_load_si256((void *)region);
		l = _mm256_and_si256(in, m1);
		l = _mm256_shuffle_epi8(t1, l);
		h = _mm256_and_si256(in, m2);
		h = _mm256_srli_epi64(h, 4);
		h = _mm256_shuffle_epi8(t2, h);
		out = _mm256_xor_si256(h, l);
		_mm256_store_si256((void *)region, out);
	}
}
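
/* Scalar sketch of the shuffle-based multiply above: each byte packs two
 * GF(16) symbols, and the tables are assumed to hold tl[c][x] = c*x in the
 * low nibble and th[c][x] = c*x shifted into the high nibble, so that */
static inline uint8_t
gf16_mul_byte(uint8_t b, uint8_t c)
{
	return tl[c][b & 0x0f] ^ th[c][b >> 4];
}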
Example #5
        template <> SIMD_INLINE void InterpolateX<1>(const __m256i * alpha, __m256i * buffer)
        {
#if defined(_MSC_VER) // Workaround for Visual Studio 2012 compiler bug in release mode:
            __m256i _buffer = _mm256_or_si256(K_ZERO, _mm256_load_si256(buffer));
#else
            __m256i _buffer = _mm256_load_si256(buffer);
#endif
            _mm256_store_si256(buffer, _mm256_maddubs_epi16(_buffer, _mm256_load_si256(alpha)));
        }
Example #6
void PriorityQueue_AVX2::clear()
{
    _size = _current = 0;
    _maxrank = INT_MAX;
    _isPopping = false;

    __m256i max = _mm256_set1_epi32(_maxrank);
    for (int i = 0; i < 5; ++i)
        _mm256_store_si256(_rv + i, max);
}
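
/* Presumably _rv is a 32-byte-aligned array of at least five __m256i
 * (40 int32 slots); broadcasting INT_MAX into every lane marks all
 * slots as empty. */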
Example #7
        template <> SIMD_INLINE void InterpolateX<3>(const __m256i * alpha, __m256i * buffer)
        {
            __m256i src[3], shuffled;
            src[0] = _mm256_load_si256(buffer + 0);
            src[1] = _mm256_load_si256(buffer + 1);
            src[2] = _mm256_load_si256(buffer + 2);

            shuffled = _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[0], src[0], 0x21), K8_SHUFFLE_X3_00);
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(src[0], K8_SHUFFLE_X3_01));
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[0], src[1], 0x21), K8_SHUFFLE_X3_02));
            _mm256_store_si256(buffer + 0, _mm256_maddubs_epi16(shuffled, _mm256_load_si256(alpha + 0)));

            shuffled = _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[0], src[1], 0x21), K8_SHUFFLE_X3_10);
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(src[1], K8_SHUFFLE_X3_11));
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[1], src[2], 0x21), K8_SHUFFLE_X3_12));
            _mm256_store_si256(buffer + 1, _mm256_maddubs_epi16(shuffled, _mm256_load_si256(alpha + 1)));

            shuffled = _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[1], src[2], 0x21), K8_SHUFFLE_X3_20);
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(src[2], K8_SHUFFLE_X3_21));
            shuffled = _mm256_or_si256(shuffled, _mm256_shuffle_epi8(_mm256_permute2x128_si256(src[2], src[2], 0x21), K8_SHUFFLE_X3_22));
            _mm256_store_si256(buffer + 2, _mm256_maddubs_epi16(shuffled, _mm256_load_si256(alpha + 2)));
        }        
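        /* Note on the pattern above: _mm256_shuffle_epi8 cannot move bytes
         * across 128-bit lanes, so _mm256_permute2x128_si256(a, b, 0x21)
         * (the high lane of a concatenated with the low lane of b) is used
         * to put the straddling bytes within reach before each shuffle. */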
Example #8
void
maddrc16_imul_avx2(uint8_t* region1, const uint8_t* region2,
					uint8_t constant, size_t length)
{
	uint8_t *end;
	register __m256i reg1, reg2, ri[4], sp[4], mi[4];
	const uint8_t *p = pt[constant];

	if (constant == 0)
		return;

	if (constant == 1) {
		xorr_avx2(region1, region2, length);
		return;
	}
	
	mi[0] = _mm256_set1_epi8(0x11);
	mi[1] = _mm256_set1_epi8(0x22);
	mi[2] = _mm256_set1_epi8(0x44);
	mi[3] = _mm256_set1_epi8(0x88);
	sp[0] = _mm256_set1_epi16(p[0]);
	sp[1] = _mm256_set1_epi16(p[1]);
	sp[2] = _mm256_set1_epi16(p[2]);
	sp[3] = _mm256_set1_epi16(p[3]);

	for (end=region1+length; region1<end; region1+=32, region2+=32) {
		reg2 = _mm256_load_si256((void *)region2);
		reg1 = _mm256_load_si256((void *)region1);
		ri[0] = _mm256_and_si256(reg2, mi[0]);
		ri[1] = _mm256_and_si256(reg2, mi[1]);
		ri[2] = _mm256_and_si256(reg2, mi[2]);
		ri[3] = _mm256_and_si256(reg2, mi[3]);
		ri[1] = _mm256_srli_epi16(ri[1], 1);
		ri[2] = _mm256_srli_epi16(ri[2], 2);
		ri[3] = _mm256_srli_epi16(ri[3], 3);
		ri[0] = _mm256_mullo_epi16(ri[0], sp[0]);
		ri[1] = _mm256_mullo_epi16(ri[1], sp[1]);
		ri[2] = _mm256_mullo_epi16(ri[2], sp[2]);
		ri[3] = _mm256_mullo_epi16(ri[3], sp[3]);
		ri[0] = _mm256_xor_si256(ri[0], ri[1]);
		ri[2] = _mm256_xor_si256(ri[2], ri[3]);
		ri[0] = _mm256_xor_si256(ri[0], ri[2]);
		ri[0] = _mm256_xor_si256(ri[0], reg1);
		_mm256_store_si256((void *)region1, ri[0]);
	}
}
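
/* Scalar sketch of the bit decomposition above: a 4-bit symbol a is the XOR
 * of its set bits, so a*c = XOR over set bits i of (2^i)*c, with pt[c][i]
 * assumed to hold the precomputed GF(16) product (2^i)*c. The masks
 * 0x11/0x22/0x44/0x88 extract bit i of both nibbles of every byte at once,
 * and the 16-bit integer multiply then places (2^i)*c at the right nibble. */
static inline uint8_t
gf16_mul_bitwise(uint8_t a, const uint8_t *p)	/* p = pt[c] */
{
	uint8_t r = 0;
	for (int i = 0; i < 4; i++)
		if (a & (1u << i))
			r ^= p[i];
	return r;
}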
Example #9
int main() {
    unsigned int i, bytes_read, n, written;
    char buffer[BUFF_SIZE];

    __m256i* mem256 = (__m256i*)&mem[0];
    __m256i* final =  (__m256i*)&mem[999936];
    __m256i zero256 = _mm256_setzero_si256();
    for (; mem256 < final; mem256 += 8) {
        _mm256_store_si256(&mem256[0], zero256);
        _mm256_store_si256(&mem256[1], zero256);
        _mm256_store_si256(&mem256[2], zero256);
        _mm256_store_si256(&mem256[3], zero256);
        _mm256_store_si256(&mem256[4], zero256);
        _mm256_store_si256(&mem256[5], zero256);
        _mm256_store_si256(&mem256[6], zero256);
        _mm256_store_si256(&mem256[7], zero256);
    }

    final = (__m256i*) &mem[1000000];
Example #10
    inline void Sort4Deg6(__m256 llrI, int pos[], int ipos[])
    {
        int llr[8] __attribute__((aligned(64)));
        const auto v1 = _mm256_set1_ps( 67108864.0f );
        const auto v2 = _mm256_mul_ps( v1, llrI );
        _mm256_store_si256((__m256i *)llr,  _mm256_cvttps_epi32(v2));
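        // Scaling by 2^26 and truncating maps the float LLRs to integers
        // that preserve their ordering for the comparisons below.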

        //register float x0,x1,x2,x3,x4,x5;
        const auto x0 = llr[0];
        const auto x1 = llr[1];
        const auto x2 = llr[2];
        const auto x3 = llr[3];
        const auto x4 = llr[4];
        const auto x5 = llr[5];
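        // Rank sort: each oK counts how many of the six values are smaller
        // than xK, with ties broken by index (strict '<' against later
        // elements, '<=' against earlier ones), so o0..o5 is a permutation
        // of 0..5 and o5 = 15 - (o0+o1+o2+o3+o4).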
        int o0 = (x0<x1) +(x0<x2)+(x0<x3)+(x0<x4)+(x0<x5);
        int o1 = (x1<=x0)+(x1<x2)+(x1<x3)+(x1<x4)+(x1<x5);
        int o2 = (x2<=x0)+(x2<=x1)+(x2<x3)+(x2<x4)+(x2<x5);
        int o3 = (x3<=x0)+(x3<=x1)+(x3<=x2)+(x3<x4)+(x3<x5);
        int o4 = (x4<=x0)+(x4<=x1)+(x4<=x2)+(x4<=x3)+(x4<x5);
        int o5 = 15-(o0+o1+o2+o3+o4);
        pos[o0] =  0;  pos[o1]= 1;  pos[o2]= 2;  pos[o3]= 3;  pos[o4]= 4;  pos[o5]= 5;  pos[6]=6;  pos[7]=7;
        ipos[ 0] = o0; ipos[ 1]=o1; ipos[ 2]=o2; ipos[ 3]=o3; ipos[ 4]=o4; ipos[ 5]=o5; ipos[6]=6; ipos[7]=7;
    }
Example #11
void
maddrc16_shuffle_avx2(uint8_t* region1, const uint8_t* region2,
					uint8_t constant, size_t length)
{
	uint8_t *end;
	register __m256i in1, in2, out, t1, t2, m1, m2, l, h;
	register __m128i bc;

	if (constant == 0)
		return;

	if (constant == 1) {
		xorr_avx2(region1, region2, length);
		return;
	}

	bc = _mm_load_si128((void *)tl[constant]);
	t1 = __builtin_ia32_vbroadcastsi256(bc);
	bc = _mm_load_si128((void *)th[constant]);
	t2 = __builtin_ia32_vbroadcastsi256(bc);
	m1 = _mm256_set1_epi8(0x0f);
	m2 = _mm256_set1_epi8(0xf0);

	for (end=region1+length; region1<end; region1+=32, region2+=32) {
		in2 = _mm256_load_si256((void *)region2);
		in1 = _mm256_load_si256((void *)region1);
		l = _mm256_and_si256(in2, m1);
		l = _mm256_shuffle_epi8(t1, l);
		h = _mm256_and_si256(in2, m2);
		h = _mm256_srli_epi64(h, 4);
		h = _mm256_shuffle_epi8(t2, h);
		out = _mm256_xor_si256(h,l);
		out = _mm256_xor_si256(out, in1);
		_mm256_store_si256((void *)region1, out);
	}
}
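
/* Same nibble-table technique as mulrc16_shuffle_avx2 in Example #4, with an
 * extra XOR against region1, i.e. region1[i] ^= region2[i] * constant
 * in GF(16). */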
Example #12
/**
 * Performs a reduced duplex operation for a single row, from the highest to
 * the lowest index, using the reduced-round Blake2b's G function as the
 * internal permutation
 *
 * @param state		The current state of the sponge
 * @param rowIn		Row to feed the sponge
 * @param rowOut	Row to receive the sponge's output
 */
void reducedDuplexRow1(uint64_t *state, uint64_t *rowIn, uint64_t *rowOut, const uint32_t nCols)
{
   uint64_t* ptrWordIn = rowIn;				//In Lyra2: pointer to prev
   uint64_t* ptrWordOut = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to row
   unsigned int i;

   for (i = 0; i < nCols; i++)
   {
      //Absorbing "M[prev][col]"
      #if defined __AVX2__

         __m256i state_v[3], in_v[3];

         state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
         in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
         state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
         in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
         state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
         in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );
 
         _mm256_store_si256( (__m256i*)(&state[0]),
                              _mm256_xor_si256( state_v[0], in_v[0] ) );
         _mm256_store_si256( (__m256i*)(&state[4]),
                              _mm256_xor_si256( state_v[1], in_v[1] ) );
         _mm256_store_si256( (__m256i*)(&state[8]),
                              _mm256_xor_si256( state_v[2], in_v[2] ) );

      #elif defined __AVX__

         __m128i state_v[6], in_v[6];

         state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
         state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
         state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
         state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
         state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
         state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

         in_v[0]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[0]) );
         in_v[1]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[2]) );
         in_v[2]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[4]) );
         in_v[3]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[6]) );
         in_v[4]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[8]) );
         in_v[5]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[10]) );

         _mm_store_si128( (__m128i*)(&state[0]),
                           _mm_xor_si128( state_v[0], in_v[0] ) );
         _mm_store_si128( (__m128i*)(&state[2]),
                           _mm_xor_si128( state_v[1], in_v[1] ) );
         _mm_store_si128( (__m128i*)(&state[4]),
                           _mm_xor_si128( state_v[2], in_v[2] ) );
         _mm_store_si128( (__m128i*)(&state[6]),
                           _mm_xor_si128( state_v[3], in_v[3] ) );
         _mm_store_si128( (__m128i*)(&state[8]),
                           _mm_xor_si128( state_v[4], in_v[4] ) );
         _mm_store_si128( (__m128i*)(&state[10]),
                           _mm_xor_si128( state_v[5], in_v[5] ) );

      #else

         state[0]  ^= (ptrWordIn[0]);
         state[1]  ^= (ptrWordIn[1]);
         state[2]  ^= (ptrWordIn[2]);
         state[3]  ^= (ptrWordIn[3]);
         state[4]  ^= (ptrWordIn[4]);
         state[5]  ^= (ptrWordIn[5]);
         state[6]  ^= (ptrWordIn[6]);
         state[7]  ^= (ptrWordIn[7]);
         state[8]  ^= (ptrWordIn[8]);
         state[9]  ^= (ptrWordIn[9]);
         state[10] ^= (ptrWordIn[10]);
         state[11] ^= (ptrWordIn[11]);

      #endif

      //Applies the reduced-round transformation f to the sponge's state
      reducedBlake2bLyra(state);

      //M[row][C-1-col] = M[prev][col] XOR rand
      #if defined __AVX2__
// in_v should not need to be reloaded, but it does, and it segfaults if
// loaded aligned
         state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
         in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
         state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
         in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
         state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
         in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );

         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                              _mm256_xor_si256( state_v[0], in_v[0] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                              _mm256_xor_si256( state_v[1], in_v[1] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                              _mm256_xor_si256( state_v[2], in_v[2] ) );

      #elif defined __AVX__

         state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
         state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
         state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
         state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
         state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
         state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

         in_v[0]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[0]) );
         in_v[1]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[2]) );
         in_v[2]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[4]) );
         in_v[3]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[6]) );
         in_v[4]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[8]) );
         in_v[5]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[10]) );

         _mm_storeu_si128( (__m128i*)(&ptrWordOut[0]),
                           _mm_xor_si128( state_v[0], in_v[0] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[2]),
                           _mm_xor_si128( state_v[1], in_v[1] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[4]),
                           _mm_xor_si128( state_v[2], in_v[2] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[6]),
                           _mm_xor_si128( state_v[3], in_v[3] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[8]),
                           _mm_xor_si128( state_v[4], in_v[4] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[10]),
                            _mm_xor_si128( state_v[5], in_v[5] ) );

      #else

        ptrWordOut[0] = ptrWordIn[0]  ^ state[0];
        ptrWordOut[1] = ptrWordIn[1]  ^ state[1];
        ptrWordOut[2] = ptrWordIn[2]  ^ state[2];
        ptrWordOut[3] = ptrWordIn[3]  ^ state[3];
        ptrWordOut[4] = ptrWordIn[4]  ^ state[4];
        ptrWordOut[5] = ptrWordIn[5]  ^ state[5];
        ptrWordOut[6] = ptrWordIn[6]  ^ state[6];
        ptrWordOut[7] = ptrWordIn[7]  ^ state[7];
        ptrWordOut[8] = ptrWordIn[8]  ^ state[8];
        ptrWordOut[9] = ptrWordIn[9]  ^ state[9];
        ptrWordOut[10] = ptrWordIn[10] ^ state[10];
        ptrWordOut[11] = ptrWordIn[11] ^ state[11];
     #endif

     //Input: next column (i.e., next block in sequence)
     ptrWordIn += BLOCK_LEN_INT64;
     //Output: goes to previous column
     ptrWordOut -= BLOCK_LEN_INT64;
   }
}
Example #13
	EvalSum(const EvalSum& es) {
		_mm256_store_si256(&mm, es.mm);
	}
	EvalSum& operator = (const EvalSum& rhs) {
		_mm256_store_si256(&mm, rhs.mm);
		return *this;
	}
Example #15
 /*!
  * \brief Aligned store of the given packed vector at the
  * given memory position
  */
 ETL_STATIC_INLINE(void) store(int64_t* memory, avx_simd_long value) {
     _mm256_store_si256(reinterpret_cast<__m256i*>(memory), value.value);
 }
Example #16
 SIMD_INLINE void InterpolateX4(const __m256i * alpha, __m256i * buffer)
 {
     __m256i src = _mm256_shuffle_epi8(_mm256_load_si256(buffer), K8_SHUFFLE_X4);
     _mm256_store_si256(buffer, _mm256_maddubs_epi16(src, _mm256_load_si256(alpha)));
 }
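
 /* For reference: _mm256_maddubs_epi16 multiplies unsigned bytes of its
  * first operand by signed bytes of its second and adds adjacent products
  * into saturated 16-bit lanes, i.e. a two-tap horizontal filter, which is
  * why the pixels are shuffled into adjacent pairs first. */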
Example #17
/**
 * Performs a reduced duplex operation for a single row, from the highest to
 * the lowest index, using the reduced-round Blake2b's G function as the
 * internal permutation
 *
 * @param state		The current state of the sponge
 * @param rowIn		Row to feed the sponge
 * @param rowOut	Row to receive the sponge's output
 */
inline void reducedDuplexRow1( uint64_t *state, uint64_t *rowIn,
                               uint64_t *rowOut, uint64_t nCols )
{
    uint64_t* ptrWordIn = rowIn;	//In Lyra2: pointer to prev
    uint64_t* ptrWordOut = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to row
    int i;

#if defined __AVX2__
         __m256i state_v[4], in_v[3];
         state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
         state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
         state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
         state_v[3] = _mm256_load_si256( (__m256i*)(&state[12]) );
#endif

    for ( i = 0; i < nCols; i++ )
    {
#if defined __AVX2__

         in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
         in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
         in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );
 
         state_v[0] = _mm256_xor_si256( state_v[0], in_v[0] );
         state_v[1] = _mm256_xor_si256( state_v[1], in_v[1] );
         state_v[2] = _mm256_xor_si256( state_v[2], in_v[2] );

         LYRA_ROUND_AVX2( state_v[0], state_v[1], state_v[2], state_v[3] );

         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                              _mm256_xor_si256( state_v[0], in_v[0] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                              _mm256_xor_si256( state_v[1], in_v[1] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                              _mm256_xor_si256( state_v[2], in_v[2] ) );

#elif defined __AVX__

        __m128i state_v[6], in_v[6];

         state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
         state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
         state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
         state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
         state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
         state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

         in_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordIn[0]) );
         in_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordIn[2]) );
         in_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordIn[4]) );
         in_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordIn[6]) );
         in_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordIn[8]) );
         in_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordIn[10]) );

         _mm_store_si128( (__m128i*)(&state[0]),
                           _mm_xor_si128( state_v[0], in_v[0] ) );
         _mm_store_si128( (__m128i*)(&state[2]),
                           _mm_xor_si128( state_v[1], in_v[1] ) );
         _mm_store_si128( (__m128i*)(&state[4]),
                           _mm_xor_si128( state_v[2], in_v[2] ) );
         _mm_store_si128( (__m128i*)(&state[6]),
                           _mm_xor_si128( state_v[3], in_v[3] ) );
         _mm_store_si128( (__m128i*)(&state[8]),
                           _mm_xor_si128( state_v[4], in_v[4] ) );
         _mm_store_si128( (__m128i*)(&state[10]),
                           _mm_xor_si128( state_v[5], in_v[5] ) );

        //Applies the reduced-round transformation f to the sponge's state
        reducedBlake2bLyra(state);

#else

        //Absorbing "M[prev][col]"
        state[0]  ^= (ptrWordIn[0]);
        state[1]  ^= (ptrWordIn[1]);
        state[2]  ^= (ptrWordIn[2]);
        state[3]  ^= (ptrWordIn[3]);
        state[4]  ^= (ptrWordIn[4]);
        state[5]  ^= (ptrWordIn[5]);
        state[6]  ^= (ptrWordIn[6]);
        state[7]  ^= (ptrWordIn[7]);
        state[8]  ^= (ptrWordIn[8]);
        state[9]  ^= (ptrWordIn[9]);
        state[10] ^= (ptrWordIn[10]);
        state[11] ^= (ptrWordIn[11]);

        //Applies the reduced-round transformation f to the sponge's state
        reducedBlake2bLyra(state);
#endif

      #if defined __AVX2__
/*         state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
         state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
         state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );

         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                              _mm256_xor_si256( state_v[0], in_v[0] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                              _mm256_xor_si256( state_v[1], in_v[1] ) );
         _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                              _mm256_xor_si256( state_v[2], in_v[2] ) );
*/
      #elif defined __AVX__

         state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
         state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
         state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
         state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
         state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
         state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

         _mm_storeu_si128( (__m128i*)(&ptrWordOut[0]),
                           _mm_xor_si128( state_v[0], in_v[0] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[2]),
                           _mm_xor_si128( state_v[1], in_v[1] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[4]),
                           _mm_xor_si128( state_v[2], in_v[2] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[6]),
                           _mm_xor_si128( state_v[3], in_v[3] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[8]),
                           _mm_xor_si128( state_v[4], in_v[4] ) );
         _mm_storeu_si128( (__m128i*)(&ptrWordOut[10]),
                            _mm_xor_si128( state_v[5], in_v[5] ) );

      #else

    //M[row][C-1-col] = M[prev][col] XOR rand
    ptrWordOut[0] = ptrWordIn[0]  ^ state[0];
    ptrWordOut[1] = ptrWordIn[1]  ^ state[1];
    ptrWordOut[2] = ptrWordIn[2]  ^ state[2];
    ptrWordOut[3] = ptrWordIn[3]  ^ state[3];
    ptrWordOut[4] = ptrWordIn[4]  ^ state[4];
    ptrWordOut[5] = ptrWordIn[5]  ^ state[5];
    ptrWordOut[6] = ptrWordIn[6]  ^ state[6];
    ptrWordOut[7] = ptrWordIn[7]  ^ state[7];
    ptrWordOut[8] = ptrWordIn[8]  ^ state[8];
    ptrWordOut[9] = ptrWordIn[9]  ^ state[9];
    ptrWordOut[10] = ptrWordIn[10] ^ state[10];
    ptrWordOut[11] = ptrWordIn[11] ^ state[11];
#endif

       //Input: next column (i.e., next block in sequence)
       ptrWordIn += BLOCK_LEN_INT64;
       //Output: goes to previous column
       ptrWordOut -= BLOCK_LEN_INT64;
    }

#if defined __AVX2__
    _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
    _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
    _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
    _mm256_store_si256( (__m256i*)&state[12], state_v[3] );
#endif

}
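
/* Unlike the reducedDuplexRow1 in Example #12, the AVX2 path here keeps the
 * 16-word sponge state in four ymm registers across all columns, applies the
 * permutation with LYRA_ROUND_AVX2 instead of reducedBlake2bLyra, and writes
 * the state back to memory only once, after the loop. */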
Example #18
/**
 * Performs a duplexing operation over "M[rowInOut][col] [+] M[rowIn][col]" (i.e.,
 * the wordwise addition of two columns, ignoring carries between words). The
 * output of this operation, "rand", is then used to make
 * "M[rowOut][(N_COLS-1)-col] = M[rowIn][col] XOR rand" and
 * "M[rowInOut][col] =  M[rowInOut][col] XOR rotW(rand)", where rotW is a 64-bit
 * rotation to the left and N_COLS is a system parameter.
 *
 * @param state          The current state of the sponge
 * @param rowIn          Row used only as input
 * @param rowInOut       Row used as input and to receive output after rotation
 * @param rowOut         Row receiving the output
 *
 */
inline void reducedDuplexRowSetup( uint64_t *state, uint64_t *rowIn,
                                   uint64_t *rowInOut, uint64_t *rowOut,
                                   uint64_t nCols )
{
    uint64_t* ptrWordIn = rowIn;	//In Lyra2: pointer to prev
    uint64_t* ptrWordInOut = rowInOut;	//In Lyra2: pointer to row*
    uint64_t* ptrWordOut = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to row
    int i;

#if defined __AVX2__
    __m256i state_v[4], in_v[3], inout_v[3];
    #define t_state in_v

    state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
    state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
    state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
    state_v[3] = _mm256_load_si256( (__m256i*)(&state[12]) );

    for ( i = 0; i < nCols; i++ )
    {
       in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
       inout_v[0] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[0]) );
       in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
       inout_v[1] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[4]) );
       in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );
       inout_v[2] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[8]) );

       state_v[0] = _mm256_xor_si256( state_v[0],  _mm256_add_epi64( in_v[0],
                                                                inout_v[0] ) );
       state_v[1] = _mm256_xor_si256( state_v[1],  _mm256_add_epi64( in_v[1],
                                                                inout_v[1] ) );
       state_v[2] = _mm256_xor_si256( state_v[2],  _mm256_add_epi64( in_v[2],
                                                                inout_v[2] ) );

       LYRA_ROUND_AVX2( state_v[0], state_v[1], state_v[2], state_v[3] );

       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                              _mm256_xor_si256( state_v[0], in_v[0] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                              _mm256_xor_si256( state_v[1], in_v[1] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                              _mm256_xor_si256( state_v[2], in_v[2] ) );

       //M[row*][col] = M[row*][col] XOR rotW(rand)
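      // (permute4x64 with 0x93 moves each 64-bit word up one slot inside its
      //  ymm register, word 3 wrapping to word 0, and the 0x03 blends pull
      //  the wrapped word from the neighbouring vector, so the 12-word block
      //  is rotated by one word overall: inout[k] ^= state[(k+11) % 12],
      //  matching the scalar #else path.)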
      t_state[0] = _mm256_permute4x64_epi64( state_v[0], 0x93 );
      t_state[1] = _mm256_permute4x64_epi64( state_v[1], 0x93 );
      t_state[2] = _mm256_permute4x64_epi64( state_v[2], 0x93 );

      inout_v[0] = _mm256_xor_si256( inout_v[0],
                         _mm256_blend_epi32( t_state[0], t_state[2], 0x03 ) );
      inout_v[1] = _mm256_xor_si256( inout_v[1],
                         _mm256_blend_epi32( t_state[1], t_state[0], 0x03 ) );
      inout_v[2] = _mm256_xor_si256( inout_v[2],
                         _mm256_blend_epi32( t_state[2], t_state[1], 0x03 ) );

      _mm256_storeu_si256( (__m256i*)&ptrWordInOut[0], inout_v[0] );
      _mm256_storeu_si256( (__m256i*)&ptrWordInOut[4], inout_v[1] );
      _mm256_storeu_si256( (__m256i*)&ptrWordInOut[8], inout_v[2] );

       //Inputs: next column (i.e., next block in sequence)
       ptrWordInOut += BLOCK_LEN_INT64;
       ptrWordIn += BLOCK_LEN_INT64;
       //Output: goes to previous column
       ptrWordOut -= BLOCK_LEN_INT64;
    }

    _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
    _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
    _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
    _mm256_store_si256( (__m256i*)&state[12], state_v[3] );

    #undef t_state 

#elif defined __AVX__

        __m128i state_v[6], in_v[6], inout_v[6];

    for ( i = 0; i < nCols; i++ )
    {

        state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
        state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
        state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
        state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
        state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
        state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

        inout_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[0]) );
        inout_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[2]) );
        inout_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[4]) );
        inout_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[6]) );
        inout_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[8]) );
        inout_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[10]) );

        in_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordIn[0]) );
        in_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordIn[2]) );
        in_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordIn[4]) );
        in_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordIn[6]) );
        in_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordIn[8]) );
        in_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordIn[10]) );

        _mm_store_si128( (__m128i*)(&state[0]),
                          _mm_xor_si128( state_v[0],
                                         _mm_add_epi64( in_v[0],
                                                        inout_v[0] ) ) );
        _mm_store_si128( (__m128i*)(&state[2]),
                          _mm_xor_si128( state_v[1],
                                         _mm_add_epi64( in_v[1],
                                                        inout_v[1] ) ) );
        _mm_store_si128( (__m128i*)(&state[4]),
                          _mm_xor_si128( state_v[2],
                                         _mm_add_epi64( in_v[2],
                                                        inout_v[2] ) ) );
        _mm_store_si128( (__m128i*)(&state[6]),
                          _mm_xor_si128( state_v[3],
                                         _mm_add_epi64( in_v[3],
                                                        inout_v[3] ) ) );
        _mm_store_si128( (__m128i*)(&state[8]),
                          _mm_xor_si128( state_v[4],
                                         _mm_add_epi64( in_v[4],
                                                        inout_v[4] ) ) );
        _mm_store_si128( (__m128i*)(&state[10]),
                          _mm_xor_si128( state_v[5],
                                         _mm_add_epi64( in_v[5],
                                                        inout_v[5] ) ) );

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);
#else
    for ( i = 0; i < nCols; i++ )
    {

    //Absorbing "M[prev] [+] M[row*]"
    state[0]  ^= (ptrWordIn[0]  + ptrWordInOut[0]);
    state[1]  ^= (ptrWordIn[1]  + ptrWordInOut[1]);
    state[2]  ^= (ptrWordIn[2]  + ptrWordInOut[2]);
    state[3]  ^= (ptrWordIn[3]  + ptrWordInOut[3]);
    state[4]  ^= (ptrWordIn[4]  + ptrWordInOut[4]);
    state[5]  ^= (ptrWordIn[5]  + ptrWordInOut[5]);
    state[6]  ^= (ptrWordIn[6]  + ptrWordInOut[6]);
    state[7]  ^= (ptrWordIn[7]  + ptrWordInOut[7]);
    state[8]  ^= (ptrWordIn[8]  + ptrWordInOut[8]);
    state[9]  ^= (ptrWordIn[9]  + ptrWordInOut[9]);
    state[10] ^= (ptrWordIn[10] + ptrWordInOut[10]);
    state[11] ^= (ptrWordIn[11] + ptrWordInOut[11]);
    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);

    //M[row][col] = M[prev][col] XOR rand
#endif


      #if defined __AVX2__

      #elif defined __AVX__

         state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
         state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
         state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
         state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
         state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
         state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

         _mm_store_si128( (__m128i*)(&ptrWordOut[0]),
                           _mm_xor_si128( state_v[0], in_v[0] ) );
         _mm_store_si128( (__m128i*)(&ptrWordOut[2]),
                           _mm_xor_si128( state_v[1], in_v[1] ) );
         _mm_store_si128( (__m128i*)(&ptrWordOut[4]),
                           _mm_xor_si128( state_v[2], in_v[2] ) );
         _mm_store_si128( (__m128i*)(&ptrWordOut[6]),
                           _mm_xor_si128( state_v[3], in_v[3] ) );
         _mm_store_si128( (__m128i*)(&ptrWordOut[8]),
                           _mm_xor_si128( state_v[4], in_v[4] ) );
         _mm_store_si128( (__m128i*)(&ptrWordOut[10]),
                           _mm_xor_si128( state_v[5], in_v[5] ) );

      #else

    ptrWordOut[0] = ptrWordIn[0]  ^ state[0];
    ptrWordOut[1] = ptrWordIn[1]  ^ state[1];
    ptrWordOut[2] = ptrWordIn[2]  ^ state[2];
    ptrWordOut[3] = ptrWordIn[3]  ^ state[3];
    ptrWordOut[4] = ptrWordIn[4]  ^ state[4];
    ptrWordOut[5] = ptrWordIn[5]  ^ state[5];
    ptrWordOut[6] = ptrWordIn[6]  ^ state[6];
    ptrWordOut[7] = ptrWordIn[7]  ^ state[7];
    ptrWordOut[8] = ptrWordIn[8]  ^ state[8];
    ptrWordOut[9] = ptrWordIn[9]  ^ state[9];
    ptrWordOut[10] = ptrWordIn[10] ^ state[10];
    ptrWordOut[11] = ptrWordIn[11] ^ state[11];
#endif

    //M[row*][col] = M[row*][col] XOR rotW(rand)
// Need to fix this before taking state load/store out of loop
#ifdef __AVX2__


#else

    ptrWordInOut[0]  ^= state[11];
    ptrWordInOut[1]  ^= state[0];
    ptrWordInOut[2]  ^= state[1];
    ptrWordInOut[3]  ^= state[2];
    ptrWordInOut[4]  ^= state[3];
    ptrWordInOut[5]  ^= state[4];
    ptrWordInOut[6]  ^= state[5];
    ptrWordInOut[7]  ^= state[6];
    ptrWordInOut[8]  ^= state[7];
    ptrWordInOut[9]  ^= state[8];
    ptrWordInOut[10] ^= state[9];
    ptrWordInOut[11] ^= state[10];

       //Inputs: next column (i.e., next block in sequence)
       ptrWordInOut += BLOCK_LEN_INT64;
       ptrWordIn += BLOCK_LEN_INT64;
       //Output: goes to previous column
       ptrWordOut -= BLOCK_LEN_INT64;
    }

#endif

}

/**
 * Performs a duplexing operation over "M[rowInOut][col] [+] M[rowIn][col]" (i.e.,
 * the wordwise addition of two columns, ignoring carries between words). The
 * output of this operation, "rand", is then used to make
 * "M[rowOut][col] = M[rowOut][col] XOR rand" and
 * "M[rowInOut][col] =  M[rowInOut][col] XOR rotW(rand)", where rotW is a 64-bit
 * rotation to the left.
 *
 * @param state          The current state of the sponge
 * @param rowIn          Row used only as input
 * @param rowInOut       Row used as input and to receive output after rotation
 * @param rowOut         Row receiving the output
 *
 */
inline void reducedDuplexRow( uint64_t *state, uint64_t *rowIn,
                              uint64_t *rowInOut, uint64_t *rowOut,
                              uint64_t nCols )
{
    uint64_t* ptrWordInOut = rowInOut; //In Lyra2: pointer to row*
    uint64_t* ptrWordIn = rowIn; //In Lyra2: pointer to prev
    uint64_t* ptrWordOut = rowOut; //In Lyra2: pointer to row
    int i;


#if defined __AVX2__

    for ( i = 0; i < nCols; i++)
    {

       //Absorbing "M[prev] [+] M[row*]"

       __m256i state_v[4], in_v[3], inout_v[3];
       #define out_v in_v    // reuse register in next code block
       #define t_state in_v
       state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
       in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
       inout_v[0] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[0]) );
       state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
       in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
       inout_v[1] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[4]) );
       state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
       in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );
       inout_v[2] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[8]) );
       state_v[3] = _mm256_load_si256( (__m256i*)(&state[12]) );

       state_v[0] = _mm256_xor_si256( state_v[0], _mm256_add_epi64( in_v[0],
                                                               inout_v[0] ) );
       state_v[1] = _mm256_xor_si256( state_v[1], _mm256_add_epi64( in_v[1],
                                                               inout_v[1] ) );
       state_v[2] = _mm256_xor_si256( state_v[2], _mm256_add_epi64( in_v[2],
                                                               inout_v[2] ) );

       out_v[0] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[0]) );
       out_v[1] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[4]) );
       out_v[2] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[8]) );

       LYRA_ROUND_AVX2( state_v[0], state_v[1], state_v[2], state_v[3] );

       _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
       _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
       _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
       _mm256_store_si256( (__m256i*)&state[12], state_v[3] );

       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                            _mm256_xor_si256( state_v[0], out_v[0] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                            _mm256_xor_si256( state_v[1], out_v[1] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                            _mm256_xor_si256( state_v[2], out_v[2] ) );

/*
       t_state[0] = _mm256_permute4x64_epi64( state_v[0], 0x93 );
       t_state[1] = _mm256_permute4x64_epi64( state_v[1], 0x93 );
       t_state[2] = _mm256_permute4x64_epi64( state_v[2], 0x93 );

       inout_v[0] = _mm256_xor_si256( inout_v[0],
                        _mm256_blend_epi32( t_state[0], t_state[2], 0x03 ) );
       inout_v[1] = _mm256_xor_si256( inout_v[1], 
                        _mm256_blend_epi32( t_state[1], t_state[0], 0x03 ) );
       inout_v[2] = _mm256_xor_si256( inout_v[2], 
                        _mm256_blend_epi32( t_state[2], t_state[1], 0x03 ) );

       _mm256_storeu_si256( (__m256i*)(&ptrWordInOut[0]), inout_v[0] );
       _mm256_storeu_si256( (__m256i*)(&ptrWordInOut[4]), inout_v[1] );
       _mm256_storeu_si256( (__m256i*)(&ptrWordInOut[8]), inout_v[2] );

       _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
       _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
       _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
       _mm256_store_si256( (__m256i*)&state[12], state_v[3] );
*/
       #undef out_v
       #undef t_state 

    //M[rowInOut][col] = M[rowInOut][col] XOR rotW(rand)
    ptrWordInOut[0] ^= state[11];
    ptrWordInOut[1] ^= state[0];
    ptrWordInOut[2] ^= state[1];
    ptrWordInOut[3] ^= state[2];
    ptrWordInOut[4] ^= state[3];
    ptrWordInOut[5] ^= state[4];
    ptrWordInOut[6] ^= state[5];
    ptrWordInOut[7] ^= state[6];
    ptrWordInOut[8] ^= state[7];
    ptrWordInOut[9] ^= state[8];
    ptrWordInOut[10] ^= state[9];
    ptrWordInOut[11] ^= state[10];

       //Goes to next block
       ptrWordOut += BLOCK_LEN_INT64;
       ptrWordInOut += BLOCK_LEN_INT64;
       ptrWordIn += BLOCK_LEN_INT64;
    }

   #elif defined __AVX__

    for ( i = 0; i < nCols; i++)
    {

       __m128i state_v[6], in_v[6], inout_v[6];
       #define out_v in_v    // reuse register in next code block

       state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
       state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
       state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
       state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
       state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
       state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

       inout_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[0]) );
       inout_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[2]) );
       inout_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[4]) );
       inout_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[6]) );
       inout_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[8]) );
       inout_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordInOut[10]) );

       in_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordIn[0]) );
       in_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordIn[2]) );
       in_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordIn[4]) );
       in_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordIn[6]) );
       in_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordIn[8]) );
       in_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordIn[10]) );

       _mm_store_si128( (__m128i*)(&state[0]),
                         _mm_xor_si128( state_v[0],
                                        _mm_add_epi64( in_v[0],
                                                       inout_v[0] ) ) );
       _mm_store_si128( (__m128i*)(&state[2]),
                         _mm_xor_si128( state_v[1],
                                        _mm_add_epi64( in_v[1],
                                                       inout_v[1] ) ) );
       _mm_store_si128( (__m128i*)(&state[4]),
                         _mm_xor_si128( state_v[2],
                                        _mm_add_epi64( in_v[2],
                                                       inout_v[2] ) ) );
       _mm_store_si128( (__m128i*)(&state[6]),
                         _mm_xor_si128( state_v[3],
                                        _mm_add_epi64( in_v[3],
                                                       inout_v[3] ) ) );
       _mm_store_si128( (__m128i*)(&state[8]),
                         _mm_xor_si128( state_v[4],
                                        _mm_add_epi64( in_v[4],
                                                       inout_v[4] ) ) );
       _mm_store_si128( (__m128i*)(&state[10]),
                         _mm_xor_si128( state_v[5],
                                        _mm_add_epi64( in_v[5],
                                                       inout_v[5] ) ) );

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);

   #else

    for ( i = 0; i < nCols; i++)
    {

    state[0]  ^= (ptrWordIn[0]  + ptrWordInOut[0]);
    state[1]  ^= (ptrWordIn[1]  + ptrWordInOut[1]);
    state[2]  ^= (ptrWordIn[2]  + ptrWordInOut[2]);
    state[3]  ^= (ptrWordIn[3]  + ptrWordInOut[3]);
    state[4]  ^= (ptrWordIn[4]  + ptrWordInOut[4]);
    state[5]  ^= (ptrWordIn[5]  + ptrWordInOut[5]);
    state[6]  ^= (ptrWordIn[6]  + ptrWordInOut[6]);
    state[7]  ^= (ptrWordIn[7]  + ptrWordInOut[7]);
    state[8]  ^= (ptrWordIn[8]  + ptrWordInOut[8]);
    state[9]  ^= (ptrWordIn[9]  + ptrWordInOut[9]);
    state[10] ^= (ptrWordIn[10] + ptrWordInOut[10]);
    state[11] ^= (ptrWordIn[11] + ptrWordInOut[11]);

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);
#endif

    //M[rowOut][col] = M[rowOut][col] XOR rand

    #if defined __AVX2__
/*
       state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
       out_v  [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[0]) );
       state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
       out_v  [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[4]) );
       state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
       out_v  [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[8]) );

       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                            _mm256_xor_si256( state_v[0], out_v[0] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                            _mm256_xor_si256( state_v[1], out_v[1] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                            _mm256_xor_si256( state_v[2], out_v[2] ) );
*/
    #elif defined __AVX__

       state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
       state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
       state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
       state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
       state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
       state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

       out_v[0]    = _mm_load_si128( (__m128i*)(&ptrWordOut[0]) );
       out_v[1]    = _mm_load_si128( (__m128i*)(&ptrWordOut[2]) );
       out_v[2]    = _mm_load_si128( (__m128i*)(&ptrWordOut[4]) );
       out_v[3]    = _mm_load_si128( (__m128i*)(&ptrWordOut[6]) );
       out_v[4]    = _mm_load_si128( (__m128i*)(&ptrWordOut[8]) );
       out_v[5]    = _mm_load_si128( (__m128i*)(&ptrWordOut[10]) );

       _mm_store_si128( (__m128i*)(&ptrWordOut[0]),
                         _mm_xor_si128( state_v[0], out_v[0] ) );
       _mm_store_si128( (__m128i*)(&ptrWordOut[2]),
                         _mm_xor_si128( state_v[1], out_v[1] ) );
       _mm_store_si128( (__m128i*)(&ptrWordOut[4]),
                         _mm_xor_si128( state_v[2], out_v[2] ) );
       _mm_store_si128( (__m128i*)(&ptrWordOut[6]),
                         _mm_xor_si128( state_v[3], out_v[3] ) );
       _mm_store_si128( (__m128i*)(&ptrWordOut[8]),
                         _mm_xor_si128( state_v[4], out_v[4] ) );
       _mm_store_si128( (__m128i*)(&ptrWordOut[10]),
                         _mm_xor_si128( state_v[5], out_v[5] ) );

    //M[rowInOut][col] = M[rowInOut][col] XOR rotW(rand)
    ptrWordInOut[0] ^= state[11];
    ptrWordInOut[1] ^= state[0];
    ptrWordInOut[2] ^= state[1];
    ptrWordInOut[3] ^= state[2];
    ptrWordInOut[4] ^= state[3];
    ptrWordInOut[5] ^= state[4];
    ptrWordInOut[6] ^= state[5];
    ptrWordInOut[7] ^= state[6];
    ptrWordInOut[8] ^= state[7];
    ptrWordInOut[9] ^= state[8];
    ptrWordInOut[10] ^= state[9];
    ptrWordInOut[11] ^= state[10];

       //Goes to next block
       ptrWordOut += BLOCK_LEN_INT64;
       ptrWordInOut += BLOCK_LEN_INT64;
       ptrWordIn += BLOCK_LEN_INT64;
    }

#else
    ptrWordOut[0] ^= state[0];
    ptrWordOut[1] ^= state[1];
    ptrWordOut[2] ^= state[2];
    ptrWordOut[3] ^= state[3];
    ptrWordOut[4] ^= state[4];
    ptrWordOut[5] ^= state[5];
    ptrWordOut[6] ^= state[6];
    ptrWordOut[7] ^= state[7];
    ptrWordOut[8] ^= state[8];
    ptrWordOut[9] ^= state[9];
    ptrWordOut[10] ^= state[10];
    ptrWordOut[11] ^= state[11];

    //M[rowInOut][col] = M[rowInOut][col] XOR rotW(rand)
    ptrWordInOut[0] ^= state[11];
    ptrWordInOut[1] ^= state[0];
    ptrWordInOut[2] ^= state[1];
    ptrWordInOut[3] ^= state[2];
    ptrWordInOut[4] ^= state[3];
    ptrWordInOut[5] ^= state[4];
    ptrWordInOut[6] ^= state[5];
    ptrWordInOut[7] ^= state[6];
    ptrWordInOut[8] ^= state[7];
    ptrWordInOut[9] ^= state[8];
    ptrWordInOut[10] ^= state[9];
    ptrWordInOut[11] ^= state[10];

       //Goes to next block
       ptrWordOut += BLOCK_LEN_INT64;
       ptrWordInOut += BLOCK_LEN_INT64;
       ptrWordIn += BLOCK_LEN_INT64;
    }
#endif
}
Example #19
/**
 * Performs a reduced squeeze operation for a single row, from the highest to
 * the lowest index, using the reduced-round Blake2b's G function as the
 * internal permutation
 *
 * @param state     The current state of the sponge
 * @param rowOut    Row to receive the data squeezed
 */
inline void reducedSqueezeRow0( uint64_t* state, uint64_t* rowOut,
                                uint64_t nCols )
{
    uint64_t* ptrWord = rowOut + (nCols-1)*BLOCK_LEN_INT64; //In Lyra2: pointer to M[0][C-1]
    int i;
    //M[row][C-1-col] = H.reduced_squeeze()

#if defined __AVX2__
       __m256i state_v[4];
       state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
       state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
       state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
       state_v[3] = _mm256_load_si256( (__m256i*)(&state[12]) );
#endif

    for ( i = 0; i < nCols; i++ )
    {

#if defined __AVX2__

       _mm256_storeu_si256( (__m256i*)&ptrWord[0], state_v[0] );
       _mm256_storeu_si256( (__m256i*)&ptrWord[4], state_v[1] );
       _mm256_storeu_si256( (__m256i*)&ptrWord[8], state_v[2] );

       //Goes to next block (column) that will receive the squeezed data
       ptrWord -= BLOCK_LEN_INT64;

       LYRA_ROUND_AVX2( state_v[0], state_v[1], state_v[2], state_v[3] );

#elif defined __AVX__

      _mm_store_si128( (__m128i*)(&ptrWord[0]),
                       _mm_load_si128( (__m128i*)(&state[0]) ) );
      _mm_store_si128( (__m128i*)(&ptrWord[2]),
                       _mm_load_si128( (__m128i*)(&state[2]) ) );
      _mm_store_si128( (__m128i*)(&ptrWord[4]),
                       _mm_load_si128( (__m128i*)(&state[4]) ) );
      _mm_store_si128( (__m128i*)(&ptrWord[6]),
                       _mm_load_si128( (__m128i*)(&state[6]) ) );
      _mm_store_si128( (__m128i*)(&ptrWord[8]),
                       _mm_load_si128( (__m128i*)(&state[8]) ) );
      _mm_store_si128( (__m128i*)(&ptrWord[10]),
                       _mm_load_si128( (__m128i*)(&state[10]) ) );

    //Goes to next block (column) that will receive the squeezed data
    ptrWord -= BLOCK_LEN_INT64;

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);

#else
    ptrWord[0] = state[0];
    ptrWord[1] = state[1];
    ptrWord[2] = state[2];
    ptrWord[3] = state[3];
    ptrWord[4] = state[4];
    ptrWord[5] = state[5];
    ptrWord[6] = state[6];
    ptrWord[7] = state[7];
    ptrWord[8] = state[8];
    ptrWord[9] = state[9];
    ptrWord[10] = state[10];
    ptrWord[11] = state[11];

    //Goes to next block (column) that will receive the squeezed data
    ptrWord -= BLOCK_LEN_INT64;

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);
#endif

    }

#if defined __AVX2__
    _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
    _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
    _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
    _mm256_store_si256( (__m256i*)&state[12], state_v[3] );
#endif

}
Example #20
void vec_i8_replace(int8_t *p, size_t n, int8_t val, int8_t substitute)
{
#ifdef COREARRAY_SIMD_SSE2

	// header 1, 16-byte aligned
	size_t h = (16 - ((size_t)p & 0x0F)) & 0x0F;
	for (; (n > 0) && (h > 0); n--, h--, p++)
		if (*p == val) *p = substitute;

	// body, SSE2
	const __m128i mask = _mm_set1_epi8(val);
	const __m128i sub  = _mm_set1_epi8(substitute);

#   ifdef COREARRAY_SIMD_AVX2

	// header 2, 32-byte aligned
	if ((n >= 16) && ((size_t)p & 0x10))
	{
		__m128i v = _mm_load_si128((__m128i const*)p);
		__m128i c = _mm_cmpeq_epi8(v, mask);
		if (_mm_movemask_epi8(c))
		{
			_mm_store_si128((__m128i *)p,
				_mm_or_si128(_mm_and_si128(c, sub), _mm_andnot_si128(c, v)));
		}
		n -= 16; p += 16;
	}

	const __m256i mask2 = _mm256_set1_epi8(val);
	const __m256i sub32 = _mm256_set1_epi8(substitute);
	const __m256i zero = _mm256_setzero_si256();
	const __m256i ones = _mm256_cmpeq_epi64(zero, zero);

	for (; n >= 32; n-=32, p+=32)
	{
		__m256i v = _mm256_load_si256((__m256i const*)p);
		__m256i c = _mm256_cmpeq_epi8(v, mask2);
		if (_mm256_movemask_epi8(c))
		{
			// TODO
			_mm256_store_si256((__m256i *)p,
				_mm256_or_si256(_mm256_and_si256(c, sub32),
				_mm256_andnot_si256(c, v)));
		}
	}

#   endif

	for (; n >= 16; n-=16, p+=16)
	{
		__m128i v = _mm_load_si128((__m128i const*)p);
		__m128i c = _mm_cmpeq_epi8(v, mask);
		if (_mm_movemask_epi8(c))
			_mm_maskmoveu_si128(sub, c, (char*)p);
	}

#endif

	// tail
	for (; n > 0; n--, p++)
		if (*p == val) *p = substitute;
}
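
/* A minimal sketch of the branchless select used above: with an
 * all-ones/all-zeros compare mask c, (c & a) | (~c & b) picks a in matching
 * lanes and b elsewhere, i.e. a per-byte c ? a : b. (The SSE2 tail loop uses
 * _mm_maskmoveu_si128 instead, which stores only the bytes whose mask high
 * bit is set.) */
#include <emmintrin.h>

static inline __m128i select_epi8(__m128i c, __m128i a, __m128i b)
{
	return _mm_or_si128(_mm_and_si128(c, a), _mm_andnot_si128(c, b));
}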
Example #21
static int make_frame_planar_yuv_stacked
(
    lw_video_output_handler_t *vohp,
    int                        height,
    AVFrame                   *av_frame,
    PVideoFrame               &as_frame
)
{
    as_picture_t dst_picture = { { { NULL } } };
    as_picture_t src_picture = { { { NULL } } };
    as_assign_planar_yuv( as_frame, &dst_picture );
    lw_video_scaler_handler_t *vshp = &vohp->scaler;
    as_video_output_handler_t *as_vohp = (as_video_output_handler_t *)vohp->private_handler;
    if( vshp->input_pixel_format == vshp->output_pixel_format )
        for( int i = 0; i < 3; i++ )
        {
            src_picture.data    [i] = av_frame->data    [i];
            src_picture.linesize[i] = av_frame->linesize[i];
        }
    else
    {
        if( convert_av_pixel_format( vshp->sws_ctx, height, av_frame, &as_vohp->scaled ) < 0 )
            return -1;
        src_picture = as_vohp->scaled;
    }
    for( int i = 0; i < 3; i++ )
    {
        const int src_height = height >> (i ? as_vohp->sub_height : 0);
        const int width      = vshp->input_width >> (i ? as_vohp->sub_width : 0);
        const int width16    = sse2_available > 0 ? (width & ~15) : 0;
        const int width32    = avx2_available > 0 ? (width & ~31) : 0;
        const int lsb_offset = src_height * dst_picture.linesize[i];
        for( int j = 0; j < src_height; j++ )
        {
            /* Here, use SIMD instructions if available.
             * Note: this assumes the address of the given data is divisible by 32 or 16.
             *       The destination is always 32-byte aligned unless AviSynth legacy alignment is used.
             *       The source is not always 32- or 16-byte aligned if the frame buffer comes directly from libavcodec. */
            static const uint8_t LW_ALIGN(32) sp16[32] =
                {
                    /* saturation protector
                     * Clears the upper 8 bits of each 16-bit sample so that the pack's saturation has no effect. */
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00,
                    0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00
                };
                  uint8_t *dst     = dst_picture.data[i] + j * dst_picture.linesize[i];     /* MSB: dst +     k,     LSB: dst +     k + lsb_offset */
            const uint8_t *src     = src_picture.data[i] + j * src_picture.linesize[i];     /* MSB: src + 2 * k + 1, LSB: src + 2 * k */
            const int     _width16 = ((intptr_t)src & 15) == 0 ? width16 : 0;               /* Don't use SSE2 instructions if set to 0. */
            const int     _width32 = ((intptr_t)src & 31) == 0 ? width32 : 0;               /* Don't use AVX(2) instructions if set to 0. */
#if VC_HAS_AVX2
            /* AVX, AVX2 */
            for( int k = 0; k < _width32; k += 32 )
            {
                __m256i ymm0 = _mm256_load_si256( (__m256i *)(src + 2 * k     ) );
                __m256i ymm1 = _mm256_load_si256( (__m256i *)(src + 2 * k + 32) );
                __m256i mask = _mm256_load_si256( (__m256i *)sp16 );
                __m256i ymm2 = _mm256_packus_epi16( _mm256_and_si256 ( ymm0, mask ), _mm256_and_si256 ( ymm1, mask ) );
                __m256i ymm3 = _mm256_packus_epi16( _mm256_srli_epi16( ymm0,    8 ), _mm256_srli_epi16( ymm1,    8 ) );
                _mm256_store_si256( (__m256i *)(dst + k + lsb_offset), _mm256_permute4x64_epi64( ymm2, _MM_SHUFFLE( 3, 1, 2, 0 ) ) );
                _mm256_store_si256( (__m256i *)(dst + k             ), _mm256_permute4x64_epi64( ymm3, _MM_SHUFFLE( 3, 1, 2, 0 ) ) );
            }
#endif
            /* SSE2 */
            for( int k = _width32; k < _width16; k += 16 )
            {
                __m128i xmm0 = _mm_load_si128( (__m128i *)(src + 2 * k     ) );
                __m128i xmm1 = _mm_load_si128( (__m128i *)(src + 2 * k + 16) );
                __m128i mask = _mm_load_si128( (__m128i *)sp16 );
                _mm_store_si128( (__m128i *)(dst + k + lsb_offset), _mm_packus_epi16( _mm_and_si128 ( xmm0, mask ), _mm_and_si128 ( xmm1, mask ) ) );
                _mm_store_si128( (__m128i *)(dst + k             ), _mm_packus_epi16( _mm_srli_epi16( xmm0,    8 ), _mm_srli_epi16( xmm1,    8 ) ) );
            }
            for( int k = _width16; k < width; k++ )
            {
                *(dst + k + lsb_offset) = *(src + 2 * k    );
                *(dst + k             ) = *(src + 2 * k + 1);
            }
        }
    }
    return 0;
}
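The loops above split each little-endian 16-bit sample into two 8-bit planes: the low bytes land at dst + lsb_offset and the high bytes at dst, i.e. the stacked MSB/LSB layout that the scalar tail spells out. A minimal scalar sketch of the same split (the helper name and parameters are hypothetical, not from this source):

#include <stdint.h>

/* Split interleaved 16-bit samples (LSB first) into a stacked 8-bit frame:
 * MSB plane at dst, LSB plane lsb_offset bytes further down. */
static void split_stacked16( uint8_t *dst, const uint8_t *src,
                             int width, int lsb_offset )
{
    for( int k = 0; k < width; k++ )
    {
        dst[k + lsb_offset] = src[2 * k];     /* low byte  */
        dst[k]              = src[2 * k + 1]; /* high byte */
    }
}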
Example #22
void vec_i8_cnt_dosage2(const int8_t *p, int8_t *out, size_t n, int8_t val,
	int8_t missing, int8_t missing_substitute)
{
#ifdef COREARRAY_SIMD_SSE2

	// header 1, 16-byte aligned
	size_t h = (16 - ((size_t)out & 0x0F)) & 0x0F;
	for (; (n > 0) && (h > 0); n--, h--, p+=2)
	{
		*out ++ = ((p[0] == missing) || (p[1] == missing)) ?
			missing_substitute :
			(p[0]==val ? 1 : 0) + (p[1]==val ? 1 : 0);
	}

	// body, SSE2
	const __m128i val16  = _mm_set1_epi8(val);
	const __m128i miss16 = _mm_set1_epi8(missing);
	const __m128i sub16  = _mm_set1_epi8(missing_substitute);
	const __m128i mask   = _mm_set1_epi16(0x00FF);

#   ifdef COREARRAY_SIMD_AVX2

	// header 2, 32-byte aligned
	if ((n >= 16) && ((size_t)out & 0x10))
	{
		__m128i w1 = MM_LOADU_128((__m128i const*)p); p += 16;
		__m128i w2 = MM_LOADU_128((__m128i const*)p); p += 16;

		__m128i v1 = _mm_packus_epi16(_mm_and_si128(w1, mask), _mm_and_si128(w2, mask));
		__m128i v2 = _mm_packus_epi16(_mm_srli_epi16(w1, 8), _mm_srli_epi16(w2, 8));

		__m128i c = _mm_setzero_si128();
		c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v1, val16));
		c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v2, val16));

		w1 = _mm_cmpeq_epi8(v1, miss16);
		w2 = _mm_cmpeq_epi8(v2, miss16);
		__m128i w  = _mm_or_si128(w1, w2);
		c = _mm_or_si128(_mm_and_si128(w, sub16), _mm_andnot_si128(w, c));

		_mm_store_si128((__m128i *)out, c);
		n -= 16; out += 16;
	}

	const __m256i val32  = _mm256_set1_epi8(val);
	const __m256i miss32 = _mm256_set1_epi8(missing);
	const __m256i sub32  = _mm256_set1_epi8(missing_substitute);
	const __m256i mask2  = _mm256_set1_epi16(0x00FF);

	for (; n >= 32; n-=32)
	{
		__m256i w1 = MM_LOADU_256((__m256i const*)p); p += 32;
		__m256i w2 = MM_LOADU_256((__m256i const*)p); p += 32;

		__m256i v1 = _mm256_packus_epi16(_mm256_and_si256(w1, mask2), _mm256_and_si256(w2, mask2));
		__m256i v2 = _mm256_packus_epi16(_mm256_srli_epi16(w1, 8), _mm256_srli_epi16(w2, 8));

		__m256i c = _mm256_setzero_si256();
		c = _mm256_sub_epi8(c, _mm256_cmpeq_epi8(v1, val32));
		c = _mm256_sub_epi8(c, _mm256_cmpeq_epi8(v2, val32));

		w1 = _mm256_cmpeq_epi8(v1, miss32);
		w2 = _mm256_cmpeq_epi8(v2, miss32);
		__m256i w = _mm256_or_si256(w1, w2);
		c = _mm256_or_si256(_mm256_and_si256(w, sub32), _mm256_andnot_si256(w, c));
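	// _mm256_packus_epi16 packed within 128-bit lanes; the permute below
	// (0xD8 selects 64-bit quarters 0,2,1,3) restores sequential order.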

		c = _mm256_permute4x64_epi64(c, 0xD8);
		_mm256_store_si256((__m256i *)out, c);
		out += 32;
	}

#   endif

	// SSE2 only
	for (; n >= 16; n-=16)
	{
		__m128i w1 = MM_LOADU_128((__m128i const*)p); p += 16;
		__m128i w2 = MM_LOADU_128((__m128i const*)p); p += 16;

		__m128i v1 = _mm_packus_epi16(_mm_and_si128(w1, mask), _mm_and_si128(w2, mask));
		__m128i v2 = _mm_packus_epi16(_mm_srli_epi16(w1, 8), _mm_srli_epi16(w2, 8));

		__m128i c = _mm_setzero_si128();
		c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v1, val16));
		c = _mm_sub_epi8(c, _mm_cmpeq_epi8(v2, val16));

		w1 = _mm_cmpeq_epi8(v1, miss16);
		w2 = _mm_cmpeq_epi8(v2, miss16);
		__m128i w = _mm_or_si128(w1, w2);
		c = _mm_or_si128(_mm_and_si128(w, sub16), _mm_andnot_si128(w, c));

		_mm_store_si128((__m128i *)out, c);
		out += 16;
	}

#endif

	// tail
	for (; n > 0; n--, p+=2)
	{
		*out ++ = ((p[0] == missing) || (p[1] == missing)) ?
			missing_substitute :
			(p[0]==val ? 1 : 0) + (p[1]==val ? 1 : 0);
	}
}
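The SIMD bodies above deinterleave each pair block with mask/shift plus packus, add 1 per match of val by subtracting the all-ones results of cmpeq, and override pairs containing missing with the substitute. As a scalar reference, this is the per-pair rule that the header and tail loops already compute:

#include <stddef.h>
#include <stdint.h>

/* Scalar reference: out[i] is the number of the two genotypes p[2i], p[2i+1]
 * equal to val, or missing_substitute if either genotype is missing. */
static void cnt_dosage2_scalar(const int8_t *p, int8_t *out, size_t n,
	int8_t val, int8_t missing, int8_t missing_substitute)
{
	for (size_t i = 0; i < n; i++, p += 2)
		out[i] = (p[0] == missing || p[1] == missing) ?
			missing_substitute :
			(int8_t)((p[0] == val) + (p[1] == val));
}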
Example #23
inline static void lyra_round( uint64_t *v )
{
#ifdef __AVX2__

   __m256i a = _mm256_load_si256( (__m256i*)(&v[ 0]) );
   __m256i b = _mm256_load_si256( (__m256i*)(&v[ 4]) );
   __m256i c = _mm256_load_si256( (__m256i*)(&v[ 8]) );
   __m256i d = _mm256_load_si256( (__m256i*)(&v[12]) );

   G_4X64( a, b, c, d );

   // swap words
   b = mm256_rotl256_1x64( b );
   c = mm256_swap128( c );
   d = mm256_rotr256_1x64( d );

   G_4X64( a, b, c, d );

   // unswap
   b = mm256_rotr256_1x64( b );
   c = mm256_swap128( c );
   d = mm256_rotl256_1x64( d );

   _mm256_store_si256( (__m256i*)(&v[ 0]), a );
   _mm256_store_si256( (__m256i*)(&v[ 4]), b );
   _mm256_store_si256( (__m256i*)(&v[ 8]), c );
   _mm256_store_si256( (__m256i*)(&v[12]), d );

#elif defined __AVX__

   __m128i a0, a1, b0, b1, c0, c1, d0, d1;

   a0 = _mm_load_si128( (__m128i*)(&v[ 0]) );
   a1 = _mm_load_si128( (__m128i*)(&v[ 2]) );
   b0 = _mm_load_si128( (__m128i*)(&v[ 4]) );
   b1 = _mm_load_si128( (__m128i*)(&v[ 6]) );
   c0 = _mm_load_si128( (__m128i*)(&v[ 8]) );
   c1 = _mm_load_si128( (__m128i*)(&v[10]) );
   d0 = _mm_load_si128( (__m128i*)(&v[12]) );
   d1 = _mm_load_si128( (__m128i*)(&v[14]) );

   G_2X64( a0, b0, c0, d0 );
   G_2X64( a1, b1, c1, d1 );

   // swap words
   mm128_rotl256_1x64( b0, b1 );
   mm128_swap128( c0, c1 );
   mm128_rotr256_1x64( d0, d1 );

   G_2X64( a0, b0, c0, d0 );
   G_2X64( a1, b1, c1, d1 );

   // unswap
   mm128_rotr256_1x64( b0, b1 );
   mm128_swap128( c0, c1 );
   mm128_rotl256_1x64( d0, d1 );

   _mm_store_si128( (__m128i*)(&v[ 0]), a0 );
   _mm_store_si128( (__m128i*)(&v[ 2]), a1 );
   _mm_store_si128( (__m128i*)(&v[ 4]), b0 );
   _mm_store_si128( (__m128i*)(&v[ 6]), b1 );
   _mm_store_si128( (__m128i*)(&v[ 8]), c0 );
   _mm_store_si128( (__m128i*)(&v[10]), c1 );
   _mm_store_si128( (__m128i*)(&v[12]), d0 );
   _mm_store_si128( (__m128i*)(&v[14]), d1 );

#else

   // macro assumes v is defined
   ROUND_LYRA(0);

#endif
}
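Both vector paths implement the column-then-diagonal round structure of Blake2b: the first G pass mixes the four columns of the 4x4 matrix of 64-bit words, the word swaps rotate rows so the second pass mixes the diagonals, and the final swaps restore column order. A scalar sketch of the G step that G_4X64/G_2X64 run in parallel, assuming the standard Blake2b rotation constants (32, 24, 16, 63) that Lyra2 uses:

#include <stdint.h>

static inline uint64_t rotr64( uint64_t w, unsigned c )
{
   return ( w >> c ) | ( w << ( 64 - c ) );
}

/* One scalar Blake2b G step (without message words, as in Lyra2);
 * the vector macros apply four (or two) of these at once. */
static inline void g_scalar( uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d )
{
   *a += *b;  *d = rotr64( *d ^ *a, 32 );
   *c += *d;  *b = rotr64( *b ^ *c, 24 );
   *a += *b;  *d = rotr64( *d ^ *a, 16 );
   *c += *d;  *b = rotr64( *b ^ *c, 63 );
}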
Example #24
/**
 * Performs a duplexing operation over "M[rowInOut][col] [+] M[rowIn][col]" (i.e.,
 * the wordwise addition of two columns, ignoring carries between words). The
 * output of this operation, "rand", is then used to make
 * "M[rowOut][col] = M[rowOut][col] XOR rand" and
 * "M[rowInOut][col] =  M[rowInOut][col] XOR rotW(rand)", where rotW is a 64-bit
 * rotation to the left.
 *
 * @param state          The current state of the sponge
 * @param rowIn          Row used only as input
 * @param rowInOut       Row used as input and to receive output after rotation
 * @param rowOut         Row receiving the output
 *
 */
void reducedDuplexRow(uint64_t *state, uint64_t *rowIn, uint64_t *rowInOut, uint64_t *rowOut, const uint32_t nCols)
{
   uint64_t* ptrWordInOut = rowInOut; //In Lyra2: pointer to row*
   uint64_t* ptrWordIn = rowIn; //In Lyra2: pointer to prev
   uint64_t* ptrWordOut = rowOut; //In Lyra2: pointer to row
   unsigned int i;

   for (i = 0; i < nCols; i++)
   {

   //Absorbing "M[prev] [+] M[row*]"
   #if defined __AVX2__

       __m256i state_v[3], in_v[3], inout_v[3];
       #define out_v in_v    // reuse register in next code block

       state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
       in_v   [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[0]) );
       inout_v[0] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[0]) );
       state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
       in_v   [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[4]) );
       inout_v[1] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[4]) );
       state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
       in_v   [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordIn[8]) );
       inout_v[2] = _mm256_loadu_si256( (__m256i*)(&ptrWordInOut[8]) );

       _mm256_store_si256( (__m256i*)(&state[0]),
                             _mm256_xor_si256( state_v[0], 
                                               _mm256_add_epi64( in_v[0],
                                                               inout_v[0] ) ) );
       _mm256_store_si256( (__m256i*)(&state[4]), 
                            _mm256_xor_si256( state_v[1],
                                               _mm256_add_epi64( in_v[1],
                                                               inout_v[1] ) ) );
       _mm256_store_si256( (__m256i*)(&state[8]), 
                            _mm256_xor_si256( state_v[2], 
                                              _mm256_add_epi64( in_v[2],
                                                               inout_v[2] ) ) );
   #elif defined __AVX__

       __m128i state_v[6], in_v[6], inout_v[6];
       #define out_v in_v    // reuse register in next code block

       state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
       state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
       state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
       state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
       state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
       state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

       inout_v[0]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[0]) );
       inout_v[1]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[2]) );
       inout_v[2]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[4]) );
       inout_v[3]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[6]) );
       inout_v[4]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[8]) );
       inout_v[5]    = _mm_loadu_si128( (__m128i*)(&ptrWordInOut[10]) );

       in_v[0]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[0]) );
       in_v[1]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[2]) );
       in_v[2]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[4]) );
       in_v[3]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[6]) );
       in_v[4]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[8]) );
       in_v[5]    = _mm_loadu_si128( (__m128i*)(&ptrWordIn[10]) );

       _mm_store_si128( (__m128i*)(&state[0]),
                         _mm_xor_si128( state_v[0],
                                        _mm_add_epi64( in_v[0],
                                                       inout_v[0] ) ) );
       _mm_store_si128( (__m128i*)(&state[2]),
                         _mm_xor_si128( state_v[1],
                                        _mm_add_epi64( in_v[1],
                                                       inout_v[1] ) ) );
       _mm_store_si128( (__m128i*)(&state[4]),
                         _mm_xor_si128( state_v[2],
                                        _mm_add_epi64( in_v[2],
                                                       inout_v[2] ) ) );
       _mm_store_si128( (__m128i*)(&state[6]),
                         _mm_xor_si128( state_v[3],
                                        _mm_add_epi64( in_v[3],
                                                       inout_v[3] ) ) );
       _mm_store_si128( (__m128i*)(&state[8]),
                         _mm_xor_si128( state_v[4],
                                        _mm_add_epi64( in_v[4],
                                                       inout_v[4] ) ) );
       _mm_store_si128( (__m128i*)(&state[10]),
                         _mm_xor_si128( state_v[5],
                                        _mm_add_epi64( in_v[5],
                                                       inout_v[5] ) ) );

   #else

       state[0]  ^= (ptrWordIn[0]  + ptrWordInOut[0]);
       state[1]  ^= (ptrWordIn[1]  + ptrWordInOut[1]);
       state[2]  ^= (ptrWordIn[2]  + ptrWordInOut[2]);
       state[3]  ^= (ptrWordIn[3]  + ptrWordInOut[3]);
       state[4]  ^= (ptrWordIn[4]  + ptrWordInOut[4]);
       state[5]  ^= (ptrWordIn[5]  + ptrWordInOut[5]);
       state[6]  ^= (ptrWordIn[6]  + ptrWordInOut[6]);
       state[7]  ^= (ptrWordIn[7]  + ptrWordInOut[7]);
       state[8]  ^= (ptrWordIn[8]  + ptrWordInOut[8]);
       state[9]  ^= (ptrWordIn[9]  + ptrWordInOut[9]);
       state[10] ^= (ptrWordIn[10] + ptrWordInOut[10]);
       state[11] ^= (ptrWordIn[11] + ptrWordInOut[11]);
    #endif

    //Applies the reduced-round transformation f to the sponge's state
    reducedBlake2bLyra(state);

    //M[rowOut][col] = M[rowOut][col] XOR rand
    #if defined __AVX2__

       state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
       out_v  [0] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[0]) );
       state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
       out_v  [1] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[4]) );
       state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
       out_v  [2] = _mm256_loadu_si256( (__m256i*)(&ptrWordOut[8]) );

       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[0]),
                            _mm256_xor_si256( state_v[0], out_v[0] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[4]),
                            _mm256_xor_si256( state_v[1], out_v[1] ) );
       _mm256_storeu_si256( (__m256i*)(&ptrWordOut[8]),
                            _mm256_xor_si256( state_v[2], out_v[2] ) );

    #elif defined __AVX__

       state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
       state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
       state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
       state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
       state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
       state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

       out_v[0]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[0]) );
       out_v[1]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[2]) );
       out_v[2]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[4]) );
       out_v[3]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[6]) );
       out_v[4]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[8]) );
       out_v[5]    = _mm_loadu_si128( (__m128i*)(&ptrWordOut[10]) );

       _mm_storeu_si128( (__m128i*)(&ptrWordOut[0]),
                         _mm_xor_si128( state_v[0], out_v[0] ) );
       _mm_storeu_si128( (__m128i*)(&ptrWordOut[2]),
                         _mm_xor_si128( state_v[1], out_v[1] ) );
       _mm_storeu_si128( (__m128i*)(&ptrWordOut[4]),
                         _mm_xor_si128( state_v[2], out_v[2] ) );
       _mm_storeu_si128( (__m128i*)(&ptrWordOut[6]),
                         _mm_xor_si128( state_v[3], out_v[3] ) );
       _mm_storeu_si128( (__m128i*)(&ptrWordOut[8]),
                         _mm_xor_si128( state_v[4], out_v[4] ) );
       _mm_storeu_si128( (__m128i*)(&ptrWordOut[10]),
                         _mm_xor_si128( state_v[5], out_v[5] ) );

    #else

       ptrWordOut[0] ^= state[0];
       ptrWordOut[1] ^= state[1];
       ptrWordOut[2] ^= state[2];
       ptrWordOut[3] ^= state[3];
       ptrWordOut[4] ^= state[4];
       ptrWordOut[5] ^= state[5];
       ptrWordOut[6] ^= state[6];
       ptrWordOut[7] ^= state[7];
       ptrWordOut[8] ^= state[8];
       ptrWordOut[9] ^= state[9];
       ptrWordOut[10] ^= state[10];
       ptrWordOut[11] ^= state[11];

    #endif

    //M[rowInOut][col] = M[rowInOut][col] XOR rotW(rand)
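    // rotW here rotates rand by one 64-bit word: word k of rowInOut is XORed with state[(k + 11) % 12]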
    ptrWordInOut[0] ^= state[11];
    ptrWordInOut[1] ^= state[0];
    ptrWordInOut[2] ^= state[1];
    ptrWordInOut[3] ^= state[2];
    ptrWordInOut[4] ^= state[3];
    ptrWordInOut[5] ^= state[4];
    ptrWordInOut[6] ^= state[5];
    ptrWordInOut[7] ^= state[6];
    ptrWordInOut[8] ^= state[7];
    ptrWordInOut[9] ^= state[8];
    ptrWordInOut[10] ^= state[9];
    ptrWordInOut[11] ^= state[10];

    //Goes to next block
    ptrWordOut += BLOCK_LEN_INT64;
    ptrWordInOut += BLOCK_LEN_INT64;
    ptrWordIn += BLOCK_LEN_INT64;
  }
}
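For context, a hedged sketch of how a caller might compute the row pointers passed in; the contiguous matrix layout and every name below are assumptions, not taken from this source:

// Hypothetical: rows stored contiguously, each nCols blocks of BLOCK_LEN_INT64 words.
static uint64_t* rowPtr( uint64_t *matrix, uint64_t row, uint32_t nCols )
{
   return matrix + row * nCols * BLOCK_LEN_INT64;
}

// e.g. one duplexing step over three rows:
// reducedDuplexRow( state, rowPtr( m, prev, nCols ),
//                   rowPtr( m, rowa, nCols ), rowPtr( m, row, nCols ), nCols );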
Example #25
void*  xmemset(void* dest, int c, size_t n)
{
    void* ret = dest;
    if (n < 16) {
        xmemset_lt16(dest, c, n);
        return ret;
    }

    __m256i mm = _mm256_set1_epi8((char)c);

    if (((uintptr_t)dest & 31) == 0) {
        for ( ; n >= 256; n -= 256) {
            _mm256_store_si256((__m256i*)dest, mm);
            _mm256_store_si256((__m256i*)dest + 1, mm);
            _mm256_store_si256((__m256i*)dest + 2, mm);
            _mm256_store_si256((__m256i*)dest + 3, mm);
            _mm256_store_si256((__m256i*)dest + 4, mm);
            _mm256_store_si256((__m256i*)dest + 5, mm);
            _mm256_store_si256((__m256i*)dest + 6, mm);
            _mm256_store_si256((__m256i*)dest + 7, mm); // 8
            dest = (void*)((__m256i*)dest + 8);
        }

        if (n >= 128) {
            _mm256_store_si256((__m256i*)dest, mm);
            _mm256_store_si256((__m256i*)dest + 1, mm);
            _mm256_store_si256((__m256i*)dest + 2, mm);
            _mm256_store_si256((__m256i*)dest + 3, mm);
            dest = (void*)((__m256i*)dest + 4);
            n -= 128;
        }

        if (n >= 64) {
            _mm256_store_si256((__m256i*)dest, mm);
            _mm256_store_si256((__m256i*)dest + 1, mm);
            dest = (void*)((__m256i*)dest + 2);
            n -= 64;
        }

        if (n >= 32) {
            _mm256_store_si256((__m256i*)dest, mm);
            dest = (void*)((__m256i*)dest + 1);
            n -= 32;
        }

        if (n >= 16) {
            _mm_store_si128((__m128i*)dest, _mm_set1_epi8((char)c));
            dest = (void*)((__m128i*)dest + 1);
            n -= 16;
        }

    } else {
        for ( ; n >= 256; n -= 256) {
            _mm256_storeu_si256((__m256i*)dest, mm);
            _mm256_storeu_si256((__m256i*)dest + 1, mm);
            _mm256_storeu_si256((__m256i*)dest + 2, mm);
            _mm256_storeu_si256((__m256i*)dest + 3, mm);
            _mm256_storeu_si256((__m256i*)dest + 4, mm);
            _mm256_storeu_si256((__m256i*)dest + 5, mm);
            _mm256_storeu_si256((__m256i*)dest + 6, mm);
            _mm256_storeu_si256((__m256i*)dest + 7, mm); // 8
            dest = (void*)((__m256i*)dest + 8);
        }

        if (n >= 128) {
            _mm256_storeu_si256((__m256i*)dest, mm);
            _mm256_storeu_si256((__m256i*)dest + 1, mm);
            _mm256_storeu_si256((__m256i*)dest + 2, mm);
            _mm256_storeu_si256((__m256i*)dest + 3, mm);
            dest = (void*)((__m256i*)dest + 4);
            n -= 128;
        }

        if (n >= 64) {
            _mm256_storeu_si256((__m256i*)dest, mm);
            _mm256_storeu_si256((__m256i*)dest + 1, mm);
            dest = (void*)((__m256i*)dest + 2);
            n -= 64;
        }

        if (n >= 32) {
            _mm256_storeu_si256((__m256i*)dest, mm);
            dest = (void*)((__m256i*)dest + 1);
            n -= 32;
        }

        if (n >= 16) {
            _mm_storeu_si128((__m128i*)dest, _mm_set1_epi8((char)c));
            dest = (void*)((__m128i*)dest + 1);
            n -= 16;
        }
    }

    xmemset_lt16(dest, c, n);
    return ret;
}
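xmemset_lt16 handles the sub-16-byte head and tail but is not shown in this example; a plausible byte-wise fallback (hypothetical, the project's actual helper may differ):

// Hypothetical fallback for n < 16; a real implementation might use
// word-sized stores instead of a byte loop.
static void xmemset_lt16(void* dest, int c, size_t n)
{
    unsigned char* d = (unsigned char*)dest;
    while (n--)
        *d++ = (unsigned char)c;
}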
Example #26
/**
 * Performs an absorb operation for a single block (BLOCK_LEN_INT64 words
 * of type uint64_t), using Blake2b's G function as the internal permutation
 *
 * @param state The current state of the sponge
 * @param in    The block to be absorbed (BLOCK_LEN_INT64 words)
 */
inline void absorbBlock(uint64_t *state, const uint64_t *in) {
#if defined __AVX2__

    __m256i state_v[4], in_v[3];

    // only state is guaranteed to be 32-byte (256-bit) aligned
    state_v[0] = _mm256_load_si256( (__m256i*)(&state[0]) );
    in_v   [0] = _mm256_loadu_si256( (__m256i*)(&in[0]) );
    state_v[1] = _mm256_load_si256( (__m256i*)(&state[4]) );
    in_v   [1] = _mm256_loadu_si256( (__m256i*)(&in[4]) );
    state_v[2] = _mm256_load_si256( (__m256i*)(&state[8]) );
    in_v   [2] = _mm256_loadu_si256( (__m256i*)(&in[8]) ); 
    state_v[3] = _mm256_load_si256( (__m256i*)(&state[12]) );

    state_v[0] = _mm256_xor_si256( state_v[0], in_v[0] );
    state_v[1] = _mm256_xor_si256( state_v[1], in_v[1] );
    state_v[2] = _mm256_xor_si256( state_v[2], in_v[2] );

    LYRA_12_ROUNDS_AVX2( state_v[0], state_v[1], state_v[2], state_v[3] );
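    // The 12-round macro is the full blake2bLyra permutation, applied entirely
    // in registers, so no separate blake2bLyra() call is needed on this path.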

    _mm256_store_si256( (__m256i*)&state[0], state_v[0] );
    _mm256_store_si256( (__m256i*)&state[4], state_v[1] );
    _mm256_store_si256( (__m256i*)&state[8], state_v[2] );
    _mm256_store_si256( (__m256i*)&state[12], state_v[3] );

#elif defined __AVX__

    __m128i state_v[6], in_v[6];

    state_v[0] = _mm_load_si128( (__m128i*)(&state[0]) );
    state_v[1] = _mm_load_si128( (__m128i*)(&state[2]) );
    state_v[2] = _mm_load_si128( (__m128i*)(&state[4]) );
    state_v[3] = _mm_load_si128( (__m128i*)(&state[6]) );
    state_v[4] = _mm_load_si128( (__m128i*)(&state[8]) );
    state_v[5] = _mm_load_si128( (__m128i*)(&state[10]) );

    // in is not guaranteed to be 16-byte aligned (see the note above), so load it unaligned
    in_v[0]    = _mm_loadu_si128( (__m128i*)(&in[0]) );
    in_v[1]    = _mm_loadu_si128( (__m128i*)(&in[2]) );
    in_v[2]    = _mm_loadu_si128( (__m128i*)(&in[4]) );
    in_v[3]    = _mm_loadu_si128( (__m128i*)(&in[6]) );
    in_v[4]    = _mm_loadu_si128( (__m128i*)(&in[8]) );
    in_v[5]    = _mm_loadu_si128( (__m128i*)(&in[10]) );

    //XORs the first BLOCK_LEN_INT64 words of "in" with the current state
    _mm_store_si128( (__m128i*)(&state[0]),
                       _mm_xor_si128( state_v[0], in_v[0] ) );
    _mm_store_si128( (__m128i*)(&state[2]),
                       _mm_xor_si128( state_v[1], in_v[1] ) );
    _mm_store_si128( (__m128i*)(&state[4]),
                       _mm_xor_si128( state_v[2], in_v[2] ) );
    _mm_store_si128( (__m128i*)(&state[6]),
                       _mm_xor_si128( state_v[3], in_v[3] ) );
    _mm_store_si128( (__m128i*)(&state[8]),
                       _mm_xor_si128( state_v[4], in_v[4] ) );
    _mm_store_si128( (__m128i*)(&state[10]),
                       _mm_xor_si128( state_v[5], in_v[5] ) );

    //Applies the transformation f to the sponge's state
    blake2bLyra(state);

#else
    //XORs the first BLOCK_LEN_INT64 words of "in" with the current state
    state[0] ^= in[0];
    state[1] ^= in[1];
    state[2] ^= in[2];
    state[3] ^= in[3];
    state[4] ^= in[4];
    state[5] ^= in[5];
    state[6] ^= in[6];
    state[7] ^= in[7];
    state[8] ^= in[8];
    state[9] ^= in[9];
    state[10] ^= in[10];
    state[11] ^= in[11];

    //Applies the transformation f to the sponge's state
    blake2bLyra(state);

#endif
}