void AuthenticatedSymmetricCipherBase::ProcessData(byte *outString, const byte *inString, size_t length)
{
	// Feed message bytes through the cipher while updating the authentication
	// state.  Drives a small state machine: the first data call after the IV
	// finalizes any buffered header (AAD) block, then every subsequent call
	// both transforms and authenticates the data in the order the mode needs.
	m_totalMessageLength += length;
	if (m_state >= State_IVSet && m_totalMessageLength > MaxMessageLength())
		throw InvalidArgument(AlgorithmName() + ": message length exceeds maximum");

reswitch:
	switch (m_state)
	{
	case State_Start:
	case State_KeySet:
		// Data may not be processed before both key and IV are set.
		throw BadState(AlgorithmName(), "ProcessData", "setting key and IV");
	case State_AuthFooter:
		// Once footer (post-message AAD) input has begun, no more message data.
		throw BadState(AlgorithmName(), "ProcessData was called after footer input has started");
	case State_IVSet:
		// First message data: flush the partially filled header block into the
		// MAC, then pick the data state based on whether this mode
		// authenticates the untransformed or the transformed stream.
		AuthenticateLastHeaderBlock();
		m_bufferedDataLength = 0;
		m_state = AuthenticationIsOnPlaintext()==IsForwardTransformation() ? State_AuthUntransformed : State_AuthTransformed;
		goto reswitch;	// re-dispatch in the new state to handle this call's data
	case State_AuthUntransformed:
		// MAC is computed over the input stream, then the cipher is applied.
		AuthenticateData(inString, length);
		AccessSymmetricCipher().ProcessData(outString, inString, length);
		break;
	case State_AuthTransformed:
		// Cipher is applied first; MAC is computed over the output stream.
		AccessSymmetricCipher().ProcessData(outString, inString, length);
		AuthenticateData(outString, length);
		break;
	default:
		CRYPTOPP_ASSERT(false);	// unreachable: all states handled above
	}
}
void Square::Base::UncheckedSetKey(const byte *userKey, unsigned int length, const NameValuePairs &) { AssertValidKeyLength(length); static const word32 offset[ROUNDS] = { 0x01000000UL, 0x02000000UL, 0x04000000UL, 0x08000000UL, 0x10000000UL, 0x20000000UL, 0x40000000UL, 0x80000000UL, }; GetUserKey(BIG_ENDIAN_ORDER, roundkeys[0], KEYLENGTH/4, userKey, KEYLENGTH); /* apply the key evolution function */ for (int i = 1; i < ROUNDS+1; i++) { roundkeys[i][0] = roundkeys[i-1][0] ^ rotlFixed(roundkeys[i-1][3], 8U) ^ offset[i-1]; roundkeys[i][1] = roundkeys[i-1][1] ^ roundkeys[i][0]; roundkeys[i][2] = roundkeys[i-1][2] ^ roundkeys[i][1]; roundkeys[i][3] = roundkeys[i-1][3] ^ roundkeys[i][2]; } /* produce the round keys */ if (IsForwardTransformation()) { for (int i = 0; i < ROUNDS; i++) SquareTransform (roundkeys[i], roundkeys[i]); } else { for (int i = 0; i < ROUNDS/2; i++) for (int j = 0; j < 4; j++) std::swap(roundkeys[i][j], roundkeys[ROUNDS-i][j]); SquareTransform (roundkeys[ROUNDS], roundkeys[ROUNDS]); } }
void Square::Base::UncheckedSetKey(const byte *userKey, unsigned int length, const NameValuePairs &) { AssertValidKeyLength(length); static const word32 offset[ROUNDS] = { 0x01000000UL, 0x02000000UL, 0x04000000UL, 0x08000000UL, 0x10000000UL, 0x20000000UL, 0x40000000UL, 0x80000000UL, }; GetUserKey(BIG_ENDIAN_ORDER, m_roundkeys.data(), KEYLENGTH/4, userKey, KEYLENGTH); /* apply the key evolution function */ for (int i = 1; i < ROUNDS+1; i++) { roundkeys(i, 0) = roundkeys(i-1, 0) ^ rotlFixed(roundkeys(i-1, 3), 8U) ^ offset[i-1]; roundkeys(i, 1) = roundkeys(i-1, 1) ^ roundkeys(i, 0); roundkeys(i, 2) = roundkeys(i-1, 2) ^ roundkeys(i, 1); roundkeys(i, 3) = roundkeys(i-1, 3) ^ roundkeys(i, 2); } /* produce the round keys */ if (IsForwardTransformation()) { for (int i = 0; i < ROUNDS; i++) SquareTransform (roundkeys4(i), roundkeys4(i)); } else { for (int i = 0; i < ROUNDS/2; i++) for (int j = 0; j < 4; j++) std::swap(roundkeys(i, j), roundkeys(ROUNDS-i, j)); SquareTransform (roundkeys4(ROUNDS), roundkeys4(ROUNDS)); } }
void CipherModeBase::GetNextIV(byte *IV)
{
	// Produce the next IV by encrypting the current register contents in
	// place and copying the result out.  Only a forward (encryption) object
	// can do this, since it requires the forward cipher transform.
	if (!IsForwardTransformation())
		throw NotImplemented("CipherModeBase: GetNextIV() must be called on an encryption object");

	m_cipher->ProcessBlock(m_register);
	memcpy(IV, m_register, BlockSize());
}
void CAST256::Base::UncheckedSetKey(const byte *userKey, unsigned int keylength, const NameValuePairs &) { AssertValidKeyLength(keylength); word32 kappa[8]; GetUserKey(BIG_ENDIAN_ORDER, kappa, 8, userKey, keylength); for(int i=0; i<12; ++i) { Omega(2*i,kappa); Omega(2*i+1,kappa); K[8*i]=kappa[0] & 31; K[8*i+1]=kappa[2] & 31; K[8*i+2]=kappa[4] & 31; K[8*i+3]=kappa[6] & 31; K[8*i+4]=kappa[7]; K[8*i+5]=kappa[5]; K[8*i+6]=kappa[3]; K[8*i+7]=kappa[1]; } if (!IsForwardTransformation()) { for(int j=0; j<6; ++j) { for(int i=0; i<4; ++i) { int i1=8*j+i; int i2=8*(11-j)+i; assert(i1<i2); std::swap(K[i1],K[i2]); std::swap(K[i1+4],K[i2+4]); } } } memset(kappa, 0, sizeof(kappa)); }
void SHARK::Base::UncheckedSetKey(const byte *key, unsigned int keyLen, const NameValuePairs &params)
{
	// Build the round key schedule: the user key is repeated to fill the
	// round key buffer, then the buffer is encrypted with SHARK in CFB mode
	// under an all-zero setup key/IV (self-keying), and finally adjusted for
	// direction and host endianness.
	AssertValidKeyLength(keyLen);

	m_rounds = GetRoundsAndThrowIfInvalid(params, this);
	m_roundKeys.New(m_rounds+1);

	// concatenate key enough times to fill the round key buffer
	for (unsigned int i=0; i<(m_rounds+1)*8; i++)
		((byte *)m_roundKeys.begin())[i] = key[i%keyLen];

	// Encrypt the filled buffer with a setup-keyed SHARK instance in CFB mode.
	SHARK::Encryption e;
	e.InitForKeySetup();
	byte IV[8] = {0,0,0,0,0,0,0,0};
	CFB_Mode_ExternalCipher::Encryption cfb(e, IV);
	cfb.ProcessString((byte *)m_roundKeys.begin(), (m_rounds+1)*8);

	ConditionalByteReverse(BIG_ENDIAN_ORDER, m_roundKeys.begin(), m_roundKeys.begin(), (m_rounds+1)*8);
	// The final round key gets the inverse-affine adjustment.
	m_roundKeys[m_rounds] = SHARKTransform(m_roundKeys[m_rounds]);

	if (!IsForwardTransformation())
	{
		unsigned int i;

		// transform encryption round keys into decryption round keys:
		// reverse their order, then transform the interior keys.
		for (i=0; i<m_rounds/2; i++)
			std::swap(m_roundKeys[i], m_roundKeys[m_rounds-i]);

		for (i=1; i<m_rounds; i++)
			m_roundKeys[i] = SHARKTransform(m_roundKeys[i]);
	}

#ifdef IS_LITTLE_ENDIAN
	// First and last keys are applied as raw bytes, so undo the word swap.
	m_roundKeys[0] = ByteReverse(m_roundKeys[0]);
	m_roundKeys[m_rounds] = ByteReverse(m_roundKeys[m_rounds]);
#endif
}
void Camellia::Base::UncheckedSetKey(const byte *key, unsigned int keylen, const NameValuePairs &)
{
	// Camellia key schedule.  Derives the subkey table from KL (first 128
	// bits of the key) and, for longer keys, KR, via the KA/KB intermediate
	// keys.  Subkeys are stored as rotations of these 128-bit quantities.
	m_rounds = (keylen >= 24) ? 4 : 3;
	unsigned int kslen = (8 * m_rounds + 2);
	m_key.New(kslen*2);
	word32 *ks32 = m_key.data();

	// For decryption the schedule is laid out in reverse; m and a steer the
	// CALC_ADDR address arithmetic so the same KS_ROUND calls fill the table
	// backwards (m = 0/a = 0 for encryption; m = -1/a = kslen-1 for decryption).
	int m=0, a=0;
	if (!IsForwardTransformation())
		m = -1, a = kslen-1;

	word32 kl0, kl1, kl2, kl3;
	GetBlock<word32, BigEndian> getBlock(key);
	getBlock(kl0)(kl1)(kl2)(kl3);
	word32 k0=kl0, k1=kl1, k2=kl2, k3=kl3;

// Address of subkey word (i, j), mirrored through the table when m == -1.
#define CALC_ADDR2(base, i, j)	((byte *)(base)+8*(i)+4*(j)+((-16*(i))&m))
#define CALC_ADDR(base, i)	CALC_ADDR2(base, i, 0)

#if 1
	word64 kwl, kwr;
	ks32 += 2*a;
// Pack the current 128-bit working key (k0..k3) into two 64-bit halves.
#define PREPARE_KS_ROUNDS	\
	kwl = (word64(k0) << 32) | k1;	\
	kwr = (word64(k2) << 32) | k3
// Store the working key unrotated at slot i.
#define KS_ROUND_0(i)	\
	assert(IsAlignedOn(CALC_ADDR(ks32, i+EFI(0)),GetAlignmentOf<word64>()));	\
	assert(IsAlignedOn(CALC_ADDR(ks32, i+EFI(1)),GetAlignmentOf<word64>()));	\
	*(word64*)(void*)CALC_ADDR(ks32, i+EFI(0)) = kwl;	\
	*(word64*)(void*)CALC_ADDR(ks32, i+EFI(1)) = kwr
// Store the working key rotated left by r at slot i; bits 0/1 of `which`
// select which 64-bit half (or both) is actually written.
#define KS_ROUND(i, r, which)	\
	assert(IsAlignedOn(CALC_ADDR(ks32, i+EFI(r<64)),GetAlignmentOf<word64>()));	\
	assert(IsAlignedOn(CALC_ADDR(ks32, i+EFI(r>64)),GetAlignmentOf<word64>()));	\
	if (which & (1<<int(r<64))) *(word64*)(void*)CALC_ADDR(ks32, i+EFI(r<64)) = (kwr << (r%64)) | (kwl >> (64 - (r%64)));	\
	if (which & (1<<int(r>64))) *(word64*)(void*)CALC_ADDR(ks32, i+EFI(r>64)) = (kwl << (r%64)) | (kwr >> (64 - (r%64)))
#else
	// SSE2 version is 30% faster on Intel Core 2. Doesn't seem worth the hassle of maintenance, but left here
	// #if'd out in case someone needs it.
	__m128i kw, kw2;
	__m128i *ks128 = (__m128i *)ks32+a/2;
	ks32 += 2*a;
#define PREPARE_KS_ROUNDS	\
	kw = _mm_set_epi32(k0, k1, k2, k3);	\
	if (m) kw2 = kw, kw = _mm_shuffle_epi32(kw, _MM_SHUFFLE(1, 0, 3, 2));	\
	else kw2 = _mm_shuffle_epi32(kw, _MM_SHUFFLE(1, 0, 3, 2))
#define KS_ROUND_0(i)	\
	_mm_store_si128((__m128i *)CALC_ADDR(ks128, i), kw)
#define KS_ROUND(i, r, which)	{	\
	__m128i temp;	\
	if (r<64 && (which!=1 || m)) temp = _mm_or_si128(_mm_slli_epi64(kw, r%64), _mm_srli_epi64(kw2, 64-r%64));	\
	else temp = _mm_or_si128(_mm_slli_epi64(kw2, r%64), _mm_srli_epi64(kw, 64-r%64));	\
	if (which & 2) _mm_store_si128((__m128i *)CALC_ADDR(ks128, i), temp);	\
	else _mm_storel_epi64((__m128i*)CALC_ADDR(ks32, i+EFI(0)), temp);	\
	}
#endif

	if (keylen == 16)
	{
		// 128-bit key: subkeys come from KL and KA only.
		// KL
		PREPARE_KS_ROUNDS;
		KS_ROUND_0(0);
		KS_ROUND(4, 15, 3);
		KS_ROUND(10, 45, 3);
		KS_ROUND(12, 60, 2);
		KS_ROUND(16, 77, 3);
		KS_ROUND(18, 94, 3);
		KS_ROUND(22, 111, 3);

		// KA: two double rounds over KL with the Camellia sigma constants,
		// XORing KL back in between them.
		k0=kl0, k1=kl1, k2=kl2, k3=kl3;
		DOUBLE_ROUND(k0, k1, k2, k3, 0xA09E667Ful, 0x3BCC908Bul, 0xB67AE858ul, 0x4CAA73B2ul);
		k0^=kl0, k1^=kl1, k2^=kl2, k3^=kl3;
		DOUBLE_ROUND(k0, k1, k2, k3, 0xC6EF372Ful, 0xE94F82BEul, 0x54FF53A5ul, 0xF1D36F1Cul);
		PREPARE_KS_ROUNDS;
		KS_ROUND_0(2);
		KS_ROUND(6, 15, 3);
		KS_ROUND(8, 30, 3);
		KS_ROUND(12, 45, 1);
		KS_ROUND(14, 60, 3);
		KS_ROUND(20, 94, 3);
		KS_ROUND(24, 47, 3);
	}
	else
	{
		// 192/256-bit key: subkeys come from KL, KR, KA and KB.
		// KL
		PREPARE_KS_ROUNDS;
		KS_ROUND_0(0);
		KS_ROUND(12, 45, 3);
		KS_ROUND(16, 60, 3);
		KS_ROUND(22, 77, 3);
		KS_ROUND(30, 111, 3);

		// KR: second half of the key; for 192-bit keys the missing 64 bits
		// are the complement of the preceding 64 bits.
		word32 kr0, kr1, kr2, kr3;
		GetBlock<word32, BigEndian>(key+16)(kr0)(kr1);
		if (keylen == 24)
			kr2 = ~kr0, kr3 = ~kr1;
		else
			GetBlock<word32, BigEndian>(key+24)(kr2)(kr3);
		k0=kr0, k1=kr1, k2=kr2, k3=kr3;
		PREPARE_KS_ROUNDS;
		KS_ROUND(4, 15, 3);
		KS_ROUND(8, 30, 3);
		KS_ROUND(18, 60, 3);
		KS_ROUND(26, 94, 3);

		// KA
		k0^=kl0, k1^=kl1, k2^=kl2, k3^=kl3;
		DOUBLE_ROUND(k0, k1, k2, k3, 0xA09E667Ful, 0x3BCC908Bul, 0xB67AE858ul, 0x4CAA73B2ul);
		k0^=kl0, k1^=kl1, k2^=kl2, k3^=kl3;
		DOUBLE_ROUND(k0, k1, k2, k3, 0xC6EF372Ful, 0xE94F82BEul, 0x54FF53A5ul, 0xF1D36F1Cul);
		PREPARE_KS_ROUNDS;
		KS_ROUND(6, 15, 3);
		KS_ROUND(14, 45, 3);
		KS_ROUND(24, 77, 3);
		KS_ROUND(28, 94, 3);

		// KB: one more double round over KA XOR KR.
		k0^=kr0, k1^=kr1, k2^=kr2, k3^=kr3;
		DOUBLE_ROUND(k0, k1, k2, k3, 0x10E527FAul, 0xDE682D1Dul, 0xB05688C2ul, 0xB3E6C1FDul);
		PREPARE_KS_ROUNDS;
		KS_ROUND_0(2);
		KS_ROUND(10, 30, 3);
		KS_ROUND(20, 60, 3);
		KS_ROUND(32, 47, 3);
	}
}