/*
 * Load one candidate key into the interleaved SIMD HMAC pad buffers.
 * The key is XORed into both ipad and opad for SIMD lane `index`.
 * NOTE(review): this fragment appears truncated; the tail of the
 * word-wise XOR loop and the scalar (#else) path are not visible here.
 */
static void set_key(char *key, int index)
{
	int len;
#ifdef SIMD_COEF_64
	/* Lane pointers into the interleaved ipad/opad SIMD buffers. */
	ARCH_WORD_64 *ipadp = (ARCH_WORD_64*)&ipad[GETPOS(7, index)];
	ARCH_WORD_64 *opadp = (ARCH_WORD_64*)&opad[GETPOS(7, index)];
	const ARCH_WORD_64 *keyp = (ARCH_WORD_64*)key;
	ARCH_WORD_64 temp;

	len = strlen(key);
	/* Keep a plaintext copy -- presumably for get_key(); verify against caller. */
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

#if PAD_SIZE < PLAINTEXT_LENGTH
	/* RFC 2104: a key longer than the pad is first reduced by hashing. */
	if (len > PAD_SIZE) {
		unsigned char k0[BINARY_SIZE];
		SHA512_CTX ctx;
		int i;

		SHA384_Init(&ctx);
		SHA384_Update(&ctx, key, len);
		SHA384_Final(k0, &ctx);

		keyp = (ARCH_WORD_64*)k0;
		/* XOR the digest into ipad/opad, one 64-bit word per iteration,
		   stepping SIMD_COEF_64 words to stay within this lane. */
		for(i = 0; i < BINARY_SIZE / 8; i++, ipadp += SIMD_COEF_64, opadp += SIMD_COEF_64) {
			temp = JOHNSWAP64(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
#endif
	/* Short key: XOR 8 bytes at a time.  After JOHNSWAP64 the first key
	   byte sits in the top byte of temp, so the loop runs while the word
	   still starts with key data; the tests below catch a NUL terminator
	   inside the current word and XOR only the valid prefix. */
	while(((temp = JOHNSWAP64(*keyp++)) & 0xff00000000000000)) {
		if (!(temp & 0x00ff000000000000) || !(temp & 0x0000ff0000000000)) {
			/* 1-2 valid bytes left: XOR the top 16 bits only. */
			((unsigned short*)ipadp)[3] ^= (unsigned short)(temp >> 48);
			((unsigned short*)opadp)[3] ^= (unsigned short)(temp >> 48);
			break;
		}
		if (!(temp & 0x00ff00000000) || !(temp & 0x0000ff000000)) {
			/* 3-4 valid bytes left: XOR the top 32 bits only. */
			((ARCH_WORD_32*)ipadp)[1] ^= (ARCH_WORD_32)(temp >> 32);
			((ARCH_WORD_32*)opadp)[1] ^= (ARCH_WORD_32)(temp >> 32);
			break;
		}
/*
 * Byte-swap an array of 64-bit words in place.
 *
 * x:    base of the word array
 * size: length in BYTES; any trailing partial word (size not a multiple
 *       of 8) is left untouched, as before.
 *
 * Fix: index is size_t now, removing the signed/unsigned comparison the
 * old `int i` made against the size_t quotient size / sizeof(*x).
 */
static void alter_endianity_64(uint64_t *x, unsigned int size)
{
	size_t i, n = size / sizeof(*x);

	for (i = 0; i < n; i++)
		x[i] = JOHNSWAP64(x[i]);
}
static void *get_binary(char *ciphertext) { static union { uint64_t swp[BINARY_SIZE/8]; uint8_t ret[BINARY_SIZE]; }u; int i = 0; //int len; char *p,delim; delim = strchr(ciphertext, '.') ? '.' : '$'; p = strrchr(ciphertext, delim) + 1; //len = strlen(p) / 2; //for (i = 0; i < len && *p; i++) { for (i = 0; i < BINARY_SIZE && *p; i++) { u.ret[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } // swap here, so we do not have to swap at end of GPU code. for (i = 0; i < BINARY_SIZE/8; ++i) u.swp[i] = JOHNSWAP64(u.swp[i]); return u.ret; }
static void *get_binary(char *ciphertext) { static union { unsigned char c[MAX_BINARY_SIZE]; ARCH_WORD_64 dummy; } buf; unsigned char *out = buf.c; char *p; int i, len; char delim; delim = strchr(ciphertext, '.') ? '.' : '$'; p = strrchr(ciphertext, delim) + 1; len = strlen(p) / 2; for (i = 0; i < len && *p; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #if !ARCH_LITTLE_ENDIAN for (i = 0; i < len/sizeof(ARCH_WORD_64); ++i) { ((ARCH_WORD_64*)out)[i] = JOHNSWAP64(((ARCH_WORD_64*)out)[i]); } #endif return out; }
/*
 * Byte-swap `count` 64-bit words at _x, in place.
 * When the platform cannot do unaligned 64-bit accesses and the buffer
 * is misaligned, each 8-byte group is reversed one byte at a time.
 */
void alter_endianity_w64(void *_x, unsigned int count)
{
	ARCH_WORD_64 *x = (ARCH_WORD_64 *)_x;
	unsigned int i;

#if ARCH_ALLOWS_UNALIGNED
	for (i = 0; i < count; i++)
		x[i] = JOHNSWAP64(x[i]);
#else
	if (is_aligned(x, sizeof(ARCH_WORD_64))) {
		/* Aligned: swap word-wise. */
		for (i = 0; i < count; i++)
			x[i] = JOHNSWAP64(x[i]);
	} else {
		/* Misaligned: reverse each 8-byte group byte-wise. */
		unsigned char *b = (unsigned char *)_x;

		for (i = 0; i < count; i++, b += 8) {
			unsigned char t;

			t = b[0]; b[0] = b[7]; b[7] = t;
			t = b[1]; b[1] = b[6]; b[6] = t;
			t = b[2]; b[2] = b[5]; b[5] = t;
			t = b[3]; b[3] = b[4]; b[4] = t;
		}
	}
#endif
}
/*
 * Derive a key from every queued candidate password with
 * PBKDF2-HMAC-SHA512 and test it with ckcdecrypt(), recording the
 * result in cracked[].  With SIMD support a whole SSE_GROUP_SZ_SHA512
 * group is derived per call; candidates are processed
 * MAX_KEYS_PER_CRYPT at a time, optionally across OpenMP threads.
 * Returns the number of candidates processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SSE_GROUP_SZ_SHA512
		int lens[SSE_GROUP_SZ_SHA512], i;
		unsigned char *pin[SSE_GROUP_SZ_SHA512];
		uint64_t key[SSE_GROUP_SZ_SHA512][8];
		/* Union view: pbkdf2_sha512_sse() takes the address of a byte
		   pointer, while each lane's output pointer aims at key[i]. */
		union {
			ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512];
			unsigned char *poutc;
		} x;

		/* Gather the group's key pointers and lengths. */
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			x.pout[i] = (ARCH_WORD_32*)(key[i]);
		}
		pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, &(x.poutc), HASH_LENGTH, 0);
		for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i)
			cracked[index+i] = ckcdecrypt((unsigned char*)(key[i]));
#else
		uint64_t key[8];

		pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, cur_salt->iterations, (unsigned char*)key, HASH_LENGTH, 0);
#if ARCH_LITTLE_ENDIAN==0
		{
			/* NOTE(review): scalar pbkdf2_sha512 output apparently needs
			   word-swapping on big-endian hosts before ckcdecrypt --
			   confirm against the pbkdf2 implementation. */
			int j;
			for (j = 0; j < 8; ++j)
				key[j] = JOHNSWAP64(key[j]);
		}
#endif
		cracked[index] = ckcdecrypt((unsigned char*)key);
#endif
	}
	return count;
}
/* Check the FULL binary, just for good measure. There is no chance we'll have a false positive here but this function is not performance sensitive. */ static int cmp_exact(char *source, int index) { int i = 0, len, result; char *p; char delim; unsigned char *binary, *crypt; delim = strchr(source, '.') ? '.' : '$'; p = strrchr(source, delim) + 1; len = strlen(p) / 2; if (len == BINARY_SIZE) return 1; binary = mem_alloc(len); crypt = mem_alloc(len); while (*p) { binary[i++] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } #if !ARCH_LITTLE_ENDIAN for (i = 0; i < len/sizeof(ARCH_WORD_64); ++i) { ((ARCH_WORD_64*)binary)[i] = JOHNSWAP64(((ARCH_WORD_64*)binary)[i]); } #endif pbkdf2_sha512((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->length, cur_salt->rounds, crypt, len, 0); result = !memcmp(binary, crypt, len); MEM_FREE(binary); MEM_FREE(crypt); if (!result) fprintf(stderr, "\n%s: Warning: Partial match for '%s'.\n" "This is a bug or a malformed input line of:\n%s\n", FORMAT_LABEL, saved_key[index], source); return result; }
/*
 * Store one candidate key for lane `index`.
 * SIMD path: pack the key 8 bytes at a time into the interleaved
 * saved_key buffer, append the 0x80 padding byte, clear any residue
 * from a previous (longer) key, and record the bit length in the last
 * word of the block.  Scalar path: plain truncating copy.
 */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_64
	const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)key;
	/* Base of this lane's interleaved 64-bit buffer. */
	ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
	ARCH_WORD_64 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_64 temp;

	len = 0;
	/* Read the key a (little-endian) word at a time; NOTE(review): this
	   may read up to 7 bytes past the NUL, which assumes the key buffer
	   is padded -- confirm against the caller's allocation. */
	while((unsigned char)(temp = *wkey++)) {
		/* Find the NUL inside this word; keep the bytes before it,
		   append the 0x80 pad in the following byte lane, store the
		   word big-endian, and jump to cleanup. */
		if (!(temp & 0xff00)) {
			*keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24));
			len+=3;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32));
			len+=4;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40));
			len+=5;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48));
			len+=6;
			goto key_cleaning;
		}
		if (!(temp & 0xff00000000000000ULL)) {
			*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56));
			len+=7;
			goto key_cleaning;
		}
		/* All 8 bytes are key data: store and advance to the lane's
		   next interleaved word. */
		*keybuf_word = JOHNSWAP64(temp);
		len += 8;
		keybuf_word += SIMD_COEF_64;
	}
	/* Key length was a multiple of 8: pad byte starts a fresh word. */
	*keybuf_word = 0x8000000000000000ULL;

key_cleaning:
	/* Zero leftover words from a previous, longer key. */
	keybuf_word += SIMD_COEF_64;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_64;
	}
	/* SHA-512 message length in bits, in the block's last word. */
	keybuffer[15*SIMD_COEF_64] = len << 3;
#else
	int len = strlen(key);

	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
#endif
}
/*
 * SHA-512 compression function: fold one 128-byte message block into
 * the running state ctx->h[0..7].
 *
 * data must hold exactly 128 bytes.  On little-endian hosts, pass
 * perform_endian_swap != 0 when the block is raw big-endian message
 * bytes; pass 0 when the caller already has the 16 words in host
 * order (then the block is copied straight into W).  On big-endian
 * builds the swap branch is compiled out and memcpy is always used.
 *
 * R0/R1 are the sigma schedule functions and R() is one unrolled
 * round (macros defined elsewhere in this file).
 */
void sha512_hash_block(sha512_ctx *ctx, const unsigned char data[128], int perform_endian_swap)
{
	ARCH_WORD_64 A, B, C, D, E, F, G, H, tmp, W[80];
	int i;

#if ARCH_LITTLE_ENDIAN
	if (perform_endian_swap) {
		/* NOTE(review): this 64-bit load through a cast assumes data is
		   suitably aligned -- confirm for this platform. */
		for(i = 0; i < 16; i++) {
			W[i] = JOHNSWAP64(*((ARCH_WORD_64 *)&(data[i<<3])));
		}
	} else
#endif
	{
		i = 16;
		memcpy(W, data, 128);
	}

	/* Message schedule expansion, W[16..79]. */
	for(; i < 80; i++)
		W[i] = R1(W[i - 2]) + W[i - 7] + R0(W[i - 15]) + W[i - 16];

	A = ctx->h[0];
	B = ctx->h[1];
	C = ctx->h[2];
	D = ctx->h[3];
	E = ctx->h[4];
	F = ctx->h[5];
	G = ctx->h[6];
	H = ctx->h[7];

	/* 80 fully unrolled rounds; the arguments rotate one place per
	   round (period 8) instead of shuffling the working variables. */
	R( 0, A, B, C, D, E, F, G, H);
	R( 1, H, A, B, C, D, E, F, G);
	R( 2, G, H, A, B, C, D, E, F);
	R( 3, F, G, H, A, B, C, D, E);
	R( 4, E, F, G, H, A, B, C, D);
	R( 5, D, E, F, G, H, A, B, C);
	R( 6, C, D, E, F, G, H, A, B);
	R( 7, B, C, D, E, F, G, H, A);
	R( 8, A, B, C, D, E, F, G, H);
	R( 9, H, A, B, C, D, E, F, G);
	R(10, G, H, A, B, C, D, E, F);
	R(11, F, G, H, A, B, C, D, E);
	R(12, E, F, G, H, A, B, C, D);
	R(13, D, E, F, G, H, A, B, C);
	R(14, C, D, E, F, G, H, A, B);
	R(15, B, C, D, E, F, G, H, A);
	R(16, A, B, C, D, E, F, G, H);
	R(17, H, A, B, C, D, E, F, G);
	R(18, G, H, A, B, C, D, E, F);
	R(19, F, G, H, A, B, C, D, E);
	R(20, E, F, G, H, A, B, C, D);
	R(21, D, E, F, G, H, A, B, C);
	R(22, C, D, E, F, G, H, A, B);
	R(23, B, C, D, E, F, G, H, A);
	R(24, A, B, C, D, E, F, G, H);
	R(25, H, A, B, C, D, E, F, G);
	R(26, G, H, A, B, C, D, E, F);
	R(27, F, G, H, A, B, C, D, E);
	R(28, E, F, G, H, A, B, C, D);
	R(29, D, E, F, G, H, A, B, C);
	R(30, C, D, E, F, G, H, A, B);
	R(31, B, C, D, E, F, G, H, A);
	R(32, A, B, C, D, E, F, G, H);
	R(33, H, A, B, C, D, E, F, G);
	R(34, G, H, A, B, C, D, E, F);
	R(35, F, G, H, A, B, C, D, E);
	R(36, E, F, G, H, A, B, C, D);
	R(37, D, E, F, G, H, A, B, C);
	R(38, C, D, E, F, G, H, A, B);
	R(39, B, C, D, E, F, G, H, A);
	R(40, A, B, C, D, E, F, G, H);
	R(41, H, A, B, C, D, E, F, G);
	R(42, G, H, A, B, C, D, E, F);
	R(43, F, G, H, A, B, C, D, E);
	R(44, E, F, G, H, A, B, C, D);
	R(45, D, E, F, G, H, A, B, C);
	R(46, C, D, E, F, G, H, A, B);
	R(47, B, C, D, E, F, G, H, A);
	R(48, A, B, C, D, E, F, G, H);
	R(49, H, A, B, C, D, E, F, G);
	R(50, G, H, A, B, C, D, E, F);
	R(51, F, G, H, A, B, C, D, E);
	R(52, E, F, G, H, A, B, C, D);
	R(53, D, E, F, G, H, A, B, C);
	R(54, C, D, E, F, G, H, A, B);
	R(55, B, C, D, E, F, G, H, A);
	R(56, A, B, C, D, E, F, G, H);
	R(57, H, A, B, C, D, E, F, G);
	R(58, G, H, A, B, C, D, E, F);
	R(59, F, G, H, A, B, C, D, E);
	R(60, E, F, G, H, A, B, C, D);
	R(61, D, E, F, G, H, A, B, C);
	R(62, C, D, E, F, G, H, A, B);
	R(63, B, C, D, E, F, G, H, A);
	R(64, A, B, C, D, E, F, G, H);
	R(65, H, A, B, C, D, E, F, G);
	R(66, G, H, A, B, C, D, E, F);
	R(67, F, G, H, A, B, C, D, E);
	R(68, E, F, G, H, A, B, C, D);
	R(69, D, E, F, G, H, A, B, C);
	R(70, C, D, E, F, G, H, A, B);
	R(71, B, C, D, E, F, G, H, A);
	R(72, A, B, C, D, E, F, G, H);
	R(73, H, A, B, C, D, E, F, G);
	R(74, G, H, A, B, C, D, E, F);
	R(75, F, G, H, A, B, C, D, E);
	R(76, E, F, G, H, A, B, C, D);
	R(77, D, E, F, G, H, A, B, C);
	R(78, C, D, E, F, G, H, A, B);
	R(79, B, C, D, E, F, G, H, A);

	/* Davies-Meyer feed-forward into the chaining state. */
	ctx->h[0] += A;
	ctx->h[1] += B;
	ctx->h[2] += C;
	ctx->h[3] += D;
	ctx->h[4] += E;
	ctx->h[5] += F;
	ctx->h[6] += G;
	ctx->h[7] += H;
}