void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	unsigned long *ptr = gdb_regs;
	int reg;

	for (reg = 0; reg < 32; reg++)
		UNPACK64(regs->gpr[reg], ptr);

#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_SPE
	for (reg = 0; reg < 32; reg++)
		UNPACK64(current->thread.evr[reg], ptr);
#else
	ptr += 32;
#endif
#else
	/* fp registers not used by kernel, leave zero */
	ptr += 32 * 8 / sizeof(int);
#endif

	UNPACK64(regs->nip, ptr);
	UNPACK64(regs->msr, ptr);
	UNPACK32(regs->ccr, ptr);
	UNPACK64(regs->link, ptr);
	UNPACK64(regs->ctr, ptr);
	UNPACK32(regs->xer, ptr);

	BUG_ON((unsigned long)ptr >
	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
void sha256_final(sha256_ctx *ctx, unsigned char *digest)
{
    uint32 block_nb;
    uint32 pm_len;
    uint32 len_b;
    int i;

    block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                     < (ctx->len % SHA256_BLOCK_SIZE)));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 6;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    sha256_transf(ctx, ctx->block, block_nb);

    for (i = 0; i < 8; i++) {
        UNPACK32(ctx->h[i], &digest[i << 2]);
    }
}
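The SHA routines in this section use PACK32/UNPACK32/UNPACK64 without defining them; in the sha2-style reference code these variants derive from, they are big-endian load/store helpers (unlike the kernel-local UNPACK64/UNPACK32 in the kgdb function above, which simply read words out of the gdb register buffer). A sketch of their typical shape follows; the exact integer type names (uint8/uint32, u32, uint8_t, ...) differ between the projects shown, so treat this as an assumption rather than any one project's header.

/* Assumed byte-order helpers for the SHA code in this section (sketch). */
#define UNPACK32(x, str)                         \
    {                                            \
        *((str) + 3) = (uint8_t)((x)      );     \
        *((str) + 2) = (uint8_t)((x) >>  8);     \
        *((str) + 1) = (uint8_t)((x) >> 16);     \
        *((str) + 0) = (uint8_t)((x) >> 24);     \
    }

#define UNPACK64(x, str)                         \
    {                                            \
        *((str) + 7) = (uint8_t)((x)      );     \
        *((str) + 6) = (uint8_t)((x) >>  8);     \
        *((str) + 5) = (uint8_t)((x) >> 16);     \
        *((str) + 4) = (uint8_t)((x) >> 24);     \
        *((str) + 3) = (uint8_t)((x) >> 32);     \
        *((str) + 2) = (uint8_t)((x) >> 40);     \
        *((str) + 1) = (uint8_t)((x) >> 48);     \
        *((str) + 0) = (uint8_t)((x) >> 56);     \
    }

#define PACK32(str, x)                           \
    {                                            \
        *(x) = ((uint32_t)*((str) + 3)      )    \
             | ((uint32_t)*((str) + 2) <<  8)    \
             | ((uint32_t)*((str) + 1) << 16)    \
             | ((uint32_t)*((str) + 0) << 24);   \
    }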
void sha256_final(sha256_ctx *ctx, unsigned char *digest)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;

#ifndef UNROLL_LOOPS
    int i;
#endif

    block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                     < (ctx->len % SHA256_BLOCK_SIZE)));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 6;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    sha256_transf(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
    for (i = 0; i < 8; i++) {
        UNPACK32(ctx->h[i], &digest[i << 2]);
    }
#else
    UNPACK32(ctx->h[0], &digest[ 0]);
    UNPACK32(ctx->h[1], &digest[ 4]);
    UNPACK32(ctx->h[2], &digest[ 8]);
    UNPACK32(ctx->h[3], &digest[12]);
    UNPACK32(ctx->h[4], &digest[16]);
    UNPACK32(ctx->h[5], &digest[20]);
    UNPACK32(ctx->h[6], &digest[24]);
    UNPACK32(ctx->h[7], &digest[28]);
#endif
}
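A minimal usage sketch for the variant above, assuming the companion sha256_init() and sha256_update() functions and a "sha2.h" header from the same library; neither is shown in this section, so the header name and companion signatures are assumptions.

/* Hypothetical driver: hash "abc" and print the hex digest. */
#include <stdio.h>
#include <string.h>
#include "sha2.h"   /* assumed header providing sha256_ctx and the API below */

int main(void)
{
    unsigned char digest[32];   /* SHA-256 produces a 32-byte digest */
    sha256_ctx ctx;
    const char *msg = "abc";
    int i;

    sha256_init(&ctx);                                   /* assumed companion */
    sha256_update(&ctx, (const unsigned char *)msg,
                  (unsigned int)strlen(msg));            /* assumed companion */
    sha256_final(&ctx, digest);                          /* the function above */

    for (i = 0; i < 32; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}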
void SHA256Final(SHA256Context *ctx, unsigned char *digest)
{
    unsigned int block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                                  < (ctx->len % SHA256_BLOCK_SIZE)));
    unsigned int len_b = (ctx->tot_len + ctx->len) << 3;
    unsigned int pm_len = block_nb << 6;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    SHA256Transform(ctx, ctx->block, block_nb);

    for (int i = 0; i < 8; i++)
        UNPACK32(ctx->h[i], &digest[i << 2]);
}
void sha384_final(sha384_ctx *ctx, unsigned char *digest)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;

#ifndef UNROLL_LOOPS
    int i;
#endif

    block_nb = (1 + ((SHA384_BLOCK_SIZE - 17)
                     < (ctx->len % SHA384_BLOCK_SIZE)));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 7;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    sha512_transf(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
    for (i = 0; i < 6; i++) {
        UNPACK64(ctx->h[i], &digest[i << 3]);
    }
#else
    UNPACK64(ctx->h[0], &digest[ 0]);
    UNPACK64(ctx->h[1], &digest[ 8]);
    UNPACK64(ctx->h[2], &digest[16]);
    UNPACK64(ctx->h[3], &digest[24]);
    UNPACK64(ctx->h[4], &digest[32]);
    UNPACK64(ctx->h[5], &digest[40]);
#endif /* !UNROLL_LOOPS */
}
void sha512_update_final(sha512_ctx *ctx, const unsigned char *message,
                         unsigned int len, unsigned char *digest)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;

#ifndef UNROLL_LOOPS
    int i;
#endif

    /* Absorb the remaining message bytes straight into the context buffer. */
    memcpy(&ctx->block[ctx->len], message, len);
    ctx->len += len;

    /* Unused streaming-update path, retained as a comment:
     *
     * new_len = len - rem_len;
     * block_nb = new_len / SHA512_BLOCK_SIZE;
     * shifted_message = message + rem_len;
     * sha512_transf(ctx, ctx->block, 1);
     * sha512_transf(ctx, shifted_message, block_nb);
     * rem_len = new_len % SHA512_BLOCK_SIZE;
     * memcpy(ctx->block, &shifted_message[block_nb << 7], rem_len);
     * ctx->len = rem_len;
     * ctx->tot_len += (block_nb + 1) << 7;
     */

    block_nb = 1 + ((SHA512_BLOCK_SIZE - 17) < (ctx->len % SHA512_BLOCK_SIZE));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 7;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    sha512_transf(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
    for (i = 0; i < 8; i++) {
        UNPACK64(ctx->h[i], &digest[i << 3]);
    }
#else
    UNPACK64(ctx->h[0], &digest[ 0]);
    UNPACK64(ctx->h[1], &digest[ 8]);
    UNPACK64(ctx->h[2], &digest[16]);
    UNPACK64(ctx->h[3], &digest[24]);
    UNPACK64(ctx->h[4], &digest[32]);
    UNPACK64(ctx->h[5], &digest[40]);
    UNPACK64(ctx->h[6], &digest[48]);
    UNPACK64(ctx->h[7], &digest[56]);
#endif /* !UNROLL_LOOPS */
}
uint8_t *SHA256_final(struct sha256_ctx *ctx)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;
    int i;

    block_nb = (1 + ((SHA256_BLOCK_SIZE - 9)
                     < (ctx->len % SHA256_BLOCK_SIZE)));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 6;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    SHA256_transform(ctx, ctx->block, block_nb);

    for (i = 0; i < 8; i++)
        UNPACK32(ctx->h[i], &ctx->buf[i << 2]);

    return ctx->buf;
}
void vb2_sha256_finalize(struct vb2_sha256_context *ctx, uint8_t *digest)
{
    unsigned int block_nb;
    unsigned int pm_size;
    unsigned int size_b;

#ifndef UNROLL_LOOPS
    int i;
#endif

    block_nb = (1 + ((VB2_SHA256_BLOCK_SIZE - 9)
                     < (ctx->size % VB2_SHA256_BLOCK_SIZE)));

    size_b = (ctx->total_size + ctx->size) << 3;
    pm_size = block_nb << 6;

    memset(ctx->block + ctx->size, 0, pm_size - ctx->size);
    ctx->block[ctx->size] = 0x80;
    UNPACK32(size_b, ctx->block + pm_size - 4);

    vb2_sha256_transform(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
    for (i = 0; i < 8; i++) {
        UNPACK32(ctx->h[i], &digest[i << 2]);
    }
#else
    UNPACK32(ctx->h[0], &digest[ 0]);
    UNPACK32(ctx->h[1], &digest[ 4]);
    UNPACK32(ctx->h[2], &digest[ 8]);
    UNPACK32(ctx->h[3], &digest[12]);
    UNPACK32(ctx->h[4], &digest[16]);
    UNPACK32(ctx->h[5], &digest[20]);
    UNPACK32(ctx->h[6], &digest[24]);
    UNPACK32(ctx->h[7], &digest[28]);
#endif /* !UNROLL_LOOPS */
}
void vb2_sha256_extend(const uint8_t *from, const uint8_t *by, uint8_t *to)
{
    struct vb2_sha256_context dc;
    int i;

    /* Load the starting digest |from| as the chaining value (big-endian). */
    for (i = 0; i < 8; i++) {
        PACK32(from, &dc.h[i]);
        from += 4;
    }

    /* Run a single compression over the 64-byte block |by|. */
    vb2_sha256_transform(&dc, by, 1);

    /* Write the extended digest back out big-endian. */
    for (i = 0; i < 8; i++) {
        UNPACK32(dc.h[i], to);
        to += 4;
    }
}
void hb_sha224_final(hb_sha224_ctx *ctx, unsigned char *digest)
{
    HB_SIZE block_nb;
    HB_SIZE pm_len;
    HB_SIZE len_b;

#ifndef UNROLL_LOOPS
    int i;
#endif

    block_nb = (1 + ((HB_SHA224_BLOCK_SIZE - 9)
                     < (ctx->len % HB_SHA224_BLOCK_SIZE)));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 6;

    memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    sha256_transf(ctx, ctx->block, block_nb);

#ifndef UNROLL_LOOPS
    for (i = 0; i < 7; i++) {
        UNPACK32(ctx->h[i], &digest[i << 2]);
    }
#else
    UNPACK32(ctx->h[0], &digest[ 0]);
    UNPACK32(ctx->h[1], &digest[ 4]);
    UNPACK32(ctx->h[2], &digest[ 8]);
    UNPACK32(ctx->h[3], &digest[12]);
    UNPACK32(ctx->h[4], &digest[16]);
    UNPACK32(ctx->h[5], &digest[20]);
    UNPACK32(ctx->h[6], &digest[24]);
#endif /* !UNROLL_LOOPS */
}
uint8_t* avb_sha512_final(AvbSHA512Ctx* ctx)
{
    unsigned int block_nb;
    unsigned int pm_len;
    unsigned int len_b;

#ifndef UNROLL_LOOPS_SHA512
    int i;
#endif

    block_nb = 1 + ((AVB_SHA512_BLOCK_SIZE - 17)
                    < (ctx->len % AVB_SHA512_BLOCK_SIZE));

    len_b = (ctx->tot_len + ctx->len) << 3;
    pm_len = block_nb << 7;

    avb_memset(ctx->block + ctx->len, 0, pm_len - ctx->len);
    ctx->block[ctx->len] = 0x80;
    UNPACK32(len_b, ctx->block + pm_len - 4);

    SHA512_transform(ctx, ctx->block, block_nb);

#ifdef UNROLL_LOOPS_SHA512
    UNPACK64(ctx->h[0], &ctx->buf[0]);
    UNPACK64(ctx->h[1], &ctx->buf[8]);
    UNPACK64(ctx->h[2], &ctx->buf[16]);
    UNPACK64(ctx->h[3], &ctx->buf[24]);
    UNPACK64(ctx->h[4], &ctx->buf[32]);
    UNPACK64(ctx->h[5], &ctx->buf[40]);
    UNPACK64(ctx->h[6], &ctx->buf[48]);
    UNPACK64(ctx->h[7], &ctx->buf[56]);
#else
    for (i = 0; i < 8; i++)
        UNPACK64(ctx->h[i], &ctx->buf[i << 3]);
#endif /* UNROLL_LOOPS_SHA512 */

    return ctx->buf;
}
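All of the finalize variants above share the same padding arithmetic: block_nb = 1 + ((BLOCK_SIZE - N) < (len % BLOCK_SIZE)) decides whether the mandatory 0x80 marker plus the length field still fit after the buffered bytes (one final block) or spill into a second block. N is 9 for the 64-byte-block SHA-224/256 code (1 marker byte + 8-byte length) and 17 for the 128-byte-block SHA-384/512 code (1 + 16). A small standalone check of that expression, independent of any of the implementations above:

/* Standalone sketch verifying the final-block count used above. */
#include <stdio.h>

static unsigned int final_blocks(unsigned int buffered, unsigned int block_size,
                                 unsigned int reserve /* 9 or 17 */)
{
    return 1 + ((block_size - reserve) < (buffered % block_size));
}

int main(void)
{
    /* 55 buffered bytes still leave room for 0x80 + 8-byte length: 1 block. */
    printf("%u\n", final_blocks(55, 64, 9));    /* prints 1 */
    /* 56 buffered bytes do not: padding spills into a second block. */
    printf("%u\n", final_blocks(56, 64, 9));    /* prints 2 */
    printf("%u\n", final_blocks(111, 128, 17)); /* SHA-512 boundary: prints 1 */
    printf("%u\n", final_blocks(112, 128, 17)); /* SHA-512 spill: prints 2 */
    return 0;
}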