void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(thisValue, regT0);
    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationLoadVarargs, regT0, regT1, firstFreeRegister);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
/* setup needed for both encrypt & decrypt */
static void setup(u64 mlen, u64 adlen, const byte *npub, const byte *k)
{
    assert(k != NULL);
    assert(npub != NULL);

    /* run ChaCha key schedule and then ChaCha */
    chacha_keysetup(chacha_ctx, (byte *) k, 256, 64);
    chacha_ivsetup(chacha_ctx, npub);
    chacha_round();

    /* set up H, I & finalblock for authentication */
    chacha_copy(H, 1);
    memcpy(I, H, AUTH_BYTES);
    store64((byte *) finalblock, 8 * adlen);
    store64(((byte *) finalblock) + 8, 8 * mlen);

    /* random init for counter */
    chacha_copy(counter, 1);
    iter_count = 0;

    /* use ChaCha to generate aes round keys */
    chacha_copy(aes_rk, AES_RKEYS);

#ifdef TESTING
    printf("setup: called for %lld text and %lld ad\n", mlen, adlen);
    printf("setup: round keys start with %08x %08x\n", aes_rk[0], aes_rk[1]);
#endif
}
int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec,
                        const unsigned char *npub,
                        const unsigned char *k)
{
    ICESTATE S;
    unsigned int frameBit;

    initState128a(S, k, npub);

    /* ciphertext length is plaintext length + size of tag and nsec */
    *clen = mlen + ICEPOLETAGLEN;

    /* secret message number is of zero length */
    frameBit = 0;
    processIceBlock(S, NULL, NULL, 0, frameBit);

    /* process auth-only associated data blocks */
    do {
        unsigned long long blocklen = 128;
        frameBit = (adlen <= blocklen ? 1 : 0); /* is it the last block? */
        if (adlen < blocklen) {
            blocklen = adlen;
        }
        processIceBlock(S, ad, NULL, blocklen, frameBit);
        ad += blocklen;
        adlen -= blocklen;
    } while (adlen > 0);

    /* process plaintext blocks to get the ciphertext */
    do {
        unsigned long long blocklen = 128;
        frameBit = (mlen <= blocklen ? 0 : 1);
        if (mlen < blocklen) {
            blocklen = mlen;
        }
        processIceBlock(S, m, &c, blocklen, frameBit);
        m += blocklen;
        mlen -= blocklen;
    } while (mlen > 0);

    /* store authentication tag at the end of the ciphertext */
    store64(c, S[0][0]);
    c += 8;
    store64(c, S[1][0]);

    return 0;
}
/*
 * Calculate the final hash and save it in md.
 * Always returns 1.
 */
int BLAKE2b_Final(unsigned char *md, BLAKE2B_CTX *c)
{
    uint8_t outbuffer[BLAKE2B_OUTBYTES] = {0};
    uint8_t *target = outbuffer;
    int iter = (c->outlen + 7) / 8;
    int i;

    /* Avoid writing to the temporary buffer if possible */
    if ((c->outlen % sizeof(c->h[0])) == 0)
        target = md;

    blake2b_set_lastblock(c);
    /* Padding */
    memset(c->buf + c->buflen, 0, sizeof(c->buf) - c->buflen);
    blake2b_compress(c, c->buf, c->buflen);

    /* Output full hash to buffer */
    for (i = 0; i < iter; ++i)
        store64(target + sizeof(c->h[i]) * i, c->h[i]);

    if (target != md)
        memcpy(md, target, c->outlen);

    OPENSSL_cleanse(c, sizeof(BLAKE2B_CTX));
    return 1;
}
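/*
 * Context for the BLAKE2 snippets in this section: they all rely on a
 * byte-order helper named store64 that writes a 64-bit word to a possibly
 * unaligned buffer in little-endian order. A minimal portable sketch,
 * modeled on the blake2-impl.h convention (an assumption; each project
 * ships its own version, sometimes with a memcpy fast path on
 * little-endian targets), could look like this:
 */
#include <stdint.h>

static inline void store64(void *dst, uint64_t w)
{
    uint8_t *p = (uint8_t *)dst;
    /* Emit the eight bytes least-significant first. */
    p[0] = (uint8_t)(w >>  0);
    p[1] = (uint8_t)(w >>  8);
    p[2] = (uint8_t)(w >> 16);
    p[3] = (uint8_t)(w >> 24);
    p[4] = (uint8_t)(w >> 32);
    p[5] = (uint8_t)(w >> 40);
    p[6] = (uint8_t)(w >> 48);
    p[7] = (uint8_t)(w >> 56);
}
/*
 * Note that the sha_done snippet further below uses a helper with the
 * opposite argument order (value first, destination second); the two are
 * unrelated despite the shared name.
 */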
int blake2b_final(blake2b_state *S, void *out, size_t outlen)
{
    uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
    unsigned int i;

    /* Sanity checks */
    if (S == NULL || out == NULL || outlen < S->outlen) {
        return -1;
    }

    /* Is this a reused state? */
    if (S->f[0] != 0) {
        return -1;
    }

    blake2b_increment_counter(S, S->buflen);
    blake2b_set_lastblock(S);
    memset(&S->buf[S->buflen], 0, BLAKE2B_BLOCKBYTES - S->buflen); /* Padding */
    blake2b_compress(S, S->buf);

    for (i = 0; i < 8; ++i) { /* Output full hash to temp buffer */
        store64(buffer + sizeof(S->h[i]) * i, S->h[i]);
    }

    memcpy(out, buffer, S->outlen);

    burn(buffer, sizeof(buffer));
    burn(S->buf, sizeof(S->buf));
    burn(S->h, sizeof(S->h));
    return 0;
}
/* Is this correct? */
int blake2b_final( blake2b_state *S, byte *out, byte outlen )
{
    byte buffer[BLAKE2B_OUTBYTES];
    int i;

    if ( S->buflen > BLAKE2B_BLOCKBYTES )
    {
        blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES );

        if ( blake2b_compress( S, S->buf ) < 0 ) return -1;

        S->buflen -= BLAKE2B_BLOCKBYTES;
        XMEMCPY( S->buf, S->buf + BLAKE2B_BLOCKBYTES, (wolfssl_word)S->buflen );
    }

    blake2b_increment_counter( S, S->buflen );
    blake2b_set_lastblock( S );
    XMEMSET( S->buf + S->buflen, 0,
             (wolfssl_word)(2 * BLAKE2B_BLOCKBYTES - S->buflen) ); /* Padding */

    if ( blake2b_compress( S, S->buf ) < 0 ) return -1;

    for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
        store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );

    XMEMCPY( out, buffer, outlen );

    return 0;
}
void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
static void sha_done(sha256_state& md, void* out)
{
    // Increase the length of the message
    md.length += md.curlen * 8;

    // Append the '1' bit
    md.buf[md.curlen++] = static_cast<unsigned char>(0x80);

    // If the length is currently above 56 bytes we append zeros then compress.
    // Then we can fall back to padding zeros and length encoding like normal.
    if (md.curlen > 56) {
        while (md.curlen < 64)
            md.buf[md.curlen++] = 0;
        sha_compress(md, md.buf);
        md.curlen = 0;
    }

    // Pad up to 56 bytes of zeroes
    while (md.curlen < 56)
        md.buf[md.curlen++] = 0;

    // Store length
    store64(md.length, md.buf + 56);
    sha_compress(md, md.buf);

    // Copy output
    for (int i = 0; i < 8; i++)
        store32(md.state[i], static_cast<unsigned char*>(out) + (4 * i));
}
/* Is this correct? */
int blake2b_final( blake2b_state *S, uint8_t *out, uint8_t outlen )
{
    uint8_t buffer[BLAKE2B_OUTBYTES] = {0};

    if( outlen > BLAKE2B_OUTBYTES ) return -1;

    if( S->buflen > BLAKE2B_BLOCKBYTES )
    {
        blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES );
        blake2b_compress( S, S->buf );
        S->buflen -= BLAKE2B_BLOCKBYTES;
        memcpy( S->buf, S->buf + BLAKE2B_BLOCKBYTES, S->buflen );
    }

    blake2b_increment_counter( S, S->buflen );
    blake2b_set_lastblock( S );
    memset( S->buf + S->buflen, 0, 2 * BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */
    blake2b_compress( S, S->buf );

    for( int i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
        store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );

    memcpy( out, buffer, outlen );
    return 0;
}
int blake2b_init_key( blake2b_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
    blake2b_param P[1];

    if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;

    if ( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

    P->digest_length = outlen;
    P->key_length    = keylen;
    P->fanout        = 1;
    P->depth         = 1;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth    = 0;
    P->inner_length  = 0;
    memset( P->reserved, 0, sizeof( P->reserved ) );
    memset( P->salt,     0, sizeof( P->salt ) );
    memset( P->personal, 0, sizeof( P->personal ) );

    if( blake2b_init_param( S, P ) < 0 ) return -1;

    {
        uint8_t block[BLAKE2B_BLOCKBYTES];
        memset( block, 0, BLAKE2B_BLOCKBYTES );
        memcpy( block, key, keylen );
        blake2b_update( S, block, BLAKE2B_BLOCKBYTES );
        secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
    }
    return 0;
}
int blake2b_final( blake2b_state *S, uint8_t *out, uint8_t outlen )
{
    if( !outlen || outlen > BLAKE2B_OUTBYTES ) {
        return -1;
    }

    if( S->buflen > BLAKE2B_BLOCKBYTES ) {
        blake2b_increment_counter( S, BLAKE2B_BLOCKBYTES );
        blake2b_compress( S, S->buf );
        S->buflen -= BLAKE2B_BLOCKBYTES;
        assert( S->buflen <= BLAKE2B_BLOCKBYTES );
        memcpy( S->buf, S->buf + BLAKE2B_BLOCKBYTES, S->buflen );
    }

    blake2b_increment_counter( S, S->buflen );
    blake2b_set_lastblock( S );
    memset( S->buf + S->buflen, 0, 2 * BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */
    blake2b_compress( S, S->buf );

#ifdef NATIVE_LITTLE_ENDIAN
    memcpy( out, &S->h[0], outlen );
#else
    {
        uint8_t buffer[BLAKE2B_OUTBYTES];
        int i;

        for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
            store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );

        memcpy( out, buffer, outlen );
    }
#endif
    return 0;
}
int blake2b_init_salt_personal( blake2b_state *S, const uint8_t outlen, const void *salt, const void *personal )
{
    blake2b_param P[1];

    if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;

    P->digest_length = outlen;
    P->key_length    = 0;
    P->fanout        = 1;
    P->depth         = 1;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth    = 0;
    P->inner_length  = 0;
    memset( P->reserved, 0, sizeof( P->reserved ) );

    if (salt != NULL) {
        blake2b_param_set_salt( P, (const uint8_t *) salt );
    } else {
        memset( P->salt, 0, sizeof( P->salt ) );
    }

    if (personal != NULL) {
        blake2b_param_set_personal( P, (const uint8_t *) personal );
    } else {
        memset( P->personal, 0, sizeof( P->personal ) );
    }

    return blake2b_init_param( S, P );
}
void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetupVarargsFrameFastCase(*this, regT1, regT0, regT1, regT2, firstVarArgOffset, slowCase);
        end.append(jump());
        slowCase.link(this);
    }

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(&info->maxNumArguments, regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, &info->maxNumArguments);
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
/* Initialize the parameter block with default values */
void blake2b_param_init(BLAKE2B_PARAM *P)
{
    P->digest_length = BLAKE2B_DIGEST_LENGTH;
    P->key_length = 0;
    P->fanout = 1;
    P->depth = 1;
    store32(P->leaf_length, 0);
    store64(P->node_offset, 0);
    P->node_depth = 0;
    P->inner_length = 0;
    memset(P->reserved, 0, sizeof(P->reserved));
    memset(P->salt, 0, sizeof(P->salt));
    memset(P->personal, 0, sizeof(P->personal));
}
BLAKE2_LOCAL_INLINE(int) blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
    blake2b_param P[1];
    P->digest_length = outlen;
    P->key_length = keylen;
    P->fanout = PARALLELISM_DEGREE;
    P->depth = 2;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth = 1;
    P->inner_length = BLAKE2B_OUTBYTES;
    memset( P->reserved, 0, sizeof( P->reserved ) );
    memset( P->salt, 0, sizeof( P->salt ) );
    memset( P->personal, 0, sizeof( P->personal ) );
    return blake2b_init_param( S, P );
}
int blake2b_init_key( blake2b_state *S, const byte outlen, const void *key, const byte keylen )
{
    blake2b_param P[1];

    if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;

    if ( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

    P->digest_length = outlen;
    P->key_length    = keylen;
    P->fanout        = 1;
    P->depth         = 1;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth    = 0;
    P->inner_length  = 0;
    XMEMSET( P->reserved, 0, sizeof( P->reserved ) );
    XMEMSET( P->salt,     0, sizeof( P->salt ) );
    XMEMSET( P->personal, 0, sizeof( P->personal ) );

    if( blake2b_init_param( S, P ) < 0 ) return -1;

    {
#ifdef WOLFSSL_SMALL_STACK
        byte* block;

        block = (byte*)XMALLOC(BLAKE2B_BLOCKBYTES, NULL, DYNAMIC_TYPE_TMP_BUFFER);

        if ( block == NULL ) return -1;
#else
        byte block[BLAKE2B_BLOCKBYTES];
#endif

        XMEMSET( block, 0, BLAKE2B_BLOCKBYTES );
        XMEMCPY( block, key, keylen );
        blake2b_update( S, block, BLAKE2B_BLOCKBYTES );
        secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from memory */

#ifdef WOLFSSL_SMALL_STACK
        XFREE(block, NULL, DYNAMIC_TYPE_TMP_BUFFER);
#endif
    }
    return 0;
}
int blake2b_init( blake2b_state *S, const byte outlen )
{
    blake2b_param P[1];

    if ( ( !outlen ) || ( outlen > BLAKE2B_OUTBYTES ) ) return -1;

    P->digest_length = outlen;
    P->key_length    = 0;
    P->fanout        = 1;
    P->depth         = 1;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth    = 0;
    P->inner_length  = 0;
    XMEMSET( P->reserved, 0, sizeof( P->reserved ) );
    XMEMSET( P->salt,     0, sizeof( P->salt ) );
    XMEMSET( P->personal, 0, sizeof( P->personal ) );

    return blake2b_init_param( S, P );
}
void AssemblyHelpers::callExceptionFuzz()
{
    if (!Options::enableExceptionFuzz())
        return;

    EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
    }

    // Set up one argument.
#if CPU(X86)
    poke(GPRInfo::callFrameRegister, 0);
#else
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
    move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
    call(GPRInfo::nonPreservedNonReturnGPR);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}
void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitGetVirtualRegister(arguments, regT1);
    Z_JITOperation_EJZZ sizeOperation;
    if (opcode == op_tail_call_forward_arguments)
        sizeOperation = operationSizeFrameForForwardArguments;
    else
        sizeOperation = operationSizeFrameForVarargs;
    callOperation(sizeOperation, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    F_JITOperation_EFJZZ setupOperation;
    if (opcode == op_tail_call_forward_arguments)
        setupOperation = operationSetupForwardArgumentsFrame;
    else
        setupOperation = operationSetupVarargsFrame;
    callOperation(setupOperation, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load32(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    store32(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
int blake2b_final( blake2b_state *S, void *out, size_t outlen )
{
    uint8_t buffer[BLAKE2B_OUTBYTES] = {0};
    size_t i;

    if( out == NULL || outlen < S->outlen )
        return -1;

    if( blake2b_is_lastblock( S ) )
        return -1;

    blake2b_increment_counter( S, S->buflen );
    blake2b_set_lastblock( S );
    memset( S->buf + S->buflen, 0, BLAKE2B_BLOCKBYTES - S->buflen ); /* Padding */
    blake2b_compress( S, S->buf );

    for( i = 0; i < 8; ++i ) /* Output full hash to temp buffer */
        store64( buffer + sizeof( S->h[i] ) * i, S->h[i] );

    memcpy( out, buffer, S->outlen );
    secure_zero_memory(buffer, sizeof(buffer));
    return 0;
}
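/*
 * A short usage sketch for the init/update/final flow used by the
 * finalizers above. It assumes the reference-style API in which
 * blake2b_init(S, outlen) records the digest length in the state and
 * blake2b_update takes (state, input, length); exact signatures and
 * return conventions vary slightly between the variants shown in this
 * section, so treat this as an illustration rather than a fixed contract.
 */
#include <stddef.h>
#include <stdint.h>

static int digest_message(const void *msg, size_t msglen, uint8_t md[64])
{
    blake2b_state S;

    if (blake2b_init(&S, 64) < 0)          /* 64-byte (512-bit) digest */
        return -1;
    if (blake2b_update(&S, msg, msglen) < 0)
        return -1;
    return blake2b_final(&S, md, 64);      /* writes digest, wipes state */
}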
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen,
                                      const void *salt, const void *personal,
                                      const uint8_t saltlen, const uint8_t personallen )
{
    blake2b_param P[1];
    P->digest_length = outlen;
    P->key_length = keylen;
    P->fanout = PARALLELISM_DEGREE;
    P->depth = 2;
    store32( &P->leaf_length, 0 );
    store64( &P->node_offset, 0 );
    P->node_depth = 1;
    P->inner_length = BLAKE2B_OUTBYTES;
    memset( P->reserved, 0, sizeof( P->reserved ) );

    if (saltlen)
        memcpy( P->salt, salt, BLAKE2B_SALTBYTES );
    else
        memset( P->salt, 0, sizeof( P->salt ) );

    if (personallen)
        memcpy( P->personal, personal, BLAKE2B_PERSONALBYTES );
    else
        memset( P->personal, 0, sizeof( P->personal ) );

    return blake2b_init_param( S, P );
}
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        if (firstVarArgOffset) {
            Jump sufficientArguments = branch32(GreaterThan, regT0, TrustedImm32(firstVarArgOffset + 1));
            move(TrustedImm32(1), regT0);
            Jump endVarArgs = jump();
            sufficientArguments.link(this);
            sub32(TrustedImm32(firstVarArgOffset), regT0);
            endVarArgs.link(this);
        }
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis
        move(regT0, regT1);
        add64(TrustedImm32(-firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        // regT1 now has the required frame size in Register units
        // Round regT1 to next multiple of stackAlignmentRegisters()
        add64(TrustedImm32(stackAlignmentRegisters() - 1), regT1);
        and64(TrustedImm32(~(stackAlignmentRegisters() - 1)), regT1);
        neg64(regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, (CallFrame::thisArgumentOffset() + firstVarArgOffset) * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, firstFreeRegister, firstVarArgOffset);
    move(returnValueGPR, stackPointerRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2, firstVarArgOffset);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
static PyObject *
py_blake2s_new_impl(PyTypeObject *type, PyObject *data, int digest_size,
                    Py_buffer *key, Py_buffer *salt, Py_buffer *person,
                    int fanout, int depth, unsigned long leaf_size,
                    unsigned long long node_offset, int node_depth,
                    int inner_size, int last_node)
/*[clinic end generated code: output=b95806be0514dcf7 input=f18d6efd9b9a1271]*/
{
    BLAKE2sObject *self = NULL;
    Py_buffer buf;

    self = new_BLAKE2sObject(type);
    if (self == NULL) {
        goto error;
    }

    /* Zero parameter block. */
    memset(&self->param, 0, sizeof(self->param));

    /* Set digest size. */
    if (digest_size <= 0 || digest_size > BLAKE2S_OUTBYTES) {
        PyErr_Format(PyExc_ValueError,
                "digest_size must be between 1 and %d bytes",
                BLAKE2S_OUTBYTES);
        goto error;
    }
    self->param.digest_length = digest_size;

    /* Set salt parameter. */
    if ((salt->obj != NULL) && salt->len) {
        if (salt->len > BLAKE2S_SALTBYTES) {
            PyErr_Format(PyExc_ValueError,
                "maximum salt length is %d bytes",
                BLAKE2S_SALTBYTES);
            goto error;
        }
        memcpy(self->param.salt, salt->buf, salt->len);
    }

    /* Set personalization parameter. */
    if ((person->obj != NULL) && person->len) {
        if (person->len > BLAKE2S_PERSONALBYTES) {
            PyErr_Format(PyExc_ValueError,
                "maximum person length is %d bytes",
                BLAKE2S_PERSONALBYTES);
            goto error;
        }
        memcpy(self->param.personal, person->buf, person->len);
    }

    /* Set tree parameters. */
    if (fanout < 0 || fanout > 255) {
        PyErr_SetString(PyExc_ValueError,
                "fanout must be between 0 and 255");
        goto error;
    }
    self->param.fanout = (uint8_t)fanout;

    if (depth <= 0 || depth > 255) {
        PyErr_SetString(PyExc_ValueError,
                "depth must be between 1 and 255");
        goto error;
    }
    self->param.depth = (uint8_t)depth;

    if (leaf_size > 0xFFFFFFFFU) {
        PyErr_SetString(PyExc_OverflowError, "leaf_size is too large");
        goto error;
    }
    // NB: Simple assignment here would be incorrect on big endian platforms.
    store32(&(self->param.leaf_length), leaf_size);

#ifdef HAVE_BLAKE2S
    if (node_offset > 0xFFFFFFFFFFFFULL) {
        /* maximum 2**48 - 1 */
        PyErr_SetString(PyExc_OverflowError, "node_offset is too large");
        goto error;
    }
    store48(&(self->param.node_offset), node_offset);
#else
    // NB: Simple assignment here would be incorrect on big endian platforms.
    store64(&(self->param.node_offset), node_offset);
#endif

    if (node_depth < 0 || node_depth > 255) {
        PyErr_SetString(PyExc_ValueError,
                "node_depth must be between 0 and 255");
        goto error;
    }
    self->param.node_depth = node_depth;

    if (inner_size < 0 || inner_size > BLAKE2S_OUTBYTES) {
        PyErr_Format(PyExc_ValueError,
                "inner_size must be between 0 and %d",
                BLAKE2S_OUTBYTES);
        goto error;
    }
    self->param.inner_length = inner_size;

    /* Set key length. */
    if ((key->obj != NULL) && key->len) {
        if (key->len > BLAKE2S_KEYBYTES) {
            PyErr_Format(PyExc_ValueError,
                "maximum key length is %d bytes",
                BLAKE2S_KEYBYTES);
            goto error;
        }
        self->param.key_length = (uint8_t)key->len;
    }

    /* Initialize hash state. */
    if (blake2s_init_param(&self->state, &self->param) < 0) {
        PyErr_SetString(PyExc_RuntimeError,
                "error initializing hash state");
        goto error;
    }

    /* Set last node flag (must come after initialization). */
    self->state.last_node = last_node;

    /* Process key block if any. */
    if (self->param.key_length) {
        uint8_t block[BLAKE2S_BLOCKBYTES];
        memset(block, 0, sizeof(block));
        memcpy(block, key->buf, key->len);
        blake2s_update(&self->state, block, sizeof(block));
        secure_zero_memory(block, sizeof(block));
    }

    /* Process initial data if any. */
    if (data != NULL) {
        GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error);

        if (buf.len >= HASHLIB_GIL_MINSIZE) {
            Py_BEGIN_ALLOW_THREADS
            blake2s_update(&self->state, buf.buf, buf.len);
            Py_END_ALLOW_THREADS
        } else {
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
    struct fptr **fptrs;
    Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

    switch (ELF_R_TYPE(rela->r_info)) {
    case R_IA_64_REL64LSB:
        /*
         * We handle rtld's relocations in rtld_start.S
         */
        if (obj != obj_rtld)
            store64(where, load64(where) + (Elf_Addr) obj->relocbase);
        break;

    case R_IA_64_DIR64LSB: {
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        Elf_Addr target;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
            cache, lockstate);
        if (def == NULL)
            return -1;

        target = (def->st_shndx != SHN_UNDEF)
            ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
        store64(where, target + rela->r_addend);
        break;
    }

    case R_IA_64_FPTR64LSB: {
        /*
         * We have to make sure that all @fptr references to
         * the same function are identical so that code can
         * compare function pointers.
         */
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        struct fptr *fptr = 0;
        Elf_Addr target, gp;
        int sym_index;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
            SYMLOOK_IN_PLT | flags, cache, lockstate);
        if (def == NULL) {
            /*
             * XXX r_debug_state is problematic and find_symdef()
             * returns NULL for it. This probably has something to
             * do with symbol versioning (r_debug_state is in the
             * symbol map). If we return -1 in that case we abort
             * relocating rtld, which typically is fatal. So, for
             * now just skip the symbol when we're relocating
             * rtld. We don't care about r_debug_state unless we
             * are being debugged.
             */
            if (obj != obj_rtld)
                return -1;
            break;
        }

        if (def->st_shndx != SHN_UNDEF) {
            target = (Elf_Addr)(defobj->relocbase + def->st_value);
            gp = (Elf_Addr)defobj->pltgot;

            /* rtld is allowed to reference itself only */
            assert(!obj->rtld || obj == defobj);
            fptrs = defobj->priv;
            if (fptrs == NULL)
                fptrs = alloc_fptrs((Obj_Entry *) defobj, obj->rtld);

            sym_index = def - defobj->symtab;

            /*
             * Find the @fptr, using fptrs as a helper.
             */
            if (fptrs)
                fptr = fptrs[sym_index];
            if (!fptr) {
                fptr = alloc_fptr(target, gp);
                if (fptrs)
                    fptrs[sym_index] = fptr;
            }
        } else
            fptr = NULL;

        store64(where, (Elf_Addr)fptr);
        break;
    }

    case R_IA_64_IPLTLSB: {
        /*
         * Relocation typically used to populate C++ virtual function
         * tables. It creates a 128-bit function descriptor at the
         * specified memory address.
         */
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        struct fptr *fptr;
        Elf_Addr target, gp;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
            cache, lockstate);
        if (def == NULL)
            return -1;

        if (def->st_shndx != SHN_UNDEF) {
            target = (Elf_Addr)(defobj->relocbase + def->st_value);
            gp = (Elf_Addr)defobj->pltgot;
        } else {
            target = 0;
            gp = 0;
        }

        fptr = (void*)where;
        store64(&fptr->target, target);
        store64(&fptr->gp, gp);
        break;
    }

    case R_IA_64_DTPMOD64LSB: {
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
            cache, lockstate);
        if (def == NULL)
            return -1;

        store64(where, defobj->tlsindex);
        break;
    }

    case R_IA_64_DTPREL64LSB: {
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
            cache, lockstate);
        if (def == NULL)
            return -1;

        store64(where, def->st_value + rela->r_addend);
        break;
    }

    case R_IA_64_TPREL64LSB: {
        const Elf_Sym *def;
        const Obj_Entry *defobj;

        def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
            cache, lockstate);
        if (def == NULL)
            return -1;

        /*
         * We lazily allocate offsets for static TLS as we
         * see the first relocation that references the
         * TLS block. This allows us to support (small
         * amounts of) static TLS in dynamically loaded
         * modules. If we run out of space, we generate an
         * error.
         */
        if (!defobj->tls_done) {
            if (!allocate_tls_offset((Obj_Entry*) defobj)) {
                _rtld_error("%s: No space available for static "
                    "Thread Local Storage", obj->path);
                return -1;
            }
        }

        store64(where, defobj->tlsoffset + def->st_value + rela->r_addend);
        break;
    }

    case R_IA_64_NONE:
        break;

    default:
        _rtld_error("%s: Unsupported relocation type %u"
            " in non-PLT relocations\n",
            obj->path, (unsigned int)ELF_R_TYPE(rela->r_info));
        return -1;
    }

    return (0);
}
static inline int blake2b_param_set_node_offset( blake2b_param *P, const uint64_t node_offset )
{
    store64( &P->node_offset, node_offset );
    return 0;
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        if (opcodeID == op_call && canBeOptimized()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
            storePtr(regT0, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
        store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.

    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
int crypto_aead_decrypt(unsigned char *m, unsigned long long *outputmlen,
                        unsigned char *nsec,
                        const unsigned char *c, unsigned long long clen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *npub,
                        const unsigned char *k)
{
    AES_KEY key[1];
    AES_KEY kappa_0[1];
    AES_KEY kappa_m[1];
    unsigned char nonce[BLOCK_LENGTH];
    CPFB_BLOCK X[1];
    CPFB_BLOCK P[1];
    CPFB_BLOCK stream[1];
    unsigned long long counter;
    unsigned long long mlen;

    (void)nsec; /* avoid warning */

    if (clen < TAG_LENGTH)
        return -1;
    *outputmlen = mlen = clen - TAG_LENGTH;

    init(npub, k, kappa_0, key, nonce);

    /* Block encoding alen and mlen
     * In an online implementation, it would be done at the end, and X would be 0
     */
    store64(X->u8, mlen);
    X->u32[2] = htobe32((crypto_uint32)adlen);
    X->counter = 0;
    AES_encrypt(X, X, kappa_0);

    /* AD processing */
    counter = 0;
    while (adlen > PAYLOAD_LENGTH) {
        load_96_ctr(P, ad, ++counter);
        AES_encrypt(P, stream, kappa_0);
        xor_128(X, stream, X);
        ad += PAYLOAD_LENGTH;
        adlen -= PAYLOAD_LENGTH;
    }
    if (adlen > 0) {
        load_partial_ctr(P, ad, adlen, ++counter);
        AES_encrypt(P, stream, kappa_0);
        xor_128(X, stream, X);
    }

    /* Plaintext processing */
    if (mlen > 0) {
        /* New kappa */
        gen_next_kappa(nonce, kappa_0, kappa_m, key);
        counter = 0;

        /* P_0 processing */
        P->u64[0] = P->u64[1] = 0;
        AES_encrypt(P, stream, kappa_m);

        while (mlen > PAYLOAD_LENGTH) {
            store_xor_96(m, (CPFB_BLOCK*)c, stream);
            load_96_ctr(P, m, ++counter);
            AES_encrypt(P, stream, kappa_m);
            xor_128(X, stream, X);
            c += PAYLOAD_LENGTH;
            m += PAYLOAD_LENGTH;
            mlen -= PAYLOAD_LENGTH;
        }
        if (mlen > 0) {
            store_xor_partial(m, (CPFB_BLOCK*)c, stream, mlen);
            load_partial_ctr(P, m, mlen, ++counter);
            AES_encrypt(P, stream, kappa_m);
            xor_128(X, stream, X);
            c += mlen;
        }
    }

    AES_encrypt(X, X, kappa_0);

    return verify_tag(c, X->u8);
}
int crypto_aead_encrypt(unsigned char *c, unsigned long long *clen,
                        const unsigned char *m, unsigned long long mlen,
                        const unsigned char *ad, unsigned long long adlen,
                        const unsigned char *nsec,
                        const unsigned char *npub,
                        const unsigned char *k)
{
    AES_KEY key[1];
    AES_KEY kappa_0[1];
    AES_KEY kappa_m[1];
    unsigned char nonce[BLOCK_LENGTH];
    CPFB_BLOCK X[1];
    CPFB_BLOCK P[1];
    CPFB_BLOCK stream[1];
    unsigned long long i;
    unsigned long long counter;

    (void)nsec; /* avoid warning */

    *clen = mlen + TAG_LENGTH;

    /* Key schedule, nonce initialization and generation of first kappa */
    init(npub, k, kappa_0, key, nonce);

    /* Block encoding alen and mlen
     * In an online implementation, it would be done at the end, and X would be 0
     */
    store64(X->u8, mlen);
    X->u32[2] = htobe32((crypto_uint32)adlen);
    X->counter = 0;
    AES_encrypt(X, X, kappa_0);

    /* AD processing */
    counter = 0;
    while (adlen > PAYLOAD_LENGTH) {
        load_96_ctr(P, ad, ++counter);
        AES_encrypt(P, stream, kappa_0);
        xor_128(X, stream, X);
        ad += PAYLOAD_LENGTH;
        adlen -= PAYLOAD_LENGTH;
    }
    if (adlen > 0) {
        load_partial_ctr(P, ad, adlen, ++counter);
        AES_encrypt(P, stream, kappa_0);
        xor_128(X, stream, X);
    }

    /* Plaintext processing */
    if (mlen > 0) {
        /* Increment nonce and generate new kappa
         * First subkey of kappa_m is modified
         * (xored with first subkey of kappa_0)
         */
        gen_next_kappa(nonce, kappa_0, kappa_m, key);
        counter = 0;

        /* P_0 processing */
        P->u64[0] = P->u64[1] = 0;
        AES_encrypt(P, stream, kappa_m);

        while (mlen > PAYLOAD_LENGTH) {
            load_96_ctr(P, m, ++counter);
            store_xor_96(c, P, stream);
            AES_encrypt(P, stream, kappa_m);
            xor_128(X, stream, X);
            c += PAYLOAD_LENGTH;
            m += PAYLOAD_LENGTH;
            mlen -= PAYLOAD_LENGTH;
        }
        if (mlen > 0) {
            load_partial_ctr(P, m, mlen, ++counter);
            store_xor_partial(c, P, stream, mlen);
            AES_encrypt(P, stream, kappa_m);
            xor_128(X, stream, X);
            c += mlen;
        }
    }

    AES_encrypt(X, X, kappa_0);

    for (i = 0; i < TAG_LENGTH; i++)
        c[i] = X->u8[i];

    return 0;
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        compileSetupVarargsFrame(opcodeID, instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (opcodeID == op_tail_call) {
        CallFrameShuffleData shuffleData;
        shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
        shuffleData.numLocals = instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
        shuffleData.args.resize(instruction[3].u.operand);
        for (int i = 0; i < instruction[3].u.operand; ++i) {
            shuffleData.args[i] = ValueRecovery::displacedInJSStack(
                virtualRegisterForArgument(i) - instruction[4].u.operand,
                DataFormatJS);
        }
        shuffleData.callee = ValueRecovery::inGPR(regT0, DataFormatJS);
        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}