void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitLoad(arguments, regT1, regT0);
    callOperation(operationSizeFrameForVarargs, regT1, regT0, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
    emitLoad(arguments, regT2, regT4);
    callOperation(operationSetupVarargsFrame, regT1, regT2, regT4, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitLoad(thisValue, regT2, regT0);
    store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
    store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
static int blake2s_compress( blake2s_state *S, const uint8_t block[BLAKE2S_BLOCKBYTES] )
{
  uint32_t m[16];
  uint32_t v[16];
  size_t i;

  for( i = 0; i < 16; ++i )
    m[i] = load32( block + i * sizeof( m[i] ) );

  for( i = 0; i < 8; ++i )
    v[i] = S->h[i];

  v[ 8] = blake2s_IV[0];
  v[ 9] = blake2s_IV[1];
  v[10] = blake2s_IV[2];
  v[11] = blake2s_IV[3];
  v[12] = S->t[0] ^ blake2s_IV[4];
  v[13] = S->t[1] ^ blake2s_IV[5];
  v[14] = S->f[0] ^ blake2s_IV[6];
  v[15] = S->f[1] ^ blake2s_IV[7];

#define G(r,i,a,b,c,d)                      \
  do {                                      \
    a = a + b + m[blake2s_sigma[r][2*i+0]]; \
    d = rotr32(d ^ a, 16);                  \
    c = c + d;                              \
    b = rotr32(b ^ c, 12);                  \
    a = a + b + m[blake2s_sigma[r][2*i+1]]; \
    d = rotr32(d ^ a, 8);                   \
    c = c + d;                              \
    b = rotr32(b ^ c, 7);                   \
  } while(0)

#define ROUND(r)                    \
  do {                              \
    G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
    G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
    G(r,2,v[ 2],v[ 6],v[10],v[14]); \
    G(r,3,v[ 3],v[ 7],v[11],v[15]); \
    G(r,4,v[ 0],v[ 5],v[10],v[15]); \
    G(r,5,v[ 1],v[ 6],v[11],v[12]); \
    G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
    G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
  } while(0)

  ROUND( 0 );
  ROUND( 1 );
  ROUND( 2 );
  ROUND( 3 );
  ROUND( 4 );
  ROUND( 5 );
  ROUND( 6 );
  ROUND( 7 );
  ROUND( 8 );
  ROUND( 9 );

  for( i = 0; i < 8; ++i )
    S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];

#undef G
#undef ROUND
  return 0;
}
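/*
 * Note: the BLAKE2s snippets in this collection lean on two helpers from
 * the BLAKE2 reference sources, load32 and rotr32, which are not shown
 * here. A minimal sketch of portable definitions, assuming the
 * little-endian message encoding the BLAKE2 specification mandates:
 */
#include <stdint.h>

/* Little-endian 32-bit load; byte-wise assembly avoids alignment and
 * host-endianness assumptions. */
static uint32_t load32( const void *src )
{
  const uint8_t *p = ( const uint8_t * )src;
  return ( ( uint32_t )p[0] )
       | ( ( uint32_t )p[1] <<  8 )
       | ( ( uint32_t )p[2] << 16 )
       | ( ( uint32_t )p[3] << 24 );
}

/* Rotate right by c bits, 0 < c < 32. */
static uint32_t rotr32( uint32_t w, unsigned c )
{
  return ( w >> c ) | ( w << ( 32 - c ) );
}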
template<typename Op>
std::enable_if_t<
    Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
    || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
, void> JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info)
{
    OpcodeID opcodeID = Op::opcodeID;
    int thisValue = bytecode.m_thisValue.offset();
    int arguments = bytecode.m_arguments.offset();
    int firstFreeRegister = bytecode.m_firstFree.offset();
    int firstVarArgOffset = bytecode.m_firstVarArg;

    emitLoad(arguments, regT1, regT0);
    Z_JITOperation_EJZZ sizeOperation;
    if (Op::opcodeID == op_tail_call_forward_arguments)
        sizeOperation = operationSizeFrameForForwardArguments;
    else
        sizeOperation = operationSizeFrameForVarargs;
    callOperation(sizeOperation, JSValueRegs(regT1, regT0), -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
    emitLoad(arguments, regT2, regT4);
    F_JITOperation_EFJZZ setupOperation;
    if (opcodeID == op_tail_call_forward_arguments)
        setupOperation = operationSetupForwardArgumentsFrame;
    else
        setupOperation = operationSetupVarargsFrame;
    callOperation(setupOperation, regT1, JSValueRegs(regT2, regT4), firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load32(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    store32(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitLoad(thisValue, regT2, regT0);
    store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
    store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
/* init2 xors IV with input parameter block */
int blake2s_init_param( blake2s_state *S, const blake2s_param *P )
{
  blake2s_init0( S );
  /* const-qualified to match P; the original cast discarded the const */
  const uint32_t *p = ( const uint32_t * )( P );

  /* IV XOR ParamBlock */
  for( size_t i = 0; i < 8; ++i )
    S->h[i] ^= load32( &p[i] );

  return 0;
}
void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitGetVirtualRegister(arguments, regT1);
    Z_JITOperation_EJZZ sizeOperation;
    if (opcode == op_tail_call_forward_arguments)
        sizeOperation = operationSizeFrameForForwardArguments;
    else
        sizeOperation = operationSizeFrameForVarargs;
    callOperation(sizeOperation, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    F_JITOperation_EFJZZ setupOperation;
    if (opcode == op_tail_call_forward_arguments)
        setupOperation = operationSetupForwardArgumentsFrame;
    else
        setupOperation = operationSetupVarargsFrame;
    callOperation(setupOperation, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load32(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    store32(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetupVarargsFrameFastCase(*this, regT1, regT0, regT1, regT2, firstVarArgOffset, slowCase);
        end.append(jump());
        slowCase.link(this);
    }

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(&info->maxNumArguments, regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, &info->maxNumArguments);
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
/* init2 xors IV with input parameter block */
static void blake2s_init_param(BLAKE2S_CTX *S, const BLAKE2S_PARAM *P)
{
    const uint8_t *p = (const uint8_t *)(P);
    size_t i;

    /* The param struct is carefully hand packed, and should be 32 bytes on
     * every platform. */
    assert(sizeof(BLAKE2S_PARAM) == 32);
    blake2s_init0(S);

    /* IV XOR ParamBlock */
    for (i = 0; i < 8; ++i) {
        S->h[i] ^= load32(&p[i*4]);
    }
}
/* init2 xors IV with input parameter block */
int blake2s_init_param( blake2s_state *S, const blake2s_param *P )
{
  const unsigned char *p = ( const unsigned char * )( P );
  size_t i;

  blake2s_init0( S );

  /* IV XOR ParamBlock */
  for( i = 0; i < 8; ++i )
    S->h[i] ^= load32( &p[i * 4] );

  S->outlen = P->digest_length;
  return 0;
}
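/*
 * For context: the init variants above fold the raw bytes of the
 * parameter block into the IV, so the block must be exactly 32 bytes
 * with no padding (the OpenSSL variant above asserts as much). A sketch
 * of the packed layout per the BLAKE2 specification; field names follow
 * the reference headers, and the node_offset/xof_length split shown here
 * is the later XOF-capable revision (the original 2012 spec instead
 * defines a single 6-byte node_offset):
 */
typedef struct blake2s_param_sketch__
{
  uint8_t  digest_length; /* 1..32 */
  uint8_t  key_length;    /* 0..32 */
  uint8_t  fanout;        /* 1 for sequential hashing */
  uint8_t  depth;         /* 1 for sequential hashing */
  uint32_t leaf_length;   /* 0 for sequential hashing */
  uint32_t node_offset;
  uint16_t xof_length;
  uint8_t  node_depth;
  uint8_t  inner_length;
  uint8_t  salt[8];
  uint8_t  personal[8];
} blake2s_param_sketch;   /* 32 bytes; the reference code also marks it packed */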
// 08080168
u8 *battle_load_arguments(struct battle_config_entry *bce, u8 *cursor)
{
    while (1) {
        switch (bce->target_type) {
        case LOAD_8:  *(u8 *)bce->target = load8 (cursor); cursor += 1; break;
        case LOAD_16: *(u16*)bce->target = load16(cursor); cursor += 2; break;
        case LOAD_32: *(u32*)bce->target = load32(cursor); cursor += 4; break;
        case ZERO_8:  *(u8 *)bce->target = 0;              cursor += 1; break;
        case ZERO_16: *(u16*)bce->target = 0;              cursor += 2; break;
        case ZERO_32: *(u32*)bce->target = 0;              cursor += 4; break;
        case END:     *(u32*)bce->target = (u32)cursor;    return cursor;
        default: break;
        }
        bce++;
    }
}
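/*
 * The decompiled loader above walks a table of entries until it reaches
 * an END sentinel. A hedged sketch of the types it implies; the names
 * are hypothetical, inferred from the routine itself:
 */
typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Each entry says how many bytes to consume from the cursor and where to
 * store the decoded (or zeroed) value; END stores the final cursor
 * address itself and terminates the walk. */
enum battle_load_op { LOAD_8, LOAD_16, LOAD_32, ZERO_8, ZERO_16, ZERO_32, END };

struct battle_config_entry {
    enum battle_load_op target_type; /* what to read or skip */
    void *target;                    /* destination for the decoded value */
};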
template<typename Op>
std::enable_if_t<
    Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
    && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
, void> JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    int argCount = bytecode.m_argc;
    int registerOffset = -static_cast<int>(bytecode.m_argv);

    if (Op::opcodeID == op_call && shouldEmitProfiling()) {
        emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
        Jump done = branchIfNotCell(regT0);
        load32(Address(regT1, JSCell::structureIDOffset()), regT1);
        store32(regT1, metadata.m_arrayProfile.addressOfLastSeenStructureID());
        done.link(this);
    }

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
    store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
}
void AssemblyHelpers::callExceptionFuzz()
{
    if (!Options::enableExceptionFuzz())
        return;

    EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));

    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        store64(GPRInfo::toRegister(i), buffer + i);
#else
        store32(GPRInfo::toRegister(i), buffer + i);
#endif
    }
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
    }

    // Set up one argument.
#if CPU(X86)
    poke(GPRInfo::callFrameRegister, 0);
#else
    move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#endif
    move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
    call(GPRInfo::nonPreservedNonReturnGPR);

    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
        loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
    }
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
        load64(buffer + i, GPRInfo::toRegister(i));
#else
        load32(buffer + i, GPRInfo::toRegister(i));
#endif
    }
}
static void blake2s_compress( blake2s_state *S, const uint8_t in[BLAKE2S_BLOCKBYTES] )
{
  uint32_t m[16];
  uint32_t v[16];
  size_t i;

  for( i = 0; i < 16; ++i ) {
    m[i] = load32( in + i * sizeof( m[i] ) );
  }

  for( i = 0; i < 8; ++i ) {
    v[i] = S->h[i];
  }

  v[ 8] = blake2s_IV[0];
  v[ 9] = blake2s_IV[1];
  v[10] = blake2s_IV[2];
  v[11] = blake2s_IV[3];
  v[12] = S->t[0] ^ blake2s_IV[4];
  v[13] = S->t[1] ^ blake2s_IV[5];
  v[14] = S->f[0] ^ blake2s_IV[6];
  v[15] = S->f[1] ^ blake2s_IV[7];

  ROUND( 0 );
  ROUND( 1 );
  ROUND( 2 );
  ROUND( 3 );
  ROUND( 4 );
  ROUND( 5 );
  ROUND( 6 );
  ROUND( 7 );
  ROUND( 8 );
  ROUND( 9 );

  for( i = 0; i < 8; ++i ) {
    S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
  }
}
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);

    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), GPRInfo::regT5);
    loadPtr(BaseIndex(GPRInfo::regT5, GPRInfo::regT0, timesPtr()), GPRInfo::regT5);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
/* Permute the state while xoring in the block of data. */
static void blake2s_compress(BLAKE2S_CTX *S, const uint8_t *blocks, size_t len)
{
    uint32_t m[16];
    uint32_t v[16];
    size_t i;
    size_t increment;

    /*
     * There are two distinct usage vectors for this function:
     *
     * a) BLAKE2s_Update uses it to process complete blocks,
     *    possibly more than one at a time;
     *
     * b) BLAKE2s_Final uses it to process last block, always
     *    single but possibly incomplete, in which case caller
     *    pads input with zeros.
     */
    assert(len < BLAKE2S_BLOCKBYTES || len % BLAKE2S_BLOCKBYTES == 0);

    /*
     * Since last block is always processed with separate call,
     * |len| not being multiple of complete blocks can be observed
     * only with |len| being less than BLAKE2S_BLOCKBYTES ("less"
     * including even zero), which is why the following assignment doesn't
     * have to reside inside the main loop below.
     */
    increment = len < BLAKE2S_BLOCKBYTES ? len : BLAKE2S_BLOCKBYTES;

    for (i = 0; i < 8; ++i) {
        v[i] = S->h[i];
    }

    do {
        for (i = 0; i < 16; ++i) {
            m[i] = load32(blocks + i * sizeof(m[i]));
        }

        /* blake2s_increment_counter */
        S->t[0] += increment;
        S->t[1] += (S->t[0] < increment);

        v[ 8] = blake2s_IV[0];
        v[ 9] = blake2s_IV[1];
        v[10] = blake2s_IV[2];
        v[11] = blake2s_IV[3];
        v[12] = S->t[0] ^ blake2s_IV[4];
        v[13] = S->t[1] ^ blake2s_IV[5];
        v[14] = S->f[0] ^ blake2s_IV[6];
        v[15] = S->f[1] ^ blake2s_IV[7];

#define G(r,i,a,b,c,d) \
        do { \
            a = a + b + m[blake2s_sigma[r][2*i+0]]; \
            d = rotr32(d ^ a, 16); \
            c = c + d; \
            b = rotr32(b ^ c, 12); \
            a = a + b + m[blake2s_sigma[r][2*i+1]]; \
            d = rotr32(d ^ a, 8); \
            c = c + d; \
            b = rotr32(b ^ c, 7); \
        } while (0)

#define ROUND(r) \
        do { \
            G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
            G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
            G(r,2,v[ 2],v[ 6],v[10],v[14]); \
            G(r,3,v[ 3],v[ 7],v[11],v[15]); \
            G(r,4,v[ 0],v[ 5],v[10],v[15]); \
            G(r,5,v[ 1],v[ 6],v[11],v[12]); \
            G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
            G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
        } while (0)

#if defined(OPENSSL_SMALL_FOOTPRINT)
        /* almost 3x reduction on x86_64, 4.5x on ARMv8, 4x on ARMv4 */
        for (i = 0; i < 10; i++) {
            ROUND(i);
        }
#else
        ROUND(0);
        ROUND(1);
        ROUND(2);
        ROUND(3);
        ROUND(4);
        ROUND(5);
        ROUND(6);
        ROUND(7);
        ROUND(8);
        ROUND(9);
#endif

        for (i = 0; i < 8; ++i) {
            S->h[i] = v[i] ^= v[i + 8] ^ S->h[i];
        }
#undef G
#undef ROUND
        blocks += increment;
        len -= increment;
    } while (len);
}
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
void JITCompiler::compileFunction()
{
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);

    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
}
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(-m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    emitStoreCodeOrigin(CodeOrigin(0));
    m_callStackCheck = call();
    jump(fromStackCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    emitStoreCodeOrigin(CodeOrigin(0));
    m_callArityCheck = call();
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
/* the only 2 valid key lengths are 256 bits (32 bytes) and 128 bits (16 bytes) */
void cryptonite_chacha_init_core(cryptonite_chacha_state *st,
                                 uint32_t keylen, const uint8_t *key,
                                 uint32_t ivlen, const uint8_t *iv)
{
	const uint8_t *constants = (keylen == 32) ? sigma : tau;

	st->d[0] = load32(constants + 0);
	st->d[1] = load32(constants + 4);
	st->d[2] = load32(constants + 8);
	st->d[3] = load32(constants + 12);

	st->d[4] = load32(key + 0);
	st->d[5] = load32(key + 4);
	st->d[6] = load32(key + 8);
	st->d[7] = load32(key + 12);
	/* we repeat the key on 128 bits */
	if (keylen == 32)
		key += 16;
	st->d[8] = load32(key + 0);
	st->d[9] = load32(key + 4);
	st->d[10] = load32(key + 8);
	st->d[11] = load32(key + 12);
	st->d[12] = 0;
	switch (ivlen) {
	case 8:
		st->d[13] = 0;
		st->d[14] = load32(iv + 0);
		st->d[15] = load32(iv + 4);
		break;
	case 12:
		st->d[13] = load32(iv + 0);
		st->d[14] = load32(iv + 4);
		st->d[15] = load32(iv + 8);
		break; /* original fell through to the default return; made explicit */
	default:
		return;
	}
}
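/*
 * The sigma/tau constants selected on key length above are the standard
 * ChaCha expansion strings ("expand 32-byte k" for 256-bit keys,
 * "expand 16-byte k" for 128-bit keys). A sketch of the usual
 * declarations; treat the exact spelling of these identifiers as an
 * assumption about the surrounding cryptonite source:
 */
static const uint8_t sigma[16] = "expand 32-byte k";
static const uint8_t tau[16]   = "expand 16-byte k";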
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(thisValue, regT1, regT0);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, regT1, regT0, regT3, regT2, firstFreeRegister);
    move(returnValueRegister, regT3);

    if (canOptimize)
        end.link(this);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        compileSetupVarargsFrame(opcodeID, instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (opcodeID == op_tail_call) {
        CallFrameShuffleData shuffleData;
        shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
        shuffleData.numLocals = instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
        shuffleData.args.resize(instruction[3].u.operand);
        for (int i = 0; i < instruction[3].u.operand; ++i) {
            shuffleData.args[i] = ValueRecovery::displacedInJSStack(
                virtualRegisterForArgument(i) - instruction[4].u.operand,
                DataFormatJS);
        }
        shuffleData.callee = ValueRecovery::inGPR(regT0, DataFormatJS);
        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
TrackFrames buildTrackFramesFromMidi(const std::string& filename)
{
    TrackFrames trackFrames;

    FILE* pFic;
    fopen_s(&pFic, filename.c_str(), "rb");

    // Load header
    struct HeaderChunk
    {
        char HThd[4];
        uint32_t size;
        uint16_t format;
        uint16_t nbTrack;
        uint16_t division;
    } headerChunk;
    loadStr(headerChunk.HThd, 4, pFic);
    headerChunk.size = load32(pFic);
    headerChunk.format = load16(pFic);
    headerChunk.nbTrack = load16(pFic);
    headerChunk.division = load16(pFic);

    uint32_t maxTime = 0;
    std::set<int> channelSet;

    //int midiToChannels[] = { // mario
    //    5, 0, 1, 2,
    //    5, 5, 5, 5,
    //    5, 5, 5, 5,
    //    5, 5, 5, 5,
    //};
    //int midiToChannels[] = { // sm2
    //    0, 2, 1, 5,
    //    5, 5, 5, 5,
    //    5, 3, 5, 5,
    //    5, 5, 5, 5,
    //};
    int midiToChannels[] = {
        0, 1, 2, 5,
        5, 5, 5, 5,
        5, 5, 5, 5,
        5, 5, 5, 5,
    };
    //int midiToChannels[] = { // zelda
    //    0, 1, 2, 5,
    //    5, 5, 5, 5,
    //    5, 3, 5, 5,
    //    5, 5, 5, 5,
    //};

    // Load tracks
    for (auto i = 0u; i < headerChunk.nbTrack; ++i)
    {
        char MTrk[4];
        loadStr(MTrk, 4, pFic);
        auto len = load32(pFic);
        auto start = ftell(pFic);
        auto cur = start;
        uint32_t curTime = 0;
        while (cur - start < static_cast<decltype(cur)>(len))
        {
            auto v_time = loadLength(pFic);
            auto event = load8(pFic);
            curTime += v_time; // / (headerChunk.division / 24);
            maxTime = std::max(maxTime, curTime);
            trackFrames.resize(maxTime * 4 + 16);
            if (event == 0xFF)
            {
                // Meta event
                event = load8(pFic);
                if (event == 0x58) // Time Signature
                {
                    event = load8(pFic);
                    assert(event == 0x04);
                    load32(pFic); // Ignore the four data bytes
                }
                else if (event == 0x7F) // Sequencer Specific Meta-Event
                {
                    auto dataLen = load8(pFic); // Len; skip the payload
                    while (dataLen)
                    {
                        load8(pFic);
                        --dataLen;
                    }
                }
                else if (event == 0x51) // Set Tempo
                {
                    event = load8(pFic);
                    assert(event == 0x03);
                    load8(pFic);
                    load8(pFic);
                    load8(pFic);
                }
                else if (event == 0x2F) // End of Track
                {
                    event = load8(pFic);
                    assert(event == 0x00);
                }
                else if (event == 0x03) // Sequence/Track Name
                {
                    auto textLen = load8(pFic);
                    char text[257] = {0};
                    loadStr(text, textLen, pFic);
                    printf("Track found: %i, %s\n", i, text);
                }
                else if (event == 0x59) // Key Signature
                {
                    event = load8(pFic);
                    assert(event == 0x02);
                    auto sf = load8(pFic);
                    auto mi = load8(pFic);
                }
                else if (event == 0x21) // MIDI Port (was mislabeled "End of Track")
                {
                    event = load8(pFic);
                    assert(event == 0x01);
                    load8(pFic);
                }
                else if (event == 0x20) // MIDI Channel Prefix
                {
                    event = load8(pFic);
                    assert(event == 0x01);
                    auto channel = load8(pFic);
                }
                else
                {
                    assert(false);
                }
            }
            else if (event & 0x80)
            {
                auto channelNo = event & 0x0F;
                channelSet.insert(channelNo);
                if (channelNo < sizeof(midiToChannels) / sizeof(int))
                    channelNo = midiToChannels[channelNo];
                event = event & 0xF0;
                if (event == 0xB0) // Control Change
                {
                    auto controllerNumber = load8(pFic) & 0x7F;
                    auto controllerValue = load8(pFic) & 0x7F;
                }
                else if (event == 0xC0) // Program Change
                {
                    auto programNumber = load8(pFic) & 0x7F;
                }
                else if (event == 0x90) // Note On event
                {
                    auto note = load8(pFic) & 0x7F;
                    auto vol = load8(pFic) & 0x7F;
                    if (channelNo < 3)
                    {
                        auto& trackFrame = trackFrames[curTime * 4 + channelNo];
                        trackFrame = NOTE(noteTable[note], channelNo, (vol * 15) / 127);
                    }
                    else if (channelNo == 3)
                    {
                        auto& trackFrame = trackFrames[curTime * 4 + channelNo];
                        trackFrame = NOTE(noteTable[note + 12], channelNo, (vol * 15) / 127);
                    }
                }
                else if (event == 0x80) // Note Off event
                {
                    auto note = load8(pFic) & 0x7F;
                    auto vol = load8(pFic) & 0x7F;
                    if (channelNo == 2)
                    {
                        //auto& trackFrame = trackFrames[curTime * 4 + channelNo];
                        //trackFrame = NOTE(noteTable[note + 12], channelNo, 0);
                    }
                    if (channelNo == 3)
                    {
                        auto& trackFrame = trackFrames[curTime * 4 + channelNo];
                        trackFrame = NOTE(noteTable[note], channelNo, 0);
                    }
                }
                else if (event == 0xE0) // Pitch Wheel Change
                {
                    auto l = load8(pFic) & 0x7F;
                    auto m = load8(pFic) & 0x7F;
                }
                else
                {
                    assert(false);
                }
            }
            else if (event <= 0x7F)
            {
                load8(pFic);
            }
            else
            {
                assert(false);
            }
            cur = ftell(pFic);
        }
    }
    fclose(pFic);

    for (auto& channelNo : channelSet)
    {
        printf("%i\n", channelNo);
    }

    return trackFrames;
}
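/*
 * The loadLength helper used for delta times above presumably decodes a
 * standard MIDI variable-length quantity: seven payload bits per byte,
 * with the high bit set on every byte except the last. A sketch under
 * that assumption, reusing the snippet's load8(FILE*) convention (the
 * helper's name and signature are taken from the snippet, not verified):
 */
static uint32_t loadLength(FILE* pFic)
{
    uint32_t value = 0;
    uint8_t byte;
    do
    {
        byte = load8(pFic);                   // next encoded byte
        value = (value << 7) | (byte & 0x7F); // append 7 payload bits
    } while (byte & 0x80);                    // MSB set => more bytes follow
    return value;
}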
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(Address(GPRInfo::callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))), GPRInfo::regT1);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}