/*@
 * @deftypefun jit_function_t jit_function_create (jit_context_t @var{context}, jit_type_t @var{signature})
 * Create a new function block and associate it with a JIT context.
 * Returns NULL if out of memory.
 *
 * A function persists for the lifetime of its containing context.
 * It initially starts life in the "building" state, where the user
 * constructs instructions that represent the function body.
 * Once the build process is complete, the user calls
 * @code{jit_function_compile} to convert it into its executable form.
 *
 * It is recommended that you call @code{jit_context_build_start} before
 * calling @code{jit_function_create}, and then call
 * @code{jit_context_build_end} after you have called
 * @code{jit_function_compile}. This will protect the JIT's internal
 * data structures within a multi-threaded environment.
 * @end deftypefun
@*/
jit_function_t jit_function_create(jit_context_t context, jit_type_t signature)
{
	jit_function_t func;
#if !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size))
	/* Native backends carve redirector/indirector stubs out of a single
	   trampoline allocation; the interpreter backend needs neither */
	unsigned char *trampoline;
#endif

	/* Acquire the memory context and make sure its allocator is usable */
	_jit_memory_lock(context);
	if(!_jit_memory_ensure(context))
	{
		_jit_memory_unlock(context);
		return 0;
	}

	/* Allocate memory for the function and clear it */
	func = _jit_memory_alloc_function(context);
	if(!func)
	{
		_jit_memory_unlock(context);
		return 0;
	}

#if !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size))
	/* On failure, free the just-allocated function so nothing leaks */
	trampoline = (unsigned char *) _jit_memory_alloc_trampoline(context);
	if(!trampoline)
	{
		_jit_memory_free_function(context, func);
		_jit_memory_unlock(context);
		return 0;
	}
# if defined(jit_redirector_size)
	/* Redirector occupies the first jit_redirector_size bytes */
	func->redirector = trampoline;
	trampoline += jit_redirector_size;
# endif
# if defined(jit_indirector_size)
	/* Indirector follows the redirector (or starts the buffer if there
	   is no redirector) */
	func->indirector = trampoline;
# endif
#endif /* !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size)) */

	/* Release the memory context */
	_jit_memory_unlock(context);

	/* Initialize the function block */
	func->context = context;
	func->signature = jit_type_copy(signature);
	func->optimization_level = JIT_OPTLEVEL_NORMAL;

#if !defined(JIT_BACKEND_INTERP) && defined(jit_redirector_size)
	/* If we aren't using interpretation, then point the function's
	   initial entry point at the redirector, which in turn will
	   invoke the on-demand compiler */
	func->entry_point = _jit_create_redirector
		(func->redirector, (void *) context->on_demand_driver,
		 func, jit_type_get_abi(signature));
	_jit_flush_exec(func->redirector, jit_redirector_size);
#endif
#if !defined(JIT_BACKEND_INTERP) && defined(jit_indirector_size)
	/* The indirector jumps through func->entry_point, so compiled code
	   can be installed later simply by updating that field */
	_jit_create_indirector(func->indirector, (void**) &(func->entry_point));
	_jit_flush_exec(func->indirector, jit_indirector_size);
#endif

	/* Add the function to the tail of the context's doubly-linked
	   function list */
	func->next = 0;
	func->prev = context->last_function;
	if(context->last_function)
	{
		context->last_function->next = func;
	}
	else
	{
		context->functions = func;
	}
	context->last_function = func;

	/* Return the function to the caller */
	return func;
}
/*
 * Generate the epilog for a function: patch all pending branches that
 * target the epilog, restore the callee-saved registers, tear down the
 * stack frame, and emit the return instruction — popping any
 * caller-pushed argument bytes required by the STDCALL, THISCALL and
 * FASTCALL calling conventions.
 */
void _jit_gen_epilog(jit_gencode_t gen, jit_function_t func)
{
	jit_nint pop_bytes = 0;
	int reg, offset;
	unsigned char *inst;
	int struct_return_offset = 0;
	void **fixup;
	void **next;

	/* Check if there is sufficient space for the epilog */
	_jit_cache_check_space(&gen->posn, 48);

#if JIT_APPLY_X86_FASTCALL == 1
	/* Determine the number of parameter bytes to pop when we return */
	{
		jit_type_t signature;
		unsigned int num_params;
		unsigned int param;
		signature = func->signature;
		if(jit_type_get_abi(signature) == jit_abi_stdcall ||
		   jit_type_get_abi(signature) == jit_abi_thiscall ||
		   jit_type_get_abi(signature) == jit_abi_fastcall)
		{
			/* Nested functions carry an extra hidden word (the parent
			   frame pointer — TODO confirm against the call emitter) */
			if(func->nested_parent)
			{
				pop_bytes += sizeof(void *);
			}
			if(jit_type_return_via_pointer(jit_type_get_return(signature)))
			{
				/* Hidden struct-return pointer: remember its EBP-relative
				   slot so it can be reloaded into EAX before returning */
				struct_return_offset = 2 * sizeof(void *) + pop_bytes;
				pop_bytes += sizeof(void *);
			}
			num_params = jit_type_num_params(signature);
			for(param = 0; param < num_params; ++param)
			{
				pop_bytes += ROUND_STACK
						(jit_type_get_size
							(jit_type_get_param(signature, param)));
			}
			if(jit_type_get_abi(signature) == jit_abi_fastcall)
			{
				/* The first two words are in fastcall registers */
				if(pop_bytes > (2 * sizeof(void *)))
				{
					pop_bytes -= 2 * sizeof(void *);
				}
				else
				{
					pop_bytes = 0;
				}
				struct_return_offset = 0;
			}
			else if(jit_type_get_abi(signature) == jit_abi_thiscall)
			{
				/* The "this" pointer is passed in the ECX register */
				if(pop_bytes > (1 * sizeof(void *)))
				{
					pop_bytes -= 1 * sizeof(void *);
				}
				else
				{
					pop_bytes = 0;
				}
				struct_return_offset = 0;
			}
		}
		else if(!(func->nested_parent) &&
				jit_type_return_via_pointer(jit_type_get_return(signature)))
		{
#if JIT_APPLY_X86_POP_STRUCT_RETURN == 1
			/* On this configuration the callee pops the hidden pointer */
			pop_bytes += sizeof(void *);
#endif
			struct_return_offset = 2 * sizeof(void *);
		}
	}
#else
	{
		/* We only need to pop structure pointers in non-nested functions */
		jit_type_t signature;
		signature = func->signature;
		if(!(func->nested_parent) &&
		   jit_type_return_via_pointer(jit_type_get_return(signature)))
		{
#if JIT_APPLY_X86_POP_STRUCT_RETURN == 1
			pop_bytes += sizeof(void *);
#endif
			struct_return_offset = 2 * sizeof(void *);
		}
	}
#endif

	/* Perform fixups on any blocks that jump to the epilog.  Each fixup
	   word links to the next pending fixup; it is overwritten with the
	   32-bit relative displacement to the epilog (the "- 4" accounts
	   for the displacement being relative to the end of the operand) */
	inst = gen->posn.ptr;
	fixup = (void **)(gen->epilog_fixup);
	while(fixup != 0)
	{
		next = (void **)(fixup[0]);
		fixup[0] = (void *)(((jit_nint)inst) - ((jit_nint)fixup) - 4);
		fixup = next;
	}
	gen->epilog_fixup = 0;

	/* If we are returning a structure via a pointer, then copy
	   the pointer value into EAX when we return */
	if(struct_return_offset != 0)
	{
		x86_mov_reg_membase(inst, X86_EAX, X86_EBP, struct_return_offset, 4);
	}

	/* Restore the callee save registers that we used */
	if(gen->stack_changed)
	{
		/* ESP cannot be trusted, so reload each saved register from its
		   EBP-relative slot just below the frame */
		offset = -(func->builder->frame_size);
		for(reg = 0; reg <= 7; ++reg)
		{
			if(jit_reg_is_used(gen->touched, reg) &&
			   (_jit_reg_info[reg].flags & JIT_REG_CALL_USED) == 0)
			{
				offset -= sizeof(void *);
				x86_mov_reg_membase(inst, _jit_reg_info[reg].cpu_reg,
									X86_EBP, offset, sizeof(void *));
			}
		}
	}
	else
	{
		/* Saved registers sit on top of the stack; pop in descending
		   register order (presumably mirroring the prolog's pushes —
		   verify against _jit_gen_prolog) */
		for(reg = 7; reg >= 0; --reg)
		{
			if(jit_reg_is_used(gen->touched, reg) &&
			   (_jit_reg_info[reg].flags & JIT_REG_CALL_USED) == 0)
			{
				x86_pop_reg(inst, _jit_reg_info[reg].cpu_reg);
			}
		}
	}

	/* Pop the stack frame and restore the saved copy of ebp */
	if(gen->stack_changed || func->builder->frame_size > 0)
	{
		x86_mov_reg_reg(inst, X86_ESP, X86_EBP, sizeof(void *));
	}
	x86_pop_reg(inst, X86_EBP);

	/* Return from the current function, popping caller-pushed argument
	   bytes when the calling convention requires it */
	if(pop_bytes > 0)
	{
		x86_ret_imm(inst, pop_bytes);
	}
	else
	{
		x86_ret(inst);
	}

	/* Commit the emitted bytes back to the cache position */
	gen->posn.ptr = inst;
}
/*@
 * @deftypefun jit_function_t jit_function_create (jit_context_t @var{context}, jit_type_t @var{signature})
 * Create a new function block and associate it with a JIT context.
 * Returns NULL if out of memory.
 *
 * A function persists for the lifetime of its containing context.
 * It initially starts life in the "building" state, where the user
 * constructs instructions that represent the function body.
 * Once the build process is complete, the user calls
 * @code{jit_function_compile} to convert it into its executable form.
 *
 * It is recommended that you call @code{jit_context_build_start} before
 * calling @code{jit_function_create}, and then call
 * @code{jit_context_build_end} after you have called
 * @code{jit_function_compile}. This will protect the JIT's internal
 * data structures within a multi-threaded environment.
 * @end deftypefun
@*/
/* NOTE(review): this is a second definition of jit_function_create in
   this file, using the older cache-lock allocation API — presumably the
   file concatenates chunks from different versions; confirm only one
   definition is actually compiled. */
jit_function_t jit_function_create(jit_context_t context, jit_type_t signature)
{
	jit_function_t func;
#if !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size))
	jit_cache_t cache;
#endif

	/* Allocate memory for the function and clear it */
	func = jit_cnew(struct _jit_function);
	if(!func)
	{
		return 0;
	}

#if !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size))
	/* TODO: if the function is destroyed the redirector and indirector
	   memory is leaked */

	/* We need the cache lock while we are allocating redirector and indirector */
	jit_mutex_lock(&(context->cache_lock));

	/* Get the method cache */
	cache = _jit_context_get_cache(context);
	if(!cache)
	{
		jit_mutex_unlock(&(context->cache_lock));
		jit_free(func);
		return 0;
	}

	/* NOTE(review): jit_cnew appears to zero-initialize the structure,
	   which would make these assignments redundant — confirm before
	   removing them */
	func->compilation_success_callback = 0;
	func->insn_compilation_callback = 0;
	func->current_code_location_start_column = 0;
	func->current_code_location_end_column = 0;
	func->current_code_location_start_line = 0;
	func->current_code_location_end_line = 0;

# if defined(jit_redirector_size)
	/* Allocate redirector buffer */
	func->redirector = _jit_cache_alloc_no_method(cache, jit_redirector_size, 1);
	if(!func->redirector)
	{
		jit_mutex_unlock(&(context->cache_lock));
		jit_free(func);
		return 0;
	}
# endif
# if defined(jit_indirector_size)
	/* Allocate indirector buffer */
	func->indirector = _jit_cache_alloc_no_method(cache, jit_indirector_size, 1);
	if(!func->indirector)
	{
		/* NOTE(review): the redirector allocated just above is not
		   released here — consistent with the leak TODO at the top */
		jit_mutex_unlock(&(context->cache_lock));
		jit_free(func);
		return 0;
	}
# endif
	jit_mutex_unlock(&(context->cache_lock));
#endif /* !defined(JIT_BACKEND_INTERP) && (defined(jit_redirector_size) || defined(jit_indirector_size)) */

	/* Initialize the function block */
	func->context = context;
	func->signature = jit_type_copy(signature);
	func->optimization_level = JIT_OPTLEVEL_NORMAL;

#if !defined(JIT_BACKEND_INTERP) && defined(jit_redirector_size)
	/* If we aren't using interpretation, then point the function's
	   initial entry point at the redirector, which in turn will
	   invoke the on-demand compiler */
	func->entry_point = _jit_create_redirector
		(func->redirector, (void *) context->on_demand_driver,
		 func, jit_type_get_abi(signature));
	jit_flush_exec(func->redirector, jit_redirector_size);
#endif
#if !defined(JIT_BACKEND_INTERP) && defined(jit_indirector_size)
	/* The indirector jumps through func->entry_point, so compiled code
	   can be installed later simply by updating that field */
	_jit_create_indirector(func->indirector, (void**) &(func->entry_point));
	jit_flush_exec(func->indirector, jit_indirector_size);
#endif

	/* Add the function to the tail of the context's doubly-linked
	   function list */
	func->next = 0;
	func->prev = context->last_function;
	if(context->last_function)
	{
		context->last_function->next = func;
	}
	else
	{
		context->functions = func;
	}
	context->last_function = func;

	/* Return the function to the caller */
	return func;
}
/*
 * Emit the instructions needed after a function call: pop any argument
 * bytes that the callee did not pop itself, and move the call's return
 * value from its ABI location into "return_value".  Returns zero on
 * failure (e.g. out of memory while appending instructions), non-zero
 * on success.
 */
int _jit_create_call_return_insns
	(jit_function_t func, jit_type_t signature,
	 jit_value_t *args, unsigned int num_args,
	 jit_value_t return_value, int is_nested)
{
	jit_nint pop_bytes;
	unsigned int size;
	jit_type_t return_type;
	int ptr_return;

	/* Calculate the number of bytes that we need to pop */
	return_type = jit_type_normalize(jit_type_get_return(signature));
	ptr_return = jit_type_return_via_pointer(return_type);
#if JIT_APPLY_X86_FASTCALL == 1
	if(jit_type_get_abi(signature) == jit_abi_stdcall ||
	   jit_type_get_abi(signature) == jit_abi_thiscall ||
	   jit_type_get_abi(signature) == jit_abi_fastcall)
	{
		/* STDCALL, THISCALL and FASTCALL functions pop their own arguments */
		pop_bytes = 0;
	}
	else
#endif
	{
		/* CDECL-style call: the caller pops every pushed argument */
		pop_bytes = 0;
		while(num_args > 0)
		{
			--num_args;
			size = jit_type_get_size(jit_value_get_type(args[num_args]));
			pop_bytes += ROUND_STACK(size);
		}
#if JIT_APPLY_X86_POP_STRUCT_RETURN == 1
		if(ptr_return && is_nested)
		{
			/* Note: we only need this for nested functions, because
			   regular functions will pop the structure return
			   for us */
			pop_bytes += sizeof(void *);
		}
#else
		if(ptr_return)
		{
			/* Hidden struct-return pointer is popped by the caller here */
			pop_bytes += sizeof(void *);
		}
#endif
		if(is_nested)
		{
			/* Account for the hidden nested-scope word pushed for the
			   callee (presumably the parent frame pointer — confirm
			   against the call emitter) */
			pop_bytes += sizeof(void *);
		}
	}

	/* Pop the bytes from the system stack */
	if(pop_bytes > 0)
	{
		if(!jit_insn_defer_pop_stack(func, pop_bytes))
		{
			return 0;
		}
	}

	/* Bail out now if we don't need to worry about return values.
	   A pointer-returned structure was already written in place by the
	   callee, so there is nothing to move. */
	if(!return_value || ptr_return)
	{
		return 1;
	}

	/* Structure values must be flushed into the frame, and
	   everything else ends up in a register */
	if(jit_type_is_struct(return_type) || jit_type_is_union(return_type))
	{
		if(!jit_insn_flush_struct(func, return_value))
		{
			return 0;
		}
	}
	else if(return_type == jit_type_float32 ||
			return_type == jit_type_float64 ||
			return_type == jit_type_nfloat ||
			jit_type_get_kind(return_type) == JIT_TYPE_FLOAT32 ||
			jit_type_get_kind(return_type) == JIT_TYPE_FLOAT64 ||
			jit_type_get_kind(return_type) == JIT_TYPE_NFLOAT)
	{
		/* Floating-point results come back on the x87 stack top (ST0) */
		if(!jit_insn_return_reg(func, return_value, X86_REG_ST0))
		{
			return 0;
		}
	}
	else if(return_type->kind != JIT_TYPE_VOID)
	{
		/* All other non-void results come back in EAX */
		if(!jit_insn_return_reg(func, return_value, X86_REG_EAX))
		{
			return 0;
		}
	}

	/* Everything is back where it needs to be */
	return 1;
}