Exemple #1
0
/**
 * Reserves (but does not commit) a contiguous region of virtual memory
 * for this pool.
 *
 * The requested size is rounded up to a whole number of pages and stored
 * in _reserved.  Execute permission is added for code pools; large pages
 * are requested only for data pools when use_large_pages is set.
 * Dies via LDIE if the reservation fails.
 *
 * Fix: removed the unused local `pool_storage` and the stray tab
 * indentation before the final assert.
 */
VirtualMemoryPool::VirtualMemoryPool(size_t initial_size,
                                     bool use_large_pages,
                                     bool is_code) :
        BasePoolManager(initial_size, use_large_pages, is_code),
        _base(NULL),
        _reserved(0),
        _committed(0),
        _allocated(0)
{
    _reserved = round_up_to_page_size_multiple(initial_size);

    unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE;
    if (_is_code)
         mem_protection |= PORT_VMEM_MODE_EXECUTE;

    // Large pages are honoured only for data pools; code pools always use
    // the default page size.
    size_t ps = (!_is_code && _use_large_pages) ?
         PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT;

    apr_status_t status = port_vmem_reserve(&_vmem, (void**) &_base, _reserved,
            mem_protection, ps, aux_pool);
    if (status != APR_SUCCESS)  {
         LDIE(27, "Cannot allocate pool storage: {0} bytes of virtual memory for code or data.\n"
             "Error code = {1}" << (void *)_reserved << status);
    }

    assert(_vmem);
}
//FIXME LAZY EXCEPTION (2006.05.06)
//Find all usage and change to lazy use
/**
 * Returns the currently pending thread exception as a jthrowable local
 * handle, or NULL if no exception is pending.
 *
 * Must be entered with GC suspend enabled (asserted).  If the exception
 * is stored as a raw heap object, suspend is briefly disabled while a
 * local handle is allocated and filled; if it is stored lazily as a
 * class (+ optional message/cause), the throwable is materialized via
 * exn_create().
 */
jthrowable exn_get()
{
    assert(hythread_is_suspend_enabled());

    // we can check heap references for equality to NULL
    // without disabling gc, because GC wouldn't change 
    // null to non-null and vice versa.
    vm_thread_t vm_thread = p_TLS_vmthread;
    if ((NULL == vm_thread->thread_exception.exc_object)
        && (NULL == vm_thread->thread_exception.exc_class)) {
        return NULL;
    }

    // returned value which will contains jthrowable value of
    // curent thread exception
    jobject exc;

    if (NULL != vm_thread->thread_exception.exc_object) {
        // Raw heap reference: wrap it in a local handle while GC is disabled
        // so the object cannot move between the read and the store.
        tmn_suspend_disable();
        exc = oh_allocate_local_handle();
        exc->object = (ManagedObject *) vm_thread->thread_exception.exc_object;
        tmn_suspend_enable();
    } else if (NULL != vm_thread->thread_exception.exc_class) {
        // Lazy exception: only the class (and message/cause) were recorded;
        // create the actual throwable object now.
        exc = exn_create((Exception*)&(vm_thread->thread_exception));
    } else {
        LDIE(59, "It's impossible internal error in exception handling.");
    }
    return exc;
} // exn_get
/**
 * Returns the native entry point for the requested runtime helper.
 *
 * Generic LIL stubs are preferred when available; otherwise the helper id
 * is dispatched explicitly.  Monitor enter/exit use LIL stubs on _WIN64
 * and hand-written naked assembly stubs elsewhere.  Dies on an
 * unrecognised helper id.
 */
void * vm_helper_get_addr(VM_RT_SUPPORT f) {

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().rt_function_requests.add((void *)f, 1, NULL);
#endif // VM_STATS

    NativeCodePtr addr = rth_get_lil_helper(f);
    if (addr != NULL) {
        return addr;
    }

    switch (f) {
    case VM_RT_MONITOR_ENTER:
#ifdef _WIN64
        return rth_get_lil_monitor_enter();
#else
        return getaddress__vm_monitor_enter_naked();
#endif

    case VM_RT_MONITOR_EXIT:
#ifdef _WIN64
        return rth_get_lil_monitor_exit();
#else
        return getaddress__vm_monitor_exit_naked();
#endif

    // Object creation helper
    case VM_RT_NEW_RESOLVED_USING_VTABLE_AND_SIZE:
        return rth_get_lil_new_resolved_using_vtable_and_size();

    // Array creation helper
    case VM_RT_NEW_VECTOR_USING_VTABLE:
        return rth_get_lil_new_vector_using_vtable();

    default:
        LDIE(50, "Unexpected helper id {0}" << f);
        return NULL;
    }
}
Exemple #4
0
/**
 * Reads a boolean VM property that is required to be set.
 * Dies with an error message if the property is absent; otherwise
 * delegates to the three-argument overload with a FALSE default.
 */
static BOOLEAN vm_property_get_boolean(const char *property_name)
{
    assert(property_name);

    if (!vm_property_is_set(property_name, VM_PROPERTIES)) {
        LDIE(76, "Property value {0} is not set!" << property_name);
    }

    return vm_property_get_boolean(property_name, FALSE, VM_PROPERTIES);
}
Exemple #5
0
/**
 * Reads an integer VM property that is required to be set.
 * Dies with an error message if the property is absent; otherwise
 * delegates to the three-argument overload with a 0 default.
 */
static int vm_property_get_integer(const char *property_name)
{
    assert(property_name);

    if (!vm_property_is_set(property_name, VM_PROPERTIES)) {
        LDIE(76, "Property value {0} is not set!" << property_name);
    }

    return vm_property_get_integer(property_name, 0, VM_PROPERTIES);
}
Exemple #6
0
/**
 * Runs a major collection over the given mature space.
 *
 * Chooses between slide-compact and move-compact depending on the space
 * tuner's transform kind and fallback state, then executes the chosen
 * algorithm on all collector threads.  The tuner kind is saved up front
 * and restored afterwards for the non-LOS_ADJUST_BOUNDARY path, where it
 * is temporarily forced to TRANS_NOTHING during the collection.
 */
void mspace_collection(Mspace* mspace) 
{
  mspace->num_collections++;

  GC* gc = mspace->gc;  
  // Save the tuner kind; it may be overwritten below and is restored at the end.
  Transform_Kind kind= gc->tuner->kind;
 
  /* init the pool before starting multiple collectors */

  pool_iterator_init(gc->metadata->gc_rootset_pool);

  //For_LOS_extend
  if(LOS_ADJUST_BOUNDARY){
    // Sliding compaction is required when the LOS boundary is being tuned
    // or when this is a fallback collection; otherwise moving compaction.
    if(gc->tuner->kind != TRANS_NOTHING){
      major_set_compact_slide();
    }else if (collect_is_fallback()){
      major_set_compact_slide();
    }else{
      major_set_compact_move();    
    }
  }else {
    gc->tuner->kind = TRANS_NOTHING;
  }

  if(major_is_compact_slide()){
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: slide compact algo start ... \n");
#endif
    TRACE2("gc.process", "GC: slide compact algo start ... \n");
    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of slide compact algo ... \n");
#endif
  }else if( major_is_compact_move()){      
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: move compact algo start ... \n");
#endif    
    TRACE2("gc.process", "GC: move compact algo start ... \n");
    collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of move compact algo ... \n");
#endif    
  }else{
    LDIE(75, "GC: The speficied major collection algorithm doesn't exist!");
  }

  // Restore the saved tuner kind and recompute tuning sizes when the
  // boundary is not being adjusted but a transform was requested.
  if((!LOS_ADJUST_BOUNDARY)&&(kind != TRANS_NOTHING) ) {
    gc->tuner->kind = kind;
    gc_compute_space_tune_size_after_marking(gc);
  }
  
  return;  
} 
Exemple #7
0
/**
 * Allocates a new pool block: reserves and commits `size` bytes
 * (rounded up to a page multiple) of virtual memory and returns a
 * freshly initialized PoolDescriptor for it.
 *
 * Execute permission is added for code pools; large pages are used only
 * for data pools when enabled.  Dies via LDIE on any allocation failure.
 *
 * Fix: assert the apr_palloc result before memset-ing it (it was used
 * unchecked), and normalize the inconsistent indentation.
 */
PoolDescriptor* PoolManager::allocate_pool_storage(size_t size)
{
    PoolDescriptor* pDesc = (PoolDescriptor*) apr_palloc(aux_pool, sizeof(PoolDescriptor));
    assert(pDesc);  // apr_palloc may return NULL on exhaustion; catch it before memset
    memset(pDesc, 0, sizeof(PoolDescriptor));

    void *pool_storage = NULL;
    size = round_up_to_page_size_multiple(size);
    pDesc->_size = size;

    unsigned int mem_protection = PORT_VMEM_MODE_READ | PORT_VMEM_MODE_WRITE;
    if (_is_code) {
        mem_protection |= PORT_VMEM_MODE_EXECUTE;
    }

    // Large pages are honoured only for data pools.
    size_t ps = (!_is_code && _use_large_pages) ?
        PORT_VMEM_PAGESIZE_LARGE : PORT_VMEM_PAGESIZE_DEFAULT;

    apr_status_t status = port_vmem_reserve(&pDesc->_descriptor, &pool_storage,
        size, mem_protection, ps, aux_pool);
    if (status != APR_SUCCESS) {
        LDIE(27, "Cannot allocate pool storage: {0} bytes of virtual memory for code or data.\n"
            "Error code = {1}" << (void *)size << status);
    }

    status = port_vmem_commit(&pool_storage, size, pDesc->_descriptor);
    if (status != APR_SUCCESS || pool_storage == NULL) {
        LDIE(27, "Cannot allocate pool storage: {0} bytes of virtual memory for code or data.\n"
            "Error code = {1}" << (void *)size << status);
    }

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().number_memoryblock_allocations++;
    VM_Statistics::get_vm_stats().total_memory_allocated += size;
#endif

    pDesc->_begin = (U_8*)pool_storage;
    pDesc->_end = (U_8*)(pool_storage) + size;

    return pDesc;
}
Exemple #8
0
/**
 * Runtime helper: throws a lazily-created exception whose constructor
 * arguments are still sitting in the caller's managed frame.
 *
 * Reads the argument slots from the last M2N frame: skips the implicit
 * constructor-receiver slot, then steps to the last argument slot of the
 * given constructor (or of the default constructor when exn_constr is
 * NULL).  The Class_Handle is read from that location and passed to
 * exn_athrow, which performs the actual allocation and throw.
 * Not supported on IPF/EM64T (dies immediately there).
 */
static void rth_throw_lazy(Method * exn_constr)
{
#if defined(_IPF_) || defined(_EM64T_)
    LDIE(61, "Lazy exceptions are not supported on this platform");
#else
    U_8 *args = (U_8 *) (m2n_get_args(m2n_get_last_frame()) + 1);   // +1 to skip constructor
    if (NULL != exn_constr) {
        // Each argument slot is 4 bytes on this (32-bit) path.
        args += exn_constr->get_num_arg_slots() * 4 - 4;
    } else {
        args += 1*4 /*default constructor*/ - 4;
    }
    exn_athrow(NULL, *(Class_Handle *) args, exn_constr, args);
#endif
}   //rth_throw_lazy
Exemple #9
0
/**
 * Reports whether the type described by tih is resolved.
 * Vectors of primitive elements are always resolved; vectors of
 * reference elements are resolved iff their element type is; plain
 * object types are resolved iff the class is loaded.  Any other kind
 * is an internal error.
 */
BOOLEAN type_info_is_resolved(Type_Info_Handle tih) {
    TypeDesc* desc = (TypeDesc*)tih;

    switch (desc->get_kind()) {
    case K_Vector: {
        if (desc->get_element_type()->is_primitive()) {
            return true;
        }
        // Recurse into the element type for reference arrays.
        return type_info_is_resolved(desc->get_element_type());
    }
    case K_Object:
        return desc->is_loaded();
    default:
        LDIE(73, "Unexpected kind");
        return 0;
    }
}
// NOTE: this function may be called from both suspend-enabled and
// suspend-disabled mode.
/**
 * Re-throws the currently pending thread exception and never returns
 * normally (unwinds via exn_throw_* / exn_throw_by_class_internal).
 *
 * Ensures GC suspend is disabled, clears the pending exception state,
 * then dispatches on how the exception was recorded: as a raw heap
 * object (eager) or as class + message + cause (lazy, when
 * VM_LAZY_EXCEPTION is defined).
 */
void exn_rethrow()
{
    // The exception is about to be thrown, so suspend can be disabled
    // without a matching enable — control never returns here.
    if (hythread_is_suspend_enabled()) {
        tmn_suspend_disable();
    }

    assert(!hythread_is_suspend_enabled());

    BEGIN_RAISE_AREA;

#ifndef VM_LAZY_EXCEPTION
    ManagedObject *exn = get_exception_object_internal();
    assert(exn);
    clear_exception_internal();

    check_pop_frame(exn);

    exn_throw_for_JIT(exn, NULL, NULL, NULL, NULL);
#else
    vm_thread_t vm_thread = p_TLS_vmthread;
    if (NULL != vm_thread->thread_exception.exc_object) {
        // Eagerly created exception object: throw it directly.
        ManagedObject* exn_mng_object = vm_thread->thread_exception.exc_object;
        clear_exception_internal();

        check_pop_frame(exn_mng_object);

        exn_throw_for_JIT(exn_mng_object, NULL, NULL, NULL, NULL);
    } else if (NULL != vm_thread->thread_exception.exc_class) {
        // Lazy exception: materialize from recorded class/message/cause.
        Class * exc_class = vm_thread->thread_exception.exc_class;
        const char* exc_message = vm_thread->thread_exception.exc_message;
        jthrowable exc_cause = NULL;

        if (vm_thread->thread_exception.exc_cause){
            // Wrap the raw cause object in a local handle before clearing state.
            exc_cause = oh_allocate_local_handle();
            exc_cause->object = vm_thread->thread_exception.exc_cause;
        }
        clear_exception_internal();

        exn_throw_by_class_internal(exc_class, exc_message, exc_cause);
    } else {
        LDIE(60, "There is no exception.");
    }
#endif
    DIE(("It's Unreachable place."));

    END_RAISE_AREA;
}   //exn_rethrow
Exemple #11
0
/**
 * Allocates `size` bytes (rounded up to `alignment`, which must be a
 * power of two) from the pool, committing additional pages from the
 * reserved region as needed.
 *
 * With action == CAA_Simulate the size is forced to 0, so the call
 * returns the current allocation cursor without consuming space.
 * The whole operation runs under the pool lock.  Dies via LDIE if the
 * reserved region is exhausted or the commit fails.
 */
void* VirtualMemoryPool::alloc(size_t size, size_t alignment, Code_Allocation_Action action)
{
    // Make sure alignment is a power of 2.
    assert((alignment & (alignment-1)) == 0);
    size_t mask = alignment - 1;

    // align the requested size
    size = (size + mask) & ~mask;

    // CAA_Simulate functionality support
    if (action == CAA_Simulate)
        size = 0;

    _lock();

    // Invariants: base reserved, and allocated <= committed <= reserved.
    assert(_base);
    assert(_reserved);
    assert(_committed <= _reserved);
    assert(_allocated <= _committed);

    size_t new_allocated = _allocated + size;

    if (new_allocated > _committed) {
        apr_status_t status = APR_ENOMEM;

        // Commit whole pages up to the new allocation watermark.
        size_t new_committed = round_up_to_page_size_multiple(new_allocated);

        if (new_committed <= _reserved) {
            U_8* commit_start = _base + _committed;
            status = port_vmem_commit((void**) &commit_start, new_committed - _committed, _vmem);
        }

        if (status != APR_SUCCESS)  {
             LDIE(27, "Cannot allocate pool storage: {0} bytes of virtual memory for code or data.\n"
                 "Error code = {1}" << (void *)size << status);
        }

        _committed = new_committed;

    }

    // Hand out the region starting at the old allocation cursor.
    U_8* result = _base + _allocated;
    _allocated = new_allocated;

    _unlock();

    return result;
 }
Exemple #12
0
/**
 * Registers a JIT at the front of the jit_compilers table, shifting the
 * existing entries one slot down.  The last table slot is reserved as a
 * NULL terminator; if the second-to-last slot is already occupied the
 * table is full and the function dies.
 */
void vm_add_jit(JIT *jit)
{
    const int max_jit_num = sizeof(jit_compilers) / sizeof(JIT *) - 2;

    if (jit_compilers[max_jit_num] != NULL) {
        LDIE(64, "Can't add new JIT");
        return;
    }

    // Make room at slot 0 by shifting every registered JIT down one slot.
    for (int slot = max_jit_num; slot > 0; slot--) {
        jit_compilers[slot] = jit_compilers[slot - 1];
    }
    jit_compilers[0] = jit;

    // The terminator slot must remain empty.
    assert(jit_compilers[max_jit_num + 1] == 0);
} //vm_add_jit
Exemple #13
0
/**
 * Returns the nested type info of a compound type: the element type for
 * vectors/arrays, or the referent type for managed/unmanaged pointers.
 * Any other kind is an internal error.
 */
Type_Info_Handle type_info_get_type_info(Type_Info_Handle tih)
{
    TypeDesc* desc = (TypeDesc*)tih;
    assert(desc);

    switch (desc->get_kind()) {
    case K_Vector:
    case K_Array:
        // Arrays and vectors expose their element type.
        return desc->get_element_type();

    case K_ManagedPointer:
    case K_UnmanagedPointer:
        // Pointer types expose what they point to.
        return desc->get_pointed_to_type();

    default:
        LDIE(73, "Unexpected kind");
        return 0;
    }
} //type_info_get_type_info
/**
 * Returns the class of the currently pending thread exception, or NULL
 * if no exception is pending.  Works for both eagerly created exception
 * objects (reads the class from the object's vtable under a recursive
 * suspend-disable) and lazy exceptions (class recorded directly).
 */
Class* exn_get_class() {
    // we can check heap references for equality to NULL
    // without disabling gc, because GC wouldn't change
    // null to non-null and vice versa.
    vm_thread_t vm_thread = p_TLS_vmthread;
    if ((NULL == vm_thread->thread_exception.exc_object)
        && (NULL == vm_thread->thread_exception.exc_class)) {
        return NULL;
    }

    Class* result;

    if (NULL != vm_thread->thread_exception.exc_object) {
        // Recursive variant: this may be called with suspend already disabled.
        tmn_suspend_disable_recursive();
        ManagedObject* exn = vm_thread->thread_exception.exc_object;
        result = exn->vt()->clss;
        tmn_suspend_enable_recursive();
    } else if (NULL != vm_thread->thread_exception.exc_class) {
        result = vm_thread->thread_exception.exc_class;
    } else {
        LDIE(59, "It's impossible internal error in exception handling.");
    }
    return result;
}
/**
 * Marks this method as a no-op if it is a default-style constructor
 * (<init>()V) whose bytecode provably does nothing observable.
 *
 * A small state machine scans the bytecode and accepts only:
 *   - aload_0 followed by putfield of a constant null/zero,
 *   - invokespecial of another constructor that is itself a nop and
 *     takes no arguments,
 *   - and a final `return`.
 * On success sets _flags.is_nop = TRUE; any other pattern bails out
 * leaving the flag untouched.  Native and bytecode-less methods are
 * skipped.
 */
void Method::_set_nop()
{
    bool verbose = false;

    Global_Env *env = VM_Global_State::loader_env;
    // Only constructors with a ()V descriptor are candidates.
    if (get_name() != env->Init_String || get_descriptor() != env->VoidVoidDescriptor_String) {
        return;
    }

    if(is_native()) {
        return;
    }
    unsigned len = _byte_code_length;
    if(!len) {
        return;
    }
    U_8* bc = _byte_codes;
    Nop_Stack_State stack_state = NS_StackEmpty;
    if(verbose) {
        printf("=========== nop[%d]: %s.%s%s\n", len, get_class()->get_name()->bytes, get_name()->bytes, get_descriptor()->bytes);
    }
    for (unsigned idx = 0; idx < len; idx++) {
        U_8 b = bc[idx];
        if(verbose) {
            printf("\tbc[%d]=%d, state=%d\n", idx, b, stack_state);
        }
        if(b == 0xb1) {   // return
            // Reached `return` without seeing any effectful bytecode: nop.
            if(verbose) {
                printf("+++++++ nop: %s.%s%s\n", get_class()->get_name()->bytes, get_name()->bytes, get_descriptor()->bytes);
            }
            _flags.is_nop = TRUE;
            return;
        }
        switch(stack_state) {
        case NS_StackEmpty:
            switch(b) {
            case 0x2a:  // aload_0
                stack_state = NS_ThisPushed;
                break;
            default:
                return;
            }
            break;
        case NS_ThisPushed:
            switch(b) {
            case 0x01:  // aconst_null
            case 0x03:  // iconst_0
                stack_state = NS_ThisAndZeroPushed;
                break;
            case 0xb7:  // invokespecial
                {
                    // Constant-pool index is a big-endian U2 operand.
                    unsigned index = (bc[idx + 1] << 8) + bc[idx + 2];
                    if(verbose) {
                        printf("\tinvokespecial, index=%d\n", index);
                    }
                    Method_Handle mh = resolve_special_method_env(VM_Global_State::loader_env,
                                                                  get_class(),
                                                                  index, false);
                    Method *callee = (Method *)mh;
                    if(!callee) {
                        if(verbose) {
                            printf("\tinvokespecial, callee==null\n");
                        }
                        return;
                    }
                    // Self-recursion cannot be proven nop here; bail out.
                    if(callee == this) {
                        return;
                    }
                    if(verbose) {
                        printf("invokespecial: %s.%s%s\n", callee->get_class()->get_name()->bytes, callee->get_name()->bytes, callee->get_descriptor()->bytes);
                    }
                    if(!callee->is_nop()) {
                        return;
                    }
                    // Require a no-argument callee: descriptor must be "()...".
                    const char *descr = callee->get_descriptor()->bytes;
                    if(descr[1] != ')') {
                        return;
                    }
                    if(verbose) {
                        printf("invokespecial nop: %s.%s%s\n", callee->get_class()->get_name()->bytes, callee->get_name()->bytes, callee->get_descriptor()->bytes);
                    }
                }
                stack_state = NS_StackEmpty;
                idx += 2;   // skip the two operand bytes
                break;
            default:
                return;
            }
            break;
        case NS_ThisAndZeroPushed:
            switch(b) {
            case 0xb5:  // putfield
                // Storing constant null/zero into a field of `this` is harmless.
                stack_state = NS_StackEmpty;
                if(verbose) {
                    printf("\tputfield\n");
                }
                idx += 2;   // skip the two operand bytes
                break;
            default:
                return;
            }
            break;
        default:
            LDIE(57, "Unexpected stack state");
            return;
        }
    }
    // Bytecode ended without a `return` opcode — should be impossible for
    // verified code.
    LDIE(56, "should'nt get here");
} //Method::_set_nop
Exemple #16
0
/**
 * Default JIT method-invocation bridge: marshals jvalue arguments into
 * 32-bit argument slots, invokes the compiled code through the generated
 * managed-invoke trampolines, and stores the result back into
 * return_value according to the method's return type.
 *
 * Arguments are packed right-to-left into arg_words (argId counts down),
 * with longs/doubles occupying two slots.  Must be called with GC
 * suspend disabled (asserted).  If an exception is raised during the
 * call, a non-void result is cleared.
 *
 * NOTE(review): the (unsigned)/(U_32) casts assume 32-bit object
 * pointers and argument slots — this path is for 32-bit targets.
 */
void
JIT_execute_method_default(JIT_Handle jit, jmethodID methodID, jvalue *return_value, jvalue *args) {

    // Detecting errors with object headears on stack when using destructive
    // unwinding.
    void *lastFrame = p_TLS_vmthread->lastFrame;
    p_TLS_vmthread->lastFrame = (void*)&lastFrame;
    //printf("execute: push: prev = 0x%p, curr=0x%p\n", lastFrame, &lastFrame);

//    fprintf(stderr, "Not implemented\n");

    Method *method = (Method*) methodID;
    TRACE("enter method "
          << method->get_class()->get_name()->bytes << " "
          << method->get_name()->bytes << " "
          << method->get_descriptor()->bytes);
    int sz = method->get_num_arg_slots();
    void *meth_addr = method->get_code_addr();
    U_32 *arg_words = (U_32*) STD_ALLOCA(sz * sizeof(U_32));

    // argId counts down from the total slot count as slots are filled.
    int argId = sz;
    int pos = 0;

    assert(!hythread_is_suspend_enabled());
    if (!method->is_static()) {
        // Receiver goes into the last slot; unwrap it from its handle.
        ObjectHandle handle = (ObjectHandle) args[pos++].l;
        assert(handle);
        arg_words[--argId] = (unsigned) handle->object;
    }

    // Walk the descriptor (skipping the opening '(') to convert each argument.
    const char *mtype = method->get_descriptor()->bytes + 1;
    assert(mtype != 0);

    for(; *mtype != ')'; mtype++) {
        switch(*mtype) {
        case JAVA_TYPE_CLASS:
        case JAVA_TYPE_ARRAY:
        {
            ObjectHandle handle = (ObjectHandle) args[pos++].l;
            arg_words[--argId] = (unsigned) (handle ? handle->object : 0);

            // Skip the rest of the array/class descriptor token.
            while(*mtype == '[') mtype++;
            if (*mtype == 'L')
                while(*mtype != ';') mtype++;
        }
        break;

        case JAVA_TYPE_SHORT:
            // sign extend
            arg_words[--argId] = (U_32)(I_32) args[pos++].s;
            break;
        case JAVA_TYPE_BYTE:
            // sign extend
            arg_words[--argId] = (U_32)(I_32) args[pos++].b;
            break;
        case JAVA_TYPE_INT:
            // sign extend
            arg_words[--argId] = (U_32)(I_32) args[pos++].i;
            break;

        case JAVA_TYPE_FLOAT:
            arg_words[--argId] = (I_32) args[pos++].i;
            break;
        case JAVA_TYPE_BOOLEAN:
            arg_words[--argId] = (I_32) args[pos++].z;
            break;
        case JAVA_TYPE_CHAR:
            // zero extend
            arg_words[--argId] = (I_32) args[pos++].c;
            break;

        case JAVA_TYPE_LONG:
        case JAVA_TYPE_DOUBLE:
            // 64-bit values take two consecutive 32-bit slots.
            *(jlong*)&arg_words[argId-2] = args[pos++].j;
            argId -= 2;
            break;
        default:
            LDIE(53, "Unexpected java type");
        }
    }
    assert(argId >= 0);

    jvalue *resultPtr = (jvalue*) return_value;
    Java_Type ret_type = method->get_return_java_type();

    // Point at the first filled slot and compute how many were used.
    arg_words += argId;
    argId = sz - argId;

    static const IntFuncPtr invoke_managed_func = gen_invoke_int_managed_func();
    static const FloatFuncPtr invoke_float_managed_func = gen_invoke_float_managed_func();
    static const DoubleFuncPtr invoke_double_managed_func = gen_invoke_double_managed_func();

    switch(ret_type) {
    case JAVA_TYPE_VOID:
        invoke_managed_func(arg_words, argId, meth_addr);
        break;
    case JAVA_TYPE_CLASS:
    case JAVA_TYPE_ARRAY:
    case JAVA_TYPE_STRING:
    {
        // Wrap a returned reference in a fresh local handle.
        ManagedObject *ref = ((RefFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
        ObjectHandle h = oh_allocate_local_handle();

        if (ref != NULL) {
            h->object = ref;
            resultPtr->l = h;
        } else {
            resultPtr->l = NULL;
        }
    }
    break;

    case JAVA_TYPE_BOOLEAN:
    case JAVA_TYPE_BYTE:
    case JAVA_TYPE_CHAR:
    case JAVA_TYPE_SHORT:
    case JAVA_TYPE_INT:
        resultPtr->i = invoke_managed_func(arg_words, argId, meth_addr);
        break;

    case JAVA_TYPE_FLOAT:
        resultPtr->f = invoke_float_managed_func(arg_words, argId, meth_addr);
        break;

    case JAVA_TYPE_LONG:
        resultPtr->j = ((LongFuncPtr)invoke_managed_func)(arg_words, argId, meth_addr);
        break;

    case JAVA_TYPE_DOUBLE:
        resultPtr->d = invoke_double_managed_func(arg_words, argId, meth_addr);
        break;

    default:
        LDIE(53, "Unexpected java type");
    }

    if (exn_raised()) {
        TRACE("Exception occured: " << exn_get_name());
        if ((resultPtr != NULL) && (ret_type != JAVA_TYPE_VOID)) {
            resultPtr->l = 0; //clear result
        }
    }

    TRACE("exit method "
          << method->get_class()->get_name()->bytes << " "
          << method->get_name()->bytes << " "
          << method->get_descriptor()->bytes);

    // Detecting errors with object headears on stack when using destructive
    // unwinding.
    //printf("execute:  pop: prev = 0x%p, curr=0x%p\n", &lastFrame, lastFrame);
    p_TLS_vmthread->lastFrame = lastFrame;
}
Exemple #17
0
/**
 * Parses all gc.* VM properties and constructs/configures the GC.
 *
 * Ordering matters and is deliberate:
 *  1. algorithm selection (unique vs. generational) creates the GC object;
 *  2. gen-mode / force-major-collect tweaks must follow step 1;
 *  3. write-barrier generation must follow step 2;
 * then heap sizing, collector thread counts, space sizing, verification,
 * and concurrent-GC phase options are applied, mutating module-level
 * globals (NOS_SIZE, FORCE_FULL_COMPACT, ...) as well as the GC object.
 * Returns the configured GC.
 */
GC* gc_parse_options() 
{
  TRACE2("gc.process", "GC: parse options ...\n");

  GC* gc;

  /* GC algorithm decision */
  /* Step 1: */
  char* minor_algo = NULL;
  char* major_algo = NULL;
  char* unique_algo = NULL;

  if (vm_property_is_set("gc.minor_algorithm", VM_PROPERTIES) == 1) {
    minor_algo = vm_properties_get_value("gc.minor_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.major_algorithm", VM_PROPERTIES) == 1) {
    major_algo = vm_properties_get_value("gc.major_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.unique_algorithm", VM_PROPERTIES) == 1) {
    unique_algo = vm_properties_get_value("gc.unique_algorithm", VM_PROPERTIES);
  }

  Boolean has_los = FALSE;
  if (vm_property_is_set("gc.has_los", VM_PROPERTIES) == 1) {
    has_los = vm_property_get_boolean("gc.has_los");
  }

  // A unique (non-generational) algorithm overrides minor/major selections.
  if(unique_algo){
    if(minor_algo || major_algo){
      LWARN(60, "Generational options cannot be set with unique_algo, ignored.");
    }
    gc = gc_unique_decide_collection_algo(unique_algo, has_los);
    vm_properties_destroy_value(unique_algo);  
  }else{ /* default */
    gc = gc_gen_decide_collection_algo(minor_algo, major_algo, has_los);
    if( minor_algo) vm_properties_destroy_value(minor_algo);
    if( major_algo) vm_properties_destroy_value(major_algo);
  }

  if (vm_property_is_set("gc.gen_mode", VM_PROPERTIES) == 1) {
    Boolean gen_mode = vm_property_get_boolean("gc.gen_mode");
    gc_set_gen_mode(gen_mode);
  }

  /* Step 2: */

  /* NOTE:: this has to stay after above!! */
  if (vm_property_is_set("gc.force_major_collect", VM_PROPERTIES) == 1) {
    FORCE_FULL_COMPACT = vm_property_get_boolean("gc.force_major_collect");
    if(FORCE_FULL_COMPACT){
      // Forcing major collections is incompatible with generational mode.
      gc_set_gen_mode(FALSE);
    }
  }

  /* Step 3: */
  /* NOTE:: this has to stay after above!! */
  gc->generate_barrier = gc_is_gen_mode();
  
  if (vm_property_is_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
    Boolean generate_barrier = vm_property_get_boolean("gc.generate_barrier");
    gc->generate_barrier = (generate_barrier || gc->generate_barrier);
  }
  
/* ///////////////////////////////////////////////////   */
  
  // Heap sizing: gc.mx (max) and gc.ms (min), clamped to sane bounds.
  POINTER_SIZE_INT max_heap_size = HEAP_SIZE_DEFAULT;
  POINTER_SIZE_INT min_heap_size = min_heap_size_bytes;
  
  if (vm_property_is_set("gc.mx", VM_PROPERTIES) == 1) {
    max_heap_size = vm_property_get_size("gc.mx");

    if (max_heap_size < min_heap_size){
      max_heap_size = min_heap_size;
      LWARN(61, "Max heap size you set is too small, reset to {0}MB" << max_heap_size/MB);
    }
    if (0 == max_heap_size){
      max_heap_size = HEAP_SIZE_DEFAULT;
      LWARN(62, "Max heap size you set equals to zero, reset to {0}MB" << max_heap_size/MB);
    }
 
    // Default the min heap to a tenth of the max, floored at the global minimum.
    min_heap_size = max_heap_size / 10;
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      //printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);
    }
  }

  if (vm_property_is_set("gc.ms", VM_PROPERTIES) == 1) {
    min_heap_size = vm_property_get_size("gc.ms");
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      LWARN(63, "Min heap size you set is too small, reset to {0}MB" << min_heap_size/MB);
    } 
  }

  if (min_heap_size > max_heap_size){
    max_heap_size = min_heap_size;
    LWARN(61, "Max heap size is too small, reset to {0}MB" << max_heap_size/MB);
  }

  min_heap_size_bytes = min_heap_size;
  max_heap_size_bytes = max_heap_size;

  // Space sizing knobs (nursery, LOS, ...).
  if (vm_property_is_set("gc.nos_size", VM_PROPERTIES) == 1) {
    NOS_SIZE = vm_property_get_size("gc.nos_size");
  }

  if (vm_property_is_set("gc.min_nos_size", VM_PROPERTIES) == 1) {
    MIN_NOS_SIZE = vm_property_get_size("gc.min_nos_size");
  }

  if (vm_property_is_set("gc.init_los_size", VM_PROPERTIES) == 1) {
    INIT_LOS_SIZE = vm_property_get_size("gc.init_los_size");
  }  

  // Thread-count knobs; a value of 0 keeps the current default.
  if (vm_property_is_set("gc.num_collectors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_collectors");
    NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
  }

  if (vm_property_is_set("gc.num_conclctors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_conclctors");
    NUM_CONCLCTORS = (num==0)? NUM_CONCLCTORS:num;
  }

  // for concurrent GC debug
  if (vm_property_is_set("gc.num_con_markers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_markers");
    NUM_CON_MARKERS = (num==0)? NUM_CON_MARKERS:num;
  }

  if (vm_property_is_set("gc.num_con_sweepers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_sweepers");
    NUM_CON_SWEEPERS = (num==0)? NUM_CON_SWEEPERS:num;
  }


  

  if (vm_property_is_set("gc.tospace_size", VM_PROPERTIES) == 1) {
    TOSPACE_SIZE = vm_property_get_size("gc.tospace_size");
  }

  if (vm_property_is_set("gc.mos_reserve_size", VM_PROPERTIES) == 1) {
    MOS_RESERVE_SIZE = vm_property_get_size("gc.mos_reserve_size");
  }

  if (vm_property_is_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
    NOS_PARTIAL_FORWARD = vm_property_get_boolean("gc.nos_partial_forward");
  }
    
  if (vm_property_is_set("gc.minor_collectors", VM_PROPERTIES) == 1) {
    MINOR_COLLECTORS = vm_property_get_integer("gc.minor_collectors");
  }

  if (vm_property_is_set("gc.major_collectors", VM_PROPERTIES) == 1) {
    MAJOR_COLLECTORS = vm_property_get_integer("gc.major_collectors");
  }

  if (vm_property_is_set("gc.ignore_finref", VM_PROPERTIES) == 1) {
    IGNORE_FINREF = vm_property_get_boolean("gc.ignore_finref");
  }

  if (vm_property_is_set("gc.verify", VM_PROPERTIES) == 1) {
    // Keep our own copy: the property value buffer is destroyed below.
    char* value = vm_properties_get_value("gc.verify", VM_PROPERTIES);
    GC_VERIFY = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){
    GEN_NONGEN_SWITCH= vm_property_get_boolean("gc.gen_nongen_switch");
    gc->generate_barrier = TRUE;
  }

  if (vm_property_is_set("gc.heap_iteration", VM_PROPERTIES) == 1) {
    JVMTI_HEAP_ITERATION = vm_property_get_boolean("gc.heap_iteration");
  }

  if (vm_property_is_set("gc.ignore_vtable_tracing", VM_PROPERTIES) == 1) {
    IGNORE_VTABLE_TRACING = vm_property_get_boolean("gc.ignore_vtable_tracing");
  }

  if (vm_property_is_set("gc.use_large_page", VM_PROPERTIES) == 1){
    char* value = vm_properties_get_value("gc.use_large_page", VM_PROPERTIES);
    large_page_hint = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.share_los_boundary", VM_PROPERTIES) == 1){
    share_los_boundary = vm_property_get_boolean("gc.share_los_boundary");
  }

  if (vm_property_is_set("gc.ignore_force_gc", VM_PROPERTIES) == 1){
    IGNORE_FORCE_GC = vm_property_get_boolean("gc.ignore_force_gc");
  }
  
  // Concurrent-GC phase selection.  All of these require the build to be
  // configured with USE_UNIQUE_MARK_SWEEP_GC; otherwise they die.
  if (vm_property_is_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
    Boolean use_all_concurrent_phase= vm_property_get_boolean("gc.concurrent_gc");
    if(use_all_concurrent_phase){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc_specify_con_mark();
      gc_specify_con_sweep();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_ENUMERATION = vm_property_get_boolean("gc.concurrent_enumeration");
    if(USE_CONCURRENT_ENUMERATION){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_MARK = vm_property_get_boolean("gc.concurrent_mark");
    if(USE_CONCURRENT_MARK){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_mark();
      gc->generate_barrier = TRUE;
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }

  if (vm_property_is_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep");
    if(USE_CONCURRENT_SWEEP){
      /*currently, concurrent sweeping only starts after concurrent marking.*/
      assert(gc_is_specify_con_mark());
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_sweep();
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }
 
  char* concurrent_algo = NULL;
  
  if (vm_property_is_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
    concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES);    
    gc_decide_con_algo(concurrent_algo);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_con_algo();
  }

  char* cc_scheduler = NULL;
  if (vm_property_is_set("gc.cc_scheduler", VM_PROPERTIES) == 1) {
    cc_scheduler = vm_properties_get_value("gc.cc_scheduler", VM_PROPERTIES);    
    gc_decide_cc_scheduler_kind(cc_scheduler);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_cc_scheduler_kind();
  }

#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
  if(vm_property_is_set("gc.prefetch",VM_PROPERTIES) ==1) {
    PREFETCH_ENABLED = vm_property_get_boolean("gc.prefetch");
  }

  if(vm_property_is_set("gc.prefetch_distance",VM_PROPERTIES)==1) {
    PREFETCH_DISTANCE = vm_property_get_size("gc.prefetch_distance");
    if(!PREFETCH_ENABLED) {
      LWARN(64, "Prefetch distance set with Prefetch disabled!");
    }
  }

  if(vm_property_is_set("gc.prefetch_stride",VM_PROPERTIES)==1) {
    PREFETCH_STRIDE = vm_property_get_size("gc.prefetch_stride");
    if(!PREFETCH_ENABLED) {
      LWARN(65, "Prefetch stride set  with Prefetch disabled!");
    }  
  }
  
  if(vm_property_is_set("gc.zeroing_size",VM_PROPERTIES)==1) {
    ZEROING_SIZE = vm_property_get_size("gc.zeroing_size");
  }   
#endif

#ifdef PREFETCH_SUPPORTED
  if(vm_property_is_set("gc.mark_prefetch",VM_PROPERTIES) ==1) {
    mark_prefetch = vm_property_get_boolean("gc.mark_prefetch");
  }  
#endif

  return gc;
}
Exemple #18
0
/**
 * Loads (or returns the already-loaded) class for this type descriptor.
 *
 * Primitive kinds map directly to the pre-loaded primitive classes;
 * K_Object is loaded through the class loader; K_Vector loads the
 * element type first and then resolves the array class.  Dies on any
 * other kind.
 *
 * Under ORDER builds the "was the class already loaded" decision is
 * recorded to / replayed from a per-thread SYSTEM_CALL log (record tag
 * 21) so that record and replay runs make identical choices; during
 * replay the thread spins until another thread publishes `clss`.
 */
Class* TypeDesc::load_type_desc()
{

#ifdef ORDER
    int class_status = 0;
    if(vm_order_record){
        // Record mode: log whether clss was already non-NULL.
        class_status = clss ? 1 : 0;
        U_32 tid = hythread_self()->thread_id;
        if (order_system_call[tid] == NULL)
        {
            char name[40];
            sprintf(name, "SYSTEM_CALL.%d.log", tid);

            order_system_call[tid] = fopen64(name, "a+");
        }
#ifdef ORDER_DEBUG

        assert(order_system_call[tid] != NULL);

        fprintf(order_system_call[tid], "[%d] ", 21);
#endif
        fprintf(order_system_call[tid], "%d\n", class_status);

        if(class_status == 1){
#ifdef ORDER_DEBUG
//            printf("[TYPE_DESC]: class : %s already loaded, thread id : %d\n", clss->get_name()->bytes, hythread_self()->thread_id);
#endif
            return clss;
        }

    }
    else{
        
        // Replay mode: read back the recorded decision for this thread.
        U_32 tid = hythread_self()->thread_id;
        if (order_system_call[tid] == NULL)
        {
            char name[40];
            sprintf(name, "SYSTEM_CALL.%d.log", tid);

            order_system_call[tid] = fopen64(name, "r");
        }
#ifdef ORDER_DEBUG
        assert(order_system_call[tid] != NULL);

        int bit_num;
        fscanf(order_system_call[tid], "[%d]", &bit_num);
        assert(bit_num == 21);
#endif
        fscanf(order_system_call[tid], "%d\n", &class_status);

        if(class_status == 1){
            // The recorded run saw the class loaded; wait until it is here too.
            while(!clss){
                usleep(1000);
#ifdef ORDER_DEBUG
                printf("[TYPE_DESC]: usleep in TypeDesc::load_type_desc!!\n");
#endif
            }
            return clss;
        }

    }
	
#else

    if (clss) return clss; // class already loaded

#endif

    
    Global_Env* env = VM_Global_State::loader_env;
    Class* element_clss;

    switch (get_kind()) {
    case K_S1: return env->Byte_Class;
    case K_S2: return env->Short_Class;
    case K_S4: return env->Int_Class;
    case K_S8: return env->Long_Class;
    case K_F4: return env->Float_Class;
    case K_F8: return env->Double_Class;
    case K_Boolean: return env->Boolean_Class;
    case K_Char: return env->Char_Class;
    case K_Void: return env->Void_Class;
    case K_Object:
        assert (loader);
        assert (name);
        // FIXME: better to use LoadVerifyAndPrepareClass here - but this results in Recursive resolution collision in StartLoadingClass
        //c = loader->LoadVerifyAndPrepareClass(env, name);
        clss = loader->LoadClass(env, name);
        return clss;
    case K_Vector:
        assert (component_type);
        // Load the element type first, then resolve the array-of-element class.
        element_clss = component_type->load_type_desc();
        if (!element_clss) return NULL;
        clss = resolve_class_array_of_class(env, element_clss);
        return clss;
    default:
        // All other types are not Java types, so fail
        LDIE(73, "Unexpected kind");
        return NULL;
    }
}
Exemple #19
0
// Stub: stack-frame unwinding is not implemented by this JIT; dies if called.
EXPORT void JIT_unwind_stack_frame(JIT_Handle, Method_Handle, JitFrameContext *)
{
    LDIE(51, "Not implemented");
}
Exemple #20
0
// Stub: root-set enumeration is not implemented by this JIT; dies if called.
EXPORT void JIT_get_root_set_from_stack_frame(JIT_Handle, Method_Handle, GC_Enumeration_Handle, JitFrameContext *)
{
    LDIE(51, "Not implemented");
}
Exemple #21
0
// Stub: exception-handler context fixup is not implemented by this JIT; dies if called.
EXPORT void JIT_fix_handler_context(JIT_Handle, Method_Handle, JitFrameContext *)
{
    LDIE(51, "Not implemented");
}
Exemple #22
0
// Stub: `this`-address lookup is not implemented by this JIT; dies if called.
// The trailing return only satisfies the non-void signature.
EXPORT void * JIT_get_address_of_this(JIT_Handle, Method_Handle, const JitFrameContext *)
{
    LDIE(51, "Not implemented");
    return (void *)JIT_FAILURE;
}
Exemple #23
0
// Stub: method-signature type info is not implemented; dies if called.
// The trailing return only satisfies the non-void signature.
Method_Signature_Handle type_info_get_method_sig(Type_Info_Handle UNREF tih)
{
    LDIE(51, "Not implemented");

    return 0;
} //type_info_get_method_sig
/**
 * Placeholder TLS accessor that must never be invoked; dies via LDIE.
 *
 * Fix: added a trailing `return NULL;` — the function is declared to
 * return `void *` but had no return statement, which is undefined
 * behaviour (and a compiler warning) if LDIE is not recognised as
 * noreturn.  Matches the pattern of the other stubs in this file.
 */
void *dummy_tls_func()
{
    LDIE(56, "shouldn't get here");
    return NULL;
}