void MemoryAddress::prepare_preindexed_address(jint address_offset,
                                               Assembler::Register& reg,
                                               jint& offset) {
  offset = 0;

  if (!has_address_register()) {
    // Try to do direct access
    jint fixed_offset;
    if (has_fixed_offset(fixed_offset)) {
      offset = fixed_offset + base_offset() + address_offset;
      reg = fixed_register();
      return;
    }
    create_and_initialize_address_register();
  }
  GUARANTEE(has_address_register(), "We must have address register by now");
  reg = address_register();

  int xbase_offset = // base_offset or 0
      address_register_includes_base_offset() ? 0 : base_offset();
  if (address_offset == 0 && xbase_offset != 0) {
    // Update the address_register so that it includes the base_offset
    set_address_register_includes_base_offset();
    code_generator()->add(address_register(), address_register(),
                          xbase_offset);
    offset = 0;
  } else {
    offset = (address_offset + xbase_offset);
  }
}
void check_virtual(int index JVM_TRAPS) {
  UsingFastOops level1;
  ConstantPool::Fast cp = method()->constants();
  InstanceClass::Fast klass;
  ClassInfo::Fast info;
  Method::Fast m;

  if (cp().tag_at(index).is_resolved_virtual_method()) {
    int class_id, vtable_index;
    cp().resolved_virtual_method_at(index, vtable_index, class_id);
    klass = Universe::class_from_id(class_id);
    info = klass().class_info();
    m = info().vtable_method_at(vtable_index);
    _owner->add_method(&m JVM_NO_CHECK_AT_BOTTOM);
  } else {
    // This could be an element we failed to resolve
    // when ROMizing an application.
    if (!PostponeErrorsUntilRuntime) {
      SHOULD_NOT_REACH_HERE();
    } else {
      // The following GUARANTEE could trigger if the class is a bogus
      // TCK class and we want to postpone the error until runtime, so
      // we have commented it out.
      //   GUARANTEE(cp.tag_at(index).is_method(), "Sanity");
      //
      // The class must be marked as unverified or non-optimizable,
      // since it contains an unresolved entry at this point.
#ifdef AZZERT
      klass = method()->holder();
      GUARANTEE(!klass().is_verified() || !klass().is_optimizable(),
                "Sanity");
#endif
    }
  }
}
CLASS getClass(const char *name) {
  GUARANTEE(_in_kvm_native_method, "sanity");
  OopDesc *mirror_obj = NULL;
  KNI_FindClass(name, (jclass)&mirror_obj);
  return mirror_obj;
}
jint ConstantPool::name_and_type_at(int index JVM_TRAPS) const {
  int offset = offset_from_checked_index(index JVM_ZCHECK_0(offset));
  cp_check_0(ConstantTag::is_name_and_type(tag_value_at(index)));
  int result = int_field(offset);
  GUARANTEE(result != 0, "sanity for JVM_ZCHECK");
  return result;
}
void Thread::stack_oops_do(void do_oop(OopDesc**)) {
  if (this->task_id() == Task::INVALID_TASK_ID) {
    // If we continue we'll break JVMTaskGCContext with a -1 task id
    GUARANTEE(!last_java_frame_exists(), "thread is already dead");
    return;
  }
  // See Scheduler.cpp threads_do_list() for the
  // reason why we do this.
  const TaskGCContext tmp(this->task_id());

  // Java frames
  if (last_java_frame_exists()) {
    Frame fr(this);
    while (true) {
      if (fr.is_entry_frame()) {
        fr.as_EntryFrame().oops_do(do_oop);
        if (fr.as_EntryFrame().is_first_frame()) {
          break;
        }
        fr.as_EntryFrame().caller_is(fr);
      } else {
        fr.as_JavaFrame().oops_do(do_oop);
        fr.as_JavaFrame().caller_is(fr);
      }
    }
  }
}
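// Illustrative sketch (not part of the original source): a callback matching
// the void do_oop(OopDesc**) function-pointer parameter of stack_oops_do().
// This one merely counts non-null pointers; a real GC visitor would mark or
// relocate them instead. count_oop and _counted_oops are hypothetical names.
static int _counted_oops;

static void count_oop(OopDesc** p) {
  if (*p != NULL) {
    _counted_oops++;
  }
}

// usage sketch: thread->stack_oops_do(count_oop);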
KNIEXPORT void KNI_GetThisPointer(jobject toHandle) {
  GUARANTEE(!_in_kvm_native_method, "sanity");
#ifdef AZZERT
  if (!_jvm_in_quick_native_method) {
    if (!ObjectHeap::is_finalizing()) {
      JavaFrame frame(Thread::current());
      Method::Raw method = frame.method();
      GUARANTEE(!method().is_static(), "static method has no 'this' pointer");
    }
  } else {
    // IMPL_NOTE: we don't have current method info ... this should be fixed
    // to make it possible to do the GUARANTEE as above.
  }
#endif
  KNI_GetParameterAsObject(0, toHandle);
}
void CompiledMethod::print_code_on(Stream* st) {
  int end_offset = RelocationReader::code_length(this);
  for (int offset = 0; offset < end_offset; offset += 2) {
    short* instruction_start = (short*)(entry() + offset);
    print_comment_for(offset, st);

    bool is_oop = false;
    for (RelocationReader stream(this); !stream.at_end(); stream.advance()) {
      if (stream.code_offset() == offset) {
        if (stream.is_oop() || stream.is_rom_oop()) {
          is_oop = true;
          break;
        }
      }
    }

    st->print(" %4d: ", offset);
    if (VerbosePointers) {
      st->print("0x%08x: ", instruction_start);
    }
    if (!is_oop) {
      decode_instruction(st, instruction_start, offset);
    } else {
      GUARANTEE(((int)instruction_start & 0x3) == 0,
                "Disassembler: Invalid embedded Oop");
      Oop o = (OopDesc*)*(int*)instruction_start;
      if (VerbosePointers) {
        st->print("0x%08x:", *(int*)instruction_start);
      }
      o.print_value_on(st);
      // An embedded Oop occupies 4 bytes (two 2-byte code units), so skip
      // one extra unit here on top of the loop's own increment.
      offset += 2;
    }
    st->cr();
  }
}
void Value::set_double(jdouble value) {
  GUARANTEE(type() == T_DOUBLE, "check type");
  destroy();
  set_where(T_IMMEDIATE);
  *(double *)(&_low) = value;
}
/*
 * Verifies that this oop is a unique handle to its obj, i.e., no other Oop
 * on the chain references its obj.
 */
void LinkedBasicOop::handle_uniqueness_verification() {
  if (not_null()) {
    ForAllHandles( handle ) {
      GUARANTEE(handle == this || handle->obj() != obj(),
                "Non-unique handle");
    }
  }
}
void ExtendedValue::set_obj(Oop* value) {
  if (value->is_null()) {
    is_value(T_OBJECT);
    this->value().set_obj(value);
  } else {
    // IMPL_NOTE: Can we merge the code below with Value::set_value()??
    set_oop(value);
    if (!ObjectHeap::contains_moveable(value->obj())) {
      _value.set_not_on_heap();
    }
#if ENABLE_COMPILER_TYPE_INFO
    FarClass::Raw value_class = value->blueprint();
    GUARANTEE(value_class.not_null(), "Sanity");
    if (value_class.is_java_class()) {
      JavaClass::Raw java_class = value_class.obj();
      _value.set_class_id(java_class().class_id());
      _value.set_is_exact_type();
    }
#else
    if (value->is_string()) {
      _value.set_is_string();
    }
#endif
    _value.set_must_be_nonnull();
  }
}
void Value::set_registers(Assembler::Register low, Assembler::Register high) {
  destroy();
  GUARANTEE(use_two_registers(), "tag_check");
  set_where(T_REGISTER);
  _low  = (int)low;
  _high = (int)high;
}
int byte_size_for(BasicType type) {
  static const jubyte table[] = {
    /* unused    =  0: */ 0,
    /* unused    =  1: */ 0,
    /* unused    =  2: */ 0,
    /* unused    =  3: */ 0,
    /* T_BOOLEAN =  4: */ T_BOOLEAN_byte_size,
    /* T_CHAR    =  5: */ T_CHAR_byte_size,
    /* T_FLOAT   =  6: */ T_FLOAT_byte_size,
    /* T_DOUBLE  =  7: */ T_DOUBLE_byte_size,
    /* T_BYTE    =  8: */ T_BYTE_byte_size,
    /* T_SHORT   =  9: */ T_SHORT_byte_size,
    /* T_INT     = 10: */ T_INT_byte_size,
    /* T_LONG    = 11: */ T_LONG_byte_size,
    /* T_OBJECT  = 12: */ T_OBJECT_byte_size,
    /* T_ARRAY   = 13: */ T_OBJECT_byte_size,
    /* T_VOID    = 14: */ T_VOID_byte_size,
#if ENABLE_ROM_GENERATOR
    /* T_SYMBOLIC= 15: */ T_SYMBOLIC_byte_size,
#endif
  };

#ifdef AZZERT
#if ENABLE_ROM_GENERATOR
  const int max = T_SYMBOLIC;
#else
  const int max = T_VOID;
#endif
  GUARANTEE(type >= T_BOOLEAN && type <= max, "sanity");
#endif

  return table[(int)type];
}
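// Illustrative sketch (not part of the original source): how a caller might
// use byte_size_for() to size the payload of a primitive array. Assumes the
// BasicType enum above; compute_array_payload() is a hypothetical helper
// shown only to demonstrate the intended usage.
static size_t compute_array_payload(BasicType element_type, int length) {
  // e.g. byte_size_for(T_INT) == 4, so an int[10] payload is 40 bytes.
  return (size_t)byte_size_for(element_type) * (size_t)length;
}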
void GPTableGenerator::define_zeros(int size) {
  if (!GenerateGPTableOnly) {
    SourceMacros::define_zeros(size);
  } else {
    if (GenerateGNUCode) {
#ifdef SPARC
      GUARANTEE((size % 4) == 0, "sanity");
      for (int i = 0; i < size; i += 4) {
        stream()->print_cr("\t.long\t0");
      }
#else
      stream()->print_cr("\t%s\t%d", ".space", size);
#endif
    } else {
      // Generating the GP table on Win32 for MASM.
      if ((size % 4) == 0) {
        for (int i = 0; i < size; i += 4) {
          stream()->print_cr("\tDWORD 0; %d bytes", i + 4);
        }
      } else {
        // We only support define_zeros with a size divisible by 4!
        UNIMPLEMENTED();
      }
    }
  }
}
// Expand or shrink a chunk returned by allocate_chunk().
// The chunk is never moved.
//
// Returns 0 if the expansion fails (shrinking always succeeds).
// Returns the old size if successful.
size_t OsMemory_adjust_chunk(address chunk_ptr, size_t new_committed_size) {
  ChunkInfo* ci = get_chunk_info(chunk_ptr);
  size_t old_size = ci->size;
  size_t new_size = page_align_up(new_committed_size);

  if (new_size <= ci->mmaped_size) {
    int rv;
    if (new_size < old_size) {
      rv = protect_area(chunk_ptr + new_size, old_size - new_size);
    } else {
      rv = unprotect_area(chunk_ptr, new_size);
    }
    GUARANTEE(rv == 0, "mprotect must succeed");
    ci->size = new_size;
    return old_size;
  }

  // Grow the mapping: map additional anonymous pages after the currently
  // mmap'ed area.
  new_size = page_align_up(new_size - ci->mmaped_size);
  if (anon_mmap(chunk_ptr + ci->mmaped_size, new_size) == NULL) {
    return 0;
  }
  ci->mmaped_size += new_size;
  ci->size = ci->mmaped_size;
  unprotect_area(chunk_ptr, ci->size);
  return old_size;
}
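// Illustrative sketch (not part of the original source): typical use of
// OsMemory_adjust_chunk() by a heap that grows and shrinks its committed
// range. grow_or_shrink_heap() is a hypothetical caller; it only exercises
// the documented contract (0 on failed expansion, old size on success).
static bool grow_or_shrink_heap(address chunk, size_t current_size,
                                size_t wanted_size) {
  if (wanted_size > current_size) {
    // Expansion may fail if the extra pages cannot be mapped.
    return OsMemory_adjust_chunk(chunk, wanted_size) != 0;
  }
  // Shrinking always succeeds per the contract above.
  (void)OsMemory_adjust_chunk(chunk, wanted_size);
  return true;
}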
int SourceAssembler::find_gp_offset(const char *name) {
#if !ENABLE_THUMB_GP_TABLE
  int offset = 256 * sizeof(OopDesc*);  // skip the bytecode table
  if (ENABLE_DISPATCH_TABLE_PADDING) {
    offset += 8 * sizeof(OopDesc*);     // 8 extra bytecodes.
  }
#else
  int offset = 1 * sizeof(OopDesc*);    // skip the nop bytecode
#endif

  static const GPTemplate gp_templates[] = {
    GP_SYMBOLS_DO(DEFINE_GP_POINTER, DEFINE_GP_VALUE)
    {NULL, 0, 0, 0}
  };

  for (const GPTemplate* tmpl = gp_templates; tmpl->name; tmpl++) {
    if (jvm_strcmp(name, tmpl->name) == 0) {
      return offset;
    }
    offset += tmpl->size;
    GUARANTEE((offset % 4) == 0, "must be word aligned");
  }

  SHOULD_NOT_REACH_HERE();
  return 0;
}
bool OopDesc::is_type_array(void) const {
  const jint instance_size = blueprint()->instance_size_as_jint();
  GUARANTEE(InstanceSize::size_type_array_8 < InstanceSize::size_type_array_1,
            "sanity check");
  return instance_size >= InstanceSize::size_type_array_8 &&
         instance_size <= InstanceSize::size_type_array_1;
}
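// Illustrative sketch (not part of the original source): the same range-check
// idea in miniature. Because the "instance size" codes of all type-array
// classes occupy one contiguous range, a single pair of compares classifies
// an object as "some type array" with no switch. The enum values below are
// made up for the example; only the technique mirrors the code above.
enum ExampleSizeCode {
  CODE_ARRAY_8 = -8,  // 8-byte elements
  CODE_ARRAY_4,       // 4-byte elements
  CODE_ARRAY_2,       // 2-byte elements
  CODE_ARRAY_1        // 1-byte elements
};

static bool is_any_type_array_code(int size_code) {
  return size_code >= CODE_ARRAY_8 && size_code <= CODE_ARRAY_1;
}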
void itable_interface_at_put(int index, int class_id) {
#ifdef AZZERT
  Oop cls = Universe::class_from_id(class_id);
  GUARANTEE(!cls.is_null(), "sanity");
#endif
  int_field_put(itable_offset_from_index(index), class_id);
}
static void check_imm(int imm, int size) {
  if (!has_room_for_imm(imm, size)) {
    tty->print_cr("immediate %d does not fit in %d bits", imm, size);
  }
  GUARANTEE(has_room_for_imm(imm, size), "illegal immediate value");
}
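// Illustrative sketch (not part of the original source): how an instruction
// encoder might use check_imm() before packing an immediate into a fixed-width
// field. encode_imm12() is a hypothetical helper and the 12-bit width is just
// an example; only the check-before-pack pattern is the point.
static int encode_imm12(int opcode_bits, int imm) {
  check_imm(imm, 12);                  // fails the GUARANTEE if it won't fit
  return opcode_bits | (imm & 0xFFF);  // pack the low 12 bits
}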
// This function fixes the dummy object allocated by FileDescriptor::allocate()
// before the Universe was fully initialized.
void JarFileParser::fix_bootstrap() {
  const int max = MAX_CACHED_PARSERS < MaxCachedJarParsers
                ? MAX_CACHED_PARSERS : MaxCachedJarParsers;
  for (int i = 0; i < max; i++) {
    int ref = _cached_parsers[i];
    if (ref >= 0) {
      JarFileParser::Raw parser = ObjectHeap::get_global_ref_object(ref);
      GUARANTEE(parser.not_null(), "must not be GC'ed yet!");
      OopDesc** desc = (OopDesc**)parser().file_descriptor();
      OopDesc* n = Universe::file_descriptor_class()->prototypical_near();

      // Make it a real FileDescriptor object.
      oop_write_barrier(desc, n);

      // This line tests that n is now really a FileDescriptor.
      FileDescriptor::Raw try_it = parser().file_descriptor();
      (void)try_it;
    }
  }

  // We still haven't put the fixed-up FileDescriptor into the list of
  // finalizable objects yet. It's easier just to flush the cache now
  // and close the FileDescriptors now ... this code is used only in
  // non-product mode, so there's no need to make it fancy.
  flush_caches();
}
// This method is great fun: we update the list of thread stacks that
// contains old_stack, replacing pointers to old_stack with pointers to
// new_stack, and also fix new_stack->_next_stack to point where it should.
void ExecutionStackDesc::update_list(ExecutionStackDesc* old_stack,
                                     ExecutionStackDesc* old_stack_next,
                                     ExecutionStackDesc* new_stack) {
  ExecutionStackDesc *this_stack = _stack_list;

  // If replacing the list head, give it special treatment.
  if (old_stack == _stack_list) {
    _stack_list = new_stack;
    new_stack->_next_stack = old_stack_next;
    return;
  }

  // Find the element just before the one we want to replace, and patch there.
  while (this_stack != NULL) {
    ExecutionStackDesc *next_stack = this_stack->_next_stack;
    if (next_stack == old_stack) {
      this_stack->_next_stack = new_stack;
      new_stack->_next_stack = old_stack_next;
      break;
    }
    this_stack = next_stack;
  }
  GUARANTEE(new_stack->_next_stack == old_stack_next, "must be updated");
}
void* operator new(size_t /*size*/) {
#ifndef PRODUCT
  GUARANTEE(sizeof(singleton) >= sizeof(DefaultStream), "sanity");
  jvm_memset(&singleton, 0, sizeof(singleton));
#endif
  return (void*)(&singleton);
}
inline bool RawLocation::is_used_in(const VirtualStackFrame* frame,
                                    const Value& value) {
  GUARANTEE(value.in_register(), "value must be in register");
  return frame->is_mapping_something(value.lo_register()) ||
         (value.use_two_registers() &&
          frame->is_mapping_something(value.hi_register()));
}
// This function must be called before the file_handle() function is called.
// It ensures the cached JarFileParser object is valid before reading
// from the JAR file. This is important for the caching of JarFileParser.
ReturnOop FileDecoder::get_jar_parser_if_needed(JVM_SINGLE_ARG_TRAPS) {
  UsingFastOops fast_oops;
  TypeArray::Fast jar_name = jar_file_name();
  if (jar_name.not_null()) {
    JarFileParser::Raw result = JarFileParser::get(&jar_name JVM_NO_CHECK);
    if (result.not_null()) {
      // The old JarFileParser that was used when allocating the
      // FileDecoder may be GC'ed already. We might get a new
      // JarFileParser here, thus we switch to its file handle.
      set_file_handle(result().handle());
    } else if (!CURRENT_HAS_PENDING_EXCEPTION) {
      // This happens when the system has too many open files. It is
      // probably the case that a ResourceInputStream has been idle for
      // a while, all cached JarFileParsers have been GC'ed, and some
      // MIDP code has opened enough files to make the system run out
      // of file handles.
      //
      // This should never happen if some cached JarFileParsers are still
      // valid -- JarFileParser::get() should force a GC to close
      // the files used by old cached JarFileParsers, and we should
      // then be able to open the JAR file associated with <this>,
      // hence the GUARANTEE here.
      GUARANTEE(JarFileParser::parser_cache_is_empty(), "sanity");

      // We can't proceed with reading when we don't have a file handle.
      Throw::out_of_memory_error(JVM_SINGLE_ARG_THROW_0);
    }
    return result.obj();
  } else {
    return NULL;
  }
}
/* Helper function to get the fields */
static inline jfieldID find_instance_field(InstanceClass* ic, String* name,
                                           const bool is_static JVM_TRAPS) {
  UsingFastOops fast_oops;
  GUARANTEE(ic->not_null(), "Isolate is not ready");
  TypeArray::Fast fields = ic->fields();
  Symbol::Fast n = SymbolTable::symbol_for(name JVM_CHECK_0);

  // IMPL_NOTE: for now we don't check the signatures.
  // We also cannot check the type of the field, because we are using a
  // class_list that is not the current running one.
  //   Symbol::Fast s = TypeSymbol::parse("Ljava/lang/Object" JVM_CHECK_0);
  for (int index = 0; index < fields().length(); index += 5) {
    Field f(ic, index);
    if ((f.is_static() == is_static)) { // && (f.type() == bt)) {
      Symbol::Raw field_name = f.name();
      if (field_name().matches(&n)) {
        return (jfieldID)((jint)f.offset());
      }
    }
  }

  // Throw a NoSuchFieldError if the field cannot be found.
  Throw::no_such_field_error(JVM_SINGLE_ARG_CHECK_0);
  return NULL;
}
void Thread::setup_lightweight_stack(JVM_SINGLE_ARG_TRAPS) {
  UsingFastOops fast_oops;
  const int stack_size = StackSize;

  GUARANTEE(wakeup_time_offset() % 8 == 0,
            "jlongs in ThreadDesc must be 8 byte aligned");

  // This is done here because Thread::allocate() is not invoked for the
  // main thread in the ROMized case, and we still need it.
#if ENABLE_WTK_PROFILER
  OopDesc* info = WTKProfiler::allocate_thread_data(JVM_SINGLE_ARG_CHECK);
  set_profiler_info(info);
#endif

  // Allocate the stack and set the stack pointer to the bottom.
  ExecutionStack::Fast stack =
      Universe::new_execution_stack(stack_size JVM_CHECK);
  address sp = (address)stack.field_base(
      JavaStackDirection < 0 ? stack_size : ExecutionStackDesc::header_size());

#if defined(UNDER_CE)
  // I thought I only needed this for debugging under EVC++, but when I ran
  // cldc_vm under the test utility on the iPaq, it hung unless this line was
  // uncommented. The mintck utility uses CreateProcess to run the mintck
  // tests; if I run the mintck tests using a .lnk file, it works without
  // the following line.
  sp = (address)((int)sp | _system_address);
#endif

  set_execution_stack(&stack);
  stack().set_thread(this);
  set_stack_limit();

  sp = setup_stack_asm(sp);
  set_stack_pointer((jint)sp);
}
void SignatureStream::next() {
  GUARANTEE(_num_param_words_processed <= _num_param_words, "sanity");
  _word_index += word_size();

  if (_num_param_words_processed < _num_param_words) {
    juint chr = (juint)_signature->byte_at(_position);
    if (chr < 0x80) {
      _type = TypeSymbol::primitive_field_basic_type_for(chr);
      _position++;
    } else {
      _type = T_OBJECT;
      _current_class_id = _signature->decode_ushort_at(_position);
      if (!_fast) {
        JavaClass::Raw klass = Universe::class_from_id(_current_class_id);
        if (!klass.is_instance_class()) {
          _type = T_ARRAY;
        }
      }
      _position += 2;
    }
    _num_param_words_processed += word_size_for(_type);
  } else {
    _is_return_type = true;
    _type = _signature->return_type(_fast);
    _num_param_words_processed += 1;  // because the return type may be T_VOID!
  }
}
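// Illustrative sketch (not part of the original source): iterating a method
// signature with SignatureStream. The constructor arguments and the eos()/
// type() accessors are assumptions inferred from the fields used above
// (_signature, _type, _is_return_type); treat every name as hypothetical.
//
//   SignatureStream ss(&signature, is_static);
//   for (; !ss.eos(); ss.next()) {
//     BasicType t = ss.type();   // T_INT, T_OBJECT, T_ARRAY, ...
//     // ... count words, build a calling convention, etc.
//   }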
bool OsFile_rename(const JvmPathChar *from, const JvmPathChar *to) {
  const int from_name_len = fn_strlen(from);
  const int to_name_len = fn_strlen(to);
  pcsl_string pcsl_filename_from = PCSL_STRING_NULL;
  pcsl_string pcsl_filename_to = PCSL_STRING_NULL;

  GUARANTEE(sizeof(jchar) == sizeof(JvmPathChar), "Types must match");

  if (pcsl_string_convert_from_utf16(from, from_name_len,
                                     &pcsl_filename_from) != PCSL_STRING_OK) {
    return false;
  }
  if (pcsl_string_convert_from_utf16(to, to_name_len,
                                     &pcsl_filename_to) != PCSL_STRING_OK) {
    // Free the first string before bailing out.
    pcsl_string_free(&pcsl_filename_from);
    return false;
  }

  int result = pcsl_file_rename(&pcsl_filename_from, &pcsl_filename_to);

  pcsl_string_free(&pcsl_filename_from);
  pcsl_string_free(&pcsl_filename_to);

  return (result == 0) ? true : false;
}
void SourceAssembler::ldr_string(Register r, const char* string,
                                 Condition cond) {
  GUARANTEE(r != pc, "probably incorrect code");
  GUARANTEE(string != NULL, "Sanity check");
  Literal lit(string);

  // EVC ASM doesn't understand adrls
#if EVC_ASM_QUIRK
  // stream()->print("\tldr%s\t%s, _D%d",
  stream()->print("\tadrl%s\t%s, _D%d",
                  cond_name(cond), reg_name(r), lit.id());
#else
  stream()->print("\tadr%s\t%s, _D%d",
                  cond_name(cond), reg_name(r), lit.id());
#endif
  emit_comment_and_cr();
  _literals.add(lit);
}
ARRAY instantiateArray(ARRAY_CLASS arrayClass, long len) {
  GUARANTEE(_in_kvm_native_method, "sanity");
  ARRAY_CLASS *p = PrimitiveArrayClasses;
  SETUP_ERROR_CHECKER_ARG;

  if (arrayClass == p[T_BOOLEAN]) {
    return (ARRAY)Universe::new_bool_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else if (arrayClass == p[T_CHAR]) {
    return (ARRAY)Universe::new_char_array((int)len JVM_NO_CHECK_AT_BOTTOM);
#if ENABLE_FLOAT
  } else if (arrayClass == p[T_FLOAT]) {
    return (ARRAY)Universe::new_float_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else if (arrayClass == p[T_DOUBLE]) {
    return (ARRAY)Universe::new_double_array((int)len JVM_NO_CHECK_AT_BOTTOM);
#endif
  } else if (arrayClass == p[T_BYTE]) {
    return (ARRAY)Universe::new_byte_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else if (arrayClass == p[T_SHORT]) {
    return (ARRAY)Universe::new_short_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else if (arrayClass == p[T_INT]) {
    return (ARRAY)Universe::new_int_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else if (arrayClass == p[T_LONG]) {
    return (ARRAY)Universe::new_long_array((int)len JVM_NO_CHECK_AT_BOTTOM);
  } else {
    // Only primitive arrays are supported. If your KVM code needs to allocate
    // object arrays, please convert it to use KNI/SNI code.
    SHOULD_NOT_REACH_HERE();
    return NULL;
  }
}
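// Illustrative usage (not part of the original source), assuming the caller
// is inside a KVM native method so that _in_kvm_native_method holds and the
// error-checker argument is in effect:
//
//   ARRAY ints = instantiateArray(PrimitiveArrayClasses[T_INT], 16);
//   // ints is NULL if the allocation failed with a pending exception.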
void SourceAssembler::emit_data(OperandSize size, const Constant& cst,
                                int dups, const char* name) {
  GUARANTEE(!GenerateInlineAsm, "No inline data in inline asm");
  if (!GenerateGNUCode) {
    // MASM output.
    if (name != 0) {
      emit("\n\tPUBLIC %s%s\n", extern_c_prefix(), name);
    }
    if (cst.is_reference()) {
      emit("\tEXTERNDEF %s%s:PROC\n", extern_c_prefix(), cst.reference());
    }
    if (name != 0) {
      emit("%s%s ", extern_c_prefix(), name);
    }
    switch (size) {
      case byte_operand          : emit("\tdb \t"); break;
      case word_operand          : emit("\tdw \t"); break;
      case long_operand          : emit("\tdd \t"); break;
      case very_long_operand     : emit("\tdq \t"); break;
      case very_very_long_operand: emit("\tdt \t"); break;
      default                    : SHOULD_NOT_REACH_HERE();
    }
    if (dups > 1) {
      emit("%d\tdup\t(", dups);
      emit_constant(cst);
      emit(")");
    } else {
      emit_constant(cst);
    }
    emit("\n");
  } else {
    // GNU assembler output.
    if (name != 0) {
      emit("\n\t.global %s%s\n", extern_c_prefix(), name);
    }
    if (cst.is_reference()) {
      emit("\t.extern %s%s\n", extern_c_prefix(), cst.reference());
    }
    if (name != 0) {
      emit("%s%s: ", extern_c_prefix(), name);
    }
    if (dups > 1) {
      emit(".rept %d\n\t", dups);
    }
    switch (size) {
      case byte_operand          : emit("\t.byte \t"); break;
      case word_operand          : emit("\t.word \t"); break;
      case long_operand          : emit("\t.long \t"); break;
      case very_long_operand     : emit("\tdq \t"); break;
      case very_very_long_operand: emit("\tdt \t"); break;
      default                    : SHOULD_NOT_REACH_HERE();
    }
    emit_constant_displacement(cst);
    if (dups > 1) {
      emit("\n\t.endr");
    }
    emit("\n");
  }
}