// Determine compilation policy based on command line argument
void compilationPolicy_init() {
  CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup);

  switch(CompilationPolicyChoice) {
  case 0:
    CompilationPolicy::set_policy(new SimpleCompPolicy());
    break;

  case 1:
#ifdef COMPILER2
    CompilationPolicy::set_policy(new StackWalkCompPolicy());
#else
    Unimplemented();
#endif
    break;

  case 2:
#ifdef TIERED
    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
#else
    Unimplemented();
#endif
    break;

  case 3:
#ifdef TIERED
    CompilationPolicy::set_policy(new AdvancedThresholdPolicy());
#else
    Unimplemented();
#endif
    break;

  default:
    fatal("CompilationPolicyChoice must be in the range: [0-3]");
  }
  CompilationPolicy::policy()->initialize();
}
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      _masm->align(CodeEntryAlignment);
      _masm->set_code_start();
      _masm->entry(_compilation->codeprofile());
      build_frame();
      break;

    case lir_osr_entry:
      Unimplemented(); //osr_entry();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
void AttributeManager::deleteAttributes(const AttrIdVec& atids) {
  typedef std::map<uint64_t, std::vector<uint64_t> > AttrToVecMap;
  AttrToVecMap perTableIds;

  // Group the attribute ids by the table they live in.
  for(AttrIdVec::const_iterator it = atids.begin(), it_end = atids.end();
      it != it_end; ++it) {
    const AttributeUniqueId& pair = *(*it);
    std::vector<uint64_t>& inTable = perTableIds[pair.getTableId()];
    inTable.push_back(pair.getWithinTypeId());
  }

  // Delete the collected ids table by table.
  AttrToVecMap::iterator it = perTableIds.begin(), it_end = perTableIds.end();
  for(; it != it_end; ++it) {
    Assert(((*it).first) <= LastAttrTable);
    AttrTableId tableId = (AttrTableId)((*it).first);
    std::vector<uint64_t>& ids = (*it).second;
    std::sort(ids.begin(), ids.end());

    switch(tableId) {
    case AttrTableBool:
      Unimplemented("delete attributes is unimplemented for bools");
      break;
    case AttrTableUInt64:
      deleteAttributesFromTable(d_ints, ids);
      break;
    case AttrTableTNode:
      deleteAttributesFromTable(d_tnodes, ids);
      break;
    case AttrTableNode:
      deleteAttributesFromTable(d_nodes, ids);
      break;
    case AttrTableTypeNode:
      deleteAttributesFromTable(d_types, ids);
      break;
    case AttrTableString:
      deleteAttributesFromTable(d_strings, ids);
      break;
    case AttrTableCDBool:
    case AttrTableCDUInt64:
    case AttrTableCDTNode:
    case AttrTableCDNode:
    case AttrTableCDString:
    case AttrTableCDPointer:
      Unimplemented("CDAttributes cannot be deleted. Contact Tim/Morgan if this behavior is desired.");
      break;
    case LastAttrTable:
    default:
      Unreachable();
    }
  }
}
StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed, so nothing needs to be done here.
  //       If we want to implement the destructor, we need to release the BufferBlob
  //       allocated in the constructor (i.e., we need to keep it around or look it
  //       up via CodeCache::find_blob(...)).
  Unimplemented();
}
void printAllocated(RegisterMask rs) {
  Unimplemented();
  /*
  printf("{");
  bool first = true;
  unsigned r = rs;  // safer for >>
  for (int d = 0; r; d++, r >>= 1) {
    if (isSet(r, 0)) {
      if (first) {
        first = false;
      } else {
        printf(",");
      }
      printf("%s", RegisterNames[d]);
      Unimplemented(); // Location d1 = Location(d);  <<< fix this
      Location d1;
      for (char c = RegisterNames[d][0];
           isSet(r, 1) && c == RegisterNames[d + 1][0];
           d++, r >>= 1) ;
      if (d > d1.no()) printf("-%s", RegisterNames[d]);
    }
  }
  printf("}");
  fflush(stdout);
  */
}
void BlockScope::initialize(methodOop method, klassOop methodHolder, Scope* p,
                            InlinedScope* s, RScope* rs, SendInfo* info) {
  InlinedScope::initialize(method, methodHolder, s, rs, info);
  _parent = p;
  _self_is_initialized = false;
  if (s == NULL) {
    // top scope: create a context (currently always initialized for blocks)
    // (context is set up by the prologue node)
    _context = new SAPReg(this, PrologueBCI, EpilogueBCI);
  } else {
    // set up for context passed in by caller
    // (_context may be changed later if this scope allocates its own context)
    switch (method->block_info()) {
      case methodOopDesc::expects_nil:        // no context needed
        _context = NULL;
        break;
      case methodOopDesc::expects_self:
        _context = self()->preg();
        fatal("self not known yet -- fix this");
        break;
      case methodOopDesc::expects_parameter:  // fix this -- should find which
        Unimplemented();
        break;
      case methodOopDesc::expects_context:
        if (p->isInlinedScope()) {
          _context = ((InlinedScope*)p)->context();
        } else {
          // shouldn't inline block unless parent was inlined, too
          fatal("shouldn't inline");
        }
        break;
      default:
        fatal("unexpected incoming info");
    }
  }
}
void CodeBlobCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_code_blobs == NULL, "checking");

  // create the global list
  _global_code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(50, true);

  // iterate over the stub code descriptors and put them in the list first.
  int index = 0;
  StubCodeDesc* desc;
  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
    _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
  }

  // next iterate over all the non-nmethod code blobs and add them to
  // the list - as noted above this will filter out duplicates and
  // enclosing blobs.
  Unimplemented();
  //CodeCache::blobs_do(do_blob);

  // make the global list the instance list so that it can be used
  // for other iterations.
  _code_blobs = _global_code_blobs;
  _global_code_blobs = NULL;
}
void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  verify_oop(obj);

  // Load object header.
  z_lg(hdr, Address(obj, hdr_offset));

  // Save object being locked into the BasicObjectLock...
  z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (UseBiasedLocking) {
    biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
  }

  // and mark it as unlocked.
  z_oill(hdr, markOopDesc::unlocked_value);
  // Save unlocked object header into the displaced header location on the stack.
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header. If it is not the same, get the
  // object header instead.
  z_csg(hdr, disp_hdr, hdr_offset, obj);
  // If the object header was the same, we're done.
  if (PrintBiasedLockingStatistics) {
    Unimplemented();
#if 0
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
#endif
  }
  branch_optimized(Assembler::bcondEqual, done);
  // If the object header was not the same, it is now in the hdr register.
  // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // These 3 tests can be done by evaluating the following expression:
  //
  // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  z_sgr(hdr, Z_SP);

  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
  z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
  // For recursive locking, the result is zero. => Save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking).
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Otherwise we don't care about the result and handle locking via runtime call.
  branch_optimized(Assembler::bcondNotZero, slow_case);
  // done
  bind(done);
}
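// The bit trick in the comment above deserves a standalone illustration. Below
// is a minimal sketch of the same range-and-alignment test, assuming an
// illustrative 4 KiB page size and a 2-bit lock mask; the helper name and
// constants are hypothetical, not taken from any particular port.
#include <cassert>
#include <cstdint>

// True iff 'hdr' is a lock-bit-free pointer into the page at 'sp', i.e. the
// three conditions above hold: lock bits clear, sp <= hdr, hdr within a page.
static bool is_recursive_stack_lock(uintptr_t hdr, uintptr_t sp) {
  const uintptr_t page_size = 4096;  // assumed: a power of 2
  const uintptr_t lock_mask = 0x3;   // assumed: low 2 bits hold the lock state
  return ((hdr - sp) & (~(page_size - 1) | lock_mask)) == 0;
}

int main() {
  const uintptr_t sp = 0x7fff0000;                      // page-aligned "SP"
  assert( is_recursive_stack_lock(sp + 0x40, sp));      // in page, bits clear
  assert(!is_recursive_stack_lock(sp + 0x41, sp));      // lock bits set
  assert(!is_recursive_stack_lock(sp - 0x40, sp));      // below sp: huge unsigned diff
  assert(!is_recursive_stack_lock(sp + 2 * 4096, sp));  // beyond the page
  return 0;
}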
// inline void Atomic::store(jbyte store_value, volatile jbyte* dest)
// {
//   Unimplemented();
// }

// inline void Atomic::store(jshort store_value, volatile jshort* dest)
// {
//   Unimplemented();
// }

inline void Atomic::store(jint store_value, volatile jint* dest)
{
#ifdef PPC
  *dest = store_value;
#else
  Unimplemented();
#endif
}
void RenderTexture::fillTexture(uint32* dataRGBA32) {
  (void)dataRGBA32;
  Unimplemented(); // TODO not complete?

  DX_GetDeviceContext(dxctx);
  dxctx->GenerateMips(mDxTextureView);
}
void IC::replace(nmethod* nm) {
  Unimplemented();
  IC_Iterator* it = iterator();
  it->init_iteration();
  while (!it->at_end()) {
    // replace if found
    it->advance();
  }
}
Location pick(RegisterMask& alloc, RegisterMask mask) {
  Unimplemented();
  unsigned r = mask & ~alloc;
  if (r == 0) return unAllocated;
  // Find the lowest set bit in r; reg must be declared outside the loop so it
  // is still in scope for setNth below.
  int reg;
  for (reg = 0; !isSet(r, 0); reg++, r >>= 1) ;
  setNth(alloc, reg);
  // return Location(ireg, reg);    /// fix this
  return Location();
}
bool CppInterpreter::contains(address pc) {
#ifdef PPC
  return pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation)
      || _code->contains(pc);
#else
  Unimplemented();
#endif // PPC
}
void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
                                                   void** vtable,
                                                   char** md_top,
                                                   char* md_end,
                                                   char** mc_top,
                                                   char* mc_end) {
  Unimplemented();
}
// Loads the current PC of the following instruction as an immediate value in
// 2 instructions. All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address(Register reg, int bytes_to_skip) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}
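// The sethi/add pair above relies on a simple identity: splitting a value at
// bit 10 and re-adding the two pieces reconstructs it exactly. A minimal
// sketch of that arithmetic in plain C++, standing in for the SPARC
// instruction pair (the value is illustrative):
#include <cassert>
#include <cstdint>

int main() {
  uint32_t thepc = 0x12345678u;   // an arbitrary 32-bit "PC"
  uint32_t hi = thepc & ~0x3ffu;  // the part sethi materializes (upper bits)
  uint32_t lo = thepc &  0x3ffu;  // the part the add immediate supplies
  assert(hi + lo == thepc);       // the pair reconstructs the full value
  return 0;
}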
// --- java_local
intptr_t vframe::java_local(JavaThread* thread, int index) const {
  if (_fr.is_compiled_frame()) {
    // In rare instances set_locals may have occurred, in which case there are
    // local values that are not described by the ScopeValue anymore.
    GrowableArray<jvmtiDeferredLocalVariable*>* deferred = NULL;
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    if (list != NULL) {
      // In real life this never happens or is typically a single element search
      Unimplemented();
      // for (int i = 0; i < list->length(); i++) {
      //   if (list->at(i)->matches((vframe*)this)) {
      //     deferred = list->at(i)->locals();
      //     break;
      //   }
      // }
      //
      // if (deferred != NULL) {
      //   jvmtiDeferredLocalVariable* val = NULL;
      //
      //   // Iterate through the deferred locals until we find our desired index
      //   int i = 0;
      //   while (i < deferred->length()) {
      //     val = deferred->at(i);
      //     if (val->index() == index) {
      //       if (buf != NULL) {
      //         set_buf(buf, val);
      //       }
      //       return val->value();
      //     }
      //     i++;
      //   }
      // }
    }

    DebugScopeValue::Name vreg = scope()->get_local(index);
    if (!DebugScopeValue::is_valid(vreg)) return 0;
    if (!DebugScopeValue::is_vreg(vreg)) {
      Unimplemented(); // debug info constants?
    }
    return *get_frame().reg_to_addr(DebugScopeValue::to_vreg(vreg));
  }
  return _fr.interpreter_frame_local_at(index);
}
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in(addr)) {
    Unimplemented();
  } else if (old_gen()->is_in(addr)) {
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in(addr)) {
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
void SysWin32::rmdir(const std::string& dir) const {
#ifdef __WINNT__
  if (::rmdir(dir.c_str()) < 0) {
    throw PathException(dir, errno);
  }
#else
  throw Unimplemented("SysWin32::rmdir");
#endif
}
void SysWin32::remove(const std::string& file) const {
#ifdef __WINNT__
  if (::unlink(file.c_str()) < 0) {
    throw PathException(file, errno);
  }
#else
  throw Unimplemented("SysWin32::remove");
#endif
}
void test_error_handler(size_t test_num) {
  if (test_num == 0) return;

  // If asserts are disabled, use the corresponding guarantee instead.
  size_t n = test_num;
  NOT_DEBUG(if (n <= 2) n += 2);

  const char* const str = "hello";
  const size_t num = (size_t)os::vm_page_size();
  const char* const eol = os::line_separator();
  const char* const msg = "this message should be truncated during formatting";

  // Keep this in sync with test/runtime/6888954/vmerrors.sh.
  // No breaks are needed: every case aborts the VM before it can fall through.
  switch (n) {
    case  1: assert(str == NULL, "expected null");
    case  2: assert(num == 1023 && *str == 'X',
                    err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  3: guarantee(str == NULL, "expected null");
    case  4: guarantee(num == 1023 && *str == 'X',
                       err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  5: fatal("expected null");
    case  6: fatal(err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  7: fatal(err_msg("%s%s# %s%s# %s%s# %s%s# %s%s# "
                           "%s%s# %s%s# %s%s# %s%s# %s%s# "
                           "%s%s# %s%s# %s%s# %s%s# %s",
                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                           msg, eol, msg, eol, msg, eol, msg, eol, msg));
    case  8: vm_exit_out_of_memory(num, "ChunkPool::allocate");
    case  9: ShouldNotCallThis();
    case 10: ShouldNotReachHere();
    case 11: Unimplemented();
    // This is last because it does not generate an hs_err* file on Windows.
    case 12: os::signal_raise(SIGSEGV);

    default: ShouldNotReachHere();
  }
}
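// The NOT_DEBUG remapping above works because assert compiles to nothing in
// product builds, so cases 1 and 2 would otherwise do nothing at all. A
// minimal sketch of the assumed shape of such a macro pair (the real
// definition lives in HotSpot's macros.hpp and may differ in detail):
#ifdef ASSERT
// Debug build: asserts are live, so the remapping code is compiled out.
#define NOT_DEBUG(code)
#else
// Product build: asserts are no-ops, so redirect tests 1-2 to the guarantees.
#define NOT_DEBUG(code) code
#endif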
int AbstractInterpreter::layout_activation(methodOop method,
                                           int tempcount,
                                           int popframe_extra_args,
                                           int moncount,
                                           int callee_param_count,
                                           int callee_locals,
                                           frame* caller,
                                           frame* interpreter_frame,
                                           bool is_top_frame) {
  Unimplemented();
}
void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) {
  Unimplemented();
  // ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant();
  // if (oc != NULL && oc->value()->is_method() &&
  //     x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) {
  //   print_value(x->mdo());
  //   output()->print(".interpreter_invocation_count += %d", x->increment());
  // } else {
  //   output()->print("counter [");
  //   print_value(x->mdo());
  //   output()->print(" + %d] += %d", x->offset(), x->increment());
  // }
}
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // called from os::print_location by find or VMError
    if (Debugging || VMError::fatal_error_in_progress()) return NULL;
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Unimplemented();
  // verify_oop(receiver);
  // // explicit NULL check not needed since load from [klass_offset] causes a trap
  // // check against inline cache
  // assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
  //        "must add explicit null check");
  // int start_offset = offset();
  // cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  // // if icache check fails, then jump to runtime routine
  // // Note: RECEIVER must still contain the receiver!
  // jump_cc(Assembler::notEqual,
  //         RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  // assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
}
void set_threadQ(SeenThread* seenthread, PlaceholderTable::classloadAction action) {
  switch (action) {
    case PlaceholderTable::LOAD_INSTANCE:
      _loadInstanceThreadQ = seenthread;
      break;
    case PlaceholderTable::LOAD_SUPER:
      _superThreadQ = seenthread;
      break;
    case PlaceholderTable::DEFINE_CLASS:
      _defineThreadQ = seenthread;
      break;
    default:
      Unimplemented();
  }
  return;
}
void nmethodCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_nmethods == NULL, "checking");

  // create the list
  _global_nmethods = new (ResourceObj::C_HEAP) GrowableArray<nmethodDesc*>(100, true);

  // add a descriptor for each nmethod to the list.
  Unimplemented();
  // CodeCache::nmethods_do(do_nmethod);

  // make the list the instance list
  _nmethods = _global_nmethods;
  _global_nmethods = NULL;
}
void CompilationScope::inline_scopes() {
  // Determine (recursively) which callees should be inlined. After this pass,
  // all inlining decisions have been made, and subsequent passes obey these
  // decisions.
#ifdef COMPILER2
  if (Inline || InlineAccessors) {
    InliningClosure c(this);
    iterate(&c);
    if (PrintScopeTree) {
      print_tree();
    }
  }
#else
  Unimplemented();
#endif
}
SeenThread* actionToQueue(PlaceholderTable::classloadAction action) {
  SeenThread* queuehead;
  switch (action) {
    case PlaceholderTable::LOAD_INSTANCE:
      queuehead = _loadInstanceThreadQ;
      break;
    case PlaceholderTable::LOAD_SUPER:
      queuehead = _superThreadQ;
      break;
    case PlaceholderTable::DEFINE_CLASS:
      queuehead = _defineThreadQ;
      break;
    default:
      Unimplemented();
  }
  return queuehead;
}
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg,
                (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
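// The boolean passed to fcmp2int above only matters for NaN operands: the
// _fcmpl/_dcmpl bytecodes yield -1 on an unordered comparison, while
// _fcmpg/_dcmpg yield +1. A minimal sketch of that semantics as a standalone
// illustrative helper (not the LIR_Assembler implementation):
#include <cassert>
#include <cmath>

// Returns -1, 0, or 1 like the JVM's fcmp<op> bytecodes; 'unordered_is_less'
// selects the fcmpl flavor (NaN compares as -1) over fcmpg (NaN as +1).
static int fcmp2int_model(float a, float b, bool unordered_is_less) {
  if (std::isnan(a) || std::isnan(b)) return unordered_is_less ? -1 : 1;
  if (a < b) return -1;
  if (a > b) return  1;
  return 0;
}

int main() {
  assert(fcmp2int_model(1.0f, 2.0f, true)  == -1);
  assert(fcmp2int_model(2.0f, 2.0f, false) ==  0);
  const float nan = std::nanf("");
  assert(fcmp2int_model(nan, 2.0f, true)  == -1);  // _fcmpl flavor
  assert(fcmp2int_model(nan, 2.0f, false) ==  1);  // _fcmpg flavor
  return 0;
}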
void controlled_crash(int how) {
  if (how == 0) return;

  // If asserts are disabled, use the corresponding guarantee instead.
  NOT_DEBUG(if (how <= 2) how += 2);

  const char* const str = "hello";
  const size_t num = (size_t)os::vm_page_size();
  const char* const eol = os::line_separator();
  const char* const msg = "this message should be truncated during formatting";
  char* const dataPtr = NULL;  // bad data pointer
  const void (*funcPtr)(void) = (const void(*)()) 0xF;  // bad function pointer

  // Keep this in sync with test/runtime/6888954/vmerrors.sh.
  switch (how) {
    case  1: vmassert(str == NULL, "expected null");
    case  2: vmassert(num == 1023 && *str == 'X',
                      err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  3: guarantee(str == NULL, "expected null");
    case  4: guarantee(num == 1023 && *str == 'X',
                       err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  5: fatal("expected null");
    case  6: fatal(err_msg("num=" SIZE_FORMAT " str=\"%s\"", num, str));
    case  7: fatal(err_msg("%s%s# %s%s# %s%s# %s%s# %s%s# "
                           "%s%s# %s%s# %s%s# %s%s# %s%s# "
                           "%s%s# %s%s# %s%s# %s%s# %s",
                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                           msg, eol, msg, eol, msg, eol, msg, eol, msg, eol,
                           msg, eol, msg, eol, msg, eol, msg, eol, msg));
    case  8: vm_exit_out_of_memory(num, OOM_MALLOC_ERROR, "ChunkPool::allocate");
    case  9: ShouldNotCallThis();
    case 10: ShouldNotReachHere();
    case 11: Unimplemented();

    // There's no guarantee the bad data pointer will crash us
    // so "break" out to the ShouldNotReachHere().
    case 12: *dataPtr = '\0'; break;

    // There's no guarantee the bad function pointer will crash us
    // so "break" out to the ShouldNotReachHere().
    case 13: (*funcPtr)(); break;

    case 14: crash_with_segfault(); break;
    case 15: crash_with_sigfpe(); break;

    default: tty->print_cr("ERROR: %d: unexpected test_num value.", how);
  }
  ShouldNotReachHere();
}