/* Store obj at logical index e of vector v, growing the element array
 * downwards (index below the current offset) or upwards (index beyond the
 * current size) as required.  Slots exposed by growth are set to NIL before
 * the (reference-counted) assignment is made via assignVector().
 */
status
elementVector(Vector v, Int e, Any obj)
{
  int idx = indexVector(v, e);

  if (idx < 0)
  {
    /* Requested cell lies before the current origin: allocate a larger
     * array, shift the old contents up by -idx slots, NIL-fill the gap,
     * then move the offset down so e maps onto physical slot 0.
     */
    int growncells = valInt(v->size) - idx;
    Any *fresh = alloc(growncells * sizeof(Any));
    int i;

    if (v->elements)
    {
      cpdata(&fresh[-idx], v->elements, Any, valInt(v->size));
      unalloc(valInt(v->allocated) * sizeof(Any), v->elements);
    }
    v->elements = fresh;
    for (i = 0; i < -idx; i++)
      v->elements[i] = NIL;

    assignVector(v, 0, obj);
    assign(v, size, toInt(growncells));
    assign(v, allocated, toInt(growncells));
    assign(v, offset, toInt(valInt(e) - 1));
    succeed;
  }

  if (idx >= valInt(v->size))
  {
    /* Requested cell lies past the current end: enlarge the allocation
     * (at least doubling it) when needed, then NIL-fill the new tail.
     */
    int i;

    if (idx >= valInt(v->allocated))
    {
      int newroom = max(valInt(v->allocated) * 2, idx + 1);
      Any *fresh = alloc(newroom * sizeof(Any));

      if (v->elements)
      {
        cpdata(fresh, v->elements, Any, valInt(v->size));
        unalloc(valInt(v->allocated) * sizeof(Any), v->elements);
      }
      v->elements = fresh;
      assign(v, allocated, toInt(newroom));
    }
    for (i = valInt(v->size); i <= idx; i++)
      v->elements[i] = NIL;

    assignVector(v, idx, obj);
    assign(v, size, toInt(idx + 1));
    succeed;
  }

  /* In-range index: plain assignment. */
  assignVector(v, idx, obj);
  succeed;
}
/* Adjust the low (logical) index of vector v to `low'.
 *
 * Shrinking (l > ol): the dropped head cells are dereferenced (NIL-filled),
 * the surviving cells l..offset+size are copied into a smaller array, and
 * size, allocated and offset are updated.  If nothing survives the vector
 * is cleared.  Growing (l < ol): the vector is extended downwards by
 * NIL-filling the new cells (fillVector() grows it).
 *
 * Fixes relative to the previous version (mirroring highIndexVector):
 *  - surviving count was valInt(v->size)+valInt(v->offset)-l, one short of
 *    the inclusive range l..offset+size;
 *  - the dereference fill ran over l..ol-1 (empty/backwards when l > ol)
 *    instead of the dropped head ol..l-1;
 *  - offset was never updated, leaving all later logical indexing shifted.
 */
static status
lowIndexVector(Vector v, Int low)
{ int l = valInt(low);
  int ol = valInt(v->offset) + 1;		/* current low index */

  if ( l > ol )					/* shrink from below */
  { int size = valInt(v->size) - (l - ol);	/* cells l..offset+size inclusive */

    if ( size > 0 )
    { Any *elms = alloc(size * sizeof(Any));

      fillVector(v, NIL, toInt(ol), toInt(l-1)); /* dereference dropped head */
      cpdata(elms, &v->elements[l-ol], Any, size);
      unalloc(valInt(v->allocated)*sizeof(Any), v->elements);
      v->elements = elms;
      assign(v, size, toInt(size));
      assign(v, allocated, v->size);
      assign(v, offset, toInt(l-1));		/* keep logical indexing intact */
      succeed;
    } else
    { return clearVector(v);
    }
  } else if ( l < ol )				/* grow downwards */
  { return fillVector(v, NIL, toInt(l), toInt(ol-1));
  }

  succeed;
}
/* Adjust the high (logical) index of vector v to `high'.
 * Shrinking dereferences the dropped tail (NIL-fill) and copies the kept
 * head into a smaller array; growing NIL-fills the new cells upwards via
 * fillVector().  The offset is untouched: the low end does not move.
 */
static status
highIndexVector(Vector v, Int high)
{ int hi = valInt(high);
  int curhigh = valInt(v->offset) + valInt(v->size);

  if ( curhigh > hi )			/* shrink from above */
  { int keep = hi - valInt(v->offset);

    if ( keep > 0 )
    { Any *fresh = alloc(keep * sizeof(Any));

      fillVector(v, NIL, inc(high), DEFAULT);	/* dereference dropped tail */
      cpdata(fresh, v->elements, Any, keep);
      unalloc(valInt(v->allocated)*sizeof(Any), v->elements);
      v->elements = fresh;
      assign(v, size, toInt(keep));
      assign(v, allocated, v->size);
      succeed;
    }

    return clearVector(v);		/* nothing left to keep */
  }

  if ( curhigh < hi )			/* grow upwards */
    return fillVector(v, NIL, toInt(curhigh+1), inc(high));

  succeed;
}
static void epreader(void *u) { int dfd, rcount, cl, ntries, recov; Areader *a; Channel *c; Packser *pk; Serial *ser; Serialport *p; threadsetname("epreader proc"); a = u; p = a->p; ser = p->s; c = a->c; free(a); qlock(ser); /* this makes the reader wait end of initialization too */ dfd = p->epin->dfd; qunlock(ser); ntries = 0; pk = nil; for(;;) { if (pk == nil) pk = emallocz(sizeof(Packser), 1); Eagain: rcount = read(dfd, pk->b, sizeof pk->b); if(serialdebug > 5) dsprint(2, "%d %#ux%#ux ", rcount, p->data[0], p->data[1]); if(rcount < 0) { if(ntries++ > 100) break; qlock(ser); recov = serialrecover(ser, p, nil, "epreader: bulkin error"); qunlock(ser); if(recov >= 0) goto Eagain; } if(rcount == 0) continue; if(rcount >= ser->inhdrsz) { rcount = cpdata(ser, p, pk->b, pk->b, rcount); if(rcount != 0) { pk->nb = rcount; cl = sendp(c, pk); if(cl < 0) { /* * if it was a time-out, I don't want * to give back an error. */ rcount = 0; break; } } else free(pk); qlock(ser); ser->recover = 0; qunlock(ser); ntries = 0; pk = nil; } } if(rcount < 0) fprint(2, "%s: error reading %s: %r\n", argv0, p->name); free(pk); nbsendp(c, nil); if(p->w4data != nil) chanclose(p->w4data); if(p->gotdata != nil) chanclose(p->gotdata); devctl(ser->dev, "detach"); closedev(ser->dev); }
//------------------------------do_call----------------------------------------
// Handle your basic call. Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool will_link;
  ciMethod* dest_method = iter().get_method(will_link);
  ciInstanceKlass* holder_klass = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();

  // See if the receiver (if any) is NULL, hence we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    // Receiver is statically known to be NULL: emit the throw instead of the call.
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM||holder_klass->is_loaded(),"");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeInstPtr*inst_type=_gvn.type(receiver_node)->isa_instptr();
    if( inst_type ) {
      ciInstanceKlass*ikl=inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      // NOTE(review): ikl2 is computed here but never read below — looks
      // like leftover from an earlier version; confirm before removing.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if(!ikl->is_linked()){
        uncommon_trap(Deoptimization::Reason_uninitialized,klass,"call site where receiver is not linked",false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
    // Have the call been sufficiently improved such that it is no longer a virtual?
    if (optimized_virtual_method != NULL) {
      call_method = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // NOTE(review): dead branch — the condition is the constant `false`,
      // so a vtable index is never resolved here; kept as-is.
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline=(C->do_inlining()||InlineAccessors)&& (!C->method()->should_disable_inlining()) && (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp = c1_cp();        // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;
    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else { // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator*cg=C->call_generator(call_method,vtable_index,call_is_virtual,jvms,try_inline,prof_factor(),callee_cp,callee_cp_inloff,c2_caller_cpdi,caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(), "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {     // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT);
        C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false; // Inline tactic bailed out.
    cg=C->call_generator(call_method,vtable_index,call_is_virtual,jvms,try_inline,prof_factor(),c1_cp(),c1_cp_inloff(),c2_caller_cpdi,caller_cpdi);
    new_jvms=cg->generate(jvms,caller_cpdi,is_private_copy());
    assert(new_jvms!=NULL,"call failed to generate: calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node*cast=cast_not_null(receiver,true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if(result_type==T_OBJECT||result_type==T_ARRAY){
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}