// Do a null check on the receiver as it would happen before the call to
// callee (with all arguments still on the stack).
Node* null_check_receiver_before_call(ciMethod* callee) {
  assert(!callee->is_static(), "must be a virtual method");
  const int nargs = callee->arg_size();
  inc_sp(nargs);
  Node* n = null_check_receiver();
  dec_sp(nargs);
  return n;
}
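// A hedged usage sketch (an assumption, not from this source): callers use a
// helper like this when the receiver must be null-checked while the call's
// arguments are still live on the expression stack, i.e. before the arguments
// are popped for the call itself.  'kit' and 'callee' are hypothetical names
// for illustration:
//
//   Node* recv = kit.null_check_receiver_before_call(callee);
//   if (kit.stopped())  return;   // receiver provably NULL; path already throws
//   // ... on the continuing path, recv is known non-null ...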
void
execute(INST * cdp,             /* code ptr, start execution here */
        CELL *sp,               /* eval_stack pointer */
        CELL *fp)               /* frame ptr into eval_stack for
                                   user defined functions */
{
    /* some useful temporaries */
    CELL *cp;
    int t;
    unsigned tu;

    /* save state for array loops via a stack */
    ALOOP_STATE *aloop_state = (ALOOP_STATE *) 0;

    /* for moving the eval stack on deep recursion */
    CELL *old_stack_base = 0;
    CELL *old_sp = 0;

#ifdef DEBUG
    CELL *entry_sp = sp;
#endif
    int force_exit = (end_start == 0);

    if (fp) {
        /* we are a function call, check for deep recursion */
        if (sp > stack_danger) {
            /* change stacks */
            old_stack_base = stack_base;
            old_sp = sp;
            stack_base = (CELL *) zmalloc(sizeof(CELL) * EVAL_STACK_SIZE);
            stack_danger = stack_base + DANGER;
            sp = stack_base;
            /* waste 1 slot for ANSI, actually large model msdos breaks in
               RET if we don't */
#ifdef DEBUG
            entry_sp = sp;
#endif
        } else
            old_stack_base = (CELL *) 0;
    }

    while (1) {

        TRACE(("execute %s sp(%ld:%s)\n",
               da_op_name(cdp),
               (long) (sp - stack_base),
               da_type_name(sp)));

        switch ((cdp++)->op) {

            /* HALT is only used by the disassembler now; this remains
               so compilers don't offset the jump table */
        case _HALT:

        case _STOP:             /* only for range patterns */
#ifdef DEBUG
            if (sp != entry_sp + 1)
                bozo("stop0");
#endif
            return;

        case _PUSHC:
            inc_sp();
            cellcpy(sp, (cdp++)->ptr);
            break;

        case _PUSHD:
            inc_sp();
            sp->type = C_DOUBLE;
            sp->dval = *(double *) (cdp++)->ptr;
            break;

        case _PUSHS:
            inc_sp();
            sp->type = C_STRING;
            sp->ptr = (cdp++)->ptr;
            string(sp)->ref_cnt++;
            break;

        case F_PUSHA:
            cp = (CELL *) cdp->ptr;
            if (cp != field) {
                if (nf < 0)
                    split_field0();
                if (!(cp >= NF && cp <= LAST_PFIELD)) {
                    /* it is a real field $1, $2 ...
                       If it is greater than $NF, we have to make sure it
                       is set to "" so that (++|--) and g?sub() work right */
                    t = field_addr_to_index(cp);
                    if (t > nf) {
                        cp->type = C_STRING;
                        cp->ptr = (PTR) & null_str;
                        null_str.ref_cnt++;
                    }
                }
            }
            /* fall thru */

        case _PUSHA:
        case A_PUSHA:
            inc_sp();
            sp->ptr = (cdp++)->ptr;
            break;

        case _PUSHI:
            /* put contents of next address on stack */
            inc_sp();
            cellcpy(sp, (cdp++)->ptr);
            break;

        case L_PUSHI:
            /* put the contents of a local var on stack;
               cdp->op holds the offset from the frame pointer */
            inc_sp();
            cellcpy(sp, fp + (cdp++)->op);
            break;

        case L_PUSHA:
            /* put a local address on eval stack */
            inc_sp();
            sp->ptr = (PTR) (fp + (cdp++)->op);
            break;

        case F_PUSHI:
            /* push contents of $i; cdp[0] holds &$i, cdp[1] holds i */
            inc_sp();
            if (nf < 0)
                split_field0();
            cp = (CELL *) cdp->ptr;
            t = (cdp + 1)->op;
            cdp += 2;
            if (t <= nf)
                cellcpy(sp, cp);
            else {              /* an unset field */
                sp->type = C_STRING;
                sp->ptr = (PTR) & null_str;
                null_str.ref_cnt++;
            }
            break;

        case NF_PUSHI:
            inc_sp();
            if (nf < 0)
                split_field0();
            cellcpy(sp, NF);
            break;

        case FE_PUSHA:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            tu = d_to_index(sp->dval);
            if (tu && nf < 0)
                split_field0();
            sp->ptr = (PTR) field_ptr((int) tu);
            if ((int) tu > nf) {
                /* make sure it is set to "" */
                cp = (CELL *) sp->ptr;
                cell_destroy(cp);
                cp->type = C_STRING;
                cp->ptr = (PTR) & null_str;
                null_str.ref_cnt++;
            }
            break;

        case FE_PUSHI:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            tu = d_to_index(sp->dval);
            if (nf < 0)
                split_field0();
            if ((int) tu <= nf) {
                cellcpy(sp, field_ptr((int) tu));
            } else {
                sp->type = C_STRING;
                sp->ptr = (PTR) & null_str;
                null_str.ref_cnt++;
            }
            break;

        case AE_PUSHA:
            /* top of stack has an expr, cdp->ptr points at an array;
               replace the expr with the cell address inside the array */
            cp = array_find((ARRAY) (cdp++)->ptr, sp, CREATE);
            cell_destroy(sp);
            sp->ptr = (PTR) cp;
            break;

        case AE_PUSHI:
            /* top of stack has an expr, cdp->ptr points at an array;
               replace the expr with the contents of the cell inside the
               array */
            cp = array_find((ARRAY) (cdp++)->ptr, sp, CREATE);
            cell_destroy(sp);
            cellcpy(sp, cp);
            break;

        case LAE_PUSHI:
            /* sp[0] is an expression; cdp->op is the offset from the frame
               pointer of a CELL which has an ARRAY in the ptr field;
               replace expr with array[expr] */
            if (fp != 0) {
                cp = array_find((ARRAY) fp[(cdp++)->op].ptr, sp, CREATE);
                cell_destroy(sp);
                cellcpy(sp, cp);
            }
            break;

        case LAE_PUSHA:
            /* sp[0] is an expression; cdp->op is the offset from the frame
               pointer of a CELL which has an ARRAY in the ptr field;
               replace expr with &array[expr] */
            if (fp != 0) {
                cp = array_find((ARRAY) fp[(cdp++)->op].ptr, sp, CREATE);
                cell_destroy(sp);
                sp->ptr = (PTR) cp;
            }
            break;

        case LA_PUSHA:
            /* cdp->op is the offset from the frame pointer of a CELL which
               has an ARRAY in the ptr field.  Push this ARRAY on the eval
               stack */
            if (fp != 0) {
                inc_sp();
                sp->ptr = fp[(cdp++)->op].ptr;
            }
            break;

        case SET_ALOOP:
            {
                ALOOP_STATE *ap = ZMALLOC(ALOOP_STATE);
                size_t vector_size;

                ap->var = (CELL *) sp[-1].ptr;
                ap->base = ap->ptr = array_loop_vector((ARRAY) sp->ptr,
                                                       &vector_size);
                ap->limit = ap->base + vector_size;
                sp -= 2;

                /* push onto aloop stack */
                ap->link = aloop_state;
                aloop_state = ap;
                cdp += cdp->op;
            }
            break;

        case ALOOP:
            {
                ALOOP_STATE *ap = aloop_state;
                if (ap != 0 && (ap->ptr < ap->limit)) {
                    cell_destroy(ap->var);
                    ap->var->type = C_STRING;
                    ap->var->ptr = (PTR) * ap->ptr++;
                    cdp += cdp->op;
                } else {
                    cdp++;
                }
            }
            break;

        case POP_AL:
            {
                /* finish up an array loop */
                ALOOP_STATE *ap = aloop_state;
                if (ap != 0) {
                    aloop_state = ap->link;
                    while (ap->ptr < ap->limit) {
                        free_STRING(*ap->ptr);
                        ap->ptr++;
                    }
                    if (ap->base < ap->limit) {
                        zfree(ap->base,
                              ((unsigned) (ap->limit - ap->base)
                               * sizeof(STRING *)));
                    }
                    ZFREE(ap);
                }
            }
            break;

        case _POP:
            cell_destroy(sp);
            sp--;
            break;

        case _ASSIGN:
            /* top of stack has an expr, next down is an address; put the
               expression in *address and replace the address with the
               expression */

            /* don't propagate type C_MBSTRN */
            if (sp->type == C_MBSTRN)
                check_strnum(sp);
            sp--;
            cell_destroy(((CELL *) sp->ptr));
            cellcpy(sp, cellcpy(sp->ptr, sp + 1));
            cell_destroy(sp + 1);
            break;

        case F_ASSIGN:
            /* assign to a field */
            if (sp->type == C_MBSTRN)
                check_strnum(sp);
            sp--;
            field_assign((CELL *) sp->ptr, sp + 1);
            cell_destroy(sp + 1);
            cellcpy(sp, (CELL *) sp->ptr);
            break;

        case _ADD_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
#ifdef SW_FP_CHECK              /* specific to V7 and XNX23A */
            clrerr();
#endif
            cp->dval += (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

        case _SUB_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
#ifdef SW_FP_CHECK
            clrerr();
#endif
            cp->dval -= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

        case _MUL_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
#ifdef SW_FP_CHECK
            clrerr();
#endif
            cp->dval *= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

        case _DIV_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp->dval);
#endif
#ifdef SW_FP_CHECK
            clrerr();
#endif
            cp->dval /= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

        case _MOD_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp->dval);
#endif
            cp->dval = fmod(cp->dval, (sp--)->dval);
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

        case _POW_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
            cp->dval = pow(cp->dval, (sp--)->dval);
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            break;

            /* will anyone ever use these ? */

        case F_ADD_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
#ifdef SW_FP_CHECK
            clrerr();
#endif
            tc.dval += (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case F_SUB_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
#ifdef SW_FP_CHECK
            clrerr();
#endif
            tc.dval -= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case F_MUL_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
#ifdef SW_FP_CHECK
            clrerr();
#endif
            tc.dval *= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case F_DIV_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp->dval);
#endif
#ifdef SW_FP_CHECK
            clrerr();
#endif
            tc.dval /= (sp--)->dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case F_MOD_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp->dval);
#endif
            tc.dval = fmod(tc.dval, (sp--)->dval);
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case F_POW_ASG:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            cp = (CELL *) (sp - 1)->ptr;
            cast1_to_d(cellcpy(&tc, cp));
            tc.dval = pow(tc.dval, (sp--)->dval);
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            field_assign(cp, &tc);
            break;

        case _ADD:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
#ifdef SW_FP_CHECK
            clrerr();
#endif
            sp[0].dval += sp[1].dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            break;

        case _SUB:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
#ifdef SW_FP_CHECK
            clrerr();
#endif
            sp[0].dval -= sp[1].dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            break;

        case _MUL:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
#ifdef SW_FP_CHECK
            clrerr();
#endif
            sp[0].dval *= sp[1].dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            break;

        case _DIV:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp[1].dval);
#endif
#ifdef SW_FP_CHECK
            clrerr();
#endif
            sp[0].dval /= sp[1].dval;
#ifdef SW_FP_CHECK
            fpcheck();
#endif
            break;

        case _MOD:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
#ifdef NOINFO_SIGFPE
            CHECK_DIVZERO(sp[1].dval);
#endif
            sp[0].dval = fmod(sp[0].dval, sp[1].dval);
            break;

        case _POW:
            sp--;
            if (TEST2(sp) != TWO_DOUBLES)
                cast2_to_d(sp);
            sp[0].dval = pow(sp[0].dval, sp[1].dval);
            break;

        case _NOT:
            /* evaluates to 0.0 or 1.0 */
          reswitch_1:
            switch (sp->type) {
            case C_NOINIT:
                sp->dval = 1.0;
                break;
            case C_DOUBLE:
                sp->dval = sp->dval != 0.0 ? 0.0 : 1.0;
                break;
            case C_FIELDWIDTHS:
            case C_STRING:
                sp->dval = string(sp)->len ? 0.0 : 1.0;
                free_STRING(string(sp));
                break;
            case C_STRNUM:
                /* test as a number */
                sp->dval = sp->dval != 0.0 ? 0.0 : 1.0;
                free_STRING(string(sp));
                break;
            case C_MBSTRN:
                check_strnum(sp);
                goto reswitch_1;
            default:
                bozo("bad type on eval stack");
            }
            sp->type = C_DOUBLE;
            break;

        case _TEST:
            /* evaluates to 0.0 or 1.0 */
          reswitch_2:
            switch (sp->type) {
            case C_NOINIT:
                sp->dval = 0.0;
                break;
            case C_DOUBLE:
                sp->dval = sp->dval != 0.0 ? 1.0 : 0.0;
                break;
            case C_FIELDWIDTHS:
            case C_STRING:
                sp->dval = string(sp)->len ? 1.0 : 0.0;
                free_STRING(string(sp));
                break;
            case C_STRNUM:
                /* test as a number */
                sp->dval = sp->dval != 0.0 ? 1.0 : 0.0;
                free_STRING(string(sp));
                break;
            case C_MBSTRN:
                check_strnum(sp);
                goto reswitch_2;
            default:
                bozo("bad type on eval stack");
            }
            sp->type = C_DOUBLE;
            break;

        case _UMINUS:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            sp->dval = -sp->dval;
            break;

        case _UPLUS:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            break;

        case _CAT:
            {
                size_t len1, len2;
                char *str1, *str2;
                STRING *b;

                sp--;
                if (TEST2(sp) != TWO_STRINGS)
                    cast2_to_s(sp);
                str1 = string(sp)->str;
                len1 = string(sp)->len;
                str2 = string(sp + 1)->str;
                len2 = string(sp + 1)->len;

                b = new_STRING0(len1 + len2);
                memcpy(b->str, str1, len1);
                memcpy(b->str + len1, str2, len2);
                free_STRING(string(sp));
                free_STRING(string(sp + 1));

                sp->ptr = (PTR) b;
                break;
            }

        case _PUSHINT:
            inc_sp();
            sp->type = (short) (cdp++)->op;
            break;

        case _BUILTIN:
        case _PRINT:
            sp = (*(PF_CP) (cdp++)->ptr) (sp);
            break;

        case _POST_INC:
            cp = (CELL *) sp->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            cp->dval += 1.0;
            break;

        case _POST_DEC:
            cp = (CELL *) sp->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
            sp->type = C_DOUBLE;
            sp->dval = cp->dval;
            cp->dval -= 1.0;
            break;

        case _PRE_INC:
            cp = (CELL *) sp->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
            sp->dval = cp->dval += 1.0;
            sp->type = C_DOUBLE;
            break;

        case _PRE_DEC:
            cp = (CELL *) sp->ptr;
            if (cp->type != C_DOUBLE)
                cast1_to_d(cp);
            sp->dval = cp->dval -= 1.0;
            sp->type = C_DOUBLE;
            break;

        case F_POST_INC:
            cp = (CELL *) sp->ptr;
            cellcpy(&tc, cp);
            cast1_to_d(&tc);
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            tc.dval += 1.0;
            field_assign(cp, &tc);
            break;

        case F_POST_DEC:
            cp = (CELL *) sp->ptr;
            cellcpy(&tc, cp);
            cast1_to_d(&tc);
            sp->type = C_DOUBLE;
            sp->dval = tc.dval;
            tc.dval -= 1.0;
            field_assign(cp, &tc);
            break;

        case F_PRE_INC:
            cp = (CELL *) sp->ptr;
            cast1_to_d(cellcpy(sp, cp));
            sp->dval += 1.0;
            field_assign(cp, sp);
            break;

        case F_PRE_DEC:
            cp = (CELL *) sp->ptr;
            cast1_to_d(cellcpy(sp, cp));
            sp->dval -= 1.0;
            field_assign(cp, sp);
            break;

        case _JMP:
            cdp += cdp->op;
            break;

        case _JNZ:
            /* jmp if top of stack is non-zero and pop stack */
            if (test(sp))
                cdp += cdp->op;
            else
                cdp++;
            cell_destroy(sp);
            sp--;
            break;

        case _JZ:
            /* jmp if top of stack is zero and pop stack */
            if (!test(sp))
                cdp += cdp->op;
            else
                cdp++;
            cell_destroy(sp);
            sp--;
            break;

        case _LJZ:
            /* special jump for logical and */
            /* this is always preceded by _TEST */
            if (sp->dval == 0.0) {
                /* take jump, but don't pop stack */
                cdp += cdp->op;
            } else {
                /* pop and don't jump */
                sp--;
                cdp++;
            }
            break;

        case _LJNZ:
            /* special jump for logical or */
            /* this is always preceded by _TEST */
            if (sp->dval != 0.0) {
                /* take jump, but don't pop stack */
                cdp += cdp->op;
            } else {
                /* pop and don't jump */
                sp--;
                cdp++;
            }
            break;

            /* the relation operations */
            /* compare() makes sure string ref counts are OK */
        case _EQ:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t == 0 ? 1.0 : 0.0;
            break;

        case _NEQ:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t ? 1.0 : 0.0;
            break;

        case _LT:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t < 0 ? 1.0 : 0.0;
            break;

        case _LTE:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t <= 0 ? 1.0 : 0.0;
            break;

        case _GT:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t > 0 ? 1.0 : 0.0;
            break;

        case _GTE:
            t = compare(--sp);
            sp->type = C_DOUBLE;
            sp->dval = t >= 0 ? 1.0 : 0.0;
            break;

        case _MATCH0:
            /* does $0 match the RE at cdp? */
            inc_sp();
            if (field->type >= C_STRING) {
                sp->type = C_DOUBLE;
                sp->dval = (REtest(string(field)->str,
                                   string(field)->len,
                                   cast_to_re((cdp++)->ptr))
                            ? 1.0
                            : 0.0);
                break;          /* the case */
            } else {
                cellcpy(sp, field);
                /* and FALL THRU */
            }

        case _MATCH1:
            /* does expr at sp[0] match RE at cdp? */
            if (sp->type < C_STRING)
                cast1_to_s(sp);
            t = REtest(string(sp)->str,
                       string(sp)->len,
                       cast_to_re((cdp++)->ptr));
            free_STRING(string(sp));
            sp->type = C_DOUBLE;
            sp->dval = t ? 1.0 : 0.0;
            break;

        case _MATCH2:
            /* does sp[-1] match sp[0] as re */
            cast_to_RE(sp);

            if ((--sp)->type < C_STRING)
                cast1_to_s(sp);
            t = REtest(string(sp)->str,
                       string(sp)->len,
                       cast_to_re((sp + 1)->ptr));

            free_STRING(string(sp));
            no_leaks_re_ptr((sp + 1)->ptr);
            sp->type = C_DOUBLE;
            sp->dval = t ? 1.0 : 0.0;
            break;

        case A_LENGTH:
            sp--;
            sp->type = C_DOUBLE;
            sp->dval = (double) (((ARRAY) ((sp + 0)->ptr))->size);
            break;

        case A_TEST:
            /* entry: sp[0].ptr -> an array
                      sp[-1] is an expression
               we compute (expression in array) */
            sp--;
            cp = array_find((sp + 1)->ptr, sp, NO_CREATE);
            cell_destroy(sp);
            sp->type = C_DOUBLE;
            sp->dval = (cp != (CELL *) 0) ? 1.0 : 0.0;
            break;

        case A_DEL:
            /* sp[0].ptr -> array
               sp[-1] is an expr
               delete array[expr] */
            array_delete(sp->ptr, sp - 1);
            cell_destroy(sp - 1);
            sp -= 2;
            break;

        case DEL_A:
            /* free all the array at once */
            array_clear(sp->ptr);
            sp--;
            break;

            /* form a multiple array index */
        case A_CAT:
            sp = array_cat(sp, (cdp++)->op);
            break;

        case _EXIT:
            if (sp->type != C_DOUBLE)
                cast1_to_d(sp);
            exit_code = d_to_i(sp->dval);
            sp--;
            /* fall thru */

        case _EXIT0:
            if (force_exit)
                mawk_exit(exit_code);

            cdp = end_start;
            force_exit = 1;     /* makes sure next exit exits */

            if (begin_start) {
                free_codes("BEGIN", begin_start, begin_size);
                begin_start = 0;
                begin_size = 0;
            }
            if (main_start) {
                free_codes("MAIN", main_start, main_size);
                main_start = 0;
                main_size = 0;
            }
            sp = eval_stack - 1;        /* might be in user function */
            CLEAR_ALOOP_STACK();        /* ditto */
            break;

        case _JMAIN:
            /* go from BEGIN code to MAIN code */
            free_codes("BEGIN", begin_start, begin_size);
            begin_start = 0;
            begin_size = 0;
            cdp = main_start;
            break;

        case _OMAIN:
            if (!main_fin)
                open_main();
            restart_label = cdp;
            cdp = next_label;
            break;

        case _NEXT:
            /* next might be inside an aloop -- clear stack */
            CLEAR_ALOOP_STACK();
            cdp = next_label;
            break;

        case _NEXTFILE:
            /* nextfile might be inside an aloop -- clear stack */
            CLEAR_ALOOP_STACK();
            FINsemi_close(main_fin);
            cdp = next_label;
            break;

        case OL_GL:
            {
                char *p;
                size_t len;

                if (!(p = FINgets(main_fin, &len))) {
                    if (force_exit)
                        mawk_exit(0);

                    cdp = end_start;
                    zfree(main_start, main_size);
                    main_start = (INST *) 0;
                    force_exit = 1;
                } else {
                    set_field0(p, len);
                    cdp = restart_label;
                    rt_nr++;
                    rt_fnr++;
                }
            }
            break;

            /* the two kinds of OL_GL are a historical stupidity from
               working on a machine with very slow floating point
               emulation */
        case OL_GL_NR:
            {
                char *p;
                size_t len;

                if (!(p = FINgets(main_fin, &len))) {
                    if (force_exit)
                        mawk_exit(0);

                    cdp = end_start;
                    zfree(main_start, main_size);
                    main_start = (INST *) 0;
                    force_exit = 1;
                } else {
                    set_field0(p, len);
                    cdp = restart_label;

                    if (TEST2(NR) != TWO_DOUBLES)
                        cast2_to_d(NR);

                    NR->dval += 1.0;
                    rt_nr++;
                    FNR->dval += 1.0;
                    rt_fnr++;
                }
            }
            break;

        case _RANGE:
            /* test a range pattern:  pat1, pat2 { action }
               entry :
                   cdp[0].op -- a flag, test pat1 if on else pat2
                   cdp[1].op -- offset of pat2 code from cdp
                   cdp[2].op -- offset of action code from cdp
                   cdp[3].op -- offset of code after the action from cdp
                   cdp[4]    -- start of pat1 code
            */

#define FLAG    cdp[0].op
#define PAT2    cdp[1].op
#define ACTION  cdp[2].op
#define FOLLOW  cdp[3].op
#define PAT1    4

            if (FLAG) {
                /* test against pat1 */
                execute(cdp + PAT1, sp, fp);
                t = test(sp + 1);
                cell_destroy(sp + 1);
                if (t)
                    FLAG = 0;
                else {
                    cdp += FOLLOW;
                    break;      /* break the switch */
                }
            }

            /* test against pat2 and then perform the action */
            execute(cdp + PAT2, sp, fp);
            FLAG = test(sp + 1);
            cell_destroy(sp + 1);
            cdp += ACTION;
            break;

            /* function calls */

        case _RET0:
            inc_sp();
            sp->type = C_NOINIT;
            /* fall thru */

        case _RET:
#ifdef DEBUG
            if (sp != entry_sp + 1)
                bozo("ret");
#endif
            if (old_stack_base) {
                /* reset stack */
                /* move the return value */
                cellcpy(old_sp + 1, sp);
                cell_destroy(sp);
                zfree(stack_base, sizeof(CELL) * EVAL_STACK_SIZE);
                stack_base = old_stack_base;
                stack_danger = old_stack_base + DANGER;
            }

            /* return might be inside an aloop -- clear stack */
            CLEAR_ALOOP_STACK();

            return;

        case _CALL:
            /* cdp[0] holds ptr to "function block"
               cdp[1] holds number of input arguments */
            {
                FBLOCK *fbp = (FBLOCK *) (cdp++)->ptr;
                int a_args = (cdp++)->op;       /* actual number of args */
                CELL *nfp = sp - a_args + 1;    /* new fp for callee */
                CELL *local_p = sp + 1;         /* first local argument on stack */
                char *type_p = 0;               /* pts to type of an argument */

                if (fbp->nargs)
                    type_p = fbp->typev + a_args - 1;

                /* create space for locals */
                t = fbp->nargs - a_args;        /* t is number of locals */
                while (t > 0) {
                    t--;
                    sp++;
                    type_p++;
                    sp->type = C_NOINIT;
                    if (type_p != 0 && *type_p == ST_LOCAL_ARRAY)
                        sp->ptr = (PTR) new_ARRAY();
                }

                execute(fbp->code, sp, nfp);

                /* cleanup the callee's arguments */
                /* putting return value at top of eval stack */
                if ((type_p != 0) && (sp >= nfp)) {
                    cp = sp + 1;        /* cp -> the function return */
                    do {
                        if (*type_p == ST_LOCAL_ARRAY) {
                            if (sp >= local_p) {
                                array_clear(sp->ptr);
                                ZFREE((ARRAY) sp->ptr);
                            }
                        } else {
                            cell_destroy(sp);
                        }
                        type_p--;
                        sp--;
                    } while (sp >= nfp);

                    cellcpy(++sp, cp);
                    cell_destroy(cp);
                } else
                    sp++;       /* no arguments passed */
            }
            break;

        default:
            bozo("bad opcode");
        }
    }
}
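/* Illustrative sketch (an assumption for exposition, not part of the
 * interpreter): the SET_ALOOP / ALOOP / POP_AL opcodes above cooperate to
 * implement awk's "for (var in array)" loop.  A loop such as
 *
 *      for (k in arr) print k
 *
 * would be compiled roughly as follows (the exact code stream is the
 * compiler's business; jump targets shown symbolically):
 *
 *      _PUSHA   k            push address of the loop variable (sp[-1])
 *      A_PUSHA  arr          push the array (sp[0])
 *      SET_ALOOP -> L2       snapshot arr's key vector, pop both, jump to test
 *  L1: ...                   loop body: print k
 *  L2: ALOOP -> L1           bind next key to k and jump back, else fall thru
 *      POP_AL                free any unvisited keys and the ALOOP_STATE
 */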
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool will_link;
  ciMethod*        dest_method  = iter().get_method(will_link);
  ciInstanceKlass* holder_klass = dest_method->holder();
  ciKlass*         holder       = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int nargs = dest_method->arg_size();

  // See if the receiver (if any) is NULL: if so we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver",
                   caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link;
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() ||
         holder_klass->super() == NULL ||
         (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node* receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type = _gvn.type(receiver_node)->isa_instptr();
    if( inst_type ) {
      ciInstanceKlass *ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if( !ikl->is_linked() ) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass,
                      "call site where receiver is not linked", false);
        return;
      }
    }

    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method =
      optimize_inlining(method(), bci(), klass, dest_method, receiver_type);
    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have a precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp        = c1_cp();                  // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else {
      // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just-created cp, or whatever cp was allocated
      // by another thread that wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(-nargs);               // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual,
                                        jvms, try_inline, prof_factor(),
                                        callee_cp, callee_cp_inloff,
                                        c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {      // Did it work?
    // When an inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT);
        C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;         // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                           try_inline, prof_factor(), c1_cp(), c1_cp_inloff(),
                           c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate: calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if( result_type == T_OBJECT || result_type == T_ARRAY ) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}
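// A condensed restatement of the call tactic chosen above, for orientation
// only (names are the ones used in this method; error paths simplified):
//
//   receiver may be NULL        -> builtin_throw(Reason_null_check), done
//   !will_link / can't compile  -> uncommon trap or bailout, done
//   receiver klass not linked   -> uncommon_trap(Reason_uninitialized), done
//   optimize_inlining() hit     -> devirtualize: call_is_virtual = false
//   cg = C->call_generator(...) -> picks inline, static, or virtual strategy
//   cg->generate() == NULL      -> retry once with try_inline = false,
//                                  which must produce a plain out-of-line call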