/* Emit JIT code that executes a single Perl op via its pp function.
 *
 * Three steps are emitted into `compiler`:
 *   1. a label bound to `op`, so earlier forward jumps targeting this op
 *      can be resolved later (see fixup_jumps);
 *   2. an indirect call to op->op_ppaddr (the op's pp function);
 *   3. a store of the call's return value (SLJIT_RETURN_REG, the next OP*
 *      to run) into the interpreter's PL_op variable.
 *
 * NOTE(review): SLJIT_MEM with an immediate here is taken to address
 * &PL_op absolutely (SLJIT_MEM0-style addressing) -- confirm against the
 * sljit version in use.
 */
static void unroll_op(pTHX_ struct sljit_compiler* compiler, OP* op) {
    DEBUGf((" ; %s [%p]\n", OP_NAME(op), op));
    emit_label(compiler, op);
    // TODO(dgl): Can we avoid doing this for every op?
    sljit_emit_ijump(compiler, SLJIT_CALL0, SLJIT_IMM, (sljit_w) op->op_ppaddr);
    /* PL_op = <return value of the pp function> */
    sljit_emit_op1(compiler, SLJIT_MOV, SLJIT_MEM, (sljit_w) &PL_op, SLJIT_RETURN_REG, 0);
}
/* Resolve every pending forward jump against the emitted labels.
 *
 * For each entry in the `jump` list, scan the `label` list for the label
 * that was emitted for the jump's target op and bind the jump to it.
 * Croaks (fatal) if a target op never had a label emitted, since that
 * would leave an unresolved jump in the generated code.
 *
 * `compiler` is currently unused but kept for interface symmetry with the
 * other emit/fixup helpers.
 */
static void fixup_jumps(struct sljit_compiler* compiler, struct needjump* jump, struct oplabel *label) {
    for (; jump; jump = jump->next) {
        struct oplabel *found = label;
        while (found && found->op != jump->op_to)
            found = found->next;
        if (!found)
            Perl_croak("Unable to resolve jump for %p [%s]", jump->op_to, OP_NAME(jump->op_to));
        sljit_set_label(jump->jump, found->label);
    }
}
void Perl_deb_stack_all(pTHX) { #ifdef DEBUGGING dVAR; I32 si_ix; const PERL_SI *si; /* rewind to start of chain */ si = PL_curstackinfo; while (si->si_prev) si = si->si_prev; si_ix=0; for (;;) { const size_t si_name_ix = si->si_type+1; /* -1 is a valid index */ const char * const si_name = (si_name_ix >= sizeof(si_names)) ? "????" : si_names[si_name_ix]; I32 ix; PerlIO_printf(Perl_debug_log, "STACK %"IVdf": %s\n", (IV)si_ix, si_name); for (ix=0; ix<=si->si_cxix; ix++) { const PERL_CONTEXT * const cx = &(si->si_cxstack[ix]); PerlIO_printf(Perl_debug_log, " CX %"IVdf": %-6s => ", (IV)ix, PL_block_type[CxTYPE(cx)] ); /* substitution contexts don't save stack pointers etc) */ if (CxTYPE(cx) == CXt_SUBST) PerlIO_printf(Perl_debug_log, "\n"); else { /* Find the current context's stack range by searching * forward for any higher contexts using this stack; failing * that, it will be equal to the size of the stack for old * stacks, or PL_stack_sp for the current stack */ I32 i, stack_min, stack_max, mark_min, mark_max; const PERL_CONTEXT *cx_n = NULL; const PERL_SI *si_n; /* there's a separate stack per SI, so only search * this one */ for (i=ix+1; i<=si->si_cxix; i++) { if (CxTYPE(cx) == CXt_SUBST) continue; cx_n = &(si->si_cxstack[i]); break; } stack_min = cx->blk_oldsp; if (cx_n) { stack_max = cx_n->blk_oldsp; } else if (si == PL_curstackinfo) { stack_max = PL_stack_sp - AvARRAY(si->si_stack); } else { stack_max = AvFILLp(si->si_stack); } /* for the other stack types, there's only one stack * shared between all SIs */ si_n = si; i = ix; cx_n = NULL; for (;;) { i++; if (i > si_n->si_cxix) { if (si_n == PL_curstackinfo) break; else { si_n = si_n->si_next; i = 0; } } if (CxTYPE(&(si_n->si_cxstack[i])) == CXt_SUBST) continue; cx_n = &(si_n->si_cxstack[i]); break; } mark_min = cx->blk_oldmarksp; if (cx_n) { mark_max = cx_n->blk_oldmarksp; } else { mark_max = PL_markstack_ptr - PL_markstack; } deb_stack_n(AvARRAY(si->si_stack), stack_min, stack_max, mark_min, mark_max); if 
(CxTYPE(cx) == CXt_EVAL || CxTYPE(cx) == CXt_SUB || CxTYPE(cx) == CXt_FORMAT) { const OP * const retop = cx->blk_sub.retop; PerlIO_printf(Perl_debug_log, " retop=%s\n", retop ? OP_NAME(retop) : "(null)" ); } } } /* next context */ if (si == PL_curstackinfo) break; si = si->si_next; si_ix++; if (!si) break; /* shouldn't happen, but just in case.. */ } /* next stackinfo */ PerlIO_printf(Perl_debug_log, "\n"); #else PERL_UNUSED_CONTEXT; #endif /* DEBUGGING */ }
/*
 * Walk the op_next chain starting at `op` (stopping before `end`, which
 * may be NULL for "until the chain runs out") and emit JIT code for each
 * op. `seenops` maps formatted op addresses -> PL_sv_yes and is used to
 * detect ops already emitted (loops in the op graph): a revisit emits a
 * jump to the existing label and stops. Branching ops recurse via
 * unroll_branching_op; ops whose successor is only known at runtime
 * (goto, entersub, loop controls, ...) fall back to returning PL_op so
 * the interpreter takes over.
 */
static void unroll_tree(pTHX_ struct sljit_compiler* compiler, HV* seenops, OP* op, OP* end) {
    // XXX: This basically is the algorithm from walk_exec in B::Concise, but I
    // think it could be done better. (Maybe core will get something like
    // codegen?).
    while (op && op != end) {
        const char* op_hex = format_hex(op);

        // Have we visited this op already?
        if (hv_exists(seenops, op_hex, strlen(op_hex))) {
            DEBUGf((" ;; Already seen %s [0x%s]\n", OP_NAME(op), op_hex));
            // Insert jump to it
            emit_jump(compiler, op);
            // We know we have followed all the next pointers for this chain,
            // so:
            break;
        }

        // Mark this op as seen. BUG FIX: hv_store's signature is
        // (hv, key, klen, val, hash); the old call passed klen=0 and
        // strlen(op_hex) as the precomputed hash, so every op was stored
        // under the empty key and the hv_exists() lookup above could never
        // find it -- breaking loop detection entirely.
        hv_store(seenops, op_hex, strlen(op_hex), &PL_sv_yes, 0);

        // NOTE(review): calls to unroll_op() below omit aTHX_ although it
        // is declared with pTHX_; this only compiles on unthreaded perls
        // where pTHX_ is empty -- confirm build configuration.
        if (op->op_type == OP_CUSTOM) {
            // All bets are off
            sljit_emit_return(compiler, SLJIT_MEM, (sljit_w) &PL_op);
        } else if (OP_CLASS(op) == OA_LOGOP) {
            unroll_branching_op(compiler, seenops, op, op->op_next,
                    cLOGOPx(op)->op_other);
        } else if (op->op_type == OP_SUBST
                && cPMOPx(op)->op_pmstashstartu.op_pmreplstart) {
            unroll_branching_op(compiler, seenops, op, op->op_next,
                    cPMOPx(op)->op_pmstashstartu.op_pmreplstart);
        } else if (op->op_type == OP_GREPSTART || op->op_type == OP_MAPSTART) {
            // The branch targets hang off the following op (the while-ish
            // logop), so branch from op_next.
            unroll_branching_op(compiler, seenops, op, op->op_next->op_next,
                    cLOGOPx(op->op_next)->op_other);
        } else if (op->op_type == OP_NEXT || op->op_type == OP_LAST
                || op->op_type == OP_REDO) {
            // Runtime-determined successor: bail out to the interpreter.
            sljit_emit_return(compiler, SLJIT_MEM, (sljit_w) &PL_op);
            //need_patch = 1; XXX
        } else if (op->op_type == OP_FLIP || op->op_type == OP_GOTO) {
            sljit_emit_return(compiler, SLJIT_MEM, (sljit_w) &PL_op);
            //need_patch = 1; XXX
        } else if (op->op_type == OP_ENTERSUB) {
            sljit_emit_return(compiler, SLJIT_MEM, (sljit_w) &PL_op);
            //need_patch = 1; XXX
        } else if (op->op_type == OP_RETURN || op->op_type == OP_LEAVESUB
                || op->op_type == OP_REQUIRE) {
            // XXX: leavesublv?
            unroll_op(compiler, op);
            sljit_emit_return(compiler, SLJIT_MEM, (sljit_w) &PL_op);
        } else {
            unroll_op(compiler, op);
#ifdef DEBUG
            // Ensure returned OP is actually the one we expect
            struct sljit_jump *jump1 = sljit_emit_cmp(compiler, SLJIT_C_EQUAL,
                    SLJIT_RETURN_REG, 0, SLJIT_IMM, (sljit_w) op->op_next);
            sljit_emit_op0(compiler, SLJIT_BREAKPOINT);
            sljit_set_label(jump1, sljit_emit_label(compiler));
#endif
        }
        op = op->op_next;
    }
}