/* Clone prototype PROTO.  The clone shares the prototype's underlying
   cairo pattern; cairo_pattern_reference() bumps the pattern's reference
   count so both wrappers own it independently.  */
IoCairoSurfacePattern *IoCairoSurfacePattern_rawClone(IoCairoSurfacePattern *proto)
{
	IoObject *self = IoObject_rawClonePrimitive(proto);

	if (PATTERN(proto) != NULL)
	{
		IoObject_setDataPointer_(self, cairo_pattern_reference(PATTERN(proto)));
	}
	return self;
}
/* Return true if is load/store with SYMBOL_REF addressing mode and memory mode is SImode. */
bool
nds32_symbol_load_store_p (rtx_insn *insn)
{
  rtx mem_src = NULL_RTX;

  /* Pick out the memory side of the pattern: the source of a load,
     the destination of a store.  Other insn types are rejected.  */
  switch (get_attr_type (insn))
    {
    case TYPE_LOAD:
      mem_src = SET_SRC (PATTERN (insn));
      break;
    case TYPE_STORE:
      mem_src = SET_DEST (PATTERN (insn));
      break;
    default:
      break;
    }

  /* Find load/store insn with addressing mode is SYMBOL_REF. */
  if (mem_src != NULL_RTX)
    {
      /* A load value may be wrapped in an extension; look through it
	 to reach the underlying MEM.  */
      if ((GET_CODE (mem_src) == ZERO_EXTEND) || (GET_CODE (mem_src) == SIGN_EXTEND))
	mem_src = XEXP (mem_src, 0);

      /* Accept an address that is a SYMBOL_REF directly, or a LO_SUM
	 (the low part of a split symbol address).  */
      if ((GET_CODE (XEXP (mem_src, 0)) == SYMBOL_REF)
	  || (GET_CODE (XEXP (mem_src, 0)) == LO_SUM))
	return true;
    }

  return false;
}
/* Clone prototype PROTO.  The clone shares the prototype's underlying
   cairo gradient pattern; cairo_pattern_reference() increments its
   reference count so each wrapper holds its own reference.  */
IoCairoLinearGradient *IoCairoLinearGradient_rawClone(IoCairoLinearGradient *proto)
{
	IoObject *self = IoObject_rawClonePrimitive(proto);

	if (PATTERN(proto) != NULL)
	{
		IoObject_setDataPointer_(self, cairo_pattern_reference(PATTERN(proto)));
	}
	return self;
}
/* Return the single SET of INSN that the combine-stack-adjustments pass
   may operate on, or NULL_RTX.  Like single_set, but additionally
   accepts a PARALLEL whose first element is a SET and whose remaining
   elements are only CLOBBERs, USEs, or no-op sets.  */
static rtx
single_set_for_csa (rtx insn)
{
  int i;
  rtx tmp = single_set (insn);

  /* Common case: a genuine single set.  */
  if (tmp)
    return tmp;

  if (!NONJUMP_INSN_P (insn) || GET_CODE (PATTERN (insn)) != PARALLEL)
    return NULL_RTX;

  /* The first element of the PARALLEL must be the SET we return.  */
  tmp = PATTERN (insn);
  if (GET_CODE (XVECEXP (tmp, 0, 0)) != SET)
    return NULL_RTX;

  /* All remaining elements must be harmless.  */
  for (i = 1; i < XVECLEN (tmp, 0); ++i)
    {
      rtx this_rtx = XVECEXP (tmp, 0, i);

      /* The special case is allowing a no-op set. */
      if (GET_CODE (this_rtx) == SET && SET_SRC (this_rtx) == SET_DEST (this_rtx))
	;
      else if (GET_CODE (this_rtx) != CLOBBER && GET_CODE (this_rtx) != USE)
	return NULL_RTX;
    }

  return XVECEXP (tmp, 0, 0);
}
/* Return non-zero if the consumer (a multiply-accumulate instruction)
   has an accumulator dependency on the result of the producer (a
   multiplication instruction) and no other dependency on that result. */
int
arm_mac_accumulator_is_mul_result (rtx producer, rtx consumer)
{
  rtx mul = PATTERN (producer);
  rtx mac = PATTERN (consumer);
  rtx mul_result;
  rtx mac_op0, mac_op1, mac_acc;

  /* Strip any conditional-execution wrapper to reach the real bodies.  */
  if (GET_CODE (mul) == COND_EXEC)
    mul = COND_EXEC_CODE (mul);
  if (GET_CODE (mac) == COND_EXEC)
    mac = COND_EXEC_CODE (mac);

  /* Check that mul is of the form (set (...) (mult ...))
     and mla is of the form (set (...) (plus (mult ...) (...))). */
  if ((GET_CODE (mul) != SET || GET_CODE (XEXP (mul, 1)) != MULT)
      || (GET_CODE (mac) != SET || GET_CODE (XEXP (mac, 1)) != PLUS
	  || GET_CODE (XEXP (XEXP (mac, 1), 0)) != MULT))
    return 0;

  mul_result = XEXP (mul, 0);
  mac_op0 = XEXP (XEXP (XEXP (mac, 1), 0), 0);
  mac_op1 = XEXP (XEXP (XEXP (mac, 1), 0), 1);
  mac_acc = XEXP (XEXP (mac, 1), 1);

  /* The multiply result must feed the accumulator operand and must not
     overlap either multiplicand of the MAC.  */
  return (reg_overlap_mentioned_p (mul_result, mac_acc)
	  && !reg_overlap_mentioned_p (mul_result, mac_op0)
	  && !reg_overlap_mentioned_p (mul_result, mac_op1));
}
/* In case function does not return value, we get clobber of pseudo
   followed by set to hard return value.  Starting at ORIG_INSN, skip
   over that clobber/set pair and return the last insn skipped; if the
   pattern is not present, return ORIG_INSN unchanged.  */
static rtx
skip_unreturned_value (rtx orig_insn)
{
  rtx insn = next_nonnote_insn (orig_insn);

  /* Skip possible clobber of pseudo return register. */
  if (insn
      && GET_CODE (insn) == INSN
      && GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && (REGNO (XEXP (PATTERN (insn), 0)) >= FIRST_PSEUDO_REGISTER))
    {
      rtx set_insn = next_nonnote_insn (insn);
      rtx set;

      /* Nothing after the clobber: stop at the clobber itself.  */
      if (!set_insn)
	return insn;

      /* The next insn must copy that clobbered pseudo into the
	 function's hard return register; otherwise stop at the
	 clobber.  */
      set = single_set (set_insn);
      if (!set
	  || SET_SRC (set) != XEXP (PATTERN (insn), 0)
	  || SET_DEST (set) != current_function_return_rtx)
	return insn;

      return set_insn;
    }

  return orig_insn;
}
/* Render insn X as human-readable text into BUF, honoring VERBOSE.
   NOTE(review): BUF is assumed large enough for the formatted text
   (sprintf is unbounded here) — confirm against callers.  */
void
print_insn (char *buf, const_rtx x, int verbose)
{
  char t[BUF_LEN];
  const_rtx insn = x;

  switch (GET_CODE (x))
    {
    case INSN:
      print_pattern (t, PATTERN (x), verbose);
#ifdef INSN_SCHEDULING
      /* When scheduling, let the scheduler's own printer prefix the insn.  */
      if (verbose && current_sched_info)
	sprintf (buf, "%s: %s", (*current_sched_info->print_insn) (x, 1), t);
      else
#endif
	sprintf (buf, " %4d %s", INSN_UID (x), t);
      break;
    case JUMP_INSN:
      print_pattern (t, PATTERN (x), verbose);
#ifdef INSN_SCHEDULING
      if (verbose && current_sched_info)
	sprintf (buf, "%s: jump %s", (*current_sched_info->print_insn) (x, 1), t);
      else
#endif
	sprintf (buf, " %4d %s", INSN_UID (x), t);
      break;
    case CALL_INSN:
      x = PATTERN (insn);
      /* A call wrapped in a PARALLEL: print its first element (the call
	 itself); otherwise don't try to decode the body.  */
      if (GET_CODE (x) == PARALLEL)
	{
	  x = XVECEXP (x, 0, 0);
	  print_pattern (t, x, verbose);
	}
      else
	strcpy (t, "call <...>");
#ifdef INSN_SCHEDULING
      if (verbose && current_sched_info)
	sprintf (buf, "%s: %s", (*current_sched_info->print_insn) (insn, 1), t);
      else
#endif
	sprintf (buf, " %4d %s", INSN_UID (insn), t);
      break;
    case CODE_LABEL:
      sprintf (buf, "L%d:", INSN_UID (x));
      break;
    case BARRIER:
      sprintf (buf, "i%4d: barrier", INSN_UID (x));
      break;
    case NOTE:
      sprintf (buf, " %4d %s", INSN_UID (x), GET_NOTE_INSN_NAME (NOTE_KIND (x)));
      break;
    default:
      sprintf (buf, "i%4d <What %s?>", INSN_UID (x), GET_RTX_NAME (GET_CODE (x)));
    }
} /* print_insn */
/* Return non-zero iff the consumer (a multiply-accumulate or a
   multiple-subtract instruction) has an accumulator dependency on the
   result of the producer and no other dependency on that result.  It does
   not check if the producer is multiply-accumulate instruction. */
int
arm_mac_accumulator_is_result (rtx producer, rtx consumer)
{
  rtx result;
  rtx op0, op1, acc;

  producer = PATTERN (producer);
  consumer = PATTERN (consumer);

  /* Strip any conditional-execution wrapper.  */
  if (GET_CODE (producer) == COND_EXEC)
    producer = COND_EXEC_CODE (producer);
  if (GET_CODE (consumer) == COND_EXEC)
    consumer = COND_EXEC_CODE (consumer);

  if (GET_CODE (producer) != SET)
    return 0;

  result = XEXP (producer, 0);

  if (GET_CODE (consumer) != SET)
    return 0;

  /* Check that the consumer is of the form
     (set (...) (plus (mult ...) (...)))
     or
     (set (...) (minus (...) (mult ...))). */
  if (GET_CODE (XEXP (consumer, 1)) == PLUS)
    {
      if (GET_CODE (XEXP (XEXP (consumer, 1), 0)) != MULT)
	return 0;

      op0 = XEXP (XEXP (XEXP (consumer, 1), 0), 0);
      op1 = XEXP (XEXP (XEXP (consumer, 1), 0), 1);
      acc = XEXP (XEXP (consumer, 1), 1);
    }
  else if (GET_CODE (XEXP (consumer, 1)) == MINUS)
    {
      if (GET_CODE (XEXP (XEXP (consumer, 1), 1)) != MULT)
	return 0;

      op0 = XEXP (XEXP (XEXP (consumer, 1), 1), 0);
      op1 = XEXP (XEXP (XEXP (consumer, 1), 1), 1);
      acc = XEXP (XEXP (consumer, 1), 0);
    }
  else
    return 0;

  /* The producer's result must be the accumulator and nothing else.  */
  return (reg_overlap_mentioned_p (result, acc)
	  && !reg_overlap_mentioned_p (result, op0)
	  && !reg_overlap_mentioned_p (result, op1));
}
/* Return 1 if any insn in the sequence SEQ mentions ADDRESSOF, either in
   its pattern or in its REG_NOTES.  Sequences attached to a
   CALL_PLACEHOLDER insn are searched recursively.  Return 0 otherwise.  */
static int
sequence_uses_addressof (rtx seq)
{
  rtx insn;

  for (insn = seq; insn; insn = NEXT_INSN (insn))
    {
      if (!INSN_P (insn))
	continue;

      if (GET_CODE (insn) == CALL_INSN
	  && GET_CODE (PATTERN (insn)) == CALL_PLACEHOLDER)
	{
	  /* Recursively scan each nonempty sequence attached to the
	     CALL_PLACEHOLDER (operands 0, 1 and 2).  */
	  int i;

	  for (i = 0; i < 3; i++)
	    {
	      rtx sub = XEXP (PATTERN (insn), i);

	      if (sub != NULL_RTX && sequence_uses_addressof (sub))
		return 1;
	    }
	}
      else if (uses_addressof (PATTERN (insn))
	       || (REG_NOTES (insn) && uses_addressof (REG_NOTES (insn))))
	return 1;
    }

  return 0;
}
/* If INSN references memory, store the MEM in *MEM, record in *WRITE
   whether it is written, and return true.  Return false when INSN has no
   memory reference.  */
static bool
find_mem_reference (rtx insn, rtx *mem, int *write)
{
  *mem = NULL_RTX;
  /* find_mem_reference_1 (defined elsewhere in this file) records a MEM
     found in the pattern into *mem.  */
  for_each_rtx (&PATTERN (insn), find_mem_reference_1, mem);

  if (!*mem)
    return false;

  /* fmr2_write is a file-level flag; find_mem_reference_2 presumably
     sets it when the found MEM is a store destination — see its
     definition.  */
  fmr2_write = false;
  note_stores (PATTERN (insn), find_mem_reference_2, *mem);
  *write = fmr2_write;
  return true;
}
/* Walk all active insns and re-emit lexical block (scope) notes so that
   they match each insn's recorded scope, then rebuild the block tree.  */
void
reemit_insn_block_notes (void)
{
  tree cur_block = DECL_INITIAL (cfun->decl);
  rtx insn, note;

  insn = get_insns ();
  if (!active_insn_p (insn))
    insn = next_active_insn (insn);
  for (; insn; insn = next_active_insn (insn))
    {
      tree this_block;

      /* Avoid putting scope notes between jump table and its label. */
      if (JUMP_P (insn)
	  && (GET_CODE (PATTERN (insn)) == ADDR_VEC
	      || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
	continue;

      this_block = insn_scope (insn);
      /* For sequences compute scope resulting from merging all scopes of
	 instructions nested inside. */
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  int i;
	  rtx body = PATTERN (insn);

	  this_block = NULL;
	  for (i = 0; i < XVECLEN (body, 0); i++)
	    this_block = choose_inner_scope (this_block,
					     insn_scope (XVECEXP (body, 0, i)));
	}
      if (! this_block)
	continue;

      /* Emit the notes needed to transition between scopes.  */
      if (this_block != cur_block)
	{
	  change_scope (insn, cur_block, this_block);
	  cur_block = this_block;
	}
    }

  /* change_scope emits before the insn, not after. */
  note = emit_note (NOTE_INSN_DELETED);
  change_scope (note, cur_block, DECL_INITIAL (cfun->decl));
  delete_insn (note);

  reorder_blocks ();
}
/*
 * find all asm level function returns and forcibly set the highest bit of the return address
 */
static unsigned int execute_kernexec_retaddr(void)
{
	rtx insn;

	// 1. find function returns
	for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
		// rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
		//            (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
		//            (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
		rtx body;

		// is it a retn
		if (!JUMP_P(insn))
			continue;
		body = PATTERN(insn);
		// a return may be wrapped in a PARALLEL (e.g. with an unspec);
		// look at its first element
		if (GET_CODE(body) == PARALLEL)
			body = XVECEXP(body, 0, 0);
		if (!ANY_RETURN_P(body))
			continue;
		// 2. instrument the return (see kernexec_instrument_retaddr)
		kernexec_instrument_retaddr(insn);
	}

//	print_simple_rtl(stderr, get_insns());
//	print_rtl(stderr, get_insns());

	return 0;
}
/* If the first real insn after ORIG_INSN is a CODE rtx whose operand is
   the function's return register (or const0_rtx), return that insn;
   otherwise return ORIG_INSN unchanged.  */
static rtx
skip_use_of_return_value (rtx orig_insn, enum rtx_code code)
{
  rtx next = next_nonnote_insn (orig_insn);

  if (!next || GET_CODE (next) != INSN)
    return orig_insn;
  if (GET_CODE (PATTERN (next)) != code)
    return orig_insn;

  /* The operand must be the current function's return rtx, or the
     const0_rtx placeholder.  */
  if (XEXP (PATTERN (next), 0) == current_function_return_rtx
      || XEXP (PATTERN (next), 0) == const0_rtx)
    return next;

  return orig_insn;
}
/* Function for initialization of elimination once per function. It sets up sp offset for each insn. */ static void init_elimination (void) { bool stop_to_sp_elimination_p; basic_block bb; rtx_insn *insn; struct lra_elim_table *ep; init_elim_table (); FOR_EACH_BB_FN (bb, cfun) { curr_sp_change = 0; stop_to_sp_elimination_p = false; FOR_BB_INSNS (bb, insn) if (INSN_P (insn)) { lra_get_insn_recog_data (insn)->sp_offset = curr_sp_change; if (NONDEBUG_INSN_P (insn)) { mark_not_eliminable (PATTERN (insn), VOIDmode); if (curr_sp_change != 0 && find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) stop_to_sp_elimination_p = true; } } if (! frame_pointer_needed && (curr_sp_change != 0 || stop_to_sp_elimination_p) && bb->succs && bb->succs->length () != 0) for (ep = reg_eliminate; ep < ®_eliminate[NUM_ELIMINABLE_REGS]; ep++) if (ep->to == STACK_POINTER_REGNUM) setup_can_eliminate (ep, false); }
/* Dump insn INSN honoring FLAGS.  FLAGS is a bitmask of DUMP_INSN_RTX_*
   selectors; -1 selects everything.  */
void
dump_insn_rtx_1 (rtx insn, int flags)
{
  int all;

  /* flags == -1 also means dumping all.
     BUG FIX: the previous code computed "all = (flags & 1);" which tested
     only bit 0 (DUMP_INSN_RTX_UID), escalating any odd flags value to a
     full dump, contradicting the comment above; it also had a stray empty
     statement.  */
  all = (flags == -1);
  if (all)
    flags |= DUMP_INSN_RTX_ALL;

  sel_print ("(");

  if (flags & DUMP_INSN_RTX_UID)
    sel_print ("%d;", INSN_UID (insn));

  if (flags & DUMP_INSN_RTX_PATTERN)
    sel_print ("%s;", str_pattern_slim (PATTERN (insn)));

  if (flags & DUMP_INSN_RTX_BBN)
    {
      basic_block bb = BLOCK_FOR_INSN (insn);

      /* Insns not yet placed in a basic block print bb:-1.  */
      sel_print ("bb:%d;", bb != NULL ? bb->index : -1);
    }

  sel_print (")");
}
/* Returns nonzero if INSN reads from memory. */
static bool
mem_read_insn_p (rtx_insn *insn)
{
  /* mem_ref_p is a file-level flag, presumably set by mark_mem_use when
     it finds a MEM among the insn's uses — see its definition elsewhere
     in this file.  */
  mem_ref_p = false;
  note_uses (&PATTERN (insn), mark_mem_use, NULL);
  return mem_ref_p;
}
/* Figure the location of the given INSN. */
static location_t
location_for_asm (const rtx_insn *insn)
{
  rtx body = PATTERN (insn);
  rtx asmop;
  location_t loc;

  /* Find the (or one of the) ASM_OPERANDS in the insn.  The asm may be a
     bare ASM_OPERANDS, a single SET, or a PARALLEL whose first element
     is either of those.  */
  if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
    asmop = SET_SRC (body);
  else if (GET_CODE (body) == ASM_OPERANDS)
    asmop = body;
  else if (GET_CODE (body) == PARALLEL
	   && GET_CODE (XVECEXP (body, 0, 0)) == SET)
    asmop = SET_SRC (XVECEXP (body, 0, 0));
  else if (GET_CODE (body) == PARALLEL
	   && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
    asmop = XVECEXP (body, 0, 0);
  else
    asmop = NULL;

  /* Fall back to the global input_location when no ASM_OPERANDS was
     recognized.  */
  if (asmop)
    loc = ASM_OPERANDS_SOURCE_LOCATION (asmop);
  else
    loc = input_location;

  return loc;
}
/* Return true if the comparison COMPARE (whose EH region note is EH_NOTE)
   is redundant with the earlier comparison described by CMP and can be
   eliminated, possibly after rewriting CMP's insn to a compatible mode.  */
static bool
can_eliminate_compare (rtx compare, rtx eh_note, struct comparison *cmp)
{
  /* Take care that it's in the same EH region. */
  if (cfun->can_throw_non_call_exceptions
      && !rtx_equal_p (eh_note, cmp->eh_note))
    return false;

  /* Make sure the compare is redundant with the previous. */
  if (!rtx_equal_p (XEXP (compare, 0), cmp->in_a)
      || !rtx_equal_p (XEXP (compare, 1), cmp->in_b))
    return false;

  /* New mode must be compatible with the previous compare mode. */
  enum machine_mode new_mode
    = targetm.cc_modes_compatible (GET_MODE (compare), cmp->orig_mode);

  if (new_mode == VOIDmode)
    return false;

  if (cmp->orig_mode != new_mode)
    {
      /* Generate new comparison for substitution. */
      rtx flags = gen_rtx_REG (new_mode, targetm.flags_regnum);
      rtx x = gen_rtx_COMPARE (new_mode, cmp->in_a, cmp->in_b);
      x = gen_rtx_SET (flags, x);

      /* Rewrite the earlier compare in place; bail out if the target
	 does not accept the new form.  */
      if (!validate_change (cmp->insn, &PATTERN (cmp->insn), x, false))
	return false;

      cmp->orig_mode = new_mode;
    }

  return true;
}
/* Return true if INSN is a two-element PARALLEL consisting of a SET to a
   register plus a CLOBBER of the target's flags register — i.e. an
   arithmetic insn that clobbers the flags as a side effect.  Asms are
   rejected.  */
static bool
arithmetic_flags_clobber_p (rtx_insn *insn)
{
  rtx pat, x;

  if (!NONJUMP_INSN_P (insn))
    return false;
  pat = PATTERN (insn);
  /* Asms may clobber the flags too, but we cannot reason about them.  */
  if (extract_asm_operands (pat))
    return false;

  if (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) == 2)
    {
      /* First element: a SET whose destination is a register.  */
      x = XVECEXP (pat, 0, 0);
      if (GET_CODE (x) != SET)
	return false;
      x = SET_DEST (x);
      if (!REG_P (x))
	return false;

      /* Second element: a CLOBBER of the flags register.  */
      x = XVECEXP (pat, 0, 1);
      if (GET_CODE (x) == CLOBBER)
	{
	  x = XEXP (x, 0);
	  if (REG_P (x) && REGNO (x) == targetm.flags_regnum)
	    return true;
	}
    }

  return false;
}
/* Initialize LABEL_NUSES for every label in the insn chain starting at F,
   and remove REG_LABEL_OPERAND notes whose label is no longer mentioned
   in the insn's pattern.  */
static void
init_label_info (rtx f)
{
  rtx insn;

  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      /* A preserved label starts with one implicit use, others with 0.  */
      if (LABEL_P (insn))
	LABEL_NUSES (insn) = (LABEL_PRESERVE_P (insn) != 0);

      /* REG_LABEL_TARGET notes (including the JUMP_LABEL field) are
	 sticky and not reset here; that way we won't lose association
	 with a label when e.g. the source for a target register
	 disappears out of reach for targets that may use jump-target
	 registers.  Jump transformations are supposed to transform any
	 REG_LABEL_TARGET notes.  The target label reference in a branch
	 may disappear from the branch (and from the instruction before
	 it) for other reasons, like register allocation. */
      if (INSN_P (insn))
	{
	  rtx note, next;

	  for (note = REG_NOTES (insn); note; note = next)
	    {
	      /* Fetch the chain link first: remove_note unlinks NOTE.  */
	      next = XEXP (note, 1);
	      if (REG_NOTE_KIND (note) == REG_LABEL_OPERAND
		  && ! reg_mentioned_p (XEXP (note, 0), PATTERN (insn)))
		remove_note (insn, note);
	    }
	}
    }
}
/* Dump to dump_file the UIDs of this node's FMUL/FMAC defining insns
   together with the UIDs of the child FMA nodes that use them as
   accumulator inputs.  FOREST is unused.  */
void fma_node::dump_info (ATTRIBUTE_UNUSED fma_forest *forest)
{
  struct du_chain *chain;
  std::list<fma_node *>::iterator fma_child;

  gcc_assert (dump_file);

  /* Nothing to report for a node without dependent children.  */
  if (this->get_children ()->empty ())
    return;

  fprintf (dump_file, "Instruction(s)");
  for (chain = this->m_head->first; chain; chain = chain->next_use)
    {
      /* Only list FMUL/FMAC insns ...  */
      if (!is_fmul_fmac_insn (chain->insn, true))
	continue;

      /* ... where this chain reference is the insn's destination.  */
      if (chain->loc != &SET_DEST (PATTERN (chain->insn)))
	continue;

      fprintf (dump_file, " %d", INSN_UID (chain->insn));
    }

  fprintf (dump_file, " is(are) accumulator dependency of instructions");
  for (fma_child = this->get_children ()->begin ();
       fma_child != this->get_children ()->end (); fma_child++)
    fprintf (dump_file, " %d", INSN_UID ((*fma_child)->m_insn));
  fprintf (dump_file, "\n");
}
/* If INSN can not be used for rematerialization, return negative value.
   If INSN can be considered as a candidate for rematerialization, return
   value which is the operand number of the pseudo for which the insn can
   be used for rematerialization.  Here we consider the insns without any
   memory, spilled pseudo (except for the rematerialization pseudo), or
   dying or unused regs.  */
static int
operand_to_remat (rtx_insn *insn)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  struct lra_static_insn_data *static_id = id->insn_static_data;
  struct lra_insn_reg *reg, *found_reg = NULL;

  /* First find a pseudo which can be rematerialized.  */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    /* True FRAME_POINTER_NEEDED might be because we can not follow
       changing sp offsets, e.g. alloca is used.  If the insn contains
       stack pointer in such case, we can not rematerialize it as we can
       not know sp offset at a rematerialization place.  */
    if (reg->regno == STACK_POINTER_REGNUM && frame_pointer_needed)
      return -1;
    else if (reg->type == OP_OUT && ! reg->subreg_p
	     && find_regno_note (insn, REG_UNUSED, reg->regno) == NULL)
      {
	/* We permit only one spilled reg.  */
	if (found_reg != NULL)
	  return -1;
	found_reg = reg;
      }
  if (found_reg == NULL)
    return -1;
  /* Only a pseudo can be rematerialized, not a hard register.  */
  if (found_reg->regno < FIRST_PSEUDO_REGISTER)
    return -1;
  if (bad_for_rematerialization_p (PATTERN (insn)))
    return -1;
  /* Check the other regs are not spilled. */
  for (reg = id->regs; reg != NULL; reg = reg->next)
    if (found_reg == reg)
      continue;
    else if (reg->type == OP_INOUT)
      return -1;
    else if (reg->regno >= FIRST_PSEUDO_REGISTER
	     && reg_renumber[reg->regno] < 0)
      /* Another spilled reg.  */
      return -1;
    else if (reg->type == OP_IN)
      {
	if (find_regno_note (insn, REG_DEAD, reg->regno) != NULL)
	  /* We don't want to make live ranges longer.  */
	  return -1;
	/* Check that there is no output reg as the input one.  */
	for (struct lra_insn_reg *reg2 = id->regs;
	     reg2 != NULL;
	     reg2 = reg2->next)
	  if (reg2->type == OP_OUT && reg->regno == reg2->regno)
	    return -1;
      }
  /* Find the rematerialization operand.  */
  int nop = static_id->n_operands;
  for (int i = 0; i < nop; i++)
    if (REG_P (*id->operand_loc[i])
	&& (int) REGNO (*id->operand_loc[i]) == found_reg->regno)
      return i;
  return -1;
}
/* Rename the destination register of this node's insn so that its parity
   matches what FOREST (or this node's parent, when there is one) expects,
   then record the resulting parity in the forest's global balance.  */
void fma_node::rename (fma_forest *forest)
{
  int cur_parity, target_parity;

  /* This is alternate root of a chain and thus has no children.  It will
     be renamed when processing the canonical root for that chain. */
  if (!this->m_head)
    return;

  /* The parent's parity (when present) takes precedence over the
     forest-wide target.  */
  target_parity = forest->get_target_parity ();
  if (this->m_parent)
    target_parity = this->m_parent->get_parity ();
  cur_parity = this->get_parity ();

  /* Rename if parity differs. */
  if (cur_parity != target_parity)
    {
      rtx_insn *insn = this->m_insn;
      HARD_REG_SET unavailable;
      enum machine_mode mode;
      int reg;

      if (dump_file)
	{
	  unsigned cur_dest_reg = this->m_head->regno;

	  fprintf (dump_file, "FMA or FMUL at insn %d but destination "
		   "register (%s) has different parity from expected to "
		   "maximize FPU pipeline utilization\n", INSN_UID (insn),
		   reg_names[cur_dest_reg]);
	}

      /* Don't clobber traceback for noreturn functions. */
      CLEAR_HARD_REG_SET (unavailable);
      if (frame_pointer_needed)
	{
	  add_to_hard_reg_set (&unavailable, Pmode, FRAME_POINTER_REGNUM);
	  add_to_hard_reg_set (&unavailable, Pmode, HARD_FRAME_POINTER_REGNUM);
	}

      /* Exclude registers with wrong parity. */
      mode = GET_MODE (SET_DEST (PATTERN (insn)));
      for (reg = cur_parity; reg < FIRST_PSEUDO_REGISTER; reg += 2)
	add_to_hard_reg_set (&unavailable, mode, reg);

      if (!rename_single_chain (this->m_head, &unavailable))
	{
	  if (dump_file)
	    fprintf (dump_file, "Destination register of insn %d could not be "
		     "renamed. Dependent FMA insns will use this parity from "
		     "there on.\n", INSN_UID (insn));
	}
      else
	cur_parity = target_parity;
    }

  forest->get_globals ()->update_balance (cur_parity);
}
/* Walk backwards from INSN1 and INSN2 in lockstep, counting consecutive
   insns whose patterns are equal.  Store the number of matched insns in
   *LEN and their accumulated rtx cost in *COST.  The walk also stops if
   one chain reaches the other's starting insn.  */
static void
matching_length (rtx insn1, rtx insn2, int* len, int* cost)
{
  rtx a;
  rtx b;

  *len = 0;
  *cost = 0;

  for (a = insn1, b = insn2;
       a && b && (a != insn2) && (b != insn1)
       && rtx_equal_p (PATTERN (a), PATTERN (b));
       a = prev_insn_in_block (a), b = prev_insn_in_block (b))
    {
      ++*len;
      *cost += compute_rtx_cost (a);
    }
}
/* Widen the conditional move DEF_INSN, which feeds extension candidate
   CAND, so that it operates in CAND's mode.  Return true on success, or
   if the conditional move already has that mode or wider; return false
   if the rewritten insn is not accepted.  */
static bool
transform_ifelse (ext_cand *cand, rtx def_insn)
{
  rtx set_insn = PATTERN (def_insn);
  rtx srcreg, dstreg, srcreg2;
  rtx map_srcreg, map_dstreg, map_srcreg2;
  rtx ifexpr;
  rtx cond;
  rtx new_set;

  gcc_assert (GET_CODE (set_insn) == SET);

  /* Decompose (set dst (if_then_else cond src src2)).  */
  cond = XEXP (SET_SRC (set_insn), 0);
  dstreg = SET_DEST (set_insn);
  srcreg = XEXP (SET_SRC (set_insn), 1);
  srcreg2 = XEXP (SET_SRC (set_insn), 2);

  /* If the conditional move already has the right or wider mode, there
     is nothing to do. */
  if (GET_MODE_SIZE (GET_MODE (dstreg)) >= GET_MODE_SIZE (cand->mode))
    return true;

  /* Rebuild the same registers in the candidate's wider mode.  */
  map_srcreg = gen_rtx_REG (cand->mode, REGNO (srcreg));
  map_srcreg2 = gen_rtx_REG (cand->mode, REGNO (srcreg2));
  map_dstreg = gen_rtx_REG (cand->mode, REGNO (dstreg));
  ifexpr = gen_rtx_IF_THEN_ELSE (cand->mode, cond, map_srcreg, map_srcreg2);
  new_set = gen_rtx_SET (VOIDmode, map_dstreg, ifexpr);

  if (validate_change (def_insn, &PATTERN (def_insn), new_set, true)
      && update_reg_equal_equiv_notes (def_insn, cand->mode,
				       GET_MODE (dstreg), cand->code))
    {
      if (dump_file)
	{
	  fprintf (dump_file,
		   "Mode of conditional move instruction extended:\n");
	  print_rtl_single (dump_file, def_insn);
	}
      return true;
    }

  return false;
}
/* Install the extended string-interpolation syntax into namespace NS:
   the parse function for text literals is replaced with
   Expression_ExtendedTextLiteral, while the type function is borrowed
   from the default Text syntax.  Always returns true.  */
static kbool_t StringInterpolation_PackupNameSpace(KonohaContext *kctx, kNameSpace *ns, int option, KTraceInfo *trace)
{
/* NOTE(review): this macro shadows any outer PATTERN and is not
   #undef'd after use.  */
#define PATTERN(X) KSymbol_##X##Pattern
	kSyntax *textSyntax = kSyntax_(KNULL(NameSpace), PATTERN(Text));
	KDEFINE_SYNTAX SYNTAX[] = {
		{ KSymbol_TextPattern, SYNFLAG_CParseFunc, 0, 0,
		  {SUGARFUNC Expression_ExtendedTextLiteral},
		  {textSyntax->TypeFuncNULL}, },
		{ KSymbol_END, },  /* terminator */
	};
	SUGAR kNameSpace_DefineSyntax(kctx, ns, SYNTAX, trace);
	return true;
}
/* Given a CALL_PLACEHOLDER insn INSN, emit the insn sequence selected by
   USE (normal call, sibcall, or tail recursion) in front of the
   placeholder, then delete the placeholder itself.  */
void
replace_call_placeholder (rtx insn, sibcall_use_t use)
{
  switch (use)
    {
    case sibcall_use_tail_recursion:
      emit_insn_before (XEXP (PATTERN (insn), 2), insn);
      break;
    case sibcall_use_sibcall:
      emit_insn_before (XEXP (PATTERN (insn), 1), insn);
      break;
    case sibcall_use_normal:
      emit_insn_before (XEXP (PATTERN (insn), 0), insn);
      break;
    default:
      abort ();
    }

  /* Turn off LABEL_PRESERVE_P for the tail recursion label if it
     exists.  We only had to set it long enough to keep the jump pass
     above from deleting it as unused.  */
  if (XEXP (PATTERN (insn), 3))
    LABEL_PRESERVE_P (XEXP (PATTERN (insn), 3)) = 0;

  /* "Delete" the placeholder insn.  */
  remove_insn (insn);
}
/* Record into CMP the uses of the flags register found in INSN.  Only a
   use we understand — a comparison of the flags against zero — is
   recorded; any other use sets CMP->missing_uses.  */
static void
find_flags_uses_in_insn (struct comparison *cmp, rtx_insn *insn)
{
  df_ref use;

  /* If we've already lost track of uses, don't bother collecting more. */
  if (cmp->missing_uses)
    return;

  /* Find a USE of the flags register. */
  FOR_EACH_INSN_USE (use, insn)
    if (DF_REF_REGNO (use) == targetm.flags_regnum)
      {
	rtx x, *loc;

	/* If this is an unusual use, quit. */
	if (DF_REF_TYPE (use) != DF_REF_REG_USE)
	  goto fail;

	/* If we've run out of slots to record uses, quit. */
	if (cmp->n_uses == MAX_CMP_USE)
	  goto fail;

	/* Unfortunately the location of the flags register, while present
	   in the reference structure, doesn't help.  We need to find the
	   comparison code that is outer to the actual flags use. */
	loc = DF_REF_LOC (use);
	x = PATTERN (insn);
	if (GET_CODE (x) == PARALLEL)
	  x = XVECEXP (x, 0, 0);
	x = SET_SRC (x);
	if (GET_CODE (x) == IF_THEN_ELSE)
	  x = XEXP (x, 0);
	if (COMPARISON_P (x)
	    && loc == &XEXP (x, 0)
	    && XEXP (x, 1) == const0_rtx)
	  {
	    /* We've found a use of the flags that we understand. */
	    struct comparison_use *cuse = &cmp->uses[cmp->n_uses++];

	    cuse->insn = insn;
	    cuse->loc = loc;
	    cuse->code = GET_CODE (x);
	  }
	else
	  goto fail;
      }
  return;

 fail:
  /* We failed to recognize this use of the flags register. */
  cmp->missing_uses = true;
}
static void kill_autoinc_value (rtx_insn *insn, struct value_data *vd) { subrtx_iterator::array_type array; FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST) { const_rtx x = *iter; if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC) { x = XEXP (x, 0); kill_value (x, vd); set_value_regno (REGNO (x), GET_MODE (x), vd); iter.skip_subrtxes (); } }
static void mark_all_labels (rtx f) { rtx insn; if (current_ir_type () == IR_RTL_CFGLAYOUT) { basic_block bb; FOR_EACH_BB (bb) { /* In cfglayout mode, we don't bother with trivial next-insn propagation of LABEL_REFs into JUMP_LABEL. This will be handled by other optimizers using better algorithms. */ FOR_BB_INSNS (bb, insn) { gcc_assert (! INSN_DELETED_P (insn)); if (NONDEBUG_INSN_P (insn)) mark_jump_label (PATTERN (insn), insn, 0); } /* In cfglayout mode, there may be non-insns between the basic blocks. If those non-insns represent tablejump data, they contain label references that we must record. */ for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { gcc_assert (JUMP_TABLE_DATA_P (insn)); mark_jump_label (PATTERN (insn), insn, 0); } for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn)) if (INSN_P (insn)) { gcc_assert (JUMP_TABLE_DATA_P (insn)); mark_jump_label (PATTERN (insn), insn, 0); } }