static void
stack_adjust (HOST_WIDE_INT amount)
{
  rtx insn;

  if (!IN_RANGE (amount, -32776, 32768))
    {
      /* r10 is caller saved so it can be used as a temp reg.  */
      rtx r10;

      r10 = gen_rtx_REG (word_mode, 10);
      insn = emit_move_insn (r10, GEN_INT (amount));
      if (amount < 0)
        RTX_FRAME_RELATED_P (insn) = 1;
      insn = emit_add (stack_pointer_rtx, stack_pointer_rtx, r10);
      if (amount < 0)
        RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      insn = emit_add (stack_pointer_rtx,
                       stack_pointer_rtx, GEN_INT (amount));
      if (amount < 0)
        RTX_FRAME_RELATED_P (insn) = 1;
    }
}
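/* Usage sketch: lm32_expand_prologue below calls
   stack_adjust (-current_frame_info.total_size) to allocate the frame,
   which is why only the amount < 0 adjustments are marked frame-related
   for CFI; a matching epilogue would presumably pass the positive
   total_size to release the frame without emitting CFI notes.  */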
FilterBuilder::FilterBuilder(QWidget *parent) : QWidget(parent)
{
    filterCombo = new QComboBox;
    filterCombo->setObjectName("filterCombo");
    for (int i = 0; i < CardFilter::AttrEnd; i++)
        filterCombo->addItem(
            tr(CardFilter::attrName(static_cast<CardFilter::Attr>(i))),
            QVariant(i)
        );

    typeCombo = new QComboBox;
    typeCombo->setObjectName("typeCombo");
    for (int i = 0; i < CardFilter::TypeEnd; i++)
        typeCombo->addItem(
            tr(CardFilter::typeName(static_cast<CardFilter::Type>(i))),
            QVariant(i)
        );

    QPushButton *ok = new QPushButton(QPixmap("theme:icons/increment"), QString());
    ok->setObjectName("ok");
    ok->setMaximumSize(20, 20);

    edit = new QLineEdit;
    edit->setObjectName("edit");
    edit->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Fixed);

    QGridLayout *layout = new QGridLayout;
    layout->setObjectName("layout");
    layout->setContentsMargins(0, 0, 0, 0);
    layout->addWidget(typeCombo, 0, 0, 1, 2);
    layout->addWidget(filterCombo, 0, 2, 1, 2);
    layout->addWidget(edit, 1, 0, 1, 3);
    layout->addWidget(ok, 1, 3);

    setLayout(layout);

    connect(edit, SIGNAL(returnPressed()), this, SLOT(emit_add()));
    connect(ok, SIGNAL(released()), this, SLOT(emit_add()));

    fltr = NULL;
}
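// Usage sketch (hypothetical caller; the signal raised by the emit_add()
// slot is not shown here and is an assumption): FilterBuilder is a
// self-contained QWidget, so a parent view can simply embed it in its own
// layout and react when a filter has been entered, e.g.:
//
//     FilterBuilder *builder = new FilterBuilder(this);
//     someLayout->addWidget(builder);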
/* Generate and emit RTL to save or restore callee save registers.  */
static void
expand_save_restore (struct lm32_frame_info *info, int op)
{
  unsigned int reg_save_mask = info->reg_save_mask;
  int regno;
  HOST_WIDE_INT offset;
  rtx insn;

  /* Callee saves are below locals and above outgoing arguments.  */
  offset = info->args_size + info->callee_size;
  for (regno = 0; regno <= 31; regno++)
    {
      if ((reg_save_mask & (1 << regno)) != 0)
        {
          rtx offset_rtx;
          rtx mem;

          offset_rtx = GEN_INT (offset);
          if (satisfies_constraint_K (offset_rtx))
            {
              mem = gen_rtx_MEM (word_mode,
                                 gen_rtx_PLUS (Pmode,
                                               stack_pointer_rtx,
                                               offset_rtx));
            }
          else
            {
              /* r10 is caller saved so it can be used as a temp reg.  */
              rtx r10;

              r10 = gen_rtx_REG (word_mode, 10);
              insn = emit_move_insn (r10, offset_rtx);
              if (op == 0)
                RTX_FRAME_RELATED_P (insn) = 1;
              insn = emit_add (r10, r10, stack_pointer_rtx);
              if (op == 0)
                RTX_FRAME_RELATED_P (insn) = 1;
              mem = gen_rtx_MEM (word_mode, r10);
            }

          if (op == 0)
            insn = emit_move_insn (mem, gen_rtx_REG (word_mode, regno));
          else
            insn = emit_move_insn (gen_rtx_REG (word_mode, regno), mem);

          /* Only prologue instructions which set the sp or fp, or save a
             register, should be marked as frame related.  */
          if (op == 0)
            RTX_FRAME_RELATED_P (insn) = 1;
          offset -= UNITS_PER_WORD;
        }
    }
}
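/* Note (editorial sketch): satisfies_constraint_K presumably corresponds to
   the signed 16-bit displacement accepted by lm32 register+offset loads and
   stores; when the offset does not fit, it is materialised in r10 and the
   save/restore goes through a register-indirect MEM instead.  */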
/* Create and emit instructions for a function's prologue.  */
void
lm32_expand_prologue (void)
{
  rtx insn;

  lm32_compute_frame_size (get_frame_size ());

  if (current_frame_info.total_size > 0)
    {
      /* Add space on stack for the new frame.  */
      stack_adjust (-current_frame_info.total_size);

      /* Save callee save registers.  */
      if (current_frame_info.reg_save_mask != 0)
        expand_save_restore (&current_frame_info, 0);

      /* Setup frame pointer if it's needed.  */
      if (frame_pointer_needed == 1)
        {
          /* Move sp to fp.  */
          insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;

          /* Add offset - Don't use total_size, as that includes
             pretend_size, which isn't part of this frame?  */
          insn = emit_add (frame_pointer_rtx,
                           frame_pointer_rtx,
                           GEN_INT (current_frame_info.args_size
                                    + current_frame_info.callee_size
                                    + current_frame_info.locals_size));
          RTX_FRAME_RELATED_P (insn) = 1;
        }

      /* Prevent prologue from being scheduled into function body.  */
      emit_insn (gen_blockage ());
    }
}
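/* Frame layout implied by the offsets above (a descriptive sketch derived
   from stack_adjust and expand_save_restore, stack growing downward):

       incoming/pretend args            <- fp (when frame_pointer_needed)
       locals        (locals_size)
       callee saves  (callee_size)      saved from sp + args_size + callee_size
                                        downward, one word per register
       outgoing args (args_size)        <- sp after the prologue

   fp, when used, is set to sp + args_size + callee_size + locals_size.  */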
void bpf_jit_compile(struct bpf_prog *fp)
{
    unsigned int cleanup_addr, proglen, oldproglen = 0;
    u32 temp[8], *prog, *func, seen = 0, pass;
    const struct sock_filter *filter = fp->insns;
    int i, flen = fp->len, pc_ret0 = -1;
    unsigned int *addrs;
    void *image;

    if (!bpf_jit_enable)
        return;

    addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
    if (addrs == NULL)
        return;

    /* Before first pass, make a rough estimation of addrs[]
     * each bpf instruction is translated to less than 64 bytes
     */
    for (proglen = 0, i = 0; i < flen; i++) {
        proglen += 64;
        addrs[i] = proglen;
    }
    cleanup_addr = proglen; /* epilogue address */
    image = NULL;
    for (pass = 0; pass < 10; pass++) {
        u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

        /* no prologue/epilogue for trivial filters (RET something) */
        proglen = 0;
        prog = temp;

        /* Prologue */
        if (seen_or_pass0) {
            if (seen_or_pass0 & SEEN_MEM) {
                unsigned int sz = BASE_STACKFRAME;

                sz += BPF_MEMWORDS * sizeof(u32);
                emit_alloc_stack(sz);
            }

            /* Make sure we don't leak kernel memory. */
            if (seen_or_pass0 & SEEN_XREG)
                emit_clear(r_X);

            /* If this filter needs to access skb data,
             * load %o4 and %o5 with:
             *  %o4 = skb->len - skb->data_len
             *  %o5 = skb->data
             * And also back up %o7 into r_saved_O7 so we can
             * invoke the stubs using 'call'.
             */
            if (seen_or_pass0 & SEEN_DATAREF) {
                emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
                emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
                emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
                emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
            }
        }
        emit_reg_move(O7, r_saved_O7);

        /* Make sure we don't leak kernel information to the user. */
        if (bpf_needs_clear_a(&filter[0]))
            emit_clear(r_A); /* A = 0 */

        for (i = 0; i < flen; i++) {
            unsigned int K = filter[i].k;
            unsigned int t_offset;
            unsigned int f_offset;
            u32 t_op, f_op;
            u16 code = bpf_anc_helper(&filter[i]);
            int ilen;

            switch (code) {
            case BPF_ALU | BPF_ADD | BPF_X:  /* A += X; */
                emit_alu_X(ADD);
                break;
            case BPF_ALU | BPF_ADD | BPF_K:  /* A += K; */
                emit_alu_K(ADD, K);
                break;
            case BPF_ALU | BPF_SUB | BPF_X:  /* A -= X; */
                emit_alu_X(SUB);
                break;
            case BPF_ALU | BPF_SUB | BPF_K:  /* A -= K */
                emit_alu_K(SUB, K);
                break;
            case BPF_ALU | BPF_AND | BPF_X:  /* A &= X */
                emit_alu_X(AND);
                break;
            case BPF_ALU | BPF_AND | BPF_K:  /* A &= K */
                emit_alu_K(AND, K);
                break;
            case BPF_ALU | BPF_OR | BPF_X:   /* A |= X */
                emit_alu_X(OR);
                break;
            case BPF_ALU | BPF_OR | BPF_K:   /* A |= K */
                emit_alu_K(OR, K);
                break;
            case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
            case BPF_ALU | BPF_XOR | BPF_X:
                emit_alu_X(XOR);
                break;
            case BPF_ALU | BPF_XOR | BPF_K:  /* A ^= K */
                emit_alu_K(XOR, K);
                break;
            case BPF_ALU | BPF_LSH | BPF_X:  /* A <<= X */
                emit_alu_X(SLL);
                break;
            case BPF_ALU | BPF_LSH | BPF_K:  /* A <<= K */
                emit_alu_K(SLL, K);
                break;
            case BPF_ALU | BPF_RSH | BPF_X:  /* A >>= X */
                emit_alu_X(SRL);
                break;
            case BPF_ALU | BPF_RSH | BPF_K:  /* A >>= K */
                emit_alu_K(SRL, K);
                break;
            case BPF_ALU | BPF_MUL | BPF_X:  /* A *= X; */
                emit_alu_X(MUL);
                break;
            case BPF_ALU | BPF_MUL | BPF_K:  /* A *= K */
                emit_alu_K(MUL, K);
                break;
            case BPF_ALU | BPF_DIV | BPF_K:  /* A /= K with K != 0 */
                if (K == 1)
                    break;
                emit_write_y(G0);
                /* The Sparc v8 architecture requires
                 * three instructions between a %y
                 * register write and the first use.
                 */
                emit_nop();
                emit_nop();
                emit_nop();
                emit_alu_K(DIV, K);
                break;
            case BPF_ALU | BPF_DIV | BPF_X:  /* A /= X; */
                emit_cmpi(r_X, 0);
                if (pc_ret0 > 0) {
                    t_offset = addrs[pc_ret0 - 1];
                    emit_branch(BE, t_offset + 20);
                    emit_nop(); /* delay slot */
                } else {
                    emit_branch_off(BNE, 16);
                    emit_nop();
                    emit_jump(cleanup_addr + 20);
                    emit_clear(r_A);
                }
                emit_write_y(G0);
                /* The Sparc v8 architecture requires
                 * three instructions between a %y
                 * register write and the first use.
                 */
                emit_nop();
                emit_nop();
                emit_nop();
                emit_alu_X(DIV);
                break;
            case BPF_ALU | BPF_NEG:
                emit_neg();
                break;
            case BPF_RET | BPF_K:
                if (!K) {
                    if (pc_ret0 == -1)
                        pc_ret0 = i;
                    emit_clear(r_A);
                } else {
                    emit_loadimm(K, r_A);
                }
                /* Fallthrough */
            case BPF_RET | BPF_A:
                if (seen_or_pass0) {
                    if (i != flen - 1) {
                        emit_jump(cleanup_addr);
                        emit_nop();
                        break;
                    }
                    if (seen_or_pass0 & SEEN_MEM) {
                        unsigned int sz = BASE_STACKFRAME;

                        sz += BPF_MEMWORDS * sizeof(u32);
                        emit_release_stack(sz);
                    }
                }
                /* jmpl %r_saved_O7 + 8, %g0 */
                emit_jmpl(r_saved_O7, 8, G0);
                emit_reg_move(r_A, O0); /* delay slot */
                break;
            case BPF_MISC | BPF_TAX:
                seen |= SEEN_XREG;
                emit_reg_move(r_A, r_X);
                break;
            case BPF_MISC | BPF_TXA:
                seen |= SEEN_XREG;
                emit_reg_move(r_X, r_A);
                break;
            case BPF_ANC | SKF_AD_CPU:
                emit_load_cpu(r_A);
                break;
            case BPF_ANC | SKF_AD_PROTOCOL:
                emit_skb_load16(protocol, r_A);
                break;
            case BPF_ANC | SKF_AD_PKTTYPE:
                __emit_skb_load8(__pkt_type_offset, r_A);
                emit_andi(r_A, PKT_TYPE_MAX, r_A);
                emit_alu_K(SRL, 5);
                break;
            case BPF_ANC | SKF_AD_IFINDEX:
                emit_skb_loadptr(dev, r_A);
                emit_cmpi(r_A, 0);
                emit_branch(BE_PTR, cleanup_addr + 4);
                emit_nop();
                emit_load32(r_A, struct net_device, ifindex, r_A);
                break;
            case BPF_ANC | SKF_AD_MARK:
                emit_skb_load32(mark, r_A);
                break;
            case BPF_ANC | SKF_AD_QUEUE:
                emit_skb_load16(queue_mapping, r_A);
                break;
            case BPF_ANC | SKF_AD_HATYPE:
                emit_skb_loadptr(dev, r_A);
                emit_cmpi(r_A, 0);
                emit_branch(BE_PTR, cleanup_addr + 4);
                emit_nop();
                emit_load16(r_A, struct net_device, type, r_A);
                break;
            case BPF_ANC | SKF_AD_RXHASH:
                emit_skb_load32(hash, r_A);
                break;
            case BPF_ANC | SKF_AD_VLAN_TAG:
                emit_skb_load16(vlan_tci, r_A);
                break;
            case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                __emit_skb_load8(__pkt_vlan_present_offset, r_A);
                if (PKT_VLAN_PRESENT_BIT)
                    emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
                if (PKT_VLAN_PRESENT_BIT < 7)
                    emit_andi(r_A, 1, r_A);
                break;
            case BPF_LD | BPF_W | BPF_LEN:
                emit_skb_load32(len, r_A);
                break;
            case BPF_LDX | BPF_W | BPF_LEN:
                emit_skb_load32(len, r_X);
                break;
            case BPF_LD | BPF_IMM:
                emit_loadimm(K, r_A);
                break;
            case BPF_LDX | BPF_IMM:
                emit_loadimm(K, r_X);
                break;
            case BPF_LD | BPF_MEM:
                seen |= SEEN_MEM;
                emit_ldmem(K * 4, r_A);
                break;
            case BPF_LDX | BPF_MEM:
                seen |= SEEN_MEM | SEEN_XREG;
                emit_ldmem(K * 4, r_X);
                break;
            case BPF_ST:
                seen |= SEEN_MEM;
                emit_stmem(K * 4, r_A);
                break;
            case BPF_STX:
                seen |= SEEN_MEM | SEEN_XREG;
                emit_stmem(K * 4, r_X);
                break;

#define CHOOSE_LOAD_FUNC(K, func) \
    ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

            case BPF_LD | BPF_W | BPF_ABS:
                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:
                seen |= SEEN_DATAREF;
                emit_loadimm(K, r_OFF);
                emit_call(func);
                break;
            case BPF_LD | BPF_H | BPF_ABS:
                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
                goto common_load;
            case BPF_LD | BPF_B | BPF_ABS:
                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
                goto common_load;
            case BPF_LDX | BPF_B | BPF_MSH:
                func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
                goto common_load;
            case BPF_LD | BPF_W | BPF_IND:
                func = bpf_jit_load_word;
common_load_ind:
                seen |= SEEN_DATAREF | SEEN_XREG;
                if (K) {
                    if (is_simm13(K)) {
                        emit_addi(r_X, K, r_OFF);
                    } else {
                        emit_loadimm(K, r_TMP);
                        emit_add(r_X, r_TMP, r_OFF);
                    }
                } else {
                    emit_reg_move(r_X, r_OFF);
                }
                emit_call(func);
                break;
            case BPF_LD | BPF_H | BPF_IND:
                func = bpf_jit_load_half;
                goto common_load_ind;
            case BPF_LD | BPF_B | BPF_IND:
                func = bpf_jit_load_byte;
                goto common_load_ind;
            case BPF_JMP | BPF_JA:
                emit_jump(addrs[i + K]);
                emit_nop();
                break;

#define COND_SEL(CODE, TOP, FOP) \
            case CODE:           \
                t_op = TOP;      \
                f_op = FOP;      \
                goto cond_branch

            COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
            COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
            COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
            COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
            COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
            COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
            COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
            COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

cond_branch:
                f_offset = addrs[i + filter[i].jf];
                t_offset = addrs[i + filter[i].jt];

                /* same targets, can avoid doing the test :) */
                if (filter[i].jt == filter[i].jf) {
                    emit_jump(t_offset);
                    emit_nop();
                    break;
                }

                switch (code) {
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JEQ | BPF_X:
                    seen |= SEEN_XREG;
                    emit_cmp(r_A, r_X);
                    break;
                case BPF_JMP | BPF_JSET | BPF_X:
                    seen |= SEEN_XREG;
                    emit_btst(r_A, r_X);
                    break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_K:
                    if (is_simm13(K)) {
                        emit_cmpi(r_A, K);
                    } else {
                        emit_loadimm(K, r_TMP);
                        emit_cmp(r_A, r_TMP);
                    }
                    break;
                case BPF_JMP | BPF_JSET | BPF_K:
                    if (is_simm13(K)) {
                        emit_btsti(r_A, K);
                    } else {
                        emit_loadimm(K, r_TMP);
                        emit_btst(r_A, r_TMP);
                    }
                    break;
                }
                if (filter[i].jt != 0) {
                    if (filter[i].jf)
                        t_offset += 8;
                    emit_branch(t_op, t_offset);
                    emit_nop(); /* delay slot */
                    if (filter[i].jf) {
                        emit_jump(f_offset);
                        emit_nop();
                    }
                    break;
                }
                emit_branch(f_op, f_offset);
                emit_nop(); /* delay slot */
                break;

            default:
                /* hmm, too complex filter, give up with jit compiler */
                goto out;
            }
            ilen = (void *) prog - (void *) temp;
            if (image) {
                if (unlikely(proglen + ilen > oldproglen)) {
                    pr_err("bpf_jit_compile fatal error\n");
                    kfree(addrs);
                    module_memfree(image);
                    return;
                }
                memcpy(image + proglen, temp, ilen);
            }
            proglen += ilen;
            addrs[i] = proglen;
            prog = temp;
        }
        /* last bpf instruction is always a RET :
         * use it to give the cleanup instruction(s) addr
         */
        cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
        if (seen_or_pass0 & SEEN_MEM)
            cleanup_addr -= 4; /* add %sp, X, %sp; */

        if (image) {
            if (proglen != oldproglen)
                pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
                       proglen, oldproglen);
            break;
        }
        if (proglen == oldproglen) {
            image = module_alloc(proglen);
            if (!image)
                goto out;
        }
        oldproglen = proglen;
    }

    if (bpf_jit_enable > 1)
        bpf_jit_dump(flen, proglen, pass + 1, image);

    if (image) {
        fp->bpf_func = (void *)image;
        fp->jited = 1;
    }
out:
    kfree(addrs);
    return;
}
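/* Note on the pass loop above (descriptive): addrs[] starts as a
 * 64-bytes-per-insn overestimate, so every branch target exists on pass 0.
 * Each subsequent pass re-emits code against the previous pass's addrs[],
 * which can only shrink offsets; once proglen == oldproglen the layout has
 * converged, module_alloc() provides the final image, and one more pass
 * copies the now-stable instructions into it.
 */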
enum eval_result_type
compile_bytecodes (struct agent_expr *aexpr)
{
  int pc = 0;
  int done = 0;
  unsigned char op, next_op;
  int arg;
  /* This is only used to build 64-bit value for constants.  */
  ULONGEST top;
  struct bytecode_address *aentry, *aentry2;

#define UNHANDLED                                  \
  do                                               \
    {                                              \
      ax_debug ("Cannot compile op 0x%x\n", op);   \
      return expr_eval_unhandled_opcode;           \
    } while (0)

  if (aexpr->length == 0)
    {
      ax_debug ("empty agent expression\n");
      return expr_eval_empty_expression;
    }

  bytecode_address_table = NULL;

  while (!done)
    {
      op = aexpr->bytes[pc];

      ax_debug ("About to compile op 0x%x, pc=%d\n", op, pc);

      /* Record the compiled-code address of the bytecode, for use by
         jump instructions.  */
      aentry = XNEW (struct bytecode_address);
      aentry->pc = pc;
      aentry->address = current_insn_ptr;
      aentry->goto_pc = -1;
      aentry->from_offset = aentry->from_size = 0;
      aentry->next = bytecode_address_table;
      bytecode_address_table = aentry;

      ++pc;
      emit_error = 0;

      switch (op)
        {
        case gdb_agent_op_add:
          emit_add ();
          break;
        case gdb_agent_op_sub:
          emit_sub ();
          break;
        case gdb_agent_op_mul:
          emit_mul ();
          break;
        case gdb_agent_op_div_signed:
          UNHANDLED;
          break;
        case gdb_agent_op_div_unsigned:
          UNHANDLED;
          break;
        case gdb_agent_op_rem_signed:
          UNHANDLED;
          break;
        case gdb_agent_op_rem_unsigned:
          UNHANDLED;
          break;
        case gdb_agent_op_lsh:
          emit_lsh ();
          break;
        case gdb_agent_op_rsh_signed:
          emit_rsh_signed ();
          break;
        case gdb_agent_op_rsh_unsigned:
          emit_rsh_unsigned ();
          break;
        case gdb_agent_op_trace:
          UNHANDLED;
          break;
        case gdb_agent_op_trace_quick:
          UNHANDLED;
          break;
        case gdb_agent_op_log_not:
          emit_log_not ();
          break;
        case gdb_agent_op_bit_and:
          emit_bit_and ();
          break;
        case gdb_agent_op_bit_or:
          emit_bit_or ();
          break;
        case gdb_agent_op_bit_xor:
          emit_bit_xor ();
          break;
        case gdb_agent_op_bit_not:
          emit_bit_not ();
          break;
        case gdb_agent_op_equal:
          next_op = aexpr->bytes[pc];
          if (next_op == gdb_agent_op_if_goto
              && !is_goto_target (aexpr, pc)
              && target_emit_ops ()->emit_eq_goto)
            {
              ax_debug ("Combining equal & if_goto");
              pc += 1;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_eq_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else if (next_op == gdb_agent_op_log_not
                   && (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
                   && !is_goto_target (aexpr, pc + 1)
                   && target_emit_ops ()->emit_ne_goto)
            {
              ax_debug ("Combining equal & log_not & if_goto");
              pc += 2;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_ne_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else
            emit_equal ();
          break;
        case gdb_agent_op_less_signed:
          next_op = aexpr->bytes[pc];
          if (next_op == gdb_agent_op_if_goto
              && !is_goto_target (aexpr, pc))
            {
              ax_debug ("Combining less_signed & if_goto");
              pc += 1;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_lt_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else if (next_op == gdb_agent_op_log_not
                   && !is_goto_target (aexpr, pc)
                   && (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
                   && !is_goto_target (aexpr, pc + 1))
            {
              ax_debug ("Combining less_signed & log_not & if_goto");
              pc += 2;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_ge_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else
            emit_less_signed ();
          break;
        case gdb_agent_op_less_unsigned:
          emit_less_unsigned ();
          break;
        case gdb_agent_op_ext:
          arg = aexpr->bytes[pc++];
          if (arg < (sizeof (LONGEST) * 8))
            emit_ext (arg);
          break;
        case gdb_agent_op_ref8:
          emit_ref (1);
          break;
        case gdb_agent_op_ref16:
          emit_ref (2);
          break;
        case gdb_agent_op_ref32:
          emit_ref (4);
          break;
        case gdb_agent_op_ref64:
          emit_ref (8);
          break;
        case gdb_agent_op_if_goto:
          arg = aexpr->bytes[pc++];
          arg = (arg << 8) + aexpr->bytes[pc++];
          aentry->goto_pc = arg;
          emit_if_goto (&(aentry->from_offset), &(aentry->from_size));
          break;
        case gdb_agent_op_goto:
          arg = aexpr->bytes[pc++];
          arg = (arg << 8) + aexpr->bytes[pc++];
          aentry->goto_pc = arg;
          emit_goto (&(aentry->from_offset), &(aentry->from_size));
          break;
        case gdb_agent_op_const8:
          emit_stack_flush ();
          top = aexpr->bytes[pc++];
          emit_const (top);
          break;
        case gdb_agent_op_const16:
          emit_stack_flush ();
          top = aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          emit_const (top);
          break;
        case gdb_agent_op_const32:
          emit_stack_flush ();
          top = aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          emit_const (top);
          break;
        case gdb_agent_op_const64:
          emit_stack_flush ();
          top = aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          top = (top << 8) + aexpr->bytes[pc++];
          emit_const (top);
          break;
        case gdb_agent_op_reg:
          emit_stack_flush ();
          arg = aexpr->bytes[pc++];
          arg = (arg << 8) + aexpr->bytes[pc++];
          emit_reg (arg);
          break;
        case gdb_agent_op_end:
          ax_debug ("At end of expression\n");
          /* Assume there is one stack element left, and that it is
             cached in "top" where emit_epilogue can get to it.  */
          emit_stack_adjust (1);
          done = 1;
          break;
        case gdb_agent_op_dup:
          /* In our design, dup is equivalent to stack flushing.  */
          emit_stack_flush ();
          break;
        case gdb_agent_op_pop:
          emit_pop ();
          break;
        case gdb_agent_op_zero_ext:
          arg = aexpr->bytes[pc++];
          if (arg < (sizeof (LONGEST) * 8))
            emit_zero_ext (arg);
          break;
        case gdb_agent_op_swap:
          next_op = aexpr->bytes[pc];
          /* Detect greater-than comparison sequences.  */
          if (next_op == gdb_agent_op_less_signed
              && !is_goto_target (aexpr, pc)
              && (aexpr->bytes[pc + 1] == gdb_agent_op_if_goto)
              && !is_goto_target (aexpr, pc + 1))
            {
              ax_debug ("Combining swap & less_signed & if_goto");
              pc += 2;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_gt_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else if (next_op == gdb_agent_op_less_signed
                   && !is_goto_target (aexpr, pc)
                   && (aexpr->bytes[pc + 1] == gdb_agent_op_log_not)
                   && !is_goto_target (aexpr, pc + 1)
                   && (aexpr->bytes[pc + 2] == gdb_agent_op_if_goto)
                   && !is_goto_target (aexpr, pc + 2))
            {
              ax_debug ("Combining swap & less_signed & log_not & if_goto");
              pc += 3;
              aentry->pc = pc;
              arg = aexpr->bytes[pc++];
              arg = (arg << 8) + aexpr->bytes[pc++];
              aentry->goto_pc = arg;
              emit_le_goto (&(aentry->from_offset), &(aentry->from_size));
            }
          else
            emit_swap ();
          break;
        case gdb_agent_op_getv:
          emit_stack_flush ();
          arg = aexpr->bytes[pc++];
          arg = (arg << 8) + aexpr->bytes[pc++];
          emit_int_call_1 (get_get_tsv_func_addr (), arg);
          break;
        case gdb_agent_op_setv:
          arg = aexpr->bytes[pc++];
          arg = (arg << 8) + aexpr->bytes[pc++];
          emit_void_call_2 (get_set_tsv_func_addr (), arg);
          break;
        case gdb_agent_op_tracev:
          UNHANDLED;
          break;
        /* GDB never (currently) generates any of these ops.  */
        case gdb_agent_op_float:
        case gdb_agent_op_ref_float:
        case gdb_agent_op_ref_double:
        case gdb_agent_op_ref_long_double:
        case gdb_agent_op_l_to_d:
        case gdb_agent_op_d_to_l:
        case gdb_agent_op_trace16:
          UNHANDLED;
          break;
        default:
          ax_debug ("Agent expression op 0x%x not recognized\n", op);
          /* Don't struggle on, things will just get worse.  */
          return expr_eval_unrecognized_opcode;
        }

      /* This catches errors that occur in target-specific code
         emission.  */
      if (emit_error)
        {
          ax_debug ("Error %d while emitting code for %s\n",
                    emit_error, gdb_agent_op_name (op));
          return expr_eval_unhandled_opcode;
        }

      ax_debug ("Op %s compiled\n", gdb_agent_op_name (op));
    }

  /* Now fill in real addresses as goto destinations.  */
  for (aentry = bytecode_address_table; aentry; aentry = aentry->next)
    {
      int written = 0;

      if (aentry->goto_pc < 0)
        continue;

      /* Find the location that we are going to, and call back into
         target-specific code to write the actual address or
         displacement.  */
      for (aentry2 = bytecode_address_table; aentry2; aentry2 = aentry2->next)
        {
          if (aentry2->pc == aentry->goto_pc)
            {
              ax_debug ("Want to jump from %s to %s\n",
                        paddress (aentry->address),
                        paddress (aentry2->address));
              write_goto_address (aentry->address + aentry->from_offset,
                                  aentry2->address, aentry->from_size);
              written = 1;
              break;
            }
        }

      /* Error out if we didn't find a destination.  */
      if (!written)
        {
          ax_debug ("Destination of goto %d not found\n", aentry->goto_pc);
          return expr_eval_invalid_goto;
        }
    }

  return expr_eval_no_error;
}
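/* Descriptive note: gotos are resolved in two phases.  While compiling,
   each bytecode gets a bytecode_address entry recording where its code
   starts and, for jumps, the target pc plus the patch site
   (from_offset/from_size) inside the emitted code.  The loop above then
   matches every goto_pc against the recorded pc of its destination and
   calls write_goto_address to patch in the real address.  */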
void emit_nodes(struct node *n, const char *assignto, BOOL force, BOOL inloop)
{
    char *fast = NULL;

    assert(n);
    if (n->next == NULL && n->type != NODE_CALL && !assignto && !force) {
        /* do not emit single node, unless it is a call or we really need it */
        cry("statement with no effect\n");
        return;
    }
    switch (n->type) {
        case NODE_IMM: {
            fast = AS_IMM(n)->value;
            maybe_symbol_forward(fast);
            if (force) {
                cry("constant expression '%s' in conditional\n", fast);
            }
            assert(!assignto);
            break;
        }
        case NODE_LVAL: {
            struct lval_node *p = AS_LVAL(n);
            int loadable = is_loadable(n, TRUE);
            if (loadable < 0) {
                fast = p->name;
            } else if (loadable > 0) {
                make_symbol_used(p->name);
                emit_load_direct(p->name, p->deref);
            } else {
                emit_load_indirect(p->name, p->deref);
            }
            break;
        }
        case NODE_CALL: {
            struct call_node *p = AS_CALL(n);
            struct node *parm;
            int deref0 = 0;
            char *args[MAX_FUNC_ARGS];
            char *func;
            int i;
            BOOL retval = (n->next != NULL) || force || assignto;
            BOOL direct = FALSE;
            for (i = 0, parm = p->parm; parm; parm = parm->next, i++) {
                BOOL r0 = (i == 0 && arch_regparm);
                assert(i < MAX_FUNC_ARGS);
                if (parm->type == NODE_IMM) {
                    args[i] = xstrdup(AS_IMM(parm)->value);
                    maybe_symbol_forward(args[i]);
                } else if (parm->type == NODE_LVAL && is_loadable(parm, r0)) {
                    struct lval_node *q = AS_LVAL(parm);
                    args[i] = xstrdup(q->name);
                    make_symbol_used(args[i]);
                    if (q->deref) {
                        deref0 = 1;
                    }
                } else if (r0 && parm->next == NULL) {
                    args[i] = NULL;
                    direct = TRUE;
                    emit_nodes(parm, NULL, TRUE, inloop);
                } else if (r0 && parm->type == NODE_LVAL) {
                    struct lval_node *q = AS_LVAL(parm);
                    args[i] = create_address_str(q->name);
                    deref0 = 1;
                    if (q->deref) {
                        deref0++;
                    }
                } else {
                    args[i] = new_name("var");
                    emit_nodes(parm, args[i], FALSE, inloop);
                    make_symbol_used(args[i]);
                }
            }
            func = p->func;
            if (retval && (p->attr & ATTRIB_NORETURN)) {
                cry("function '%s' does not return\n", func);
            }
            if (!is_loadable_sym(func)) {
                char *ptr = new_name("ptr");
                emit_load_indirect(func, FALSE);
                add_symbol_forward(ptr, 0);
                emit_store_indirect(ptr);
                func = ptr;
            } else {
                func = xstrdup(func);
            }
            make_symbol_used(func);
            if (!(p->attr & ATTRIB_NORETURN)) {
                if ((p->attr & ATTRIB_STACK) || inloop) {
                    if (!(p->attr & ATTRIB_STACK)) {
                        cry("reserved [[stack]] for '%s' because of loop\n", func);
                    }
                    mark_all_used(PROTECTED);
                } else {
                    mark_all_used(CLOBBERED);
                }
            }
            if (direct) {
                emit_call(func, NULL, 0, deref0, inloop, retval, p->attr);
            } else {
                emit_call(func, args, i, deref0, inloop, retval, p->attr);
            }
            free(func);
            while (--i >= 0) {
                free(args[i]);
            }
            break;
        }
        case NODE_ADD: {
            struct node *term;
            struct node *prev;
            int deref0 = 0;
            char *prev_tmp;
            prev = AS_ADD(n)->list;
            if (prev->type == NODE_IMM) {
                prev_tmp = xstrdup(AS_IMM(prev)->value);
                maybe_symbol_forward(prev_tmp);
            } else if (prev->type == NODE_LVAL && is_loadable(prev, TRUE)) {
                prev_tmp = xstrdup(AS_LVAL(prev)->name);
                make_symbol_used(prev_tmp);
                if (AS_LVAL(prev)->deref) {
                    deref0 = TRUE;
                }
            } else if (prev->type == NODE_LVAL) {
                prev_tmp = create_address_str(AS_LVAL(prev)->name);
                deref0 = 1;
                if (AS_LVAL(prev)->deref) {
                    deref0++;
                }
            } else {
                prev_tmp = new_name("var");
                emit_nodes(prev, prev_tmp, FALSE, inloop);
                make_symbol_used(prev_tmp);
            }
            for (term = prev->next; term; term = term->next) {
                BOOL swap = FALSE;
                char *tmp;
                char *sum = new_name("sum");
                if (term->type == NODE_IMM) {
                    tmp = xstrdup(AS_IMM(term)->value);
                    maybe_symbol_forward(tmp);
                } else if (term->type == NODE_LVAL && is_loadable(term, !deref0)) {
                    tmp = xstrdup(AS_LVAL(term)->name);
                    make_symbol_used(tmp);
                    if (AS_LVAL(term)->deref) {
                        swap = TRUE;
                        deref0 = 1;
                    }
                } else if (term->type == NODE_LVAL && !deref0) {
                    tmp = create_address_str(AS_LVAL(term)->name);
                    deref0 = 1;
                    if (AS_LVAL(term)->deref) {
                        swap = TRUE;
                        deref0++;
                    }
                } else {
                    tmp = new_name("var");
                    emit_nodes(term, tmp, FALSE, inloop);
                    make_symbol_used(tmp);
                }
                emit_add(prev_tmp, tmp, deref0, swap);
                deref0 = 0;
                if (term->next) {
                    emit_store_indirect(sum);
                }
                free(prev_tmp);
                prev_tmp = sum;
                free(tmp);
            }
            free(prev_tmp);
            break;
        }
    }
    if (assignto) {
        add_symbol_forward(assignto, 0);
        emit_store_indirect(assignto);
    } else {
        BOOL loaded = FALSE;
        for (n = n->next; n; n = n->next) {
            BOOL later = FALSE;
            struct lval_node *p = AS_LVAL(n);
            assert(n->type == NODE_LVAL);
            if (fast) {
                if (optimize_imm && !p->deref && !get_symbol(p->name) &&
                    ((p->attr & ATTRIB_CONSTANT) || !inloop)) {
                    emit_fast(p->name, fast);
                    add_symbol_defined(p->name, fast, p->attr);
                    continue;
                }
                if (!loaded) {
                    loaded = TRUE;
                    if (p->deref && !is_loadable_sym(p->name)) {
                        later = TRUE;
                    } else {
                        emit_load_direct(fast, FALSE);
                    }
                }
            }
            if (p->attr & ATTRIB_CONSTANT) {
                cry("useless const for '%s'\n", p->name);
            }
            if (p->deref) {
                /* XXX only addresses (imports/vectors) can be derefed */
                if (!is_loadable_sym(p->name)) {
                    /* XXX ok, this is very very shitty
                     * tip1: store value to tmp_N for each future p->deref at once
                     * tip2: calculate in advance how many derefs we will need and
                     * store pointers before calculating r0 (see above)
                     */
                    char *ptr = new_name("ptr");
                    char *tmp;
                    if (!later) {
                        tmp = emit_save();
                    }
                    emit_load_indirect(p->name, FALSE);
                    emit_store_indirect(ptr);
                    if (!later) {
                        emit_restore(tmp);
                    } else {
                        emit_load_direct(fast, FALSE);
                    }
                    add_symbol_forward(ptr, 0);
                    make_symbol_used(ptr);
                    emit_store_direct(ptr);
                    free(ptr);
                } else {
                    make_symbol_used(p->name);
                    emit_store_direct(p->name);
                }
            } else {
                add_symbol_forward(p->name, p->attr);
                if (try_symbol_extern(p->name)) {
                    die("cannot assign to import address '%s'\n", p->name);
                }
                if (optimize_imm && (try_symbol_attr(p->name) & ATTRIB_CONSTANT)) {
                    die("'%s' was declared constant\n", p->name);
                }
                emit_store_indirect(p->name);
            }
        }
        if (force && fast && !loaded) {
            emit_load_direct(fast, FALSE);
        }
    }
}
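/* Descriptive note: arguments and ADD terms that are neither immediates nor
 * directly loadable lvalues are compiled by recursing into emit_nodes() with
 * a fresh "var"/"sum" temporary as assignto, so every intermediate value is
 * spilled to a named symbol before being combined; deref0 tracks whether one
 * extra dereference is still pending when emit_add()/emit_call() finally
 * runs. */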
static void brw_wm_emit_glsl(struct brw_context *brw, struct brw_wm_compile *c)
{
#define MAX_IFSN 32
#define MAX_LOOP_DEPTH 32
    struct brw_instruction *if_inst[MAX_IFSN], *loop_inst[MAX_LOOP_DEPTH];
    struct brw_instruction *inst0, *inst1;
    int i, if_insn = 0, loop_insn = 0;
    struct brw_compile *p = &c->func;
    struct brw_indirect stack_index = brw_indirect(0, 0);

    c->reg_index = 0;
    prealloc_reg(c);
    brw_set_compression_control(p, BRW_COMPRESSION_NONE);
    brw_MOV(p, get_addr_reg(stack_index), brw_address(c->stack));

    for (i = 0; i < c->nr_fp_insns; i++) {
        struct prog_instruction *inst = &c->prog_instructions[i];
        struct prog_instruction *orig_inst;

        if ((orig_inst = inst->Data) != 0)
            orig_inst->Data = current_insn(p);

        if (inst->CondUpdate)
            brw_set_conditionalmod(p, BRW_CONDITIONAL_NZ);
        else
            brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);

        switch (inst->Opcode) {
        case WM_PIXELXY:   emit_pixel_xy(c, inst); break;
        case WM_DELTAXY:   emit_delta_xy(c, inst); break;
        case WM_PIXELW:    emit_pixel_w(c, inst); break;
        case WM_LINTERP:   emit_linterp(c, inst); break;
        case WM_PINTERP:   emit_pinterp(c, inst); break;
        case WM_CINTERP:   emit_cinterp(c, inst); break;
        case WM_WPOSXY:    emit_wpos_xy(c, inst); break;
        case WM_FB_WRITE:  emit_fb_write(c, inst); break;
        case OPCODE_ABS:   emit_abs(c, inst); break;
        case OPCODE_ADD:   emit_add(c, inst); break;
        case OPCODE_SUB:   emit_sub(c, inst); break;
        case OPCODE_FRC:   emit_frc(c, inst); break;
        case OPCODE_FLR:   emit_flr(c, inst); break;
        case OPCODE_LRP:   emit_lrp(c, inst); break;
        case OPCODE_INT:   emit_int(c, inst); break;
        case OPCODE_MOV:   emit_mov(c, inst); break;
        case OPCODE_DP3:   emit_dp3(c, inst); break;
        case OPCODE_DP4:   emit_dp4(c, inst); break;
        case OPCODE_XPD:   emit_xpd(c, inst); break;
        case OPCODE_DPH:   emit_dph(c, inst); break;
        case OPCODE_RCP:   emit_rcp(c, inst); break;
        case OPCODE_RSQ:   emit_rsq(c, inst); break;
        case OPCODE_SIN:   emit_sin(c, inst); break;
        case OPCODE_COS:   emit_cos(c, inst); break;
        case OPCODE_EX2:   emit_ex2(c, inst); break;
        case OPCODE_LG2:   emit_lg2(c, inst); break;
        case OPCODE_MAX:   emit_max(c, inst); break;
        case OPCODE_MIN:   emit_min(c, inst); break;
        case OPCODE_DDX:   emit_ddx(c, inst); break;
        case OPCODE_DDY:   emit_ddy(c, inst); break;
        case OPCODE_SLT:   emit_slt(c, inst); break;
        case OPCODE_SLE:   emit_sle(c, inst); break;
        case OPCODE_SGT:   emit_sgt(c, inst); break;
        case OPCODE_SGE:   emit_sge(c, inst); break;
        case OPCODE_SEQ:   emit_seq(c, inst); break;
        case OPCODE_SNE:   emit_sne(c, inst); break;
        case OPCODE_MUL:   emit_mul(c, inst); break;
        case OPCODE_POW:   emit_pow(c, inst); break;
        case OPCODE_MAD:   emit_mad(c, inst); break;
        case OPCODE_TEX:   emit_tex(c, inst); break;
        case OPCODE_TXB:   emit_txb(c, inst); break;
        case OPCODE_KIL_NV: emit_kil(c); break;
        case OPCODE_IF:
            assert(if_insn < MAX_IFSN);
            if_inst[if_insn++] = brw_IF(p, BRW_EXECUTE_8);
            break;
        case OPCODE_ELSE:
            if_inst[if_insn - 1] = brw_ELSE(p, if_inst[if_insn - 1]);
            break;
        case OPCODE_ENDIF:
            assert(if_insn > 0);
            brw_ENDIF(p, if_inst[--if_insn]);
            break;
        case OPCODE_BGNSUB:
        case OPCODE_ENDSUB:
            break;
        case OPCODE_CAL:
            brw_push_insn_state(p);
            brw_set_mask_control(p, BRW_MASK_DISABLE);
            brw_set_access_mode(p, BRW_ALIGN_1);
            brw_ADD(p, deref_1ud(stack_index, 0), brw_ip_reg(),
                    brw_imm_d(3 * 16));
            brw_set_access_mode(p, BRW_ALIGN_16);
            brw_ADD(p, get_addr_reg(stack_index),
                    get_addr_reg(stack_index), brw_imm_d(4));
            orig_inst = inst->Data;
            orig_inst->Data = &p->store[p->nr_insn];
            brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1 * 16));
            brw_pop_insn_state(p);
            break;
        case OPCODE_RET:
            brw_push_insn_state(p);
            brw_set_mask_control(p, BRW_MASK_DISABLE);
            brw_ADD(p, get_addr_reg(stack_index),
                    get_addr_reg(stack_index), brw_imm_d(-4));
            brw_set_access_mode(p, BRW_ALIGN_1);
            brw_MOV(p, brw_ip_reg(), deref_1ud(stack_index, 0));
            brw_set_access_mode(p, BRW_ALIGN_16);
            brw_pop_insn_state(p);
            break;
        case OPCODE_BGNLOOP:
            loop_inst[loop_insn++] = brw_DO(p, BRW_EXECUTE_8);
            break;
        case OPCODE_BRK:
            brw_BREAK(p);
            brw_set_predicate_control(p, BRW_PREDICATE_NONE);
            break;
        case OPCODE_CONT:
            brw_CONT(p);
            brw_set_predicate_control(p, BRW_PREDICATE_NONE);
            break;
        case OPCODE_ENDLOOP:
            loop_insn--;
            inst0 = inst1 = brw_WHILE(p, loop_inst[loop_insn]);
            /* patch all the BREAK instructions from last BEGINLOOP */
            while (inst0 > loop_inst[loop_insn]) {
                inst0--;
                if (inst0->header.opcode == BRW_OPCODE_BREAK) {
                    inst0->bits3.if_else.jump_count = inst1 - inst0 + 1;
                    inst0->bits3.if_else.pop_count = 0;
                } else if (inst0->header.opcode == BRW_OPCODE_CONTINUE) {
                    inst0->bits3.if_else.jump_count = inst1 - inst0;
                    inst0->bits3.if_else.pop_count = 0;
                }
            }
            break;
        default:
            _mesa_printf("unsupported IR in fragment shader %d\n",
                         inst->Opcode);
        }

        if (inst->CondUpdate)
            brw_set_predicate_control(p, BRW_PREDICATE_NORMAL);
        else
            brw_set_predicate_control(p, BRW_PREDICATE_NONE);
    }
    post_wm_emit(c);
    for (i = 0; i < c->fp->program.Base.NumInstructions; i++)
        c->fp->program.Base.Instructions[i].Data = NULL;
}
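/* Descriptive note on the control flow above: OPCODE_CAL stores the return
 * IP through stack_index (an address register initialised to c->stack),
 * bumps it by 4, then jumps by adding a (later-patched) offset to
 * brw_ip_reg(); OPCODE_RET pops the saved IP back into the IP register.
 * BREAK/CONT jump counts are unknown when emitted, so they are back-patched
 * by scanning backwards from the WHILE generated at ENDLOOP. */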