void emit (astree* root) {
   switch (root->symbol) {
      case '=':            emit_assign (root);         break;
      case TOK_VARDECL:    emit_vardecl (root);        break;
      case TOK_NEWSTRING:  emit_alloc_string (root);   break;
      case TOK_NEWARRAY:   emit_alloc_array (root);    break;
      case '+':            emit_binop ("+", root);     break;
      case '-':            emit_binop ("-", root);     break;
      case '*':            emit_binop ("*", root);     break;
      case '/':            emit_binop ("/", root);     break;
      case '.':            emit_field_select (root);   break;
      case '!':            emit_unop ("!", root);      break;
      case TOK_ORD:        emit_unop ("(int)", root);  break;
      case TOK_CHR:        emit_unop ("(char)", root); break;
      case TOK_LT:         emit_binop ("<", root);     break;
      case TOK_GT:         emit_binop (">", root);     break;
      case TOK_LE:         emit_binop ("<=", root);    break;
      case TOK_GE:         emit_binop (">=", root);    break;
      case TOK_EQ:         emit_binop ("==", root);    break;
      case TOK_NE:         emit_binop ("!=", root);    break;
      case TOK_NEG:        emit_unop ("-", root);      break;
      case TOK_POS:        emit_unop ("+", root);      break;
      case TOK_INDEX:      emit_index_select (root);   break;
      case TOK_RETURN:     emit_return (root);         break;
      case TOK_RETURNVOID: emit_return (root);         break;
      case TOK_CALL:       emit_call (root);           break;
      case TOK_WHILE:      emit_while (root);          break;
      case TOK_IFELSE:     emit_ifelse (root);         break;
      case TOK_TRUE:       emit_boolcon (root);        break;
      case TOK_FALSE:      emit_boolcon (root);        break;
      default:                                         break;
   }
}
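/* A minimal sketch of one of the helpers dispatched above, for
 * illustration only.  The real emit_binop lives elsewhere in this
 * compiler; the astree fields used here (children, vreg) and the
 * new_vreg helper are assumptions about its shape, not its actual API,
 * and printf (needing <stdio.h>) stands in for the real output channel.
 */
static void emit_binop (const char* oper, astree* root) {
   astree* left  = root->children[0];   /* hypothetical child accessors */
   astree* right = root->children[1];
   emit (left);                         /* emit operands first ...      */
   emit (right);
   root->vreg = new_vreg (root);        /* ... then a fresh virtual reg */
   printf ("        %s = %s %s %s;\n",
           root->vreg, left->vreg, oper, right->vreg);
}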
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
	image = NULL;
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;

				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *  %o4 = skb->len - skb->data_len
			 *  %o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		/* Make sure we don't leak kernel information to the user. */
		if (bpf_needs_clear_a(&filter[0]))
			emit_clear(r_A); /* A = 0 */

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			u16 code = bpf_anc_helper(&filter[i]);
			int ilen;

			switch (code) {
			case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */
			case BPF_ALU | BPF_XOR | BPF_X:
				emit_alu_X(XOR);
				break;
			case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0 */
				if (K == 1)
					break;
				emit_write_y(G0);
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
				emit_alu_K(DIV, K);
				break;
			case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
					emit_branch(BE, t_offset + 20);
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
					emit_jump(cleanup_addr + 20);
					emit_clear(r_A);
				}
				emit_write_y(G0);
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
				emit_alu_X(DIV);
				break;
			case BPF_ALU | BPF_NEG:
				emit_neg();
				break;
			case BPF_RET | BPF_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_RET | BPF_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;

						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_MISC | BPF_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_MISC | BPF_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_ANC | SKF_AD_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_ANC | SKF_AD_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
			case BPF_ANC | SKF_AD_PKTTYPE:
				__emit_skb_load8(__pkt_type_offset, r_A);
				emit_andi(r_A, PKT_TYPE_MAX, r_A);
				emit_alu_K(SRL, 5);
				break;
			case BPF_ANC | SKF_AD_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_ANC | SKF_AD_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_ANC | SKF_AD_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_ANC | SKF_AD_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_ANC | SKF_AD_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG:
				emit_skb_load16(vlan_tci, r_A);
				break;
			case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
				__emit_skb_load8(__pkt_vlan_present_offset, r_A);
				if (PKT_VLAN_PRESENT_BIT)
					emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
				if (PKT_VLAN_PRESENT_BIT < 7)
					emit_andi(r_A, 1, r_A);
				break;
			case BPF_LD | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_A);
				break;
			case BPF_LDX | BPF_W | BPF_LEN:
				emit_skb_load32(len, r_X);
				break;
			case BPF_LD | BPF_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_LDX | BPF_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_LD | BPF_MEM:
				seen |= SEEN_MEM;
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_LDX | BPF_MEM:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_ST:
				seen |= SEEN_MEM;
				emit_stmem(K * 4, r_A);
				break;
			case BPF_STX:
				seen |= SEEN_MEM | SEEN_XREG;
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

			case BPF_LD | BPF_W | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:
				seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_LD | BPF_B | BPF_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_LDX | BPF_B | BPF_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_LD | BPF_W | BPF_IND:
				func = bpf_jit_load_word;
common_load_ind:
				seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_LD | BPF_H | BPF_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_LD | BPF_B | BPF_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_JMP | BPF_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch

			COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE);
			COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU);
			COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU);
			COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE);
			COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE);

cond_branch:
				f_offset = addrs[i + filter[i].jf];
				t_offset = addrs[i + filter[i].jt];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					emit_jump(t_offset);
					emit_nop();
					break;
				}

				switch (code) {
				case BPF_JMP | BPF_JGT | BPF_X:
				case BPF_JMP | BPF_JGE | BPF_X:
				case BPF_JMP | BPF_JEQ | BPF_X:
					seen |= SEEN_XREG;
					emit_cmp(r_A, r_X);
					break;
				case BPF_JMP | BPF_JSET | BPF_X:
					seen |= SEEN_XREG;
					emit_btst(r_A, r_X);
					break;
				case BPF_JMP | BPF_JEQ | BPF_K:
				case BPF_JMP | BPF_JGT | BPF_K:
				case BPF_JMP | BPF_JGE | BPF_K:
					if (is_simm13(K)) {
						emit_cmpi(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_cmp(r_A, r_TMP);
					}
					break;
				case BPF_JMP | BPF_JSET | BPF_K:
					if (is_simm13(K)) {
						emit_btsti(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_btst(r_A, r_TMP);
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf)
						t_offset += 8;
					emit_branch(t_op, t_offset);
					emit_nop(); /* delay slot */
					if (filter[i].jf) {
						emit_jump(f_offset);
						emit_nop();
					}
					break;
				}
				emit_branch(f_op, f_offset);
				emit_nop(); /* delay slot */
				break;

			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_memfree(image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* last bpf instruction is always a RET:
		 * use it to give the cleanup instruction(s) addr
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass + 1, image);

	if (image) {
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}
out:
	kfree(addrs);
	return;
}
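/* The pass loop above sizes the JIT image by fixed-point iteration:
 * addrs[] starts from a worst-case guess (64 bytes per BPF insn), every
 * pass re-emits against the previous pass's offsets, and the image is
 * only allocated and filled once the total length stops changing.  A
 * stripped-down sketch of that technique; emit_one() is hypothetical and
 * stands in for the real per-instruction encoders, whose output may
 * shrink as branch targets move closer.
 */
static unsigned int emit_one(int i, const unsigned int *addrs);

static unsigned int jit_size_converge(unsigned int *addrs, int flen)
{
	unsigned int proglen = 0, oldproglen = 0;
	int i, pass;

	for (i = 0; i < flen; i++)		/* rough first estimate */
		addrs[i] = (i + 1) * 64;

	for (pass = 0; pass < 10; pass++) {
		proglen = 0;
		for (i = 0; i < flen; i++) {
			proglen += emit_one(i, addrs);
			addrs[i] = proglen;	/* end offset of insn i */
		}
		if (proglen == oldproglen)	/* stable: safe to copy out */
			break;
		oldproglen = proglen;
	}
	return proglen;
}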
void emit_nodes(struct node *n, const char *assignto, BOOL force, BOOL inloop)
{
    char *fast = NULL;

    assert(n);
    if (n->next == NULL && n->type != NODE_CALL && !assignto && !force) {
        /* do not emit single node, unless it is a call or we really need it */
        cry("statement with no effect\n");
        return;
    }
    switch (n->type) {
        case NODE_IMM: {
            fast = AS_IMM(n)->value;
            maybe_symbol_forward(fast);
            if (force) {
                cry("constant expression '%s' in conditional\n", fast);
            }
            assert(!assignto);
            break;
        }
        case NODE_LVAL: {
            struct lval_node *p = AS_LVAL(n);
            int loadable = is_loadable(n, TRUE);
            if (loadable < 0) {
                fast = p->name;
            } else if (loadable > 0) {
                make_symbol_used(p->name);
                emit_load_direct(p->name, p->deref);
            } else {
                emit_load_indirect(p->name, p->deref);
            }
            break;
        }
        case NODE_CALL: {
            struct call_node *p = AS_CALL(n);
            struct node *parm;
            int deref0 = 0;
            char *args[MAX_FUNC_ARGS];
            char *func;
            int i;
            BOOL retval = (n->next != NULL) || force || assignto;
            BOOL direct = FALSE;
            for (i = 0, parm = p->parm; parm; parm = parm->next, i++) {
                BOOL r0 = (i == 0 && arch_regparm);
                assert(i < MAX_FUNC_ARGS);
                if (parm->type == NODE_IMM) {
                    args[i] = xstrdup(AS_IMM(parm)->value);
                    maybe_symbol_forward(args[i]);
                } else if (parm->type == NODE_LVAL && is_loadable(parm, r0)) {
                    struct lval_node *q = AS_LVAL(parm);
                    args[i] = xstrdup(q->name);
                    make_symbol_used(args[i]);
                    if (q->deref) {
                        deref0 = 1;
                    }
                } else if (r0 && parm->next == NULL) {
                    args[i] = NULL;
                    direct = TRUE;
                    emit_nodes(parm, NULL, TRUE, inloop);
                } else if (r0 && parm->type == NODE_LVAL) {
                    struct lval_node *q = AS_LVAL(parm);
                    args[i] = create_address_str(q->name);
                    deref0 = 1;
                    if (q->deref) {
                        deref0++;
                    }
                } else {
                    args[i] = new_name("var");
                    emit_nodes(parm, args[i], FALSE, inloop);
                    make_symbol_used(args[i]);
                }
            }
            func = p->func;
            if (retval && (p->attr & ATTRIB_NORETURN)) {
                cry("function '%s' does not return\n", func);
            }
            if (!is_loadable_sym(func)) {
                char *ptr = new_name("ptr");
                emit_load_indirect(func, FALSE);
                add_symbol_forward(ptr, 0);
                emit_store_indirect(ptr);
                func = ptr;
            } else {
                func = xstrdup(func);
            }
            make_symbol_used(func);
            if (!(p->attr & ATTRIB_NORETURN)) {
                if ((p->attr & ATTRIB_STACK) || inloop) {
                    if (!(p->attr & ATTRIB_STACK)) {
                        cry("reserved [[stack]] for '%s' because of loop\n", func);
                    }
                    mark_all_used(PROTECTED);
                } else {
                    mark_all_used(CLOBBERED);
                }
            }
            if (direct) {
                emit_call(func, NULL, 0, deref0, inloop, retval, p->attr);
            } else {
                emit_call(func, args, i, deref0, inloop, retval, p->attr);
            }
            free(func);
            while (--i >= 0) {
                free(args[i]);
            }
            break;
        }
        case NODE_ADD: {
            struct node *term;
            struct node *prev;
            int deref0 = 0;
            char *prev_tmp;
            prev = AS_ADD(n)->list;
            if (prev->type == NODE_IMM) {
                prev_tmp = xstrdup(AS_IMM(prev)->value);
                maybe_symbol_forward(prev_tmp);
            } else if (prev->type == NODE_LVAL && is_loadable(prev, TRUE)) {
                prev_tmp = xstrdup(AS_LVAL(prev)->name);
                make_symbol_used(prev_tmp);
                if (AS_LVAL(prev)->deref) {
                    deref0 = TRUE;
                }
            } else if (prev->type == NODE_LVAL) {
                prev_tmp = create_address_str(AS_LVAL(prev)->name);
                deref0 = 1;
                if (AS_LVAL(prev)->deref) {
                    deref0++;
                }
            } else {
                prev_tmp = new_name("var");
                emit_nodes(prev, prev_tmp, FALSE, inloop);
                make_symbol_used(prev_tmp);
            }
            for (term = prev->next; term; term = term->next) {
                BOOL swap = FALSE;
                char *tmp;
                char *sum = new_name("sum");
                if (term->type == NODE_IMM) {
                    tmp = xstrdup(AS_IMM(term)->value);
                    maybe_symbol_forward(tmp);
                } else if (term->type == NODE_LVAL && is_loadable(term, !deref0)) {
                    tmp = xstrdup(AS_LVAL(term)->name);
                    make_symbol_used(tmp);
                    if (AS_LVAL(term)->deref) {
                        swap = TRUE;
                        deref0 = 1;
                    }
                } else if (term->type == NODE_LVAL && !deref0) {
                    tmp = create_address_str(AS_LVAL(term)->name);
                    deref0 = 1;
                    if (AS_LVAL(term)->deref) {
                        swap = TRUE;
                        deref0++;
                    }
                } else {
                    tmp = new_name("var");
                    emit_nodes(term, tmp, FALSE, inloop);
                    make_symbol_used(tmp);
                }
                emit_add(prev_tmp, tmp, deref0, swap);
                deref0 = 0;
                if (term->next) {
                    emit_store_indirect(sum);
                }
                free(prev_tmp);
                prev_tmp = sum;
                free(tmp);
            }
            free(prev_tmp);
            break;
        }
    }
    if (assignto) {
        add_symbol_forward(assignto, 0);
        emit_store_indirect(assignto);
    } else {
        BOOL loaded = FALSE;
        for (n = n->next; n; n = n->next) {
            BOOL later = FALSE;
            struct lval_node *p = AS_LVAL(n);
            assert(n->type == NODE_LVAL);
            if (fast) {
                if (optimize_imm && !p->deref && !get_symbol(p->name) &&
                    ((p->attr & ATTRIB_CONSTANT) || !inloop)) {
                    emit_fast(p->name, fast);
                    add_symbol_defined(p->name, fast, p->attr);
                    continue;
                }
                if (!loaded) {
                    loaded = TRUE;
                    if (p->deref && !is_loadable_sym(p->name)) {
                        later = TRUE;
                    } else {
                        emit_load_direct(fast, FALSE);
                    }
                }
            }
            if (p->attr & ATTRIB_CONSTANT) {
                cry("useless const for '%s'\n", p->name);
            }
            if (p->deref) {
                /* XXX only addresses (imports/vectors) can be derefed */
                if (!is_loadable_sym(p->name)) {
                    /* XXX ok, this is very very shitty
                     * tip1: store value to tmp_N for each future p->deref at once
                     * tip2: calculate in advance how many derefs we will need
                     *       and store pointers before calculating r0 (see above)
                     */
                    char *ptr = new_name("ptr");
                    char *tmp;
                    if (!later) {
                        tmp = emit_save();
                    }
                    emit_load_indirect(p->name, FALSE);
                    emit_store_indirect(ptr);
                    if (!later) {
                        emit_restore(tmp);
                    } else {
                        emit_load_direct(fast, FALSE);
                    }
                    add_symbol_forward(ptr, 0);
                    make_symbol_used(ptr);
                    emit_store_direct(ptr);
                    free(ptr);
                } else {
                    make_symbol_used(p->name);
                    emit_store_direct(p->name);
                }
            } else {
                add_symbol_forward(p->name, p->attr);
                if (try_symbol_extern(p->name)) {
                    die("cannot assign to import address '%s'\n", p->name);
                }
                if (optimize_imm && (try_symbol_attr(p->name) & ATTRIB_CONSTANT)) {
                    die("'%s' was declared constant\n", p->name);
                }
                emit_store_indirect(p->name);
            }
        }
        if (force && fast && !loaded) {
            emit_load_direct(fast, FALSE);
        }
    }
}
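/* Reading of the is_loadable() contract implied by the call sites above
 * (an inference from this file, not the helper's own documentation):
 * a negative result marks an lvalue that can be folded like an immediate
 * (the "fast" path), a positive result one that can be loaded directly by
 * name, and zero one that must go through an indirect load.
 */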
MonoPIFunc
mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
{
	unsigned int *p;
	unsigned int *buffer;
	MonoType *param;
	int i, pos;
	int alpharegs;
	int hasthis;
	int STACK_SIZE;
	int BUFFER_SIZE;
	int simple_type;
	int regbase;

	// Set up basic stuff, e.g. whether there is a 'this' argument.
	hasthis = !!sig->hasthis;
	alpharegs = AXP_GENERAL_REGS - hasthis;
	regbase = hasthis ? alpha_a1 : alpha_a0;

	// Make a ballpark estimate for now.
	calculate_size (sig, &BUFFER_SIZE, &STACK_SIZE);

	// Convert from instruction words to bytes.
	BUFFER_SIZE = BUFFER_SIZE * 4;

	// Allocate; bail out if the allocation fails.
	buffer = p = (unsigned int *) malloc (BUFFER_SIZE);
	if (buffer == NULL)
		return NULL;
	memset (buffer, 0, BUFFER_SIZE);

	pos = 8 * (sig->param_count - alpharegs - 1);

	// Ok, start creating this thing.
	p = emit_prolog (p, STACK_SIZE, hasthis);

	// Copy everything into the correct register/stack space.
	for (i = sig->param_count; --i >= 0; ) {
		param = sig->params[i];

		if (param->byref) {
			if (i >= alpharegs) {
				// load into temp register, then store on the stack
				alpha_ldq (p, alpha_t1, alpha_t0, ARG_LOC (i));
				alpha_stq (p, alpha_t1, alpha_sp, pos);
				pos -= 8;
			} else {
				// load into register
				alpha_ldq (p, (regbase + i), alpha_t0, ARG_LOC (i));
			}
		} else {
			simple_type = param->type;
			if (simple_type == MONO_TYPE_VALUETYPE) {
				if (param->data.klass->enumtype)
					simple_type = param->data.klass->enum_basetype->type;
			}

			switch (simple_type) {
			case MONO_TYPE_VOID:
				break;
			case MONO_TYPE_BOOLEAN:
			case MONO_TYPE_CHAR:
			case MONO_TYPE_I1:
			case MONO_TYPE_U1:
			case MONO_TYPE_I2:
			case MONO_TYPE_U2:
			case MONO_TYPE_I4:
			case MONO_TYPE_U4:
				// 4 bytes - need to sign-extend (stackvals are not extended)
				if (i >= alpharegs) {
					// load into temp register, then store on the stack
					alpha_ldl (p, alpha_t1, alpha_t0, ARG_LOC (i));
					alpha_stq (p, alpha_t1, alpha_sp, pos);
					pos -= 8;
				} else {
					// load into register
					alpha_ldl (p, (regbase + i), alpha_t0, ARG_LOC (i));
				}
				break;
			case MONO_TYPE_I:
			case MONO_TYPE_U:
			case MONO_TYPE_PTR:
			case MONO_TYPE_CLASS:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_SZARRAY:
			case MONO_TYPE_STRING:
			case MONO_TYPE_I8:
				// 8 bytes
				if (i >= alpharegs) {
					// load into temp register, then store on the stack
					alpha_ldq (p, alpha_t1, alpha_t0, ARG_LOC (i));
					alpha_stq (p, alpha_t1, alpha_sp, pos);
					pos -= 8;
				} else {
					// load into register
					alpha_ldq (p, (regbase + i), alpha_t0, ARG_LOC (i));
				}
				break;
			case MONO_TYPE_R4:
			case MONO_TYPE_R8:
				/*
				// floating point... Maybe this does the correct thing.
				if (i > alpharegs) {
					alpha_ldq (p, alpha_t1, alpha_t0, ARG_LOC (i));
					alpha_cpys (p, alpha_ft1, alpha_ft1, alpha_ft2);
					alpha_stt (p, alpha_ft2, alpha_sp, pos);
					pos -= 8;
				} else {
					alpha_ldq (p, alpha_t1, alpha_t0, ARG_LOC (i));
					alpha_cpys (p, alpha_ft1, alpha_ft1, alpha_fa0 + i + hasthis);
				}
				break;
				*/
				// Falls through to the ValueType error until the
				// floating-point path above is finished.
			case MONO_TYPE_VALUETYPE:
				g_error ("Not implemented: ValueType as parameter to delegate.");
				break;
			default:
				g_error ("Not implemented: 0x%x.", simple_type);
				break;
			}
		}
	}

	// Now call the function and store the return parameter.
	p = emit_call (p, STACK_SIZE);
	p = emit_store_return_default (p, STACK_SIZE);
	p = emit_epilog (p, STACK_SIZE);

	// BUFFER_SIZE is in bytes while p and buffer are unsigned int *,
	// so compare byte offsets rather than word offsets.
	if ((char *) p > (char *) buffer + BUFFER_SIZE)
		g_error ("Buffer overflow: got 0x%lx, expected <=0x%x.",
			 (long) ((char *) p - (char *) buffer), BUFFER_SIZE);

	/* flush instruction cache to see trampoline code */
	asm volatile ("imb" : : : "memory");

	return (MonoPIFunc) buffer;
}
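/* Usage sketch, not part of this file: interpreter glue would invoke one
 * of these trampolines roughly as below.  The MonoPIFunc argument order
 * shown (target function, return slot, this-pointer, argument array) and
 * the stackval argument type are assumptions about the typedef; check the
 * MonoPIFunc declaration actually in use before relying on this shape.
 */
static void
call_via_trampoline (MonoMethodSignature *sig, void *target,
		     void *obj_this, stackval *args, stackval *ret)
{
	MonoPIFunc tramp = mono_arch_create_trampoline (sig, FALSE);

	tramp (target, ret, obj_this, args);
	/* The trampoline buffer is malloc()ed above and never freed here;
	 * a real caller would cache one trampoline per signature. */
}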