void x86_jcc( struct x86_function *p,
	      enum x86_cc cc,
	      int label )
{
   int offset = label - (x86_get_label(p) + 2);
   DUMP_I(cc);

   if (offset < 0) {
      /*assert(p->csr - p->store > -offset);*/
      if (p->csr - p->store <= -offset) {
         /* probably out of memory (using the error_overflow buffer) */
         return;
      }
   }

   if (offset <= 127 && offset >= -128) {
      emit_1ub(p, 0x70 + cc);
      emit_1b(p, (char) offset);
   }
   else {
      offset = label - (x86_get_label(p) + 6);
      emit_2ub(p, 0x0f, 0x80 + cc);
      emit_1i(p, offset);
   }
}
int x86_jmp_forward( struct x86_function *p)
{
   DUMP();
   emit_1ub(p, 0xe9);
   emit_1i(p, 0);
   return x86_get_label(p);
}
/* Always use a 32bit offset for forward jumps:
 */
unsigned char *x86_jcc_forward( struct x86_function *p,
				enum x86_cc cc )
{
   emit_2ub(p, 0x0f, 0x80 + cc);
   emit_1i(p, 0);
   return x86_get_label(p);
}
void x86_jcc( struct x86_function *p,
	      enum x86_cc cc,
	      unsigned char *label )
{
   int offset = label - (x86_get_label(p) + 2);

   if (offset <= 127 && offset >= -128) {
      emit_1ub(p, 0x70 + cc);
      emit_1b(p, (char) offset);
   }
   else {
      offset = label - (x86_get_label(p) + 6);
      emit_2ub(p, 0x0f, 0x80 + cc);
      emit_1i(p, offset);
   }
}
/* Always use a 32bit offset for forward jumps:
 */
int x86_jcc_forward( struct x86_function *p,
		     enum x86_cc cc )
{
   DUMP_I(cc);
   emit_2ub(p, 0x0f, 0x80 + cc);
   emit_1i(p, 0);
   return x86_get_label(p);
}
void x86_jmp( struct x86_function *p, int label)
{
   DUMP_I( label );
   emit_1ub(p, 0xe9);
   emit_1i(p, label - x86_get_label(p) - 4);
}
/* Fixup offset from forward jump:
 */
void x86_fixup_fwd_jump( struct x86_function *p,
			 int fixup )
{
   *(int *)(p->store + fixup - 4) = x86_get_label(p) - fixup;
}
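/* Illustrative sketch (not part of the original file): how the
 * int-based forward-jump halves above are meant to pair up.  The jcc
 * is emitted with a zero rel32 offset, the body follows, and the
 * offset is patched once the landing point is known.
 * example_skip_if_zero() is a hypothetical helper; every call it
 * makes exists elsewhere in this file.
 */
static void example_skip_if_zero( struct x86_function *p,
                                  struct x86_reg reg )
{
   int fixup;

   x86_test(p, reg, reg);
   fixup = x86_jcc_forward(p, cc_E);   /* rel32 is still zero here */

   /* ... emit the code to be skipped when reg == 0 ... */

   x86_fixup_fwd_jump(p, fixup);       /* patch rel32 to land here */
}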
/* This doesn't work once we start reallocating & copying the
 * generated code on buffer fills, because the call is relative to the
 * current pc.
 */
void x86_call( struct x86_function *p, void (*label)())
{
   emit_1ub(p, 0xe8);
   emit_1i(p, cptr(label) - x86_get_label(p) - 4);
}
void x86_jmp( struct x86_function *p, unsigned char *label)
{
   emit_1ub(p, 0xe9);
   emit_1i(p, label - x86_get_label(p) - 4);
}
/* Fixup offset from forward jump:
 */
void x86_fixup_fwd_jump( struct x86_function *p,
			 unsigned char *fixup )
{
   *(int *)(fixup - 4) = x86_get_label(p) - fixup;
}
unsigned char *x86_call_forward( struct x86_function *p)
{
   emit_1ub(p, 0xe8);
   emit_1i(p, 0);
   return x86_get_label(p);
}
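/* Sketch of the intended pairing for the pointer-based variant above
 * (an assumption inferred from the matching signatures, not taken from
 * the original file): x86_call_forward() leaves a zero rel32 behind,
 * and the pointer flavour of x86_fixup_fwd_jump() patches it later.
 * example_call_here() is a hypothetical helper.
 */
static void example_call_here( struct x86_function *p )
{
   unsigned char *fixup = x86_call_forward(p);  /* rel32 is 0 for now */

   /* ... emit other code ... */

   x86_fixup_fwd_jump(p, fixup);   /* call now targets this address */
}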
/* Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 */
static GLboolean build_vertex_emit( struct x86_program *p )
{
   struct gl_context *ctx = p->ctx;
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint j = 0;

   struct x86_reg vertexEAX = x86_make_reg(file_REG32, reg_AX);
   struct x86_reg srcECX = x86_make_reg(file_REG32, reg_CX);
   struct x86_reg countEBP = x86_make_reg(file_REG32, reg_BP);
   struct x86_reg vtxESI = x86_make_reg(file_REG32, reg_SI);
   struct x86_reg temp = x86_make_reg(file_XMM, 0);
   struct x86_reg vp0 = x86_make_reg(file_XMM, 1);
   struct x86_reg vp1 = x86_make_reg(file_XMM, 2);
   struct x86_reg temp2 = x86_make_reg(file_XMM, 3);
   GLubyte *fixup, *label;

   /* Push a few regs?
    */
   x86_push(&p->func, countEBP);
   x86_push(&p->func, vtxESI);

   /* Get vertex count, compare to zero
    */
   x86_xor(&p->func, srcECX, srcECX);
   x86_mov(&p->func, countEBP, x86_fn_arg(&p->func, 2));
   x86_cmp(&p->func, countEBP, srcECX);
   fixup = x86_jcc_forward(&p->func, cc_E);

   /* Initialize destination register.
    */
   x86_mov(&p->func, vertexEAX, x86_fn_arg(&p->func, 3));

   /* Dereference ctx to get tnl, then vtx:
    */
   x86_mov(&p->func, vtxESI, x86_fn_arg(&p->func, 1));
   x86_mov(&p->func, vtxESI, x86_make_disp(vtxESI, get_offset(ctx, &ctx->swtnl_context)));
   vtxESI = x86_make_disp(vtxESI, get_offset(tnl, &tnl->clipspace));

   /* Possibly load vp0, vp1 for viewport calcs:
    */
   if (vtx->need_viewport) {
      sse_movups(&p->func, vp0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_scale[0])));
      sse_movups(&p->func, vp1, x86_make_disp(vtxESI, get_offset(vtx, &vtx->vp_xlate[0])));
   }

   /* always load, needed or not:
    */
   sse_movups(&p->func, p->chan0, x86_make_disp(vtxESI, get_offset(vtx, &vtx->chan_scale[0])));
   sse_movups(&p->func, p->identity, x86_make_disp(vtxESI, get_offset(vtx, &vtx->identity[0])));

   /* Note address for loop jump
    */
   label = x86_get_label(&p->func);

   /* Emit code for each of the attributes.  Currently routes
    * everything through SSE registers, even when it might be more
    * efficient to stick with regular old x86.  No optimization or
    * other tricks - enough new ground to cover here just getting
    * things working.
    */
   while (j < vtx->attr_count) {
      struct tnl_clipspace_attr *a = &vtx->attr[j];
      struct x86_reg dest = x86_make_disp(vertexEAX, a->vertoffset);

      /* Now, load an XMM reg from src, perhaps transform, then save.
       * Could be shortcircuited in specific cases:
       */
      switch (a->format) {
      case EMIT_1F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 1, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F:
         /* Potentially the worst case - hardcode 2+1 copying:
          */
         if (0) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 3, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 2, temp);
            if (a->inputsize > 2) {
               emit_load(p, temp, 1, x86_make_disp(srcECX, 8), 1);
               emit_store(p, x86_make_disp(dest, 8), 1, temp);
            }
            else {
               sse_movss(&p->func, x86_make_disp(dest, 8), get_identity(p));
            }
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         break;
      case EMIT_4F:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_2F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 2, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 2, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4F_VIEWPORT:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_mulps(&p->func, temp, vp0);
         sse_addps(&p->func, temp, vp1);
         emit_store(p, dest, 4, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_3F_XYW:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(X, Y, W, Z));
         emit_store(p, dest, 3, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_1UB_1F:
         /* Test for PAD3 + 1UB:
          */
         if (j > 0 &&
             a[-1].vertoffset + a[-1].vertattrsize <= a->vertoffset - 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 1, x86_deref(srcECX), a->inputsize);
            sse_shufps(&p->func, temp, temp, SHUF(X, X, X, X));
            emit_pack_store_4ub(p, x86_make_disp(dest, -3), temp); /* overkill! */
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         else {
            printf("Can't emit 1ub %x %x %d\n",
                   a->vertoffset, a[-1].vertoffset, a[-1].vertattrsize);
            return GL_FALSE;
         }
         break;
      case EMIT_3UB_3F_RGB:
      case EMIT_3UB_3F_BGR:
         /* Test for 3UB + PAD1:
          */
         if (j == vtx->attr_count - 1 ||
             a[1].vertoffset >= a->vertoffset + 4) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(Z, Y, X, W));
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
         }
         /* Test for 3UB + 1UB:
          */
         else if (j < vtx->attr_count - 1 &&
                  a[1].format == EMIT_1UB_1F &&
                  a[1].vertoffset == a->vertoffset + 3) {
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 3, x86_deref(srcECX), a->inputsize);
            update_src_ptr(p, srcECX, vtxESI, a);

            /* Make room for incoming value:
             */
            sse_shufps(&p->func, temp, temp, SHUF(W, X, Y, Z));

            get_src_ptr(p, srcECX, vtxESI, &a[1]);
            emit_load(p, temp2, 1, x86_deref(srcECX), a[1].inputsize);
            sse_movss(&p->func, temp, temp2);
            update_src_ptr(p, srcECX, vtxESI, &a[1]);

            /* Rearrange and possibly do BGR conversion:
             */
            if (a->format == EMIT_3UB_3F_BGR)
               sse_shufps(&p->func, temp, temp, SHUF(W, Z, Y, X));
            else
               sse_shufps(&p->func, temp, temp, SHUF(Y, Z, W, X));

            emit_pack_store_4ub(p, dest, temp);
            j++;		/* NOTE: two attrs consumed */
         }
         else {
            printf("Can't emit 3ub\n");
            return GL_FALSE;	/* add this later */
         }
         break;
      case EMIT_4UB_4F_RGBA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_BGRA:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(Z, Y, X, W));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ARGB:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W, X, Y, Z));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4UB_4F_ABGR:
         get_src_ptr(p, srcECX, vtxESI, a);
         emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
         sse_shufps(&p->func, temp, temp, SHUF(W, Z, Y, X));
         emit_pack_store_4ub(p, dest, temp);
         update_src_ptr(p, srcECX, vtxESI, a);
         break;
      case EMIT_4CHAN_4F_RGBA:
         switch (CHAN_TYPE) {
         case GL_UNSIGNED_BYTE:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_pack_store_4ub(p, dest, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_FLOAT:
            get_src_ptr(p, srcECX, vtxESI, a);
            emit_load(p, temp, 4, x86_deref(srcECX), a->inputsize);
            emit_store(p, dest, 4, temp);
            update_src_ptr(p, srcECX, vtxESI, a);
            break;
         case GL_UNSIGNED_SHORT:
         default:
            printf("unknown CHAN_TYPE %s\n",
                   _mesa_lookup_enum_by_nr(CHAN_TYPE));
            return GL_FALSE;
         }
         break;
      default:
         printf("unknown a[%d].format %d\n", j, a->format);
         return GL_FALSE;	/* catch any new opcodes */
      }

      /* Increment j by at least 1 - may have been incremented above also:
       */
      j++;
   }

   /* Next vertex:
    */
   x86_lea(&p->func, vertexEAX, x86_make_disp(vertexEAX, vtx->vertex_size));

   /* decr count, loop if not zero
    */
   x86_dec(&p->func, countEBP);
   x86_test(&p->func, countEBP, countEBP);
   x86_jcc(&p->func, cc_NZ, label);

   /* Exit mmx state?
    */
   if (p->func.need_emms)
      mmx_emms(&p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(&p->func, fixup);

   /* Pop regs and return
    */
   x86_pop(&p->func, x86_get_base_reg(vtxESI));
   x86_pop(&p->func, countEBP);
   x86_ret(&p->func);

   assert(!vtx->emit);
   vtx->emit = (tnl_emit_func)x86_get_func(&p->func);

   assert((char *) p->func.csr - (char *) p->func.store <= MAX_SSE_CODE_SIZE);
   return GL_TRUE;
}
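/* A minimal usage sketch (assumption: the tnl_emit_func signature
 * matches the x86_fn_arg() calls above -- arg 1 = ctx, arg 2 = vertex
 * count, arg 3 = destination buffer).  example_run_emit() is a
 * hypothetical caller, not part of the original file.
 */
static void example_run_emit( struct gl_context *ctx,
                              GLuint count, GLubyte *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   assert(vtx->emit);             /* set by build_vertex_emit() above */
   vtx->emit(ctx, count, dest);   /* run the generated SSE code */
}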