/*
 * setupFrame --
 *
 *    Per-frame pipeline setup: view/world/projection transforms, opaque
 *    depth-tested render states, and a texture stage that passes the
 *    per-vertex diffuse color straight through.
 */
void setupFrame(void)
{
   static Matrix world;
   static Matrix view;
   SVGA3dTextureState *ts;
   SVGA3dRenderState *rs;

   /* Camera: identity, pushed 3 units along +Z. */
   Matrix_Copy(view, gIdentityMatrix);
   Matrix_Translate(view, 0, 0, 3);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_VIEW, view);

   /* Model: tilt -60 degrees about X, spin about Y with the frame counter. */
   Matrix_Copy(world, gIdentityMatrix);
   Matrix_RotateX(world, -60.0 * PI_OVER_180);
   Matrix_RotateY(world, gFPS.frame * 0.01f);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_WORLD, world);

   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_PROJECTION, perspectiveMat);

   /* No blending; standard less-than depth test with Z writes enabled. */
   SVGA3D_BeginSetRenderState(CID, &rs, 4);
   {
      rs[0].state = SVGA3D_RS_BLENDENABLE;
      rs[0].uintValue = FALSE;

      rs[1].state = SVGA3D_RS_ZENABLE;
      rs[1].uintValue = TRUE;

      rs[2].state = SVGA3D_RS_ZWRITEENABLE;
      rs[2].uintValue = TRUE;

      rs[3].state = SVGA3D_RS_ZFUNC;
      rs[3].uintValue = SVGA3D_CMP_LESS;
   }
   SVGA_FIFOCommitAll();

   /* Stage 0: no texture bound; color and alpha come from the diffuse
    * vertex color (SELECTARG1 of TA_DIFFUSE).
    */
   SVGA3D_BeginSetTextureState(CID, &ts, 4);
   {
      ts[0].stage = 0;
      ts[0].name = SVGA3D_TS_BIND_TEXTURE;
      ts[0].value = SVGA3D_INVALID_ID;

      ts[1].stage = 0;
      ts[1].name = SVGA3D_TS_COLOROP;
      ts[1].value = SVGA3D_TC_SELECTARG1;

      ts[2].stage = 0;
      ts[2].name = SVGA3D_TS_COLORARG1;
      ts[2].value = SVGA3D_TA_DIFFUSE;

      ts[3].stage = 0;
      ts[3].name = SVGA3D_TS_ALPHAARG1;
      ts[3].value = SVGA3D_TA_DIFFUSE;
   }
   SVGA_FIFOCommitAll();
}
static void presentStairStep(int xOffset, int yOffset) { const int gridSize = 16; const int numRects = (gridSize + 1) * gridSize / 2; const int squareWidth = surfWidth / gridSize; const int squareHeight = surfHeight / gridSize; int x, y, i; SVGA3dCopyRect *cr; SVGA3D_BeginPresent(colorImage.sid, &cr, numRects); i = 0; for (x = 0; x < gridSize; x++) { for (y = 0; y < gridSize; y++) { if (x + y < gridSize) { cr[i].srcx = x * squareWidth; cr[i].srcy = y * squareHeight; cr[i].x = cr[i].srcx + xOffset; cr[i].y = cr[i].srcy + yOffset; cr[i].w = squareWidth; cr[i].h = squareHeight; i++; } } } if (i != numRects) { SVGA_Panic("Incorrect numRects in present()"); } SVGA_FIFOCommitAll(); }
void drawStrip(int row) { SVGA3dVertexDecl *decls; SVGA3dPrimitiveRange *ranges; SVGA3D_BeginDrawPrimitives(CID, &decls, 2, &ranges, 1); { decls[0].identity.type = SVGA3D_DECLTYPE_FLOAT3; decls[0].identity.usage = SVGA3D_DECLUSAGE_POSITION; decls[0].array.surfaceId = vertexSid; decls[0].array.stride = sizeof(MyVertex); decls[0].array.offset = offsetof(MyVertex, position); decls[1].identity.type = SVGA3D_DECLTYPE_FLOAT3; decls[1].identity.usage = SVGA3D_DECLUSAGE_COLOR; decls[1].array.surfaceId = vertexSid; decls[1].array.stride = sizeof(MyVertex); decls[1].array.offset = offsetof(MyVertex, color); ranges[0].primType = SVGA3D_PRIMITIVE_TRIANGLELIST; ranges[0].primitiveCount = TRIANGLES_PER_ROW; ranges[0].indexArray.surfaceId = indexSid; ranges[0].indexArray.stride = sizeof(IndexType); ranges[0].indexArray.offset = sizeof(IndexType) * INDICES_PER_ROW * row; ranges[0].indexWidth = sizeof(IndexType); } SVGA_FIFOCommitAll(); }
/*
 * Copy a (width x height x depth) box of texels between two surface
 * handles at the given mip level / cube face, using a single
 * SURFACE_COPY command.
 *
 * If the first attempt to reserve command space fails, the context is
 * flushed and the reservation is retried once; the retry is expected
 * to succeed (asserted, not otherwise handled).
 */
void
svga_texture_copy_handle(struct svga_context *svga,
                         struct svga_winsys_surface *src_handle,
                         unsigned src_x, unsigned src_y, unsigned src_z,
                         unsigned src_level, unsigned src_face,
                         struct svga_winsys_surface *dst_handle,
                         unsigned dst_x, unsigned dst_y, unsigned dst_z,
                         unsigned dst_level, unsigned dst_face,
                         unsigned width, unsigned height, unsigned depth)
{
   struct svga_surface dst, src;
   enum pipe_error ret;
   SVGA3dCopyBox box, *boxes;

   assert(svga);

   /* Wrap the raw handles in temporary svga_surface structs; only the
    * fields read by SVGA3D_BeginSurfaceCopy are filled in.
    */
   src.handle = src_handle;
   src.real_level = src_level;
   src.real_face = src_face;
   src.real_zslice = 0;

   dst.handle = dst_handle;
   dst.real_level = dst_level;
   dst.real_face = dst_face;
   dst.real_zslice = 0;

   /* Destination origin + extent, plus the source origin. */
   box.x = dst_x;
   box.y = dst_y;
   box.z = dst_z;
   box.w = width;
   box.h = height;
   box.d = depth;
   box.srcx = src_x;
   box.srcy = src_y;
   box.srcz = src_z;

/*
   SVGA_DBG(DEBUG_VIEWS, "mipcopy src: %p %u (%ux%ux%u), dst: %p %u (%ux%ux%u)\n",
            src_handle, src_level, src_x, src_y, src_z,
            dst_handle, dst_level, dst_x, dst_y, dst_z);
*/

   ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                 &src.base,
                                 &dst.base,
                                 &boxes, 1);
   if(ret != PIPE_OK) {
      /* Command buffer full: flush and retry once. */
      svga_context_flush(svga, NULL);
      ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                    &src.base,
                                    &dst.base,
                                    &boxes, 1);
      assert(ret == PIPE_OK);
   }
   *boxes = box;
   SVGA_FIFOCommitAll(svga->swc);
}
/*
 * SVGA3DUtil_InitFullscreen --
 *
 *    Bring the SVGA device up in a fullscreen 3D configuration:
 *    initialize interrupts/mouse/device, set the display mode, create
 *    color and depth render-target surfaces, define the context, and
 *    bind targets/viewport/Z range.  Finally switch to smooth shading
 *    (see the note below).
 */
void
SVGA3DUtil_InitFullscreen(uint32 cid,     // IN
                          uint32 width,   // IN
                          uint32 height)  // IN
{
   SVGA3dRenderState *rs;

   /* Viewport rectangle covering the whole screen. */
   gFullscreen.screen.x = 0;
   gFullscreen.screen.y = 0;
   gFullscreen.screen.w = width;
   gFullscreen.screen.h = height;

   Intr_Init();
   Intr_SetFaultHandlers(SVGA_DefaultFaultHandler);
   SVGA_Init();
   SVGA_SetMode(width, height, 32);
   VMBackdoor_MouseInit(TRUE);
   SVGA3D_Init();

   /* Color buffer plus 16-bit depth buffer, both screen-sized. */
   gFullscreen.colorImage.sid = SVGA3DUtil_DefineSurface2D(width, height, SVGA3D_X8R8G8B8);
   gFullscreen.depthImage.sid = SVGA3DUtil_DefineSurface2D(width, height, SVGA3D_Z_D16);

   SVGA3D_DefineContext(cid);

   SVGA3D_SetRenderTarget(cid, SVGA3D_RT_COLOR0, &gFullscreen.colorImage);
   SVGA3D_SetRenderTarget(cid, SVGA3D_RT_DEPTH, &gFullscreen.depthImage);

   SVGA3D_SetViewport(cid, &gFullscreen.screen);
   SVGA3D_SetZRange(cid, 0.0f, 1.0f);

   /*
    * The device defaults to flat shading, but to retain compatibility
    * across OpenGL and Direct3D it may be much slower in this
    * mode. Usually we don't want flat shading, so go ahead and switch
    * into smooth shading mode.
    *
    * Note that this is a per-context render state.
    *
    * XXX: There is also a bug in VMware Workstation 6.5.2 which shows
    * up if you're in flat shading mode and you're using a drawing
    * command which does not include an SVGA3dVertexDivisor array.
    * Avoiding flat shading is one workaround, another is to include
    * a dummy SVGA3dVertexDivisor array on every draw.
    */
   SVGA3D_BeginSetRenderState(cid, &rs, 1);
   {
      rs[0].state = SVGA3D_RS_SHADEMODE;
      rs[0].uintValue = SVGA3D_SHADEMODE_SMOOTH;
   }
   SVGA_FIFOCommitAll();
}
/* Setup any hardware state which will be constant through the life of
 * a context.
 *
 * vgpu10 path: define and bind a default rasterizer state object.
 * Legacy path: emit the two render states (coordinate system and
 * front-face winding) that never change afterwards.
 */
enum pipe_error
svga_emit_initial_state( struct svga_context *svga )
{
   if (svga_have_vgpu10(svga)) {
      /* Allocate an id for the default rasterizer object. */
      SVGA3dRasterizerStateId id = util_bitmask_add(svga->rast_object_id_bm);
      enum pipe_error ret;

      /* XXX preliminary code */
      ret = SVGA3D_vgpu10_DefineRasterizerState(svga->swc,
                                                id,
                                                SVGA3D_FILLMODE_FILL,
                                                SVGA3D_CULL_NONE,
                                                1, /* frontCounterClockwise */
                                                0, /* depthBias */
                                                0.0f, /* depthBiasClamp */
                                                0.0f, /* slopeScaledDepthBiasClamp */
                                                0, /* depthClampEnable */
                                                0, /* scissorEnable */
                                                0, /* multisampleEnable */
                                                0, /* aalineEnable */
                                                1.0f, /* lineWidth */
                                                0, /* lineStippleEnable */
                                                0, /* lineStippleFactor */
                                                0, /* lineStipplePattern */
                                                0); /* provokingVertexLast */
      assert(ret == PIPE_OK);

      ret = SVGA3D_vgpu10_SetRasterizerState(svga->swc, id);
      return ret;
   }
   else {
      SVGA3dRenderState *rs;
      unsigned count = 0;
      const unsigned COUNT = 2;
      enum pipe_error ret;

      ret = SVGA3D_BeginSetRenderState( svga->swc, &rs, COUNT );
      if (ret != PIPE_OK)
         return ret;

      /* Always use D3D style coordinate space as this is the only one
       * which is implemented on all backends.
       */
      EMIT_RS(rs, count, SVGA3D_RS_COORDINATETYPE, SVGA3D_COORDINATE_LEFTHANDED );
      EMIT_RS(rs, count, SVGA3D_RS_FRONTWINDING, SVGA3D_FRONTWINDING_CW );

      /* EMIT_RS advances 'count'; it must land exactly on COUNT. */
      assert( COUNT == count );

      SVGA_FIFOCommitAll( svga->swc );

      return PIPE_OK;
   }
}
/*
 * Rebind textures.
 *
 * Similar to update_tss_binding, but without any state checking/update.
 *
 * Called at the beginning of every new command buffer to ensure that
 * non-dirty textures are properly paged-in.
 */
enum pipe_error
svga_reemit_tss_bindings(struct svga_context *svga)
{
   unsigned i;
   enum pipe_error ret;
   struct bind_queue queue;

   /* Only meaningful when a rebind has actually been requested. */
   assert(svga->rebind.texture_samplers);

   queue.bind_count = 0;

   /* Collect every stage that currently has a sampler view bound. */
   for (i = 0; i < svga->state.hw_draw.num_views; i++) {
      struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];

      if (view->v) {
         queue.bind[queue.bind_count].unit = i;
         queue.bind[queue.bind_count].view = view;
         queue.bind_count++;
      }
   }

   /* Emit one SetTextureState command re-binding them all.  The surface
    * relocation (rather than a raw sid) is what lets the winsys page
    * the texture back in.
    */
   if (queue.bind_count) {
      SVGA3dTextureState *ts;

      ret = SVGA3D_BeginSetTextureState(svga->swc, &ts, queue.bind_count);
      if (ret != PIPE_OK) {
         return ret;
      }

      for (i = 0; i < queue.bind_count; i++) {
         struct svga_winsys_surface *handle;

         ts[i].stage = queue.bind[i].unit;
         ts[i].name = SVGA3D_TS_BIND_TEXTURE;

         assert(queue.bind[i].view->v);
         handle = queue.bind[i].view->v->handle;
         svga->swc->surface_relocation(svga->swc,
                                       &ts[i].value,
                                       handle,
                                       SVGA_RELOC_READ);
      }

      SVGA_FIFOCommitAll(svga->swc);
   }

   svga->rebind.texture_samplers = FALSE;

   return PIPE_OK;
}
void SVGA3DUtil_ClearFullscreen(uint32 cid, // IN SVGA3dClearFlag flags, // IN uint32 color, // IN float depth, // IN uint32 stencil) // IN { SVGA3dRect *rect; SVGA3D_BeginClear(cid, flags, color, depth, stencil, &rect, 1); memset(rect, 0, sizeof *rect); rect->w = gSVGA.width; rect->h = gSVGA.height; SVGA_FIFOCommitAll(); }
void SVGA3DUtil_PresentFullscreen(void) { SVGA3dCopyRect *cr; SVGA_SyncToFence(gFullscreen.lastPresentFence); SVGA3D_BeginPresent(gFullscreen.colorImage.sid, &cr, 1); memset(cr, 0, sizeof *cr); cr->w = gSVGA.width; cr->h = gSVGA.height; SVGA_FIFOCommitAll(); gFullscreen.lastPresentFence = SVGA_InsertFence(); }
/*
 * uploadRow --
 *
 *    DMA one row of MESH_WIDTH vertices from the guest DMA buffer into
 *    the vertex buffer surface 'vertexSid'.  The surface is treated as
 *    a 1D byte buffer, so a row's byte offset is
 *    row * MESH_WIDTH * sizeof(MyVertex).
 *
 *    Fix: the offset previously used MESH_HEIGHT as the row stride,
 *    which disagrees with the MESH_WIDTH-sized copy width and is wrong
 *    for any non-square mesh.  Also zero the unset box fields (y, z,
 *    srcy, srcz) rather than leaving whatever was in the reserved FIFO
 *    memory.
 */
void
uploadRow(int row, DMAPoolBuffer *dma)
{
   SVGA3dCopyBox *boxes;
   SVGA3dGuestImage guestImage;
   SVGA3dSurfaceImageId hostImage = { vertexSid };

   guestImage.ptr = dma->ptr;
   guestImage.pitch = 0;

   SVGA3D_BeginSurfaceDMA(&guestImage, &hostImage,
                          SVGA3D_WRITE_HOST_VRAM, &boxes, 1);
   {
      /* Defensively clear the box; reserved FIFO memory is not
       * guaranteed to be zeroed.
       */
      memset(&boxes[0], 0, sizeof boxes[0]);

      boxes[0].x = MESH_WIDTH * sizeof(MyVertex) * row;
      boxes[0].w = MESH_WIDTH * sizeof(MyVertex);
      boxes[0].srcx = boxes[0].x;
      boxes[0].h = 1;
      boxes[0].d = 1;
   }
   SVGA_FIFOCommitAll();
}
void SVGA3DUtil_SurfaceDMA2D(uint32 sid, // IN SVGAGuestPtr *guestPtr, // IN SVGA3dTransferType transfer, // IN uint32 width, // IN uint32 height) // IN { SVGA3dCopyBox *boxes; SVGA3dGuestImage guestImage; SVGA3dSurfaceImageId hostImage = { sid }; guestImage.ptr = *guestPtr; guestImage.pitch = 0; SVGA3D_BeginSurfaceDMA(&guestImage, &hostImage, transfer, &boxes, 1); boxes[0].w = width; boxes[0].h = height; boxes[0].d = 1; SVGA_FIFOCommitAll(); }
uint32 SVGA3DUtil_DefineSurface2D(uint32 width, // IN uint32 height, // IN SVGA3dSurfaceFormat format) // IN { uint32 sid; SVGA3dSize *mipSizes; SVGA3dSurfaceFace *faces; sid = SVGA3DUtil_AllocSurfaceID(); SVGA3D_BeginDefineSurface(sid, 0, format, &faces, &mipSizes, 1); faces[0].numMipLevels = 1; mipSizes[0].width = width; mipSizes[0].height = height; mipSizes[0].depth = 1; SVGA_FIFOCommitAll(); return sid; }
/* Setup any hardware state which will be constant through the life of
 * a context.
 *
 * NOTE(review): this is the pre-vgpu10 version of this function; an
 * extended version (with a vgpu10 branch) also appears earlier in this
 * file — they come from different driver revisions and should not
 * coexist in one translation unit.
 */
enum pipe_error
svga_emit_initial_state( struct svga_context *svga )
{
   SVGA3dRenderState *rs;
   unsigned count = 0;
   const unsigned COUNT = 2;
   enum pipe_error ret;

   ret = SVGA3D_BeginSetRenderState( svga->swc, &rs, COUNT );
   if (ret != PIPE_OK)
      return ret;

   /* Always use D3D style coordinate space as this is the only one
    * which is implemented on all backends.
    */
   EMIT_RS(rs, count, SVGA3D_RS_COORDINATETYPE, SVGA3D_COORDINATE_LEFTHANDED );
   EMIT_RS(rs, count, SVGA3D_RS_FRONTWINDING, SVGA3D_FRONTWINDING_CW );

   /* EMIT_RS advances 'count'; it must land exactly on COUNT. */
   assert( COUNT == count );

   SVGA_FIFOCommitAll( svga->swc );

   return PIPE_OK;
}
/*
 * renderCube --
 *
 *    Draw one spinning cube at screen-space offset (x, y), either via
 *    the fixed-function pipeline or via our vertex/pixel shaders, and
 *    with the position attribute taken from either the half-float or
 *    the full-float copy in MyVertex.
 */
void
renderCube(float x, float y, Bool useShaders, Bool useHalf)
{
   SVGA3dTextureState *ts;
   SVGA3dRenderState *rs;
   SVGA3dVertexDecl *decls;
   SVGA3dPrimitiveRange *ranges;
   static Matrix view;

   /* Tilt 30 degrees about X, spin with the frame counter, then push
    * the cube away from the camera.
    */
   Matrix_Copy(view, gIdentityMatrix);
   Matrix_RotateX(view, 30.0 * M_PI / 180.0);
   Matrix_RotateY(view, gFPS.frame * 0.01f);
   Matrix_Translate(view, x, y, 15);

   if (useShaders) {
      SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_VS, MY_VSHADER_ID);
      SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_PS, MY_PSHADER_ID);

      /* Shaders receive the transforms via constant registers. */
      SVGA3DUtil_SetShaderConstMatrix(CID, CONST_MAT_PROJ,
                                      SVGA3D_SHADERTYPE_VS, perspectiveMat);
      SVGA3DUtil_SetShaderConstMatrix(CID, CONST_MAT_VIEW,
                                      SVGA3D_SHADERTYPE_VS, view);
   } else {
      /* Fixed function: unbind shaders and use the transform stack. */
      SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_VS, SVGA3D_INVALID_ID);
      SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_PS, SVGA3D_INVALID_ID);

      SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_VIEW, view);
      SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_WORLD, gIdentityMatrix);
      SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_PROJECTION, perspectiveMat);
   }

   /* Opaque, depth-tested rendering. */
   SVGA3D_BeginSetRenderState(CID, &rs, 4);
   {
      rs[0].state = SVGA3D_RS_BLENDENABLE;
      rs[0].uintValue = FALSE;

      rs[1].state = SVGA3D_RS_ZENABLE;
      rs[1].uintValue = TRUE;

      rs[2].state = SVGA3D_RS_ZWRITEENABLE;
      rs[2].uintValue = TRUE;

      rs[3].state = SVGA3D_RS_ZFUNC;
      rs[3].uintValue = SVGA3D_CMP_LESS;
   }
   SVGA_FIFOCommitAll();

   /* Stage 0: no texture; pass through the diffuse vertex color. */
   SVGA3D_BeginSetTextureState(CID, &ts, 4);
   {
      ts[0].stage = 0;
      ts[0].name = SVGA3D_TS_BIND_TEXTURE;
      ts[0].value = SVGA3D_INVALID_ID;

      ts[1].stage = 0;
      ts[1].name = SVGA3D_TS_COLOROP;
      ts[1].value = SVGA3D_TC_SELECTARG1;

      ts[2].stage = 0;
      ts[2].name = SVGA3D_TS_COLORARG1;
      ts[2].value = SVGA3D_TA_DIFFUSE;

      ts[3].stage = 0;
      ts[3].name = SVGA3D_TS_ALPHAARG1;
      ts[3].value = SVGA3D_TA_DIFFUSE;
   }
   SVGA_FIFOCommitAll();

   SVGA3D_BeginDrawPrimitives(CID, &decls, 2, &ranges, 1);
   {
      decls[0].identity.usage = SVGA3D_DECLUSAGE_POSITION;
      decls[0].array.surfaceId = vertexSid;
      decls[0].array.stride = sizeof(MyVertex);

      /* Position comes from either the FLOAT16_4 or FLOAT3 copy in
       * the interleaved vertex.
       */
      if (useHalf) {
         decls[0].identity.type = SVGA3D_DECLTYPE_FLOAT16_4;
         decls[0].array.offset = offsetof(MyVertex, position16);
      } else {
         decls[0].identity.type = SVGA3D_DECLTYPE_FLOAT3;
         decls[0].array.offset = offsetof(MyVertex, position32);
      }

      decls[1].identity.type = SVGA3D_DECLTYPE_D3DCOLOR;
      decls[1].identity.usage = SVGA3D_DECLUSAGE_COLOR;
      decls[1].array.surfaceId = vertexSid;
      decls[1].array.stride = sizeof(MyVertex);
      decls[1].array.offset = offsetof(MyVertex, color);

      ranges[0].primType = SVGA3D_PRIMITIVE_TRIANGLELIST;
      ranges[0].primitiveCount = numTriangles;
      ranges[0].indexArray.surfaceId = indexSid;
      ranges[0].indexArray.stride = sizeof(uint16);
      ranges[0].indexWidth = sizeof(uint16);
   }
   SVGA_FIFOCommitAll();

   /* Leave shaders unbound so subsequent passes start from a known
    * state.
    */
   SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_VS, SVGA3D_INVALID_ID);
   SVGA3D_SetShader(CID, SVGA3D_SHADERTYPE_PS, SVGA3D_INVALID_ID);
}
/*
 * present --
 *
 *    Exercise several present/blit clipping paths in sequence:
 *    stair-step tiles, swapped halves, a lightly-scaled blit with a
 *    hole, edge-of-screen clipping, and heavily scaled circular clip
 *    regions.  Coordinates are in the screen's root coordinate space
 *    (root origin is (1000, 2000) — see initScreens).
 */
static void
present(void)
{
   /*
    * Main stair-step unscaled present test.
    */
   presentStairStep(1020, 2065);

   /*
    * Another non-scaled present, this time using the copyrects in a
    * way which is not also expressable as a clip rectangle. In this
    * case, we're using one Present to split the image in half (top
    * and bottom) and reverse the two halves.
    */
   {
      SVGA3dCopyRect *cr;

      SVGA3D_BeginPresent(colorImage.sid, &cr, 2);

      /* Bottom half of the surface -> top half of the destination. */
      cr[0].srcx = 0;
      cr[0].srcy = surfHeight / 2;
      cr[0].x = 1020;
      cr[0].y = 2265;
      cr[0].w = surfWidth;
      cr[0].h = surfHeight / 2;

      /* Top half of the surface -> bottom half of the destination. */
      cr[1].srcx = 0;
      cr[1].srcy = 0;
      cr[1].x = 1020;
      cr[1].y = 2265 + surfHeight / 2;
      cr[1].w = surfWidth;
      cr[1].h = surfHeight / 2;

      SVGA_FIFOCommitAll();
   }

   /*
    * A fairly normal scaled blit. This one is only slightly scaled, unlike the
    * large one below- so it may be easier to see a different class of bugs.
    * For clipping, we remove a hole from the center of the image.
    *
    * We also test source clipping by displaying the bottom half.
    */
   {
      SVGASignedRect *clip;
      SVGASignedRect srcRect = { 0, surfHeight/2, surfWidth, surfHeight };
      SVGASignedRect dstRect = { 20, 465, 325, 655 };

      SVGA3D_BeginBlitSurfaceToScreen(&colorImage, &srcRect, 0, &dstRect,
                                      &clip, 4);

      /* Four rects framing a rectangular hole in the middle. */

      // Top
      clip[0].left = 0;
      clip[0].top = 0;
      clip[0].right = 445;
      clip[0].bottom = 75;

      // Bottom
      clip[1].left = 0;
      clip[1].top = 115;
      clip[1].right = 445;
      clip[1].bottom = 330;

      // Left
      clip[2].left = 0;
      clip[2].top = 75;
      clip[2].right = 63;
      clip[2].bottom = 115;

      // Right
      clip[3].left = 242;
      clip[3].top = 75;
      clip[3].right = 305;
      clip[3].bottom = 115;

      SVGA_FIFOCommitAll();
   }

   /*
    * Stair-step, clipped against the bottom and left sides of the screen.
    */
   presentStairStep(1000 - surfHeight/2, 2000 + 768 - surfHeight/2);

   /*
    * Scaled circles. We scale these asymmetrically, to about 1.5x the
    * size of the screen.
    */
   {
      int i;
      for (i = 0; i < arraysize(circles); i++) {
         presentWithClipBuf(&circles[i], -500, -300, 1300, 1000);
      }
   }
}
/* Compare old and new render states and emit differences between them
 * to hardware. Simplest implementation would be to emit the whole of
 * the "to" state.
 *
 * EMIT_RS / EMIT_RS_FLOAT append a state/value pair to 'queue' and
 * jump to 'fail' on error; nothing reaches the hardware until the
 * queue is flushed in one SetRenderState command at the bottom.
 */
static enum pipe_error
emit_rss(struct svga_context *svga, unsigned dirty)
{
   struct svga_screen *screen = svga_screen(svga->pipe.screen);
   struct rs_queue queue;
   float point_size_min;

   queue.rs_count = 0;

   if (dirty & SVGA_NEW_BLEND) {
      const struct svga_blend_state *curr = svga->curr.blend;

      EMIT_RS( svga, curr->rt[0].writemask, COLORWRITEENABLE, fail );
      EMIT_RS( svga, curr->rt[0].blend_enable, BLENDENABLE, fail );

      /* Blend factors/equations only matter while blending is on. */
      if (curr->rt[0].blend_enable) {
         EMIT_RS( svga, curr->rt[0].srcblend, SRCBLEND, fail );
         EMIT_RS( svga, curr->rt[0].dstblend, DSTBLEND, fail );
         EMIT_RS( svga, curr->rt[0].blendeq, BLENDEQUATION, fail );

         EMIT_RS( svga, curr->rt[0].separate_alpha_blend_enable,
                  SEPARATEALPHABLENDENABLE, fail );

         if (curr->rt[0].separate_alpha_blend_enable) {
            EMIT_RS( svga, curr->rt[0].srcblend_alpha, SRCBLENDALPHA, fail );
            EMIT_RS( svga, curr->rt[0].dstblend_alpha, DSTBLENDALPHA, fail );
            EMIT_RS( svga, curr->rt[0].blendeq_alpha, BLENDEQUATIONALPHA, fail );
         }
      }
   }

   if (dirty & SVGA_NEW_BLEND_COLOR) {
      uint32 color;
      uint32 r = float_to_ubyte(svga->curr.blend_color.color[0]);
      uint32 g = float_to_ubyte(svga->curr.blend_color.color[1]);
      uint32 b = float_to_ubyte(svga->curr.blend_color.color[2]);
      uint32 a = float_to_ubyte(svga->curr.blend_color.color[3]);

      /* Pack as A8R8G8B8. */
      color = (a << 24) | (r << 16) | (g << 8) | b;

      EMIT_RS( svga, color, BLENDCOLOR, fail );
   }

   if (dirty & (SVGA_NEW_DEPTH_STENCIL | SVGA_NEW_RAST)) {
      const struct svga_depth_stencil_state *curr = svga->curr.depth;
      const struct svga_rasterizer_state *rast = svga->curr.rast;

      if (!curr->stencil[0].enabled) {
         /* Stencil disabled */
         EMIT_RS( svga, FALSE, STENCILENABLE, fail );
         EMIT_RS( svga, FALSE, STENCILENABLE2SIDED, fail );
      }
      else if (curr->stencil[0].enabled && !curr->stencil[1].enabled) {
         /* Regular stencil */
         EMIT_RS( svga, TRUE, STENCILENABLE, fail );
         EMIT_RS( svga, FALSE, STENCILENABLE2SIDED, fail );

         EMIT_RS( svga, curr->stencil[0].func, STENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[0].fail, STENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[0].zfail, STENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[0].pass, STENCILPASS, fail );

         EMIT_RS( svga, curr->stencil_mask, STENCILMASK, fail );
         EMIT_RS( svga, curr->stencil_writemask, STENCILWRITEMASK, fail );
      }
      else {
         int cw, ccw;

         /* Hardware frontwinding is always CW, so if ours is also CW,
          * then our definition of front face agrees with hardware.
          * Otherwise need to flip.
          */
         if (rast->templ.front_ccw) {
            ccw = 0;
            cw = 1;
         }
         else {
            ccw = 1;
            cw = 0;
         }

         /* Twoside stencil */
         EMIT_RS( svga, TRUE, STENCILENABLE, fail );
         EMIT_RS( svga, TRUE, STENCILENABLE2SIDED, fail );

         EMIT_RS( svga, curr->stencil[cw].func, STENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[cw].fail, STENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[cw].zfail, STENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[cw].pass, STENCILPASS, fail );

         EMIT_RS( svga, curr->stencil[ccw].func, CCWSTENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[ccw].fail, CCWSTENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[ccw].zfail, CCWSTENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[ccw].pass, CCWSTENCILPASS, fail );

         EMIT_RS( svga, curr->stencil_mask, STENCILMASK, fail );
         EMIT_RS( svga, curr->stencil_writemask, STENCILWRITEMASK, fail );
      }

      EMIT_RS( svga, curr->zenable, ZENABLE, fail );
      if (curr->zenable) {
         EMIT_RS( svga, curr->zfunc, ZFUNC, fail );
         EMIT_RS( svga, curr->zwriteenable, ZWRITEENABLE, fail );
      }

      EMIT_RS( svga, curr->alphatestenable, ALPHATESTENABLE, fail );
      if (curr->alphatestenable) {
         EMIT_RS( svga, curr->alphafunc, ALPHAFUNC, fail );
         EMIT_RS_FLOAT( svga, curr->alpharef, ALPHAREF, fail );
      }
   }

   if (dirty & SVGA_NEW_STENCIL_REF) {
      EMIT_RS( svga, svga->curr.stencil_ref.ref_value[0], STENCILREF, fail );
   }

   if (dirty & (SVGA_NEW_RAST | SVGA_NEW_NEED_PIPELINE)) {
      const struct svga_rasterizer_state *curr = svga->curr.rast;
      unsigned cullmode = curr->cullmode;

      /* Shademode: still need to rearrange index list to move
       * flat-shading PV first vertex.
       */
      EMIT_RS( svga, curr->shademode, SHADEMODE, fail );

      /* Don't do culling while the software pipeline is active. It
       * does it for us, and additionally introduces potentially
       * back-facing triangles.
       */
      if (svga->state.sw.need_pipeline)
         cullmode = SVGA3D_FACE_NONE;

      point_size_min = util_get_min_point_size(&curr->templ);

      EMIT_RS( svga, cullmode, CULLMODE, fail );
      EMIT_RS( svga, curr->scissortestenable, SCISSORTESTENABLE, fail );
      EMIT_RS( svga, curr->multisampleantialias, MULTISAMPLEANTIALIAS, fail );
      EMIT_RS( svga, curr->lastpixel, LASTPIXEL, fail );
      EMIT_RS( svga, curr->linepattern, LINEPATTERN, fail );
      EMIT_RS_FLOAT( svga, curr->pointsize, POINTSIZE, fail );
      EMIT_RS_FLOAT( svga, point_size_min, POINTSIZEMIN, fail );
      EMIT_RS_FLOAT( svga, screen->maxPointSize, POINTSIZEMAX, fail );
      EMIT_RS( svga, curr->pointsprite, POINTSPRITEENABLE, fail);
   }

   if (dirty & (SVGA_NEW_RAST | SVGA_NEW_FRAME_BUFFER | SVGA_NEW_NEED_PIPELINE)) {
      const struct svga_rasterizer_state *curr = svga->curr.rast;
      float slope = 0.0;
      float bias = 0.0;

      /* Need to modify depth bias according to bound depthbuffer
       * format. Don't do hardware depthbias while the software
       * pipeline is active.
       */
      if (!svga->state.sw.need_pipeline &&
          svga->curr.framebuffer.zsbuf) {
         slope = curr->slopescaledepthbias;
         bias = svga->curr.depthscale * curr->depthbias;
      }

      EMIT_RS_FLOAT( svga, slope, SLOPESCALEDEPTHBIAS, fail );
      EMIT_RS_FLOAT( svga, bias, DEPTHBIAS, fail );
   }

   if (dirty & SVGA_NEW_FRAME_BUFFER) {
      /* XXX: we only look at the first color buffer's sRGB state */
      float gamma = 1.0f;
      if (svga->curr.framebuffer.cbufs[0] &&
          util_format_is_srgb(svga->curr.framebuffer.cbufs[0]->format)) {
         gamma = 2.2f;
      }
      EMIT_RS_FLOAT(svga, gamma, OUTPUTGAMMA, fail);
   }

   if (dirty & SVGA_NEW_RAST) {
      /* bitmask of the enabled clip planes */
      unsigned enabled = svga->curr.rast->templ.clip_plane_enable;
      EMIT_RS( svga, enabled, CLIPPLANEENABLE, fail );
   }

   /* Flush the accumulated queue as a single SetRenderState command. */
   if (queue.rs_count) {
      SVGA3dRenderState *rs;

      if (SVGA3D_BeginSetRenderState( svga->swc, &rs, queue.rs_count ) != PIPE_OK)
         goto fail;

      memcpy( rs, queue.rs, queue.rs_count * sizeof queue.rs[0]);

      SVGA_FIFOCommitAll( svga->swc );
   }

   return PIPE_OK;

fail:
   /* XXX: need to poison cached hardware state on failure to ensure
    * dirty state gets re-emitted. Fix this by re-instating partial
    * FIFOCommit command and only updating cached hw state once the
    * initial allocation has succeeded.
    */
   memset(svga->state.hw_draw.rs, 0xcd, sizeof(svga->state.hw_draw.rs));

   return PIPE_ERROR_OUT_OF_MEMORY;
}
/*
 * Emit the queued vertex declarations and primitive ranges as one
 * DrawPrimitives command (vgpu9 path).  Buffer handles are validated
 * first, any pending rebinds are re-emitted, and every surface id in
 * the command is patched through a winsys relocation.
 */
static enum pipe_error
draw_vgpu9(struct svga_hwtnl *hwtnl)
{
   struct svga_winsys_context *swc = hwtnl->cmd.swc;
   struct svga_context *svga = hwtnl->svga;
   enum pipe_error ret;
   struct svga_winsys_surface *vb_handle[SVGA3D_INPUTREG_MAX];
   struct svga_winsys_surface *ib_handle[QSZ];
   struct svga_winsys_surface *handle;
   SVGA3dVertexDecl *vdecl;
   SVGA3dPrimitiveRange *prim;
   unsigned i;

   /* Resolve every vertex buffer to a winsys handle before reserving
    * command space.
    */
   for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
      unsigned j = hwtnl->cmd.vdecl_buffer_index[i];
      handle = svga_buffer_handle(svga, hwtnl->cmd.vbufs[j].buffer);
      if (handle == NULL)
         return PIPE_ERROR_OUT_OF_MEMORY;

      vb_handle[i] = handle;
   }

   /* Likewise for the (optional) index buffer of each primitive. */
   for (i = 0; i < hwtnl->cmd.prim_count; i++) {
      if (hwtnl->cmd.prim_ib[i]) {
         handle = svga_buffer_handle(svga, hwtnl->cmd.prim_ib[i]);
         if (handle == NULL)
            return PIPE_ERROR_OUT_OF_MEMORY;
      }
      else
         handle = NULL;

      ib_handle[i] = handle;
   }

   /* Re-emit any bindings invalidated by a command-buffer flush. */
   if (svga->rebind.flags.rendertargets) {
      ret = svga_reemit_framebuffer_bindings(svga);
      if (ret != PIPE_OK) {
         return ret;
      }
   }

   if (svga->rebind.flags.texture_samplers) {
      ret = svga_reemit_tss_bindings(svga);
      if (ret != PIPE_OK) {
         return ret;
      }
   }

   if (svga->rebind.flags.vs) {
      ret = svga_reemit_vs_bindings(svga);
      if (ret != PIPE_OK) {
         return ret;
      }
   }

   if (svga->rebind.flags.fs) {
      ret = svga_reemit_fs_bindings(svga);
      if (ret != PIPE_OK) {
         return ret;
      }
   }

   SVGA_DBG(DEBUG_DMA, "draw to sid %p, %d prims\n",
            svga->curr.framebuffer.cbufs[0] ?
            svga_surface(svga->curr.framebuffer.cbufs[0])->handle : NULL,
            hwtnl->cmd.prim_count);

   ret = SVGA3D_BeginDrawPrimitives(swc,
                                    &vdecl,
                                    hwtnl->cmd.vdecl_count,
                                    &prim, hwtnl->cmd.prim_count);
   if (ret != PIPE_OK)
      return ret;

   memcpy(vdecl,
          hwtnl->cmd.vdecl,
          hwtnl->cmd.vdecl_count * sizeof hwtnl->cmd.vdecl[0]);

   for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
      /* check for 4-byte alignment */
      assert(vdecl[i].array.offset % 4 == 0);
      assert(vdecl[i].array.stride % 4 == 0);

      /* Given rangeHint is considered to be relative to indexBias, and
       * indexBias varies per primitive, we cannot accurately supply an
       * rangeHint when emitting more than one primitive per draw command.
       */
      if (hwtnl->cmd.prim_count == 1) {
         vdecl[i].rangeHint.first = hwtnl->cmd.min_index[0];
         vdecl[i].rangeHint.last = hwtnl->cmd.max_index[0] + 1;
      }
      else {
         vdecl[i].rangeHint.first = 0;
         vdecl[i].rangeHint.last = 0;
      }

      swc->surface_relocation(swc,
                              &vdecl[i].array.surfaceId,
                              NULL, vb_handle[i], SVGA_RELOC_READ);
   }

   memcpy(prim,
          hwtnl->cmd.prim, hwtnl->cmd.prim_count * sizeof hwtnl->cmd.prim[0]);

   for (i = 0; i < hwtnl->cmd.prim_count; i++) {
      swc->surface_relocation(swc,
                              &prim[i].indexArray.surfaceId,
                              NULL, ib_handle[i], SVGA_RELOC_READ);
      /* The queued reference is no longer needed once emitted. */
      pipe_resource_reference(&hwtnl->cmd.prim_ib[i], NULL);
   }

   SVGA_FIFOCommitAll(swc);

   hwtnl->cmd.prim_count = 0;

   return PIPE_OK;
}
/*
 * initScreens --
 *
 *    Define a single 1024x768 primary Screen Object rooted at
 *    (1000, 2000), then draw the static console labels and the red
 *    screen border for the clipping tests.
 */
static void
initScreens(void)
{
   static const SVGAScreenObject screen = {
      .structSize = sizeof(SVGAScreenObject),
      .id = 0,
      .flags = SVGA_SCREEN_HAS_ROOT | SVGA_SCREEN_IS_PRIMARY,
      .size = { 1024, 768 },
      .root = { 1000, 2000 },
   };

   Screen_Define(&screen);
   ScreenDraw_SetScreen(screen.id, screen.size.width, screen.size.height);

   Console_Clear();
   Console_Format("Surface-to-Screen Blit Clipping Test\n");

   ScreenDraw_Border(0, 0, screen.size.width, screen.size.height, 0xFF0000, 1);

   /* Labels for each test region drawn by present(). */
   Console_MoveTo(20, 45);
   Console_Format("Stair-step clipping (small tiles)");

   Console_MoveTo(20, 245);
   Console_Format("Top/bottom halves swapped");

   Console_MoveTo(20, 445);
   Console_Format("Scaled bottom half, with hole");

   Console_MoveTo(350, 65);
   Console_Format("Zoomed to 1.5x full screen, two circular clip regions");

   Console_MoveTo(5, 660);
   Console_Format("Stair-step, clipped against screen edges");
}

/*
 * presentWithClipBuf --
 *
 *    Present our surface to the screen, with clipping data from a ClipBuffer.
 *
 *    The supplied ClipBuffer is always in screen coordinates. We
 *    convert them into dest-relative coordinates for the
 *    surface-to-screen blit.
 */
static void
presentWithClipBuf(ClipBuffer *buf, int dstL, int dstT, int dstR, int dstB)
{
   SVGASignedRect srcRect = { 0, 0, surfWidth, surfHeight };
   SVGASignedRect dstRect = { dstL, dstT, dstR, dstB };
   SVGASignedRect *clip;
   int i;

   SVGA3D_BeginBlitSurfaceToScreen(&colorImage, &srcRect, 0, &dstRect,
                                   &clip, buf->numRects);

   /* Translate each screen-space rect to be relative to the blit's
    * destination origin.
    */
   for (i = 0; i < buf->numRects; i++) {
      clip->left = buf->rects[i].left - dstL;
      clip->top = buf->rects[i].top - dstT;
      clip->right = buf->rects[i].right - dstL;
      clip->bottom = buf->rects[i].bottom - dstT;
      clip++;
   }

   SVGA_FIFOCommitAll();
}
/*
 * drawCube --
 *
 *    Render one frame of the spinning wireframe cube into the color
 *    and depth render targets: bind targets/viewport, set render and
 *    texture state, clear (red border plus blue-gray background), then
 *    draw the cube edges as an indexed line list.
 */
static void
drawCube(void)
{
   static float angle = 0.5f;
   SVGA3dRect *rect;
   Matrix perspectiveMat;
   SVGA3dTextureState *ts;
   SVGA3dRenderState *rs;
   SVGA3dRect viewport = { 0, 0, surfWidth, surfHeight };

   SVGA3D_SetRenderTarget(CID, SVGA3D_RT_COLOR0, &colorImage);
   SVGA3D_SetRenderTarget(CID, SVGA3D_RT_DEPTH, &depthImage);

   SVGA3D_SetViewport(CID, &viewport);
   SVGA3D_SetZRange(CID, 0.0f, 1.0f);

   /* Opaque, depth-tested, unlit rendering. */
   SVGA3D_BeginSetRenderState(CID, &rs, 5);
   {
      rs[0].state = SVGA3D_RS_BLENDENABLE;
      rs[0].uintValue = FALSE;

      rs[1].state = SVGA3D_RS_ZENABLE;
      rs[1].uintValue = TRUE;

      rs[2].state = SVGA3D_RS_ZWRITEENABLE;
      rs[2].uintValue = TRUE;

      rs[3].state = SVGA3D_RS_ZFUNC;
      rs[3].uintValue = SVGA3D_CMP_LESS;

      rs[4].state = SVGA3D_RS_LIGHTINGENABLE;
      rs[4].uintValue = FALSE;
   }
   SVGA_FIFOCommitAll();

   /* Stage 0: no texture; pass through the diffuse vertex color. */
   SVGA3D_BeginSetTextureState(CID, &ts, 4);
   {
      ts[0].stage = 0;
      ts[0].name = SVGA3D_TS_BIND_TEXTURE;
      ts[0].value = SVGA3D_INVALID_ID;

      ts[1].stage = 0;
      ts[1].name = SVGA3D_TS_COLOROP;
      ts[1].value = SVGA3D_TC_SELECTARG1;

      ts[2].stage = 0;
      ts[2].name = SVGA3D_TS_COLORARG1;
      ts[2].value = SVGA3D_TA_DIFFUSE;

      ts[3].stage = 0;
      ts[3].name = SVGA3D_TS_ALPHAARG1;
      ts[3].value = SVGA3D_TA_DIFFUSE;
   }
   SVGA_FIFOCommitAll();

   /*
    * Draw a red border around the render target, to test edge
    * accuracy in Present.
    */
   SVGA3D_BeginClear(CID, SVGA3D_CLEAR_COLOR | SVGA3D_CLEAR_DEPTH,
                     0xFF0000, 1.0f, 0, &rect, 1);
   *rect = viewport;
   SVGA_FIFOCommitAll();

   /*
    * Draw the background color
    */
   SVGA3D_BeginClear(CID, SVGA3D_CLEAR_COLOR | SVGA3D_CLEAR_DEPTH,
                     0x336699, 1.0f, 0, &rect, 1);
   /* Inset by one pixel so the red border stays visible. */
   rect->x = viewport.x + 1;
   rect->y = viewport.y + 1;
   rect->w = viewport.w - 2;
   rect->h = viewport.h - 2;
   SVGA_FIFOCommitAll();

   /* NOTE: declarations after statements -- this function requires C99. */
   SVGA3dVertexDecl *decls;
   SVGA3dPrimitiveRange *ranges;
   Matrix view;

   /* Scale down, tilt, spin, and push the cube away from the camera. */
   Matrix_Copy(view, gIdentityMatrix);
   Matrix_Scale(view, 0.5, 0.5, 0.5, 1.0);
   Matrix_RotateX(view, 30.0 * M_PI / 180.0);
   Matrix_RotateY(view, angle);
   Matrix_Translate(view, 0, 0, 2.2);
   angle += 0.02;

   Matrix_Perspective(perspectiveMat, 45.0f, 4.0f / 3.0f, 0.1f, 100.0f);

   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_WORLD, gIdentityMatrix);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_PROJECTION, perspectiveMat);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_VIEW, view);

   SVGA3D_BeginDrawPrimitives(CID, &decls, 2, &ranges, 1);
   {
      decls[0].identity.type = SVGA3D_DECLTYPE_FLOAT3;
      decls[0].identity.usage = SVGA3D_DECLUSAGE_POSITION;
      decls[0].array.surfaceId = vertexSid;
      decls[0].array.stride = sizeof(MyVertex);
      decls[0].array.offset = offsetof(MyVertex, position);

      decls[1].identity.type = SVGA3D_DECLTYPE_D3DCOLOR;
      decls[1].identity.usage = SVGA3D_DECLUSAGE_COLOR;
      decls[1].array.surfaceId = vertexSid;
      decls[1].array.stride = sizeof(MyVertex);
      decls[1].array.offset = offsetof(MyVertex, color);

      /* Wireframe cube: indexed line list. */
      ranges[0].primType = SVGA3D_PRIMITIVE_LINELIST;
      ranges[0].primitiveCount = numLines;
      ranges[0].indexArray.surfaceId = indexSid;
      ranges[0].indexArray.stride = sizeof(uint16);
      ranges[0].indexWidth = sizeof(uint16);
   }
   SVGA_FIFOCommitAll();
}
/*
 * Flush any queued primitives as a single DrawPrimitives command.
 * No-op when nothing is queued.
 *
 * NOTE(review): this is an older revision of the draw path (cf.
 * draw_vgpu9 earlier in this file) using the three-argument
 * surface_relocation / PIPE_BUFFER_USAGE_GPU_READ API.
 */
enum pipe_error
svga_hwtnl_flush( struct svga_hwtnl *hwtnl )
{
   struct svga_winsys_context *swc = hwtnl->cmd.swc;
   struct svga_context *svga = hwtnl->svga;
   enum pipe_error ret;

   if (hwtnl->cmd.prim_count) {
      struct svga_winsys_surface *vb_handle[SVGA3D_INPUTREG_MAX];
      struct svga_winsys_surface *ib_handle[QSZ];
      struct svga_winsys_surface *handle;
      SVGA3dVertexDecl *vdecl;
      SVGA3dPrimitiveRange *prim;
      unsigned i;

      /* Resolve vertex buffers to winsys handles up front. */
      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         handle = svga_buffer_handle(svga, hwtnl->cmd.vdecl_vb[i]);
         if (handle == NULL)
            return PIPE_ERROR_OUT_OF_MEMORY;

         vb_handle[i] = handle;
      }

      /* And the optional index buffer of each queued primitive. */
      for (i = 0; i < hwtnl->cmd.prim_count; i++) {
         if (hwtnl->cmd.prim_ib[i]) {
            handle = svga_buffer_handle(svga, hwtnl->cmd.prim_ib[i]);
            if (handle == NULL)
               return PIPE_ERROR_OUT_OF_MEMORY;
         }
         else
            handle = NULL;

         ib_handle[i] = handle;
      }

      SVGA_DBG(DEBUG_DMA, "draw to sid %p, %d prims\n",
               svga->curr.framebuffer.cbufs[0] ?
               svga_surface(svga->curr.framebuffer.cbufs[0])->handle : NULL,
               hwtnl->cmd.prim_count);

      ret = SVGA3D_BeginDrawPrimitives(swc,
                                       &vdecl,
                                       hwtnl->cmd.vdecl_count,
                                       &prim,
                                       hwtnl->cmd.prim_count);
      if (ret != PIPE_OK)
         return ret;

      memcpy( vdecl,
              hwtnl->cmd.vdecl,
              hwtnl->cmd.vdecl_count * sizeof hwtnl->cmd.vdecl[0]);

      for (i = 0; i < hwtnl->cmd.vdecl_count; i++) {
         /* Given rangeHint is considered to be relative to indexBias, and
          * indexBias varies per primitive, we cannot accurately supply an
          * rangeHint when emitting more than one primitive per draw command.
          */
         if (hwtnl->cmd.prim_count == 1) {
            vdecl[i].rangeHint.first = hwtnl->cmd.min_index[0];
            vdecl[i].rangeHint.last = hwtnl->cmd.max_index[0] + 1;
         }
         else {
            vdecl[i].rangeHint.first = 0;
            vdecl[i].rangeHint.last = 0;
         }

         swc->surface_relocation(swc,
                                 &vdecl[i].array.surfaceId,
                                 vb_handle[i],
                                 PIPE_BUFFER_USAGE_GPU_READ);
      }

      memcpy( prim,
              hwtnl->cmd.prim,
              hwtnl->cmd.prim_count * sizeof hwtnl->cmd.prim[0]);

      for (i = 0; i < hwtnl->cmd.prim_count; i++) {
         swc->surface_relocation(swc,
                                 &prim[i].indexArray.surfaceId,
                                 ib_handle[i],
                                 PIPE_BUFFER_USAGE_GPU_READ);
         /* Drop the queued reference now that it's been emitted. */
         pipe_buffer_reference(&hwtnl->cmd.prim_ib[i], NULL);
      }

      SVGA_FIFOCommitAll( swc );

      hwtnl->cmd.prim_count = 0;
   }

   return PIPE_OK;
}
static int update_tss_binding(struct svga_context *svga, unsigned dirty ) { boolean reemit = svga->rebind.texture_samplers; unsigned i; unsigned count = MAX2( svga->curr.num_sampler_views, svga->state.hw_draw.num_views ); unsigned min_lod; unsigned max_lod; struct bind_queue queue; queue.bind_count = 0; for (i = 0; i < count; i++) { const struct svga_sampler_state *s = svga->curr.sampler[i]; struct svga_hw_view_state *view = &svga->state.hw_draw.views[i]; struct pipe_resource *texture = NULL; /* get min max lod */ if (svga->curr.sampler_views[i]) { min_lod = MAX2(s->view_min_lod, 0); max_lod = MIN2(s->view_max_lod, svga->curr.sampler_views[i]->texture->last_level); texture = svga->curr.sampler_views[i]->texture; } else { min_lod = 0; max_lod = 0; } if (view->texture != texture || view->min_lod != min_lod || view->max_lod != max_lod) { svga_sampler_view_reference(&view->v, NULL); pipe_resource_reference( &view->texture, texture ); view->dirty = TRUE; view->min_lod = min_lod; view->max_lod = max_lod; if (texture) view->v = svga_get_tex_sampler_view(&svga->pipe, texture, min_lod, max_lod); } /* * We need to reemit non-null texture bindings, even when they are not * dirty, to ensure that the resources are paged in. 
*/ if (view->dirty || (reemit && view->v)) { queue.bind[queue.bind_count].unit = i; queue.bind[queue.bind_count].view = view; queue.bind_count++; } if (!view->dirty && view->v) { svga_validate_sampler_view(svga, view->v); } } svga->state.hw_draw.num_views = svga->curr.num_sampler_views; if (queue.bind_count) { SVGA3dTextureState *ts; if (SVGA3D_BeginSetTextureState( svga->swc, &ts, queue.bind_count ) != PIPE_OK) goto fail; for (i = 0; i < queue.bind_count; i++) { struct svga_winsys_surface *handle; ts[i].stage = queue.bind[i].unit; ts[i].name = SVGA3D_TS_BIND_TEXTURE; if (queue.bind[i].view->v) { handle = queue.bind[i].view->v->handle; } else { handle = NULL; } svga->swc->surface_relocation(svga->swc, &ts[i].value, handle, SVGA_RELOC_READ); queue.bind[i].view->dirty = FALSE; } SVGA_FIFOCommitAll( svga->swc ); } svga->rebind.texture_samplers = FALSE; return 0; fail: return PIPE_ERROR_OUT_OF_MEMORY; }
/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily in blank.
 *
 * Reserves a SURFACE_DMA command plus one SVGA3dCopyBox per mapped
 * range plus the DMA suffix; the boxes are filled in later (just
 * before flush) via the sbuf->dma.boxes pointer saved here.
 *
 * Returns PIPE_OK, PIPE_ERROR_OUT_OF_MEMORY on FIFO reservation
 * failure, or PIPE_ERROR_BAD_INPUT for an unknown transfer type.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   /* Relocation flags depend on DMA direction.  'transfer' is fixed to
    * host writes above; the read branch is kept for completeness.
    */
   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   /* Single reservation for the command, all copy boxes, and the suffix. */
   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   /* Boxes live immediately after the command struct; remember where so
    * they can be patched before flush.
    */
   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   /* The suffix follows the copy boxes at the end of the reservation. */
   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   /* The discard flag applies only to the first DMA after invalidation. */
   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
/**
 * Emit changed per-unit texture sampler state (filters, LOD, address
 * modes, gamma) to the hardware as one SetTextureState command.
 *
 * Returns 0 on success or PIPE_ERROR_OUT_OF_MEMORY on FIFO reservation
 * failure (after poisoning the cached state so everything re-emits).
 */
static int
update_tss(struct svga_context *svga,
           unsigned dirty )
{
   unsigned i;
   struct ts_queue queue;

   queue.ts_count = 0;
   for (i = 0; i < svga->curr.num_samplers; i++) {
      if (svga->curr.sampler[i]) {
         const struct svga_sampler_state *curr = svga->curr.sampler[i];

         /* EMIT_TS/EMIT_TS_FLOAT append a token for this unit to
          * 'queue', jumping to 'fail' on error; presumably they skip
          * values already in the cached hw state -- macro body is not
          * visible here.
          */
         EMIT_TS(svga, i, curr->mipfilter, MIPFILTER, fail);
         EMIT_TS(svga, i, curr->min_lod, TEXTURE_MIPMAP_LEVEL, fail);
         EMIT_TS(svga, i, curr->magfilter, MAGFILTER, fail);
         EMIT_TS(svga, i, curr->minfilter, MINFILTER, fail);
         EMIT_TS(svga, i, curr->aniso_level, TEXTURE_ANISOTROPIC_LEVEL, fail);
         EMIT_TS_FLOAT(svga, i, curr->lod_bias, TEXTURE_LOD_BIAS, fail);
         EMIT_TS(svga, i, curr->addressu, ADDRESSU, fail);
         EMIT_TS(svga, i, curr->addressw, ADDRESSW, fail);
         EMIT_TS(svga, i, curr->bordercolor, BORDERCOLOR, fail);

         // TEXCOORDINDEX -- hopefully not needed

         /* Units flagged as 1D force the V address mode to WRAP. */
         if (svga->curr.tex_flags.flag_1d & (1 << i)) {
            debug_printf("wrap 1d tex %d\n", i);
            EMIT_TS(svga, i, SVGA3D_TEX_ADDRESS_WRAP, ADDRESSV, fail);
         }
         else
            EMIT_TS(svga, i, curr->addressv, ADDRESSV, fail);

         /* sRGB textures get a 2.2 gamma ramp, linear ones 1.0. */
         if (svga->curr.tex_flags.flag_srgb & (1 << i))
            EMIT_TS_FLOAT(svga, i, 2.2f, GAMMA, fail);
         else
            EMIT_TS_FLOAT(svga, i, 1.0f, GAMMA, fail);
      }
   }

   if (queue.ts_count) {
      SVGA3dTextureState *ts;

      if (SVGA3D_BeginSetTextureState( svga->swc,
                                       &ts,
                                       queue.ts_count ) != PIPE_OK)
         goto fail;

      memcpy( ts,
              queue.ts,
              queue.ts_count * sizeof queue.ts[0]);

      SVGA_FIFOCommitAll( svga->swc );
   }

   return 0;

fail:
   /* XXX: need to poison cached hardware state on failure to ensure
    * dirty state gets re-emitted.  Fix this by re-instating partial
    * FIFOCommit command and only updating cached hw state once the
    * initial allocation has succeeded.
    */
   memset(svga->state.hw_draw.ts, 0xcd, sizeof(svga->state.hw_draw.ts));

   return PIPE_ERROR_OUT_OF_MEMORY;
}
/* Compare old and new render states and emit differences between them
 * to hardware.  Simplest implementation would be to emit the whole of
 * the "to" state.
 *
 * 'dirty' is a bitmask of SVGA_NEW_* flags selecting which state groups
 * to examine.  Returns 0 on success or PIPE_ERROR_OUT_OF_MEMORY on FIFO
 * reservation failure (after poisoning the cached render state).
 */
static int
emit_rss( struct svga_context *svga,
          unsigned dirty )
{
   struct rs_queue queue;

   queue.rs_count = 0;

   if (dirty & SVGA_NEW_BLEND) {
      const struct svga_blend_state *curr = svga->curr.blend;

      /* EMIT_RS queues a render-state token, jumping to 'fail' on error. */
      EMIT_RS( svga, curr->rt[0].writemask, COLORWRITEENABLE, fail );
      EMIT_RS( svga, curr->rt[0].blend_enable, BLENDENABLE, fail );

      /* Blend factors/equation only matter while blending is enabled. */
      if (curr->rt[0].blend_enable) {
         EMIT_RS( svga, curr->rt[0].srcblend, SRCBLEND, fail );
         EMIT_RS( svga, curr->rt[0].dstblend, DSTBLEND, fail );
         EMIT_RS( svga, curr->rt[0].blendeq, BLENDEQUATION, fail );

         EMIT_RS( svga, curr->rt[0].separate_alpha_blend_enable,
                  SEPARATEALPHABLENDENABLE, fail );

         if (curr->rt[0].separate_alpha_blend_enable) {
            EMIT_RS( svga, curr->rt[0].srcblend_alpha, SRCBLENDALPHA, fail );
            EMIT_RS( svga, curr->rt[0].dstblend_alpha, DSTBLENDALPHA, fail );
            EMIT_RS( svga, curr->rt[0].blendeq_alpha, BLENDEQUATIONALPHA, fail );
         }
      }
   }

   if (dirty & (SVGA_NEW_DEPTH_STENCIL | SVGA_NEW_RAST)) {
      const struct svga_depth_stencil_state *curr = svga->curr.depth;
      const struct svga_rasterizer_state *rast = svga->curr.rast;

      if (!curr->stencil[0].enabled) {
         /* Stencil disabled */
         EMIT_RS( svga, FALSE, STENCILENABLE, fail );
         EMIT_RS( svga, FALSE, STENCILENABLE2SIDED, fail );
      }
      else if (curr->stencil[0].enabled && !curr->stencil[1].enabled) {
         /* Regular stencil */
         EMIT_RS( svga, TRUE, STENCILENABLE, fail );
         EMIT_RS( svga, FALSE, STENCILENABLE2SIDED, fail );

         EMIT_RS( svga, curr->stencil[0].func, STENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[0].fail, STENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[0].zfail, STENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[0].pass, STENCILPASS, fail );

         EMIT_RS( svga, curr->stencil_ref, STENCILREF, fail );
         EMIT_RS( svga, curr->stencil_mask, STENCILMASK, fail );
         EMIT_RS( svga, curr->stencil_writemask, STENCILWRITEMASK, fail );
      }
      else {
         int cw, ccw;

         /* Hardware frontwinding is always CW, so if ours is also CW,
          * then our definition of front face agrees with hardware.
          * Otherwise need to flip.
          */
         if (rast->templ.front_winding == PIPE_WINDING_CW) {
            cw = 0;
            ccw = 1;
         }
         else {
            cw = 1;
            ccw = 0;
         }

         /* Twoside stencil */
         EMIT_RS( svga, TRUE, STENCILENABLE, fail );
         EMIT_RS( svga, TRUE, STENCILENABLE2SIDED, fail );

         EMIT_RS( svga, curr->stencil[cw].func, STENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[cw].fail, STENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[cw].zfail, STENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[cw].pass, STENCILPASS, fail );

         EMIT_RS( svga, curr->stencil[ccw].func, CCWSTENCILFUNC, fail );
         EMIT_RS( svga, curr->stencil[ccw].fail, CCWSTENCILFAIL, fail );
         EMIT_RS( svga, curr->stencil[ccw].zfail, CCWSTENCILZFAIL, fail );
         EMIT_RS( svga, curr->stencil[ccw].pass, CCWSTENCILPASS, fail );

         EMIT_RS( svga, curr->stencil_ref, STENCILREF, fail );
         EMIT_RS( svga, curr->stencil_mask, STENCILMASK, fail );
         EMIT_RS( svga, curr->stencil_writemask, STENCILWRITEMASK, fail );
      }

      EMIT_RS( svga, curr->zenable, ZENABLE, fail );
      if (curr->zenable) {
         EMIT_RS( svga, curr->zfunc, ZFUNC, fail );
         EMIT_RS( svga, curr->zwriteenable, ZWRITEENABLE, fail );
      }

      EMIT_RS( svga, curr->alphatestenable, ALPHATESTENABLE, fail );
      if (curr->alphatestenable) {
         EMIT_RS( svga, curr->alphafunc, ALPHAFUNC, fail );
         EMIT_RS_FLOAT( svga, curr->alpharef, ALPHAREF, fail );
      }
   }

   if (dirty & SVGA_NEW_RAST) {
      const struct svga_rasterizer_state *curr = svga->curr.rast;

      /* Shademode: still need to rearrange index list to move
       * flat-shading PV first vertex.
       */
      EMIT_RS( svga, curr->shademode, SHADEMODE, fail );
      EMIT_RS( svga, curr->cullmode, CULLMODE, fail );
      EMIT_RS( svga, curr->scissortestenable, SCISSORTESTENABLE, fail );
      EMIT_RS( svga, curr->multisampleantialias, MULTISAMPLEANTIALIAS, fail );
      EMIT_RS( svga, curr->lastpixel, LASTPIXEL, fail );
      EMIT_RS( svga, curr->linepattern, LINEPATTERN, fail );
      EMIT_RS_FLOAT( svga, curr->pointsize, POINTSIZE, fail );
      EMIT_RS_FLOAT( svga, curr->pointsize_min, POINTSIZEMIN, fail );
      EMIT_RS_FLOAT( svga, curr->pointsize_max, POINTSIZEMAX, fail );
   }

   if (dirty & (SVGA_NEW_RAST | SVGA_NEW_FRAME_BUFFER | SVGA_NEW_NEED_PIPELINE)) {
      const struct svga_rasterizer_state *curr = svga->curr.rast;
      float slope = 0.0;
      float bias = 0.0;

      /* Need to modify depth bias according to bound depthbuffer
       * format.  Don't do hardware depthbias while the software
       * pipeline is active.
       */
      if (!svga->state.sw.need_pipeline &&
          svga->curr.framebuffer.zsbuf) {
         slope = curr->slopescaledepthbias;
         bias = svga->curr.depthscale * curr->depthbias;
      }

      EMIT_RS_FLOAT( svga, slope, SLOPESCALEDEPTHBIAS, fail );
      EMIT_RS_FLOAT( svga, bias, DEPTHBIAS, fail );
   }

   if (queue.rs_count) {
      SVGA3dRenderState *rs;

      if (SVGA3D_BeginSetRenderState( svga->swc,
                                      &rs,
                                      queue.rs_count ) != PIPE_OK)
         goto fail;

      memcpy( rs,
              queue.rs,
              queue.rs_count * sizeof queue.rs[0]);

      SVGA_FIFOCommitAll( svga->swc );
   }

   /* Also blend color:
    */

   return 0;

fail:
   /* XXX: need to poison cached hardware state on failure to ensure
    * dirty state gets re-emitted.  Fix this by re-instating partial
    * FIFOCommit command and only updating cached hw state once the
    * initial allocation has succeeded.
    */
   memset(svga->state.hw_draw.rs, 0xcd, sizeof(svga->state.hw_draw.rs));

   return PIPE_ERROR_OUT_OF_MEMORY;
}
/**
 * Re-validate texture bindings (pipe_texture-based variant): rebuild
 * sampler views whose texture or LOD clamp changed and emit a
 * SVGA3D_TS_BIND_TEXTURE state change for every dirty unit.
 *
 * Returns 0 on success or PIPE_ERROR_OUT_OF_MEMORY on FIFO failure.
 */
static int
update_tss_binding(struct svga_context *svga,
                   unsigned dirty )
{
   unsigned i;
   /* Walk the union of currently-bound and previously-bound units so
    * stale bindings are also revisited.
    */
   unsigned count = MAX2( svga->curr.num_textures,
                          svga->state.hw_draw.num_views );
   unsigned min_lod;
   unsigned max_lod;

   struct {
      struct {
         unsigned unit;
         struct svga_hw_view_state *view;
      } bind[PIPE_MAX_SAMPLERS];
      unsigned bind_count;
   } queue;

   queue.bind_count = 0;

   for (i = 0; i < count; i++) {
      const struct svga_sampler_state *s = svga->curr.sampler[i];
      struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];

      /* get min max lod */
      if (svga->curr.texture[i]) {
         /* NOTE(review): 's' is dereferenced here with no NULL check;
          * if a texture can ever be bound at a unit with no matching
          * sampler state this crashes -- confirm callers guarantee a
          * sampler wherever a texture is bound.
          */
         min_lod = MAX2(s->view_min_lod, 0);
         max_lod = MIN2(s->view_max_lod, svga->curr.texture[i]->last_level);
      }
      else {
         min_lod = 0;
         max_lod = 0;
      }

      if (view->texture != svga->curr.texture[i] ||
          view->min_lod != min_lod ||
          view->max_lod != max_lod) {
         /* Texture or LOD clamp changed: drop the cached sampler view
          * and build a fresh one for the new clamp range.
          */
         svga_sampler_view_reference(&view->v, NULL);
         pipe_texture_reference( &view->texture, svga->curr.texture[i] );

         view->dirty = TRUE;
         view->min_lod = min_lod;
         view->max_lod = max_lod;

         if (svga->curr.texture[i])
            view->v = svga_get_tex_sampler_view(&svga->pipe,
                                                svga->curr.texture[i],
                                                min_lod,
                                                max_lod);
      }

      if (view->dirty) {
         queue.bind[queue.bind_count].unit = i;
         queue.bind[queue.bind_count].view = view;
         queue.bind_count++;
      }
      else if (view->v) {
         svga_validate_sampler_view(svga, view->v);
      }
   }

   svga->state.hw_draw.num_views = svga->curr.num_textures;

   if (queue.bind_count) {
      SVGA3dTextureState *ts;

      if (SVGA3D_BeginSetTextureState( svga->swc,
                                       &ts,
                                       queue.bind_count ) != PIPE_OK)
         goto fail;

      for (i = 0; i < queue.bind_count; i++) {
         ts[i].stage = queue.bind[i].unit;
         ts[i].name = SVGA3D_TS_BIND_TEXTURE;

         if (queue.bind[i].view->v) {
            svga->swc->surface_relocation(svga->swc,
                                          &ts[i].value,
                                          queue.bind[i].view->v->handle,
                                          PIPE_BUFFER_USAGE_GPU_READ);
         }
         else {
            /* Unbind the unit. */
            ts[i].value = SVGA3D_INVALID_ID;
         }

         queue.bind[i].view->dirty = FALSE;
      }

      SVGA_FIFOCommitAll( svga->swc );
   }

   return 0;

fail:
   return PIPE_ERROR_OUT_OF_MEMORY;
}
/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 *
 * If the buffer contents were discarded, a single INVALIDATE_GB_IMAGE
 * command is emitted ahead of the updates in the same reservation.
 *
 * Returns PIPE_OK or PIPE_ERROR_OUT_OF_MEMORY on reservation failure.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned int i;

   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Recover the header that precedes the reserved command body. */
      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   }
   else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc,
                              &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   SVGA_FIFOCommitAll(swc);

   /* The invalidate is only needed once after a discard. */
   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
/* XXX still have doubts about this... */ static void svga_surface_copy(struct pipe_context *pipe, struct pipe_resource* dst_tex, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource* src_tex, unsigned src_level, const struct pipe_box *src_box) { struct svga_context *svga = svga_context(pipe); struct svga_texture *stex, *dtex; /* struct pipe_screen *screen = pipe->screen; SVGA3dCopyBox *box; enum pipe_error ret; struct pipe_surface *srcsurf, *dstsurf;*/ unsigned dst_face, dst_z, src_face, src_z; /* Emit buffered drawing commands, and any back copies. */ svga_surfaces_flush( svga ); /* Fallback for buffers. */ if (dst_tex->target == PIPE_BUFFER && src_tex->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dst_tex, dst_level, dstx, dsty, dstz, src_tex, src_level, src_box); return; } stex = svga_texture(src_tex); dtex = svga_texture(dst_tex); #if 0 srcsurf = screen->get_tex_surface(screen, src_tex, src_level, src_box->z, src_box->z, PIPE_BIND_SAMPLER_VIEW); dstsurf = screen->get_tex_surface(screen, dst_tex, dst_level, dst_box->z, dst_box->z, PIPE_BIND_RENDER_TARGET); SVGA_DBG(DEBUG_DMA, "blit to sid %p (%d,%d), from sid %p (%d,%d) sz %dx%d\n", svga_surface(dstsurf)->handle, dstx, dsty, svga_surface(srcsurf)->handle, src_box->x, src_box->y, width, height); ret = SVGA3D_BeginSurfaceCopy(svga->swc, srcsurf, dstsurf, &box, 1); if(ret != PIPE_OK) { svga_context_flush(svga, NULL); ret = SVGA3D_BeginSurfaceCopy(svga->swc, srcsurf, dstsurf, &box, 1); assert(ret == PIPE_OK); } box->x = dstx; box->y = dsty; box->z = 0; box->w = width; box->h = height; box->d = 1; box->srcx = src_box->x; box->srcy = src_box->y; box->srcz = 0; SVGA_FIFOCommitAll(svga->swc); svga_surface(dstsurf)->dirty = TRUE; svga_propagate_surface(pipe, dstsurf); pipe_surface_reference(&srcsurf, NULL); pipe_surface_reference(&dstsurf, NULL); #else if (src_tex->target == PIPE_TEXTURE_CUBE) { src_face = src_box->z; src_z = 0; assert(src_box->depth == 1); } else { src_face = 
0; src_z = src_box->z; } /* different src/dst type???*/ if (dst_tex->target == PIPE_TEXTURE_CUBE) { dst_face = dstz; dst_z = 0; assert(src_box->depth == 1); } else { dst_face = 0; dst_z = dstz; } svga_texture_copy_handle(svga, stex->handle, src_box->x, src_box->y, src_z, src_level, src_face, dtex->handle, dstx, dsty, dst_z, dst_level, dst_face, src_box->width, src_box->height, src_box->depth); #endif }
/*
 * Render one frame: set up view/world/projection transforms, opaque
 * depth-tested render state, a pass-through (untextured) texture
 * stage, then issue a single indexed triangle-list draw.
 */
void
render(void)
{
   SVGA3dTextureState *ts;
   SVGA3dRenderState *rs;
   SVGA3dVertexDecl *decls;
   SVGA3dPrimitiveRange *ranges;
   static Matrix view;

   /* Build the view matrix: mouse-driven rotation while the left
    * button is held, otherwise a slow spin driven by the frame counter.
    */
   Matrix_Copy(view, gIdentityMatrix);
   Matrix_Scale(view, 0.5, 0.5, 0.5, 1.0);

   if (lastMouseState.buttons & VMMOUSE_LEFT_BUTTON) {
      Matrix_RotateX(view, lastMouseState.y * 0.0001);
      Matrix_RotateY(view, lastMouseState.x * -0.0001);
   } else {
      Matrix_RotateX(view, 30.0 * M_PI / 180.0);
      Matrix_RotateY(view, gFPS.frame * 0.01f);
   }

   Matrix_Translate(view, 0, 0, 3);

   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_VIEW, view);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_WORLD, gIdentityMatrix);
   SVGA3D_SetTransform(CID, SVGA3D_TRANSFORM_PROJECTION, perspectiveMat);

   /* Opaque rendering with a standard less-than depth test. */
   SVGA3D_BeginSetRenderState(CID, &rs, 4);
   {
      rs[0].state = SVGA3D_RS_BLENDENABLE;
      rs[0].uintValue = FALSE;

      rs[1].state = SVGA3D_RS_ZENABLE;
      rs[1].uintValue = TRUE;

      rs[2].state = SVGA3D_RS_ZWRITEENABLE;
      rs[2].uintValue = TRUE;

      rs[3].state = SVGA3D_RS_ZFUNC;
      rs[3].uintValue = SVGA3D_CMP_LESS;
   }
   SVGA_FIFOCommitAll();

   /* No texture bound: stage 0 selects the per-vertex diffuse color for
    * both the color and alpha channels.
    */
   SVGA3D_BeginSetTextureState(CID, &ts, 4);
   {
      ts[0].stage = 0;
      ts[0].name = SVGA3D_TS_BIND_TEXTURE;
      ts[0].value = SVGA3D_INVALID_ID;

      ts[1].stage = 0;
      ts[1].name = SVGA3D_TS_COLOROP;
      ts[1].value = SVGA3D_TC_SELECTARG1;

      ts[2].stage = 0;
      ts[2].name = SVGA3D_TS_COLORARG1;
      ts[2].value = SVGA3D_TA_DIFFUSE;

      ts[3].stage = 0;
      ts[3].name = SVGA3D_TS_ALPHAARG1;
      ts[3].value = SVGA3D_TA_DIFFUSE;
   }
   SVGA_FIFOCommitAll();

   /* One draw call: FLOAT3 position + D3DCOLOR color vertex streams
    * from vertexSid, 16-bit indices from indexSid.
    */
   SVGA3D_BeginDrawPrimitives(CID, &decls, 2, &ranges, 1);
   {
      decls[0].identity.type = SVGA3D_DECLTYPE_FLOAT3;
      decls[0].identity.usage = SVGA3D_DECLUSAGE_POSITION;
      decls[0].array.surfaceId = vertexSid;
      decls[0].array.stride = sizeof(MyVertex);
      decls[0].array.offset = offsetof(MyVertex, position);

      decls[1].identity.type = SVGA3D_DECLTYPE_D3DCOLOR;
      decls[1].identity.usage = SVGA3D_DECLUSAGE_COLOR;
      decls[1].array.surfaceId = vertexSid;
      decls[1].array.stride = sizeof(MyVertex);
      decls[1].array.offset = offsetof(MyVertex, color);

      ranges[0].primType = SVGA3D_PRIMITIVE_TRIANGLELIST;
      ranges[0].primitiveCount = numTriangles;
      ranges[0].indexArray.surfaceId = indexSid;
      ranges[0].indexArray.stride = sizeof(uint16);
      ranges[0].indexWidth = sizeof(uint16);
   }
   SVGA_FIFOCommitAll();
}