static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for(i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}
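
/**
 * vmw_dri1_present_locked - Present a surface to the front buffer.
 *
 * Intersects the source bounding box with each DRI1 clip rectangle and
 * emits one SVGA_3D_CMD_PRESENT copy per visible rectangle. If at least
 * one rectangle was presented, a fence wrapping the last submission is
 * returned in @p_fence, otherwise *p_fence is set to NULL.
 */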
static void
vmw_dri1_present_locked(struct pipe_context *locked_pipe,
                        struct pipe_surface *surf,
                        const struct drm_clip_rect *rect,
                        unsigned int num_clip,
                        int x_draw, int y_draw,
                        const struct drm_clip_rect *bbox,
                        struct pipe_fence_handle **p_fence)
{
   struct svga_winsys_surface *srf =
      svga_screen_texture_get_winsys_surface(surf->texture);
   struct vmw_svga_winsys_surface *vsrf = vmw_svga_winsys_surface(srf);
   struct vmw_winsys_screen *vws =
      vmw_winsys_screen(svga_winsys_screen(locked_pipe->screen));
   struct drm_clip_rect clip;
   int i;
   struct
   {
      SVGA3dCmdHeader header;
      SVGA3dCmdPresent body;
      SVGA3dCopyRect rect;
   } cmd;
   boolean visible = FALSE;
   uint32_t fence_seq = 0;

   VMW_FUNC;

   cmd.header.id = SVGA_3D_CMD_PRESENT;
   cmd.header.size = sizeof cmd.body + sizeof cmd.rect;
   cmd.body.sid = vsrf->sid;

   for (i = 0; i < num_clip; ++i) {
      if (!vmw_dri1_intersect_src_bbox(&clip, x_draw, y_draw, rect++, bbox))
         continue;

      cmd.rect.x = clip.x1;
      cmd.rect.y = clip.y1;
      cmd.rect.w = clip.x2 - clip.x1;
      cmd.rect.h = clip.y2 - clip.y1;
      cmd.rect.srcx = (int) clip.x1 - x_draw;
      cmd.rect.srcy = (int) clip.y1 - y_draw;

      vmw_printf("%s: Clip %d x %d y %d w %d h %d srcx %d srcy %d\n",
                 __FUNCTION__, i,
                 cmd.rect.x, cmd.rect.y, cmd.rect.w, cmd.rect.h,
                 cmd.rect.srcx, cmd.rect.srcy);

      vmw_ioctl_command(vws, &cmd, sizeof cmd.header + cmd.header.size,
                        &fence_seq);
      visible = TRUE;
   }

   *p_fence = (visible) ? vmw_pipe_fence(fence_seq) : NULL;
   vmw_svga_winsys_surface_reference(&vsrf, NULL);
}
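
/**
 * vmw_svga_winsys_surface_ref - Surface reference-counting entry point.
 *
 * Thin wrapper that converts the opaque svga_winsys_surface pointers to
 * vmw_svga_winsys_surface and updates the reference count accordingly.
 */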
static void
vmw_svga_winsys_surface_ref(struct svga_winsys_screen *sws,
                            struct svga_winsys_surface **pDst,
                            struct svga_winsys_surface *src)
{
   struct vmw_svga_winsys_surface *d_vsurf = vmw_svga_winsys_surface(*pDst);
   struct vmw_svga_winsys_surface *s_vsurf = vmw_svga_winsys_surface(src);

   vmw_svga_winsys_surface_reference(&d_vsurf, s_vsurf);
   *pDst = svga_winsys_surface(d_vsurf);
}
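
/**
 * vmw_drm_handle_from_buffer - Return the shareable handle for a buffer.
 *
 * The handle handed back to the caller is the hardware surface id (SID)
 * of the winsys surface backing the pipe buffer. Returns FALSE if the
 * buffer is not backed by a winsys surface.
 */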
static boolean
vmw_drm_handle_from_buffer(struct drm_api *drm_api,
                           struct pipe_screen *screen,
                           struct pipe_buffer *buffer,
                           unsigned *handle)
{
   struct svga_winsys_surface *surface =
      svga_screen_buffer_get_winsys_surface(buffer);
   struct vmw_svga_winsys_surface *vsrf;

   if (!surface)
      return FALSE;

   vsrf = vmw_svga_winsys_surface(surface);
   *handle = vsrf->sid;
   vmw_svga_winsys_surface_reference(&vsrf, NULL);

   return TRUE;
}
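
/**
 * vmw_swc_surface_only_relocation - Stage a surface-only relocation.
 *
 * Looks the surface up in the per-context hash table and, if it has not
 * been seen yet, stages a new validation item and accounts for the surface
 * size. Once the surface memory referenced by the command buffer exceeds
 * max_surface_memory / VMW_MAX_SURF_MEM_FACTOR, a preemptive flush is
 * requested. Unless the relocation is internal, the surface's validation
 * count is bumped so it stays resident until the next flush.
 */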
static void
vmw_swc_surface_only_relocation(struct svga_winsys_context *swc,
                                uint32 *where,
                                struct vmw_svga_winsys_surface *vsurf,
                                unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf;

   assert(vswc->surface.staged < vswc->surface.reserved);

   isrf = util_hash_table_get(vswc->hash, vsurf);
   if (isrf == NULL) {
      isrf = &vswc->surface.items[vswc->surface.used + vswc->surface.staged];
      vmw_svga_winsys_surface_reference(&isrf->vsurf, vsurf);
      isrf->referenced = FALSE;

      /*
       * Note that a failure here may just fall back to unhashed behavior
       * and potentially cause unnecessary flushing, so ignore the
       * return code.
       */
      (void) util_hash_table_set(vswc->hash, vsurf, isrf);
      ++vswc->surface.staged;

      vswc->seen_surfaces += vsurf->size;
      if ((swc->hints & SVGA_HINT_FLAG_CAN_PRE_FLUSH) &&
          vswc->seen_surfaces >=
             vswc->vws->ioctl.max_surface_memory / VMW_MAX_SURF_MEM_FACTOR)
         vswc->preemptive_flush = TRUE;
   }

   if (!(flags & SVGA_RELOC_INTERNAL) && !isrf->referenced) {
      isrf->referenced = TRUE;
      p_atomic_inc(&vsurf->validated);
   }

   if (where)
      *where = vsurf->sid;
}
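
/**
 * vmw_swc_flush - Validate, patch and submit the current command buffer.
 *
 * Validates all referenced buffers, applies GMR/MOB relocations to the
 * command stream, submits it through the command ioctl and fences the
 * validation list. All surface, shader and region validation state is then
 * reset so the context is ready for the next command buffer. The resulting
 * fence is optionally returned in @pfence.
 */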
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if(pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}
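
/**
 * vmw_drm_buffer_from_handle - Wrap a shared surface handle in a pipe buffer.
 *
 * References the surface identified by the SID @handle through
 * DRM_VMW_REF_SURFACE, verifies that it is a single-face, single-mip-level
 * surface, and wraps it in a pipe buffer. On failure the kernel reference
 * taken on the surface is dropped again.
 */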
static struct pipe_buffer *
vmw_drm_buffer_from_handle(struct drm_api *drm_api,
                           struct pipe_screen *screen,
                           const char *name,
                           unsigned handle)
{
   struct vmw_svga_winsys_surface *vsrf;
   struct svga_winsys_surface *ssrf;
   struct vmw_winsys_screen *vws =
      vmw_winsys_screen(svga_winsys_screen(screen));
   struct pipe_buffer *buf;
   union drm_vmw_surface_reference_arg arg;
   struct drm_vmw_surface_arg *req = &arg.req;
   struct drm_vmw_surface_create_req *rep = &arg.rep;
   int ret;
   int i;

   /**
    * The vmware device specific handle is the hardware SID.
    * FIXME: We probably want to move this to the ioctl implementations.
    */

   memset(&arg, 0, sizeof(arg));
   req->sid = handle;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
                             &arg, sizeof(arg));

   if (ret) {
      fprintf(stderr, "Failed referencing shared surface. SID %d.\n"
              "Error %d (%s).\n",
              handle, ret, strerror(-ret));
      return NULL;
   }

   if (rep->mip_levels[0] != 1) {
      fprintf(stderr, "Incorrect number of mipmap levels on shared surface."
              " SID %d, levels %d\n",
              handle, rep->mip_levels[0]);
      goto out_mip;
   }

   for (i = 1; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
      if (rep->mip_levels[i] != 0) {
         fprintf(stderr, "Unexpected mipmap levels on shared surface."
                 " SID %d, face %d present.\n",
                 handle, i);
         goto out_mip;
      }
   }

   vsrf = CALLOC_STRUCT(vmw_svga_winsys_surface);
   if (!vsrf)
      goto out_mip;

   pipe_reference_init(&vsrf->refcnt, 1);
   p_atomic_set(&vsrf->validated, 0);
   vsrf->sid = handle;
   ssrf = svga_winsys_surface(vsrf);
   buf = svga_screen_buffer_wrap_surface(screen, rep->format, ssrf);
   if (!buf)
      vmw_svga_winsys_surface_reference(&vsrf, NULL);

   return buf;

out_mip:
   vmw_ioctl_surface_destroy(vws, handle);
   return NULL;
}