/**
 * Free the CSO context.
 *
 * Unbinds every piece of pipe state the CSO module may have bound (so the
 * driver drops its references), releases all saved/current object references
 * held by the context, destroys the state cache and vbuf manager, and finally
 * frees the context itself.
 */
void cso_destroy_context( struct cso_context *ctx )
{
   unsigned i;

   if (ctx->pipe) {
      /* Unbind all state from the pipe context before tearing down, so the
       * driver is not left pointing at CSO-owned objects.
       */
      ctx->pipe->set_index_buffer(ctx->pipe, NULL);
      ctx->pipe->bind_blend_state( ctx->pipe, NULL );
      ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL );

      {
         /* static: these are only ever read as all-NULL scratch arrays used
          * to unbind samplers/views for every shader stage.
          */
         static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL };
         static void *zeros[PIPE_MAX_SAMPLERS] = { NULL };
         struct pipe_screen *scr = ctx->pipe->screen;
         unsigned sh;
         for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
            /* Query how many samplers/views each stage actually supports and
             * unbind exactly that many.
             */
            int maxsam = scr->get_shader_param(scr, sh,
                                               PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS);
            int maxview = scr->get_shader_param(scr, sh,
                                                PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS);
            assert(maxsam <= PIPE_MAX_SAMPLERS);
            assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
            if (maxsam > 0) {
               ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros);
            }
            if (maxview > 0) {
               ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views);
            }
         }
      }

      ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
      ctx->pipe->bind_fs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
      ctx->pipe->bind_vs_state( ctx->pipe, NULL );
      ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
      /* Optional stages are only unbound when the context advertised them. */
      if (ctx->has_geometry_shader) {
         ctx->pipe->bind_gs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL);
      }
      if (ctx->has_tessellation) {
         ctx->pipe->bind_tcs_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL);
         ctx->pipe->bind_tes_state(ctx->pipe, NULL);
         ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL);
      }
      ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL );

      if (ctx->has_streamout)
         ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL);
   }

   /* Drop all references held by the CSO context itself (current + saved). */
   for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
      pipe_sampler_view_reference(&ctx->fragment_views[i], NULL);
      pipe_sampler_view_reference(&ctx->fragment_views_saved[i], NULL);
   }

   util_unreference_framebuffer_state(&ctx->fb);
   util_unreference_framebuffer_state(&ctx->fb_saved);

   pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL);
   pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL);

   for (i = 0; i < PIPE_SHADER_TYPES; i++) {
      pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL);
      pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL);
   }

   for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      pipe_so_target_reference(&ctx->so_targets_saved[i], NULL);
   }

   if (ctx->cache) {
      cso_cache_delete( ctx->cache );
      ctx->cache = NULL;
   }

   if (ctx->vbuf)
      u_vbuf_destroy(ctx->vbuf);
   FREE( ctx );
}
/**
 * Bind (or clear) a pipe_resource as the backing storage for the current
 * texture object's image at the given mipmap level.
 *
 * Used for window-system textures (e.g. GLX_EXT_texture_from_pixmap style
 * binding).  When \p tex is non-NULL the texture object becomes
 * "surface based" and its size is derived from the resource; when NULL the
 * image is cleared.
 *
 * \return TRUE on success, FALSE if \p tex_type is not a supported target.
 */
static boolean
st_context_teximage(struct st_context_iface *stctxi,
                    enum st_texture_type tex_type,
                    int level, enum pipe_format pipe_format,
                    struct pipe_resource *tex, boolean mipmap)
{
   struct st_context *st = (struct st_context *) stctxi;
   struct gl_context *ctx = st->ctx;
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   struct st_texture_object *stObj;
   struct st_texture_image *stImage;
   GLenum internalFormat;
   GLuint width, height, depth;
   GLenum target;

   /* Map the state-tracker texture type onto a GL target. */
   switch (tex_type) {
   case ST_TEXTURE_1D:
      target = GL_TEXTURE_1D;
      break;
   case ST_TEXTURE_2D:
      target = GL_TEXTURE_2D;
      break;
   case ST_TEXTURE_3D:
      target = GL_TEXTURE_3D;
      break;
   case ST_TEXTURE_RECT:
      target = GL_TEXTURE_RECTANGLE_ARB;
      break;
   default:
      return FALSE;
   }

   texObj = _mesa_get_current_tex_object(ctx, target);

   _mesa_lock_texture(ctx, texObj);

   stObj = st_texture_object(texObj);
   /* switch to surface based */
   if (!stObj->surface_based) {
      _mesa_clear_texture_object(ctx, texObj);
      stObj->surface_based = GL_TRUE;
   }

   texImage = _mesa_get_tex_image(ctx, texObj, target, level);
   stImage = st_texture_image(texImage);
   if (tex) {
      mesa_format texFormat = st_pipe_format_to_mesa_format(pipe_format);

      /* Choose a generic internal format matching the resource's alpha. */
      if (util_format_has_alpha(tex->format))
         internalFormat = GL_RGBA;
      else
         internalFormat = GL_RGB;

      _mesa_init_teximage_fields(ctx, texImage,
                                 tex->width0, tex->height0, 1, 0,
                                 internalFormat, texFormat);

      width = tex->width0;
      height = tex->height0;
      depth = tex->depth0;

      /* grow the image size until we hit level = 0
       * (the resource describes level `level`; compute the level-0 size)
       */
      while (level > 0) {
         if (width != 1)
            width <<= 1;
         if (height != 1)
            height <<= 1;
         if (depth != 1)
            depth <<= 1;
         level--;
      }
   }
   else {
      _mesa_clear_texture_image(ctx, texImage);
      width = height = depth = 0;
   }

   pipe_resource_reference(&stImage->pt, tex);
   stObj->width0 = width;
   stObj->height0 = height;
   stObj->depth0 = depth;
   stObj->surface_format = pipe_format;

   _mesa_dirty_texobj(ctx, texObj);

   _mesa_unlock_texture(ctx, texObj);

   return TRUE;
}
/* Not required to implement u_resource_vtbl, consider moving to another file: */ struct pipe_surface* r300_get_tex_surface(struct pipe_screen* screen, struct pipe_resource* texture, unsigned face, unsigned level, unsigned zslice, unsigned flags) { struct r300_texture* tex = r300_texture(texture); struct r300_surface* surface = CALLOC_STRUCT(r300_surface); if (surface) { uint32_t offset, tile_height; pipe_reference_init(&surface->base.reference, 1); pipe_resource_reference(&surface->base.texture, texture); surface->base.format = texture->format; surface->base.width = u_minify(texture->width0, level); surface->base.height = u_minify(texture->height0, level); surface->base.usage = flags; surface->base.zslice = zslice; surface->base.face = face; surface->base.level = level; surface->buffer = tex->buffer; /* Prefer VRAM if there are multiple domains to choose from. */ surface->domain = tex->domain; if (surface->domain & R300_DOMAIN_VRAM) surface->domain &= ~R300_DOMAIN_GTT; surface->offset = r300_texture_get_offset(&tex->desc, level, zslice, face); surface->pitch = tex->fb_state.pitch[level]; surface->format = tex->fb_state.format; /* Parameters for the CBZB clear. */ surface->cbzb_allowed = tex->desc.cbzb_allowed[level]; surface->cbzb_width = align(surface->base.width, 64); /* Height must be aligned to the size of a tile. */ tile_height = r300_get_pixel_alignment(tex->desc.b.b.format, tex->desc.b.b.nr_samples, tex->desc.microtile, tex->desc.macrotile[level], DIM_HEIGHT, 0); surface->cbzb_height = align((surface->base.height + 1) / 2, tile_height); /* Offset must be aligned to 2K and must point at the beginning * of a scanline. 
*/ offset = surface->offset + tex->desc.stride_in_bytes[level] * surface->cbzb_height; surface->cbzb_midpoint_offset = offset & ~2047; surface->cbzb_pitch = surface->pitch & 0x1ffffc; if (util_format_get_blocksizebits(surface->base.format) == 32) surface->cbzb_format = R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL; else surface->cbzb_format = R300_DEPTHFORMAT_16BIT_INT_Z; SCREEN_DBG(r300_screen(screen), DBG_CBZB, "CBZB Allowed: %s, Dim: %ix%i, Misalignment: %i, Micro: %s, Macro: %s\n", surface->cbzb_allowed ? "YES" : " NO", surface->cbzb_width, surface->cbzb_height, offset & 2047, tex->desc.microtile ? "YES" : " NO", tex->desc.macrotile[level] ? "YES" : " NO"); } return &surface->base; }
/* XXX: Still implementing this as if it was a screen function, but
 * can now modify it to queue transfers on the context.
 */
/**
 * Create a transfer object for mapping one 2D box of a texture level.
 *
 * Allocates a hardware DMA buffer sized for the whole box; if that fails,
 * progressively halves the number of rows per DMA until one fits, and uses a
 * malloc'ed staging buffer to cover the remainder.  For read transfers the
 * texture contents are DMA'd into the buffer before returning.
 *
 * \return the new transfer, or NULL on failure (direct mapping requested,
 *         box depth != 1, or out of memory).
 */
static struct pipe_transfer *
svga_texture_get_transfer(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st;
   unsigned nblocksx = util_format_get_nblocksx(texture->format, box->width);
   unsigned nblocksy = util_format_get_nblocksy(texture->format, box->height);

   /* We can't map texture storage directly */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
      return NULL;

   assert(box->depth == 1);
   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      return NULL;

   pipe_resource_reference(&st->base.resource, texture);
   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;
   st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
   st->base.layer_stride = 0;

   /* Try to allocate a DMA buffer for the whole box, halving the row count
    * on failure until something fits (or we run out).
    */
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga,
                                         1,
                                         0,
                                         st->hw_nblocksy*st->base.stride);
   while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf = svga_winsys_buffer_create(svga,
                                            1,
                                            0,
                                            st->hw_nblocksy*st->base.stride);
   }

   if(!st->hwbuf)
      goto no_hwbuf;

   if(st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy*st->base.stride + 1023)/1024,
                      (nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
                      (st->hw_nblocksy*st->base.stride + 1023)/1024);
      }

      st->swbuf = MALLOC(nblocksy*st->base.stride);
      if(!st->swbuf)
         goto no_swbuf;
   }

   /* For reads, pull the current texture contents into the buffer now. */
   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   return &st->base;

   /* goto-based cleanup: release whatever was acquired before the failure. */
no_swbuf:
   sws->buffer_destroy(sws, st->hwbuf);
no_hwbuf:
   FREE(st);
   return NULL;
}
/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 *
 * Copies the texture parameters (base pointer, size, strides, mip offsets)
 * of each bound sampler view into the JIT context so generated fragment
 * shaders can sample directly from resource memory.  Holds a reference on
 * each texture for the lifetime of the binding.
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);

   /* Walk every slot; slots >= num are treated as unbound (view == NULL). */
   for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *res = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(res);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], res);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level offsets */
            int j;
            unsigned first_level = 0;
            unsigned last_level = 0;

            if (llvmpipe_resource_is_texture(res)) {
               first_level = view->u.tex.first_level;
               last_level = view->u.tex.last_level;
               assert(first_level <= last_level);
               assert(last_level <= res->last_level);
               jit_tex->base = lp_tex->tex_data;
            }
            else {
               /* buffer resource: single flat data pointer */
               jit_tex->base = lp_tex->data;
            }

            if (LP_PERF & PERF_TEX_MEM) {
               /* use dummy tile memory */
               jit_tex->base = lp_dummy_tile;
               jit_tex->width = TILE_SIZE/8;
               jit_tex->height = TILE_SIZE/8;
               jit_tex->depth = 1;
               jit_tex->first_level = 0;
               jit_tex->last_level = 0;
               jit_tex->mip_offsets[0] = 0;
               jit_tex->row_stride[0] = 0;
               jit_tex->img_stride[0] = 0;
            }
            else {
               jit_tex->width = res->width0;
               jit_tex->height = res->height0;
               jit_tex->depth = res->depth0;
               jit_tex->first_level = first_level;
               jit_tex->last_level = last_level;

               if (llvmpipe_resource_is_texture(res)) {
                  for (j = first_level; j <= last_level; j++) {
                     jit_tex->mip_offsets[j] = lp_tex->mip_offsets[j];
                     jit_tex->row_stride[j] = lp_tex->row_stride[j];
                     jit_tex->img_stride[j] = lp_tex->img_stride[j];
                  }

                  if (res->target == PIPE_TEXTURE_1D_ARRAY ||
                      res->target == PIPE_TEXTURE_2D_ARRAY ||
                      res->target == PIPE_TEXTURE_CUBE ||
                      res->target == PIPE_TEXTURE_CUBE_ARRAY) {
                     /*
                      * For array textures, we don't have first_layer, instead
                      * adjust last_layer (stored as depth) plus the mip level offsets
                      * (as we have mip-first layout can't just adjust base ptr).
                      * XXX For mip levels, could do something similar.
                      */
                     jit_tex->depth = view->u.tex.last_layer - view->u.tex.first_layer + 1;
                     for (j = first_level; j <= last_level; j++) {
                        jit_tex->mip_offsets[j] += view->u.tex.first_layer *
                                                   lp_tex->img_stride[j];
                     }
                     if (view->target == PIPE_TEXTURE_CUBE ||
                         view->target == PIPE_TEXTURE_CUBE_ARRAY) {
                        assert(jit_tex->depth % 6 == 0);
                     }
                     assert(view->u.tex.first_layer <= view->u.tex.last_layer);
                     assert(view->u.tex.last_layer < res->array_size);
                  }
               }
               else {
                  /*
                   * For buffers, we don't have first_element, instead adjust
                   * last_element (stored as width) plus the base pointer.
                   */
                  unsigned view_blocksize = util_format_get_blocksize(view->format);
                  /* probably don't really need to fill that out */
                  jit_tex->mip_offsets[0] = 0;
                  jit_tex->row_stride[0] = 0;
                  jit_tex->img_stride[0] = 0;

                  /* everything specified in number of elements here. */
                  jit_tex->width = view->u.buf.last_element - view->u.buf.first_element + 1;
                  jit_tex->base = (uint8_t *)jit_tex->base + view->u.buf.first_element *
                                  view_blocksize;
                  /* XXX Unsure if we need to sanitize parameters? */
                  assert(view->u.buf.first_element <= view->u.buf.last_element);
                  assert(view->u.buf.last_element * view_blocksize < res->width0);
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(res->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->base = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                      PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            jit_tex->mip_offsets[0] = 0;
            jit_tex->width = res->width0;
            jit_tex->height = res->height0;
            jit_tex->depth = res->depth0;
            jit_tex->first_level = jit_tex->last_level = 0;
            assert(jit_tex->base);
         }
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}
/**
 * Emit an indexed draw from the vbuf render stage.
 *
 * Appends the ushort indices to the (possibly re-allocated) streaming index
 * buffer, then issues the hardware draw; on a full command buffer the
 * context is flushed and the draw retried once.
 */
static void
svga_vbuf_render_draw_elements( struct vbuf_render *render,
                                const ushort *indices,
                                uint nr_indices )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   struct pipe_screen *screen = svga->pipe.screen;
   /* index bias: how many vertices into the vertex buffer this batch starts */
   int bias = (svga_render->vbuf_offset - svga_render->vdecl_offset)
      / svga_render->vertex_size;
   /* Fix: was declared `boolean` but holds/compares an enum pipe_error
    * (PIPE_OK) returned by svga_hwtnl_draw_range_elements().
    */
   enum pipe_error ret;
   /* two bytes per ushort index */
   size_t size = 2 * nr_indices;

   assert(( svga_render->vbuf_offset - svga_render->vdecl_offset) % svga_render->vertex_size == 0);

   /* Grow/recycle the streaming index buffer when the new batch won't fit. */
   if (svga_render->ibuf_size < svga_render->ibuf_offset + size)
      pipe_resource_reference(&svga_render->ibuf, NULL);

   if (!svga_render->ibuf) {
      svga_render->ibuf_size = MAX2(size, svga_render->ibuf_alloc_size);
      svga_render->ibuf = pipe_buffer_create(screen,
                                             PIPE_BIND_INDEX_BUFFER,
                                             PIPE_USAGE_STREAM,
                                             svga_render->ibuf_size);
      svga_render->ibuf_offset = 0;
   }

   pipe_buffer_write_nooverlap(&svga->pipe, svga_render->ibuf,
                               svga_render->ibuf_offset, 2 * nr_indices,
                               indices);

   /* off to hardware */
   svga_vbuf_submit_state(svga_render);

   /* Need to call update_state() again as the draw module may have
    * altered some of our state behind our backs.  Testcase:
    * redbook/polys.c
    */
   svga_update_state_retry( svga, SVGA_STATE_HW_DRAW );

   ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                        svga_render->ibuf,
                                        2,
                                        bias,
                                        svga_render->min_index,
                                        svga_render->max_index,
                                        svga_render->prim,
                                        svga_render->ibuf_offset / 2,
                                        nr_indices);
   if(ret != PIPE_OK) {
      /* Command buffer was full: flush and retry the draw exactly once. */
      svga_context_flush(svga, NULL);
      ret = svga_hwtnl_draw_range_elements(svga->hwtnl,
                                           svga_render->ibuf,
                                           2,
                                           bias,
                                           svga_render->min_index,
                                           svga_render->max_index,
                                           svga_render->prim,
                                           svga_render->ibuf_offset / 2,
                                           nr_indices);
      svga->swtnl.new_vbuf = TRUE;
      assert(ret == PIPE_OK);
   }

   svga_render->ibuf_offset += size;
}
/**
 * Emit an indexed draw on r300 hardware.
 *
 * Handles index-bias emulation (pre-r500), index buffer translation/upload,
 * a fallback for misaligned ushort indices, and splitting draws whose vertex
 * count exceeds the hardware limit (65535, unless the r500 alt-num-verts
 * path applies).
 */
static void r300_draw_elements(struct r300_context *r300,
                               const struct pipe_draw_info *info,
                               int instance_id)
{
    struct pipe_resource *indexBuffer = r300->index_buffer.buffer;
    unsigned indexSize = r300->index_buffer.index_size;
    struct pipe_resource* orgIndexBuffer = indexBuffer;
    unsigned start = info->start;
    unsigned count = info->count;
    /* r500 can draw > 65536 vertices in one shot via ALT_NUM_VERTICES */
    boolean alt_num_verts = r300->screen->caps.is_r500 &&
                            count > 65536;
    unsigned short_count;
    int buffer_offset = 0, index_offset = 0; /* for index bias emulation */
    uint16_t indices3[3];

    if (info->index_bias && !r300->screen->caps.is_r500) {
        /* Older chips lack an index-bias register; fold the bias into a
         * vertex buffer offset plus an index offset instead.
         */
        r300_split_index_bias(r300, info->index_bias, &buffer_offset,
                              &index_offset);
    }

    r300_translate_index_buffer(r300, &r300->index_buffer, &indexBuffer,
                                &indexSize, index_offset, &start, count);

    /* Fallback for misaligned ushort indices. */
    if (indexSize == 2 && (start & 1) && indexBuffer) {
        /* If we got here, then orgIndexBuffer == indexBuffer. */
        uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
                                              r300->cs,
                                              PIPE_TRANSFER_READ |
                                              PIPE_TRANSFER_UNSYNCHRONIZED);

        if (info->mode == PIPE_PRIM_TRIANGLES) {
           /* A single triangle: its 3 ushort indices (6 bytes) can be
            * embedded directly in the command stream via indices3.
            */
           memcpy(indices3, ptr + start, 6);
        } else {
            /* Copy the mapped index buffer directly to the upload buffer.
             * The start index will be aligned simply from the fact that
             * every sub-buffer in the upload buffer is aligned.
             */
            r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
                                     count, (uint8_t*)ptr);
        }
    } else {
        if (r300->index_buffer.user_buffer)
            r300_upload_index_buffer(r300, &indexBuffer, indexSize,
                                     &start, count,
                                     r300->index_buffer.user_buffer);
    }

    /* 19 dwords for emit_draw_elements. Give up if the function fails.
     */
    if (!r300_prepare_for_rendering(r300,
            PREP_EMIT_STATES | PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS |
            PREP_INDEXED, indexBuffer, 19, buffer_offset, info->index_bias,
            instance_id))
        goto done;

    if (alt_num_verts || count <= 65535) {
        r300_emit_draw_elements(r300, indexBuffer, indexSize, info->max_index,
                                info->mode, start, count, indices3);
    } else {
        do {
            /* The maximum must be divisible by 4 and 3,
             * so that quad and triangle lists are split correctly.
             *
             * Strips, loops, and fans won't work.
             */
            short_count = MIN2(count, 65532);
            r300_emit_draw_elements(r300, indexBuffer, indexSize,
                                     info->max_index,
                                     info->mode, start, short_count, indices3);

            start += short_count;
            count -= short_count;

            /* 15 dwords for emit_draw_elements */
            if (count) {
                if (!r300_prepare_for_rendering(r300,
                        PREP_VALIDATE_VBOS | PREP_EMIT_VARRAYS | PREP_INDEXED,
                        indexBuffer, 19, buffer_offset, info->index_bias,
                        instance_id))
                    goto done;
            }
        } while (count);
    }

done:
    /* Drop the reference taken by translate/upload if a new buffer was made. */
    if (indexBuffer != orgIndexBuffer) {
        pipe_resource_reference( &indexBuffer, NULL );
    }
}
/**
 * VA-API vaDeriveImage: create a VAImage that directly references the
 * surface's backing storage (no copy).
 *
 * Only works for non-interlaced surfaces whose format has contiguous
 * planes (packed YUV or RGB); other formats return
 * VA_STATUS_ERROR_INVALID_IMAGE_FORMAT.
 */
VAStatus
vlVaDeriveImage(VADriverContextP ctx, VASurfaceID surface, VAImage *image)
{
   vlVaDriver *drv;
   vlVaSurface *surf;
   vlVaBuffer *img_buf;
   VAImage *img;
   struct pipe_surface **surfaces;
   int w;
   int h;
   int i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);

   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   surf = handle_table_get(drv->htab, surface);

   /* Deriving requires a progressive, fully-backed surface. */
   if (!surf || !surf->buffer || surf->buffer->interlaced)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   surfaces = surf->buffer->get_surfaces(surf->buffer);
   if (!surfaces || !surfaces[0]->texture)
      return VA_STATUS_ERROR_ALLOCATION_FAILED;

   img = CALLOC(1, sizeof(VAImage));
   if (!img)
      return VA_STATUS_ERROR_ALLOCATION_FAILED;

   img->format.fourcc = PipeFormatToVaFourcc(surf->buffer->buffer_format);
   img->buf = VA_INVALID_ID;
   img->width = surf->buffer->width;
   img->height = surf->buffer->height;
   img->num_palette_entries = 0;
   img->entry_bytes = 0;
   /* Pitches/sizes are computed from dimensions rounded up to even. */
   w = align(surf->buffer->width, 2);
   h = align(surf->buffer->height, 2);

   /* Fill in the rest of the format descriptor from the known-format table. */
   for (i = 0; i < ARRAY_SIZE(formats); ++i) {
      if (img->format.fourcc == formats[i].fourcc) {
         img->format = formats[i];
         break;
      }
   }

   switch (img->format.fourcc) {
   case VA_FOURCC('U','Y','V','Y'):
   case VA_FOURCC('Y','U','Y','V'):
      img->num_planes = 1;
      img->pitches[0] = w * 2;
      img->offsets[0] = 0;
      img->data_size = w * h * 2;
      break;

   case VA_FOURCC('B','G','R','A'):
   case VA_FOURCC('R','G','B','A'):
   case VA_FOURCC('B','G','R','X'):
   case VA_FOURCC('R','G','B','X'):
      img->num_planes = 1;
      img->pitches[0] = w * 4;
      img->offsets[0] = 0;
      img->data_size = w * h * 4;
      break;

   default:
      /* VaDeriveImage is designed for contiguous planes.
       */
      FREE(img);
      return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;
   }

   img_buf = CALLOC(1, sizeof(vlVaBuffer));
   if (!img_buf) {
      FREE(img);
      return VA_STATUS_ERROR_ALLOCATION_FAILED;
   }

   pipe_mutex_lock(drv->mutex);
   /* Both the image and its buffer live in the driver handle table; they are
    * destroyed later via their handles, not here.
    */
   img->image_id = handle_table_add(drv->htab, img);

   img_buf->type = VAImageBufferType;
   img_buf->size = img->data_size;
   img_buf->num_elements = 1;

   /* The buffer references the surface's texture directly — zero-copy. */
   pipe_resource_reference(&img_buf->derived_surface.resource,
                           surfaces[0]->texture);

   img->buf = handle_table_add(VL_VA_DRIVER(ctx)->htab, img_buf);
   pipe_mutex_unlock(drv->mutex);

   *image = *img;

   return VA_STATUS_SUCCESS;
}
/**
 * Free all the temporary data in a scene.
 *
 * Unmaps the framebuffer, resets every tile's command bin, drops all
 * resource references the scene accumulated during binning, frees the
 * scene's data blocks (keeping the head block for reuse), and clears the
 * fence and bookkeeping fields.
 */
void
lp_scene_end_rasterization(struct lp_scene *scene )
{
   int i, j;

   /* Unmap color buffers */
   for (i = 0; i < scene->fb.nr_cbufs; i++) {
      if (scene->cbufs[i].map) {
         struct pipe_surface *cbuf = scene->fb.cbufs[i];
         if (llvmpipe_resource_is_texture(cbuf->texture)) {
            llvmpipe_resource_unmap(cbuf->texture,
                                    cbuf->u.tex.level,
                                    cbuf->u.tex.first_layer);
         }
         scene->cbufs[i].map = NULL;
      }
   }

   /* Unmap z/stencil buffer */
   if (scene->zsbuf.map) {
      struct pipe_surface *zsbuf = scene->fb.zsbuf;
      llvmpipe_resource_unmap(zsbuf->texture,
                              zsbuf->u.tex.level,
                              zsbuf->u.tex.first_layer);
      scene->zsbuf.map = NULL;
   }

   /* Reset all command lists:
    */
   for (i = 0; i < scene->tiles_x; i++) {
      for (j = 0; j < scene->tiles_y; j++) {
         struct cmd_bin *bin = lp_scene_get_bin(scene, i, j);
         bin->head = NULL;
         bin->tail = NULL;
         bin->last_state = NULL;
      }
   }

   /* If there are any bins which weren't cleared by the loop above,
    * they will be caught (on debug builds at least) by this assert:
    */
   assert(lp_scene_is_empty(scene));

   /* Decrement texture ref counts
    */
   {
      struct resource_ref *ref;
      int i, j = 0;   /* intentionally shadows the outer i/j */

      for (ref = scene->resources; ref; ref = ref->next) {
         for (i = 0; i < ref->count; i++) {
            if (LP_DEBUG & DEBUG_SETUP)
               debug_printf("resource %d: %p %dx%d sz %d\n",
                            j,
                            (void *) ref->resource[i],
                            ref->resource[i]->width0,
                            ref->resource[i]->height0,
                            llvmpipe_resource_size(ref->resource[i]));
            j++;
            pipe_resource_reference(&ref->resource[i], NULL);
         }
      }

      if (LP_DEBUG & DEBUG_SETUP)
         debug_printf("scene %d resources, sz %d\n",
                      j, scene->resource_reference_size);
   }

   /* Free all scene data blocks:
    */
   {
      struct data_block_list *list = &scene->data;
      struct data_block *block, *tmp;

      /* Free every block after the head; the head block is retained and
       * reset for the next scene.
       */
      for (block = list->head->next; block; block = tmp) {
         tmp = block->next;
         FREE(block);
      }

      list->head->next = NULL;
      list->head->used = 0;
   }

   lp_fence_reference(&scene->fence, NULL);

   scene->resources = NULL;
   scene->scene_size = 0;
   scene->resource_reference_size = 0;

   scene->has_depthstencil_clear = FALSE;
   scene->alloc_failed = FALSE;

   util_unreference_framebuffer_state( &scene->fb );
}
/* Destroy a surface view: drop its reference on the backing texture,
 * then release the surface object itself.
 */
static void noop_surface_destroy(struct pipe_context *ctx,
                                 struct pipe_surface *surf)
{
   pipe_resource_reference(&surf->texture, NULL);
   FREE(surf);
}
/* Destroy a stream-output target: unreference the buffer it points at,
 * then free the target object.
 */
static void noop_stream_output_target_destroy(struct pipe_context *ctx,
                                              struct pipe_stream_output_target *target)
{
   pipe_resource_reference(&target->buffer, NULL);
   FREE(target);
}
/* Destroy a sampler view: release its texture reference and free it. */
static void noop_sampler_view_destroy(struct pipe_context *ctx,
                                      struct pipe_sampler_view *view)
{
   pipe_resource_reference(&view->texture, NULL);
   FREE(view);
}
/* Destroy a __DRIimage: drop the pipe_resource reference, then free the
 * wrapper struct.
 */
static void dri2_destroy_image(__DRIimage *image)
{
   pipe_resource_reference(&image->texture, NULL);
   FREE(image);
}
/**
 * Process __DRIbuffer and convert them into pipe_resources.
 *
 * For each buffer handed back by the loader, maps the DRI attachment onto a
 * state-tracker attachment and wraps the kernel buffer name in a
 * pipe_resource via resource_from_handle().  Skips the work entirely when
 * the buffer list is unchanged since the previous call.
 */
static void
dri2_drawable_process_buffers(struct dri_drawable *drawable,
                              __DRIbuffer *buffers, unsigned count)
{
   struct dri_screen *screen = dri_screen(drawable->sPriv);
   __DRIdrawable *dri_drawable = drawable->dPriv;
   struct pipe_resource templ;
   struct winsys_handle whandle;
   boolean have_depth = FALSE;
   unsigned i, bind;

   /* Nothing changed since last time — keep the existing textures. */
   if (drawable->old_num == count &&
       drawable->old_w == dri_drawable->w &&
       drawable->old_h == dri_drawable->h &&
       memcmp(drawable->old, buffers, sizeof(__DRIbuffer) * count) == 0)
      return;

   for (i = 0; i < ST_ATTACHMENT_COUNT; i++)
      pipe_resource_reference(&drawable->textures[i], NULL);

   memset(&templ, 0, sizeof(templ));
   templ.target = screen->target;
   templ.last_level = 0;
   templ.width0 = dri_drawable->w;
   templ.height0 = dri_drawable->h;
   templ.depth0 = 1;
   templ.array_size = 1;

   memset(&whandle, 0, sizeof(whandle));

   for (i = 0; i < count; i++) {
      __DRIbuffer *buf = &buffers[i];
      enum st_attachment_type statt;
      enum pipe_format format;

      /* Translate the DRI attachment point to an st attachment. */
      switch (buf->attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         if (!screen->auto_fake_front) {
            statt = ST_ATTACHMENT_INVALID;
            break;
         }
         /* fallthrough */
      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         statt = ST_ATTACHMENT_FRONT_LEFT;
         break;
      case __DRI_BUFFER_BACK_LEFT:
         statt = ST_ATTACHMENT_BACK_LEFT;
         break;
      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
         /* use only the first depth/stencil buffer */
         if (!have_depth) {
            have_depth = TRUE;
            statt = ST_ATTACHMENT_DEPTH_STENCIL;
         }
         else {
            statt = ST_ATTACHMENT_INVALID;
         }
         break;
      default:
         statt = ST_ATTACHMENT_INVALID;
         break;
      }

      dri_drawable_get_format(drawable, statt, &format, &bind);
      if (statt == ST_ATTACHMENT_INVALID || format == PIPE_FORMAT_NONE)
         continue;

      /* Wrap the loader-provided buffer name as a pipe resource. */
      templ.format = format;
      templ.bind = bind;
      whandle.handle = buf->name;
      whandle.stride = buf->pitch;

      drawable->textures[statt] =
         screen->base.screen->resource_from_handle(screen->base.screen,
                                                   &templ, &whandle);
   }

   /* Remember this buffer list so the early-out above can detect no-ops. */
   drawable->old_num = count;
   drawable->old_w = dri_drawable->w;
   drawable->old_h = dri_drawable->h;
   memcpy(drawable->old, buffers, sizeof(__DRIbuffer) * count);
}
/**
 * Draw a screen-space rectangle using the hardware RECTLIST primitive.
 *
 * Overrides u_blitter's generic rectangle draw for r600: some operations
 * (like color resolve on r6xx) only work with PT_RECTLIST.  Texcoord
 * attributes are not supported here and fall back to u_blitter.
 */
void r600_draw_rectangle(struct blitter_context *blitter,
                         int x1, int y1, int x2, int y2, float depth,
                         enum blitter_attrib_type type,
                         const union pipe_color_union *attrib)
{
	struct r600_common_context *rctx =
		(struct r600_common_context*)util_blitter_get_pipe(blitter);
	struct pipe_viewport_state viewport;
	struct pipe_resource *buf = NULL;
	unsigned offset = 0;
	float *vb;

	/* Texcoord attribs aren't handled by this path — use the generic one. */
	if (type == UTIL_BLITTER_ATTRIB_TEXCOORD) {
		util_blitter_draw_rectangle(blitter, x1, y1, x2, y2, depth, type, attrib);
		return;
	}

	/* Some operations (like color resolve on r6xx) don't work
	 * with the conventional primitive types.
	 * One that works is PT_RECTLIST, which we use here. */

	/* setup viewport: identity, so vertex positions are window coords */
	viewport.scale[0] = 1.0f;
	viewport.scale[1] = 1.0f;
	viewport.scale[2] = 1.0f;
	viewport.translate[0] = 0.0f;
	viewport.translate[1] = 0.0f;
	viewport.translate[2] = 0.0f;
	rctx->b.set_viewport_states(&rctx->b, 0, 1, &viewport);

	/* Upload vertices. The hw rectangle has only 3 vertices,
	 * I guess the 4th one is derived from the first 3.
	 * The vertex specification should match u_blitter's vertex element state. */
	u_upload_alloc(rctx->uploader, 0, sizeof(float) * 24, 256,
                       &offset, &buf, (void**)&vb);
	if (!buf)
		return;

	/* 3 vertices, 8 floats each: position (xyzw) + attrib (4 floats). */
	vb[0] = x1;
	vb[1] = y1;
	vb[2] = depth;
	vb[3] = 1;

	vb[8] = x1;
	vb[9] = y2;
	vb[10] = depth;
	vb[11] = 1;

	vb[16] = x2;
	vb[17] = y1;
	vb[18] = depth;
	vb[19] = 1;

	if (attrib) {
		memcpy(vb+4, attrib->f, sizeof(float)*4);
		memcpy(vb+12, attrib->f, sizeof(float)*4);
		memcpy(vb+20, attrib->f, sizeof(float)*4);
	}

	/* draw */
	util_draw_vertex_buffer(&rctx->b, NULL, buf, blitter->vb_slot, offset,
				R600_PRIM_RECTANGLE_LIST, 3, 2);
	pipe_resource_reference(&buf, NULL);
}
/* Free a texture surface view after releasing its texture reference. */
static void r600_tex_surface_destroy(struct pipe_surface *surf)
{
	pipe_resource_reference(&surf->texture, NULL);
	FREE(surf);
}
/**
 * XvMC: create a subpicture of the given size and XvImage format.
 *
 * Allocates a sampler-view-backed texture for the subpicture pixels and,
 * for paletted formats, a 1D palette texture.
 *
 * Fixes vs. the previous version:
 *  - the palette branch erroneously re-tested subpicture_priv->sampler
 *    instead of the palette view it had just created;
 *  - both resource_create() results were dereferenced (tex->format)
 *    without a NULL check, crashing on allocation failure.
 *
 * \return Success, XvMCBadContext/XvMCBadSubpicture, BadValue, BadAlloc,
 *         or the status from Validate().
 */
PUBLIC
Status XvMCCreateSubpicture(Display *dpy, XvMCContext *context,
                            XvMCSubpicture *subpicture,
                            unsigned short width, unsigned short height,
                            int xvimage_id)
{
   XvMCContextPrivate *context_priv;
   XvMCSubpicturePrivate *subpicture_priv;
   struct pipe_context *pipe;
   struct pipe_resource tex_templ, *tex;
   struct pipe_sampler_view sampler_templ;
   Status ret;

   XVMC_MSG(XVMC_TRACE, "[XvMC] Creating subpicture %p.\n", subpicture);

   assert(dpy);

   if (!context)
      return XvMCBadContext;

   context_priv = context->privData;
   pipe = context_priv->pipe;

   if (!subpicture)
      return XvMCBadSubpicture;

   if (width > context_priv->subpicture_max_width ||
       height > context_priv->subpicture_max_height)
      return BadValue;

   ret = Validate(dpy, context->port, context->surface_type_id, xvimage_id);
   if (ret != Success)
      return ret;

   subpicture_priv = CALLOC(1, sizeof(XvMCSubpicturePrivate));
   if (!subpicture_priv)
      return BadAlloc;

   memset(&tex_templ, 0, sizeof(tex_templ));
   tex_templ.target = PIPE_TEXTURE_2D;
   tex_templ.format = XvIDToPipe(xvimage_id);
   tex_templ.last_level = 0;
   /* NPOT-capable hardware can use the exact size; otherwise round up. */
   if (pipe->screen->get_video_param(pipe->screen,
                                     PIPE_VIDEO_PROFILE_UNKNOWN,
                                     PIPE_VIDEO_ENTRYPOINT_UNKNOWN,
                                     PIPE_VIDEO_CAP_NPOT_TEXTURES)) {
      tex_templ.width0 = width;
      tex_templ.height0 = height;
   }
   else {
      tex_templ.width0 = util_next_power_of_two(width);
      tex_templ.height0 = util_next_power_of_two(height);
   }
   tex_templ.depth0 = 1;
   tex_templ.array_size = 1;
   tex_templ.usage = PIPE_USAGE_DYNAMIC;
   tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
   tex_templ.flags = 0;

   tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
   if (!tex) {
      /* Fix: previously dereferenced tex->format without this check. */
      FREE(subpicture_priv);
      return BadAlloc;
   }

   memset(&sampler_templ, 0, sizeof(sampler_templ));
   u_sampler_view_default_template(&sampler_templ, tex, tex->format);

   subpicture_priv->sampler = pipe->create_sampler_view(pipe, tex, &sampler_templ);
   pipe_resource_reference(&tex, NULL);
   if (!subpicture_priv->sampler) {
      FREE(subpicture_priv);
      return BadAlloc;
   }

   subpicture_priv->context = context;
   subpicture->subpicture_id = XAllocID(dpy);
   subpicture->context_id = context->context_id;
   subpicture->xvimage_id = xvimage_id;
   subpicture->width = width;
   subpicture->height = height;
   subpicture->num_palette_entries = NumPaletteEntries4XvID(xvimage_id);
   subpicture->entry_bytes = PipeToComponentOrder(tex_templ.format,
                                                  subpicture->component_order);
   subpicture->privData = subpicture_priv;

   if (subpicture->num_palette_entries > 0) {
      tex_templ.target = PIPE_TEXTURE_1D;
      tex_templ.format = PIPE_FORMAT_R8G8B8X8_UNORM;
      tex_templ.width0 = subpicture->num_palette_entries;
      tex_templ.height0 = 1;
      tex_templ.usage = PIPE_USAGE_DEFAULT;

      tex = pipe->screen->resource_create(pipe->screen, &tex_templ);
      if (!tex) {
         /* Fix: NULL check was missing here as well. */
         pipe_sampler_view_reference(&subpicture_priv->sampler, NULL);
         FREE(subpicture_priv);
         return BadAlloc;
      }

      memset(&sampler_templ, 0, sizeof(sampler_templ));
      u_sampler_view_default_template(&sampler_templ, tex, tex->format);
      sampler_templ.swizzle_a = PIPE_SWIZZLE_ONE;
      subpicture_priv->palette = pipe->create_sampler_view(pipe, tex, &sampler_templ);
      pipe_resource_reference(&tex, NULL);
      /* Fix: must test the palette view just created, not the (already
       * validated) pixel sampler view.  Also release the pixel sampler so
       * the early return does not leak it.
       */
      if (!subpicture_priv->palette) {
         pipe_sampler_view_reference(&subpicture_priv->sampler, NULL);
         FREE(subpicture_priv);
         return BadAlloc;
      }
   }

   SyncHandle();

   XVMC_MSG(XVMC_TRACE, "[XvMC] Subpicture %p created.\n", subpicture);

   return Success;
}
/**
 * Called during state validation when LP_NEW_SAMPLER_VIEW is set.
 *
 * Older llvmpipe variant: copies per-mip-level data pointers and strides of
 * each bound sampler view into the JIT texture state.  Takes a reference on
 * every bound texture; falls back to a dummy tile when image data cannot be
 * obtained (or when PERF_TEX_MEM profiling is enabled).
 */
void
lp_setup_set_fragment_sampler_views(struct lp_setup_context *setup,
                                    unsigned num,
                                    struct pipe_sampler_view **views)
{
   unsigned i;

   LP_DBG(DEBUG_SETUP, "%s\n", __FUNCTION__);

   assert(num <= PIPE_MAX_SAMPLERS);

   /* Walk every slot; slots >= num are treated as unbound (view == NULL). */
   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      struct pipe_sampler_view *view = i < num ? views[i] : NULL;

      if (view) {
         struct pipe_resource *tex = view->texture;
         struct llvmpipe_resource *lp_tex = llvmpipe_resource(tex);
         struct lp_jit_texture *jit_tex;
         jit_tex = &setup->fs.current.jit_context.textures[i];
         jit_tex->width = tex->width0;
         jit_tex->height = tex->height0;
         jit_tex->depth = tex->depth0;
         jit_tex->first_level = view->u.tex.first_level;
         jit_tex->last_level = tex->last_level;

         /* We're referencing the texture's internal data, so save a
          * reference to it.
          */
         pipe_resource_reference(&setup->fs.current_tex[i], tex);

         if (!lp_tex->dt) {
            /* regular texture - setup array of mipmap level pointers */
            int j;
            for (j = view->u.tex.first_level; j <= tex->last_level; j++) {
               jit_tex->data[j] =
                  llvmpipe_get_texture_image_all(lp_tex, j, LP_TEX_USAGE_READ,
                                                 LP_TEX_LAYOUT_LINEAR);
               jit_tex->row_stride[j] = lp_tex->row_stride[j];
               jit_tex->img_stride[j] = lp_tex->img_stride[j];

               if ((LP_PERF & PERF_TEX_MEM) ||
		   !jit_tex->data[j]) {
                  /* out of memory - use dummy tile memory */
                  jit_tex->data[j] = lp_dummy_tile;
                  jit_tex->width = TILE_SIZE/8;
                  jit_tex->height = TILE_SIZE/8;
                  jit_tex->depth = 1;
                  jit_tex->first_level = 0;
                  jit_tex->last_level = 0;
                  jit_tex->row_stride[j] = 0;
                  jit_tex->img_stride[j] = 0;
               }
            }
         }
         else {
            /* display target texture/surface */
            /*
             * XXX: Where should this be unmapped?
             */
            struct llvmpipe_screen *screen = llvmpipe_screen(tex->screen);
            struct sw_winsys *winsys = screen->winsys;
            jit_tex->data[0] = winsys->displaytarget_map(winsys, lp_tex->dt,
                                                         PIPE_TRANSFER_READ);
            jit_tex->row_stride[0] = lp_tex->row_stride[0];
            jit_tex->img_stride[0] = lp_tex->img_stride[0];
            assert(jit_tex->data[0]);
         }
      }
   }

   setup->dirty |= LP_SETUP_NEW_FS;
}
/**
 * Remove outdated textures and create the requested ones.
 *
 * Called with the framebuffer lock held.  Discards all attachment textures
 * when the drawable size changed, then (re)creates a texture for every
 * attachment requested in \p mask that does not already exist.  The final
 * mask of live attachments is stored in stwfb->texture_mask.
 */
static void
stw_st_framebuffer_validate_locked(struct st_framebuffer_iface *stfb,
                                   unsigned width, unsigned height,
                                   unsigned mask)
{
   struct stw_st_framebuffer *stwfb = stw_st_framebuffer(stfb);
   struct pipe_resource templ;
   unsigned i;

   /* remove outdated textures */
   if (stwfb->texture_width != width || stwfb->texture_height != height) {
      for (i = 0; i < ST_ATTACHMENT_COUNT; i++)
         pipe_resource_reference(&stwfb->textures[i], NULL);
   }

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_TEXTURE_2D;
   templ.width0 = width;
   templ.height0 = height;
   templ.depth0 = 1;
   templ.array_size = 1;
   templ.last_level = 0;

   for (i = 0; i < ST_ATTACHMENT_COUNT; i++) {
      enum pipe_format format;
      unsigned bind;

      /* the texture already exists or not requested */
      if (stwfb->textures[i] || !(mask & (1 << i))) {
         /* remember the texture */
         if (stwfb->textures[i])
            mask |= (1 << i);
         continue;
      }

      /* Pick format/bind flags per attachment type. */
      switch (i) {
      case ST_ATTACHMENT_FRONT_LEFT:
      case ST_ATTACHMENT_BACK_LEFT:
         format = stwfb->stvis.color_format;
         bind = PIPE_BIND_DISPLAY_TARGET |
                PIPE_BIND_RENDER_TARGET;
         break;
      case ST_ATTACHMENT_DEPTH_STENCIL:
         format = stwfb->stvis.depth_stencil_format;
         bind = PIPE_BIND_DEPTH_STENCIL;
         break;
      default:
         /* bind is deliberately left unset here: format == NONE below
          * prevents it from ever being read.
          */
         format = PIPE_FORMAT_NONE;
         break;
      }

      if (format != PIPE_FORMAT_NONE) {
         templ.format = format;
         templ.bind = bind;

         stwfb->textures[i] =
            stw_dev->screen->resource_create(stw_dev->screen, &templ);
      }
   }

   stwfb->texture_width = width;
   stwfb->texture_height = height;
   stwfb->texture_mask = mask;
}
/**
 * (Re)configure the swapchain according to the given present parameters.
 *
 * Validates pParams, pushes them to the presentation backend, tears down the
 * old thread pool / window buffers, then (re)creates the back buffers, the
 * optional intermediate present buffers, and the auto depth/stencil surface.
 *
 * \param This     the swapchain to resize
 * \param pParams  D3D present parameters (may be normalized in place:
 *                 BackBufferCount, BackBufferFormat, PresentationInterval)
 * \param mode     optional display mode (NULL to clear a stored mode)
 * \return D3D_OK on success, or a D3DERR_* / backend error code
 */
HRESULT
NineSwapChain9_Resize( struct NineSwapChain9 *This,
                       D3DPRESENT_PARAMETERS *pParams,
                       D3DDISPLAYMODEEX *mode )
{
    struct NineDevice9 *pDevice = This->base.device;
    D3DSURFACE_DESC desc;
    HRESULT hr;
    struct pipe_resource *resource, tmplt;
    enum pipe_format pf;
    BOOL has_present_buffers = FALSE;
    int depth;
    unsigned i, oldBufferCount, newBufferCount;
    D3DMULTISAMPLE_TYPE multisample_type;

    DBG("This=%p pParams=%p\n", This, pParams);

    /* Parameter validation: COPY allows at most one back buffer; non-ex
     * devices have a smaller buffer-count limit and a restricted set of
     * swap effects. */
    user_assert(pParams != NULL, E_POINTER);
    user_assert(pParams->SwapEffect, D3DERR_INVALIDCALL);
    user_assert((pParams->SwapEffect != D3DSWAPEFFECT_COPY) ||
                (pParams->BackBufferCount <= 1), D3DERR_INVALIDCALL);
    user_assert(pDevice->ex || pParams->BackBufferCount <=
                D3DPRESENT_BACK_BUFFERS_MAX, D3DERR_INVALIDCALL);
    user_assert(!pDevice->ex || pParams->BackBufferCount <=
                D3DPRESENT_BACK_BUFFERS_MAX_EX, D3DERR_INVALIDCALL);
    user_assert(pDevice->ex ||
                (pParams->SwapEffect == D3DSWAPEFFECT_FLIP) ||
                (pParams->SwapEffect == D3DSWAPEFFECT_COPY) ||
                (pParams->SwapEffect == D3DSWAPEFFECT_DISCARD), D3DERR_INVALIDCALL);

    DBG("pParams(%p):\n"
        "BackBufferWidth: %u\n"
        "BackBufferHeight: %u\n"
        "BackBufferFormat: %s\n"
        "BackBufferCount: %u\n"
        "MultiSampleType: %u\n"
        "MultiSampleQuality: %u\n"
        "SwapEffect: %u\n"
        "hDeviceWindow: %p\n"
        "Windowed: %i\n"
        "EnableAutoDepthStencil: %i\n"
        "AutoDepthStencilFormat: %s\n"
        "Flags: %s\n"
        "FullScreen_RefreshRateInHz: %u\n"
        "PresentationInterval: %x\n", pParams,
        pParams->BackBufferWidth, pParams->BackBufferHeight,
        d3dformat_to_string(pParams->BackBufferFormat),
        pParams->BackBufferCount,
        pParams->MultiSampleType, pParams->MultiSampleQuality,
        pParams->SwapEffect, pParams->hDeviceWindow, pParams->Windowed,
        pParams->EnableAutoDepthStencil,
        d3dformat_to_string(pParams->AutoDepthStencilFormat),
        nine_D3DPRESENTFLAG_to_str(pParams->Flags),
        pParams->FullScreen_RefreshRateInHz, pParams->PresentationInterval);

    /* Normalize defaults the D3D9 API allows the app to leave unspecified. */
    if (pParams->BackBufferCount == 0) {
        pParams->BackBufferCount = 1;
    }
    if (pParams->BackBufferFormat == D3DFMT_UNKNOWN) {
        pParams->BackBufferFormat = D3DFMT_A8R8G8B8;
    }

    This->desired_fences = This->actx->throttling ?
        This->actx->throttling_value + 1 : 0;
    /* +1 because we add the fence of the current buffer before popping an old one */
    if (This->desired_fences > DRI_SWAP_FENCES_MAX)
        This->desired_fences = DRI_SWAP_FENCES_MAX;

    /* vblank_mode overrides the app's requested presentation interval:
     * 0 forces immediate presents, 3 forces vsync. */
    if (This->actx->vblank_mode == 0)
        pParams->PresentationInterval = D3DPRESENT_INTERVAL_IMMEDIATE;
    else if (This->actx->vblank_mode == 3)
        pParams->PresentationInterval = D3DPRESENT_INTERVAL_ONE;

    /* Store/replace/clear the cached display mode. */
    if (mode && This->mode) {
        *(This->mode) = *mode;
    } else if (mode) {
        /* NOTE(review): malloc result is not checked before memcpy —
         * OOM here would crash; consider handling. */
        This->mode = malloc(sizeof(D3DDISPLAYMODEEX));
        memcpy(This->mode, mode, sizeof(D3DDISPLAYMODEEX));
    } else {
        free(This->mode);
        This->mode = NULL;
    }

    /* Note: It is the role of the backend to fill if necessary
     * BackBufferWidth and BackBufferHeight */
    hr = ID3DPresent_SetPresentParameters(This->present, pParams, This->mode);
    if (hr != D3D_OK)
        return hr;

    oldBufferCount = This->num_back_buffers;
    newBufferCount = NineSwapChain9_GetBackBufferCountForParams(This, pParams);

    multisample_type = pParams->MultiSampleType;

    /* Map MultiSampleQuality to MultiSampleType */
    hr = d3dmultisample_type_check(This->screen, pParams->BackBufferFormat,
                                   &multisample_type,
                                   pParams->MultiSampleQuality, NULL);
    if (FAILED(hr)) {
        return hr;
    }

    pf = d3d9_to_pipe_format_checked(This->screen, pParams->BackBufferFormat,
                                     PIPE_TEXTURE_2D, multisample_type,
                                     PIPE_BIND_RENDER_TARGET, FALSE, FALSE);

    /* Decide whether separate (linear, non-msaa, BGRX) present buffers are
     * needed in addition to the render back buffers, i.e. whenever the back
     * buffer itself cannot be handed directly to the presentation backend. */
    if (This->actx->linear_framebuffer ||
        (pf != PIPE_FORMAT_B8G8R8X8_UNORM &&
         pf != PIPE_FORMAT_B8G8R8A8_UNORM) ||
        pParams->SwapEffect != D3DSWAPEFFECT_DISCARD ||
        multisample_type >= 2 ||
        (This->actx->ref && This->actx->ref == This->screen))
        has_present_buffers = TRUE;

    /* Note: the buffer depth has to match the window depth.
     * In practice, ARGB buffers can be used with windows
     * of depth 24. Windows of depth 32 are extremely rare.
     * So even if the buffer is ARGB, say it is depth 24.
     * It is common practice, for example that's how
     * glamor implements depth 24.
     * TODO: handle windows with other depths. Not possible in the short term.
     * For example 16 bits.*/
    depth = 24;

    memset(&tmplt, 0, sizeof(tmplt));
    tmplt.target = PIPE_TEXTURE_2D;
    tmplt.width0 = pParams->BackBufferWidth;
    tmplt.height0 = pParams->BackBufferHeight;
    tmplt.depth0 = 1;
    tmplt.last_level = 0;
    tmplt.array_size = 1;
    tmplt.usage = PIPE_USAGE_DEFAULT;
    tmplt.flags = 0;

    desc.Type = D3DRTYPE_SURFACE;
    desc.Pool = D3DPOOL_DEFAULT;
    desc.MultiSampleType = pParams->MultiSampleType;
    desc.MultiSampleQuality = pParams->MultiSampleQuality;
    desc.Width = pParams->BackBufferWidth;
    desc.Height = pParams->BackBufferHeight;

    /* Drain any pending presentation tasks before tearing the pool down. */
    for (i = 0; i < oldBufferCount; i++) {
        if (This->tasks[i])
            _mesa_threadpool_wait_for_task(This->pool, &(This->tasks[i]));
    }
    memset(This->tasks, 0, sizeof(This->tasks));

    if (This->pool) {
        _mesa_threadpool_destroy(This, This->pool);
        This->pool = NULL;
    }
    /* Threaded submission is incompatible with SWAPEFFECT_COPY. */
    This->enable_threadpool = This->actx->thread_submit &&
        (pParams->SwapEffect != D3DSWAPEFFECT_COPY);
    if (This->enable_threadpool)
        This->pool = _mesa_threadpool_create(This);
    if (!This->pool)
        This->enable_threadpool = FALSE;

    /* Destroy old window buffers and drop old present buffers. */
    for (i = 0; i < oldBufferCount; i++) {
        ID3DPresent_DestroyD3DWindowBuffer(This->present, This->present_handles[i]);
        This->present_handles[i] = NULL;
        if (This->present_buffers[i])
            pipe_resource_reference(&(This->present_buffers[i]), NULL);
    }

    /* Shrink: detach surplus surfaces. Grow: NULL-init the new slots. */
    if (newBufferCount != oldBufferCount) {
        for (i = newBufferCount; i < oldBufferCount; ++i)
            NineUnknown_Detach(NineUnknown(This->buffers[i]));
        for (i = oldBufferCount; i < newBufferCount; ++i) {
            This->buffers[i] = NULL;
            This->present_handles[i] = NULL;
        }
    }
    This->num_back_buffers = newBufferCount;

    /* (Re)create each back buffer and its window buffer handle. */
    for (i = 0; i < newBufferCount; ++i) {
        tmplt.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
        tmplt.nr_samples = multisample_type;
        /* Without separate present buffers the back buffer itself must be
         * presentable. */
        if (!has_present_buffers)
            tmplt.bind |= NINE_BIND_PRESENTBUFFER_FLAGS;
        tmplt.format = d3d9_to_pipe_format_checked(This->screen,
                                                   pParams->BackBufferFormat,
                                                   PIPE_TEXTURE_2D,
                                                   tmplt.nr_samples,
                                                   tmplt.bind, FALSE, FALSE);
        if (tmplt.format == PIPE_FORMAT_NONE)
            return D3DERR_INVALIDCALL;
        resource = This->screen->resource_create(This->screen, &tmplt);
        if (!resource) {
            DBG("Failed to create pipe_resource.\n");
            return D3DERR_OUTOFVIDEOMEMORY;
        }
        if (pParams->Flags & D3DPRESENTFLAG_LOCKABLE_BACKBUFFER)
            resource->flags |= NINE_RESOURCE_FLAG_LOCKABLE;
        if (This->buffers[i]) {
            /* Existing surface: just swap in the new resource. */
            NineSurface9_SetMultiSampleType(This->buffers[i], desc.MultiSampleType);
            NineSurface9_SetResourceResize(This->buffers[i], resource);
            if (has_present_buffers)
                pipe_resource_reference(&resource, NULL);
        } else {
            desc.Format = pParams->BackBufferFormat;
            desc.Usage = D3DUSAGE_RENDERTARGET;
            hr = NineSurface9_new(pDevice, NineUnknown(This), resource, NULL, 0,
                                  0, 0, &desc, &This->buffers[i]);
            if (has_present_buffers)
                pipe_resource_reference(&resource, NULL);
            if (FAILED(hr)) {
                DBG("Failed to create RT surface.\n");
                return hr;
            }
            This->buffers[i]->base.base.forward = FALSE;
        }
        if (has_present_buffers) {
            /* Intermediate present buffer: linear-capable BGRX, no msaa. */
            tmplt.format = PIPE_FORMAT_B8G8R8X8_UNORM;
            tmplt.bind = NINE_BIND_PRESENTBUFFER_FLAGS;
            tmplt.nr_samples = 0;
            if (This->actx->linear_framebuffer)
                tmplt.bind |= PIPE_BIND_LINEAR;
            if (pParams->SwapEffect != D3DSWAPEFFECT_DISCARD)
                tmplt.bind |= PIPE_BIND_RENDER_TARGET;
            resource = This->screen->resource_create(This->screen, &tmplt);
            pipe_resource_reference(&(This->present_buffers[i]), resource);
        }
        /* 'resource' here is the presentable one: either the back buffer
         * itself or the present buffer created just above. */
        This->present_handles[i] = D3DWindowBuffer_create(This, resource, depth, false);
        pipe_resource_reference(&resource, NULL);
        if (!This->present_handles[i]) {
            return D3DERR_DRIVERINTERNALERROR;
        }
    }

    /* Auto depth/stencil: resize the existing surface or create a new one. */
    if (pParams->EnableAutoDepthStencil) {
        tmplt.bind = d3d9_get_pipe_depth_format_bindings(pParams->AutoDepthStencilFormat);
        tmplt.nr_samples = multisample_type;
        tmplt.format = d3d9_to_pipe_format_checked(This->screen,
                                                   pParams->AutoDepthStencilFormat,
                                                   PIPE_TEXTURE_2D,
                                                   tmplt.nr_samples,
                                                   tmplt.bind, FALSE, FALSE);
        if (tmplt.format == PIPE_FORMAT_NONE)
            return D3DERR_INVALIDCALL;
        if (This->zsbuf) {
            resource = This->screen->resource_create(This->screen, &tmplt);
            if (!resource) {
                DBG("Failed to create pipe_resource for depth buffer.\n");
                return D3DERR_OUTOFVIDEOMEMORY;
            }
            NineSurface9_SetMultiSampleType(This->zsbuf, desc.MultiSampleType);
            NineSurface9_SetResourceResize(This->zsbuf, resource);
            pipe_resource_reference(&resource, NULL);
        } else {
            hr = NineDevice9_CreateDepthStencilSurface(pDevice,
                                                       pParams->BackBufferWidth,
                                                       pParams->BackBufferHeight,
                                                       pParams->AutoDepthStencilFormat,
                                                       pParams->MultiSampleType,
                                                       pParams->MultiSampleQuality,
                                                       0,
                                                       (IDirect3DSurface9 **)&This->zsbuf,
                                                       NULL);
            if (FAILED(hr)) {
                DBG("Failed to create ZS surface.\n");
                return hr;
            }
            /* The swapchain owns the ZS surface via bind, not ref. */
            NineUnknown_ConvertRefToBind(NineUnknown(This->zsbuf));
        }
    }

    This->params = *pParams;

    return D3D_OK;
}
/**
 * Destroy a galahad resource wrapper.
 *
 * Drops the wrapper's reference on the underlying pipe_resource (which may
 * destroy it if this was the last reference), then frees the wrapper itself.
 */
void
galahad_resource_destroy(struct galahad_resource *glhd_resource)
{
   struct pipe_resource **wrapped = &glhd_resource->resource;

   /* Release the wrapped resource first, then the wrapper memory. */
   pipe_resource_reference(wrapped, NULL);
   FREE(glhd_resource);
}
/**
 * Present the current back buffer and advance the swapchain.
 *
 * After handing frame 0 to the presentation backend, either swaps buffer 0
 * with any already-released buffer (fast path for DISCARD + IMMEDIATE on
 * D3D9 minor version > 2) or rotates the whole buffer queue (FLIP-style
 * effects) / leaves it untouched (COPY).
 *
 * \return result of the internal present() call, or an occlusion /
 *         device-lost status code
 */
HRESULT NINE_WINAPI
NineSwapChain9_Present( struct NineSwapChain9 *This,
                        const RECT *pSourceRect,
                        const RECT *pDestRect,
                        HWND hDestWindowOverride,
                        const RGNDATA *pDirtyRegion,
                        DWORD dwFlags )
{
    struct pipe_resource *res = NULL;
    D3DWindowBuffer *handle_temp;
    struct threadpool_task *task_temp;
    int i;
    HRESULT hr;

    DBG("This=%p pSourceRect=%p pDestRect=%p hDestWindowOverride=%p "
        "pDirtyRegion=%p dwFlags=%d\n",
        This, pSourceRect, pDestRect, hDestWindowOverride, pDirtyRegion,dwFlags);

    /* Occlusion handling differs between ex and non-ex devices: ex devices
     * report S_PRESENT_OCCLUDED, plain devices go through device-lost. */
    if (This->base.device->ex) {
        if (NineSwapChain9_GetOccluded(This)) {
            DBG("Present is occluded. Returning S_PRESENT_OCCLUDED.\n");
            return S_PRESENT_OCCLUDED;
        }
    } else {
        if (NineSwapChain9_GetOccluded(This) ||
            NineSwapChain9_ResolutionMismatch(This)) {
            This->base.device->device_needs_reset = TRUE;
        }
        if (This->base.device->device_needs_reset) {
            DBG("Device is lost. Returning D3DERR_DEVICELOST.\n");
            return D3DERR_DEVICELOST;
        }
    }

    /* Flush the command-stream thread before touching shared state. */
    nine_csmt_process(This->base.device);

    hr = present(This, pSourceRect, pDestRect,
                 hDestWindowOverride, pDirtyRegion, dwFlags);
    if (hr == D3DERR_WASSTILLDRAWING)
        return hr;

    if (This->base.device->minor_version_num > 2 &&
        This->params.SwapEffect == D3DSWAPEFFECT_DISCARD &&
        This->params.PresentationInterval == D3DPRESENT_INTERVAL_IMMEDIATE &&
        !This->actx->thread_submit) {
        int next_buffer = -1;
        /* Fast path: instead of rotating the whole queue, swap buffer 0
         * with the first back buffer the backend has already released,
         * blocking on the release event if none is free yet. */
        while (next_buffer == -1) {
            /* Find a free backbuffer */
            for (i = 1; i < This->num_back_buffers; i++) {
                if (ID3DPresent_IsBufferReleased(This->present, This->present_handles[i])) {
                    DBG("Found buffer released: %d\n", i);
                    next_buffer = i;
                    break;
                }
            }
            if (next_buffer == -1) {
                DBG("Found no buffer released. Waiting for event\n");
                ID3DPresent_WaitBufferReleaseEvent(This->present);
            }
        }
        /* Switch with the released buffer */
        pipe_resource_reference(&res, This->buffers[0]->base.resource);
        NineSurface9_SetResourceResize(
            This->buffers[0], This->buffers[next_buffer]->base.resource);
        NineSurface9_SetResourceResize(
            This->buffers[next_buffer], res);
        pipe_resource_reference(&res, NULL);

        if (This->present_buffers[0]) {
            /* Swap the matching intermediate present buffers too. */
            pipe_resource_reference(&res, This->present_buffers[0]);
            pipe_resource_reference(&This->present_buffers[0], This->present_buffers[next_buffer]);
            pipe_resource_reference(&This->present_buffers[next_buffer], res);
            pipe_resource_reference(&res, NULL);
        }

        handle_temp = This->present_handles[0];
        This->present_handles[0] = This->present_handles[next_buffer];
        This->present_handles[next_buffer] = handle_temp;

        /* Path not yet compatible with thread_submit */
        assert(!This->tasks[0] && !This->tasks[next_buffer]);
    } else {
        switch (This->params.SwapEffect) {
        case D3DSWAPEFFECT_OVERLAY: /* Not implemented, fallback to FLIP */
        case D3DSWAPEFFECT_FLIPEX: /* Allows optimizations over FLIP for windowed mode. */
        case D3DSWAPEFFECT_DISCARD: /* Allows optimizations over FLIP */
        case D3DSWAPEFFECT_FLIP:
            /* rotate the queue: buffer 0 moves to the back, everything else
             * shifts forward by one; the same rotation is applied to the
             * present buffers, window-buffer handles, and pending tasks so
             * the four arrays stay index-aligned. */
            pipe_resource_reference(&res, This->buffers[0]->base.resource);
            for (i = 1; i < This->num_back_buffers; i++) {
                NineSurface9_SetResourceResize(This->buffers[i - 1],
                                               This->buffers[i]->base.resource);
            }
            NineSurface9_SetResourceResize(
                This->buffers[This->num_back_buffers - 1], res);
            pipe_resource_reference(&res, NULL);

            if (This->present_buffers[0]) {
                pipe_resource_reference(&res, This->present_buffers[0]);
                for (i = 1; i < This->num_back_buffers; i++)
                    pipe_resource_reference(&(This->present_buffers[i-1]), This->present_buffers[i]);
                pipe_resource_reference(&(This->present_buffers[This->num_back_buffers - 1]), res);
                pipe_resource_reference(&res, NULL);
            }

            handle_temp = This->present_handles[0];
            for (i = 1; i < This->num_back_buffers; i++) {
                This->present_handles[i-1] = This->present_handles[i];
            }
            This->present_handles[This->num_back_buffers - 1] = handle_temp;
            task_temp = This->tasks[0];
            for (i = 1; i < This->num_back_buffers; i++) {
                This->tasks[i-1] = This->tasks[i];
            }
            This->tasks[This->num_back_buffers - 1] = task_temp;
            break;

        case D3DSWAPEFFECT_COPY:
            /* do nothing */
            break;
        }
        /* Wait until the (rotated-in) buffer 0 is free for rendering. */
        if (This->tasks[0])
            _mesa_threadpool_wait_for_task(This->pool, &(This->tasks[0]));

        ID3DPresent_WaitBufferReleased(This->present, This->present_handles[0]);
    }

    /* The render target behind buffer 0 changed; mark FB state dirty. */
    This->base.device->context.changed.group |= NINE_STATE_FB;

    return hr;
}
/**
 * Bind a pipe_resource as the backing store of a GL texture image
 * (st_context_iface::teximage hook, used e.g. for EGLImage/drawable
 * bindings).
 *
 * \param stctxi           state-tracker context interface
 * \param tex_type         which GL target to bind (1D/2D/3D/RECT)
 * \param level            mipmap level to bind
 * \param internal_format  requested pipe format (may be overridden by
 *                         tex->format below)
 * \param tex              resource to bind, or NULL to clear the image
 * \param mipmap           unused here — TODO confirm callers
 * \return TRUE on success, FALSE for an unsupported tex_type
 */
static boolean
st_context_teximage(struct st_context_iface *stctxi,
                    enum st_texture_type tex_type,
                    int level, enum pipe_format internal_format,
                    struct pipe_resource *tex, boolean mipmap)
{
   struct st_context *st = (struct st_context *) stctxi;
   struct gl_context *ctx = st->ctx;
   struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   struct st_texture_object *stObj;
   struct st_texture_image *stImage;
   GLenum internalFormat;
   GLuint width, height, depth;
   GLenum target;

   switch (tex_type) {
   case ST_TEXTURE_1D:
      target = GL_TEXTURE_1D;
      break;
   case ST_TEXTURE_2D:
      target = GL_TEXTURE_2D;
      break;
   case ST_TEXTURE_3D:
      target = GL_TEXTURE_3D;
      break;
   case ST_TEXTURE_RECT:
      target = GL_TEXTURE_RECTANGLE_ARB;
      break;
   default:
      return FALSE;
   }

   texObj = _mesa_select_tex_object(ctx, texUnit, target);

   /* Hold the texture lock across all image/object mutation below. */
   _mesa_lock_texture(ctx, texObj);

   stObj = st_texture_object(texObj);
   /* switch to surface based */
   if (!stObj->surface_based) {
      _mesa_clear_texture_object(ctx, texObj);
      stObj->surface_based = GL_TRUE;
   }

   texImage = _mesa_get_tex_image(ctx, texObj, target, level);
   stImage = st_texture_image(texImage);
   if (tex) {
      gl_format texFormat;

      /*
       * XXX When internal_format and tex->format differ, st_finalize_texture
       * needs to allocate a new texture with internal_format and copy the
       * texture here into the new one. It will result in surface_copy being
       * called on surfaces whose formats differ.
       *
       * To avoid that, internal_format is (wrongly) ignored here. A sane fix
       * is to use a sampler view.
       */
      if (!st_sampler_compat_formats(tex->format, internal_format))
         internal_format = tex->format;

      /* Pick GL_RGBA vs GL_RGB based on whether the format has alpha
       * (component index 3 of the RGB colorspace). */
      if (util_format_get_component_bits(internal_format,
               UTIL_FORMAT_COLORSPACE_RGB, 3) > 0)
         internalFormat = GL_RGBA;
      else
         internalFormat = GL_RGB;

      texFormat = st_ChooseTextureFormat(ctx, target, internalFormat,
                                         GL_BGRA, GL_UNSIGNED_BYTE);

      _mesa_init_teximage_fields(ctx, texImage,
                                 tex->width0, tex->height0, 1, 0,
                                 internalFormat, texFormat);

      width = tex->width0;
      height = tex->height0;
      depth = tex->depth0;

      /* grow the image size until we hit level = 0, so the object's base
       * dimensions correspond to the level-0 image */
      while (level > 0) {
         if (width != 1)
            width <<= 1;
         if (height != 1)
            height <<= 1;
         if (depth != 1)
            depth <<= 1;
         level--;
      }
   }
   else {
      /* NULL resource: unbind and clear the image */
      _mesa_clear_texture_image(ctx, texImage);
      width = height = depth = 0;
   }

   /* Take (or drop, when tex is NULL) a reference on the backing store. */
   pipe_resource_reference(&stImage->pt, tex);

   stObj->width0 = width;
   stObj->height0 = height;
   stObj->depth0 = depth;

   _mesa_dirty_texobj(ctx, texObj, GL_TRUE);

   _mesa_unlock_texture(ctx, texObj);

   return TRUE;
}
/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush (see svga_buffer_upload_flush()).
 *
 * When the discard flag is set, a single INVALIDATE_GB_IMAGE command is
 * emitted first, in the same FIFO reservation.
 *
 * On success, sbuf->dma.updates points at the first reserved command and a
 * pipe_resource reference is taken on the buffer (released at flush time).
 *
 * \return PIPE_OK, or PIPE_ERROR_OUT_OF_MEMORY if FIFO space could not be
 *         reserved
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned int i;

   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Recover the full header+body struct from the body pointer the
       * reservation returned. */
      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;
   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);
      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
/**
 * Validate a framebuffer to make sure up-to-date pipe_textures are used.
 * The context is only used for creating pipe surfaces and for calling
 * _mesa_resize_framebuffer().
 * (That should probably be rethought, since those surfaces become
 * drawable state, not context state, and can be freed by another pipe
 * context).
 *
 * Calls the framebuffer iface's validate() to obtain fresh attachment
 * textures (the iface transfers one reference per returned texture, which
 * this function consumes), then rewires each renderbuffer's surface/texture
 * and resizes the GL framebuffer if anything changed.
 */
static void
st_framebuffer_validate(struct st_framebuffer *stfb,
                        struct st_context *st)
{
   struct pipe_resource *textures[ST_ATTACHMENT_COUNT];
   uint width, height;
   unsigned i;
   boolean changed = FALSE;
   int32_t new_stamp;

   /* Check for incomplete framebuffers (e.g. EGL_KHR_surfaceless_context) */
   if (!stfb->iface)
      return;

   /* Nothing to do if the drawable has not changed since last validation. */
   new_stamp = p_atomic_read(&stfb->iface->stamp);
   if (stfb->iface_stamp == new_stamp)
      return;

   /* validate the fb; loop because the stamp may advance concurrently while
    * validate() runs, in which case we must validate again */
   do {
      if (!stfb->iface->validate(&st->iface, stfb->iface, stfb->statts,
                                 stfb->num_statts, textures))
         return;

      stfb->iface_stamp = new_stamp;
      new_stamp = p_atomic_read(&stfb->iface->stamp);
   } while(stfb->iface_stamp != new_stamp);

   width = stfb->Base.Width;
   height = stfb->Base.Height;

   for (i = 0; i < stfb->num_statts; i++) {
      struct st_renderbuffer *strb;
      struct pipe_surface *ps, surf_tmpl;
      gl_buffer_index idx;

      if (!textures[i])
         continue;

      idx = attachment_to_buffer_index(stfb->statts[i]);
      if (idx >= BUFFER_COUNT) {
         /* unmapped attachment: just drop the reference */
         pipe_resource_reference(&textures[i], NULL);
         continue;
      }

      strb = st_renderbuffer(stfb->Base.Attachment[idx].Renderbuffer);
      assert(strb);
      if (strb->texture == textures[i]) {
         /* already using this texture; drop the extra reference */
         pipe_resource_reference(&textures[i], NULL);
         continue;
      }

      u_surface_default_template(&surf_tmpl, textures[i]);
      ps = st->pipe->create_surface(st->pipe, textures[i], &surf_tmpl);
      if (ps) {
         pipe_surface_reference(&strb->surface, ps);
         pipe_resource_reference(&strb->texture, ps->texture);
         /* ownership transfered */
         pipe_surface_reference(&ps, NULL);

         changed = TRUE;

         strb->Base.Width = strb->surface->width;
         strb->Base.Height = strb->surface->height;

         width = strb->Base.Width;
         height = strb->Base.Height;
      }

      /* release the reference handed over by validate() */
      pipe_resource_reference(&textures[i], NULL);
   }

   if (changed) {
      /* bump the stamp so other contexts notice the new attachments */
      ++stfb->stamp;
      _mesa_resize_framebuffer(st->ctx, &stfb->Base, width, height);
   }
}
/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily in blank.
 *
 * Reserves a SURFACE_DMA command with room for one SVGA3dCopyBox per mapped
 * range plus a suffix; the boxes are filled in later by
 * svga_buffer_upload_flush().  Takes a pipe_resource reference on the buffer
 * (released at flush time).  For guest-backed objects this delegates to
 * svga_buffer_upload_gb_command().
 *
 * \return PIPE_OK, PIPE_ERROR_OUT_OF_MEMORY on FIFO reservation failure, or
 *         PIPE_ERROR_BAD_INPUT for an unknown transfer type
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (svga_have_gb_objects(svga))
      return svga_buffer_upload_gb_command(svga, sbuf);

   /* Relocation flags depend on the transfer direction (currently always
    * guest -> host, see 'transfer' above). */
   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   /* One reservation covering the command, all copy boxes, and the suffix. */
   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   /* Remember where the boxes live so the flush can patch them in place. */
   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
/**
 * Upload the (transposed and scaled) IDCT constant matrix into a small
 * 2x8 RGBA32F texture and return a sampler view of it.
 *
 * \param pipe   context used for resource creation and transfer
 * \param scale  factor applied to each matrix coefficient
 * \return a new sampler view (caller owns the reference), or NULL on
 *         allocation/map failure
 */
struct pipe_sampler_view *
vl_idct_upload_matrix(struct pipe_context *pipe, float scale)
{
   struct pipe_resource tex_templ, *matrix;
   struct pipe_sampler_view sv_templ, *sv;
   struct pipe_transfer *buf_transfer;
   unsigned i, j, pitch;
   float *f;

   /* 8x8 block of floats packed as RGBA: width is VL_BLOCK_WIDTH / 4 texels */
   struct pipe_box rect =
   {
      0, 0, 0,
      VL_BLOCK_WIDTH / 4,
      VL_BLOCK_HEIGHT,
      1
   };

   assert(pipe);

   memset(&tex_templ, 0, sizeof(tex_templ));
   tex_templ.target = PIPE_TEXTURE_2D;
   tex_templ.format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   tex_templ.last_level = 0;
   tex_templ.width0 = 2;
   tex_templ.height0 = 8;
   tex_templ.depth0 = 1;
   tex_templ.array_size = 1;
   tex_templ.usage = PIPE_USAGE_IMMUTABLE;
   tex_templ.bind = PIPE_BIND_SAMPLER_VIEW;
   tex_templ.flags = 0;

   matrix = pipe->screen->resource_create(pipe->screen, &tex_templ);
   if (!matrix)
      goto error_matrix;

   f = pipe->transfer_map(pipe, matrix, 0,
                          PIPE_TRANSFER_WRITE |
                          PIPE_TRANSFER_DISCARD_RANGE,
                          &rect, &buf_transfer);
   if (!f)
      goto error_map;

   /* stride is in bytes; convert to a float element pitch */
   pitch = buf_transfer->stride / sizeof(float);

   for(i = 0; i < VL_BLOCK_HEIGHT; ++i)
      for(j = 0; j < VL_BLOCK_WIDTH; ++j)
         // transpose and scale
         f[i * pitch + j] = ((const float (*)[8])const_matrix)[j][i] * scale;

   pipe->transfer_unmap(pipe, buf_transfer);

   memset(&sv_templ, 0, sizeof(sv_templ));
   u_sampler_view_default_template(&sv_templ, matrix, matrix->format);
   sv = pipe->create_sampler_view(pipe, matrix, &sv_templ);
   /* the sampler view holds its own reference now; drop ours */
   pipe_resource_reference(&matrix, NULL);
   if (!sv)
      goto error_map; /* matrix already NULL; the unref below is a no-op */

   return sv;

error_map:
   pipe_resource_reference(&matrix, NULL);

error_matrix:
   return NULL;
}
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 *
 * Fills the copy boxes (non-GB path) or the UPDATE_GB_IMAGE boxes (GB path)
 * with the accumulated dirty ranges, resets the buffer's DMA bookkeeping,
 * removes it from the dirty-buffer list, and drops the pipe_resource
 * reference taken when the command was reserved.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;
      assert(update);

      /* one UPDATE_GB_IMAGE command per dirty byte range; ranges are
       * expressed as 1-D boxes (y = z = 0, h = d = 1) */
      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      /* non-GB path: fill the SURFACE_DMA copy boxes (src offsets mirror
       * the destination offsets since this is a straight 1-D copy) */
      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);  /* remove from svga->dirty_buffers list */
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;
   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}
/* Not required to implement u_resource_vtbl, consider moving to another file:
 */
/**
 * Destroy a texture surface: release the reference it holds on its
 * texture, then free the surface object itself.
 */
void r300_tex_surface_destroy(struct pipe_surface* s)
{
   struct pipe_resource **backing = &s->texture;

   /* Drop the texture reference before freeing the surface. */
   pipe_resource_reference(backing, NULL);
   FREE(s);
}
/**
 * VA-API vaPutSurface entry point: composite the decoded surface (plus any
 * subpictures) into the drawable's texture and flush it to the front buffer.
 *
 * \param ctx         VA driver context
 * \param surface_id  handle of the surface to display
 * \param draw        opaque drawable (window) pointer
 * \param srcx..srch  source sub-rectangle within the surface
 * \param destx..desth destination rectangle within the drawable
 * \param cliprects / number_cliprects / flags  currently unused here
 * \return VA_STATUS_SUCCESS or a VA_STATUS_ERROR_* code
 */
VAStatus
vlVaPutSurface(VADriverContextP ctx, VASurfaceID surface_id, void* draw,
               short srcx, short srcy, unsigned short srcw, unsigned short srch,
               short destx, short desty, unsigned short destw, unsigned short desth,
               VARectangle *cliprects, unsigned int number_cliprects,
               unsigned int flags)
{
   vlVaDriver *drv;
   vlVaSurface *surf;
   struct pipe_screen *screen;
   struct pipe_resource *tex;
   struct pipe_surface surf_templ, *surf_draw;
   struct vl_screen *vscreen;
   struct u_rect src_rect, *dirty_area;
   struct u_rect dst_rect = {destx, destx + destw, desty, desty + desth};
   VAStatus status;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   surf = handle_table_get(drv->htab, surface_id);
   if (!surf) {
      pipe_mutex_unlock(drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   screen = drv->pipe->screen;
   vscreen = drv->vscreen;

   /* The drawable's backing texture; we own a reference until released
    * below. */
   tex = vscreen->texture_from_drawable(vscreen, draw);
   if (!tex) {
      pipe_mutex_unlock(drv->mutex);
      return VA_STATUS_ERROR_INVALID_DISPLAY;
   }

   dirty_area = vscreen->get_dirty_area(vscreen);

   memset(&surf_templ, 0, sizeof(surf_templ));
   surf_templ.format = tex->format;
   surf_draw = drv->pipe->create_surface(drv->pipe, tex, &surf_templ);
   if (!surf_draw) {
      pipe_resource_reference(&tex, NULL);
      pipe_mutex_unlock(drv->mutex);
      return VA_STATUS_ERROR_INVALID_DISPLAY;
   }

   src_rect.x0 = srcx;
   src_rect.y0 = srcy;
   src_rect.x1 = srcw + srcx;
   src_rect.y1 = srch + srcy;

   /* Composite the video buffer into the drawable surface. */
   vl_compositor_clear_layers(&drv->cstate);
   vl_compositor_set_buffer_layer(&drv->cstate, &drv->compositor, 0, surf->buffer, &src_rect, NULL, VL_COMPOSITOR_WEAVE);
   vl_compositor_set_layer_dst_area(&drv->cstate, 0, &dst_rect);
   vl_compositor_render(&drv->cstate, &drv->compositor, surf_draw, dirty_area, true);

   status = vlVaPutSubpictures(surf, drv, surf_draw, dirty_area, &src_rect, &dst_rect);
   if (status) {
      /* Bug fix: release the surface and the drawable texture on this error
       * path too; previously both references were leaked. */
      pipe_surface_reference(&surf_draw, NULL);
      pipe_resource_reference(&tex, NULL);
      pipe_mutex_unlock(drv->mutex);
      return status;
   }

   screen->flush_frontbuffer(screen, tex, 0, 0,
                             vscreen->get_private(vscreen), NULL);

   drv->pipe->flush(drv->pipe, NULL, 0);

   pipe_resource_reference(&tex, NULL);
   pipe_surface_reference(&surf_draw, NULL);
   pipe_mutex_unlock(drv->mutex);

   return VA_STATUS_SUCCESS;
}