static void i915_surface_copy_render(struct pipe_context *pipe, struct pipe_resource *dst, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box) { struct i915_context *i915 = i915_context(pipe); /* Fallback for buffers. */ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); return; } if (!util_blitter_is_copy_supported(i915->blitter, dst, src, PIPE_MASK_RGBAZS)) { util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); return; } i915_util_blitter_save_states(i915); util_blitter_copy_texture(i915->blitter, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box, PIPE_MASK_RGBAZS, TRUE); }
/**
 * Copy a block of pixels from one resource to another.
 * Both resources must share the same format, and resources with
 * nr_samples > 1 are not allowed.
 */
static void
fd_resource_copy_region(struct pipe_context *pctx,
                        struct pipe_resource *dst, unsigned dst_level,
                        unsigned dstx, unsigned dsty, unsigned dstz,
                        struct pipe_resource *src, unsigned src_level,
                        const struct pipe_box *src_box)
{
   struct fd_context *fdctx = fd_context(pctx);

   /* TODO if we have 2d core, or other DMA engine that could be used
    * for simple copies and reasonably easily synchronized with the 3d
    * core, this is where we'd plug it in..
    */

   /* First choice: a blit on the 3d pipe. */
   if (fd_blitter_pipe_copy_region(fdctx,
                                   dst, dst_level, dstx, dsty, dstz,
                                   src, src_level, src_box))
      return;

   /* Last resort: pure software copy. */
   util_resource_copy_region(pctx,
                             dst, dst_level, dstx, dsty, dstz,
                             src, src_level, src_box);
}
void nouveau_copy_buffer(struct nouveau_context *nv, struct nv04_resource *dst, unsigned dstx, struct nv04_resource *src, unsigned srcx, unsigned size) { assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER); if (likely(dst->domain) && likely(src->domain)) { nv->copy_data(nv, dst->bo, dst->offset + dstx, dst->domain, src->bo, src->offset + srcx, src->domain, size); dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING; nouveau_fence_ref(nv->screen->fence.current, &dst->fence); nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr); src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING; nouveau_fence_ref(nv->screen->fence.current, &src->fence); } else { struct pipe_box src_box; src_box.x = srcx; src_box.y = 0; src_box.z = 0; src_box.width = size; src_box.height = 1; src_box.depth = 1; util_resource_copy_region(&nv->pipe, &dst->base, 0, dstx, 0, 0, &src->base, 0, &src_box); } }
static void swr_resource_copy(struct pipe_context *pipe, struct pipe_resource *dst, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box) { struct swr_screen *screen = swr_screen(pipe->screen); /* If either the src or dst is a renderTarget, store tiles before copy */ swr_store_dirty_resource(pipe, src, SWR_TILE_RESOLVED); swr_store_dirty_resource(pipe, dst, SWR_TILE_RESOLVED); swr_fence_finish(pipe->screen, NULL, screen->flush_fence, 0); swr_resource_unused(src); swr_resource_unused(dst); if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) || (dst->target != PIPE_BUFFER && src->target != PIPE_BUFFER)) { util_resource_copy_region( pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); return; } debug_printf("unhandled swr_resource_copy\n"); }
static void r600_copy_buffer(struct pipe_context *ctx, struct pipe_resource *dst, unsigned dstx, struct pipe_resource *src, const struct pipe_box *src_box) { struct r600_context *rctx = (struct r600_context*)ctx; if (rctx->screen->b.has_cp_dma) { r600_cp_dma_copy_buffer(rctx, dst, dstx, src, src_box->x, src_box->width); } else if (rctx->screen->b.has_streamout && /* Require 4-byte alignment. */ dstx % 4 == 0 && src_box->x % 4 == 0 && src_box->width % 4 == 0) { r600_blitter_begin(ctx, R600_COPY_BUFFER); util_blitter_copy_buffer(rctx->blitter, dst, dstx, src, src_box->x, src_box->width); r600_blitter_end(ctx); } else { util_resource_copy_region(ctx, dst, 0, dstx, 0, 0, src, 0, src_box); } /* The index buffer (VGT) doesn't seem to see the result of the copying. * Can we somehow flush the index buffer cache? Starting a new IB seems * to do the trick. */ if (rctx->b.chip_class <= R700) rctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL); }
void r600_copy_buffer(struct pipe_context *ctx, struct pipe_resource *dst, unsigned dstx, struct pipe_resource *src, const struct pipe_box *src_box) { struct r600_context *rctx = (struct r600_context*)ctx; if (rctx->screen->has_cp_dma) { r600_cp_dma_copy_buffer(rctx, dst, dstx, src, src_box->x, src_box->width); } else if (rctx->screen->has_streamout && /* Require 4-byte alignment. */ dstx % 4 == 0 && src_box->x % 4 == 0 && src_box->width % 4 == 0) { /* Flush both resources. */ r600_flag_resource_cache_flush(rctx, src); r600_flag_resource_cache_flush(rctx, dst); r600_blitter_begin(ctx, R600_COPY_BUFFER); util_blitter_copy_buffer(rctx->blitter, dst, dstx, src, src_box->x, src_box->width); r600_blitter_end(ctx); /* Flush the dst in case the 3D engine has been prefetching the resource. */ r600_flag_resource_cache_flush(rctx, dst); } else { util_resource_copy_region(ctx, dst, 0, dstx, 0, 0, src, 0, src_box); } }
/* Copy a block of pixels between two resources on the r600 HW blitter.
 *
 * Compressed resources cannot be rendered to directly, so they are
 * temporarily retyped to a blittable format and the coordinates are
 * translated from pixels to compression blocks; the original formats
 * are restored afterwards.
 */
static void r600_resource_copy_region(struct pipe_context *ctx,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   struct r600_resource_texture *rsrc = (struct r600_resource_texture*)src;
   struct texture_orig_info orig_info[2];   /* [0] = saved src, [1] = saved dst state */
   struct pipe_box sbox;                    /* src box translated to block units */
   const struct pipe_box *psbox;            /* points at sbox or the caller's box */
   boolean restore_orig[2];                 /* which resources were retyped */

   memset(orig_info, 0, sizeof(orig_info));

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   /* Flush a depth texture to its sampling-compatible copy first
    * (skipped when this already is the flushing texture). */
   if (rsrc->depth && !rsrc->is_flushing_texture)
      r600_texture_depth_flush(ctx, src, FALSE);

   restore_orig[0] = restore_orig[1] = FALSE;

   if (util_format_is_compressed(src->format)) {
      /* Retype the source and convert the source box from pixel to
       * block coordinates. */
      r600_compressed_to_blittable(src, src_level, &orig_info[0]);
      restore_orig[0] = TRUE;
      sbox.x = util_format_get_nblocksx(orig_info[0].format, src_box->x);
      sbox.y = util_format_get_nblocksy(orig_info[0].format, src_box->y);
      sbox.z = src_box->z;
      sbox.width = util_format_get_nblocksx(orig_info[0].format, src_box->width);
      sbox.height = util_format_get_nblocksy(orig_info[0].format, src_box->height);
      sbox.depth = src_box->depth;
      psbox = &sbox;
   } else
      psbox = src_box;

   if (util_format_is_compressed(dst->format)) {
      r600_compressed_to_blittable(dst, dst_level, &orig_info[1]);
      restore_orig[1] = TRUE;
      /* translate the dst box as well */
      dstx = util_format_get_nblocksx(orig_info[1].format, dstx);
      dsty = util_format_get_nblocksy(orig_info[1].format, dsty);
   }

   r600_hw_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
                       src, src_level, psbox);

   /* Undo any temporary format changes made above. */
   if (restore_orig[0])
      r600_reset_blittable_to_compressed(src, src_level, &orig_info[0]);

   if (restore_orig[1])
      r600_reset_blittable_to_compressed(dst, dst_level, &orig_info[1]);
}
static void i915_surface_copy_render(struct pipe_context *pipe, struct pipe_resource *dst, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box) { struct i915_context *i915 = i915_context(pipe); unsigned src_width0 = src->width0; unsigned src_height0 = src->height0; unsigned dst_width0 = dst->width0; unsigned dst_height0 = dst->height0; struct pipe_box dstbox; struct pipe_sampler_view src_templ, *src_view; struct pipe_surface dst_templ, *dst_view; const struct util_format_description *desc; /* Fallback for buffers. */ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) goto fallback; /* Fallback for depth&stencil. XXX: see if we can use a proxy format */ desc = util_format_description(src->format); if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) goto fallback; desc = util_format_description(dst->format); if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS) goto fallback; util_blitter_default_dst_texture(&dst_templ, dst, dst_level, dstz); util_blitter_default_src_texture(i915->blitter, &src_templ, src, src_level); if (!util_blitter_is_copy_supported(i915->blitter, dst, src)) goto fallback; i915_util_blitter_save_states(i915); dst_view = i915_create_surface_custom(pipe, dst, &dst_templ, dst_width0, dst_height0); src_view = i915_create_sampler_view_custom(pipe, src, &src_templ, src_width0, src_height0); u_box_3d(dstx, dsty, dstz, abs(src_box->width), abs(src_box->height), abs(src_box->depth), &dstbox); util_blitter_blit_generic(i915->blitter, dst_view, &dstbox, src_view, src_box, src_width0, src_height0, PIPE_MASK_RGBAZS, PIPE_TEX_FILTER_NEAREST, NULL, FALSE); return; fallback: util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); }
/**
 * Fallback to the copy region utility which uses map/memcpy for the copy,
 * wrapped in host statistics bookkeeping.
 */
static void
copy_region_fallback(struct svga_context *svga,
                     struct pipe_resource *dst_tex, unsigned dst_level,
                     unsigned dstx, unsigned dsty, unsigned dstz,
                     struct pipe_resource *src_tex, unsigned src_level,
                     const struct pipe_box *src_box)
{
   struct svga_winsys_screen *screen_ws =
      svga_screen(svga->pipe.screen)->sws;

   SVGA_STATS_TIME_PUSH(screen_ws, SVGA_STATS_TIME_COPYREGIONFALLBACK);
   util_resource_copy_region(&svga->pipe,
                             dst_tex, dst_level, dstx, dsty, dstz,
                             src_tex, src_level, src_box);
   SVGA_STATS_TIME_POP(screen_ws);

   /* Silence "unused" warnings when the stats macros compile out. */
   (void) screen_ws;
}
void util_blitter_copy_buffer(struct blitter_context *blitter, struct pipe_resource *dst, unsigned dstx, struct pipe_resource *src, unsigned srcx, unsigned size) { struct blitter_context_priv *ctx = (struct blitter_context_priv*)blitter; struct pipe_context *pipe = ctx->base.pipe; struct pipe_vertex_buffer vb; struct pipe_stream_output_target *so_target; /* Drivers not capable of Stream Out should not call this function * in the first place. */ assert(ctx->has_stream_out); /* Some alignment is required. */ if (srcx % 4 != 0 || dstx % 4 != 0 || size % 16 != 0 || !ctx->has_stream_out) { struct pipe_box box; u_box_1d(srcx, size, &box); util_resource_copy_region(pipe, dst, 0, dstx, 0, 0, src, 0, &box); return; } blitter_set_running_flag(ctx); blitter_check_saved_vertex_states(ctx); vb.buffer = src; vb.buffer_offset = srcx; vb.stride = 4; pipe->set_vertex_buffers(pipe, 1, &vb); pipe->bind_vertex_elements_state(pipe, ctx->velem_state_readbuf); pipe->bind_vs_state(pipe, ctx->vs_pos_only); if (ctx->has_geometry_shader) pipe->bind_gs_state(pipe, NULL); pipe->bind_rasterizer_state(pipe, ctx->rs_discard_state); so_target = pipe->create_stream_output_target(pipe, dst, dstx, size); pipe->set_stream_output_targets(pipe, 1, &so_target, 0); util_draw_arrays(pipe, PIPE_PRIM_POINTS, 0, size / 16); blitter_restore_vertex_states(ctx); blitter_unset_running_flag(ctx); pipe_so_target_reference(&so_target, NULL); }
/* Copy a linear byte range between two buffers.
 *
 * Uses the streamout blitter copy when available and all offsets/sizes
 * are dword aligned; otherwise a mapped software copy.
 */
void r600_copy_buffer(struct pipe_context *ctx,
                      struct pipe_resource *dst, unsigned dstx,
                      struct pipe_resource *src,
                      const struct pipe_box *src_box)
{
   struct r600_context *rctx = (struct r600_context*)ctx;

   /* Without streamout, or with misaligned offsets/size, fall back
    * to a mapped copy. */
   if (!rctx->screen->has_streamout ||
       dstx % 4 != 0 || src_box->x % 4 != 0 || src_box->width % 4 != 0) {
      util_resource_copy_region(ctx, dst, 0, dstx, 0, 0, src, 0, src_box);
      return;
   }

   r600_blitter_begin(ctx, R600_COPY_BUFFER);
   util_blitter_copy_buffer(rctx->blitter, dst, dstx, src,
                            src_box->x, src_box->width);
   r600_blitter_end(ctx);
}
/* Assumes all values are within bounds -- no checking at this level - * do it higher up if required. */ static void i915_surface_copy_blitter(struct pipe_context *pipe, struct pipe_resource *dst, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box) { struct i915_texture *dst_tex = i915_texture(dst); struct i915_texture *src_tex = i915_texture(src); struct pipe_resource *dpt = &dst_tex->b.b; struct pipe_resource *spt = &src_tex->b.b; unsigned dst_offset, src_offset; /* in bytes */ /* Fallback for buffers. */ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); return; } /* XXX cannot copy 3d regions at this time */ assert(src_box->depth == 1); if (dst->target != PIPE_TEXTURE_CUBE && dst->target != PIPE_TEXTURE_3D) assert(dstz == 0); dst_offset = i915_texture_offset(dst_tex, dst_level, dstz); if (src->target != PIPE_TEXTURE_CUBE && src->target != PIPE_TEXTURE_3D) assert(src_box->z == 0); src_offset = i915_texture_offset(src_tex, src_level, src_box->z); assert( util_format_get_blocksize(dpt->format) == util_format_get_blocksize(spt->format) ); assert( util_format_get_blockwidth(dpt->format) == util_format_get_blockwidth(spt->format) ); assert( util_format_get_blockheight(dpt->format) == util_format_get_blockheight(spt->format) ); assert( util_format_get_blockwidth(dpt->format) == 1 ); assert( util_format_get_blockheight(dpt->format) == 1 ); i915_copy_blit( i915_context(pipe), util_format_get_blocksize(dpt->format), (unsigned short) src_tex->stride, src_tex->buffer, src_offset, (unsigned short) dst_tex->stride, dst_tex->buffer, dst_offset, (short) src_box->x, (short) src_box->y, (short) dstx, (short) dsty, (short) src_box->width, (short) src_box->height ); }
static void i915_surface_copy_render(struct pipe_context *pipe, struct pipe_resource *dst, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box) { struct i915_context *i915 = i915_context(pipe); /* Fallback for buffers. */ if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box); return; } util_blitter_save_blend(i915->blitter, (void *)i915->blend); util_blitter_save_depth_stencil_alpha(i915->blitter, (void *)i915->depth_stencil); util_blitter_save_stencil_ref(i915->blitter, &i915->stencil_ref); util_blitter_save_rasterizer(i915->blitter, (void *)i915->rasterizer); util_blitter_save_fragment_shader(i915->blitter, i915->saved_fs); util_blitter_save_vertex_shader(i915->blitter, i915->saved_vs); util_blitter_save_viewport(i915->blitter, &i915->viewport); util_blitter_save_clip(i915->blitter, &i915->saved_clip); util_blitter_save_vertex_elements(i915->blitter, i915->saved_velems); util_blitter_save_vertex_buffers(i915->blitter, i915->saved_nr_vertex_buffers, i915->saved_vertex_buffers); util_blitter_save_framebuffer(i915->blitter, &i915->framebuffer); util_blitter_save_fragment_sampler_states(i915->blitter, i915->saved_nr_samplers, i915->saved_samplers); util_blitter_save_fragment_sampler_views(i915->blitter, i915->saved_nr_sampler_views, i915->saved_sampler_views); util_blitter_copy_texture(i915->blitter, dst, dst_level, dstx, dsty, dstz, src, src_level, src_box, TRUE); }
void nv30_resource_copy_region(struct pipe_context *pipe, struct pipe_resource *dstres, unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz, struct pipe_resource *srcres, unsigned src_level, const struct pipe_box *src_box) { struct nv30_context *nv30 = nv30_context(pipe); struct nv30_rect src, dst; if (dstres->target == PIPE_BUFFER && srcres->target == PIPE_BUFFER) { util_resource_copy_region(pipe, dstres, dst_level, dstx, dsty, dstz, srcres, src_level, src_box); return; } define_rect(srcres, src_level, src_box->z, src_box->x, src_box->y, src_box->width, src_box->height, &src); define_rect(dstres, dst_level, dstz, dstx, dsty, src_box->width, src_box->height, &dst); nv30_transfer_rect(nv30, NEAREST, &src, &dst); }
/* Copy a region, trying the BLT engine first, then a 3D-pipe blit,
 * and finally the mapped software path.
 */
static void
ilo_resource_copy_region(struct pipe_context *pipe,
                         struct pipe_resource *dst, unsigned dst_level,
                         unsigned dstx, unsigned dsty, unsigned dstz,
                         struct pipe_resource *src, unsigned src_level,
                         const struct pipe_box *src_box)
{
   struct ilo_context *ilo = ilo_context(pipe);

   /* 1) hardware BLT engine */
   if (ilo_blitter_blt_copy_resource(ilo->blitter,
                                     dst, dst_level, dstx, dsty, dstz,
                                     src, src_level, src_box))
      return;

   /* 2) 3D pipeline blit */
   if (ilo_blitter_pipe_copy_resource(ilo->blitter,
                                      dst, dst_level, dstx, dsty, dstz,
                                      src, src_level, src_box))
      return;

   /* 3) mapped software copy */
   util_resource_copy_region(&ilo->base,
                             dst, dst_level, dstx, dsty, dstz,
                             src, src_level, src_box);
}
/* XXX still have doubts about this... */
/* Copy a region between two svga textures via the host surface-copy
 * path.  Buffers use the mapped software fallback; cube maps need the
 * pipe "z" coordinate remapped to an SVGA face index.
 */
static void svga_surface_copy(struct pipe_context *pipe,
                              struct pipe_resource* dst_tex,
                              unsigned dst_level,
                              unsigned dstx, unsigned dsty, unsigned dstz,
                              struct pipe_resource* src_tex,
                              unsigned src_level,
                              const struct pipe_box *src_box)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *stex, *dtex;
/*   struct pipe_screen *screen = pipe->screen;
   SVGA3dCopyBox *box;
   enum pipe_error ret;
   struct pipe_surface *srcsurf, *dstsurf;*/
   unsigned dst_face, dst_z, src_face, src_z;

   /* Emit buffered drawing commands, and any back copies. */
   svga_surfaces_flush( svga );

   /* Fallback for buffers. */
   if (dst_tex->target == PIPE_BUFFER && src_tex->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst_tex, dst_level, dstx, dsty, dstz,
                                src_tex, src_level, src_box);
      return;
   }

   stex = svga_texture(src_tex);
   dtex = svga_texture(dst_tex);

#if 0
   srcsurf = screen->get_tex_surface(screen, src_tex,
                                     src_level, src_box->z, src_box->z,
                                     PIPE_BIND_SAMPLER_VIEW);

   dstsurf = screen->get_tex_surface(screen, dst_tex,
                                     dst_level, dst_box->z, dst_box->z,
                                     PIPE_BIND_RENDER_TARGET);

   SVGA_DBG(DEBUG_DMA, "blit to sid %p (%d,%d), from sid %p (%d,%d) sz %dx%d\n",
            svga_surface(dstsurf)->handle,
            dstx, dsty,
            svga_surface(srcsurf)->handle,
            src_box->x, src_box->y,
            width, height);

   ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                 srcsurf,
                                 dstsurf,
                                 &box,
                                 1);
   if(ret != PIPE_OK) {
      svga_context_flush(svga, NULL);

      ret = SVGA3D_BeginSurfaceCopy(svga->swc,
                                    srcsurf,
                                    dstsurf,
                                    &box,
                                    1);
      assert(ret == PIPE_OK);
   }

   box->x = dstx;
   box->y = dsty;
   box->z = 0;
   box->w = width;
   box->h = height;
   box->d = 1;
   box->srcx = src_box->x;
   box->srcy = src_box->y;
   box->srcz = 0;

   SVGA_FIFOCommitAll(svga->swc);

   svga_surface(dstsurf)->dirty = TRUE;
   svga_propagate_surface(pipe, dstsurf);

   pipe_surface_reference(&srcsurf, NULL);
   pipe_surface_reference(&dstsurf, NULL);
#else
   /* Cube maps address faces through box->z on the pipe interface but
    * through a separate face index in the SVGA protocol; split the
    * coordinate accordingly. */
   if (src_tex->target == PIPE_TEXTURE_CUBE) {
      src_face = src_box->z;
      src_z = 0;
      assert(src_box->depth == 1);
   }
   else {
      src_face = 0;
      src_z = src_box->z;
   }
   /* different src/dst type???*/
   if (dst_tex->target == PIPE_TEXTURE_CUBE) {
      dst_face = dstz;
      dst_z = 0;
      assert(src_box->depth == 1);
   }
   else {
      dst_face = 0;
      dst_z = dstz;
   }

   svga_texture_copy_handle(svga,
                            stex->handle,
                            src_box->x, src_box->y, src_z,
                            src_level, src_face,
                            dtex->handle,
                            dstx, dsty, dst_z,
                            dst_level, dst_face,
                            src_box->width, src_box->height, src_box->depth);
#endif
}
/* Re-sync a shadow texture with its parent resource.
 *
 * Copies every mip level of the parent (starting at the view's
 * first_level) into the shadow resource, but only when the parent has
 * been written since the last sync (tracked via the `writes` counter).
 */
void
vc4_update_shadow_baselevel_texture(struct pipe_context *pctx,
                                    struct pipe_sampler_view *view)
{
        struct vc4_resource *shadow = vc4_resource(view->texture);
        struct vc4_resource *orig = vc4_resource(shadow->shadow_parent);
        assert(orig);

        /* Nothing changed in the parent since the last sync. */
        if (shadow->writes == orig->writes)
                return;

        for (int i = 0; i <= shadow->base.b.last_level; i++) {
                /* Copy the full extent of shadow level i from the
                 * corresponding (rebased) parent level. */
                struct pipe_box box = {
                        .x = 0,
                        .y = 0,
                        .z = 0,
                        .width = u_minify(shadow->base.b.width0, i),
                        .height = u_minify(shadow->base.b.height0, i),
                        .depth = 1,
                };

                util_resource_copy_region(pctx,
                                          &shadow->base.b, i, 0, 0, 0,
                                          &orig->base.b,
                                          view->u.tex.first_level + i,
                                          &box);
        }

        shadow->writes = orig->writes;
}

/**
 * Converts a 4-byte index buffer to 2 bytes.
 *
 * Since GLES2 only has support for 1 and 2-byte indices, the hardware
 * doesn't include 4-byte index support, and we have to shrink it down.
 *
 * There's no fallback support for when indices end up being larger than
 * 2^16, though it will at least assertion fail.  Also, if the original
 * index data was in user memory, it would be nice to not have uploaded
 * it to a VBO before translating.
 */
void
vc4_update_shadow_index_buffer(struct pipe_context *pctx,
                               const struct pipe_index_buffer *ib)
{
        struct vc4_resource *shadow = vc4_resource(ib->buffer);
        struct vc4_resource *orig = vc4_resource(shadow->shadow_parent);
        /* Number of 16-bit indices the shadow buffer holds. */
        uint32_t count = shadow->base.b.width0 / 2;

        /* Parent unchanged since last translation. */
        if (shadow->writes == orig->writes)
                return;

        struct pipe_transfer *src_transfer;
        uint32_t *src = pipe_buffer_map_range(pctx, &orig->base.b,
                                              ib->offset,
                                              count * 4,
                                              PIPE_TRANSFER_READ,
                                              &src_transfer);

        struct pipe_transfer *dst_transfer;
        uint16_t *dst = pipe_buffer_map_range(pctx, &shadow->base.b,
                                              0,
                                              count * 2,
                                              PIPE_TRANSFER_WRITE,
                                              &dst_transfer);

        /* Narrow each 32-bit index to 16 bits; out-of-range indices are
         * only caught by the assert (no runtime fallback, see above). */
        for (int i = 0; i < count; i++) {
                uint32_t src_index = src[i];
                assert(src_index <= 0xffff);
                dst[i] = src_index;
        }

        pctx->transfer_unmap(pctx, dst_transfer);
        pctx->transfer_unmap(pctx, src_transfer);

        shadow->writes = orig->writes;
}

/* Install the vc4 resource hooks on the screen. */
void
vc4_resource_screen_init(struct pipe_screen *pscreen)
{
        pscreen->resource_create = vc4_resource_create;
        pscreen->resource_from_handle = vc4_resource_from_handle;
        pscreen->resource_get_handle = u_resource_get_handle_vtbl;
        pscreen->resource_destroy = u_resource_destroy_vtbl;
}
/* Copy a block of pixels from one surface to another.
 *
 * Non-renderable and compressed formats are temporarily retyped to a
 * renderable format of the same block size (with coordinates rescaled
 * for the compressed case), the HW copy is performed, and the original
 * formats are restored.
 */
static void r300_resource_copy_region(struct pipe_context *pipe,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
    struct r300_context *r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    /* Saved copies of the resource descriptions, so the temporary
     * format changes below can be undone at the end. */
    struct pipe_resource old_src = *src;
    struct pipe_resource old_dst = *dst;
    struct pipe_resource new_src = old_src;
    struct pipe_resource new_dst = old_dst;
    const struct util_format_description *desc =
        util_format_description(dst->format);
    struct pipe_box box;

    /* Fallback for buffers. */
    if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
        util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box);
        return;
    }

    /* The bound zbuffer's ZMASK must be decompressed before it can be
     * read or written by a copy. */
    if (r300->zmask_in_use && !r300->locked_zbuffer) {
        if (fb->zsbuf->texture == src ||
            fb->zsbuf->texture == dst) {
            r300_decompress_zmask(r300);
        }
    }

    /* Handle non-renderable plain formats. */
    if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN &&
        (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB ||
         !pipe->screen->is_format_supported(pipe->screen,
                                            src->format, src->target,
                                            src->nr_samples,
                                            PIPE_BIND_SAMPLER_VIEW) ||
         !pipe->screen->is_format_supported(pipe->screen,
                                            dst->format, dst->target,
                                            dst->nr_samples,
                                            PIPE_BIND_RENDER_TARGET))) {
        /* Substitute a renderable format with the same block size. */
        switch (util_format_get_blocksize(old_dst.format)) {
            case 1:
                new_dst.format = PIPE_FORMAT_I8_UNORM;
                break;
            case 2:
                new_dst.format = PIPE_FORMAT_B4G4R4A4_UNORM;
                break;
            case 4:
                new_dst.format = PIPE_FORMAT_B8G8R8A8_UNORM;
                break;
            case 8:
                new_dst.format = PIPE_FORMAT_R16G16B16A16_UNORM;
                break;
            default:
                debug_printf("r300: surface_copy: Unhandled format: %s. Falling back to software.\n"
                             "r300: surface_copy: Software fallback doesn't work for tiled textures.\n",
                             util_format_short_name(dst->format));
        }
        new_src.format = new_dst.format;
    }

    /* Handle compressed formats. */
    if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC ||
        desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
        switch (util_format_get_blocksize(old_dst.format)) {
        case 8:
            /* 1 pixel = 4 bits,
             * we set 1 pixel = 2 bytes ===> 4 times larger pixels. */
            new_dst.format = PIPE_FORMAT_B4G4R4A4_UNORM;
            break;
        case 16:
            /* 1 pixel = 8 bits,
             * we set 1 pixel = 4 bytes ===> 4 times larger pixels. */
            new_dst.format = PIPE_FORMAT_B8G8R8A8_UNORM;
            break;
        }

        /* Since the pixels are 4 times larger, we must decrease
         * the image size and the coordinates 4 times. */
        new_src.format = new_dst.format;
        new_dst.height0 = (new_dst.height0 + 3) / 4;
        new_src.height0 = (new_src.height0 + 3) / 4;
        dsty /= 4;
        box = *src_box;
        box.y /= 4;
        box.height = (box.height + 3) / 4;
        src_box = &box;
    }

    /* Apply the temporary formats, copy, then restore. */
    if (old_src.format != new_src.format)
        r300_resource_set_properties(pipe->screen, src, &new_src);
    if (old_dst.format != new_dst.format)
        r300_resource_set_properties(pipe->screen, dst, &new_dst);

    r300_hw_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                        src, src_level, src_box);

    if (old_src.format != new_src.format)
        r300_resource_set_properties(pipe->screen, src, &old_src);
    if (old_dst.format != new_dst.format)
        r300_resource_set_properties(pipe->screen, dst, &old_dst);
}
/* Copy a block of pixels between two resources via u_blitter.
 *
 * Depth textures are decompressed in place before the blit; compressed
 * and otherwise non-blittable formats are temporarily retyped to an
 * equivalent blittable format (with coordinates translated to block
 * units for the compressed case) and restored afterwards.
 */
static void r600_resource_copy_region(struct pipe_context *ctx,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_resource_texture *rsrc = (struct r600_resource_texture*)src;
	struct texture_orig_info orig_info[2];  /* [0] = saved src, [1] = saved dst */
	struct pipe_box sbox;                   /* src box in block units */
	const struct pipe_box *psbox = src_box;
	boolean restore_orig[2];

	memset(orig_info, 0, sizeof(orig_info));

	/* Fallback for buffers. */
	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		util_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
					  src, src_level, src_box);
		return;
	}

	/* This must be done before entering u_blitter to avoid recursion. */
	if (rsrc->is_depth && !rsrc->is_flushing_texture) {
		si_blit_decompress_depth_in_place(rctx, rsrc,
						  src_level, src_level,
						  src_box->z, src_box->z + src_box->depth - 1);
	}

	restore_orig[0] = restore_orig[1] = FALSE;

	if (util_format_is_compressed(src->format) &&
	    util_format_is_compressed(dst->format)) {
		/* Retype both resources and translate the coordinates
		 * from pixels to compression blocks. */
		r600_compressed_to_blittable(src, src_level, &orig_info[0]);
		restore_orig[0] = TRUE;
		sbox.x = util_format_get_nblocksx(orig_info[0].format, src_box->x);
		sbox.y = util_format_get_nblocksy(orig_info[0].format, src_box->y);
		sbox.z = src_box->z;
		sbox.width = util_format_get_nblocksx(orig_info[0].format, src_box->width);
		sbox.height = util_format_get_nblocksy(orig_info[0].format, src_box->height);
		sbox.depth = src_box->depth;
		psbox = &sbox;

		r600_compressed_to_blittable(dst, dst_level, &orig_info[1]);
		restore_orig[1] = TRUE;
		/* translate the dst box as well */
		dstx = util_format_get_nblocksx(orig_info[1].format, dstx);
		dsty = util_format_get_nblocksy(orig_info[1].format, dsty);
	} else if (!util_blitter_is_copy_supported(rctx->blitter, dst, src,
						   PIPE_MASK_RGBAZS)) {
		/* Substitute a copy-safe format of the same block size. */
		unsigned blocksize = util_format_get_blocksize(src->format);

		switch (blocksize) {
		case 1:
			r600_change_format(src, src_level, &orig_info[0],
					   PIPE_FORMAT_R8_UNORM);
			r600_change_format(dst, dst_level, &orig_info[1],
					   PIPE_FORMAT_R8_UNORM);
			break;
		case 4:
			r600_change_format(src, src_level, &orig_info[0],
					   PIPE_FORMAT_R8G8B8A8_UNORM);
			r600_change_format(dst, dst_level, &orig_info[1],
					   PIPE_FORMAT_R8G8B8A8_UNORM);
			break;
		default:
			fprintf(stderr, "Unhandled format %s with blocksize %u\n",
				util_format_short_name(src->format), blocksize);
			assert(0);
		}
		restore_orig[0] = TRUE;
		restore_orig[1] = TRUE;
	}

	r600_blitter_begin(ctx, R600_COPY);
	util_blitter_copy_texture(rctx->blitter, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, psbox, PIPE_MASK_RGBAZS, TRUE);
	r600_blitter_end(ctx);

	/* Undo any temporary format changes made above. */
	if (restore_orig[0])
		r600_reset_blittable_to_orig(src, src_level, &orig_info[0]);

	if (restore_orig[1])
		r600_reset_blittable_to_orig(dst, dst_level, &orig_info[1]);
}
/* Copy a block of pixels from one surface to another.
 *
 * Works on templates (not the resources themselves): non-renderable and
 * compressed formats are substituted with renderable formats of the
 * same block size, with sizes/coordinates rescaled for the compressed
 * case, then the copy is done with a generic blit.
 */
static void r300_resource_copy_region(struct pipe_context *pipe,
                                      struct pipe_resource *dst,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct pipe_resource *src,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
    struct pipe_screen *screen = pipe->screen;
    struct r300_context *r300 = r300_context(pipe);
    struct pipe_framebuffer_state *fb =
        (struct pipe_framebuffer_state*)r300->fb_state.state;
    unsigned src_width0 = r300_resource(src)->tex.width0;
    unsigned src_height0 = r300_resource(src)->tex.height0;
    unsigned dst_width0 = r300_resource(dst)->tex.width0;
    unsigned dst_height0 = r300_resource(dst)->tex.height0;
    unsigned layout;
    struct pipe_box box;
    struct pipe_sampler_view src_templ, *src_view;
    struct pipe_surface dst_templ, *dst_view;

    /* Fallback for buffers. */
    if ((dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) ||
        !r300_is_blit_supported(dst->format)) {
        util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box);
        return;
    }

    /* The code below changes the texture format so that the copy can be done
     * on hardware. E.g. depth-stencil surfaces are copied as RGBA
     * colorbuffers. */

    util_blitter_default_dst_texture(&dst_templ, dst, dst_level, dstz, src_box);
    util_blitter_default_src_texture(&src_templ, src, src_level);

    layout = util_format_description(dst_templ.format)->layout;

    /* Handle non-renderable plain formats. */
    if (layout == UTIL_FORMAT_LAYOUT_PLAIN &&
        (!screen->is_format_supported(screen, src_templ.format, src->target,
                                      src->nr_samples,
                                      PIPE_BIND_SAMPLER_VIEW) ||
         !screen->is_format_supported(screen, dst_templ.format, dst->target,
                                      dst->nr_samples,
                                      PIPE_BIND_RENDER_TARGET))) {
        /* Substitute a renderable format with the same block size. */
        switch (util_format_get_blocksize(dst_templ.format)) {
            case 1:
                dst_templ.format = PIPE_FORMAT_I8_UNORM;
                break;
            case 2:
                dst_templ.format = PIPE_FORMAT_B4G4R4A4_UNORM;
                break;
            case 4:
                dst_templ.format = PIPE_FORMAT_B8G8R8A8_UNORM;
                break;
            case 8:
                dst_templ.format = PIPE_FORMAT_R16G16B16A16_UNORM;
                break;
            default:
                debug_printf("r300: copy_region: Unhandled format: %s. Falling back to software.\n"
                             "r300: copy_region: Software fallback doesn't work for tiled textures.\n",
                             util_format_short_name(dst_templ.format));
        }
        src_templ.format = dst_templ.format;
    }

    /* Handle compressed formats. */
    if (layout == UTIL_FORMAT_LAYOUT_S3TC ||
        layout == UTIL_FORMAT_LAYOUT_RGTC) {
        assert(src_templ.format == dst_templ.format);

        box = *src_box;
        src_box = &box;

        /* Round sizes up to whole 4x4 compression blocks. */
        dst_width0 = align(dst_width0, 4);
        dst_height0 = align(dst_height0, 4);
        src_width0 = align(src_width0, 4);
        src_height0 = align(src_height0, 4);
        box.width = align(box.width, 4);
        box.height = align(box.height, 4);

        switch (util_format_get_blocksize(dst_templ.format)) {
        case 8:
            /* one 4x4 pixel block has 8 bytes.
             * we set 1 pixel = 4 bytes ===> 1 block corresponds to 2 pixels. */
            dst_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
            dst_width0 = dst_width0 / 2;
            src_width0 = src_width0 / 2;
            dstx /= 2;
            box.x /= 2;
            box.width /= 2;
            break;
        case 16:
            /* one 4x4 pixel block has 16 bytes.
             * we set 1 pixel = 4 bytes ===> 1 block corresponds to 4 pixels. */
            dst_templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
            break;
        }
        src_templ.format = dst_templ.format;

        /* Either block size maps 4 rows of pixels onto 1 row of blocks. */
        dst_height0 = dst_height0 / 4;
        src_height0 = src_height0 / 4;
        dsty /= 4;
        box.y /= 4;
        box.height /= 4;
    }

    /* Fallback for textures. */
    if (!screen->is_format_supported(screen, dst_templ.format,
                                     dst->target, dst->nr_samples,
                                     PIPE_BIND_RENDER_TARGET) ||
        !screen->is_format_supported(screen, src_templ.format,
                                     src->target, src->nr_samples,
                                     PIPE_BIND_SAMPLER_VIEW)) {
        assert(0 && "this shouldn't happen, update r300_is_blit_supported");
        util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                  src, src_level, src_box);
        return;
    }

    /* Decompress ZMASK. */
    if (r300->zmask_in_use && !r300->locked_zbuffer) {
        if (fb->zsbuf->texture == src ||
            fb->zsbuf->texture == dst) {
            r300_decompress_zmask(r300);
        }
    }

    dst_view = r300_create_surface_custom(pipe, dst, &dst_templ,
                                          dst_width0, dst_height0);
    src_view = r300_create_sampler_view_custom(pipe, src, &src_templ,
                                               src_width0, src_height0);

    r300_blitter_begin(r300, R300_COPY);
    util_blitter_blit_generic(r300->blitter, dst_view, dstx, dsty,
                              abs(src_box->width), abs(src_box->height),
                              src_view, src_box,
                              src_width0, src_height0,
                              PIPE_MASK_RGBAZS, PIPE_TEX_FILTER_NEAREST, NULL,
                              FALSE);
    r300_blitter_end(r300);

    pipe_surface_reference(&dst_view, NULL);
    pipe_sampler_view_reference(&src_view, NULL);
}
void util_blitter_copy_texture(struct blitter_context *blitter,
                               struct pipe_resource *dst,
                               unsigned dstlevel,
                               unsigned dstx, unsigned dsty, unsigned dstz,
                               struct pipe_resource *src,
                               unsigned srclevel,
                               const struct pipe_box *srcbox,
                               boolean ignore_stencil)
{
   struct blitter_context_priv *priv = (struct blitter_context_priv*)blitter;
   struct pipe_context *pipe = priv->base.pipe;
   struct pipe_screen *screen = pipe->screen;
   struct pipe_surface dst_templ, *dst_surf;
   struct pipe_sampler_view src_templ, *src_sv;
   boolean has_depth, has_stencil, use_fallback;
   unsigned dst_bind;

   /* Nothing to do without both resources. */
   assert(dst && src);
   if (!dst || !src)
      return;

   assert(src->target < PIPE_MAX_TEXTURE_TYPES);

   /* Classify the source format: depth/stencil picks a DS binding for the
    * destination, anything else is treated as a color render target. */
   has_depth = util_format_get_component_bits(src->format,
                                              UTIL_FORMAT_COLORSPACE_ZS, 0) != 0;
   has_stencil = util_format_get_component_bits(src->format,
                                                UTIL_FORMAT_COLORSPACE_ZS, 1) != 0;
   dst_bind = (has_depth || has_stencil) ? PIPE_BIND_DEPTH_STENCIL
                                         : PIPE_BIND_RENDER_TARGET;

   /* Take the software path when a stencil copy is actually required
    * (the blit path cannot copy stencil) or when the screen cannot
    * render to dst / sample from src with these formats. */
   use_fallback =
      (has_stencil && !ignore_stencil) ||
      !screen->is_format_supported(screen, dst->format, dst->target,
                                   dst->nr_samples, dst_bind) ||
      !screen->is_format_supported(screen, src->format, src->target,
                                   src->nr_samples, PIPE_BIND_SAMPLER_VIEW);

   if (use_fallback) {
      blitter_set_running_flag(priv);
      util_resource_copy_region(pipe, dst, dstlevel, dstx, dsty, dstz,
                                src, srclevel, srcbox);
      blitter_unset_running_flag(priv);
      return;
   }

   /* Wrap the destination in a surface. */
   util_blitter_default_dst_texture(&dst_templ, dst, dstlevel, dstz, srcbox);
   dst_surf = pipe->create_surface(pipe, dst, &dst_templ);

   /* Wrap the source in a sampler view. */
   util_blitter_default_src_texture(&src_templ, src, srclevel);
   src_sv = pipe->create_sampler_view(pipe, src, &src_templ);

   /* Perform the copy as a textured blit. */
   util_blitter_copy_texture_view(blitter, dst_surf, dstx, dsty, src_sv,
                                  srcbox, src->width0, src->height0);

   pipe_surface_reference(&dst_surf, NULL);
   pipe_sampler_view_reference(&src_sv, NULL);
}
/* Copy a region between two resources via the GPU (screen-channel variant).
 *
 * Buffers fall back to a CPU copy; surfaces with matching block sizes go
 * through the M2MF engine one layer at a time, everything else is handed to
 * the 2D engine (which requires 2D-faithful formats — see the assert).
 */
static void
nv50_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv50_screen *screen = nv50_context(pipe)->screen;
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   assert(src->nr_samples == dst->nr_samples);

   /* M2MF is a raw memory copy, so it is usable whenever the formats have
    * the same bits per block, even if the formats themselves differ. */
   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      /* Rect dimensions are in blocks, not pixels. */
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      /* Copy layer by layer; 3D-layout miptrees advance by z coordinate,
       * array/cube layouts advance by the per-layer byte stride. */
      for (i = 0; i < src_box->depth; ++i) {
         nv50_m2mf_transfer_rect(&screen->base.base, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   /* Different block sizes: must use the 2D engine, which only handles
    * formats it can represent faithfully. */
   assert((src->format == dst->format) ||
          (nv50_2d_format_faithful(src->format) &&
           nv50_2d_format_faithful(dst->format)));

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nv50_2d_texture_do_copy(screen->base.channel,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      /* Abort on the first failed layer copy. */
      if (ret)
         return;
   }
}
/* Software copy of a single-layer region between two llvmpipe resources.
 *
 * Both resources are flushed of pending rendering first, then the affected
 * tiles on each side are forced into linear layout so the copy can be done
 * with a plain util_copy_rect on the linear images.
 * NOTE(review): only depth==1 boxes are supported here (see assert).
 */
static void
lp_resource_copy(struct pipe_context *pipe,
                 struct pipe_resource *dst, unsigned dst_level,
                 unsigned dstx, unsigned dsty, unsigned dstz,
                 struct pipe_resource *src, unsigned src_level,
                 const struct pipe_box *src_box)
{
   /* XXX this used to ignore srcz/dstz
    * assume it works the same for cube and 3d
    */
   struct llvmpipe_resource *src_tex = llvmpipe_resource(src);
   struct llvmpipe_resource *dst_tex = llvmpipe_resource(dst);
   const enum pipe_format format = src_tex->base.format;
   unsigned width = src_box->width;
   unsigned height = src_box->height;
   assert(src_box->depth == 1);

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   /* Make sure all pending rendering to either resource has landed
    * before touching the texels on the CPU. */
   llvmpipe_flush_resource(pipe,
                           dst, dst_level, dstz,
                           FALSE, /* read_only */
                           TRUE, /* cpu_access */
                           FALSE, /* do_not_block */
                           "blit dest");

   llvmpipe_flush_resource(pipe,
                           src, src_level, src_box->z,
                           TRUE, /* read_only */
                           TRUE, /* cpu_access */
                           FALSE, /* do_not_block */
                           "blit src");

   /*
   printf("surface copy from %u lvl %u to %u lvl %u: %u,%u,%u to %u,%u,%u %u x %u x %u\n",
          src_tex->id, src_level, dst_tex->id, dst_level,
          src_box->x, src_box->y, src_box->z, dstx, dsty, dstz,
          src_box->width, src_box->height, src_box->depth);
   */

   /* set src tiles to linear layout */
   {
      unsigned tx, ty, tw, th;
      unsigned x, y;

      /* Expand the region to whole-tile boundaries. */
      adjust_to_tile_bounds(src_box->x, src_box->y, width, height,
                            &tx, &ty, &tw, &th);

      for (y = 0; y < th; y += TILE_SIZE) {
         for (x = 0; x < tw; x += TILE_SIZE) {
            /* Only the layout conversion side effect is needed here. */
            (void) llvmpipe_get_texture_tile_linear(src_tex,
                                                    src_box->z, src_level,
                                                    LP_TEX_USAGE_READ,
                                                    tx + x, ty + y);
         }
      }
   }

   /* set dst tiles to linear layout */
   {
      unsigned tx, ty, tw, th;
      unsigned x, y;
      enum lp_texture_usage usage;

      adjust_to_tile_bounds(dstx, dsty, width, height, &tx, &ty, &tw, &th);

      for (y = 0; y < th; y += TILE_SIZE) {
         boolean contained_y = ty + y >= dsty &&
                               ty + y + TILE_SIZE <= dsty + height ?
                               TRUE : FALSE;

         for (x = 0; x < tw; x += TILE_SIZE) {
            boolean contained_x = tx + x >= dstx &&
                                  tx + x + TILE_SIZE <= dstx + width ?
                                  TRUE : FALSE;

            /*
             * Set the usage mode to WRITE_ALL for the tiles which are
             * completely contained by the dest rectangle.
             */
            if (contained_y && contained_x)
               usage = LP_TEX_USAGE_WRITE_ALL;
            else
               usage = LP_TEX_USAGE_READ_WRITE;

            (void) llvmpipe_get_texture_tile_linear(dst_tex,
                                                    dstz, dst_level,
                                                    usage,
                                                    tx + x, ty + y);
         }
      }
   }

   /* copy */
   {
      const ubyte *src_linear_ptr
         = llvmpipe_get_texture_image_address(src_tex, src_box->z,
                                              src_level,
                                              LP_TEX_LAYOUT_LINEAR);
      ubyte *dst_linear_ptr
         = llvmpipe_get_texture_image_address(dst_tex, dstz,
                                              dst_level,
                                              LP_TEX_LAYOUT_LINEAR);

      /* Either image may have no linear storage; skip the copy then. */
      if (dst_linear_ptr && src_linear_ptr) {
         util_copy_rect(dst_linear_ptr, format,
                        llvmpipe_resource_stride(&dst_tex->base, dst_level),
                        dstx, dsty,
                        width, height,
                        src_linear_ptr,
                        llvmpipe_resource_stride(&src_tex->base, src_level),
                        src_box->x, src_box->y);
      }
   }
}
/* Copy a region between two resources via the GPU (pushbuf variant).
 *
 * Buffers fall back to a CPU copy; surfaces with matching block sizes go
 * through the M2MF engine layer by layer, everything else goes through the
 * 2D engine with the buffers referenced in the context's bufctx for the
 * duration of the copy.
 */
static void
nv50_resource_copy_region(struct pipe_context *pipe,
                          struct pipe_resource *dst, unsigned dst_level,
                          unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *src, unsigned src_level,
                          const struct pipe_box *src_box)
{
   struct nv50_context *nv50 = nv50_context(pipe);
   int ret;
   boolean m2mf;
   unsigned dst_layer = dstz, src_layer = src_box->z;

   /* Fallback for buffers. */
   if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
      util_resource_copy_region(pipe, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
      return;
   }

   /* 0 and 1 are equal, only supporting 0/1, 2, 4 and 8 */
   assert((src->nr_samples | 1) == (dst->nr_samples | 1));

   /* M2MF is a raw memory copy, so it is usable whenever the formats have
    * the same bits per block, even if the formats themselves differ. */
   m2mf = (src->format == dst->format) ||
      (util_format_get_blocksizebits(src->format) ==
       util_format_get_blocksizebits(dst->format));

   nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;

   if (m2mf) {
      struct nv50_m2mf_rect drect, srect;
      unsigned i;
      /* Rect dimensions are in blocks, not pixels. */
      unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
      unsigned ny = util_format_get_nblocksy(src->format, src_box->height);

      nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
      nv50_m2mf_rect_setup(&srect, src, src_level,
                           src_box->x, src_box->y, src_box->z);

      /* Copy layer by layer; 3D-layout miptrees advance by z coordinate,
       * array/cube layouts advance by the per-layer byte stride. */
      for (i = 0; i < src_box->depth; ++i) {
         nv50_m2mf_transfer_rect(nv50, &drect, &srect, nx, ny);

         if (nv50_miptree(dst)->layout_3d)
            drect.z++;
         else
            drect.base += nv50_miptree(dst)->layer_stride;

         if (nv50_miptree(src)->layout_3d)
            srect.z++;
         else
            srect.base += nv50_miptree(src)->layer_stride;
      }
      return;
   }

   /* Different block sizes: must use the 2D engine, which only handles
    * formats it can represent faithfully. */
   assert((src->format == dst->format) ||
          (nv50_2d_format_faithful(src->format) &&
           nv50_2d_format_faithful(dst->format)));

   /* Pin both resources into the 2D bufctx and validate the pushbuf
    * before emitting the copy commands. */
   BCTX_REFN(nv50->bufctx, 2D, nv04_resource(src), RD);
   BCTX_REFN(nv50->bufctx, 2D, nv04_resource(dst), WR);
   nouveau_pushbuf_bufctx(nv50->base.pushbuf, nv50->bufctx);
   nouveau_pushbuf_validate(nv50->base.pushbuf);

   for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
      ret = nv50_2d_texture_do_copy(nv50->base.pushbuf,
                                    nv50_miptree(dst), dst_level,
                                    dstx, dsty, dst_layer,
                                    nv50_miptree(src), src_level,
                                    src_box->x, src_box->y, src_layer,
                                    src_box->width, src_box->height);
      /* Stop on the first failure, but still drop the bufctx refs below. */
      if (ret)
         break;
   }
   nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D);
}