static boolean
nv30_vbo_static_attrib(struct nv30_context *nv30, struct nouveau_stateobj *so,
                       int attrib, struct pipe_vertex_element *ve,
                       struct pipe_vertex_buffer *vb)
{
   struct pipe_screen *pscreen = nv30->pipe.screen;
   struct nouveau_grobj *rankine = nv30->screen->rankine;
   unsigned type, ncomp;
   void *map;

   if (nv30_vbo_format_to_hw(ve->src_format, &type, &ncomp))
      return FALSE;

   map  = pipe_buffer_map(pscreen, vb->buffer, PIPE_BUFFER_USAGE_CPU_READ);
   map += vb->buffer_offset + ve->src_offset;

   switch (type) {
   case NV34TCL_VTXFMT_TYPE_FLOAT:
   {
      float *v = map;

      switch (ncomp) {
      case 4:
         so_method(so, rankine, NV34TCL_VTX_ATTR_4F_X(attrib), 4);
         so_data  (so, fui(v[0]));
         so_data  (so, fui(v[1]));
         so_data  (so, fui(v[2]));
         so_data  (so, fui(v[3]));
         break;
      case 3:
         so_method(so, rankine, NV34TCL_VTX_ATTR_3F_X(attrib), 3);
         so_data  (so, fui(v[0]));
         so_data  (so, fui(v[1]));
         so_data  (so, fui(v[2]));
         break;
      case 2:
         so_method(so, rankine, NV34TCL_VTX_ATTR_2F_X(attrib), 2);
         so_data  (so, fui(v[0]));
         so_data  (so, fui(v[1]));
         break;
      case 1:
         so_method(so, rankine, NV34TCL_VTX_ATTR_1F(attrib), 1);
         so_data  (so, fui(v[0]));
         break;
      default:
         pipe_buffer_unmap(pscreen, vb->buffer);
         return FALSE;
      }
   }
      break;
   default:
      pipe_buffer_unmap(pscreen, vb->buffer);
      return FALSE;
   }

   pipe_buffer_unmap(pscreen, vb->buffer);
   return TRUE;
}
/* SW TCL elements, using Draw. */
static void r300_swtcl_draw_vbo(struct pipe_context* pipe,
                                const struct pipe_draw_info *info)
{
   struct r300_context* r300 = r300_context(pipe);
   struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS];
   struct pipe_transfer *ib_transfer = NULL;
   int i;
   void *indices = NULL;
   boolean indexed = info->indexed && r300->vbuf_mgr->index_buffer.buffer;

   if (r300->skip_rendering) {
      return;
   }

   r300_update_derived_state(r300);

   r300_reserve_cs_dwords(r300,
         PREP_EMIT_STATES | PREP_EMIT_VARRAYS_SWTCL |
         (indexed ? PREP_INDEXED : 0),
         indexed ? 256 : 6);

   for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
      if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
         void *buf = pipe_buffer_map(pipe,
                                     r300->vbuf_mgr->vertex_buffer[i].buffer,
                                     PIPE_TRANSFER_READ |
                                     PIPE_TRANSFER_UNSYNCHRONIZED,
                                     &vb_transfer[i]);
         draw_set_mapped_vertex_buffer(r300->draw, i, buf);
      }
   }

   if (indexed) {
      indices = pipe_buffer_map(pipe, r300->vbuf_mgr->index_buffer.buffer,
                                PIPE_TRANSFER_READ |
                                PIPE_TRANSFER_UNSYNCHRONIZED, &ib_transfer);
   }

   draw_set_mapped_index_buffer(r300->draw, indices);

   r300->draw_vbo_locked = TRUE;
   r300->draw_first_emitted = FALSE;
   draw_vbo(r300->draw, info);
   draw_flush(r300->draw);
   r300->draw_vbo_locked = FALSE;

   for (i = 0; i < r300->vbuf_mgr->nr_vertex_buffers; i++) {
      if (r300->vbuf_mgr->vertex_buffer[i].buffer) {
         pipe_buffer_unmap(pipe, vb_transfer[i]);
         draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
      }
   }

   if (indexed) {
      pipe_buffer_unmap(pipe, ib_transfer);
      draw_set_mapped_index_buffer(r300->draw, NULL);
   }
}
static enum pipe_error
translate_indices(struct svga_hwtnl *hwtnl,
                  struct pipe_resource *src,
                  unsigned offset,
                  unsigned nr,
                  unsigned index_size,
                  u_translate_func translate,
                  struct pipe_resource **out_buf)
{
   struct pipe_context *pipe = &hwtnl->svga->pipe;
   struct pipe_transfer *src_transfer = NULL;
   struct pipe_transfer *dst_transfer = NULL;
   unsigned size = index_size * nr;
   const void *src_map = NULL;
   struct pipe_resource *dst = NULL;
   void *dst_map = NULL;

   dst = pipe_buffer_create(pipe->screen,
                            PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STATIC, size);
   if (dst == NULL)
      goto fail;

   src_map = pipe_buffer_map(pipe, src, PIPE_TRANSFER_READ, &src_transfer);
   if (src_map == NULL)
      goto fail;

   dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &dst_transfer);
   if (dst_map == NULL)
      goto fail;

   translate((const char *) src_map + offset, nr, dst_map);

   pipe_buffer_unmap(pipe, src_transfer);
   pipe_buffer_unmap(pipe, dst_transfer);

   *out_buf = dst;
   return PIPE_OK;

fail:
   if (src_map)
      pipe_buffer_unmap(pipe, src_transfer);

   if (dst_map)
      pipe_buffer_unmap(pipe, dst_transfer);

   if (dst)
      pipe->screen->resource_destroy(pipe->screen, dst);

   return PIPE_ERROR_OUT_OF_MEMORY;
}
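/* A minimal sketch (not from the source tree) of a u_translate_func matching
 * the three-argument signature used by the translate_indices() variant above:
 * it widens ubyte indices to ushorts. The name translate_ubyte_ushort is
 * illustrative; the real callbacks are supplied by the u_indices helpers. */
static void
translate_ubyte_ushort(const void *in, unsigned nr, void *out)
{
   const uint8_t *src = (const uint8_t *) in;
   uint16_t *dst = (uint16_t *) out;
   unsigned i;

   /* Zero-extend each 8-bit index to 16 bits. */
   for (i = 0; i < nr; i++)
      dst[i] = src[i];
}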
static enum pipe_error
translate_indices(struct svga_hwtnl *hwtnl,
                  struct pipe_resource *src,
                  unsigned offset,
                  unsigned prim,
                  unsigned nr,
                  unsigned index_size,
                  u_translate_func translate,
                  struct pipe_resource **out_buf)
{
   struct pipe_context *pipe = &hwtnl->svga->pipe;
   struct pipe_transfer *src_transfer = NULL;
   struct pipe_transfer *dst_transfer = NULL;
   unsigned size;
   const void *src_map = NULL;
   struct pipe_resource *dst = NULL;
   void *dst_map = NULL;

   /* Need to trim the vertex count to make sure we don't write too much
    * data to the dst buffer in the translate() call.
    */
   u_trim_pipe_prim(prim, &nr);
   size = index_size * nr;

   dst = pipe_buffer_create(pipe->screen,
                            PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_DEFAULT, size);
   if (dst == NULL)
      goto fail;

   src_map = pipe_buffer_map(pipe, src, PIPE_TRANSFER_READ, &src_transfer);
   if (src_map == NULL)
      goto fail;

   dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &dst_transfer);
   if (dst_map == NULL)
      goto fail;

   translate((const char *) src_map + offset, 0, 0, nr, 0, dst_map);

   pipe_buffer_unmap(pipe, src_transfer);
   pipe_buffer_unmap(pipe, dst_transfer);

   *out_buf = dst;
   return PIPE_OK;

fail:
   if (src_map)
      pipe_buffer_unmap(pipe, src_transfer);

   if (dst_map)
      pipe_buffer_unmap(pipe, dst_transfer);

   if (dst)
      pipe->screen->resource_destroy(pipe->screen, dst);

   return PIPE_ERROR_OUT_OF_MEMORY;
}
void
vl_vb_unmap(struct vl_vertex_buffer *buffer, struct pipe_context *pipe)
{
   unsigned i;

   assert(buffer && pipe);

   for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
      pipe_buffer_unmap(pipe, buffer->ycbcr[i].transfer);
   }

   for (i = 0; i < VL_MAX_REF_FRAMES; ++i) {
      pipe_buffer_unmap(pipe, buffer->mv[i].transfer);
   }
}
static boolean r300_get_query_result(struct pipe_context* pipe,
                                     struct pipe_query* query,
                                     boolean wait,
                                     uint64_t* result)
{
   struct r300_query* q = (struct r300_query*)query;
   uint32_t* map;
   uint32_t temp;

   if (wait) {
      /* Well, we're expected to just sit here and spin, so let's go ahead
       * and flush so we can be sure that the card's spinning... */
      /* XXX double-check these params */
      pipe->flush(pipe, 0, NULL);
   }

   map = pipe_buffer_map(pipe->screen, q->buf, PIPE_BUFFER_USAGE_CPU_READ);
   temp = *map;
   pipe_buffer_unmap(pipe->screen, q->buf);

   /* temp is unsigned, so a "temp < 0" test can never fire; instead check
    * for the ~0 sentinel that r300_begin_query writes before the GPU
    * overwrites it with the real counter. */
   if (temp == ~0U) {
      /* Our results haven't been written yet... */
      return FALSE;
   }

   *result = temp;
   return TRUE;
}
static void
fill_grid_size(struct pipe_context *context,
               const struct pipe_grid_info *info,
               uint32_t grid_size[3])
{
   struct pipe_transfer *transfer;
   uint32_t *params;

   if (!info->indirect) {
      grid_size[0] = info->grid[0];
      grid_size[1] = info->grid[1];
      grid_size[2] = info->grid[2];
      return;
   }

   params = pipe_buffer_map_range(context, info->indirect,
                                  info->indirect_offset,
                                  3 * sizeof(uint32_t),
                                  PIPE_TRANSFER_READ,
                                  &transfer);
   if (!transfer)
      return;

   grid_size[0] = params[0];
   grid_size[1] = params[1];
   grid_size[2] = params[2];

   pipe_buffer_unmap(context, transfer);
}
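/* Hypothetical usage sketch for fill_grid_size() above (not from the source
 * tree): resolve an indirect compute dispatch into explicit dimensions. For a
 * direct dispatch info->grid[] is copied; for an indirect one the three
 * dwords are read back from the indirect buffer via pipe_buffer_map_range(). */
static void
print_grid_size(struct pipe_context *ctx, const struct pipe_grid_info *info)
{
   uint32_t grid_size[3] = {0, 0, 0};

   fill_grid_size(ctx, info, grid_size);

   debug_printf("dispatch %u x %u x %u\n",
                grid_size[0], grid_size[1], grid_size[2]);
}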
/* As above, but upload the full contents of a buffer.  Useful for
 * uploading user buffers, avoids generating an explosion of GPU
 * buffers if you have an app that does lots of small vertex buffer
 * renders or DrawElements calls.
 */
enum pipe_error u_upload_buffer( struct u_upload_mgr *upload,
                                 unsigned offset,
                                 unsigned size,
                                 struct pipe_buffer *inbuf,
                                 unsigned *out_offset,
                                 struct pipe_buffer **outbuf )
{
   enum pipe_error ret = PIPE_OK;
   const char *map = NULL;

   map = (const char *)pipe_buffer_map( upload->screen,
                                        inbuf,
                                        PIPE_BUFFER_USAGE_CPU_READ );
   if (map == NULL) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto done;
   }

   if (0)
      debug_printf("upload ptr %p ofs %d sz %d\n", map, offset, size);

   ret = u_upload_data( upload, size, map + offset, out_offset, outbuf );

done:
   if (map)
      pipe_buffer_unmap( upload->screen, inbuf );

   return ret;
}
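/* Hypothetical caller of u_upload_buffer() above, assuming an already-created
 * u_upload_mgr: the whole user vertex buffer is copied into the upload
 * manager's GPU buffer, and on success *hw_offset/*hw_buf name the region to
 * bind in place of the user buffer. upload_user_vb is an illustrative name. */
static enum pipe_error
upload_user_vb(struct u_upload_mgr *upload,
               struct pipe_buffer *user_vb, unsigned vb_size,
               unsigned *hw_offset, struct pipe_buffer **hw_buf)
{
   return u_upload_buffer(upload, 0, vb_size, user_vb, hw_offset, hw_buf);
}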
static void
svga_vbuf_render_unmap_vertices( struct vbuf_render *render,
                                 ushort min_index,
                                 ushort max_index )
{
   struct svga_vbuf_render *svga_render = svga_vbuf_render(render);
   struct svga_context *svga = svga_render->svga;
   unsigned offset, length;
   size_t used = svga_render->vertex_size * ((size_t)max_index + 1);

   offset = svga_render->vbuf_offset + svga_render->vertex_size * min_index;
   length = svga_render->vertex_size * (max_index + 1 - min_index);

   if (0) {
      /* dump vertex data */
      const float *f = (const float *) ((char *) svga_render->vbuf_ptr +
                                        svga_render->vbuf_offset);
      unsigned i;
      debug_printf("swtnl vertex data:\n");
      for (i = 0; i < length / 4; i += 4) {
         debug_printf("%u: %f %f %f %f\n", i, f[i], f[i+1], f[i+2], f[i+3]);
      }
   }

   pipe_buffer_flush_mapped_range(&svga->pipe,
                                  svga_render->vbuf_transfer,
                                  offset, length);
   pipe_buffer_unmap(&svga->pipe, svga_render->vbuf_transfer);
   svga_render->min_index = min_index;
   svga_render->max_index = max_index;
   svga_render->vbuf_used = MAX2(svga_render->vbuf_used, used);
}
static INLINE enum pipe_error
my_buffer_write(struct pipe_screen *screen,
                struct pipe_buffer *buf,
                unsigned offset, unsigned size, unsigned dirty_size,
                const void *data)
{
   uint8_t *map;

   assert(offset < buf->size);
   assert(offset + size <= buf->size);
   assert(dirty_size >= size);
   assert(size);

   map = pipe_buffer_map_range(screen, buf, offset, size,
                               PIPE_BUFFER_USAGE_CPU_WRITE |
                               PIPE_BUFFER_USAGE_FLUSH_EXPLICIT |
                               PIPE_BUFFER_USAGE_DISCARD |
                               PIPE_BUFFER_USAGE_UNSYNCHRONIZED);
   if (map == NULL)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memcpy(map + offset, data, size);
   pipe_buffer_flush_mapped_range(screen, buf, offset, dirty_size);
   pipe_buffer_unmap(screen, buf);

   return PIPE_OK;
}
/* Slightly specialized version of buffer_write designed to maximize
 * chances of the driver consolidating successive writes into a single
 * upload.
 *
 * dirty_size may be slightly greater than size to cope with
 * alignment.  We don't want to leave holes between successively mapped
 * regions as that may prevent the driver from consolidating uploads.
 *
 * Note that the 'data' pointer has probably come from the application
 * and we cannot read even a byte past its end without risking
 * segfaults, or at least complaints from valgrind...
 */
static INLINE enum pipe_error
my_buffer_write(struct pipe_context *pipe,
                struct pipe_resource *buf,
                unsigned offset, unsigned size, unsigned dirty_size,
                const void *data)
{
   struct pipe_transfer *transfer = NULL;
   uint8_t *map;

   assert(offset < buf->width0);
   assert(offset + size <= buf->width0);
   assert(dirty_size >= size);
   assert(size);

   map = pipe_buffer_map_range(pipe, buf, offset, dirty_size,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_FLUSH_EXPLICIT |
                               PIPE_TRANSFER_DISCARD |
                               PIPE_TRANSFER_UNSYNCHRONIZED,
                               &transfer);
   if (map == NULL)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memcpy(map + offset, data, size);
   pipe_buffer_flush_mapped_range(pipe, transfer, offset, dirty_size);
   pipe_buffer_unmap(pipe, transfer);

   return PIPE_OK;
}
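/* Hypothetical caller of the context-based my_buffer_write() above: pad
 * dirty_size up to a 16-byte boundary (per the comment, to avoid holes
 * between successively mapped regions) while memcpy'ing only 'size' bytes,
 * so no byte past 'data' is ever read. align() is the u_math.h helper; the
 * 16-byte figure is an assumed alignment, not taken from the source. The
 * caller must ensure offset + align(size, 16) still fits in the buffer. */
static enum pipe_error
write_consts(struct pipe_context *pipe, struct pipe_resource *cbuf,
             unsigned offset, const void *data, unsigned size)
{
   return my_buffer_write(pipe, cbuf, offset, size, align(size, 16), data);
}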
static void
nv30_fragprog_upload(struct nv30_context *nv30)
{
   struct nouveau_context *nv = &nv30->base;
   struct nv30_fragprog *fp = nv30->fragprog.program;
   struct pipe_context *pipe = &nv30->base.pipe;

   if (unlikely(!fp->buffer))
      fp->buffer = pipe_buffer_create(pipe->screen, 0, 0, fp->insn_len * 4);

#ifndef PIPE_ARCH_BIG_ENDIAN
   pipe_buffer_write(pipe, fp->buffer, 0, fp->insn_len * 4, fp->insn);
#else
   {
      struct pipe_transfer *transfer;
      uint32_t *map;
      int i;

      map = pipe_buffer_map(pipe, fp->buffer,
                            PIPE_TRANSFER_WRITE |
                            PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                            &transfer);
      /* Swap the two halfwords of each instruction word on
       * big-endian hosts. */
      for (i = 0; i < fp->insn_len; i++)
         *map++ = (fp->insn[i] >> 16) | (fp->insn[i] << 16);
      pipe_buffer_unmap(pipe, transfer);
   }
#endif

   if (nv04_resource(fp->buffer)->domain != NOUVEAU_BO_VRAM)
      nouveau_buffer_migrate(nv, nv04_resource(fp->buffer), NOUVEAU_BO_VRAM);
}
static void
nv30_render_unmap_vertices(struct vbuf_render *render,
                           ushort min_index, ushort max_index)
{
   struct nv30_render *r = nv30_render(render);
   pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);
}
static boolean
nv30_draw_elements_inline(struct pipe_context *pipe,
                          struct pipe_buffer *ib, unsigned ib_size,
                          unsigned mode, unsigned start, unsigned count)
{
   struct nv30_context *nv30 = nv30_context(pipe);
   struct pipe_screen *pscreen = pipe->screen;
   void *map;

   map = pipe_buffer_map(pscreen, ib, PIPE_BUFFER_USAGE_CPU_READ);
   /* Check the map result, not the buffer pointer we were handed. */
   if (!map) {
      NOUVEAU_ERR("failed mapping ib\n");
      return FALSE;
   }

   switch (ib_size) {
   case 1:
      nv30_draw_elements_u08(nv30, map, mode, start, count);
      break;
   case 2:
      nv30_draw_elements_u16(nv30, map, mode, start, count);
      break;
   case 4:
      nv30_draw_elements_u32(nv30, map, mode, start, count);
      break;
   default:
      NOUVEAU_ERR("invalid idxbuf fmt %d\n", ib_size);
      break;
   }

   pipe_buffer_unmap(pscreen, ib);
   return TRUE;
}
static inline struct nv04_resource *
nv30_transfer_rect_fragprog(struct nv30_context *nv30)
{
   struct nv04_resource *fp = nv04_resource(nv30->blit_fp);
   struct pipe_context *pipe = &nv30->base.pipe;

   if (!fp) {
      nv30->blit_fp =
         pipe_buffer_create(pipe->screen, 0, 0, 12 * 4);
      if (nv30->blit_fp) {
         struct pipe_transfer *transfer;
         u32 *map = pipe_buffer_map(pipe, nv30->blit_fp,
                                    PIPE_TRANSFER_WRITE, &transfer);
         if (map) {
            map[0] = 0x17009e00; /* texr r0, i[tex0], texture[0]; end; */
            map[1] = 0x1c9dc801;
            map[2] = 0x0001c800;
            map[3] = 0x3fe1c800;
            map[4] = 0x01401e81; /* end; */
            map[5] = 0x1c9dc800;
            map[6] = 0x0001c800;
            map[7] = 0x0001c800;
            pipe_buffer_unmap(pipe, transfer);
         }

         fp = nv04_resource(nv30->blit_fp);
         nouveau_buffer_migrate(&nv30->base, fp, NOUVEAU_BO_VRAM);
      }
   }

   return fp;
}
VAStatus
vlVaUnmapBuffer(VADriverContextP ctx, VABufferID buf_id)
{
   vlVaDriver *drv;
   vlVaBuffer *buf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   buf = handle_table_get(drv->htab, buf_id);
   if (!buf)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (buf->export_refcount > 0)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (buf->derived_surface.resource) {
      if (!buf->derived_surface.transfer)
         return VA_STATUS_ERROR_INVALID_BUFFER;

      pipe_buffer_unmap(drv->pipe, buf->derived_surface.transfer);
      buf->derived_surface.transfer = NULL;
   }

   return VA_STATUS_SUCCESS;
}
/* SW TCL arrays, using Draw. */
boolean r300_swtcl_draw_arrays(struct pipe_context* pipe,
                               unsigned mode,
                               unsigned start,
                               unsigned count)
{
   struct r300_context* r300 = r300_context(pipe);
   int i;

   if (!u_trim_pipe_prim(mode, &count)) {
      return FALSE;
   }

   for (i = 0; i < r300->vertex_buffer_count; i++) {
      void* buf = pipe_buffer_map(pipe->screen,
                                  r300->vertex_buffer[i].buffer,
                                  PIPE_BUFFER_USAGE_CPU_READ);
      draw_set_mapped_vertex_buffer(r300->draw, i, buf);
   }

   draw_set_mapped_element_buffer(r300->draw, 0, NULL);

   draw_set_mapped_constant_buffer(r300->draw,
         r300->shader_constants[PIPE_SHADER_VERTEX].constants,
         r300->shader_constants[PIPE_SHADER_VERTEX].count *
            (sizeof(float) * 4));

   draw_arrays(r300->draw, mode, start, count);

   for (i = 0; i < r300->vertex_buffer_count; i++) {
      pipe_buffer_unmap(pipe->screen, r300->vertex_buffer[i].buffer);
      draw_set_mapped_vertex_buffer(r300->draw, i, NULL);
   }

   return TRUE;
}
int main(int argc, char **argv)
{
   struct fbdemos_scaffold *fbs = 0;
   fbdemo_init(&fbs);
   int width = fbs->width;
   int height = fbs->height;
   struct pipe_context *pipe = fbs->pipe;

   /* resources */
   struct pipe_resource *rt_resource = fbdemo_create_2d(fbs->screen,
         PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_B8G8R8X8_UNORM,
         width, height, 0);
   struct pipe_resource *z_resource = fbdemo_create_2d(fbs->screen,
         PIPE_BIND_RENDER_TARGET, PIPE_FORMAT_Z16_UNORM,
         width, height, 0);

   /* bind render target to framebuffer */
   etna_fb_bind_resource(&fbs->fb, rt_resource);

   /* geometry */
   struct pipe_resource *vtx_resource = pipe_buffer_create(fbs->screen,
         PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, VERTEX_BUFFER_SIZE);
   struct pipe_transfer *transfer = 0;
   float *vtx_logical = pipe_buffer_map(pipe, vtx_resource,
         PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED, &transfer);
   assert(vtx_logical);
   /* Interleave positions, normals and colors: 3+3+3 floats per vertex. */
   for (int vert = 0; vert < NUM_VERTICES; ++vert) {
      int src_idx = vert * COMPONENTS_PER_VERTEX;
      int dest_idx = vert * COMPONENTS_PER_VERTEX * 3;
      for (int comp = 0; comp < COMPONENTS_PER_VERTEX; ++comp) {
         vtx_logical[dest_idx + comp + 0] = vVertices[src_idx + comp]; /* 0 */
         vtx_logical[dest_idx + comp + 3] = vNormals[src_idx + comp];  /* 1 */
         vtx_logical[dest_idx + comp + 6] = vColors[src_idx + comp];   /* 2 */
      }
   }
   pipe_buffer_unmap(pipe, transfer);

   struct pipe_vertex_buffer vertex_buffer_desc = {
      .stride = (3 + 3 + 3) * 4,
      .buffer_offset = 0,
      .buffer = vtx_resource,
      .user_buffer = 0
   };
   struct pipe_vertex_element pipe_vertex_elements[] = {
      { /* positions */
         .src_offset = 0,
         .instance_divisor = 0,
         .vertex_buffer_index = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT
      },
      { /* normals */
         .src_offset = 0xc,
         .instance_divisor = 0,
         .vertex_buffer_index = 0,
         .src_format = PIPE_FORMAT_R32G32B32_FLOAT
      },
      { /* texture coord */
         .src_offset = 0x18,
static void
nv30_render_destroy(struct vbuf_render *render)
{
   struct nv30_render *r = nv30_render(render);

   if (r->transfer)
      pipe_buffer_unmap(&r->nv30->base.pipe, r->transfer);

   pipe_resource_reference(&r->buffer, NULL);
   nouveau_heap_free(&r->vertprog);
   FREE(render);
}
static void
softpipe_transfer_unmap(struct pipe_screen *screen,
                        struct pipe_transfer *transfer)
{
   struct softpipe_texture *spt;

   assert(transfer->texture);
   spt = softpipe_texture(transfer->texture);

   pipe_buffer_unmap( screen, spt->buffer );
}
static enum pipe_error
generate_indices(struct svga_hwtnl *hwtnl,
                 unsigned nr,
                 unsigned index_size,
                 u_generate_func generate,
                 struct pipe_resource **out_buf)
{
   struct pipe_context *pipe = &hwtnl->svga->pipe;
   struct pipe_transfer *transfer;
   unsigned size = index_size * nr;
   struct pipe_resource *dst = NULL;
   void *dst_map = NULL;

   dst = pipe_buffer_create(pipe->screen,
                            PIPE_BIND_INDEX_BUFFER, PIPE_USAGE_STATIC, size);
   if (dst == NULL)
      goto fail;

   dst_map = pipe_buffer_map(pipe, dst, PIPE_TRANSFER_WRITE, &transfer);
   if (dst_map == NULL)
      goto fail;

   generate(nr, dst_map);

   pipe_buffer_unmap(pipe, transfer);

   *out_buf = dst;
   return PIPE_OK;

fail:
   if (dst_map)
      pipe_buffer_unmap(pipe, transfer);

   if (dst)
      pipe->screen->resource_destroy(pipe->screen, dst);

   return PIPE_ERROR_OUT_OF_MEMORY;
}
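/* A minimal sketch (illustrative, not from the source tree) of a
 * u_generate_func matching the generate(nr, dst_map) call in
 * generate_indices() above: it emits a linear 0..nr-1 ushort index list.
 * The real generators are supplied by the u_indices helpers. */
static void
generate_linear_ushort(unsigned nr, void *out)
{
   uint16_t *dst = (uint16_t *) out;
   unsigned i;

   for (i = 0; i < nr; i++)
      dst[i] = (uint16_t) i;
}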
/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(GLcontext *ctx, GLenum target, struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   pipe_buffer_unmap(pipe->screen, st_obj->buffer);
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;
   return GL_TRUE;
}
static boolean virgl_get_query_result(struct pipe_context *ctx,
                                      struct pipe_query *q,
                                      boolean wait,
                                      union pipe_query_result *result)
{
   struct virgl_context *vctx = virgl_context(ctx);
   struct virgl_query *query = virgl_query(q);
   struct pipe_transfer *transfer;
   struct virgl_host_query_state *host_state;

   /* ask host for query result */
   if (!query->result_gotten_sent) {
      query->result_gotten_sent = 1;
      virgl_encoder_get_query_result(vctx, query->handle, 0);
      ctx->flush(ctx, NULL, 0);
   }

   /* do we have to flush? */
   /* now we can do the transfer to get the result back? */
remap:
   host_state = pipe_buffer_map(ctx, &query->buf->u.b,
                                PIPE_TRANSFER_READ, &transfer);

   if (host_state->query_state != VIRGL_QUERY_STATE_DONE) {
      pipe_buffer_unmap(ctx, transfer);
      if (wait)
         goto remap;
      else
         return FALSE;
   }

   if (query->type == PIPE_QUERY_TIMESTAMP ||
       query->type == PIPE_QUERY_TIME_ELAPSED)
      result->u64 = host_state->result;
   else
      result->u64 = (uint32_t)host_state->result;

   pipe_buffer_unmap(ctx, transfer);
   query->result_gotten_sent = 0;
   return TRUE;
}
static void
gen_vertex_data(struct vl_compositor *c, struct vl_compositor_state *s,
                struct u_rect *dirty)
{
   struct vertex2f *vb;
   struct pipe_transfer *buf_transfer;
   unsigned i;

   assert(c);

   vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
                        PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE |
                        PIPE_TRANSFER_DONTBLOCK,
                        &buf_transfer);

   if (!vb) {
      // If buffer is still locked from last draw create a new one
      create_vertex_buffer(c);
      vb = pipe_buffer_map(c->pipe, c->vertex_buf.buffer,
                           PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                           &buf_transfer);
   }

   for (i = 0; i < VL_COMPOSITOR_MAX_LAYERS; i++) {
      if (s->used_layers & (1 << i)) {
         struct vl_compositor_layer *layer = &s->layers[i];
         gen_rect_verts(vb, layer);
         vb += 20;

         if (!layer->viewport_valid) {
            layer->viewport.scale[0] = c->fb_state.width;
            layer->viewport.scale[1] = c->fb_state.height;
            layer->viewport.translate[0] = 0;
            layer->viewport.translate[1] = 0;
         }

         if (dirty && layer->clearing) {
            struct u_rect drawn = calc_drawn_area(s, layer);
            if (dirty->x0 >= drawn.x0 &&
                dirty->y0 >= drawn.y0 &&
                dirty->x1 <= drawn.x1 &&
                dirty->y1 <= drawn.y1) {

               // We clear the dirty area anyway, no need for clear_render_target
               dirty->x0 = dirty->y0 = MAX_DIRTY;
               dirty->x1 = dirty->y1 = MIN_DIRTY;
            }
         }
      }
   }

   pipe_buffer_unmap(c->pipe, buf_transfer);
}
void
vl_compositor_set_csc_matrix(struct vl_compositor *compositor, const float *mat)
{
   assert(compositor);

   memcpy(
      pipe_buffer_map(compositor->pipe->screen,
                      compositor->fs_const_buf.buffer,
                      PIPE_BUFFER_USAGE_CPU_WRITE),
      mat,
      sizeof(struct fragment_shader_consts)
   );

   pipe_buffer_unmap(compositor->pipe->screen,
                     compositor->fs_const_buf.buffer);
}
static void r300_render_unmap_vertices(struct vbuf_render* render,
                                       ushort min,
                                       ushort max)
{
   struct r300_render* r300render = r300_render(render);
   struct pipe_screen* screen = r300render->r300->context.screen;
   CS_LOCALS(r300render->r300);

   BEGIN_CS(2);
   OUT_CS_REG(R300_VAP_VF_MAX_VTX_INDX, max);
   END_CS;

   r300render->vbo_max_used = MAX2(r300render->vbo_max_used,
                                   r300render->vertex_size * (max + 1));
   pipe_buffer_unmap(screen, r300render->vbo);
}
/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (obj->Length)
      pipe_buffer_unmap(pipe, st_obj->transfer);

   st_obj->transfer = NULL;
   obj->Pointer = NULL;
   obj->Offset = 0;
   obj->Length = 0;
   return GL_TRUE;
}
/**
 * Called via glUnmapBufferARB().
 */
static GLboolean
st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj,
                   gl_map_buffer_index index)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   if (obj->Mappings[index].Length)
      pipe_buffer_unmap(pipe, st_obj->transfer[index]);

   st_obj->transfer[index] = NULL;
   obj->Mappings[index].Pointer = NULL;
   obj->Mappings[index].Offset = 0;
   obj->Mappings[index].Length = 0;
   return GL_TRUE;
}
static void r300_begin_query(struct pipe_context* pipe,
                             struct pipe_query* query)
{
   uint32_t* map;
   struct r300_context* r300 = r300_context(pipe);
   struct r300_query* q = (struct r300_query*)query;
   CS_LOCALS(r300);

   /* Seed the query buffer with the ~0 sentinel so get_query_result()
    * can tell whether the GPU has written the real value yet. */
   map = pipe_buffer_map(pipe->screen, q->buf, PIPE_BUFFER_USAGE_CPU_WRITE);
   *map = ~0;
   pipe_buffer_unmap(pipe->screen, q->buf);

   BEGIN_CS(2);
   OUT_CS_REG(R300_ZB_ZPASS_DATA, 0);
   END_CS;
}
/**
 * Emit all the constants in a constant buffer for a shader stage.
 */
static enum pipe_error
emit_consts(struct svga_context *svga, unsigned shader)
{
   struct svga_screen *ss = svga_screen(svga->pipe.screen);
   struct pipe_transfer *transfer = NULL;
   unsigned count;
   const float (*data)[4] = NULL;
   unsigned i;
   enum pipe_error ret = PIPE_OK;
   const unsigned offset = 0;

   assert(shader < PIPE_SHADER_TYPES);

   if (svga->curr.cb[shader] == NULL)
      goto done;

   count = svga->curr.cb[shader]->width0 / (4 * sizeof(float));

   data = (const float (*)[4])pipe_buffer_map(&svga->pipe,
                                              svga->curr.cb[shader],
                                              PIPE_TRANSFER_READ,
                                              &transfer);
   if (data == NULL) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto done;
   }

   if (ss->hw_version >= SVGA3D_HWVERSION_WS8_B1) {
      ret = emit_const_range( svga, shader, offset, count, data );
      if (ret != PIPE_OK) {
         goto done;
      }
   }
   else {
      for (i = 0; i < count; i++) {
         ret = emit_const( svga, shader, offset + i, data[i] );
         if (ret != PIPE_OK) {
            goto done;
         }
      }
   }

done:
   if (data)
      pipe_buffer_unmap(&svga->pipe, transfer);

   return ret;
}