/* Release every batch-related buffer object held by the screen private:
 * the retired batch BOs, the current batch BO, the vertex BO, and any
 * pixmaps still linked on the batch list. */
void intel_batch_teardown(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	int n;

	/* Drop references to previously-submitted batch buffers. */
	for (n = 0; n < ARRAY_SIZE(intel->last_batch_bo); n++) {
		if (intel->last_batch_bo[n] == NULL)
			continue;
		dri_bo_unreference(intel->last_batch_bo[n]);
		intel->last_batch_bo[n] = NULL;
	}

	if (intel->batch_bo != NULL) {
		dri_bo_unreference(intel->batch_bo);
		intel->batch_bo = NULL;
	}

	if (intel->vertex_bo != NULL) {
		dri_bo_unreference(intel->vertex_bo);
		intel->vertex_bo = NULL;
	}

	/* Unlink any pixmaps still queued against this batch. */
	while (!list_is_empty(&intel->batch_pixmaps))
		list_del(intel->batch_pixmaps.next);
}
/* Destructor for a GenAvcSurface: drop both direct-MV buffer references,
 * free the surface, and clear the caller's pointer.  All of this is
 * serialized under free_avc_surface_lock. */
void gen_free_avc_surface(void **data)
{
	GenAvcSurface *surface;

	pthread_mutex_lock(&free_avc_surface_lock);

	surface = *data;
	if (surface == NULL) {
		pthread_mutex_unlock(&free_avc_surface_lock);
		return;
	}

	dri_bo_unreference(surface->dmv_top);
	surface->dmv_top = NULL;
	dri_bo_unreference(surface->dmv_bottom);
	surface->dmv_bottom = NULL;

	free(surface);
	*data = NULL;

	pthread_mutex_unlock(&free_avc_surface_lock);
}
/**
 * Destroy an intel rendering context (DRI driver hook).
 *
 * Tears down, in order: pending vertices, meta ops, the hardware vtbl
 * state, the swrast/tnl/vbo/swsetup modules, the batchbuffer, the
 * vertex-buffer BOs, the option cache, and finally the Mesa context and
 * the intel_context allocation itself.  Clears driverPrivate so the
 * loader cannot reuse the stale pointer.
 */
void intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;

   assert(intel);               /* should never be null */
   if (intel) {
      GLboolean release_texture_heaps;

      /* Flush any buffered primitives before tearing state down. */
      INTEL_FIREVERTICES(intel);

      _mesa_meta_free(&intel->ctx);

      meta_destroy_metaops(&intel->meta);

      /* Chipset-specific teardown. */
      intel->vtbl.destroy(intel);

      /* True when we hold the last reference to the shared state. */
      release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
      _swsetup_DestroyContext(&intel->ctx);
      _tnl_DestroyContext(&intel->ctx);
      _vbo_DestroyContext(&intel->ctx);
      _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel->batch);
      intel->batch = NULL;

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      dri_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      dri_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      if (release_texture_heaps) {
         /* Nothing is currently done here to free texture heaps;
          * but we're not using the texture heap utilities, so I
          * rather think we shouldn't.  I've taken a look, and can't
          * find any private texture data hanging around anywhere, but
          * I'm not yet certain there isn't any at all...
          */
         /* if (INTEL_DEBUG & DEBUG_TEXTURE)
            fprintf(stderr, "do something to free texture heaps\n");
          */
      }

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx);

      FREE(intel);
      driContextPriv->driverPrivate = NULL;
   }
}
/**
 * Update the surface state for a WM constant buffer.
 * The constant buffer will be (re)allocated here if needed.
 *
 * Drops the old constant buffer, asks brw_wm_update_constant_buffer() for
 * the new one, and (re)builds the SS_SURFACE cache entry pointing at it.
 * Flags BRW_NEW_WM_SURFACES so dependent atoms re-emit.
 */
static void
brw_update_wm_constant_surface( GLcontext *ctx,
                                GLuint surf)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_surface_key key;
   struct brw_fragment_program *fp =
      (struct brw_fragment_program *) brw->fragment_program;
   const struct gl_program_parameter_list *params =
      fp->program.Base.Parameters;

   /* If we're in this state update atom, we need to update WM constants, so
    * free the old buffer and create a new one for the new contents.
    */
   dri_bo_unreference(fp->const_buffer);
   fp->const_buffer = brw_wm_update_constant_buffer(brw);

   /* If there's no constant buffer, then no surface BO is needed to point at
    * it.
    */
   if (fp->const_buffer == NULL) {
      /* Consistency: use dri_bo_unreference here like everywhere else in
       * this function (drm_intel_bo_unreference is the same libdrm entry
       * point under its newer name). */
      dri_bo_unreference(brw->wm.surf_bo[surf]);
      brw->wm.surf_bo[surf] = NULL;
      return;
   }

   memset(&key, 0, sizeof(key));

   /* One RGBA32F texel per parameter vec4. */
   key.format = MESA_FORMAT_RGBA_FLOAT32;
   key.internal_format = GL_RGBA;
   key.bo = fp->const_buffer;
   key.depthmode = GL_NONE;
   key.pitch = params->NumParameters;
   key.width = params->NumParameters;
   key.height = 1;
   key.depth = 1;
   key.cpp = 16;

   dri_bo_unreference(brw->wm.surf_bo[surf]);
   brw->wm.surf_bo[surf] = brw_search_cache(&brw->surface_cache,
                                            BRW_SS_SURFACE,
                                            &key, sizeof(key),
                                            &key.bo, 1,
                                            NULL);
   if (brw->wm.surf_bo[surf] == NULL) {
      brw->wm.surf_bo[surf] = brw_create_constant_surface(brw, &key);
   }
   brw->state.dirty.brw |= BRW_NEW_WM_SURFACES;
}
/* Free a batchbuffer: unmap it if still mapped, drop the references on
 * the batch BO and the workaround-render BO, and free the struct itself.
 *
 * Robustness: accept NULL (no-op), matching free()-style destructor
 * conventions — the original dereferenced batch->map unconditionally. */
void intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
	if (batch == NULL)
		return;

	if (batch->map) {
		dri_bo_unmap(batch->buffer);
		batch->map = NULL;
	}

	dri_bo_unreference(batch->buffer);
	dri_bo_unreference(batch->wa_render_bo);
	free(batch);
}
/* Attach to a pbo, discarding our data. Effectively zero-copy upload * the pbo's data. */ void intel_region_attach_pbo(struct intel_context *intel, struct intel_region *region, struct intel_buffer_object *pbo) { if (region->pbo == pbo) return; /* If there is already a pbo attached, break the cow tie now. * Don't call intel_region_release_pbo() as that would * unnecessarily allocate a new buffer we would have to immediately * discard. */ if (region->pbo) { region->pbo->region = NULL; region->pbo = NULL; } if (region->buffer) { dri_bo_unreference(region->buffer); region->buffer = NULL; } region->pbo = pbo; region->pbo->region = region; dri_bo_reference(pbo->buffer); region->buffer = pbo->buffer; }
/* Drop one reference on *region_handle and clear the handle.  When the
 * last reference goes away, release the pbo tie, the buffer object, any
 * classic drm mapping, and the region struct itself. */
void
intel_region_release(struct intel_region **region_handle)
{
   struct intel_region *region = *region_handle;

   if (region == NULL)
      return;

   DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);

   ASSERT(region->refcount > 0);
   region->refcount--;

   if (region->refcount != 0) {
      /* Other holders remain; just clear the caller's handle. */
      *region_handle = NULL;
      return;
   }

   assert(region->map_refcount == 0);

   /* Break the COW tie without freeing the pbo's data. */
   if (region->pbo)
      region->pbo->region = NULL;
   region->pbo = NULL;

   dri_bo_unreference(region->buffer);

   if (region->classic_map != NULL) {
      drmUnmap(region->classic_map,
               region->pitch * region->cpp * region->height);
   }

   free(region);
   *region_handle = NULL;
}
/* xf86Crtc shadow-destroy hook: tear down the rotated-scanout resources
 * for a CRTC — the scratch rotate pixmap and (when `data` is set) the
 * kernel FB id plus the rotate BO — then refresh shadow_present. */
static void
drmmode_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr rotate_pixmap,
			    void *data)
{
	ScrnInfoPtr scrn = crtc->scrn;
	intel_screen_private *intel = intel_get_screen_private(scrn);
	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
	drmmode_ptr drmmode = drmmode_crtc->drmmode;

	if (rotate_pixmap) {
		/* Detach the BO before the scratch pixmap header is freed. */
		intel_set_pixmap_bo(rotate_pixmap, NULL);
		FreeScratchPixmapHeader(rotate_pixmap);
	}

	if (data) {
		/* Be sure to sync acceleration before the memory gets
		 * unbound.
		 * NOTE(review): no explicit sync call is visible in this
		 * function — presumably drmModeRmFB/unreference cover it;
		 * confirm against the rest of the driver. */
		drmModeRmFB(drmmode->fd, drmmode_crtc->rotate_fb_id);
		drmmode_crtc->rotate_fb_id = 0;
		dri_bo_unreference(drmmode_crtc->rotate_bo);
		drmmode_crtc->rotate_bo = NULL;
	}

	/* Shadow is present again only if the screen normally uses one. */
	intel->shadow_present = intel->use_shadow;
}
void brw_draw_destroy( struct brw_context *brw ) { if (brw->vb.upload.bo != NULL) { dri_bo_unreference(brw->vb.upload.bo); brw->vb.upload.bo = NULL; } }
/* Emit an index-buffer state packet covering [offset, offset + ib_size)
 * of `bo`, then drop one reference on `bo` (ownership appears to be
 * transferred in by the caller — confirm at call sites). */
void brw_emit_indices(struct brw_context *brw,
                      const struct _mesa_index_buffer *index_buffer,
                      dri_bo *bo,
                      GLuint offset)
{
   struct intel_context *intel = &brw->intel;
   /* Total byte size of the index data. */
   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;

   /* Emit the indexbuffer packet:
    */
   {
      struct brw_indexbuffer ib;

      memset(&ib, 0, sizeof(ib));

      ib.header.bits.opcode = CMD_INDEX_BUFFER;
      /* DWORD length field excludes the first two DWORDs. */
      ib.header.bits.length = sizeof(ib)/4 - 2;
      ib.header.bits.index_format = get_index_type(index_buffer->type);
      ib.header.bits.cut_index_enable = 0;

      BEGIN_BATCH(4, IGNORE_CLIPRECTS);
      OUT_BATCH( ib.header.dword );
      /* Start and end addresses are both relocations into `bo`. */
      OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
      OUT_RELOC( bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                 offset + ib_size);
      OUT_BATCH( 0 );
      ADVANCE_BATCH();

      dri_bo_unreference(bo);
   }
}
/**
 * Deallocate/free a vertex/pixel buffer object.
 * Called via glDeleteBuffersARB().
 *
 * Unmaps the object if the app left it mapped, frees any system-memory
 * backing, releases either the COW region tie or the GEM buffer, and
 * finally frees the wrapper struct.
 */
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   assert(intel_obj);

   /* Buffer objects are automatically unmapped when deleting according
    * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
    * (though it does if you call glDeleteBuffers)
    */
   if (obj->Pointer)
      intel_bufferobj_unmap(ctx, 0, obj);

   _mesa_free(intel_obj->sys_buffer);

   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);
   else if (intel_obj->buffer)
      dri_bo_unreference(intel_obj->buffer);

   _mesa_free(intel_obj);
}
/* Attach to a pbo, discarding our data.  Effectively zero-copy upload
 * the pbo's data.
 *
 * After this returns, region->buffer is a shared reference to the pbo's
 * own buffer and the COW tie (region->pbo / pbo->region) is established.
 */
void
intel_region_attach_pbo(struct intel_context *intel,
                        struct intel_region *region,
                        struct intel_buffer_object *pbo)
{
   dri_bo *buffer;

   if (region->pbo == pbo)
      return;

   _DBG("%s %p %p\n", __FUNCTION__, region, pbo);

   /* If there is already a pbo attached, break the cow tie now.
    * Don't call intel_region_release_pbo() as that would
    * unnecessarily allocate a new buffer we would have to immediately
    * discard.
    */
   if (region->pbo) {
      region->pbo->region = NULL;
      region->pbo = NULL;
   }

   /* Discard our own backing buffer before adopting the pbo's. */
   if (region->buffer) {
      dri_bo_unreference(region->buffer);
      region->buffer = NULL;
   }

   /* make sure pbo has a buffer of its own */
   buffer = intel_bufferobj_buffer(intel, pbo, INTEL_WRITE_FULL);

   region->pbo = pbo;
   region->pbo->region = region;
   dri_bo_reference(buffer);
   region->buffer = buffer;
}
/* Calculate interpolants for triangle and line rasterization.
 *
 * Builds the SF program key from current state (VS outputs, reduced
 * primitive, point-sprite, flat-shading/two-side lighting, polygon
 * winding), then looks the program up in the cache, compiling it on a
 * miss.  State-dependency markers (CACHE_NEW_*, _NEW_*) are scanned by
 * the state-atom machinery conventions of this driver. */
static void upload_sf_prog(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_sf_prog_key key;

   memset(&key, 0, sizeof(key));

   /* Populate the key, noting state dependencies:
    */
   /* CACHE_NEW_VS_PROG */
   key.attrs = brw->vs.prog_data->outputs_written;

   /* BRW_NEW_REDUCED_PRIMITIVE */
   switch (brw->intel.reduced_primitive) {
   case GL_TRIANGLES:
      /* NOTE: We just use the edgeflag attribute as an indicator that
       * unfilled triangles are active.  We don't actually do the
       * edgeflag testing here, it is already done in the clip
       * program.
       */
      if (key.attrs & (1<<VERT_RESULT_EDGE))
         key.primitive = SF_UNFILLED_TRIS;
      else
         key.primitive = SF_TRIANGLES;
      break;
   case GL_LINES:
      key.primitive = SF_LINES;
      break;
   case GL_POINTS:
      key.primitive = SF_POINTS;
      break;
   /* NOTE(review): no default case — any other reduced primitive leaves
    * key.primitive at 0 from the memset above. */
   }

   key.do_point_sprite = ctx->Point.PointSprite;
   key.SpriteOrigin = ctx->Point.SpriteOrigin;

   /* _NEW_LIGHT */
   key.do_flat_shading = (ctx->Light.ShadeModel == GL_FLAT);
   key.do_twoside_color = (ctx->Light.Enabled && ctx->Light.Model.TwoSide);

   /* _NEW_POLYGON */
   if (key.do_twoside_color) {
      /* If we're rendering to a FBO, we have to invert the polygon
       * face orientation, just as we invert the viewport in
       * sf_unit_create_from_key().  ctx->DrawBuffer->Name will be
       * nonzero if we're rendering to such an FBO.
       */
      key.frontface_ccw = (ctx->Polygon.FrontFace == GL_CCW) ^
         (ctx->DrawBuffer->Name != 0);
   }

   dri_bo_unreference(brw->sf.prog_bo);
   brw->sf.prog_bo = brw_search_cache(&brw->cache, BRW_SF_PROG,
                                      &key, sizeof(key),
                                      NULL, 0,
                                      &brw->sf.prog_data);
   if (brw->sf.prog_bo == NULL)
      compile_sf_prog( brw, &key );
}
/* Program-deletion hook: drop the per-program constant-buffer BO for
 * fragment and vertex programs, then delegate to core Mesa. */
static void brwDeleteProgram( GLcontext *ctx,
			      struct gl_program *prog )
{
   switch (prog->Target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct gl_fragment_program *fp = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *brw_fp = brw_fragment_program(fp);

      dri_bo_unreference(brw_fp->const_buffer);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *brw_vp = brw_vertex_program(vp);

      dri_bo_unreference(brw_vp->const_buffer);
      break;
   }
   default:
      break;
   }

   _mesa_delete_program( ctx, prog );
}
/* Break the COW tie to the region.  The region gets to keep the data:
 * we only drop our shared reference and detach the back-pointers. */
void
intel_bufferobj_release_region(struct intel_context *intel,
                               struct intel_buffer_object *intel_obj)
{
   /* A COW tie means both sides share the same buffer. */
   assert(intel_obj->region->buffer == intel_obj->buffer);

   intel_obj->region->pbo = NULL;
   intel_obj->region = NULL;

   dri_bo_unreference(intel_obj->buffer);
   intel_obj->buffer = NULL;
}
/* Upload a color-calculator viewport with the full fixed [0, 1] depth
 * range.  The struct is memset first so padding bytes hash consistently
 * in the state cache. */
static void prepare_cc_vp( struct brw_context *brw )
{
   struct brw_cc_viewport ccv;

   memset(&ccv, 0, sizeof(ccv));

   ccv.min_depth = 0.0;
   ccv.max_depth = 1.0;

   dri_bo_unreference(brw->cc.vp_bo);
   brw->cc.vp_bo = brw_cache_data( &brw->cache, BRW_CC_VP, &ccv, NULL, 0 );
}
/* DRI3 hook: wrap a prime fd in a pixmap.
 *
 * Rejects depths below 8 and any bpp other than 8/16/32, imports the fd
 * as a GEM BO sized height*stride, attaches it to a fresh pixmap, and
 * pins it against migration.  Returns NULL on any failure, destroying
 * the partially-built pixmap via the goto-cleanup path. */
static PixmapPtr intel_dri3_pixmap_from_fd(ScreenPtr screen,
					   int fd,
					   CARD16 width,
					   CARD16 height,
					   CARD16 stride,
					   CARD8 depth,
					   CARD8 bpp)
{
	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
	intel_screen_private *intel = intel_get_screen_private(scrn);
	struct intel_uxa_pixmap *priv;
	PixmapPtr pixmap;
	dri_bo *bo;

	if (depth < 8)
		return NULL;

	switch (bpp) {
	case 8:
	case 16:
	case 32:
		break;
	default:
		return NULL;
	}

	/* Create an empty pixmap, then retarget it at the imported BO. */
	pixmap = fbCreatePixmap(screen, 0, 0, depth, 0);
	if (!pixmap)
		return NULL;

	if (!screen->ModifyPixmapHeader(pixmap, width, height, 0, 0, stride,
					NULL))
		goto free_pixmap;

	bo = drm_intel_bo_gem_create_from_prime(intel->bufmgr, fd,
						(uint32_t)height * stride);
	if (bo == NULL)
		goto free_pixmap;

	/* The pixmap takes its own reference; drop ours. */
	intel_uxa_set_pixmap_bo(pixmap, bo);
	dri_bo_unreference(bo);

	priv = intel_uxa_get_pixmap_private(pixmap);
	if (priv == NULL)
		goto free_pixmap;

	/* Imported buffers must not be migrated or reallocated. */
	priv->pinned |= PIN_DRI3;
	return pixmap;

free_pixmap:
	fbDestroyPixmap(pixmap);
	return NULL;
}
/* Finish the current vertex run: flush any accumulated vertex data into
 * the vertex BO, drop our reference on it, and invalidate the cached
 * vertex-format id. */
static void intel_end_vertex(intel_screen_private *intel)
{
	dri_bo *vbo = intel->vertex_bo;

	if (vbo != NULL) {
		if (intel->vertex_used) {
			/* vertex_used counts dwords, hence *4 bytes. */
			dri_bo_subdata(vbo, 0, intel->vertex_used*4,
				       intel->vertex_ptr);
			intel->vertex_used = 0;
		}
		dri_bo_unreference(vbo);
		intel->vertex_bo = NULL;
	}

	intel->vertex_id = 0;
}
/* Break the COW tie to the pbo and allocate a new buffer. * The pbo gets to keep the data. */ void intel_region_release_pbo(struct intel_context *intel, struct intel_region *region) { assert(region->buffer == region->pbo->buffer); region->pbo->region = NULL; region->pbo = NULL; dri_bo_unreference(region->buffer); region->buffer = NULL; region->buffer = dri_bo_alloc(intel->bufmgr, "region", region->pitch * region->cpp * region->height, 64); }
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
intel_bufferobj_data(GLcontext * ctx,
                     GLenum target,
                     GLsizeiptrARB size,
                     const GLvoid * data,
                     GLenum usage, struct gl_buffer_object *obj)
{
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

   intel_obj->Base.Size = size;
   intel_obj->Base.Usage = usage;

   assert(!obj->Pointer); /* Mesa should have unmapped it */

   /* Discard all previous backing: COW region, GEM buffer, sys memory. */
   if (intel_obj->region)
      intel_bufferobj_release_region(intel, intel_obj);

   if (intel_obj->buffer != NULL) {
      dri_bo_unreference(intel_obj->buffer);
      intel_obj->buffer = NULL;
   }
   _mesa_free(intel_obj->sys_buffer);
   intel_obj->sys_buffer = NULL;

   if (size != 0) {
#ifdef I915
      /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
       * with their contents anyway.
       */
      if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
	 intel_obj->sys_buffer = _mesa_malloc(size);
	 if (intel_obj->sys_buffer != NULL) {
	    if (data != NULL)
	       memcpy(intel_obj->sys_buffer, data, size);
	    return GL_TRUE;
	 }
	 /* Fall through to GEM allocation if malloc failed. */
      }
#endif
      intel_bufferobj_alloc_buffer(intel, intel_obj);
      if (!intel_obj->buffer)
         return GL_FALSE;

      if (data != NULL)
	 dri_bo_subdata(intel_obj->buffer, 0, size, data);
   }

   return GL_TRUE;
}
static void prepare_cc_unit( struct brw_context *brw ) { struct brw_cc_unit_key key; cc_unit_populate_key(brw, &key); dri_bo_unreference(brw->cc.state_bo); brw->cc.state_bo = brw_search_cache(&brw->cache, BRW_CC_UNIT, &key, sizeof(key), &brw->cc.vp_bo, 1, NULL); if (brw->cc.state_bo == NULL) brw->cc.state_bo = cc_unit_create_from_key(brw, &key); }
static void upload_clip_unit( struct brw_context *brw ) { struct brw_clip_unit_key key; clip_unit_populate_key(brw, &key); dri_bo_unreference(brw->clip.state_bo); brw->clip.state_bo = brw_search_cache(&brw->cache, BRW_CLIP_UNIT, &key, sizeof(key), &brw->clip.prog_bo, 1, NULL); if (brw->clip.state_bo == NULL) { brw->clip.state_bo = clip_unit_create_from_key(brw, &key); } }
/* Heap destructor for an object_surface: release its GEM BO and any
 * driver-private data, then return the object to the heap. */
VOID
media_destroy_surface (struct object_heap * heap, struct object_base * obj)
{
  struct object_surface *obj_surface = (struct object_surface *) obj;
  dri_bo_unreference (obj_surface->bo);
  obj_surface->bo = NULL;

  if (obj_surface->free_private_data != NULL)
    {
      /* Let the registered callback release private_data first; if it
       * left the pointer set, free the memory here as a fallback.
       * NOTE(review): assumes the callback either frees-and-clears the
       * pointer or leaves a heap allocation behind — confirm with the
       * callbacks that set free_private_data. */
      obj_surface->free_private_data (&obj_surface->private_data);
      if(obj_surface->private_data!=NULL)
        {
          media_drv_free_memory(obj_surface->private_data);
        }
      obj_surface->private_data = NULL;
    }
  object_heap_free (heap, obj);
}
static int upload_clip_unit( struct brw_context *brw ) { struct brw_clip_unit_key key; int ret = 0; clip_unit_populate_key(brw, &key); dri_bo_unreference(brw->clip.state_bo); brw->clip.state_bo = brw_search_cache(&brw->cache, BRW_CLIP_UNIT, &key, sizeof(key), &brw->clip.prog_bo, 1, NULL); if (brw->clip.state_bo == NULL) { brw->clip.state_bo = clip_unit_create_from_key(brw, &key); } ret = dri_bufmgr_check_aperture_space(brw->clip.state_bo); return ret; }
/* Upload the color-calculator viewport.  With depth clamping enabled the
 * depth range comes from the current viewport near/far (ordered via
 * MIN2/MAX2); otherwise the fixed [0, 1] range is used.  memset first so
 * struct padding hashes consistently in the state cache. */
static void prepare_cc_vp( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_cc_viewport ccv;

   memset(&ccv, 0, sizeof(ccv));

   /* _NEW_TRANSFORM */
   if (ctx->Transform.DepthClamp) {
      /* _NEW_VIEWPORT */
      ccv.min_depth = MIN2(ctx->Viewport.Near, ctx->Viewport.Far);
      ccv.max_depth = MAX2(ctx->Viewport.Near, ctx->Viewport.Far);
   } else {
      ccv.min_depth = 0.0;
      ccv.max_depth = 1.0;
   }

   dri_bo_unreference(brw->cc.vp_bo);
   brw->cc.vp_bo = brw_cache_data(&brw->cache, BRW_CC_VP, &ccv, sizeof(ccv),
				  NULL, 0);
}
void i830UpdateTextureState(struct intel_context *intel) { struct i830_context *i830 = i830_context(&intel->ctx); GLboolean ok = GL_TRUE; GLuint i; for (i = 0; i < I830_TEX_UNITS && ok; i++) { switch (intel->ctx.Texture.Unit[i]._ReallyEnabled) { case TEXTURE_1D_BIT: case TEXTURE_2D_BIT: case TEXTURE_CUBE_BIT: ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL); break; case TEXTURE_RECT_BIT: ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_IN_TEXELUNITS); break; case 0:{ struct i830_context *i830 = i830_context(&intel->ctx); if (i830->state.active & I830_UPLOAD_TEX(i)) I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), GL_FALSE); if (i830->state.tex_buffer[i] != NULL) { dri_bo_unreference(i830->state.tex_buffer[i]); i830->state.tex_buffer[i] = NULL; } break; } case TEXTURE_3D_BIT: default: ok = GL_FALSE; break; } } FALLBACK(intel, I830_FALLBACK_TEXTURE, !ok); if (ok) i830EmitTextureBlend(i830); }
/* Build (or fetch from the surface cache) the SURFACE_STATE BO for the
 * texture bound to `unit`, keyed on the mipmap tree's format, layout,
 * and region buffer. */
static void
brw_update_texture_surface( GLcontext *ctx, GLuint unit )
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
   struct intel_texture_object *intelObj = intel_texture_object(tObj);
   struct gl_texture_image *firstImage = tObj->Image[0][intelObj->firstLevel];
   struct brw_surface_key key;
   const GLuint surf = SURF_INDEX_TEXTURE(unit);

   memset(&key, 0, sizeof(key));

   /* Geometry/format of the surface comes from the base level image
    * and the miptree backing it. */
   key.format = firstImage->TexFormat;
   key.internal_format = firstImage->InternalFormat;
   key.pitch = intelObj->mt->pitch;
   key.depth = firstImage->Depth;
   key.bo = intelObj->mt->region->buffer;
   key.offset = 0;

   key.target = tObj->Target;
   key.depthmode = tObj->DepthMode;
   key.first_level = intelObj->firstLevel;
   key.last_level = intelObj->lastLevel;
   key.width = firstImage->Width;
   key.height = firstImage->Height;
   key.cpp = intelObj->mt->cpp;
   key.tiling = intelObj->mt->region->tiling;

   dri_bo_unreference(brw->wm.surf_bo[surf]);
   brw->wm.surf_bo[surf] = brw_search_cache(&brw->surface_cache,
                                            BRW_SS_SURFACE,
                                            &key, sizeof(key),
                                            &key.bo, 1,
                                            NULL);
   if (brw->wm.surf_bo[surf] == NULL) {
      brw->wm.surf_bo[surf] = brw_create_texture_surface(brw, &key);
   }
}
/* Drop one reference on *ptr and clear the caller's handle.  A store is
 * backed by exactly one of a GEM BO or a malloc'd buffer; when the last
 * reference goes away both backings are released and the struct freed. */
VOID
media_release_buffer_store (struct buffer_store ** ptr)
{
  struct buffer_store *store = *ptr;

  if (store == NULL)
    return;

  /* Exactly one backing must be present. */
  MEDIA_DRV_ASSERT (store->bo || store->buffer);
  MEDIA_DRV_ASSERT (!(store->bo && store->buffer));

  if (--store->ref_count == 0)
    {
      dri_bo_unreference (store->bo);
      media_drv_free_memory (store->buffer);
      store->bo = NULL;
      store->buffer = NULL;
      media_drv_free_memory (store);
    }

  *ptr = NULL;
}
/* Replace the vertex-upload BO with a fresh one of at least
 * BRW_UPLOAD_INIT_SIZE bytes and rewind the upload offset.  The upload
 * BO is a short-lived scratch buffer used within a brw_try_draw_prims
 * call while the lock is held. */
static void wrap_buffers( struct brw_context *brw,
			  GLuint size )
{
   if (size < BRW_UPLOAD_INIT_SIZE)
      size = BRW_UPLOAD_INIT_SIZE;

   brw->vb.upload.offset = 0;

   if (brw->vb.upload.bo != NULL)
      dri_bo_unreference(brw->vb.upload.bo);

   brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
				    size, 1);
}
/* Discard the current batch BO and start a fresh, mapped one of
 * buffer_size bytes, resetting the write pointer to its start. */
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch, int buffer_size)
{
    struct intel_driver_data *intel = batch->intel;
    int batch_size = buffer_size;

    /* Only the known execution rings are legal here. */
    assert(batch->flag == I915_EXEC_RENDER ||
           batch->flag == I915_EXEC_BLT ||
           batch->flag == I915_EXEC_BSD ||
           batch->flag == I915_EXEC_VEBOX);

    dri_bo_unreference(batch->buffer);
    batch->buffer = dri_bo_alloc(intel->bufmgr,
                                 "batch buffer",
                                 batch_size,
                                 0x1000);   /* page-aligned */
    assert(batch->buffer);
    dri_bo_map(batch->buffer, 1);           /* 1 = write_enable */
    assert(batch->buffer->virtual);
    batch->map = batch->buffer->virtual;
    batch->size = batch_size;
    batch->ptr = batch->map;
    batch->atomic = 0;
}