/**
 * glGetnPixelMapuivARB: copy the named pixel map into client memory or a
 * bound pack PBO, converting float map entries to unsigned ints.
 * Raises GL_INVALID_ENUM for an unknown map and GL_INVALID_OPERATION if
 * the destination PBO is currently mapped.
 */
static void GLAPIENTRY
_mesa_GetnPixelMapuivARB( GLenum map, GLsizei bufSize, GLuint *values )
{
   GET_CURRENT_CONTEXT(ctx);
   const struct gl_pixelmap *pixmap;
   GLint n, idx;

   ASSERT_OUTSIDE_BEGIN_END(ctx);

   pixmap = get_pixelmap(ctx, map);
   if (!pixmap) {
      _mesa_error(ctx, GL_INVALID_ENUM, "glGetPixelMapuiv(map)");
      return;
   }

   n = pixmap->Size;

   /* bounds-check the destination (client memory or PBO) */
   if (!validate_pbo_access(ctx, &ctx->Pack, n, GL_INTENSITY,
                            GL_UNSIGNED_INT, bufSize, values)) {
      return;
   }

   values = (GLuint *) _mesa_map_pbo_dest(ctx, &ctx->Pack, values);
   if (!values) {
      if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glGetPixelMapuiv(PBO is mapped)");
      }
      return;
   }

   if (map == GL_PIXEL_MAP_S_TO_S) {
      /* special case: the StoS map is copied raw, sizeof(GLint) per entry */
      memcpy(values, ctx->PixelMaps.StoS.Map, n * sizeof(GLint));
   }
   else {
      for (idx = 0; idx < n; idx++) {
         values[idx] = FLOAT_TO_UINT( pixmap->Map[idx] );
      }
   }

   _mesa_unmap_pbo_dest(ctx, &ctx->Pack);
}
static void intel_get_tex_sub_image(struct gl_context *ctx, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLint depth, GLenum format, GLenum type, GLvoid *pixels, struct gl_texture_image *texImage) { struct brw_context *brw = brw_context(ctx); bool ok; DBG("%s\n", __func__); if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) { if (_mesa_meta_pbo_GetTexSubImage(ctx, 3, texImage, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels, &ctx->Pack)) { /* Flush to guarantee coherency between the render cache and other * caches the PBO could potentially be bound to after this point. * See the related comment in intelReadPixels() for a more detailed * explanation. */ brw_emit_mi_flush(brw); return; } perf_debug("%s: fallback to CPU mapping in PBO case\n", __func__); } ok = intel_gettexsubimage_tiled_memcpy(ctx, texImage, xoffset, yoffset, width, height, format, type, pixels, &ctx->Pack); if(ok) return; _mesa_meta_GetTexSubImage(ctx, xoffset, yoffset, zoffset, width, height, depth, format, type, pixels, texImage); DBG("%s - DONE\n", __func__); }
/** * Examine the active arrays to determine if we have interleaved * vertex arrays all living in one VBO, or all living in user space. */ static GLboolean is_interleaved_arrays(const struct st_vertex_program *vp, const struct st_vp_variant *vpv, const struct gl_client_array **arrays) { GLuint attr; const struct gl_buffer_object *firstBufObj = NULL; GLint firstStride = -1; const GLubyte *firstPtr = NULL; GLboolean userSpaceBuffer = GL_FALSE; for (attr = 0; attr < vpv->num_inputs; attr++) { const GLuint mesaAttr = vp->index_to_input[attr]; const struct gl_client_array *array = arrays[mesaAttr]; const struct gl_buffer_object *bufObj = array->BufferObj; const GLsizei stride = array->StrideB; /* in bytes */ if (attr == 0) { /* save info about the first array */ firstStride = stride; firstPtr = array->Ptr; firstBufObj = bufObj; userSpaceBuffer = !bufObj || !bufObj->Name; } else { /* check if other arrays interleave with the first, in same buffer */ if (stride != firstStride) return GL_FALSE; /* strides don't match */ if (bufObj != firstBufObj) return GL_FALSE; /* arrays in different VBOs */ if (abs(array->Ptr - firstPtr) > firstStride) return GL_FALSE; /* arrays start too far apart */ if ((!_mesa_is_bufferobj(bufObj)) != userSpaceBuffer) return GL_FALSE; /* mix of VBO and user-space arrays */ } } return GL_TRUE; }
/**
 * Get texture image.  Called by glGetTexImage / glGetnTexImageARB.
 *
 * \param target texture target.
 * \param level image level.
 * \param format pixel data format for returned image.
 * \param type pixel data type for returned image.
 * \param bufSize size of the pixels data buffer.
 * \param pixels returned pixel data (or a PBO offset when a pack PBO is
 *        bound).
 */
void GLAPIENTRY
_mesa_GetnTexImageARB( GLenum target, GLint level, GLenum format,
                       GLenum type, GLsizei bufSize, GLvoid *pixels )
{
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   GET_CURRENT_CONTEXT(ctx);

   FLUSH_VERTICES(ctx, 0);

   if (getteximage_error_check(ctx, target, level, format, type,
                               bufSize, pixels))
      return;

   /* A NULL pointer with no pack PBO bound is not an error; do nothing. */
   if (!pixels && !_mesa_is_bufferobj(ctx->Pack.BufferObj))
      return;

   texObj = _mesa_get_current_tex_object(ctx, target);
   texImage = _mesa_select_tex_image(ctx, texObj, target, level);

   if (_mesa_is_zero_size_texture(texImage))
      return;

   if (MESA_VERBOSE & (VERBOSE_API | VERBOSE_TEXTURE)) {
      _mesa_debug(ctx, "glGetTexImage(tex %u) format = %s, w=%d, h=%d,"
                  " dstFmt=0x%x, dstType=0x%x\n",
                  texObj->Name,
                  _mesa_get_format_name(texImage->TexFormat),
                  texImage->Width, texImage->Height,
                  format, type);
   }

   _mesa_lock_texture(ctx, texObj);
   ctx->Driver.GetTexImage(ctx, format, type, pixels, texImage);
   _mesa_unlock_texture(ctx, texObj);
}
/**
 * Debug helper: sanity-check element 'j' of a vertex array, mapping the
 * backing VBO first if needed.  For debugging purposes; not normally used.
 */
static void
check_array_data(struct gl_context *ctx, struct gl_client_array *array,
                 GLuint attrib, GLuint j)
{
   const void *data;

   if (!array->Enabled)
      return;

   data = array->Ptr;
   if (_mesa_is_bufferobj(array->BufferObj)) {
      if (!array->BufferObj->Pointer) {
         /* VBO not mapped yet: map the whole buffer read-only */
         array->BufferObj->Pointer =
            ctx->Driver.MapBufferRange(ctx, 0, array->BufferObj->Size,
                                       GL_MAP_READ_BIT, array->BufferObj);
      }
      data = ADD_POINTERS(data, array->BufferObj->Pointer);
   }

   switch (array->Type) {
   case GL_FLOAT:
      {
         GLfloat *f = (GLfloat *) ((GLubyte *) data + array->StrideB * j);
         GLint k;
         for (k = 0; k < array->Size; k++) {
            /* flag NaN/Inf and wildly out-of-range components */
            if (IS_INF_OR_NAN(f[k]) || f[k] >= 1.0e20 || f[k] <= -1.0e10) {
               printf("Bad array data:\n");
               printf(" Element[%u].%u = %f\n", j, k, f[k]);
               printf(" Array %u at %p\n", attrib, (void* ) array);
               printf(" Type 0x%x, Size %d, Stride %d\n",
                      array->Type, array->Size, array->Stride);
               printf(" Address/offset %p in Buffer Object %u\n",
                      array->Ptr, array->BufferObj->Name);
               f[k] = 1.0; /* XXX replace the bad value! */
            }
            /*assert(!IS_INF_OR_NAN(f[k]));*/
         }
      }
      break;
   default:
      ;
   }
}
/**
 * Shared implementation of glGetnCompressedTexImageARB,
 * glGetCompressedTexImage, and glGetCompressedTextureImage.
 *
 * texImage must be passed in because glGetCompressedTexImage must handle
 * the target GL_TEXTURE_CUBE_MAP.
 */
void
_mesa_get_compressed_texture_image(struct gl_context *ctx,
                                   struct gl_texture_object *texObj,
                                   struct gl_texture_image *texImage,
                                   GLenum target, GLint level,
                                   GLsizei bufSize, GLvoid *pixels,
                                   bool dsa)
{
   assert(texObj);
   assert(texImage);

   FLUSH_VERTICES(ctx, 0);

   if (getcompressedteximage_error_check(ctx, texImage, target, level,
                                         bufSize, pixels, dsa))
      return;

   /* NULL destination with no pack PBO bound: not an error, just a no-op */
   if (!pixels && !_mesa_is_bufferobj(ctx->Pack.BufferObj))
      return;

   if (_mesa_is_zero_size_texture(texImage))
      return;

   if (MESA_VERBOSE & (VERBOSE_API | VERBOSE_TEXTURE)) {
      _mesa_debug(ctx,
                  "glGetCompressedTex%sImage(tex %u) format = %s, w=%d, h=%d\n",
                  dsa ? "ture" : "", texObj->Name,
                  _mesa_get_format_name(texImage->TexFormat),
                  texImage->Width, texImage->Height);
   }

   _mesa_lock_texture(ctx, texObj);
   ctx->Driver.GetCompressedTexImage(ctx, texImage, pixels);
   _mesa_unlock_texture(ctx, texObj);
}
/**
 * glPixelMapfv entry point: validate the map size, read the map values
 * (possibly from an unpack PBO) and store them in the context.
 */
static void GLAPIENTRY
_mesa_PixelMapfv( GLenum map, GLsizei mapsize, const GLfloat *values )
{
   GET_CURRENT_CONTEXT(ctx);
   ASSERT_OUTSIDE_BEGIN_END(ctx);

   /* XXX someday, test against ctx->Const.MaxPixelMapTableSize */
   if (mapsize < 1 || mapsize > MAX_PIXEL_MAP_TABLE) {
      _mesa_error( ctx, GL_INVALID_VALUE, "glPixelMapfv(mapsize)" );
      return;
   }

   /* the index-type maps additionally require a power-of-two size */
   if (map >= GL_PIXEL_MAP_S_TO_S && map <= GL_PIXEL_MAP_I_TO_A &&
       !_mesa_is_pow_two(mapsize)) {
      _mesa_error( ctx, GL_INVALID_VALUE, "glPixelMapfv(mapsize)" );
      return;
   }

   FLUSH_VERTICES(ctx, _NEW_PIXEL);

   if (!validate_pbo_access(ctx, &ctx->Unpack, mapsize, GL_INTENSITY,
                            GL_FLOAT, INT_MAX, values))
      return;

   values = (const GLfloat *) _mesa_map_pbo_source(ctx, &ctx->Unpack, values);
   if (!values) {
      if (_mesa_is_bufferobj(ctx->Unpack.BufferObj)) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glPixelMapfv(PBO is mapped)");
      }
      return;
   }

   store_pixelmap(ctx, map, mapsize, values);

   _mesa_unmap_pbo_source(ctx, &ctx->Unpack);
}
/**
 * glReadPixels driver hook: try the BLT path when packing into a PBO,
 * otherwise fall back to the software _mesa_readpixels() path.
 */
void
intelReadPixels(struct gl_context * ctx,
                GLint x, GLint y, GLsizei width, GLsizei height,
                GLenum format, GLenum type,
                const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
   struct brw_context *brw = brw_context(ctx);
   bool saved_front_dirty;

   DBG("%s\n", __FUNCTION__);

   if (_mesa_is_bufferobj(pack->BufferObj)) {
      /* Using PBOs, so try the BLT based path. */
      if (do_blit_readpixels(ctx, x, y, width, height, format, type, pack,
                             pixels))
         return;

      perf_debug("%s: fallback to CPU mapping in PBO case\n", __FUNCTION__);
   }

   /* glReadPixels() wont dirty the front buffer, so reset the dirty
    * flag after calling intel_prepare_render().
    */
   saved_front_dirty = brw->front_buffer_dirty;
   intel_prepare_render(brw);
   brw->front_buffer_dirty = saved_front_dirty;

   /* Update Mesa state before calling _mesa_readpixels().
    * XXX this may not be needed since ReadPixels no longer uses the
    * span code.
    */
   if (ctx->NewState)
      _mesa_update_state(ctx);

   _mesa_readpixels(ctx, x, y, width, height, format, type, pack, pixels);

   /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
   brw->front_buffer_dirty = saved_front_dirty;
}
/**
 * Find the max index in the given element/index buffer.
 * If the indices live in a buffer object, the buffer is mapped read-only
 * for the scan and unmapped again before returning.
 */
GLuint
_mesa_max_buffer_index(struct gl_context *ctx, GLuint count, GLenum type,
                       const void *indices,
                       struct gl_buffer_object *elementBuf)
{
   const GLubyte *map = NULL;
   GLuint max = 0;
   GLuint i;

   if (_mesa_is_bufferobj(elementBuf)) {
      /* indices is an offset into the VBO: map it and rebase the pointer */
      map = ctx->Driver.MapBufferRange(ctx, 0, elementBuf->Size,
                                       GL_MAP_READ_BIT, elementBuf);
      indices = (const GLvoid *) ADD_POINTERS(map, (const GLubyte *) indices);
   }

   switch (type) {
   case GL_UNSIGNED_INT: {
      const GLuint *elt = (const GLuint *) indices;
      for (i = 0; i < count; i++) {
         if (elt[i] > max)
            max = elt[i];
      }
      break;
   }
   case GL_UNSIGNED_SHORT: {
      const GLushort *elt = (const GLushort *) indices;
      for (i = 0; i < count; i++) {
         if (elt[i] > max)
            max = elt[i];
      }
      break;
   }
   default: {
      const GLubyte *elt = (const GLubyte *) indices;
      ASSERT(type == GL_UNSIGNED_BYTE);
      for (i = 0; i < count; i++) {
         if (elt[i] > max)
            max = elt[i];
      }
      break;
   }
   }

   if (map)
      ctx->Driver.UnmapBuffer(ctx, elementBuf);

   return max;
}
/**
 * Validate parameters for an indirect indexed draw: the index type must be
 * legal and an element array buffer must be bound.
 */
static inline GLboolean
valid_draw_indirect_elements(struct gl_context *ctx, GLenum mode,
                             GLenum type, const GLvoid *indirect,
                             GLsizeiptr size, const char *name)
{
   if (!valid_elements_type(ctx, type, name))
      return GL_FALSE;

   /*
    * Unlike regular DrawElementsInstancedBaseVertex commands, the indices
    * may not come from a client array and must come from an index buffer.
    * If no element array buffer is bound, an INVALID_OPERATION error is
    * generated.
    */
   if (_mesa_is_bufferobj(ctx->Array.VAO->IndexBufferObj))
      return valid_draw_indirect(ctx, mode, indirect, size, name);

   _mesa_error(ctx, GL_INVALID_OPERATION,
               "%s(no buffer bound to GL_ELEMENT_ARRAY_BUFFER)", name);
   return GL_FALSE;
}
/**
 * Error checking for glMultiDrawElements().  Includes parameter checking
 * and VBO bounds checking.
 * \return GL_TRUE if OK to render, GL_FALSE if error found
 */
GLboolean
_mesa_validate_MultiDrawElements(struct gl_context *ctx,
                                 GLenum mode, const GLsizei *count,
                                 GLenum type, const GLvoid * const *indices,
                                 GLuint primcount)
{
   GLuint i;

   FLUSH_CURRENT(ctx, 0);

   /* reject any negative count up front */
   for (i = 0; i < primcount; i++) {
      if (count[i] < 0) {
         _mesa_error(ctx, GL_INVALID_VALUE, "glMultiDrawElements(count)" );
         return GL_FALSE;
      }
   }

   if (!_mesa_valid_prim_mode(ctx, mode, "glMultiDrawElements"))
      return GL_FALSE;

   if (!valid_elements_type(ctx, type, "glMultiDrawElements"))
      return GL_FALSE;

   if (!check_valid_to_render(ctx, "glMultiDrawElements"))
      return GL_FALSE;

   /* Not using a VBO for indices, so avoid NULL pointer derefs later. */
   if (!_mesa_is_bufferobj(ctx->Array.VAO->IndexBufferObj)) {
      for (i = 0; i < primcount; i++) {
         if (!indices[i])
            return GL_FALSE;
      }
   }

   return GL_TRUE;
}
/**
 * glGetnCompressedTexImageARB entry point: return the compressed image
 * for the current texture at (target, level) into 'img' (or a bound
 * pack PBO), after full error checking.
 */
void GLAPIENTRY
_mesa_GetnCompressedTexImageARB(GLenum target, GLint level, GLsizei bufSize,
                                GLvoid *img)
{
   struct gl_texture_object *texObj;
   struct gl_texture_image *texImage;
   GET_CURRENT_CONTEXT(ctx);
   ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);

   if (getcompressedteximage_error_check(ctx, target, level, bufSize, img))
      return;

   /* NULL destination with no pack PBO bound: not an error, do nothing */
   if (!img && !_mesa_is_bufferobj(ctx->Pack.BufferObj))
      return;

   texObj = _mesa_get_current_tex_object(ctx, target);
   texImage = _mesa_select_tex_image(ctx, texObj, target, level);

   if (_mesa_is_zero_size_texture(texImage))
      return;

   if (MESA_VERBOSE & (VERBOSE_API | VERBOSE_TEXTURE)) {
      _mesa_debug(ctx,
                  "glGetCompressedTexImage(tex %u) format = %s, w=%d, h=%d\n",
                  texObj->Name,
                  _mesa_get_format_name(texImage->TexFormat),
                  texImage->Width, texImage->Height);
   }

   _mesa_lock_texture(ctx, texObj);
   ctx->Driver.GetCompressedTexImage(ctx, texImage, img);
   _mesa_unlock_texture(ctx, texObj);
}
/* Could do better by copying the arrays and element list intact and
 * then emitting an indexed prim at runtime.
 */
static void GLAPIENTRY
_save_OBE_DrawElements(GLenum mode, GLsizei count, GLenum type,
                       const GLvoid *indices)
{
   GET_CURRENT_CONTEXT(ctx);
   GLint i;

   if (!_mesa_validate_DrawElements( ctx, mode, count, type, indices, 0 ))
      return;

   _ae_map_vbos( ctx );

   /* With an element-array VBO bound, 'indices' is an offset into the
    * (now mapped) buffer rather than a client pointer.
    */
   if (_mesa_is_bufferobj(ctx->Array.ElementArrayBufferObj))
      indices =
         ADD_POINTERS(ctx->Array.ElementArrayBufferObj->Pointer, indices);

   vbo_save_NotifyBegin( ctx, mode | VBO_SAVE_PRIM_WEAK );

   switch (type) {
   case GL_UNSIGNED_BYTE: {
      const GLubyte *elt = (const GLubyte *) indices;
      for (i = 0; i < count; i++)
         CALL_ArrayElement(GET_DISPATCH(), ( elt[i] ));
      break;
   }
   case GL_UNSIGNED_SHORT: {
      const GLushort *elt = (const GLushort *) indices;
      for (i = 0; i < count; i++)
         CALL_ArrayElement(GET_DISPATCH(), ( elt[i] ));
      break;
   }
   case GL_UNSIGNED_INT: {
      const GLuint *elt = (const GLuint *) indices;
      for (i = 0; i < count; i++)
         CALL_ArrayElement(GET_DISPATCH(), ( elt[i] ));
      break;
   }
   default:
      _mesa_error( ctx, GL_INVALID_ENUM, "glDrawElements(type)" );
      break;
   }

   CALL_End(GET_DISPATCH(), ());

   _ae_unmap_vbos( ctx );
}
/**
 * For commands that write to a PBO (glReadPixels, glGetColorTable, etc),
 * if we're writing to a PBO, map it write-only and return the pointer
 * into the PBO.  If we're not writing to a PBO, return \p dest as-is.
 * If non-null return, must call _mesa_unmap_pbo_dest() when done.
 *
 * \return NULL if error, else pointer to start of data
 */
void *
_mesa_map_pbo_dest(struct gl_context *ctx,
                   const struct gl_pixelstore_attrib *pack,
                   GLvoid *dest)
{
   GLubyte *buf;

   if (!_mesa_is_bufferobj(pack->BufferObj)) {
      /* no pack PBO bound: write straight to client memory */
      return dest;
   }

   /* pack into PBO: map it write-only and treat 'dest' as an offset */
   buf = (GLubyte *) ctx->Driver.MapBuffer(ctx, GL_PIXEL_PACK_BUFFER_EXT,
                                           GL_WRITE_ONLY_ARB,
                                           pack->BufferObj);
   if (!buf)
      return NULL;

   return ADD_POINTERS(buf, dest);
}
/** * For commands that read from a PBO (glDrawPixels, glTexImage, * glPolygonStipple, etc), if we're reading from a PBO, map it read-only * and return the pointer into the PBO. If we're not reading from a * PBO, return \p src as-is. * If non-null return, must call _mesa_unmap_pbo_source() when done. * * \return NULL if error, else pointer to start of data */ const GLvoid * _mesa_map_pbo_source(struct gl_context *ctx, const struct gl_pixelstore_attrib *unpack, const GLvoid *src) { const GLubyte *buf; if (_mesa_is_bufferobj(unpack->BufferObj)) { /* unpack from PBO */ buf = (GLubyte *) ctx->Driver.MapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT, GL_READ_ONLY_ARB, unpack->BufferObj); if (!buf) return NULL; buf = ADD_POINTERS(buf, src); } else { /* unpack from normal memory */ buf = src; } return buf; }
/**
 * Inner support for both _mesa_MultiDrawElements() and
 * _mesa_MultiDrawRangeElements().
 * This does the actual rendering after we've checked array indexes, etc.
 *
 * Strategy: when all the per-prim index subranges can be expressed as
 * offsets from a single base pointer (and the indices live in a VBO),
 * issue one combined draw; otherwise draw one primitive at a time.
 */
static void
vbo_validated_multidrawelements(struct gl_context *ctx, GLenum mode,
                                const GLsizei *count, GLenum type,
                                const GLvoid * const *indices,
                                GLsizei primcount, const GLint *basevertex)
{
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   struct _mesa_index_buffer ib;
   struct _mesa_prim *prim;
   unsigned int index_type_size = vbo_sizeof_ib_type(type);
   uintptr_t min_index_ptr, max_index_ptr;
   GLboolean fallback = GL_FALSE;
   int i;

   if (primcount == 0)
      return;

   /* one _mesa_prim per primitive; freed at the end of this function */
   prim = calloc(1, primcount * sizeof(*prim));
   if (prim == NULL) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "glMultiDrawElements");
      return;
   }

   vbo_bind_arrays(ctx);

   /* Find the lowest and highest index pointers/offsets across all prims
    * so the whole range can be treated as one index buffer span.
    */
   min_index_ptr = (uintptr_t)indices[0];
   max_index_ptr = 0;
   for (i = 0; i < primcount; i++) {
      min_index_ptr = MIN2(min_index_ptr, (uintptr_t)indices[i]);
      max_index_ptr = MAX2(max_index_ptr, (uintptr_t)indices[i] +
                           index_type_size * count[i]);
   }

   /* Check if we can handle this thing as a bunch of index offsets from the
    * same index pointer.  If we can't, then we have to fall back to doing
    * a draw_prims per primitive.
    * Check that the difference between each prim's indexes is a multiple of
    * the index/element size.
    */
   if (index_type_size != 1) {
      for (i = 0; i < primcount; i++) {
         if ((((uintptr_t)indices[i] - min_index_ptr) % index_type_size) !=
             0) {
            fallback = GL_TRUE;
            break;
         }
      }
   }

   /* If the index buffer isn't in a VBO, then treating the application's
    * subranges of the index buffer as one large index buffer may lead to
    * us reading unmapped memory.
    */
   if (!_mesa_is_bufferobj(ctx->Array.ArrayObj->ElementArrayBufferObj))
      fallback = GL_TRUE;

   if (!fallback) {
      /* combined path: one index buffer covering all prims */
      ib.count = (max_index_ptr - min_index_ptr) / index_type_size;
      ib.type = type;
      ib.obj = ctx->Array.ArrayObj->ElementArrayBufferObj;
      ib.ptr = (void *)min_index_ptr;

      for (i = 0; i < primcount; i++) {
         prim[i].begin = (i == 0);
         prim[i].end = (i == primcount - 1);
         prim[i].weak = 0;
         prim[i].pad = 0;
         prim[i].mode = mode;
         /* per-prim start is the element offset from the common base */
         prim[i].start =
            ((uintptr_t)indices[i] - min_index_ptr) / index_type_size;
         prim[i].count = count[i];
         prim[i].indexed = 1;
         prim[i].num_instances = 1;
         prim[i].base_instance = 0;
         if (basevertex != NULL)
            prim[i].basevertex = basevertex[i];
         else
            prim[i].basevertex = 0;
      }

      check_buffers_are_unmapped(exec->array.inputs);
      vbo_handle_primitive_restart(ctx, prim, primcount, &ib,
                                   GL_FALSE, ~0, ~0);
   } else {
      /* render one prim at a time */
      for (i = 0; i < primcount; i++) {
         ib.count = count[i];
         ib.type = type;
         ib.obj = ctx->Array.ArrayObj->ElementArrayBufferObj;
         ib.ptr = indices[i];

         /* reuse prim[0] for every iteration */
         prim[0].begin = 1;
         prim[0].end = 1;
         prim[0].weak = 0;
         prim[0].pad = 0;
         prim[0].mode = mode;
         prim[0].start = 0;
         prim[0].count = count[i];
         prim[0].indexed = 1;
         prim[0].num_instances = 1;
         prim[0].base_instance = 0;
         if (basevertex != NULL)
            prim[0].basevertex = basevertex[i];
         else
            prim[0].basevertex = 0;

         check_buffers_are_unmapped(exec->array.inputs);
         vbo_handle_primitive_restart(ctx, prim, 1, &ib,
                                      GL_FALSE, ~0, ~0);
      }
   }

   free(prim);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}
/**
 * Execute the buffer and save copied verts.
 * Draws any accumulated immediate-mode vertices, then resets the
 * prim/vertex counters for the next batch.
 * \param keepUnmapped  if true, leave the VBO unmapped when we're done.
 */
void
vbo_exec_vtx_flush(struct vbo_exec_context *exec, GLboolean keepUnmapped)
{
   if (0)
      vbo_exec_debug_verts( exec );

   if (exec->vtx.prim_count &&
       exec->vtx.vert_count) {

      /* Vertices belonging to an in-progress primitive are copied out so
       * they can seed the next buffer.
       */
      exec->vtx.copied.nr = vbo_copy_vertices( exec );

      /* If everything was copied there is nothing left to draw. */
      if (exec->vtx.copied.nr != exec->vtx.vert_count) {
         struct gl_context *ctx = exec->ctx;

         /* Before the update_state() as this may raise _NEW_ARRAY
          * from _mesa_set_varying_vp_inputs().
          */
         vbo_exec_bind_arrays( ctx );

         if (ctx->NewState)
            _mesa_update_state( ctx );

         /* a real VBO must be unmapped before the driver draws from it */
         if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
            vbo_exec_vtx_unmap( exec );
         }

         if (0)
            printf("%s %d %d\n", __FUNCTION__, exec->vtx.prim_count,
                   exec->vtx.vert_count);

         vbo_context(ctx)->draw_prims( ctx,
                                       exec->vtx.inputs,
                                       exec->vtx.prim,
                                       exec->vtx.prim_count,
                                       NULL,
                                       GL_TRUE,
                                       0,
                                       exec->vtx.vert_count - 1,
                                       NULL );

         /* If using a real VBO, get new storage -- unless asked not to. */
         if (_mesa_is_bufferobj(exec->vtx.bufferobj) && !keepUnmapped) {
            vbo_exec_vtx_map( exec );
         }
      }
   }

   /* May have to unmap explicitly if we didn't draw:
    */
   if (keepUnmapped &&
       _mesa_is_bufferobj(exec->vtx.bufferobj) &&
       exec->vtx.buffer_map) {
      vbo_exec_vtx_unmap( exec );
   }

   /* Recompute how many vertices still fit in the (possibly new) buffer;
    * zero when unmapped or when no vertex format is active.
    */
   if (keepUnmapped || exec->vtx.vertex_size == 0)
      exec->vtx.max_vert = 0;
   else
      exec->vtx.max_vert = ((VBO_VERT_BUFFER_SIZE - exec->vtx.buffer_used) /
                            (exec->vtx.vertex_size * sizeof(GLfloat)));

   exec->vtx.buffer_ptr = exec->vtx.buffer_map;
   exec->vtx.prim_count = 0;
   exec->vtx.vert_count = 0;
}
/**
 * Map the vertex buffer to begin storing glVertex, glColor, etc data.
 *
 * Reuses the tail of the current VBO when enough room remains; otherwise
 * allocates a fresh buffer.  On out-of-memory the no-op vertex format is
 * installed so subsequent glVertex etc. calls are safely ignored.
 */
void
vbo_exec_vtx_map( struct vbo_exec_context *exec )
{
   struct gl_context *ctx = exec->ctx;
   /* flags for MapBufferRange: write-only, unsynchronized streaming */
   const GLenum accessRange = GL_MAP_WRITE_BIT | /* for MapBufferRange */
                              GL_MAP_INVALIDATE_RANGE_BIT |
                              GL_MAP_UNSYNCHRONIZED_BIT |
                              GL_MAP_FLUSH_EXPLICIT_BIT |
                              MESA_MAP_NOWAIT_BIT;
   const GLenum usage = GL_STREAM_DRAW_ARB;

   /* nothing to map when vertices go to ordinary client memory */
   if (!_mesa_is_bufferobj(exec->vtx.bufferobj))
      return;

   assert(!exec->vtx.buffer_map);
   assert(!exec->vtx.buffer_ptr);

   if (VBO_VERT_BUFFER_SIZE > exec->vtx.buffer_used + 1024) {
      /* The VBO exists and there's room for more */
      if (exec->vtx.bufferobj->Size > 0) {
         /* map only the unused tail of the buffer */
         exec->vtx.buffer_map =
            (GLfloat *)ctx->Driver.MapBufferRange(ctx,
                                                  exec->vtx.buffer_used,
                                                  (VBO_VERT_BUFFER_SIZE -
                                                   exec->vtx.buffer_used),
                                                  accessRange,
                                                  exec->vtx.bufferobj);
         exec->vtx.buffer_ptr = exec->vtx.buffer_map;
      }
      else {
         exec->vtx.buffer_ptr = exec->vtx.buffer_map = NULL;
      }
   }

   if (!exec->vtx.buffer_map) {
      /* Need to allocate a new VBO */
      exec->vtx.buffer_used = 0;

      if (ctx->Driver.BufferData(ctx, GL_ARRAY_BUFFER_ARB,
                                 VBO_VERT_BUFFER_SIZE,
                                 NULL, usage, exec->vtx.bufferobj)) {
         /* buffer allocation worked, now map the buffer */
         exec->vtx.buffer_map =
            (GLfloat *)ctx->Driver.MapBufferRange(ctx,
                                                  0, VBO_VERT_BUFFER_SIZE,
                                                  accessRange,
                                                  exec->vtx.bufferobj);
      }
      else {
         _mesa_error(ctx, GL_OUT_OF_MEMORY, "VBO allocation");
         exec->vtx.buffer_map = NULL;
      }
   }

   exec->vtx.buffer_ptr = exec->vtx.buffer_map;

   if (!exec->vtx.buffer_map) {
      /* out of memory */
      _mesa_install_exec_vtxfmt( ctx, &exec->vtxfmt_noop );
   }
   else {
      if (_mesa_using_noop_vtxfmt(ctx->Exec)) {
         /* The no-op functions are installed so switch back to regular
          * functions.  We do this test just to avoid frequent and needless
          * calls to _mesa_install_exec_vtxfmt().
          */
         _mesa_install_exec_vtxfmt(ctx, &exec->vtxfmt);
      }
   }

   if (0)
      printf("map %d..\n", exec->vtx.buffer_used);
}
/* TODO: populate these as the vertex is defined:
 */
/**
 * Bind the immediate-mode vertex attributes as client arrays so the
 * normal draw_prims path can consume them.  Installs the current-value
 * defaults first, then overlays an array for every attribute that was
 * actually written, pointing into the exec vertex buffer (VBO offset or
 * plain memory).
 */
static void
vbo_exec_bind_arrays( struct gl_context *ctx )
{
   struct vbo_context *vbo = vbo_context(ctx);
   struct vbo_exec_context *exec = &vbo->exec;
   struct gl_client_array *arrays = exec->vtx.arrays;
   const GLuint count = exec->vtx.vert_count;
   const GLuint *map;   /* attribute remapping table, chosen below */
   GLuint attr;
   GLbitfield64 varying_inputs = 0x0;

   /* Install the default (ie Current) attributes first, then overlay
    * all active ones.
    */
   switch (get_program_mode(exec->ctx)) {
   case VP_NONE:
      /* fixed function: conventional attribs plus material values */
      for (attr = 0; attr < VERT_ATTRIB_FF_MAX; attr++) {
         exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
      }
      for (attr = 0; attr < MAT_ATTRIB_MAX; attr++) {
         ASSERT(VERT_ATTRIB_GENERIC(attr) < Elements(exec->vtx.inputs));
         exec->vtx.inputs[VERT_ATTRIB_GENERIC(attr)] =
            &vbo->mat_currval[attr];
      }
      map = vbo->map_vp_none;
      break;
   case VP_NV:
   case VP_ARB:
      /* The aliasing of attributes for NV vertex programs has already
       * occurred.  NV vertex programs cannot access material values,
       * nor attributes greater than VERT_ATTRIB_TEX7.
       */
      for (attr = 0; attr < VERT_ATTRIB_FF_MAX; attr++) {
         exec->vtx.inputs[attr] = &vbo->legacy_currval[attr];
      }
      for (attr = 0; attr < VERT_ATTRIB_GENERIC_MAX; attr++) {
         ASSERT(VERT_ATTRIB_GENERIC(attr) < Elements(exec->vtx.inputs));
         exec->vtx.inputs[VERT_ATTRIB_GENERIC(attr)] =
            &vbo->generic_currval[attr];
      }
      map = vbo->map_vp_arb;

      /* check if VERT_ATTRIB_POS is not read but VERT_BIT_GENERIC0 is read.
       * In that case we effectively need to route the data from
       * glVertexAttrib(0, val) calls to feed into the GENERIC0 input.
       */
      if ((ctx->VertexProgram._Current->Base.InputsRead &
           VERT_BIT_POS) == 0 &&
          (ctx->VertexProgram._Current->Base.InputsRead &
           VERT_BIT_GENERIC0)) {
         exec->vtx.inputs[VERT_ATTRIB_GENERIC0] = exec->vtx.inputs[0];
         exec->vtx.attrsz[VERT_ATTRIB_GENERIC0] = exec->vtx.attrsz[0];
         exec->vtx.attrptr[VERT_ATTRIB_GENERIC0] = exec->vtx.attrptr[0];
         exec->vtx.attrsz[0] = 0;
      }
      break;
   default:
      assert(0);
   }

   /* Make all active attributes (including edgeflag) available as
    * arrays of floats.
    */
   for (attr = 0; attr < VERT_ATTRIB_MAX ; attr++) {
      const GLuint src = map[attr];

      if (exec->vtx.attrsz[src]) {
         /* byte offset of this attribute within one interleaved vertex */
         GLsizeiptr offset = (GLbyte *)exec->vtx.attrptr[src] -
            (GLbyte *)exec->vtx.vertex;

         /* override the default array set above */
         ASSERT(attr < Elements(exec->vtx.inputs));
         ASSERT(attr < Elements(exec->vtx.arrays)); /* arrays[] */
         exec->vtx.inputs[attr] = &arrays[attr];

         if (_mesa_is_bufferobj(exec->vtx.bufferobj)) {
            /* a real buffer obj: Ptr is an offset, not a pointer*/
            assert(exec->vtx.bufferobj->Pointer); /* buf should be mapped */
            assert(offset >= 0);
            arrays[attr].Ptr =
               (GLubyte *)exec->vtx.bufferobj->Offset + offset;
         }
         else {
            /* Ptr into ordinary app memory */
            arrays[attr].Ptr = (GLubyte *)exec->vtx.buffer_map + offset;
         }
         arrays[attr].Size = exec->vtx.attrsz[src];
         arrays[attr].StrideB = exec->vtx.vertex_size * sizeof(GLfloat);
         arrays[attr].Stride = exec->vtx.vertex_size * sizeof(GLfloat);
         arrays[attr].Type = GL_FLOAT;
         arrays[attr].Format = GL_RGBA;
         arrays[attr].Enabled = 1;
         arrays[attr]._ElementSize = arrays[attr].Size * sizeof(GLfloat);
         _mesa_reference_buffer_object(ctx,
                                       &arrays[attr].BufferObj,
                                       exec->vtx.bufferobj);
         arrays[attr]._MaxElement = count; /* ??? */

         varying_inputs |= VERT_BIT(attr);
         ctx->NewState |= _NEW_ARRAY;
      }
   }

   _mesa_set_varying_vp_inputs( ctx, varying_inputs );
}
/**
 * Upload the current index buffer to a GPU buffer object (brw->ib.bo).
 *
 * Client-memory indices are copied into an upload buffer; VBO-backed
 * indices are used in place unless the offset is misaligned for the
 * index type, in which case they are rebased through a temporary copy.
 * Sets BRW_NEW_INDEX_BUFFER dirty state when the BO or type changes.
 */
static void brw_upload_indices(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
   GLuint ib_size;
   drm_intel_bo *old_bo = brw->ib.bo;  /* remember to detect BO changes */
   struct gl_buffer_object *bufferobj;
   GLuint offset;
   GLuint ib_type_size;

   /* non-indexed draw: nothing to upload */
   if (index_buffer == NULL)
      return;

   ib_type_size = _mesa_sizeof_type(index_buffer->type);
   ib_size = ib_type_size * index_buffer->count;
   bufferobj = index_buffer->obj;

   /* Turn into a proper VBO:
    */
   if (!_mesa_is_bufferobj(bufferobj)) {
      /* Get new bufferobj, offset:
       */
      intel_upload_data(brw, index_buffer->ptr, ib_size, ib_type_size,
                        &brw->ib.bo, &offset);
   } else {
      /* ptr is a byte offset into the bound element-array VBO */
      offset = (GLuint) (unsigned long) index_buffer->ptr;

      /* If the index buffer isn't aligned to its element size, we have to
       * rebase it into a temporary.
       */
      if ((ib_type_size - 1) & offset) {
         perf_debug("copying index buffer to a temporary to work around "
                    "misaligned offset %d\n", offset);

         GLubyte *map = ctx->Driver.MapBufferRange(ctx, offset, ib_size,
                                                   GL_MAP_READ_BIT,
                                                   bufferobj,
                                                   MAP_INTERNAL);

         intel_upload_data(brw, map, ib_size, ib_type_size,
                           &brw->ib.bo, &offset);

         ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
      } else {
         drm_intel_bo *bo =
            intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
                                   offset, ib_size);
         if (bo != brw->ib.bo) {
            /* swap the reference over to the new BO */
            drm_intel_bo_unreference(brw->ib.bo);
            brw->ib.bo = bo;
            drm_intel_bo_reference(bo);
         }
      }
   }

   /* Use 3DPRIMITIVE's start_vertex_offset to avoid re-uploading
    * the index buffer state when we're just moving the start index
    * of our drawing.
    */
   brw->ib.start_vertex_offset = offset / ib_type_size;

   if (brw->ib.bo != old_bo)
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;

   if (index_buffer->type != brw->ib.type) {
      brw->ib.type = index_buffer->type;
      brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
   }
}
static void replay_init( struct copy_context *copy ) { struct gl_context *ctx = copy->ctx; GLuint i; GLuint offset; const GLvoid *srcptr; /* Make a list of varying attributes and their vbo's. Also * calculate vertex size. */ copy->vertex_size = 0; for (i = 0; i < VERT_ATTRIB_MAX; i++) { struct gl_buffer_object *vbo = copy->array[i]->BufferObj; if (copy->array[i]->StrideB == 0) { copy->dstarray_ptr[i] = copy->array[i]; } else { GLuint j = copy->nr_varying++; copy->varying[j].attr = i; copy->varying[j].array = copy->array[i]; copy->varying[j].size = attr_size(copy->array[i]); copy->vertex_size += attr_size(copy->array[i]); if (_mesa_is_bufferobj(vbo) && !_mesa_bufferobj_mapped(vbo, MAP_INTERNAL)) ctx->Driver.MapBufferRange(ctx, 0, vbo->Size, GL_MAP_READ_BIT, vbo, MAP_INTERNAL); copy->varying[j].src_ptr = ADD_POINTERS(vbo->Mappings[MAP_INTERNAL].Pointer, copy->array[i]->Ptr); copy->dstarray_ptr[i] = ©->varying[j].dstarray; } } /* There must always be an index buffer. Currently require the * caller convert non-indexed prims to indexed. Could alternately * do it internally. 
*/ if (_mesa_is_bufferobj(copy->ib->obj) && !_mesa_bufferobj_mapped(copy->ib->obj, MAP_INTERNAL)) ctx->Driver.MapBufferRange(ctx, 0, copy->ib->obj->Size, GL_MAP_READ_BIT, copy->ib->obj, MAP_INTERNAL); srcptr = (const GLubyte *) ADD_POINTERS(copy->ib->obj->Mappings[MAP_INTERNAL].Pointer, copy->ib->ptr); switch (copy->ib->type) { case GL_UNSIGNED_BYTE: copy->translated_elt_buf = malloc(sizeof(GLuint) * copy->ib->count); copy->srcelt = copy->translated_elt_buf; for (i = 0; i < copy->ib->count; i++) copy->translated_elt_buf[i] = ((const GLubyte *)srcptr)[i]; break; case GL_UNSIGNED_SHORT: copy->translated_elt_buf = malloc(sizeof(GLuint) * copy->ib->count); copy->srcelt = copy->translated_elt_buf; for (i = 0; i < copy->ib->count; i++) copy->translated_elt_buf[i] = ((const GLushort *)srcptr)[i]; break; case GL_UNSIGNED_INT: copy->translated_elt_buf = NULL; copy->srcelt = (const GLuint *)srcptr; break; } /* Figure out the maximum allowed vertex buffer size: */ if (copy->vertex_size * copy->limits->max_verts <= copy->limits->max_vb_size) { copy->dstbuf_size = copy->limits->max_verts; } else { copy->dstbuf_size = copy->limits->max_vb_size / copy->vertex_size; } /* Allocate an output vertex buffer: * * XXX: This should be a VBO! 
*/ copy->dstbuf = malloc(copy->dstbuf_size * copy->vertex_size); copy->dstptr = copy->dstbuf; /* Setup new vertex arrays to point into the output buffer: */ for (offset = 0, i = 0; i < copy->nr_varying; i++) { const struct gl_client_array *src = copy->varying[i].array; struct gl_client_array *dst = ©->varying[i].dstarray; dst->Size = src->Size; dst->Type = src->Type; dst->Format = GL_RGBA; dst->Stride = copy->vertex_size; dst->StrideB = copy->vertex_size; dst->Ptr = copy->dstbuf + offset; dst->Enabled = GL_TRUE; dst->Normalized = src->Normalized; dst->Integer = src->Integer; dst->BufferObj = ctx->Shared->NullBufferObj; dst->_ElementSize = src->_ElementSize; offset += copy->varying[i].size; } /* Allocate an output element list: */ copy->dstelt_size = MIN2(65536, copy->ib->count * 2 + 3); copy->dstelt_size = MIN2(copy->dstelt_size, copy->limits->max_indices); copy->dstelt = malloc(sizeof(GLuint) * copy->dstelt_size); copy->dstelt_nr = 0; /* Setup the new index buffer to point to the allocated element * list: */ copy->dstib.count = 0; /* duplicates dstelt_nr */ copy->dstib.type = GL_UNSIGNED_INT; copy->dstib.obj = ctx->Shared->NullBufferObj; copy->dstib.ptr = copy->dstelt; }
/*
 * Render a bitmap.
 *
 * Accelerated glBitmap() path: converts the raster color to the
 * renderbuffer's pixel format and draws the bitmap as a series of
 * 32x32 immediate color-expand blits.  Returns false to request the
 * software fallback; returns true when the operation is fully handled
 * (including handled error cases).
 */
static bool
do_blit_bitmap( struct gl_context *ctx,
		GLint dstx, GLint dsty,
		GLsizei width, GLsizei height,
		const struct gl_pixelstore_attrib *unpack,
		const GLubyte *bitmap )
{
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *irb;
   GLfloat tmpColor[4];
   GLubyte ubcolor[4];
   GLuint color;
   GLsizei bitmap_width = width;    /* unclipped size, for source indexing */
   GLsizei bitmap_height = height;
   GLint px, py;
   GLuint stipple[32];
   GLint orig_dstx = dstx;          /* unclipped origin, for source indexing */
   GLint orig_dsty = dsty;

   /* Update draw buffer bounds */
   _mesa_update_state(ctx);

   if (ctx->Depth.Test) {
      /* The blit path produces incorrect results when depth testing is on.
       * It seems the blit Z coord is always 1.0 (the far plane) so fragments
       * will likely be obscured by other, closer geometry.
       */
      return false;
   }

   intel_prepare_render(intel);

   if (fb->_NumColorDrawBuffers != 1) {
      perf_debug("accelerated glBitmap() only supports rendering to a "
                 "single color buffer\n");
      return false;
   }

   irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);

   if (_mesa_is_bufferobj(unpack->BufferObj)) {
      bitmap = map_pbo(ctx, width, height, unpack, bitmap);
      if (bitmap == NULL)
	 return true;	/* even though this is an error, we're done */
   }

   COPY_4V(tmpColor, ctx->Current.RasterColor);

   if (_mesa_need_secondary_color(ctx)) {
       ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
   }

   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
   UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);

   /* Pack the raster color into the destination format; bail to the
    * fallback for formats the blitter can't fill.
    */
   switch (irb->mt->format) {
   case MESA_FORMAT_B8G8R8A8_UNORM:
   case MESA_FORMAT_B8G8R8X8_UNORM:
      color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
      break;
   case MESA_FORMAT_B5G6R5_UNORM:
      color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
      break;
   default:
      perf_debug("Unsupported format %s in accelerated glBitmap()\n",
                 _mesa_get_format_name(irb->mt->format));
      return false;
   }

   if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
      return false;

   /* Clip to buffer bounds and scissor. */
   if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
			     fb->_Xmax, fb->_Ymax,
			     &dstx, &dsty, &width, &height))
      goto out;

   dsty = y_flip(fb, dsty, height);

#define DY 32
#define DX 32

   /* Chop it all into chunks that can be digested by hardware: */
   for (py = 0; py < height; py += DY) {
      for (px = 0; px < width; px += DX) {
	 int h = MIN2(DY, height - py);
	 int w = MIN2(DX, width - px);
	 GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
	 GLenum logic_op = ctx->Color.ColorLogicOpEnabled ?
	    ctx->Color.LogicOp : GL_COPY;

	 assert(sz <= sizeof(stipple));
	 memset(stipple, 0, sz);

	 /* May need to adjust this when padding has been introduced in
	  * sz above:
	  *
	  * Have to translate destination coordinates back into source
	  * coordinates.
	  */
         int count = get_bitmap_rect(bitmap_width, bitmap_height, unpack,
                                     bitmap,
                                     -orig_dstx + (dstx + px),
                                     -orig_dsty + y_flip(fb, dsty + py, h),
                                     w, h,
                                     (GLubyte *)stipple,
                                     8,
                                     _mesa_is_winsys_fbo(fb));
         if (count == 0)
	    continue;

	 if (!intelEmitImmediateColorExpandBlit(intel,
						irb->mt->cpp,
						(GLubyte *)stipple,
						sz,
						color,
						irb->mt->region->pitch,
						irb->mt->region->bo,
						0,
						irb->mt->region->tiling,
						dstx + px,
						dsty + py,
						w, h,
						logic_op)) {
	    return false;
	 }

         /* Credit the set bits toward any active occlusion query. */
         if (ctx->Query.CurrentOcclusionObject)
            ctx->Query.CurrentOcclusionObject->Result += count;
      }
   }
out:

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
      intel_batchbuffer_flush(intel);

   if (_mesa_is_bufferobj(unpack->BufferObj)) {
      /* done with PBO so unmap it now */
      ctx->Driver.UnmapBuffer(ctx, unpack->BufferObj, MAP_INTERNAL);
   }

   intel_check_front_buffer_rendering(intel);

   return true;
}
GLboolean _mesa_validate_DrawElementsInstanced(struct gl_context *ctx, GLenum mode, GLsizei count, GLenum type, const GLvoid *indices, GLsizei numInstances, GLint basevertex) { FLUSH_CURRENT(ctx, 0); /* From the GLES3 specification, section 2.14.2 (Transform Feedback * Primitive Capture): * * The error INVALID_OPERATION is also generated by DrawElements, * DrawElementsInstanced, and DrawRangeElements while transform feedback * is active and not paused, regardless of mode. */ if (_mesa_is_gles3(ctx) && _mesa_is_xfb_active_and_unpaused(ctx)) { _mesa_error(ctx, GL_INVALID_OPERATION, "glDrawElements(transform feedback active)"); return GL_FALSE; } if (count <= 0) { if (count < 0) _mesa_error(ctx, GL_INVALID_VALUE, "glDrawElementsInstanced(count=%d)", count); return GL_FALSE; } if (!_mesa_valid_prim_mode(ctx, mode, "glDrawElementsInstanced")) { return GL_FALSE; } if (!valid_elements_type(ctx, type, "glDrawElementsInstanced")) return GL_FALSE; if (numInstances <= 0) { if (numInstances < 0) _mesa_error(ctx, GL_INVALID_VALUE, "glDrawElementsInstanced(numInstances=%d)", numInstances); return GL_FALSE; } if (!check_valid_to_render(ctx, "glDrawElementsInstanced")) return GL_FALSE; /* Vertex buffer object tests */ if (_mesa_is_bufferobj(ctx->Array.ArrayObj->ElementArrayBufferObj)) { /* use indices in the buffer object */ /* make sure count doesn't go outside buffer bounds */ if (index_bytes(type, count) > ctx->Array.ArrayObj->ElementArrayBufferObj->Size) { _mesa_warning(ctx, "glDrawElementsInstanced index out of buffer bounds"); return GL_FALSE; } } else { /* not using a VBO */ if (!indices) return GL_FALSE; } if (!check_index_bounds(ctx, count, type, indices, basevertex)) return GL_FALSE; return GL_TRUE; }
/**
 * Compute min and max elements by scanning the index buffer for
 * glDraw[Range]Elements() calls.
 * If primitive restart is enabled, we need to ignore restart
 * indexes when computing min/max.
 */
static void
vbo_get_minmax_index(struct gl_context *ctx,
                     const struct _mesa_prim *prim,
                     const struct _mesa_index_buffer *ib,
                     GLuint *min_index, GLuint *max_index,
                     const GLuint count)
{
   const GLboolean restart = ctx->Array._PrimitiveRestart;
   const GLuint restartIndex = _mesa_primitive_restart_index(ctx, ib->type);
   const int index_size = vbo_sizeof_ib_type(ib->type);
   const char *indices;
   GLuint i;

   indices = (char *) ib->ptr + prim->start * index_size;
   if (_mesa_is_bufferobj(ib->obj)) {
      /* Indices live in a VBO: map just the range being scanned. */
      GLsizeiptr size = MIN2(count * index_size, ib->obj->Size);
      indices = ctx->Driver.MapBufferRange(ctx, (GLintptr) indices, size,
                                           GL_MAP_READ_BIT, ib->obj);
   }

   switch (ib->type) {
   case GL_UNSIGNED_INT: {
      const GLuint *elt = (const GLuint *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;

      for (i = 0; i < count; i++) {
         const GLuint idx = elt[i];
         if (restart && idx == restartIndex)
            continue;   /* skip restart markers */
         if (idx > hi)
            hi = idx;
         if (idx < lo)
            lo = idx;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   case GL_UNSIGNED_SHORT: {
      const GLushort *elt = (const GLushort *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;

      for (i = 0; i < count; i++) {
         const GLuint idx = elt[i];
         if (restart && idx == restartIndex)
            continue;   /* skip restart markers */
         if (idx > hi)
            hi = idx;
         if (idx < lo)
            lo = idx;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   case GL_UNSIGNED_BYTE: {
      const GLubyte *elt = (const GLubyte *) indices;
      GLuint lo = ~0U;
      GLuint hi = 0;

      for (i = 0; i < count; i++) {
         const GLuint idx = elt[i];
         if (restart && idx == restartIndex)
            continue;   /* skip restart markers */
         if (idx > hi)
            hi = idx;
         if (idx < lo)
            lo = idx;
      }
      *min_index = lo;
      *max_index = hi;
      break;
   }
   default:
      assert(0);
      break;
   }

   if (_mesa_is_bufferobj(ib->obj)) {
      ctx->Driver.UnmapBuffer(ctx, ib->obj);
   }
}
/* XXX: Do this for TexSubImage also:
 */
/*
 * Attempt to upload a glTexImage directly from a bound PBO with a GPU
 * blit instead of a CPU copy.  Returns false whenever the fast path
 * cannot be used (no PBO bound, pixel transfer active, format mismatch,
 * array textures, allocation or blit failure) so the caller can fall
 * back to the generic path.
 */
static bool
try_pbo_upload(struct gl_context *ctx,
               struct gl_texture_image *image,
               const struct gl_pixelstore_attrib *unpack,
	       GLenum format, GLenum type, const void *pixels)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   struct intel_context *intel = intel_context(ctx);
   struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
   GLuint src_offset;
   drm_intel_bo *src_buffer;

   /* Fast path only applies when the source is a pixel buffer object. */
   if (!_mesa_is_bufferobj(unpack->BufferObj))
      return false;

   DBG("trying pbo upload\n");

   /* Any pixel transfer or skip state would require CPU processing. */
   if (intel->ctx._ImageTransferState ||
       unpack->SkipPixels || unpack->SkipRows) {
      DBG("%s: image transfer\n", __FUNCTION__);
      return false;
   }

   ctx->Driver.AllocTextureImageBuffer(ctx, image);

   if (!intelImage->mt) {
      DBG("%s: no miptree\n", __FUNCTION__);
      return false;
   }

   /* The blitter copies raw bytes; source and destination layouts must
    * match exactly.
    */
   if (!_mesa_format_matches_format_and_type(intelImage->mt->format,
                                             format, type, false)) {
      DBG("%s: format mismatch (upload to %s with format 0x%x, type 0x%x)\n",
	  __FUNCTION__, _mesa_get_format_name(intelImage->mt->format),
	  format, type);
      return false;
   }

   if (image->TexObject->Target == GL_TEXTURE_1D_ARRAY ||
       image->TexObject->Target == GL_TEXTURE_2D_ARRAY) {
      DBG("%s: no support for array textures\n", __FUNCTION__);
      return false;
   }

   src_buffer = intel_bufferobj_source(intel, pbo, 64, &src_offset);
   /* note: potential 64-bit ptr to 32-bit int cast */
   src_offset += (GLuint) (unsigned long) pixels;

   int src_stride =
      _mesa_image_row_stride(unpack, image->Width, format, type);

   /* Wrap the PBO's BO in a temporary miptree so the miptree blit path
    * can be reused for the copy.
    */
   struct intel_mipmap_tree *pbo_mt =
      intel_miptree_create_for_bo(intel,
                                  src_buffer,
                                  intelImage->mt->format,
                                  src_offset,
                                  image->Width, image->Height,
                                  src_stride, I915_TILING_NONE);
   if (!pbo_mt)
      return false;

   if (!intel_miptree_blit(intel,
                           pbo_mt, 0, 0,
                           0, 0, false,
                           intelImage->mt, image->Level, image->Face,
                           0, 0, false,
                           image->Width, image->Height, GL_COPY)) {
      DBG("%s: blit failed\n", __FUNCTION__);
      intel_miptree_release(&pbo_mt);
      return false;
   }

   intel_miptree_release(&pbo_mt);

   DBG("%s: success\n", __FUNCTION__);
   return true;
}
/**
 * Validate state common to glDraw{Arrays,Elements}Indirect* calls.
 *
 * Checks the VAO binding, enabled-array/VBO requirements on GLES 3.1,
 * primitive mode, transform feedback state, the alignment of the
 * indirect offset, and that the command fits inside the bound
 * DRAW_INDIRECT_BUFFER.  Records a GL error and returns GL_FALSE on any
 * failure; returns GL_TRUE when the draw may proceed.
 */
static GLboolean
valid_draw_indirect(struct gl_context *ctx,
                    GLenum mode, const GLvoid *indirect,
                    GLsizei size, const char *name)
{
   /* End of the command data, in buffer-object byte offsets. */
   const uint64_t end = (uint64_t) (uintptr_t) indirect + size;

   /* OpenGL ES 3.1 spec. section 10.5:
    *
    *      "DrawArraysIndirect requires that all data sourced for the
    *      command, including the DrawArraysIndirectCommand
    *      structure,  be in buffer objects,  and may not be called when
    *      the default vertex array object is bound."
    */
   if (ctx->Array.VAO == ctx->Array.DefaultVAO) {
      /* Include the entry-point name for consistency with every other
       * error raised in this function.
       */
      _mesa_error(ctx, GL_INVALID_OPERATION, "%s(no VAO bound)", name);
      return GL_FALSE;
   }

   /* From OpenGL ES 3.1 spec. section 10.5:
    *     "An INVALID_OPERATION error is generated if zero is bound to
    *     VERTEX_ARRAY_BINDING, DRAW_INDIRECT_BUFFER or to any enabled
    *     vertex array."
    *
    * Here we check that for each enabled vertex array we have a vertex
    * buffer bound.
    */
   if (_mesa_is_gles31(ctx) &&
       ctx->Array.VAO->_Enabled != ctx->Array.VAO->VertexAttribBufferMask) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "%s(No VBO bound)", name);
      return GL_FALSE;
   }

   if (!_mesa_valid_prim_mode(ctx, mode, name))
      return GL_FALSE;

   /* OpenGL ES 3.1 specification, section 10.5:
    *
    *      "An INVALID_OPERATION error is generated if
    *      transform feedback is active and not paused."
    */
   if (_mesa_is_gles31(ctx) && !ctx->Extensions.OES_geometry_shader &&
       _mesa_is_xfb_active_and_unpaused(ctx)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(TransformFeedback is active and not paused)", name);
      /* Bug fix: previously fell through after raising the error, so the
       * draw could still be reported as valid.
       */
      return GL_FALSE;
   }

   /* From OpenGL version 4.4. section 10.5
    * and OpenGL ES 3.1, section 10.6:
    *
    *      "An INVALID_VALUE error is generated if indirect is not a
    *       multiple of the size, in basic machine units, of uint."
    */
   if ((GLsizeiptr)indirect & (sizeof(GLuint) - 1)) {
      _mesa_error(ctx, GL_INVALID_VALUE,
                  "%s(indirect is not aligned)", name);
      return GL_FALSE;
   }

   if (!_mesa_is_bufferobj(ctx->DrawIndirectBuffer)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s: no buffer bound to DRAW_INDIRECT_BUFFER", name);
      return GL_FALSE;
   }

   if (_mesa_check_disallowed_mapping(ctx->DrawIndirectBuffer)) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(DRAW_INDIRECT_BUFFER is mapped)", name);
      return GL_FALSE;
   }

   /* From the ARB_draw_indirect specification:
    * "An INVALID_OPERATION error is generated if the commands source data
    *  beyond the end of the buffer object [...]"
    */
   if (ctx->DrawIndirectBuffer->Size < end) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "%s(DRAW_INDIRECT_BUFFER too small)", name);
      return GL_FALSE;
   }

   if (!check_valid_to_render(ctx, name))
      return GL_FALSE;

   return GL_TRUE;
}
/**
 * Bounds-checked glReadPixels (GL_ARB_robustness entry point).
 *
 * Validates framebuffer completeness, format/type combinations (with
 * stricter GLES rules), multisample and integer-format restrictions,
 * and PBO access bounds before dispatching to the driver's ReadPixels
 * hook.  Error strings use the "glReadPixels" name because the
 * validation is shared with the non-robust entry point.
 */
void GLAPIENTRY
_mesa_ReadnPixelsARB( GLint x, GLint y, GLsizei width, GLsizei height,
		      GLenum format, GLenum type, GLsizei bufSize,
                      GLvoid *pixels )
{
   GLenum err = GL_NO_ERROR;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   FLUSH_VERTICES(ctx, 0);
   FLUSH_CURRENT(ctx, 0);

   if (MESA_VERBOSE & VERBOSE_API)
      _mesa_debug(ctx, "glReadPixels(%d, %d, %s, %s, %p)\n",
                  width, height,
                  _mesa_lookup_enum_by_nr(format),
                  _mesa_lookup_enum_by_nr(type),
                  pixels);

   if (width < 0 || height < 0) {
      _mesa_error( ctx, GL_INVALID_VALUE,
                   "glReadPixels(width=%d height=%d)", width, height );
      return;
   }

   if (ctx->NewState)
      _mesa_update_state(ctx);

   if (ctx->ReadBuffer->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
      _mesa_error(ctx, GL_INVALID_FRAMEBUFFER_OPERATION_EXT,
                  "glReadPixels(incomplete framebuffer)" );
      return;
   }

   rb = _mesa_get_read_renderbuffer_for_format(ctx, format);
   if (rb == NULL) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glReadPixels(read buffer)");
      return;
   }

   /* OpenGL ES 1.x and OpenGL ES 2.0 impose additional restrictions on the
    * combinations of format and type that can be used.
    *
    * Technically, only two combinations are actually allowed:
    * GL_RGBA/GL_UNSIGNED_BYTE, and some implementation-specific internal
    * preferred combination.  This code doesn't know what that preferred
    * combination is, and Mesa can handle anything valid.  Just work instead.
    */
   if (_mesa_is_gles(ctx)) {
      if (ctx->API == API_OPENGLES2 &&
          _mesa_is_color_format(format) &&
          _mesa_get_color_read_format(ctx) == format &&
          _mesa_get_color_read_type(ctx) == type) {
         /* The implementation-chosen format/type pair is always legal. */
         err = GL_NO_ERROR;
      } else if (ctx->Version < 30) {
         err = _mesa_es_error_check_format_and_type(format, type, 2);
         if (err == GL_NO_ERROR) {
            if (type == GL_FLOAT || type == GL_HALF_FLOAT_OES) {
               err = GL_INVALID_OPERATION;
            }
         }
      } else {
         err = read_pixels_es3_error_check(format, type, rb);
      }

      if (err == GL_NO_ERROR && (format == GL_DEPTH_COMPONENT
          || format == GL_DEPTH_STENCIL)) {
         err = GL_INVALID_ENUM;
      }

      if (err != GL_NO_ERROR) {
         _mesa_error(ctx, err, "glReadPixels(invalid format %s and/or type %s)",
                     _mesa_lookup_enum_by_nr(format),
                     _mesa_lookup_enum_by_nr(type));
         return;
      }
   }

   err = _mesa_error_check_format_and_type(ctx, format, type);
   if (err != GL_NO_ERROR) {
      _mesa_error(ctx, err, "glReadPixels(invalid format %s and/or type %s)",
                  _mesa_lookup_enum_by_nr(format),
                  _mesa_lookup_enum_by_nr(type));
      return;
   }

   if (_mesa_is_user_fbo(ctx->ReadBuffer) &&
       ctx->ReadBuffer->Visual.samples > 0) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glReadPixels(multisample FBO)");
      return;
   }

   if (!_mesa_source_buffer_exists(ctx, format)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glReadPixels(no readbuffer)");
      return;
   }

   /* Check that the destination format and source buffer are both
    * integer-valued or both non-integer-valued.
    */
   if (ctx->Extensions.EXT_texture_integer && _mesa_is_color_format(format)) {
      /* NOTE(review): this local deliberately shadows the outer 'rb';
       * here we need the color read buffer specifically.
       */
      const struct gl_renderbuffer *rb = ctx->ReadBuffer->_ColorReadBuffer;
      const GLboolean srcInteger = _mesa_is_format_integer_color(rb->Format);
      const GLboolean dstInteger = _mesa_is_enum_format_integer(format);
      if (dstInteger != srcInteger) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glReadPixels(integer / non-integer format mismatch");
         return;
      }
   }

   if (width == 0 || height == 0)
      return; /* nothing to do */

   if (!_mesa_validate_pbo_access(2, &ctx->Pack, width, height, 1,
                                  format, type, bufSize, pixels)) {
      if (_mesa_is_bufferobj(ctx->Pack.BufferObj)) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glReadPixels(out of bounds PBO access)");
      } else {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glReadnPixelsARB(out of bounds access:"
                     " bufSize (%d) is too small)", bufSize);
      }
      return;
   }

   if (_mesa_is_bufferobj(ctx->Pack.BufferObj) &&
       _mesa_check_disallowed_mapping(ctx->Pack.BufferObj)) {
      /* buffer is mapped - that's an error */
      _mesa_error(ctx, GL_INVALID_OPERATION, "glReadPixels(PBO is mapped)");
      return;
   }

   ctx->Driver.ReadPixels(ctx, x, y, width, height,
                          format, type, &ctx->Pack, pixels);
}
/**
 * glBitmap entry point: validate parameters, dispatch by render mode,
 * then advance the raster position by (xmove, ymove).
 */
static void GLAPIENTRY
_mesa_Bitmap( GLsizei width, GLsizei height,
              GLfloat xorig, GLfloat yorig, GLfloat xmove, GLfloat ymove,
              const GLubyte *bitmap )
{
   GET_CURRENT_CONTEXT(ctx);
   ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);

   if (width < 0 || height < 0) {
      _mesa_error( ctx, GL_INVALID_VALUE, "glBitmap(width or height < 0)" );
      return;
   }

   if (!ctx->Current.RasterPosValid)
      return;    /* do nothing */

   /* Note: this call does state validation */
   if (!_mesa_valid_to_render(ctx, "glBitmap"))
      return;    /* the error code was recorded */

   if (ctx->RasterDiscard)
      return;

   switch (ctx->RenderMode) {
   case GL_RENDER:
      if (width > 0 && height > 0) {
         /* Truncate, to satisfy conformance tests (matches SGI's OpenGL). */
         const GLfloat epsilon = 0.0001F;
         const GLint px = IFLOOR(ctx->Current.RasterPos[0] + epsilon - xorig);
         const GLint py = IFLOOR(ctx->Current.RasterPos[1] + epsilon - yorig);

         if (_mesa_is_bufferobj(ctx->Unpack.BufferObj)) {
            /* unpack from PBO */
            if (!_mesa_validate_pbo_access(2, &ctx->Unpack, width, height,
                                           1, GL_COLOR_INDEX, GL_BITMAP,
                                           INT_MAX, (const GLvoid *) bitmap)) {
               _mesa_error(ctx, GL_INVALID_OPERATION,
                           "glBitmap(invalid PBO access)");
               return;
            }
            if (_mesa_bufferobj_mapped(ctx->Unpack.BufferObj)) {
               /* buffer is mapped - that's an error */
               _mesa_error(ctx, GL_INVALID_OPERATION,
                           "glBitmap(PBO is mapped)");
               return;
            }
         }

         ctx->Driver.Bitmap( ctx, px, py, width, height,
                             &ctx->Unpack, bitmap );
      }
      break;
#if _HAVE_FULL_GL
   case GL_FEEDBACK:
      FLUSH_CURRENT(ctx, 0);
      _mesa_feedback_token( ctx, (GLfloat) (GLint) GL_BITMAP_TOKEN );
      _mesa_feedback_vertex( ctx,
                             ctx->Current.RasterPos,
                             ctx->Current.RasterColor,
                             ctx->Current.RasterTexCoords[0] );
      break;
   default:
      ASSERT(ctx->RenderMode == GL_SELECT);
      /* Do nothing.  See OpenGL Spec, Appendix B, Corollary 6. */
      break;
#endif
   }

   /* update raster position */
   ctx->Current.RasterPos[0] += xmove;
   ctx->Current.RasterPos[1] += ymove;

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH)
      _mesa_flush(ctx);
}
/**
 * Accelerated glReadPixels via the radeon blit engine.
 *
 * Blits the read color buffer into either the bound pack PBO's BO or a
 * temporary GTT BO (whose contents are then copied to user memory).
 * Returns GL_FALSE to request the software fallback; GL_TRUE when the
 * read was fully handled (including a fully-clipped no-op).
 */
static GLboolean
do_blit_readpixels(struct gl_context * ctx,
                   GLint x, GLint y, GLsizei width, GLsizei height,
                   GLenum format, GLenum type,
                   const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    const struct radeon_renderbuffer *rrb =
        radeon_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
    const gl_format dst_format = gl_format_and_type_to_mesa_format(format, type);
    unsigned dst_rowstride, dst_imagesize, aligned_rowstride, flip_y;
    struct radeon_bo *dst_buffer;
    GLint dst_x = 0, dst_y = 0;
    intptr_t dst_offset;

    /* It's not worth if number of pixels to copy is really small */
    if (width * height < 100) {
        return GL_FALSE;
    }

    /* Destination layout must be blittable by this chip. */
    if (dst_format == MESA_FORMAT_NONE ||
        !radeon->vtbl.check_blit(dst_format) || !radeon->vtbl.blit) {
        return GL_FALSE;
    }

    /* Pixel transfer or logic ops would need CPU processing. */
    if (ctx->_ImageTransferState || ctx->Color._LogicOpEnabled) {
        return GL_FALSE;
    }

    if (pack->SwapBytes || pack->LsbFirst) {
        return GL_FALSE;
    }

    /* Row stride in pixels, honoring GL_PACK_ROW_LENGTH. */
    if (pack->RowLength > 0) {
        dst_rowstride = pack->RowLength;
    } else {
        dst_rowstride = width;
    }

    /* Fully clipped away: nothing to read, but the call is handled. */
    if (!_mesa_clip_copytexsubimage(ctx, &dst_x, &dst_y, &x, &y, &width,
                                    &height)) {
        return GL_TRUE;
    }
    assert(x >= 0 && y >= 0);

    aligned_rowstride = get_texture_image_row_stride(radeon, dst_format,
                                                     dst_rowstride, 0);
    dst_rowstride *= _mesa_get_format_bytes(dst_format);
    /* When packing straight into a PBO there is no intermediate copy to
     * fix up padding, so the strides must already agree.
     */
    if (_mesa_is_bufferobj(pack->BufferObj) &&
        aligned_rowstride != dst_rowstride)
        return GL_FALSE;
    dst_imagesize = get_texture_image_size(dst_format,
                                           aligned_rowstride,
                                           height, 1, 0);

    if (!_mesa_is_bufferobj(pack->BufferObj))
    {
        /* No PBO: blit into a temporary GTT BO, copied to 'pixels' below. */
        dst_buffer = radeon_bo_open(radeon->radeonScreen->bom, 0,
                                    dst_imagesize, 1024,
                                    RADEON_GEM_DOMAIN_GTT, 0);
        dst_offset = 0;
    }
    else
    {
        /* PBO bound: blit directly into it; 'pixels' is a byte offset. */
        dst_buffer = get_radeon_buffer_object(pack->BufferObj)->bo;
        dst_offset = (intptr_t)pixels;
    }

    /* Disable source Y flipping for FBOs */
    flip_y = (ctx->ReadBuffer->Name == 0);
    if (pack->Invert) {
        y = rrb->base.Height - height - y;
        flip_y = !flip_y;
    }

    if (radeon->vtbl.blit(ctx,
                          rrb->bo,
                          rrb->draw_offset,
                          rrb->base.Format,
                          rrb->pitch / rrb->cpp,
                          rrb->base.Width,
                          rrb->base.Height,
                          x,
                          y,
                          dst_buffer,
                          dst_offset,
                          dst_format,
                          aligned_rowstride / _mesa_get_format_bytes(dst_format),
                          width,
                          height,
                          0, /* dst_x */
                          0, /* dst_y */
                          width, height,
                          flip_y))
    {
        if (!_mesa_is_bufferobj(pack->BufferObj))
        {
            /* Copy from the temporary BO to user memory, then free it. */
            radeon_bo_map(dst_buffer, 0);
            copy_rows(pixels, dst_rowstride, dst_buffer->ptr,
                      aligned_rowstride, height, dst_rowstride);
            radeon_bo_unmap(dst_buffer);
            radeon_bo_unref(dst_buffer);
        }

        return GL_TRUE;
    }

    /* Blit failed: release the temporary BO (PBO BOs are not owned here). */
    if (!_mesa_is_bufferobj(pack->BufferObj))
        radeon_bo_unref(dst_buffer);

    return GL_FALSE;
}
/*
 * Execute glDrawPixels
 */
/**
 * glDrawPixels entry point: validate format/type and destination
 * buffers, then dispatch by render mode (render / feedback / select)
 * with the vertex-program override installed for the duration.
 *
 * Fix: corrected the "missing deest buffer" typo in the
 * GL_INVALID_OPERATION error message ("dest").
 */
static void GLAPIENTRY
_mesa_DrawPixels( GLsizei width, GLsizei height,
                  GLenum format, GLenum type, const GLvoid *pixels )
{
   GLenum err;
   GET_CURRENT_CONTEXT(ctx);
   ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);

   if (MESA_VERBOSE & VERBOSE_API)
      _mesa_debug(ctx, "glDrawPixels(%d, %d, %s, %s, %p) // to %s at %d, %d\n",
                  width, height,
                  _mesa_lookup_enum_by_nr(format),
                  _mesa_lookup_enum_by_nr(type),
                  pixels,
                  _mesa_lookup_enum_by_nr(ctx->DrawBuffer->ColorDrawBuffer[0]),
                  IROUND(ctx->Current.RasterPos[0]),
                  IROUND(ctx->Current.RasterPos[1]));

   if (width < 0 || height < 0) {
      _mesa_error( ctx, GL_INVALID_VALUE, "glDrawPixels(width or height < 0)" );
      return;
   }

   /* We're not using the current vertex program, and the driver may install
    * its own.  Note: this may dirty some state.
    */
   _mesa_set_vp_override(ctx, GL_TRUE);

   /* Note: this call does state validation */
   if (!_mesa_valid_to_render(ctx, "glDrawPixels")) {
      goto end;      /* the error code was recorded */
   }

   /* GL 3.0 introduced a new restriction on glDrawPixels() over what was in
    * GL_EXT_texture_integer.  From section 3.7.4 ("Rasterization of Pixel
    * Rectangles) on page 151 of the GL 3.0 specification:
    *
    *     "If format contains integer components, as shown in table 3.6, an
    *      INVALID OPERATION error is generated."
    *
    * Since DrawPixels rendering would be merely undefined if not an error (due
    * to a lack of defined mapping from integer data to gl_Color fragment shader
    * input), NVIDIA's implementation also just returns this error despite
    * exposing GL_EXT_texture_integer, just return an error regardless.
    */
   if (_mesa_is_enum_format_integer(format)) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels(integer format)");
      goto end;
   }

   err = _mesa_error_check_format_and_type(ctx, format, type);
   if (err != GL_NO_ERROR) {
      _mesa_error(ctx, err, "glDrawPixels(invalid format %s and/or type %s)",
                  _mesa_lookup_enum_by_nr(format),
                  _mesa_lookup_enum_by_nr(type));
      goto end;
   }

   /* do special format-related checks */
   switch (format) {
   case GL_STENCIL_INDEX:
   case GL_DEPTH_COMPONENT:
   case GL_DEPTH_STENCIL_EXT:
      /* these buffers must exist */
      if (!_mesa_dest_buffer_exists(ctx, format)) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glDrawPixels(missing dest buffer)");
         goto end;
      }
      break;
   case GL_COLOR_INDEX:
      if (ctx->PixelMaps.ItoR.Size == 0 ||
          ctx->PixelMaps.ItoG.Size == 0 ||
          ctx->PixelMaps.ItoB.Size == 0) {
         _mesa_error(ctx, GL_INVALID_OPERATION,
                     "glDrawPixels(drawing color index pixels into RGB buffer)");
         goto end;
      }
      break;
   default:
      /* for color formats it's not an error if the destination color
       * buffer doesn't exist.
       */
      break;
   }

   if (ctx->RasterDiscard) {
      goto end;
   }

   if (!ctx->Current.RasterPosValid) {
      goto end;  /* no-op, not an error */
   }

   if (ctx->RenderMode == GL_RENDER) {
      if (width > 0 && height > 0) {
         /* Round, to satisfy conformance tests (matches SGI's OpenGL) */
         GLint x = IROUND(ctx->Current.RasterPos[0]);
         GLint y = IROUND(ctx->Current.RasterPos[1]);

         if (_mesa_is_bufferobj(ctx->Unpack.BufferObj)) {
            /* unpack from PBO */
            if (!_mesa_validate_pbo_access(2, &ctx->Unpack, width, height,
                                           1, format, type, INT_MAX, pixels)) {
               _mesa_error(ctx, GL_INVALID_OPERATION,
                           "glDrawPixels(invalid PBO access)");
               goto end;
            }
            if (_mesa_bufferobj_mapped(ctx->Unpack.BufferObj)) {
               /* buffer is mapped - that's an error */
               _mesa_error(ctx, GL_INVALID_OPERATION,
                           "glDrawPixels(PBO is mapped)");
               goto end;
            }
         }

         ctx->Driver.DrawPixels(ctx, x, y, width, height, format, type,
                                &ctx->Unpack, pixels);
      }
   }
   else if (ctx->RenderMode == GL_FEEDBACK) {
      /* Feedback the current raster pos info */
      FLUSH_CURRENT( ctx, 0 );
      _mesa_feedback_token( ctx, (GLfloat) (GLint) GL_DRAW_PIXEL_TOKEN );
      _mesa_feedback_vertex( ctx,
                             ctx->Current.RasterPos,
                             ctx->Current.RasterColor,
                             ctx->Current.RasterTexCoords[0] );
   }
   else {
      ASSERT(ctx->RenderMode == GL_SELECT);
      /* Do nothing.  See OpenGL Spec, Appendix B, Corollary 6. */
   }

end:
   _mesa_set_vp_override(ctx, GL_FALSE);

   if (MESA_DEBUG_FLAGS & DEBUG_ALWAYS_FLUSH) {
      _mesa_flush(ctx);
   }
}