/** * @param for_bo Indicates that the caller is * intel_miptree_create_for_bo(). If true, then do not create * \c stencil_mt. */ struct intel_mipmap_tree * intel_miptree_create_layout(struct intel_context *intel, GLenum target, mesa_format format, GLuint first_level, GLuint last_level, GLuint width0, GLuint height0, GLuint depth0, bool for_bo) { struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1); if (!mt) return NULL; DBG("%s target %s format %s level %d..%d <-- %p\n", __func__, _mesa_enum_to_string(target), _mesa_get_format_name(format), first_level, last_level, mt); mt->target = target_to_target(target); mt->format = format; mt->first_level = first_level; mt->last_level = last_level; mt->logical_width0 = width0; mt->logical_height0 = height0; mt->logical_depth0 = depth0; /* The cpp is bytes per (1, blockheight)-sized block for compressed * textures. This is why you'll see divides by blockheight all over */ unsigned bw, bh; _mesa_get_format_block_size(format, &bw, &bh); assert(_mesa_get_format_bytes(mt->format) % bw == 0); mt->cpp = _mesa_get_format_bytes(mt->format) / bw; mt->compressed = _mesa_is_format_compressed(format); mt->refcount = 1; if (target == GL_TEXTURE_CUBE_MAP) { assert(depth0 == 1); depth0 = 6; } mt->physical_width0 = width0; mt->physical_height0 = height0; mt->physical_depth0 = depth0; intel_get_texture_alignment_unit(intel, mt->format, &mt->align_w, &mt->align_h); (void) intel; if (intel->is_945) i945_miptree_layout(mt); else i915_miptree_layout(mt); return mt; }
/**
 * Check whether an image unit binding is valid for shader image access.
 *
 * Returns GL_FALSE when the bound texture is missing or incomplete, when
 * the bound level/layer is out of range, or when the texture's format is
 * incompatible with the unit's declared format; GL_TRUE otherwise.
 */
GLboolean
_mesa_is_image_unit_valid(struct gl_context *ctx, struct gl_image_unit *u)
{
   struct gl_texture_object *t = u->TexObj;
   mesa_format tex_format;

   if (!t)
      return GL_FALSE;

   /* Completeness flags may be stale; recompute if neither is set. */
   if (!t->_BaseComplete && !t->_MipmapComplete)
      _mesa_test_texobj_completeness(ctx, t);

   /* Binding the base level only requires base completeness; any other
    * level requires full mipmap completeness. */
   if (u->Level < t->BaseLevel ||
       u->Level > t->_MaxLevel ||
       (u->Level == t->BaseLevel && !t->_BaseComplete) ||
       (u->Level != t->BaseLevel && !t->_MipmapComplete))
      return GL_FALSE;

   if (_mesa_tex_target_is_layered(t->Target) &&
       u->_Layer >= _mesa_get_texture_layers(t, u->Level))
      return GL_FALSE;

   if (t->Target == GL_TEXTURE_BUFFER) {
      tex_format = _mesa_get_shader_image_format(t->BufferObjectFormat);
   } else {
      /* For cube maps the resolved layer selects the face image. */
      struct gl_texture_image *img = (t->Target == GL_TEXTURE_CUBE_MAP ?
                                      t->Image[u->_Layer][u->Level] :
                                      t->Image[0][u->Level]);

      if (!img || img->Border ||
          img->NumSamples > ctx->Const.MaxImageSamples)
         return GL_FALSE;

      tex_format = _mesa_get_shader_image_format(img->InternalFormat);
   }

   if (!tex_format)
      return GL_FALSE;

   /* Match the texture format against the unit's actual format according
    * to the texture's compatibility rule (by texel size or by class). */
   switch (t->ImageFormatCompatibilityType) {
   case GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE:
      if (_mesa_get_format_bytes(tex_format) !=
          _mesa_get_format_bytes(u->_ActualFormat))
         return GL_FALSE;
      break;

   case GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS:
      if (get_image_format_class(tex_format) !=
          get_image_format_class(u->_ActualFormat))
         return GL_FALSE;
      break;

   default:
      assert(!"Unexpected image format compatibility type");
   }

   return GL_TRUE;
}
/**
 * Create a __DRIimage for one plane of a planar (e.g. YUV) parent image.
 *
 * The new image shares the parent's buffer object; plane geometry, offset
 * and stride come from the parent's planar format description.  Returns
 * NULL for a missing/non-planar parent, an out-of-range plane index,
 * allocation failure, or an out-of-bounds subimage.
 */
static __DRIimage *
intel_from_planar(__DRIimage *parent, int plane, void *loaderPrivate)
{
   int width, height, offset, stride, dri_format, cpp, index, pitch;
   struct intel_image_format *f;
   uint32_t mask_x, mask_y;
   __DRIimage *image;

   if (parent == NULL || parent->planar_format == NULL)
      return NULL;

   f = parent->planar_format;

   if (plane >= f->nplanes)
      return NULL;

   /* Subsampled planes are smaller than the parent by per-plane shifts. */
   width = parent->region->width >> f->planes[plane].width_shift;
   height = parent->region->height >> f->planes[plane].height_shift;
   dri_format = f->planes[plane].dri_format;
   index = f->planes[plane].buffer_index;
   offset = parent->offsets[index];
   stride = parent->strides[index];

   image = intel_allocate_image(dri_format, loaderPrivate);
   /* Bug fix: the result was dereferenced without a NULL check. */
   if (image == NULL)
      return NULL;

   cpp = _mesa_get_format_bytes(image->format); /* safe since no none format */
   pitch = stride / cpp;
   if (offset + height * cpp * pitch > parent->region->bo->size) {
      _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
      free(image);
      return NULL;
   }

   image->region = calloc(sizeof(*image->region), 1);
   if (image->region == NULL) {
      free(image);
      return NULL;
   }

   image->region->cpp = _mesa_get_format_bytes(image->format);
   image->region->width = width;
   image->region->height = height;
   image->region->pitch = pitch;
   image->region->refcount = 1;
   /* Share the parent's BO; take a reference for this image's region. */
   image->region->bo = parent->region->bo;
   drm_intel_bo_reference(image->region->bo);
   image->region->tiling = parent->region->tiling;
   image->region->screen = parent->region->screen;
   image->offset = offset;

   /* The plane offset must land on a tile boundary for tiled surfaces. */
   intel_region_get_tile_masks(image->region, &mask_x, &mask_y, false);
   if (offset & mask_x)
      _mesa_warning(NULL,
                    "intel_create_sub_image: offset not on tile boundary");

   return image;
}
/**
 * Validate an image unit binding for shader image access.
 *
 * Returns GL_FALSE when the bound texture is missing/incomplete, the
 * level or layer is out of range, the image has a border or too many
 * samples, or the image format is incompatible with the unit's format.
 */
static GLboolean
validate_image_unit(struct gl_context *ctx, struct gl_image_unit *u)
{
   struct gl_texture_object *t = u->TexObj;
   struct gl_texture_image *img;

   if (!t || u->Level < t->BaseLevel || u->Level > t->_MaxLevel)
      return GL_FALSE;

   /* Refresh _BaseComplete/_MipmapComplete before checking them. */
   _mesa_test_texobj_completeness(ctx, t);

   /* Base level needs base completeness; other levels need full mipmap
    * completeness. */
   if ((u->Level == t->BaseLevel && !t->_BaseComplete) ||
       (u->Level != t->BaseLevel && !t->_MipmapComplete))
      return GL_FALSE;

   if (_mesa_tex_target_is_layered(t->Target) &&
       u->Layer >= _mesa_get_texture_layers(t, u->Level))
      return GL_FALSE;

   /* For cube maps the layer selects the face image. */
   if (t->Target == GL_TEXTURE_CUBE_MAP)
      img = t->Image[u->Layer][u->Level];
   else
      img = t->Image[0][u->Level];

   if (!img || img->Border ||
       get_image_format_class(img->TexFormat) == IMAGE_FORMAT_CLASS_NONE ||
       img->NumSamples > ctx->Const.MaxImageSamples)
      return GL_FALSE;

   /* Compare texture format to the unit's actual format per the
    * texture's compatibility rule (by texel size or by class). */
   switch (t->ImageFormatCompatibilityType) {
   case GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE:
      if (_mesa_get_format_bytes(img->TexFormat) !=
          _mesa_get_format_bytes(u->_ActualFormat))
         return GL_FALSE;
      break;

   case GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS:
      if (get_image_format_class(img->TexFormat) !=
          get_image_format_class(u->_ActualFormat))
         return GL_FALSE;
      break;

   default:
      assert(!"Unexpected image format compatibility type");
   }

   return GL_TRUE;
}
/**
 * Decompress a compressed texture image into a GL_RGBA/GL_FLOAT image.
 *
 * \param srcRowStride stride in bytes between rows of blocks in the
 *        compressed source image.
 * \param dest destination buffer, receives width*height RGBA float texels.
 */
void
_mesa_decompress_image(mesa_format format, GLuint width, GLuint height,
                       const GLubyte *src, GLint srcRowStride,
                       GLfloat *dest)
{
   GLuint blockBytes, blockW, blockH, row, col;
   GLint blockRowStride;
   compressed_fetch_func fetch = _mesa_get_compressed_fetch_func(format);

   if (!fetch) {
      _mesa_problem(NULL, "Unexpected format in _mesa_decompress_image()");
      return;
   }

   blockBytes = _mesa_get_format_bytes(format);
   _mesa_get_format_block_size(format, &blockW, &blockH);

   /* Convert the byte stride into the stride the fetcher expects. */
   blockRowStride = srcRowStride * blockH / blockBytes;

   for (row = 0; row < height; row++) {
      for (col = 0; col < width; col++) {
         /* Each fetch decodes one texel into four floats. */
         fetch(src, blockRowStride, col, row, dest);
         dest += 4;
      }
   }
}
/**
 * Store a 32-bit integer or float depth component texture image.
 *
 * Handles MESA_FORMAT_Z_UNORM32 and MESA_FORMAT_Z_FLOAT32; unpacks each
 * source row into the destination slices.  Always returns GL_TRUE.
 */
static GLboolean
_mesa_texstore_z32(TEXSTORE_PARAMS)
{
   /* Scale factor used when unpacking to unsigned-normalized Z. */
   const GLuint depthScale = 0xffffffff;
   GLenum dstType;
   (void) dims;
   assert(dstFormat == MESA_FORMAT_Z_UNORM32 ||
          dstFormat == MESA_FORMAT_Z_FLOAT32);
   assert(_mesa_get_format_bytes(dstFormat) == sizeof(GLuint));

   /* Choose the unpack target type matching the destination format. */
   if (dstFormat == MESA_FORMAT_Z_UNORM32)
      dstType = GL_UNSIGNED_INT;
   else
      dstType = GL_FLOAT;

   {
      /* general path */
      GLint img, row;
      for (img = 0; img < srcDepth; img++) {
         GLubyte *dstRow = dstSlices[img];
         for (row = 0; row < srcHeight; row++) {
            const GLvoid *src = _mesa_image_address(dims, srcPacking,
                srcAddr, srcWidth, srcHeight, srcFormat, srcType,
                img, row, 0);
            /* Convert one source row of depth values into dstRow. */
            _mesa_unpack_depth_span(ctx, srcWidth,
                                    dstType, dstRow,
                                    depthScale, srcType, src, srcPacking);
            dstRow += dstRowStride;
         }
      }
   }
   return GL_TRUE;
}
/**
 * Get array of 32-bit z values from the depth buffer.  With clipping.
 * Note: the returned values are always in the range [0, 2^32-1].
 * Coordinates that fall outside the renderbuffer are skipped.
 */
static void
get_z32_values(struct gl_context *ctx, struct gl_renderbuffer *rb,
               GLuint count, const GLint x[], const GLint y[],
               GLuint zbuffer[])
{
   struct swrast_renderbuffer *srb = swrast_renderbuffer(rb);
   const GLint width = rb->Width, height = rb->Height;
   const GLubyte *map = _swrast_pixel_address(rb, 0, 0);
   const GLint rowStride = srb->RowStride;
   GLuint n;

   if (rb->Format == MESA_FORMAT_Z_UNORM32) {
      /* Native 32-bit Z: read values directly. */
      for (n = 0; n < count; n++) {
         if (x[n] < 0 || y[n] < 0 || x[n] >= width || y[n] >= height)
            continue;
         zbuffer[n] = *(const GLuint *) (map + y[n] * rowStride + x[n] * 4);
      }
   }
   else {
      /* Other formats: unpack each texel to a 32-bit Z value. */
      const GLint bpp = _mesa_get_format_bytes(rb->Format);
      for (n = 0; n < count; n++) {
         if (x[n] < 0 || y[n] < 0 || x[n] >= width || y[n] >= height)
            continue;
         _mesa_unpack_uint_z_row(rb->Format, 1,
                                 map + y[n] * rowStride + x[n] * bpp,
                                 &zbuffer[n]);
      }
   }
}
/**
 * Map a texture image's miptree BO for software rasterizer access, and
 * fill in the image's Map pointer, RowStride and (for 3D textures)
 * per-slice ImageOffsets.  No-op if the image has no miptree.
 */
static void
radeon_swrast_map_image(radeonContextPtr rmesa,
                        radeon_texture_image *image)
{
   GLuint level, face;
   radeon_mipmap_tree *mt;
   GLuint texel_size;
   radeon_mipmap_level *lvl;
   int rs;

   if (!image || !image->mt)
      return;

   texel_size = _mesa_get_format_bytes(image->base.Base.TexFormat);
   level = image->base.Base.Level;
   face = image->base.Base.Face;
   mt = image->mt;

   lvl = &image->mt->levels[level];

   /* Row stride expressed in texels rather than bytes. */
   rs = lvl->rowstride / texel_size;

   /* NOTE(review): mapped with write access (second arg is 1) even for
    * read-only swrast fallbacks — presumably intentional; confirm. */
   radeon_bo_map(mt->bo, 1);

   image->base.Map = mt->bo->ptr + lvl->faces[face].offset;
   if (mt->target == GL_TEXTURE_3D) {
      int i;

      /* 3D textures: slice offsets within the level, in texels. */
      for (i = 0; i < mt->levels[level].depth; i++)
         image->base.ImageOffsets[i] = rs * lvl->height * i;
   }
   image->base.RowStride = rs;
}
/**
 * Put an array of 32-bit z values into the depth buffer.
 * Note: the z values are always in the range [0, 2^32-1].
 * Writes only pixels whose mask entry is set and whose coordinates are
 * inside the renderbuffer.
 */
static void
put_z32_values(struct gl_context *ctx, struct gl_renderbuffer *rb,
               GLuint count, const GLint x[], const GLint y[],
               const GLuint zvalues[], const GLubyte mask[])
{
   struct swrast_renderbuffer *srb = swrast_renderbuffer(rb);
   const GLint width = rb->Width, height = rb->Height;
   GLubyte *map = _swrast_pixel_address(rb, 0, 0);
   const GLint rowStride = srb->RowStride;
   GLuint n;

   if (rb->Format == MESA_FORMAT_Z_UNORM32) {
      /* Native 32-bit Z: store values directly. */
      for (n = 0; n < count; n++) {
         if (!mask[n] || x[n] < 0 || y[n] < 0 ||
             x[n] >= width || y[n] >= height)
            continue;
         *(GLuint *) (map + y[n] * rowStride + x[n] * 4) = zvalues[n];
      }
   }
   else {
      /* Other formats: pack each 32-bit Z value into the buffer format. */
      gl_pack_uint_z_func packZ = _mesa_get_pack_uint_z_func(rb->Format);
      const GLint bpp = _mesa_get_format_bytes(rb->Format);
      for (n = 0; n < count; n++) {
         if (!mask[n] || x[n] < 0 || y[n] < 0 ||
             x[n] >= width || y[n] >= height)
            continue;
         packZ(zvalues + n, map + y[n] * rowStride + x[n] * bpp);
      }
   }
}
/** * Apply depth (Z) buffer testing to the span. * \return approx number of pixels that passed (only zero is reliable) */ GLuint _swrast_depth_test_span(struct gl_context *ctx, SWspan *span) { struct gl_framebuffer *fb = ctx->DrawBuffer; struct gl_renderbuffer *rb = fb->Attachment[BUFFER_DEPTH].Renderbuffer; const GLint bpp = _mesa_get_format_bytes(rb->Format); void *zStart; const GLuint count = span->end; const GLuint *fragZ = span->array->z; GLubyte *mask = span->array->mask; void *zBufferVals; GLuint *zBufferTemp = NULL; GLuint passed; GLuint zBits = _mesa_get_format_bits(rb->Format, GL_DEPTH_BITS); GLboolean ztest16 = GL_FALSE; if (span->arrayMask & SPAN_XY) zStart = NULL; else zStart = _swrast_pixel_address(rb, span->x, span->y); if (rb->Format == MESA_FORMAT_Z_UNORM16 && !(span->arrayMask & SPAN_XY)) { /* directly read/write row of 16-bit Z values */ zBufferVals = zStart; ztest16 = GL_TRUE; } else if (rb->Format == MESA_FORMAT_Z_UNORM32 && !(span->arrayMask & SPAN_XY)) { /* directly read/write row of 32-bit Z values */ zBufferVals = zStart; } else { if (_mesa_get_format_datatype(rb->Format) != GL_UNSIGNED_NORMALIZED) { _mesa_problem(ctx, "Incorrectly writing swrast's integer depth " "values to %s depth buffer", _mesa_get_format_name(rb->Format)); } /* copy Z buffer values into temp buffer (32-bit Z values) */ zBufferTemp = malloc(count * sizeof(GLuint)); if (!zBufferTemp) return 0; if (span->arrayMask & SPAN_XY) { get_z32_values(ctx, rb, count, span->array->x, span->array->y, zBufferTemp); } else { _mesa_unpack_uint_z_row(rb->Format, count, zStart, zBufferTemp); } if (zBits == 24) { GLuint i; /* Convert depth buffer values from 32 to 24 bits to match the * fragment Z values generated by rasterization. */ for (i = 0; i < count; i++) { zBufferTemp[i] >>= 8; } } else if (zBits == 16) {
/**
 * Create a __DRIimage from a flink name, wrapping the buffer in an
 * intel_region.  \c pitch is given in pixels and converted to bytes.
 * Returns NULL on allocation or lookup failure.
 */
static __DRIimage *
intel_create_image_from_name(__DRIscreen *screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
{
   struct intel_screen *intelScreen = screen->driverPrivate;
   __DRIimage *image = intel_allocate_image(format, loaderPrivate);
   int bytes_per_pixel;

   if (image == NULL)
      return NULL;

   /* MESA_FORMAT_NONE means the caller's pitch is already in bytes. */
   bytes_per_pixel = (image->format == MESA_FORMAT_NONE)
      ? 1
      : _mesa_get_format_bytes(image->format);

   image->region = intel_region_alloc_for_handle(intelScreen,
                                                 bytes_per_pixel,
                                                 width, height,
                                                 pitch * bytes_per_pixel,
                                                 name, "image");
   if (image->region == NULL) {
      free(image);
      return NULL;
   }

   intel_setup_image_from_dimensions(image);

   return image;
}
/**
 * Recompute the values of the context's rowaddr array.
 *
 * Each entry points at the start of one image row inside the user's
 * buffer; the mapping direction depends on whether Y=0 is the bottom
 * (yup) or top line of the window.
 */
static void
compute_row_addresses( OSMesaContext osmesa )
{
   GLubyte *origin = (GLubyte *) osmesa->srb->Buffer;
   const GLint height = osmesa->srb->Base.Height;
   GLint rowPixels, bytesPerRow, row;

   /* Row length in pixels: user override or the renderbuffer width. */
   rowPixels = osmesa->userRowLength ? osmesa->userRowLength
                                     : osmesa->srb->Base.Width;
   bytesPerRow = rowPixels *
                 _mesa_get_format_bytes(osmesa->srb->Base.Format);

   for (row = 0; row < height; row++) {
      /* yup: Y=0 is the bottom line; otherwise Y=0 is the top line. */
      const GLint src = osmesa->yup ? row : height - row - 1;
      osmesa->rowaddr[row] = (GLvoid *) (origin + src * bytesPerRow);
   }
}
/**
 * Allocate a new __DRIimage plus backing intel_region.
 *
 * Cursor images must be exactly 64x64 and are untiled; everything else
 * is X-tiled.  Returns NULL on invalid parameters or allocation failure.
 */
static __DRIimage *
intel_create_image(__DRIscreen *screen,
                   int width, int height, int format,
                   unsigned int use,
                   void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_screen *intelScreen = screen->driverPrivate;
   uint32_t tiling;
   int cpp;

   tiling = I915_TILING_X;
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (width != 64 || height != 64)
         return NULL;
      tiling = I915_TILING_NONE;
   }

   image = intel_allocate_image(format, loaderPrivate);
   /* Bug fix: the result was dereferenced without a NULL check. */
   if (image == NULL)
      return NULL;

   cpp = _mesa_get_format_bytes(image->format);
   image->region =
      intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
   if (image->region == NULL) {
      free(image);
      return NULL;
   }

   return image;
}
/**
 * Create a __DRIimage from a flink name by opening the GEM buffer
 * directly (no intel_region).  \c pitch is given in pixels and stored
 * in bytes.  Returns NULL on allocation or lookup failure.
 */
static __DRIimage *
intel_create_image_from_name(__DRIscreen *screen,
                             int width, int height, int format,
                             int name, int pitch, void *loaderPrivate)
{
   struct intel_screen *intelScreen = screen->driverPrivate;
   __DRIimage *image = intel_allocate_image(format, loaderPrivate);
   int bytes_per_pixel;

   if (image == NULL)
      return NULL;

   /* MESA_FORMAT_NONE means the caller's pitch is already in bytes. */
   bytes_per_pixel = (image->format == MESA_FORMAT_NONE)
      ? 1
      : _mesa_get_format_bytes(image->format);

   image->width = width;
   image->height = height;
   image->pitch = pitch * bytes_per_pixel;

   image->bo = drm_intel_bo_gem_create_from_name(intelScreen->bufmgr,
                                                 "image", name);
   if (!image->bo) {
      free(image);
      return NULL;
   }

   return image;
}
/**
 * Store a 16-bit integer depth component texture image.
 *
 * Unpacks each source row into the MESA_FORMAT_Z_UNORM16 destination
 * slices.  Always returns GL_TRUE.
 */
static GLboolean
_mesa_texstore_z16(TEXSTORE_PARAMS)
{
   /* Scale factor for unsigned-normalized 16-bit Z. */
   const GLuint depthScale = 0xffff;
   (void) dims;
   assert(dstFormat == MESA_FORMAT_Z_UNORM16);
   assert(_mesa_get_format_bytes(dstFormat) == sizeof(GLushort));

   {
      /* general path */
      GLint img, row;
      for (img = 0; img < srcDepth; img++) {
         GLubyte *dstRow = dstSlices[img];
         for (row = 0; row < srcHeight; row++) {
            const GLvoid *src = _mesa_image_address(dims, srcPacking,
                srcAddr, srcWidth, srcHeight, srcFormat, srcType,
                img, row, 0);
            GLushort *dst16 = (GLushort *) dstRow;
            /* Convert one source row of depth values into dst16. */
            _mesa_unpack_depth_span(ctx, srcWidth,
                                    GL_UNSIGNED_SHORT, dst16, depthScale,
                                    srcType, src, srcPacking);
            dstRow += dstRowStride;
         }
      }
   }
   return GL_TRUE;
}
/**
 * Compute the row stride (in bytes) for one row of compressed blocks,
 * rounded up to satisfy \c minStride (itself rounded to a whole block).
 */
static unsigned get_aligned_compressed_row_stride(
		mesa_format format,
		unsigned width,
		unsigned minStride)
{
   const unsigned blockBytes = _mesa_get_format_bytes(format);
   unsigned blockWidth, blockHeight, numBlocks, stride;

   _mesa_get_format_block_size(format, &blockWidth, &blockHeight);

   /* Blocks needed to cover the width, times bytes per block. */
   numBlocks = (width + blockWidth - 1) / blockWidth;
   stride = numBlocks * blockBytes;

   /* Honour the minimum stride, rounded up to the next full block. */
   if (stride < minStride)
      stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"%s width %u, minStride %u, block(bytes %u, width %u):"
		"stride %u\n",
		__func__, width, minStride,
		blockBytes, blockWidth, stride);

   return stride;
}
/**
 * Allocate a new __DRIimage plus backing intel_region.
 *
 * Cursor images must be exactly 64x64 and are untiled; everything else
 * is X-tiled.  __DRI_IMAGE_USE_WRITE is only supported for cursor
 * images.  Returns NULL on invalid parameters or allocation failure.
 */
static __DRIimage *
intel_create_image(__DRIscreen *screen,
                   int width, int height, int format,
                   unsigned int use,
                   void *loaderPrivate)
{
   __DRIimage *image;
   struct intel_screen *intelScreen = screen->driverPrivate;
   uint32_t tiling;
   int cpp;

   tiling = I915_TILING_X;
   if (use & __DRI_IMAGE_USE_CURSOR) {
      if (width != 64 || height != 64)
         return NULL;
      tiling = I915_TILING_NONE;
   }

   /* We only support write for cursor drm images */
   if ((use & __DRI_IMAGE_USE_WRITE) &&
       use != (__DRI_IMAGE_USE_WRITE | __DRI_IMAGE_USE_CURSOR))
      return NULL;

   image = intel_allocate_image(format, loaderPrivate);
   /* Bug fix: the result was dereferenced without a NULL check. */
   if (image == NULL)
      return NULL;

   image->usage = use;
   cpp = _mesa_get_format_bytes(image->format);
   image->region =
      intel_region_alloc(intelScreen, tiling, cpp, width, height, true);
   if (image->region == NULL) {
      FREE(image);
      return NULL;
   }

   return image;
}
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
GLboolean
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   GLboolean isCompressed = _mesa_is_format_compressed(image->TexFormat);
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->level;

   /* Images with borders are never pulled into mipmap trees. */
   if (image->Border)
      return GL_FALSE;

   /* Internal format and compressed-ness must agree with the tree. */
   if (image->InternalFormat != mt->internal_format ||
       isCompressed != mt->compressed)
      return GL_FALSE;

   /* For uncompressed formats the bytes-per-texel must also match. */
   if (!isCompressed &&
       !mt->compressed &&
       _mesa_get_format_bytes(image->TexFormat) != mt->cpp)
      return GL_FALSE;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (image->Width != mt->level[level].width ||
       image->Height != mt->level[level].height ||
       image->Depth != mt->level[level].depth)
      return GL_FALSE;

   return GL_TRUE;
}
/**
 * Back a texture's storage directly with a buffer object (PBO), creating
 * a miptree that wraps the BO instead of copying the data.
 *
 * Returns false (so the caller falls back to a CPU path) when the PBO's
 * offset/stride alignment or format is unusable as a render target for
 * writable access, or when miptree/swrast setup fails.
 */
static bool
intel_set_texture_storage_for_buffer_object(struct gl_context *ctx,
                                            struct gl_texture_object *tex_obj,
                                            struct gl_buffer_object *buffer_obj,
                                            uint32_t buffer_offset,
                                            uint32_t row_stride,
                                            bool read_only)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_texture_object *intel_texobj = intel_texture_object(tex_obj);
   struct gl_texture_image *image = tex_obj->Image[0][0];
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_buffer_object *intel_buffer_obj =
      intel_buffer_object(buffer_obj);

   if (!read_only) {
      /* Renderbuffers have the restriction that the buffer offset and
       * surface pitch must be a multiple of the element size.  If it's
       * not, we have to fail and fall back to software.
       */
      int cpp = _mesa_get_format_bytes(image->TexFormat);
      if (buffer_offset % cpp || row_stride % cpp) {
         perf_debug("Bad PBO alignment; fallback to CPU mapping\n");
         return false;
      }

      if (!brw->format_supported_as_render_target[image->TexFormat]) {
         perf_debug("Non-renderable PBO format; fallback to CPU mapping\n");
         return false;
      }
   }

   assert(intel_texobj->mt == NULL);

   /* Resolve the buffer object to its BO over the range we will wrap. */
   drm_intel_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
                                             buffer_offset,
                                             row_stride * image->Height);
   intel_texobj->mt =
      intel_miptree_create_for_bo(brw, bo,
                                  image->TexFormat,
                                  buffer_offset,
                                  image->Width, image->Height, image->Depth,
                                  row_stride,
                                  0);
   if (!intel_texobj->mt)
      return false;

   if (!_swrast_init_texture_image(image))
      return false;

   intel_miptree_reference(&intel_image->mt, intel_texobj->mt);

   /* The miptree is in a validated state, so no need to check later. */
   intel_texobj->needs_validate = false;
   intel_texobj->validated_first_level = 0;
   intel_texobj->validated_last_level = 0;
   intel_texobj->_Format = intel_texobj->mt->format;

   return true;
}
/**
 * TexSubImage2D handler: store the subimage in Mesa's copy, then
 * (re)allocate the on-card texture memory for this level and upload the
 * full rows covering the updated region.
 */
static void sisTexSubImage2D( GLcontext *ctx,
			      GLenum target,
			      GLint level,
			      GLint xoffset, GLint yoffset,
			      GLsizei width, GLsizei height,
			      GLenum format, GLenum type,
			      const GLvoid *pixels,
			      const struct gl_pixelstore_attrib *packing,
			      struct gl_texture_object *texObj,
			      struct gl_texture_image *texImage )
{
   sisContextPtr smesa = SIS_CONTEXT(ctx);
   sisTexObjPtr t;
   GLuint copySize;
   GLint texelBytes;
   const char *src;
   GLubyte *dst;
   int j;
   GLuint soffset;

   /* Lazily create the driver-side texture object. */
   if ( texObj->DriverData == NULL )
      sisAllocTexObj( texObj );
   t = texObj->DriverData;

   /* Let core Mesa update its own copy of the texture first. */
   _mesa_store_texsubimage2d(ctx, target, level, xoffset, yoffset, width,
			     height, format, type, pixels, packing, texObj,
			     texImage);

   /* Allocate offscreen space for the texture */
   sisFreeTexImage(smesa, t, level);
   sisAllocTexImage(smesa, t, level, texImage);

   /* Upload the texture */
   WaitEngIdle(smesa);
   texelBytes = _mesa_get_format_bytes(texImage->TexFormat);

   copySize = width * texelBytes;
   /* Both src and dst walk full image rows at the subimage's origin. */
   src = (char *)texImage->Data +
         (xoffset + yoffset * texImage->Width) * texelBytes;
   dst = t->image[level].Data +
         (xoffset + yoffset * texImage->Width) * texelBytes;
   soffset = texImage->Width * texelBytes;

   for (j = yoffset; j < yoffset + height; j++) {
      memcpy( dst, src, copySize );
      src += soffset;
      dst += soffset;
   }

   /* On-card texture contents changed: invalidate the texture cache. */
   smesa->clearTexCache = GL_TRUE;

   /* A format change requires re-emitting texture environment state. */
   if (smesa->PrevTexFormat[ctx->Texture.CurrentUnit] != t->format)
   {
      smesa->TexStates[ctx->Texture.CurrentUnit] |= NEW_TEXTURE_ENV;
      smesa->PrevTexFormat[ctx->Texture.CurrentUnit] = t->format;
   }
   smesa->TexStates[ctx->Texture.CurrentUnit] |= NEW_TEXTURING;
}
/**
 * Map texture memory/buffer into user space.
 * Note: the region of interest parameters are ignored here.
 * \param map returns start of mapping of region of interest
 * \param stride returns row stride in bytes
 *
 * Handles three storage cases: a TFP buffer object, a miptree BO, and
 * plain malloc'd memory.  Flushes pending rendering that references the
 * BO before mapping.
 */
static void
radeon_map_texture_image(struct gl_context *ctx,
			 struct gl_texture_image *texImage,
			 GLuint slice,
			 GLuint x, GLuint y, GLuint w, GLuint h,
			 GLbitfield mode,
			 GLubyte **map,
			 GLint *stride)
{
   radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
   radeon_texture_image *image = get_radeon_texture_image(texImage);
   radeon_mipmap_tree *mt = image->mt;
   GLuint texel_size = _mesa_get_format_bytes(texImage->TexFormat);
   GLuint width = texImage->Width;
   GLuint height = texImage->Height;
   struct radeon_bo *bo = !image->mt ? image->bo : image->mt->bo;
   unsigned int bw, bh;
   GLboolean write = (mode & GL_MAP_WRITE_BIT) != 0;

   /* Work in units of compressed blocks: y and texel_size are rescaled
    * so the pointer arithmetic below addresses whole blocks. */
   _mesa_get_format_block_size(texImage->TexFormat, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;
   texel_size /= bw;

   /* If the GPU still references this BO, flush before CPU access. */
   if (bo && radeon_bo_is_referenced_by_cs(bo, rmesa->cmdbuf.cs)) {
      radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
		   "%s for texture that is "
		   "queued for GPU processing.\n",
		   __func__);
      radeon_firevertices(rmesa);
   }

   if (image->bo) {
      /* TFP case */
      radeon_bo_map(image->bo, write);
      *stride = get_texture_image_row_stride(rmesa, texImage->TexFormat,
                                             width, 0,
                                             texImage->TexObject->Target);
      *map = bo->ptr;
   } else if (likely(mt)) {
      /* Miptree case: map the BO and index into the face/level/slice. */
      void *base;
      radeon_mipmap_level *lvl = &image->mt->levels[texImage->Level];

      radeon_bo_map(mt->bo, write);
      base = mt->bo->ptr + lvl->faces[image->base.Base.Face].offset;

      *stride = lvl->rowstride;
      *map = base + (slice * height) * *stride;
   } else {
      /* texture data is in malloc'd memory */
      assert(map);
      *stride = _mesa_format_row_stride(texImage->TexFormat, width);
      *map = image->base.Buffer + (slice * height) * *stride;
   }

   /* Advance to the requested (x, y) within the mapped slice. */
   *map += y * *stride + x * texel_size;
}
/**
 * Allocate a new __DRIimage backed by a fresh VRAM buffer object.
 *
 * Only the RGB565 / XRGB8888 / ARGB8888 DRI formats are supported;
 * returns NULL for anything else or on allocation failure.
 */
static __DRIimage *
radeon_create_image(__DRIscreen *screen,
                    int width, int height, int format,
                    unsigned int use,
                    void *loaderPrivate)
{
   radeonScreenPtr radeonScreen = screen->driverPrivate;
   __DRIimage *img = calloc(1, sizeof *img);

   if (img == NULL)
      return NULL;

   img->dri_format = format;

   switch (format) {
   case __DRI_IMAGE_FORMAT_RGB565:
      img->format = MESA_FORMAT_B5G6R5_UNORM;
      img->internal_format = GL_RGB;
      img->data_type = GL_UNSIGNED_BYTE;
      break;
   case __DRI_IMAGE_FORMAT_XRGB8888:
      img->format = MESA_FORMAT_B8G8R8X8_UNORM;
      img->internal_format = GL_RGB;
      img->data_type = GL_UNSIGNED_BYTE;
      break;
   case __DRI_IMAGE_FORMAT_ARGB8888:
      img->format = MESA_FORMAT_B8G8R8A8_UNORM;
      img->internal_format = GL_RGBA;
      img->data_type = GL_UNSIGNED_BYTE;
      break;
   default:
      /* Unknown DRI format: reject the request. */
      free(img);
      return NULL;
   }

   img->data = loaderPrivate;
   img->cpp = _mesa_get_format_bytes(img->format);
   img->width = width;
   img->height = height;
   /* Pad each row out to a 256-byte boundary; pitch is in pixels. */
   img->pitch = ((img->cpp * img->width + 255) & ~255) / img->cpp;

   img->bo = radeon_bo_open(radeonScreen->bom,
                            0,
                            img->pitch * img->height * img->cpp,
                            0,
                            RADEON_GEM_DOMAIN_VRAM,
                            0);
   if (img->bo == NULL) {
      free(img);
      return NULL;
   }

   return img;
}
/**
 * Return the number of bytes per (1 x blockheight) column of a
 * compressed block for the given format.
 */
int
intel_compressed_num_bytes(GLuint mesaFormat)
{
   GLuint blockWidth, blockHeight;
   const GLuint blockBytes = _mesa_get_format_bytes(mesaFormat);

   _mesa_get_format_block_size(mesaFormat, &blockWidth, &blockHeight);

   return blockBytes / blockWidth;
}
/**
 * Compute the cached hardware register values for the given texture object.
 *
 * \param rmesa Context pointer
 * \param t the r300 texture object
 */
static void setup_hardware_state(r300ContextPtr rmesa, radeonTexObj *t)
{
   const struct gl_texture_image *firstImage;
   firstImage = t->base.Image[0][t->minLod];

   /* Translate the Mesa format into hardware txformat/txfilter bits,
    * unless the format has been overridden (e.g. texture-from-pixmap). */
   if (!t->image_override
       && VALID_FORMAT(firstImage->TexFormat)) {
      if (firstImage->_BaseFormat == GL_DEPTH_COMPONENT) {
         r300SetDepthTexMode(&t->base);
      } else {
         t->pp_txformat = tx_table[firstImage->TexFormat].format;
      }

      t->pp_txfilter |= tx_table[firstImage->TexFormat].filter;
   } else if (!t->image_override) {
      _mesa_problem(NULL, "unexpected texture format in %s",
                    __FUNCTION__);
      return;
   }

   /* Overridden textures with their own BO keep caller-supplied state. */
   if (t->image_override && t->bo)
      return;

   /* Pack width/height/depth (as value-1) and mip range into TX_SIZE.
    * NOTE(review): firstImage is dereferenced here even on the
    * image_override-without-bo path above — presumably callers guarantee
    * it is non-NULL; confirm. */
   t->pp_txsize = (((R300_TX_WIDTHMASK_MASK & ((firstImage->Width - 1) << R300_TX_WIDTHMASK_SHIFT)))
		   | ((R300_TX_HEIGHTMASK_MASK & ((firstImage->Height - 1) << R300_TX_HEIGHTMASK_SHIFT)))
		   | ((R300_TX_DEPTHMASK_MASK & ((firstImage->DepthLog2) << R300_TX_DEPTHMASK_SHIFT)))
		   | ((R300_TX_MAX_MIP_LEVEL_MASK & ((t->maxLod - t->minLod) << R300_TX_MAX_MIP_LEVEL_SHIFT))));

   t->tile_bits = 0;

   if (t->base.Target == GL_TEXTURE_CUBE_MAP)
      t->pp_txformat |= R300_TX_FORMAT_CUBIC_MAP;
   if (t->base.Target == GL_TEXTURE_3D)
      t->pp_txformat |= R300_TX_FORMAT_3D;


   if (t->base.Target == GL_TEXTURE_RECTANGLE_NV) {
      /* Rectangle textures use an explicit pitch, aligned to 64 bytes. */
      unsigned int align = (64 / _mesa_get_format_bytes(firstImage->TexFormat)) - 1;
      t->pp_txsize |= R300_TX_SIZE_TXPITCH_EN;
      if (!t->image_override)
         t->pp_txpitch = ((firstImage->Width + align) & ~align) - 1;
   }

   /* RV515+ extends the width/height fields with an 11th bit. */
   if (rmesa->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
      if (firstImage->Width > 2048)
         t->pp_txpitch |= R500_TXWIDTH_BIT11;
      else
         t->pp_txpitch &= ~R500_TXWIDTH_BIT11;
      if (firstImage->Height > 2048)
         t->pp_txpitch |= R500_TXHEIGHT_BIT11;
      else
         t->pp_txpitch &= ~R500_TXHEIGHT_BIT11;
   }
}
/**
 * Create a sub-image that aliases a rectangle of the parent image's
 * buffer object at the given byte offset and pixel pitch.
 *
 * Returns NULL on allocation failure or when the described subimage
 * would run past the end of the parent's BO.
 */
static __DRIimage *
intel_create_sub_image(__DRIimage *parent,
                       int width, int height, int dri_format,
                       int offset, int pitch, void *loaderPrivate)
{
    __DRIimage *image;
    int cpp;
    uint32_t mask_x, mask_y;

    image = intel_allocate_image(dri_format, loaderPrivate);
    /* Bug fix: the result was dereferenced without a NULL check. */
    if (image == NULL)
       return NULL;

    cpp = _mesa_get_format_bytes(image->format);
    if (offset + height * cpp * pitch > parent->region->bo->size) {
       _mesa_warning(NULL, "intel_create_sub_image: subimage out of bounds");
       FREE(image);
       return NULL;
    }

    image->region = calloc(sizeof(*image->region), 1);
    if (image->region == NULL) {
       FREE(image);
       return NULL;
    }

    image->region->cpp = _mesa_get_format_bytes(image->format);
    image->region->width = width;
    image->region->height = height;
    image->region->pitch = pitch;
    image->region->refcount = 1;
    /* Share the parent's BO; take a reference for this image's region. */
    image->region->bo = parent->region->bo;
    drm_intel_bo_reference(image->region->bo);
    image->region->tiling = parent->region->tiling;
    image->region->screen = parent->region->screen;
    image->offset = offset;

    /* The offset must land on a tile boundary for tiled surfaces. */
    intel_region_get_tile_masks(image->region, &mask_x, &mask_y);
    if (offset & mask_x)
       _mesa_warning(NULL,
                     "intel_create_sub_image: offset not on tile boundary");

    return image;
}
/** Put row of colors into renderbuffer.
 *
 * \param datatype GL_UNSIGNED_BYTE or GL_FLOAT — the type of \c values
 * \param mask optional per-pixel write mask (NULL = write all pixels)
 */
void
_swrast_put_row(struct gl_context *ctx, struct gl_renderbuffer *rb,
                GLenum datatype, GLuint count, GLint x, GLint y,
                const void *values, const GLubyte *mask)
{
   GLubyte *dst = _swrast_pixel_address(rb, x, y);

   if (!mask) {
      /* Unmasked: pack the whole row in one call. */
      if (datatype == GL_UNSIGNED_BYTE) {
         _mesa_pack_ubyte_rgba_row(rb->Format, count,
                                   (const GLubyte (*)[4]) values, dst);
      }
      else {
         assert(datatype == GL_FLOAT);
         _mesa_pack_float_rgba_row(rb->Format, count,
                                   (const GLfloat (*)[4]) values, dst);
      }
   }
   else {
      const GLuint bpp = _mesa_get_format_bytes(rb->Format);
      GLuint i, runLen, runStart;
      /* We can't pass a 'mask' array to the _mesa_pack_rgba_row()
       * functions so look for runs where mask=1...
       */
      runLen = runStart = 0;
      for (i = 0; i < count; i++) {
         if (mask[i]) {
            /* Extend (or start) the current run of writable pixels. */
            if (runLen == 0)
               runStart = i;
            runLen++;
         }

         if (!mask[i] || i == count - 1) {
            /* might be the end of a run of pixels */
            if (runLen > 0) {
               /* Pack just this run at its pixel offset. */
               if (datatype == GL_UNSIGNED_BYTE) {
                  _mesa_pack_ubyte_rgba_row(rb->Format, runLen,
                                     (const GLubyte (*)[4]) values + runStart,
                                     dst + runStart * bpp);
               }
               else {
                  assert(datatype == GL_FLOAT);
                  _mesa_pack_float_rgba_row(rb->Format, runLen,
                                     (const GLfloat (*)[4]) values + runStart,
                                     dst + runStart * bpp);
               }
               runLen = 0;
            }
         }
      }
   }
}
/**
 * \see dd_function_table::MapRenderbuffer
 *
 * Maps the renderbuffer for CPU access.  Window-system renderbuffers
 * (Name == 0) are stored bottom-up, so the mapping is returned flipped:
 * a pointer to the last row with a negative stride.
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
		       struct gl_renderbuffer *rb,
		       GLuint x, GLuint y, GLuint w, GLuint h,
		       GLbitfield mode,
		       GLubyte **out_map,
		       GLint *out_stride)
{
   struct intel_context *intel = intel_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   void *map;
   int stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   /* We sometimes get called with this by our intel_span.c usage. */
   if (!irb->mt) {
      *out_map = NULL;
      *out_stride = 0;
      return;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we
    * receive upside-down.  So we need to ask for a rectangle on flipped
    * vertically, and we then return a pointer to the bottom of it with a
    * negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
		     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      /* Point at the last row and walk backwards. */
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
       __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
}
/**
 * Software-untile an image, dispatching to the micro-untiler that
 * matches the format's bytes-per-texel (16-bit depth formats use a
 * different tile geometry than 16-bit color formats).
 */
void untile_image(const void * src, unsigned src_pitch,
		  void *dst, unsigned dst_pitch,
		  gl_format format,
		  unsigned width, unsigned height)
{
   const unsigned bpp = _mesa_get_format_bytes(format);

   assert(src_pitch >= width);
   assert(dst_pitch >= width);

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
		"Software untiling: src_pitch %d, dst_pitch %d, width %d, height %d, bpp %d\n",
		src_pitch, dst_pitch, width, height, bpp);

   switch (bpp) {
   case 1:
      micro_untile_8_x_4_8bit(src, src_pitch, dst, dst_pitch, width, height);
      break;
   case 2:
      if (_mesa_get_format_bits(format, GL_DEPTH_BITS)) {
         /* Depth formats tile differently from 16-bit color. */
         micro_untile_4_x_4_16bit(src, src_pitch, dst, dst_pitch, width, height);
      }
      else {
         micro_untile_8_x_2_16bit(src, src_pitch, dst, dst_pitch, width, height);
      }
      break;
   case 4:
      micro_untile_4_x_2_32bit(src, src_pitch, dst, dst_pitch, width, height);
      break;
   case 8:
      micro_untile_2_x_2_64bit(src, src_pitch, dst, dst_pitch, width, height);
      break;
   case 16:
      micro_untile_1_x_1_128bit(src, src_pitch, dst, dst_pitch, width, height);
      break;
   default:
      assert(0);
      break;
   }
}
/**
 * Map a swrast DRI renderbuffer for CPU access.
 *
 * Front buffers have no local storage: the requested rectangle is
 * fetched from the loader into a temporary buffer (freed on unmap by the
 * matching unmap path — the original Buffer pointer is replaced here)
 * and returned bottom-up via a negative stride.  Back/other buffers map
 * the existing malloc'd storage directly.
 */
static void
swrast_map_renderbuffer(struct gl_context *ctx,
			struct gl_renderbuffer *rb,
			GLuint x, GLuint y, GLuint w, GLuint h,
			GLbitfield mode,
			GLubyte **out_map,
			GLint *out_stride)
{
   struct dri_swrast_renderbuffer *xrb = dri_swrast_renderbuffer(rb);
   GLubyte *map = xrb->Base.Buffer;
   int cpp = _mesa_get_format_bytes(rb->Format);
   int stride = rb->Width * cpp;

   if (rb->AllocStorage == swrast_alloc_front_storage) {
      __DRIdrawable *dPriv = xrb->dPriv;
      __DRIscreen *sPriv = dPriv->driScreenPriv;

      /* Remember the mapped rectangle so unmap can write it back. */
      xrb->map_mode = mode;
      xrb->map_x = x;
      xrb->map_y = y;
      xrb->map_w = w;
      xrb->map_h = h;

      stride = w * cpp;
      xrb->Base.Buffer = malloc(h * stride);

      /* The loader's coordinate origin is the top; flip y. */
      sPriv->swrast_loader->getImage(dPriv, x, rb->Height - y - h,
				     w, h,
				     (char *) xrb->Base.Buffer,
				     dPriv->loaderPrivate);

      /* Return a bottom-up view: last row, negative stride. */
      *out_map = xrb->Base.Buffer + (h - 1) * stride;
      *out_stride = -stride;
      return;
   }

   assert(xrb->Base.Buffer);

   if (rb->AllocStorage == swrast_alloc_back_storage) {
      /* Back storage is stored top-down; present it bottom-up. */
      map += (rb->Height - 1) * stride;
      stride = -stride;
   }

   map += (GLsizei)y * stride;
   map += (GLsizei)x * cpp;

   *out_map = map;
   *out_stride = stride;
}
/* Upload an image from mesa's internal copy.
 *
 * Fast path: if the image row length equals the hardware pitch, copy the
 * whole level in one memcpy; otherwise copy row by row, advancing the
 * destination by the hardware pitch.  Only 1- and 2-byte texels are
 * supported.
 */
static void i810UploadTexLevel( i810ContextPtr imesa,
				i810TextureObjectPtr t, int hwlevel )
{
   const struct gl_texture_image *image = t->image[hwlevel].image;
   int j;
   GLuint texelBytes;

   if (!image || !image->Data)
      return;

   texelBytes = _mesa_get_format_bytes(image->TexFormat);

   if (image->Width * texelBytes == t->Pitch) {
      /* Tightly packed: one bulk copy covers the whole level. */
      GLubyte *dst = (GLubyte *)(t->BufAddr + t->image[hwlevel].offset);
      GLubyte *src = (GLubyte *)image->Data;

      memcpy( dst, src, t->Pitch * image->Height );
   }
   else {
      switch (texelBytes) {
      case 1:
	 {
	    GLubyte *dst = (GLubyte *)(t->BufAddr + t->image[hwlevel].offset);
	    GLubyte *src = (GLubyte *)image->Data;

	    for (j = 0 ; j < image->Height ; j++, dst += t->Pitch) {
	       __memcpy(dst, src, image->Width );
	       src += image->Width;
	    }
	 }
	 break;

      case 2:
	 {
	    /* dst advances by Pitch/2 because it is a GLushort pointer. */
	    GLushort *dst = (GLushort *)(t->BufAddr + t->image[hwlevel].offset);
	    GLushort *src = (GLushort *)image->Data;

	    for (j = 0 ; j < image->Height ; j++, dst += (t->Pitch/2)) {
	       __memcpy(dst, src, image->Width * 2 );
	       src += image->Width;
	    }
	 }
	 break;

      default:
	 fprintf(stderr, "%s: Not supported texel size %d\n",
		 __FUNCTION__, texelBytes);
      }
   }
}