Example #1
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct radeon_bo_gem *bo;
    int r;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }

    bo->base.bom = bom;
    bo->base.handle = 0;
    bo->base.size = size;
    bo->base.alignment = alignment;
    bo->base.domains = domains;
    bo->base.flags = flags;
    bo->base.ptr = NULL;
    atomic_set(&bo->reloc_in_cs, 0);
    bo->map_count = 0;
    if (handle) {
        struct drm_gem_open open_arg;

        memset(&open_arg, 0, sizeof(open_arg));
        open_arg.name = handle;
        r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
        if (r != 0) {
            free(bo);
            return NULL;
        }
        bo->base.handle = open_arg.handle;
        bo->base.size = open_arg.size;
        bo->name = handle;
    } else {
        struct drm_radeon_gem_create args;

        args.size = size;
        args.alignment = alignment;
        args.initial_domain = bo->base.domains;
        args.flags = flags;
        args.handle = 0;
        r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
                                &args, sizeof(args));
        bo->base.handle = args.handle;
        if (r) {
            fprintf(stderr, "Failed to allocate :\n");
            fprintf(stderr, "   size      : %d bytes\n", size);
            fprintf(stderr, "   alignment : %d bytes\n", alignment);
            fprintf(stderr, "   domains   : %d\n", bo->base.domains);
            free(bo);
            return NULL;
        }
    }
    radeon_bo_ref((struct radeon_bo*)bo);
    return (struct radeon_bo*)bo;
}
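A quick usage sketch for the two paths above (illustration only: bo_open() is static in the real source, and bom, flink_name and the sizes shown are assumptions, not taken from the original). A zero handle allocates a fresh buffer object through DRM_RADEON_GEM_CREATE; a non-zero flink name opens an existing GEM object through DRM_IOCTL_GEM_OPEN:

/* Sketch, not part of the original file: bo_open() is static there,
 * and bom / flink_name are assumed to be set up elsewhere. */
struct radeon_bo *fresh  = bo_open(bom, 0, 64 * 1024, 4096,
                                   RADEON_GEM_DOMAIN_VRAM, 0);
struct radeon_bo *shared = bo_open(bom, flink_name, 0, 0, 0, 0);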
Example #2
static __DRIimage *
radeon_create_image_from_renderbuffer(__DRIcontext *context,
                                      int renderbuffer, void *loaderPrivate)
{
   __DRIimage *image;
   radeonContextPtr radeon = context->driverPrivate;
   struct gl_renderbuffer *rb;
   struct radeon_renderbuffer *rrb;

   rb = _mesa_lookup_renderbuffer(&radeon->glCtx, renderbuffer);
   if (!rb) {
      _mesa_error(&radeon->glCtx,
                  GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
      return NULL;
   }

   rrb = radeon_renderbuffer(rb);
   image = calloc(1, sizeof *image);
   if (image == NULL)
      return NULL;

   image->internal_format = rb->InternalFormat;
   image->format = rb->Format;
   image->cpp = rrb->cpp;
   image->data_type = GL_UNSIGNED_BYTE;
   image->data = loaderPrivate;
   radeon_bo_ref(rrb->bo);
   image->bo = rrb->bo;

   image->width = rb->Width;
   image->height = rb->Height;
   image->pitch = rrb->pitch / image->cpp;

   return image;
}
Example #3
struct radeon_bo *
radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
{
    struct radeon_bo_gem *bo;
    int r;
    uint32_t handle;

    bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
    if (bo == NULL) {
        return NULL;
    }

    bo->base.bom = bom;
    bo->base.handle = 0;
    bo->base.size = size;
    bo->base.alignment = 0;
    bo->base.domains = RADEON_GEM_DOMAIN_GTT;
    bo->base.flags = 0;
    bo->base.ptr = NULL;
    atomic_set(&bo->reloc_in_cs, 0);
    bo->map_count = 0;

    r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
    if (r != 0) {
        free(bo);
        return NULL;
    }

    bo->base.handle = handle;
    bo->name = handle;

    radeon_bo_ref((struct radeon_bo *)bo);
    return (struct radeon_bo *)bo;
}
Example #4
void radeon_image_target_texture_2d(struct gl_context *ctx, GLenum target,
                                    struct gl_texture_object *texObj,
                                    struct gl_texture_image *texImage,
                                    GLeglImageOES image_handle)
{
    radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    radeonTexObj *t = radeon_tex_obj(texObj);
    radeon_texture_image *radeonImage = get_radeon_texture_image(texImage);
    __DRIscreen *screen;
    __DRIimage *image;

    screen = radeon->dri.screen;
    image = screen->dri2.image->lookupEGLImage(screen, image_handle,
            screen->loaderPrivate);
    if (image == NULL)
        return;

    radeonFreeTextureImageBuffer(ctx, texImage);

    texImage->Width = image->width;
    texImage->Height = image->height;
    texImage->Depth = 1;
    texImage->_BaseFormat = GL_RGBA;
    texImage->TexFormat = image->format;
    radeonImage->base.RowStride = image->pitch;
    texImage->InternalFormat = image->internal_format;

    if(t->mt)
    {
        radeon_miptree_unreference(&t->mt);
        t->mt = NULL;
    }

    /* NOTE: The following is *very* ugly and will probably break. But
       I don't know how to deal with it, without creating a whole new
       function like radeon_miptree_from_bo() so I'm going with the
       easy but error-prone way. */

    radeon_try_alloc_miptree(radeon, t);
    if (t->mt == NULL)
    {
        radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
                     "%s Failed to allocate miptree.\n", __func__);
        return;
    }

    radeon_miptree_reference(t->mt, &radeonImage->mt);

    /* Particularly ugly: this is guaranteed to break, if image->bo is
       not of the required size for a miptree. */
    radeon_bo_unref(t->mt->bo);
    radeon_bo_ref(image->bo);
    t->mt->bo = image->bo;

    if (!radeon_miptree_matches_image(t->mt, &radeonImage->base.Base))
        fprintf(stderr, "miptree doesn't match image\n");
}
Example #5
void radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
				struct radeon_bo *bo)
{
  struct radeon_bo *old;
  old = rb->bo;
  rb->bo = bo;
  radeon_bo_ref(bo);
  if (old)
    radeon_bo_unref(old);
}
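A note on the ordering in Example #5: taking the reference on the incoming bo before unreferencing the old one keeps the setter safe even when bo == rb->bo, since the refcount never drops to zero in between. A minimal sketch of the same pattern, with hypothetical object_ref()/object_unref() stand-ins for radeon_bo_ref()/radeon_bo_unref():

/* Hypothetical ref-counted setter; not part of the original file. */
static void set_object(struct object **slot, struct object *obj)
{
    struct object *old = *slot;

    if (obj)
        object_ref(obj);    /* take the new reference first */
    *slot = obj;
    if (old)
        object_unref(old);  /* only now drop the old reference */
}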
Example #6
static void
radeon_image_target_renderbuffer_storage(struct gl_context *ctx,
                                         struct gl_renderbuffer *rb,
                                         void *image_handle)
{
   radeonContextPtr radeon = RADEON_CONTEXT(ctx);
   struct radeon_renderbuffer *rrb;
   __DRIscreen *screen;
   __DRIimage *image;

   screen = radeon->radeonScreen->driScreen;
   image = screen->dri2.image->lookupEGLImage(screen, image_handle,
					      screen->loaderPrivate);
   if (image == NULL)
      return;

   rrb = radeon_renderbuffer(rb);

   if (ctx->Driver.Flush)
      ctx->Driver.Flush(ctx); /* +r6/r7 */

   if (rrb->bo)
      radeon_bo_unref(rrb->bo);
   rrb->bo = image->bo;
   radeon_bo_ref(rrb->bo);
   fprintf(stderr, "image->bo: %p, name: %d, rbs: w %d -> p %d\n", image->bo, image->bo->handle,
           image->width, image->pitch);

   rrb->cpp = image->cpp;
   rrb->pitch = image->pitch * image->cpp;

   rb->Format = image->format;
   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->DataType = image->data_type;
   rb->_BaseFormat = _mesa_base_fbo_format(radeon->glCtx,
                                           image->internal_format);
}
Example #7
static int cs_gem_write_reloc(struct radeon_cs_int *cs,
                              struct radeon_bo *bo,
                              uint32_t read_domain,
                              uint32_t write_domain,
                              uint32_t flags)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct cs_reloc_gem *reloc;
    uint32_t idx;
    unsigned i;

    assert(boi->space_accounted);

    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or write domain, but not
         * in both the read and write domain at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* use the bit-field hash to determine whether this bo
       is definitely not in this cs. */
    if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
        /* check whether the bo is already referenced.
         * Scanning from end to beginning reduces cycles with Mesa because
         * it often relocates the same shared dma bo again. */
        for(i = cs->crelocs; i != 0;) {
            --i;
            idx = i * RELOC_SIZE;
            reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
            if (reloc->handle == bo->handle) {
                /* Domains must be read or write. Since we already checked
                 * above that exactly one of the read or write domains is
                 * set, we only need to check that if the previous reloc has
                 * the read domain set then read_domain should also be set
                 * for this new relocation.
                 */
                /* the DDX expects to read and write from the same pixmap */
                if (write_domain && (reloc->read_domain & write_domain)) {
                    reloc->read_domain = 0;
                    reloc->write_domain = write_domain;
                } else if (read_domain & reloc->write_domain) {
                    reloc->read_domain = 0;
                } else {
                    if (write_domain != reloc->write_domain)
                        return -EINVAL;
                    if (read_domain != reloc->read_domain)
                        return -EINVAL;
                }

                reloc->read_domain |= read_domain;
                reloc->write_domain |= write_domain;
                /* update flags */
                reloc->flags |= (flags & reloc->flags);
                /* write relocation packet */
                radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
                radeon_cs_write_dword((struct radeon_cs *)cs, idx);
                return 0;
            }
        }
    }
    /* new relocation */
    if (csg->base.crelocs >= csg->nrelocs) {
        /* allocate more memory (TODO: perhaps use a slab allocator) */
        uint32_t *tmp, size;
        size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
        tmp = (uint32_t*)realloc(csg->relocs_bo, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        csg->relocs_bo = (struct radeon_bo_int **)tmp;
        size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
        tmp = (uint32_t*)realloc(csg->relocs, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        cs->relocs = csg->relocs = tmp;
        csg->nrelocs += 1;
        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    }
    csg->relocs_bo[csg->base.crelocs] = boi;
    idx = (csg->base.crelocs++) * RELOC_SIZE;
    reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
    reloc->handle = bo->handle;
    reloc->read_domain = read_domain;
    reloc->write_domain = write_domain;
    reloc->flags = flags;
    csg->chunks[1].length_dw += RELOC_SIZE;
    radeon_bo_ref(bo);
    /* the bo might be referenced from another context, so we have to use atomic operations */
    atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
    cs->relocs_total_size += boi->size;
    radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
    radeon_cs_write_dword((struct radeon_cs *)cs, idx);
    return 0;
}
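A note on the reloc_in_cs filter in Example #7: the code treats cs->id as a bit in a per-bo atomic bitmask of the command streams that might reference the bo. A clear bit proves the bo is not in this cs; a set bit only means "maybe", which is why the linear scan of the reloc list still follows. A minimal sketch of that idea with hypothetical names (bo_mark_in_cs, bo_maybe_in_cs), ignoring the atomics the real code needs:

#include <stdint.h>

/* Hypothetical sketch, not part of the original file. */
static void bo_mark_in_cs(uint32_t *mask, uint32_t cs_id_bit)
{
    /* the real code updates the per-bo mask with atomic operations */
    *mask |= cs_id_bit;
}

static int bo_maybe_in_cs(uint32_t mask, uint32_t cs_id_bit)
{
    /* clear bit => definitely not in this cs; set bit => maybe */
    return (mask & cs_id_bit) != 0;
}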
Example #8
void r300SetTexBuffer2(__DRIcontext *pDRICtx, GLint target, GLint glx_texture_format, __DRIdrawable *dPriv)
{
	struct gl_texture_unit *texUnit;
	struct gl_texture_object *texObj;
	struct gl_texture_image *texImage;
	struct radeon_renderbuffer *rb;
	radeon_texture_image *rImage;
	radeonContextPtr radeon;
	r300ContextPtr rmesa;
	struct radeon_framebuffer *rfb;
	radeonTexObjPtr t;
	uint32_t pitch_val;
	uint32_t internalFormat, type, format;

	type = GL_BGRA;
	format = GL_UNSIGNED_BYTE;
	internalFormat = (glx_texture_format == GLX_TEXTURE_FORMAT_RGB_EXT ? 3 : 4);

	radeon = pDRICtx->driverPrivate;
	rmesa = pDRICtx->driverPrivate;

	rfb = dPriv->driverPrivate;
	texUnit = &radeon->glCtx->Texture.Unit[radeon->glCtx->Texture.CurrentUnit];
	texObj = _mesa_select_tex_object(radeon->glCtx, texUnit, target);
	texImage = _mesa_get_tex_image(radeon->glCtx, texObj, target, 0);

	rImage = get_radeon_texture_image(texImage);
	t = radeon_tex_obj(texObj);
	if (t == NULL) {
		return;
	}

	radeon_update_renderbuffers(pDRICtx, dPriv);
	/* The back and depth buffers are useless here, free them right away. */
	rb = (void*)rfb->base.Attachment[BUFFER_DEPTH].Renderbuffer;
	if (rb && rb->bo) {
		radeon_bo_unref(rb->bo);
		rb->bo = NULL;
	}
	rb = (void*)rfb->base.Attachment[BUFFER_BACK_LEFT].Renderbuffer;
	if (rb && rb->bo) {
		radeon_bo_unref(rb->bo);
		rb->bo = NULL;
	}
	rb = rfb->color_rb[0];
	if (rb->bo == NULL) {
		/* Failed to get a BO for the buffer */
		return;
	}
	
	_mesa_lock_texture(radeon->glCtx, texObj);
	if (t->bo) {
		radeon_bo_unref(t->bo);
		t->bo = NULL;
	}
	if (rImage->bo) {
		radeon_bo_unref(rImage->bo);
		rImage->bo = NULL;
	}

	radeon_miptree_unreference(&t->mt);
	radeon_miptree_unreference(&rImage->mt);

	_mesa_init_teximage_fields(radeon->glCtx, target, texImage,
				   rb->base.Width, rb->base.Height, 1, 0, rb->cpp);
	texImage->RowStride = rb->pitch / rb->cpp;
	rImage->bo = rb->bo;
	radeon_bo_ref(rImage->bo);
	t->bo = rb->bo;
	radeon_bo_ref(t->bo);
	t->tile_bits = 0;
	t->image_override = GL_TRUE;
	t->override_offset = 0;
	t->pp_txpitch &= (1 << 13) - 1;
	pitch_val = rb->pitch;
	switch (rb->cpp) {
	case 4:
		if (glx_texture_format == GLX_TEXTURE_FORMAT_RGB_EXT)
			t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, W8Z8Y8X8);
		else
			t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
		t->pp_txfilter |= tx_table[2].filter;
		pitch_val /= 4;
		break;
	case 3:
	default:
		t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, W8Z8Y8X8);
		t->pp_txfilter |= tx_table[4].filter;
		pitch_val /= 4;
		break;
	case 2:
		t->pp_txformat = R300_EASY_TX_FORMAT(X, Y, Z, ONE, Z5Y6X5);
		t->pp_txfilter |= tx_table[5].filter;
		pitch_val /= 2;
		break;
	}
	pitch_val--;
	t->pp_txsize = (((R300_TX_WIDTHMASK_MASK & ((rb->base.Width - 1) << R300_TX_WIDTHMASK_SHIFT)))
			| ((R300_TX_HEIGHTMASK_MASK & ((rb->base.Height - 1) << R300_TX_HEIGHTMASK_SHIFT))));
	t->pp_txsize |= R300_TX_SIZE_TXPITCH_EN;
	t->pp_txpitch |= pitch_val;

	if (rmesa->radeon.radeonScreen->chip_family >= CHIP_FAMILY_RV515) {
		if (rb->base.Width > 2048)
			t->pp_txpitch |= R500_TXWIDTH_BIT11;
		else
			t->pp_txpitch &= ~R500_TXWIDTH_BIT11;
		if (rb->base.Height > 2048)
			t->pp_txpitch |= R500_TXHEIGHT_BIT11;
		else
			t->pp_txpitch &= ~R500_TXHEIGHT_BIT11;
	}
	t->validated = GL_TRUE;
	_mesa_unlock_texture(radeon->glCtx, texObj);
	return;
}
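A worked example of the pitch packing in Example #8, under the assumption of a 1024-pixel-wide BGRA renderbuffer (cpp == 4) with a 4096-byte pitch: pitch_val starts as rb->pitch = 4096 bytes, the case 4 branch divides it by 4 to get 1024 texels, and the trailing pitch_val-- leaves 1023, which is OR-ed into pp_txpitch, matching the minus-one encoding the code applies while R300_TX_SIZE_TXPITCH_EN is enabled in pp_txsize.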
Example #9
static void
radeon_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct gl_texture_image *newImage
      = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
   struct radeon_renderbuffer *rrb = radeon_renderbuffer(att->Renderbuffer);
   radeon_texture_image *radeon_image;
   GLuint imageOffset;

   radeon_print(RADEON_TEXTURE, RADEON_TRACE,
                "%s(%p, fb %p, rrb %p, att %p)\n",
                __func__, ctx, fb, rrb, att);

   (void) fb;

   ASSERT(newImage);

   radeon_image = (radeon_texture_image *)newImage;

   if (!radeon_image->mt || newImage->Border != 0) {
      /* Fallback on drawing to a texture without a miptree.
       */
      _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
      _mesa_render_texture(ctx, fb, att);
      return;
   }
   else if (!rrb) {
      rrb = radeon_wrap_texture(ctx, newImage);
      if (rrb) {
         /* bind the wrapper to the attachment point */
         _mesa_reference_renderbuffer(&att->Renderbuffer, &rrb->base);
      }
      else {
         /* fallback to software rendering */
         _mesa_render_texture(ctx, fb, att);
         return;
      }
   }

   if (!radeon_update_wrapper(ctx, rrb, newImage)) {
       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
       _mesa_render_texture(ctx, fb, att);
       return;
   }

   DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
       _glthread_GetID(),
       att->Texture->Name, newImage->Width, newImage->Height,
       rrb->base.RefCount);

   /* point the renderbuffer's region to the texture image region */
   if (rrb->bo != radeon_image->mt->bo) {
      if (rrb->bo)
         radeon_bo_unref(rrb->bo);
      rrb->bo = radeon_image->mt->bo;
      radeon_bo_ref(rrb->bo);
   }

   /* compute offset of the particular 2D image within the texture region */
   imageOffset = radeon_miptree_image_offset(radeon_image->mt,
                                            att->CubeMapFace,
                                            att->TextureLevel);

   if (att->Texture->Target == GL_TEXTURE_3D) {
      imageOffset += radeon_image->mt->levels[att->TextureLevel].rowstride *
                     radeon_image->mt->levels[att->TextureLevel].height *
                     att->Zoffset;
   }

   /* store that offset in the region, along with the correct pitch for
    * the image we are rendering to */
   rrb->draw_offset = imageOffset;
   rrb->pitch = radeon_image->mt->levels[att->TextureLevel].rowstride;

   /* update drawing region, etc */
   radeon_draw_buffer(ctx, fb);
}
Example #10
static int r600_cs_write_reloc(struct radeon_cs_int *csi,
			       struct radeon_bo *bo,
			       uint32_t read_domain,
			       uint32_t write_domain,
			       uint32_t flags)
{
    struct r600_cs_reloc_legacy *relocs;
    int i;

    relocs = (struct r600_cs_reloc_legacy *)csi->relocs;
    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or write domain, but not
         * in both the read and write domain at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* check if bo is already referenced */
    for(i = 0; i < csi->crelocs; i++) {
        uint32_t *indices;
        uint32_t *reloc_indices;

        if (relocs[i].base.bo->handle == bo->handle) {
            /* Domains must be read or write. Since we already checked
             * above that exactly one of the read or write domains is set,
             * we only need to check that if the previous reloc has the
             * read domain set then read_domain should also be set for
             * this new relocation.
             */
            if (relocs[i].base.read_domain && !read_domain) {
                return -EINVAL;
            }
            if (relocs[i].base.write_domain && !write_domain) {
                return -EINVAL;
            }
            relocs[i].base.read_domain |= read_domain;
            relocs[i].base.write_domain |= write_domain;
            /* save the index */
            relocs[i].cindices++;
            indices = (uint32_t*)realloc(relocs[i].indices,
                                         relocs[i].cindices * 4);
            reloc_indices = (uint32_t*)realloc(relocs[i].reloc_indices,
                                               relocs[i].cindices * 4);
            if ( (indices == NULL) || (reloc_indices == NULL) ) {
                relocs[i].cindices -= 1;
                return -ENOMEM;
            }
            relocs[i].indices = indices;
            relocs[i].reloc_indices = reloc_indices;
            relocs[i].indices[relocs[i].cindices - 1] = csi->cdw;
            relocs[i].reloc_indices[relocs[i].cindices - 1] = csi->cdw;
            csi->section_cdw += 2;
            csi->cdw += 2;

            return 0;
        }
    }
    /* add bo to reloc */
    relocs = (struct r600_cs_reloc_legacy*)
             realloc(csi->relocs,
                     sizeof(struct r600_cs_reloc_legacy) * (csi->crelocs + 1));
    if (relocs == NULL) {
        return -ENOMEM;
    }
    csi->relocs = relocs;
    relocs[csi->crelocs].base.bo = bo;
    relocs[csi->crelocs].base.read_domain = read_domain;
    relocs[csi->crelocs].base.write_domain = write_domain;
    relocs[csi->crelocs].base.flags = flags;
    relocs[csi->crelocs].indices = (uint32_t*)malloc(4);
    relocs[csi->crelocs].reloc_indices = (uint32_t*)malloc(4);
    if ( (relocs[csi->crelocs].indices == NULL) || (relocs[csi->crelocs].reloc_indices == NULL) )
    {
        return -ENOMEM;
    }

    relocs[csi->crelocs].indices[0] = csi->cdw;
    relocs[csi->crelocs].reloc_indices[0] = csi->cdw;
    csi->section_cdw += 2;
    csi->cdw += 2;
    relocs[csi->crelocs].cindices = 1;
    csi->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
    csi->crelocs++;

    radeon_bo_ref(bo);

    return 0;
}