Example 1
/*
 * cirrus_user_framebuffer_create() - .fb_create hook for userspace FBs
 * @dev: DRM device
 * @filp: DRM file the GEM handle belongs to
 * @mode_cmd: requested framebuffer layout (format, size, GEM handles)
 *
 * Looks up the backing GEM object, wraps it in a cirrus_framebuffer and
 * initializes it.  Returns the new framebuffer on success or an ERR_PTR:
 * -EINVAL for formats deeper than 24bpp, -ENOENT for a bad handle,
 * -ENOMEM on allocation failure, or the error from framebuffer init.
 */
static struct drm_framebuffer *
cirrus_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *filp,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct cirrus_framebuffer *cirrus_fb;
	struct drm_gem_object *obj;
	u32 bpp, depth;
	int ret;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
	/* cirrus can't handle > 24bpp framebuffers at all */
	if (bpp > 24)
		return ERR_PTR(-EINVAL);

	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
	if (!cirrus_fb) {
		ret = -ENOMEM;
		goto err_unref;
	}

	ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
	if (ret)
		goto err_free;

	return &cirrus_fb->base;

err_free:
	kfree(cirrus_fb);
err_unref:
	/* drop the reference taken by the handle lookup */
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}
Example 2
/*
 * qxlfb_create_pinned_object() - allocate, pin and kmap the fbdev scanout BO
 * @qfbdev: qxl fbdev state
 * @mode_cmd: requested framebuffer geometry (pitch/width/height/format)
 * @gobj_p: on success, set to the new GEM object (pinned in SURFACE domain)
 *
 * Returns 0 on success, -ENOMEM if the BO could not be created, or a
 * negative errno from the reserve/pin/kmap path (the BO is destroyed and
 * *gobj_p is set to NULL on those failures).
 */
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
				      const struct drm_mode_fb_cmd2 *mode_cmd,
				      struct drm_gem_object **gobj_p)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	int bpp;
	unsigned int depth;

	/*
	 * drm_fb_get_bpp_depth() takes (format, &depth, &bpp); the previous
	 * call passed the two output pointers swapped (and declared both as
	 * plain int), so bpp and depth received each other's values.
	 */
	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	/* TODO: unallocate and reallocate surface0 for real. Hack to just
	 * have a large enough surface0 for 1024x768 Xorg 32bpp mode */
	ret = qxl_gem_object_create(qdev, aligned_size, 0,
				    QXL_GEM_DOMAIN_SURFACE,
				    false, /* is discardable */
				    false, /* is kernel (false means device) */
				    NULL,
				    &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	qbo = gem_to_qxl_bo(gobj);

	/* Describe the surface to the device; scanout is always 32bpp xRGB. */
	qbo->surf.width = mode_cmd->width;
	qbo->surf.height = mode_cmd->height;
	qbo->surf.stride = mode_cmd->pitches[0];
	qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;

	/* Pin and kmap under reservation; the mapping stays live for mmap. */
	ret = qxl_bo_reserve(qbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
	if (ret) {
		qxl_bo_unreserve(qbo);
		goto out_unref;
	}
	ret = qxl_bo_kmap(qbo, NULL);
	qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	qxlfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
Example 3
/**
 * drm_format_plane_cpp - determine the bytes per pixel value
 * @format: pixel format (DRM_FORMAT_*)
 * @plane: plane index
 *
 * Returns:
 * The bytes per pixel value for the specified plane, or 0 if @plane is
 * out of range for @format.
 */
int drm_format_plane_cpp(uint32_t format, int plane)
{
	unsigned int depth;
	int bpp;

	if (plane >= drm_format_num_planes(format))
		return 0;

	switch (format) {
	/* packed 4:2:2 YUV: a single plane with two bytes per pixel */
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		return 2;

	/* semi-planar YUV: 1-byte luma plane, 2-byte interleaved chroma */
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_NV24:
	case DRM_FORMAT_NV42:
		if (plane == 0)
			return 1;
		return 2;

	/* fully planar YUV: every plane holds one byte per sample */
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
	case DRM_FORMAT_YUV411:
	case DRM_FORMAT_YVU411:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
		return 1;

	/* anything else is single-plane; derive cpp from the format's bpp */
	default:
		drm_fb_get_bpp_depth(format, &depth, &bpp);
		return bpp >> 3;
	}
}
Example 4
/*
 * radeonfb_create_pinned_object() - allocate and pin the fbdev scanout BO
 * @rfbdev: radeon fbdev state
 * @mode_cmd: requested framebuffer geometry; pitches[0] is rewritten here
 *            to satisfy the CRTC pitch alignment rules
 * @gobj_p: on success, set to the new GEM object (pinned in VRAM)
 *
 * Returns 0 on success, -ENOMEM if the BO could not be created, or a
 * negative errno from the reserve/pin/kmap path (the BO is destroyed and
 * *gobj_p is set to NULL on those failures).
 */
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 bpp, depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
						  fb_tiled) * ((bpp + 1) / 8);

	if (rdev->family >= CHIP_R600)
		height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = radeon_gem_object_create(rdev, aligned_size, 0,
				       RADEON_GEM_DOMAIN_VRAM,
				       0, true, &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	rbo = gem_to_radeon_bo(gobj);

	if (fb_tiled)
		tiling_flags = RADEON_TILING_MACRO;

#ifdef __BIG_ENDIAN
	/* big-endian hosts need byte swapping on scanout */
	switch (bpp) {
	case 32:
		tiling_flags |= RADEON_TILING_SWAP_32BIT;
		break;
	case 16:
		tiling_flags |= RADEON_TILING_SWAP_16BIT;
		break;	/* previously an unannotated fallthrough into default */
	default:
		break;
	}
#endif

	if (tiling_flags) {
		ret = radeon_bo_set_tiling_flags(rbo,
						 tiling_flags | RADEON_TILING_SURFACE,
						 mode_cmd->pitches[0]);
		if (ret)
			dev_err(rdev->dev, "FB failed to set tiling flags\n");
	}

	/* Pin and kmap under reservation; the mapping backs the fbdev. */
	ret = radeon_bo_reserve(rbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	/* Only 27 bit offset for legacy CRTC */
	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
				       NULL);
	if (ret) {
		radeon_bo_unreserve(rbo);
		goto out_unref;
	}
	if (fb_tiled)
		radeon_bo_check_tiling(rbo, 0, 0);
	ret = radeon_bo_kmap(rbo, NULL);
	radeon_bo_unreserve(rbo);
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;
out_unref:
	radeonfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
Example 5
/**
 * sti_gdp_prepare_layer
 * @layer: gdp layer
 * @first_prepare: true if it is the first time this function is called
 *
 * Update the free GDP node list according to the layer properties.
 *
 * RETURNS:
 * 0 on success, 1 on error (unsupported pixel format, VTG notifier
 * registration failure, or pixel clock configuration failure).
 */
static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
{
    struct sti_gdp_node_list *list;
    struct sti_gdp_node *top_field, *btm_field;
    struct drm_display_mode *mode = layer->mode;
    struct device *dev = layer->dev;
    struct sti_gdp *gdp = to_sti_gdp(layer);
    struct sti_compositor *compo = dev_get_drvdata(dev);
    int format;
    unsigned int depth, bpp;
    int rate = mode->clock * 1000; /* pixel clock in Hz (mode->clock is kHz) */
    int res;
    u32 ydo, xdo, yds, xds;

    /* Grab the pair of node buffers (top/bottom field) that the hardware
     * is not currently scanning out of. */
    list = sti_gdp_get_free_nodes(layer);
    top_field = list->top_field;
    btm_field = list->btm_field;

    dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
            sti_layer_to_str(layer), top_field, btm_field);

    /* Build the top field from layer params */
    top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
    top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
    format = sti_gdp_fourcc2format(layer->format);
    if (format == -1) {
        DRM_ERROR("Format not supported by GDP %.4s\n",
                  (char *)&layer->format);
        return 1;
    }
    top_field->gam_gdp_ctl |= format;
    top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
    top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;

    /* pixel memory location: buffer base plus the (src_x, src_y) crop
     * offset within the source framebuffer */
    drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
    top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
    top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
    top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];

    /* input parameters: source pitch and clamped source width/height */
    top_field->gam_gdp_pmp = layer->pitches[0];
    top_field->gam_gdp_size =
        clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
        clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);

    /* output parameters: destination rectangle converted to VTG
     * line/pixel numbers (packed as y << 16 | x) */
    ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
    yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
    xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
    xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
    top_field->gam_gdp_vpo = (ydo << 16) | xdo;
    top_field->gam_gdp_vps = (yds << 16) | xds;

    /* Same content and chained together: each field node's next-node
     * pointer (nvn) references the other field node */
    memcpy(btm_field, top_field, sizeof(*btm_field));
    top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
    btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);

    /* Interlaced mode: the bottom field reads pixels starting one line
     * (one pitch) further into the buffer than the top field */
    if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
        btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
                                 layer->pitches[0];

    /* One-time setup: VTG field notifier and pixel clock */
    if (first_prepare) {
        /* Register gdp callback on the VTG driving this layer's mixer */
        if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
                                    compo->vtg_main : compo->vtg_aux,
                                    &gdp->vtg_field_nb, layer->mixer_id)) {
            DRM_ERROR("Cannot register VTG notifier\n");
            return 1;
        }

        /* Set and enable gdp clock (only present on some instances) */
        if (gdp->clk_pix) {
            res = clk_set_rate(gdp->clk_pix, rate);
            if (res < 0) {
                DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
                          rate);
                return 1;
            }

            if (clk_prepare_enable(gdp->clk_pix)) {
                DRM_ERROR("Failed to prepare/enable gdp\n");
                return 1;
            }
        }
    }

    return 0;
}