static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

	intel_fbc_recompress(dev_priv);
}
static struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
					       struct drm_file *file,
					       struct drm_mode_fb_cmd2 *cmd)
{
	unsigned int hsub, vsub, i;
	struct tegra_bo *planes[4];
	struct drm_gem_object *gem;
	struct tegra_fb *fb;
	int err;

	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);

	for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
		unsigned int width = cmd->width / (i ? hsub : 1);
		unsigned int height = cmd->height / (i ? vsub : 1);
		unsigned int size, bpp;

		gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
		if (!gem) {
			err = -ENXIO;
			goto unreference;
		}

		bpp = drm_format_plane_cpp(cmd->pixel_format, i);

		size = (height - 1) * cmd->pitches[i] +
		       width * bpp + cmd->offsets[i];

		if (gem->size < size) {
			err = -EINVAL;
			goto unreference;
		}

		planes[i] = to_tegra_bo(gem);
	}

	fb = tegra_fb_alloc(drm, cmd, planes, i);
	if (IS_ERR(fb)) {
		err = PTR_ERR(fb);
		goto unreference;
	}

	return &fb->base;

unreference:
	while (i--)
		drm_gem_object_unreference_unlocked(&planes[i]->gem);

	return ERR_PTR(err);
}
static void ilk_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;
	unsigned int y_offset;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= obj->fence_reg;

	y_offset = get_crtc_fence_y_offset(crtc);
	I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
	}

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
					       struct drm_file *file,
					       struct drm_mode_fb_cmd2 *cmd)
{
	unsigned int hsub, vsub, i;
	struct mtk_drm_fb *mtk_fb;
	struct drm_gem_object *gem[MAX_FB_OBJ];
	int err;

	hsub = drm_format_horz_chroma_subsampling(cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(cmd->pixel_format);

	for (i = 0; i < drm_format_num_planes(cmd->pixel_format); i++) {
		unsigned int width = cmd->width / (i ? hsub : 1);
		unsigned int height = cmd->height / (i ? vsub : 1);
		unsigned int size, bpp;

		gem[i] = drm_gem_object_lookup(dev, file, cmd->handles[i]);
		if (!gem[i]) {
			err = -ENOENT;
			goto unreference;
		}

		bpp = drm_format_plane_cpp(cmd->pixel_format, i);
		size = (height - 1) * cmd->pitches[i] + width * bpp;
		size += cmd->offsets[i];

		if (gem[i]->size < size) {
			err = -EINVAL;
			goto unreference;
		}
	}

	mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
	if (IS_ERR(mtk_fb)) {
		err = PTR_ERR(mtk_fb);
		goto unreference;
	}

	return &mtk_fb->base;

unreference:
	while (i--)
		drm_gem_object_unreference_unlocked(gem[i]);

	return ERR_PTR(err);
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.active = true;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
	if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev_priv))
		dpfc_ctl |= params->fb.fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev_priv)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	}

	intel_fbc_recompress(dev_priv);
}
/* NOTE: looks like if horizontal decimation is used (if we supported that)
 * then the width used to calculate SMP block requirements is the post-
 * decimated width.  Ie. SMP buffering sits downstream of decimation (which
 * presumably happens during the dma from scanout buffer).
 */
static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
		uint32_t nplanes, uint32_t width)
{
	struct drm_device *dev = plane->dev;
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = mdp5_plane->pipe;
	int i, hsub, nlines, nblks, ret;

	hsub = drm_format_horz_chroma_subsampling(format);

	/* different if BWC (compressed framebuffer?) enabled: */
	nlines = 2;

	for (i = 0, nblks = 0; i < nplanes; i++) {
		int n, fetch_stride, cpp;

		cpp = drm_format_plane_cpp(format, i);
		fetch_stride = width * cpp / (i ? hsub : 1);

		n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);

		/* for hw rev v1.00 */
		if (mdp5_kms->rev == 0)
			n = roundup_pow_of_two(n);

		DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
		ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
		if (ret) {
			dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
					n, ret);
			return ret;
		}

		nblks += n;
	}

	/* in success case, return total # of blocks allocated: */
	return nblks;
}
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
					       struct drm_file *file,
					       const struct drm_mode_fb_cmd2 *cmd)
{
	struct mtk_drm_fb *mtk_fb;
	struct drm_gem_object *gem;
	unsigned int width = cmd->width;
	unsigned int height = cmd->height;
	unsigned int size, bpp;
	int ret;

	if (drm_format_num_planes(cmd->pixel_format) != 1)
		return ERR_PTR(-EINVAL);

	gem = drm_gem_object_lookup(file, cmd->handles[0]);
	if (!gem)
		return ERR_PTR(-ENOENT);

	bpp = drm_format_plane_cpp(cmd->pixel_format, 0);
	size = (height - 1) * cmd->pitches[0] + width * bpp;
	size += cmd->offsets[0];

	if (gem->size < size) {
		ret = -EINVAL;
		goto unreference;
	}

	mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
	if (IS_ERR(mtk_fb)) {
		ret = PTR_ERR(mtk_fb);
		goto unreference;
	}

	return &mtk_fb->base;

unreference:
	drm_gem_object_unreference_unlocked(gem);

	return ERR_PTR(ret);
}
static void g4x_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct amdgpu_device *adev = rfbdev->adev;
	struct drm_gem_object *gobj = NULL;
	struct amdgpu_bo *abo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 cpp;

	cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
						  fb_tiled);

	height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = amdgpu_gem_object_create(adev, aligned_size, 0,
				       AMDGPU_GEM_DOMAIN_VRAM,
				       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
				       AMDGPU_GEM_CREATE_VRAM_CLEARED,
				       true, &gobj);
	if (ret) {
		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
		return -ENOMEM;
	}
	abo = gem_to_amdgpu_bo(gobj);

	if (fb_tiled)
		tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE,
						 GRPH_ARRAY_2D_TILED_THIN1);

	ret = amdgpu_bo_reserve(abo, false);
	if (unlikely(ret != 0))
		goto out_unref;

	if (tiling_flags) {
		ret = amdgpu_bo_set_tiling_flags(abo, tiling_flags);
		if (ret)
			dev_err(adev->dev, "FB failed to set tiling flags\n");
	}

	ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
	if (ret) {
		amdgpu_bo_unreserve(abo);
		goto out_unref;
	}
	ret = amdgpu_bo_kmap(abo, NULL);
	amdgpu_bo_unreserve(abo);
	if (ret)
		goto out_unref;

	*gobj_p = gobj;
	return 0;

out_unref:
	amdgpufb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
/* mode set a plane */
int xilinx_drm_plane_mode_set(struct drm_plane *base_plane,
			      struct drm_framebuffer *fb,
			      int crtc_x, int crtc_y,
			      unsigned int crtc_w, unsigned int crtc_h,
			      uint32_t src_x, uint32_t src_y,
			      uint32_t src_w, uint32_t src_h)
{
	struct xilinx_drm_plane *plane = to_xilinx_plane(base_plane);
	struct drm_gem_cma_object *obj;
	size_t offset;
	unsigned int hsub, vsub, i;

	DRM_DEBUG_KMS("plane->id: %d\n", plane->id);

	if (fb->pixel_format != plane->format) {
		DRM_ERROR("unsupported pixel format %08x\n", fb->pixel_format);
		return -EINVAL;
	}

	/* configure cresample */
	if (plane->cresample)
		xilinx_cresample_configure(plane->cresample, crtc_w, crtc_h);

	/* configure rgb2yuv */
	if (plane->rgb2yuv)
		xilinx_rgb2yuv_configure(plane->rgb2yuv, crtc_w, crtc_h);

	DRM_DEBUG_KMS("h: %d(%d), v: %d(%d)\n", src_w, crtc_x, src_h, crtc_y);
	DRM_DEBUG_KMS("bpp: %d\n", fb->bits_per_pixel / 8);

	hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);

	for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
		unsigned int width = src_w / (i ? hsub : 1);
		unsigned int height = src_h / (i ? vsub : 1);
		unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, i);

		obj = xilinx_drm_fb_get_gem_obj(fb, i);
		if (!obj) {
			DRM_ERROR("failed to get a gem obj for fb\n");
			return -EINVAL;
		}

		plane->dma[i].xt.numf = height;
		plane->dma[i].sgl[0].size = width * cpp;
		plane->dma[i].sgl[0].icg = fb->pitches[i] -
					   plane->dma[i].sgl[0].size;
		offset = src_x * cpp + src_y * fb->pitches[i];
		offset += fb->offsets[i];
		plane->dma[i].xt.src_start = obj->paddr + offset;
		plane->dma[i].xt.frame_size = 1;
		plane->dma[i].xt.dir = DMA_MEM_TO_DEV;
		plane->dma[i].xt.src_sgl = true;
		plane->dma[i].xt.dst_sgl = false;
		plane->dma[i].is_active = true;
	}

	for (; i < MAX_NUM_SUB_PLANES; i++)
		plane->dma[i].is_active = false;

	/* set OSD dimensions */
	if (plane->manager->osd) {
		xilinx_osd_disable_rue(plane->manager->osd);
		xilinx_osd_layer_set_dimension(plane->osd_layer, crtc_x, crtc_y,
					       src_w, src_h);
		xilinx_osd_enable_rue(plane->manager->osd);
	}

	if (plane->manager->dp_sub) {
		int ret;

		ret = xilinx_drm_dp_sub_layer_check_size(plane->manager->dp_sub,
							 plane->dp_layer,
							 src_w, src_h);
		if (ret)
			return ret;
	}

	return 0;
}
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t x, uint32_t y,
		 uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	int pipe = intel_plane->pipe;
	u32 sprctl, sprscale = 0;
	unsigned long sprsurf_offset, linear_offset;
	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
	bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;

	sprctl = I915_READ(SPRCTL(pipe));

	/* Mask out pixel format bits in case we change it */
	sprctl &= ~SPRITE_PIXFORMAT_MASK;
	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
	sprctl &= ~SPRITE_TILED;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XBGR8888:
		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
		break;
	case DRM_FORMAT_XRGB8888:
		sprctl |= SPRITE_FORMAT_RGBX888;
		break;
	case DRM_FORMAT_YUYV:
		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
		break;
	case DRM_FORMAT_YVYU:
		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
		break;
	case DRM_FORMAT_UYVY:
		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
		break;
	case DRM_FORMAT_VYUY:
		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		sprctl |= SPRITE_TILED;

	/* must disable */
	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
	sprctl |= SPRITE_ENABLE;

	if (IS_HASWELL(dev))
		sprctl |= SPRITE_PIPE_CSC_ENABLE;

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	crtc_w--;
	crtc_h--;

	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 */
	if (crtc_w != src_w || crtc_h != src_h) {
		dev_priv->sprite_scaling_enabled |= 1 << pipe;

		if (!scaling_was_enabled) {
			intel_update_watermarks(dev);
			intel_wait_for_vblank(dev, pipe);
		}
		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
	} else
		dev_priv->sprite_scaling_enabled &= ~(1 << pipe);

	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	sprsurf_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size, fb->pitches[0]);
	linear_offset -= sprsurf_offset;

	/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
	 * register */
	if (IS_HASWELL(dev))
		I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
	else if (obj->tiling_mode != I915_TILING_NONE)
		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
	else
		I915_WRITE(SPRLINOFF(pipe), linear_offset);

	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
	if (intel_plane->can_scale)
		I915_WRITE(SPRSCALE(pipe), sprscale);
	I915_WRITE(SPRCTL(pipe), sprctl);
	I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
	POSTING_READ(SPRSURF(pipe));

	/* potentially re-enable LP watermarks */
	if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
		intel_update_watermarks(dev);
}
static void
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t x, uint32_t y,
		 uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	int pipe = intel_plane->pipe;
	unsigned long dvssurf_offset, linear_offset;
	u32 dvscntr, dvsscale;
	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dvscntr = I915_READ(DVSCNTR(pipe));

	/* Mask out pixel format bits in case we change it */
	dvscntr &= ~DVS_PIXFORMAT_MASK;
	dvscntr &= ~DVS_RGB_ORDER_XBGR;
	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
	dvscntr &= ~DVS_TILED;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XBGR8888:
		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
		break;
	case DRM_FORMAT_XRGB8888:
		dvscntr |= DVS_FORMAT_RGBX888;
		break;
	case DRM_FORMAT_YUYV:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
		break;
	case DRM_FORMAT_YVYU:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
		break;
	case DRM_FORMAT_UYVY:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
		break;
	case DRM_FORMAT_VYUY:
		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dvscntr |= DVS_TILED;

	if (IS_GEN6(dev))
		dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
	dvscntr |= DVS_ENABLE;

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	crtc_w--;
	crtc_h--;

	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);

	dvsscale = 0;
	if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;

	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	dvssurf_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size, fb->pitches[0]);
	linear_offset -= dvssurf_offset;

	if (obj->tiling_mode != I915_TILING_NONE)
		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
	else
		I915_WRITE(DVSLINOFF(pipe), linear_offset);

	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
	I915_WRITE(DVSSCALE(pipe), dvsscale);
	I915_WRITE(DVSCNTR(pipe), dvscntr);
	I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
	POSTING_READ(DVSSURF(pipe));
}
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
		const struct drm_mode_fb_cmd2 *mode_cmd,
		struct drm_gem_object **bos)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_framebuffer *msm_fb = NULL;
	struct drm_framebuffer *fb;
	const struct msm_format *format;
	int ret, i, n;
	unsigned int hsub, vsub;

	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
			(char *)&mode_cmd->pixel_format);

	n = drm_format_num_planes(mode_cmd->pixel_format);
	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);

	format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
	if (!format) {
		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
				(char *)&mode_cmd->pixel_format);
		ret = -EINVAL;
		goto fail;
	}

	msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
	if (!msm_fb) {
		ret = -ENOMEM;
		goto fail;
	}

	fb = &msm_fb->base;

	msm_fb->format = format;

	if (n > ARRAY_SIZE(msm_fb->planes)) {
		ret = -EINVAL;
		goto fail;
	}

	for (i = 0; i < n; i++) {
		unsigned int width = mode_cmd->width / (i ? hsub : 1);
		unsigned int height = mode_cmd->height / (i ? vsub : 1);
		unsigned int min_size;

		min_size = (height - 1) * mode_cmd->pitches[i]
			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
			 + mode_cmd->offsets[i];

		if (bos[i]->size < min_size) {
			ret = -EINVAL;
			goto fail;
		}

		msm_fb->planes[i] = bos[i];
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		goto fail;
	}

	DBG("create: FB ID: %d (%p)", fb->base.id, fb);

	return fb;

fail:
	kfree(msm_fb);

	return ERR_PTR(ret);
}