/*
 * Tear down the hisi fbdev emulation: unregister the fb_info from the
 * fbdev core, release its colormap, destroy the private DRM framebuffer
 * and free the wrapper.  Mirrors the setup order of the probe path.
 */
static void hisi_drm_fbdev_fini(struct hisi_drm_fbdev *fbdev)
{
	struct fb_info *fbi = fbdev->fb_helper.fbdev;

	if (fbi) {
		/* Detach from the fbdev core before freeing anything. */
		if (unregister_framebuffer(fbi) < 0)
			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");

		if (fbi->cmap.len)
			fb_dealloc_cmap(&fbi->cmap);

		framebuffer_release(fbi);
	}

	if (fbdev->fb) {
		struct drm_framebuffer *drm_fb = &fbdev->fb->fb;

		drm_framebuffer_unregister_private(drm_fb);
		hisi_drm_fb_destroy(drm_fb);
	}

	drm_fb_helper_fini(&fbdev->fb_helper);
	kfree(fbdev);
}
static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; struct fb_info *info; DRM_DEBUG_DRIVER("\n"); if (bochs->fb.helper.fbdev) { info = bochs->fb.helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); gfb->obj = NULL; } drm_fb_helper_fini(&bochs->fb.helper); drm_framebuffer_unregister_private(&gfb->base); drm_framebuffer_cleanup(&gfb->base); return 0; }
void mtk_fbdev_destroy(struct drm_device *dev) { struct mtk_drm_private *priv = (struct mtk_drm_private *)dev->dev_private; struct drm_fb_helper *fbdev = priv->fb_helper; struct fb_info *info = priv->fb_helper->fbdev; if (info) { int err; err = unregister_framebuffer(info); if (err < 0) DRM_DEBUG_KMS("failed to unregister framebuffer\n"); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (fbdev->fb) { drm_framebuffer_unregister_private(fbdev->fb); mtk_drm_fb_destroy(fbdev->fb); } drm_fb_helper_fini(fbdev); kfree(fbdev); }
/*
 * Release all resources held by a tegra_fbdev: the registered fb_info,
 * its colormap, the private DRM framebuffer and the fbdev wrapper
 * itself.
 */
static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
{
	struct fb_info *info = fbdev->base.fbdev;

	if (info) {
		if (unregister_framebuffer(info) < 0)
			DRM_DEBUG_KMS("failed to unregister framebuffer\n");

		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);

		framebuffer_release(info);
	}

	if (fbdev->fb) {
		struct drm_framebuffer *fb = &fbdev->fb->base;

		drm_framebuffer_unregister_private(fb);
		tegra_fb_destroy(fb);
	}

	drm_fb_helper_fini(&fbdev->base);
	kfree(fbdev);
}
/*
 * Tear down the udl fbdev emulation: release the fb helper state, then
 * the DRM framebuffer, then the GEM object reference the framebuffer
 * held.
 */
static void udl_fbdev_destroy(struct drm_device *dev,
			      struct udl_fbdev *ufbdev)
{
	struct drm_fb_helper *helper = &ufbdev->helper;
	struct drm_framebuffer *fb = &ufbdev->ufb.base;

	drm_fb_helper_unregister_fbi(helper);
	drm_fb_helper_release_fbi(helper);
	drm_fb_helper_fini(helper);

	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
/*
 * Tear down the radeon fbdev emulation.  The pinned scanout BO (kept in
 * fb->obj[0]) is destroyed before the framebuffer itself is cleaned up.
 * Always returns 0.
 */
static int radeon_fbdev_destroy(struct drm_device *dev,
				struct radeon_fbdev *rfbdev)
{
	struct drm_fb_helper *helper = &rfbdev->helper;
	struct drm_framebuffer *fb = &rfbdev->fb;
	struct drm_gem_object *obj = fb->obj[0];

	drm_fb_helper_unregister_fbi(helper);

	if (obj) {
		radeonfb_destroy_pinned_object(obj);
		fb->obj[0] = NULL;
		drm_framebuffer_unregister_private(fb);
		drm_framebuffer_cleanup(fb);
	}

	drm_fb_helper_fini(helper);

	return 0;
}
/*
 * Tear down the amdgpu fbdev emulation: unregister the fb_info, destroy
 * the pinned scanout object, then clean up the helper and framebuffer.
 * Always returns 0.
 */
static int amdgpu_fbdev_destroy(struct drm_device *dev,
				struct amdgpu_fbdev *rfbdev)
{
	struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
	struct drm_fb_helper *helper = &rfbdev->helper;

	drm_fb_helper_unregister_fbi(helper);

	if (rfb->obj) {
		amdgpufb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}

	drm_fb_helper_fini(helper);
	drm_framebuffer_unregister_private(&rfb->base);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}
/*
 * Tear down the i915 fbdev emulation.  The fb_info's screen mapping is
 * an ioremap of stolen/GTT space, so it is iounmap'ed after the fbdev
 * core is done with it.
 */
static void intel_fbdev_destroy(struct drm_device *dev,
				struct intel_fbdev *ifbdev)
{
	struct fb_info *info = ifbdev->helper.fbdev;

	if (info) {
		unregister_framebuffer(info);
		iounmap(info->screen_base);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	drm_fb_helper_fini(&ifbdev->helper);
	drm_framebuffer_unregister_private(&ifbdev->ifb.base);
	intel_framebuffer_fini(&ifbdev->ifb);
}
/*
 * Tear down the evdi fbdev emulation: drop the fb_info and colormap,
 * then the helper, framebuffer and finally the GEM object reference.
 */
static void evdi_fbdev_destroy(__always_unused struct drm_device *dev,
			       struct evdi_fbdev *ufbdev)
{
	struct fb_info *info = ufbdev->helper.fbdev;
	struct drm_framebuffer *fb = &ufbdev->ufb.base;

	if (info) {
		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	drm_fb_helper_fini(&ufbdev->helper);
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
static int bochs_fbdev_destroy(struct bochs_device *bochs) { struct bochs_framebuffer *gfb = &bochs->fb.gfb; DRM_DEBUG_DRIVER("\n"); drm_fb_helper_unregister_fbi(&bochs->fb.helper); drm_fb_helper_release_fbi(&bochs->fb.helper); if (gfb->obj) { drm_gem_object_unreference_unlocked(gfb->obj); gfb->obj = NULL; } drm_fb_helper_fini(&bochs->fb.helper); drm_framebuffer_unregister_private(&gfb->base); drm_framebuffer_cleanup(&gfb->base); return 0; }
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_unregister_private(&rfb->base); drm_framebuffer_cleanup(&rfb->base); return 0; }
static int mtk_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct drm_device *dev = helper->dev; struct drm_mode_fb_cmd2 mode = { 0 }; struct mtk_drm_fb *mtk_fb; struct mtk_drm_gem_obj *mtk_gem; struct drm_gem_object *gem; struct fb_info *info; struct drm_framebuffer *fb; unsigned long offset; size_t size; int err; mode.width = sizes->surface_width; mode.height = sizes->surface_height; mode.pitches[0] = sizes->surface_width * ((sizes->surface_bpp + 7) / 8); mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); mode.height = mode.height;/* << 1; for fb use? */ size = mode.pitches[0] * mode.height; dev_info(dev->dev, "mtk_fbdev_probe %dx%d bpp %d pitch %d size %zu\n", mode.width, mode.height, sizes->surface_bpp, mode.pitches[0], size); mtk_gem = mtk_drm_gem_create(dev, size, true); if (IS_ERR(mtk_gem)) { err = PTR_ERR(mtk_gem); goto fini; } gem = &mtk_gem->base; mtk_fb = mtk_drm_framebuffer_init(dev, &mode, &gem); if (IS_ERR(mtk_fb)) { dev_err(dev->dev, "failed to allocate DRM framebuffer\n"); err = PTR_ERR(mtk_fb); goto free; } fb = &mtk_fb->base; info = framebuffer_alloc(0, dev->dev); if (!info) { dev_err(dev->dev, "failed to allocate framebuffer info\n"); err = PTR_ERR(info); goto release; } helper->fb = fb; helper->fbdev = info; info->par = helper; info->flags = FBINFO_FLAG_DEFAULT; info->fbops = &mediatek_fb_ops; err = fb_alloc_cmap(&info->cmap, 256, 0); if (err < 0) { dev_err(dev->dev, "failed to allocate color map: %d\n", err); goto destroy; } drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, helper, fb->width, fb->height); offset = info->var.xoffset * (fb->bits_per_pixel + 7) / 8; offset += info->var.yoffset * fb->pitches[0]; strcpy(info->fix.id, "mtk"); /* dev->mode_config.fb_base = (resource_size_t)bo->paddr; */ info->var.yres = info->var.yres_virtual;/* >> 1; for fb use? 
*/ info->fix.smem_start = mtk_gem->dma_addr + offset; info->fix.smem_len = size; info->screen_base = mtk_gem->kvaddr + offset; info->screen_size = size; return 0; destroy: drm_framebuffer_unregister_private(fb); mtk_drm_fb_destroy(fb); release: framebuffer_release(info); free: mtk_drm_gem_free_object(&mtk_gem->base); fini: dev_err(dev->dev, "mtk_fbdev_probe fail\n"); return err; }
static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; struct amdgpu_device *adev = rfbdev->adev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct amdgpu_bo *abo = NULL; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; if (sizes->surface_bpp == 24) sizes->surface_bpp = 32; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; } abo = gem_to_amdgpu_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } info->par = rfbdev; info->skip_vt_switch = true; ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto out; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; strcpy(info->fix.id, "amdgpudrmfb"); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &amdgpufb_ops; tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_len = amdgpu_bo_size(abo); info->screen_base = abo->kptr; info->screen_size = amdgpu_bo_size(abo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = adev->mc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ if (info->screen_base == NULL) { ret = -ENOSPC; goto 
out; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); vga_switcheroo_client_fb_set(adev->ddev->pdev, info); return 0; out: if (abo) { } if (fb && ret) { drm_gem_object_unreference_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); } return ret; }
static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_fbdev *rfbdev = container_of(helper, struct radeon_fbdev, helper); struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; /* avivo can't scanout real 24bpp */ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; } rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; } /* radeon resume is fragile and needs a vt switch to help it along */ info->skip_vt_switch = false; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto out; } fb = &rfbdev->fb; /* setup helper */ rfbdev->helper.fb = fb; memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); info->fbops = &radeonfb_ops; tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = radeon_bo_size(rbo); info->screen_base = rbo->kptr; info->screen_size = radeon_bo_size(rbo); drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ if (info->screen_base == NULL) { ret = -ENOSPC; goto out; } 
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out: if (rbo) { } if (fb && ret) { drm_gem_object_put_unlocked(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); } return ret; }
static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_fbdev *rfbdev = container_of(helper, struct radeon_fbdev, helper); struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; struct device *device = &rdev->pdev->dev; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; /* avivo can't scanout real 24bpp */ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; } rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = framebuffer_alloc(0, device); if (info == NULL) { ret = -ENOMEM; goto out_unref; } info->par = rfbdev; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto out_unref; } fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); strcpy(info->fix.id, "radeondrmfb"); drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = radeon_bo_size(rbo); info->screen_base = rbo->kptr; info->screen_size = radeon_bo_size(rbo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto 
out_unref; } info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.aper_size; /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ if (info->screen_base == NULL) { ret = -ENOSPC; goto out_unref; } ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unref; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out_unref: if (rbo) { } if (fb && ret) { drm_gem_object_unreference(gobj); drm_framebuffer_unregister_private(fb); drm_framebuffer_cleanup(fb); kfree(fb); } return ret; }
static int hisi_drm_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct hisi_drm_fbdev *fbdev = to_hisi_drm_fbdev(helper); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_device *dev = helper->dev; struct drm_gem_cma_object *obj; struct drm_framebuffer *fb; unsigned int bytes_per_pixel; unsigned long offset; struct fb_info *fbi; size_t size; int ret; /* TODO: Need to use ion heaps to create frame buffer?? */ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp); bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); sizes->surface_depth = PREFERRED_BPP; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height * HISI_NUM_FRAMEBUFFERS; mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = roundup(mode_cmd.pitches[0] * mode_cmd.height, PAGE_SIZE); obj = drm_gem_cma_create(dev, size); if (IS_ERR(obj)) return -ENOMEM; fbi = framebuffer_alloc(0, dev->dev); if (!fbi) { dev_err(dev->dev, "Failed to allocate framebuffer info.\n"); ret = -ENOMEM; goto err_drm_gem_cma_free_object; } fbdev->fb = hisi_drm_fb_alloc(dev, &mode_cmd, &obj, 1, true); if (IS_ERR(fbdev->fb)) { dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n"); ret = PTR_ERR(fbdev->fb); goto err_framebuffer_release; } fb = &fbdev->fb->fb; helper->fb = fb; helper->fbdev = fbi; fbi->par = helper; fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &hisi_drm_fbdev_ops; ret = fb_alloc_cmap(&fbi->cmap, 256, 0); if (ret) { dev_err(dev->dev, "Failed to allocate color map.\n"); goto err_hisi_drm_fb_destroy; } drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height/HISI_NUM_FRAMEBUFFERS); offset = fbi->var.xoffset * bytes_per_pixel; offset += fbi->var.yoffset * fb->pitches[0]; dev->mode_config.fb_base = 
(resource_size_t)obj->paddr; fbi->screen_base = obj->vaddr + offset; fbi->fix.smem_start = (unsigned long)(obj->paddr + offset); fbi->screen_size = size; fbi->fix.smem_len = size; DRM_DEBUG_DRIVER("exit successfully.\n"); return 0; err_hisi_drm_fb_destroy: drm_framebuffer_unregister_private(fb); hisi_drm_fb_destroy(fb); err_framebuffer_release: framebuffer_release(fbi); err_drm_gem_cma_free_object: drm_gem_cma_free_object(&obj->base); return ret; }
static int tegra_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct tegra_fbdev *fbdev = to_tegra_fbdev(helper); struct tegra_drm *tegra = helper->dev->dev_private; struct drm_device *drm = helper->dev; struct drm_mode_fb_cmd2 cmd = { 0 }; unsigned int bytes_per_pixel; struct drm_framebuffer *fb; unsigned long offset; struct fb_info *info; struct tegra_bo *bo; size_t size; int err; bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); cmd.width = sizes->surface_width; cmd.height = sizes->surface_height; cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel, tegra->pitch_align); cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); size = cmd.pitches[0] * cmd.height; bo = tegra_bo_create(drm, size, 0); if (IS_ERR(bo)) return PTR_ERR(bo); info = framebuffer_alloc(0, drm->dev); if (!info) { dev_err(drm->dev, "failed to allocate framebuffer info\n"); tegra_bo_free_object(&bo->gem); return -ENOMEM; } fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1); if (IS_ERR(fbdev->fb)) { dev_err(drm->dev, "failed to allocate DRM framebuffer\n"); err = PTR_ERR(fbdev->fb); goto release; } fb = &fbdev->fb->base; helper->fb = fb; helper->fbdev = info; info->par = helper; info->flags = FBINFO_FLAG_DEFAULT; info->fbops = &tegra_fb_ops; err = fb_alloc_cmap(&info->cmap, 256, 0); if (err < 0) { dev_err(drm->dev, "failed to allocate color map: %d\n", err); goto destroy; } drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, helper, fb->width, fb->height); offset = info->var.xoffset * bytes_per_pixel + info->var.yoffset * fb->pitches[0]; drm->mode_config.fb_base = (resource_size_t)bo->paddr; info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; info->fix.smem_start = (unsigned long)(bo->paddr + offset); info->fix.smem_len = size; return 0; destroy: drm_framebuffer_unregister_private(fb); tegra_fb_destroy(fb); release: framebuffer_release(info); return 
err; }