/**
 * radeon_set_filp_rights - grant or revoke exclusive filp rights.
 *
 * @dev: drm dev pointer
 * @owner: current rights holder (updated in place)
 * @applier: drm file requesting (1) or releasing (0) the rights
 * @value: in: 1 to request, 0 to release; out: 1 iff @applier now holds rights
 *
 * Sets the filp rights for the device (all asics), serialized under the
 * global DRM lock.
 */
void radeon_set_filp_rights(struct drm_device *dev, struct drm_file **owner,
    struct drm_file *applier, uint32_t *value)
{
	DRM_LOCK();
	switch (*value) {
	case 1:
		/* Request: grant only if nobody currently holds rights. */
		if (*owner == NULL)
			*owner = applier;
		break;
	case 0:
		/* Release: only the current holder may give rights up. */
		if (*owner == applier)
			*owner = NULL;
		break;
	default:
		/* Any other value: no state change, just report status. */
		break;
	}
	/* Report back whether the applier ended up holding the rights. */
	*value = (*owner == applier) ? 1 : 0;
	DRM_UNLOCK();
}
/*
 * drm_mmap - d_mmap handler for the DRM character device.
 *
 * Translates a byte offset within a client mmap() of the device into a
 * physical address, returned via ap->a_result.  Returns 0 on success,
 * EINVAL if no authenticator is found, EACCES if the caller is not
 * authenticated.
 *
 * NOTE(review): this chunk ends mid-function; only the DMA-buffer path is
 * visible here.
 */
int drm_mmap(struct dev_mmap_args *ap)
{
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_file *file_priv = NULL;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	enum drm_map_type type;
	vm_paddr_t phys;

	/* d_mmap gets called twice, we can only reference file_priv during
	 * the first call.  We need to assume that if error is EBADF the
	 * call was successful and the client is authenticated.
	 */
	DRM_LOCK(dev);
	file_priv = drm_find_file_by_proc(dev, curthread);
	DRM_UNLOCK(dev);

	if (!file_priv) {
		DRM_ERROR("Could not find authenticator!\n");
		return EINVAL;
	}

	/* Only authenticated clients may map device memory. */
	if (!file_priv->authenticated)
		return EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);

	/* Offsets below the DMA area resolve directly to DMA buffer pages. */
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		drm_device_dma_t *dma = dev->dma;

		spin_lock(&dev->dma_lock);

		if (dma->pagelist != NULL) {
			/* Index the pagelist by page number to get the
			 * backing physical address. */
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			spin_unlock(&dev->dma_lock);
			// XXX *paddr = phys;
			ap->a_result = phys;
			return 0;
		} else {
/*
 * pscnv_gem_pager_dtor - cdev pager destructor for a pscnv GEM object.
 *
 * Looks up the device VM object backing @handle, frees every fictitious
 * page installed into it by the fault handler, releases the VM object,
 * and finally drops the BO's channel or GEM reference.
 *
 * Locking: takes the DRM lock for the whole teardown.  Note the asymmetric
 * unlock - the error branch unlocks and returns early, the success path
 * unlocks at the very end.
 */
static void pscnv_gem_pager_dtor(void *handle)
{
	struct drm_gem_object *gem_obj = handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	vm_object_t devobj;

	DRM_LOCK(dev);
	devobj = cdev_pager_lookup(handle);

	if (devobj != NULL) {
		vm_size_t page_count = OFF_TO_IDX(bo->size);
		vm_page_t m;
		int i;
		/* Walk every page index the BO could have faulted in and
		 * return the fictitious pages to the cdev pager. */
		VM_OBJECT_LOCK(devobj);
		for (i = 0; i < page_count; i++) {
			m = vm_page_lookup(devobj, i);
			if (!m)
				continue;
			if (pscnv_mem_debug > 0)
				/* NOTE(review): format string has an unbalanced
				 * "(" before %p - cosmetic, left as-is. */
				NV_WARN(dev, "Freeing %010llx + %08llx (%p\n",
				    bo->start, i * PAGE_SIZE, m);
			cdev_pager_free_page(devobj, m);
		}
		VM_OBJECT_UNLOCK(devobj);
		/* Drop the reference cdev_pager_lookup() took. */
		vm_object_deallocate(devobj);
	} else {
		DRM_UNLOCK(dev);
		NV_ERROR(dev, "Could not find handle %p bo %p\n", handle, bo);
		return;
	}
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Freed %010llx (%p)\n", bo->start, bo);
	//kfree(bo->fake_pages);
	/* Channel-owned BOs are released via the channel; plain GEM objects
	 * drop their GEM reference instead. */
	if (bo->chan)
		pscnv_chan_unref(bo->chan);
	else
		drm_gem_object_unreference_unlocked(gem_obj);
	DRM_UNLOCK(dev);
}
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; void *handle; unsigned long pages; u_int32_t type; struct agp_memory_info info; if (!dev->agp || !dev->agp->acquired) return EINVAL; entry = malloc(sizeof(*entry), DRM_MEM_AGPLISTS, M_NOWAIT | M_ZERO); if (entry == NULL) return ENOMEM; pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; type = (u_int32_t) request->type; DRM_UNLOCK(dev); handle = drm_agp_allocate_memory(pages, type); DRM_LOCK(dev); if (handle == NULL) { free(entry, DRM_MEM_AGPLISTS); return ENOMEM; } entry->handle = handle; entry->bound = 0; entry->pages = pages; entry->prev = NULL; entry->next = dev->agp->memory; if (dev->agp->memory) dev->agp->memory->prev = entry; dev->agp->memory = entry; agp_memory_info(dev->agp->agpdev, entry->handle, &info); request->handle = (unsigned long) entry->handle; request->physical = info.ami_physical; return 0; }
/** * Marks the client associated with the given magic number as authenticated. */ int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_auth *auth = data; struct drm_file *priv; DRM_DEBUG("%u\n", auth->magic); DRM_LOCK(); priv = drm_find_file(dev, auth->magic); if (priv != NULL) { priv->authenticated = 1; drm_remove_magic(dev, auth->magic); DRM_UNLOCK(); return 0; } else { DRM_UNLOCK(); return EINVAL; } }
/*
 * drm_sg_free - free the device's scatter/gather mapping (SG_FREE ioctl).
 *
 * Fix: the userland-supplied handle is now validated while the lock is
 * held and BEFORE dev->sg is detached.  The previous code set dev->sg to
 * NULL first and then returned EINVAL on a handle mismatch, which both
 * leaked the drm_sg_mem entry and left dev->sg cleared even though the
 * mapping was never freed.
 *
 * Returns 0 on success, EINVAL if there is no SG mapping or the handle
 * does not match it.
 */
int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_scatter_gather *request = data;
	struct drm_sg_mem *entry;

	DRM_LOCK(dev);
	entry = dev->sg;
	if (entry == NULL || entry->vaddr != request->handle) {
		/* Nothing mapped, or wrong handle: reject without touching
		 * dev->sg. */
		DRM_UNLOCK(dev);
		return (EINVAL);
	}
	/* Detach under the lock, then clean up without it. */
	dev->sg = NULL;
	DRM_UNLOCK(dev);

	DRM_DEBUG("free 0x%zx\n", entry->vaddr);

	drm_sg_cleanup(entry);

	return (0);
}
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { drm_agp_mem_t *entry; int retcode; if (!dev->agp || !dev->agp->acquired) return EINVAL; entry = drm_agp_lookup_entry(dev, (void *)request->handle); if (entry == NULL || !entry->bound) return EINVAL; DRM_UNLOCK(dev); retcode = drm_agp_unbind_memory(entry->handle); DRM_LOCK(dev); if (retcode == 0) entry->bound = 0; return retcode; }
/*
 * drm_irq_uninstall - disable the device interrupt and tear down vblanks.
 *
 * Clears irq_enabled under the DRM lock, then wakes every thread sleeping
 * on a vblank, disables per-CRTC vblank state, snapshots the final counter
 * values, and finally calls the driver's irq_uninstall hook.
 *
 * Returns 0 on success, EINVAL if the IRQ was not installed.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
	int i;

	DRM_LOCK();
	if (!dev->irq_enabled) {
		DRM_UNLOCK();
		return (EINVAL);
	}
	dev->irq_enabled = 0;
	DRM_UNLOCK();

	/*
	 * Ick. we're about to turn off vblanks, so make sure anyone waiting
	 * on them gets woken up.  Also make sure we update state correctly
	 * so that we can continue refcounting correctly.
	 */
	if (dev->vblank != NULL) {
		mtx_enter(&dev->vblank->vb_lock);
		for (i = 0; i < dev->vblank->vb_num; i++) {
#if !defined(__NetBSD__)
			/* Non-NetBSD: plain sleep-channel wakeup. */
			wakeup(&dev->vblank->vb_crtcs[i]);
#else /* !defined(__NetBSD__) */
			/* NetBSD uses a condvar per CRTC instead. */
			cv_broadcast(&dev->vblank->vb_crtcs[i].condvar);
#endif /* !defined(__NetBSD__) */
			dev->vblank->vb_crtcs[i].vbl_enabled = 0;
			/* Record the last hardware counter value so later
			 * re-enables can account for missed vblanks. */
			dev->vblank->vb_crtcs[i].vbl_last =
			    dev->driver->get_vblank_counter(dev, i);
		}
		mtx_leave(&dev->vblank->vb_lock);
	}

	DRM_DEBUG("irq=%d\n", dev->irq);

	dev->driver->irq_uninstall(dev);

	return (0);
}
/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
int i915_gem_get_tiling(struct drm_device *dev, void *data,
    struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	/* NOTE(review): this NULL test relies on 'base' being the first
	 * member of drm_i915_gem_object so that to_intel_bo(NULL) stays
	 * NULL - confirm the struct layout before restructuring. */
	if (&obj->base == NULL)
		return -ENOENT;

	DRM_LOCK(dev);

	args->tiling_mode = obj->tiling_mode;
	switch (obj->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		break;
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	default:
		/* No swizzle_mode assignment here: the userland-supplied
		 * value in args is passed back unchanged. */
		DRM_ERROR("unknown tiling mode\n");
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	return 0;
}
static int intel_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); int ret = 0; if (plane->crtc) intel_enable_primary(plane->crtc); intel_plane->disable_plane(plane); if (!intel_plane->obj) goto out; DRM_LOCK(dev); intel_unpin_fb_obj(intel_plane->obj); intel_plane->obj = NULL; DRM_UNLOCK(dev); out: return ret; }
int drm_getmagic(DRM_IOCTL_ARGS) { DRM_DEVICE; static drm_magic_t sequence = 0; drm_auth_t auth; drm_file_t *priv; DRM_LOCK(); priv = drm_find_file_by_proc(dev, p); DRM_UNLOCK(); if (priv == NULL) { DRM_ERROR("can't find authenticator\n"); return EINVAL; } /* Find unique magic */ if (priv->magic) { auth.magic = priv->magic; } else { do { int old = sequence; auth.magic = old+1; if (!atomic_cmpset_int(&sequence, old, auth.magic)) continue; } while (drm_find_file(dev, auth.magic)); priv->magic = auth.magic; drm_add_magic(dev, priv, auth.magic); } DRM_DEBUG("%u\n", auth.magic); DRM_COPY_TO_USER_IOCTL((drm_auth_t *)data, auth, sizeof(auth)); return 0; }
/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
#if 0 /* old drm_release equivalent from DragonFly */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	/* Give the driver a chance to clean up per-file state first. */
	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	/* If the dying process holds the hardware lock, release it on its
	 * behalf (reclaiming buffers first if the driver needs the lock). */
	if (dev->primary->master->lock.hw_lock &&
	    _DRM_LOCK_IS_HELD(dev->primary->master->lock.hw_lock->lock) &&
	    dev->primary->master->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
		    DRM_CURRENTPID,
		    _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		drm_lock_free(&dev->primary->master->lock,
		    _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of hardware at this
		 * point, possibly processed via a callback to the X server.
		 */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->primary->master->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers: acquire it as the
		 * kernel context, sleeping on contention. */
		for (;;) {
			if (!dev->primary->master->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->primary->master->lock,
			    DRM_KERNEL_CONTEXT)) {
				dev->primary->master->lock.file_priv = file_priv;
				dev->primary->master->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev,
			    &dev->primary->master->lock.lock_queue,
			    PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->primary->master->lock,
			    DRM_KERNEL_CONTEXT);
		}
	}

	/* Legacy DMA drivers without a locked reclaim hook get the generic
	 * buffer reclaim. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	/* Last close tears down the whole device state. */
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}
/*
 * i915_reset - reset the GPU after a hang (FreeBSD variant).
 *
 * Attempts a GPU reset, rate-limited to once per 5 seconds, then
 * re-initializes swizzling, the rings, the HW context and the PPGTT, and
 * cycles the IRQ handler.  Returns 0 on success, -EBUSY if the struct
 * lock cannot be taken, or a negative error from intel_gpu_reset().
 *
 * Locking: acquires dev_struct_lock via sx_try_xlock() and releases it
 * through DRM_UNLOCK() on every exit path.
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return (0);

	/* Don't block: if someone else holds the struct lock, bail. */
	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Rate-limit resets: a GPU hanging more than once per 5 seconds is
	 * declared wedged rather than reset again. */
	if (time_second - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	/* Bring the rings and GPU state back up in the KMS case, or if X
	 * was running at reset time. */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
		if (HAS_BSD(dev))
			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
		if (HAS_BLT(dev))
			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		/* drm_irq_install() must run without the struct lock. */
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
		DRM_LOCK(dev);
	}
	DRM_UNLOCK(dev);

	/* Force a modeset to restore the display configuration. */
	if (need_display) {
		sx_xlock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	return (0);
}
/*
 * getDRIDrawableInfoLocked - look up (and if needed refresh) the cached
 * clip/geometry info for @draw.
 *
 * Called with the DRM heavyweight lock held.  The lock is temporarily
 * dropped around every round-trip to the X server and re-taken afterwards,
 * so SAREA state must be re-validated after each call.  Loops until the
 * cached stamp matches the SAREA stamp for the drawable.
 *
 * Returns 0 on success with *info pointing at the cached record, 1 on
 * failure (unknown/destroyed drawable or allocation failure).
 */
int getDRIDrawableInfoLocked(void *drawHash, Display *display, int screen,
    Drawable draw, unsigned lockFlags, int drmFD, drm_context_t drmContext,
    drmAddress sarea, Bool updateInfo, drawableInfo **info,
    unsigned long infoSize)
{
	drawableInfo *drawInfo;
	void *res;
	drm_drawable_t drmDraw = 0;
	volatile drm_sarea_t *pSarea = (drm_sarea_t *) sarea;
	drm_clip_rect_t *clipFront, *clipBack;
	int ret;

	if (drmHashLookup(drawHash, (unsigned long) draw, &res)) {
		/*
		 * The drawable is unknown to us. Create it and put it in the
		 * hash table.
		 */
		DRM_UNLOCK(drmFD, &pSarea->lock, drmContext);
		if (!uniDRICreateDrawable(display, screen, draw, &drmDraw)) {
			DRM_LOCK(drmFD, &pSarea->lock, drmContext, lockFlags);
			return 1;
		}
		DRM_LOCK(drmFD, &pSarea->lock, drmContext, lockFlags);

		drawInfo = (drawableInfo *) malloc(infoSize);
		if (!drawInfo)
			return 1;

		/* Fresh record: no clip lists yet, stamp 0 forces an
		 * immediate refresh below. */
		drawInfo->drmDraw = drmDraw;
		drawInfo->stamp = 0;
		drawInfo->clipFront = 0;
		drawInfo->clipBack = 0;

		drmHashInsert(drawHash, (unsigned long) draw, drawInfo);
	} else {
		drawInfo = res;
	}

	drawInfo->touched = FALSE;
	while (!drawInfo->clipFront ||
	    drawInfo->stamp != drawStamp(pSarea, drawInfo->index)) {
		/*
		 * The drawable has been touched since we last got info about it.
		 * obtain new info from the X server.
		 */
		drawInfo->touched = TRUE;

		if (updateInfo || !drawInfo->clipFront) {
			/* Drop the lock for the X server round-trip. */
			DRM_UNLOCK(drmFD, &pSarea->lock, drmContext);
			ret = uniDRIGetDrawableInfo(display, screen, draw,
			    &drawInfo->index, &drawInfo->stamp,
			    &drawInfo->x, &drawInfo->y,
			    &drawInfo->w, &drawInfo->h,
			    &drawInfo->numClipFront, &clipFront,
			    &drawInfo->backX, &drawInfo->backY,
			    &drawInfo->numClipBack, &clipBack);

			DRM_LIGHT_LOCK(drmFD, &pSarea->lock, drmContext);

			/*
			 * Error. Probably the drawable is destroyed. Return
			 * error and old values.
			 */
			if (!ret) {
				free(drawInfo);
				drawInfo = NULL;
				drmHashDelete(drawHash, (unsigned long) draw);

				DRM_UNLOCK(drmFD, &pSarea->lock, drmContext);
				uniDRIDestroyDrawable(display, screen, draw);
				DRM_LOCK(drmFD, &pSarea->lock, drmContext, lockFlags);
				return 1;
			}

			if (drawInfo->stamp != drawStamp(pSarea, drawInfo->index)) {
				/*
				 * The info is already outdated. Sigh. Have another go.
				 */
				XFree(clipFront);
				XFree(clipBack);
				continue;
			}

			/* Swap in the freshly fetched clip lists, freeing
			 * the previous ones. */
			if (drawInfo->clipFront)
				XFree(drawInfo->clipFront);
			drawInfo->clipFront = clipFront;
			if (drawInfo->clipBack)
				XFree(drawInfo->clipBack);
			drawInfo->clipBack = clipBack;
		} else {
			/* Not refreshing: install a sentinel clip pointer so
			 * the loop condition is satisfied and adopt the
			 * current SAREA stamp. */
			if (!drawInfo->clipFront)
				drawInfo->clipFront = (drm_clip_rect_t *) ~0UL;
			drawInfo->stamp = drawStamp(pSarea, drawInfo->index);
		}
	}
	*info = drawInfo;
	return 0;
}
/*
 * intelfb_create - allocate and set up the fbdev framebuffer (FreeBSD port).
 *
 * Allocates a GEM object sized for the requested surface, pins it into the
 * GTT, maps it write-combined for the console, and wires it into the fb
 * helper.  The #if 0 regions preserve the Linux fb_info setup that this
 * port replaces with a native fb_info.
 *
 * Returns 0 on success or a negative errno; on failure the object is
 * unpinned/unreferenced via the goto ladder.
 */
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
#if 0
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* Pitch: bytes per row, rounded up to a 64-byte boundary. */
	mode_cmd.pitches[0] = roundup2(mode_cmd.width *
	    ((sizes->surface_bpp + 7) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
	    sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK(dev);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#else
	/* Native fb_info: M_WAITOK cannot fail, so no NULL check needed. */
	info = malloc(sizeof(struct fb_info), DRM_MEM_KMS, M_WAITOK | M_ZERO);
	info->fb_size = size;
	info->fb_bpp = sizes->surface_bpp;
	info->fb_width = sizes->fb_width;
	info->fb_height = sizes->fb_height;
	info->fb_pbase = dev->agp->base + obj->gtt_offset;
	/* Map the aperture range write-combined for console blits. */
	info->fb_vbase = (vm_offset_t)pmap_mapdev_attr(info->fb_pbase, size,
	    PAT_WRITE_COMBINING);

#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;
#if 0
	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
	    dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper,
	    sizes->fb_width, sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#endif

	DRM_DEBUG_KMS("allocated %dx%d (s %dbits) fb: 0x%08x, bo %p\n",
	    fb->width, fb->height, fb->depth, obj->gtt_offset, obj);

	DRM_UNLOCK(dev);
#if 1
	KIB_NOTYET();
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);
out:
	return ret;
}
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!i915_try_reset)
		return 0;

	DRM_LOCK(dev);

	i915_gem_reset(dev);

	ret = -ENODEV;
	/* Rate-limit: hanging again within 5 seconds means wedged. */
	if (time_uptime - dev_priv->last_gpu_reset < 5)
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_uptime;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		struct intel_ring_buffer *ring;
		int i;

		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		for_each_ring(ring, dev_priv, i)
			ring->init(ring);

#if 0	/* XXX: HW context support */
		i915_gem_context_init(dev);
#endif
		i915_gem_init_ppgtt(dev);

		/*
		 * It would make sense to re-init all the other hw state, at
		 * least the rps/rc6/emon init done within modeset_init_hw. For
		 * some unknown reason, this blows up my ilk, so don't.
		 */

		/* IRQ cycling must happen without the struct lock held. */
		DRM_UNLOCK(dev);
		drm_irq_uninstall(dev);
		drm_irq_install(dev);
	} else {
		DRM_UNLOCK(dev);
	}

	return 0;
}
/*
 * i915_load_modeset_init - bring up KMS during driver load.
 *
 * Parses the VBIOS, initializes modesetting, GEM, the IRQ handler, the
 * fbdev emulation and output polling, in that order.  Failures unwind via
 * the cleanup_* goto ladder.  Returns 0 on success or an error code.
 */
int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		/* Non-fatal: continue without VBIOS tables. */
		DRM_INFO("failed to find VBIOS tables\n");

#if 0
	intel_register_dsm_handler();
#endif

	/* IIR "flip pending" bit means done if this bit is set */
	if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
		dev_priv->flip_pending_is_done = true;

#ifdef notyet
	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;
#endif

	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_modeset_gem_init(dev);

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem;

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = 1;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_irq;

	drm_kms_helper_poll_init(dev);

	/* We're off and running w/KMS */
	dev_priv->mm.suspended = 0;

	return (0);

cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem:
	/* Ring teardown requires the struct lock; the PPGTT cleanup below
	 * runs without it. */
	DRM_LOCK();
	i915_gem_cleanup_ringbuffer(dev);
	DRM_UNLOCK();
	i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
#ifdef notyet
	i915_gem_cleanup_stolen(dev);
#endif
	return (ret);
}
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int i915_gem_set_tiling(struct drm_device *dev, void *data,
    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	/* NOTE(review): relies on 'base' being the first struct member so
	 * a failed lookup yields a NULL address here. */
	if (&obj->base == NULL)
		return -ENOENT;

	/* Validate the requested stride/mode against hw constraints. */
	if (!i915_tiling_ok(dev,
	    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	/* Pinned objects cannot change tiling (their mapping is fixed). */
	if (obj->pin_count) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EBUSY;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	DRM_LOCK(dev);
	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode.  Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is cleared.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register.  Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
		 */
		i915_gem_release_mmap(obj);

		obj->map_and_fenceable = obj->gtt_space == NULL ||
		    (obj->gtt_offset + obj->base.size <=
		     dev_priv->mm.gtt_mappable_end &&
		     i915_gem_object_fence_ok(obj, args->tiling_mode));

		/* Rebind if we need a change of alignment */
		if (!obj->map_and_fenceable) {
			u32 unfenced_alignment =
			    i915_gem_get_unfenced_gtt_alignment(dev,
				obj->base.size, args->tiling_mode);
			if (obj->gtt_offset & (unfenced_alignment - 1))
				ret = i915_gem_object_unbind(obj);
		}

		/* Only commit the new tiling state if the unbind (if any)
		 * succeeded. */
		if (ret == 0) {
			obj->fence_dirty = obj->fenced_gpu_access ||
			    obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
			obj->stride = args->stride;
		}
	}
	/* we have to maintain this existing ABI... */
	args->stride = obj->stride;
	args->tiling_mode = obj->tiling_mode;
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	return ret;
}
/*
 * drm_ati_alloc_pcigart_table - allocate the DMA-visible PCI GART table.
 *
 * Allocates a drm_dma_handle and backs it with bus_dma memory sized to
 * gart_info->table_size, using the platform-appropriate bus_dma flow
 * (FreeBSD: tag/alloc/load with callback; NetBSD: alloc/map/create/load).
 * IGP GARTs request uncached memory via BUS_DMA_NOCACHE.
 *
 * On success the handle is stored in dev->sg->dmah and 0 is returned;
 * every failure path fully unwinds the bus_dma state and returns ENOMEM.
 */
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
    struct drm_ati_pcigart_info *gart_info)
{
	struct drm_dma_handle *dmah;
	int flags, ret;
#if defined(__NetBSD__)
	int nsegs;
#endif

	dmah = malloc(sizeof(struct drm_dma_handle), DRM_MEM_DMA,
	    M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return ENOMEM;

#if defined(__FreeBSD__)
	/* The tag/alloc steps may sleep; drop the DRM lock around them. */
	DRM_UNLOCK();
	ret = bus_dma_tag_create(NULL, PAGE_SIZE, 0, /* tag, align, boundary */
	    gart_info->table_mask, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
	    NULL, NULL, /* filtfunc, filtfuncargs */
	    gart_info->table_size, 1, /* maxsize, nsegs */
	    gart_info->table_size, /* maxsegsize */
	    BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}

	flags = BUS_DMA_NOWAIT | BUS_DMA_ZERO;
	/* IGP GART tables must be mapped uncached. */
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
		flags |= BUS_DMA_NOCACHE;

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, flags, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	DRM_LOCK();

	/* The callback records the bus address into dmah. */
	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
	    gart_info->table_size, drm_ati_alloc_pcigart_table_cb, dmah, 0);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
#elif defined(__NetBSD__)
	dmah->tag = dev->pa.pa_dmat;
	flags = BUS_DMA_NOWAIT;
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
		flags |= BUS_DMA_NOCACHE;
	ret = bus_dmamem_alloc(dmah->tag, gart_info->table_size, PAGE_SIZE,
	    0, dmah->segs, 1, &nsegs, flags);
	if (ret != 0) {
		printf("drm: unable to allocate %zu bytes of DMA, error %d\n",
		    (size_t)gart_info->table_size, ret);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	/* One physically contiguous segment is required for the table. */
	if (nsegs != 1) {
		printf("drm: bad segment count\n");
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	ret = bus_dmamem_map(dmah->tag, dmah->segs, nsegs,
	    gart_info->table_size, &dmah->vaddr, flags | BUS_DMA_COHERENT);
	if (ret != 0) {
		printf("drm: Unable to map DMA, error %d\n", ret);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	ret = bus_dmamap_create(dmah->tag, gart_info->table_size, 1,
	    gart_info->table_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &dmah->map);
	if (ret != 0) {
		printf("drm: Unable to create DMA map, error %d\n", ret);
		bus_dmamem_unmap(dmah->tag, dmah->vaddr, gart_info->table_size);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr,
	    gart_info->table_size, NULL, BUS_DMA_NOWAIT);
	if (ret != 0) {
		printf("drm: Unable to load DMA map, error %d\n", ret);
		bus_dmamap_destroy(dmah->tag, dmah->map);
		bus_dmamem_unmap(dmah->tag, dmah->vaddr, gart_info->table_size);
		bus_dmamem_free(dmah->tag, dmah->segs, 1);
		dmah->tag = NULL;
		free(dmah, DRM_MEM_DMA);
		return ENOMEM;
	}
	dmah->busaddr = dmah->map->dm_segs[0].ds_addr;
	dmah->size = gart_info->table_size;
	dmah->nsegs = 1;
#if 0
	/*
	 * Mirror here FreeBSD doing BUS_DMA_ZERO.
	 * But I see this same memset() is done in drm_ati_pcigart_init(),
	 * so maybe this is not needed.
	 */
	memset(dmah->vaddr, 0, gart_info->table_size);
#endif
#endif

	dev->sg->dmah = dmah;

	return 0;
}
/*
 * pscnv_gem_pager_fault - cdev pager fault handler for pscnv GEM objects.
 *
 * Resolves a fault at @offset within the BO backing @vm_obj to a
 * fictitious page mapping the proper physical address: the FIFO control
 * window (uncacheable), the VRAM BAR aperture (write-combining), or a
 * system RAM page (write-back), depending on the BO type.
 *
 * Fixes vs. the previous version:
 *  - the offset is validated against bo->size BEFORE being used, so the
 *    sysram path no longer reads bo->dmapages[] out of bounds (the bounds
 *    warning consequently no longer names the memory type);
 *  - the debug printf now uses %jx with uintmax_t casts for the offset
 *    and physical address (previously %x for a vm_paddr_t) and is
 *    newline-terminated.
 *
 * Returns VM_PAGER_OK on success; VM_PAGER_ERROR, EINVAL or -EFAULT on
 * failure (mixed error conventions retained from the original callers).
 */
static int pscnv_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct drm_gem_object *gem_obj = vm_obj->handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	vm_page_t m = NULL;
	vm_page_t oldm;
	vm_memattr_t mattr;
	vm_paddr_t paddr;
	const char *what;

	/*
	 * Validate the offset before it is used to form an address or to
	 * index bo->dmapages[] below.
	 */
	if (offset >= bo->size) {
		if (pscnv_mem_debug > 0)
			NV_WARN(dev, "Reading %p + %08llx is past max size %08llx\n",
			    bo, offset, bo->size);
		return (VM_PAGER_ERROR);
	}

	if (bo->chan) {
		/* FIFO control window: uncacheable register space. */
		paddr = dev_priv->fb_phys + offset +
		    nvc0_fifo_ctrl_offs(dev, bo->chan->cid);
		mattr = VM_MEMATTR_UNCACHEABLE;
		what = "fifo";
	} else switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		/* VRAM through the BAR1 aperture: write-combining. */
		paddr = dev_priv->fb_phys + bo->map1->start + offset;
		mattr = VM_MEMATTR_WRITE_COMBINING;
		what = "vram";
		break;
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		/* Plain system RAM page; offset was bounds-checked above. */
		paddr = bo->dmapages[OFF_TO_IDX(offset)];
		mattr = VM_MEMATTR_WRITE_BACK;
		what = "sysram";
		break;
	default:
		return (EINVAL);
	}

	DRM_LOCK(dev);
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Connecting %p+%08llx (%s) at phys %010llx\n",
		    bo, offset, what, paddr);
	vm_object_pip_add(vm_obj, 1);

	/* Detach the pager-supplied placeholder page, if any; it is freed
	 * after the lock is dropped below. */
	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;

	m = vm_phys_fictitious_to_vm_page(paddr);
	if (m == NULL) {
		DRM_UNLOCK(dev);
		return -EFAULT;
	}
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m));
	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
	if ((m->flags & VPO_BUSY) != 0) {
		DRM_UNLOCK(dev);
		return -EFAULT;
	}
	pmap_page_set_memattr(m, mattr);
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_lock(m);
	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	vm_page_unlock(m);
	vm_page_busy(m);
	printf("fault %p %jx %x phys %jx\n", gem_obj, (uintmax_t)offset, prot,
	    (uintmax_t)m->phys_addr);
	DRM_UNLOCK(dev);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}
	vm_object_pip_wakeup(vm_obj);
	return (VM_PAGER_OK);
}
/**
 * \brief Allocate a physically contiguous DMA-accessible consistent
 * memory block.
 *
 * \param dev     drm device (NetBSD path uses its DMA tag)
 * \param size    number of bytes to allocate
 * \param align   required alignment; must be a power of two
 * \param maxaddr highest acceptable bus address
 * \return handle describing the allocation, or NULL on failure.
 *
 * FreeBSD: creates a private tag and drops the DRM lock around the
 * sleeping tag/alloc steps, re-taking it before the map load.
 * NetBSD: uses the platform DMA tag with a goto-unwind error ladder.
 */
drm_dma_handle_t *
drm_pci_alloc(struct drm_device *dev, size_t size,
	      size_t align, dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;
#if defined(__NetBSD__)
	int nsegs;
#endif

	/* Need power-of-two alignment, so fail the allocation if it isn't. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = malloc(sizeof(drm_dma_handle_t), M_DRM, M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

#ifdef __FreeBSD__
	/* Tag creation and allocation may sleep: drop the DRM lock. */
	DRM_UNLOCK();
	ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
	    NULL, NULL, /* filtfunc, filtfuncargs */
	    size, 1, size, /* maxsize, nsegs, maxsegsize */
	    BUS_DMA_ALLOCNOW, NULL, NULL, /* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, M_DRM);
		DRM_LOCK();
		return NULL;
	}

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr, BUS_DMA_NOWAIT,
	    &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, M_DRM);
		DRM_LOCK();
		return NULL;
	}
	DRM_LOCK();

	/* The callback stores the resolved bus address into dmah. */
	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, 0);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, M_DRM);
		return NULL;
	}

	return dmah;
#elif defined(__NetBSD__)
	dmah->tag = dev->pa.pa_dmat;

	if ((ret = bus_dmamem_alloc(dmah->tag, size, align, maxaddr,
	    dmah->segs, 1, &nsegs, BUS_DMA_NOWAIT)) != 0) {
		printf("drm: Unable to allocate DMA, error %d\n", ret);
		goto fail;
	}
	/* XXX is there a better way to deal with this? */
	if (nsegs != 1) {
		printf("drm: bad segment count from bus_dmamem_alloc\n");
		goto free;
	}
	if ((ret = bus_dmamem_map(dmah->tag, dmah->segs, nsegs, size,
	    &dmah->vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("drm: Unable to map DMA, error %d\n", ret);
		goto free;
	}
	if ((ret = bus_dmamap_create(dmah->tag, size, 1, size, maxaddr,
	    BUS_DMA_NOWAIT, &dmah->map)) != 0) {
		printf("drm: Unable to create DMA map, error %d\n", ret);
		goto unmap;
	}
	if ((ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("drm: Unable to load DMA map, error %d\n", ret);
		goto destroy;
	}
	dmah->busaddr = dmah->map->dm_segs[0].ds_addr;
	dmah->size = size;

	return dmah;

	/* Error unwind: tear down in reverse order of construction. */
destroy:
	bus_dmamap_destroy(dmah->tag, dmah->map);
unmap:
	bus_dmamem_unmap(dmah->tag, dmah->vaddr, size);
free:
	bus_dmamem_free(dmah->tag, dmah->segs, 1);
fail:
	dmah->tag = NULL;
	free(dmah, M_DRM);
	return NULL;
#endif
}
/* Initialize the screen-specific data structures for the DRI and the
   Rage 128.  This is the main entry point to the device-specific
   initialization code.  It calls device-independent DRI functions to
   create the DRI data structures and initialize the DRI state. */
static GLboolean R128DRIScreenInit(DRIDriverContext *ctx)
{
    R128InfoPtr info = ctx->driverPrivate;
    R128DRIPtr pR128DRI;
    int err;
    drmVersionPtr version;
    drm_r128_sarea_t *pSAREAPriv;

    /* Only 16 and 32 color depths are supported currently. */
    switch (ctx->bpp) {
    case 8:
	/* These modes are not supported (yet). */
    case 15:
    case 24:
    default:
	/* NOTE(review): previously any depth not listed here fell through
	 * and initialization proceeded; reject unknown depths explicitly
	 * since only 16 and 32 bpp are handled below. */
	fprintf(stderr,
		"[dri] R128DRIScreenInit failed (depth %d not supported). "
		"[dri] Disabling DRI.\n", ctx->bpp);
	return GL_FALSE;

    case 16:
    case 32:
	break;
    }
    r128_drm_page_size = getpagesize();

    info->registerSize = ctx->MMIOSize;
    ctx->shared.SAREASize = SAREA_MAX;

    /* Note that drmOpen will try to load the kernel module, if needed. */
    ctx->drmFD = drmOpen("r128", NULL );
    if (ctx->drmFD < 0) {
	fprintf(stderr, "[drm] drmOpen failed\n");
	return GL_FALSE;
    }

    /* Check the r128 DRM version: major must be exactly 2, minor >= 2. */
    version = drmGetVersion(ctx->drmFD);
    if (version) {
	if (version->version_major != 2 ||
	    version->version_minor < 2) {
	    /* incompatible drm version */
	    fprintf(stderr,
		"[dri] R128DRIScreenInit failed because of a version mismatch.\n"
		"[dri] r128.o kernel module version is %d.%d.%d but version 2.2 or greater is needed.\n"
		"[dri] Disabling the DRI.\n",
		version->version_major,
		version->version_minor,
		version->version_patchlevel);
	    drmFreeVersion(version);
	    return GL_FALSE;
	}
	info->drmMinor = version->version_minor;
	drmFreeVersion(version);
    }

    if ((err = drmSetBusid(ctx->drmFD, ctx->pciBusID)) < 0) {
	fprintf(stderr, "[drm] drmSetBusid failed (%d, %s), %s\n",
		ctx->drmFD, ctx->pciBusID, strerror(-err));
	return GL_FALSE;
    }

    /* The SAREA is the shared kernel/client state area; it also carries
     * the hardware lock (DRM_CONTAINS_LOCK). */
    if (drmAddMap( ctx->drmFD,
		   0,
		   ctx->shared.SAREASize,
		   DRM_SHM,
		   DRM_CONTAINS_LOCK,
		   &ctx->shared.hSAREA) < 0)
    {
	fprintf(stderr, "[drm] drmAddMap failed\n");
	return GL_FALSE;
    }
    fprintf(stderr, "[drm] added %d byte SAREA at 0x%08lx\n",
	    ctx->shared.SAREASize, ctx->shared.hSAREA);

    if (drmMap( ctx->drmFD,
		ctx->shared.hSAREA,
		ctx->shared.SAREASize,
		(drmAddressPtr)(&ctx->pSAREA)) < 0)
    {
	fprintf(stderr, "[drm] drmMap failed\n");
	return GL_FALSE;
    }
    memset(ctx->pSAREA, 0, ctx->shared.SAREASize);
    fprintf(stderr, "[drm] mapped SAREA 0x%08lx to %p, size %d\n",
	    ctx->shared.hSAREA, ctx->pSAREA, ctx->shared.SAREASize);

    /* Need to AddMap the framebuffer and mmio regions here: */
    if (drmAddMap( ctx->drmFD,
		   (drm_handle_t)ctx->FBStart,
		   ctx->FBSize,
		   DRM_FRAME_BUFFER,
		   0,
		   &ctx->shared.hFrameBuffer) < 0)
    {
	fprintf(stderr, "[drm] drmAddMap framebuffer failed\n");
	return GL_FALSE;
    }
    fprintf(stderr, "[drm] framebuffer handle = 0x%08lx\n",
	    ctx->shared.hFrameBuffer);

    if (!R128MemoryInit(ctx))
	return GL_FALSE;

    /* Initialize AGP; on failure fall back to PCI mode rather than abort. */
    if (!info->IsPCI && !R128DRIAgpInit(ctx)) {
	info->IsPCI = GL_TRUE;
	fprintf(stderr,
		"[agp] AGP failed to initialize -- falling back to PCI mode.\n");
	fprintf(stderr,
		"[agp] Make sure you have the agpgart kernel module loaded.\n");
    }

    /* Initialize PCIGART */
    if (info->IsPCI && !R128DRIPciInit(ctx)) {
	return GL_FALSE;
    }

    /* DRIScreenInit doesn't add all the common mappings.  Add additional
       mappings here. */
    if (!R128DRIMapInit(ctx)) {
	return GL_FALSE;
    }

    /* Create a 'server' context so we can grab the lock for
     * initialization ioctls. */
    if ((err = drmCreateContext(ctx->drmFD, &ctx->serverContext)) != 0) {
	fprintf(stderr, "%s: drmCreateContext failed %d\n",
		__FUNCTION__, err);
	return GL_FALSE;
    }

    /* NOTE(review): the lock is taken here and not released in this
     * function even on the failure paths below — presumably it is held
     * through the remainder of server-side init; confirm against callers. */
    DRM_LOCK(ctx->drmFD, ctx->pSAREA, ctx->serverContext, 0);

    /* Initialize the kernel data structures */
    if (!R128DRIKernelInit(ctx)) {
	return GL_FALSE;
    }

    /* Initialize the vertex buffers list */
    if (!R128DRIBufInit(ctx)) {
	return GL_FALSE;
    }

    /* Initialize IRQ */
    R128DRIIrqInit(ctx);

    /* Initialize and start the CCE if required */
    R128DRICCEInit(ctx);

    /* Quick hack to clear the front & back buffers.  Could also use
     * the clear ioctl to do this, but would need to setup hw state
     * first. */
    drimemsetio((char *)ctx->FBAddress + info->frontOffset,
		0,
		info->frontPitch * ctx->cpp * ctx->shared.virtualHeight );
    drimemsetio((char *)ctx->FBAddress + info->backOffset,
		0,
		info->backPitch * ctx->cpp * ctx->shared.virtualHeight );

    pSAREAPriv = (drm_r128_sarea_t *)(((char*)ctx->pSAREA) +
				      sizeof(drm_sarea_t));
    memset(pSAREAPriv, 0, sizeof(*pSAREAPriv));

    /* This is the struct passed to radeon_dri.so for its initialization */
    ctx->driverClientMsg = malloc(sizeof(R128DRIRec));
    if (!ctx->driverClientMsg) {
	/* Fix: the original dereferenced the allocation unchecked. */
	fprintf(stderr, "[dri] R128DRIScreenInit failed to allocate client message\n");
	return GL_FALSE;
    }
    ctx->driverClientMsgSize = sizeof(R128DRIRec);
    pR128DRI                    = (R128DRIPtr)ctx->driverClientMsg;
    pR128DRI->deviceID          = info->Chipset;
    pR128DRI->width             = ctx->shared.virtualWidth;
    pR128DRI->height            = ctx->shared.virtualHeight;
    pR128DRI->depth             = ctx->bpp;
    pR128DRI->bpp               = ctx->bpp;
    pR128DRI->IsPCI             = info->IsPCI;
    pR128DRI->AGPMode           = info->agpMode;

    pR128DRI->frontOffset       = info->frontOffset;
    pR128DRI->frontPitch        = info->frontPitch;
    pR128DRI->backOffset        = info->backOffset;
    pR128DRI->backPitch         = info->backPitch;
    pR128DRI->depthOffset       = info->depthOffset;
    pR128DRI->depthPitch        = info->depthPitch;
    pR128DRI->spanOffset        = info->spanOffset;
    pR128DRI->textureOffset     = info->textureOffset;
    pR128DRI->textureSize       = info->textureSize;
    pR128DRI->log2TexGran       = info->log2TexGran;

    pR128DRI->registerHandle    = info->registerHandle;
    pR128DRI->registerSize      = info->registerSize;

    pR128DRI->agpTexHandle      = info->agpTexHandle;
    pR128DRI->agpTexMapSize     = info->agpTexMapSize;
    pR128DRI->log2AGPTexGran    = info->log2AGPTexGran;
    pR128DRI->agpTexOffset      = info->agpTexStart;
    pR128DRI->sarea_priv_offset = sizeof(drm_sarea_t);

    return GL_TRUE;
}
/*
 * drm_unload - tear down a DRM device on driver detach.
 *
 * Releases device state in a fixed order: sysctl nodes, the character
 * device node, the context bitmap, GEM state, the AGP aperture MTRR,
 * vblank state, open-device state (lastclose), PCI memory resources,
 * the AGP record, driver-specific state, and finally the locks.
 * The ordering matters; do not reorder these steps casually.
 */
static void drm_unload(struct drm_device *dev)
{
	int i;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	if (dev->devnode != NULL)
		destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	/* GEM teardown only applies to drivers that advertise it. */
	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_destroy(dev);

	if (dev->agp && dev->agp->agp_mtrr) {
		/* Remove the write-combining MTRR covering the AGP aperture.
		 * Failure is logged but not fatal at this point. */
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->agp_info.ai_aperture_base,
		    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d", retcode);
	}

	drm_vblank_cleanup(dev);

	/* lastclose must run under the device lock. */
	DRM_LOCK(dev);
	drm_lastclose(dev);
	DRM_UNLOCK(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		drm_free(dev->agp, M_DRM);
		dev->agp = NULL;
	}

	/* Give the driver its own unload hook, again under the device lock. */
	if (dev->driver->unload != NULL) {
		DRM_LOCK(dev);
		dev->driver->unload(dev);
		DRM_UNLOCK(dev);
	}

	drm_mem_uninit();

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	/* Locks go last: nothing above may run after these are destroyed. */
	lockuninit(&dev->vbl_lock);
	lockuninit(&dev->dev_lock);
	lockuninit(&dev->event_lock);
	lockuninit(&dev->struct_mutex);
}
/*
 * intel_update_plane - show or move a sprite plane over a CRTC.
 *
 * Source coordinates (src_*) arrive in 16.16 fixed point; destination
 * (crtc_*) coordinates are integer pixels.  The destination rectangle is
 * clamped to the visible area of the CRTC; note the source rectangle is
 * NOT rescaled to match — the caller must adjust source offset/size for
 * partially offscreen planes.  Pins the new framebuffer object under the
 * device lock, programs the hardware via intel_plane->update_plane, and
 * unpins the previously displayed object afterwards (waiting for vblank
 * first when the object actually changed, to avoid visible glitches).
 *
 * Returns 0 on success or a negative errno (-EINVAL for an off-pipe,
 * offscreen, or over-downscaled request; pin errors pass through).
 */
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		   unsigned int crtc_w, unsigned int crtc_h,
		   uint32_t src_x, uint32_t src_y,
		   uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj, *old_obj;
	int pipe = intel_plane->pipe;
	int ret = 0;
	/* Drop the 16.16 fractional part of the source origin. */
	int x = src_x >> 16, y = src_y >> 16;
	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
	bool disable_primary = false;

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	old_obj = intel_plane->obj;

	/* Convert source size from 16.16 fixed point to whole pixels. */
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	/* Pipe must be running... */
	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
		return -EINVAL;

	/* Destination origin past the visible area: nothing to show. */
	if (crtc_x >= primary_w || crtc_y >= primary_h)
		return -EINVAL;

	/* Don't modify another pipe's plane */
	if (intel_plane->pipe != intel_crtc->pipe)
		return -EINVAL;

	/*
	 * Clamp the width & height into the visible area.  Note we don't
	 * try to scale the source if part of the visible region is offscreen.
	 * The caller must handle that by adjusting source offset and size.
	 */
	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
		crtc_w += crtc_x;
		crtc_x = 0;
	}
	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
		goto out;
	if ((crtc_x + crtc_w) > primary_w)
		crtc_w = primary_w - crtc_x;

	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
		crtc_h += crtc_y;
		crtc_y = 0;
	}
	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
		goto out;
	if (crtc_y + crtc_h > primary_h)
		crtc_h = primary_h - crtc_y;

	if (!crtc_w || !crtc_h) /* Again, nothing to display */
		goto out;

	/*
	 * We can take a larger source and scale it down, but
	 * only so much...  16x is the max on SNB.
	 * (crtc_w/crtc_h are known nonzero here, so the division is safe.)
	 */
	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
		return -EINVAL;

	/*
	 * If the sprite is completely covering the primary plane,
	 * we can disable the primary and save power.
	 */
	if ((crtc_x == 0) && (crtc_y == 0) &&
	    (crtc_w == primary_w) && (crtc_h == primary_h))
		disable_primary = true;

	DRM_LOCK(dev);

	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
	if (ret)
		goto out_unlock;

	intel_plane->obj = obj;

	/*
	 * Be sure to re-enable the primary before the sprite is no longer
	 * covering it fully.
	 */
	if (!disable_primary && intel_plane->primary_disabled) {
		intel_enable_primary(crtc);
		intel_plane->primary_disabled = false;
	}

	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
				  crtc_w, crtc_h, x, y, src_w, src_h);

	if (disable_primary) {
		intel_disable_primary(crtc);
		intel_plane->primary_disabled = true;
	}

	/* Unpin old obj after new one is active to avoid ugliness */
	if (old_obj) {
		/*
		 * It's fairly common to simply update the position of
		 * an existing object.  In that case, we don't need to
		 * wait for vblank to avoid ugliness, we only need to
		 * do the pin & ref bookkeeping.
		 */
		if (old_obj != obj) {
			/* Drop the lock across the wait; re-take for unpin. */
			DRM_UNLOCK(dev);
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
			DRM_LOCK(dev);
		}
		intel_unpin_fb_obj(old_obj);
	}

out_unlock:
	DRM_UNLOCK(dev);
out:
	return ret;
}
/*
 * intelfb_create - allocate and map a GEM-backed kernel framebuffer.
 *
 * Computes the framebuffer geometry from the requested surface size
 * (24bpp requests are promoted to 32bpp; the pitch is rounded to 64
 * bytes and the total size to a page), allocates a GEM object, pins it
 * into the GTT under the device lock, and wires it up as the fbdev
 * helper's framebuffer.  The Linux fb_info path is compiled out
 * (#if 0); the active path maps the object through AGP and fills in a
 * rasops_info console descriptor instead.
 *
 * Returns 0 on success or a negative errno; unwinds the pin/reference
 * via the goto cleanup ladder on failure.
 */
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	struct fb_info *info;
#endif
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* Pitch = width * bytes-per-pixel, rounded up to 64 bytes. */
	mode_cmd.pitches[0] = roundup2(mode_cmd.width * ((sizes->surface_bpp + 7) /
							 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup2(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	DRM_LOCK();

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

#if 0
	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;
#endif

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
#if 0
	/* Linux fbdev registration path — compiled out on this platform. */
	ifbdev->helper.fbdev = info;

	strlcpy(info->fix.id, "inteldrmfb", sizeof(info->fix.id));

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size =
	    dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width,
	    sizes->fb_height);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#else
	/* Active path: map the pinned object via AGP and describe it to
	 * the rasops console layer. */
	{
		struct rasops_info *ri = &dev_priv->ro;
		bus_space_handle_t bsh;
		int err;

		err = agp_map_subregion(dev_priv->agph, obj->gtt_offset,
		    size, &bsh);
		if (err) {
			/* agp_map_subregion returns a positive errno; negate
			 * it for the driver's negative-errno convention. */
			ret = -err;
			goto out_unpin;
		}

		ri->ri_bits = bus_space_vaddr(dev->bst, bsh);
		ri->ri_depth = fb->bits_per_pixel;
		ri->ri_stride = fb->pitches[0];
		ri->ri_width = sizes->fb_width;
		ri->ri_height = sizes->fb_height;

		/* Only these two formats get channel layouts; others leave
		 * the rasops color fields untouched. */
		switch (fb->pixel_format) {
		case DRM_FORMAT_XRGB8888:
			ri->ri_rnum = 8;
			ri->ri_rpos = 16;
			ri->ri_gnum = 8;
			ri->ri_gpos = 8;
			ri->ri_bnum = 8;
			ri->ri_bpos = 0;
			break;
		case DRM_FORMAT_RGB565:
			ri->ri_rnum = 5;
			ri->ri_rpos = 11;
			ri->ri_gnum = 6;
			ri->ri_gpos = 5;
			ri->ri_bnum = 5;
			ri->ri_bpos = 0;
			break;
		}
	}
#endif

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	DRM_UNLOCK();
#if 1
	DRM_DEBUG_KMS("skipping call to vga_switcheroo_client_fb_set\n");
#else
	vga_switcheroo_client_fb_set(dev->pdev, info);
#endif
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK();
out:
	return ret;
}