int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_fb_t *fb = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, fb->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("VRAM memory manager initialisation error\n"); return ret; } dev_priv->vram_initialized = 1; dev_priv->vram_offset = fb->offset; DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size); return 0; }
/*
 * Upload the WARP microcode appropriate for the detected chipset.
 *
 * Fix: the ucode size is an unsigned int, so log it with %u (the later
 * DRM_ERROR already uses %u for the same value).
 *
 * Returns 0 on success, -ENOMEM if the ucode does not fit in the WARP
 * map, or -EINVAL for an unknown chipset.
 */
int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
{
    const unsigned int size = mga_warp_microcode_size(dev_priv);

    DRM_DEBUG("MGA ucode size = %u bytes\n", size);
    if (size > dev_priv->warp->size) {
        DRM_ERROR("microcode too large! (%u > %lu)\n",
                  size, dev_priv->warp->size);
        return -ENOMEM;
    }

    switch (dev_priv->chipset) {
    case MGA_CARD_TYPE_G400:
    case MGA_CARD_TYPE_G550:
        return mga_warp_install_g400_microcode(dev_priv);
    case MGA_CARD_TYPE_G200:
        return mga_warp_install_g200_microcode(dev_priv);
    default:
        return -EINVAL;
    }
}
/*
 * Poll the MGA status register until the engine reports idle or the
 * configured usec timeout expires.  Returns 0 on idle, DRM_ERR(EBUSY)
 * on timeout.
 */
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
    u32 status = 0;
    int tries;

    DRM_DEBUG("\n");

    for (tries = 0; tries < dev_priv->usec_timeout; tries++) {
        status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
        if (status == MGA_ENDPRDMASTS) {
            MGA_WRITE8(MGA_CRTC_INDEX, 0);
            return 0;
        }
        DRM_UDELAY(1);
    }

#if MGA_DMA_DEBUG
    DRM_ERROR("failed!\n");
    DRM_INFO("   status=0x%08x\n", status);
#endif
    return DRM_ERR(EBUSY);
}
/**
 * Removes the given magic number from the hash table of used magic number
 * lists.  Returns 0 on success, -EINVAL if the magic is not registered.
 */
static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
{
    struct drm_hash_item *hash;
    drm_magic_entry_t *entry = NULL;

    DRM_DEBUG("%d\n", magic);

    mutex_lock(&dev->struct_mutex);
    if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
        entry = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
        drm_ht_remove_item(&dev->magiclist, hash);
        list_del(&entry->head);
    }
    mutex_unlock(&dev->struct_mutex);

    if (entry == NULL)
        return -EINVAL;

    /* Free outside the mutex, as the original did. */
    kfree(entry);
    return 0;
}
static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct virtio_gpu_output *output = drm_connector_to_virtio_gpu_output(connector); int width, height; width = le32_to_cpu(output->info.r.width); height = le32_to_cpu(output->info.r.height); if (!(mode->type & DRM_MODE_TYPE_PREFERRED)) return MODE_OK; if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF) return MODE_OK; if (mode->hdisplay <= width && mode->hdisplay >= width - 16 && mode->vdisplay <= height && mode->vdisplay >= height - 16) return MODE_OK; DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay); return MODE_BAD; }
/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 *
 * No-op unless the driver is a legacy driver with DMA support, or if the
 * dma structure was never allocated.
 */
void drm_legacy_dma_takedown(struct drm_device *dev)
{
    struct drm_device_dma *dma = dev->dma;
    int i, j;

    if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
        !drm_core_check_feature(dev, DRIVER_LEGACY))
        return;

    if (!dma)
        return;

    /* Clear dma buffers: one bucket per allocation order. */
    for (i = 0; i <= DRM_MAX_ORDER; i++) {
        if (dma->bufs[i].seg_count) {
            DRM_DEBUG("order %d: buf_count = %d,"
                      " seg_count = %d\n",
                      i,
                      dma->bufs[i].buf_count,
                      dma->bufs[i].seg_count);
            /* Release each PCI-consistent segment, then the list itself. */
            for (j = 0; j < dma->bufs[i].seg_count; j++) {
                if (dma->bufs[i].seglist[j]) {
                    drm_pci_free(dev, dma->bufs[i].seglist[j]);
                }
            }
            kfree(dma->bufs[i].seglist);
        }
        if (dma->bufs[i].buf_count) {
            /* Per-buffer driver-private data, then the buffer list. */
            for (j = 0; j < dma->bufs[i].buf_count; j++) {
                kfree(dma->bufs[i].buflist[j].dev_private);
            }
            kfree(dma->bufs[i].buflist);
        }
    }

    kfree(dma->buflist);
    kfree(dma->pagelist);
    kfree(dev->dma);
    dev->dma = NULL;
}
/*
 * SDMA trap interrupt handler.  Decodes the SDMA instance and queue from
 * the IV ring_id and signals fence completion for the gfx queue (queue 0)
 * of SDMA0/SDMA1; the compute queues are not handled yet (matching the
 * original's empty "XXX compute" cases).  Always returns 0.
 */
static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *source,
                                     struct amdgpu_iv_entry *entry)
{
    u8 instance_id = entry->ring_id & 0x3;
    u8 queue_id = (entry->ring_id >> 2) & 0x3;

    DRM_DEBUG("IH: SDMA trap\n");

    if (queue_id == 0 && (instance_id == 0 || instance_id == 1))
        amdgpu_fence_process(&adev->sdma.instance[instance_id].ring);

    return 0;
}
/*
 * Disable interrupt handling for the device.
 * Returns EINVAL if interrupts were not enabled, 0 otherwise.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
    int i;

    DRM_LOCK();
    if (!dev->irq_enabled) {
        DRM_UNLOCK();
        return (EINVAL);
    }

    dev->irq_enabled = 0;
    DRM_UNLOCK();

    /*
     * Ick. we're about to turn off vblanks, so make sure anyone waiting
     * on them gets woken up. Also make sure we update state correctly
     * so that we can continue refcounting correctly.
     */
    if (dev->vblank != NULL) {
        mtx_enter(&dev->vblank->vb_lock);
        for (i = 0; i < dev->vblank->vb_num; i++) {
#if !defined(__NetBSD__)
            wakeup(&dev->vblank->vb_crtcs[i]);
#else /* !defined(__NetBSD__) */
            cv_broadcast(&dev->vblank->vb_crtcs[i].condvar);
#endif /* !defined(__NetBSD__) */
            dev->vblank->vb_crtcs[i].vbl_enabled = 0;
            /* Snapshot the hardware counter so refcounting stays consistent. */
            dev->vblank->vb_crtcs[i].vbl_last =
                dev->driver->get_vblank_counter(dev, i);
        }
        mtx_leave(&dev->vblank->vb_lock);
    }

    DRM_DEBUG("irq=%d\n", dev->irq);

    /* Let the driver tear down its own interrupt sources. */
    dev->driver->irq_uninstall(dev);

    return (0);
}
static int drm_setup(struct drm_device *dev) { drm_local_map_t *map; int i; DRM_LOCK_ASSERT(dev); /* prebuild the SAREA */ i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); if (i != 0) return i; if (dev->driver->firstopen) dev->driver->firstopen(dev); dev->buf_use = 0; if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) { i = drm_dma_setup(dev); if (i != 0) return i; } drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER); INIT_LIST_HEAD(&dev->magicfree); init_waitqueue_head(&dev->lock.lock_queue); if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev->irq_enabled = 0; dev->context_flag = 0; dev->last_context = 0; dev->if_version = 0; dev->buf_sigio = NULL; DRM_DEBUG("\n"); return 0; }
static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); #if 0 if (mga_do_wait_for_idle(dev_priv) < 0) { if (MGA_DMA_DEBUG) DRM_INFO("-EBUSY\n"); return -EBUSY; } #endif if (iload->idx < 0 || iload->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[iload->idx]; buf_priv = buf->dev_private; if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; }
uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) { unsigned int cur = dev_priv->current_dma_page; unsigned int rest = SAVAGE_DMA_PAGE_SIZE - dev_priv->dma_pages[cur].used; unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; uint32_t *dma_ptr; unsigned int i; DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n", cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages); if (cur + nr_pages < dev_priv->nr_dma_pages) { dma_ptr = (uint32_t *)dev_priv->cmd_dma->virtual + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; if (n < rest) rest = n; dev_priv->dma_pages[cur].used += rest; n -= rest; cur++; } else {
/**
 * Register.
 *
 * \param pdev - PCI device structure
 * \param ent entry from the PCI ID table with device type flags
 * \return zero on success or a negative number on failure.
 *
 * Attempt to gets inter module "drm" information. If we are first
 * then register the character device and inter module information.
 * Try and register, if we fail to register, backout previous work.
 */
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                struct drm_driver *driver)
{
    drm_device_t *dev;
    int ret;

    DRM_DEBUG("\n");

    dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
    if (!dev)
        return -ENOMEM;

    if (!drm_fb_loaded) {
        pci_set_drvdata(pdev, dev);
        /* NOTE(review): the return values of pci_request_regions() and
         * pci_enable_device() are ignored here — confirm whether failure
         * is tolerated or should bail out before fill_in_dev(). */
        pci_request_regions(pdev, driver->pci_driver.name);
        pci_enable_device(pdev);
    }

    if ((ret = fill_in_dev(dev, pdev, ent, driver))) {
        goto err_g1;
    }
    if ((ret = drm_get_head(dev, &dev->primary)))
        goto err_g1;

    DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
             driver->name, driver->major, driver->minor, driver->patchlevel,
             driver->date, dev->primary.minor, pci_pretty_name(dev->pdev));

    return 0;

    /* Error path: undo the PCI setup done above and free the stub. */
err_g1:
    if (!drm_fb_loaded) {
        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
    }
    drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
    printk(KERN_ERR "DRM: drm_get_dev failed.\n");
    return ret;
}
int sis_ioctl_agp_free( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t agp; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp)); if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock)agp.free)) return DRM_ERR(EINVAL); mmFreeMem((PMemBlock)agp.free); if (!del_alloc_set(agp.context, AGP_TYPE, agp.free)) return DRM_ERR(EINVAL); DRM_DEBUG("free agp, free = 0x%lx\n", agp.free); return 0; }
int sis_fb_free( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t fb; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb)); if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock)fb.free)) return DRM_ERR(EINVAL); if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free)) return DRM_ERR(EINVAL); mmFreeMem((PMemBlock)fb.free); DRM_DEBUG("free fb, free = 0x%lx\n", fb.free); return 0; }
/*
 * Enable or disable the HDMI infoframe/audio packet hardware for the
 * given encoder.  Audio packets are only enabled when the attached
 * monitor's EDID reports audio support.
 */
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
    struct drm_device *dev = encoder->dev;
    struct radeon_device *rdev = dev->dev_private;
    struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
    struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;

    if (!dig || !dig->afmt)
        return;

    if (!enable) {
        /* Stop audio sample transmission and clear all infoframes. */
        WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
                   ~AFMT_AUDIO_SAMPLE_SEND);
        WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
    } else {
        struct drm_connector *connector =
            radeon_get_connector_for_encoder(encoder);

        if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
            /* AVI + audio infoframes; CONT bits keep values updated. */
            WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
                   HDMI_AVI_INFO_SEND |
                   HDMI_AVI_INFO_CONT |
                   HDMI_AUDIO_INFO_SEND |
                   HDMI_AUDIO_INFO_CONT);
            WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
                      AFMT_AUDIO_SAMPLE_SEND);
        } else {
            /* No audio sink: AVI infoframes only, audio samples off. */
            WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
                   HDMI_AVI_INFO_SEND |
                   HDMI_AVI_INFO_CONT);
            WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
                       ~AFMT_AUDIO_SAMPLE_SEND);
        }
    }

    dig->afmt->enabled = enable;

    DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
              enable ? "En" : "Dis", dig->afmt->offset,
              radeon_encoder->encoder_id);
}
/*
 * Tear down the MGA DMA state: unmap the WARP/primary/buffer regions,
 * clean up the freelist if present, and release the driver-private
 * structure.  Safe to call when no private state exists.
 */
int mga_do_cleanup_dma(drm_device_t *dev)
{
    drm_mga_private_t *dev_priv;

    DRM_DEBUG("\n");

    if (dev->dev_private == NULL)
        return 0;

    dev_priv = dev->dev_private;

    DRM_IOREMAPFREE(dev_priv->warp, dev);
    DRM_IOREMAPFREE(dev_priv->primary, dev);
    DRM_IOREMAPFREE(dev_priv->buffers, dev);

    if (dev_priv->head != NULL)
        mga_freelist_cleanup(dev);

    DRM(free)(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
    dev->dev_private = NULL;

    return 0;
}
static int sis_ioctl_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_agp_t *agp = data; if (dev_priv == NULL) { dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); dev_priv = dev->dev_private; if (dev_priv == NULL) return ENOMEM; } if (dev_priv->AGPHeap != NULL) return -EINVAL; dev_priv->AGPHeap = mmInit(agp->offset, agp->size); DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; }
int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_mem_t *mem = data; int retval = 0; struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; if (mem->type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); return -EINVAL; } if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); return -EINVAL; } tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, (unsigned long)file_priv); if (item) { mem->offset = ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); mem->index = item->user_hash.key; } else { mem->offset = 0; mem->size = 0; mem->index = 0; DRM_DEBUG("Video memory allocation failed\n"); retval = -ENOMEM; } return retval; }
/*
 * Allocate the page array holding DMA descriptors for a scatter/gather
 * transfer.  Returns 0 on success or -ENOMEM; partially allocated pages
 * are left for the caller's state-based cleanup (vsg->state is advanced
 * before the per-page loop).
 */
static int via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
    int i;

    vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
    vsg->num_desc_pages =
        (vsg->num_desc + vsg->descriptors_per_page - 1) /
        vsg->descriptors_per_page;

    vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL);
    if (vsg->desc_pages == NULL)
        return -ENOMEM;

    vsg->state = dr_via_desc_pages_alloc;

    for (i = 0; i < vsg->num_desc_pages; ++i) {
        vsg->desc_pages[i] =
            (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL);
        if (vsg->desc_pages[i] == NULL)
            return -ENOMEM;
    }

    DRM_DEBUG("Allocated %d pages for %d descriptors.\n",
              vsg->num_desc_pages, vsg->num_desc);
    return 0;
}
/*
 * Module exit: unregister the misc device, tear down contexts and the
 * device, and release AGP state when AGP support is compiled in.
 */
static void __exit tdfx_cleanup(void)
{
    drm_device_t *dev = &tdfx_device;

    DRM_DEBUG("\n");

    drm_proc_cleanup();

    if (misc_deregister(&tdfx_misc))
        DRM_ERROR("Cannot unload module\n");
    else
        DRM_INFO("Module unloaded\n");

    drm_ctxbitmap_cleanup(dev);
    tdfx_takedown(dev);

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
    if (dev->agp != NULL) {
        drm_agp_uninit();
        drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
        dev->agp = NULL;
    }
#endif
}
/*
 * Free an AGP memory block identified by mem->index.
 *
 * Fix: the debug message printed agp.nfree, a field this function never
 * sets; log agp.free, the block that was actually released.
 *
 * Returns 0 on success, -1 on a null handle or bookkeeping failure.
 */
static int via_agp_free(drm_via_mem_t * mem)
{
    drm_via_mm_t agp;
    int retval = 0;

    agp.free = mem->index;
    agp.context = mem->context;

    if (!agp.free)
        return -1;

    via_mmFreeMem((PMemBlock) agp.free);

    if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
        retval = -1;
    }

    DRM_DEBUG("free agp, free = %ld\n", agp.free);

    return retval;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct drm_radeon_gem_get_tiling *args = data; struct drm_gem_object *gobj; struct radeon_bo *rbo; int r = 0; DRM_DEBUG("\n"); gobj = drm_gem_object_lookup(dev, filp, args->handle); if (gobj == NULL) return -ENOENT; rbo = gem_to_radeon_bo(gobj); r = radeon_bo_reserve(rbo, false); if (unlikely(r != 0)) goto out; radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); radeon_bo_unreserve(rbo); out: drm_gem_object_unreference_unlocked(gobj); return r; }
static void udl_free_urb_list(struct drm_device *dev) { struct udl_device *udl = dev->dev_private; int count = udl->urbs.count; struct list_head *node; struct urb_node *unode; struct urb *urb; int ret; unsigned long flags; DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); /* keep waiting and freeing, until we've got 'em all */ while (count--) { /* Getting interrupted means a leak, but ok at shutdown*/ ret = down_interruptible(&udl->urbs.limit_sem); if (ret) break; spin_lock_irqsave(&udl->urbs.lock, flags); node = udl->urbs.list.next; /* have reserved one with sem */ list_del_init(node); spin_unlock_irqrestore(&udl->urbs.lock, flags); unode = list_entry(node, struct urb_node, entry); urb = unode->urb; /* Free each separately allocated piece */ usb_free_coherent(urb->dev, udl->urbs.size, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); kfree(node); } udl->urbs.count = 0; }
/*
 * Program a new scanout base address for a page flip on the given CRTC.
 * The GRPH update lock is held while the addresses are written, then
 * released so the flip latches at the next vblank.  Returns the current
 * SURFACE_UPDATE_PENDING status.
 */
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
    struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
    u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
    int i;

    /* Lock the graphics update lock while reprogramming addresses. */
    WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset,
           tmp | AVIVO_D1GRPH_UPDATE_LOCK);

    /* Update the scanout addresses (high bits per-CRTC, low bits by offset). */
    if (radeon_crtc->crtc_id) {
        WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
    } else {
        WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
    }
    WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
           (u32)crtc_base);
    WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
           (u32)crtc_base);

    /* Wait for update_pending to go high. */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
            AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
            break;
        udelay(1);
    }
    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

    /* Unlock so double-buffering can take place inside vblank. */
    WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset,
           tmp & ~AVIVO_D1GRPH_UPDATE_LOCK);

    return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
           AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
/*
 * Program a new scanout base address for a page flip on the given CRTC
 * and return the current SURFACE_UPDATE_PENDING status.
 */
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
    struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
    u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
    int i;

    /* Hold the graphics update lock while the addresses are rewritten. */
    tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
    WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

    /* Write the upper address bits to the per-CRTC registers... */
    if (radeon_crtc->crtc_id) {
        WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
    } else {
        WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
        WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
    }
    /* ...and the lower bits via the crtc_offset-relative registers. */
    WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
           (u32)crtc_base);
    WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
           (u32)crtc_base);

    /* Poll until update_pending goes high (bounded by usec_timeout). */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
            AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
            break;
        udelay(1);
    }
    /* NOTE(review): logged unconditionally, even if the poll timed out. */
    DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

    /* Release the lock so the flip can latch at the next vblank. */
    tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
    WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

    return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
           AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
int drm_mmap(struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { struct drm_device *dev = drm_get_device_from_kdev(kdev); struct drm_file *file_priv = NULL; drm_local_map_t *map; enum drm_map_type type; vm_paddr_t phys; int error; /* d_mmap gets called twice, we can only reference file_priv during * the first call. We need to assume that if error is EBADF the * call was succesful and the client is authenticated. */ error = devfs_get_cdevpriv((void **)&file_priv); if (error == ENOENT) { DRM_ERROR("Could not find authenticator!\n"); return EINVAL; } if (file_priv && !file_priv->authenticated) return EACCES; DRM_DEBUG("called with offset %016jx\n", offset); if (dev->dma && offset < ptoa(dev->dma->page_count)) { drm_device_dma_t *dma = dev->dma; DRM_SPINLOCK(&dev->dma_lock); if (dma->pagelist != NULL) { unsigned long page = offset >> PAGE_SHIFT; unsigned long phys = dma->pagelist[page]; DRM_SPINUNLOCK(&dev->dma_lock); *paddr = phys; return 0; } else {
/*
 * Wait for all outstanding render URBs to complete and free them, along
 * with their coherent transfer buffers and list nodes.
 */
static void udl_free_urb_list(struct drm_device *dev)
{
    struct udl_device *udl = dev->dev_private;
    int count = udl->urbs.count;
    struct list_head *node;
    struct urb_node *unode;
    struct urb *urb;
    int ret;
    unsigned long flags;

    DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

    /* Keep waiting and freeing until every urb has been reclaimed. */
    while (count--) {
        /* An interrupted wait leaks the rest — acceptable at shutdown. */
        ret = down_interruptible(&udl->urbs.limit_sem);
        if (ret)
            break;

        /* The semaphore guarantees a node is on the free list. */
        spin_lock_irqsave(&udl->urbs.lock, flags);
        node = udl->urbs.list.next;
        list_del_init(node);
        spin_unlock_irqrestore(&udl->urbs.lock, flags);

        unode = list_entry(node, struct urb_node, entry);
        urb = unode->urb;

        /* Free each separately allocated piece. */
        usb_free_coherent(urb->dev, udl->urbs.size,
                          urb->transfer_buffer, urb->transfer_dma);
        usb_free_urb(urb);
        kfree(node);
    }

    udl->urbs.count = 0;
}
int mga_dma_flush( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private; drm_lock_t lock; LOCK_TEST_WITH_RETURN( dev ); if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) ) return -EFAULT; DRM_DEBUG( "%s: %s%s%s\n", __FUNCTION__, (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "", (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "", (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "" ); WRAP_WAIT_WITH_RETURN( dev_priv ); if ( lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL) ) { mga_do_dma_flush( dev_priv ); } if ( lock.flags & _DRM_LOCK_QUIESCENT ) { #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle( dev_priv ); if ( ret < 0 ) DRM_INFO( "%s: -EBUSY\n", __FUNCTION__ ); return ret; #else return mga_do_wait_for_idle( dev_priv ); #endif } else { return 0; } }
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_agp_t *agp = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, agp->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); mutex_unlock(&dev->struct_mutex); return ret; } dev_priv->agp_initialized = 1; dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size); return 0; }
void drm_legacy_dev_reinit(struct drm_device *dev) { if (dev->irq_enabled) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); drm_legacy_agp_clear(dev); drm_legacy_sg_cleanup(dev); drm_legacy_vma_flush(dev); drm_legacy_dma_takedown(dev); mutex_unlock(&dev->struct_mutex); dev->sigdata.lock = NULL; dev->context_flag = 0; dev->last_context = 0; dev->if_version = 0; DRM_DEBUG("lastclose completed\n"); }