int mga_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
                 unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t    request;
        int               order;
        drm_buf_entry_t  *entry;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
                return -EFAULT;

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
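/*
 * mga_markbufs() and the allocators below key everything off drm_order(),
 * which maps a byte size onto a power-of-two bucket. As a reference, here is
 * a minimal standalone sketch of those semantics (the smallest order such
 * that (1UL << order) >= size); the real helper is provided by the DRM core,
 * so this illustrative copy is named differently to avoid any clash.
 */
static inline int example_drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        /* Find the position of the highest set bit. */
        for (order = 0, tmp = size; tmp >>= 1; ++order)
                ;

        /* Round up when size is not an exact power of two. */
        if (size & (size - 1))
                ++order;

        return order;
}
/* e.g. example_drm_order(4096) == 12, example_drm_order(5000) == 13. */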
static int tdfx_takedown(drm_device_t *dev)
{
        int                i;
        drm_magic_entry_t *pt, *next;
        drm_map_t         *map;
        drm_vma_entry_t   *vma, *vma_next;

        DRM_DEBUG("\n");

        down(&dev->struct_sem);
        del_timer(&dev->timer);

        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname) + 1,
                         DRM_MEM_DRIVER);
                dev->devname = NULL;
        }

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1,
                         DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *temp;
                drm_agp_mem_t *temp_next;

                temp = dev->agp->memory;
                while (temp != NULL) {
                        temp_next = temp->next;
                        drm_free_agp(temp->memory, temp->pages);
                        drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
                        temp = temp_next;
                }
                if (dev->agp->acquired)
                        _drm_agp_release();
        }
#endif

        /* Clear vma list (only built for debugging) */
        if (dev->vmalist) {
                for (vma = dev->vmalist; vma; vma = vma_next) {
                        vma_next = vma->next;
                        drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
                }
                dev->vmalist = NULL;
        }

        /* Clear map area and mtrr information */
        if (dev->maplist) {
                for (i = 0; i < dev->map_count; i++) {
                        map = dev->maplist[i];
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;

                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                drm_free_pages((unsigned long)map->handle,
                                               drm_order(map->size)
                                               - PAGE_SHIFT,
                                               DRM_MEM_SAREA);
                                break;
                        case _DRM_AGP:
                                /* Do nothing here, because this is all
                                   handled in the AGP/GART driver. */
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
                drm_free(dev->maplist,
                         dev->map_count * sizeof(*dev->maplist),
                         DRM_MEM_MAPS);
                dev->maplist   = NULL;
                dev->map_count = 0;
        }

        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.pid     = 0;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        up(&dev->struct_sem);

        return 0;
}
static int i810_takedown(drm_device_t *dev)
{
        int                i;
        drm_magic_entry_t *pt, *next;
        drm_map_t         *map;
        drm_vma_entry_t   *vma, *vma_next;

        DRM_DEBUG("\n");

        if (dev->irq)
                i810_irq_uninstall(dev);

        down(&dev->struct_sem);
        del_timer(&dev->timer);

        if (dev->devname) {
                drm_free(dev->devname, strlen(dev->devname) + 1,
                         DRM_MEM_DRIVER);
                dev->devname = NULL;
        }

        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1,
                         DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }

        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                   i810_cleanup is called. */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        _drm_agp_release();

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }

        /* Clear vma list (only built for debugging) */
        if (dev->vmalist) {
                for (vma = dev->vmalist; vma; vma = vma_next) {
                        vma_next = vma->next;
                        drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
                }
                dev->vmalist = NULL;
        }

        /* Clear map area and mtrr information */
        if (dev->maplist) {
                for (i = 0; i < dev->map_count; i++) {
                        map = dev->maplist[i];
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
                                if (map->mtrr >= 0) {
                                        int retcode;

                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
#endif
                                drm_ioremapfree(map->handle, map->size);
                                break;
                        case _DRM_SHM:
                                drm_free_pages((unsigned long)map->handle,
                                               drm_order(map->size)
                                               - PAGE_SHIFT,
                                               DRM_MEM_SAREA);
                                break;
                        case _DRM_AGP:
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
                drm_free(dev->maplist,
                         dev->map_count * sizeof(*dev->maplist),
                         DRM_MEM_MAPS);
                dev->maplist   = NULL;
                dev->map_count = 0;
        }

        if (dev->queuelist) {
                for (i = 0; i < dev->queue_count; i++) {
                        /* Check the slot before touching its waitlist. */
                        if (dev->queuelist[i]) {
                                drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
                                drm_free(dev->queuelist[i],
                                         sizeof(*dev->queuelist[0]),
                                         DRM_MEM_QUEUES);
                                dev->queuelist[i] = NULL;
                        }
                }
                drm_free(dev->queuelist,
                         dev->queue_slots * sizeof(*dev->queuelist),
                         DRM_MEM_QUEUES);
                dev->queuelist = NULL;
        }
        drm_dma_takedown(dev);

        dev->queue_count = 0;
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.pid     = 0;
                wake_up_interruptible(&dev->lock.lock_queue);
        }
        up(&dev->struct_sem);

        return 0;
}
int mga_addbufs_agp(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t    request;
        drm_buf_entry_t  *entry;
        drm_buf_t        *buf;
        unsigned long     offset;
        unsigned long     agp_offset;
        int               count;
        int               order;
        int               size;
        int               alignment;
        int               page_order;
        int               total;
        int               byte_count;
        int               i;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
                return -EFAULT;

        count      = request.count;
        order      = drm_order(request.size);
        size       = 1 << order;
        agp_offset = request.agp_start;
        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;
        byte_count = 0;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);
        DRM_DEBUG("byte_count: %d\n", byte_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY; /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        /* This isn't necessarily a good limit, but we have to stop a dumb
           32-bit overflow problem below. */
        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size   = size;
        entry->page_order = page_order;
        offset            = 0;

        while (entry->buf_count < count) {
                buf              = &entry->buflist[entry->buf_count];
                buf->idx         = dma->buf_count + entry->buf_count;
                buf->total       = alignment;
                buf->order       = order;
                buf->used        = 0;
                buf->offset      = offset; /* Hrm */
                buf->bus_address = dev->agp->base + agp_offset + offset;
                buf->address     = (void *)(agp_offset + offset
                                            + dev->agp->base);
                buf->next        = NULL;
                buf->waiting     = 0;
                buf->pending     = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->pid         = 0;

                buf->dev_private   = drm_alloc(sizeof(drm_mga_buf_priv_t),
                                               DRM_MEM_BUFS);
                buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);

#if DRM_DMA_HISTOGRAM
                buf->time_queued     = 0;
                buf->time_dispatched = 0;
                buf->time_completed  = 0;
                buf->time_freed      = 0;
#endif
                offset = offset + alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count += entry->buf_count;
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        dma->byte_count += byte_count;
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++)
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request))) {
                /* Drop the allocation reference on the fault path too. */
                atomic_dec(&dev->buf_alloc);
                return -EFAULT;
        }

        atomic_dec(&dev->buf_alloc);

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);
        DRM_DEBUG("byte_count: %d\n", byte_count);

        dma->flags = _DRM_DMA_USE_AGP;
        DRM_DEBUG("dma->flags : %x\n", dma->flags);

        return 0;
}
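/*
 * A sketch, not authoritative, of how a userspace client might exercise the
 * two entry points above through the legacy DRM buffer ioctls. The ioctl
 * request codes and the drm_buf_desc fields come from the legacy <drm/drm.h>;
 * the device path, buffer geometry, and AGP start offset are illustrative
 * assumptions, as is the helper name.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_setup_bufs(const char *path)
{
        struct drm_buf_desc req = { 0 };
        int fd = open(path, O_RDWR);   /* e.g. "/dev/dri/card0" (assumed) */

        if (fd < 0)
                return -1;

        /* Ask for 32 page-aligned 64 KiB AGP buffers (illustrative sizes). */
        req.count     = 32;
        req.size      = 64 * 1024;
        req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
        req.agp_start = 0;             /* assumed AGP aperture offset */
        if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) < 0)
                goto fail;

        /*
         * The kernel wrote back the actual count and the rounded size, so the
         * same struct can set the freelist water marks on that pool.
         */
        req.low_mark  = 4;
        req.high_mark = req.count - 4;
        if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &req) < 0)
                goto fail;

        return fd;
fail:
        close(fd);
        return -1;
}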
int mga_addbufs_pci(struct inode *inode, struct file *filp, unsigned int cmd,
                    unsigned long arg)
{
        drm_file_t       *priv = filp->private_data;
        drm_device_t     *dev  = priv->dev;
        drm_device_dma_t *dma  = dev->dma;
        drm_buf_desc_t    request;
        int               count;
        int               order;
        int               size;
        int               total;
        int               page_order;
        drm_buf_entry_t  *entry;
        unsigned long     page;
        drm_buf_t        *buf;
        int               alignment;
        unsigned long     offset;
        int               i;
        int               byte_count;
        int               page_count;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t *)arg, sizeof(request)))
                return -EFAULT;

        count = request.count;
        order = drm_order(request.size);
        size  = 1 << order;

        DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
                  request.count, request.size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY; /* Not while in use */

        alignment  = (request.flags & _DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total      = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist, count * sizeof(*entry->buflist),
                         DRM_MEM_BUFS);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        dma->pagelist = drm_realloc(dma->pagelist,
                                    dma->page_count * sizeof(*dma->pagelist),
                                    (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES);
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size   = size;
        entry->page_order = page_order;
        byte_count        = 0;
        page_count        = 0;

        while (entry->buf_count < count) {
                if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA)))
                        break;
                entry->seglist[entry->seg_count++] = page;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  page + PAGE_SIZE * i);
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        dma->buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
                dma->buflist[i] = &entry->buflist[i - dma->buf_count];

        dma->buf_count  += entry->buf_count;
        dma->seg_count  += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        drm_freelist_create(&entry->freelist, entry->buf_count);
        for (i = 0; i < entry->buf_count; i++)
                drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);

        up(&dev->struct_sem);

        request.count = entry->buf_count;
        request.size  = size;

        if (copy_to_user((drm_buf_desc_t *)arg, &request, sizeof(request))) {
                /* Drop the allocation reference on the fault path too. */
                atomic_dec(&dev->buf_alloc);
                return -EFAULT;
        }

        atomic_dec(&dev->buf_alloc);
        return 0;
}
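/*
 * The PCI path above packs alignment-spaced buffers of (1 << order) bytes
 * into allocations of (PAGE_SIZE << page_order) bytes. A standalone sketch
 * of that arithmetic, assuming the usual 4 KiB pages purely for illustration;
 * the loop condition deliberately mirrors the allocator's inner for-loop.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

static void example_packing(int order, int page_aligned)
{
        unsigned long size      = 1UL << order;
        unsigned long alignment = page_aligned
                ? (size + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1) /* PAGE_ALIGN */
                : size;
        int page_order          = order > EX_PAGE_SHIFT ? order - EX_PAGE_SHIFT : 0;
        unsigned long total     = EX_PAGE_SIZE << page_order;
        unsigned long offset, n = 0;

        /* Stop, as the allocator does, once a buffer no longer fits. */
        for (offset = 0; offset + size <= total; offset += alignment)
                n++;

        printf("order %d: %lu buffer(s) of %lu bytes per %lu-byte allocation\n",
               order, n, size, total);
}

int main(void)
{
        example_packing(10, 0); /* sub-page buffers: four 1 KiB buffers per page */
        example_packing(13, 1); /* multi-page buffer: one per allocation */
        return 0;
}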
int cayman_cp_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 tmp;
        u32 rb_bufsz;
        int r;

        /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
        WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
                                 SOFT_RESET_PA |
                                 SOFT_RESET_SH |
                                 SOFT_RESET_VGT |
                                 SOFT_RESET_SPI |
                                 SOFT_RESET_SX));
        RREG32(GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(GRBM_SOFT_RESET, 0);
        RREG32(GRBM_SOFT_RESET);

        WREG32(CP_SEM_WAIT_TIMER, 0x0);
        WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);

        WREG32(CP_DEBUG, (1 << 27));

        /* ring 0 - compute and gfx */
        /* Set ring buffer size */
        ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB0_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
        ring->wptr = 0;
        WREG32(CP_RB0_WPTR, ring->wptr);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB0_RPTR_ADDR,
               (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB0_RPTR_ADDR_HI,
               upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
        WREG32(SCRATCH_ADDR,
               ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

        if (rdev->wb.enabled)
                WREG32(SCRATCH_UMSK, 0xff);
        else {
                tmp |= RB_NO_UPDATE;
                WREG32(SCRATCH_UMSK, 0);
        }

        mdelay(1);
        WREG32(CP_RB0_CNTL, tmp);

        WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

        ring->rptr = RREG32(CP_RB0_RPTR);

        /* ring1 - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
        rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB1_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
        ring->wptr = 0;
        WREG32(CP_RB1_WPTR, ring->wptr);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB1_RPTR_ADDR,
               (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB1_RPTR_ADDR_HI,
               upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

        mdelay(1);
        WREG32(CP_RB1_CNTL, tmp);

        WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

        ring->rptr = RREG32(CP_RB1_RPTR);

        /* ring2 - compute only */
        /* Set ring buffer size */
        ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
        rb_bufsz = drm_order(ring->ring_size / 8);
        tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
        tmp |= BUF_SWAP_32BIT;
#endif
        WREG32(CP_RB2_CNTL, tmp);

        /* Initialize the ring buffer's read and write pointers */
        WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
        ring->wptr = 0;
        WREG32(CP_RB2_WPTR, ring->wptr);

        /* set the wb address whether it's enabled or not */
        WREG32(CP_RB2_RPTR_ADDR,
               (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
        WREG32(CP_RB2_RPTR_ADDR_HI,
               upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

        mdelay(1);
        WREG32(CP_RB2_CNTL, tmp);

        WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

        ring->rptr = RREG32(CP_RB2_RPTR);

        /* start the rings */
        cayman_cp_start(rdev);
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
        rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
        rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
        /* this only tests cp0 */
        r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX,
                             &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
        if (r) {
                rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
                rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
                rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
                return r;
        }

        return 0;
}
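/*
 * Each CP_RBn_CNTL write above packs two log2 fields: the ring size in
 * 8-byte units in the low bits, and a second size field derived from the GPU
 * page size (also in 8-byte units) at bit 8. A hedged sketch of just that
 * encoding, assuming power-of-two sizes (where drm_order() reduces to an
 * exact log2); the shift positions simply mirror the code above, and the
 * helper names are illustrative, not driver API.
 */
static inline unsigned int example_log2(unsigned long v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

static inline unsigned int example_rb_cntl(unsigned long ring_size,
                                           unsigned long gpu_page_size)
{
        unsigned int rb_bufsz = example_log2(ring_size / 8);

        return (example_log2(gpu_page_size / 8) << 8) | rb_bufsz;
}
/* e.g. a 1 MiB ring with 4 KiB GPU pages: (9 << 8) | 17 == 0x911. */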