int radeon_mem_init_heap( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_mem_init_heap_t initheap; struct mem_block **heap; if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data, sizeof(initheap) ); heap = get_heap( dev_priv, initheap.region ); if (!heap) return DRM_ERR(EFAULT); if (*heap) { DRM_ERROR("heap already initialized?"); return DRM_ERR(EFAULT); } return init_heap( heap, initheap.start, initheap.size ); }
int i915_mem_destroy_heap( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mem_destroy_heap_t destroyheap; struct mem_block **heap; if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data, sizeof(destroyheap) ); heap = get_heap( dev_priv, destroyheap.region ); if (!heap) { DRM_ERROR("get_heap failed"); return DRM_ERR(EFAULT); } if (!*heap) { DRM_ERROR("heap not initialized?"); return DRM_ERR(EFAULT); } i915_mem_takedown( heap ); return 0; }
static int mga_getparam( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_getparam_t param; int value; if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL( param, (drm_mga_getparam_t __user *)data, sizeof(param) ); DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID ); switch( param.param ) { case MGA_PARAM_IRQ_NR: value = dev->irq; break; case MGA_PARAM_CARD_TYPE: value = dev_priv->chipset; break; default: return DRM_ERR(EINVAL); } if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) { DRM_ERROR( "copy_to_user\n" ); return DRM_ERR(EFAULT); } return 0; }
/* Program the WARP engine's per-chipset setup registers, enable its
 * microcode cache, and verify the WMISC write took effect by reading it
 * back.  Returns 0 on success, DRM_ERR(EINVAL) for an unknown chipset or
 * a failed readback.
 */
int mga_warp_init(drm_mga_private_t * dev_priv)
{
	u32 wmisc;

	/* FIXME: Get rid of these damned magic numbers... */
	switch (dev_priv->chipset) {
	case MGA_CARD_TYPE_G400:
	case MGA_CARD_TYPE_G550:
		/* G400/G550 use the second instruction-address register and
		 * different vertex-size/accept-sequence values than G200. */
		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
		break;
	case MGA_CARD_TYPE_G200:
		MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
		MGA_WRITE(MGA_WGETMSB, 0x1606);
		MGA_WRITE(MGA_WVRTXSZ, 7);
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
			      MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
	/* Read back to confirm the hardware accepted the configuration. */
	wmisc = MGA_READ(MGA_WMISC);
	if (wmisc != WMISC_EXPECTED) {
		DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
			  wmisc, WMISC_EXPECTED);
		return DRM_ERR(EINVAL);
	}
	return 0;
}
/* Pin the userspace pages backing a DMA blit so the hardware can access
 * them.  vsg->pages is allocated here; ownership stays with vsg and the
 * caller is responsible for releasing the array and the page references.
 *
 * Returns 0 on success, a negative errno propagated from
 * get_user_pages(), DRM_ERR(ENOMEM) on allocation failure, or
 * DRM_ERR(EINVAL) when only some pages could be pinned (state is still
 * set to dr_via_pages_locked so cleanup releases the partial set).
 *
 * BUGFIX: the source contained the mojibake token "¤t" in both
 * mmap_sem calls — an HTML-entity corruption of "&current" ("&curren"
 * + "t").  Restored to "&current", without which this does not compile.
 */
static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);

	/* Number of pages spanned by the transfer, inclusive of the last
	 * byte: num_lines * mem_stride - 1 is the final offset. */
	vsg->num_pages = VIA_PFN(xfer->mem_addr +
				 (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm,
			     (unsigned long)xfer->mem_addr, vsg->num_pages,
			     (vsg->direction == DMA_FROM_DEVICE), 0,
			     vsg->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		/* Partial pin: mark locked so teardown drops the pages we
		 * did get. */
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
int sis_fb_alloc( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t fb; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->FBHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb)); block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0); if (block) { /* TODO */ fb.offset = block->ofs; fb.free = (unsigned long)block; if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock)fb.free); retval = DRM_ERR(EINVAL); } } else { fb.offset = 0; fb.size = 0; fb.free = 0; } DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb)); DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset); return retval; }
/* Ioctl: emit a new fence into the DMA stream and return the fence value
 * (pre-increment) to userspace.  The SOFTRAP entry presumably raises the
 * interrupt that completes the fence — confirm against the MGA DMA docs.
 */
static int mga_set_fence(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_mga_private_t *dev_priv = dev->dev_private;
	u32 temp;
	DMA_LOCALS;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	/* I would normally do this assignment in the declaration of temp,
	 * but dev_priv may be NULL.
	 */
	temp = dev_priv->next_fence_to_post;
	dev_priv->next_fence_to_post++;

	BEGIN_DMA(1);
	/* Three pads to fill the block, then the SOFTRAP command. */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_SOFTRAP, 0x00000000);
	ADVANCE_DMA();

	/* Hand the fence number just posted back to the caller. */
	if (DRM_COPY_TO_USER((u32 __user *) data, &temp, sizeof(u32))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}
	return 0;
}
/* Validate and forward a user-supplied packet3 to the ring.  Only a small
 * whitelist of packet types is allowed through; 3D_LOAD_VBPNTR is handed
 * to a dedicated checker because it carries GPU offsets.  On success the
 * command-buffer cursor is advanced past the consumed packet.
 */
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
					    drm_radeon_cmd_buffer_t* cmdbuf)
{
	u32 header;
	int count;
	RING_LOCALS;

	/* Need at least the 4-byte header before reading anything. */
	if (4 > cmdbuf->bufsz)
		return DRM_ERR(EINVAL);

	/* Fixme !! This simply emits a
	   packet without much checking.
	   We need to be smarter. */

	/* obtain first word - actual packet3 header */
	header = *(u32 __user*)cmdbuf->buf;

	/* Is it packet 3 ? */
	if( (header>>30)!=0x3 ) {
		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
		return DRM_ERR(EINVAL);
	}

	count=(header>>16) & 0x3fff;

	/* Check again now that we know how much data to expect */
	if ((count+2)*4 > cmdbuf->bufsz){
		DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
			(count+2)*4, cmdbuf->bufsz);
		return DRM_ERR(EINVAL);
	}

	/* Is it a packet type we know about ? */
	switch(header & 0xff00){
	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);

	case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
	case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
	case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
	case RADEON_CP_INDX_BUFFER:	/* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
		/* these packets are safe */
		break;
	default:
		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
		return DRM_ERR(EINVAL);
	}

	/* count is the payload length; +2 covers header + final data word. */
	BEGIN_RING(count+2);
	OUT_RING(header);
	OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	return 0;
}
int radeon_mem_free( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_mem_free_t memfree; struct mem_block *block, **heap; if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data, sizeof(memfree) ); heap = get_heap( dev_priv, memfree.region ); if (!heap || !*heap) return DRM_ERR(EFAULT); block = find_block( *heap, memfree.region_offset ); if (!block) return DRM_ERR(EFAULT); if (block->pid != DRM_CURRENTPID) return DRM_ERR(EPERM); free_block( block ); return 0; }
/* Needs the lock as it touches the ring. */ int i915_irq_emit(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_irq_emit_t emit; int result; LOCK_TEST_WITH_RETURN(dev, filp); if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data, sizeof(emit)); result = i915_emit_irq(dev); if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return DRM_ERR(EFAULT); } return 0; }
/* Validate a 3D_LOAD_VBPNTR packet before emitting it: every GPU offset
 * it carries must pass r300_check_offset().  The payload is copied into
 * a bounded local buffer first so userspace cannot change it between
 * check and emit.  Arrays come in pairs; k counts arrays, i walks the
 * payload words.  Caller (r300_emit_raw_packet3) has already verified
 * that (count+2)*4 bytes are available in cmdbuf.
 */
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
					       drm_radeon_cmd_buffer_t* cmdbuf,
					       u32 header)
{
	int count, i,k;
	#define MAX_ARRAY_PACKET  64
	u32 payload[MAX_ARRAY_PACKET];
	u32 narrays;
	RING_LOCALS;

	count=(header>>16) & 0x3fff;

	if((count+1)>MAX_ARRAY_PACKET){
		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
		return DRM_ERR(EINVAL);
	}
	memset(payload, 0, MAX_ARRAY_PACKET*4);
	memcpy(payload, cmdbuf->buf+4, (count+1)*4);

	/* carefully check packet contents */

	narrays=payload[0];
	k=0;
	i=1;
	while((k<narrays) && (i<(count+1))){
		i++; /* skip attribute field */
		if(r300_check_offset(dev_priv, payload[i])){
			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
			return DRM_ERR(EINVAL);
		}
		k++;
		i++;
		if(k==narrays)break;
		/* have one more to process, they come in pairs */
		if(r300_check_offset(dev_priv, payload[i])){
			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
			return DRM_ERR(EINVAL);
		}
		k++;
		i++;
	}
	/* do the counts match what we expect ? */
	if((k!=narrays) || (i!=(count+1))){
		DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
		return DRM_ERR(EINVAL);
	}

	/* all clear, output packet */

	BEGIN_RING(count+2);
	OUT_RING(header);
	OUT_RING_TABLE(payload, count+1);
	ADVANCE_RING();

	cmdbuf->buf += (count+2)*4;
	cmdbuf->bufsz -= (count+2)*4;

	return 0;
}
/* Record an authentication magic for a client: append a new entry to the
 * tail of the per-hash-bucket list under the DRM lock.
 */
static int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
			 drm_magic_t magic)
{
	drm_magic_entry_t *entry;
	int bucket;

	DRM_DEBUG("%d\n", magic);

	bucket = drm_hash_magic(magic);
	entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
	if (entry == NULL)
		return DRM_ERR(ENOMEM);
	entry->magic = magic;
	entry->priv = priv;
	entry->next = NULL;

	DRM_LOCK();
	if (dev->magiclist[bucket].tail == NULL) {
		/* Empty bucket: the new entry is both head and tail. */
		dev->magiclist[bucket].head = entry;
	} else {
		dev->magiclist[bucket].tail->next = entry;
	}
	dev->magiclist[bucket].tail = entry;
	DRM_UNLOCK();

	return 0;
}
/* Block until the vblank counter reaches *sequence (with 3-second
 * timeout), then report the counter value actually observed back through
 * *sequence.  The (cur - *sequence) <= 2^23 trick treats counter values
 * up to ~half the 24-bit wrap ahead as "already passed", so a stale
 * request returns immediately instead of waiting ~a full wrap.
 */
int DRM(vblank_wait)(drm_device_t *dev, unsigned int *sequence)
{
	drm_radeon_private_t *dev_priv =
	   (drm_radeon_private_t *)dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	/* Clear any pending interrupt state before sleeping. */
	radeon_acknowledge_irqs( dev_priv );

	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than she wants to wait for years
	 * using vertical blanks...
	 */
	/* NOTE: cur_vblank is assigned inside the wait condition, which is
	 * re-evaluated on every wakeup — do not hoist it out. */
	DRM_WAIT_ON( ret, dev->vbl_queue, 3*DRM_HZ,
		     ( ( ( cur_vblank = atomic_read(&dev->vbl_received ) )
			 - *sequence ) <= (1<<23) ) );

	*sequence = cur_vblank;

	return ret;
}
/* Copy the G200 WARP microcode pipes into the WARP memory region and
 * record their physical addresses.  NOTE(review): WARP_UCODE_INSTALL
 * appears to consume the local vcbase/pcbase cursors by name — confirm
 * against the macro definition before renaming either variable.
 */
static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
{
	unsigned char *vcbase = dev_priv->warp->handle;
	unsigned long pcbase = dev_priv->warp->offset;
	unsigned int size;

	size = mga_warp_g200_microcode_size( dev_priv );
	/* Refuse to install if the region cannot hold all pipes. */
	if ( size > dev_priv->warp->size ) {
		DRM_ERROR( "microcode too large! (%u > %lu)\n",
			   size, dev_priv->warp->size );
		return DRM_ERR(ENOMEM);
	}

	memset( dev_priv->warp_pipe_phys, 0,
		sizeof(dev_priv->warp_pipe_phys) );

	/* One install per texture/shading pipe variant. */
	WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ );
	WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF );
	WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA );
	WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF );
	WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS );
	WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF );
	WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA );
	WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF );

	return 0;
}
static int mga_dma_blit( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_blit_t blit; DRM_DEBUG( "\n" ); LOCK_TEST_WITH_RETURN( dev, filp ); DRM_COPY_FROM_USER_IOCTL( blit, (drm_mga_blit_t __user *)data, sizeof(blit) ); if ( sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS ) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if ( mga_verify_blit( dev_priv, blit.srcorg, blit.dstorg ) ) return DRM_ERR(EINVAL); WRAP_TEST_WITH_RETURN( dev_priv ); mga_dma_dispatch_blit( dev, &blit ); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; }
int sis_ioctl_agp_init( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_agp_t agp; if (dev_priv == NULL) { dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); dev_priv = dev->dev_private; if (dev_priv == NULL) return ENOMEM; } if (dev_priv->AGPHeap != NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t *)data, sizeof(agp)); dev_priv->AGPHeap = mmInit(agp.offset, agp.size); DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); return 0; }
/* Called by the X Server to initialize the FB heap. Allocations will fail * unless this is called. Offset is the beginning of the heap from the * framebuffer offset (MaxXFBMem in XFree86). * * Memory layout according to Thomas Winischofer: * |------------------|DDDDDDDDDDDDDDDDDDDDDDDDDDDDD|HHHH|CCCCCCCCCCC| * * X driver/sisfb HW- Command- * framebuffer memory DRI heap Cursor queue */ int sis_fb_init( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_fb_t fb; DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t *)data, sizeof(fb)); if (dev_priv == NULL) { dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); dev_priv = dev->dev_private; if (dev_priv == NULL) return ENOMEM; } if (dev_priv->FBHeap != NULL) return DRM_ERR(EINVAL); dev_priv->FBHeap = mmInit(fb.offset, fb.size); DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); return 0; }
int sis_ioctl_agp_alloc( DRM_IOCTL_ARGS ) { DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; drm_sis_mem_t agp; PMemBlock block; int retval = 0; if (dev_priv == NULL || dev_priv->AGPHeap == NULL) return DRM_ERR(EINVAL); DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t *)data, sizeof(agp)); block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0); if (block) { /* TODO */ agp.offset = block->ofs; agp.free = (unsigned long)block; if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) { DRM_DEBUG("adding to allocation set fails\n"); mmFreeMem((PMemBlock)agp.free); retval = -1; } } else { agp.offset = 0; agp.size = 0; agp.free = 0; } DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, agp, sizeof(agp)); DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset); return retval; }
/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 *
 * Each VPU "count" unit is 16 bytes (4 dwords); sz is validated against
 * the remaining command-buffer size before anything is emitted.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
				    drm_radeon_cmd_buffer_t* cmdbuf,
				    drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	RING_LOCALS;

	sz = header.vpu.count;
	/* Upload address is split across two header fields. */
	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

	if (!sz)
		return 0;
	if (sz*16 > cmdbuf->bufsz)
		return DRM_ERR(EINVAL);

	BEGIN_RING(5+sz*4);
	/* Wait for VAP to come to senses.. */
	/* there is no need to emit it multiple times, (only once before
	   VAP is programmed, but this optimization is for later */
	OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
	OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
	OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
	OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );

	ADVANCE_RING();

	cmdbuf->buf += sz*16;
	cmdbuf->bufsz -= sz*16;

	return 0;
}
/* Poll the shadow status page until the BCI FIFO has room for n more
 * words (used count drops below the high threshold), with a bounded
 * busy-wait.  Returns 0 on success, DRM_ERR(EBUSY) on timeout.
 */
static int savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv,
				       unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	/* More words than the COB can ever guarantee — caller bug. */
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		/* Barrier so we re-read the DMA-updated status page each
		 * pass instead of a cached value. */
		DRM_MEMORYBARRIER();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	/* NOTE(review): status here holds the last value read; assumes the
	 * timeout constant is > 0 so the loop ran at least once. */
	DRM_ERROR("failed!\n");
	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return DRM_ERR(EBUSY);
}
int sis_fb_alloc( DRM_IOCTL_ARGS ) { drm_sis_mem_t fb; struct sis_memreq req; int retval = 0; DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb)); req.size = fb.size; sis_malloc(&req); if (req.offset) { /* TODO */ fb.offset = req.offset; fb.free = req.offset; if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) { DRM_DEBUG("adding to allocation set fails\n"); sis_free(req.offset); retval = DRM_ERR(EINVAL); } } else { fb.offset = 0; fb.size = 0; fb.free = 0; } DRM_COPY_TO_USER_IOCTL((drm_sis_mem_t *)data, fb, sizeof(fb)); DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset); return retval; }
/* Sleep until the hardware breadcrumb reaches irq_nr (3-second timeout).
 * Records the last-dispatched breadcrumb in the SAREA before returning.
 * Returns 0 when the breadcrumb has been reached, or the DRM_WAIT_ON
 * error (e.g. EBUSY on timeout).
 */
static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Fast path: already passed the requested sequence. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;

	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

	if (ret == DRM_ERR(EBUSY)) {
		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
			  __FUNCTION__,
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
/* Remove an authentication magic from its hash bucket and free the entry.
 * Returns 0 if the magic was found and removed, DRM_ERR(EINVAL) otherwise.
 *
 * Fixes two defects in the original:
 *  - the unlinked entry was never freed on the success path (the function
 *    returned 0 from inside the loop), leaking one drm_magic_entry_t per
 *    removal;
 *  - the trailing free(pt, M_DRM) ran only on the not-found path, where
 *    the loop guarantees pt == NULL.
 * The free now happens exactly once, after unlinking and dropping the
 * lock, and the not-found path frees nothing.
 */
static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
	drm_magic_entry_t *prev = NULL;
	drm_magic_entry_t *pt;
	int hash;

	DRM_DEBUG("%d\n", magic);
	hash = drm_hash_magic(magic);

	DRM_LOCK();
	for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
		if (pt->magic == magic) {
			/* Unlink from head/tail/middle as appropriate. */
			if (dev->magiclist[hash].head == pt) {
				dev->magiclist[hash].head = pt->next;
			}
			if (dev->magiclist[hash].tail == pt) {
				dev->magiclist[hash].tail = prev;
			}
			if (prev) {
				prev->next = pt->next;
			}
			DRM_UNLOCK();
			free(pt, M_DRM);
			return 0;
		}
	}
	DRM_UNLOCK();

	return DRM_ERR(EINVAL);
}
/* Build the buffer freelist: a sentinel head node plus one node per DMA
 * buffer, each inserted right after the head.  The tail pointer ends up
 * at the first-inserted node.  Returns 0 on success, DRM_ERR(ENOMEM) on
 * allocation failure.
 *
 * NOTE(review): on a mid-loop allocation failure the nodes built so far
 * are not freed here — presumably the caller's teardown path reclaims
 * them; confirm before relying on it.
 */
static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_freelist_t *entry;
	int i;
	DRM_DEBUG( "count=%d\n", dma->buf_count );

	/* Sentinel head node, marked in-use with age 0. */
	dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t),
				     DRM_MEM_DRIVER );
	if ( dev_priv->head == NULL )
		return DRM_ERR(ENOMEM);

	memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
	SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		buf = dma->buflist[i];
	        buf_priv = buf->dev_private;

		entry = DRM(alloc)( sizeof(drm_mga_freelist_t),
				    DRM_MEM_DRIVER );
		if ( entry == NULL )
			return DRM_ERR(ENOMEM);

		memset( entry, 0, sizeof(drm_mga_freelist_t) );

		/* Insert immediately after the head. */
		entry->next = dev_priv->head->next;
		entry->prev = dev_priv->head;
		SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
		entry->buf = buf;

		if ( dev_priv->head->next != NULL )
			dev_priv->head->next->prev = entry;
		/* First-inserted node has no successor: it is the tail. */
		if ( entry->next == NULL )
			dev_priv->tail = entry;

		buf_priv->list_entry = entry;
		buf_priv->discard = 0;
		buf_priv->dispatched = 0;

		dev_priv->head->next = entry;
	}

	return 0;
}
/* drm_open_helper is called whenever a process opens /dev/drm. */
/* Creates (or re-references) the per-process drm_file_t under the DRM
 * lock.  The first opener becomes master; root is always authenticated.
 * Returns 0 on success, EBUSY for exclusive opens, ENOMEM or the
 * driver open hook's error on failure.
 */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
		    drm_device_t *dev)
{
	int m = minor(kdev);
	drm_file_t *priv;
	int retcode;

	if (flags & O_EXCL)
		return EBUSY; /* No exclusive opens */
	dev->flags = flags;

	DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m);

	DRM_LOCK();
	priv = drm_find_file_by_proc(dev, p);
	if (priv) {
		/* Process already has this device open: bump refcount. */
		priv->refs++;
	} else {
		priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
		if (priv == NULL) {
			DRM_UNLOCK();
			return DRM_ERR(ENOMEM);
		}
		/* Credential layout differs across FreeBSD versions. */
#if __FreeBSD_version >= 500000
		priv->uid = p->td_ucred->cr_svuid;
		priv->pid = p->td_proc->p_pid;
#else
		priv->uid = p->p_cred->p_svuid;
		priv->pid = p->p_pid;
#endif

		priv->refs = 1;
		priv->minor = m;
		priv->ioctl_count = 0;

		/* for compatibility root is always authenticated */
		priv->authenticated = DRM_SUSER(p);

		if (dev->driver.open) {
			retcode = dev->driver.open(dev, priv);
			if (retcode != 0) {
				free(priv, M_DRM);
				DRM_UNLOCK();
				return retcode;
			}
		}

		/* first opener automatically becomes master */
		priv->master = TAILQ_EMPTY(&dev->files);

		TAILQ_INSERT_TAIL(&dev->files, priv, link);
	}
	DRM_UNLOCK();
#ifdef __FreeBSD__
	kdev->si_drv1 = dev;
#endif
	return 0;
}
/* Validate an image-load request: the destination range must lie entirely
 * within the texture region and the length must be properly aligned
 * (no bits set under MGA_ILOAD_MASK).  Returns 0 if valid, else EINVAL.
 */
static int mga_verify_iload( drm_mga_private_t *dev_priv,
			     unsigned int dstorg, unsigned int length )
{
	const unsigned int tex_start = dev_priv->texture_offset;
	const unsigned int tex_end = tex_start + dev_priv->texture_size;

	if ( dstorg < tex_start || dstorg + length > tex_end ) {
		DRM_ERROR( "*** bad iload DSTORG: 0x%x\n", dstorg );
		return DRM_ERR(EINVAL);
	}

	if ( length & MGA_ILOAD_MASK ) {
		DRM_ERROR( "*** bad iload length: 0x%x\n",
			   length & MGA_ILOAD_MASK );
		return DRM_ERR(EINVAL);
	}

	return 0;
}
/* Ioctl: release a framebuffer allocation.  The block is always handed
 * back to sis_free(); a failed allocation-set removal is reported via
 * EINVAL but does not prevent the free.
 */
int sis_fb_free( DRM_IOCTL_ARGS )
{
	drm_sis_mem_t fb;
	int retval = 0;

	DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t *)data, sizeof(fb));

	if (fb.free == 0)
		return DRM_ERR(EINVAL);

	if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
		retval = DRM_ERR(EINVAL);
	sis_free(fb.free);

	DRM_DEBUG("free fb, offset = %lu\n", fb.free);
	return retval;
}
/* Emit a packet0 register write after checking every target register
 * against the r300_reg_flags table: MARK_SAFE registers pass through,
 * MARK_CHECK_OFFSET registers must carry a valid GPU offset, anything
 * else is rejected.  At most 64 values per call.
 *
 * NOTE(review): there is no explicit check here that sz*4 bytes remain
 * in cmdbuf->bufsz — presumably the caller validated the buffer length;
 * confirm before reusing this helper from a new call site.
 */
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
							  drm_radeon_cmd_buffer_t* cmdbuf,
							  drm_r300_cmd_header_t header)
{
	int reg;
	int sz;
	int i;
	int values[64];
	RING_LOCALS;

	sz = header.packet0.count;
	/* Register address is split across two header fields. */
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if((sz>64)||(sz<0)){
		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
		return DRM_ERR(EINVAL);
	}
	for(i=0;i<sz;i++){
		/* Copy each value locally before checking so the checked
		 * value is the emitted value. */
		values[i]=((int __user*)cmdbuf->buf)[i];
		switch(r300_reg_flags[(reg>>2)+i]){
		case MARK_SAFE:
			break;
		case MARK_CHECK_OFFSET:
			if(r300_check_offset(dev_priv, (u32)values[i])){
				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
				return DRM_ERR(EINVAL);
			}
			break;
		default:
			DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
			return DRM_ERR(EINVAL);
		}
	}

	BEGIN_RING(1+sz);
	OUT_RING( CP_PACKET0( reg, sz-1 ) );
	OUT_RING_TABLE( values, sz );
	ADVANCE_RING();

	cmdbuf->buf += sz*4;
	cmdbuf->bufsz -= sz*4;

	return 0;
}
/* Queue a DMA blit: pick the engine by transfer direction, grab a queue
 * slot, build the scatter/gather info, then publish the request into the
 * blit queue under the blit lock and kick the handler.  The sync handle
 * returned in xfer lets userspace wait for completion.  On error all
 * partially acquired resources (slot, vsg) are released.
 */
static int via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return DRM_ERR(EINVAL);
	}

	/* Engine 0 blits to the framebuffer, engine 1 from it. */
	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return DRM_ERR(ENOMEM);
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}

	/* Publish the blit and advance the ring head under the lock. */
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
int via_dma_blit_sync( DRM_IOCTL_ARGS ) { drm_via_blitsync_t sync; int err; DRM_DEVICE; DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); if (sync.engine >= VIA_NUM_BLIT_ENGINES) return DRM_ERR(EINVAL); err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); if (DRM_ERR(EINTR) == err) err = DRM_ERR(EAGAIN); return err; }