int mga_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "\n" );

#if __HAVE_IRQ
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if ( dev->irq_enabled )
		DRM(irq_uninstall)(dev);
#endif

	if ( dev->dev_private ) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		if ( dev_priv->warp != NULL )
			DRM_IOREMAPFREE( dev_priv->warp, dev );

		if ( dev_priv->primary != NULL )
			DRM_IOREMAPFREE( dev_priv->primary, dev );

		if ( dev_priv->buffers != NULL )
			DRM_IOREMAPFREE( dev_priv->buffers, dev );

		if ( dev_priv->head != NULL ) {
			mga_freelist_cleanup( dev );
		}

		DRM(free)( dev->dev_private, sizeof(drm_mga_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}
int sis_ioctl_agp_init( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_agp_t agp;

	if (dev_priv == NULL) {
		dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t),
					       DRM_MEM_DRIVER);
		dev_priv = dev->dev_private;
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
	}

	if (dev_priv->AGPHeap != NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t *)data, sizeof(agp));

	dev_priv->AGPHeap = mmInit(agp.offset, agp.size);

	DRM_DEBUG("offset = %u, size = %u\n", agp.offset, agp.size);

	return 0;
}
/* Called by the X Server to initialize the FB heap.  Allocations will fail
 * unless this is called.  Offset is the beginning of the heap from the
 * framebuffer offset (MaxXFBMem in XFree86).
 *
 * Memory layout according to Thomas Winischhofer:
 * |------------------|DDDDDDDDDDDDDDDDDDDDDDDDDDDDD|HHHH|CCCCCCCCCCC|
 *
 *    X driver/sisfb                                  HW-   Command-
 *  framebuffer memory            DRI heap           Cursor   queue
 */
int sis_fb_init( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t fb;

	DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t *)data, sizeof(fb));

	if (dev_priv == NULL) {
		dev->dev_private = DRM(calloc)(1, sizeof(drm_sis_private_t),
					       DRM_MEM_DRIVER);
		dev_priv = dev->dev_private;
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
	}

	if (dev_priv->FBHeap != NULL)
		return DRM_ERR(EINVAL);

	dev_priv->FBHeap = mmInit(fb.offset, fb.size);

	DRM_DEBUG("offset = %u, size = %u\n", fb.offset, fb.size);

	return 0;
}
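/* A hedged userspace-side sketch of how the X server might invoke the
 * FB-init ioctl above.  The "sis_drm.h" header name and the
 * DRM_IOCTL_SIS_FB_INIT request code are assumptions (the usual SiS DRM
 * userspace interface), not something defined in this file; only the
 * drm_sis_fb_t offset/size fields are shown by the kernel code itself.
 */
#include <sys/ioctl.h>
#include "sis_drm.h"	/* assumed: drm_sis_fb_t, DRM_IOCTL_SIS_FB_INIT */

static int sis_fb_heap_setup(int drm_fd, unsigned int dri_offset,
			     unsigned int dri_size)
{
	drm_sis_fb_t fb;

	fb.offset = dri_offset;	/* start of the DRI heap, relative to the fb */
	fb.size   = dri_size;	/* bytes handed to the kernel FB allocator */

	/* Must succeed once before any FB allocation ioctls will work. */
	return ioctl(drm_fd, DRM_IOCTL_SIS_FB_INIT, &fb);
}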
/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}
/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	DRM(free)(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}
void DRM(dma_service)( DRM_IRQ_ARGS )
{
	drm_device_t *dev = (drm_device_t *) arg;
	drm_radeon_private_t *dev_priv =
		(drm_radeon_private_t *)dev->dev_private;
	u32 stat;

	/* Only consider the bits we're interested in - others could be used
	 * outside the DRM
	 */
	stat = RADEON_READ(RADEON_GEN_INT_STATUS)
	     & (RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT);
	if (!stat)
		return;

	/* SW interrupt */
	if (stat & RADEON_SW_INT_TEST) {
		DRM_WAKEUP( &dev_priv->swi_queue );
	}

	/* VBLANK interrupt */
	if (stat & RADEON_CRTC_VBLANK_STAT) {
		atomic_inc(&dev->vbl_received);
		DRM_WAKEUP(&dev->vbl_queue);
		DRM(vbl_send_signals)( dev );
	}

	/* Acknowledge interrupts we handle */
	RADEON_WRITE(RADEON_GEN_INT_STATUS, stat);
}
static int mga_freelist_init( drm_device_t *dev, drm_mga_private_t *dev_priv )
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_freelist_t *entry;
	int i;
	DRM_DEBUG( "count=%d\n", dma->buf_count );

	dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t),
				     DRM_MEM_DRIVER );
	if ( dev_priv->head == NULL )
		return -ENOMEM;

	memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
	SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;

		entry = DRM(alloc)( sizeof(drm_mga_freelist_t),
				    DRM_MEM_DRIVER );
		if ( entry == NULL )
			return -ENOMEM;

		memset( entry, 0, sizeof(drm_mga_freelist_t) );

		entry->next = dev_priv->head->next;
		entry->prev = dev_priv->head;
		SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
		entry->buf = buf;

		if ( dev_priv->head->next != NULL )
			dev_priv->head->next->prev = entry;
		if ( entry->next == NULL )
			dev_priv->tail = entry;

		buf_priv->list_entry = entry;
		buf_priv->discard = 0;
		buf_priv->dispatched = 0;

		dev_priv->head->next = entry;
	}

	return 0;
}
int via_final_context(struct drm_device *dev, int context)
{
	int i;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	for (i = 0; i < MAX_CONTEXT; i++)
		if (global_ppriv[i].used &&
		    (global_ppriv[i].context == context))
			break;

	if (i < MAX_CONTEXT) {
		set_t *set;
		ITEM_TYPE item;
		int retval;

		DRM_DEBUG("find socket %d, context = %d\n", i, context);

		/* Video Memory */
		set = global_ppriv[i].sets[0];
		retval = via_setFirst(set, &item);
		while (retval) {
			DRM_DEBUG("free video memory 0x%lx\n", item);
			via_mmFreeMem((PMemBlock) item);
			retval = via_setNext(set, &item);
		}
		via_setDestroy(set);

		/* AGP Memory */
		set = global_ppriv[i].sets[1];
		retval = via_setFirst(set, &item);
		while (retval) {
			DRM_DEBUG("free agp memory 0x%lx\n", item);
			via_mmFreeMem((PMemBlock) item);
			retval = via_setNext(set, &item);
		}
		via_setDestroy(set);
		global_ppriv[i].used = 0;
	}
	via_release_futex(dev_priv, context);

#if defined(__linux__)
	/* Linux specific until context tracking code gets ported to BSD */
	/* Last context, perform cleanup */
	if (dev->ctx_count == 1 && dev->dev_private) {
		if (dev->irq)
			DRM(irq_uninstall) (dev);

		via_cleanup_futex(dev_priv);
		via_do_cleanup_map(dev);
	}
#endif

	return 1;
}
static __inline__ int Join2Blocks(TMemBlock *p)
{
	if (p->free && p->next && p->next->free) {
		TMemBlock *q = p->next;
		p->size += q->size;
		p->next = q->next;
		DRM(free)(q, sizeof(TMemBlock), DRM_MEM_DRIVER);
		return 1;
	}
	return 0;
}
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    DRM(alloc)(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    DRM(alloc)(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}
int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
	drm_via_private_t *dev_priv;
	unsigned int i;

	DRM_DEBUG("%s\n", __FUNCTION__);

	via_init_command_verifier();
	dev_priv = DRM(alloc) (sizeof(drm_via_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_via_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
	if (!dev_priv->fb) {
		DRM_ERROR("could not find framebuffer!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("could not find mmio region!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->sarea_priv =
	    (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				 init->sarea_priv_offset);

	dev_priv->agpAddr = init->agpAddr;

	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i)
		DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));

	dev->dev_private = (void *)dev_priv;
	return 0;
}
void mmDestroy(memHeap_t *heap)
{
	TMemBlock *p, *q;

	if (heap == NULL)
		return;

	p = (TMemBlock *)heap;
	while (p != NULL) {
		q = p->next;
		DRM(free)(p, sizeof(TMemBlock), DRM_MEM_DRIVER);
		p = q;
	}
}
int via_do_cleanup_map(drm_device_t * dev)
{
	if (dev->dev_private) {
		drm_via_private_t *dev_priv = dev->dev_private;

		via_dma_cleanup(dev);

		DRM(free) (dev_priv, sizeof(drm_via_private_t),
			   DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}
static void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		DRM(free)(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}
/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks = DRM(alloc)(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return -ENOMEM;

	*heap = DRM(alloc)(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		DRM(free)(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->filp = (DRMFILE) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}
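/* A minimal standalone sketch of the invariant init_heap establishes and
 * free_block relies on: the heap is a circular doubly linked list whose
 * sentinel node carries a non-NULL owner marker ((DRMFILE)-1 above), so
 * coalescing can never merge a real block into the sentinel or walk past
 * the end of the range.  Plain malloc/free and an int owner field stand in
 * for DRM(alloc)/DRM(free) and DRMFILE; the names here are illustrative
 * only, not part of the driver.
 */
#include <stdlib.h>

struct blk {
	int start, size;
	int owner;		/* 0 = free, -1 = sentinel, else a client id */
	struct blk *next, *prev;
};

static struct blk *heap_init(int start, int size)
{
	struct blk *sentinel = calloc(1, sizeof(*sentinel));
	struct blk *all = calloc(1, sizeof(*all));

	if (!sentinel || !all) {
		free(sentinel);
		free(all);
		return NULL;
	}
	sentinel->owner = -1;			/* never treated as free */
	all->start = start;
	all->size = size;
	sentinel->next = sentinel->prev = all;
	all->next = all->prev = sentinel;
	return sentinel;
}

static void blk_free(struct blk *p)
{
	p->owner = 0;
	if (p->next->owner == 0) {		/* merge the successor in */
		struct blk *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		free(q);
	}
	if (p->prev->owner == 0) {		/* merge into the predecessor */
		struct blk *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		free(p);
	}
}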
static TMemBlock* SliceBlock(TMemBlock *p,
			     int startofs, int size,
			     int reserved, int alignment)
{
	TMemBlock *newblock;

	/* break left */
	if (startofs > p->ofs) {
		newblock = (TMemBlock*) DRM(calloc)(1, sizeof(TMemBlock),
						    DRM_MEM_DRIVER);
		newblock->ofs = startofs;
		newblock->size = p->size - (startofs - p->ofs);
		newblock->free = 1;
		newblock->next = p->next;
		p->size -= newblock->size;
		p->next = newblock;
		p = newblock;
	}

	/* break right */
	if (size < p->size) {
		newblock = (TMemBlock*) DRM(calloc)(1, sizeof(TMemBlock),
						    DRM_MEM_DRIVER);
		newblock->ofs = startofs + size;
		newblock->size = p->size - size;
		newblock->free = 1;
		newblock->next = p->next;
		p->size = size;
		p->next = newblock;
	}

	/* p = middle block */
	p->align = alignment;
	p->free = 0;
	p->reserved = reserved;
	return p;
}
int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
	drm_via_private_t *dev_priv;

	DRM_DEBUG("%s\n", __FUNCTION__);

	via_init_command_verifier();
	dev_priv = DRM(alloc) (sizeof(drm_via_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_via_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
	if (!dev_priv->fb) {
		DRM_ERROR("could not find framebuffer!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("could not find mmio region!\n");
		dev->dev_private = (void *)dev_priv;
		via_do_cleanup_map(dev);
		return -EINVAL;
	}

	dev_priv->sarea_priv =
	    (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				 init->sarea_priv_offset);

	dev_priv->agpAddr = init->agpAddr;

	via_init_futex( dev_priv );

	dev_priv->pro_group_a = (dev->pdev->device == 0x3118);

	dev->dev_private = (void *)dev_priv;
	return 0;
}
static void mga_freelist_cleanup( drm_device_t *dev )
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;
	drm_mga_freelist_t *next;
	DRM_DEBUG( "\n" );

	entry = dev_priv->head;
	while ( entry ) {
		next = entry->next;
		DRM(free)( entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER );
		entry = next;
	}

	dev_priv->head = dev_priv->tail = NULL;
}
memHeap_t *mmInit(int ofs, int size)
{
	PMemBlock blocks;

	if (size <= 0)
		return 0;

	blocks = (TMemBlock *)DRM(calloc)(1, sizeof(TMemBlock),
					  DRM_MEM_DRIVER);
	if (blocks != NULL) {
		blocks->ofs = ofs;
		blocks->size = size;
		blocks->free = 1;
		return (memHeap_t *)blocks;
	} else
		return 0;
}
void r128_dma_service( DRM_IRQ_ARGS )
{
	drm_device_t *dev = (drm_device_t *) arg;
	drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
	int status;

	status = R128_READ( R128_GEN_INT_STATUS );

	/* VBLANK interrupt */
	if ( status & R128_CRTC_VBLANK_INT ) {
		R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
		atomic_inc(&dev->vbl_received);
		DRM_WAKEUP(&dev->vbl_queue);
		DRM(vbl_send_signals)( dev );
	}
}
set_t *setInit(void)
{
	int i;
	set_t *set;

	set = (set_t *)DRM(alloc)(sizeof(set_t), DRM_MEM_DRIVER);
	if (set != NULL) {
		for (i = 0; i < SET_SIZE; i++) {
			set->list[i].free_next = i + 1;
			set->list[i].alloc_next = -1;
		}
		set->list[SET_SIZE - 1].free_next = -1;
		set->free = 0;
		set->alloc = -1;
		set->trace = -1;
	}

	return set;
}
irqreturn_t r128_driver_irq_handler( DRM_IRQ_ARGS )
{
	drm_device_t *dev = (drm_device_t *) arg;
	drm_r128_private_t *dev_priv = (drm_r128_private_t *)dev->dev_private;
	int status;

	status = R128_READ( R128_GEN_INT_STATUS );

	/* VBLANK interrupt */
	if ( status & R128_CRTC_VBLANK_INT ) {
		R128_WRITE( R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK );
		atomic_inc(&dev->vbl_received);
		DRM_WAKEUP(&dev->vbl_queue);
		DRM(vbl_send_signals)( dev );
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
int mga_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "\n" );

	if ( dev->dev_private ) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		DRM_IOREMAPFREE( dev_priv->warp, dev );
		DRM_IOREMAPFREE( dev_priv->primary, dev );
		DRM_IOREMAPFREE( dev_priv->buffers, dev );

		if ( dev_priv->head != NULL ) {
			mga_freelist_cleanup( dev );
		}

		DRM(free)( dev->dev_private, sizeof(drm_mga_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}
/* Kludgey workaround for existing i810 server.  Remove soon.
 */
memHeap_t *mmAddRange( memHeap_t *heap, int ofs, int size )
{
	PMemBlock blocks;

	blocks = (TMemBlock *)DRM(calloc)(2, sizeof(TMemBlock),
					  DRM_MEM_DRIVER);
	if (blocks != NULL) {
		blocks[0].size = size;
		blocks[0].free = 1;
		blocks[0].ofs = ofs;
		blocks[0].next = &blocks[1];

		/* Discontinuity - stops JoinBlock from trying to join
		 * non-adjacent ranges.
		 */
		blocks[1].size = 0;
		blocks[1].free = 0;
		blocks[1].ofs = ofs + size;
		blocks[1].next = (PMemBlock)heap;
		return (memHeap_t *)blocks;
	} else
		return heap;
}
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

	/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current))
			return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  current->pid, buf->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
		/* This isn't a race condition on buf->list, since our
		   concern is the buffer reclaim during the time the
		   process closes the /dev/drm? handle, so it can't also
		   be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
			/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
			/* POST: we will wait for the context switch and
			   will dispatch on a later call when
			   dev->last_context == buf->context.  NOTE WE HOLD
			   THE LOCK THROUGHOUT THIS TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context, buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	  address;
	unsigned long	  length;
	drm_buf_t	  *buf;
	int		  retcode = 0;
	drm_device_dma_t  *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	  dma_start, dma_stop;
#endif

	if (test_and_set_bit(0, &dev->dma_flag))
		return -EBUSY;

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf	= dma->next_buffer;
	address = (unsigned long)buf->address;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

		/* POST: we will wait for the context switch and will
		   dispatch on a later call when dev->last_context ==
		   buf->context.  NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending = 1;
	buf->waiting = 0;
	buf->list    = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:

	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}
int setDestroy(set_t *set)
{
	DRM(free)(set, sizeof(set_t), DRM_MEM_DRIVER);

	return 1;
}
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG( "\n" );

	dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	memset( dev_priv, 0, sizeof(drm_mga_private_t) );

	dev_priv->chipset = init->chipset;

	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;

	if ( init->sgram ) {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	} else {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	}
	dev_priv->maccess	= init->maccess;

	dev_priv->fb_cpp	= init->fb_cpp;
	dev_priv->front_offset	= init->front_offset;
	dev_priv->front_pitch	= init->front_pitch;
	dev_priv->back_offset	= init->back_offset;
	dev_priv->back_pitch	= init->back_pitch;

	dev_priv->depth_cpp	= init->depth_cpp;
	dev_priv->depth_offset	= init->depth_offset;
	dev_priv->depth_pitch	= init->depth_pitch;

	/* FIXME: Need to support AGP textures...
	 */
	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	DRM_GETSAREA();

	if(!dev_priv->sarea) {
		DRM_ERROR( "failed to find sarea!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
	if(!dev_priv->fb) {
		DRM_ERROR( "failed to find framebuffer!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
	if(!dev_priv->mmio) {
		DRM_ERROR( "failed to find mmio region!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->status, init->status_offset );
	if(!dev_priv->status) {
		DRM_ERROR( "failed to find status page!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
	if(!dev_priv->warp) {
		DRM_ERROR( "failed to find warp microcode region!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
	if(!dev_priv->primary) {
		DRM_ERROR( "failed to find primary dma region!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
	if(!dev_priv->buffers) {
		DRM_ERROR( "failed to find dma buffer region!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -EINVAL;
	}

	dev_priv->sarea_priv =
		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	DRM_IOREMAP( dev_priv->warp, dev );
	DRM_IOREMAP( dev_priv->primary, dev );
	DRM_IOREMAP( dev_priv->buffers, dev );

	if(!dev_priv->warp->handle ||
	   !dev_priv->primary->handle ||
	   !dev_priv->buffers->handle ) {
		DRM_ERROR( "failed to ioremap agp regions!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -ENOMEM;
	}

	ret = mga_warp_install_microcode( dev_priv );
	if ( ret < 0 ) {
		DRM_ERROR( "failed to install WARP ucode!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return ret;
	}

	ret = mga_warp_init( dev_priv );
	if ( ret < 0 ) {
		DRM_ERROR( "failed to init WARP engine!\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return ret;
	}

	dev_priv->prim.status = (u32 *)dev_priv->status->handle;

	mga_do_wait_for_idle( dev_priv );

	/* Init the primary DMA registers.
	 */
	MGA_WRITE( MGA_PRIMADDRESS,
		   dev_priv->primary->offset | MGA_DMA_GENERAL );
#if 0
	MGA_WRITE( MGA_PRIMPTR,
		   virt_to_bus((void *)dev_priv->prim.status) |
		   MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */
		   MGA_PRIMPTREN1 );	/* DWGSYNC */
#endif

	dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *)dev_priv->primary->handle
			      + dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;

	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if ( mga_freelist_init( dev, dev_priv ) < 0 ) {
		DRM_ERROR( "could not initialize freelist\n" );
		/* Assign dev_private so we can do cleanup. */
		dev->dev_private = (void *)dev_priv;
		mga_do_cleanup_dma( dev );
		return -ENOMEM;
	}

	/* Make dev_private visible to others. */
	dev->dev_private = (void *)dev_priv;
	return 0;
}