/* Needs the lock as it touches the ring. */ int i830_irq_emit( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_irq_emit_t emit; int result; if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { DRM_ERROR("i830_irq_emit called without lock held\n"); return -EINVAL; } if ( !dev_priv ) { DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); return -EINVAL; } if (copy_from_user( &emit, (drm_i830_irq_emit_t __user *)arg, sizeof(emit) )) return -EFAULT; result = i830_emit_irq( dev ); if ( copy_to_user( emit.irq_seq, &result, sizeof(int) ) ) { DRM_ERROR( "copy_to_user\n" ); return -EFAULT; } return 0; }
/*
 * Release every XvMC hardware futex still owned by the given context.
 * Any waiter sleeping on a contended lock is woken before the lock word
 * is cleared.
 */
void via_release_futex(drm_via_private_t *dev_priv, int context)
{
	unsigned int i;
	volatile int *lock;

	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
		lock = (int *) XVMCLOCKPTR(dev_priv->sarea_priv, i);

		/* Skip locks not owned by this context. */
		if (_DRM_LOCKING_CONTEXT(*lock) != context)
			continue;

		/* Wake sleepers on a held-and-contended lock first. */
		if (_DRM_LOCK_IS_HELD(*lock) && (*lock & _DRM_LOCK_CONT))
			DRM_WAKEUP(&(dev_priv->decoder_queue[i]));

		*lock = 0;
	}
}
/*
 * Release (close) callback for the i810 DRM device file.
 *
 * If the closing process holds the hardware lock, its buffers are
 * reclaimed and the lock is force-freed.  Otherwise the kernel context
 * takes the lock itself (sleeping on the lock wait queue if contended)
 * so that buffers can be reclaimed safely.  Finally the per-file private
 * data is unlinked and freed, and on the last close the device is torn
 * down via i810_takedown().
 *
 * Returns 0 on success or a negative errno (-EINTR / -ERESTARTSYS /
 * -EBUSY) on failure.  Runs under the big kernel lock throughout.
 */
int i810_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
		  current->pid, dev->device, dev->open_count);

	/*
	 * Case 1: the closing process itself holds the hardware lock.
	 * Reclaim its buffers while the lock is still held, then free the
	 * lock on its behalf.
	 */
	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		i810_reclaim_buffers(dev, priv->pid);
		DRM_ERROR("Process %d dead, freeing lock for context %d\n",
			  current->pid,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		drm_lock_free(dev, &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of hardware at
		   this point, possibly processed via a callback to the X
		   server. */
	} else if (dev->lock.hw_lock) {
		/*
		 * Case 2: somebody else may hold the lock.  The lock is
		 * required to reclaim buffers, so take it as the kernel
		 * context, sleeping on the lock queue until it is free.
		 */
		DECLARE_WAITQUEUE(entry, current);
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			/* Set state before the check to avoid losing a wakeup. */
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->lock.pid = priv->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break; /* Got lock */
			}
			/* Contention */
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
		/* Only reclaim if we actually got the lock. */
		if (!retcode) {
			i810_reclaim_buffers(dev, priv->pid);
			drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}
	/* Detach any async (SIGIO) notification for this file. */
	drm_fasync(-1, filp, 0);

	/* Unlink this file's private data from the device's file list. */
	down(&dev->struct_sem);
	if (priv->prev)
		priv->prev->next = priv->next;
	else
		dev->file_first = priv->next;
	if (priv->next)
		priv->next->prev = priv->prev;
	else
		dev->file_last = priv->prev;
	up(&dev->struct_sem);

	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
	atomic_inc(&dev->total_close);
	spin_lock(&dev->count_lock);
	/* On the last close, tear the device down (unless it is busy). */
	if (!--dev->open_count) {
		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
			DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count),
				  dev->blocked);
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		unlock_kernel();
		return i810_takedown(dev);
	}
	spin_unlock(&dev->count_lock);
	unlock_kernel();
	return retcode;
}
/* Only called by gamma_dma_schedule. */
/*
 * Dispatch the next queued DMA buffer to the gamma hardware.
 *
 * Serializes against itself via dev->dma_flag (bit 0).  Takes the
 * hardware lock as the kernel context for ordinary buffers unless the
 * caller already holds it ('locked') or the buffer was queued
 * "while locked" by its owner.  Returns 0 on successful dispatch (or a
 * skipped zero-length buffer), -EBUSY when the engine/lock is busy or a
 * context switch is pending, -EINVAL on bad state.
 */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long address;
	unsigned long length;
	drm_buf_t *buf;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t dma_start, dma_stop;
#endif

	/* Only one dispatcher at a time; dma_flag bit 0 is the gate. */
	if (test_and_set_bit(0, &dev->dma_flag))
		return -EBUSY;

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf = dma->next_buffer;
	address = (unsigned long)buf->address;
	length = buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	/* Buffer was flagged for reclaim: drop it instead of dispatching. */
	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	/* Empty buffers are skipped (freed), not treated as an error. */
	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		/* Owner claimed to hold the lock; just warn if it does not. */
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		/* Kernel takes the lock itself unless the caller holds it. */
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		/* NOTE(review): DRM(x) presumably expands to gamma_##x here,
		   matching the direct gamma_* calls used elsewhere in this
		   function — confirm against the driver's DRM() macro. */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;
		/* POST: we will wait for the context switch and will
		   dispatch on a later call when dev->last_context ==
		   buf->context.  NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	/* Move the buffer from the queue to the pending state. */
	gamma_clear_next_buffer(dev);
	buf->pending = 1;
	buf->waiting = 0;
	buf->list = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	gamma_dma_dispatch(dev, address, length);
	/* The previously dispatched buffer is done now; free it and
	   remember this one as the in-flight buffer. */
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]);	  /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	/* Release the lock only if we took it ourselves above. */
	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

cleanup:
	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}
/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
#if 0 /* old drm_release equivalent from DragonFly */
/* NOTE(review): this whole function is compiled out by the #if 0 above;
   kept as reference only.  The matching #endif is outside this chunk. */
void drm_cdevpriv_dtor(void *cd)
{
	struct drm_file *file_priv = cd;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK(dev);

	/* Give the driver a chance to run per-file cleanup first. */
	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  DRM_CURRENTPID, (long)dev->dev, dev->open_count);

	if (dev->driver->driver_features & DRIVER_GEM)
		drm_gem_release(dev, file_priv);

	/*
	 * Case 1: this file holds the hardware lock.  Reclaim its buffers
	 * while the lock is still held, then force-free the lock.
	 */
	if (dev->primary->master->lock.hw_lock
	    && _DRM_LOCK_IS_HELD(dev->primary->master->lock.hw_lock->lock)
	    && dev->primary->master->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);
		drm_lock_free(&dev->primary->master->lock,
			      _DRM_LOCKING_CONTEXT(dev->primary->master->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of hardware at
		   this point, possibly processed via a callback to the X
		   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL
		   && dev->primary->master->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		/* Case 2: take the lock as the kernel context, sleeping
		   (interruptibly, PCATCH) until it is available. */
		for (;;) {
			if (!dev->primary->master->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->primary->master->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->primary->master->lock.file_priv = file_priv;
				dev->primary->master->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break; /* Got lock */
			}
			/* Contention */
			retcode = DRM_LOCK_SLEEP(dev,
						 &dev->primary->master->lock.lock_queue,
						 PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		/* Only reclaim if the lock was actually obtained. */
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->primary->master->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}

	/* Drivers without a locked reclaim hook get the generic path. */
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
	    && !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	/* Drop SIGIO ownership for this file. */
	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	list_del(&file_priv->lhead);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->dev);
	/* Last close tears the device down. */
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK(dev);
}