static int drm_setup(struct drm_device *dev)
{
        int i;
        int ret;

        if (dev->driver->firstopen) {
                ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev->buf_use = 0;
                atomic_set(&dev->buf_alloc, 0);

                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }

        for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        dev->sigdata.lock = NULL;

        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */

        return 0;
}
static int drm_setup(struct drm_device *dev)
{
        drm_local_map_t *map;
        int i;

        DRM_LOCK_ASSERT(dev);

        /* prebuild the SAREA */
        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        if (dev->driver->firstopen)
                dev->driver->firstopen(dev);

        dev->buf_use = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i != 0)
                        return i;
        }

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        init_waitqueue_head(&dev->lock.lock_queue);

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                dev->irq_enabled = 0;

        dev->context_flag = 0;
        dev->last_context = 0;
        dev->if_version = 0;

        dev->buf_sigio = NULL;

        DRM_DEBUG("\n");

        return 0;
}
static int i810_setup(drm_device_t *dev)
{
        int i;

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        drm_dma_setup(dev);

        /* Reset per-device statistics counters. */
        atomic_set(&dev->total_open, 0);
        atomic_set(&dev->total_close, 0);
        atomic_set(&dev->total_ioctl, 0);
        atomic_set(&dev->total_irq, 0);
        atomic_set(&dev->total_ctx, 0);
        atomic_set(&dev->total_locks, 0);
        atomic_set(&dev->total_unlocks, 0);
        atomic_set(&dev->total_contends, 0);
        atomic_set(&dev->total_sleeps, 0);

        /* Clear the authentication magic hash table. */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->maplist = NULL;
        dev->map_count = 0;
        dev->vmalist = NULL;
        dev->lock.hw_lock = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_timer(&dev->timer);
        init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTO
        memset(&dev->histo, 0, sizeof(dev->histo));
#endif
        dev->ctx_start = 0;
        dev->lck_start = 0;

        /* Reset the device's internal message buffer pointers. */
        dev->buf_rp = dev->buf;
        dev->buf_wp = dev->buf;
        dev->buf_end = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */

        return 0;
}