void adreno_drawctxt_destroy(struct kgsl_device *device, struct kgsl_context *context) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_context *drawctxt = context->devctxt; if (drawctxt == NULL) return; /* deactivate context */ if (adreno_dev->drawctxt_active == drawctxt) { /* no need to save GMEM or shader, the context is * being destroyed. */ drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE | CTXT_FLAGS_SHADER_SAVE | CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_STATE_SHADOW); adreno_drawctxt_switch(adreno_dev, NULL, 0); } adreno_idle(device, KGSL_TIMEOUT_DEFAULT); kgsl_sharedmem_free(&drawctxt->gpustate); kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); kfree(drawctxt); context->devctxt = NULL; }
void adreno_drawctxt_destroy(struct kgsl_device *device, struct kgsl_context *context) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); struct adreno_context *drawctxt; if (context == NULL || context->devctxt == NULL) return; drawctxt = context->devctxt; if (adreno_dev->drawctxt_active == drawctxt) { drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE | CTXT_FLAGS_SHADER_SAVE | CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_STATE_SHADOW); adreno_drawctxt_switch(adreno_dev, NULL, 0); } adreno_idle(device); kgsl_sharedmem_free(&drawctxt->gpustate); kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); kfree(drawctxt); context->devctxt = NULL; }
/** * adreno_drawctxt_detach(): detach a context from the GPU * @context: Generic KGSL context container for the context * */ void adreno_drawctxt_detach(struct kgsl_context *context) { struct kgsl_device *device; struct adreno_device *adreno_dev; struct adreno_context *drawctxt; if (context == NULL) return; device = context->device; adreno_dev = ADRENO_DEVICE(device); drawctxt = ADRENO_CONTEXT(context); /* deactivate context */ if (adreno_dev->drawctxt_active == drawctxt) { /* no need to save GMEM or shader, the context is * being destroyed. */ drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE | CTXT_FLAGS_SHADER_SAVE | CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_STATE_SHADOW); drawctxt->flags |= CTXT_FLAGS_BEING_DESTROYED; adreno_drawctxt_switch(adreno_dev, NULL, 0); } if (device->state != KGSL_STATE_HUNG) adreno_idle(device); adreno_profile_process_results(device); kgsl_sharedmem_free(&drawctxt->gpustate); kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow); }
/*
 * adreno_drawctxt_destroy() - destroy an adreno draw context
 * @device: KGSL device that owns the context
 * @context: generic KGSL context holding the adreno context in devctxt
 *
 * Switches away from the context if it is active, idles the GPU unless
 * it is hung, forces a pagetable update on a20x hardware, and frees the
 * context's GPU state and GMEM shadow memory.
 */
void adreno_drawctxt_destroy(struct kgsl_device *device, struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;

	if (context == NULL || context->devctxt == NULL)
		return;

	drawctxt = context->devctxt;

	if (adreno_dev->drawctxt_active == drawctxt) {
		/* Context is being destroyed - no need to save GMEM or
		 * shader state on the way out. */
		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
			CTXT_FLAGS_SHADER_SAVE | CTXT_FLAGS_GMEM_SHADOW |
			CTXT_FLAGS_STATE_SHADOW);
#ifdef CONFIG_MSM_KGSL_GPU_USAGE
		/* GPU usage accounting: no process owns the GPU anymore */
		device->current_process_priv = NULL;
#endif
		adreno_drawctxt_switch(adreno_dev, NULL, 0);
	}

	/* Don't wait for an idle that will never come on a hung GPU */
	if (device->state != KGSL_STATE_HUNG)
		adreno_idle(device);

	/* a20x: issue a pagetable update for the now-active context */
	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
		kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
			KGSL_MMUFLAGS_PTUPDATE);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	kfree(drawctxt);
	context->devctxt = NULL;
}
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); kgsl_sharedmem_free(&rb->buffer_desc); kgsl_sharedmem_free(&rb->memptrs_desc); kfree(adreno_dev->pfp_fw); kfree(adreno_dev->pm4_fw); adreno_dev->pfp_fw = NULL; adreno_dev->pm4_fw = NULL; memset(rb, 0, sizeof(struct adreno_ringbuffer)); }
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb) { KGSL_CMD_VDBG("enter (rb=%p)\n", rb); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); memset(rb, 0, sizeof(struct kgsl_ringbuffer)); KGSL_CMD_VDBG("return %d\n", 0); return 0; }
int kgsl_g12_drawctxt_destroy(struct kgsl_device *device, unsigned int drawctxt_id) { if (drawctxt_id >= KGSL_G12_CONTEXT_MAX) return KGSL_FAILURE; g_z1xx.numcontext--; if (g_z1xx.numcontext == 0) { int i; for (i = 0; i < GSL_HAL_NUMCMDBUFFERS; i++) { kgsl_sharedmem_free(&g_z1xx.cmdbufdesc[i]); kfree(g_z1xx.cmdbuf[i]); } memset(&g_z1xx, 0, sizeof(struct kgsl_g12_z1xx)); } if (g_z1xx.numcontext < 0) { g_z1xx.numcontext = 0; return KGSL_FAILURE; } return KGSL_SUCCESS; }
/*
 * kgsl_g12_cmdstream_close() - shut down the g12 command stream
 * @device: KGSL device being closed
 *
 * Frees the ringbuffer command buffer, clears the ringbuffer state,
 * and closes the generic command stream.
 */
void kgsl_g12_cmdstream_close(struct kgsl_device *device)
{
	struct kgsl_g12_device *g12_device = (struct kgsl_g12_device *)device;

	kgsl_sharedmem_free(&g12_device->ringbuffer.cmdbufdesc);
	memset(&g12_device->ringbuffer, 0,
	       sizeof(struct kgsl_g12_ringbuffer));

	kgsl_cmdstream_close(device);
}
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb) { KGSL_CMD_VDBG("enter (rb=%p)\n", rb); /* this must happen before first sharedmem_free */ kgsl_yamato_cleanup_pt(rb->device, rb->device->mmu.defaultpagetable); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); rb->flags &= ~KGSL_FLAGS_INITIALIZED; memset(rb, 0, sizeof(struct kgsl_ringbuffer)); KGSL_CMD_VDBG("return %d\n", 0); return 0; }
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb) { struct kgsl_yamato_device *yamato_device = KGSL_YAMATO_DEVICE( rb->device); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); if (yamato_device->pfp_fw != NULL) kfree(yamato_device->pfp_fw); if (yamato_device->pm4_fw != NULL) kfree(yamato_device->pm4_fw); yamato_device->pfp_fw = NULL; yamato_device->pm4_fw = NULL; memset(rb, 0, sizeof(struct kgsl_ringbuffer)); return 0; }
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); if (adreno_dev->pfp_fw != NULL) kfree(adreno_dev->pfp_fw); if (adreno_dev->pm4_fw != NULL) kfree(adreno_dev->pm4_fw); adreno_dev->pfp_fw = NULL; adreno_dev->pm4_fw = NULL; memset(rb, 0, sizeof(struct adreno_ringbuffer)); return 0; }
/*
 * adreno_drawctxt_destroy() - destroy an adreno draw context
 * @device: KGSL device that owns the context
 * @context: generic KGSL context holding the adreno context in devctxt
 *
 * Switches away from the context if it is active, idles the GPU unless
 * it is hung, forces a pagetable update on a20x hardware, and frees
 * the context's GPU state and GMEM shadow memory.
 */
void adreno_drawctxt_destroy(struct kgsl_device *device, struct kgsl_context *context)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;

	if (context == NULL || context->devctxt == NULL)
		return;

	drawctxt = context->devctxt;

	/* deactivate context */
	if (adreno_dev->drawctxt_active == drawctxt) {
		/* no need to save GMEM or shader, the context is
		 * being destroyed.
		 */
		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
			CTXT_FLAGS_SHADER_SAVE | CTXT_FLAGS_GMEM_SHADOW |
			CTXT_FLAGS_STATE_SHADOW);
		/* NOTE: "DESTOYED" is the flag's declared name (historic
		 * typo in the header); do not "fix" it here. */
		drawctxt->flags |= CTXT_FLAGS_BEING_DESTOYED;
		adreno_drawctxt_switch(adreno_dev, NULL, 0);
	}

	/* Don't wait for an idle that will never come on a hung GPU */
	if (device->state != KGSL_STATE_HUNG)
		adreno_idle(device);

	/* a20x: issue a pagetable update for the now-active context */
	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
		kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
			KGSL_MMUFLAGS_PTUPDATE);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	kfree(drawctxt);
	context->devctxt = NULL;
}
void kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb) { struct kgsl_yamato_device *yamato_device = KGSL_YAMATO_DEVICE( rb->device); KGSL_CMD_VDBG("enter (rb=%p)\n", rb); if (rb->buffer_desc.hostptr) kgsl_sharedmem_free(&rb->buffer_desc); if (rb->memptrs_desc.hostptr) kgsl_sharedmem_free(&rb->memptrs_desc); if (yamato_device->pfp_fw != NULL) kfree(yamato_device->pfp_fw); if (yamato_device->pm4_fw != NULL) kfree(yamato_device->pm4_fw); yamato_device->pfp_fw = NULL; yamato_device->pm4_fw = NULL; memset(rb, 0, sizeof(struct kgsl_ringbuffer)); KGSL_CMD_VDBG("return %d\n", 0); }
/*
 * kgsl_ringbuffer_close() - stop and tear down a ringbuffer
 * @rb: ringbuffer to close
 *
 * Drains the command stream memqueue, stops the ringbuffer, detaches
 * the default pagetable, and frees the ring and memptr buffers.
 * The statement order is load-bearing: drain before stop, and
 * pagetable cleanup before the buffers are freed. Always returns 0.
 */
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb)
{
	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);

	/* Drain the cmdstream memqueue before stopping the ring */
	kgsl_cmdstream_memqueue_drain(rb->device);

	kgsl_ringbuffer_stop(rb);

	/* Must run before the shared memory below is freed */
	kgsl_yamato_cleanup_pt(rb->device, rb->device->mmu.defaultpagetable);

	if (rb->buffer_desc.hostptr)
		kgsl_sharedmem_free(&rb->buffer_desc);

	if (rb->memptrs_desc.hostptr)
		kgsl_sharedmem_free(&rb->memptrs_desc);

	rb->flags &= ~KGSL_FLAGS_INITIALIZED;
	memset(rb, 0, sizeof(struct kgsl_ringbuffer));

	KGSL_CMD_VDBG("return %d\n", 0);
	return 0;
}
/*
 * a2xx_drawctxt_create() - build the shadow state for an a2xx context
 * @adreno_dev: adreno device the context belongs to
 * @drawctxt: context to populate
 *
 * Allocates and zeroes the per-context GPU state buffer, constructs
 * the gpustate and (optionally) GMEM shadows, and flushes the result
 * to memory. On any failure the gpustate buffer is freed before the
 * error is returned.
 */
static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
	struct adreno_context *drawctxt)
{
	int ret;

	/* Allocate and zero the GPU state buffer for this context */
	ret = kgsl_allocate(&drawctxt->gpustate, drawctxt->pagetable,
		_context_size(adreno_dev));
	if (ret)
		return ret;

	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
		_context_size(adreno_dev));

	/* Command construction scratch area starts at CMD_OFFSET
	 * inside the gpustate buffer (tmp_ctx is a shared global). */
	tmp_ctx.cmd = tmp_ctx.start = (unsigned int *)
		((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
		ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;

		/* Shader state must be saved/restored on context switch */
		drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
	}

	if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
		ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
		if (ret)
			goto done;
	}

	/* Flush the constructed state out so the GPU sees it */
	kgsl_cache_range_op(&drawctxt->gpustate, KGSL_CACHE_OP_FLUSH);
	kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
		drawctxt->gpustate.gpuaddr, drawctxt->gpustate.size, false);

done:
	if (ret)
		kgsl_sharedmem_free(&drawctxt->gpustate);

	return ret;
}
/*
 * kgsl_mmu_destroypagetableobject() - free a pagetable object
 * @pagetable: pagetable to destroy (NULL is tolerated)
 *
 * Releases the pagetable's base shared memory, destroys its
 * allocation pool, and frees the object itself. Always returns 0.
 */
int kgsl_mmu_destroypagetableobject(struct kgsl_pagetable *pagetable)
{
	KGSL_MEM_VDBG("enter (pagetable=%p)\n", pagetable);

	if (pagetable != NULL) {
		if (pagetable->base.gpuaddr)
			kgsl_sharedmem_free(&pagetable->base);

		if (pagetable->pool != NULL) {
			gen_pool_destroy(pagetable->pool);
			pagetable->pool = NULL;
		}

		kfree(pagetable);
	}

	KGSL_MEM_VDBG("return 0x%08x\n", 0);
	return 0;
}
/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @device - KGSL device to create the context on
 * @pagetable - Pagetable for the context
 * @context- Generic KGSL context structure
 * @flags - flags for the context (passed from user space)
 *
 * Create a new draw context for the 3D core. Return 0 on success,
 * or error code on failure.
 */
int adreno_drawctxt_create(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable,
			struct kgsl_context *context, uint32_t flags)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_context *drawctxt;
	int ret;

	drawctxt = kzalloc(sizeof(*drawctxt), GFP_KERNEL);
	if (!drawctxt)
		return -ENOMEM;

	drawctxt->pagetable = pagetable;
	drawctxt->bin_base_offset = 0;

	/* FIXME: Deal with preambles */

	ret = adreno_dev->gpudev->ctxt_gpustate_shadow(adreno_dev, drawctxt);
	if (ret)
		goto err;

	/* Save the shader instruction memory on context switching */
	drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;

	/* Create the GMEM shadow unless the caller opted out */
	if (!(flags & KGSL_CONTEXT_NO_GMEM_ALLOC)) {
		ret = adreno_dev->gpudev->ctxt_gmem_shadow(adreno_dev,
							drawctxt);
		if (ret != 0)
			goto err;
	}

	context->devctxt = drawctxt;
	return 0;

err:
	kgsl_sharedmem_free(&drawctxt->gpustate);
	kfree(drawctxt);
	return ret;
}
void kgsl_mem_entry_destroy(struct kref *kref) { struct kgsl_mem_entry *entry = container_of(kref, struct kgsl_mem_entry, refcount); size_t size = entry->memdesc.size; kgsl_sharedmem_free(&entry->memdesc); if (entry->memtype == KGSL_USER_MEMORY) entry->priv->stats.user -= size; else if (entry->memtype == KGSL_MAPPED_MEMORY) { if (entry->file_ptr) fput(entry->file_ptr); kgsl_driver.stats.mapped -= size; entry->priv->stats.mapped -= size; } kfree(entry); }
int kgsl_g12_drawctxt_destroy(struct kgsl_device *device, unsigned int drawctxt_id) { struct kgsl_g12_device *g12_device = (struct kgsl_g12_device *) device; if (drawctxt_id >= KGSL_G12_CONTEXT_MAX) return KGSL_FAILURE; if (g_z1xx.numcontext == 0) return KGSL_FAILURE; g_z1xx.numcontext--; if (g_z1xx.numcontext == 0) { kgsl_sharedmem_free(&g_z1xx.cmdbufdesc); memset(&g_z1xx, 0, sizeof(struct kgsl_g12_z1xx)); g12_device->timestamp = 0; g12_device->current_timestamp = 0; } return KGSL_SUCCESS; }
/**
 * adreno_drawctxt_detach(): detach a context from the GPU
 * @context: Generic KGSL context container for the context
 *
 * Deactivates the context, destroys every command batch still queued
 * on it, waits for its last global timestamp to retire, publishes the
 * final timestamps to the memstore, and frees the context's GPU
 * state. Returns the result of the global timestamp wait.
 */
int adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	int ret;

	if (context == NULL)
		return 0;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);

	/* deactivate context */
	if (adreno_dev->drawctxt_active == drawctxt)
		adreno_drawctxt_switch(adreno_dev, NULL, 0);

	mutex_lock(&drawctxt->mutex);

	/* Destroy every command batch still queued on this context */
	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		mutex_unlock(&drawctxt->mutex);

		/*
		 * Don't hold the drawctxt mutex while the cmdbatch is being
		 * destroyed because the cmdbatch destroy takes the device
		 * mutex and the world falls in on itself
		 */
		kgsl_cmdbatch_destroy(cmdbatch);
		mutex_lock(&drawctxt->mutex);
	}

	mutex_unlock(&drawctxt->mutex);

	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex. The entire context destroy
	 * process requires the device mutex as well. But lets
	 * make sure we notice if the locking changes.
	 */
	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Wait for the last global timestamp to pass before continuing */
	ret = adreno_drawctxt_wait_global(adreno_dev, context,
		drawctxt->internal_timestamp, 10 * 1000);

	/*
	 * If the wait for global fails then nothing after this point is likely
	 * to work very well - BUG_ON() so we can take advantage of the debug
	 * tools to figure out what the h - e - double hockey sticks happened
	 */
	BUG_ON(ret);

	/* Publish the final retired timestamps to the shared memstore */
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		drawctxt->timestamp);

	kgsl_sharedmem_free(&drawctxt->gpustate);
	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);

	/* Give the context implementation a chance to clean up */
	if (drawctxt->ops->detach)
		drawctxt->ops->detach(drawctxt);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);

	return ret;
}
static long gsl_kmod_ioctl(struct file *fd, unsigned int cmd, unsigned long arg) { int kgslStatus = GSL_FAILURE; switch (cmd) { case IOCTL_KGSL_DEVICE_START: { kgsl_device_start_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_start_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_start(param.device_id, param.flags); break; } case IOCTL_KGSL_DEVICE_STOP: { kgsl_device_stop_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_stop_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_stop(param.device_id); break; } case IOCTL_KGSL_DEVICE_IDLE: { kgsl_device_idle_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_idle_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_idle(param.device_id, param.timeout); break; } case IOCTL_KGSL_DEVICE_ISIDLE: { kgsl_device_isidle_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_isidle_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_isidle(param.device_id); break; } case IOCTL_KGSL_DEVICE_GETPROPERTY: { kgsl_device_getproperty_t param; void *tmp; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_getproperty_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } tmp = kmalloc(param.sizebytes, GFP_KERNEL); if (!tmp) { printk(KERN_ERR "%s:kmalloc error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_getproperty(param.device_id, param.type, tmp, param.sizebytes); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.value, tmp, param.sizebytes)) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; kfree(tmp); break; } } else { printk(KERN_ERR "%s: 
kgsl_device_getproperty error\n", __func__); } kfree(tmp); break; } case IOCTL_KGSL_DEVICE_SETPROPERTY: { kgsl_device_setproperty_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_setproperty_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_setproperty(param.device_id, param.type, param.value, param.sizebytes); if (kgslStatus != GSL_SUCCESS) { printk(KERN_ERR "%s: kgsl_device_setproperty error\n", __func__); } break; } case IOCTL_KGSL_DEVICE_REGREAD: { kgsl_device_regread_t param; unsigned int tmp; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_regread_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_regread(param.device_id, param.offsetwords, &tmp); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.value, &tmp, sizeof(unsigned int))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } } break; } case IOCTL_KGSL_DEVICE_REGWRITE: { kgsl_device_regwrite_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_regwrite_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_regwrite(param.device_id, param.offsetwords, param.value); break; } case IOCTL_KGSL_DEVICE_WAITIRQ: { kgsl_device_waitirq_t param; unsigned int count; printk(KERN_ERR "IOCTL_KGSL_DEVICE_WAITIRQ obsoleted!\n"); // kgslStatus = -ENOTTY; break; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_waitirq_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_waitirq(param.device_id, param.intr_id, &count, param.timeout); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.count, &count, sizeof(unsigned int))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } } break; } 
case IOCTL_KGSL_CMDSTREAM_ISSUEIBCMDS: { kgsl_cmdstream_issueibcmds_t param; gsl_timestamp_t tmp; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_cmdstream_issueibcmds_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_cmdstream_issueibcmds(param.device_id, param.drawctxt_index, param.ibaddr, param.sizedwords, &tmp, param.flags); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } } break; } case IOCTL_KGSL_CMDSTREAM_READTIMESTAMP: { kgsl_cmdstream_readtimestamp_t param; gsl_timestamp_t tmp; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_cmdstream_readtimestamp_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } tmp = kgsl_cmdstream_readtimestamp(param.device_id, param.type); if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = GSL_SUCCESS; break; } case IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP: { int err; kgsl_cmdstream_freememontimestamp_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_cmdstream_freememontimestamp_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } err = del_memblock_from_allocated_list(fd, param.memdesc); if(err) { /* tried to remove a block of memory that is not allocated! * NOTE that -EINVAL is Linux kernel's error codes! * the drivers error codes COULD mix up with kernel's. 
*/ kgslStatus = -EINVAL; } else { kgslStatus = kgsl_cmdstream_freememontimestamp(param.device_id, param.memdesc, param.timestamp, param.type); } break; } case IOCTL_KGSL_CMDSTREAM_WAITTIMESTAMP: { kgsl_cmdstream_waittimestamp_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_cmdstream_waittimestamp_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_cmdstream_waittimestamp(param.device_id, param.timestamp, param.timeout); break; } case IOCTL_KGSL_CMDWINDOW_WRITE: { kgsl_cmdwindow_write_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_cmdwindow_write_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_cmdwindow_write(param.device_id, param.target, param.addr, param.data); break; } case IOCTL_KGSL_CONTEXT_CREATE: { kgsl_context_create_t param; unsigned int tmp; int tmpStatus; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_context_create_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_context_create(param.device_id, param.type, &tmp, param.flags); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.drawctxt_id, &tmp, sizeof(unsigned int))) { tmpStatus = kgsl_context_destroy(param.device_id, tmp); /* is asserting ok? Basicly we should return the error from copy_to_user * but will the user space interpret it correctly? Will the user space * always check against GSL_SUCCESS or GSL_FAILURE as they are not the only * return values. 
*/ KOS_ASSERT(tmpStatus == GSL_SUCCESS); printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } else { add_device_context_to_array(fd, param.device_id, tmp); } } break; } case IOCTL_KGSL_CONTEXT_DESTROY: { kgsl_context_destroy_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_context_destroy_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_context_destroy(param.device_id, param.drawctxt_id); del_device_context_from_array(fd, param.device_id, param.drawctxt_id); break; } case IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW: { kgsl_drawctxt_bind_gmem_shadow_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_drawctxt_bind_gmem_shadow_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_drawctxt_bind_gmem_shadow(param.device_id, param.drawctxt_id, param.gmem_rect, param.shadow_x, param.shadow_y, param.shadow_buffer, param.buffer_id); break; } case IOCTL_KGSL_SHAREDMEM_ALLOC: { kgsl_sharedmem_alloc_t param; gsl_memdesc_t tmp; int tmpStatus; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_alloc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_alloc(param.device_id, param.flags, param.sizebytes, &tmp); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t))) { tmpStatus = kgsl_sharedmem_free(&tmp); KOS_ASSERT(tmpStatus == GSL_SUCCESS); printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } else { add_memblock_to_allocated_list(fd, &tmp); } } break; } case IOCTL_KGSL_SHAREDMEM_FREE: { kgsl_sharedmem_free_t param; gsl_memdesc_t tmp; int err; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_free_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if 
(copy_from_user(&tmp, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } err = del_memblock_from_allocated_list(fd, &tmp); if(err) { printk(KERN_ERR "%s: tried to free memdesc that was not allocated!\n", __func__); kgslStatus = err; break; } kgslStatus = kgsl_sharedmem_free(&tmp); if (kgslStatus == GSL_SUCCESS) { if (copy_to_user(param.memdesc, &tmp, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } } break; } case IOCTL_KGSL_SHAREDMEM_READ: { kgsl_sharedmem_read_t param; gsl_memdesc_t memdesc; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_read_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_read(&memdesc, param.dst, param.offsetbytes, param.sizebytes, true); if (kgslStatus != GSL_SUCCESS) { printk(KERN_ERR "%s: kgsl_sharedmem_read failed\n", __func__); } break; } case IOCTL_KGSL_SHAREDMEM_WRITE: { kgsl_sharedmem_write_t param; gsl_memdesc_t memdesc; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_write_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_write(&memdesc, param.offsetbytes, param.src, param.sizebytes, true); if (kgslStatus != GSL_SUCCESS) { printk(KERN_ERR "%s: kgsl_sharedmem_write failed\n", __func__); } break; } case IOCTL_KGSL_SHAREDMEM_SET: { kgsl_sharedmem_set_t param; gsl_memdesc_t memdesc; if (copy_from_user(¶m, (void __user *)arg, 
sizeof(kgsl_sharedmem_set_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_set(&memdesc, param.offsetbytes, param.value, param.sizebytes); break; } case IOCTL_KGSL_SHAREDMEM_LARGESTFREEBLOCK: { kgsl_sharedmem_largestfreeblock_t param; unsigned int largestfreeblock; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_largestfreeblock_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } largestfreeblock = kgsl_sharedmem_largestfreeblock(param.device_id, param.flags); if (copy_to_user(param.largestfreeblock, &largestfreeblock, sizeof(unsigned int))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = GSL_SUCCESS; break; } case IOCTL_KGSL_SHAREDMEM_CACHEOPERATION: { kgsl_sharedmem_cacheoperation_t param; gsl_memdesc_t memdesc; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_cacheoperation_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_cacheoperation(&memdesc, param.offsetbytes, param.sizebytes, param.operation); break; } case IOCTL_KGSL_SHAREDMEM_FROMHOSTPOINTER: { kgsl_sharedmem_fromhostpointer_t param; gsl_memdesc_t memdesc; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_sharedmem_fromhostpointer_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } if (copy_from_user(&memdesc, (void __user *)param.memdesc, sizeof(gsl_memdesc_t))) { printk(KERN_ERR "%s: copy_from_user error\n", 
__func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_sharedmem_fromhostpointer(param.device_id, &memdesc, param.hostptr); break; } case IOCTL_KGSL_ADD_TIMESTAMP: { kgsl_add_timestamp_t param; gsl_timestamp_t tmp; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_add_timestamp_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } tmp = kgsl_add_timestamp(param.device_id, &tmp); if (copy_to_user(param.timestamp, &tmp, sizeof(gsl_timestamp_t))) { printk(KERN_ERR "%s: copy_to_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = GSL_SUCCESS; break; } case IOCTL_KGSL_DEVICE_CLOCK: { kgsl_device_clock_t param; if (copy_from_user(¶m, (void __user *)arg, sizeof(kgsl_device_clock_t))) { printk(KERN_ERR "%s: copy_from_user error\n", __func__); kgslStatus = GSL_FAILURE; break; } kgslStatus = kgsl_device_clock(param.device, param.enable); break; } default: kgslStatus = -ENOTTY; break; } return kgslStatus; }