/*
 * vpu_on() - power up and prepare the VPU for use.
 *
 * Refuses to run while the clock/power module reports the idle state,
 * gates the VPU clock on, tweaks a CP0 control register, resets the
 * block, re-enables its interrupt and takes a wakelock so the system
 * stays up while the VPU is active.
 *
 * Returns 0 on success, -EBUSY if the CPM reports OPCR_IDLE.
 */
static int vpu_on(struct jz_vpu *vpu)
{
	/* Don't power on while the CPM says the SoC is idling. */
	if (cpm_inl(CPM_OPCR) & OPCR_IDLE)
		return -EBUSY;

	clk_enable(vpu->clk_gate);

	/* Read-modify-write CP0 register 16, select 7: OR in 0x340 and
	 * mask to the low 10 bits.  NOTE(review): the bit meanings are
	 * XBurst/SoC-specific — confirm against the Ingenic manual. */
	__asm__ __volatile__ (
		"mfc0 $2, $16, 7 \n\t"
		"ori $2, $2, 0x340 \n\t"
		"andi $2, $2, 0x3ff \n\t"
		"mtc0 $2, $16, 7 \n\t"
		"nop \n\t");

	vpu_reset(vpu);
	enable_irq(vpu->irq);
	/* Hold the system awake for the duration of VPU activity. */
	wake_lock(&vpu->wake_lock);
	dev_dbg(vpu->dev, "[%d:%d] on\n", current->tgid, current->pid);

	return 0;
}
static long vpu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct miscdevice *dev = filp->private_data; struct jz_vpu *vpu = container_of(dev, struct jz_vpu, mdev); struct flush_cache_info info; int ret = 0; unsigned int status = 0; switch (cmd) { case WAIT_COMPLETE: ret = wait_for_completion_interruptible_timeout( &vpu->done, msecs_to_jiffies(200)); if (ret > 0) { status = vpu->status; } else { dev_warn(vpu->dev, "[%d:%d] wait_for_completion timeout\n", current->tgid, current->pid); if (vpu_reset(vpu) < 0) status = 0; vpu->done.done = 0; } if (copy_to_user((void *)arg, &status, sizeof(status))) ret = -EFAULT; break; case LOCK: if (vpu->owner_pid == current->pid) { dev_err(vpu->dev, "[%d:%d] dead lock\n", current->tgid, current->pid); ret = -EINVAL; break; } if (mutex_lock_interruptible(&vpu->mutex) != 0) { dev_err(vpu->dev, "[%d:%d] lock error!\n", current->tgid, current->pid); ret = -EIO; break; } vpu->owner_pid = current->pid; dev_dbg(vpu->dev, "[%d:%d] lock\n", current->tgid, current->pid); break; case UNLOCK: mutex_unlock(&vpu->mutex); vpu->owner_pid = 0; dev_dbg(vpu->dev, "[%d:%d] unlock\n", current->tgid, current->pid); break; case FLUSH_CACHE: if (copy_from_user(&info, (void *)arg, sizeof(info))) { ret = -EFAULT; break; } dma_cache_sync(NULL, (void *)info.addr, info.len, info.dir); dev_dbg(vpu->dev, "[%d:%d] flush cache\n", current->tgid, current->pid); break; default: break; } return ret; }
static long vpu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct miscdevice *dev = filp->private_data; struct jz_vpu *vpu = container_of(dev, struct jz_vpu, mdev); struct flush_cache_info info; int ret = 0; unsigned int status = 0; volatile unsigned long flags; unsigned int addr, size; unsigned int * arg_r; int i,num; switch (cmd) { case CMD_WAIT_COMPLETE: ret = wait_for_completion_interruptible_timeout( &vpu->done, msecs_to_jiffies(200)); if (ret > 0) { status = vpu->status; } else { dev_warn(vpu->dev, "[%d:%d] wait_for_completion timeout\n", current->tgid, current->pid); if (vpu_reset(vpu) < 0) status = 0; vpu->done.done = 0; } if (copy_to_user((void *)arg, &status, sizeof(status))) ret = -EFAULT; break; case LOCK: if (vpu->owner_pid == current->pid) { dev_err(vpu->dev, "[%d:%d] dead lock\n", current->tgid, current->pid); ret = -EINVAL; break; } if (mutex_lock_interruptible(&vpu->mutex) != 0) { dev_err(vpu->dev, "[%d:%d] lock error!\n", current->tgid, current->pid); ret = -EIO; break; } vpu->owner_pid = current->pid; dev_dbg(vpu->dev, "[%d:%d] lock\n", current->tgid, current->pid); break; case UNLOCK: mutex_unlock(&vpu->mutex); vpu->owner_pid = 0; dev_dbg(vpu->dev, "[%d:%d] unlock\n", current->tgid, current->pid); break; case FLUSH_CACHE: if (copy_from_user(&info, (void *)arg, sizeof(info))) { ret = -EFAULT; break; } dma_cache_sync(NULL, (void *)info.addr, info.len, info.dir); dev_dbg(vpu->dev, "[%d:%d] flush cache\n", current->tgid, current->pid); break; case CMD_VPU_PHY: arg_r = (unsigned int *)arg; *arg_r = (0x1fffffff) & jz_tcsm_start; break; case CMD_VPU_CACHE: arg_r = (unsigned int *)arg; addr = (unsigned int)arg_r[0]; size = arg_r[1]; dma_cache_wback_inv(addr, size); break; case CMD_VPU_DMA_NOTLB: local_irq_save(flags); arg_r = (unsigned int *)arg; REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG; for(i = 0;i < 4; i += 2){ *(unsigned int *)(arg_r[i]) = arg_r[i+1]; printk("arg[%d]=%x arg[%d]=%d",i,arg_r[i],i+1,arg_r[i+1]); } 
local_irq_restore(flags); break; case CMD_VPU_DMA_TLB: local_irq_save(flags); arg_r = (unsigned int *)arg; REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG; for(i = 0;i < 10;i += 2) *(unsigned int *)(arg_r[i]) = arg_r[i+1]; local_irq_restore(flags); break; case CMD_VPU_CLEAN_WAIT_FLAG: local_irq_save(flags); while( !(( REG_VPU_LOCK &(VPU_WAIT_OK)) ||( REG_VPU_STATUS&(VPU_END))) ) ; REG_VPU_LOCK &= ~(VPU_NEED_WAIT_END_FLAG); if(REG_VPU_LOCK & VPU_WAIT_OK) REG_VPU_LOCK &= ~(VPU_WAIT_OK); local_irq_restore(flags); break; case CMD_VPU_RESET: local_irq_save(flags); REG_CPM_VPU_SWRST |= CPM_VPU_STP; while(!(REG_CPM_VPU_SWRST & CPM_VPU_ACK)) ; REG_CPM_VPU_SWRST = ((REG_CPM_VPU_SWRST | CPM_VPU_SR) & ~CPM_VPU_STP); REG_CPM_VPU_SWRST = (REG_CPM_VPU_SWRST & ~CPM_VPU_SR & ~CPM_VPU_STP); REG_VPU_LOCK = 0; local_irq_restore(flags); break; case CMD_VPU_SET_REG: local_irq_save(flags); num = *(unsigned int*)arg; arg += 4; arg_r = (unsigned int *)arg; REG_VPU_LOCK |= VPU_NEED_WAIT_END_FLAG; for(i = 0;i < num; i += 2) *(unsigned int *)(arg_r[i]) = arg_r[i+1]; local_irq_restore(flags); break; default: break; } return ret; }