/*
 *  @fn OS_STATUS OUTPUT_Flush()
 *
 *  @brief Flush the module buffers and sample buffers
 *
 *  @return OS_STATUS
 *
 *  For each CPU in the system, set buffer full to the byte count to flush.
 *  Flush the module buffer as well.
 */
extern int
OUTPUT_Flush (
    VOID
)
{
    int     i;
    int     writers = 0;
    OUTPUT  outbuf;

    /*
     * Flush all remaining data to files;
     * set up a flush event.
     */
    init_waitqueue_head(&flush_queue);
    SEP_PRINT_DEBUG("flush: waiting for %d writers\n",
                    (GLOBAL_STATE_num_cpus(driver_state) + OTHER_C_DEVICES));
    for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
        if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
            continue;
        }
        outbuf = &(cpu_buf[i].outbuf);
        writers += 1;
        OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
            OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
    }
    atomic_set(&flush_writers, writers + OTHER_C_DEVICES);

    // Flip the switch to terminate the output threads.
    // Do not do this earlier, as threads may terminate before all the data is flushed.
    flush = 1;
    for (i = 0; i < GLOBAL_STATE_num_cpus(driver_state); i++) {
        if (CPU_STATE_initial_mask(&pcb[i]) == 0) {
            continue;
        }
        outbuf = &BUFFER_DESC_outbuf(&cpu_buf[i]);
        OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
            OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
        wake_up_interruptible_sync(&BUFFER_DESC_queue(&cpu_buf[i]));
    }

    // Flush all data from the module buffers.
    outbuf = &BUFFER_DESC_outbuf(module_buf);
    OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
        OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);
    SEP_PRINT_DEBUG("OUTPUT_Flush - waking up module_queue\n");
    wake_up_interruptible_sync(&BUFFER_DESC_queue(module_buf));

    // Wait for the buffers to empty.
    if (wait_event_interruptible(flush_queue, atomic_read(&flush_writers) == 0)) {
        return OS_RESTART_SYSCALL;
    }
    SEP_PRINT_DEBUG("OUTPUT_Flush - awakened from flush_queue\n");
    flush = 0;

    return 0;
}
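/*
 * A minimal, self-contained sketch of the flush handshake used above, with
 * hypothetical names (demo_*): the flusher publishes the number of
 * outstanding readers in an atomic counter and sleeps on a wait queue; each
 * reader decrements the counter when it drains its buffer, and the reader
 * that brings it to zero issues the synchronous wake-up (the output_Read()
 * routine elsewhere in this collection decrements the same counter).  This
 * is an illustration of the pattern, not the SEP driver's actual code.
 */
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_flush_queue);
static atomic_t demo_flush_writers = ATOMIC_INIT(0);

/* Flusher side: announce how many readers must drain, then wait. */
static int demo_flush(int nr_readers)
{
    atomic_set(&demo_flush_writers, nr_readers);
    if (wait_event_interruptible(demo_flush_queue,
                                 atomic_read(&demo_flush_writers) == 0))
        return -ERESTARTSYS;
    return 0;
}

/* Reader side: called once per reader when its buffer is fully drained. */
static void demo_reader_done(void)
{
    /* The last reader to finish wakes the flusher synchronously. */
    if (atomic_dec_and_test(&demo_flush_writers))
        wake_up_interruptible_sync(&demo_flush_queue);
}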
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
    unsigned long flags = 0;
    int i;

    q->streaming = 0;
    q->reading = 0;
    wake_up_interruptible_sync(&q->wait);

    /* remove queued buffers from list */
    spin_lock_irqsave(q->irqlock, flags);
    for (i = 0; i < VIDEO_MAX_FRAME; i++) {
        if (NULL == q->bufs[i])
            continue;
        if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
            list_del(&q->bufs[i]->queue);
            q->bufs[i]->state = VIDEOBUF_ERROR;
            wake_up_all(&q->bufs[i]->done);
        }
    }
    spin_unlock_irqrestore(q->irqlock, flags);

    /* free all buffers + clear queue */
    for (i = 0; i < VIDEO_MAX_FRAME; i++) {
        if (NULL == q->bufs[i])
            continue;
        q->ops->buf_release(q, q->bufs[i]);
    }
    INIT_LIST_HEAD(&q->stream);
}
int IO_irq_release(struct inode *inode, struct file *filp)
{
    free_irq(IO_irq_devices->IO_irq1, NULL);
    free_irq(IO_irq_devices->IO_irq2, NULL);
    free_irq(IO_irq_devices->IO_irq3, NULL);
    free_irq(IO_irq_devices->IO_irq4, NULL);

    s3c2410_gpio_cfgpin(S3C2410_GPG11, S3C2410_GPG11_INP);
    s3c2410_gpio_cfgpin(S3C2410_GPG3, S3C2410_GPG3_INP);
    s3c2410_gpio_cfgpin(S3C2410_GPF2, S3C2410_GPF2_INP);
    s3c2410_gpio_cfgpin(S3C2410_GPF0, S3C2410_GPF0_INP);

    IO_irq_devices->IO_status = 0;

    tasklet_kill(&keytask);
    if (!cancel_delayed_work(&irq_work_delay))
        flush_workqueue(tekkamanwork);
    destroy_workqueue(tekkamanwork);

    kfifo_free(tekkamanfifo);
    kfree(tekkamantmp);
    kfree(tekkamanbuf);

    atomic_inc(&IO_irq_available);              /* release the device */
    wake_up_interruptible_sync(&IO_irq_wait);   /* wake up other users */
    printk("IO_irq: release!\n");
    return 0;
}
int videobuf_streamon(struct videobuf_queue *q)
{
    struct videobuf_buffer *buf;
    unsigned long flags = 0;
    int retval;

    videobuf_queue_lock(q);
    retval = -EBUSY;
    if (q->reading)
        goto done;
    retval = 0;
    if (q->streaming)
        goto done;
    q->streaming = 1;
    spin_lock_irqsave(q->irqlock, flags);
    list_for_each_entry(buf, &q->stream, stream)
        if (buf->state == VIDEOBUF_PREPARED)
            q->ops->buf_queue(q, buf);
    spin_unlock_irqrestore(q->irqlock, flags);

    wake_up_interruptible_sync(&q->wait);
done:
    videobuf_queue_unlock(q);
    return retval;
}
void ipp_test_complete(int retval)
{
    printk("ipp_test_complete retval=%d\n", retval);

    if (retval == 0) {
        test_condition = 1;
        wake_up_interruptible_sync(&test_queue);
    }
}
// insert a written block. int proc_insertBuff(int index,int pathIndex,struct block *buff,loff_t pos, size_t count,context *ct) { loff_t* f_pos,spos; size_t left = count, wrt; int ret; if(RELEASE(index)) return count; if(pathIndex==INDEX_PATH_NUM) { spos = ((pipes[index].ct)[CONTEXT]).bpos + pos; } else { spos=pos; } f_pos=(loff_t *)&spos; while(1) { Down(index); wrt = left; ret = dev_insertBuff(&((pipes[index]).device), f_pos,buff, &left, ct); if(pathIndex==INDEX_PATH_NUM) POS(ct)=*f_pos-((pipes[index].ct)[CONTEXT]).bpos; else POS(ct)=*f_pos; if(ret == DEV_FAULT) {Up(index); return -EFAULT;} if(wrt && !R_ABLE(ct)) { R_ABLE(ct) = 1; if(ret == DEV_SEEK) { wake_up_interruptible_sync(&(RQ(index))); } else { wake_up_interruptible(&(RQ(index))); } } if(ret == DEV_SEEK) {Up(index); return -P_ESEEK;} if(ret == DEV_FULL) { W_ABLE(ct) = 0; Up(index); PDEBUG("<%d>: device full, sleep, left %d\n", current->pid, (int)left); Wait_Event(WQ(index), RELEASE(index)||W_ABLE(ct)); if(RELEASE(index)) return count; continue; } Up(index); if(left <= 0) break; } return count; }
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
                             struct task_struct *parent)
{
    struct task_struct *tsk = parent;

    /*
     * Fortunately this is not necessary for thread groups:
     */
    if (p->tgid == tsk->tgid) {
        wake_up_interruptible_sync(&tsk->wait_chldexit);
        return;
    }

    do {
        wake_up_interruptible_sync(&tsk->wait_chldexit);
        tsk = next_thread(tsk);
        if (tsk->signal != parent->signal)
            BUG();
    } while (tsk != parent);
}
/*!
 *  @fn void* OUTPUT_Reserve_Buffer_Space (BUFFER_DESC bd, U32 size)
 *
 *  @param bd    IN  output buffer descriptor to manipulate
 *  @param size  IN  the size of data to reserve
 *
 *  @result outloc - pointer to the location where data is to be written
 *
 *  Reserve space in the output buffers for data.  If a buffer is full,
 *  signal the caller that the flush routine needs to be called.
 *
 *  <I>Special Notes:</I>
 *
 */
extern void*
OUTPUT_Reserve_Buffer_Space (
    BUFFER_DESC  bd,
    U32          size
)
{
    int    signal_full = FALSE;
    char  *outloc      = NULL;
    OUTPUT outbuf      = &BUFFER_DESC_outbuf(bd);

    if (OUTPUT_remaining_buffer_size(outbuf) >= size) {
        outloc = (OUTPUT_buffer(outbuf, OUTPUT_current_buffer(outbuf)) +
                  (OUTPUT_total_buffer_size(outbuf) -
                   OUTPUT_remaining_buffer_size(outbuf)));
    }
    else {
        U32 i, j, start;
        OUTPUT_buffer_full(outbuf, OUTPUT_current_buffer(outbuf)) =
            OUTPUT_total_buffer_size(outbuf) - OUTPUT_remaining_buffer_size(outbuf);

        //
        // Naive assumption (must find a way to fix it):
        // in spite of the loop, the next buffers to fill are at
        // monotonically increasing indices.
        //
        signal_full = TRUE;
        start = OUTPUT_current_buffer(outbuf);
        for (i = start + 1; i < start + OUTPUT_NUM_BUFFERS; i++) {
            j = i % OUTPUT_NUM_BUFFERS;
            if (!OUTPUT_buffer_full(outbuf, j)) {
                OUTPUT_current_buffer(outbuf)        = j;
                OUTPUT_remaining_buffer_size(outbuf) = OUTPUT_total_buffer_size(outbuf);
                outloc = OUTPUT_buffer(outbuf, j);
            }
            else {
                signal_full = FALSE;
            }
        }
    }
    if (outloc) {
        OUTPUT_remaining_buffer_size(outbuf) -= size;
        memset(outloc, 0, size);
    }
#if !defined(CONFIG_PREEMPT_RT)
    if (signal_full) {
        wake_up_interruptible_sync(&BUFFER_DESC_queue(bd));
    }
#endif
    return outloc;
}
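/*
 * A simplified sketch of the multi-buffer reservation idea above, reduced to
 * a two-buffer ping-pong with hypothetical names (demo_*).  When the current
 * buffer cannot hold the request, it is marked full, the writer switches to
 * the other buffer, and the reader sleeping on the queue is woken
 * synchronously.  This illustrates the pattern only and does not use the SEP
 * driver's data structures; the queue must be initialized elsewhere with
 * init_waitqueue_head().
 */
#include <linux/string.h>
#include <linux/wait.h>

#define DEMO_BUF_SIZE  4096
#define DEMO_NUM_BUFS  2

struct demo_outbuf {
    char              data[DEMO_NUM_BUFS][DEMO_BUF_SIZE];
    unsigned int      full[DEMO_NUM_BUFS];  /* bytes ready for the reader */
    unsigned int      cur;                  /* buffer currently being filled */
    unsigned int      used;                 /* bytes used in the current buffer */
    wait_queue_head_t queue;                /* reader sleeps here */
};

static void *demo_reserve(struct demo_outbuf *ob, unsigned int size)
{
    void *loc;

    if (size > DEMO_BUF_SIZE)
        return NULL;

    if (DEMO_BUF_SIZE - ob->used < size) {
        unsigned int next = (ob->cur + 1) % DEMO_NUM_BUFS;

        if (ob->full[next])            /* reader has not drained it yet */
            return NULL;
        ob->full[ob->cur] = ob->used;  /* hand the full buffer to the reader */
        ob->cur  = next;
        ob->used = 0;
        wake_up_interruptible_sync(&ob->queue);
    }
    loc = &ob->data[ob->cur][ob->used];
    ob->used += size;
    memset(loc, 0, size);
    return loc;
}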
static int scull_w_release(struct inode *inode, struct file *filp)
{
    int temp;

    spin_lock(&scull_w_lock);
    scull_w_count--;
    temp = scull_w_count;
    spin_unlock(&scull_w_lock);

    if (temp == 0)
        wake_up_interruptible_sync(&scull_w_wait); /* wake up other uids */
    return 0;
}
int IO_mem_release(struct inode *inode, struct file *filp)
{
    iowrite32((u32) gpjcon_old, io_addr);
    iowrite32((u32) gpjdat_old, io_addr + 4);
    iowrite32((u32) gpjup_old,  io_addr + 8);
    iounmap((void *)io_addr);
    // ioport_unmap((void *)io_addr);   /* WARNING: "ioport_unmap" undefined! Don't use it */

    atomic_inc(&IO_mem_available);              /* release the device */
    wake_up_interruptible_sync(&IO_mem_wait);   /* wake up other users */
    return 0;
}
static int smsdvb_stats_release(struct inode *inode, struct file *file)
{
    struct smsdvb_debugfs *debug_data = file->private_data;

    spin_lock(&debug_data->lock);
    debug_data->stats_was_read = true;  /* return EOF to read() */
    spin_unlock(&debug_data->lock);

    wake_up_interruptible_sync(&debug_data->stats_queue);

    kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
    file->private_data = NULL;

    return 0;
}
static int device_close(struct inode *i, struct file *f)
{
    int temp;

    spin_lock(&lock);
    count--;
    temp = count;
    spin_unlock(&lock);

    if (temp == 0)
        wake_up_interruptible_sync(&wait);  /* wake up other users */
    printk(KERN_INFO "Driver: close()\n");
    return 0;
}
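/*
 * The two release functions above wake sleepers once the last opener is
 * gone.  A minimal sketch of the matching open() side, with hypothetical
 * names (demo_*), loosely modeled on the scull_w "single open at a time"
 * idea rather than copied from it: open() sleeps on the wait queue until
 * the user count drops to zero, then claims the device.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static int demo_count;      /* number of current openers */

static int demo_open(struct inode *inode, struct file *filp)
{
    spin_lock(&demo_lock);
    while (demo_count > 0) {
        spin_unlock(&demo_lock);
        /* Sleep until a release path wakes us with the count at zero. */
        if (wait_event_interruptible(demo_wait, demo_count == 0))
            return -ERESTARTSYS;
        spin_lock(&demo_lock);
    }
    demo_count++;           /* we are now the only opener */
    spin_unlock(&demo_lock);
    return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
    int last;

    spin_lock(&demo_lock);
    demo_count--;
    last = (demo_count == 0);
    spin_unlock(&demo_lock);

    if (last)
        wake_up_interruptible_sync(&demo_wait);
    return 0;
}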
static irqreturn_t rk29_ipp_irq(int irq, void *dev_id)
{
    DBG("rk29_ipp_irq %d \n", irq);
    //printk("rk29_ipp_irq %d \n",irq);
#ifdef IPP_TEST
    hw_end = ktime_get();
#endif

    ipp_write(ipp_read(IPP_INT) | 0x3c, IPP_INT);

    if (((ipp_read(IPP_INT) >> 6) & 0x3) != 0) {    // not idle
        printk("IPP is not idle!\n");
        ipp_soft_reset();
        drvdata->ipp_result = -EAGAIN;
    }

    //interacting with hardware done
    wq_condition = 1;

#ifdef IPP_TEST
    irq_start = ktime_get();
#endif

    if (drvdata->issync) {  //sync
        //wake_up_interruptible_sync(&hw_wait_queue);
        wake_up(&hw_wait_queue);
    } else {                //async
        //power off
        schedule_delayed_work(&drvdata->power_off_work, msecs_to_jiffies(50));
        drvdata->ipp_irq_callback(drvdata->ipp_result);

        //In the case of an async call, we wake up the wait queue here
        drvdata->ipp_async_result = drvdata->ipp_result;
        idle_condition = 1;
        wake_up_interruptible_sync(&blit_wait_queue);
    }

    return IRQ_HANDLED;
}
static ssize_t p_write(int index, struct file* filp, const char* buf, size_t count, loff_t* f_pos) { context *ct = filp->private_data; size_t left = count, wrt; int ret; if(RELEASE(index)) return count; while(1) { Down(index); wrt = left; ret = dev_write(&((pipes[index]).device), f_pos, buf+(count-left), &left, ct); if(ret == DEV_FAULT) {Up(index); return -EFAULT;} #ifdef __PIPE_SELECT pipes[index].r_poll = 1; #endif if(wrt) { R_ABLE(ct) = 1; if(ret == DEV_SEEK) { wake_up_interruptible_sync(&(RQ(index))); } else { wake_up_interruptible(&(RQ(index))); } } if(ret == DEV_SEEK) {Up(index); return -P_ESEEK;} if(ret == DEV_FULL) { W_ABLE(ct) = 0; Up(index); PDEBUG("<%d>: device full, sleep, left %d\n", current->pid, (int)left); Wait_Event(WQ(index), RELEASE(index)||W_ABLE(ct)); if(RELEASE(index)) return count; continue; } Up(index); if(left <= 0) break; } return count; }
static void wait_for_dump_helpers(struct file *file)
{
    struct pipe_inode_info *pipe;

    pipe = file_inode(file)->i_pipe;

    pipe_lock(pipe);
    pipe->readers++;
    pipe->writers--;
    wake_up_interruptible_sync(&pipe->wait);
    kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
    pipe_unlock(pipe);

    wait_event_freezable(pipe->wait, pipe->readers == 1);

    pipe_lock(pipe);
    pipe->readers--;
    pipe->writers++;
    pipe_unlock(pipe);
}
static void wait_for_dump_helpers(struct file *file)
{
    struct pipe_inode_info *pipe;

    pipe = file_inode(file)->i_pipe;

    pipe_lock(pipe);
    pipe->readers++;
    pipe->writers--;

    while ((pipe->readers > 1) && (!signal_pending(current))) {
        wake_up_interruptible_sync(&pipe->wait);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        pipe_wait(pipe);
    }

    pipe->readers--;
    pipe->writers++;
    pipe_unlock(pipe);
}
static void wait_for_dump_helpers(struct file *file)
{
    struct pipe_inode_info *pipe = file->private_data;

    pipe_lock(pipe);
    pipe->readers++;
    pipe->writers--;
    wake_up_interruptible_sync(&pipe->wait);
    kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
    pipe_unlock(pipe);

    /*
     * We actually want wait_event_freezable() but then we need
     * to clear TIF_SIGPENDING and improve dump_interrupted().
     */
    wait_event_interruptible(pipe->wait, pipe->readers == 1);

    pipe_lock(pipe);
    pipe->readers--;
    pipe->writers++;
    pipe_unlock(pipe);
}
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
          unsigned long nr_segs, loff_t pos)
{
    struct file *filp = iocb->ki_filp;
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct pipe_inode_info *pipe;
    int do_wakeup;
    ssize_t ret;
    struct iovec *iov = (struct iovec *)_iov;
    size_t total_len;

    total_len = iov_length(iov, nr_segs);
    /* Null read succeeds. */
    if (unlikely(total_len == 0))
        return 0;

    do_wakeup = 0;
    ret = 0;
    mutex_lock(&inode->i_mutex);
    pipe = inode->i_pipe;
    for (;;) {
        int bufs = pipe->nrbufs;
        if (bufs) {
            int curbuf = pipe->curbuf;
            struct pipe_buffer *buf = pipe->bufs + curbuf;
            const struct pipe_buf_operations *ops = buf->ops;
            void *addr;
            size_t chars = buf->len;
            int error, atomic;

            if (chars > total_len)
                chars = total_len;

            error = ops->confirm(pipe, buf);
            if (error) {
                if (!ret)
                    ret = error;
                break;
            }

            atomic = !iov_fault_in_pages_write(iov, chars);
redo:
            addr = ops->map(pipe, buf, atomic);
            error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
            ops->unmap(pipe, buf, addr);
            if (unlikely(error)) {
                /*
                 * Just retry with the slow path if we failed.
                 */
                if (atomic) {
                    atomic = 0;
                    goto redo;
                }
                if (!ret)
                    ret = error;
                break;
            }
            ret += chars;
            buf->offset += chars;
            buf->len -= chars;
            if (!buf->len) {
                buf->ops = NULL;
                ops->release(pipe, buf);
                curbuf = (curbuf + 1) & (pipe->buffers - 1);
                pipe->curbuf = curbuf;
                pipe->nrbufs = --bufs;
                do_wakeup = 1;
            }
            total_len -= chars;
            if (!total_len)
                break;  /* common path: read succeeded */
        }
        if (bufs)       /* More to do? */
            continue;
        if (!pipe->writers)
            break;
        if (!pipe->waiting_writers) {
            /* syscall merging: Usually we must not sleep
             * if O_NONBLOCK is set, or if we got some data.
             * But if a writer sleeps in kernel space, then
             * we can wait for that data without violating POSIX.
             */
            if (ret)
                break;
            if (filp->f_flags & O_NONBLOCK) {
                ret = -EAGAIN;
                break;
            }
        }
        if (signal_pending(current)) {
            if (!ret)
                ret = -ERESTARTSYS;
            break;
        }
        if (do_wakeup) {
            wake_up_interruptible_sync(&pipe->wait);
            kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        pipe_wait(pipe);
    }
    mutex_unlock(&inode->i_mutex);

    /* Signal writers asynchronously that there is more room. */
    if (do_wakeup) {
        wake_up_interruptible_sync(&pipe->wait);
        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
    }
    if (ret > 0)
        file_accessed(filp);
    return ret;
}
static int ipp_blit_sync_real(const struct rk29_ipp_req *req) { int status; int wait_ret; //printk("ipp_blit_sync -------------------\n"); //If IPP is busy now,wait until it becomes idle mutex_lock(&drvdata->mutex); { status = wait_event_interruptible(blit_wait_queue, idle_condition); if(status < 0) { printk("ipp_blit_sync_real wait_event_interruptible=%d\n",status); mutex_unlock(&drvdata->mutex); return status; } idle_condition = 0; } mutex_unlock(&drvdata->mutex); drvdata->issync = true; drvdata->ipp_result = ipp_blit(req); if(drvdata->ipp_result == 0) { //wait_ret = wait_event_interruptible_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout)); wait_ret = wait_event_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout)); #ifdef IPP_TEST irq_end = ktime_get(); irq_end = ktime_sub(irq_end,irq_start); hw_end = ktime_sub(hw_end,hw_start); if((((int)ktime_to_us(hw_end)/1000)>10)||(((int)ktime_to_us(irq_end)/1000)>10)) { //printk("hw time: %d ms, irq time: %d ms\n",(int)ktime_to_us(hw_end)/1000,(int)ktime_to_us(irq_end)/1000); } #endif if (wait_ret <= 0) { printk("%s wait_ret=%d,wq_condition =%d,wait_event_timeout:%dms! \n",__FUNCTION__,wait_ret,wq_condition,req->timeout); if(wq_condition==0) { //print all register's value printk("IPP_CONFIG: %x\n",ipp_read(IPP_CONFIG)); printk("IPP_SRC_IMG_INFO: %x\n",ipp_read(IPP_SRC_IMG_INFO)); printk("IPP_DST_IMG_INFO: %x\n",ipp_read(IPP_DST_IMG_INFO)); printk("IPP_IMG_VIR: %x\n",ipp_read(IPP_IMG_VIR)); printk("IPP_INT: %x\n",ipp_read(IPP_INT)); printk("IPP_SRC0_Y_MST: %x\n",ipp_read(IPP_SRC0_Y_MST)); printk("IPP_SRC0_CBR_MST: %x\n",ipp_read(IPP_SRC0_CBR_MST)); printk("IPP_SRC1_Y_MST: %x\n",ipp_read(IPP_SRC1_Y_MST)); printk("IPP_SRC1_CBR_MST: %x\n",ipp_read(IPP_SRC1_CBR_MST)); printk("IPP_DST0_Y_MST: %x\n",ipp_read(IPP_DST0_Y_MST)); printk("IPP_DST0_CBR_MST: %x\n",ipp_read(IPP_DST0_CBR_MST)); printk("IPP_DST1_Y_MST: %x\n",ipp_read(IPP_DST1_Y_MST)); printk("IPP_DST1_CBR_MST: %x\n",ipp_read(IPP_DST1_CBR_MST)); printk("IPP_PRE_SCL_PARA: %x\n",ipp_read(IPP_PRE_SCL_PARA)); printk("IPP_POST_SCL_PARA: %x\n",ipp_read(IPP_POST_SCL_PARA)); printk("IPP_SWAP_CTRL: %x\n",ipp_read(IPP_SWAP_CTRL)); printk("IPP_PRE_IMG_INFO: %x\n",ipp_read(IPP_PRE_IMG_INFO)); printk("IPP_AXI_ID: %x\n",ipp_read(IPP_AXI_ID)); printk("IPP_SRESET: %x\n",ipp_read(IPP_SRESET)); printk("IPP_PROCESS_ST: %x\n",ipp_read(IPP_PROCESS_ST)); ipp_soft_reset(); drvdata->ipp_result = -EAGAIN; } } ipp_power_off(NULL); } drvdata->issync = false; //IPP is idle, wake up the wait queue //printk("ipp_blit_sync done ----------------\n"); status = drvdata->ipp_result; idle_condition = 1; wake_up_interruptible_sync(&blit_wait_queue); return status; }
static ssize_t pipe_readv(struct file *filp, const struct iovec *_iov, unsigned long nr_segs, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; struct pipe_inode_info *info; int do_wakeup; ssize_t ret; struct iovec *iov = (struct iovec *)_iov; size_t total_len; total_len = iov_length(iov, nr_segs); /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; down(PIPE_SEM(*inode)); info = inode->i_pipe; for (;;) { int bufs = info->nrbufs; if (bufs) { int curbuf = info->curbuf; struct pipe_buffer *buf = info->bufs + curbuf; struct pipe_buf_operations *ops = buf->ops; void *addr; size_t chars = buf->len; int error; if (chars > total_len) chars = total_len; addr = ops->map(filp, info, buf); error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars); ops->unmap(info, buf); if (unlikely(error)) { if (!ret) ret = -EFAULT; break; } ret += chars; buf->offset += chars; buf->len -= chars; if (!buf->len) { buf->ops = NULL; ops->release(info, buf); curbuf = (curbuf + 1) & (PIPE_BUFFERS-1); info->curbuf = curbuf; info->nrbufs = --bufs; do_wakeup = 1; } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ } if (bufs) /* More to do? */ continue; if (!PIPE_WRITERS(*inode)) break; if (!PIPE_WAITING_WRITERS(*inode)) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); } pipe_wait(inode); } up(PIPE_SEM(*inode)); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); } if (ret > 0) file_accessed(filp); return ret; }
static ssize_t pipe_readv(struct file *filp, const struct iovec *_iov, unsigned long nr_segs, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; int do_wakeup; ssize_t ret; struct iovec *iov = (struct iovec *)_iov; size_t total_len; total_len = iov_length(iov, nr_segs); /* Null read succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; down(PIPE_SEM(*inode)); for (;;) { int size = PIPE_LEN(*inode); if (size) { char *pipebuf = PIPE_BASE(*inode) + PIPE_START(*inode); ssize_t chars = PIPE_MAX_RCHUNK(*inode); if (chars > total_len) chars = total_len; if (chars > size) chars = size; if (pipe_iov_copy_to_user(iov, pipebuf, chars)) { if (!ret) ret = -EFAULT; break; } ret += chars; PIPE_START(*inode) += chars; PIPE_START(*inode) &= (PIPE_SIZE - 1); PIPE_LEN(*inode) -= chars; total_len -= chars; do_wakeup = 1; if (!total_len) break; /* common path: read succeeded */ } if (PIPE_LEN(*inode)) /* test for cyclic buffers */ continue; if (!PIPE_WRITERS(*inode)) break; if (!PIPE_WAITING_WRITERS(*inode)) { /* syscall merging: Usually we must not sleep * if O_NONBLOCK is set, or if we got some data. * But if a writer sleeps in kernel space, then * we can wait for that data without violating POSIX. */ if (ret) break; if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); } pipe_wait(inode); } up(PIPE_SEM(*inode)); /* Signal writers asynchronously that there is more room. */ if (do_wakeup) { wake_up_interruptible(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT); } /* * Hack: we turn off atime updates for -RT kernels. * Who uses them on pipes anyway? */ #ifndef CONFIG_PREEMPT_RT if (ret > 0) file_accessed(filp); #endif return ret; }
void SANLED_timer_fn(unsigned long arg)
{
    SANLED_Start_thread = 1;

    /* Modes 1..16 map directly onto the timer argument. */
    if (arg >= 1 && arg <= 16)
        Mode1 = arg;

    wake_up_interruptible_sync(&SANLED_waitMain);
}
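/*
 * The timer callback above sets a start flag and a mode, then wakes
 * SANLED_waitMain.  A minimal sketch of the kind of kernel thread that would
 * consume those wake-ups, with hypothetical names (demo_*); the real SANLED
 * main thread is not part of this collection.
 */
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait_main);
static int demo_start;
static int demo_mode;

static int demo_led_thread(void *data)
{
    while (!kthread_should_stop()) {
        /* Sleep until the timer callback signals a new mode. */
        wait_event_interruptible(demo_wait_main,
                                 demo_start || kthread_should_stop());
        if (kthread_should_stop())
            break;
        demo_start = 0;
        pr_info("demo_led_thread: running mode %d\n", demo_mode);
        /* ... drive the LEDs according to demo_mode ... */
    }
    return 0;
}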
static ssize_t pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; ssize_t free, written, ret; /* Seeks are not allowed on pipes. */ ret = -ESPIPE; written = 0; if (ppos != &filp->f_pos) goto out_nolock; /* Null write succeeds. */ ret = 0; if (count == 0) goto out_nolock; ret = -ERESTARTSYS; if (down_interruptible(PIPE_SEM(*inode))) goto out_nolock; /* No readers yields SIGPIPE. */ if (!PIPE_READERS(*inode)) goto sigpipe; /* If count <= PIPE_BUF, we have to make it atomic. */ free = (count <= PIPE_BUF ? count : 1); /* Wait, or check for, available space. */ if (filp->f_flags & O_NONBLOCK) { ret = -EAGAIN; if (PIPE_FREE(*inode) < free) goto out; } else { while (PIPE_FREE(*inode) < free) { PIPE_WAITING_WRITERS(*inode)++; pipe_wait(inode); PIPE_WAITING_WRITERS(*inode)--; ret = -ERESTARTSYS; if (signal_pending(current)) goto out; if (!PIPE_READERS(*inode)) goto sigpipe; } } /* Copy into available space. */ ret = -EFAULT; while (count > 0) { int space; char *pipebuf = PIPE_BASE(*inode) + PIPE_END(*inode); ssize_t chars = PIPE_MAX_WCHUNK(*inode); if ((space = PIPE_FREE(*inode)) != 0) { if (chars > count) chars = count; if (chars > space) chars = space; if (copy_from_user(pipebuf, buf, chars)) goto out; written += chars; PIPE_LEN(*inode) += chars; count -= chars; buf += chars; space = PIPE_FREE(*inode); continue; } ret = written; if (filp->f_flags & O_NONBLOCK) break; do { /* * Synchronous wake-up: it knows that this process * is going to give up this CPU, so it doesn't have * to do idle reschedules. */ wake_up_interruptible_sync(PIPE_WAIT(*inode)); PIPE_WAITING_WRITERS(*inode)++; pipe_wait(inode); PIPE_WAITING_WRITERS(*inode)--; if (signal_pending(current)) goto out; if (!PIPE_READERS(*inode)) goto sigpipe; } while (!PIPE_FREE(*inode)); ret = -EFAULT; } /* Signal readers asynchronously that there is more data. */ wake_up_interruptible(PIPE_WAIT(*inode)); update_mctime(inode); out: up(PIPE_SEM(*inode)); out_nolock: if (written) ret = written; return ret; sigpipe: if (written) goto out; up(PIPE_SEM(*inode)); send_sig(SIGPIPE, current, 0); return -EPIPE; }
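/*
 * The pipe_write() above spells out the rule most of these call sites follow:
 * wake_up_interruptible_sync() is used when the waker is itself about to
 * block (or stop running), so the scheduler need not try to preempt it for
 * the woken task, while plain wake_up_interruptible() is used on paths where
 * the waker keeps running.  A condensed sketch of that split, with
 * hypothetical names (demo_*):
 */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_queue);
static int demo_data_ready;

/* Producer path that is about to sleep: prefer the synchronous wake-up. */
static int demo_publish_then_wait(wait_queue_head_t *space_queue, int *space_ready)
{
    demo_data_ready = 1;
    /* We are about to give up the CPU anyway, so wake synchronously. */
    wake_up_interruptible_sync(&demo_queue);
    return wait_event_interruptible(*space_queue, *space_ready);
}

/* Producer path that keeps running: a plain wake-up is the usual choice. */
static void demo_publish_and_continue(void)
{
    demo_data_ready = 1;
    wake_up_interruptible(&demo_queue);
}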
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
    struct videobuf_buffer *buf;
    enum v4l2_field field;
    unsigned long flags = 0;
    int retval;

    MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

    if (b->memory == V4L2_MEMORY_MMAP)
        down_read(&current->mm->mmap_sem);

    videobuf_queue_lock(q);
    retval = -EBUSY;
    if (q->reading) {
        dprintk(1, "qbuf: Reading running...\n");
        goto done;
    }
    retval = -EINVAL;
    if (b->type != q->type) {
        dprintk(1, "qbuf: Wrong type.\n");
        goto done;
    }
    if (b->index >= VIDEO_MAX_FRAME) {
        dprintk(1, "qbuf: index out of range.\n");
        goto done;
    }
    buf = q->bufs[b->index];
    if (NULL == buf) {
        dprintk(1, "qbuf: buffer is null.\n");
        goto done;
    }
    MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
    if (buf->memory != b->memory) {
        dprintk(1, "qbuf: memory type is wrong.\n");
        goto done;
    }
    if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
        dprintk(1, "qbuf: buffer is already queued or active.\n");
        goto done;
    }

    switch (b->memory) {
    case V4L2_MEMORY_MMAP:
        if (0 == buf->baddr) {
            dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
            goto done;
        }
        if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
            || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
            || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
            || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
            buf->size = b->bytesused;
            buf->field = b->field;
            buf->ts = v4l2_timeval_to_ns(&b->timestamp);
        }
        break;
    case V4L2_MEMORY_USERPTR:
        if (b->length < buf->bsize) {
            dprintk(1, "qbuf: buffer length is not enough\n");
            goto done;
        }
        if (VIDEOBUF_NEEDS_INIT != buf->state &&
            buf->baddr != b->m.userptr)
            q->ops->buf_release(q, buf);
        buf->baddr = b->m.userptr;
        break;
    case V4L2_MEMORY_OVERLAY:
        buf->boff = b->m.offset;
        break;
    default:
        dprintk(1, "qbuf: wrong memory type\n");
        goto done;
    }

    dprintk(1, "qbuf: requesting next field\n");
    field = videobuf_next_field(q);
    retval = q->ops->buf_prepare(q, buf, field);
    if (0 != retval) {
        dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
        goto done;
    }

    list_add_tail(&buf->stream, &q->stream);
    if (q->streaming) {
        spin_lock_irqsave(q->irqlock, flags);
        q->ops->buf_queue(q, buf);
        spin_unlock_irqrestore(q->irqlock, flags);
    }
    dprintk(1, "qbuf: succeeded\n");
    retval = 0;
    wake_up_interruptible_sync(&q->wait);

done:
    videobuf_queue_unlock(q);

    if (b->memory == V4L2_MEMORY_MMAP)
        up_read(&current->mm->mmap_sem);

    return retval;
}
static ssize_t pipe_writev(struct file *filp, const struct iovec *_iov, unsigned long nr_segs, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; ssize_t ret; size_t min; int do_wakeup; struct iovec *iov = (struct iovec *)_iov; size_t total_len; total_len = iov_length(iov, nr_segs); /* Null write succeeds. */ if (unlikely(total_len == 0)) return 0; do_wakeup = 0; ret = 0; min = total_len; if (min > PIPE_BUF) min = 1; down(PIPE_SEM(*inode)); for (;;) { int free; if (!PIPE_READERS(*inode)) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } free = PIPE_FREE(*inode); if (free >= min) { /* transfer data */ ssize_t chars = PIPE_MAX_WCHUNK(*inode); char *pipebuf = PIPE_BASE(*inode) + PIPE_END(*inode); /* Always wakeup, even if the copy fails. Otherwise * we lock up (O_NONBLOCK-)readers that sleep due to * syscall merging. */ do_wakeup = 1; if (chars > total_len) chars = total_len; if (chars > free) chars = free; if (pipe_iov_copy_from_user(pipebuf, iov, chars)) { if (!ret) ret = -EFAULT; break; } ret += chars; PIPE_LEN(*inode) += chars; total_len -= chars; if (!total_len) break; } if (PIPE_FREE(*inode) && ret) { /* handle cyclic data buffers */ min = 1; continue; } if (filp->f_flags & O_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { wake_up_interruptible_sync(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN); do_wakeup = 0; } PIPE_WAITING_WRITERS(*inode)++; pipe_wait(inode); PIPE_WAITING_WRITERS(*inode)--; } up(PIPE_SEM(*inode)); if (do_wakeup) { wake_up_interruptible(PIPE_WAIT(*inode)); kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN); } /* * Hack: we turn off atime updates for -RT kernels. * Who uses them on pipes anyway? */ #ifndef CONFIG_PREEMPT_RT if (ret > 0) inode_update_time(inode, 1); /* mtime and ctime */ #endif return ret; }
/*! * @fn ssize_t output_Read(struct file *filp, * char *buf, * size_t count, * loff_t *f_pos, * BUFFER_DESC kernel_buf) * * @brief Return a sample buffer to user-mode. If not full or flush, wait * * @param *filp a file pointer * @param *buf a sampling buffer * @param count size of the user's buffer * @param f_pos file pointer (current offset in bytes) * @param kernel_buf the kernel output buffer structure * * @return number of bytes read. zero indicates end of file. Neg means error * * Place no more than count bytes into the user's buffer. * Block if unavailable on "BUFFER_DESC_queue(buf)" * * <I>Special Notes:</I> * */ static ssize_t output_Read ( struct file *filp, char *buf, size_t count, loff_t *f_pos, BUFFER_DESC kernel_buf ) { ssize_t to_copy; ssize_t uncopied; OUTPUT outbuf = &BUFFER_DESC_outbuf(kernel_buf); U32 cur_buf, i; /* Buffer is filled by output_fill_modules. */ cur_buf = OUTPUT_current_buffer(outbuf); for (i=0; i<OUTPUT_NUM_BUFFERS; i++) { //iterate through all buffers cur_buf++; if (cur_buf >= OUTPUT_NUM_BUFFERS) { cur_buf = 0; } //circularly if ((to_copy = OUTPUT_buffer_full(outbuf, cur_buf))) { break; } } SEP_PRINT_DEBUG("buffer %d has %d bytes ready\n", (S32)cur_buf, (S32)to_copy); if (!flush && to_copy == 0) { #if defined(CONFIG_PREEMPT_RT) do { unsigned long delay; delay = msecs_to_jiffies(1000); wait_event_interruptible_timeout(BUFFER_DESC_queue(kernel_buf), flush||OUTPUT_buffer_full(outbuf, cur_buf), delay); } while (!(flush||OUTPUT_buffer_full(outbuf, cur_buf))); #else if (wait_event_interruptible(BUFFER_DESC_queue(kernel_buf), flush||OUTPUT_buffer_full(outbuf, cur_buf))) { return OS_RESTART_SYSCALL; } #endif SEP_PRINT_DEBUG("Get to copy\n", (S32)cur_buf); to_copy = OUTPUT_buffer_full(outbuf, cur_buf); SEP_PRINT_DEBUG("output_Read awakened, buffer %d has %d bytes\n",cur_buf, (int)to_copy ); } /* Ensure that the user's buffer is large enough */ if (to_copy > count) { SEP_PRINT_DEBUG("user buffer is too small\n"); return OS_NO_MEM; } /* Copy data to user space. Note that we use cur_buf as the source */ if (abnormal_terminate == 0) { uncopied = copy_to_user(buf, OUTPUT_buffer(outbuf, cur_buf), to_copy); /* Mark the buffer empty */ OUTPUT_buffer_full(outbuf, cur_buf) = 0; *f_pos += to_copy-uncopied; if (uncopied) { SEP_PRINT_DEBUG("only copied %d of %lld bytes of module records\n", (S32)to_copy, (long long)uncopied); return (to_copy - uncopied); } } else { to_copy = 0; SEP_PRINT_DEBUG("to copy set to 0\n"); } // At end-of-file, decrement the count of active buffer writers if (to_copy == 0) { DRV_BOOL flush_val = atomic_dec_and_test(&flush_writers); SEP_PRINT_DEBUG("output_Read decremented flush_writers\n"); if (flush_val == TRUE) { wake_up_interruptible_sync(&flush_queue); } } return to_copy; }
/* * Pipe input worker. Most of this logic works like a regular pipe, the * key here is the 'actor' worker passed in that actually moves the data * to the wanted destination. See pipe_to_file/pipe_to_sendpage above. */ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, splice_actor *actor) { int ret, do_wakeup, err; struct splice_desc sd; ret = 0; do_wakeup = 0; sd.total_len = len; sd.flags = flags; sd.file = out; sd.pos = *ppos; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); for (;;) { if (pipe->nrbufs) { struct pipe_buffer *buf = pipe->bufs + pipe->curbuf; struct pipe_buf_operations *ops = buf->ops; sd.len = buf->len; if (sd.len > sd.total_len) sd.len = sd.total_len; err = actor(pipe, buf, &sd); if (err <= 0) { if (!ret && err != -ENODATA) ret = err; break; } ret += err; buf->offset += err; buf->len -= err; sd.len -= err; sd.pos += err; sd.total_len -= err; if (sd.len) continue; if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1); pipe->nrbufs--; if (pipe->inode) do_wakeup = 1; } if (!sd.total_len) break; } if (pipe->nrbufs) continue; if (!pipe->writers) break; if (!pipe->waiting_writers) { if (ret) break; } if (flags & SPLICE_F_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); do_wakeup = 0; } pipe_wait(pipe); } if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } return ret; }
/* * Pipe output worker. This sets up our pipe format with the page cache * pipe buffer operations. Otherwise very similar to the regular pipe_writev(). */ static ssize_t splice_to_pipe(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { int ret, do_wakeup, page_nr; ret = 0; do_wakeup = 0; page_nr = 0; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); for (;;) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } if (pipe->nrbufs < PIPE_BUFFERS) { int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1); struct pipe_buffer *buf = pipe->bufs + newbuf; buf->page = spd->pages[page_nr]; buf->offset = spd->partial[page_nr].offset; buf->len = spd->partial[page_nr].len; buf->ops = spd->ops; if (spd->flags & SPLICE_F_GIFT) buf->flags |= PIPE_BUF_FLAG_GIFT; pipe->nrbufs++; page_nr++; ret += buf->len; if (pipe->inode) do_wakeup = 1; if (!--spd->nr_pages) break; if (pipe->nrbufs < PIPE_BUFFERS) continue; break; } if (spd->flags & SPLICE_F_NONBLOCK) { if (!ret) ret = -EAGAIN; break; } if (signal_pending(current)) { if (!ret) ret = -ERESTARTSYS; break; } if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } pipe->waiting_writers++; pipe_wait(pipe); pipe->waiting_writers--; } if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); if (do_wakeup) { smp_mb(); if (waitqueue_active(&pipe->wait)) wake_up_interruptible(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } while (page_nr < spd->nr_pages) page_cache_release(spd->pages[page_nr++]); return ret; }
/* * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or * filled (output) buffer in the drivers incoming queue. */ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); unsigned long userptr = buf->m.userptr; struct videobuf_buffer *vb; struct videobuf_vmalloc_memory *vm_mem; struct sh_css_frame_info out_info, vf_info; struct sh_css_frame *handle = NULL; u32 length; u32 pgnr; int ret = 0; if ((!pipe->is_main) && (!atomisp_is_viewfinder_support(isp))) return -EINVAL; if (!buf || buf->index >= VIDEO_MAX_FRAME || !pipe->capq.bufs[buf->index]) { v4l2_err(&atomisp_dev, "Invalid index for qbuf.\n"); return -EINVAL; } v4l2_dbg(2, dbg_level, &atomisp_dev, "%s\n", __func__); /* * For userptr type frame, we convert user space address to physic * address and reprograme out page table properly */ if (buf->memory == V4L2_MEMORY_USERPTR) { vb = pipe->capq.bufs[buf->index]; vm_mem = vb->priv; if (!vm_mem) return -EINVAL; length = vb->bsize; pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT; /* We must stop to atomisp to remove the * race condition when updating the new userptr. */ if (buf->flags & V4L2_BUF_FLAG_BUFFER_INVALID) { isp->sw_contex.updating_uptr = true; return 0; } /* Check whether need to start the atomisp_work */ if (buf->flags & V4L2_BUF_FLAG_BUFFER_VALID) { isp->sw_contex.updating_uptr = false; wake_up_interruptible_sync(&pipe->capq.wait); return 0; } if ((vb->baddr == userptr) && (vm_mem->vaddr)) goto done; switch (isp->sw_contex.run_mode) { case CI_MODE_STILL_CAPTURE: if ((isp->main_format->out_sh_fmt != SH_CSS_FRAME_FORMAT_RAW) && sh_css_capture_get_viewfinder_frame_info(&vf_info)) goto error; if (sh_css_capture_get_output_frame_info(&out_info)) goto error; break; case CI_MODE_VIDEO: if (sh_css_video_get_viewfinder_frame_info(&vf_info)) goto error; if (sh_css_video_get_output_frame_info(&out_info)) goto error; break; case CI_MODE_PREVIEW: if (sh_css_preview_get_output_frame_info(&out_info)) goto error; break; } hrt_isp_css_mm_set_user_ptr(userptr, pgnr); if (!pipe->is_main) ret = sh_css_frame_allocate_from_info(&handle, &vf_info); else ret = sh_css_frame_allocate_from_info(&handle, &out_info); hrt_isp_css_mm_set_user_ptr(0, 0); if (ret != sh_css_success) { v4l2_err(&atomisp_dev, "Error to allocate frame\n"); return -ENOMEM; } if (vm_mem->vaddr) { mutex_lock(&pipe->capq.vb_lock); sh_css_frame_free(vm_mem->vaddr); vm_mem->vaddr = NULL; vb->state = VIDEOBUF_NEEDS_INIT; mutex_unlock(&pipe->capq.vb_lock); } vm_mem->vaddr = handle; buf->flags &= ~V4L2_BUF_FLAG_MAPPED; buf->flags |= V4L2_BUF_FLAG_QUEUED; buf->flags &= ~V4L2_BUF_FLAG_DONE; } else if (buf->memory == V4L2_MEMORY_MMAP) {
static ssize_t pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos) { struct inode *inode = filp->f_dentry->d_inode; ssize_t size, read, ret; /* Seeks are not allowed on pipes. */ ret = -ESPIPE; read = 0; if (ppos != &filp->f_pos) goto out_nolock; /* Always return 0 on null read. */ ret = 0; if (count == 0) goto out_nolock; /* Get the pipe semaphore */ ret = -ERESTARTSYS; if (down_interruptible(PIPE_SEM(*inode))) goto out_nolock; if (PIPE_EMPTY(*inode)) { do_more_read: ret = 0; if (!PIPE_WRITERS(*inode)) goto out; ret = -EAGAIN; if (filp->f_flags & O_NONBLOCK) goto out; for (;;) { PIPE_WAITING_READERS(*inode)++; pipe_wait(inode); PIPE_WAITING_READERS(*inode)--; ret = -ERESTARTSYS; if (signal_pending(current)) goto out; ret = 0; if (!PIPE_EMPTY(*inode)) break; if (!PIPE_WRITERS(*inode)) goto out; } } /* Read what data is available. */ ret = -EFAULT; while (count > 0 && (size = PIPE_LEN(*inode))) { char *pipebuf = PIPE_BASE(*inode) + PIPE_START(*inode); ssize_t chars = PIPE_MAX_RCHUNK(*inode); if (chars > count) chars = count; if (chars > size) chars = size; if (copy_to_user(buf, pipebuf, chars)) goto out; read += chars; PIPE_START(*inode) += chars; PIPE_START(*inode) &= (PIPE_SIZE - 1); PIPE_LEN(*inode) -= chars; count -= chars; buf += chars; } /* Cache behaviour optimization */ if (!PIPE_LEN(*inode)) PIPE_START(*inode) = 0; if (count && PIPE_WAITING_WRITERS(*inode) && !(filp->f_flags & O_NONBLOCK)) { /* * We know that we are going to sleep: signal * writers synchronously that there is more * room. */ wake_up_interruptible_sync(PIPE_WAIT(*inode)); if (!PIPE_EMPTY(*inode)) BUG(); goto do_more_read; } /* Signal writers asynchronously that there is more room. */ wake_up_interruptible(PIPE_WAIT(*inode)); ret = read; out: up(PIPE_SEM(*inode)); out_nolock: if (read) ret = read; UPDATE_ATIME(inode); return ret; }