/*
 * DMAbuf_sync - wait until all queued output for the device has been played.
 *
 * Returns the number of fragments still queued (0 when fully drained).
 * Uses interruptible_sleep_on_timeout(), which can miss a wakeup issued
 * between dropping the lock and going to sleep (see FIXME below).
 */
int DMAbuf_sync(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	int n = 0;
	struct dma_buffparms *dmap;

	/* Nothing to drain if output is neither running nor enabled */
	if (!adev->go && !(adev->enable_bits & PCM_ENABLE_OUTPUT))
		return 0;

	if (adev->dmap_out->dma_mode == DMODE_OUTPUT) {
		dmap = adev->dmap_out;
		spin_lock_irqsave(&dmap->lock,flags);
		/* Kick off output if data is queued but DMA is idle */
		if (dmap->qlen > 0 && !(dmap->flags & DMA_ACTIVE))
			DMAbuf_launch_output(dev, dmap);
		adev->dmap_out->flags |= DMA_SYNCING;
		adev->dmap_out->underrun_count = 0;
		/*
		 * Sleep one fragment-timeout at a time, at most nbufs times,
		 * until the queue empties, an underrun is reported, or a
		 * signal arrives.
		 */
		while (!signal_pending(current) && n++ < adev->dmap_out->nbufs &&
		       adev->dmap_out->qlen && adev->dmap_out->underrun_count == 0) {
			long t = dmabuf_timeout(dmap);
			spin_unlock_irqrestore(&dmap->lock,flags);
			/* FIXME: not safe may miss events */
			t = interruptible_sleep_on_timeout(&adev->out_sleeper, t);
			spin_lock_irqsave(&dmap->lock,flags);
			if (!t) {
				/* Timed out: stop syncing, report what's left */
				adev->dmap_out->flags &= ~DMA_SYNCING;
				spin_unlock_irqrestore(&dmap->lock,flags);
				return adev->dmap_out->qlen;
			}
		}
		adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);

		/*
		 * Some devices such as GUS have huge amount of on board RAM for the
		 * audio data. We have to wait until the device has finished playing.
		 */
		/* still holding the lock */
		if (adev->d->local_qlen) {	/* Device has hidden buffers */
			while (!signal_pending(current) && adev->d->local_qlen(dev)) {
				spin_unlock_irqrestore(&dmap->lock,flags);
				interruptible_sleep_on_timeout(&adev->out_sleeper,
							       dmabuf_timeout(dmap));
				spin_lock_irqsave(&dmap->lock,flags);
			}
		}
		spin_unlock_irqrestore(&dmap->lock,flags);
	}
	adev->dmap_out->dma_mode = DMODE_NONE;
	return adev->dmap_out->qlen;
}
/*
 * Wait until the PXA I2C interrupt handler flags completion (i2c_pending).
 * Sleeps up to 8 jiffies; if the sleep was woken with the full timeout
 * still remaining and the flag is still clear, busy-polls the flag for up
 * to 10 ms.  Without an IRQ configured, just busy-delays 100 us.
 */
static void i2c_pxa_wait_for_ite(struct i2c_algo_pxa_data *adap)
{
//	unsigned long flags;
	if (adap->irq > 0) {
//		save_flags_cli(flags);
		if (adap->i2c_pending == 0) {
			int timeout , count ;

			/* NOTE(review): interruptible_sleep_on_timeout()
			 * returns the jiffies remaining; a return of 8 (the
			 * full timeout) means an immediate wakeup -- confirm
			 * the intent of the (8 == timeout) test below. */
			timeout = interruptible_sleep_on_timeout(&adap->i2c_wait,/* I2C_SLEEP_TIMEOUT*/8);
			if ( ( 8 == timeout ) && ( adap->i2c_pending == 0 ) ) {
				/* fall back to polling: 1000 * 10us = 10 ms max */
				for ( count = 0 ; count < 1000 ; count ++ ) {
					if ( adap->i2c_pending == 0 )
						udelay( 10 ) ;
					else
						break ;
				}
			}
		}
		adap->i2c_pending = 0;
//		restore_flags(flags);
	} else {
		udelay(100);
	}
}
// // Description: Put this process to sleep. We will wake up when the // IIC controller interrupts. // static void iic_ibmocp_waitforpin(void *data) { int timeout = 2; struct iic_ibm *priv_data = data; // // If interrupts are enabled (which they are), then put the process to // sleep. This process will be awakened by two events -- either the // the IIC peripheral interrupts or the timeout expires. // if (priv_data->iic_irq > 0) { spin_lock_irq(&irq_driver_lock); if (iic_pending == 0) { interruptible_sleep_on_timeout(&(iic_wait[priv_data->index]), timeout*HZ ); } else iic_pending = 0; spin_unlock_irq(&irq_driver_lock); } else { // // If interrupts are not enabled then delay for a reasonable amount // of time and return. We expect that by time we return to the calling // function that the IIC has finished our requested transaction and // the status bit reflects this. // // udelay is probably not the best choice for this since it is // the equivalent of a busy wait // udelay(100); } //printk("iic_ibmocp_waitforpin: exitting\n"); }
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vidbuf) { if(file->f_flags & O_NONBLOCK) SCAM_MSG("(%s) %s called (non-blocking)\n", current->comm, __FUNCTION__); else SCAM_MSG("(%s) %s called (blocking)\n", current->comm, __FUNCTION__); if(vidbuf->index < 0 || vidbuf->index >= MAX_STREAMING_BUFFERS) { return -EINVAL; } if(vidbuf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { return -EINVAL; } if(vidbuf->memory != V4L2_MEMORY_MMAP) { return -EINVAL; } if(!(file->f_flags & O_NONBLOCK)) interruptible_sleep_on_timeout(&wq, HZ); /* wait max 1 second */ vidbuf->length = SMARTCAM_BUFFER_SIZE; vidbuf->bytesused = formats[format].sizeimage; vidbuf->flags = V4L2_BUF_FLAG_MAPPED; vidbuf->timestamp = frame_timestamp; vidbuf->sequence = frame_sequence; last_read_frame = frame_sequence; return 0; }
/*
 * Block until the port's transmit buffer drains, polling every HZ/10
 * jiffies.  Gives up (with a debug message) after 30 polls (~3 seconds).
 *
 * Fix: the original reset 'wait' back to 30 on every iteration in which it
 * had not yet reached zero, so the counter oscillated 30 -> 29 -> 30 and
 * the timeout could never fire, leaving the function able to spin forever.
 */
static void qt_block_until_empty(struct tty_struct *tty,
				 struct quatech_port *qt_port)
{
	int timeout = HZ / 10;
	int wait = 30;
	int count;

	while (1) {
		count = qt_chars_in_buffer(tty);
		if (count <= 0)
			return;	/* buffer drained */

		interruptible_sleep_on_timeout(&qt_port->wait, timeout);

		if (--wait == 0) {
			dev_dbg(&qt_port->port->dev, "%s - TIMEOUT", __func__);
			return;
		}
	}
}
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* xpInterrupted is reserved for the sleep below */
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	/* Returns 0 on timeout, nonzero when woken or interrupted.
	 * NOTE(review): sleep_on-style waits can miss a wakeup issued before
	 * we are queued; tolerable here since the wait is bounded at 1 jiffy. */
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		/* disconnect raced with the wait: report its reason instead */
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
/* MUST not hold the spinlock - this function may sleep */ static int output_sleep(int dev, int dontblock) { struct audio_operations *adev = audio_devs[dev]; int err = 0; struct dma_buffparms *dmap = adev->dmap_out; long timeout; long timeout_value; if (dontblock) return -EAGAIN; if (!(adev->enable_bits & PCM_ENABLE_OUTPUT)) return -EAGAIN; /* * Wait for free space */ if (signal_pending(current)) return -EINTR; timeout = (adev->go && !(dmap->flags & DMA_NOTIMEOUT)); if (timeout) timeout_value = dmabuf_timeout(dmap); else timeout_value = MAX_SCHEDULE_TIMEOUT; timeout_value = interruptible_sleep_on_timeout(&adev->out_sleeper, timeout_value); if (timeout != MAX_SCHEDULE_TIMEOUT && !timeout_value) { printk(KERN_WARNING "Sound: DMA (output) timed out - IRQ/DRQ config error?\n"); dma_reset_output(dev); } else { if (signal_pending(current)) err = -EINTR; } return err; }
/* acquires lock */
/*
 * Wait for (and hand out) the next filled input fragment.
 * On success returns the fragment index (qhead) and points *buf/*len at the
 * unread region of that fragment; on failure returns a negative errno.
 */
int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags;
	int err = 0, n = 0;
	struct dma_buffparms *dmap = adev->dmap_in;
	int go;

	if (!(adev->open_mode & OPEN_READ))
		return -EIO;
	spin_lock_irqsave(&dmap->lock,flags);
	if (dmap->needs_reorg)
		reorganize_buffers(dev, dmap, 0);
	if (adev->dmap_in->mapping_flags & DMA_MAP_MAPPED) {
		/* printk(KERN_WARNING "Sound: Can't read from mmapped device (1)\n");*/
		spin_unlock_irqrestore(&dmap->lock,flags);
		return -EINVAL;
	}
	else while (dmap->qlen <= 0 && n++ < 10) {
		/* Retry at most 10 times while no data is queued */
		long timeout = MAX_SCHEDULE_TIMEOUT;

		if (!(adev->enable_bits & PCM_ENABLE_INPUT) || !adev->go) {
			spin_unlock_irqrestore(&dmap->lock,flags);
			return -EAGAIN;
		}
		if ((err = DMAbuf_activate_recording(dev, dmap)) < 0) {
			spin_unlock_irqrestore(&dmap->lock,flags);
			return err;
		}
		/* Wait for the next block */
		if (dontblock) {
			spin_unlock_irqrestore(&dmap->lock,flags);
			return -EAGAIN;
		}
		/* Only arm a timeout while the device is actually running */
		if ((go = adev->go))
			timeout = dmabuf_timeout(dmap);
		spin_unlock_irqrestore(&dmap->lock,flags);
		/* NOTE(review): sleep_on-style wait can miss a wakeup issued
		 * between the unlock above and going to sleep. */
		timeout = interruptible_sleep_on_timeout(&adev->in_sleeper,
							 timeout);
		if (!timeout) {
			/* FIXME: include device name */
			err = -EIO;
			printk(KERN_WARNING "Sound: DMA (input) timed out - IRQ/DRQ config error?\n");
			dma_reset_input(dev);
		} else
			err = -EINTR;	/* ignored below if data did arrive */
		spin_lock_irqsave(&dmap->lock,flags);
	}
	spin_unlock_irqrestore(&dmap->lock,flags);

	if (dmap->qlen <= 0)
		return err ? err : -EINTR;
	/* Hand out the unread tail of the head fragment */
	*buf = &dmap->raw_buf[dmap->qhead * dmap->fragment_size + dmap->counts[dmap->qhead]];
	*len = dmap->fragment_size - dmap->counts[dmap->qhead];

	return dmap->qhead;
}
/*
 * Wait while the server is in its grace period.
 * While a reclaim is in progress we block indefinitely; otherwise the wait
 * is capped at ten seconds.  Returns -ERESTARTSYS if a signal arrived,
 * 0 otherwise.
 */
static inline int nlmclnt_grace_wait(struct nlm_host *host)
{
	if (host->h_reclaiming)
		interruptible_sleep_on(&host->h_gracewait);
	else
		interruptible_sleep_on_timeout(&host->h_gracewait, 10*HZ);

	return signalled() ? -ERESTARTSYS : 0;
}
/*
 * Write 'count' bytes to the printer, chunked into URBs of at most
 * USBLP_BUF_SIZE.  Blocks while a previous write URB is still in flight
 * (unless O_NONBLOCK).  Returns 'count' on success, a negative errno on
 * failure, or the partial 'writecount' when interrupted mid-transfer.
 */
static ssize_t usblp_write(struct file *file, const char *buffer, size_t count, loff_t *ppos)
{
	struct usblp *usblp = file->private_data;
	int timeout, err = 0, writecount = 0;

	/* NOTE(review): writecount is int while count is size_t; the
	 * comparison promotes to unsigned -- confirm count stays bounded. */
	while (writecount < count) {
		/* Previous URB still running: wait for completion or timeout */
		if (usblp->writeurb.status == -EINPROGRESS) {
			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;
			timeout = USBLP_WRITE_TIMEOUT;
			while (timeout && usblp->writeurb.status == -EINPROGRESS) {
				if (signal_pending(current))
					return writecount ? writecount : -EINTR;
				timeout = interruptible_sleep_on_timeout(&usblp->wait, timeout);
			}
		}
		if (!usblp->dev)
			return -ENODEV;	/* printer was unplugged */
		if (usblp->writeurb.status) {
			if (usblp->quirks & USBLP_QUIRK_BIDIR) {
				if (usblp->writeurb.status != -EINPROGRESS)
					err("usblp%d: error %d writing to printer",
					    usblp->minor, usblp->writeurb.status);
				err = usblp->writeurb.status;
				continue;
			} else {
				err = usblp_check_status(usblp, err);
				continue;
			}
		}
		/* URB completed cleanly: account for what it sent */
		writecount += usblp->writeurb.transfer_buffer_length;
		usblp->writeurb.transfer_buffer_length = 0;
		if (writecount == count)
			continue;	/* loop condition terminates next pass */

		/* Stage the next chunk from userspace */
		usblp->writeurb.transfer_buffer_length = (count - writecount) < USBLP_BUF_SIZE ?
			(count - writecount) : USBLP_BUF_SIZE;
		if (copy_from_user(usblp->writeurb.transfer_buffer, buffer + writecount,
				   usblp->writeurb.transfer_buffer_length))
			return -EFAULT;
		usblp->writeurb.dev = usblp->dev;
		/* NOTE(review): usb_submit_urb() return value is ignored --
		 * a submit failure would spin this loop; confirm intended. */
		usb_submit_urb(&usblp->writeurb);
	}
	return count;
}
/*
 * Forward an MTD request to the card's MTD client, retrying (with the wait
 * strategy the MTD asks for via req->Status) while the client reports
 * CS_BUSY.  Returns the final event status, or CS_GENERAL_FAILURE when the
 * handle has no MTD or the retries were exhausted.
 */
static int do_mtd_request(memory_handle_t handle, mtd_request_t *req,
			  caddr_t buf)
{
	int ret, tries;
	client_t *mtd;
	socket_info_t *s;

	mtd = handle->mtd;
	if (mtd == NULL)
		return CS_GENERAL_FAILURE;
	s = SOCKET(mtd);
	for (ret = tries = 0; tries < 100; tries++) {
		mtd->event_callback_args.mtdrequest = req;
		mtd->event_callback_args.buffer = buf;
		ret = EVENT(mtd, CS_EVENT_MTD_REQUEST, CS_EVENT_PRI_LOW);
		if (ret != CS_BUSY)
			break;	/* request accepted or hard failure */
		switch (req->Status) {
		case MTD_WAITREQ:
			/* Not that we should ever need this... */
			interruptible_sleep_on_timeout(&mtd->mtd_req, HZ);
			break;
		case MTD_WAITTIMER:
		case MTD_WAITRDY:
			interruptible_sleep_on_timeout(&mtd->mtd_req,
						       req->Timeout*HZ/1000);
			req->Function |= MTD_REQ_TIMEOUT;
			break;
		case MTD_WAITPOWER:
			interruptible_sleep_on(&mtd->mtd_req);
			break;
		}
		if (signal_pending(current))
			printk(KERN_NOTICE "cs: do_mtd_request interrupted!\n");
	}
	/*
	 * Fix: the loop runs up to 100 tries but the exhaustion test compared
	 * against 20, so real timeouts were never reported (and a successful
	 * break at tries == 20 would have been misreported as a timeout).
	 */
	if (tries == 100) {
		printk(KERN_NOTICE "cs: MTD request timed out!\n");
		ret = CS_GENERAL_FAILURE;
	}
	wake_up_interruptible(&mtd->mtd_req);
	retry_erase_list(&mtd->erase_busy, 0);
	return ret;
} /* do_mtd_request */
/*
 * WAIT_EVENT - block until pEvent->SetFlag becomes set or msecDelay
 * milliseconds elapse.  Returns the final SetFlag value (nonzero means the
 * event was signalled; 0 means timeout, or msecDelay was 0 on entry).
 * Uses legacy save_flags/cli to close the test-then-sleep race against
 * SET_EVENT being called from interrupt context.
 */
int WAIT_EVENT( EVENT_HNDL* pEvent, ULONG msecDelay )
{
	long int TimeRemain;
	unsigned long flags ;

	// wait on the queue
	// Round up to next jiffie for low Linux 100 Hz resolution and
	// add 1 jiffie for unsynchronized timers
	// and add 1 more jiffie for timer rate differences
	// and add 2 more jiffie because unknown factors are causing timeouts
	TimeRemain = ((msecDelay+49) * HZ) / 1000;

	// if no wait time then just return failure
	if ( ! msecDelay )
	{
		return 0;
	}

	// atomically sleep if event is not set - we must protect the window between
	// finding out if the flag is already set and sleeping as if an interrupt
	// occurs in the window and calls SET_EVENT, SET_EVENT will signal the wait
	// queue before we get placed on it and we will miss this signal.
	save_flags (flags) ;
	cli () ;

	// indicate that I am waiting
	pEvent->WaitCnt++;

	if ( ! pEvent->SetFlag )
	{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		while ( !pEvent->SetFlag && TimeRemain )
		{
			// safe to call sleep with interrupts off as kernel will re-enable
			// interrupts after placing us on the queue and before calling schedule
			TimeRemain = interruptible_sleep_on_timeout ( &pEvent->WaitQue, TimeRemain );
		}
#else
		while ( !pEvent->SetFlag )
		{
			// safe to call sleep with interrupts off as kernel will re-enable
			// interrupts after placing us on the queue and before calling schedule
			interruptible_sleep_on ( &pEvent->WaitQue );
		}
#endif
	}

	// remove wait indication
	pEvent->WaitCnt--;

	restore_flags (flags) ;

	// return setflag
	return pEvent->SetFlag;
}
/*
 * Ask the aodv thread to terminate, then pause roughly one second to let
 * it exit.  The wait queue below is local and never woken by anyone, so
 * the sleep always runs its full HZ timeout (unless a signal arrives) --
 * it is effectively just an interruptible delay, not a handshake.
 */
void kill_aodv()
{
	wait_queue_head_t queue;

	init_waitqueue_head(&queue);
	//sets a flag letting the thread know it should die
	//wait for the thread to set flag saying it is dead
	//lower semaphore for the thread
	atomic_set(&kill_thread, 1);
	wake_up_interruptible(&aodv_wait);
	interruptible_sleep_on_timeout(&queue, HZ);
	//kthread_should_stop();
}
/*
 * Blocking read of one captured frame.
 * Lazily allocates a DMA-coherent capture buffer, queues a single-entry
 * scatterlist, sleeps (up to 10 s total) until the capture callback wakes
 * new_video_frame, then copies the frame to userspace.
 * Returns bytes copied, 0 on allocation failure, or -EIO on timeout.
 */
static ssize_t camera_core_read(struct file *file, char *data, size_t count,
				loff_t *ppos)
{
	struct camera_fh *fh = file->private_data;
	struct camera_device *cam = fh->cam;
	int err;
	unsigned long irqflags;
	long timeout;
#if 0	/* use video_buf to do capture */
	int i;
	for (i = 0; i < 14; i++)
		videobuf_read_one(file, &fh->vbq, data, count, ppos);
	i = videobuf_read_one(file, &fh->vbq, data, count, ppos);
	return i;
#endif

	/* Lazily allocate the capture buffer; it stays owned by 'cam'
	 * (presumably freed on release -- not freed here, TODO confirm). */
	if (!cam->capture_base) {
		cam->capture_base = (unsigned long)dma_alloc_coherent(NULL,
				cam->pix.sizeimage,
				(dma_addr_t *) &cam->capture_base_phys,
				GFP_KERNEL | GFP_DMA);
	}
	if (!cam->capture_base) {
		printk(KERN_ERR CAM_NAME ": cannot allocate capture buffer\n");
		return 0;
	}

	spin_lock_irqsave(&cam->capture_lock, irqflags);
	cam->reading = fh;
	cam->capture_started = 1;
	sg_dma_address(&cam->capture_sglist) = cam->capture_base_phys;
	sg_dma_len(&cam->capture_sglist)= cam->pix.sizeimage;
	spin_unlock_irqrestore(&cam->capture_lock, irqflags);

	/* NOTE(review): the queue call's return value is ignored */
	err = camera_core_sgdma_queue(cam, &cam->capture_sglist, 1,
				      camera_core_capture_callback, NULL);

	/* Wait till DMA is completed */
	timeout = HZ * 10;
	cam->capture_completed = 0;
	while (cam->capture_completed == 0) {
		timeout = interruptible_sleep_on_timeout
				(&cam->new_video_frame, timeout);
		if (timeout == 0) {
			printk(KERN_ERR CAM_NAME ": timeout waiting video frame\n");
			return -EIO;	/* time out */
		}
	}

	/* copy the data to the user buffer */
	err = copy_to_user(data, (void *)cam->capture_base, count);
	/* err is the number of bytes NOT copied; report the bytes that were */
	return (count - err);
}
static int usb_led_probe_task(void *x) { unsigned long flags; daemonize(); reparent_to_init(); strcpy(current->comm, "USBLEDprobe"); do { usb_led_flag = 1; interruptible_sleep_on(&usb_led_queue); usb_led_flag = 0; USB_SET_LED(USB_DISCONNECT); interruptible_sleep_on_timeout(&usb_led_blink_queue, 50); USB_SET_LED(USB_CONNECT); interruptible_sleep_on_timeout(&usb_led_blink_queue, 50); } while (!signal_pending(current)); return 0; }
/*
 * Read one bulk packet from the camera.
 * Packet boundaries are preserved because the application protocol depends
 * on them.  On -ETIMEDOUT the bulk read is retried up to MAX_READ_RETRY
 * times with a RETRY_TIMEOUT pause between attempts.
 * Returns bytes read, or a negative errno.
 */
static ssize_t camera_read (struct file *file,
		char *buf, size_t len, loff_t *ppos)
{
	struct camera_state *camera;
	int retries;
	int retval = 0;

	if (len > MAX_PACKET_SIZE)
		return -EINVAL;

	camera = (struct camera_state *) file->private_data;
	down (&camera->sem);	/* serialize access to the device */
	if (!camera->dev) {
		up (&camera->sem);
		return -ENODEV;	/* camera was disconnected */
	}

	/* Big reads are common, for image downloading. Smaller ones
	 * are also common (even "directory listing" commands don't
	 * send very much data). We preserve packet boundaries here,
	 * they matter in the application protocol.
	 */
	for (retries = 0; retries < MAX_READ_RETRY; retries++) {
		int count;

		if (signal_pending (current)) {
			retval = -EINTR;
			break;
		}

		retval = usb_bulk_msg (camera->dev,
			usb_rcvbulkpipe (camera->dev, camera->inEP),
			camera->buf, len, &count, HZ*10);

		dbg ("read (%Zd) - 0x%x %d", len, retval, count);

		if (!retval) {
			if (copy_to_user (buf, camera->buf, count))
				retval = -EFAULT;
			else
				retval = count;
			break;
		}
		if (retval != -ETIMEDOUT)
			break;	/* hard error: give up */

		/* transient timeout: back off briefly, then retry */
		interruptible_sleep_on_timeout (&camera->wait, RETRY_TIMEOUT);

		dbg ("read (%Zd) - retry", len);
	}
	up (&camera->sem);
	return retval;
}
/* =============================================================
 * Interface with the generic TPM driver
 * =============================================================
 */
/*
 * vtpm_recv - collect the response to the previously transmitted command.
 * Returns the number of response bytes copied into 'buf' (possibly 0).
 */
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int rc = 0;
	unsigned long flags;
	struct vtpm_state *vtpms;

	vtpms = (struct vtpm_state *)chip_get_private(chip);

	/*
	 * Check if the previous operation only queued the command
	 * In this case there won't be a response, so I just
	 * return from here and reset that flag. In any other
	 * case I should receive a response from the back-end.
	 */
	spin_lock_irqsave(&vtpms->resp_list_lock, flags);
	if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
		vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
		/*
		 * The first few commands (measurements) must be
		 * queued since it might not be possible to talk to the
		 * TPM, yet.
		 * Return a response of up to 30 '0's.
		 */
		count = min_t(size_t, count, 30);
		memset(buf, 0x0, count);
		return count;
	}
	/*
	 * Check whether something is in the responselist and if
	 * there's nothing in the list wait for something to appear.
	 */
	if (!vtpms->current_response) {
		spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
		/* 1000-jiffy cap; NOTE(review): sleep_on-style wait can miss
		 * a wakeup posted between the unlock above and the sleep */
		interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
					       1000);
		spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
	}

	if (vtpms->current_response) {
		struct transmission *t = vtpms->current_response;
		vtpms->current_response = NULL;
		rc = min(count, t->response_len);
		memcpy(buf, t->response, rc);
		transmission_free(t);	/* response consumed; release it */
	}

	spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
	return rc;
}
/*
 * wait until the write buffer has enough room
 *
 * Enqueues an echo-back event stamped with the current tick (once), then
 * sleeps up to HZ waiting for it to return.  Returns 0 when the sync
 * completed (or a signal arrived), 1 when the caller should keep waiting.
 */
int snd_seq_oss_writeq_sync(seq_oss_writeq_t *q)
{
	seq_oss_devinfo_t *dp = q->dp;
	abstime_t time;
	unsigned long flags;

	time = snd_seq_oss_timer_cur_tick(dp->timer);
	if (q->sync_time >= time)
		return 0; /* already finished */

	if (! q->sync_event_put) {
		snd_seq_event_t ev;
		evrec_t *rec;

		/* put echoback event */
		memset(&ev, 0, sizeof(ev));
		ev.flags = 0;
		ev.type = SNDRV_SEQ_EVENT_ECHO;
		ev.time.tick = time;
		/* echo back to itself */
		snd_seq_oss_fill_addr(dp, &ev, dp->addr.client, dp->addr.port);
		rec = (evrec_t*)&ev.data;
		rec->t.code = SEQ_SYNCTIMER;
		rec->t.time = time;
		q->sync_event_put = 1;
		snd_seq_kernel_client_enqueue_blocking(dp->cseq, &ev, NULL, 0, 0);
	}

	spin_lock_irqsave(&q->sync_lock, flags);
	if (! q->sync_event_put) { /* echoback event has been received */
		spin_unlock_irqrestore(&q->sync_lock, flags);
		return 0;
	}

	/* wait for echo event */
	/*
	 * Fix: release with spin_unlock_irqrestore() before sleeping -- the
	 * original used plain spin_unlock()/spin_lock() here, which left
	 * interrupts disabled across the sleep and unbalanced the
	 * irqsave/irqrestore pairing on 'flags'.
	 */
	spin_unlock_irqrestore(&q->sync_lock, flags);
	interruptible_sleep_on_timeout(&q->sync_sleep, HZ);
	spin_lock_irqsave(&q->sync_lock, flags);

	if (signal_pending(current)) {
		/* interrupted - return 0 to finish sync */
		q->sync_event_put = 0;
		spin_unlock_irqrestore(&q->sync_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&q->sync_lock, flags);
	if (q->sync_time >= time)
		return 0;
	else
		return 1;
}
/*
 * Wait for the PCF bus-controller interrupt to be flagged by the ISR
 * (pcf_pending), or time out after 2 seconds.  Without an IRQ configured,
 * just busy-delays 100 us instead.
 */
static void pcf_isa_waitforpin(void)
{
	int timeout = 2;	/* seconds */

	if (irq > 0) {
		/* cli/sti close the test-then-sleep window against the ISR */
		cli();
		if (pcf_pending == 0) {
			interruptible_sleep_on_timeout(&pcf_wait, timeout*HZ );
		} else
			pcf_pending = 0;	/* interrupt already arrived; consume it */
		sti();
	} else {
		udelay(100);
	}
}
/*
 * Wait for the PCF interrupt to be flagged by the ISR (pcf_pending), or
 * time out after 10 seconds.  Without an IRQ configured, busy-delays
 * 100 us instead.
 */
static void pcf_epp_waitforpin(void)
{
	int timeout = 10;	/* seconds */

	if (gpe.pe_irq > 0) {
		spin_lock_irq(&irq_driver_lock);
		/* NOTE(review): sleeping while irq_driver_lock is held with
		 * IRQs disabled risks deadlock against the ISR that sets
		 * pcf_pending -- confirm lock usage. */
		if (pcf_pending == 0) {
			interruptible_sleep_on_timeout(&pcf_wait, timeout*HZ);
			//udelay(100);
		} else {
			pcf_pending = 0;	/* interrupt already arrived */
		}
		spin_unlock_irq(&irq_driver_lock);
	} else {
		udelay(100);
	}
}
int jpeg_exe_enc(struct jpeg_control *ctrl) { jpeg_start_encode(ctrl->reg_base); if (interruptible_sleep_on_timeout(&ctrl->wq, INT_TIMEOUT) == 0) jpeg_err("waiting for interrupt is timeout\n"); if (ctrl->irq_ret != OK_ENC_OR_DEC) { jpeg_err("jpeg encode error(%d)\n", ctrl->irq_ret); return -1; } ctrl->enc_param.size = jpeg_get_stream_size(ctrl->reg_base); return 0; }
/* This routine is used for sleeping.
 * A jifs value <0 means NO sleeping,
 *              =0 means minimal sleeping (let the kernel
 *                 run for other processes)
 *              >0 means at least sleep for that amount.
 * May be we could use a simple count loop w/ jumps to itself, but
 * I wanna make this independent of cpu speed. [1 jiffy is 1/HZ] sec */
static void mcdx_delay(struct s_drive_stuff *stuff, long jifs)
{
	if (jifs < 0)
		return;

	xtrace(SLEEP, "*** delay: sleepq\n");
	interruptible_sleep_on_timeout(&stuff->sleepq, jifs);
	xtrace(SLEEP, "delay awoken\n");
	if (signal_pending(current))
		xtrace(SLEEP, "got signal\n");
}
/*
 * Stop output DMA on 'dev' and reset the output buffer state: wait briefly
 * for the current fragment to finish, halt the device, clear the ISA DMA
 * controller, and re-initialize the fragment queue counters.
 */
static void dma_reset_output(int dev)
{
	struct audio_operations *adev = audio_devs[dev];
	unsigned long flags,f ;
	struct dma_buffparms *dmap = adev->dmap_out;

	if (!(dmap->flags & DMA_STARTED))	/* DMA is not active */
		return;

	/*
	 * First wait until the current fragment has been played completely
	 */
	spin_lock_irqsave(&dmap->lock,flags);
	adev->dmap_out->flags |= DMA_SYNCING;
	adev->dmap_out->underrun_count = 0;
	if (!signal_pending(current) && adev->dmap_out->qlen &&
	    adev->dmap_out->underrun_count == 0) {
		spin_unlock_irqrestore(&dmap->lock,flags);
		/* NOTE(review): sleep_on-style wait may miss a wakeup posted
		 * between the unlock above and the sleep. */
		interruptible_sleep_on_timeout(&adev->out_sleeper,
					       dmabuf_timeout(dmap));
		spin_lock_irqsave(&dmap->lock,flags);
	}
	adev->dmap_out->flags &= ~(DMA_SYNCING | DMA_ACTIVE);

	/*
	 * Finally shut the device off
	 */
	if (!(adev->flags & DMA_DUPLEX) || !adev->d->halt_output)
		adev->d->halt_io(dev);
	else
		adev->d->halt_output(dev);
	adev->dmap_out->flags &= ~DMA_STARTED;

	/* Clear the ISA DMA controller under the global DMA lock */
	f=claim_dma_lock();
	clear_dma_ff(dmap->dma);
	disable_dma(dmap->dma);
	release_dma_lock(f);

	dmap->byte_counter = 0;
	reorganize_buffers(dev, adev->dmap_out, 0);
	dmap->qlen = dmap->qhead = dmap->qtail = dmap->user_counter = 0;
	spin_unlock_irqrestore(&dmap->lock,flags);
}
/*
 * Wait for the MFC firmware to signal completion of 'command'.
 * Known R2H return codes:
 *   R2H_CMD_EMPTY = 0, R2H_CMD_OPEN_INSTANCE_RET = 1,
 *   R2H_CMD_CLOSE_INSTANCE_RET = 2, R2H_CMD_ERROR_RET = 3,
 *   R2H_CMD_SEQ_DONE_RET = 4, R2H_CMD_FRAME_DONE_RET = 5,
 *   R2H_CMD_SYS_INIT_RET = 8, R2H_CMD_FW_STATUS_RET = 9,
 *   R2H_CMD_EDFU_INIT_RET = 16, R2H_CMD_DECODE_ERR_RET = 32
 * Returns the interrupt type reported by the ISR, or 0 on timeout /
 * unknown command.
 */
int s3c_mfc_wait_for_done(s3c_mfc_wait_done_type command)
{
	unsigned int ret_val;

	switch (command) {
	/*
	case R2H_CMD_FW_STATUS_RET :
		ret_val = s3c_mfc_wait_polling(S3C_FIMV_FW_STATUS);
		break;
	*/
	case R2H_CMD_FW_STATUS_RET:
	case R2H_CMD_OPEN_INSTANCE_RET:
	case R2H_CMD_SYS_INIT_RET:
	case R2H_CMD_SEQ_DONE_RET:
	case R2H_CMD_FRAME_DONE_RET:
	case R2H_CMD_CLOSE_INSTANCE_RET:
		if (interruptible_sleep_on_timeout(&s3c_mfc_wait_queue, 50000) == 0) {
			mfc_err("Interrupt Time Out(%d)\n", command);
			return 0;
		}
		/* hand back the interrupt type and reset it for the next wait */
		ret_val = s3c_mfc_int_type;
		s3c_mfc_int_type = 0;
		return ret_val;

	default:
		mfc_err("undefined command\n");
		return 0;
	}
}
/*
 * Demo module init: spawn a helper thread, then sleep on a local wait
 * queue for up to 100 jiffies and log the results.
 */
static int __init interruptible_sleep_on_timeout_init(void)
{
	int result;
	long result1;
	wait_queue_head_t head;
	wait_queue_t data;

	printk("<0>into interruptible_sleep_on_timeout_init.\n");
	result = kernel_thread(my_function, NULL, CLONE_KERNEL);
	init_waitqueue_head(&head);
	init_waitqueue_entry(&data, current);
	add_wait_queue(&head, &data);
	result1 = interruptible_sleep_on_timeout(&head, 100);
	/* Fix: detach our manually-added entry before 'head' and 'data'
	 * (both stack objects) go out of scope -- the original left the
	 * entry queued on the head. */
	remove_wait_queue(&head, &data);
	printk("<0>the result of the kernel_thread is :%d\n", result);
	printk("<0>the result of the interruptible_sleep_on_timeout is:%ld\n", result1);
	printk("<0>the current pid is:%d\n", current->pid);
	printk("<0>out interruptible_sleep_on_timeout_init.\n");
	return 0;
}
/*
 * Read frame data starting at *f_pos, blocking up to 100 ms for a new
 * frame unless O_NONBLOCK is set.  Returns the number of bytes copied
 * (0 at end of frame), or -EFAULT on a failed copy.
 */
static ssize_t smartcam_read(struct file *file, char __user *data,
			     size_t count, loff_t *f_pos)
{
	SCAM_MSG("(%s) %s called (count=%d, f_pos = %d)\n",
		 current->comm, __FUNCTION__, (int) count, (int) *f_pos);

	if (*f_pos >= formats[format].sizeimage)
		return 0;	/* end of frame */

	/* wait up to 100 ms (HZ/10) for a fresh frame; the original comment
	 * claimed "1 second", which did not match the timeout */
	if (!(file->f_flags & O_NONBLOCK))
		interruptible_sleep_on_timeout(&wq, HZ/10);

	last_read_frame = frame_sequence;

	if (*f_pos + count > formats[format].sizeimage)
		count = formats[format].sizeimage - *f_pos;

	if (copy_to_user(data, frame_data + *f_pos, count))
		return -EFAULT;

	/*
	 * Fix: the original returned 0 after a successful copy and never
	 * advanced *f_pos, so readers saw EOF (or looped forever) despite
	 * data being transferred.  Follow read(2) semantics instead.
	 */
	*f_pos += count;
	return count;
}
static ushort read_sio_sniff(void) { long timeout; // kpd_timeout is mSec order // interrupt_sleep_on_timeout is based on 10msec timer tick if (sniffer_timeout == -1) { interruptible_sleep_on(&sniffer_queue); } else { timeout = interruptible_sleep_on_timeout(&sniffer_queue, sniffer_timeout/10); if (timeout == 0) { // timeout without keypad input return -1; } } return (ushort)sniffed_value; }
static ushort read_sio_kpd(void) { long timeout; // kpd_timeout is mSec order // interrupt_sleep_on_timeout is based on 10msec timer tick if (kpd_timeout == -1) { interruptible_sleep_on(&smartio_kpd_queue); } else { timeout = interruptible_sleep_on_timeout(&smartio_kpd_queue, kpd_timeout/10); if (timeout == 0) { // timeout without keypad input return 0xFFFF; } } return kpd_value; }
/*
 * Line-discipline read: hand buffered data from the global 'hook' buffer
 * to userspace.  Blocks up to 3 seconds waiting for data, copies at most
 * min(nr, hook->len) bytes, and frees the hook once fully drained.
 * Returns bytes copied (0 if none arrived), or a negative errno.
 */
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
				 unsigned char __user *buf, size_t nr)
{
	struct hci_uart *hu = (void *) tty->disc_data;
	struct hci_dev *hdev = hu->hdev;
	int ret = 0, count;

	BT_DBG("%s: hu = 0x%p hci_dev = 0x%p, nr = %d", __func__, hu, hdev, nr);

	ret = hci_uart_tty_access_allowed();
	if (ret < 0)
		return ret;

	if (!hook)
		return -ENOMEM;	/* no capture buffer allocated */

	/* nothing buffered yet: wait up to 3 seconds for the producer */
	if (!hook->len)
		interruptible_sleep_on_timeout(&read_wait, 3 * HZ);

	if (!hook->len) {
		BT_INFO("No data to read");
	} else {
		count = nr > hook->len ? hook->len : nr;
		/* copy_to_user returns the number of bytes NOT copied;
		 * advance by what actually reached userspace */
		ret = copy_to_user(buf, hook->head, count);
		hook->len -= (count - ret);
		hook->head += (count - ret);
		ret = count - ret;
	}

	if (!hook->len) {
		BT_DBG("%s: free hook", __func__);
		kfree(hook);
		hook = NULL;
	}

	BT_DBG("%s: ret = %d", __func__, ret);
	return ret;
}
void nfs_reqlist_exit(struct nfs_server *server) { struct nfs_reqlist *cache; lock_kernel(); cache = server->rw_requests; if (!cache) goto out; dprintk("NFS: reqlist_exit (ptr %p rpc %p)\n", cache, cache->task); while (cache->task) { rpc_exit(cache->task, 0); rpc_wake_up_task(cache->task); interruptible_sleep_on_timeout(&cache->request_wait, 1 * HZ); } out: unlock_kernel(); }