//Ä£¿éŒÓÔغ¯Êý¶šÒå static int __init wait_for_completion_interruptible_init(void) { int result; long leavetime; wait_queue_t data; printk("<0>into wait_for_completion_interruptible_init.\n"); result=kernel_thread(my_function,NULL,CLONE_KERNEL); //ŽŽœšÐÂœø³Ì /*»ñÈ¡ÐÂœø³ÌµÄÃèÊö·ûÐÅÏ¢*/ struct pid * kpid=find_get_pid(result); struct task_struct * task=pid_task(kpid,PIDTYPE_PID); init_completion(&comple); //³õÊŒ»¯completion±äÁ¿ init_waitqueue_entry(&data,task); //ÓÃÐÂœø³Ì³õÊŒ»¯µÈŽý¶ÓÁÐÔªËØ __add_wait_queue_tail(&(comple.wait),&data); //œ«ÐÂœø³ÌŒÓÈëµÈŽý¶ÓÁеÄβ²¿ leavetime=wait_for_completion_interruptible(&comple); //×èÈûœø³Ì£¬µÈŽýÐÂœø³ÌµÄœáÊø /*ÏÔÊŸº¯Êýwait_for_completion_interruptible( )µÄ·µ»Øœá¹û*/ printk("<0>the result of the wait_for_completion_interruptible is:%ld\n",leavetime); /*ÏÔÊŸº¯Êýkernel_thread( )µÄ·µ»Øœá¹û*/ printk("<0>the result of the kernel_thread is :%d\n",result); printk("<0>the current pid is:%d\n",current->pid); //ÏÔÊŸµ±Ç°œø³ÌµÄPIDÖµ printk("<0>out wait_for_completion_interruptible_init.\n"); return 0; }
/*
 * wait_for_completion_timeout - wait (uninterruptibly) for a completion,
 * giving up after @timeout jiffies.
 *
 * @x:       completion to wait on
 * @timeout: maximum wait, in jiffies
 *
 * Returns 0 if the timeout expired before the completion was signalled,
 * otherwise the number of jiffies left on the timeout.  On success one
 * "done" count is consumed (x->done--).
 *
 * NOTE: the lock/unlock ordering around schedule_timeout() is what makes
 * this race-free against complete(); the code is kept byte-identical.
 */
unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout)
{
	might_sleep();

	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		/* Not yet completed: queue ourselves as an exclusive waiter
		 * at the tail so complete() wakes only one task. */
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			/* Mark sleeping BEFORE dropping the lock, so a
			 * concurrent complete() cannot miss us. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			timeout = schedule_timeout(timeout);
			spin_lock_irq(&x->wait.lock);
			if (!timeout) {
				/* Timed out: dequeue and return 0 without
				 * consuming a done count. */
				__remove_wait_queue(&x->wait, &wait);
				goto out;
			}
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--; /* consume one completion */
out:
	spin_unlock_irq(&x->wait.lock);
	return timeout;
}
/* ARGSUSED */
/*
 * lock_wait - sleep on wait queue @q, dropping spinlock @lock across the
 * sleep and re-acquiring it before returning.
 *
 * @q:    wait queue to sleep on
 * @rw:   non-zero -> queue at the tail, zero -> queue at the head
 * @lock: caller-held spinlock, released while sleeping; held again on return
 *
 * The task state is set to TASK_UNINTERRUPTIBLE *before* the entry is
 * linked and before @lock is dropped, so a waker running in that window
 * cannot be missed.  Order-critical; code kept byte-identical.
 */
void lock_wait(wait_queue_head_t *q, spinlock_t *lock, int rw)
{
	DECLARE_WAITQUEUE( wait, current );

	/* Mark ourselves sleeping first — see header comment. */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	wq_write_lock(&q->lock);
	if (rw) {
		__add_wait_queue_tail(q, &wait); /* writers queue FIFO at tail */
	} else {
		__add_wait_queue(q, &wait);
	}
	wq_write_unlock(&q->lock);

	/* Drop the caller's lock only after we are queued and marked
	 * sleeping, then sleep. */
	spin_unlock(lock);
	schedule();

	/* Woken: unlink ourselves and re-take the caller's lock. */
	wq_write_lock(&q->lock);
	__remove_wait_queue(q, &wait);
	wq_write_unlock(&q->lock);
	spin_lock(lock);
	/* return with lock held */
}
/*
 * add_wait_queue_exclusive - append @wait to @q as an exclusive waiter.
 *
 * @q:    head of the wait queue
 * @wait: caller-provided wait queue entry
 *
 * Sets WQ_FLAG_EXCLUSIVE on the entry and links it at the tail of the
 * queue under the queue's irq-safe lock.  Exclusive entries go at the
 * tail so non-exclusive waiters (at the head) are woken first.
 */
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long irqflags;

	/* Flag the entry as exclusive before it becomes visible on the queue. */
	wait->flags |= WQ_FLAG_EXCLUSIVE;

	spin_lock_irqsave(&q->lock, irqflags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, irqflags);
}
/*
 * prepare_to_wait_exclusive - queue an exclusive waiter and set task state.
 *
 * @q:     head of the wait queue
 * @wait:  caller-provided wait queue entry
 * @state: task state to enter (e.g. TASK_INTERRUPTIBLE)
 *
 * Marks @wait exclusive, links it at the tail of @q (only if it is not
 * already on a queue), and publishes the new task state — all under the
 * queue's irq-safe lock, so a concurrent wake-up cannot slip between
 * queueing and the state change.
 */
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long irq_state;

	/* Exclusive entries live at the tail and are woken one at a time. */
	wait->flags |= WQ_FLAG_EXCLUSIVE;

	spin_lock_irqsave(&q->lock, irq_state);
	/* Only link the entry when it is not already queued (idempotent
	 * across repeated prepare calls in a wait loop). */
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, irq_state);
}
/*
 * prepare_to_wait_exclusive (fastcall variant) - queue @wait on @q as an
 * exclusive waiter and, for synchronous waits only, set the task state.
 *
 * @q:     head of the wait queue
 * @wait:  caller-provided wait queue entry
 * @state: task state to enter for a synchronous wait
 *
 * Differs from the plain variant by the is_sync_wait() test: an async
 * wait-queue callback is merely queued and must not put the *current*
 * task to sleep.  Kept byte-identical; only comments added.
 */
void fastcall prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	/* Link only if not already queued (safe across repeated calls). */
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
/*
 * sv_wait - block on a synchronization variable.
 *
 * The associated monitor lock must be locked on entry.  It is unlocked
 * on return.
 *
 * @sv:            the synchronization variable
 * @sv_wait_flags: SV_WAIT_SIG to sleep interruptibly
 * @timeout:       jiffies to wait, or 0 for no timeout
 *
 * Return values:
 *   n < 0 : interrupted, -n jiffies remaining on timeout, or -1 if timeout == 0
 *   n = 0 : timeout expired
 *   n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
 */
signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout)
{
	DECLARE_WAITQUEUE( wait, current );
	unsigned long flags;
	signed long ret = 0;

#ifdef SV_DEBUG_INTERRUPT_STATE
	{
		unsigned long flags;

		__save_flags(flags);

		/* Sanity-check that the caller's interrupt state matches the
		 * sv's declared SV_INTS policy. */
		if (sv->sv_flags & SV_INTS) {
			if (SV_TEST_INTERRUPTS_ENABLED(flags)) {
				printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
				       "enabled (flags: 0x%lx)\n", flags);
				BUG();
			}
		} else {
			if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
				printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
				       "disabled! (flags: 0x%lx)\n", flags);
			}
		}
	}
#endif /* SV_DEBUG_INTERRUPT_STATE */

	sv_lock(sv);
	sv->sv_mon_unlock_func(sv->sv_mon_lock);

	/* Add ourselves to the wait queue and set the state before
	 * releasing the sv_lock so as to avoid racing with the
	 * wake_up() in sv_signal() and sv_broadcast().
	 */

	/* don't need the _irqsave part, but there is no wq_write_lock() */
	wq_write_lock_irqsave(&sv->sv_waiters.lock, flags);

#ifdef EXCLUSIVE_IN_QUEUE
	wait.flags |= WQ_FLAG_EXCLUSIVE;
#endif

	switch (sv->sv_flags & SV_ORDER_MASK) {
	case SV_ORDER_FIFO:
		__add_wait_queue_tail(&sv->sv_waiters, &wait);
		break;
	case SV_ORDER_FILO:
		__add_wait_queue(&sv->sv_waiters, &wait);
		break;
	default:
		/* FIX: the format string here was split by a raw newline and a
		 * mangled byte (not a legal C string literal); rejoined into
		 * one literal with the same intended text. */
		printk(KERN_ERR "sv_wait: unknown order! (sv: 0x%p, flags: 0x%x)\n",
		       (void *)sv, sv->sv_flags);
		BUG();
	}
	wq_write_unlock_irqrestore(&sv->sv_waiters.lock, flags);

	if (sv_wait_flags & SV_WAIT_SIG)
		set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE);
	else
		set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);

	spin_unlock(&sv->sv_lock);

	/* Re-enable whatever the SV_INTS/SV_BHS policy had disabled before
	 * we go to sleep. */
	if (sv->sv_flags & SV_INTS)
		local_irq_enable();
	else if (sv->sv_flags & SV_BHS)
		local_bh_enable();

	if (timeout)
		ret = schedule_timeout(timeout);
	else
		schedule();

	if (current->state != TASK_RUNNING) { /* XXX Is this possible? */
		printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
		       "schedule().\n");
		set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&sv->sv_waiters, &wait);

	/* Return cases:
	 *   - woken by a sv_signal/sv_broadcast
	 *   - woken by a signal
	 *   - woken by timeout expiring
	 *
	 * XXX This isn't really accurate; we may have been woken before the
	 * signal anyway....
	 */
	if (signal_pending(current))
		return timeout ? -ret : -1;
	return timeout ? ret : 1;
}