/*
 * ======== error ========
 * purpose:
 *     Prints error onto the standard output.
 */
static void error(char *fmt, ...)
{
	s32 arg1, arg2, arg3, arg4, arg5, arg6;
	va_list va;

	va_start(va, fmt);

	arg1 = va_arg(va, s32);
	arg2 = va_arg(va, s32);
	arg3 = va_arg(va, s32);
	arg4 = va_arg(va, s32);
	arg5 = va_arg(va, s32);
	arg6 = va_arg(va, s32);

	va_end(va);

	printk("ERROR: ");
	printk(fmt, arg1, arg2, arg3, arg4, arg5, arg6);

#if defined(DEBUG) || defined(DDSP_DEBUG_PRODUCT)
	if (in_interrupt()) {
		printk(KERN_INFO "Not stopping after error since ISR/DPC "
		       "are disabled\n");
	} else {
		set_current_state(TASK_INTERRUPTIBLE);
		flush_signals(current);
		schedule();
		flush_signals(current);
		printk(KERN_INFO "Signaled in error function\n");
	}
#endif
}
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int err = 0;
	struct svc_rqst *rqstp = vrqstp;
	struct net *net = &init_net;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));
		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	cancel_delayed_work_sync(&ln->grace_period_end);
	locks_end_grace(&ln->lockd_manager);
	return 0;
}
int cache_invalidator_kthread(void *__bc)
{
	struct bittern_cache *bc = (struct bittern_cache *)__bc;

	set_user_nice(current, S_INVALIDATOR_THREAD_NICE);

	BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL,
		 "enter, nice=%d", S_INVALIDATOR_THREAD_NICE);

	while (!kthread_should_stop()) {
		int ret;

		ASSERT(bc != NULL);
		ASSERT_BITTERN_CACHE(bc);

		ret = wait_event_interruptible(bc->bc_invalidator_wait,
					       (cache_invalidator_has_work(bc) ||
						kthread_should_stop()));
		if (signal_pending(current))
			flush_signals(current);

		cache_invalidate_clean_blocks(bc);

		schedule();
	}

	/*
	 * wait for any pending invalidations to complete before quitting
	 */
	while (atomic_read(&bc->bc_pending_invalidate_requests) != 0) {
		int ret;

		ret = wait_event_interruptible(bc->bc_invalidator_wait,
					       atomic_read(&bc->bc_pending_invalidate_requests) <
					       bc->bc_max_pending_requests);
		if (signal_pending(current))
			flush_signals(current);
		BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL,
			 "wait: kthread_should_stop=%d, has_work=%d, pending=%d",
			 kthread_should_stop(),
			 cache_invalidator_has_work(bc),
			 atomic_read(&bc->bc_pending_invalidate_requests));
	}

	BT_TRACE(BT_LEVEL_TRACE0, bc, NULL, NULL, NULL, NULL, "exit");

	bc->bc_invalidator_task = NULL;
	return 0;
}
void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
{
	while (queue->write == queue->read + queue->size) {
		if (down_interruptible(&queue->pop) != 0) {
			flush_signals(current);
		}
	}

	/*
	 * Write to queue->storage must be visible after read from
	 * queue->read
	 */
	smp_mb();

	queue->storage[queue->write & (queue->size - 1)] = header;

	/*
	 * Write to queue->storage must be visible before write to
	 * queue->write
	 */
	smp_wmb();

	queue->write++;

	up(&queue->push);
}
VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue)
{
	VCHIQ_HEADER_T *header;

	while (queue->write == queue->read) {
		if (down_interruptible(&queue->push) != 0) {
			flush_signals(current);
		}
	}

	/*
	 * Read from queue->storage must be visible after read from
	 * queue->write
	 */
	smp_rmb();

	header = queue->storage[queue->read & (queue->size - 1)];

	/*
	 * Read from queue->storage must be visible before write to
	 * queue->read
	 */
	smp_mb();

	queue->read++;

	up(&queue->pop);

	return header;
}
void rtmp_os_thread_init(PUCHAR pThreadName, PVOID pNotify)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
	daemonize(pThreadName /*"%s",pAd->net_dev->name*/);

	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	current->flags |= PF_NOFREEZE;
#else
	unsigned long flags;

	daemonize();
	reparent_to_init();
	strcpy(current->comm, pThreadName);

	siginitsetinv(&current->blocked, sigmask(SIGTERM) | sigmask(SIGKILL));

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22)
	spin_lock_irqsave(&current->sigmask_lock, flags);
	flush_signals(current);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
#endif
#endif

	/* signal that we've started the thread */
	complete(pNotify);
}
/* This function is called when all applications close handles to the bridge
 * driver. */
static int bridge_release(struct inode *ip, struct file *filp)
{
	struct PROCESS_CONTEXT *pr_ctxt;
	int status = 0;

	if (!filp->private_data) {
		status = -EIO;
		goto err;
	}

	pr_ctxt = filp->private_data;
	if (pr_ctxt) {
		DSP_STATUS status;

		flush_signals(current);
		status = DRV_RemoveAllResources(pr_ctxt);
		DBC_Ensure(DSP_SUCCEEDED(status));
		if (pr_ctxt->hProcessor)
			PROC_Detach(pr_ctxt);
		mutex_destroy(&pr_ctxt->strm_mutex);
		mutex_destroy(&pr_ctxt->node_mutex);
		kfree(pr_ctxt);
		filp->private_data = NULL;
	}

err:
#ifdef CONFIG_BRIDGE_RECOVERY
	if (!atomic_dec_return(&bridge_cref))
		complete(&bridge_comp);
#endif
	return status;
}
static int
kthread(void *errorparameternameomitted)
{
	struct sk_buff *iskb, *oskb;
	DECLARE_WAITQUEUE(wait, current);
	sigset_t blocked;

#ifdef PF_NOFREEZE
	current->flags |= PF_NOFREEZE;
#endif
	set_user_nice(current, -5);
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);
	complete(&ktrendez);
	do {
		__set_current_state(TASK_RUNNING);
		do {
			if ((iskb = skb_dequeue(&skb_inq)))
				ktrcv(iskb);
			if ((oskb = skb_dequeue(&skb_outq)))
				dev_queue_xmit(oskb);
		} while (iskb || oskb);
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ktwaitq, &wait);
		schedule();
		remove_wait_queue(&ktwaitq, &wait);
	} while (!kthread_should_stop());
	__set_current_state(TASK_RUNNING);
	complete(&ktrendez);
	return 0;
}
thread_return rtw_xmit_thread(thread_context context)
{
	s32 err;
	PADAPTER padapter;

	err = _SUCCESS;
	padapter = (PADAPTER)context;

#if 0
	thread_enter(padapter->pnetdev);
#else
//	daemonize("%s", padapter->pnetdev->name);
	daemonize("%s", "RTW_XMIT_THREAD");
	allow_signal(SIGTERM);
#endif

	do {
		err = hal_xmit_handler(padapter);
		if (signal_pending(current)) {
			flush_signals(current);
		}
	} while (_SUCCESS == err);

	_rtw_up_sema(&padapter->xmitpriv.terminate_xmitthread_sema);

	thread_exit();
}
void l4x_sig_current_kill(void)
{
	/*
	 * We're a user process which just got a SIGKILL/SEGV and we're now
	 * preparing to die...
	 */

	/*
	 * empty queue and only put SIGKILL/SEGV into it so that the process
	 * gets killed ASAP
	 */
	spin_lock_irq(&current->sighand->siglock);
	flush_signals(current);
	force_sig(SIGKILL, current);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * invoke do_signal which will dequeue the signal from the queue
	 * and feed us further to do_exit
	 */
#if defined(CONFIG_X86)
	do_signal(L4X_THREAD_REGSP(&current->thread));
#elif defined(ARCH_arm)
	do_signal(L4X_THREAD_REGSP(&current->thread), 0);
#else
#error Wrong arch
#endif

	panic("The zombie walks after SIGKILL!");
}
/* kp_wait: used for mainthread waiting for exit */
static void kp_wait(ktap_state *ks)
{
	struct task_struct *task = G(ks)->trace_task;

	if (G(ks)->exit)
		return;

	ks->stop = 0;

	/* tell workload process to start executing */
	if (G(ks)->workload)
		send_sig(SIGINT, G(ks)->trace_task, 0);

	while (!ks->stop) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* sleep for 100 msecs, and try again. */
		schedule_timeout(HZ / 10);

		if (signal_pending(current)) {
			flush_signals(current);

			/* newline for handle CTRL+C display as ^C */
			kp_puts(ks, "\n");
			break;
		}

		/* stop waiting if target pid is exited */
		if (task && task->state == TASK_DEAD)
			break;
	}
}
static int knamed_loop(void *data)
{
	allow_signal(SIGHUP);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			break;
		}

		if (signal_pending(current)) {
			flush_signals(current);
			PR_INFO("SIGNAL received");
		}

		PR_INFO("Hello knamed_task");
		schedule_timeout(5 * HZ);
	}

	knamed_task = NULL;

	complete_and_exit(&comp, 0);
}
/*
 * The callback service for NFSv4.1 callbacks
 */
static int
nfs41_callback_svc(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;
	struct svc_serv *serv = rqstp->rq_server;
	struct rpc_rqst *req;
	int error;
	DEFINE_WAIT(wq);

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);

		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
		spin_lock_bh(&serv->sv_cb_lock);
		if (!list_empty(&serv->sv_cb_list)) {
			req = list_first_entry(&serv->sv_cb_list,
					       struct rpc_rqst, rq_bc_list);
			list_del(&req->rq_bc_list);
			spin_unlock_bh(&serv->sv_cb_lock);
			finish_wait(&serv->sv_cb_waitq, &wq);
			dprintk("Invoking bc_svc_process()\n");
			error = bc_svc_process(serv, req, rqstp);
			dprintk("bc_svc_process() returned w/ error code= %d\n",
				error);
		} else {
static int powerd(void *__unused)
{
	static char *envp[] = { "HOME=/", "TERM=linux",
				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
	DECLARE_WAITQUEUE(wait, current);

	daemonize("powerd");

	add_wait_queue(&powerd_wait, &wait);
again:
	for (;;) {
		set_task_state(current, TASK_INTERRUPTIBLE);
		if (button_pressed)
			break;
		flush_signals(current);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&powerd_wait, &wait);

	/* Ok, down we go... */
	button_pressed = 0;
	if (kernel_execve("/sbin/shutdown", argv, envp) < 0) {
		printk("powerd: shutdown execution failed\n");
		add_wait_queue(&powerd_wait, &wait);
		goto again;
	}
	return 0;
}
static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	set_freezable();
	allow_signal(SIGHUP);

	for(;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
		try_to_freeze();

		if (signal_pending(current))
			flush_signals(current);

		if(kthread_should_stop())
			break;
	}
	return 0;
}
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	struct key *new_session, *old_session;
	int retval;

	/* Unblock all signals and set the session keyring. */
	new_session = key_get(sub_info->ring);
	flush_signals(current);
	spin_lock_irq(&current->sighand->siglock);
	old_session = __install_session_keyring(current, new_session);
	flush_signal_handlers(current, 1);
	sigemptyset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	key_put(old_session);

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed(current, CPU_MASK_ALL);

	retval = -EPERM;
	if (current->fs->root)
		retval = execve(sub_info->path, sub_info->argv, sub_info->envp);

	/* Exec failed? */
	sub_info->retval = retval;
	do_exit(0);
}
thread_return rtl8723bs_xmit_thread(thread_context context)
{
	s32 ret;
	PADAPTER padapter;
	struct xmit_priv *pxmitpriv;
	u8 thread_name[20] = "RTWHALXT";

	ret = _SUCCESS;
	padapter = (PADAPTER)context;
	pxmitpriv = &padapter->xmitpriv;

	rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
	thread_enter(thread_name);

	DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));

	// For now, no one would down sema to check thread is running,
	// so mark this temporary, [email protected]
//	_rtw_up_sema(&pxmitpriv->SdioXmitTerminateSema);

	do {
		ret = rtl8723bs_xmit_handler(padapter);
		if (signal_pending(current)) {
			flush_signals(current);
		}
	} while (_SUCCESS == ret);

	_rtw_up_sema(&pxmitpriv->SdioXmitTerminateSema);

	RT_TRACE(_module_hal_xmit_c_, _drv_notice_, ("-%s\n", __FUNCTION__));

	thread_exit();
}
static int bridge_release(struct inode *ip, struct file *filp)
{
	int status = 0;
	struct process_context *pr_ctxt;

	if (!filp->private_data) {
		status = -EIO;
		goto err;
	}

	pr_ctxt = filp->private_data;
	flush_signals(current);
	drv_remove_all_resources(pr_ctxt);
	proc_detach(pr_ctxt);
	kfree(pr_ctxt->node_id);
	kfree(pr_ctxt->stream_id);
	kfree(pr_ctxt);

	filp->private_data = NULL;

err:
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
	if (!atomic_dec_return(&bridge_cref))
		complete(&bridge_comp);
#endif
	return status;
}
thread_return rtl8723as_xmit_thread(thread_context context)
{
	PADAPTER padapter;
	struct xmit_priv *pxmitpriv;
	PHAL_DATA_TYPE phal;
	s32 ret;

	padapter = (PADAPTER)context;
	pxmitpriv = &padapter->xmitpriv;
	phal = GET_HAL_DATA(padapter);
	ret = _SUCCESS;

	thread_enter("RTWHALXT");

	do {
		ret = rtl8723as_xmit_handler(padapter);
		if (signal_pending(current)) {
			flush_signals(current);
		}
	} while (_SUCCESS == ret);

	_rtw_up_sema(&phal->SdioXmitTerminateSema);

	RT_TRACE(_module_hal_xmit_c_, _drv_notice_, ("-%s\n", __FUNCTION__));

	thread_exit();
}
struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	flush_signals(current);

	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);
		iscsi_del_ts_from_active_list(ts);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}

	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
		complete(&ts->tx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	ret = wait_for_completion_interruptible(&ts->tx_start_comp);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for "
			" thread_id: %d, going back to sleep\n", ts->thread_id);
		goto sleep;
	}

	iscsi_check_to_add_additional_sets();
	/*
	 * From the TX thread, up the tx_post_start_comp that the RX Thread is
	 * sleeping on in iscsi_rx_thread_pre_handler(), then up the
	 * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
	 */
	ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
	complete(&ts->tx_post_start_comp);
	complete(&ts->rx_post_start_comp);

	spin_lock_bh(&ts->ts_state_lock);
	ts->status = ISCSI_THREAD_SET_ACTIVE;
	spin_unlock_bh(&ts->ts_state_lock);

	return ts->conn;
}
struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
{
	int ret;

	spin_lock_bh(&ts->ts_state_lock);
	if (ts->create_threads) {
		spin_unlock_bh(&ts->ts_state_lock);
		goto sleep;
	}

	flush_signals(current);

	if (ts->delay_inactive && (--ts->thread_count == 0)) {
		spin_unlock_bh(&ts->ts_state_lock);
		iscsi_del_ts_from_active_list(ts);

		if (!iscsit_global->in_shutdown)
			iscsi_deallocate_extra_thread_sets();

		iscsi_add_ts_to_inactive_list(ts);
		spin_lock_bh(&ts->ts_state_lock);
	}

	if ((ts->status == ISCSI_THREAD_SET_RESET) &&
	    (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
		complete(&ts->rx_restart_comp);

	ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
sleep:
	ret = wait_for_completion_interruptible(&ts->rx_start_comp);
	pr_info("%s: %s/%d (thread set %d) got %d waiting for rx_start_comp\n",
		__func__, current->comm, task_pid_nr(current),
		ts->thread_id, ret);
	if (ret != 0)
		return NULL;

	if (iscsi_signal_thread_pre_handler(ts) < 0)
		return NULL;

	if (!ts->conn) {
		pr_err("struct iscsi_thread_set->conn is NULL for"
			" thread_id: %d, going back to sleep\n", ts->thread_id);
		goto sleep;
	}

	iscsi_check_to_add_additional_sets();
	/*
	 * The RX Thread starts up the TX Thread and sleeps.
	 */
	spin_lock_bh(&ts->ts_state_lock);
	ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
	spin_unlock_bh(&ts->ts_state_lock);
	complete(&ts->tx_start_comp);
	wait_for_completion(&ts->tx_post_start_comp);
	pr_info("%s/%d: returning connection %p for rx thread\n",
		current->comm, task_pid_nr(current), ts->conn);

	return ts->conn;
}
/*
 * This is the callback kernel thread.
 */
static void nfs_callback_svc(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	int err;

	__module_get(THIS_MODULE);
	lock_kernel();

	nfs_callback_info.pid = current->pid;
	daemonize("nfsv4-svc");
	/* Process request with signals blocked, but allow SIGKILL. */
	allow_signal(SIGKILL);

	complete(&nfs_callback_info.started);

	for(;;) {
		if (signalled()) {
			if (nfs_callback_info.users == 0)
				break;
			flush_signals(current);
		}
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
				"%s: terminating on error %d\n",
				__FUNCTION__, -err);
			break;
		}
		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
			NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
		svc_process(serv, rqstp);
	}

	flush_signals(current);
	svc_exit_thread(rqstp);
	nfs_callback_info.pid = 0;
	complete(&nfs_callback_info.stopped);
	unlock_kernel();
	module_put_and_exit(0);
}
VCHIQ_HEADER_T *vchiu_queue_peek(VCHIU_QUEUE_T *queue)
{
	while (queue->write == queue->read) {
		if (down_interruptible(&queue->push) != 0) {
			flush_signals(current);
		}
	}

	up(&queue->push); // We haven't removed anything from the queue.

	return queue->storage[queue->read & (queue->size - 1)];
}
struct vchiq_header *vchiu_queue_peek(struct vchiu_queue *queue)
{
	while (queue->write == queue->read) {
		if (wait_for_completion_killable(&queue->push))
			flush_signals(current);
	}

	complete(&queue->push); // We haven't removed anything from the queue.

	return queue->storage[queue->read & (queue->size - 1)];
}
int osi_NetReceive(osi_socket so, struct sockaddr_in *from, struct iovec *iov,
		   int iovcnt, int *lengthp)
{
	struct msghdr msg;
	int code;
#ifdef ADAPT_PMTU
	int sockerr;
	int esize;
#endif
	struct iovec tmpvec[RX_MAXWVECS + 2];
	struct socket *sop = (struct socket *)so;

	if (iovcnt > RX_MAXWVECS + 2) {
		osi_Panic("Too many (%d) iovecs passed to osi_NetReceive\n",
			  iovcnt);
	}
#ifdef ADAPT_PMTU
	while (1) {
		sockerr = 0;
		esize = sizeof(sockerr);
		kernel_getsockopt(sop, SOL_SOCKET, SO_ERROR, (char *)&sockerr,
				  &esize);
		if (sockerr == 0)
			break;
		handle_socket_error(so);
	}
#endif
	memcpy(tmpvec, iov, iovcnt * sizeof(struct iovec));
	msg.msg_name = from;
	msg.msg_iov = tmpvec;
	msg.msg_iovlen = iovcnt;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	code = kernel_recvmsg(sop, &msg, (struct kvec *)tmpvec, iovcnt,
			      *lengthp, 0);
	if (code < 0) {
		afs_try_to_freeze();

		/* Clear the error before using the socket again.
		 * Oh joy, Linux has hidden header files as well. It appears we can
		 * simply call again and have it clear itself via sock_error().
		 */
		flush_signals(current);	/* We don't want no stinkin' signals. */

		rxk_lastSocketError = code;
		rxk_nSocketErrors++;
	} else {
		*lengthp = code;
		code = 0;
	}

	return code;
}
void vchiu_queue_push(VCHIU_QUEUE_T *queue, VCHIQ_HEADER_T *header)
{
	while (queue->write == queue->read + queue->size) {
		if (down_interruptible(&queue->pop) != 0) {
			flush_signals(current);
		}
	}

	queue->storage[queue->write & (queue->size - 1)] = header;

	queue->write++;

	up(&queue->push);
}
void vchiu_queue_push(struct vchiu_queue *queue, struct vchiq_header *header)
{
	if (!queue->initialized)
		return;

	while (queue->write == queue->read + queue->size) {
		if (wait_for_completion_killable(&queue->pop))
			flush_signals(current);
	}

	queue->storage[queue->write & (queue->size - 1)] = header;
	queue->write++;

	complete(&queue->push);
}
static inline void __exit_sighand(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->sig;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sigmask_lock, flags);
	if (sig) {
		tsk->sig = NULL;
		if (atomic_dec_and_test(&sig->count))
			kfree(sig);
	}
	flush_signals(tsk);
	spin_unlock_irqrestore(&tsk->sigmask_lock, flags);
}
static int context_thread(void *startup)
{
	struct task_struct *curtask = current;
	DECLARE_WAITQUEUE(wait, curtask);
	struct k_sigaction sa;

	daemonize();
	strcpy(curtask->comm, "keventd");
	current->flags |= PF_IOTHREAD;
	keventd_running = 1;
	keventd_task = curtask;

	spin_lock_irq(&curtask->sigmask_lock);
	siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
	recalc_sigpending(curtask);
	spin_unlock_irq(&curtask->sigmask_lock);

	complete((struct completion *)startup);

	/* Install a handler so SIGCLD is delivered */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	/*
	 * If one of the functions on a task queue re-adds itself
	 * to the task queue we call schedule() in state TASK_RUNNING
	 */
	for (;;) {
		set_task_state(curtask, TASK_INTERRUPTIBLE);
		add_wait_queue(&context_task_wq, &wait);
		if (TQ_ACTIVE(tq_context))
			set_task_state(curtask, TASK_RUNNING);
		schedule();
		remove_wait_queue(&context_task_wq, &wait);
		run_task_queue(&tq_context);
		wake_up(&context_task_done);
		if (signal_pending(curtask)) {
			while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
				;
			spin_lock_irq(&curtask->sigmask_lock);
			flush_signals(curtask);
			recalc_sigpending(curtask);
			spin_unlock_irq(&curtask->sigmask_lock);
		}
	}
}
struct vchiq_header *vchiu_queue_pop(struct vchiu_queue *queue)
{
	struct vchiq_header *header;

	while (queue->write == queue->read) {
		if (wait_for_completion_killable(&queue->push))
			flush_signals(current);
	}

	header = queue->storage[queue->read & (queue->size - 1)];
	queue->read++;

	complete(&queue->pop);

	return header;
}