Example no. 1
/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(xpc_n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}
Example no. 2
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}
Example no. 3
KAL_UINT32 eemcs_ccci_UL_write_wait(CCCI_CHANNEL_T chn)
{
    KAL_UINT32   port_id = 0;
    KAL_UINT32   tx_queue_idx = 0;
    KAL_UINT32   ret = 0;

    /* Map the CCCI channel to its port and to the uplink TX queue it uses. */
    port_id = ccci_ch_to_port(chn);
    tx_queue_idx = ccci_port_info[port_id].txq_id;

    /* Sleep until the TX software queue has free space beyond the reserved
     * amount, or until the wait is interrupted by a signal. */
    ret = wait_event_interruptible_exclusive(ccci_tx_waitq[tx_queue_idx].tx_waitq,
            (hif_ul_swq_space(tx_queue_idx) -
             atomic_read(&ccci_tx_waitq[tx_queue_idx].reserve_space)) > 0);

    DBGLOG(CCCI, TRA, "ccci_write_wait: wakeup port%d, txq=%d, ret=%d",
           port_id, tx_queue_idx, ret);
    return ret;
}
Example no. 4
static ssize_t
mycdrv_read (struct file *file, char __user * buf, size_t lbuf, loff_t * ppos)
{
    printk (KERN_INFO "process %i (%s) going to sleep\n", current->pid,
            current->comm);
    atomic_inc (&nsleepers);
    wait_event_interruptible_exclusive (wq, (atomic_read (&data_ready)));
    printk (KERN_INFO "process %i (%s) awakening\n", current->pid,
            current->comm);

    /* need to take spin lock to avoid preemption */

    spin_lock (&nsleepers_lock);
    if (atomic_dec_and_test (&nsleepers))
        atomic_set (&data_ready, 0);
    spin_unlock (&nsleepers_lock);

    return mycdrv_generic_read (file, buf, lbuf, ppos);
}
Example no. 5
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = for_background;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
Example no. 6
static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *offp)
{
    struct f_hidg *hidg  = file->private_data;
    ssize_t status = -ENOMEM;

    if (!access_ok(VERIFY_READ, buffer, count))
        return -EFAULT;

    if (hacky_device_list_check(hidg)) {
        pr_err("%s: trying to write to device %p that was destroyed\n", __func__, hidg);
        return -EIO;
    }

    mutex_lock(&hidg->lock);

#define WRITE_COND (!hidg->write_pending)

    /* write queue */
    while (!WRITE_COND) {
        mutex_unlock(&hidg->lock);
        if (file->f_flags & O_NONBLOCK)
            return -EAGAIN;

        if (wait_event_interruptible_exclusive(
                    hidg->write_queue, WRITE_COND))
            return -ERESTARTSYS;

        if (hacky_device_list_check(hidg)) {
            pr_err("%s: trying to write to device %p that was destroyed\n", __func__, hidg);
            return -EIO;
        }

        mutex_lock(&hidg->lock);
    }

    count  = min_t(unsigned, count, hidg->report_length);
    status = copy_from_user(hidg->req->buf, buffer, count);

    if (status != 0) {
        ERROR(hidg->func.config->cdev,
              "copy_from_user error\n");
        mutex_unlock(&hidg->lock);
        return -EINVAL;
    }

    hidg->req->status   = 0;
    hidg->req->zero     = 0;
    hidg->req->length   = count;
    hidg->req->complete = f_hidg_req_complete;
    hidg->req->context  = hidg;
    hidg->write_pending = 1;

    status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
    if (status < 0) {
        ERROR(hidg->func.config->cdev,
              "usb_ep_queue error on int endpoint %zd\n", status);
        hidg->write_pending = 0;
        wake_up(&hidg->write_queue);
    } else {
        status = count;
    }

    mutex_unlock(&hidg->lock);

    return status;
}
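
All six examples share the same pattern: the caller sleeps on a wait queue as an exclusive waiter until a condition becomes true or a signal arrives, while a producer elsewhere makes the condition true and wakes the queue. Below is a minimal sketch of that pattern; the wait queue demo_wq, the flag demo_ready, and both functions are hypothetical names used only for illustration, and the locking a real driver would need around the flag is omitted.

#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical wait queue and condition shared by waiter and waker. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

/* Waiter: sleep as an exclusive waiter until demo_ready is set or a
 * signal is delivered. */
static int demo_wait(void)
{
	if (wait_event_interruptible_exclusive(demo_wq, demo_ready))
		return -ERESTARTSYS;	/* interrupted by a signal */

	demo_ready = false;		/* consume the event */
	return 0;
}

/* Waker: publish the event, then wake up the queue. */
static void demo_wake(void)
{
	demo_ready = true;
	wake_up(&demo_wq);
}

The _exclusive variant adds the task to the queue with WQ_FLAG_EXCLUSIVE set, so a plain wake_up() wakes at most one such sleeper instead of every task on the queue, which avoids a thundering herd when many waiters compete for the same event.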