Example 1
/*
 * CPU-hotplug notifier: when a CPU is about to be taken offline
 * (CPU_DOWN_PREPARE), migrate every event channel currently bound to
 * that CPU onto another online CPU, keeping each user's ports on a
 * single CPU.
 *
 * Returns NOTIFY_OK after handling CPU_DOWN_PREPARE, NOTIFY_DONE for
 * any other action.
 */
static int __cpuinit evtchn_cpu_notify(struct notifier_block *nfb,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	/* Candidate targets: all online CPUs minus the one going down. */
	cpumask_t map = cpu_online_map;
	int i, j, newcpu;
	struct per_user_data *u;

	switch (action) {
	case CPU_DOWN_PREPARE:
		cpu_clear(hotcpu, map);
		/* Serialize against binders/unbinders of port_user[]. */
		spin_lock_irq(&port_user_lock);
		for (i = 0; i < NR_EVENT_CHANNELS; i++) {
			u = port_user[i];
			if ((u == NULL) || (u->bind_cpu != hotcpu))
				continue;
			/*
			 * Move ALL of this user's ports to one new CPU so
			 * the per-user binding stays consistent.  Scanning
			 * from i onward is sufficient: if u owned a port
			 * below i, that iteration would already have set
			 * u->bind_cpu to a CPU other than hotcpu and the
			 * check above would have skipped u here.
			 */
			newcpu = next_bind_cpu(map);
			for (j = i; j < NR_EVENT_CHANNELS; j++)
				if (port_user[j] == u)
					rebind_evtchn_to_cpu(j, newcpu);
			u->bind_cpu = newcpu;
		}
		spin_unlock_irq(&port_user_lock);
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}
Example 2
/*
 * release() handler for the evtchn device: close every event channel
 * still owned by this file handle, free the notification ring page and
 * the per-user bookkeeping.  Always returns 0.
 */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;
	struct evtchn_close close;

	/*
	 * NOTE(review): the ring page is freed and port_user[] torn down
	 * under port_user_lock with IRQs off — presumably this excludes
	 * the event delivery path that writes into u->ring; confirm the
	 * interrupt handler takes the same lock.
	 */
	spin_lock_irq(&port_user_lock);

	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		int ret;
		if (port_user[i] != u)
			continue;

		/* Detach, silence and re-home the port before asking the
		 * hypervisor to close it. */
		port_user[i] = NULL;
		mask_evtchn(i);
		rebind_evtchn_to_cpu(i, 0);

		close.port = i;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);	/* closing a port we own must not fail */
	}

	spin_unlock_irq(&port_user_lock);

	kfree(u);

	return 0;
}
Example 3
/*
 * Track events for @u arriving on a CPU other than u->bind_cpu.  A
 * correctly delivered event resets the counter; after 100 consecutive
 * misdeliveries all of the user's ports are rebound to the CPU the
 * events are actually landing on.
 */
static void evtchn_check_wrong_delivery(struct per_user_data *u)
{
	evtchn_port_t port;
	unsigned int current_cpu = smp_processor_id();

	/* Delivered to correct CPU? All is good. */
	if (u->bind_cpu == current_cpu) {
		u->nr_event_wrong_delivery = 0;
		return;
	}

	/* Tolerate up to 100 consecutive misdeliveries. */
	if (++u->nr_event_wrong_delivery < 100)
		return;

	/*
	 * NOTE(review): spin_unlock_irq() unconditionally re-enables
	 * interrupts — confirm this function is never reached from a
	 * context that already has IRQs disabled (e.g. hardirq).
	 */
	spin_lock_irq(&port_user_lock);

	/* Move every port owned by this user to the delivering CPU. */
	for (port = 0; port < NR_EVENT_CHANNELS; port++)
		if (port_user[port] == u)
			rebind_evtchn_to_cpu(port, current_cpu);

	u->bind_cpu = current_cpu;
	u->nr_event_wrong_delivery = 0;

	spin_unlock_irq(&port_user_lock);
}
Example 4
/*
 * Move the event channel backing @irq, if there is one, onto CPU
 * @tcpu.  IRQs with no valid event channel are silently ignored.
 */
static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
{
	int port = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(port))
		return;

	rebind_evtchn_to_cpu(port, tcpu);
}
Example 5
/*
 * Record @u as the owner of @port, steer the port to the user's CPU
 * (choosing one on the user's first bind) and unmask it so events can
 * be delivered.  Binding a port that already has an owner is a bug
 * (caller is expected to pass a freshly allocated/bound port).
 */
static void evtchn_bind_to_user(struct per_user_data *u, int port)
{
	spin_lock_irq(&port_user_lock);

	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;

	/* First bind for this user: pick a CPU to anchor all its ports. */
	if (u->bind_cpu == -1)
		u->bind_cpu = next_bind_cpu(cpu_online_map);

	rebind_evtchn_to_cpu(port, u->bind_cpu);

	/* Unmask only after ownership and CPU affinity are in place. */
	unmask_evtchn(port);

	spin_unlock_irq(&port_user_lock);
}
Example 6
/*
 * ioctl handler for the evtchn device.
 *
 * The three BIND_* ioctls return the newly bound port number (>= 0) on
 * success; UNBIND, NOTIFY and RESET return 0 on success.  All return a
 * negative errno on failure.
 */
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		/* Bind a virtual IRQ to a new port owned by this user.
		 * The VIRQ is always bound on vcpu 0. */
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		/* Success: rc doubles as the returned port number. */
		rc = bind_virq.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		/* Bind to a port already offered by a remote domain. */
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom  = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = bind_interdomain.local_port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		/* Allocate a fresh port in this domain that the named
		 * remote domain may later bind to. */
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom        = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = alloc_unbound.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		/* Detach and close one port owned by this user. */
		struct ioctl_evtchn_unbind unbind;
		struct evtchn_close close;
		int ret;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		/* Validate the user-supplied port before indexing. */
		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		port_user[unbind.port] = NULL;
		mask_evtchn(unbind.port);
		rebind_evtchn_to_cpu(unbind.port, 0);

		spin_unlock_irq(&port_user_lock);

		/* Hypercall issued after dropping the lock; the port is
		 * already masked and ownerless at this point. */
		close.port = unbind.port;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		/* Kick the remote end of a port this user owns. */
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		/*
		 * NOTE(review): unlike UNBIND above, the ownership check
		 * here reads port_user[] without taking port_user_lock —
		 * confirm a concurrent unbind/release cannot race this
		 * check.
		 */
		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}

	return rc;
}