Example #1
static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
						    struct net_device *dev)
{
	struct ve_struct *env;
	int i = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
	u16 vlan_tci;
	unsigned int len;
	int ret;

	vlan_tci = vlan_dev_info(dev)->vlan_id;
	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);

	skb->dev = vlan_dev_info(dev)->real_dev;
	len = skb->len;
	skb->owner_env = skb->dev->owner_env;
	env = set_exec_env(skb->owner_env);
	ret = dev_queue_xmit(skb);
	set_exec_env(env);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else
		txq->tx_dropped++;

	return NETDEV_TX_OK;
}
Example #2
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct ve_struct *env;
	int i = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
	unsigned int len;
	int ret;

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */
	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
		unsigned int orig_headroom = skb_headroom(skb);
		u16 vlan_tci;

		vlan_dev_info(dev)->cnt_encap_on_xmit++;

		vlan_tci = vlan_dev_info(dev)->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
		skb = __vlan_put_tag(skb, vlan_tci);
		if (!skb) {
			txq->tx_dropped++;
			return NETDEV_TX_OK;
		}

		if (orig_headroom < VLAN_HLEN)
			vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
	}

	skb->dev = vlan_dev_info(dev)->real_dev;
	len = skb->len;
	skb->owner_env = skb->dev->owner_env;
	env = set_exec_env(skb->owner_env);
	ret = dev_queue_xmit(skb);
	set_exec_env(env);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else
		txq->tx_dropped++;

	return NETDEV_TX_OK;
}
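
Both transmit handlers above implement the ndo_start_xmit callback of the
VLAN pseudo-device. As a rough sketch of how they are wired up (modeled on
the mainline 8021q driver of the same era; the ops tables in this tree may
carry more callbacks), the driver publishes them through struct
net_device_ops:

static const struct net_device_ops vlan_netdev_ops = {
	/* Untagged path: insert the 802.1Q header in software. */
	.ndo_start_xmit = vlan_dev_hard_start_xmit,
	/* ... open/stop/ioctl and the other callbacks go here ... */
};

static const struct net_device_ops vlan_netdev_accel_ops = {
	/* Offload path: the NIC inserts the tag (hwaccel). */
	.ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
};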
Example #3
void
nfs4_renew_state(struct work_struct *work)
{
	const struct nfs4_state_maintenance_ops *ops;
	struct nfs_client *clp =
		container_of(work, struct nfs_client, cl_renewd.work);
	struct rpc_cred *cred;
	long lease;
	unsigned long last, now;

	ops = clp->cl_mvops->state_renewal_ops;
	dprintk("%s: start\n", __func__);

	if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
		goto out;

	spin_lock(&clp->cl_lock);
	lease = clp->cl_lease_time;
	last = clp->cl_last_renewal;
	now = jiffies;
	/* Are we close to a lease timeout? */
	if (time_after(now, last + lease/3)) {
		cred = ops->get_state_renewal_cred_locked(clp);
		spin_unlock(&clp->cl_lock);
		if (cred == NULL) {
			if (list_empty(&clp->cl_delegations)) {
				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
				goto out;
			}
			nfs_expire_all_delegations(clp);
		} else {
			struct ve_struct *ve;
			/* Queue an asynchronous RENEW. */
			ve = set_exec_env(clp->cl_rpcclient->cl_xprt->owner_env);
			ops->sched_state_renewal(clp, cred);
			(void)set_exec_env(ve);
			put_rpccred(cred);
			goto out_exp;
		}
	} else {
		dprintk("%s: failed to call renewd. Reason: lease not expired \n",
				__func__);
		spin_unlock(&clp->cl_lock);
	}
	nfs4_schedule_state_renewal(clp);
out_exp:
	nfs_expire_unreferenced_delegations(clp);
out:
	dprintk("%s: done\n", __func__);
}
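
nfs4_renew_state() runs as a delayed-work handler: container_of() recovers
the nfs_client from the cl_renewd work item, and nfs4_schedule_state_renewal()
re-arms it. A simplified sketch of that re-arming side, modeled on the
mainline helper (clamping kept, debug output trimmed):

void nfs4_schedule_state_renewal(struct nfs_client *clp)
{
	long timeout;

	spin_lock(&clp->cl_lock);
	/* Aim to renew once a third of the lease has elapsed. */
	timeout = (2 * clp->cl_lease_time) / 3 + (long)clp->cl_last_renewal
		- (long)jiffies;
	if (timeout < 5 * HZ)
		timeout = 5 * HZ;	/* never requeue faster than 5s */
	cancel_delayed_work(&clp->cl_renewd);
	schedule_delayed_work(&clp->cl_renewd, timeout);
	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
	spin_unlock(&clp->cl_lock);
}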
Example #4
int vzevent_send(int event, const char *attrs_fmt, ...)
{
	va_list args;
	int len, err;
	struct ve_struct *ve;
	char *page;

	err = -ENOMEM;
	page = (char *)__get_free_page(GFP_KERNEL);
	if (!page)
		goto out;

	va_start(args, attrs_fmt);
	len = vscnprintf(page, PAGE_SIZE, attrs_fmt, args);
	va_end(args);

	ve = set_exec_env(get_ve0());
	err = do_vzevent_send(event, page, len);
	(void)set_exec_env(ve);
	free_page((unsigned long)page);
out:
	return err;
}
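
vzevent_send() is a printf-style front end: it formats the attribute string
into a freshly allocated page, temporarily switches to the host (VE0)
context, and hands the buffer to do_vzevent_send(). A hypothetical caller
might look like this (VE_EVENT_START and the attribute format are
illustrative placeholders, not names taken from this tree):

/* Hypothetical call site: event code and attribute are placeholders. */
static void notify_ve_start(struct ve_struct *ve)
{
	int err;

	err = vzevent_send(VE_EVENT_START, "veid=%d", ve->veid);
	if (err)
		printk(KERN_WARNING "vzevent: start event failed: %d\n", err);
}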
Example #5
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
	int ret;
	struct ve_struct *old_ve;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;

	old_ve = set_exec_env(skb->owner_env);
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	ret = sch_direct_xmit(skb, q, dev, txq, root_lock);
	(void)set_exec_env(old_ve);

	return ret;
}
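
The 0/>0 return contract documented above exists for the benefit of the
caller, __qdisc_run(), which keeps pulling packets until the queue drains
or the CPU should be yielded. A sketch of that loop, modeled on the
mainline version from the same era:

void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if another process needs the CPU
		 * or we have already been at it for a full jiffy.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}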
Example #6
/**
 * kthread_create_ve - create a kthread in a VE context.
 * @ve: VE (container) whose exec environment the thread is created in.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_ve(struct ve_struct *ve,
				   int (*threadfn)(void *data),
				   void *data,
				   const char namefmt[],
				   ...)
{
	struct kthread_create_info create;
	struct ve_struct *old_ve;

	old_ve = set_exec_env(ve);

	create.threadfn = threadfn;
	create.data = data;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}

	set_exec_env(old_ve);

	return create.result;
}
EXPORT_SYMBOL(kthread_create_ve);
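
As the comment says, the thread comes back stopped; the usual pattern is to
check IS_ERR(), wake the thread, and later tear it down with kthread_stop().
A hedged usage sketch (my_worker_fn and start_ve_worker are hypothetical,
following the pattern described in the comment above):

/* Hypothetical worker: sleeps until someone calls kthread_stop(). */
static int my_worker_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int start_ve_worker(struct ve_struct *ve)
{
	struct task_struct *t;

	t = kthread_create_ve(ve, my_worker_fn, NULL, "ve_worker/%d", ve->veid);
	if (IS_ERR(t))
		return PTR_ERR(t);
	wake_up_process(t);	/* kthread_create_ve() leaves it stopped */
	return 0;
}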

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *data)
{
	struct task_struct *tsk = current;
	struct kthreadd_create_info *kcreate;
	struct kthread self;
	int rc;

	self.should_stop = 0;

	kcreate = (struct kthreadd_create_info *) data;

	if (kcreate) {
		daemonize("kthreadd/%d", get_exec_env()->veid);
		kcreate->result = current;
		set_fs(KERNEL_DS);
		init_completion(&self.exited);
		current->vfork_done = &self.exited;
	} else
		set_task_comm(tsk, "kthreadd");

	/* Setup a clean context for our children to inherit. */
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	if (kcreate)
		complete(&kcreate->done);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list)) {
			if (self.should_stop)
				break;
			else
				schedule();
		}
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	do_exit(0);
}

int kthreadd_create(void)
{
	struct kthreadd_create_info create;
	int ret;
	struct ve_struct *ve = get_exec_env();

	BUG_ON(ve->_kthreadd_task);

	INIT_LIST_HEAD(&ve->_kthread_create_list);
	init_completion(&create.done);
	ret = kernel_thread(kthreadd, (void *) &create, CLONE_FS);
	if (ret < 0) {
		return ret;
	}
	wait_for_completion(&create.done);
	ve->_kthreadd_task = create.result;
	return 0;
}
EXPORT_SYMBOL(kthreadd_create);

void kthreadd_stop(struct ve_struct *ve)
{
	struct kthread *kthread;
	int ret;
	struct task_struct *k;

	if (!ve->_kthreadd_task)
		return;

	k = ve->_kthreadd_task;
	trace_sched_kthread_stop(k);
	get_task_struct(k);

	BUG_ON(!k->vfork_done);

	kthread = container_of(k->vfork_done, struct kthread, exited);
	kthread->should_stop = 1;
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);
}
EXPORT_SYMBOL(kthreadd_stop);
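
kthreadd_create() and kthreadd_stop() bracket the lifetime of a per-VE
kthreadd: the creator runs in the target VE's exec context (it reads
get_exec_env()) and caches the task in ve->_kthreadd_task, which the stop
path later shuts down through the vfork_done completion. A hedged sketch of
the pairing (ve_start_hook/ve_stop_hook are hypothetical call sites, not
functions from this tree):

/* Hypothetical VE lifecycle hooks. */
static int ve_start_hook(struct ve_struct *ve)
{
	struct ve_struct *old_ve = set_exec_env(ve);
	int err = kthreadd_create();	/* stores ve->_kthreadd_task */

	set_exec_env(old_ve);
	return err;
}

static void ve_stop_hook(struct ve_struct *ve)
{
	kthreadd_stop(ve);	/* no-op when no per-VE kthreadd exists */
}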
Example #7
static int cpt_dump_iptables(struct cpt_context * ctx)
{
	int err = 0;
#ifdef CONFIG_VE_IPTABLES
	int pid;
	int pfd[2];
	struct file *f;
	struct cpt_object_hdr v;
	char buf[16];
	loff_t pos;
	int n;
	int status;
	mm_segment_t oldfs;
	sigset_t ignore, blocked;
	struct args_t args;
	struct ve_struct *oldenv;

	if (!(get_exec_env()->_iptables_modules & VE_IP_IPTABLES_MOD))
		return 0;

	err = sc_pipe(pfd);
	if (err < 0) {
		eprintk_ctx("sc_pipe: %d\n", err);
		return err;
	}
	args.pfd = pfd;
	args.veid = VEID(get_exec_env());
	ignore.sig[0] = CPT_SIG_IGNORE_MASK;
	sigprocmask(SIG_BLOCK, &ignore, &blocked);
	oldenv = set_exec_env(get_ve0());
	err = pid = local_kernel_thread(dumpfn, (void*)&args,
			SIGCHLD | CLONE_VFORK, 0);
	set_exec_env(oldenv);
	if (err < 0) {
		eprintk_ctx("local_kernel_thread: %d\n", err);
		goto out;
	}

	f = fget(pfd[0]);
	sc_close(pfd[1]);
	sc_close(pfd[0]);

	cpt_open_section(ctx, CPT_SECT_NET_IPTABLES);

	cpt_open_object(NULL, ctx);
	v.cpt_next = CPT_NULL;
	v.cpt_object = CPT_OBJ_NAME;
	v.cpt_hdrlen = sizeof(v);
	v.cpt_content = CPT_CONTENT_NAME;

	ctx->write(&v, sizeof(v), ctx);

	pos = ctx->file->f_pos;
	do {
		oldfs = get_fs(); set_fs(KERNEL_DS);
		n = f->f_op->read(f, buf, sizeof(buf), &f->f_pos);
		set_fs(oldfs);
		if (n > 0)
			ctx->write(buf, n, ctx);
	} while (n > 0);

	if (n < 0)
		eprintk_ctx("read: %d\n", n);

	fput(f);

	oldfs = get_fs(); set_fs(KERNEL_DS);
	if ((err = sc_waitx(pid, 0, &status)) < 0)
		eprintk_ctx("wait4: %d\n", err);
	else if ((status & 0x7f) == 0) {
		err = (status & 0xff00) >> 8;
		if (err != 0) {
			eprintk_ctx("iptables-save exited with %d\n", err);
			err = -EINVAL;
		}
	} else {