Example #1
File: exynos-pm.c Project: 1paul1/dynamite
int exynos_lpa_prepare(void)
{
	int nr_calls = 0;
	int ret = 0;

	read_lock(&exynos_pm_notifier_lock);
	ret = exynos_pm_notify(LPA_PREPARE, -1, &nr_calls);
#ifdef CONFIG_SEC_PM_DEBUG
	if (unlikely(lpa_log_en) && ret < 0) {
		struct notifier_block *nb, *next_nb;
		struct notifier_block *nh = exynos_pm_notifier_chain.head;
		int nr_to_call = nr_calls - 1;

		nb = rcu_dereference_raw(nh);

		while (nb && nr_to_call) {
			next_nb = rcu_dereference_raw(nb->next);
			nb = next_nb;
			nr_to_call--;
		}

		if (nb)
			pr_info("%s: failed at %ps\n", __func__,
					(void *)nb->notifier_call);
	}
#endif
	read_unlock(&exynos_pm_notifier_lock);

	return ret;
}
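exynos_pm_notify() itself is not part of this excerpt. Judging by the nr_calls bookkeeping, it is presumably a thin wrapper around __raw_notifier_call_chain() (the event type and wrapper shape below are assumptions; only the chain and the counting semantics are visible above). Since nr_calls also counts the callback that failed, the debug block walks nr_calls - 1 links to land on the notifier that returned the error.

/* Sketch (assumption): exynos_pm_notify() as a thin wrapper that passes
 * nr_to_call/nr_calls straight through to the raw notifier chain.
 */
static int exynos_pm_notify(enum exynos_pm_event event,
			    int nr_to_call, int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&exynos_pm_notifier_chain,
					event, NULL, nr_to_call, nr_calls);
	return notifier_to_errno(ret);
}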
Example #2
/*! 2016.11.19 study -ing */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call,	int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference_raw(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
		if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
			WARN(1, "Invalid notifier called!");
			nb = next_nb;
			continue;
		}
#endif
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
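A minimal registration-and-call sketch for this extended interface (all names below are illustrative, not taken from the examples): nr_to_call = -1 means "call every notifier", and nr_calls, when non-NULL, reports how many callbacks actually ran before NOTIFY_STOP or the end of the chain.

#include <linux/notifier.h>

static int demo_notify(struct notifier_block *nb, unsigned long val, void *v)
{
	pr_info("demo notifier: event %lu\n", val);
	return NOTIFY_OK;	/* returning NOTIFY_STOP here would end the walk */
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_notify,
};
static RAW_NOTIFIER_HEAD(demo_chain);

static int demo(void)
{
	int nr_calls = 0;
	int ret;

	raw_notifier_chain_register(&demo_chain, &demo_nb);
	/* -1: no call limit; nr_calls records how many notifiers ran */
	ret = __raw_notifier_call_chain(&demo_chain, 0, NULL, -1, &nr_calls);
	pr_info("chain returned %d after %d call(s)\n", ret, nr_calls);
	return notifier_to_errno(ret);
}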
Example #3
File: fb_crr_rx.c Project: floriade/CRR_RX
static void fb_crr_rx_dtor(struct fblock *fb)
{
	int i, queue_len;
	struct sk_buff *skb_last;
	struct fb_crr_rx_priv *fb_priv_cpu;
	struct fb_crr_rx_priv __percpu *fb_priv;
	printk(KERN_ERR "[CRR_RX] Deinitialization 1!\n");

	rcu_read_lock();
	fb_priv = (struct fb_crr_rx_priv __percpu *) rcu_dereference_raw(fb->private_data);
	fb_priv_cpu = per_cpu_ptr(fb_priv, 0);	/* all CPUs share the same private data */
	rcu_read_unlock();
	printk(KERN_ERR "[CRR_RX] Deinitialization 2!\n");

	write_lock(fb_priv_cpu->rx_lock);					// LOCK
	queue_len = skb_queue_len(fb_priv_cpu->list);
	printk(KERN_ERR "[CRR_RX] Deinitialization Qlen: %d!\n", queue_len);
	for (i = 0; i < queue_len; i++) {
		skb_last = skb_dequeue(fb_priv_cpu->list);
		kfree_skb(skb_last);	/* skbs must be freed with kfree_skb(), not kfree() */
	}
	kfree(fb_priv_cpu->list);
	kfree(fb_priv_cpu->rx_seq_nr);
	kfree(fb_priv_cpu->rx_win_nr);
	kfree(fb_priv_cpu->rx_bitstream);
	write_unlock(fb_priv_cpu->rx_lock);					// UNLOCK

	free_percpu(rcu_dereference_raw(fb->private_data));
	module_put(THIS_MODULE);
	printk(KERN_ERR "[CRR_RX] Deinitialization passed!\n");
}
Example #4
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Don't care
 *			value of this parameter is -1.
 *	@nr_calls:	Records the number of notifications sent. Don't care
 *			value of this field is NULL.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call,	int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;
#if defined(CONFIG_SMP) && (defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2))
	int index = 0;
	extern struct raw_notifier_head cpu_chain;
#endif //#if defined(CONFIG_SMP) && (defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2))

	nb = rcu_dereference_raw(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
		if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
			WARN(1, "Invalid notifier called!");
			nb = next_nb;
			continue;
		}
#endif

#if defined(CONFIG_SMP) && (defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2))
		if (nl == &cpu_chain.head)
		{
		#if defined(MTK_CPU_HOTPLUG_DEBUG_1)
			printk(KERN_DEBUG "[cpu_ntf] %02lx_%02d, %p\n", val, index, nb->notifier_call);
		#endif //#if defined(MTK_CPU_HOTPLUG_DEBUG_1)
		#if defined(MTK_CPU_HOTPLUG_DEBUG_2)
			aee_rr_rec_hoplug(0, val & 0xff, index & 0xff);
		#endif //#if defined(MTK_CPU_HOTPLUG_DEBUG_2)
			++index;
		}
#endif //#if defined(CONFIG_SMP) && (defined(MTK_CPU_HOTPLUG_DEBUG_1) || defined(MTK_CPU_HOTPLUG_DEBUG_2))

		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
Example #5
static int fb_huf_netrx(const struct fblock * const fb,
			  struct sk_buff * const skb,
			  enum path_type * const dir)
{
	unsigned int seq;
	struct fb_huf_priv *fb_priv;

	fb_priv = rcu_dereference_raw(fb->private_data);
	do {
		seq = read_seqbegin(&fb_priv->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv->port[*dir]);
		if (fb_priv->port[*dir] == IDP_UNKNOWN)
			goto drop;
	} while (read_seqretry(&fb_priv->lock, seq));

	read_lock(&fb_priv->klock);

	/* send it through compression */
	compress(skb);

	read_unlock(&fb_priv->klock);

	return PPE_SUCCESS;
drop:
	printk(KERN_INFO "[fb_aes] drop packet. Out of key material?\n");
	kfree_skb(skb);
	return PPE_DROPPED;
}
Example #6
static int fb_counter_netrx(const struct fblock * const fb,
			    struct sk_buff * const skb,
			    enum path_type * const dir)
{
	int drop = 0;
	unsigned int seq;
	struct fb_counter_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	prefetchw(skb->cb);
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
		if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
			drop = 1;
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	u64_stats_update_begin(&fb_priv_cpu->syncp);
	fb_priv_cpu->packets++;
	fb_priv_cpu->bytes += skb->len;
	u64_stats_update_end(&fb_priv_cpu->syncp);

	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
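The struct behind fb_priv_cpu is not shown in these excerpts; a plausible shape, inferred purely from the accesses above and in example #10, would be (an assumption, not the project's actual definition):

struct fb_counter_priv {
	idp_t port[2];			/* next-block IDP per path direction */
	seqlock_t lock;			/* guards port[] against rebinding */
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* keeps 64-bit counters consistent on 32-bit SMP */
} ____cacheline_aligned;

The seqlock read loop retries if a concurrent FBLOCK_BIND_IDP/UNBIND_IDP event rewrites port[] mid-read, while u64_stats_update_begin()/end() lets the per-CPU readers in example #10 fetch both counters atomically.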
Example #7
/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn) {
		ioc_release_depth_inc(q);
		et->ops.elevator_exit_icq_fn(icq);
		ioc_release_depth_dec(q);
	}

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
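For context, the RCU callback referenced at the end reads back the cache pointer recorded in @icq; in block/blk-ioc.c it is, roughly:

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}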
Example #8
/*
 * Initialize the NFS4 callback service
 */
static int nfs4_init_callback(struct nfs_client *clp)
{
	int error;

	if (clp->rpc_ops->version == 4) {
		struct rpc_xprt *xprt;

		xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt);

		if (nfs4_has_session(clp)) {
			error = xprt_setup_backchannel(xprt,
						NFS41_BC_MIN_CALLBACKS);
			if (error < 0)
				return error;
		}

		error = nfs_callback_up(clp->cl_mvops->minor_version, xprt);
		if (error < 0) {
			dprintk("%s: failed to start callback. Error = %d\n",
				__func__, error);
			return error;
		}
		__set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
	}
	return 0;
}
Example #9
File: fb_bpf.c Project: digideskio/lana
static int fb_bpf_netrx(const struct fblock * const fb,
			struct sk_buff * const skb,
			enum path_type * const dir)
{
	int drop = 0;
	unsigned int pkt_len;
	unsigned long flags;
	struct fb_bpf_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	if (fb_priv_cpu->filter) {
		pkt_len = SK_RUN_FILTER(fb_priv_cpu->filter, skb);
		if (pkt_len < skb->len) {
			spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
			kfree_skb(skb);
			return PPE_DROPPED;
		}
	}
	write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
	if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
		drop = 1;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
Example #10
static int fb_counter_proc_show(struct seq_file *m, void *v)
{
	u64 pkts_sum = 0, bytes_sum = 0;
	unsigned int cpu;
	char sline[256];
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_counter_priv __percpu *fb_priv;

	rcu_read_lock();
	fb_priv = (struct fb_counter_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		unsigned int start;
		struct fb_counter_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		do {
			start = u64_stats_fetch_begin(&fb_priv_cpu->syncp);
			pkts_sum += fb_priv_cpu->packets;
			bytes_sum += fb_priv_cpu->bytes;
		} while (u64_stats_fetch_retry(&fb_priv_cpu->syncp, start));
	}
	put_online_cpus();

	memset(sline, 0, sizeof(sline));
	snprintf(sline, sizeof(sline), "%llu %llu\n", pkts_sum, bytes_sum);
	seq_puts(m, sline);

	return 0;
}
Example #11
static int fb_udp_netrx_out(const struct fblock * const fb,
			    struct sk_buff * const skb)
{
	int fdrop = 0;
	idp_t next_fb;
	unsigned int seq;
	struct udphdr *hdr;
	struct fb_udp_priv *fb_priv;

	fb_priv = rcu_dereference_raw(fb->private_data);
	do {
		seq = read_seqbegin(&fb_priv->lock);
		next_fb = fb_priv->port[TYPE_EGRESS];
		if (next_fb == IDP_UNKNOWN)
			fdrop = 1;
	} while (read_seqretry(&fb_priv->lock, seq));
	if (fdrop)
		goto drop;

	hdr = (struct udphdr *) skb_push(skb, sizeof(*hdr));
	if (!hdr)
		goto drop;

	hdr->source = htons(fb_priv->own_port);
	hdr->dest = htons(fb_priv->rem_port);
	hdr->len = htons(skb->len);
	hdr->check = 0;		/* zero means "no checksum", legal for UDP over IPv4 */

	write_next_idp_to_skb(skb, fb->idp, next_fb);
	return PPE_SUCCESS;
drop:
	kfree_skb(skb);
	return PPE_DROPPED;
}
Example #12
File: fb_bpf.c Project: digideskio/lana
static int fb_bpf_init_filter_cpus(struct fblock *fb,
				   struct sock_fprog_kern *fprog)
{
	int err = 0;
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fprog || !fb)
		return -EINVAL;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		err = fb_bpf_init_filter(fb_priv_cpu, fprog, cpu);
		if (err != 0) {
			printk(KERN_ERR "[%s::%s] fb_bpf_init_filter error: %d\n",
			       fb->name, fb->factory->type, err);
			break;
		}
	}
	put_online_cpus();

	return err;
}
Example #13
File: vport.c Project: JunoZhu/ovs
/**
 *	ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or from an error path,
	 * therefore it is safe to use a raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	kfree(vport);
}
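A sketch of the kind of caller the comment has in mind (illustrative; the rcu field name and the helper are assumptions): freeing is deferred past a grace period, so by the time ovs_vport_free() runs no RCU reader can still see the vport.

/* Sketch (assumption): deferred free of a vport via an RCU callback. */
static void vport_free_rcu_cb(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	ovs_vport_free(vport);
}

/* ... at the call site, instead of freeing immediately:
 *	call_rcu(&vport->rcu, vport_free_rcu_cb);
 */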
Example #14
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	*nodep = (void *)entry;
	return offset;
}
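A caller drives this descent in a loop until the entry is no longer an internal node; the sketch below is roughly the shape of __radix_tree_lookup() in kernels of this era (simplified; radix_tree_load_root() and RADIX_TREE_RETRY are internals of lib/radix-tree.c in that period):

static void *lookup_sketch(struct radix_tree_root *root, unsigned long index)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

restart:
	parent = NULL;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		if (node == RADIX_TREE_RETRY)
			goto restart;	/* slot was recycled under us */
		parent = entry_to_node(node);
		radix_tree_descend(parent, &node, index);
	}

	return node;	/* the leaf entry, or NULL */
}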
Example #15
File: radix-tree.c Project: AK101111/linux
static unsigned int radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		unsigned long siboff = get_slot_offset(parent, entry);
		if (siboff < RADIX_TREE_MAP_SIZE) {
			offset = siboff;
			entry = rcu_dereference_raw(parent->slots[offset]);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}
Example #16
/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				goto done;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

done:
	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}
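The locking idiom above generalizes. A minimal skeleton of the pattern (all names illustrative): the first pass runs locklessly with an even seq, and if need_seqretry() detects a concurrent writer the loop repeats, this time actually holding the lock.

/* Sketch: lockless-then-locked lookup (demo_tree/demo_walk are illustrative). */
struct demo_tree {
	seqlock_t lock;
	struct rb_root root;
};

static struct demo_item *demo_find(struct demo_tree *t, unsigned long key)
{
	struct demo_item *found;
	unsigned int seq = 0;

	do {
		read_seqbegin_or_lock(&t->lock, &seq);
		found = demo_walk(t, key);	/* rbtree walk via rcu_dereference_raw() */
	} while (need_seqretry(&t->lock, seq));
	done_seqretry(&t->lock, seq);

	return found;
}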
Example #17
static int fb_udp_event(struct notifier_block *self, unsigned long cmd,
			void *args)
{
	int ret = NOTIFY_OK;
	struct fblock *fb;
	struct fb_udp_priv *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		struct fblock_bind_msg *msg = args;
		if (fb_priv->port[msg->dir] == IDP_UNKNOWN) {
			write_seqlock(&fb_priv->lock);
			fb_priv->port[msg->dir] = msg->idp;
			write_sequnlock(&fb_priv->lock);
		} else {
			ret = NOTIFY_BAD;
		}
		break; }
	case FBLOCK_UNBIND_IDP: {
		struct fblock_bind_msg *msg = args;
		if (fb_priv->port[msg->dir] == msg->idp) {
			write_seqlock(&fb_priv->lock);
			fb_priv->port[msg->dir] = IDP_UNKNOWN;
			write_sequnlock(&fb_priv->lock);
		} else {
			ret = NOTIFY_BAD;
		}
		break; }
	case FBLOCK_SET_OPT: {
		struct fblock_opt_msg *msg = args;
		if (!strncmp(msg->key, "src-port", strlen("src-port"))) {
			fb_priv->own_port = (uint16_t) simple_strtoul(msg->val, NULL, 10);
			printk(KERN_INFO "[udp] src bound to %u\n", fb_priv->own_port);
		} else if (!strncmp(msg->key, "dst-port", strlen("dst-port"))) {
			fb_priv->rem_port = (uint16_t) simple_strtoul(msg->val, NULL, 10);
			printk(KERN_INFO "[udp] dst bound to %u\n", fb_priv->rem_port);
		}
		break; }
	default:
		break;
	}

	return ret;
}
Example #18
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}
Example #19
void fdleak_debug_print(struct files_struct *files)
{
	struct fdtable *fdt;
	unsigned int n;

	fdt = files_fdtable(files);

	for (n = 0; n < fdt->max_fds; n++) {
		if (rcu_dereference_raw(fdt->fd[n]) != NULL) {
			printk("[TOO MANY OPEN FILES] (%u)/(%u) = %s\n",
			       n, fdt->max_fds,
			       fdt->fd[n]->f_path.dentry->d_name.name);
		}
	}
}
Example #20
File: fork.c Project: 19Dan01/linux
/**
 * set_mm_exe_file - change a reference to the mm's executable file
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve task is single
 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
 * mm->exe_file, but does so without using set_mm_exe_file() in order
 * to avoid the need for any locks.
 */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file)
		get_file(new_exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file)
		fput(old_exe_file);
}
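The reader side, by contrast, cannot rely on exclusivity; kernel/fork.c in kernels of this era pairs the function above with roughly the following, which takes the RCU read lock and grabs a reference under it:

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = rcu_dereference(mm->exe_file);
	/* get_file_rcu() fails if the file is already being torn down */
	if (exe_file && !get_file_rcu(exe_file))
		exe_file = NULL;
	rcu_read_unlock();
	return exe_file;
}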
Example #21
/*! 2017. 3.18 study -ing */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference_raw(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
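Most callers go through the public wrapper in kernel/notifier.c, which supplies the don't-care values (-1 and NULL) described in example #4's kernel-doc:

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				 unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);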
Example #22
static int fb_huf_proc_show(struct seq_file *m, void *v)
{
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_huf_priv *fb_priv;
	char sline[64];

	rcu_read_lock();
	fb_priv = rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	memset(sline, 0, sizeof(sline));

	read_lock(&fb_priv->klock);
	snprintf(sline, sizeof(sline), "hello from huf");
	read_unlock(&fb_priv->klock);

	seq_puts(m, sline);
	return 0;
}
Example #23
File: fb_bpf.c Project: digideskio/lana
static void fb_bpf_cleanup_filter_cpus(struct fblock *fb)
{
	unsigned int cpu;
	struct fb_bpf_priv __percpu *fb_priv;

	if (!fb)
		return;

	rcu_read_lock();
	fb_priv = (struct fb_bpf_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_bpf_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		fb_bpf_cleanup_filter(fb_priv_cpu);
	}
	put_online_cpus();
}
Example #24
static int fb_counter_proc_show(struct seq_file *m, void *v)
{
	char sline[256];
	unsigned int start;
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_counter_priv *fb_priv;

	rcu_read_lock();
	fb_priv = rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();
	do {
		start = u64_stats_fetch_begin(&fb_priv->syncp);
		memset(sline, 0, sizeof(sline));
		snprintf(sline, sizeof(sline), "%llu %llu\n",
			 fb_priv->packets, fb_priv->bytes);
	} while (u64_stats_fetch_retry(&fb_priv->syncp, start));
	seq_puts(m, sline);

	return 0;
}
Example #25
File: fb_bpf.c Project: digideskio/lana
static int fb_bpf_proc_show_filter(struct seq_file *m, void *v)
{
	unsigned long flags;
	struct fblock *fb = (struct fblock *) m->private;
	struct fb_bpf_priv *fb_priv_cpu;
	struct sk_filter *sf;

	get_online_cpus();
	rcu_read_lock();
	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	rcu_read_unlock();

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sf = fb_priv_cpu->filter;
	if (sf) {
		unsigned int i;
		if (sf->bpf_func == sk_run_filter)
			seq_puts(m, "bpf jit: 0\n");
		else
			seq_puts(m, "bpf jit: 1\n");
		seq_puts(m, "code:\n");
		for (i = 0; i < sf->len; ++i) {
			char sline[32];
			memset(sline, 0, sizeof(sline));
			snprintf(sline, sizeof(sline),
				 "{ 0x%x, %u, %u, 0x%x }\n",
				 sf->insns[i].code,
				 sf->insns[i].jt,
				 sf->insns[i].jf,
				 sf->insns[i].k);
			sline[sizeof(sline) - 1] = 0;
			seq_puts(m, sline);
		}
	}
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);
	put_online_cpus();

	return 0;
}
Example #26
static int fb_huf_event(struct notifier_block *self, unsigned long cmd,
			  void *args)
{
	int ret = NOTIFY_OK;
	struct fblock *fb;
	struct fb_huf_priv *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		struct fblock_bind_msg *msg = args;
		if (fb_priv->port[msg->dir] == IDP_UNKNOWN) {
			write_seqlock(&fb_priv->lock);
			fb_priv->port[msg->dir] = msg->idp;
			write_sequnlock(&fb_priv->lock);
		} else {
			ret = NOTIFY_BAD;
		}
		break; }
	case FBLOCK_UNBIND_IDP: {
		struct fblock_bind_msg *msg = args;
		if (fb_priv->port[msg->dir] == msg->idp) {
			write_seqlock(&fb_priv->lock);
			fb_priv->port[msg->dir] = IDP_UNKNOWN;
			write_sequnlock(&fb_priv->lock);
		} else {
			ret = NOTIFY_BAD;
		}
		break; }
	default:
		break;
	}

	return ret;
}
Example #27
File: cell.c Project: AlexShiLucky/linux
/*
 * Look up and get an activation reference on a cell record under RCU
 * conditions.  The caller must hold the RCU read lock.
 */
struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
				     const char *name, unsigned int namesz)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n, seq = 0, ret = 0;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		if (cell)
			afs_put_cell(net, cell);
		cell = NULL;
		ret = -ENOENT;

		read_seqbegin_or_lock(&net->cells_lock, &seq);

		if (!name) {
			cell = rcu_dereference_raw(net->ws_cell);
			if (cell) {
				afs_get_cell(cell);
				break;
			}
			ret = -EDESTADDRREQ;
			continue;
		}

		p = rcu_dereference_raw(net->cells.rb_node);
		while (p) {
			cell = rb_entry(p, struct afs_cell, net_node);

			n = strncasecmp(cell->name, name,
					min_t(size_t, cell->name_len, namesz));
			if (n == 0)
				n = cell->name_len - namesz;
			if (n < 0) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (n > 0) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				if (atomic_inc_not_zero(&cell->usage)) {
					ret = 0;
					break;
				}
				/* We want to repeat the search, this time with
				 * the lock properly locked.
				 */
			}
			cell = NULL;
		}

	} while (need_seqretry(&net->cells_lock, seq));

	done_seqretry(&net->cells_lock, seq);

	return ret == 0 ? cell : ERR_PTR(ret);
}
Example #28
static void fb_udp_dtor(struct fblock *fb)
{
	kfree(rcu_dereference_raw(fb->private_data));
	module_put(THIS_MODULE);
}
Example #29
static void fb_counter_dtor(struct fblock *fb)
{
	free_percpu(rcu_dereference_raw(fb->private_data));
	remove_proc_entry(fb->name, fblock_proc_dir);
	module_put(THIS_MODULE);
}
Example #30
static int fb_counter_event(struct notifier_block *self, unsigned long cmd,
			    void *args)
{
	int ret = NOTIFY_OK;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_counter_priv __percpu *fb_priv;

	rcu_read_lock();
	fb = rcu_dereference_raw(container_of(self, struct fblock_notifier, nb)->self);
	fb_priv = (struct fb_counter_priv __percpu *) rcu_dereference_raw(fb->private_data);
	rcu_read_unlock();

	switch (cmd) {
	case FBLOCK_BIND_IDP: {
		int bound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_counter_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == IDP_UNKNOWN) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = msg->idp;
				write_sequnlock(&fb_priv_cpu->lock);
				bound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}
		put_online_cpus();
		if (bound)
			printk(KERN_INFO "[%s::%s] port %s bound to IDP%u\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir], msg->idp);
		} break;
	case FBLOCK_UNBIND_IDP: {
		int unbound = 0;
		struct fblock_bind_msg *msg = args;
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct fb_counter_priv *fb_priv_cpu;
			fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
			if (fb_priv_cpu->port[msg->dir] == msg->idp) {
				write_seqlock(&fb_priv_cpu->lock);
				fb_priv_cpu->port[msg->dir] = IDP_UNKNOWN;
				write_sequnlock(&fb_priv_cpu->lock);
				unbound = 1;
			} else {
				ret = NOTIFY_BAD;
				break;
			}
		}
		put_online_cpus();
		if (unbound)
			printk(KERN_INFO "[%s::%s] port %s unbound\n",
			       fb->name, fb->factory->type,
			       path_names[msg->dir]);
		} break;
	case FBLOCK_SET_OPT: {
		struct fblock_opt_msg *msg = args;
		printk("Set option %s to %s!\n", msg->key, msg->val);
		} break;
	default:
		break;
	}

	return ret;
}