Example #1
static int __init nf_nat_init(void)
{
	int ret;

	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
	if (ret)
		return ret;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		rhltable_destroy(&nf_nat_bysource_table);
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	nf_ct_helper_expectfn_register(&follow_master_nat);

	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
			   nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
	BUG_ON(nf_nat_decode_session_hook != NULL);
	RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
	return 0;

 cleanup_extend:
	rhltable_destroy(&nf_nat_bysource_table);
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}
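This init/exit pairing is the canonical hook life cycle: at module init no reader can observe the pointer yet, so RCU_INIT_POINTER() may skip the write barrier that rcu_assign_pointer() would emit, while teardown must unpublish and then wait out a grace period. A minimal self-contained sketch of the same pattern, with hypothetical names throughout:

#include <linux/module.h>
#include <linux/rcupdate.h>

typedef int (*hook_fn)(int arg);
static hook_fn __rcu my_hook;		/* hypothetical hook slot */

static int my_hook_impl(int arg)	/* hypothetical implementation */
{
	return arg + 1;
}

static int __init my_init(void)
{
	/* Nothing can call the hook before init returns, so no reader
	 * can see a half-published pointer: no barrier needed.
	 */
	BUG_ON(rcu_access_pointer(my_hook) != NULL);
	RCU_INIT_POINTER(my_hook, my_hook_impl);
	return 0;
}

static void __exit my_exit(void)
{
	/* Readers may be running now: unpublish, then wait them out
	 * before this module's text can go away.
	 */
	RCU_INIT_POINTER(my_hook, NULL);
	synchronize_rcu();
}

module_init(my_init);
module_exit(my_exit);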
Example #2
/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}
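The synchronize_rcu() after the two NULL stores is what makes the subsequent in-place edits of the old dst entries safe: once it returns, every reader that might have fetched vrf->rth or vrf->rth_local before the stores has left its critical section. A reader-side sketch of the access being waited out (hypothetical helper, not the actual vrf lookup path):

static struct rtable *vrf_rtable_get_sketch(struct net_vrf *vrf)
{
	struct rtable *rth;

	rcu_read_lock();
	rth = rcu_dereference(vrf->rth);
	if (rth)
		dst_hold(&rth->dst);	/* pin it beyond the read section */
	rcu_read_unlock();

	return rth;
}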
Example #3
static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	struct device_node *np;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "ibm,compression");
	if (!np)
		return -ENODEV;
	/* Only probing for the node's existence; drop the reference. */
	of_node_put(np);

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata) {
		pr_err("Could not allocate memory for device data\n");
		return -ENOMEM;
	}
	RCU_INIT_POINTER(devdata, new_devdata);

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		/* Unpublish before freeing so no reader is left holding
		 * a dangling devdata pointer.
		 */
		RCU_INIT_POINTER(devdata, NULL);
		synchronize_rcu();
		kfree(new_devdata);
		return ret;
	}

	return 0;
}
Example #4
void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
{
	struct noise_keypair *old;

	spin_lock_bh(&keypairs->keypair_update_lock);

	/* We zero the next_keypair before zeroing the others, so that
	 * wg_noise_received_with_keypair returns early before subsequent ones
	 * are zeroed.
	 */
	old = rcu_dereference_protected(keypairs->next_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	RCU_INIT_POINTER(keypairs->next_keypair, NULL);
	wg_noise_keypair_put(old, true);

	old = rcu_dereference_protected(keypairs->previous_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
	wg_noise_keypair_put(old, true);

	old = rcu_dereference_protected(keypairs->current_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	RCU_INIT_POINTER(keypairs->current_keypair, NULL);
	wg_noise_keypair_put(old, true);

	spin_unlock_bh(&keypairs->keypair_update_lock);
}
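All three slots are cleared with the same lock-protected swap. The rcu_dereference_protected(..., lockdep_is_held(...)) form documents, and under CONFIG_PROVE_RCU verifies, that keypair_update_lock excludes concurrent writers, so no read-side critical section is needed; the NULL store can then use the barrier-free initializer. A hypothetical helper capturing the repetition:

static struct noise_keypair *
keypair_take(struct noise_keypairs *keypairs,
	     struct noise_keypair __rcu **slot)
{
	struct noise_keypair *old;

	old = rcu_dereference_protected(*slot,
		lockdep_is_held(&keypairs->keypair_update_lock));
	RCU_INIT_POINTER(*slot, NULL);	/* NULL store: no barrier needed */
	return old;
}

Each step of the clear then reads as wg_noise_keypair_put(keypair_take(keypairs, &keypairs->next_keypair), true).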
Example #5
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}
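RCU_INIT_POINTER() is sufficient for every slot here because the flow is not yet inserted into any table, so no reader can see the half-built stats array. Once the flow is live, the NULL per-node slots must be filled with rcu_assign_pointer() instead. A lazy-populate sketch (hypothetical, loosely modeled on how such per-node slots are filled on first use):

/* Caller holds rcu_read_lock(). */
static struct flow_stats *flow_stats_get_sketch(struct sw_flow *flow,
						int node, spinlock_t *lock)
{
	struct flow_stats *stats = rcu_dereference(flow->stats[node]);

	if (stats)
		return stats;

	spin_lock(lock);
	stats = rcu_dereference_protected(flow->stats[node],
					  lockdep_is_held(lock));
	if (!stats) {
		stats = kmem_cache_alloc_node(flow_stats_cache,
					      GFP_ATOMIC | __GFP_ZERO, node);
		if (stats) {
			spin_lock_init(&stats->lock);
			/* The flow is visible to readers now, so the
			 * publication needs rcu_assign_pointer()'s barrier.
			 */
			rcu_assign_pointer(flow->stats[node], stats);
		}
	}
	spin_unlock(lock);

	return stats;
}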
Example #6
static int __init init(void)
{
	printk("nf_nat_rtsp v" IP_NF_RTSP_VERSION " loading\n");

	if (stunaddr != NULL)
		extip = in_aton(stunaddr);

	if (destaction != NULL) {
		if (strcmp(destaction, "auto") == 0)
			dstact = DSTACT_AUTO;
		else if (strcmp(destaction, "strip") == 0)
			dstact = DSTACT_STRIP;
		else if (strcmp(destaction, "none") == 0)
			dstact = DSTACT_NONE;
	}

	BUG_ON(rcu_access_pointer(nf_nat_rtsp_hook) != NULL);
	RCU_INIT_POINTER(nf_nat_rtsp_hook, help);

	BUG_ON(rcu_access_pointer(nf_nat_rtsp_hook_expectfn) != NULL);
	RCU_INIT_POINTER(nf_nat_rtsp_hook_expectfn, expected);

	return 0;
}
Example #7
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}
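The comment before the final pointer update is the key point: write_seqcount_begin() already issued the write barrier that orders the initialization of fobj before its publication, so the cheaper RCU_INIT_POINTER() can stand in for rcu_assign_pointer(). The reader this pairs with retries on concurrent updates; a sketch (hypothetical, modeled on how such seqcount-protected fence lists are typically read):

static unsigned int shared_count_sketch(struct reservation_object *obj)
{
	struct reservation_object_list *fobj;
	unsigned int seq, count;

	rcu_read_lock();
	do {
		seq = read_seqcount_begin(&obj->seq);
		fobj = rcu_dereference(obj->fence);
		count = fobj ? fobj->shared_count : 0;
	} while (read_seqcount_retry(&obj->seq, seq));
	rcu_read_unlock();

	return count;
}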
Example #8
static void __exit nf_nat_helper_pptp_fini(void)
{
	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
	synchronize_rcu();
}
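One synchronize_rcu() after all four NULL stores is enough: a single grace period waits out every reader, no matter which of the hooks it sampled. The callers these stores race against follow the usual guarded-call pattern; a self-contained sketch with hypothetical names:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>

typedef void (*pkt_hook_fn)(struct sk_buff *skb);
static pkt_hook_fn __rcu my_hook;	/* hypothetical hook slot */

static void call_hook_sketch(struct sk_buff *skb)
{
	pkt_hook_fn hook;

	rcu_read_lock();
	hook = rcu_dereference(my_hook);
	if (hook)
		hook(skb);	/* module text can't vanish mid-call */
	rcu_read_unlock();
}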
Example #9
File: bearer.c Project: 3null/linux
/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
 *
 * Mark L2 bearer as inactive so that incoming buffers are thrown away,
 * then get worker thread to complete bearer cleanup.  (Can't do cleanup
 * here because cleanup code needs to sleep and caller holds spinlocks.)
 */
void tipc_disable_l2_media(struct tipc_bearer *b)
{
	struct net_device *dev;

	dev = (struct net_device *)rtnl_dereference(b->media_ptr);
	RCU_INIT_POINTER(b->media_ptr, NULL);
	RCU_INIT_POINTER(dev->tipc_ptr, NULL);
	synchronize_net();
	dev_put(dev);
}
Example #10
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;
	struct net *net = sock_net(sk);

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	mutex_lock(&net->ipv4.ra_mutex);
	for (rap = &net->ipv4.ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				mutex_unlock(&net->ipv4.ra_mutex);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* don't let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			RCU_INIT_POINTER(*rap, ra->next);
			mutex_unlock(&net->ipv4.ra_mutex);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) by one RCU grace
			 * period. This guarantees that ip_call_ra_chain()
			 * doesn't need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (!new_ra) {
		mutex_unlock(&net->ipv4.ra_mutex);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	RCU_INIT_POINTER(new_ra->next, ra);
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	mutex_unlock(&net->ipv4.ra_mutex);

	return 0;
}
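Note that the unlink path stores a non-NULL value with RCU_INIT_POINTER(*rap, ra->next): this is legitimate because ra->next was already published and is not being modified, so readers need no new ordering; the insertion path, whose new_ra is fresh memory, must use rcu_assign_pointer(). The traversal being raced against looks roughly like this (sketch modeled on ip_call_ra_chain, delivery details elided):

static void ra_chain_walk_sketch(struct net *net, struct sk_buff *skb)
{
	struct ip_ra_chain *ra;

	rcu_read_lock();
	for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
	     ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		if (sk) {
			/* ... deliver a clone of skb to sk ... */
		}
	}
	rcu_read_unlock();
}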
Example #11
File: br_vlan.c Project: 7799/linux
static void __vlan_flush(struct net_port_vlans *v)
{
	smp_wmb();
	v->pvid = 0;
	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
	if (v->port_idx)
		RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
	else
		RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
	kfree_rcu(v, rcu);
}
Example #12
static void add_new_keypair(struct noise_keypairs *keypairs,
			    struct noise_keypair *new_keypair)
{
	struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;

	spin_lock_bh(&keypairs->keypair_update_lock);
	previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	next_keypair = rcu_dereference_protected(keypairs->next_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	current_keypair = rcu_dereference_protected(keypairs->current_keypair,
		lockdep_is_held(&keypairs->keypair_update_lock));
	if (new_keypair->i_am_the_initiator) {
		/* If we're the initiator, it means we've sent a handshake, and
		 * received a confirmation response, which means this new
		 * keypair can now be used.
		 */
		if (next_keypair) {
			/* If there already was a next keypair pending, we
			 * demote it to be the previous keypair, and free the
			 * existing current. Note that this means KCI can result
			 * in this transition. It would perhaps be more sound to
			 * always just get rid of the unused next keypair
			 * instead of putting it in the previous slot, but this
			 * might be a bit less robust. Something to think about
			 * for the future.
			 */
			RCU_INIT_POINTER(keypairs->next_keypair, NULL);
			rcu_assign_pointer(keypairs->previous_keypair,
					   next_keypair);
			wg_noise_keypair_put(current_keypair, true);
		} else /* If there wasn't an existing next keypair, we replace
			* the previous with the current one.
			*/
			rcu_assign_pointer(keypairs->previous_keypair,
					   current_keypair);
		/* At this point we can get rid of the old previous keypair, and
		 * set up the new keypair.
		 */
		wg_noise_keypair_put(previous_keypair, true);
		rcu_assign_pointer(keypairs->current_keypair, new_keypair);
	} else {
		/* If we're the responder, it means we can't use the new keypair
		 * until we receive confirmation via the first data packet, so
		 * we get rid of the existing previous one, the possibly
		 * existing next one, and slide in the new next one.
		 */
		rcu_assign_pointer(keypairs->next_keypair, new_keypair);
		wg_noise_keypair_put(next_keypair, true);
		RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
		wg_noise_keypair_put(previous_keypair, true);
	}
	spin_unlock_bh(&keypairs->keypair_update_lock);
}
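The division of labor throughout this function is worth making explicit: every non-NULL publication goes through rcu_assign_pointer(), which orders the new keypair's initialization before the pointer store, while every NULL store uses RCU_INIT_POINTER(), since NULL carries no payload a reader could observe half-built. A tiny hypothetical helper stating the rule:

static void keypair_slot_set(struct noise_keypair __rcu **slot,
			     struct noise_keypair *new)
{
	if (new)
		rcu_assign_pointer(*slot, new);	/* barrier before publish */
	else
		RCU_INIT_POINTER(*slot, NULL);	/* no barrier needed */
}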
Example #13
/**
 * bearer_disable
 *
 * Note: This routine assumes caller holds RTNL lock.
 */
static void bearer_disable(struct net *net, struct tipc_bearer *b)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;

	pr_info("Disabling bearer <%s>\n", b->name);
	b->media->disable_media(b);
	tipc_node_delete_links(net, bearer_id);
	RCU_INIT_POINTER(b->media_ptr, NULL);
	if (b->link_req)
		tipc_disc_delete(b->link_req);
	RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
	kfree_rcu(b, rcu);
}
Example #14
void stp_proto_unregister(const struct stp_proto *proto)
{
	mutex_lock(&stp_proto_mutex);
	if (is_zero_ether_addr(proto->group_address))
		RCU_INIT_POINTER(stp_proto, NULL);
	else
		RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
					       GARP_ADDR_MIN], NULL);
	synchronize_rcu();

	if (--sap_registered == 0)
		llc_sap_put(sap);
	mutex_unlock(&stp_proto_mutex);
}
Example #15
static int __init nf_nat_helper_pptp_init(void)
{
	BUG_ON(nf_nat_pptp_hook_outbound != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);

	BUG_ON(nf_nat_pptp_hook_inbound != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);

	BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);

	BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
	RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
	return 0;
}
Example #16
static int tcf_sample_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a, int ovr,
			   int bind)
{
	struct tc_action_net *tn = net_generic(net, sample_net_id);
	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
	struct psample_group *psample_group;
	struct tc_sample *parm;
	struct tcf_sample *s;
	bool exists = false;
	int ret;

	if (!nla)
		return -EINVAL;
	ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy, NULL);
	if (ret < 0)
		return ret;
	if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
	    !tb[TCA_SAMPLE_PSAMPLE_GROUP])
		return -EINVAL;

	parm = nla_data(tb[TCA_SAMPLE_PARMS]);

	exists = tcf_idr_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_sample_ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}
	s = to_sample(*a);

	s->tcf_action = parm->action;
	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
	s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
	psample_group = psample_group_get(net, s->psample_group_num);
	if (!psample_group) {
		if (ret == ACT_P_CREATED)
			tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(s->psample_group, psample_group);

	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
		s->truncate = true;
		s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
	}

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}
Example #17
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	while (last_seq != seq) {
		struct fence *fence, **ptr;

		ptr = &drv->fences[++last_seq & drv->num_fences_mask];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		BUG_ON(!fence);

		r = fence_signal(fence);
		if (!r)
			FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		fence_put(fence);
	}
}
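The literal 1 passed as the condition to rcu_dereference_protected() asserts what the comment says: exactly one thread ever consumes a given fence slot, so there is no lock or RCU state worth checking. The producer half still needs ordered publication, because this interrupt-context consumer reads concurrently; a rough sketch of it (hypothetical, loosely modeled on the emit path):

static void fence_slot_publish_sketch(struct amdgpu_ring *ring, u32 seq,
				      struct fence *fence)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct fence __rcu **ptr = &drv->fences[seq & drv->num_fences_mask];

	/* ... wait for the slot's previous occupant to be signaled ... */

	rcu_assign_pointer(*ptr, fence);	/* order init before publish */
}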
Example #18
static void __exit nf_nat_cleanup(void)
{
	unsigned int i;

	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		kfree(nf_nat_l4protos[i]);

	rhltable_destroy(&nf_nat_bysource_table);
}
Example #19
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (!rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
		return -EINVAL;
	}
	if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
		edev->en_ops->bnxt_free_msix(edev, ulp_id);

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	return 0;
}
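rcu_access_pointer() near the top of this function is the right tool for a pure existence check: it fetches the pointer value without claiming the right to dereference it, so it needs neither rcu_read_lock() nor a lockdep condition. Actually calling through the ops table requires the full guarded form; a hypothetical sketch:

static void ulp_stop_sketch(struct bnxt_ulp *ulp)
{
	const struct bnxt_ulp_ops *ops;

	rcu_read_lock();
	ops = rcu_dereference(ulp->ulp_ops);
	if (ops)
		ops->ulp_stop(ulp->handle);	/* hypothetical callback */
	rcu_read_unlock();
}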
Example #20
File: qp.c Project: 020gzh/linux
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}
Example #21
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
Example #22
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
Example #23
static int tpci200_free_irq(struct ipack_device *dev)
{
	struct slot_irq *slot_irq;
	struct tpci200_board *tpci200;

	tpci200 = check_slot(dev);
	if (tpci200 == NULL)
		return -EINVAL;

	if (mutex_lock_interruptible(&tpci200->mutex))
		return -ERESTARTSYS;

	if (tpci200->slots[dev->slot].irq == NULL) {
		mutex_unlock(&tpci200->mutex);
		return -EINVAL;
	}

	tpci200_disable_irq(tpci200, dev->slot);
	slot_irq = tpci200->slots[dev->slot].irq;
	/* uninstall handler */
	RCU_INIT_POINTER(tpci200->slots[dev->slot].irq, NULL);
	synchronize_rcu();
	kfree(slot_irq);
	mutex_unlock(&tpci200->mutex);
	return 0;
}
Example #24
File: super.c Project: avagin/linux
/*
 * allocate an AFS inode struct from our slab cache
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
	struct afs_vnode *vnode;

	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
	if (!vnode)
		return NULL;

	atomic_inc(&afs_count_active_inodes);

	/* Reset anything that shouldn't leak from one inode to the next. */
	memset(&vnode->fid, 0, sizeof(vnode->fid));
	memset(&vnode->status, 0, sizeof(vnode->status));

	vnode->volume		= NULL;
	vnode->lock_key		= NULL;
	vnode->permit_cache	= NULL;
	RCU_INIT_POINTER(vnode->cb_interest, NULL);
#ifdef CONFIG_AFS_FSCACHE
	vnode->cache		= NULL;
#endif

	vnode->flags		= 1 << AFS_VNODE_UNSET;
	vnode->lock_state	= AFS_VNODE_LOCK_NONE;

	init_rwsem(&vnode->rmdir_lock);

	_leave(" = %p", &vnode->vfs_inode);
	return &vnode->vfs_inode;
}
Example #25
static int __init nx842_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	pr_info("Registering IBM Power 842 compression driver\n");

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata) {
		pr_err("Could not allocate memory for device data\n");
		return -ENOMEM;
	}
	new_devdata->status = UNAVAILABLE;
	RCU_INIT_POINTER(devdata, new_devdata);

	ret = vio_register_driver(&nx842_driver);
	if (ret) {
		/* Unpublish before freeing so no reader is left holding
		 * a dangling devdata pointer.
		 */
		RCU_INIT_POINTER(devdata, NULL);
		synchronize_rcu();
		kfree(new_devdata);
	}
	return ret;
}
Example #26
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
Example #27
int scif_peer_unregister_device(struct scif_dev *scifdev)
{
	struct scif_peer_dev *spdev;

	mutex_lock(&scifdev->lock);
	/* Flush work to ensure device register is complete */
	flush_work(&scifdev->peer_add_work);

	/*
	 * Continue holding scifdev->lock since theoretically unregister_device
	 * can be called simultaneously from multiple threads
	 */
	spdev = rcu_dereference_protected(scifdev->spdev,
					  lockdep_is_held(&scifdev->lock));
	if (!spdev) {
		mutex_unlock(&scifdev->lock);
		return -ENODEV;
	}

	RCU_INIT_POINTER(scifdev->spdev, NULL);
	synchronize_rcu();
	mutex_unlock(&scifdev->lock);

	dev_dbg(&spdev->dev, "Removing peer dnode %d\n", spdev->dnode);
	device_unregister(&spdev->dev);

	mutex_lock(&scif_info.conflock);
	scif_info.total--;
	mutex_unlock(&scif_info.conflock);
	return 0;
}
Example #28
File: fork.c Project: 19Dan01/linux
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
Example #29
static int __init br_netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0) {
		unregister_pernet_subsys(&brnf_net_ops);
		return ret;
	}

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		unregister_netdevice_notifier(&brnf_notifier);
		unregister_pernet_subsys(&brnf_net_ops);
		return -ENOMEM;
	}
#endif
	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}
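The unwind above is written out by hand at each failure point; the kernel's customary alternative is a goto ladder that keeps the teardown steps, in reverse setup order, in one place. A behavior-identical sketch of the same init:

static int __init br_netfilter_init_sketch(void)
{
	int ret;

	ret = register_pernet_subsys(&brnf_net_ops);
	if (ret < 0)
		return ret;

	ret = register_netdevice_notifier(&brnf_notifier);
	if (ret < 0)
		goto err_pernet;

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge",
						 brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		ret = -ENOMEM;
		goto err_notifier;
	}
#endif

	RCU_INIT_POINTER(nf_br_ops, &br_ops);
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;

#ifdef CONFIG_SYSCTL
err_notifier:
	unregister_netdevice_notifier(&brnf_notifier);
#endif
err_pernet:
	unregister_pernet_subsys(&brnf_net_ops);
	return ret;
}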
Example #30
static int __init nf_nat_snmp_basic_init(void)
{
	BUG_ON(nf_nat_snmp_hook != NULL);
	RCU_INIT_POINTER(nf_nat_snmp_hook, help);

	return nf_conntrack_helper_register(&snmp_trap_helper);
}