Example no. 1
0
void fib_release_info(struct fib_info *fi)
{
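    /* Drop one tree reference on @fi; the last one unhashes the fib_info from all hash lists, marks it dead and puts it. */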
    spin_lock_bh(&fib_info_lock);
    if (fi && --fi->fib_treeref == 0) {
        hlist_del(&fi->fib_hash);
        if (fi->fib_prefsrc)
            hlist_del(&fi->fib_lhash);
        change_nexthops(fi) {
            if (!nh->nh_dev)
                continue;
            hlist_del(&nh->nh_hash);
        }
        endfor_nexthops(fi)
        fi->fib_dead = 1;
        fib_info_put(fi);
    }
    spin_unlock_bh(&fib_info_lock);
}
Example no. 2
0
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	UDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->group_num);

	hlist_del(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb) {
		/* timer "holds" one reference (we have one more) */
		if (del_timer(&inst->timer))
			instance_put(inst);
		if (inst->qlen)
			__nfulnl_send(inst);
		if (inst->skb) {
			kfree_skb(inst->skb);
			inst->skb = NULL;
		}
	}
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
Example no. 3
static int __msm_pmem_table_del(struct hlist_head *ptype,
			struct msm_pmem_info *pinfo)
{
	int rc = 0;
	struct msm_pmem_region *region;
	struct hlist_node *node, *n;

	switch (pinfo->type) {
	case MSM_PMEM_AEC_AWB:
	case MSM_PMEM_AF:
		hlist_for_each_entry_safe(region, node, n,
				ptype, list) {

			if (pinfo->type == region->info.type &&
				pinfo->vaddr == region->info.vaddr &&
				pinfo->fd == region->info.fd) {
				hlist_del(node);
#ifdef CONFIG_ANDROID_PMEM
				put_pmem_file(region->file);
#else

#endif
				kfree(region);
			}
		}
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
Example no. 4
0
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
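	/* Check whether the request is finished both locally and on the network; if so, hand the master bio back for completion via @m. */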
	const unsigned long s = req->rq_state;
	struct drbd_conf *mdev = req->mdev;
	
	int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;

	if (s & RQ_NET_QUEUED)
		return;
	if (s & RQ_NET_PENDING)
		return;
	if (s & RQ_LOCAL_PENDING)
		return;

	if (req->master_bio) {

		int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
		int error = PTR_ERR(req->private_bio);

		if (!hlist_unhashed(&req->collision))
			hlist_del(&req->collision);
		else
			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

		
		if (rw == WRITE)
			_about_to_complete_local_write(mdev, req);

		
		_drbd_end_io_acct(mdev, req);

		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
	}
Example no. 5
//Called when module unloaded using 'rmmod'
void cleanup_module()
{
	print_all();
	nf_unregister_hook(&nfho);  //cleanup – unregister hook
	//TODO - cleanup hash table
	kfree(ts);
	int i;
	for(i = 0; i < GLOBAL_MAP_SIZE; i++){
		if(global_map[i]){
			struct hlist_node* n;
			struct hlist_node* old;
			hte_t* h;
			/*
			hlist_for_each(n, global_map[i]){
				h = hlist_entry(h, hte_t, node);
				hlist_del(n);
				//if(h) kfree(h);
				//kfree(n);
			}
			*/
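			/* walk the bucket by hand, saving the next pointer before each entry is deleted and freed */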
			n = global_map[i]->first;
			while(n){
				h = hlist_entry(n,hte_t, node);
				old = n;
				n = n->next;
				hlist_del(old);
				//kfree(old);
				kfree(h);
			}
			kfree(global_map[i]);
				
		}
	}
	kfree(global_map);
}
Example no. 6
0
static void
_instance_destroy2(struct nfulnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	UDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->group_num);

	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb) {
		if (inst->qlen)
			__nfulnl_send(inst);
		if (inst->skb) {
			kfree_skb(inst->skb);
			inst->skb = NULL;
		}
	}
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}
Example no. 7
0
static void faf_polled_fd_node_free(struct faf_polled_fd *polled_fd,
				    struct faf_polled_fd_node *polled_fd_node)
{
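	/* Unlink a node whose reference count has already dropped to zero from @polled_fd and free it. */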
	BUG_ON(polled_fd_node->count);
	hlist_del(&polled_fd_node->list);
	polled_fd->count--;
	kfree(polled_fd_node);
}
Example no. 8
0
static void patch_destroy(struct vport *vport)
{
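	/* Clear the vport's peer binding, unhash it and free it after an RCU grace period. */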
	struct patch_vport *patch_vport = patch_vport_priv(vport);

	update_peers(ovs_dp_get_net(vport->dp), patch_vport->name, NULL);
	hlist_del(&patch_vport->hash_node);
	call_rcu(&patch_vport->rcu, free_port_rcu);
}
Example no. 9
0
void auth_domain_put(struct auth_domain *dom)
{
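	/* Drop a reference; the final put unhashes the domain and hands it to its flavour's release routine. */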
	if (atomic_dec_and_lock(&dom->ref.refcount, &auth_domain_lock)) {
		hlist_del(&dom->hash);
		dom->flavour->domain_release(dom);
		spin_unlock(&auth_domain_lock);
	}
}
Example no. 10
0
static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
{
	if (!lrp)
		return;

	if (!hlist_unhashed(&lrp->lrp_list))
		hlist_del(&lrp->lrp_list);
	OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
}
Example no. 11
/**
 * batadv_frag_clear_chain - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}
Example no. 12
0
void khashmap_del(struct khashmap *hlist, u64 key)
{
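	/* Remove the item stored under @key, if any, and return it to the item cache. */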
	struct khashmap_item *item;

	item = khashmap_find_item(hlist, key);
	if (!item)
		return;
	hlist_del(&item->hlist);
	kmem_cache_free(hlist_cachep, item);
}
Example no. 13
0
void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head)
{
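	/* Unlink and free every SDP TLV remaining on @head. */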
	struct nfc_llcp_sdp_tlv *sdp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(sdp, n, head, node) {
		hlist_del(&sdp->node);

		nfc_llcp_free_sdp_tlv(sdp);
	}
}
Example no. 14
0
void *slab_alloc(ohc_slab_t *slab)
{
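	/* Hand out one item from the slab; each item is laid out as a pointer back to its owning slab_block_t followed by the hlist_node that is returned to the caller. */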
	slab_block_t *sblock;
	uintptr_t leader;
	struct hlist_node *p;
	int buckets;
	int i;

	if(hlist_empty(&slab->block_head)) {
		buckets = slab_buckets(slab);
		sblock = malloc(sizeof(slab_block_t) + slab->item_size * buckets);
		if(sblock == NULL) {
			return NULL;
		}

		sblock->slab = slab;
		sblock->frees = buckets;
		hlist_add_head(&sblock->block_node, &slab->block_head);
		INIT_HLIST_HEAD(&sblock->item_head);

		leader = (uintptr_t)sblock + sizeof(slab_block_t);
		for(i = 0; i < buckets; i++) {
			*((slab_block_t **)leader) = sblock;
			p = (struct hlist_node *)(leader + sizeof(slab_block_t *));
			hlist_add_head(p, &sblock->item_head);
			leader += slab->item_size;
		}

	} else {
		sblock = list_entry(slab->block_head.first, slab_block_t, block_node);
	}

	p = sblock->item_head.first;
	hlist_del(p);

	sblock->frees--;
	if(sblock->frees == 0) {
		/* if no free items, we throw the block away */
		hlist_del(&sblock->block_node);
	}

	return p;
}
Example no. 15
0
void unregister_kprobe(struct kprobe *p)
{
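	/* Restore the original instruction at the probe address and unlink the probe from the kprobe hash table. */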
	unsigned long flags;
	arch_remove_kprobe(p);
	spin_lock_irqsave(&kprobe_lock, flags);
	*p->addr = p->opcode;
	hlist_del(&p->hlist);
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
	spin_unlock_irqrestore(&kprobe_lock, flags);
}
Example no. 16
0
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
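	/* Invalidate the shadow mapping for this cached PTE, release the backing page and unlink the entry from its three lookup lists before freeing it. */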
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	hlist_del(&pte->list_pte);
	hlist_del(&pte->list_vpte);
	hlist_del(&pte->list_vpte_long);

	vcpu->arch.hpte_cache_count--;
	kmem_cache_free(hpte_cache, pte);
}
Example no. 17
0
/**
 *	llc_sap_remove_socket - removes a socket from SAP
 *	@sap: SAP
 *	@sk: socket
 *
 *	This function removes a connection from the hash tables of a SAP if
 *	the connection was in this list.
 */
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);

	spin_lock_bh(&sap->sk_lock);
	sk_nulls_del_node_init_rcu(sk);
	hlist_del(&llc->dev_hash_node);
	sap->sk_count--;
	spin_unlock_bh(&sap->sk_lock);
	llc_sap_put(sap);
}
Example no. 18
0
/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
{
	hlist_del(&cell->list);

	if (inmates) {
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
		bio_list_merge(inmates, &cell->bios);
	}
}
Example no. 19
0
/**
 * batadv_forw_packet_list_free() - free a list of forward packets
 * @head: a list of to be freed forw_packets
 *
 * This function cancels the scheduling of any packet in the provided list,
 * waits for any possibly running packet forwarding thread to finish and
 * finally, safely frees this forward packet.
 *
 * This function might sleep.
 */
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}
Example no. 20
0
void khashmap_destroy(struct khashmap *hlist)
{
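	/* Walk every bucket and free any items still left in the map. */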
	struct khashmap_item *item;
	struct hlist_node *node, *p;
	int i;

	if (unlikely(!hlist))
		return;
	khashmap_for_each_entry_safe(hlist, i, node, p, item) {
		hlist_del(&item->hlist);
		kmem_cache_free(hlist_cachep, item);
	}
Example no. 21
0
/*
 * Delete a file after having released all locks, blocks and shares
 */
static inline void
nlm_delete_file(struct nlm_file *file)
{
	nlm_debug_print_file("closing file", file);
	if (!hlist_unhashed(&file->f_list)) {
		hlist_del(&file->f_list);
		nlmsvc_ops->fclose(file->f_file);
		kfree(file);
	} else {
		printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
	}
}
Example no. 22
0
static void tcf_hash_destroy(struct tcf_hashinfo *hinfo, struct tc_action *p)
{
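	/* Unhash the action under hinfo->lock, kill its rate estimator and free it after an RCU grace period. */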
	spin_lock_bh(&hinfo->lock);
	hlist_del(&p->tcfa_head);
	spin_unlock_bh(&hinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	/*
	 * gen_estimator est_timer() might access p->tcfa_lock
	 * or bstats, wait a RCU grace period before freeing p
	 */
	call_rcu(&p->tcfa_rcu, free_tcf);
}
Example no. 23
0
extern int
hashtable_del_node(struct hlist_node *node)
{
	if (NULL == node) {
		eag_log_err("hashtable_del_node input error!\n");
		return ERR_HASH_PARAM_ERR;
	}
//      unsigned int hash_value = 0;

	hlist_del(node);

	return HASH_RETURN_OK;
}
Example no. 24
0
static void remove_from_hash(struct buffer_head *buf)
{
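	/* Find the buffer matching @buf's device and block number in its hash bucket and unlink it from the free-buffer hash. */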
	struct buffer_head *node = NULL;
	struct hlist_head *head = &HASH(buf->b_dev,buf->b_blocknr);
	spin_lock(&hash_lock);
	hlist_for_each_entry(node, head, list_free){
		if (node->b_dev == buf->b_dev && node->b_blocknr == buf->b_blocknr) {
			hlist_del(&node->list_free);
			break;
		}
	}
	buffer_count--;
	spin_unlock(&hash_lock);
}
Example no. 25
0
static inline void put_entry(struct trunk_entry_incore *entry)
{
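	/* Remove @entry from its hash bucket and from the active list, then free it; each bucket has its own mutex. */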
	int h = hash(entry->raw.oid);

	pthread_mutex_lock(&hashtable_lock[h]);
	hlist_del(&entry->hash);
	pthread_mutex_unlock(&hashtable_lock[h]);

	pthread_mutex_lock(&active_list_lock);
	list_del(&entry->active_list);
	trunk_entry_active_nr--;
	pthread_mutex_unlock(&active_list_lock);
	free(entry);
}
Example no. 26
0
int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list)
{
	struct sk_buff *msg;
	struct nlattr *sdp_attr, *uri_attr;
	struct nfc_llcp_sdp_tlv *sdres;
	struct hlist_node *n;
	void *hdr;
	int rc = -EMSGSIZE;
	int i;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
			  NFC_EVENT_LLC_SDRES);
	if (!hdr)
		goto free_msg;

	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
		goto nla_put_failure;

	sdp_attr = nla_nest_start(msg, NFC_ATTR_LLC_SDP);
	if (sdp_attr == NULL) {
		rc = -ENOMEM;
		goto nla_put_failure;
	}

	i = 1;
	hlist_for_each_entry_safe(sdres, n, sdres_list, node) {
		pr_debug("uri: %s, sap: %d\n", sdres->uri, sdres->sap);

		uri_attr = nla_nest_start(msg, i++);
		if (uri_attr == NULL) {
			rc = -ENOMEM;
			goto nla_put_failure;
		}

		if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap))
			goto nla_put_failure;

		if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri))
			goto nla_put_failure;

		nla_nest_end(msg, uri_attr);

		hlist_del(&sdres->node);

		nfc_llcp_free_sdp_tlv(sdres);
	}
Example no. 27
0
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
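	/* Look up @oblock; on a hit the entry is moved to the front of its bucket (move-to-front caching) before being returned. */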
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}
Example no. 28
0
/*
* Expand the size of the hash table to @size.
* @ht: the hash table to expand
* @size: the size we expand to
*/
static int uproc_htable_expand(uproc_htable_t *ht, int size){
    int new_len, new_idx, new_load_limit,  i;
    struct hlist_head *new_buckets, *head;
    struct hlist_node *p, *q;
    unsigned h;
    new_load_limit = ht->load_limit;
    new_len = ht->len;
    new_idx = ht->p_index;
    while(new_load_limit < size && new_idx < uproc_htable_nprimes){
        new_len = uproc_htable_primes[++new_idx];
        new_load_limit = ht->load_factor * new_len;
    }

    if((new_buckets = malloc(new_len * sizeof(struct hlist_head))) == NULL){
        fprintf(stderr, "failed to malloc: %s\n", strerror(errno));
        return -ENOMEM;
    }

    for(i = 0; i < new_len; ++i){
        INIT_HLIST_HEAD(&new_buckets[i]);
    }

    /*
    * Rehash and move all event to new_buckets.
    */
    for(i = 0; i < ht->len; ++i){
        head = &(ht->buckets[i]);
        if(!hlist_empty(head)){
            p = head->first;
            while(p){
                q = p->next;
                hlist_del(p);
                h = ht->hf(p) % new_len;
                /* kernel-style hlist_add_head() takes the node first, then the list head */
                hlist_add_head(p, &new_buckets[h]);
                p = q;
            }
        }
    }

    free(ht->buckets);

    ht->p_index = new_idx;
    ht->buckets = new_buckets;
    ht->len = new_len;
    ht->load_limit = new_load_limit;

    return 0;
}
Example no. 29
0
void tcf_hash_destroy(struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	spin_lock_bh(&hinfo->lock);
	hlist_del(&p->tcfc_head);
	spin_unlock_bh(&hinfo->lock);
	gen_kill_estimator(&p->tcfc_bstats,
			   &p->tcfc_rate_est);
	/*
	 * gen_estimator est_timer() might access p->tcfc_lock
	 * or bstats, wait a RCU grace period before freeing p
	 */
	kfree_rcu(p, tcfc_rcu);
}
Example no. 30
0
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}