Example no. 1
void CCodeGenerator::operator()(Assignment* expr) {
    // Handle all types of assignment, including member assignment
    Expression::Ptr init = expr->initializer();
    if (dynamic_cast<Empty*>(init.pointer())) {
        return_ = Operand(env_->integer("0"));
    } else {
        return_ = emit(init);
    }

    String::Ptr id = expr->identifier();
    Variable::Ptr var = variable(id);
    Attribute::Ptr attr = class_ ? class_->attribute(id) : 0;

    if (var) {
        // Assignment to a local var that has already been initialized once in
        // the current scope.
        Type::Ptr type = var->type();
        if (!type->is_value()) {
            refcount_dec(Operand(var->name()));
        }
        line();
        out_ << id->string() << " = " << return_ << ";\n";
        if (!type->is_value()) {
            refcount_inc(Operand(var->name()));
        }
    } else if (attr) {
        // Assignment to an attribute within a class
/*
        Type::Ptr type = expr->type(); 
        Variable::Ptr self = variable(env_->name("__self"));
        Operand addr = Operand::addr(, attr->slot());  
        Operand old = load(addr);
        if (!type->is_value() && !attr->is_weak()) {
            refcount_dec(old);
        } 
        store(addr, return_);
        if (!type->is_value() && !attr->is_weak()) {
            refcount_inc(return_);
        }
*/
        assert(!"not impl");
    } else {
        // Assignment to a local var that has not yet been initialized in the
        // current scope.
        Type::Ptr declared = expr->declared_type();
        if (declared->is_top()) {
            declared = expr->type();
        }
        line();
        brace();
        operator()(declared);
        out_ << " " << id->string() << " = " << return_ << "; ";
        out_ << "(void)" << id->string()  << ";\n";
        variable(new Variable(id, declared));
        if (!declared->is_value()) {
            refcount_inc(return_);
        }
    }
}
Example no. 2
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
Example no. 3
static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
					  struct mpoa_client *client)
{
	in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);

	if (entry == NULL) {
		pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
		return NULL;
	}

	dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);

	refcount_set(&entry->use, 1);
	dprintk("new_in_cache_entry: about to lock\n");
	write_lock_bh(&client->ingress_lock);
	entry->next = client->in_cache;
	entry->prev = NULL;
	if (client->in_cache != NULL)
		client->in_cache->prev = entry;
	client->in_cache = entry;

	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
	entry->ctrl_info.in_dst_ip = dst_ip;
	do_gettimeofday(&(entry->tv));
	entry->retry_time = client->parameters.mpc_p4;
	entry->count = 1;
	entry->entry_state = INGRESS_INVALID;
	entry->ctrl_info.holding_time = HOLDING_TIME_DEFAULT;
	refcount_inc(&entry->use);

	write_unlock_bh(&client->ingress_lock);
	dprintk("new_in_cache_entry: unlocked\n");

	return entry;
}
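The entry above starts life holding two references: refcount_set(&entry->use, 1) covers the ingress-cache list that now points at it, and the refcount_inc() just before unlocking covers the pointer returned to the caller. A minimal sketch of the matching release (the helper name is hypothetical; the real mpoa_caches.c routine may differ):

/* Hypothetical release helper: drop one reference to an in_cache_entry
 * and free it once the last holder is gone. */
static void in_cache_entry_put(in_cache_entry *entry)
{
	if (refcount_dec_and_test(&entry->use))
		kfree(entry);
}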
Example no. 4
static struct input_stream *
bz2_open_stream(struct archive_file *file, const char *path,
		GMutex *mutex, GCond *cond,
		GError **error_r)
{
	struct bz2_archive_file *context = (struct bz2_archive_file *) file;
	struct bz2_input_stream *bis = g_new(struct bz2_input_stream, 1);

	input_stream_init(&bis->base, &bz2_inputplugin, path,
			  mutex, cond);

	bis->archive = context;

	bis->base.ready = true;
	bis->base.seekable = false;

	if (!bz2_alloc(bis, error_r)) {
		input_stream_deinit(&bis->base);
		g_free(bis);
		return NULL;
	}

	bis->eof = false;

	refcount_inc(&context->ref);

	return &bis->base;
}
Example no. 5
/**
 * vb2_common_vm_open() - increase refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function adds another user to the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, refcount_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	refcount_inc(h->refcount);
}
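The open handler only takes a reference; the corresponding vm_close handler is expected to drop it and, on the last user, hand the buffer back through the handler's put callback. A sketch of that counterpart, assuming the put/arg fields of struct vb2_vmarea_handler (not necessarily the exact videobuf2 code):

static void vb2_common_vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
		 __func__, h, refcount_read(h->refcount), vma->vm_start,
		 vma->vm_end);

	/* The put callback decrements h->refcount and frees the buffer
	 * when it reaches zero. */
	h->put(h->arg);
}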
Example no. 6
struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		refcount_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
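mdesc_grab() pairs the lookup of cur_mdesc with the refcount_inc() under mdesc_lock, so the handle cannot disappear between the two. A sketch of the release side under the same assumptions (the real sparc mdesc code hands the handle back to its allocator rather than calling kfree() directly):

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (refcount_dec_and_test(&hp->refcnt))
		kfree(hp);	/* stand-in for the allocator-specific free */
	spin_unlock_irqrestore(&mdesc_lock, flags);
}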
Example no. 7
void CCodeGenerator::operator()(Return* statement) {
    Expression::Ptr expr = statement->expression();
    source_line(statement);

    if (!dynamic_cast<Empty*>(expr.pointer())) {
        Operand ret = emit(expr);
        return_val_ = ret;

        // Increment the refcount if the returned value is an object.  The
        // refcount must be incremented so that the object won't be freed
        // when the function returns.  It is the caller's responsibility to
        // correctly free the returned object.
        if (!expr->type()->is_value()) {
            refcount_inc(ret);
        }
    }
    block_has_return_ = true;
}
Example no. 8
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (unlikely(IS_ERR(skb))) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			*peeked = 1;
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
			if (destructor)
				destructor(sk, skb);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}
Example no. 9
static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
						struct mpoa_client *mpc)
{
	eg_cache_entry *entry;

	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->ctrl_info.cache_id == cache_id) {
			refcount_inc(&entry->use);
			read_unlock_irq(&mpc->egress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	return NULL;
}
Example no. 10
static in_cache_entry *in_cache_get(__be32 dst_ip,
				    struct mpoa_client *client)
{
	in_cache_entry *entry;

	read_lock_bh(&client->ingress_lock);
	entry = client->in_cache;
	while (entry != NULL) {
		if (entry->ctrl_info.in_dst_ip == dst_ip) {
			refcount_inc(&entry->use);
			read_unlock_bh(&client->ingress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_bh(&client->ingress_lock);

	return NULL;
}
Example no. 11
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
{
	unsigned long flags;
	eg_cache_entry *entry;

	read_lock_irqsave(&mpc->egress_lock, flags);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->ctrl_info.tag == tag) {
			refcount_inc(&entry->use);
			read_unlock_irqrestore(&mpc->egress_lock, flags);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irqrestore(&mpc->egress_lock, flags);

	return NULL;
}
Example no. 12
static int add_cgroup(struct perf_evlist *evlist, char *str)
{
	struct perf_evsel *counter;
	struct cgroup_sel *cgrp = NULL;
	int n;
	/*
	 * check if cgrp is already defined, if so we reuse it
	 */
	evlist__for_each_entry(evlist, counter) {
		cgrp = counter->cgrp;
		if (!cgrp)
			continue;
		if (!strcmp(cgrp->name, str)) {
			refcount_inc(&cgrp->refcnt);
			break;
		}

		cgrp = NULL;
	}
Example no. 13
static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
					      struct mpoa_client *mpc)
{
	eg_cache_entry *entry;

	read_lock_irq(&mpc->egress_lock);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->latest_ip_addr == ipaddr) {
			refcount_inc(&entry->use);
			read_unlock_irq(&mpc->egress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irq(&mpc->egress_lock);

	return NULL;
}
Example no. 14
static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
					   struct mpoa_client *client)
{
	in_cache_entry *entry;

	read_lock_bh(&client->ingress_lock);
	entry = client->in_cache;
	while (entry != NULL) {
		if (entry->shortcut == vcc) {
			refcount_inc(&entry->use);
			read_unlock_bh(&client->ingress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_bh(&client->ingress_lock);

	return NULL;
}
Example no. 15
/* This can be called from any context since it saves CPU flags */
static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
					   struct mpoa_client *mpc)
{
	unsigned long flags;
	eg_cache_entry *entry;

	read_lock_irqsave(&mpc->egress_lock, flags);
	entry = mpc->eg_cache;
	while (entry != NULL) {
		if (entry->shortcut == vcc) {
			refcount_inc(&entry->use);
			read_unlock_irqrestore(&mpc->egress_lock, flags);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_irqrestore(&mpc->egress_lock, flags);

	return NULL;
}
Example no. 16
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db)
{
	struct hns_roce_user_db_page *page;
	int ret = 0;

	mutex_lock(&context->page_mutex);

	list_for_each_entry(page, &context->page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&page->refcount, 1);
	page->user_virt = (virt & PAGE_MASK);
	page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				 PAGE_SIZE, 0, 0);
	if (IS_ERR(page->umem)) {
		ret = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->page_list);

found:
	db->dma = sg_dma_address(page->umem->sg_head.sgl) +
		  (virt & ~PAGE_MASK);
	db->u.user_page = page;
	refcount_inc(&page->refcount);

out:
	mutex_unlock(&context->page_mutex);

	return ret;
}
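Each mapped doorbell takes one reference on its backing page, and the page_list entry itself keeps one more. A sketch of the unmap path under those assumptions (hedged: the real hns_roce_db_unmap_user may order its bookkeeping differently):

void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db)
{
	struct hns_roce_user_db_page *page = db->u.user_page;

	mutex_lock(&context->page_mutex);

	refcount_dec(&page->refcount);		/* this doorbell's reference */
	if (refcount_dec_if_one(&page->refcount)) {
		/* Only the page_list reference was left: unlink and free. */
		list_del(&page->list);
		ib_umem_release(page->umem);
		kfree(page);
	}

	mutex_unlock(&context->page_mutex);
}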
Example no. 17
/**
 * vfs_dup_fs_context: Duplicate a filesystem context.
 * @src_fc: The context to copy.
 */
struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
{
	struct fs_context *fc;
	int ret;

	if (!src_fc->ops->dup)
		return ERR_PTR(-EOPNOTSUPP);

	fc = kmemdup(src_fc, sizeof(struct fs_context), GFP_KERNEL);
	if (!fc)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fc->uapi_mutex);

	fc->fs_private	= NULL;
	fc->s_fs_info	= NULL;
	fc->source	= NULL;
	fc->security	= NULL;
	get_filesystem(fc->fs_type);
	get_net(fc->net_ns);
	get_user_ns(fc->user_ns);
	get_cred(fc->cred);
	if (fc->log)
		refcount_inc(&fc->log->usage);

	/* Can't call put until we've called ->dup */
	ret = fc->ops->dup(fc, src_fc);
	if (ret < 0)
		goto err_fc;

	ret = security_fs_context_dup(fc, src_fc);
	if (ret < 0)
		goto err_fc;
	return fc;

err_fc:
	put_fs_context(fc);
	return ERR_PTR(ret);
}
Example no. 18
static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
					  struct mpoa_client *client)
{
	eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);

	if (entry == NULL) {
		pr_info("out of memory\n");
		return NULL;
	}

	dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
		&msg->content.eg_info.eg_dst_ip);

	refcount_set(&entry->use, 1);
	dprintk("new_eg_cache_entry: about to lock\n");
	write_lock_irq(&client->egress_lock);
	entry->next = client->eg_cache;
	entry->prev = NULL;
	if (client->eg_cache != NULL)
		client->eg_cache->prev = entry;
	client->eg_cache = entry;

	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
	entry->ctrl_info = msg->content.eg_info;
	do_gettimeofday(&(entry->tv));
	entry->entry_state = EGRESS_RESOLVED;
	dprintk("new_eg_cache_entry cache_id %u\n",
		ntohl(entry->ctrl_info.cache_id));
	dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
	refcount_inc(&entry->use);

	write_unlock_irq(&client->egress_lock);
	dprintk("new_eg_cache_entry: unlocked\n");

	return entry;
}
Example no. 19
/** Open a handle to a semaphore.
 * @param id		ID of the semaphore to open.
 * @param rights	Access rights for the handle.
 * @param handlep	Where to store handle to semaphore.
 * @return		Status code describing result of the operation. */
status_t kern_semaphore_open(semaphore_id_t id, object_rights_t rights, handle_t *handlep) {
	user_semaphore_t *sem;
	status_t ret;

	if(!handlep)
		return STATUS_INVALID_ARG;

	rwlock_read_lock(&semaphore_tree_lock);

	sem = avl_tree_lookup(&semaphore_tree, id);
	if(!sem) {
		rwlock_unlock(&semaphore_tree_lock);
		return STATUS_NOT_FOUND;
	}

	refcount_inc(&sem->count);
	rwlock_unlock(&semaphore_tree_lock);

	ret = object_handle_open(&sem->obj, NULL, rights, NULL, 0, NULL, NULL, handlep);
	if(ret != STATUS_SUCCESS)
		user_semaphore_release(sem);

	return ret;
}
Example no. 20
/** Increase a session's reference count.
 * @param session	Session to reference. */
void session_get(session_t *session) {
	refcount_inc(&session->count);
}
Example no. 21
struct nsinfo *nsinfo__get(struct nsinfo *nsi)
{
	if (nsi)
		refcount_inc(&nsi->refcnt);
	return nsi;
}
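nsinfo__get() is the usual get/put idiom: bump the count only if the pointer is non-NULL, then hand it back. A sketch of the matching put, assuming a nsinfo__delete() destructor as in perf's namespaces code:

void nsinfo__put(struct nsinfo *nsi)
{
	/* Release one reference; destroy the nsinfo on the last one. */
	if (nsi && refcount_dec_and_test(&nsi->refcnt))
		nsinfo__delete(nsi);
}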
Example no. 22
File: write.c Project: krzk/linux
/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}
Example no. 23
static struct rpc_auth *
nul_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	refcount_inc(&null_auth.au_count);
	return &null_auth;
}
Example no. 24
/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}
Example no. 25
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk))
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
	ireq->ireq_family = AF_INET6;
	ireq->ir_mark = inet_request_mark(sk, skb);

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_gsr     = dreq->dreq_isr;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_gss     = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	reqsk_put(req);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}
Example no. 26
void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}
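The same pattern applies to perf_mmap: the get is a bare refcount_inc(), and the put is expected to tear down the ring-buffer mapping once the last reference drops. A sketch, assuming a perf_mmap__munmap() teardown helper:

void perf_mmap__put(struct perf_mmap *map)
{
	/* Unmap the ring buffer when the final reference is released. */
	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}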
Example no. 27
static struct rpc_auth *
unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	refcount_inc(&unix_auth.au_count);
	return &unix_auth;
}
Example no. 28
static inline void
instance_get(struct nfulnl_instance *inst)
{
	refcount_inc(&inst->use);
}
Example no. 29
static void aca_get(struct ifacaddr6 *aca)
{
	refcount_inc(&aca->aca_refcnt);
}