/** Decrease a session's reference count.
 *
 * When the count reaches zero the session's ID is returned to the
 * allocator and the session structure itself is freed.
 *
 * @param session Session to release. */
void session_release(session_t *session) {
    if (refcount_dec(&session->count) != 0)
        return;

    /* Last reference dropped: tear the session down. */
    dprintf("session: destroyed session %d\n", session->id);
    id_allocator_free(&session_id_allocator, session->id);
    kfree(session);
}
// Emit C code for an assignment expression.  Three cases are handled:
// (1) reassignment of an already-declared local, (2) assignment to a class
// attribute (not implemented yet), and (3) first assignment to a new local,
// which also emits the declaration.  The emitted value is left in return_.
void CCodeGenerator::operator()(Assignment* expr) {
    // Handle all types of assignment, including member assignment
    // An Empty initializer means "no explicit value"; emit a literal 0
    // instead of generating code for it.
    Expression::Ptr init = expr->initializer();
    if (dynamic_cast<Empty*>(init.pointer())) {
        return_ = Operand(env_->integer("0"));
    } else {
        return_ = emit(init);
    }
    String::Ptr id = expr->identifier();
    Variable::Ptr var = variable(id);
    // class_ may be null outside of a class body; then there is no attribute.
    Attribute::Ptr attr = class_ ? class_->attribute(id) : 0;
    if (var) {
        // Assignment to a local var that has already been initialized once in
        // the current scope.
        Type::Ptr type = var->type();
        // Release the old referent before overwriting it; value types are
        // not reference-counted.
        if (!type->is_value()) {
            refcount_dec(Operand(var->name()));
        }
        line();
        out_ << id->string() << " = " << return_ << ";\n";
        // Take a reference on the newly stored value.
        if (!type->is_value()) {
            refcount_inc(Operand(var->name()));
        }
    } else if (attr) {
        // Assignment to an attribute within a class
        // NOTE(review): the sketch below is incomplete (Operand::addr is
        // missing its first argument) and is kept only as a starting point.
        /*
        Type::Ptr type = expr->type();
        Variable::Ptr self = variable(env_->name("__self"));
        Operand addr = Operand::addr(, attr->slot());
        Operand old = load(addr);
        if (!type->is_value() && !attr->is_weak()) {
            refcount_dec(old);
        }
        store(addr, return_);
        if (!type->is_value() && !attr->is_weak()) {
            refcount_inc(return_);
        }
        */
        assert(!"not impl");
    } else {
        // Assignment to a local var that has not yet been initialized in the
        // current scope.
        // If no type was declared explicitly, fall back to the inferred
        // expression type.
        Type::Ptr declared = expr->declared_type();
        if (declared->is_top()) {
            declared = expr->type();
        }
        line();
        brace();
        // Emit "<type> <name> = <value>;" plus a (void) cast to silence
        // unused-variable warnings from the C compiler.
        operator()(declared);
        out_ << " " << id->string() << " = " << return_ << "; ";
        out_ << "(void)" << id->string() << ";\n";
        // Record the new variable in the current scope's symbol table.
        variable(new Variable(id, declared));
        if (!declared->is_value()) {
            refcount_inc(return_);
        }
    }
}
/* Thread-pool callback for outbound I/O processing.
 *
 * data     - the connection being processed (one queued reference is
 *            consumed here via connection_unref)
 * userdata - the processor state whose mutex guards the work
 */
void processors_io_out_thread(void *data, void *userdata)
{
    struct processor_data *proc = userdata;
    struct connection *conn = data;

    g_debug("%s data %p userdata %p", __PRETTY_FUNCTION__, data, userdata);

    g_mutex_lock(&proc->mutex);
    refcount_dec(&proc->queued);
    recurse_io_process(proc, conn, bistream_out);
    g_mutex_unlock(&proc->mutex);

    /* Drop the reference taken when this job was queued. */
    connection_unref(conn);
}
/** Release a user semaphore.
 *
 * Drops one reference; when the last reference goes away the semaphore
 * is unlinked from the global tree, its kernel object is destroyed, its
 * ID is returned to the allocator, and the structure is freed.
 *
 * @param sem Semaphore to release. */
static void user_semaphore_release(user_semaphore_t *sem) {
    if (refcount_dec(&sem->count) != 0)
        return;

    /* Remove from the lookup tree under the write lock. */
    rwlock_write_lock(&semaphore_tree_lock);
    avl_tree_remove(&semaphore_tree, &sem->tree_link);
    rwlock_unlock(&semaphore_tree_lock);

    object_destroy(&sem->obj);
    id_allocator_free(&semaphore_id_allocator, sem->id);
    kfree(sem);
}
/* archive_file close hook for bzip2 archives.
 *
 * Drops one reference; when refcount_dec() reports that this was the
 * last reference, the name, the underlying input stream, and the
 * context itself are released. */
static void bz2_close(struct archive_file *file)
{
    struct bz2_archive_file *bz2 = (struct bz2_archive_file *) file;

    if (refcount_dec(&bz2->ref)) {
        g_free(bz2->name);
        input_stream_close(bz2->istream);
        g_free(bz2);
    }
}
/*
 * Unmap a user doorbell record page.
 *
 * Drops this db's reference on the shared user page and, if that leaves
 * only a single reference, drops that final reference as well and frees
 * the page.  Both decrements are serialized by context->page_mutex.
 *
 * NOTE(review): the two decrements are correct only if the page list
 * itself holds one extra reference (i.e. the map side leaves a freshly
 * created page with a count of 2: one for the list, one for the user).
 * That is the upstream hns convention — confirm against
 * hns_roce_db_map_user before changing this to refcount_dec_and_test().
 */
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db)
{
	mutex_lock(&context->page_mutex);

	/* Release this mapping's reference. */
	refcount_dec(&db->u.user_page->refcount);

	/* Only the list's own reference left? Then tear the page down. */
	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->page_mutex);
}
// Emits the code to clean up the stack when exiting a block.  Reference
// types get a refcount decrement (which may free them); value types will
// eventually need a destructor call, which is not implemented yet.
// Primitive or untyped variables need no cleanup at all.
void CCodeGenerator::scope_cleanup(Variable* var) {
    Type::Ptr type = var->type();
    if (!type || type->is_primitive()) {
        return;
    }
    if (type->is_value()) {
        // Destructor call for value types is still unimplemented.
        assert(!"Need to figure out how to do value types");
        return;
    }
    // Emit a branch to check the variable's reference count and free
    // it if necessary.
    refcount_dec(Operand(var->name()));
}
/*
 * Drop an skb that was returned by a peeking receive.
 *
 * Only acts when MSG_PEEK was set (a non-peek receive already unlinked
 * the skb).  Under the queue lock, skb->next is used as the "still
 * linked into a queue" check: if another thread already consumed the
 * skb, -ENOENT is returned.  Otherwise the skb is unlinked, the peek
 * reference is dropped, and the optional destructor runs.
 *
 * The socket's drop counter is bumped unconditionally.
 *
 * Returns 0 on success, -ENOENT if the skb was no longer queued.
 */
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		/* Assume the skb is gone until proven otherwise. */
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			/* Drop the reference the peek took. */
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}