static int __del_msg_handle (struct shim_msg_handle * msgq)
{
    if (msgq->deleted)
        return -EIDRM;

    msgq->deleted = true;
    free(msgq->queue);
    msgq->queuesize = 0;
    msgq->queueused = 0;
    free(msgq->types);
    msgq->ntypes = 0;

    struct shim_handle * hdl = MSG_TO_HANDLE(msgq);

    lock(msgq_list_lock);
    list_del_init(&msgq->list);
    put_handle(hdl);
    if (!hlist_unhashed(&msgq->key_hlist)) {
        hlist_del_init(&msgq->key_hlist);
        put_handle(hdl);
    }
    if (!hlist_unhashed(&msgq->qid_hlist)) {
        hlist_del_init(&msgq->qid_hlist);
        put_handle(hdl);
    }
    unlock(msgq_list_lock);
    return 0;
}
/**
 * bfq_drop_dead_cic - free an exited cic.
 * @bfqd: bfq data for the device in use.
 * @ioc: io_context owning @cic.
 * @cic: the @cic to free.
 *
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc,
                              struct cfq_io_context *cic)
{
    unsigned long flags;

    WARN_ON(!list_empty(&cic->queue_list));
    BUG_ON(cic->key != bfqd_dead_key(bfqd));

    spin_lock_irqsave(&ioc->lock, flags);

    BUG_ON(ioc->ioc_data == cic);

    /*
     * With shared I/O contexts two lookups may race and drop the
     * same cic more than one time: RCU guarantees that the storage
     * will not be freed too early, here we make sure that we do
     * not try to remove the cic from the hashing structures multiple
     * times.
     */
    if (!hlist_unhashed(&cic->cic_list)) {
        radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index);
        hlist_del_init_rcu(&cic->cic_list);
        bfq_cic_free(cic);
    }

    spin_unlock_irqrestore(&ioc->lock, flags);
}
static inline void hlist_del_init(struct hlist_node *n)
{
    if (!hlist_unhashed(n)) {
        __hlist_del(n);
        INIT_HLIST_NODE(n);
    }
}
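/*
 * Hedged sketch (not from any of the snippets above): a minimal userspace
 * re-implementation of the hlist primitives, compilable on its own, showing
 * why hlist_unhashed() is only a reliable membership test when nodes are
 * removed with hlist_del_init(), which re-initializes pprev, rather than a
 * bare __hlist_del(), which leaves pprev dangling. The names mirror the
 * kernel API, but the bodies here are illustrative reconstructions.
 */
#include <assert.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static inline void INIT_HLIST_NODE(struct hlist_node *n)
{
    n->next = NULL;
    n->pprev = NULL;
}

/* A node is "unhashed" when nothing points back at it. */
static inline int hlist_unhashed(const struct hlist_node *n)
{
    return !n->pprev;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    n->next = h->first;
    if (h->first)
        h->first->pprev = &n->next;
    h->first = n;
    n->pprev = &h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
    *n->pprev = n->next;
    if (n->next)
        n->next->pprev = n->pprev;
}

static inline void hlist_del_init(struct hlist_node *n)
{
    if (!hlist_unhashed(n)) {
        __hlist_del(n);
        INIT_HLIST_NODE(n);
    }
}

int main(void)
{
    struct hlist_head head = { NULL };
    struct hlist_node node;

    INIT_HLIST_NODE(&node);
    assert(hlist_unhashed(&node));   /* never added: unhashed */

    hlist_add_head(&node, &head);
    assert(!hlist_unhashed(&node));  /* on the list: hashed */

    hlist_del_init(&node);
    assert(hlist_unhashed(&node));   /* del_init keeps the test meaningful */
    return 0;
}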
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
    struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

    trace_kvm_book3s_mmu_invalidate(pte);

    /* Different for 32 and 64 bit */
    kvmppc_mmu_invalidate_pte(vcpu, pte);

    spin_lock(&vcpu3s->mmu_lock);

    /* pte already invalidated in between? */
    if (hlist_unhashed(&pte->list_pte)) {
        spin_unlock(&vcpu3s->mmu_lock);
        return;
    }

    hlist_del_init_rcu(&pte->list_pte);
    hlist_del_init_rcu(&pte->list_pte_long);
    hlist_del_init_rcu(&pte->list_vpte);
    hlist_del_init_rcu(&pte->list_vpte_long);

    if (pte->pte.may_write)
        kvm_release_pfn_dirty(pte->pfn);
    else
        kvm_release_pfn_clean(pte->pfn);

    spin_unlock(&vcpu3s->mmu_lock);

    vcpu3s->hpte_cache_count--;
    call_rcu(&pte->rcu_head, free_pte_rcu);
}
static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
{
    if (!lrp)
        return;

    if (!hlist_unhashed(&lrp->lrp_list))
        hlist_del(&lrp->lrp_list);
    OBD_SLAB_FREE(lrp, ll_remote_perm_cachep, sizeof(*lrp));
}
static bool __add_ipc_port (struct shim_ipc_port * port, IDTYPE vmid,
                            int type, port_fini fini)
{
    bool need_restart = false;
    assert(vmid != cur_process.vmid);

    if (vmid && !port->info.vmid) {
        port->info.vmid = vmid;
        port->update = true;
    }

    if (port->info.vmid && hlist_unhashed(&port->hlist)) {
        struct hlist_head * head = &ipc_port_pool[PID_HASH(vmid)];
        __get_ipc_port(port);
        hlist_add_head(&port->hlist, head);
    }

    if (!(port->info.type & IPC_PORT_IFPOLL) && (type & IPC_PORT_IFPOLL))
        need_restart = true;

    if ((port->info.type & type) != type) {
        port->info.type |= type;
        port->update = true;
    }

    if (fini && (type & ~IPC_PORT_IFPOLL)) {
        port_fini * cb = port->fini;
        for ( ; cb < port->fini + MAX_IPC_PORT_FINI_CB ; cb++)
            if (!*cb || *cb == fini)
                break;
        assert(cb < port->fini + MAX_IPC_PORT_FINI_CB);
        *cb = fini;
    }

    if (need_restart) {
        if (list_empty(&port->list)) {
            __get_ipc_port(port);
            list_add(&port->list, &pobj_list);
            port->recent = true;
        } else if (!port->recent) {
            list_del_init(&port->list);
            list_add(&port->list, &pobj_list);
            port->recent = true;
        }
        return true;
    }

    if (list_empty(&port->list)) {
        __get_ipc_port(port);
        list_add_tail(&port->list, &pobj_list);
    }
    return false;
}
static void update_ctime(struct inode *inode)
{
    struct timespec now = current_fs_time(inode->i_sb);

    if (hlist_unhashed(&inode->i_hash) || !inode->i_nlink ||
        timespec_equal(&inode->i_ctime, &now))
        return;

    /* Use the timestamp we just compared against, so the check and the
     * update cannot disagree across a clock-granularity boundary. */
    inode->i_ctime = now;
    mark_inode_dirty(inode);
}
void free_inode_check(struct tux3_inode *tuxnode)
{
    struct inode *inode = &tuxnode->vfs_inode;

    tux3_check_destroy_inode(inode);

    assert(hlist_unhashed(&inode->i_hash));
    assert(inode->i_state == I_FREEING);
    assert(mapping(inode));
}
/*
 * Delete a file after having released all locks, blocks and shares
 */
static inline void nlm_delete_file(struct nlm_file *file)
{
    nlm_debug_print_file("closing file", file);
    if (!hlist_unhashed(&file->f_list)) {
        hlist_del(&file->f_list);
        nlmsvc_ops->fclose(file->f_file);
        kfree(file);
    } else {
        printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
    }
}
/**
 * Drop a reference on the specified pool and free its memory if needed.
 *
 * One reference is held by the LOD OBD device while it is configured, from
 * the time the configuration log defines the pool until the time when it is
 * dropped when the LOD OBD is cleaned up or the pool is deleted. This means
 * that the pool will not be freed while the LOD device is configured, unless
 * it is explicitly destroyed by the sysadmin. The pool structure is freed
 * after the last reference on the structure is released.
 *
 * \param[in] pool	pool descriptor to drop reference on and possibly free
 */
void lod_pool_putref(struct pool_desc *pool)
{
    CDEBUG(D_INFO, "pool %p\n", pool);
    if (atomic_dec_and_test(&pool->pool_refcount)) {
        LASSERT(hlist_unhashed(&pool->pool_hash));
        LASSERT(list_empty(&pool->pool_list));
        LASSERT(pool->pool_proc_entry == NULL);
        lod_ost_pool_free(&(pool->pool_rr.lqr_pool));
        lod_ost_pool_free(&(pool->pool_obds));
        OBD_FREE_PTR(pool);
        EXIT;
    }
}
void rds_shutdown_worker(struct work_struct *work)
{
    struct rds_connection *conn = container_of(work, struct rds_connection,
                                               c_down_w);

    /* shut it down unless it's down already */
    if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
        /*
         * Quiesce the connection mgmt handlers before we start tearing
         * things down. We don't hold the mutex for the entire
         * duration of the shutdown operation, else we may be
         * deadlocking with the CM handler. Instead, the CM event
         * handler is supposed to check for state DISCONNECTING
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
            !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
            rds_conn_error(conn, "shutdown called in state %d\n",
                           atomic_read(&conn->c_state));
            mutex_unlock(&conn->c_cm_lock);
            return;
        }
        mutex_unlock(&conn->c_cm_lock);

        mutex_lock(&conn->c_send_lock);
        conn->c_trans->conn_shutdown(conn);
        rds_conn_reset(conn);
        mutex_unlock(&conn->c_send_lock);

        if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
            /* This can happen - e.g. when we're in the middle of tearing
             * down the connection, and someone unloads the rds module.
             * Quite reproducible with loopback connections.
             * Mostly harmless.
             */
            rds_conn_error(conn, "%s: failed to transition to state DOWN, "
                           "current state is %d\n", __func__,
                           atomic_read(&conn->c_state));
            return;
        }
    }

    /* Then reconnect if it's still live.
     * The passive side of an IB loopback connection is never added
     * to the conn hash, so we never trigger a reconnect on this
     * conn - the reconnect is always triggered by the active peer. */
    cancel_delayed_work(&conn->c_conn_w);
    if (!hlist_unhashed(&conn->c_hash_node))
        rds_queue_reconnect(conn);
}
/*
 * caller must hold spinlock
 */
static void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx,
                          struct hlist_head *freelist)
{
    assert_spin_locked(&ctx->cc_sec->ps_lock);
    LASSERT(atomic_read(&ctx->cc_refcount) > 0);
    LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
    LASSERT(!hlist_unhashed(&ctx->cc_cache));

    clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

    if (atomic_dec_and_test(&ctx->cc_refcount)) {
        __hlist_del(&ctx->cc_cache);
        hlist_add_head(&ctx->cc_cache, freelist);
    } else {
        hlist_del_init(&ctx->cc_cache);
    }
}
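/*
 * Hedged sketch of the pattern above, not taken from ptlrpc: the cache holds
 * one reference on each hashed ctx, so unhashing also drops a reference. If
 * that was the last reference, the node is moved wholesale onto a
 * caller-supplied freelist (to be freed outside the lock); otherwise
 * hlist_del_init() unhashes it so later hlist_unhashed() checks stay
 * meaningful. Types and helpers reuse the minimal userspace hlist sketch
 * after hlist_del_init() above; "ctx_t" and "ctx_unhash" are illustrative
 * names, not the Lustre API.
 */
#include <stdatomic.h>

typedef struct ctx {
    atomic_int        refcount;
    struct hlist_node cache;   /* linkage in the ctx cache */
} ctx_t;

/* Caller holds the lock protecting the cache, as in ctx_unhash_pf(). */
static void ctx_unhash(ctx_t *ctx, struct hlist_head *freelist)
{
    /* atomic_fetch_sub returns the old value: 1 means "last reference" */
    if (atomic_fetch_sub(&ctx->refcount, 1) == 1) {
        __hlist_del(&ctx->cache);               /* leave node "hashed"... */
        hlist_add_head(&ctx->cache, freelist);  /* ...on the freelist */
    } else {
        hlist_del_init(&ctx->cache);    /* still referenced elsewhere */
    }
}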
void show_buffers_(map_t *map, int all)
{
    struct buffer_head *buffer;
    unsigned i;

    for (i = 0; i < BUFFER_BUCKETS; i++) {
        struct hlist_head *bucket = &map->hash[i];
        if (hlist_empty(bucket))
            continue;

        printf("[%i] ", i);
        hlist_for_each_entry(buffer, bucket, hashlink) {
            /* a hashed buffer holds one extra count via the hash itself */
            if (all || buffer->count >= !hlist_unhashed(&buffer->hashlink) + 1)
                show_buffer(buffer);
        }
        printf("\n");
    }
}
static void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
    LASSERT(ctx->cc_sec);
    LASSERT(atomic_read(&ctx->cc_refcount) > 0);

    cli_ctx_expire(ctx);

    spin_lock(&ctx->cc_sec->ps_lock);

    if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
        LASSERT(!hlist_unhashed(&ctx->cc_cache));
        LASSERT(atomic_read(&ctx->cc_refcount) > 1);

        hlist_del_init(&ctx->cc_cache);
        if (atomic_dec_and_test(&ctx->cc_refcount))
            LBUG();
    }

    spin_unlock(&ctx->cc_sec->ps_lock);
}
static void crypto_remove_instance(struct crypto_instance *inst,
                                   struct list_head *list)
{
    struct crypto_template *tmpl = inst->tmpl;

    if (crypto_is_dead(&inst->alg))
        return;

    inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
    if (hlist_unhashed(&inst->list))
        return;

    if (!tmpl || !crypto_tmpl_get(tmpl))
        return;

    crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
    list_move(&inst->alg.cra_list, list);
    hlist_del(&inst->list);
    inst->alg.cra_destroy = crypto_destroy_instance;

    BUG_ON(!list_empty(&inst->alg.cra_users));
}
static bool __del_ipc_port (struct shim_ipc_port * port, int type)
{
    debug("deleting port %p (handle %p) for process %u\n",
          port, port->pal_handle, port->info.vmid);

    bool need_restart = false;
    type = type ? (type & port->info.type) : port->info.type;

    if ((type & IPC_PORT_KEEPALIVE) ^ (port->info.type & IPC_PORT_KEEPALIVE))
        need_restart = true;

    /* if the port still has other uses, do not remove it */
    if (port->info.type & ~(type|IPC_PORT_IFPOLL|IPC_PORT_KEEPALIVE)) {
        debug("masking port %p (handle %p): type %x->%x\n",
              port, port->pal_handle, port->info.type,
              port->info.type & ~type);
        port->info.type &= ~type;
        goto out;
    }

    if (port->info.type & IPC_PORT_IFPOLL)
        need_restart = true;

    if (!list_empty(&port->list)) {
        list_del_init(&port->list);
        port->info.type &= IPC_PORT_IFPOLL;
        __put_ipc_port(port);
    }

    if (!hlist_unhashed(&port->hlist)) {
        hlist_del_init(&port->hlist);
        __put_ipc_port(port);
    }

out:
    port->update = true;
    return need_restart;
}
/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
    const unsigned long s = req->rq_state;
    struct drbd_conf *mdev = req->mdev;
    /* only WRITES may end up here without a master bio (on barrier ack) */
    int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;

    /* we must not complete the master bio, while it is
     * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
     * not yet acknowledged by the peer
     * not yet completed by the local io subsystem
     * these flags may get cleared in any order by
     *    the worker,
     *    the receiver,
     *    the bio_endio completion callbacks.
     */
    if (s & RQ_NET_QUEUED)
        return;
    if (s & RQ_NET_PENDING)
        return;
    if (s & RQ_LOCAL_PENDING)
        return;

    if (req->master_bio) {
        /* this is data_received (remote read)
         * or protocol C P_WRITE_ACK
         * or protocol B P_RECV_ACK
         * or protocol A "handed_over_to_network" (SendAck)
         * or canceled or failed,
         * or killed from the transfer log due to connection loss.
         */

        /*
         * figure out whether to report success or failure.
         *
         * report success when at least one of the operations succeeded.
         * or, to put the other way,
         * only report failure, when both operations failed.
         *
         * what to do about the failures is handled elsewhere.
         * what we need to do here is just: complete the master_bio.
         *
         * local completion error, if any, has been stored as ERR_PTR
         * in private_bio within drbd_endio_pri.
         */
        int ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
        int error = PTR_ERR(req->private_bio);

        /* remove the request from the conflict detection
         * respective block_id verification hash */
        if (!hlist_unhashed(&req->collision))
            hlist_del(&req->collision);
        else
            D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);

        /* for writes we need to do some extra housekeeping */
        if (rw == WRITE)
            _about_to_complete_local_write(mdev, req);

        /* Update disk stats */
        _drbd_end_io_acct(mdev, req);

        m->error = ok ? 0 : (error ?: -EIO);
        m->bio = req->master_bio;
        req->master_bio = NULL;
    }
}
/**
 * batadv_forw_packet_was_stolen() - check whether someone stole this packet
 * @forw_packet: the forwarding packet to check
 *
 * This function checks whether the given forwarding packet was claimed by
 * someone else for free().
 *
 * Return: True if someone stole it, false otherwise.
 */
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
    return !hlist_unhashed(&forw_packet->cleanup_list);
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
    struct entry *e = ep->entries + from_cblock(cblock);
    return !hlist_unhashed(&e->hlist) ? e : NULL;
}
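/*
 * Hedged sketch, not from dm-cache or batman-adv: both snippets above reuse
 * the hashed/unhashed state of an embedded hlist_node as a boolean flag. An
 * unhashed node means "free" (epool_find) or "not stolen"
 * (batadv_forw_packet_was_stolen), which saves a separate flag field but
 * only works if every removal path goes through hlist_del_init(). This toy
 * pool, built on the minimal hlist helpers sketched after hlist_del_init()
 * above, shows the idea; "toy_pool", "toy_alloc", "toy_free" and "toy_find"
 * are made-up names. It assumes the pool is zero-initialized, which leaves
 * every node unhashed and therefore free.
 */
#define POOL_SIZE 16

struct toy_entry {
    struct hlist_node hlist;   /* hashed <=> allocated */
    int value;
};

struct toy_pool {
    struct hlist_head allocated;          /* all live entries */
    struct toy_entry entries[POOL_SIZE];  /* index is the entry's id */
};

/* Allocate slot idx (idx < POOL_SIZE); NULL if it is already in use. */
static struct toy_entry *toy_alloc(struct toy_pool *p, unsigned idx)
{
    struct toy_entry *e = &p->entries[idx];
    if (!hlist_unhashed(&e->hlist))
        return NULL;                  /* already allocated */
    hlist_add_head(&e->hlist, &p->allocated);
    return e;
}

static void toy_free(struct toy_entry *e)
{
    hlist_del_init(&e->hlist);   /* del_init keeps the flag meaningful */
}

/* Lookup by index, as in epool_find(): NULL means the slot is free. */
static struct toy_entry *toy_find(struct toy_pool *p, unsigned idx)
{
    struct toy_entry *e = &p->entries[idx];
    return !hlist_unhashed(&e->hlist) ? e : NULL;
}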
static void minix_inode_unhash(struct minix_inode *mi)
{
    if (!hlist_unhashed(&mi->m_hnode))
        hlist_del(&mi->m_hnode);
}
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct sk_buff *skb)
{
    const unsigned char *dest = eth_hdr(skb)->h_dest;
    struct net_bridge_port *p = br_port_get_rcu(skb->dev);
    struct net_bridge *br;
    struct net_bridge_fdb_entry *dst;
    struct net_bridge_mdb_entry *mdst;
    struct sk_buff *skb2;

    if (!p || p->state == BR_STATE_DISABLED)
        goto drop;

    /* insert into forwarding database after filtering to avoid spoofing */
    br = p->br;
    br_fdb_update(br, p, eth_hdr(skb)->h_source);

    if (is_multicast_ether_addr(dest) && br_multicast_rcv(br, p, skb))
        goto drop;

    if (p->state == BR_STATE_LEARNING)
        goto drop;

    BR_INPUT_SKB_CB(skb)->brdev = br->dev;

    /* The packet skb2 goes to the local host (NULL to skip). */
    skb2 = NULL;
    if (br->dev->flags & IFF_PROMISC)
        skb2 = skb;

    dst = NULL;

    if (is_multicast_ether_addr(dest)) {
        mdst = br_mdb_get(br, skb);
        if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
            if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
                br_multicast_is_router(br))
                skb2 = skb;
            br_multicast_forward(mdst, skb, skb2);
            skb = NULL;
            if (!skb2)
                goto out;
        } else
            skb2 = skb;

        br->dev->stats.multicast++;
    } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
        skb2 = skb;
        /* Do not forward the packet since it's local. */
        skb = NULL;
    }

    if (skb) {
        if (dst)
            br_forward(dst->dst, skb, skb2);
        else
            br_flood_forward(br, skb, skb2);
    }

    if (skb2)
#ifdef CONFIG_WIRELESS_GUEST_ZONE
    {
        /* deny guest zone access to the local UI */
        if (unlikely(br->guestzone_enabled)) {
            if (unlikely(p->support_guest_zone)) {
                if (!cameo_check_guest_local(skb2, p)) {
                    kfree_skb(skb2);
                    goto out;
                }
            }
        }
#endif
        return br_pass_frame_up(skb2);
#ifdef CONFIG_WIRELESS_GUEST_ZONE
    }
#endif

out:
    return 0;

drop:
    kfree_skb(skb);
    goto out;
}
static void block_unhash(struct block *block)
{
    if (!hlist_unhashed(&block->b_hnode))
        hlist_del(&block->b_hnode);
}