static int unx_hash_cred(struct auth_cred *acred, unsigned int hashbits) { return hash_64(from_kgid(&init_user_ns, acred->gid) | ((u64)from_kuid(&init_user_ns, acred->uid) << (sizeof(gid_t) * 8)), hashbits); }
/*
 * Remove @e from the hash table.  The buckets are singly linked
 * lists, so we must walk the bucket to find the predecessor before
 * we can unlink.
 */
static void h_remove(struct hash_table *ht, struct entry *e)
{
	unsigned bucket = hash_64(from_oblock(e->oblock), ht->hash_bits);
	struct entry *found, *prev;

	found = __h_lookup(ht, bucket, e->oblock, &prev);
	if (found)
		__h_unlink(ht, bucket, found, prev);
}
/*
 * Look up @oblock in the policy's hash table.  On a hit the entry is
 * moved to the head of its bucket so repeated lookups stay cheap.
 * Returns NULL if the block is not present.
 */
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct hlist_head *bucket;
	struct entry *e;

	bucket = mq->table + hash_64(from_oblock(oblock), mq->hash_bits);
	hlist_for_each_entry(e, bucket, hlist) {
		if (e->oblock == oblock) {
			/* Promote to the bucket head before returning. */
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}
	}

	return NULL;
}
/*
 * Look up @oblock and, on a hit, promote the entry to the front of
 * its bucket — it is likely to be referenced again soon.
 */
static struct entry *h_lookup(struct hash_table *ht, dm_oblock_t oblock)
{
	unsigned bucket = hash_64(from_oblock(oblock), ht->hash_bits);
	struct entry *prev;
	struct entry *e = __h_lookup(ht, bucket, oblock, &prev);

	if (e && prev) {
		/* Not already at the head: unlink, then re-insert in front. */
		__h_unlink(ht, bucket, e, prev);
		__h_insert(ht, bucket, e);
	}

	return e;
}
/*
 * Find the trace entry registered for @item_id, or NULL if there is
 * no matching entry in the hash.
 */
static struct trace_func_item *func_search(unsigned long item_id)
{
	struct trace_func_item *item;
	struct hlist_head *bucket;
	struct hlist_node *pos;

	bucket = &ftrace_func_hash[hash_64(item_id, TRACE_HASH_BITS)];
	hlist_for_each_entry(item, pos, bucket, node) {
		if (item->id == item_id)
			return item;
	}

	return NULL;
}
/*
 * Allocate a new trace entry for @item_id and link it into the hash.
 * Returns 0 on success, -1 if the allocation fails.  No duplicate
 * check is performed; callers are expected to search first.
 */
static int func_trace_add(unsigned long item_id)
{
	struct trace_func_item *item;
	unsigned long key;

	item = malloc(sizeof(*item));
	if (!item)
		return -1;

	item->id = item_id;
	key = hash_64(item_id, TRACE_HASH_BITS);
	hlist_add_head(&item->node, &ftrace_func_hash[key]);
	return 0;
}
/*
 * Simple hash table implementation.  Should replace with the standard
 * hash table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	struct hlist_head *bucket;

	bucket = mq->table + hash_64(from_oblock(e->oblock), mq->hash_bits);
	hlist_add_head(&e->hlist, bucket);
}
/*
 * Map @block to a bucket index in the journal's revoke table, using
 * the shift the table was sized with.
 */
static inline int hash(journal_t *journal, unsigned long long block)
{
	int bucket = hash_64(block, journal->j_revoke->hash_shift);

	return bucket;
}
/* Link @e into the bucket selected by hashing its oblock. */
static void h_insert(struct hash_table *ht, struct entry *e)
{
	unsigned bucket = hash_64(from_oblock(e->oblock), ht->hash_bits);

	__h_insert(ht, bucket, e);
}
/* Map @vid to a bucket index in a table of 1 << HASH_BITS slots. */
static inline int hash(uint64_t vid)
{
	int bucket = hash_64(vid, HASH_BITS);

	return bucket;
}
/*
 * Public call interface for looking up machine creds.
 *
 * Builds a transient auth_cred describing the machine credential for
 * @service_name and resolves it through the generic auth's credcache.
 */
struct rpc_cred *rpc_lookup_machine_cred(const char *service_name)
{
	struct auth_cred acred = {
		.uid = RPC_MACHINE_CRED_USERID,
		.gid = RPC_MACHINE_CRED_GROUPID,
		.principal = service_name,
		.machine_cred = 1,
	};

	dprintk("RPC: looking up machine cred for service %s\n", service_name);
	return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
}
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);

/*
 * Bind a generic cred to a task by re-resolving its embedded auth_cred
 * against the auth flavour the task's client actually uses.
 */
static struct rpc_cred *generic_bind_cred(struct rpc_task *task,
					  struct rpc_cred *cred,
					  int lookupflags)
{
	struct rpc_auth *auth = task->tk_client->cl_auth;
	struct auth_cred *acred =
		&container_of(cred, struct generic_cred, gc_base)->acred;

	return auth->au_ops->lookup_cred(auth, acred, lookupflags);
}

/*
 * Hash an auth_cred for the credcache: gid in the low bits, uid shifted
 * above it, folded down to @hashbits with hash_64().
 */
static int generic_hash_cred(struct auth_cred *acred, unsigned int hashbits)
{
	return hash_64(from_kgid(&init_user_ns, acred->gid) |
		       ((u64)from_kuid(&init_user_ns, acred->uid) <<
			(sizeof(gid_t) * 8)),
		       hashbits);
}

/*
 * Lookup generic creds for current process.
 */
static struct rpc_cred *generic_lookup_cred(struct rpc_auth *auth,
					    struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(&generic_auth, acred, flags,
					GFP_KERNEL);
}

/*
 * Allocate and initialise a generic cred from @acred.  Takes a
 * reference on the group_info, if any, so the copy stays valid for the
 * cred's lifetime.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct rpc_cred *generic_create_cred(struct rpc_auth *auth,
					    struct auth_cred *acred,
					    int flags, gfp_t gfp)
{
	struct generic_cred *gcred;

	gcred = kmalloc(sizeof(*gcred), gfp);
	if (gcred == NULL)
		return ERR_PTR(-ENOMEM);

	rpcauth_init_cred(&gcred->gc_base, acred, &generic_auth,
			  &generic_credops);
	/* Freshly created creds are immediately usable. */
	gcred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;

	gcred->acred.uid = acred->uid;
	gcred->acred.gid = acred->gid;
	gcred->acred.group_info = acred->group_info;
	gcred->acred.ac_flags = 0;
	if (gcred->acred.group_info != NULL)
		get_group_info(gcred->acred.group_info);
	gcred->acred.machine_cred = acred->machine_cred;
	gcred->acred.principal = acred->principal;

	dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
		gcred->acred.machine_cred ? "machine" : "generic",
		gcred,
		from_kuid(&init_user_ns, acred->uid),
		from_kgid(&init_user_ns, acred->gid));
	return &gcred->gc_base;
}

/*
 * Release a generic cred: drop the group_info reference taken in
 * generic_create_cred() and free the containing structure.
 */
static void generic_free_cred(struct rpc_cred *cred)
{
	struct generic_cred *gcred =
		container_of(cred, struct generic_cred, gc_base);

	dprintk("RPC: generic_free_cred %p\n", gcred);
	if (gcred->acred.group_info != NULL)
		put_group_info(gcred->acred.group_info);
	kfree(gcred);
}

/* RCU callback: recover the cred from its rcu_head and free it. */
static void generic_free_cred_callback(struct rcu_head *head)
{
	struct rpc_cred *cred = container_of(head, struct rpc_cred, cr_rcu);

	generic_free_cred(cred);
}

/*
 * Destroy a cred.  Freeing is deferred past an RCU grace period so
 * lockless credcache readers never see a freed cred.
 */
static void generic_destroy_cred(struct rpc_cred *cred)
{
	call_rcu(&cred->cr_rcu, generic_free_cred_callback);
}
/*
 * Hash a virtual page number into the vPTE hash table.  Only the low
 * 36 bits of @vpage participate in the hash.
 */
static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	u64 masked = vpage & 0xfffffffffULL;

	return hash_64(masked, HPTEG_HASH_BITS_VPTE);
}
/* Map block @b to a slot in the prefetch table. */
static unsigned prefetch_hash(dm_block_t b)
{
	unsigned slot = hash_64(b, PREFETCH_BITS);

	return slot;
}