/*
 * Find the bucket entry matching the action's key and, if the update is
 * permitted, replace it with action->new_entry.
 *
 * On a key match sets action->old_entry and action->entry_match.  The
 * replacement happens only when entry_match is true: either the action
 * carries no stamp (unconditional update) or the stamps agree (CAS-style
 * conditional update).  The replaced entry goes onto 'freelist'.
 * Expired entries met during the scan are unlinked and freed as well.
 * If no match is found, old_entry is NULL and entry_match is false.
 */
static void mc_action_bucket_update(struct mc_action *action, struct mm_stack *bucket, struct mm_stack *freelist)
{
	struct mc_tpart *part = action->part;
	struct mm_slink *pred = &bucket->head;
	uint32_t time = mc_action_get_exp_time();
	while (!mm_stack_is_tail(pred)) {
		struct mm_slink *link = pred->next;
		struct mc_entry *entry = containerof(link, struct mc_entry, link);
		if (mc_action_is_expired_entry(part, entry, time)) {
			/* Evict an expired entry in passing.  Do not advance
			   'pred': its successor has just changed. */
			mc_action_unlink_entry(part, pred, entry);
			mm_stack_insert(freelist, &entry->link);
		} else {
			if (mc_action_match_entry(action, entry)) {
				action->old_entry = entry;
				/* Zero stamp = unconditional update; otherwise the
				   caller-supplied stamp must match the entry's. */
				action->entry_match = (!action->stamp || action->stamp == entry->stamp);
				if (action->entry_match) {
					/* Preserve the old entry's state for the
					   replacement, swap the entries in place. */
					uint8_t state = entry->state;
					mc_action_unlink_entry(action->part, pred, entry);
					mm_stack_insert(freelist, &entry->link);
					mc_action_bucket_insert(action, bucket, state);
				}
				return;
			}
			pred = link;
		}
	}
	/* No entry with the given key. */
	action->old_entry = NULL;
	action->entry_match = false;
}
static void mc_action_bucket_lookup(struct mc_action *action, struct mm_stack *bucket, struct mm_stack *freelist) { struct mc_tpart *part = action->part; struct mm_slink *pred = &bucket->head; uint32_t time = mc_action_get_exp_time(); while (!mm_stack_is_tail(pred)) { struct mm_slink *link = pred->next; struct mc_entry *entry = containerof(link, struct mc_entry, link); if (mc_action_is_expired_entry(part, entry, time)) { mc_action_unlink_entry(part, pred, entry); mm_stack_insert(freelist, &entry->link); } else { if (mc_action_match_entry(action, entry)) { ASSERT(entry->state >= MC_ENTRY_USED_MIN); ASSERT(entry->state <= MC_ENTRY_USED_MAX); action->old_entry = entry; return; } pred = link; } } action->old_entry = NULL; }
/*
 * Return a no-longer-used entry to the partition's free list and
 * account for it in the free-entry counter.
 */
static void mc_action_free_entry(struct mc_tpart *part, struct mc_entry *entry)
{
	/* Only entries already taken out of use may be freed. */
	ASSERT(entry->state == MC_ENTRY_NOT_USED);

	entry->state = MC_ENTRY_FREE;
	mm_stack_insert(&part->free_list, &entry->link);
	part->nentries_free += 1;
}
/*
 * Return an item to the pool's free list.
 *
 * Fix: the definition had no return type, which means implicit 'int' —
 * invalid since C99 and inconsistent with the function returning no value.
 * It now explicitly returns 'void' like its siblings.
 *
 * The item must have been allocated from this pool (checked by ASSERT).
 * The item's own storage is reused to hold the free-list link node, so
 * its previous contents are clobbered.
 */
void
mm_pool_local_free(struct mm_pool *pool, void *item)
{
	ENTER();
	ASSERT(mm_pool_contains(pool, item));

	/* Reinterpret the item's first bytes as the intrusive link. */
	mm_stack_insert(&pool->free_list, (struct mm_slink *) item);

	LEAVE();
}
/*
 * Insert action->new_entry into the given bucket with the given state,
 * stamping it with the partition's current stamp and charging its size
 * to the partition volume.
 */
static void mc_action_bucket_insert(struct mc_action *action, struct mm_stack *bucket, uint8_t state)
{
	ASSERT(action->new_entry->state == MC_ENTRY_NOT_USED);
	/* Fix: the original check used '||', which is a tautology (a value
	   can never equal two distinct constants at once, so the condition
	   was always true and the assert verified nothing).  The intent is
	   that the new state is an in-use state, i.e. neither NOT_USED nor
	   FREE. */
	ASSERT(state != MC_ENTRY_NOT_USED && state != MC_ENTRY_FREE);

	action->new_entry->state = state;
	action->new_entry->stamp = action->part->stamp;
	mm_stack_insert(bucket, &action->new_entry->link);

	/* Per-partition stamps advance by nparts so they stay globally
	   unique across partitions. */
	action->part->stamp += mc_table.nparts;
	action->part->volume += mc_entry_size(action->new_entry);

	// Store stamp value needed for binary protocol response.
	action->stamp = action->new_entry->stamp;
}
/*
 * Select 'nrequired' eviction victims from the partition using a
 * CLOCK-style sweep.
 *
 * Entries the hand passes over that are not chosen get their state
 * decremented (aging), so repeatedly skipped entries eventually become
 * eligible.  The hand wraps around the entry array at most once per
 * call to avoid spinning forever when not enough victims exist.
 * Chosen victims are unlinked from their buckets and collected on the
 * 'victims' stack.
 *
 * Returns true only when exactly the required (non-zero) number of
 * victims was found.
 */
static bool mc_action_find_victims(struct mc_tpart *part, struct mm_stack *victims, uint32_t nrequired)
{
	uint32_t nvictims = 0;
	mm_stack_prepare(victims);

	struct mm_core *core = mm_core_selfptr();
	mm_timeval_t real_time = mm_core_getrealtime(core);
	uint32_t time = real_time / 1000000; // useconds -> seconds.

	bool end = false;
	while (nvictims < nrequired) {
		struct mc_entry *hand = part->clock_hand;
		if (unlikely(hand == part->entries_end)) {
			// Prevent infinite loop.
			if (end)
				break;
			else
				end = true;
			/* Wrap the hand to the start of the entry array. */
			hand = part->entries;
		}
		uint8_t state = hand->state;
		if (state >= MC_ENTRY_USED_MIN && state <= MC_ENTRY_USED_MAX) {
			if (mc_action_is_eviction_victim(part, hand, time)) {
				/* Unlink the victim from its hash bucket and
				   collect it. */
				uint32_t index = mc_table_index(part, hand->hash);
				struct mm_stack *bucket = &part->buckets[index];
				mc_action_remove_entry(part, &bucket->head, hand);
				mm_stack_insert(victims, &hand->link);
				++nvictims;
			} else {
				/* Age the entry so it becomes a candidate on
				   a later pass. */
				hand->state--;
			}
		}
		part->clock_hand = hand + 1;
	}

	return (nvictims > 0 && nvictims == nrequired);
}
/*
 * Get the calling thread's lock statistics slot for the given domain,
 * creating the per-domain record on first use.
 *
 * Fast path: find an already-published domain record without taking the
 * lock.  Slow path: allocate a record, insert it under the domain lock
 * (re-checking for a concurrent insert), then initialize the per-thread
 * data and publish it by setting 'ready' after a store fence.
 *
 * NOTE(review): the record is linked into domain_list before its
 * per-thread data is initialized; correctness appears to rely on
 * mm_lock_find_domain_stat honoring the 'ready' flag — confirm in that
 * helper.
 */
struct mm_lock_stat *
mm_lock_get_domain_stat(struct mm_lock_stat_set *stat_set, struct mm_thread *thread, struct mm_domain *domain)
{
	mm_thread_t dom_index = mm_thread_getnumber(thread);

	// Try to find domain entry optimistically (w/o acquiring a lock).
	struct mm_lock_domain_stat *dom_stat = mm_lock_find_domain_stat(stat_set, domain);
	if (likely(dom_stat != NULL))
		return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);

	// Allocate a new statistics entry.
	dom_stat = mm_global_alloc(sizeof(struct mm_lock_domain_stat));
	dom_stat->domain = domain;
	// Mark it as not ready.
	dom_stat->ready = 0;

	// Start critical section.
	mm_global_lock(&stat_set->domain_lock);

	// Try to find it again in case it was added concurrently.
	struct mm_lock_domain_stat *recheck_stat = mm_lock_find_domain_stat(stat_set, domain);
	if (unlikely(recheck_stat != NULL)) {
		// Bail out if so.
		mm_global_unlock(&stat_set->domain_lock);
		mm_global_free(dom_stat);
		return MM_THREAD_LOCAL_DEREF(dom_index, recheck_stat->stat);
	}

	mm_stack_insert(&stat_set->domain_list, &dom_stat->link);

	// End critical section.
	mm_global_unlock(&stat_set->domain_lock);

	// Initialize per-thread data.
	char *name;
	if (stat_set->moreinfo != NULL)
		name = mm_format(&mm_global_arena, "lock %s (%s)", stat_set->location, stat_set->moreinfo);
	else
		name = mm_format(&mm_global_arena, "lock %s", stat_set->location);
	MM_THREAD_LOCAL_ALLOC(domain, name, dom_stat->stat);
	for (mm_thread_t c = 0; c < domain->nthreads; c++) {
		/* Zero the counters for every thread in the domain. */
		struct mm_lock_stat *stat = MM_THREAD_LOCAL_DEREF(c, dom_stat->stat);
		stat->lock_count = 0;
		stat->fail_count = 0;
	}
	mm_global_free(name);

	// Mark it as ready.  The fence orders the initialization stores
	// above before the 'ready' flag becomes visible to readers.
	mm_memory_store_fence();
	dom_stat->ready = 1;

	return MM_THREAD_LOCAL_DEREF(dom_index, dom_stat->stat);
}