void sim_event_dispatch_pending()
{
    sim_event_t *ev = heap_peek(gQueue->activeEventHeap);
    while (ev && ev->fireTime < sim_time_get_time_stamp()) {
        heap_remove(gQueue->activeEventHeap);
        ev->handler(ev->data);
        sim_event_release(ev);
        ev = heap_peek(gQueue->activeEventHeap);
    }
}
int my_heapsort(int **arrayptr, int size)
{
    int retval, counter;
    int *element, *element2;
    heap *heap = heap_new(0);

    retval = 0;
    /* let's sort that array heap style */
    for (counter = 0; counter < size; counter++) {
        element = malloc(sizeof *element);
        *element = arrayptr[0][counter];
        heap_add(heap, element, *element);
    }
    /* overwrite the original values with the sorted ones */
    for (counter = 0; counter < size; counter++) {
        element = (int *)heap_peek(heap);
        element2 = (int *)heap_pop(heap);
        if (*element != *element2) {
            fprintf(stderr,
                    "on iteration: %d, peek returned %d at %p, pop returned %d at %p.\n",
                    counter, *element, (void *)element, *element2, (void *)element2);
            retval++;
        }
        arrayptr[0][counter] = *element2;
        free(element2); /* each element was heap-allocated above */
    }
    heap_free(heap);
    return retval;
}
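/*
 * A hypothetical usage sketch for my_heapsort() above, not taken from the
 * original source. It assumes the heap library's header (and the prototype
 * for my_heapsort) is already included; the resulting order depends on
 * whether the underlying heap is a min- or max-heap.
 */
#include <stdio.h>

int main(void)
{
    int values[] = { 5, 1, 4, 2, 3 };
    int *ptr = values;
    int mismatches = my_heapsort(&ptr, 5);

    for (int i = 0; i < 5; i++)
        printf("%d ", values[i]);
    printf("\n(peek/pop mismatches: %d)\n", mismatches);
    return 0;
}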
void *heap_pop(void **arr, size_t *len, int (*func)(void *, void *))
{
    if (arr == NULL || *len == 0)
        return NULL;
    void *root = heap_peek(arr);
    arr[0] = arr[*len - 1];
    *len -= 1;
    heapify(arr, *len, 0, func);
    return root;
}
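/*
 * A minimal sketch of the sift-down helper assumed by the array-based
 * heap_pop() above. The name heapify, its argument order, and the comparator
 * convention (func(a, b) > 0 when a should sit above b) are assumptions for
 * illustration; the original helper is not shown here.
 */
static void heapify(void **arr, size_t len, size_t i, int (*func)(void *, void *))
{
    for (;;) {
        size_t left = 2 * i + 1;
        size_t right = 2 * i + 2;
        size_t best = i;

        if (left < len && func(arr[left], arr[best]) > 0)
            best = left;
        if (right < len && func(arr[right], arr[best]) > 0)
            best = right;
        if (best == i)
            break;

        /* swap the current node with its higher-priority child and continue */
        void *tmp = arr[i];
        arr[i] = arr[best];
        arr[best] = tmp;
        i = best;
    }
}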
heap_node_t *heap_pop(heap_t *heap)
{
    heap_node_t *node = heap_peek(heap);
    if (NULL != node) {
        heap_remove(node);
    }
    return node;
}
void ticker_tick(struct ticker *ticker, uint64_t microseconds)
{
    ticker->tick += microseconds;
    uint64_t key;
    void *data;
    if (heap_peek(&ticker->heap, &key, &data) == 0) {
        if (key < ticker->tick) {
            tm_thread_raise_flag(current_thread, THREAD_TICKER_DOWORK);
        }
    }
}
static void invalidate_buckets_lru(struct cache *ca)
{
    struct bucket *b;
    ssize_t i;

    ca->heap.used = 0;

    for_each_bucket(b, ca) {
        /*
         * If we fill up the unused list, if we then return before
         * adding anything to the free_inc list we'll skip writing
         * prios/gens and just go back to allocating from the unused
         * list:
         */
        if (fifo_full(&ca->unused))
            return;

        if (!can_invalidate_bucket(ca, b))
            continue;

        if (!GC_SECTORS_USED(b) && bch_bucket_add_unused(ca, b))
            continue;

        if (!heap_full(&ca->heap))
            heap_add(&ca->heap, b, bucket_max_cmp);
        else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
            ca->heap.data[0] = b;
            heap_sift(&ca->heap, 0, bucket_max_cmp);
        }
    }

    for (i = ca->heap.used / 2 - 1; i >= 0; --i)
        heap_sift(&ca->heap, i, bucket_min_cmp);

    while (!fifo_full(&ca->free_inc)) {
        if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
            /*
             * We don't want to be calling invalidate_buckets()
             * multiple times when it can't do anything
             */
            ca->invalidate_needs_gc = 1;
            wake_up_gc(ca->set);
            return;
        }

        invalidate_one_bucket(ca, b);
    }
}
long long heap_extract(struct heap_t *heap, void **data)
{
    long long value;

    /* peek min */
    value = heap_peek(heap, data);
    if (heap->error)
        return 0;

    /* delete element from heap */
    heap->count--;
    heap->elem[0] = heap->elem[heap->count];
    heapify(heap, 0);

    return value;
}
static void timer_tick()
{
    timer_event_t *event;
    timer_time_t time;

    pthread_mutex_lock(&timer_mutex);
    time = timer_getmillis();
    for (;;) {
        event = heap_peek(&timer_events);
        if (event == NULL || event->time > time) {
            break;
        }
        event = heap_pop(&timer_events);
        event->callback(event->data);
        free(event);
    }
    pthread_mutex_unlock(&timer_mutex);
}
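/*
 * timer_tick() above assumes timer_events is a min-heap keyed on event->time,
 * so heap_peek() always returns the earliest deadline. A minimal comparator
 * sketch under that assumption; the name timer_event_cmp and the
 * "negative means a expires first" convention are hypothetical, not taken
 * from the original code.
 */
static int timer_event_cmp(const void *a, const void *b)
{
    const timer_event_t *ea = a;
    const timer_event_t *eb = b;

    if (ea->time < eb->time)
        return -1;
    if (ea->time > eb->time)
        return 1;
    return 0;
}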
/* Check expired timers */
static uint32_t ktimers_check(void)
{
    struct ktimer *t;
    struct ktimer t_previous;

    if (!ktimer_list)
        return -1;
    if (!ktimer_list->n)
        return -1;

    t = heap_first(ktimer_list);
    while ((t) && (t->expire_time < jiffies)) {
        if (t->handler) {
            t->handler(jiffies, t->arg);
        }
        /* discard the expired entry and look at the next one */
        heap_peek(ktimer_list, &t_previous);
        t = heap_first(ktimer_list);
    }
    if (!t)
        return -1;
    return (t->expire_time - jiffies);
}
void main_loop(void)
{
    int n;
    struct timer *timer;
    int delay;

    while (num_pollfds > 1 || heap_len(timers) > 1 ||
           pollfds[0].events & POLLOUT) {
        if (heap_empty(timers)) {
            timer = NULL;
            delay = -1;
        } else {
            timer = heap_peek(timers);
            delay = (timer->time - get_now()) * 1000;
        }

        if (timer && delay <= 0)
            n = 0;
        else
            n = poll(pollfds, num_pollfds, delay);

        if (!n) {
            timer = heap_extract_min(timers);
            timer->func(timer->data);
            free(timer);
            goto cont;
        }

        for (int i = 0; i < num_pollfds && n; i++) {
            if (pollfds[i].revents) {
                event_handlers[i]->func(event_handlers[i]->data);
                n--;
                /* We may have just deleted this id.
                 * Try it again in case it's a new
                 * one. */
                i--;
            }
        }
cont:
        maybe_dequeue();
    }
}
void timer_check(void)
{
    evquick_timer_instance t, *first;
    unsigned long long now = gettimeofdayms();

    first = heap_first(ctx->timers);
    while (first && (first->expire <= now)) {
        heap_peek(ctx->timers, &t);
        if (!t.ev_timer) {
            first = heap_first(ctx->timers);
            continue;
        }
        if (t.ev_timer->flags & EVQUICK_EV_DISABLED) {
            /* Timer was disabled in the meanwhile.
             * Take no action, and destroy it. */
            free(t.ev_timer);
        } else if (t.ev_timer->flags & EVQUICK_EV_RETRIGGER) {
            timer_trigger(t.ev_timer, now, now + t.ev_timer->interval);
            t.ev_timer->callback(t.ev_timer->arg);
            /* Don't free the timer, reuse for the next instance
             * that has just been scheduled. */
        } else {
            /* One shot: invoke the callback,
             * then destroy the timer. */
            t.ev_timer->callback(t.ev_timer->arg);
            free(t.ev_timer);
        }
        first = heap_first(ctx->timers);
    }
    if (first) {
        unsigned long long interval = first->expire - now;
        if (interval >= 1000)
            alarm((unsigned)(interval / 1000));
        else
            ualarm((useconds_t)(1000 * (first->expire - now)), 0);
    }
}
void ticker_dowork(struct ticker *ticker)
{
    uint64_t key;
    void *data;
    int old = cpu_interrupt_set(0);

    assert(!current_thread->blocklist);
    if (__current_cpu->preempt_disable > 0) {
        cpu_interrupt_set(old);
        return;
    }
    if (current_thread->held_locks) {
        cpu_interrupt_set(old);
        return;
    }
    while (heap_peek(&ticker->heap, &key, &data) == 0) {
        if (key < ticker->tick) {
            /* get the data again, since it's cheap and
             * we need to in case something bubbled up
             * through the heap between the call to
             * peek and now */
            spinlock_acquire(&ticker->lock);
            int res = heap_pop(&ticker->heap, &key, &data);
            if (!res)
                tm_thread_lower_flag(current_thread, THREAD_TICKER_DOWORK);
            spinlock_release(&ticker->lock);
            if (res == 0) {
                /* handle the time-event */
                struct async_call *call = (struct async_call *)data;
                call->queue = 0;
                async_call_execute(call);
            }
        } else {
            break;
        }
    }
    cpu_interrupt_set(old);
}
static void invalidate_buckets_lru(struct cache *ca)
{
    struct bucket *b;
    ssize_t i;

    ca->heap.used = 0;

    for_each_bucket(b, ca) {
        if (!bch_can_invalidate_bucket(ca, b))
            continue;

        if (!heap_full(&ca->heap))
            heap_add(&ca->heap, b, bucket_max_cmp);
        else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
            ca->heap.data[0] = b;
            heap_sift(&ca->heap, 0, bucket_max_cmp);
        }
    }

    for (i = ca->heap.used / 2 - 1; i >= 0; --i)
        heap_sift(&ca->heap, i, bucket_min_cmp);

    while (!fifo_full(&ca->free_inc)) {
        if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
            /*
             * We don't want to be calling invalidate_buckets()
             * multiple times when it can't do anything
             */
            ca->invalidate_needs_gc = 1;
            wake_up_gc(ca->set);
            return;
        }

        bch_invalidate_one_bucket(ca, b);
    }
}
list *search_a_star(void *state, void *state_world,
                    search_is_goal state_goal_func,
                    search_gen_successors state_gen_func,
                    search_link_parent state_link_func,
                    search_goal_backtrace state_back_func,
                    search_trans_cost state_trans_func,
                    search_heuristic state_heur_func,
                    search_set_f_cost state_f_cost_set_func,
                    hash_func state_hash_alg,
                    generic_comp state_comp_func,
                    generic_cpy state_copy_func,
                    generic_op state_free_func,
                    heap_comp state_heap_func)
{
    int *g_cost_ptr, *f_cost_ptr, f_cost, tmp_f, g_cost, found;
    void *current_state, *successor_state, *heap_memory_location;
    list *states_overflow, *successor_list, *path;
    hash_table *states_closed_set, *states_open_set;
    hash_map *states_g_cost, *states_f_cost, *states_heap_index;
    heap *states_heap;

    states_overflow = list_create(NULL, NULL, state_free_func);
    states_closed_set = hash_table_create(89, .75, state_hash_alg, state_comp_func,
                                          state_copy_func, state_free_func);
    states_open_set = hash_table_create(89, .75, state_hash_alg, state_comp_func,
                                        state_copy_func, state_free_func);
    states_g_cost = hash_map_create(89, .75, state_hash_alg, state_comp_func,
                                    NULL, NULL, NULL, state_free_func, (generic_op)free);
    states_f_cost = hash_map_create(89, .75, state_hash_alg, state_comp_func,
                                    NULL, NULL, NULL, state_free_func, (generic_op)free);
    states_heap_index = hash_map_create(89, .75, state_hash_alg, state_comp_func,
                                        NULL, NULL, NULL, NULL, NULL);
    states_heap = heap_create(89, state_heap_func, state_comp_func,
                              state_copy_func, state_free_func);

    current_state = state;
    f_cost = state_heur_func(current_state, NULL);
    state_f_cost_set_func(current_state, f_cost);
    g_cost = 0;
    g_cost_ptr = malloc(sizeof(int));
    *g_cost_ptr = g_cost;
    f_cost_ptr = malloc(sizeof(int));
    *f_cost_ptr = f_cost;
    hash_map_insert(states_g_cost, current_state, g_cost_ptr, 0);
    heap_memory_location = heap_add(states_heap, state_copy_func(current_state));
    hash_table_insert(states_open_set, state_copy_func(current_state), 0);
    hash_map_insert(states_f_cost, state_copy_func(current_state), f_cost_ptr, 0);
    hash_map_insert(states_heap_index, current_state, heap_memory_location, 1);

    path = NULL;
    found = 0;
    while (!heap_is_empty(states_heap) && !found) {
        current_state = state_copy_func(heap_peek(states_heap));
        heap_remove(states_heap);
        hash_table_remove(states_open_set, current_state);
        hash_map_remove(states_heap_index, current_state);
        if (state_goal_func(current_state, state_world)) {
            path = state_back_func(current_state);
            found = 1;
        } else {
            if (!hash_table_insert(states_closed_set, current_state, 0)) {
                list_push_front(states_overflow, current_state);
            }
            successor_list = state_gen_func(current_state, state_world);
            while (!list_is_empty(successor_list)) {
                successor_state = list_front(successor_list);
                g_cost = *(int *)hash_map_get(states_g_cost, current_state) +
                         state_trans_func(current_state, successor_state, state_world);
                f_cost = g_cost + state_heur_func(successor_state, state_world);
                /* treat "no recorded f-cost" as infinity */
                tmp_f = hash_map_contains_key(states_f_cost, successor_state) ?
                        *(int *)hash_map_get(states_f_cost, successor_state) : INT_MAX;
                if (hash_table_contains(states_closed_set, successor_state) &&
                    f_cost > tmp_f) {
                    list_remove_front(successor_list);
                    continue;
                }
                if (!hash_table_contains(states_open_set, successor_state) ||
                    f_cost < tmp_f) {
                    state_f_cost_set_func(successor_state, f_cost);
                    state_link_func(successor_state, current_state);
                    g_cost_ptr = malloc(sizeof(int));
                    f_cost_ptr = malloc(sizeof(int));
                    *g_cost_ptr = g_cost;
                    *f_cost_ptr = f_cost;
                    if (!hash_table_contains(states_open_set, successor_state)) {
                        hash_table_insert(states_open_set, successor_state, 0);
                        heap_memory_location = heap_add(states_heap,
                                                        state_copy_func(successor_state));
                        hash_map_insert(states_heap_index, successor_state,
                                        heap_memory_location, 1);
                    } else {
                        heap_memory_location = hash_map_get(states_heap_index,
                                                            successor_state);
                        heap_up_mod_data(states_heap, heap_memory_location,
                                         successor_state);
                    }
                    if (!hash_map_set(states_g_cost, successor_state, g_cost_ptr)) {
                        hash_map_insert(states_g_cost, state_copy_func(successor_state),
                                        g_cost_ptr, 0);
                    }
                    if (!hash_map_set(states_f_cost, successor_state, f_cost_ptr)) {
                        hash_map_insert(states_f_cost, state_copy_func(successor_state),
                                        f_cost_ptr, 0);
                    }
                    list_pop(successor_list);
                } else {
                    list_remove_front(successor_list);
                }
            }
            list_kill(successor_list);
        }
    }
    heap_kill(states_heap);
    list_kill(states_overflow);
    hash_map_kill(states_g_cost);
    hash_map_kill(states_f_cost);
    hash_table_kill(states_open_set);
    hash_table_kill(states_closed_set);
    hash_map_dissolve(states_heap_index);
    return path;
}
QueryResult *Query_Execute(Query *query)
{
    //__queryStage_Print(query->root, 0);
    QueryResult *res = malloc(sizeof(QueryResult));
    res->error = 0;
    res->errorString = NULL;
    res->totalResults = 0;
    res->ids = NULL;
    res->numIds = 0;

    int num = query->offset + query->limit;

    heap_t *pq = malloc(heap_sizeof(num));
    heap_init(pq, cmpHits, NULL, num);

    // start lazy evaluation of all query steps
    IndexIterator *it = NULL;
    if (query->root != NULL) {
        it = Query_EvalStage(query, query->root);
    }

    // no query evaluation plan?
    if (query->root == NULL || it == NULL) {
        res->error = QUERY_ERROR_INTERNAL;
        res->errorString = QUERY_ERROR_INTERNAL_STR;
        return res;
    }

    IndexHit *pooledHit = NULL;
    // iterate the root iterator and push everything to the PQ
    while (1) {
        // TODO - Use static allocation
        if (pooledHit == NULL) {
            pooledHit = malloc(sizeof(IndexHit));
        }
        IndexHit *h = pooledHit;
        IndexHit_Init(h);

        int rc = it->Read(it->ctx, h);
        if (rc == INDEXREAD_EOF) {
            break;
        } else if (rc == INDEXREAD_NOTFOUND) {
            continue;
        }

        h->totalFreq = processHitScore(h, query->docTable);
        ++res->totalResults;

        if (heap_count(pq) < heap_size(pq)) {
            heap_offerx(pq, h);
            pooledHit = NULL;
        } else {
            IndexHit *qh = heap_peek(pq);
            if (qh->totalFreq < h->totalFreq) {
                pooledHit = heap_poll(pq);
                heap_offerx(pq, h);
                // IndexHit_Terminate(pooledHit);
            } else {
                pooledHit = h;
                // IndexHit_Terminate(pooledHit);
            }
        }
    }

    if (pooledHit) {
        free(pooledHit);
    }
    it->Free(it);

    // Reverse the results into the final result
    size_t n = MIN(heap_count(pq), query->limit);
    res->numIds = n;
    res->ids = calloc(n, sizeof(RedisModuleString *));
    for (int i = 0; i < n; ++i) {
        IndexHit *h = heap_poll(pq);
        LG_DEBUG("Popping %d freq %f", h->docId, h->totalFreq);
        res->ids[n - i - 1] = Redis_GetDocKey(query->ctx, h->docId);
        free(h);
    }

    // if we still have something in the heap (meaning offset > 0), we need to
    // poll...
    while (heap_count(pq) > 0) {
        IndexHit *h = heap_poll(pq);
        free(h);
    }
    heap_free(pq);

    return res;
}
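/*
 * Query_Execute() above keeps the top-scoring hits in a bounded min-heap:
 * heap_peek() returns the lowest-scoring hit seen so far, which is evicted
 * whenever a better hit arrives. A minimal sketch of what the cmpHits
 * comparator could look like under that assumption; the exact signature and
 * sign convention in the original code may differ (shown here with a common
 * (a, b, udata) callback shape).
 */
static int cmpHits(const void *a, const void *b, const void *udata)
{
    const IndexHit *ha = a;
    const IndexHit *hb = b;

    (void)udata; /* unused user data pointer */

    if (ha->totalFreq < hb->totalFreq)
        return -1;
    if (ha->totalFreq > hb->totalFreq)
        return 1;
    return 0;
}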
__inline ElmtType pqueue_peek(const PQueue queue)
{
    return heap_peek(queue);
}