/* Exercise the priority queue: insert four ints, then poll the two
   highest-priority entries and print them. */
int main() {
    int values[] = { 55, 48, 1289, 78 };
    /* 48 is minimal priority queue size */
    priority_queue *pq = create_priority_queue( 48, &comparator );
    for (size_t i = 0; i < sizeof values / sizeof values[0]; ++i)
        priority_queue_insert( pq, &values[i] );
    int first = *(int *) priority_queue_poll( pq );
    printf("First is %d\n", first);
    int second = *(int *) priority_queue_poll( pq );
    printf("Second is %d\n", second);
    free_priority_queue( &pq );
    return EXIT_SUCCESS;
}
int queue_insert_multiple_noorder(void) { // This function exercises the insert bubbling code paths priority_queue_t queue = priority_queue_create(realloc, 0); if (!queue) return 1; if (priority_queue_insert(queue, (void*)34567, 3) != 0) return 1; if (priority_queue_insert(queue, (void*)12345, 1) != 0) return 1; if (priority_queue_insert(queue, (void*)45678, 4) != 0) return 1; if (priority_queue_insert(queue, (void*)23456, 2) != 0) return 1; if (priority_queue_insert(queue, (void*)56789, 5) != 0) return 1; if (priority_queue_get_obj(queue) != (void*)12345) return 1; if (priority_queue_get_obj(queue) != (void*)23456) return 1; if (priority_queue_get_obj(queue) != (void*)34567) return 1; if (priority_queue_get_obj(queue) != (void*)45678) return 1; if (priority_queue_get_obj(queue) != (void*)56789) return 1; if (priority_queue_peek_pri(queue) != -1) return 1; priority_queue_free(queue, free); return 0; }
int queue_insert_multiple_inorder(void) { // This function happens to exercise all of the remove bubbling code paths // (no children, 1 child, 2 children) priority_queue_t queue = priority_queue_create(realloc, 0); if (!queue) return 1; if (priority_queue_insert(queue, (void*)12345, 1) != 0) return 1; if (priority_queue_insert(queue, (void*)23456, 2) != 0) return 1; if (priority_queue_insert(queue, (void*)34567, 3) != 0) return 1; if (priority_queue_insert(queue, (void*)45678, 4) != 0) return 1; if (priority_queue_insert(queue, (void*)56789, 5) != 0) return 1; if (priority_queue_get_obj(queue) != (void*)12345) return 1; if (priority_queue_get_obj(queue) != (void*)23456) return 1; if (priority_queue_get_obj(queue) != (void*)34567) return 1; if (priority_queue_get_obj(queue) != (void*)45678) return 1; if (priority_queue_get_obj(queue) != (void*)56789) return 1; if (priority_queue_peek_pri(queue) != -1) return 1; priority_queue_free(queue, free); return 0; }
/* After inserting 10 then 20 into a min-ordered queue, slot 0 of the
   backing vector must still hold 10 (the second insert swims, the first
   stays at the root). */
char *test_insert_then_swim() {
    priority_queue_p pq = priority_queue_create(sizeof(int), compare_intp);
    priority_queue_insert(pq, test_data(10));
    priority_queue_insert(pq, test_data(20));
    int *root = (int *) vector_get(pq->vector, 0);
    mu_assert(*root == 10, "should have set first item");
    priority_queue_free(pq);
    return NULL;
}
/* TODO do random inserts and deletes */
int main(void)
{
    /* Smoke test: eight known values must come out in ascending order. */
    float numbers[8] = {0.5f, 1.0f, 0.1f, 10.0f, 5.0f, 0.3f, 0.0f, 42.0f};
    int ordered[8] = {6, 2, 5, 0, 1, 4, 3, 7}; /* indices of numbers[], ascending */
    struct PriorityQueue *queue = priority_queue_create();
    for (int i = 0; i < 8; ++i)
        priority_queue_insert(queue, i, numbers[i]);
    for (int i = 0; priority_queue_length(queue); ++i) {
        int id;
        float value;
        priority_queue_minimum(queue, &id, &value);
        assert(ordered[i] == id);
        assert(numbers[ordered[i]] == value); /* values are stored exactly, == is safe */
        priority_queue_extract_min(queue);
    }

    /* Worst case test: insert priorities in strictly decreasing order,
       so every insert must sift all the way up. */
    for (int i = 10000000; i >= 0; --i)
        priority_queue_insert(queue, 10000000 - i, i);

    /* Drain until empty and report any out-of-order pair.  Two fixes vs.
       the original: the insert loop above adds 10000001 entries but the
       old extract loop ran only 10000000 times, leaving one entry in the
       queue at destroy time; and 'previous' was an int, silently
       truncating each float value before the comparison. */
    float previous = -1.0f;
    while (priority_queue_length(queue)) {
        int id;
        float value;
        priority_queue_minimum(queue, &id, &value);
        priority_queue_extract_min(queue);
        if (previous > value)
            printf("%g[%d] ", value, id);
        previous = value;
    }
    printf("\n");

    priority_queue_destroy(queue);
    return 0;
}
int queue_insert_one(void) {
    /* A single insert must be observable through both peek accessors. */
    priority_queue_t queue = priority_queue_create(realloc, 0);
    if (queue == NULL)
        return 1;
    int failed = priority_queue_insert(queue, (void*)12345, 1) != 0
              || priority_queue_peek_pri(queue) != 1
              || priority_queue_peek_obj(queue) != (void*)12345;
    if (failed)
        return 1;
    priority_queue_free(queue, free);
    return 0;
}
int queue_insert_resize(void) {
    /* Same as the no-order test, but the queue starts with capacity 1 so
       every subsequent insert forces a realloc-backed resize. */
    const struct { void *obj; int pri; } in[] = {
        { (void*)34567, 3 }, { (void*)12345, 1 }, { (void*)45678, 4 },
        { (void*)23456, 2 }, { (void*)56789, 5 },
    };
    void *const expect[] = {
        (void*)12345, (void*)23456, (void*)34567, (void*)45678, (void*)56789,
    };
    priority_queue_t queue = priority_queue_create(realloc, 1);
    if (!queue)
        return 1;
    for (size_t i = 0; i < 5; i++)
        if (priority_queue_insert(queue, in[i].obj, in[i].pri) != 0)
            return 1;
    for (size_t i = 0; i < 5; i++)
        if (priority_queue_get_obj(queue) != expect[i])
            return 1;
    /* A drained queue reports priority -1. */
    if (priority_queue_peek_pri(queue) != -1)
        return 1;
    priority_queue_free(queue, free);
    return 0;
}
/*
 * GOMP_taskloop -- GCC libgomp runtime entry for the OpenMP 'taskloop'
 * construct.  Divides the iteration range [start, end) with stride
 * 'step' into about 'num_tasks' chunks (or chunks of roughly
 * 'grainsize' iterations when GOMP_TASK_FLAG_GRAINSIZE is set in
 * 'flags') and runs each chunk as a task calling 'fn'.
 *
 * Structure of the code below (one long physical line per chunk):
 *  1. Early return when the enclosing parallel/taskgroup was cancelled.
 *  2. Compute the iteration count 'n' (two variants depending on
 *     TYPE_is_long); return immediately when the range is empty.
 *  3. Chunking: 'task_step' is the per-task stride; tasks with index
 *     0..nfirst receive one extra 'step' (after task i == nfirst the
 *     code does 'task_step -= step') so a remainder is spread over the
 *     leading tasks.
 *  4. Undeferred path -- taken when IF(0), no team, a final task, or a
 *     large task backlog: tasks run inline on this thread.  When
 *     'cpyfn' is non-NULL, per-task argument copies are built in an
 *     on-stack VLA buffer and 'cpyfn' runs with thr->task pointing at
 *     each on-stack task, before any 'fn' is called.
 *  5. Deferred path: one gomp_task plus its argument block is
 *     heap-allocated per chunk; under team->task_lock each task is
 *     inserted at the BEGIN of the parent-children and taskgroup queues
 *     but at the END of the team queue, counters are bumped, and
 *     sleeping team threads are woken as needed.
 *
 * In both paths the first two TYPE-sized slots of each task's argument
 * block are overwritten with that chunk's [begin, end) bounds just
 * before 'fn' is invoked.
 *
 * 'priority' is clamped to gomp_max_task_priority_var.  Unless
 * GOMP_TASK_FLAG_NOGROUP is set, the whole loop is wrapped in an
 * implicit GOMP_taskgroup_start/GOMP_taskgroup_end pair.
 *
 * NOTE(review): statement order here (icv setup, thr->task switching,
 * lock scope, queue insert order) is load-bearing runtime behavior --
 * the code is intentionally left byte-identical below.
 */
void GOMP_taskloop (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *), long arg_size, long arg_align, unsigned flags, unsigned long num_tasks, int priority, TYPE start, TYPE end, TYPE step) { struct gomp_thread *thr = gomp_thread (); struct gomp_team *team = thr->ts.team; #ifdef HAVE_BROKEN_POSIX_SEMAPHORES /* If pthread_mutex_* is used for omp_*lock*, then each task must be tied to one thread all the time. This means UNTIED tasks must be tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN might be running on different thread than FN. */ if (cpyfn) flags &= ~GOMP_TASK_FLAG_IF; flags &= ~GOMP_TASK_FLAG_UNTIED; #endif /* If parallel or taskgroup has been cancelled, don't start new tasks. */ if (team && gomp_team_barrier_cancelled (&team->barrier)) return; #ifdef TYPE_is_long TYPE s = step; if (step > 0) { if (start >= end) return; s--; } else { if (start <= end) return; s++; } UTYPE n = (end - start + s) / step; #else UTYPE n; if (flags & GOMP_TASK_FLAG_UP) { if (start >= end) return; n = (end - start + step - 1) / step; } else { if (start <= end) return; n = (start - end - step - 1) / -step; } #endif TYPE task_step = step; unsigned long nfirst = n; if (flags & GOMP_TASK_FLAG_GRAINSIZE) { unsigned long grainsize = num_tasks; #ifdef TYPE_is_long num_tasks = n / grainsize; #else UTYPE ndiv = n / grainsize; num_tasks = ndiv; if (num_tasks != ndiv) num_tasks = ~0UL; #endif if (num_tasks <= 1) { num_tasks = 1; task_step = end - start; } else if (num_tasks >= grainsize #ifndef TYPE_is_long && num_tasks != ~0UL #endif ) { UTYPE mul = num_tasks * grainsize; task_step = (TYPE) grainsize * step; if (mul != n) { task_step += step; nfirst = n - mul - 1; } } else { UTYPE div = n / num_tasks; UTYPE mod = n % num_tasks; task_step = (TYPE) div * step; if (mod) { task_step += step; nfirst = mod - 1; } } } else { if (num_tasks == 0) num_tasks = team ? 
/* (continuation of the ternary above: with num_tasks == 0, default to
   one task per team thread, or a single task when there is no team) */
team->nthreads : 1; if (num_tasks >= n) num_tasks = n; else { UTYPE div = n / num_tasks; UTYPE mod = n % num_tasks; task_step = (TYPE) div * step; if (mod) { task_step += step; nfirst = mod - 1; } } } if (flags & GOMP_TASK_FLAG_NOGROUP) { if (thr->task && thr->task->taskgroup && thr->task->taskgroup->cancelled) return; } else ialias_call (GOMP_taskgroup_start) (); if (priority > gomp_max_task_priority_var) priority = gomp_max_task_priority_var; if ((flags & GOMP_TASK_FLAG_IF) == 0 || team == NULL || (thr->task && thr->task->final_task) || team->task_count + num_tasks > 64 * team->nthreads) { unsigned long i; if (__builtin_expect (cpyfn != NULL, 0)) { struct gomp_task task[num_tasks]; struct gomp_task *parent = thr->task; arg_size = (arg_size + arg_align - 1) & ~(arg_align - 1); char buf[num_tasks * arg_size + arg_align - 1]; char *arg = (char *) (((uintptr_t) buf + arg_align - 1) & ~(uintptr_t) (arg_align - 1)); char *orig_arg = arg; for (i = 0; i < num_tasks; i++) { gomp_init_task (&task[i], parent, gomp_icv (false)); task[i].priority = priority; task[i].kind = GOMP_TASK_UNDEFERRED; task[i].final_task = (thr->task && thr->task->final_task) || (flags & GOMP_TASK_FLAG_FINAL); if (thr->task) { task[i].in_tied_task = thr->task->in_tied_task; task[i].taskgroup = thr->task->taskgroup; } thr->task = &task[i]; cpyfn (arg, data); arg += arg_size; } arg = orig_arg; for (i = 0; i < num_tasks; i++) { thr->task = &task[i]; ((TYPE *)arg)[0] = start; start += task_step; ((TYPE *)arg)[1] = start; if (i == nfirst) task_step -= step; fn (arg); arg += arg_size; if (!priority_queue_empty_p (&task[i].children_queue, MEMMODEL_RELAXED)) { gomp_mutex_lock (&team->task_lock); gomp_clear_parent (&task[i].children_queue); gomp_mutex_unlock (&team->task_lock); } gomp_end_task (); } } else for (i = 0; i < num_tasks; i++) { struct gomp_task task; gomp_init_task (&task, thr->task, gomp_icv (false)); task.priority = priority; task.kind = GOMP_TASK_UNDEFERRED; task.final_task = (thr->task && 
/* (expression continues: final if the parent is final or FLAG_FINAL is
   set; this is the undeferred, no-cpyfn inline-execution loop) */
thr->task->final_task) || (flags & GOMP_TASK_FLAG_FINAL); if (thr->task) { task.in_tied_task = thr->task->in_tied_task; task.taskgroup = thr->task->taskgroup; } thr->task = &task; ((TYPE *)data)[0] = start; start += task_step; ((TYPE *)data)[1] = start; if (i == nfirst) task_step -= step; fn (data); if (!priority_queue_empty_p (&task.children_queue, MEMMODEL_RELAXED)) { gomp_mutex_lock (&team->task_lock); gomp_clear_parent (&task.children_queue); gomp_mutex_unlock (&team->task_lock); } gomp_end_task (); } } else { struct gomp_task *tasks[num_tasks]; struct gomp_task *parent = thr->task; struct gomp_taskgroup *taskgroup = parent->taskgroup; char *arg; int do_wake; unsigned long i; for (i = 0; i < num_tasks; i++) { struct gomp_task *task = gomp_malloc (sizeof (*task) + arg_size + arg_align - 1); tasks[i] = task; arg = (char *) (((uintptr_t) (task + 1) + arg_align - 1) & ~(uintptr_t) (arg_align - 1)); gomp_init_task (task, parent, gomp_icv (false)); task->priority = priority; task->kind = GOMP_TASK_UNDEFERRED; task->in_tied_task = parent->in_tied_task; task->taskgroup = taskgroup; thr->task = task; if (cpyfn) { cpyfn (arg, data); task->copy_ctors_done = true; } else memcpy (arg, data, arg_size); ((TYPE *)arg)[0] = start; start += task_step; ((TYPE *)arg)[1] = start; if (i == nfirst) task_step -= step; thr->task = parent; task->kind = GOMP_TASK_WAITING; task->fn = fn; task->fn_data = arg; task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1; } gomp_mutex_lock (&team->task_lock); /* If parallel or taskgroup has been cancelled, don't start new tasks. 
*/ if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier) || (taskgroup && taskgroup->cancelled)) && cpyfn == NULL, 0)) { gomp_mutex_unlock (&team->task_lock); for (i = 0; i < num_tasks; i++) { gomp_finish_task (tasks[i]); free (tasks[i]); } if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0) ialias_call (GOMP_taskgroup_end) (); return; } if (taskgroup) taskgroup->num_children += num_tasks; for (i = 0; i < num_tasks; i++) { struct gomp_task *task = tasks[i]; priority_queue_insert (PQ_CHILDREN, &parent->children_queue, task, priority, PRIORITY_INSERT_BEGIN, /*last_parent_depends_on=*/false, task->parent_depends_on); if (taskgroup) priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue, task, priority, PRIORITY_INSERT_BEGIN, /*last_parent_depends_on=*/false, task->parent_depends_on); priority_queue_insert (PQ_TEAM, &team->task_queue, task, priority, PRIORITY_INSERT_END, /*last_parent_depends_on=*/false, task->parent_depends_on); ++team->task_count; ++team->task_queued_count; } gomp_team_barrier_set_task_pending (&team->barrier); if (team->task_running_count + !parent->in_tied_task < team->nthreads) { do_wake = team->nthreads - team->task_running_count - !parent->in_tied_task; if ((unsigned long) do_wake > num_tasks) do_wake = num_tasks; } else do_wake = 0; gomp_mutex_unlock (&team->task_lock); if (do_wake) gomp_team_barrier_wake (&team->barrier, do_wake); } if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0) ialias_call (GOMP_taskgroup_end) (); }
/*
 * Check_And_Enqueue -- ray vs. axis-aligned bounding box "slab" test.
 *
 * Intersects the ray described by 'rayinfo' (precomputed per-axis
 * slab_num/slab_den values plus 'nonzero'/'positive' direction flags)
 * with 'BBox'.  If the box is hit, 'Node' is pushed onto 'Queue' with
 * the near intersection depth 'dmin' as its priority; on any miss the
 * function returns without enqueueing.
 *
 * Per axis (X, then Y, then Z):
 *  - direction component nonzero: compute the [tmin, tmax] slab
 *    interval, ordered by the sign of the direction; intersect it with
 *    the running [dmin, dmax] interval, returning early as soon as the
 *    intersection is empty or lies entirely behind the ray origin
 *    (tmax/dmax < EPSILON).
 *  - direction component zero: the ray is parallel to the slab, so just
 *    reject unless the origin coordinate lies inside the slab; X also
 *    initializes the running interval to [-BOUND_HUGE, BOUND_HUGE].
 *
 * Infinite nodes skip the test entirely and are enqueued at depth
 * -Max_Distance so they sort ahead of every finite hit.
 *
 * Side effects: bumps stats[nChecked] for each finite box tested and
 * stats[nEnqueued] for each finite box that passes.
 *
 * NOTE(review): the nested compare/early-return ordering below is a
 * deliberate hand optimization (see the in-code comments about "escape
 * ASAP" / "do this last"); the code is left byte-identical.
 */
void Check_And_Enqueue(PRIORITY_QUEUE *Queue, BBOX_TREE *Node, BBOX *BBox, RAYINFO *rayinfo) { DBL tmin, tmax; DBL dmin, dmax; if (Node->Infinite) { /* Set intersection depth to -Max_Distance. */ dmin = -Max_Distance; } else { Increase_Counter(stats[nChecked]); if (rayinfo->nonzero[X]) { if (rayinfo->positive[X]) { dmin = (BBox->Lower_Left[X] - rayinfo->slab_num[X]) * rayinfo->slab_den[X]; dmax = dmin + (BBox->Lengths[X] * rayinfo->slab_den[X]); if (dmax < EPSILON) return; } else { dmax = (BBox->Lower_Left[X] - rayinfo->slab_num[X]) * rayinfo->slab_den[X]; if (dmax < EPSILON) return; dmin = dmax + (BBox->Lengths[X] * rayinfo->slab_den[X]); } if (dmin > dmax) return; } else { if ((rayinfo->slab_num[X] < BBox->Lower_Left[X]) || (rayinfo->slab_num[X] > BBox->Lengths[X] + BBox->Lower_Left[X])) { return; } dmin = -BOUND_HUGE; dmax = BOUND_HUGE; } if (rayinfo->nonzero[Y]) { if (rayinfo->positive[Y]) { tmin = (BBox->Lower_Left[Y] - rayinfo->slab_num[Y]) * rayinfo->slab_den[Y]; tmax = tmin + (BBox->Lengths[Y] * rayinfo->slab_den[Y]); } else { tmax = (BBox->Lower_Left[Y] - rayinfo->slab_num[Y]) * rayinfo->slab_den[Y]; tmin = tmax + (BBox->Lengths[Y] * rayinfo->slab_den[Y]); } /* * Unwrap the logic - do the dmin and dmax checks only when tmin and * tmax actually affect anything, also try to escape ASAP. Better * yet, fold the logic below into the two branches above so as to * compute only what is needed. */ /* * You might even try tmax < EPSILON first (instead of second) for an * early quick out. */ if (tmax < dmax) { if (tmax < EPSILON) return; /* check bbox only if tmax changes dmax */ if (tmin > dmin) { if (tmin > tmax) return; /* do this last in case it's not needed! */ dmin = tmin; } else { if (dmin > tmax) return; } /* do this last in case it's not needed! */ dmax = tmax; } else { if (tmin > dmin) { if (tmin > dmax) return; /* do this last in case it's not needed! */ dmin = tmin; } /* else nothing needs to happen, since dmin and dmax did not change! 
*/ } } else { if ((rayinfo->slab_num[Y] < BBox->Lower_Left[Y]) || (rayinfo->slab_num[Y] > BBox->Lengths[Y] + BBox->Lower_Left[Y])) { return; } } if (rayinfo->nonzero[Z]) { if (rayinfo->positive[Z]) { tmin = (BBox->Lower_Left[Z] - rayinfo->slab_num[Z]) * rayinfo->slab_den[Z]; tmax = tmin + (BBox->Lengths[Z] * rayinfo->slab_den[Z]); } else { tmax = (BBox->Lower_Left[Z] - rayinfo->slab_num[Z]) * rayinfo->slab_den[Z]; tmin = tmax + (BBox->Lengths[Z] * rayinfo->slab_den[Z]); } if (tmax < dmax) { if (tmax < EPSILON) return; /* check bbox only if tmax changes dmax */ if (tmin > dmin) { if (tmin > tmax) return; /* do this last in case it's not needed! */ dmin = tmin; } else { if (dmin > tmax) return; } } else { if (tmin > dmin) { if (tmin > dmax) return; /* do this last in case it's not needed! */ dmin = tmin; } /* else nothing needs to happen, since dmin and dmax did not change! */ } } else { if ((rayinfo->slab_num[Z] < BBox->Lower_Left[Z]) || (rayinfo->slab_num[Z] > BBox->Lengths[Z] + BBox->Lower_Left[Z])) { return; } } Increase_Counter(stats[nEnqueued]); } priority_queue_insert(Queue, dmin, Node); }