void test_fast_read_set04(CuTest *tc){
    short int i, j;
    fast_read_set set;
    printf("\nFast-read-set test#04 - elements 0-19: unique_insert\nmarked: 0-14 repeated 50 times\narray read by is_marked:\n");
    construct_fast_read_set(&set, 20);
    for(j = 0; j < 50; ++j){
        for(i = 0; i < 15; ++i){
            set_mark(&set, i);
        }
    }
    for(i = 0; i < 20; ++i){
        printf("%d ", (int)is_marked(&set, i));
    }
    printf("\n\tThere should be no repetitions in the tree;\t");
    RB_display_keys_in_order(&set.tree);
    reset_marks(&set);
    printf("Now all cells should be reset:\n");
    for(i = 0; i < 20; ++i){
        printf("%d ", (int)is_marked(&set, i));
    }
    printf("\n");
    RB_display_keys_in_order(&set.tree);
    destroy_fast_read_set(&set);
}
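/*
 * The fast_read_set tests above (and test#03 below) exercise four operations:
 * construct_fast_read_set, set_mark (a unique insert), is_marked, and
 * reset_marks, with the marked keys also kept in a red-black tree (set.tree)
 * so a reset only touches entries that were actually marked. The sketch below
 * is hypothetical and simplified, not the project's code: it keeps the same
 * contract but records marked indices in a plain array instead of a tree.
 * All names (simple_read_set, etc.) are made up for illustration.
 */
#include <stdlib.h>

typedef struct {
    char  *flags;          /* flags[i] != 0 iff index i is marked             */
    short *marked;         /* indices marked since the last reset, no repeats */
    short  marked_count;
    short  capacity;
} simple_read_set;

void construct_simple_read_set(simple_read_set *s, short capacity) {
    s->flags = calloc(capacity, 1);
    s->marked = malloc(capacity * sizeof(short));
    s->marked_count = 0;
    s->capacity = capacity;
}

void simple_set_mark(simple_read_set *s, short i) {
    if (!s->flags[i]) {                    /* unique insert: repeats are ignored */
        s->flags[i] = 1;
        s->marked[s->marked_count++] = i;
    }
}

int simple_is_marked(simple_read_set *s, short i) { return s->flags[i]; }

void simple_reset_marks(simple_read_set *s) {      /* O(#marked), not O(capacity) */
    while (s->marked_count > 0)
        s->flags[s->marked[--s->marked_count]] = 0;
}

void destroy_simple_read_set(simple_read_set *s) {
    free(s->flags);
    free(s->marked);
}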
void mini_span_tree_prim_calculate_min_weight_arc(
        AdjacentMultipleListGraphic graphic,
        int* mark,
        int* start_vertex_index,
        int* end_vertex_index,
        int* weight) {
    int i = 0;
    ArcNode *arc_node = NULL;
    *weight = -1;
    for (i = 0; i < graphic -> vertex_count; i++) {
        arc_node = (graphic -> vertex_list + i) -> arc_list;
        while (arc_node) {
            int s_vertex_index = i;
            int e_vertex_index = arc_node -> start_vertex_index == i
                    ? arc_node -> end_vertex_index
                    : arc_node -> start_vertex_index;
            ArcNode *next_node = arc_node -> start_vertex_index == i
                    ? arc_node -> start_link_next
                    : arc_node -> end_link_next;
            if (mark != NULL) {
                if (!is_marked(graphic, mark, i) || is_marked(graphic, mark, e_vertex_index)) {
                    arc_node = next_node;
                    continue;
                }
            }
            if (*weight == -1 || arc_node -> weight < *weight) {
                *start_vertex_index = s_vertex_index;
                *end_vertex_index = e_vertex_index;
                *weight = arc_node -> weight;
            }
            arc_node = next_node;
        }
    }
}
// Fraser's SkipList Algorithm
inline void fraser_search(sl_intset_t *set, uint32_t val, sl_node_t **left_list, sl_node_t **right_list)
{
    int i;
    sl_node_t *left, *left_next, *right, *right_next;

retry:
    left = set->head;
    for (i = LEVELMAX - 1; i >= 0; i--) {
        left_next = left->nexts[i];
        if (is_marked((uint32_t)left_next))
            goto retry;
        /* Find unmarked node pair at this level */
        for (right = left_next; ; right = right_next) {
            /* Skip a sequence of marked nodes */
            while(1) {
                right_next = right->nexts[i];
                if (!is_marked((uint32_t)right_next))
                    break;
                right = (sl_node_t*)unset_mark((uint32_t)right_next);
            }
            if (right->val >= val)
                break;
            left = right;
            left_next = right_next;
        }
        /* Ensure left and right nodes are adjacent */
        if ((left_next != right) &&
            (!ATOMIC_CAS_MB(&left->nexts[i], left_next, right)))
            goto retry;
        if (left_list != NULL)
            left_list[i] = left;
        if (right_list != NULL)
            right_list[i] = right;
    }
}
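/*
 * Note: the is_marked / set_mark / unset_mark calls in the lock-free
 * skip-list routines in this section rely on low-bit pointer tagging:
 * nodes are word-aligned, so bit 0 of a successor pointer is free and is
 * used as a logical-deletion flag. The helpers below are a minimal,
 * hypothetical sketch of that convention; the projects above define their
 * own variants over uint32_t or uintptr_t, and the _ref names here are
 * made up to avoid clashing with them.
 */
#include <stdint.h>

static inline int       is_marked_ref(uintptr_t p)  { return (int)(p & (uintptr_t)0x1); }
static inline uintptr_t set_mark_ref(uintptr_t p)   { return p |  (uintptr_t)0x1; }
static inline uintptr_t unset_mark_ref(uintptr_t p) { return p & ~(uintptr_t)0x1; }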
void test_fast_read_set03(CuTest *tc){
    short int i;
    fast_read_set set;
    printf("\nFast-read-set test#03 - elements 0-19: reset\nmarked: 0-9, 12, 15, 17, 19\narray read by is_marked:\n");
    construct_fast_read_set(&set, 20);
    for(i = 0; i < 10; ++i){
        set_mark(&set, i);
    }
    set_mark(&set, 12);
    set_mark(&set, 19);
    set_mark(&set, 17);
    set_mark(&set, 15);
    for(i = 0; i < 20; ++i){
        printf("%d ", (int)is_marked(&set, i));
    }
    printf("\n");
    RB_display_keys_in_order(&set.tree);
    reset_marks(&set);
    printf("\tNow all cells should be reset:\n");
    for(i = 0; i < 20; ++i){
        printf("%d ", (int)is_marked(&set, i));
    }
    printf("\n");
    RB_display_keys_in_order(&set.tree);
    destroy_fast_read_set(&set);
}
void ut_unsorted(int size) {
    printf("Testing UNSORTED.\n\n");
    char *name = "test_unsorted";
    make_new_column_file(name, SORTED);
    column_t *c = malloc(sizeof(column_t));
    init_col(c, name);
    // append things!
    for(int i = 0; i < size; i ++) {
        append_value_to_file(c, i);
        append_value_to_file(c, i-1);
    }
    print_data_file(c->fp);
    init_col(c, name);
    bv_t *bv = create_bv(2 * size);
    for(int i = 0; i < size - 3; i ++) {
        mark_matching_bv_for_unsorted(c, bv, i-1, i);
        assert(is_marked(bv, (2*i)));
        assert(is_marked(bv, (2*i + 1)));
        assert(is_marked(bv, (2*i + 3)));
        unmark_all_bv(bv);
    }
    return;
}
void LIR_OopMapGenerator::process_move(LIR_Op* op) {
  LIR_Op1* op1 = op->as_Op1();
  LIR_Opr src = op1->in_opr();
  LIR_Opr dst = op1->result_opr();

  assert(!src->is_stack() || !dst->is_stack(), "No memory-memory moves allowed");

  if ((src->is_stack() && frame_map()->is_spill_pos(src)) ||
      (dst->is_stack() && frame_map()->is_spill_pos(dst))) {
    // Oops in the spill area are handled by another mechanism (see
    // CodeEmitInfo::record_spilled_oops)
    return;
  }

  if (dst->is_oop()) {
    assert((src->is_oop() &&
            (src->is_stack() || src->is_register() || src->is_constant())) ||
           src->is_address(),
           "Wrong tracking of oops/non-oops in LIR");
    assert(!src->is_stack() || is_marked(src->single_stack_ix()),
           "Error in tracking of oop stores to stack");

    if (dst->is_stack()) {
      mark(dst->single_stack_ix());
    } else if (dst->is_register()) {
      if (LIRCacheLocals) {
        if (local_mapping()->is_cache_reg(dst)) {
          mark(dst);
        }
      } else {
        assert(local_mapping() == NULL, "expected no local mapping");
      }
    }
  } else {
    // !dst->is_oop()
    // Note that dst may be an address
    assert(!src->is_single_stack() || !is_marked(src->single_stack_ix()),
           "Error in tracking of oop stores to stack");
    assert(!src->is_double_stack() || !is_marked(src->double_stack_ix()),
           "Error in tracking of oop stores to stack");
    assert(!src->is_double_stack() || !is_marked(1 + src->double_stack_ix()),
           "Error in tracking of oop stores to stack");

    if (dst->is_stack()) {
      if (dst->is_single_stack()) {
        clear_all(dst->single_stack_ix());
      } else {
        clear_all(dst->double_stack_ix());
        clear_all(1 + dst->double_stack_ix());
      }
    } else if (dst->is_register()) {
      if (LIRCacheLocals) {
        if (local_mapping()->is_cache_reg(dst)) {
          clear_all(dst);
        }
      } else {
        assert(local_mapping() == NULL, "expected no local mapping");
      }
    }
  }
}
int fraser_search(sl_intset_t *set, skey_t key, sl_node_t **left_list, sl_node_t **right_list)
{
    int i;
    sl_node_t *left, *left_next, *right = NULL, *right_next;

retry:
    PARSE_TRY();
    left = set->head;
    for (i = levelmax - 1; i >= 0; i--) {
        left_next = left->next[i];
        if ((is_marked((uintptr_t)left_next))) {
            goto retry;
        }
        /* Find unmarked node pair at this level */
        for (right = left_next; ; right = right_next) {
            /* Skip a sequence of marked nodes */
            right_next = right->next[i];
            while ((is_marked((uintptr_t)right_next))) {
                right = (sl_node_t*)unset_mark((uintptr_t)right_next);
                right_next = right->next[i];
            }
            if (right->key >= key) {
                break;
            }
            left = right;
            left_next = right_next;
        }
        /* Ensure left and right nodes are adjacent */
        if ((left_next != right) &&
            (!ATOMIC_CAS_MB(&left->next[i], left_next, right))) {
            goto retry;
        }
        if (left_list != NULL) {
            left_list[i] = left;
        }
        if (right_list != NULL) {
            right_list[i] = right;
        }
    }
    return (right->key == key);
}
inline int mark_node_ptrs(sl_node_t *n)
{
    sl_node_t *n_next;
    uint32_t status;
    //uint32_t attempts = 0;
    uint32_t i = n->toplevel - 1;

//retry:
    /* Fast path: mark every level inside a single hardware transaction. */
    status = _xbegin();
    if (status == _XBEGIN_STARTED) {
        while ( i > 0 ) {
            if (!is_marked((uint32_t)n->nexts[i]))
                n->nexts[i] = (sl_node_t*)set_mark((uint32_t)n->nexts[i]);
            i--;
        }
        if (is_marked((uint32_t)n->nexts[0])) {
            _xend();
            return 0;
        } else {
            n->nexts[0] = (sl_node_t*)set_mark((uint32_t)n->nexts[0]);
            _xend();
            return 1;
        }
    }/*else{
        if (++attempts < MAX_ATTEMPT_NUM) {
            goto retry;
        }
    }*/

    /* Fallback path: the transaction aborted, so mark each level with CAS. */
    for (int i = n->toplevel-1; i > 0; i--) {
        do {
            n_next = n->nexts[i];
            if (is_marked((uint32_t)n_next))
                break;
            if (ATOMIC_CAS_MB(&n->nexts[i], n_next, (sl_node_t*)set_mark((uint32_t)n_next)))
                break;
        } while (true);
    }
    do {
        n_next = n->nexts[0];
        if (is_marked((uint32_t)n_next))
            return 0;
        if (ATOMIC_CAS_MB(&n->nexts[0], n_next, (sl_node_t*)set_mark((uint32_t)n_next)))
            return 1;
    } while (true);
}
static void zero_weak_boxes(GCTYPE *gc, int is_late, int force_zero)
{
    GC_Weak_Box *wb;

    wb = gc->weak_boxes[is_late];
    while (wb) {
        if (force_zero || !is_marked(gc, wb->val)) {
            wb->val = NULL;
            if (wb->secondary_erase) {
                void **p;
                mpage *page;

                /* it's possible for the secondary to be in an old generation
                   and therefore on an mprotected page: */
                page = pagemap_find_page(gc->page_maps, wb->secondary_erase);
                if (page->mprotected) {
                    page->mprotected = 0;
                    mmu_write_unprotect_page(gc->mmu, page->addr, APAGE_SIZE);
                    GC_MP_CNT_INC(mp_mark_cnt);
                }
                p = (void **)GC_resolve2(wb->secondary_erase, gc);
                *(p + wb->soffset) = NULL;
                wb->secondary_erase = NULL;
            }
        }
        wb = wb->next;
    }

    /* reset, in case we have a second round */
    gc->weak_boxes[is_late] = NULL;
}
static void trace_by_addr(void *obj_addr, int distance)
{
    // An arbitrary cut-off for a search that's too deep to be useful.
    if(distance > 20) {
        return;
    }
    Object_With_Header *obj = (Object_With_Header *) GC::pinned_objects;
    while(obj) {
        uint64 sz = get_object_size(obj);
        void **obj_start = (void **)obj->start();
        void **obj_end = (void **)(((Byte *)obj_start) + sz);
        for(void **p = obj_start; p < obj_end; p++) {
            if(obj_addr == *p) {
                printf("GC TRACE: [%d] %p in obj %p(+%d): %s\n",
                       distance, obj_addr, obj_start,
                       (int)(((Byte *)p) - ((Byte *)obj_start)),
                       class_get_name(obj->vt()->gcvt->ch));
                if(!is_marked(obj)) {
                    printf("GC TRACE: %p is not marked\n", obj_start);
                    trace_by_addr(obj_start, distance + 1);
                }
            }
        }
        obj = next_object(obj);
    }
} //trace_by_addr
bool chunk_registry::is_marked (void* ptr)
{
    auto header = find_chunk (ptr);
    assert (header && "Must be a valid ptr");
    return header->is_marked (ptr);
}
static int mark_ready_ephemerons(GCTYPE *gc, int inc_gen1)
{
    GC_Ephemeron *waiting, *next, *eph;
    int did_one = 0, j;

    GC_mark_no_recur(gc, 1);

    for (j = 0; j < (inc_gen1 ? 1 : 2); j++) {
        if (inc_gen1)
            eph = gc->inc_ephemerons;
        else if (j == 0)
            eph = gc->ephemerons;
        else
            eph = gc->bp_ephemerons;

        waiting = NULL;

        for (; eph; eph = next) {
            if (inc_gen1)
                next = eph->inc_next;
            else
                next = eph->next;
            if (is_marked(gc, eph->key)) {
                if (!inc_gen1)
                    eph->key = GC_resolve2(eph->key, gc);
                gcMARK2(eph->val, gc);
                gc->num_last_seen_ephemerons++;
                did_one = 1;
                if (!inc_gen1 && (j == 0) && !gc->gc_full && gc->started_incremental) {
                    /* Need to preserve the ephemeron in the incremental list,
                       unless it's kept in generation 1/2 instead of promoted
                       to generation 1. */
                    if (!is_in_generation_half(gc, eph)) {
                        eph->inc_next = gc->inc_ephemerons;
                        gc->inc_ephemerons = eph;
                    }
                }
            } else {
                if (inc_gen1) {
                    /* Ensure that we can write to the page containing the ephemeron: */
                    check_incremental_unprotect(gc, pagemap_find_page(gc->page_maps, eph));
                    eph->inc_next = waiting;
                } else
                    eph->next = waiting;
                waiting = eph;
            }
        }

        if (inc_gen1)
            gc->inc_ephemerons = waiting;
        else if (j == 0)
            gc->ephemerons = waiting;
        else
            gc->bp_ephemerons = waiting;
    }

    GC_mark_no_recur(gc, 0);

    return did_one;
}
// Give advice about whether the oop that contains this markOop
// should be cached or not.
bool markOopDesc::should_not_be_cached() const {
  // the cast is because decode_pointer() isn't marked const
  if (is_marked() && ((markOopDesc *)this)->decode_pointer() != NULL) {
    // If the oop containing this markOop is being forwarded, then
    // we are in the middle of GC and we do not want the containing
    // oop to be added to a cache. We have no way of knowing whether
    // the cache has already been visited by the current GC phase so
    // we don't know whether the forwarded oop will be properly
    // processed in this phase. If the forwarded oop is not properly
    // processed, then we'll see strange crashes or asserts during
    // the next GC run because the markOop will contain an unexpected
    // value.
    //
    // This situation has been seen when we are GC'ing a methodOop
    // because we use the methodOop while we're GC'ing it. Scary
    // stuff. Some of the uses of the methodOop cause the methodOop to
    // be added to the OopMapCache in the instanceKlass as a side
    // effect. This check lets the cache maintainer know when a
    // cache addition would not be safe.
    return true;
  }

  // caching the containing oop should be just fine
  return false;
}
// helper to get a list of positions from a variable
int get_pos_from_var(list_t *vars, char *var_name, int **pos) {
    variable_t *pos_var = find_variable(vars, var_name);
    int num_pos = 0;
    if (pos_var->is_id) {
        // dealing with row id, assign directly
        num_pos = get_list_size(pos_var->ids);
        *pos = calloc(num_pos, sizeof(int));
        memcpy(*pos, pos_var->ids->data, sizeof(int) * num_pos);
    } else {
        // now we need to convert from bit vector
        num_pos = get_bv_count(pos_var->bv);
        *pos = calloc(num_pos, sizeof(int));
        // go through the whole bit vector to get the values
        // TODO: should be a better strategy than this!
        uint32_t size = get_bv_size(pos_var->bv);
        uint32_t itr = 0;
        for (uint32_t u=0; u < size; u++) {
            if (is_marked(pos_var->bv, u)) {
                (*pos)[itr] = u;
                itr ++;
            }
        }
        debug("Itr ended at %d\n", itr);
        dbg_assert(itr == num_pos);
    }
    return num_pos;
}
void ut_sorted_bounds(int size) {
    printf("Testing SORTED with BOUNDS.\n\n");
    char *name = "test_sorted_bounds";
    make_new_column_file(name, SORTED);
    column_t *c = malloc(sizeof(column_t));
    init_col(c, name);
    bv_t *bv = create_bv(size);
    int insert_index;
    for (int i = size-1; i>=0; i--) {
        insert_index = get_lower_bound(c->fp, c->m.size, i);
        insert_value_to_file(c, insert_index, i);
    }
    printf("Finished inserting data.\n");
#ifdef UT_VERBOSE
    print_data_file(c->bpt_fp);
#endif
    for (int i = 0; i < size; i++) {
        printf("Searching values at %d\n", i);
        mark_matching_bv_for_sorted(c, bv, i, i);
        assert(is_marked(bv, i));
        unmark_all_bv(bv);
    }
    return;
}
static void mark_function(lua_State *L, lua_State *dL) {
    const void *p = lua_topointer(L, -1);
    int i;
    lua_Debug ar;
    char used_in[128];
    const char *name;
    if (!is_marked(dL, p)) {
        marked(dL, p, 0);   // already counted in the table
        lua_pushvalue(L, -1);
        lua_getinfo(L, ">S", &ar);
        snprintf(used_in, sizeof(used_in) - 1, "%s:%d~%d",
                 ar.short_src, ar.linedefined, ar.lastlinedefined);
        used_in[sizeof(used_in) - 1] = 0;
        for (i=1;;i++) {
            name = lua_getupvalue(L,-1,i);
            if (name == NULL)
                break;
            p = lua_topointer(L, -1);
            if (*name != '\0' && LUA_TTABLE == lua_type(L, -1)) {
                make_root(dL, p, name, RT_UPVALUE, used_in, 1);
                lua_insert(dL, MARKED_TABLE);
                mark_object(L, dL);
                lua_remove(dL, MARKED_TABLE);
            } else if (LUA_TFUNCTION == lua_type(L, -1)) {
                mark_function(L, dL);
            }
            lua_pop(L, 1);
        }
    }
}
void ut_bit_vector() {
    printf("Bit vector unit testing!\n");
    bv_t *bv = create_bv(5);
    mark_bv(bv, 1);
    assert(is_marked(bv, 1)==true);
    unmark_bv(bv, 1);
    assert(is_marked(bv, 1)==false);
    mark_bv(bv, 2);
    mark_bv(bv, 4);
    assert(is_marked(bv, 2)==true);
    assert(is_marked(bv, 4)==true);
    bv = create_bv(500);
    assert(is_marked(bv, 398)==false);
    mark_bv(bv, 398);
    assert(is_marked(bv, 398)==true);
    printf("Checking mark_bv_by_range\n");
    mark_bv_by_range(bv, 100, 200);
    for (uint32_t i = 100; i < 201; i ++) {
        // printf("At index %d\n", i);
        assert(is_marked(bv, i)==true);
    }
    destroy_bv(bv);
    printf("BIT VECTOR SUCCESS!\n");
    return;
}
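/*
 * A self-contained sketch of the bv_t bit-vector interface exercised by
 * ut_bit_vector above (create_bv, mark_bv, unmark_bv, is_marked,
 * mark_bv_by_range, destroy_bv). This is a hypothetical reference
 * implementation, not the project's own code: one bit per position,
 * packed into 32-bit words, with mark_bv_by_range inclusive on both ends
 * as the test expects.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint32_t *words;   /* bit i lives in words[i / 32], bit (i % 32) */
    uint32_t  nbits;
} bv_t;

bv_t *create_bv(uint32_t nbits) {
    bv_t *bv = malloc(sizeof(bv_t));
    bv->nbits = nbits;
    bv->words = calloc((nbits + 31) / 32, sizeof(uint32_t));
    return bv;
}

void mark_bv(bv_t *bv, uint32_t i)   { bv->words[i / 32] |=  (1u << (i % 32)); }
void unmark_bv(bv_t *bv, uint32_t i) { bv->words[i / 32] &= ~(1u << (i % 32)); }
bool is_marked(bv_t *bv, uint32_t i) { return (bv->words[i / 32] >> (i % 32)) & 1u; }

void mark_bv_by_range(bv_t *bv, uint32_t lo, uint32_t hi) {
    for (uint32_t i = lo; i <= hi; i++)   /* inclusive on both ends */
        mark_bv(bv, i);
}

void destroy_bv(bv_t *bv) {
    free(bv->words);
    free(bv);
}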
static void mark_pointer_to_object(Object_With_Header *obj, int ptr_idx)
{
    if(!is_marked(obj)) {
        // This object would have been deleted had we not handled managed
        // pointers in GC.
        push_on_mark_stack(obj->start());
    }
    remove_pointer_from_table(ptr_idx);
} //mark_pointer_to_object
void evacuate_BLACKHOLE(StgClosure **p)
{
    bdescr *bd;
    uint32_t gen_no;
    StgClosure *q;
    const StgInfoTable *info;
    q = *p;

    // closure is required to be a heap-allocated BLACKHOLE
    ASSERT(HEAP_ALLOCED_GC(q));
    ASSERT(GET_CLOSURE_TAG(q) == 0);

    bd = Bdescr((P_)q);

    // blackholes can't be in a compact
    ASSERT((bd->flags & BF_COMPACT) == 0);

    // blackholes *can* be in a large object: when raiseAsync() creates an
    // AP_STACK the payload might be large enough to create a large object.
    // See #14497.
    if (bd->flags & BF_LARGE) {
        evacuate_large((P_)q);
        return;
    }
    if (bd->flags & BF_EVACUATED) {
        if (bd->gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = true;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }
    if (bd->flags & BF_MARKED) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            push_mark_stack((P_)q);
        }
        return;
    }

    gen_no = bd->dest_no;
    info = q->header.info;
    if (IS_FORWARDING_PTR(info)) {
        StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
        *p = e;
        if (gen_no < gct->evac_gen_no) {  // optimisation
            if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) {
                gct->failed_to_evac = true;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return;
    }

    ASSERT(INFO_PTR_TO_STRUCT(info)->type == BLACKHOLE);
    copy(p,info,q,sizeofW(StgInd),gen_no);
}
void fraser_search(sl_intset_t *set, val_t val, sl_node_t **left_list, sl_node_t **right_list)
{
    int i;
    sl_node_t *left, *left_next, *right, *right_next;

#ifdef DEBUG
    printf("++> fraser_search\n");
    IO_FLUSH;
#endif

retry:
    left = set->head;
    for (i = levelmax - 1; i >= 0; i--) {
        left_next = left->next[i];
        if (is_marked((uintptr_t)left_next))
            goto retry;
        /* Find unmarked node pair at this level */
        for (right = left_next; ; right = right_next) {
            /* Skip a sequence of marked nodes */
            while(1) {
                right_next = right->next[i];
                if (!is_marked((uintptr_t)right_next))
                    break;
                right = (sl_node_t*)unset_mark((uintptr_t)right_next);
            }
            if (right->val >= val)
                break;
            left = right;
            left_next = right_next;
        }
        /* Ensure left and right nodes are adjacent */
        if ((left_next != right) &&
            (!ATOMIC_CAS_MB(&left->next[i], left_next, right)))
            goto retry;
        if (left_list != NULL)
            left_list[i] = left;
        if (right_list != NULL)
            right_list[i] = right;
    }
#ifdef DEBUG
    printf("++> fraser_search ends\n");
    IO_FLUSH;
#endif
}
static int CheckFinish(struct localAlloc *local, void **markstack, int *counterPtr){
    //Check Finish:
    struct allocator *alc=local->global;
    const int T = alc->numThreads;
    const long localVer = local->localVer;
    int curPhases[T];
    void *tracing[T];
    int counter=0;

CheckFinish:
    usleep(10);
    for(int i=0; i<T; ++i){
        struct localAlloc *ai = &alc->locals[i];
        curPhases[i]=ai->localVer;
        void *trptr = ai->markstack[0];
        tracing[i]=trptr;
        if(((long)trptr & 1) || curPhases[i]!=localVer)
            continue;
        void *ptr;
        for(int j=1; (ptr=ai->markstack[j])!=NULL; ++j){
            if(is_marked(ptr)==0 && counter<10){ //an unmarked pointer.
                markstack[++counter]=ptr;
                //goto CheckFinish;
            }
        }
        if(is_marked(trptr)==0)
            markstack[++counter]=trptr;
    }
    if(counter>=1){
        usleep(2);
        *counterPtr=counter;
        if(localVer!=alc->phase.phase){ //is it relevant to help?
            for(int i=1; markstack[i]!=NULL; ++i)
                markstack[i]=NULL;
            return 1; //Not relevant - finish
        }
        return 0; //Still in current phase - help (and do not finish)
    }
    for(int i=0; i<T; ++i){
        struct localAlloc *ai = &alc->locals[i];
        void *trptr = ai->markstack[0];
        if(trptr!=tracing[i] || ai->localVer!=curPhases[i])
            goto CheckFinish;
    }
    return 1;
}
int un_mark_in_degree(GRAPHIC_TYPE graphic, int* traverse_mark, int vertex_index) {
    int in_degree = 0, i = 0;
    for (i = 0; i < MAX_VERTEX_SIZE; i++) {
        if ((graphic -> arc_list)[i][vertex_index] != -1 && !is_marked(graphic, traverse_mark, i)) {
            in_degree++;
        }
    }
    return in_degree;
}
static int zero_weak_arrays(GCTYPE *gc, int force_zero, int from_inc, int fuel)
{
    GC_Weak_Array *wa;
    int i, num_gen0;

    if (from_inc) {
        wa = gc->inc_weak_arrays;
        num_gen0 = 0;
    } else
        wa = append_weak_arrays(gc->weak_arrays, gc->bp_weak_arrays, &num_gen0);

    if (gc->gc_full || !gc->started_incremental)
        num_gen0 = 0;

    while (wa) {
        void **data;

        data = wa->data;
        for (i = wa->count; i--; ) {
            void *p = data[i];
            if (p && (force_zero || !is_marked(gc, p)))
                data[i] = wa->replace_val;
            else
                data[i] = GC_resolve2(p, gc);
        }

        if (fuel > 0)
            fuel = ((fuel > wa->count) ? (fuel - wa->count) : 0);

        if (num_gen0 > 0) {
            if (!is_in_generation_half(gc, wa)) {
                /* For incremental mode, preserve this weak array in the
                   incremental list for re-checking later. */
                wa->data[wa->count] = gc->inc_weak_arrays;
                gc->inc_weak_arrays = wa;
            }
        }

        if (from_inc) {
            GC_Weak_Array *next;
            next = (GC_Weak_Array *)wa->data[wa->count];
            wa->data[wa->count] = gc->weak_incremental_done;
            wa = next;
        } else
            wa = wa->next;
        num_gen0--;
    }

    if (from_inc)
        gc->inc_weak_arrays = NULL;
    else {
        gc->weak_arrays = NULL;
        gc->bp_weak_arrays = NULL;
    }

    return fuel;
}
void verifyList(Entry *e){
    Entry *volatile prev=NULL;
    while(e!=NULL){
        if(!is_marked(e)){
            clearDebugging();
            B(prev);
        }
        prev=e;
        e=clearDeleted(e->nextEntry);
    }
}
void mark_node_ptrs(sl_node_t *n)
{
    int i;
    sl_node_t *n_next;

    for (i=n->toplevel-1; i>=0; i--) {
        do {
            n_next = n->next[i];
            if (is_marked((uintptr_t)n_next))
                break;
        } while (!ATOMIC_CAS_MB(&n->next[i], n_next, set_mark((uintptr_t)n_next)));
    }
}
void in_order_check_collision(level_data *level, fast_read_set *movable_done, short int who,
                              RB_node *node, RB_node *nil, long double time_passed){
    if(node != nil){
        in_order_check_collision(level, movable_done, who, node->left, nil, time_passed);
        if(!is_marked(movable_done, node->key)){
            set_mark(movable_done, node->key);
            get_and_check_mov_coll_if_valid(level, who, node->key, time_passed);
        }
        in_order_check_collision(level, movable_done, who, node->right, nil, time_passed);
    }
}
void ExitNode::Compile(MacroAssembler* masm) {
  ASSERT(!is_marked());
  is_marked_ = true;
  Comment cmnt(masm, "[ ExitNode");
  if (FLAG_trace) {
    __ push(eax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ RecordJSReturn();
  __ mov(esp, ebp);
  __ pop(ebp);
  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
  __ ret((count + 1) * kPointerSize);
}
Status DepthFirstTravese_Action(GRAPHIC_TYPE graphic, int* traverse_mark, int vertex_index,
                                Status(*visit)(ElementType*)) {
    VERTEX_TYPE *vertex = graphic -> vertex_list + vertex_index;
    visit(&(vertex -> value));
    mark(traverse_mark, vertex_index);
    VERTEX_TYPE *adjacent_vertex = FirstAdjacentVertex(graphic, vertex);
    while (adjacent_vertex != NULL) {
        int adjacent_vertex_index = adjacent_vertex - graphic -> vertex_list;
        if (!is_marked(graphic, traverse_mark, adjacent_vertex_index))
            DepthFirstTravese_Action(graphic, traverse_mark, adjacent_vertex_index, visit);
        adjacent_vertex = NextAdjacentVertex(graphic, vertex, adjacent_vertex);
    }
    return OK;
}
static void mark_ready_ephemerons()
{
    GC_Ephemeron *waiting = NULL, *next, *eph;

    for (eph = ephemerons; eph; eph = next) {
        next = eph->next;
        if (is_marked(eph->key)) {
            gcMARK(eph->val);
            num_last_seen_ephemerons++;
        } else {
            eph->next = waiting;
            waiting = eph;
        }
    }
    ephemerons = waiting;
}
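/*
 * Ephemeron marking is a fixed-point computation: marking an ephemeron's
 * value can make other ephemeron keys reachable. The driver below is a
 * hypothetical sketch (not taken from the collectors above) of how the
 * GCTYPE variant earlier in this section, which returns whether it marked
 * anything, could be alternated with mark propagation until nothing
 * changes. propagate_marks() stands in for whatever routine drains the
 * mark stack in the host collector and is an assumption here.
 */
static void mark_ephemerons_to_fixpoint(GCTYPE *gc)
{
    int did_one;
    do {
        did_one = mark_ready_ephemerons(gc, 0);  /* non-incremental pass */
        if (did_one)
            propagate_marks(gc);   /* newly marked values may reach more keys */
    } while (did_one);
}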
void find_next_collision(level_data *level, short int index, fast_read_set *primitive_done,
                         fast_read_set *movable_done, long double time_passed){
    collision_data new_coll;
    int i, j, k;
    level->movable_objects[index].coll_with_fixed.time = EMPTY_COLLISION_TIME;
    for(i = level->movable_objects[index].zones[0]; i <= level->movable_objects[index].zones[2]; ++i){
        for(j = level->movable_objects[index].zones[1]; j <= level->movable_objects[index].zones[3]; ++j){
            for(k = 0; k < level->zones[i][j].number_of_primitives; ++k){
                if(!is_marked(primitive_done, level->zones[i][j].primitives[k])){
                    set_mark(primitive_done, level->zones[i][j].primitives[k]);
                    new_coll = get_collision_with_primitive(&level->movable_objects[index],
                                                            &level->primitive_objects[level->zones[i][j].primitives[k]]);
                    new_coll.time += time_passed;
                    if(new_coll.time >= 0 && new_coll.time <= 1){
                        if(new_coll.time < level->movable_objects[index].coll_with_fixed.time){
                            new_coll.who = index;
                            new_coll.with = level->zones[i][j].primitives[k];
                            level->movable_objects[index].coll_with_fixed = new_coll;
                        }
                    }
                }
            }
        }
    }
    reset_marks(primitive_done);
    set_mark(movable_done, index);
    for(i = level->movable_objects[index].zones[0]; i <= level->movable_objects[index].zones[2]; ++i){
        for(j = level->movable_objects[index].zones[1]; j <= level->movable_objects[index].zones[3]; ++j){
            in_order_check_collision(level, movable_done, index, level->zones[i][j].movable.root,
                                     level->zones[i][j].movable.nil, time_passed);
        }
    }
    reset_marks(movable_done);
    collision_min_for_object(level, index);
    //push on queue
    //heap checks if who < with and does necessary exchanges
    if(level->movable_objects[index].next_collision->time <= 1 &&
       level->movable_objects[index].next_collision->time >= 0){
        heap_insert(&level->collision_queue, level->movable_objects[index].next_collision);
    }
}