/*
 * reset_thread: abandon thread ctxt's current tabled computation of subgoal
 * sgf and rewind its stacks back to the generator choice point of that
 * subgoal (or of its bottom leader's subgoal).
 *
 * th      - the calling thread's context (overwritten below; see "trick")
 * ctxt    - the context of the thread being reset
 * sgf     - the subgoal frame whose computation is abandoned
 * resetsgf- out: the subgoal frame actually reset (the bottom leader),
 *           or ctxt->waiting_for_subgoal if nothing was done
 *
 * On success sets ctxt->reset_thread = TRUE as a side effect.
 */
static void reset_thread( th_context *th, th_context *ctxt, VariantSF sgf, VariantSF *resetsgf )
{
  CPtr tbreg ;
  /* if the subgoal is not being computed by this thread, because meanwhile
     it has been grabbed by another thread, nothing should be done */
  if( subg_tid(sgf) != ctxt->tid )
  {
    *resetsgf = ctxt->waiting_for_subgoal;
    return ;
  }
  /* reset from the bottom leader of the SCC, not necessarily sgf itself */
  sgf = bottom_leader(ctxt, sgf) ;
  *resetsgf = sgf ;
#ifdef WIN_NT
  ReclaimDSandMarkReset(ctxt, sgf, xsb_thread_id);
#else
  /* non-Windows builds pass a pthread_t; xsb_thread_id needs the cast */
  ReclaimDSandMarkReset(ctxt, sgf, (pthread_t) xsb_thread_id);
#endif
  /* trick to use other thread's context: the WAM register macros below
     (breg, openreg, ereg, ...) presumably expand through `th`, so pointing
     th at ctxt makes them operate on the reset thread -- NOTE(review):
     confirm against the register-macro definitions */
  th = ctxt ;
  /* reset the stacks by restoring the generator cp of this sg */
  breg = subg_cp_ptr(sgf) ;
  tbreg = breg ;
  openreg = prev_compl_frame(subg_compl_stack_ptr(sgf)) ;
  switch_envs(tbreg);
  ptcpreg = tcp_ptcp(tbreg);
  delayreg = tcp_pdreg(tbreg);
  reclaim_stacks(tbreg) ;
  restore_some_wamregs(tbreg, ereg);
  /* restore_trail_condition_registers: take ebreg/hbreg from the choice
     point *below* the generator, since the generator cp is deleted next */
  ebreg = cp_ebreg(tcp_prevbreg(tbreg));
  hbreg = cp_hreg(tcp_prevbreg(tbreg));
  pcreg = (byte *)tcp_reset_pcreg(tbreg) ;
  /* pcreg[3] is presumably the table-try instruction's arity operand --
     TODO(review): confirm instruction layout */
  table_restore_registers(tbreg, pcreg[3], reg);
  /* delete the generator cp */
  breg = tcp_prevbreg(breg) ;
  ctxt->reset_thread = TRUE;
}
/*
 * find_the_visitors: scan the choice-point stack for runs of answer-trie
 * choice points that belong to `subgoal` (threads "visiting" its answer
 * trie).  Each such run (topmost CP cp_top1 down to the trie-root CP
 * cp_root, recognized by its trie_fail instruction) is collapsed: the
 * remaining answers are materialized as a heap list via
 * traverse_variant_answer_trie / list_of_answers_from_answer_list, and the
 * topmost CP of the run is rewritten to execute
 * completed_trie_member_inst over that list instead of backtracking
 * through the trie.  Finally clears subg_visitors(subgoal).
 *
 * Layout of the rewritten CP (grounded in the stores below):
 *   cp_top1 + CP_SIZE                        : makeint(ans_subst_num)
 *   cp_top1 + CP_SIZE + 1 .. + ans_subst_num : substitution-factor cells
 *                                              copied from the root CP
 *   cp_top1 + CP_SIZE + 1 + ans_subst_num    : listHead (answer list)
 *   cp_top1 + CP_SIZE + 2 + ans_subst_num    : saved hfreg
 */
void find_the_visitors(CTXTdeclc VariantSF subgoal)
{
  CPtr cp_top1,cp_bot1 ;
  CPtr cp_root;
  CPtr cp_first;
  byte cp_inst;
  Cell listHead;
  int ans_subst_num, i, attv_num;
  BTNptr trieNode;
  ALNptr ALNlist;
  //  printf("find the visitors: subg %p trie root %p\n",subgoal,subg_ans_root_ptr(subgoal));
  cp_top1 = breg ;
  cp_bot1 = (CPtr)(tcpstack.high) - CP_SIZE;
  /* sanity: the heap top must not be below the freeze register in XWAM mode */
  if (xwammode && hreg < hfreg) {
    printf("uh-oh! hreg was less than hfreg in in find the visitors\n");
    hreg = hfreg;
  }
  /* walk every choice point from the top of the CP stack to the bottom */
  while ( cp_top1 < cp_bot1 ) {
    //    printf("1 cp_top1 %p cp_bot1 %p prev %p\n",cp_top1,cp_bot1,cp_prevtop(cp_top1));
    cp_inst = *(byte *)*cp_top1;
    // Want trie insts, but need to distinguish from asserted and interned tries
    //    printf("cp_inst %x\n",cp_inst);
    if ( is_trie_instruction(cp_inst) ) {
      //      printf("found trie instr\n");
      // Below we want basic_answer_trie_tt, ts_answer_trie_tt
      trieNode = TrieNodeFromCP(cp_top1);
      if (IsInAnswerTrie(trieNode)) {
	//	printf("in answer trie\n");
	if (subgoal == get_subgoal_frame_for_answer_trie_cp(CTXTc trieNode)) {
	  //	  printf("found top of run %p \n",cp_top1);
	  //	  print_subgoal(CTXTc stdout, subgoal); printf("\n");
	  /* found the top of a run of CPs for this subgoal's answer trie;
	     follow prevbreg links down to the trie-root CP (the one whose
	     pcreg is trie_fail) */
	  cp_root = cp_top1;
	  cp_first = cp_top1;
	  while (*cp_pcreg(cp_root) != trie_fail) {
	    cp_first = cp_root;
	    cp_root = cp_prevbreg(cp_root);
	    /* every CP in the run should belong to the same subgoal */
	    if (*cp_pcreg(cp_root) != trie_fail
		&& subgoal != get_subgoal_frame_for_answer_trie_cp(CTXTc TrieNodeFromCP(cp_root)))
	      printf(" couldn't find incr trie root -- whoa, whu? \n(%p\n",cp_root);
	  }
	  /* collect the answers still reachable from this run */
	  ALNlist = traverse_variant_answer_trie(subgoal, cp_root,cp_top1);
	  ans_subst_num = (int)int_val(cell(cp_root + CP_SIZE + 1)) ; // account for sf ptr of trie root cp
	  /* NOTE(review): attv count is read relative to breg (not cp_root)
	     -- confirm this is intended */
	  attv_num = (int)int_val(cell(breg+CP_SIZE+1+ans_subst_num)) + 1;;
	  //	  printf("found root %p first %p top %p ans_subst_num %d & %p attv_num %d\n",cp_root,cp_first,cp_top1,ans_subst_num,breg+CP_SIZE, attv_num);
	  listHead = list_of_answers_from_answer_list(subgoal,ans_subst_num,attv_num,ALNlist);
	  // Free ALNlist;
	  /* rewrite the topmost CP of the run: on backtracking it now runs
	     the completed-trie member instruction over listHead, and its
	     saved state comes from the root CP of the run */
	  cp_pcreg(cp_top1) = (byte *) &completed_trie_member_inst;
	  cp_ebreg(cp_top1) = cp_ebreg(cp_root);
	  cp_hreg(cp_top1) = hreg;
	  cp_ereg(cp_top1) = cp_ereg(cp_root);
	  cp_trreg(cp_top1) = cp_trreg(cp_root);
	  cp_prevbreg(cp_top1) = cp_prevbreg(cp_root);
	  cp_prevtop(cp_top1) = cp_prevtop(cp_root);
	  // cpreg, ereg, pdreg, ptcpreg should not need to be reset (prob not ebreg?)
	  //	  printf("sf %p\n",* (cp_root + CP_SIZE + 2));
	  * (cp_top1 + CP_SIZE) = makeint(ans_subst_num);
	  for (i = 0;i < ans_subst_num ;i++) { // Use registers for root of trie, not leaf (top)
	    * (cp_top1 + CP_SIZE + 1 + i) = * (cp_root + CP_SIZE + 2 +i); // account for sf ptr or root
	  }
	  * (cp_top1 + CP_SIZE + 1+ ans_subst_num) = listHead;
	  * (cp_top1 + CP_SIZE + 2+ ans_subst_num) = (Cell)hfreg;
	  //	  printf("4 cp_root %p prev %p\n",cp_root,cp_prevtop(cp_root));
	  //	  printf("constructed listhead hreg %x\n",hreg);
	  //	  cp_top1 = cp_root; // next iteration
	  //	  printf("7 cp_top1 %p cp_bot1 %p prev %p\n",cp_top1,cp_bot1,cp_prevtop(cp_top1));
	}
      }
    }
    cp_top1 = cp_prevtop(cp_top1);
  }
  /* the answer lists were built on the heap; re-freeze it */
  if (xwammode) hfreg = hreg;
  //  printf("constructed listhead hreg %x hfreg %x\n",hreg,hfreg);
  subg_visitors(subgoal) = 0;
  //  instr_flag = 1; printf("setting instr_flag\n");
  hreg_pos = hreg;
}
/*
 * gc_heap: garbage-collect the WAM heap (and optionally the string table).
 *
 * arity      - number of live argument registers reg[1..arity] to treat
 *              as roots
 * ifStringGC - requested default for string-table GC (may be overridden
 *              by should_gc_strings())
 * Returns TRUE always.
 *
 * Phases (all under #ifdef GC and pflags[GARBAGE_COLLECT] != NO_GC):
 *   1. Save roots: if sliding, push reg[1..arity], delayreg, the
 *      trieinstr_unif_stk contents, and (SLG_GC) hfreg onto the heap top.
 *   2. mark_heap().
 *   3. Either bail out (fragmentation_only, or too little garbage per
 *      mark_threshold) via the free_marks cleanup label, or compact by
 *      sliding (slide_heap) or by Cheney-style copying (copy_heap).
 *   4. Restore roots, free mark bitmaps, then (label `end`) optionally
 *      collect the string table when single-threaded.
 */
int gc_heap(CTXTdeclc int arity, int ifStringGC)
{
#ifdef GC
  CPtr p;
  double begin_marktime, end_marktime, end_slidetime, end_copy_time,
    begin_stringtime, end_stringtime;
  size_t marked = 0, marked_dregs = 0, i;
  int ii;
  size_t start_heap_size;
  /* number of cells currently on the trie-instruction unification stack */
  size_t rnum_in_trieinstr_unif_stk = (trieinstr_unif_stkptr-trieinstr_unif_stk)+1;
  DECL_GC_PROFILE;

  garbage_collecting = 1; // flag for profiling that we are gc-ing
  //  printf("start gc(%ld): e:%p,h:%p,hf:%p\n",(long)(cpu_time()*1000),ereg,hreg,hfreg);
  INIT_GC_PROFILE;
  if (pflags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (pflags[GARBAGE_COLLECT] == SLIDING_GC) |
      (pflags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    if (fragmentation_only)
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    GC_PROFILE_START_SUMMARY;
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;

    /* make sure the top choice point heap pointer that might not point
       into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;  /* dummy cell so cp_hreg(breg) < hreg */
      hreg++;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
	arity++;
	reg[arity] = (Cell)delayreg;  /* protect the delay list as a root */
      }
      for (ii = 1; ii <= arity; ii++) {
	//	printf("reg[%d] to heap: %lx\n",ii,(size_t)reg[i]);
	*hreg = reg[ii];
	hreg++;
      }
      /* the trieinstr_unif_stk cells are roots too; fold them into arity
	 so mark/restore treat them uniformly */
      arity += (int)rnum_in_trieinstr_unif_stk;
      for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
	//	printf("trieinstr_unif_stk[%d] to heap: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
	*hreg = trieinstr_unif_stk[i];
	hreg++;
      }
      //      printf("extended heap: hreg=%p, arity=%d, rnum_in=%d\n",hreg,arity, rnum_in_trieinstr_unif_stk);
#ifdef SLG_GC
      /* in SLGWAM, copy hfreg to the heap */
      //      printf("hfreg to heap is %p at %p, rnum_in_trieinstr_unif_stk=%d,arity=%d,delay=%p\n",hfreg,hreg,rnum_in_trieinstr_unif_stk,arity,delayreg);
      *(hreg++) = (Cell) hfreg;
#endif
    }
    /* the root save area may have pushed hreg into the local stack */
    if (top_of_localstk < hreg) {
      fprintf(stderr,"stack clobbered: no space for gc_heap\n");
      xsb_exit( "stack clobbered");
    }

    gc_strings = ifStringGC; /* default */
    gc_strings = should_gc_strings(); // collect strings for any reason?
    marked = mark_heap(CTXTc arity, &marked_dregs);
    end_marktime = cpu_time();

    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
	 this is internal fragmentation only. we print marked and total, so
	 that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC,
		  "marked_used_missed(%d,%d,%d,%d).",
		  marked,hreg+1-(CPtr)glstack.low,
		  heap_early_reset,ls_early_reset));
    free_marks:  /* shared bail-out path: undo marking, free bitmaps, skip
		    compaction (also reached via goto from the policy check
		    below) */
#ifdef PRE_IMAGE_TRAIL
      /* re-tag pre image cells in trail */
      for (p = tr_bot; p <= tr_top ; p++ ) {
	if (tr_pre_marked(p-tr_bot)) {
	  *p = *p | PRE_IMAGE_MARK;
	  tr_clear_pre_mark(p-tr_bot);
	}
      }
#endif
      /* get rid of the marking areas - if they exist */
      if (heap_marks) {
	mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE);
	heap_marks = NULL;
      }
      if (tr_marks) {
	mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE);
	tr_marks = NULL;
      }
      if (ls_marks) {
	mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE);
	ls_marks = NULL;
      }
      if (cp_marks) {
	mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE);
	cp_marks = NULL;
      }
      if (slide_buf) {
	mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE);
	slide_buf = NULL;
      }
      goto end;
    }

    GC_PROFILE_MARK_SUMMARY;

    /* An attempt to add some gc/expansion policy; ideally this should be
       user-controlled */
#if (! defined(GC_TEST))
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold)) {
      /* too little would be reclaimed: abort the collection */
      GC_PROFILE_QUIT_MSG;
      if (slide)
	hreg -= arity;
      /* NOTE(review): on this quit path the SLG_GC hfreg cell pushed above
	 is not popped (arity does include the trieinstr cells but not the
	 hfreg cell) -- confirm this is accounted for elsewhere */
      total_time_gc += (double) (end_marktime-begin_marktime);
      goto free_marks; /* clean-up temp areas and get out of here... */
    }
#endif

    total_collected += (start_heap_size - marked);

    if (slide) {
      GC_PROFILE_SLIDE_START_TIME;
      hreg = slide_heap(CTXTc marked) ;
#ifdef DEBUG_VERBOSE
      if (hreg != (heap_bot+marked))
	xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#endif
#ifdef SLG_GC
      /* copy hfreg back from the heap */
      hreg--;
      hfreg = (CPtr) *hreg;
#endif
      /* copy the aregs from the top of the heap back */
      hreg -= arity;
      hbreg = cp_hreg(breg);
      p = hreg;
      arity -= (int)rnum_in_trieinstr_unif_stk;  /* undo the fold-in above */
      for (ii = 1; ii <= arity; ii++) {
	reg[ii] = *p++;
	//	printf("heap to reg[%d]: %lx\n",ii,(size_t)reg[i]);
      }
      if (delayreg != NULL)
	delayreg = (CPtr)reg[arity--];
      for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
	trieinstr_unif_stk[i] = *p++;
	//	printf("heap to trieinstr_unif_stk[%d]: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
      }
      end_slidetime = cpu_time();
      total_time_gc += (double) (end_slidetime - begin_marktime);
      GC_PROFILE_SLIDE_FINAL_SUMMARY;
    } else {
      /* else we call the copying collector a la Cheney */
      CPtr begin_new_heap, end_new_heap;
      GC_PROFILE_COPY_START_TIME;
      begin_new_heap = (CPtr)mem_alloc(marked*sizeof(Cell),GC_SPACE);
      if (begin_new_heap == NULL)
	xsb_exit( "copying garbage collection could not allocate new heap");
      end_new_heap = begin_new_heap+marked;
      hreg = copy_heap(CTXTc marked,begin_new_heap,end_new_heap,arity);
      mem_dealloc(begin_new_heap,marked*sizeof(Cell),GC_SPACE);
      adapt_hfreg_from_choicepoints(CTXTc hreg);
      hbreg = cp_hreg(breg);
#ifdef SLG_GC
      hfreg = hreg;
#endif
      end_copy_time = cpu_time();
      total_time_gc += (double) (end_copy_time - begin_marktime);
      GC_PROFILE_COPY_FINAL_SUMMARY;
    }
    if (print_on_gc)
      print_all_stacks(CTXTc arity);
    /* get rid of the marking areas - if they exist */
    if (heap_marks) {
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE) ; /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks) {
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE) ;
      tr_marks = NULL ;
    }
    if (ls_marks) {
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE) ;
      ls_marks = NULL ;
    }
    if (cp_marks) {
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE) ;
      cp_marks = NULL ;
    }
    if (slide_buf) {
      mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE);
      slide_buf = NULL;
    }
#ifdef SAFE_GC
    /* zero the now-free part of the heap to catch stale references */
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
  } /* if (pflags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif

#ifdef GC
 end:
  /*************** GC STRING-TABLE (already marked from heap) *******************/
#ifndef NO_STRING_GC
#ifdef MULTI_THREAD
  /* string-table GC is only safe when no other thread can hold strings */
  if (flags[NUM_THREADS] == 1) {
#endif
    if (gc_strings && (flags[STRING_GARBAGE_COLLECT] == 1)) {
      num_sgc++;
      begin_stringtime = cpu_time();
      mark_nonheap_strings(CTXT);
      free_unused_strings();
      //      printf("String GC reclaimed: %d bytes\n",beg_string_space_size - pspacesize[STRING_SPACE]);
      gc_strings = FALSE;
      end_stringtime = cpu_time();
      total_time_gc += end_stringtime - begin_stringtime;
    }
    /* update these even if no GC, to avoid too many calls just to gc strings */
    last_string_space_size = pspacesize[STRING_SPACE];
    last_assert_space_size = pspacesize[ASSERT_SPACE];
    force_string_gc = FALSE;
#ifdef MULTI_THREAD
  }
#endif
#endif /* ndef NO_STRING_GC */
  GC_PROFILE_POST_REPORT;
  garbage_collecting = 0;
#endif /* GC */
  //  printf(" end gc(%ld), hf:%p,h:%p, space=%d\n",(long)(cpu_time()*1000),hfreg,hreg,(pb)top_of_localstk - (pb)top_of_heap);
  return(TRUE);
} /* gc_heap */
/*
 * gc_heap (legacy variant): older, single-threaded version of the heap
 * collector above -- no thread context, no string-table GC, no
 * trieinstr_unif_stk roots, plain malloc/free instead of mem_alloc /
 * mem_dealloc, and times scaled by 1000/CLOCKS_PER_SEC.
 *
 * NOTE(review): this duplicates the name gc_heap; presumably the two
 * definitions come from different build configurations or file revisions
 * -- confirm only one is compiled in.
 *
 * arity - number of live argument registers reg[1..arity] used as roots.
 * Returns TRUE always.
 */
int gc_heap(int arity)
{
#ifdef GC
  CPtr p;
  unsigned long begin_marktime, end_marktime, end_slidetime, end_copy_time;
  int marked = 0, marked_dregs = 0, i;
  int start_heap_size;
  DECL_GC_PROFILE;
  INIT_GC_PROFILE;
  if (flags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (flags[GARBAGE_COLLECT] == SLIDING_GC) |
      (flags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    if (fragmentation_only)
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    GC_PROFILE_START_SUMMARY;
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;
    /* make sure the top choice point heap pointer that might not point
       into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;  /* dummy cell so cp_hreg(breg) < hreg */
      hreg++ ;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
	arity++;
	reg[arity] = (Cell)delayreg;  /* protect the delay list as a root */
      }
      for (i = 1; i <= arity; i++) {
	*hreg = reg[i];
	hreg++;
      }
    }
#ifdef SLG_GC
    /* in SLGWAM, copy hfreg to the heap */
    if (slide) {
      *hreg = (unsigned long) hfreg;
      hreg++;
    }
#endif
    /* marked_dregs is filled by mark_heap but not otherwise used here */
    marked = mark_heap(arity, &marked_dregs);
    end_marktime = cpu_time();
    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
	 this is internal fragmentation only. we print marked and total, so
	 that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC,
		  "marked_used_missed(%d,%d,%d,%d).",
		  marked,hreg+1-(CPtr)glstack.low,
		  heap_early_reset,ls_early_reset));
    free_marks:  /* shared bail-out path: free mark bitmaps, skip compaction
		    (also reached via goto from the policy check below) */
      /* get rid of the marking areas - if they exist */
      if (heap_marks) {
	free((heap_marks-1));  /* -1 matches its off-by-one calloc */
	heap_marks = NULL;
      }
      if (tr_marks) {
	free(tr_marks);
	tr_marks = NULL;
      }
      if (ls_marks) {
	free(ls_marks);
	ls_marks = NULL;
      }
      if (cp_marks) {
	free(cp_marks);
	cp_marks = NULL;
      }
      goto end;
    }
    GC_PROFILE_MARK_SUMMARY;
    /* An attempt to add some gc/expansion policy; ideally this should be
       user-controlled */
#if (! defined(GC_TEST))
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold)) {
      /* too little would be reclaimed: abort the collection */
      GC_PROFILE_QUIT_MSG;
      if (slide)
	hreg -= arity;
      /* NOTE(review): on this quit path the SLG_GC hfreg cell pushed above
	 is not popped -- confirm this is accounted for elsewhere */
      total_time_gc += (double) (end_marktime-begin_marktime)*1000/CLOCKS_PER_SEC;
      goto free_marks; /* clean-up temp areas and get out of here... */
    }
#endif
    total_collected += (start_heap_size - marked);
    if (slide) {
      GC_PROFILE_SLIDE_START_TIME;
      hreg = slide_heap(marked) ;
      if (hreg != (heap_bot+marked))
	xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#ifdef SLG_GC
      /* copy hfreg back from the heap */
      hreg--;
      hfreg = (unsigned long*) *hreg;
#endif
      /* copy the aregs from the top of the heap back */
      hreg -= arity;
      hbreg = cp_hreg(breg);
      p = hreg;
      for (i = 1; i <= arity; i++)
	reg[i] = *p++ ;
      if (delayreg != NULL)
	delayreg = (CPtr)reg[arity--];
      end_slidetime = cpu_time();
      total_time_gc += (double) (end_slidetime - begin_marktime)*1000/CLOCKS_PER_SEC;
      GC_PROFILE_SLIDE_FINAL_SUMMARY;
    } else {
      /* else we call the copying collector a la Cheney */
      CPtr begin_new_heap, end_new_heap;
      GC_PROFILE_COPY_START_TIME;
      begin_new_heap = (CPtr)malloc(marked*sizeof(Cell));
      if (begin_new_heap == NULL)
	xsb_exit("copying garbage collection could not allocate new heap");
      end_new_heap = begin_new_heap+marked;
      hreg = copy_heap(marked,begin_new_heap,end_new_heap,arity);
      free(begin_new_heap);
      adapt_hfreg_from_choicepoints(hreg);
      hbreg = cp_hreg(breg);
#ifdef SLG_GC
      hfreg = hreg;
#endif
      end_copy_time = cpu_time();
      total_time_gc += (double) (end_copy_time - begin_marktime)*1000/CLOCKS_PER_SEC;
      GC_PROFILE_COPY_FINAL_SUMMARY;
    }
    if (print_on_gc)
      print_all_stacks(arity);
    /* get rid of the marking areas - if they exist */
    if (heap_marks) {
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      free((heap_marks-1)) ; /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks) {
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      free(tr_marks) ;
      tr_marks = NULL ;
    }
    if (ls_marks) {
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      free(ls_marks) ;
      ls_marks = NULL ;
    }
    if (cp_marks) {
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      free(cp_marks) ;
      cp_marks = NULL ;
    }
#ifdef SAFE_GC
    /* zero the now-free part of the heap to catch stale references */
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
  } /* if (flags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif
#ifdef GC
 end:
  GC_PROFILE_POST_REPORT;
#endif /* GC */
  return(TRUE);
} /* gc_heap */