/**
 * copy_s_point
 *
 * Deep-copies the fields of one S_POINT into the memory referenced by a
 * second specified S_POINT pointer: scalar fields are assigned directly,
 * while the e_cons0 buffer, the cons0 string and the seed heap are freshly
 * allocated copies owned by sp_copy. This function would go in the
 * "S_POINT" class, if it existed.
 *
 * Fix over the original: a NULL e_cons0 or cons0 on the source no longer
 * crashes — the corresponding field of the copy is left NULL instead.
 */
void copy_s_point (
  S_POINT *s_point,   ///< The S_POINT being copied
  S_POINT *sp_copy    ///< The copy of the S_POINT - OUT
) {
  assert(s_point != NULL);
  assert(sp_copy != NULL);

  /* Scalar fields are copied by plain assignment. */
  sp_copy->score = s_point->score;
  sp_copy->iseq = s_point->iseq;
  sp_copy->ioff = s_point->ioff;
  sp_copy->w0 = s_point->w0;
  sp_copy->nsites0 = s_point->nsites0;
  sp_copy->wgt_nsites = s_point->wgt_nsites;
  sp_copy->evaluate = s_point->evaluate;
  sp_copy->sig = s_point->sig;

  /* Deep-copy the w0-byte e_cons0 buffer. The original dereferenced
     s_point->e_cons0 unconditionally; tolerate a NULL source. */
  char *e_cons_copy = NULL;
  if (s_point->e_cons0 != NULL && s_point->w0 > 0) {
    Resize(e_cons_copy, s_point->w0, char);
    memcpy(e_cons_copy, s_point->e_cons0, s_point->w0);
  }
  sp_copy->e_cons0 = e_cons_copy;

  /* Deep-copy the NUL-terminated consensus string (w0 chars + NUL). */
  char *cons_copy = NULL;
  if (s_point->cons0 != NULL) {
    Resize(cons_copy, (s_point->w0 + 1), char);
    strcpy(cons_copy, s_point->cons0);
  }
  sp_copy->cons0 = cons_copy;

  /* The seed heap is duplicated by the heap module's own copier. */
  sp_copy->seed_heap = copy_heap(s_point->seed_heap);
} // copy_s_point
/**
 * create_heap_from_sp_matrix
 *
 * Builds a single "mega" heap containing a copy of every seed stored in
 * the per-S_POINT heaps of the given SP_MATRIX. The matrix and its heaps
 * are left untouched; the caller owns the returned heap.
 *
 * Fix over the original: the seed popped from the throwaway heap copy was
 * never released, leaking one seed object per node visited.
 */
HEAP *create_heap_from_sp_matrix (
  SP_MATRIX *sp_mat // the matrix of s_points
) {
  int row_idx, col_idx, i;
  int num_seeds = 0;
  int num_rows = sp_get_num_rows(sp_mat);
  int num_cols = sp_get_num_cols(sp_mat);
  void *root, *temp;

  // First pass: count the seeds so the mega heap can be sized exactly
  // (no node ever gets bumped out during insertion below).
  for (row_idx = 0; row_idx < num_rows; row_idx++) {
    for (col_idx = 0; col_idx < num_cols; col_idx++) {
      S_POINT *current_sp = get_spoint(sp_mat, row_idx, col_idx);
      HEAP *seed_heap = current_sp->seed_heap;
      num_seeds += get_num_nodes(seed_heap);
    }
  }

  // create the heap
  HEAP *mega_heap = create_heap(
    num_seeds,
    (int (*) (void *, void*))compare_seed,
    (void *)copy_seed,
    (void (*)(void*))free_seed,
    (char* (*)(void*))get_str_seed,
    (void (*)(FILE *, void*))print_seed
  );

  // Second pass: add copies of the seeds to the mega heap. Popping
  // destroys a heap, so each S_POINT's heap is first duplicated and the
  // duplicate is drained instead.
  for (row_idx = 0; row_idx < num_rows; row_idx++) {
    for (col_idx = 0; col_idx < num_cols; col_idx++) {
      S_POINT *current_sp = get_spoint(sp_mat, row_idx, col_idx);
      HEAP *current_heap = current_sp->seed_heap;
      HEAP *seed_heap = copy_heap(current_heap);
      int num_nodes = get_num_nodes(seed_heap);
      for (i = 1; i <= num_nodes; i++) {
        root = pop_heap_root(seed_heap);
        temp = mega_heap->copy(root);
        temp = add_node_heap(mega_heap, temp);
        // BUG FIX: the popped seed belongs to the throwaway copy and was
        // previously leaked; release it now that it has been duplicated.
        free_seed(root);
      }
      // NOTE(review): the HEAP structure returned by copy_heap (now empty)
      // is still leaked here — destroy it if the heap API has a destructor.
    }
  }

  // return the heap
  return mega_heap;
} // create_heap_from_sp_matrix
/*
 * Benchmark driver: reads <size> and <merges> from settings.txt, loads
 * size/merges keys from test_cases/test_all.txt into each of `merges`
 * ordinary heaps, replicates the heaps REPEAT times (a merge destroys its
 * operands), then times REPEAT rounds of left-to-right pairwise merges and
 * prints the elapsed seconds.
 *
 * Fixes over the original: fopen/fscanf results are checked (the original
 * dereferenced a NULL FILE* if a file was missing), and `merges` is
 * validated against MAX_MERGE and zero before it is used as an array bound
 * and divisor. The unused local `method` was removed.
 */
int main(void){
    OrdinaryHeap H[REPEAT][MAX_MERGE];
    FILE *fp;
    int size, each_size, merges;
    int x, i, merge_cnt, rpt;
    clock_t start, stop;
    double duration;

    /* initialize: read the test parameters */
    fp = fopen("settings.txt", "r");
    if (fp == NULL) {
        fprintf(stderr, "cannot open settings.txt\n");
        return 1;
    }
    if (fscanf(fp, "%d%d", &size, &merges) != 2) {
        fprintf(stderr, "settings.txt: expected two integers\n");
        fclose(fp);
        return 1;
    }
    fclose(fp);

    /* merges indexes H[][] and divides size: bound-check it */
    if (merges <= 0 || merges > MAX_MERGE) {
        fprintf(stderr, "invalid merge count: %d\n", merges);
        return 1;
    }
    each_size = size / merges;

    /* load the input keys, round-robin into `merges` heaps */
    fp = fopen("test_cases/test_all.txt", "r");
    if (fp == NULL) {
        fprintf(stderr, "cannot open test_cases/test_all.txt\n");
        return 1;
    }
    for (merge_cnt = 0; merge_cnt < merges; merge_cnt++){
        H[0][merge_cnt] = create_ordinary_heap(size);
        for (i = 0; i < each_size; i++){
            if (fscanf(fp, "%d", &x) != 1) {
                fprintf(stderr, "test_all.txt: ran out of input keys\n");
                fclose(fp);
                return 1;
            }
            insert_ordinary_heap(x, H[0][merge_cnt]);
        }
    }
    fclose(fp);

    /* copy heap since a merge operation destroys the original heap */
    for (rpt = 1; rpt < REPEAT; rpt++)
        for (merge_cnt = 0; merge_cnt < merges; merge_cnt++)
            H[rpt][merge_cnt] = copy_heap(H[0][merge_cnt], H[rpt][merge_cnt]);

    /* Start test: fold heap k into heap k+1, REPEAT times over */
    start = clock();
    for (rpt = 0; rpt < REPEAT; rpt++)
        for (merge_cnt = 0; merge_cnt < merges - 1; merge_cnt++)
            H[rpt][merge_cnt + 1] = merge_heap(H[rpt][merge_cnt + 1], H[rpt][merge_cnt]);
    stop = clock();

    duration = ((double)(stop - start)) / CLOCKS_PER_SEC;
    printf("%lf\n", duration);
    return 0;
}
/*
 * mem_alloc_obj
 *
 * Bump-pointer allocation of size_bytes (rounded up by aligned_size) from
 * the current allocation region. If the region cannot satisfy the request,
 * a copying collection (copy_heap) is run first. The first word of every
 * object is a pointer to its mem_ops_t table, which is what gets returned
 * (cast to obj_t *).
 *
 * Ownership: the returned object lives in the GC heap; the collector
 * reclaims it.
 */
obj_t *mem_alloc_obj(const mem_ops_t *ops, size_t size_bytes)
{
    assert(heap_is_initialized);
    verify_heap();                        /* debug-check heap invariants before allocating */
    remember_ops(ops);                    /* register the ops table so the collector knows it */
    size_t alloc_size = aligned_size(size_bytes);
    /* Out of room in the current region: collect, then re-check. */
    if (next_alloc > alloc_end - alloc_size) {
        copy_heap();
        /* NOTE(review): the pre-GC check bounds against alloc_end but this
           post-GC assert bounds against tospace_end — confirm both name the
           same region after copy_heap() flips semispaces. Also, if
           alloc_size exceeds the region size, `alloc_end - alloc_size`
           computes a pointer before the region (UB) — verify callers. */
        assert(next_alloc <= tospace_end - alloc_size && "out of memory");
    }
    const mem_ops_t **p;
    /* with lock */
    {
        /* Claim the slot and bump the allocation pointer.
           NOTE(review): the comment says "with lock" but no lock is taken
           here — confirm single-threaded use or add real synchronization. */
        p = next_alloc;
        next_alloc += alloc_size;
    }
    *p = ops;                             /* stamp the object header with its ops table */
    return (obj_t *)p;
}
/*
 * gc_heap (CTXT / multi-threaded XSB variant)
 *
 * Top-level heap garbage collector. Depending on pflags[GARBAGE_COLLECT]
 * it either slides the heap in place (SLIDING_GC / INDIRECTION_SLIDE_GC)
 * or runs a Cheney-style copying collection into a temporary buffer.
 * Optionally also collects the string table afterwards.
 *
 * arity       - number of live argument registers to protect across GC
 * ifStringGC  - requested default for string-table collection (see the
 *               NOTE below: it is immediately overridden)
 *
 * Returns TRUE. Side effects: moves hreg/hfreg/hbreg, rewrites reg[] and
 * trieinstr_unif_stk[], updates GC statistics and timing globals.
 */
int gc_heap(CTXTdeclc int arity, int ifStringGC) {
#ifdef GC
  CPtr p;
  double begin_marktime, end_marktime, end_slidetime, end_copy_time, begin_stringtime, end_stringtime;
  size_t marked = 0, marked_dregs = 0, i;
  int ii;
  size_t start_heap_size;
  /* number of live cells on the trie-instruction unification stack,
     which must also survive the collection */
  size_t rnum_in_trieinstr_unif_stk = (trieinstr_unif_stkptr-trieinstr_unif_stk)+1;
  DECL_GC_PROFILE;
  garbage_collecting = 1;  // flag for profiling that we are gc-ing
  // printf("start gc(%ld): e:%p,h:%p,hf:%p\n",(long)(cpu_time()*1000),ereg,hreg,hfreg);
  INIT_GC_PROFILE;
  if (pflags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (pflags[GARBAGE_COLLECT] == SLIDING_GC) | (pflags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    /* fragmentation analysis only marks; it never moves anything */
    if (fragmentation_only)
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    GC_PROFILE_START_SUMMARY;
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;
    /* make sure the top choice point heap pointer that might not point
       into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;  /* dummy cell so cp_hreg(breg) points at a real heap cell */
      hreg++;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
        /* park the delay register in an extra pseudo-areg so it is traced */
        arity++;
        reg[arity] = (Cell)delayreg;
      }
      for (ii = 1; ii <= arity; ii++) {
        // printf("reg[%d] to heap: %lx\n",ii,(size_t)reg[i]);
        *hreg = reg[ii];
        hreg++;
      }
      /* the trie-instruction unification stack cells ride along as
         additional pseudo-aregs */
      arity += (int)rnum_in_trieinstr_unif_stk;
      for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
        // printf("trieinstr_unif_stk[%d] to heap: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
        *hreg = trieinstr_unif_stk[i];
        hreg++;
      }
      // printf("extended heap: hreg=%p, arity=%d, rnum_in=%d\n",hreg,arity, rnum_in_trieinstr_unif_stk);
#ifdef SLG_GC
      /* in SLGWAM, copy hfreg to the heap */
      // printf("hfreg to heap is %p at %p, rnum_in_trieinstr_unif_stk=%d,arity=%d,delay=%p\n",hfreg,hreg,rnum_in_trieinstr_unif_stk,arity,delayreg);
      *(hreg++) = (Cell) hfreg;
#endif
    }
    /* the pseudo-aregs above must not have run into the local stack */
    if (top_of_localstk < hreg) {
      fprintf(stderr,"stack clobbered: no space for gc_heap\n");
      xsb_exit( "stack clobbered");
    }
    /* NOTE(review): the first assignment is dead — should_gc_strings()
       unconditionally overwrites ifStringGC. Confirm that is intended. */
    gc_strings = ifStringGC;  /* default */
    gc_strings = should_gc_strings();  // collect strings for any reason?
    marked = mark_heap(CTXTc arity, &marked_dregs);
    end_marktime = cpu_time();
    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
         this is internal fragmentation only. we print marked and total,
         so that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC, "marked_used_missed(%d,%d,%d,%d).",
                  marked,hreg+1-(CPtr)glstack.low,
                  heap_early_reset,ls_early_reset));
    /* free_marks: shared cleanup label — also entered from the
       gc/expansion policy bail-out below */
    free_marks:
#ifdef PRE_IMAGE_TRAIL
      /* re-tag pre image cells in trail */
      for (p = tr_bot; p <= tr_top ; p++ ) {
        if (tr_pre_marked(p-tr_bot)) {
          *p = *p | PRE_IMAGE_MARK;
          tr_clear_pre_mark(p-tr_bot);
        }
      }
#endif
      /* get rid of the marking areas - if they exist */
      if (heap_marks) {
        mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE);  /* -1: see its allocation */
        heap_marks = NULL;
      }
      if (tr_marks) {
        mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE);
        tr_marks = NULL;
      }
      if (ls_marks) {
        mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE);
        ls_marks = NULL;
      }
      if (cp_marks) {
        mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE);
        cp_marks = NULL;
      }
      if (slide_buf) {
        mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE);
        slide_buf = NULL;
      }
      goto end;
    }
    GC_PROFILE_MARK_SUMMARY;
    /* An attempt to add some gc/expansion policy; ideally this should be
       user-controlled */
#if (! defined(GC_TEST))
    /* too little garbage to be worth moving anything: undo and bail out */
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold)) {
      GC_PROFILE_QUIT_MSG;
      if (slide)
        hreg -= arity;  /* drop the pseudo-aregs pushed above */
      total_time_gc += (double) (end_marktime-begin_marktime);
      goto free_marks;  /* clean-up temp areas and get out of here... */
    }
#endif
    total_collected += (start_heap_size - marked);
    if (slide) {
      GC_PROFILE_SLIDE_START_TIME;
      hreg = slide_heap(CTXTc marked) ;
#ifdef DEBUG_VERBOSE
      if (hreg != (heap_bot+marked))
        xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#endif
#ifdef SLG_GC
      /* copy hfreg back from the heap */
      hreg--;
      hfreg = (CPtr) *hreg;
#endif
      /* copy the aregs from the top of the heap back */
      hreg -= arity;
      hbreg = cp_hreg(breg);
      p = hreg;
      arity -= (int)rnum_in_trieinstr_unif_stk;
      for (ii = 1; ii <= arity; ii++) {
        reg[ii] = *p++;
        // printf("heap to reg[%d]: %lx\n",ii,(size_t)reg[i]);
      }
      if (delayreg != NULL)
        delayreg = (CPtr)reg[arity--];  /* restore the parked delay register */
      for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
        trieinstr_unif_stk[i] = *p++;
        // printf("heap to trieinstr_unif_stk[%d]: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
      }
      end_slidetime = cpu_time();
      total_time_gc += (double) (end_slidetime - begin_marktime);
      GC_PROFILE_SLIDE_FINAL_SUMMARY;
    } else {
      /* else we call the copying collector a la Cheney */
      CPtr begin_new_heap, end_new_heap;
      GC_PROFILE_COPY_START_TIME;
      begin_new_heap = (CPtr)mem_alloc(marked*sizeof(Cell),GC_SPACE);
      if (begin_new_heap == NULL)
        xsb_exit( "copying garbage collection could not allocate new heap");
      end_new_heap = begin_new_heap+marked;
      /* copy live cells out and back; copy_heap returns the new hreg */
      hreg = copy_heap(CTXTc marked,begin_new_heap,end_new_heap,arity);
      mem_dealloc(begin_new_heap,marked*sizeof(Cell),GC_SPACE);
      adapt_hfreg_from_choicepoints(CTXTc hreg);
      hbreg = cp_hreg(breg);
#ifdef SLG_GC
      hfreg = hreg;
#endif
      end_copy_time = cpu_time();
      total_time_gc += (double) (end_copy_time - begin_marktime);
      GC_PROFILE_COPY_FINAL_SUMMARY;
    }
    if (print_on_gc) print_all_stacks(CTXTc arity);
    /* get rid of the marking areas - if they exist */
    if (heap_marks) {
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE) ;  /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks) {
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE) ;
      tr_marks = NULL ;
    }
    if (ls_marks) {
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE) ;
      ls_marks = NULL ;
    }
    if (cp_marks) {
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE) ;
      cp_marks = NULL ;
    }
    if (slide_buf) {
      mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE);
      slide_buf = NULL;
    }
#ifdef SAFE_GC
    /* scrub the now-free part of the heap to catch stale references */
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
  } /* if (pflags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif
#ifdef GC
 end:
  /*************** GC STRING-TABLE (already marked from heap) *******************/
#ifndef NO_STRING_GC
#ifdef MULTI_THREAD
  /* string-table GC is only safe when a single thread is running */
  if (flags[NUM_THREADS] == 1) {
#endif
    if (gc_strings && (flags[STRING_GARBAGE_COLLECT] == 1)) {
      num_sgc++;
      begin_stringtime = cpu_time();
      mark_nonheap_strings(CTXT);
      free_unused_strings();
      // printf("String GC reclaimed: %d bytes\n",beg_string_space_size - pspacesize[STRING_SPACE]);
      gc_strings = FALSE;
      end_stringtime = cpu_time();
      total_time_gc += end_stringtime - begin_stringtime;
    }
    /* update these even if no GC, to avoid too many calls just to gc
       strings */
    last_string_space_size = pspacesize[STRING_SPACE];
    last_assert_space_size = pspacesize[ASSERT_SPACE];
    force_string_gc = FALSE;
#ifdef MULTI_THREAD
  }
#endif
#endif /* ndef NO_STRING_GC */
  GC_PROFILE_POST_REPORT;
  garbage_collecting = 0;
#endif /* GC */
  // printf(" end gc(%ld), hf:%p,h:%p, space=%d\n",(long)(cpu_time()*1000),hfreg,hreg,(pb)top_of_localstk - (pb)top_of_heap);
  return(TRUE);
} /* gc_heap */
/*
 * cluster
 *
 * Check that the relation is a relation in the appropriate user
 * ACL. I will use the same security that limits users on the
 * renamerel() function.
 *
 * Check that the index specified is appropriate for the task
 * ( ie it's an index over this relation ). This is trickier.
 *
 * Create a list of all the other indices on this relation. Because
 * the cluster will wreck all the tids, I'll need to destroy bogus
 * indices. The user will have to re-create them. Not nice, but
 * I'm not a nice guy. The alternative is to try some kind of post
 * destroy re-build. This may be possible. I'll check out what the
 * index create functions want in the way of parameters. On the other
 * hand, re-creating n indices may blow out the space.
 *
 * Create new (temporary) relations for the base heap and the new
 * index.
 *
 * Exclusively lock the relations.
 *
 * Create new clustered index and base heap relation.
 *
 */
void
cluster(char oldrelname[], char oldindexname[])
{
    Oid OIDOldHeap, OIDOldIndex, OIDNewHeap;
    Relation OldHeap, OldIndex;
    Relation NewHeap;
    char *NewIndexName;
    char *szNewHeapName;

    /*
     * I'm going to force all checking back into the commands.c function.
     *
     * Get the list if indices for this relation. If the index we want
     * is among them, do not add it to the 'kill' list, as it will be
     * handled by the 'clean up' code which commits this transaction.
     *
     * I'm not using the SysCache, because this will happen but
     * once, and the slow way is the sure way in this case.
     */
    /*
     * Like vacuum, cluster spans transactions, so I'm going to handle it in
     * the same way.
     */

    /* matches the StartTransaction in PostgresMain() */

    /* Open and validate the old heap relation; WARN aborts on failure. */
    OldHeap = heap_openr(oldrelname);
    if (!RelationIsValid(OldHeap)) {
        elog(WARN, "cluster: unknown relation: \"%-.*s\"",
             NAMEDATALEN, oldrelname);
    }
    OIDOldHeap = OldHeap->rd_id;  /* Get OID for the index scan */

    /* Open old index relation and remember its OID for the index scan. */
    OldIndex=index_openr(oldindexname);
    if (!RelationIsValid(OldIndex)) {
        elog(WARN, "cluster: unknown index: \"%-.*s\"",
             NAMEDATALEN, oldindexname);
    }
    OIDOldIndex = OldIndex->rd_id;  /* OID for the index scan */

    /* Only the OIDs are needed from here on; release the descriptors. */
    heap_close(OldHeap);
    index_close(OldIndex);

    /*
     * I need to build the copies of the heap and the index. The Commit()
     * between here is *very* bogus. If someone is appending stuff, they will
     * get the lock after being blocked and add rows which won't be present in
     * the new table. Bleagh! I'd be best to try and ensure that no-one's
     * in the tables for the entire duration of this process with a pg_vlock.
     */
    /* Clone the old heap's schema into a new (temporary) heap relation. */
    NewHeap = copy_heap(OIDOldHeap);
    OIDNewHeap = NewHeap->rd_id;
    szNewHeapName = pstrdup(NewHeap->rd_rel->relname.data);

    /* Need to do this to make the new heap visible. */
    CommandCounterIncrement();

    /* Fill the new heap with the old heap's tuples in index order. */
    rebuildheap(OIDNewHeap, OIDOldHeap, OIDOldIndex);

    /* Need to do this to make the new heap visible. */
    CommandCounterIncrement();

    /* can't be found in the SysCache. */
    copy_index(OIDOldIndex, OIDNewHeap);  /* No contention with the old */

    /*
     * make this really happen. Flush all the buffers.
     */
    CommitTransactionCommand();
    StartTransactionCommand();

    /*
     * Questionable bit here. Because the renamerel destroys all trace of the
     * pre-existing relation, I'm going to Destroy old, and then rename new
     * to old. If this fails, it fails, and you lose your old. Tough - say
     * I. Have good backups!
     */
    /*
     * Here lies the bogosity. The RelationNameGetRelation returns a bad
     * list of TupleDescriptors. Damn. Can't work out why this is.
     */
    heap_destroy(oldrelname);  /* AAAAAAAAGH!! */

    CommandCounterIncrement();

    /*
     * The Commit flushes all palloced memory, so I have to grab the
     * New stuff again. This is annoying, but oh heck!
     */
    /*
     * NOTE(review): the rename of the new heap to the old relation name is
     * commented out below, leaving szNewHeapName computed but unused (and
     * leaked) — confirm the new heap actually replaces the old relation.
     */
/*
    renamerel(szNewHeapName.data, oldrelname);
    TypeRename(&szNewHeapName, &szOldRelName);

    sprintf(NewIndexName.data, "temp_%x", OIDOldIndex);
    renamerel(NewIndexName.data, szOldIndexName.data);
*/
    /* Rename the temporary index ("temp_<oid>") to the old index name. */
    NewIndexName = palloc(NAMEDATALEN+1);  /* XXX */
    sprintf(NewIndexName, "temp_%x", OIDOldIndex);
    renamerel(NewIndexName, oldindexname);
}
/*
 * gc_heap (single-threaded variant)
 *
 * Top-level heap garbage collector. Depending on flags[GARBAGE_COLLECT]
 * it either slides the heap in place (SLIDING_GC / INDIRECTION_SLIDE_GC)
 * or runs a Cheney-style copying collection into a malloc'd buffer.
 *
 * arity - number of live argument registers to protect across GC.
 *
 * Returns TRUE. Side effects: moves hreg/hfreg/hbreg, rewrites reg[],
 * and updates GC statistics and timing globals.
 */
int gc_heap(int arity)
{
#ifdef GC
  CPtr p;
  unsigned long begin_marktime, end_marktime, end_slidetime, end_copy_time;
  int marked = 0, marked_dregs = 0, i;
  int start_heap_size;
  DECL_GC_PROFILE;

  INIT_GC_PROFILE;
  if (flags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (flags[GARBAGE_COLLECT] == SLIDING_GC) | (flags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    /* fragmentation analysis only marks; it never moves anything */
    if (fragmentation_only)
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    GC_PROFILE_START_SUMMARY;
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;
    /* make sure the top choice point heap pointer that might not point
       into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;  /* dummy cell so cp_hreg(breg) points at a real heap cell */
      hreg++ ;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
        /* park the delay register in an extra pseudo-areg so it is traced */
        arity++;
        reg[arity] = (Cell)delayreg;
      }
      for (i = 1; i <= arity; i++) {
        *hreg = reg[i];
        hreg++;
      }
    }
#ifdef SLG_GC
    /* in SLGWAM, copy hfreg to the heap */
    if (slide) {
      *hreg = (unsigned long) hfreg;
      hreg++;
    }
#endif
    marked = mark_heap(arity, &marked_dregs);
    end_marktime = cpu_time();

    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
         this is internal fragmentation only. we print marked and total,
         so that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC, "marked_used_missed(%d,%d,%d,%d).",
                  marked,hreg+1-(CPtr)glstack.low,
                  heap_early_reset,ls_early_reset));
    /* free_marks: shared cleanup label — also entered from the
       gc/expansion policy bail-out below */
    free_marks:
      /* get rid of the marking areas - if they exist */
      if (heap_marks) {
        free((heap_marks-1));  /* -1: see its allocation */
        heap_marks = NULL;
      }
      if (tr_marks) {
        free(tr_marks);
        tr_marks = NULL;
      }
      if (ls_marks) {
        free(ls_marks);
        ls_marks = NULL;
      }
      if (cp_marks) {
        free(cp_marks);
        cp_marks = NULL;
      }
      goto end;
    }
    GC_PROFILE_MARK_SUMMARY;

    /* An attempt to add some gc/expansion policy; ideally this should be
       user-controlled */
#if (! defined(GC_TEST))
    /* too little garbage to be worth moving anything: undo and bail out */
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold)) {
      GC_PROFILE_QUIT_MSG;
      if (slide)
        hreg -= arity;  /* drop the pseudo-aregs pushed above */
      total_time_gc += (double) (end_marktime-begin_marktime)*1000/CLOCKS_PER_SEC;
      goto free_marks;  /* clean-up temp areas and get out of here... */
    }
#endif
    total_collected += (start_heap_size - marked);

    if (slide) {
      GC_PROFILE_SLIDE_START_TIME;
      hreg = slide_heap(marked) ;
      if (hreg != (heap_bot+marked))
        xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#ifdef SLG_GC
      /* copy hfreg back from the heap */
      hreg--;
      hfreg = (unsigned long*) *hreg;
#endif
      /* copy the aregs from the top of the heap back */
      hreg -= arity;
      hbreg = cp_hreg(breg);
      p = hreg;
      for (i = 1; i <= arity; i++)
        reg[i] = *p++ ;
      if (delayreg != NULL)
        delayreg = (CPtr)reg[arity--];  /* restore the parked delay register */
      end_slidetime = cpu_time();
      total_time_gc += (double) (end_slidetime - begin_marktime)*1000/CLOCKS_PER_SEC;
      GC_PROFILE_SLIDE_FINAL_SUMMARY;
    } else {
      /* else we call the copying collector a la Cheney */
      CPtr begin_new_heap, end_new_heap;
      GC_PROFILE_COPY_START_TIME;
      begin_new_heap = (CPtr)malloc(marked*sizeof(Cell));
      if (begin_new_heap == NULL)
        xsb_exit("copying garbage collection could not allocate new heap");
      end_new_heap = begin_new_heap+marked;
      /* copy live cells out and back; copy_heap returns the new hreg */
      hreg = copy_heap(marked,begin_new_heap,end_new_heap,arity);
      free(begin_new_heap);
      adapt_hfreg_from_choicepoints(hreg);
      hbreg = cp_hreg(breg);
#ifdef SLG_GC
      hfreg = hreg;
#endif
      end_copy_time = cpu_time();
      total_time_gc += (double) (end_copy_time - begin_marktime)*1000/CLOCKS_PER_SEC;
      GC_PROFILE_COPY_FINAL_SUMMARY;
    }
    if (print_on_gc) print_all_stacks(arity);

    /* get rid of the marking areas - if they exist */
    if (heap_marks) {
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      free((heap_marks-1)) ;  /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks) {
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      free(tr_marks) ;
      tr_marks = NULL ;
    }
    if (ls_marks) {
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      free(ls_marks) ;
      ls_marks = NULL ;
    }
    if (cp_marks) {
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      free(cp_marks) ;
      cp_marks = NULL ;
    }
#ifdef SAFE_GC
    /* scrub the now-free part of the heap to catch stale references */
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
  } /* if (flags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif

#ifdef GC
 end:
  GC_PROFILE_POST_REPORT;
#endif /* GC */
  return(TRUE);
} /* gc_heap */