/* Walk term p breadth-first and mark every variable and compound functor
 * word so that subterms occurring more than once can be detected when the
 * term is duplicated.  Each markable word moves through three states:
 * virgin -> visited (seen once) -> shared (seen at least twice).
 *
 * p      - pointer to the term to scan
 * flags  - COPY_ATTRS: also descend into the attribute list of
 *          attributed variables
 *
 * Returns TRUE on success or MEMORY_OVERFLOW if the agenda cannot grow.
 */
static int
mark_for_duplicate(Word p, int flags ARG_LD)
{ term_agenda agenda;

  initTermAgenda(&agenda, 1, p);
  while((p=nextTermAgenda(&agenda)))
  { again:
    switch(tag(*p))
    { case TAG_ATTVAR:
      { if ( flags & COPY_ATTRS )
        { p = valPAttVar(*p);		/* follow into the attribute term */
          goto again;
        }
        /*FALLTHROUGH*/
      }
      case TAG_VAR:
      { if ( virgin(*p) )
          set_visited(*p);
        else if ( visited_once(*p) )
          set_shared(*p);
        break;
      }
      case TAG_COMPOUND:
      { Functor t = valueTerm(*p);
        int arity = arityFunctor(t->definition);

        if ( virgin(t->definition) )
        { set_visited(t->definition);
        } else
        { if ( visited_once(t->definition) )
            set_shared(t->definition);
          break;			/* already scanned; don't descend */
        }

        if ( !pushWorkAgenda(&agenda, arity, t->arguments) )
        { clearTermAgenda(&agenda);	/* BUG FIX: release agenda storage on */
          return MEMORY_OVERFLOW;	/* the overflow path (mark_for_copy   */
        }				/* clears its segstack the same way)  */
        continue;
      }
    }
  }
  clearTermAgenda(&agenda);

  return TRUE;
}
// Flood-fill this segment starting from its seed cell.  Cells are expanded
// depth-first: each popped cell is stamped with the seed's label and added
// to cells_.  A 4-neighbour that already carries a different, non-ignored
// label yields an adjacency edge between the two segments (resolved through
// segments_accessor); unlabelled neighbours that are covered targets and not
// yet visited are pushed for later expansion.
void grid_segment::grow(std::function<grid_segment::ptr(grid_cell_base::label)> segments_accessor) {
    vector<grid_cell::ptr> pending;
    pending.push_back(*cells_.begin());                 // seed: first known cell
    set_visited(pending.front());
    grid_cell_base::label seg_label = pending.front()->get_label();

    while(!pending.empty()) {
        grid_cell::ptr cell = pending.back();
        pending.pop_back();

        cell->set_label(seg_label);
        cells_.insert(cell);

        for(size_t dir = 0; dir < 4; ++dir) {
            auto neighbour = grid_.get_neighbour_cell_4(cell, dir);
            if(!neighbour)
                continue;                               // off the grid

            if(neighbour->has_label()) {
                // Touching a different, non-ignored segment: record adjacency.
                if(neighbour->get_label() != seg_label && !neighbour->is_ignored()) {
                    add_edge(std::static_pointer_cast<graph_node>(segments_accessor(seg_label)),
                             std::static_pointer_cast<graph_node>(segments_accessor(neighbour->get_label())));
                }
                continue;
            }

            if(neighbour->is_target() && !neighbour->is_ignored() &&
               neighbour->is_covered() && !is_visited(neighbour)) {
                set_visited(neighbour);
                pending.push_back(neighbour);
            }
        }
    }
}
/*
 * Given a reference, a variable or a pointer to a heap block, with known size,
 * Return its aggregated reachable in-use blocks
 *
 * ref                  - the starting reference (register, heap pointer, or a
 *                        local/global variable of var_len bytes)
 * var_len              - size of the referenced object in bytes
 * all_reachable_blocks - if true, follow pointers transitively
 *                        (via heap_aggregate_size) and use/update the
 *                        per-block cached aggregate in blk->reachable
 * inuse_blocks         - sorted array of in-use heap blocks
 * num_inuse_blocks     - number of entries in inuse_blocks
 * total_size           - out: aggregated byte count of reachable blocks
 * total_count          - out: number of reachable blocks
 *
 * Returns CA_TRUE on success, CA_FALSE on bad input or out-of-memory.
 *
 * NOTE(review): uses function-static state (qv_bitmap/bitmap_capacity),
 * so this is not reentrant or thread-safe — presumably fine for the
 * single-threaded debugger context; confirm with callers.
 */
CA_BOOL calc_aggregate_size(const struct object_reference *ref,
	size_t var_len,
	CA_BOOL all_reachable_blocks,
	struct inuse_block *inuse_blocks,
	unsigned long num_inuse_blocks,
	size_t *total_size,
	unsigned long *total_count)
{
	address_t addr, cursor, end;
	size_t ptr_sz = g_ptr_bit >> 3;		/* pointer size in bytes */
	size_t aggr_size = 0;
	unsigned long aggr_count = 0;
	struct inuse_block *blk;
	/* two bits per block, rounded up to whole unsigned ints */
	size_t bitmap_sz = ((num_inuse_blocks+15)*2/32) * sizeof(unsigned int);
	static unsigned int* qv_bitmap = NULL;	// Bit flags of whether a block is queued/visited
	static unsigned long bitmap_capacity = 0;	// in terms of number of blocks handled

	// ground return values
	*total_size = 0;
	*total_count = 0;

	// Prepare bitmap with the clean state
	// (the buffer is cached across calls and only regrown when too small)
	if (bitmap_capacity < num_inuse_blocks)
	{
		if (qv_bitmap)
			free (qv_bitmap);
		// Each block uses two bits(queued/visited)
		qv_bitmap = (unsigned int*) malloc(bitmap_sz);
		if (!qv_bitmap)
		{
			bitmap_capacity = 0;
			CA_PRINT("Out of Memory\n");
			return CA_FALSE;
		}
		bitmap_capacity = num_inuse_blocks;
	}
	memset(qv_bitmap, 0, bitmap_sz);

	// Input is a pointer to an in-use memory block
	if (ref->storage_type == ENUM_REGISTER || ref->storage_type == ENUM_HEAP)
	{
		if (var_len != ptr_sz)
			return CA_FALSE;	/* only pointer-sized values can point at a block */
		blk = find_inuse_block(ref->vaddr, inuse_blocks, num_inuse_blocks);
		if (blk)
		{
			// cached result is available, return now
			if (all_reachable_blocks && blk->reachable.aggr_size)
			{
				*total_size = blk->reachable.aggr_size;
				*total_count = blk->reachable.aggr_count;
				return CA_TRUE;
			}
			else
			{
				// search starts with the memory block
				cursor = blk->addr;
				end = cursor + blk->size;
				aggr_size = blk->size;
				aggr_count = 1;
				set_visited(qv_bitmap, blk - inuse_blocks);
			}
		}
		else
			return CA_FALSE;
	}
	// input reference is an object with given size, e.g. a local/global variable
	else
	{
		if (all_reachable_blocks && var_len == ptr_sz)
		{
			// input is of pointer size, which is candidate for cache value
			if(read_memory_wrapper(NULL, ref->vaddr, (void*)&addr, ptr_sz))
			{
				blk = find_inuse_block(addr, inuse_blocks, num_inuse_blocks);
				if (blk)
				{
					if (blk->reachable.aggr_size)
					{
						*total_size = blk->reachable.aggr_size;
						*total_count = blk->reachable.aggr_count;
						return CA_TRUE;
					}
				}
				else
					return CA_FALSE;
			}
			else
				return CA_FALSE;
		}
		// scan the variable's own bytes for embedded pointers
		cursor = ref->vaddr;
		end = cursor + var_len;
	}

	// We now have a range of memory to search
	cursor = ALIGN(cursor, ptr_sz);
	while (cursor < end)
	{
		// read one pointer-sized word; stop scanning on unreadable memory
		if(!read_memory_wrapper(NULL, cursor, (void*)&addr, ptr_sz))
			break;
		blk = find_inuse_block(addr, inuse_blocks, num_inuse_blocks);
		if (blk && !is_queued_or_visited(qv_bitmap, blk - inuse_blocks))
		{
			if (all_reachable_blocks)
			{
				// follow the block's own pointers transitively
				unsigned long sub_count = 0;
				aggr_size += heap_aggregate_size(blk, inuse_blocks, num_inuse_blocks, qv_bitmap, &sub_count);
				aggr_count += sub_count;
			}
			else
			{
				// directly referenced blocks only
				aggr_size += blk->size;
				aggr_count++;
				set_visited(qv_bitmap, blk - inuse_blocks);
			}
		}
		cursor += ptr_sz;
	}

	// can we cache the result?
	if (all_reachable_blocks && aggr_size)
	{
		if (ref->storage_type == ENUM_REGISTER || ref->storage_type == ENUM_HEAP)
		{
			// blk is non-NULL here: the NULL case returned CA_FALSE above
			blk = find_inuse_block(ref->vaddr, inuse_blocks, num_inuse_blocks);
			blk->reachable.aggr_size = aggr_size;
			blk->reachable.aggr_count = aggr_count;
		}
		else if (var_len == ptr_sz)
		{
			if (read_memory_wrapper(NULL, ref->vaddr, (void*)&addr, ptr_sz))
			{
				blk = find_inuse_block(addr, inuse_blocks, num_inuse_blocks);
				if (blk)
				{
					blk->reachable.aggr_size = aggr_size;
					blk->reachable.aggr_count = aggr_count;
				}
			}
		}
	}

	// return happily
	*total_size = aggr_size;
	*total_count = aggr_count;
	return CA_TRUE;
}
// ta procedura odpowiada za kolejne przejscia rekurencyjne, celem // unifikacji kolorystyki obrazu static int process_img(struct raster *r_out, struct raster *r, char visited[], double d_max, int x, int y, int x_prev, int y_prev, struct rgb24 base) { int off_x, off_y; // przesuniecia x,y int cx, cy; // CurrentX, CurrentY double d_cur; // bierzacy gradient! double d[2]; // gradienty miedzy elementami double tmp; // pomocniczy struct rgb24 pix_out; // pixel do wstawienia struct rgb24 pix[3]; // pixele: // pix[2] - bierzacy // pix[1] - poprzedni pix[2] w linii // pix[0] - poprzedni pix[1] w linii set_visited(r, visited, x, y); // zaznaczamy, ze tu juz bylismy! // // etap 1: przetwazanie lokalne // pix[2]=raster_pix_get(r,x, y );// pobieramy bierzacy pixel pix[1]=raster_pix_get(r,x_prev,y_prev);// oraz poprzedni cx=x_prev-(x-x_prev); // wyliczamy jeszcze wczesniejsza poz. cy=y_prev-(y-y_prev); if( raster_xy_rng(r, cx, cy) ) // nalezy ona jeszcze do obrazka? pix[0]=raster_pix_get(r,cx,cy); // pobieramy wiec wartosc! else pix[0]=pix[1]; // domyslnie wstawiamy poprzednia // analiza i ustalanie gradientu: d[0] =grad_v(pix[0], pix[1]); d[1] =grad_v(pix[1], pix[2]); d_cur=fabs( (1*d[0]-2*d[1])/3 ); if(d_cur>d_max) // koniec zejsc?? return 0; // ustawiamy pixel na zadany! //pix_out=base; // jakis ustalony kolorek... 
//pix_out=pix[1]; // poprzedni /* tmp =d_max-d_cur; // wspl proporc pix_out.r=(int)( ((1-tmp)*base.r + tmp*pix[2].r) ); pix_out.g=(int)( ((1-tmp)*base.g + tmp*pix[2].g) ); pix_out.b=(int)( ((1-tmp)*base.b + tmp*pix[2].b) ); */ /* pix_out.r=(1*base.r + 3*pix[2].r)/4; pix_out.g=(1*base.g + 3*pix[2].g)/4; pix_out.b=(1*base.b + 3*pix[2].b)/4; */ double a,b,c,e, sum; c =1; // base e =0; // pix[0] a =1; // pix[1] b =1; // pix[2] sum=a+b+c+e; pix_out.r=(e*pix[0].r + a*pix[1].r + b*pix[2].r + c*base.r)/sum; pix_out.g=(e*pix[0].g + a*pix[1].g + b*pix[2].g + c*base.g)/sum; pix_out.b=(e*pix[0].b + a*pix[1].b + b*pix[2].b + c*base.b)/sum; //tmp =fabs(c*base.r-b*pix[2].r)/sum; tmp =fabs(base.r-pix[2].r); tmp =1 + tmp/255.0; pix_out.r=MIN(255, (int)(tmp*pix_out.r) ); pix_out.g=MIN(255, (int)(tmp*pix_out.g) ); pix_out.b=MIN(255, (int)(tmp*pix_out.b) ); //pix_out.r=MIN(255, pix_out.r+MAX(base.r, pix[0].r)-(base.r+pix[0].r)/2 ); //pix_out.g=MIN(255, pix_out.g+MAX(base.g, pix[0].g)-(base.g+pix[0].g)/2 ); //pix_out.b=MIN(255, pix_out.b+MAX(base.b, pix[0].b)-(base.b+pix[0].b)/2 ); raster_pix_set(r_out, x, y, &pix_out); // // etap 2: przetwazanie reqrencyjne, po najblizszych sasiadach // for(off_y=-1; off_y<=1; off_y++) for(off_x=-1; off_x<=1; off_x++) { cx=x+off_x; cy=y+off_y; if(off_x==0 && off_y==0) // dla siebie samego NIE wywolujemy continue; if( !raster_xy_rng(r, cx, cy) ) // indexy poza zasiegiem? continue; if( is_visited(r, visited, cx, cy) ) // pomijamy juz odwiedzione continue; process_img(r_out, r, visited, d_max, cx, cy, x, y, base); // idziemy glebiej! }; // for(x+off,y+off) return 0; };
/* Mark the term at p for copying using an explicit segmented stack instead
 * of recursion.  Variables and functor words move through the states
 * virgin -> visited (seen once) -> shared (seen at least twice); attributed
 * variables and references push their location so the walk can resume there
 * after the subterm is done (walk_ref tracks whether the top-of-stack entry
 * was pushed while following a reference/attvar rather than a compound).
 *
 * Compound arguments are walked RIGHT to LEFT by stepping p-- until the
 * functor word (TAG_ATOM|STG_GLOBAL) is reached, at which point the parent
 * is popped and update_ground() decides whether the compound is ground.
 *
 * p      - pointer to the term to mark
 * flags  - COPY_ATTRS: descend into attribute lists of attributed variables
 *
 * Returns TRUE on success or MEMORY_OVERFLOW if the segstack cannot grow
 * (the stack is cleared on every return path).
 */
static int
mark_for_copy(Word p, int flags ARG_LD)
{ Word start = p;
  int walk_ref = FALSE;
  Word buf[1024];			/* initial in-place segstack chunk */
  segstack stack;

  initSegStack(&stack, sizeof(Word), sizeof(buf), buf);

  for(;;)
  { switch(tag(*p))
    { case TAG_ATTVAR:
      { if ( flags & COPY_ATTRS )
	{ if ( !pushForMark(&stack, p, walk_ref) )
	  { clearSegStack(&stack);
	    return MEMORY_OVERFLOW;
	  }
	  walk_ref = TRUE;		/* remember we came through an attvar */
	  p = valPAttVar(*p);
	  continue;
	}
	/*FALLTHROUGH*/
      }
      case TAG_VAR:
      { if ( virgin(*p) )
	  set_visited(*p);
	else if ( visited_once(*p) )
	  set_shared(*p);
	break;
      }
      case TAG_REFERENCE:
      { if ( !pushForMark(&stack, p, walk_ref) )
	{ clearSegStack(&stack);
	  return MEMORY_OVERFLOW;
	}
	walk_ref = TRUE;		/* remember we came through a reference */
	deRef(p);
	continue;
      }
      case TAG_COMPOUND:
      { Functor t = valueTerm(*p);
	int arity = arityFunctor(t->definition);

	if ( virgin(t->definition) )
	{ set_visited(t->definition);
	} else
	{ if ( visited_once(t->definition) )
	    set_shared(t->definition);
	  break;			/* already walked; don't descend again */
	}

	if ( arity >= 1 )
	{ if ( !pushForMark(&stack, p, walk_ref) )
	  { clearSegStack(&stack);
	    return MEMORY_OVERFLOW;
	  }
	  walk_ref = FALSE;		/* descending into arguments, not a ref */
	  p = &t->arguments[arity-1];	/* last argument */
	  continue;
	}
      }
    }

    /* current cell done: unwind */
    if ( p == start )
    { clearSegStack(&stack);
      return TRUE;
    }

    /* pop every reference/attvar hop that led to this cell */
    while ( walk_ref )
    { popForMark(&stack, &p, &walk_ref);
      if ( isAttVar(*p) )
      { Word ap = valPAttVar(*p);
	unshare_attvar(ap PASS_LD);	/* undo sharing on the attribute term */
      }
      if ( p == start )
      { clearSegStack(&stack);
	return TRUE;
      }
    }

    p--;				/* previous argument of the compound */
    if ( tagex(*p) == (TAG_ATOM|STG_GLOBAL) )
    { /* hit the functor word: all arguments done, pop the parent */
      popForMark(&stack, &p, &walk_ref);
      update_ground(p PASS_LD);
    }
  }
}