Example #1
static void*
cas_thread(void* arg) {
	unsigned int loop = 0;
	cas_value_t val = *(cas_value_t*)arg;

	thread_sleep(10);

	while (!thread_try_wait(0) && (loop < 65535)) {
		while (!atomic_cas32(&val_32, val.val_32, 0))
			thread_yield();
		while (!atomic_cas32(&val_32, 0, val.val_32))
			thread_yield();
		while (!atomic_cas64(&val_64, val.val_64, 0))
			thread_yield();
		while (!atomic_cas64(&val_64, 0, val.val_64))
			thread_yield();
		while (!atomic_cas_ptr(&val_ptr, val.val_ptr, 0))
			thread_yield();
		while (!atomic_cas_ptr(&val_ptr, 0, val.val_ptr))
			thread_yield();

		++loop;
		thread_yield();
	}
	return 0;
}
Example #2
static void _profile_put_root_block( uint32_t block )
{
	uint32_t sibling;
	profile_block_t* self = GET_BLOCK( block );

#if PROFILE_ENABLE_SANITY_CHECKS
	FOUNDATION_ASSERT( self->sibling == 0 );
#endif
	while( !atomic_cas32( &_profile_root, block, 0 ) )
	{
		do
		{
			sibling = atomic_load32( &_profile_root );
		} while( sibling && !atomic_cas32( &_profile_root, 0, sibling ) );

		if( sibling )
		{
			if( self->sibling )
			{
				uint32_t leaf = self->sibling;
				while( GET_BLOCK( leaf )->sibling )
					leaf = GET_BLOCK( leaf )->sibling;
				GET_BLOCK( sibling )->previous = leaf;
				GET_BLOCK( leaf )->sibling = sibling;
			}
			else
			{
				self->sibling = sibling;
			}
		}
	}
}
Example #3
static profile_block_t* _profile_allocate_block( void )
{
	//Grab block from free list, avoiding ABA issues by
	//using high 16 bit as a loop counter
	profile_block_t* block;
	uint32_t free_block_tag, free_block, next_block_tag;
	do
	{
		free_block_tag = atomic_load32( &_profile_free );
		free_block = free_block_tag & 0xffff;

		next_block_tag = GET_BLOCK( free_block )->child;
		next_block_tag |= ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16;
	} while( free_block && !atomic_cas32( &_profile_free, next_block_tag, free_block_tag ) );

	if( !free_block )
	{
		static atomic32_t has_warned = {0};
		if( atomic_cas32( &has_warned, 1, 0 ) )
			log_error( 0, ERROR_OUT_OF_MEMORY, ( _profile_num_blocks < 65535 ) ? "Profile blocks exhausted, increase profile memory block size" : "Profile blocks exhausted, decrease profile output wait time" );
		return 0;
	}

	block = GET_BLOCK( free_block );
	memset( block, 0, sizeof( profile_block_t ) );
	return block;
}
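The high 16 bits of the tag above act as a loop counter so that a free/reallocate cycle between the load and the CAS is detected (the classic ABA problem); the matching push side is shown in Example #8 below, and a 16-bit tag only makes an ABA hit unlikely, not impossible. As a rough standalone sketch of the same idea, here is the pattern written with C11 atomics instead of atomic_cas32/atomic_incr32; all names (tagged_pop, tagged_push, slot_next, ...) are hypothetical and not part of the library shown above.

#include <stdatomic.h>
#include <stdint.h>

#define SLOT_COUNT 1024

static uint16_t slot_next[SLOT_COUNT];   /* per-slot next index; 0 terminates the list */
static _Atomic uint32_t free_head;       /* low 16 bits: head index, high 16 bits: tag */
static _Atomic uint32_t loop_tag;        /* monotonically increasing tag source */

/* Pop one slot index from the free list, or 0 if the list is empty. */
static uint16_t tagged_pop(void) {
    uint32_t head, next;
    uint16_t idx;
    do {
        head = atomic_load(&free_head);
        idx = (uint16_t)(head & 0xffffu);
        if (!idx)
            return 0;
        /* Speculative read of the successor; it may be stale if the slot is
         * concurrently recycled, which is exactly what the fresh tag below
         * lets the CAS detect. */
        next = (uint32_t)slot_next[idx]
             | ((atomic_fetch_add(&loop_tag, 1u) & 0xffffu) << 16);
    } while (!atomic_compare_exchange_weak(&free_head, &head, next));
    return idx;
}

/* Push a slot index back onto the free list. */
static void tagged_push(uint16_t idx) {
    uint32_t head, tagged;
    do {
        head = atomic_load(&free_head);
        slot_next[idx] = (uint16_t)(head & 0xffffu);
        tagged = (uint32_t)idx
               | ((atomic_fetch_add(&loop_tag, 1u) & 0xffffu) << 16);
    } while (!atomic_compare_exchange_weak(&free_head, &head, tagged));
}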
Example #4
void* cas_thread( object_t thread, void* arg )
{
    int loop = 0;
    cas_value_t val = *(cas_value_t*)arg;

    thread_sleep( 10 );

    while( !thread_should_terminate( thread ) && ( loop < 65535 ) )
    {
        while( !atomic_cas32( &val_32, val.val_32, 0 ) )
            thread_yield();
        while( !atomic_cas32( &val_32, 0, val.val_32 ) )
            thread_yield();
        while( !atomic_cas64( &val_64, val.val_64, 0 ) )
            thread_yield();
        while( !atomic_cas64( &val_64, 0, val.val_64 ) )
            thread_yield();
        while( !atomic_cas_ptr( &val_ptr, val.val_ptr, 0 ) )
            thread_yield();
        while( !atomic_cas_ptr( &val_ptr, 0, val.val_ptr ) )
            thread_yield();

        ++loop;
        thread_yield();
    }
    return 0;
}
Example #5
int lfmpscq_enqueue(lfmpscq_d_t *qd, void *data)
{
	int node_idx, t, t_next;
	lfmpscq_slot_head_t  *node_ptr, *t_ptr;
	int *shared_idx;

	/* try to get a free slot for the new node */
	if ((node_idx = getFreeSlot(qd)) == -1)
		return -1;

	/* set up new node */
	node_ptr = (void *)((unsigned long)(qd->slots) + node_idx * (sizeof(lfmpscq_slot_head_t) + qd->nodeDataSize));
	hwfunctions_memcpy((void *)((unsigned long)node_ptr + sizeof(lfmpscq_slot_head_t)), data, qd->nodeDataSize);
	node_ptr->next = UNDEFINED_NODE;
	
	/* set pointer to shared node index */
	shared_idx = (int *)(qd->sharedIdxs + qd->userID);

	/* now enqueue */
	while(1) {
		/* read tail */
		t = qd->q->tail;

		/* set shared index to protect the node from reuse until reuse is explicitly allowed */
		*(shared_idx) = t;
		
		/* make sure the index saved in shared_idx was a valid index of tail at some time between the previous line and now */
		if(qd->q->tail != t)
			continue;

		/* read the next-index of the node we assume as tail */
		t_ptr = (lfmpscq_slot_head_t *)((unsigned long)(qd->slots) + t * (sizeof(lfmpscq_slot_head_t) + qd->nodeDataSize));
		t_next = t_ptr->next;
		
		/* if tail (or what we assume to be tail) is not pointing to last node in list, 
		 * try to update tail and start over 
		 */
		if(t_next != UNDEFINED_NODE) {
			atomic_cas32(&(qd->q->tail), t, t_next);
			continue;
		}

		/* try to append the new node */
		if(atomic_cas32(&(t_ptr->next), UNDEFINED_NODE, node_idx))
			break;
	}

	/* try to update tail; if not successful, the next enqueue will do it for us */
	atomic_cas32(&(qd->q->tail), t, node_idx);

	return 0;
}
Example #6
int barrier_get(barrier_d_t *bd, int objID, int numUser)
{
	void *ptr;

	bd->objID = objID;
	bd->numUser = numUser;

	/* create shared memory segment for barrier */
	if (shmman_get_shmseg(bd->objID, sizeof(barrier_data_t), &ptr) == -1) {
		perror("barrier: shmman_get_shmseg() failed\n");
		return -1;
	}

	/* set pointer to shared memory */
	bd->shm = (barrier_data_t *)ptr;

	/* get lock */
	while (!atomic_cas32(&(bd->shm->lock), 0, 1))
		hwfunctions_nop();

	/* initialize if we are the first user of the barrier */
	if (bd->shm->init == 0) {
		bd->shm->cnt = 0;
		bd->shm->fallen = 0;

		bd->shm->init = 1;
	}

	/* release lock */
	bd->shm->lock = 0;
	
	return 0;
}
Example #7
static Boolean mspace_alloc_block(Mspace* mspace, Allocator* allocator)
{
  alloc_context_reset(allocator);

  /* now try to get a new block */
  unsigned int old_free_idx = mspace->free_block_idx;
  unsigned int new_free_idx = old_free_idx+1;
  while( old_free_idx <= mspace->ceiling_block_idx ){   
    unsigned int allocated_idx = atomic_cas32(&mspace->free_block_idx, new_free_idx, old_free_idx);
    if(allocated_idx != old_free_idx){
      old_free_idx = mspace->free_block_idx;
      new_free_idx = old_free_idx+1;
      continue;
    }
    /* ok, got one */
    Block_Header* alloc_block = (Block_Header*)&(mspace->blocks[allocated_idx - mspace->first_block_idx]);

    allocator_init_free_block(allocator, alloc_block);

    return TRUE;
  }

  /* Mspace is out. If it's caused by mutator, a collection should be triggered. 
     If it's caused by collector, a fallback should be triggered. */
  return FALSE;
  
}
Example #8
static void _profile_free_block( uint32_t block, uint32_t leaf )
{
	uint32_t last_tag, block_tag;
	do
	{
		block_tag = block | ( ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16 );
		last_tag = atomic_load32( &_profile_free );
		GET_BLOCK( leaf )->child = last_tag & 0xffff;
	} while( !atomic_cas32( &_profile_free, block_tag, last_tag ) );
}
Example #9
object_t _object_unref( object_base_t* obj )
{
	int32_t ref;
	if( obj ) do
	{
		ref = atomic_load32( &obj->ref );
		if( ( ref > 0 ) && atomic_cas32( &obj->ref, ref - 1, ref ) )
			return ( ref == 1 ) ? 0 : obj->id;
	} while( ref > 0 );
	return 0;
}
Example #10
object_t
render_buffer_ref(object_t id) {
	int32_t ref;
	render_buffer_t* buffer = objectmap_lookup(_render_map_buffer, id);
	if (buffer)
		do {
			ref = atomic_load32(&buffer->ref);
			if ((ref > 0) && atomic_cas32(&buffer->ref, ref + 1, ref))
				return id;
		}
		while (ref > 0);
	return 0;
}
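Examples #9 and #10 are the two halves of the same pattern: the reference count is only changed by a CAS while it is still positive, so a concurrent release to zero can never be raced past. A minimal sketch of the acquire side with C11 atomics; the helper name ref_acquire is hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Increment *ref only if it is currently positive; returns false if the
 * object has already been released (count reached zero). */
static bool ref_acquire(_Atomic int32_t* ref) {
    int32_t cur = atomic_load(ref);
    while (cur > 0) {
        /* On failure, cur is refreshed with the current value and we retry. */
        if (atomic_compare_exchange_weak(ref, &cur, cur + 1))
            return true;
    }
    return false;
}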
Example #11
void
lua_push_op(lua_t* env, lua_op_t* op) {
	unsigned int ofs, old;
	do {
		old = atomic_load32(&env->queue_tail);
		ofs = old + 1;
		if (ofs >= BUILD_LUA_CALL_QUEUE_SIZE)
			ofs = 0;
	}
	while (!atomic_cas32(&env->queue_tail, ofs, old));

	//Got slot, copy everything except the command field
	memcpy(&env->queue[ofs].data, &op->data, sizeof(op->data) + sizeof(lua_arg_t));
	//Now set command, completing insert
	env->queue[ofs].cmd = op->cmd;
}
Example #12
void* objectmap_lookup_ref( const objectmap_t* map, object_t id )
{
	void* object;
	do
	{
		object = map->map[ id & map->mask_index ];
		if( object && !( (uintptr_t)object & 1 ) &&
		   ( ( *( (uint64_t*)object + 1 ) & map->mask_id ) == ( id & map->mask_id ) ) ) //ID in object is offset by 8 bytes
		{
			object_base_t* base_obj = object;
			int32_t ref = atomic_load32( &base_obj->ref );
			if( ref && atomic_cas32( &base_obj->ref, ref + 1, ref ) )
				return object;
		}
	} while( object );
	return 0;
}
Example #13
int getFreeSlot(lfmpscq_d_t *qd)
{
	int cnt;
	void *s;
	static int i = 0;

	cnt = 0;
	while(cnt < qd->numSlots) {
		s = (void *)((unsigned long)(qd->slots) + i * (sizeof(lfmpscq_slot_head_t) + qd->nodeDataSize));
		if (atomic_cas32(&(((lfmpscq_slot_head_t *)s)->used), 0, 1))
			return i;			
		i = (i + 1) % qd->numSlots;
		cnt++;
	}
	
	return -1;
}
Example #14
template<class Mutex>
inline bool robust_spin_mutex<Mutex>::check_if_owner_dead_and_take_ownership_atomically()
{
   boost::uint32_t cur_owner = get_current_process_id();
   boost::uint32_t old_owner = atomic_read32(&this->owner), old_owner2;
   //The cas loop guarantees that only one thread from this or another process
   //will succeed taking ownership
   do{
      //Check if owner is dead
      if(!this->is_owner_dead(old_owner)){
         return false;
      }
      //If it's dead, try to mark this process as the owner in the owner field
      old_owner2 = old_owner;
      old_owner = atomic_cas32(&this->owner, cur_owner, old_owner);
   }while(old_owner2 != old_owner);
   //If success, we fix mutex internals to assure our ownership
   mutex_traits_t::take_ownership(mtx);
   return true;
}
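Note that Boost.Interprocess's atomic_cas32 returns the previous value rather than a success flag, which is why the loop above detects success by comparing the returned value (old_owner) with the expected one (old_owner2); the foundation-style atomic_cas32 used in most of the other examples returns a boolean instead. For comparison, a sketch of the same take-ownership loop written against a boolean-success CAS using C11 atomics; the owner variable and the is_owner_dead/get_current_process_id stubs are placeholders, not the Boost implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint32_t owner;   /* pid of the current lock owner, 0 if free */

/* Stubs: a real robust mutex would query the OS for process liveness/identity. */
static bool is_owner_dead(uint32_t pid) { (void)pid; return pid != 0; }
static uint32_t get_current_process_id_stub(void) { return 42u; }

/* Take ownership only if the recorded owner is dead; returns false otherwise. */
static bool try_take_dead_ownership(void) {
    uint32_t cur = get_current_process_id_stub();
    uint32_t old = atomic_load(&owner);
    do {
        if (!is_owner_dead(old))
            return false;                 /* owner is alive, give up */
        /* On failure, old is refreshed with the value another thread installed. */
    } while (!atomic_compare_exchange_weak(&owner, &old, cur));
    return true;
}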
Example #15
void
render_buffer_destroy(object_t id) {
	int32_t ref;
	render_buffer_t* buffer = GET_BUFFER(id);
	if (buffer) {
		do {
			ref = atomic_load32(&buffer->ref);
			if ((ref > 0) && atomic_cas32(&buffer->ref, ref - 1, ref)) {
				if (ref == 1) {
					objectmap_free(_render_map_buffer, id);
					buffer->backend->vtable.deallocate_buffer(buffer->backend, buffer, true, true);
					memory_deallocate(buffer);
				}
				return;
			}
		}
		while (ref > 0);
	}
}
Example #16
static void _memory_tracker_track( void* addr, uint64_t size )
{
	if( addr ) do
	{
		int32_t tag = atomic_exchange_and_add32( &_memory_tag_next, 1 );
		if( tag >= MAX_CONCURRENT_ALLOCATIONS )
		{
			int32_t newtag = tag % MAX_CONCURRENT_ALLOCATIONS;
			atomic_cas32( &_memory_tag_next, newtag, tag + 1 );
			tag = newtag;
		}
		if( !_memory_tags[ tag ].address && atomic_cas_ptr( &_memory_tags[ tag ].address, addr, 0 ) )
		{
			_memory_tags[ tag ].size = (uintptr_t)size;
			stacktrace_capture( _memory_tags[ tag ].trace, 14, 3 );
			hashtable_set( _memory_table, (uintptr_t)addr, (uintptr_t)( tag + 1 ) );
			return;
		}
	} while( true );
}
Example #17
static void _profile_process_root_block( void )
{
	uint32_t block;

	do
	{
		block = atomic_load32( &_profile_root );
	} while( block && !atomic_cas32( &_profile_root, 0, block ) );

	while( block )
	{
		profile_block_t* leaf;
		profile_block_t* current = GET_BLOCK( block );
		uint32_t next = current->sibling;

		current->sibling = 0;
		leaf = _profile_process_block( current );
		_profile_free_block( block, BLOCK_INDEX( leaf ) );

		block = next;
	}
}
Example #18
int spscq_get(spscq_d_t *qd, int objID, int slotSize, int numSlots) 
{
	void *ptr;

	qd->objID = objID;
	qd->slotSize = slotSize;
	qd->numSlots = numSlots;
	
	/* get shared memory segment for queue */
	if (shmman_get_shmseg(qd->objID, sizeof(spscq_head_t) + qd->numSlots * qd->slotSize, &ptr) == -1) {
		perror("shmman_get_shmseg() failed\n");
		return -1;
	}

	/* set pointer to shared memory segment */
	qd->q = (spscq_head_t *)ptr;
	
	/* get lock */
	while (!atomic_cas32(&(qd->q->lock), 0, 1)) {
		hwfunctions_nop();
	}

	/* initialize if we are the first user of the queue */
	if (qd->q->init == 0) {

		/* set default values */
		qd->q->head = 0;
		qd->q->next_slot = 0;
	
		qd->q->init = 1;	
	}

	/* release lock */
	qd->q->lock = 0;

	return 0;	
}
Example #19
bool objectmap_lookup_unref( const objectmap_t* map, object_t id, object_deallocate_fn deallocate )
{
	void* object;
	do
	{
		object = map->map[ id & map->mask_index ];
		if( object && !( (uintptr_t)object & 1 ) &&
		   ( ( *( (uint64_t*)object + 1 ) & map->mask_id ) == ( id & map->mask_id ) ) ) //ID in object is offset by 8 bytes
		{
			object_base_t* base_obj = object;
			int32_t ref = atomic_load32( &base_obj->ref );
			if( ref && atomic_cas32( &base_obj->ref, ref - 1, ref ) )
			{
				if( ref == 1 )
				{
					deallocate( id, object );
					return false;
				}
				return true;
			}
		}
	} while( object );
	return false;
}
Example #20
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      if(!*p_ref) continue;  /* root ref can't be NULL, but remset can be */
      Partial_Reveal_Object *p_obj = read_slot(p_ref);

#ifdef GC_GEN_STATS
      gc_gen_collector_update_rootset_ref_num(stats);
#endif

      if(obj_belongs_to_nos(p_obj)){
        collector_tracestack_push(collector, p_ref);
      }
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
#ifdef PREFETCH_SUPPORTED      
      /* DO PREFETCH */  
      if( mark_prefetch ) {    
        if(!vector_block_iterator_end(trace_task, iter)) {
      	  REF *pref= (REF*) *iter;
      	  PREFETCH( read_slot(pref));
        }	
      }
#endif            
      /* in the sequential version we only trace the same object once, but we were using a local hashset for that,
         which couldn't catch repetition across multiple collectors. This is subject to more study. */
   
      /* FIXME:: we should not let root_set become empty while working; others may want to steal it.
         Degenerate my stack into root_set, and grab another stack */
   
      /* a task has to belong to collected space, it was checked before put into the stack */
      trace_object(collector, p_ref);
      if(collector->result == FALSE)  break; /* force return */
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition. If we grab the task, 
       and the pool is empty, other threads may fall to this barrier and then pass. */
    atomic_dec32(&num_finished_collectors);
    goto retry;      
  }
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example #21
void slide_compact_mspace(Collector* collector)
{
    GC* gc = collector->gc;
    Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);

    unsigned int num_active_collectors = gc->num_active_collectors;

    /* Pass 1: **************************************************
      *mark all live objects in heap, and save all the slots that
      *have references  that are going to be repointed.
      */

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking...");

    unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

    if(collect_is_fallback())
        mark_scan_heap_for_fallback(collector);
    else if(gc->tuner->kind != TRANS_NOTHING)
        mark_scan_heap_for_space_tune(collector);
    else
        mark_scan_heap(collector);
    old_num = atomic_inc32(&num_marking_collectors);

    /* last collector's world here */
    if( ++old_num == num_active_collectors ) {

        if(!IGNORE_FINREF )
            collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
        else {
            gc_set_weakref_sets(gc);
            gc_update_weakref_ignore_finref(gc);
        }
#endif
        gc_identify_dead_weak_roots(gc);

        if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc);
        //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        /* prepare for next phase */
        gc_init_block_for_collectors(gc, mspace);

#ifdef USE_32BITS_HASHCODE
        if(collect_is_fallback())
            fallback_clear_fwd_obj_oi_init(collector);
#endif

        last_block_for_dest = NULL;
        /* let other collectors go */
        num_marking_collectors++;
    }
    while(num_marking_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1 and start pass2: relocating mos&nos...");

    /* Pass 2: **************************************************
       assign target addresses for all to-be-moved objects */

    atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
    if(collect_is_fallback())
        fallback_clear_fwd_obj_oi(collector);
#endif
    mspace_compute_object_target(collector, mspace);

    old_num = atomic_inc32(&num_repointing_collectors);
    /*last collector's world here*/
    if( ++old_num == num_active_collectors ) {
        if(lspace->move_object) {
            TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ...");
            lspace_compute_object_target(collector, lspace);
        }
        gc->collect_result = gc_collection_result(gc);
        if(!gc->collect_result) {
            num_repointing_collectors++;
            return;
        }
        gc_reset_block_for_collectors(gc, mspace);
        gc_init_block_for_fix_repointed_refs(gc, mspace);
        num_repointing_collectors++;
    }
    while(num_repointing_collectors != num_active_collectors + 1);
    if(!gc->collect_result) return;
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing...");

    /* Pass 3: **************************************************
      *update all references whose objects are to be moved
      */
    old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
    mspace_fix_repointed_refs(collector, mspace);
    old_num = atomic_inc32(&num_fixing_collectors);
    /*last collector's world here */
    if( ++old_num == num_active_collectors ) {
        lspace_fix_repointed_refs(collector, lspace);
        gc_fix_rootset(collector, FALSE);
        gc_init_block_for_sliding_compact(gc, mspace);
        /*LOS_Shrink: This operation moves objects in LOS, and should be part of Pass 4.
          *lspace_sliding_compact is not bound to los shrink; we could slide-compact los individually.
          *So we use the flag lspace->move_object here, not tuner->kind == TRANS_FROM_LOS_TO_MOS.
          */
        if(lspace->move_object)  lspace_sliding_compact(collector, lspace);
        /*The temp blocks storing interim information are copied to the real place they should be,
          *and the space of the blocks, allocated in gc_space_tuner_init_fake_blocks_for_los_shrink, is freed.
          */
        last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES);
        if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_space_tuner_release_fake_blocks_for_los_shrink(gc);
        num_fixing_collectors++;
    }
    while(num_fixing_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");

    /* Pass 4: **************************************************
       move objects                                             */

    atomic_cas32( &num_moving_collectors, 0, num_active_collectors);

    mspace_sliding_compact(collector, mspace);

    atomic_inc32(&num_moving_collectors);
    while(num_moving_collectors != num_active_collectors);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");

    /* Pass 5: **************************************************
       restore obj_info                                         */

    atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);

    collector_restore_obj_info(collector);
#ifdef USE_32BITS_HASHCODE
    collector_attach_hashcode(collector);
#endif

    old_num = atomic_inc32(&num_restoring_collectors);

    if( ++old_num == num_active_collectors ) {
        if(gc->tuner->kind != TRANS_NOTHING)
            mspace_update_info_after_space_tuning(mspace);
        num_restoring_collectors++;
    }
    while(num_restoring_collectors != num_active_collectors + 1);

    /* Dealing with out of memory in mspace */
    void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
    if( mspace_border > nos_boundary) {
        atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

        mspace_extend_compact(collector);

        atomic_inc32(&num_extending_collectors);
        while(num_extending_collectors != num_active_collectors);
    }

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");

    return;
}
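Each pass above follows the same rendezvous recipe: one collector's atomic_cas32 resets the pass counter from the terminal value it reached in the previous cycle, every collector increments it after its share of parallel work, and the last arriver runs the single-threaded epilogue before bumping the counter once more to release the rest. A condensed sketch of that pattern with C11 atomics; phase_barrier and its parameters are hypothetical, and each pass must own its own counter, exactly as num_marking_collectors, num_fixing_collectors, etc. do above. Passes with no single-threaded epilogue (such as Pass 4) use the simpler variant that resets from and waits for num_active_collectors.

#include <stdatomic.h>

static void phase_barrier(_Atomic unsigned int* counter,     /* one counter per pass */
                          unsigned int num_workers,
                          void (*parallel_work)(void),
                          void (*last_worker_epilogue)(void)) {
    unsigned int expected = num_workers + 1;                 /* terminal value of the previous cycle */
    atomic_compare_exchange_strong(counter, &expected, 0u);  /* only one reset can succeed */

    parallel_work();

    if (atomic_fetch_add(counter, 1u) + 1 == num_workers) {
        last_worker_epilogue();                              /* "last collector's world" */
        atomic_fetch_add(counter, 1u);                       /* let the other workers go */
    }
    while (atomic_load(counter) != num_workers + 1)
        ;                                                    /* spin until the epilogue is done */
}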
Example #22
int lfmpscq_get(lfmpscq_d_t *qd, int objID, int userID, int nodeDataSize, int numSlots, int maxUser, int init) 
{
	int i;
	void *ptr;
	int *idx;
	unsigned long s;

	qd->objID = objID;
	qd->userID = userID;
	qd->nodeDataSize = nodeDataSize;
	qd->numSlots = numSlots;
	qd->maxUser = maxUser;	

	/* get shared memory segment for queue */
	if (shmman_get_shmseg(qd->objID, sizeof(lfmpscq_head_t) + qd->maxUser * sizeof(int) + qd->numSlots * (qd->nodeDataSize + sizeof(lfmpscq_slot_head_t)), &ptr) == -1) {
		perror("shmman_get_shmseg() failed\n");
		return -1;
	}

	/* set pointer to shared memory segment */
	qd->q = (lfmpscq_head_t *)ptr;
	qd->sharedIdxs = (int *)((unsigned long)ptr + sizeof(lfmpscq_head_t));
	qd->slots = (void *)((unsigned long)ptr + sizeof(lfmpscq_head_t) + qd->maxUser * sizeof(int));
	
	/* get lock */
	while (!atomic_cas32(&(qd->q->lock), 0, 1)) {
		hwfunctions_nop();
	}

	/* initialize if we are the first user of the queue */
	if (qd->q->init == 0) {

		/* init shared indexes */
		idx = (int *)(qd->sharedIdxs);
		for (i = 0; i < qd->maxUser; i++) {
			*idx = UNDEFINED_NODE;
			idx++;
		}

		/* init slots */
		s = (unsigned long)(qd->slots);
		((lfmpscq_slot_head_t *)s)->used = 1; /* dummy */
		((lfmpscq_slot_head_t *)s)->next = UNDEFINED_NODE; /* dummy */
		for (i = 1; i < qd->numSlots; i++) {
			s += qd->nodeDataSize + sizeof(lfmpscq_slot_head_t);
			((lfmpscq_slot_head_t *)s)->used = 0;
			((lfmpscq_slot_head_t *)s)->next = UNDEFINED_NODE;
		}

		/* init shared head/tail */
		qd->q->head = 0;
		qd->q->tail = 0;
	
		qd->q->init = 1;	
	}

	/* init local data if necessary */
	if (init == 1) {
		RList = (int *)malloc(qd->numSlots * sizeof(int));
		TmpList = (int *)malloc(qd->numSlots * sizeof(int));
	}

	/* release lock */
	qd->q->lock = 0;

	return 0;	
}
Example #23
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. 
      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* root ref can't be NULL (remset may have a NULL ref entry, but this function is only for ALGO_MAJOR) */
      assert(p_obj!=NULL);
      /* we have to mark the object before putting it into the mark task, because
         it is possible to have two slots containing the same object. They would
         be scanned twice and their ref slots recorded twice. The problem
         occurs after the ref slot has been updated the first time with the new position,
         while the second update still expects the value in the ref slot to be the old position.
         This can be worked around if we want.
      */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }

    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);

      /* FIXME:: we should not let mark_task become empty while working; others may want to steal it.
         Degenerate my stack into mark_task, and grab another mark_task */
      trace_object(collector, p_obj);
    } 
    /* run out one task, put back to the pool and grab another task */
   vector_stack_clear(mark_task);
   pool_put_entry(metadata->free_task_pool, mark_task);
   mark_task = pool_get_entry(metadata->mark_task_pool);      
  }
  
  /* termination detection. This is also a barrier.
     NOTE:: We can simply spin waiting for num_finished_collectors, because each 
     generated new task would surely be processed by its generating collector eventually. 
     So code below is only for load balance optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( !pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;  
    }
  }
     
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);   
  collector->trace_stack = NULL;
  
  return;
}
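The spin at the end (here and in collector_trace_rootsets above) is a termination-detection barrier: a collector that finds the task pool empty declares itself finished, but takes itself off the finished count and goes back to work if tasks reappear before everyone has finished. A compact sketch of that protocol with C11 atomics; the task-pool stubs and the name work_until_global_termination are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int finished_workers;

/* Stubs standing in for the host code's shared task pool. */
static bool task_pool_is_empty(void) { return true; }
static void drain_task_pool(void)    { /* process tasks until the pool looks empty */ }

static void work_until_global_termination(unsigned int num_workers) {
    /* One worker's CAS resets the counter left at num_workers by the previous cycle. */
    unsigned int expected = num_workers;
    atomic_compare_exchange_strong(&finished_workers, &expected, 0u);

    for (;;) {
        drain_task_pool();
        atomic_fetch_add(&finished_workers, 1u);
        bool more_work = false;
        while (atomic_load(&finished_workers) != num_workers) {
            if (!task_pool_is_empty()) {
                /* Decrement before grabbing a task, so no thread can observe
                 * "all finished" while work is still pending (the race the
                 * comment in the code above warns about). */
                atomic_fetch_sub(&finished_workers, 1u);
                more_work = true;
                break;
            }
        }
        if (!more_work)
            return;    /* every worker finished and the pool stayed empty */
    }
}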
Example #24
	void lock() {
		while(atomic_cas32(&lock_var, 1, 0)) {}
	}
Example #25
void move_compact_mspace(Collector* collector) 
{
  GC* gc = collector->gc;
  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  Boolean is_fallback = collect_is_fallback();
  
  /* Pass 1: **************************************************
     mark all live objects in heap, and save all the slots that 
            have references  that are going to be repointed */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");

  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
  
  if(!is_fallback)
       mark_scan_heap(collector);  
  else
       mark_scan_heap_for_fallback(collector);

  old_num = atomic_inc32(&num_marking_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    /* prepare for next phase */
    gc_init_block_for_collectors(gc, mspace); 
    
    if(!IGNORE_FINREF )
      collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
    else {
      gc_set_weakref_sets(gc);
      gc_update_weakref_ignore_finref(gc);
    }
#endif
    gc_identify_dead_weak_roots(gc);

#ifdef USE_32BITS_HASHCODE
    if((!LOS_ADJUST_BOUNDARY) && (is_fallback))
      fallback_clear_fwd_obj_oi_init(collector);
#endif
    debug_num_compact_blocks = 0;
    /* let other collectors go */
    num_marking_collectors++; 
  }
  while(num_marking_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1");

  /* Pass 2: **************************************************
     move object and set the forwarding offset table */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");

  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
#ifdef USE_32BITS_HASHCODE
  if(is_fallback)
    fallback_clear_fwd_obj_oi(collector);
#endif

  mspace_move_objects(collector, mspace);   
  
  old_num = atomic_inc32(&num_moving_collectors);
  if( ++old_num == num_active_collectors ){
    /* single thread world */
    if(lspace->move_object) 
      lspace_compute_object_target(collector, lspace);    
    
    gc->collect_result = gc_collection_result(gc);
    if(!gc->collect_result){
      num_moving_collectors++; 
      return;
    }
    
    if(verify_live_heap){
      assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks );	
      debug_num_compact_blocks = 0;
    }

    gc_reset_block_for_collectors(gc, mspace);
    blocked_space_block_iterator_init((Blocked_Space*)mspace);
    num_moving_collectors++; 
  }
  while(num_moving_collectors != num_active_collectors + 1);
  if(!gc->collect_result) return;
  
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass2");

  /* Pass 3: **************************************************
     update all references whose pointed objects were moved */  

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");

  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);

  mspace_fix_repointed_refs(collector, mspace);

  old_num = atomic_inc32(&num_fixing_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    lspace_fix_repointed_refs(collector, lspace);   
    gc_fix_rootset(collector, FALSE);
    if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    

    num_fixing_collectors++; 
  }
  while(num_fixing_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass3");

  /* Pass 4: **************************************************
     restore obj_info. Actually only LOS needs it. Since obj_info is recorded for the new address, the restoration
     doesn't need to specify a space. */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ...");

  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);
  
  collector_restore_obj_info(collector);

  atomic_inc32(&num_restoring_collectors);

  while(num_restoring_collectors != num_active_collectors);

   /* Dealing with out of memory in mspace */  
  if(mspace->free_block_idx > nos->first_block_idx){    
     atomic_cas32( &num_extending_collectors, 0, num_active_collectors);        
     mspace_extend_compact(collector);        
     atomic_inc32(&num_extending_collectors);    
     while(num_extending_collectors != num_active_collectors);  
  }

 
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass4");

  /* Leftover: **************************************************
   */
  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
    return;
  }
  
  TRACE2("gc.process", "GC: collector[0]  finished");
  return;
}
Example #26
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      assert(*p_ref);  /* root ref can't be NULL, but remset can be */

      collector_tracestack_push(collector, p_ref);

#ifdef GC_GEN_STATS    
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
#ifdef PREFETCH_SUPPORTED
      /* DO PREFETCH */
      if( mark_prefetch ) {    
        if(!vector_block_iterator_end(trace_task, iter)) {
      	  REF *pref= (REF*) *iter;
      	  PREFETCH( read_slot(pref));
        }	
      }
#endif      
      trace_object(collector, p_ref);
      
      if(collector->result == FALSE)  break; /* force return */
 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure 
     all the tasks are finished.*/
     
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition. If we grab the task, 
       and the pool is empty, other threads may fall to this barrier and then pass. */
    atomic_dec32(&num_finished_collectors);
    goto retry; 
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example #27
Block_Header* mspace_get_next_target_block(Collector* collector, Mspace* mspace)
{    
  Block_Header* cur_target_block = (Block_Header*)next_block_for_target;
  
  /* first, we bump the next_block_for_target global var to the first non-BLOCK_TARGET block.
     This needs no atomic op, because the global var is only a hint. */
  while(cur_target_block->status == BLOCK_TARGET){
      cur_target_block = cur_target_block->next;
  }
  next_block_for_target = cur_target_block;

  /* cur_target_block has to be BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET. Reason: 
     Any block after it must be either BLOCK_TARGET, or: 
     1. Since cur_target_block < cur_compact_block, we at least can get cur_compact_block as target.
     2. For a block that is >=cur_target_block and <cur_compact_block. 
        Since it is before cur_compact_block, we know it must be a compaction block of some thread. 
        So it is either BLOCK_IN_COMPACT or BLOCK_COMPACTED. 
     We only care about BLOCK_COMPACTED blocks or our own BLOCK_IN_COMPACT. But we can't make the assert
     below because of a race condition where the block status is not yet updated by another thread.
    assert( cur_target_block->status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)); 
  */

  /* mos may be out of space, so we can use nos blocks for compaction target.
   * but we can't use the blocks which are given to los when los extension happens.
   * in this case, an out-of-mem should be given to user.
   */
  GC* gc = collector->gc;
  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Block_Header *nos_end; 
  if( nos->num_managed_blocks != 0)
    nos_end = ((Block_Header *)&nos->blocks[nos->num_managed_blocks-1])->next;
  else 
    nos_end = ((Block_Header *)&mspace->blocks[mspace->num_managed_blocks-1])->next;

  while( cur_target_block != nos_end){
    //For_LOS_extend
    //assert( cur_target_block <= collector->cur_compact_block);
    Block_Header* next_target_block = cur_target_block->next;
    volatile unsigned int* p_block_status = &cur_target_block->status;
    unsigned int block_status = cur_target_block->status;
    //assert( block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET));

    /* if it is not BLOCK_COMPACTED, move on to the next block, unless it is our own cur_compact_block */
    if(block_status != BLOCK_COMPACTED){
      if(cur_target_block == collector->cur_compact_block){
        assert( block_status == BLOCK_IN_COMPACT);
        *p_block_status = BLOCK_TARGET;
        collector->cur_target_block = cur_target_block;
        return cur_target_block;
      }
      /* it's not my own cur_compact_block, it can be BLOCK_TARGET or other's cur_compact_block */
      cur_target_block = next_target_block;
      continue;
    }    
    /* else, find a BLOCK_COMPACTED before own cur_compact_block */    
    unsigned int temp = atomic_cas32(p_block_status, BLOCK_TARGET, BLOCK_COMPACTED);
    if(temp == BLOCK_COMPACTED){
      collector->cur_target_block = cur_target_block;
      return cur_target_block;
    }
    /* missed it; it must have been set to BLOCK_TARGET by another collector */
    assert(temp == BLOCK_TARGET); 
    cur_target_block = next_target_block;     
  }
  /* mos has run out for major collection */
  return NULL;  
}