Beispiel #1
0
/*
 * Worker body for the acquire/release ordering test.  The odd-id thread
 * (only id 1 is expected) is the writer: it bumps counter1 with a plain
 * store and then publishes the same value to counter2 with a
 * release-write.  Even-id threads are readers: they acquire-read
 * counter2 and then plain-load counter1; release/acquire pairing
 * guarantees a reader that observes a counter2 value also observes the
 * matching counter1 update, so counter1 < counter2 proves the release
 * store was reordered and the test aborts.
 */
void * acqrel_thr(void *id)
{
  int me = (int)(AO_PTRDIFF_T)id;

  int i;

  for (i = 0; i < NITERS; ++i)
    if (me & 1)
      {
        AO_t my_counter1;
    if (me != 1)
      fprintf(stderr, "acqrel test: too many threads\n");
    my_counter1 = AO_load(&counter1);
    AO_store(&counter1, my_counter1 + 1);
    /* Publish: any reader that sees this counter2 value must also    */
    /* see the counter1 update above (release semantics).             */
    AO_store_release_write(&counter2, my_counter1 + 1);
      }
    else
      {
    AO_t my_counter1a, my_counter2a;
    AO_t my_counter1b, my_counter2b;

    /* Acquire-read counter2 first; the subsequent plain load of      */
    /* counter1 cannot be reordered ahead of it.                      */
    my_counter2a = AO_load_acquire_read(&counter2);
    my_counter1a = AO_load(&counter1);
    /* Redo this, to make sure that the second load of counter1 */
    /* is not viewed as a common subexpression.         */
    my_counter2b = AO_load_acquire_read(&counter2);
    my_counter1b = AO_load(&counter1);
    if (my_counter1a < my_counter2a)
      {
        fprintf(stderr, "Saw release store out of order: %lu < %lu\n",
            (unsigned long)my_counter1a, (unsigned long)my_counter2a);
        abort();
      }
    if (my_counter1b < my_counter2b)
      {
        fprintf(stderr,
            "Saw release store out of order (bad CSE?): %lu < %lu\n",
            (unsigned long)my_counter1b, (unsigned long)my_counter2b);
        abort();
      }
      }

  return 0;
}
/*
 * Set the cache-pin maximum object size (bytes) for namespace 'ns_uuid'.
 * Updates both the hash-table entry and the mirrored stats-array slot.
 * Returns 0 on success, ENOENT if the namespace is unknown or deleted.
 */
int
ns_stat_set_cache_pin_max_obj_size(const char	*ns_uuid,
                                   uint64_t	max_bytes)
{
    ns_stat_entry_t *entry;
    const int64_t hash = n_str_hash(ns_uuid);

    NS_HASH_RLOCK();
    entry = nhash_lookup(s_ns_stat_hash, hash, (void *)ns_uuid /*key*/);
    if (entry != NULL && entry->state != NS_STATE_DELETED) {
        /* Record the limit on the entry and mirror it into the stats array. */
        entry->cache_pin_max_obj_bytes = max_bytes;
        AO_store(&ns_stats[entry->index][NS_CACHE_PIN_SPACE_OBJ_LIMIT],
                 max_bytes);
        NS_HASH_RUNLOCK();
        return 0;
    }
    NS_HASH_RUNLOCK();
    return ENOENT;
}	/* ns_stat_set_cache_pin_max_obj_size */
/*
 * Enable or disable cache pinning for namespace 'ns_uuid'.
 * Updates both the hash-table entry and the mirrored stats-array slot.
 * Returns 0 on success, ENOENT if the namespace is unknown or deleted.
 */
int
ns_stat_set_cache_pin_enabled_state(const char	*ns_uuid,
                                    int		enabled)
{
    ns_stat_entry_t *entry = NULL;
    int64_t hash;
    int ret = ENOENT;

    hash = n_str_hash(ns_uuid);

    NS_HASH_RLOCK();
    entry = nhash_lookup(s_ns_stat_hash, hash, (void *)ns_uuid/*key*/);
    if (entry != NULL && entry->state != NS_STATE_DELETED) {
        /* Record the flag on the entry and mirror it into the stats array. */
        entry->cache_pin_enabled = enabled;
        AO_store(&ns_stats[entry->index][NS_CACHE_PIN_SPACE_ENABLED], enabled);
        ret = 0;
    }
    NS_HASH_RUNLOCK();
    return ret;
}	/* ns_stat_set_cache_pin_enabled_state */
Beispiel #4
0
// Main loop of a supervisor-managed worker thread: binds this thread's
// local state, then repeatedly fetches tasks from the supervisor and runs
// them until m_KeepRunning is cleared.
void SupervisorThread::threadEntryPoint()
{
  #ifdef GETSPECIFICTHREADLOCAL
  // Attach this thread's local-storage block to the supervisor's TLS key;
  // failure leaves the thread unusable, so abort.
  if(pthread_setspecific(threadSupervisor->m_ThreadSpecificKey,m_ThreadLocal))
    abort();
  #endif
  // Cache pointers to this thread's interrupt/exception flag fields.
  m_Interrupt=&THREADLOCAL(interrupts_interruptedFlag,struct atomic_field);
  m_Exception=&THREADLOCAL(interrupts_exceptionFlag,struct atomic_field);
  reverse_run(thread_prepare_list);// re-initialize any thread local variables
  while(m_KeepRunning)
    {
      // Threads with ids beyond the currently allowed count idle until the
      // limit is raised (or the thread is told to stop running).
      if(currentAllowedThreads<=m_LocalThreadId)
	{
	  sleep(1);
	  continue;
	}
      // Clear any stale interrupt before starting a fresh task.
      AO_store(&m_Interrupt->field,false);
      struct ThreadTask* task = threadSupervisor->getTask();
      // NOTE(review): task is not NULL-checked before use -- confirm
      // getTask() can never return NULL.
      task->run(this);
    }
}
/*
 * Overwrite stat counter 'stat_type' for the namespace identified by
 * 'stoken' with 'val'.
 *
 * Returns 0 on success, NKN_STAT_GEN_MISMATCH if the token's generation
 * is stale (the namespace slot has been recycled since the token was
 * issued), or NKN_STAT_TYPE_EINVAL if stat_type is out of range.
 */
int
ns_stat_set(ns_stat_token_t	stoken,
            ns_stat_category_t	stat_type,
            int64_t		val)
{
    uint32_t ns_index = stoken.u.stat_token_data.val;
    uint32_t gen = stoken.u.stat_token_data.gen;
    uint32_t curr_gen;

    /* Reject stale tokens: the slot's generation must match the token's. */
    curr_gen = (uint32_t) AO_load(&ns_token_gen[ns_index]);
    if (curr_gen != gen) {
        glob_ns_stat_set_gen_mismatch_err++;
        return NKN_STAT_GEN_MISMATCH;
    }

    /* Bounds-check the category before indexing the stats array. */
    if (stat_type >= NS_STAT_MAX) {
        glob_ns_stat_set_type_einval_err++;
        return NKN_STAT_TYPE_EINVAL;
    }

    AO_store(&ns_stats[ns_index][stat_type], val);
    return 0;
}	/* ns_stat_set */
/*
 * Set the cache-pin reserved disk space (bytes) for namespace 'ns_uuid'.
 * Updates both the hash-table entry and the mirrored stats-array slot.
 * Returns 0 on success, ENOENT if the namespace is unknown or deleting.
 */
int
ns_stat_set_cache_pin_diskspace(const char *ns_uuid,
                                uint64_t   max_resv_bytes)
{
    ns_stat_entry_t *entry = NULL;
    const int64_t hash = n_str_hash(ns_uuid);
    int ret = ENOENT;

    /* Perform operation w/ the token under lock, so refcnt is not needed */
    NS_HASH_RLOCK();
    entry = nhash_lookup(s_ns_stat_hash, hash, (void *)ns_uuid /*key*/);
    if (entry != NULL && !IS_NS_STATE_DELETING(entry)) {
        /* Record the reservation and mirror it into the stats array. */
        entry->cache_pin_resv_bytes = max_resv_bytes;
        AO_store(&ns_stats[entry->index][NS_CACHE_PIN_SPACE_RESV_TOTAL],
                 max_resv_bytes);
        ret = 0;
    }
    NS_HASH_RUNLOCK();
    return ret;
}	/* ns_stat_set_cache_pin_diskspace */
Beispiel #7
0
/*
 * Publish 'next' as the successor of 'region'.  The atomic store plus
 * the happens-before annotation make the link visible to (and
 * race-detector-clean for) concurrent readers of region->next.
 */
static void set_region_next(struct PupHeapRegion *region,
                            struct PupHeapRegion *next)
{
	AO_store(&region->next, (AO_t)next);
	ANNOTATE_HAPPENS_BEFORE(&region->next);
}
Beispiel #8
0
/*
 * Atomically install 'gc_state' as the heap's current GC state and
 * annotate the happens-before edge for race detectors; readers load
 * heap->gc_state concurrently.
 */
static void set_gc_state(struct PupHeap *heap,
                         struct PupGCState *gc_state)
{
	AO_store((AO_t *)&heap->gc_state, (AO_t)gc_state);
	ANNOTATE_HAPPENS_BEFORE(&heap->gc_state);
}
Beispiel #9
0
 /* Non-atomic load-then-store stand-in for fetch-and-add: a concurrent */
 /* update between the load and the store can be lost, so this is not   */
 /* suitable for synchronization purposes.  But as a correctness test,  */
 /* it should be OK. */
 AO_INLINE AO_t fetch_and_add(volatile AO_t * addr, AO_t val)
 {
   AO_t result = AO_load(addr);
   AO_store(addr, result + val);
   return result;
 }
/*
 * Register namespace 'ns_uuid' in the stat tables, assigning it a free
 * slot index (new or recycled from a DELETED entry).  On success the
 * entry is inserted into s_ns_stat_hash and the per-namespace stat
 * counters are seeded from the supplied cache-pin and tier settings.
 *
 * Returns 0 on success (including the benign duplicate case, where only
 * num_dup_ns is bumped), or ENOMEM when no slot can be allocated.
 */
int
ns_stat_add_namespace(const int nth,
                      const int state,
                      const char *ns_uuid,
                      const uint64_t cache_pin_max_obj_bytes,
                      const uint64_t cache_pin_resv_bytes,
                      const int cache_pin_enabled,
                      const ns_tier_entry_t *ssd,
                      const ns_tier_entry_t *sas,
                      const ns_tier_entry_t *sata,
                      const int lock_flag)
{
    ns_stat_entry_t	*ns;
    int64_t		old_index, orig_old_index = -1;
    uint32_t		keyhash;
    const AO_t		tmp_one = 1, tmp_zero = 0;
    int			not_found = 1;
    int			num_entries = 0;

    keyhash = n_str_hash(ns_uuid);

    NS_HASH_WLOCK(lock_flag);

    ns = nhash_lookup(s_ns_stat_hash, keyhash, (void *)ns_uuid /*key*/);
    if (ns) {
        /* This is the case where cache-inherit is done from multiple namespaces
         * to a single deleted namespace or for an active namespace.
         */
        ns->num_dup_ns++;
        NS_HASH_WUNLOCK(lock_flag);
        DBG_LOG(SEVERE, MOD_NAMESPACE, "Stat add called for existing"
                " Namespace (%s)", ns_uuid);
        return 0;
    }

    /* Scan for a free or recycled slot; g_ns_index wraps to 0 at the   */
    /* end of the array, and orig_old_index detects a full cycle with   */
    /* no free slot.                                                    */
    while (not_found) {
        /* By putting this first, we can atomically hand out indices */
        old_index = (int64_t)AO_fetch_and_add1(&g_ns_index);
        if (old_index == NS_NUM_STAT_ENTRIES) {	// size of array
            AO_store(&g_ns_index, tmp_zero);
            continue;
        }
        if (num_entries >= NKN_MAX_NAMESPACES) {// # of supported namespaces
            NS_HASH_WUNLOCK(lock_flag);	// Ran out of valid namespace slots
            glob_ns_stat_add_too_many_err++;
            return ENOMEM;
        }
        if (orig_old_index == -1)
            orig_old_index = old_index;
        else if (orig_old_index == old_index) {
            /* No free slots */
            NS_HASH_WUNLOCK(lock_flag);
            NKN_ASSERT(0);	// should never happen
            glob_ns_stat_add_no_free_slot_err++;
            return ENOMEM;
        }
        if (ns_slot_info[old_index] == NULL) {
            /* Brand-new slot: allocate a zeroed entry. */
            ns = nkn_calloc_type(1, sizeof(ns_stat_entry_t),
                                 mod_ns_stat_entry_t);
            if (ns == NULL) {
                NS_HASH_WUNLOCK(lock_flag);
                glob_ns_stat_add_nomem_err++;
                return ENOMEM;
            }
            ns_slot_info[old_index] = ns;
            AO_fetch_and_add1(&ns_num_namespaces);
            glob_ns_stat_add_new_slot_cnt++;
            break;
        }
        if (ns_slot_info[old_index]->state == NS_STATE_DELETED) {
            /* Recycle the slot of a deleted namespace. */
            ns = ns_slot_info[old_index];
            AO_fetch_and_add1(&ns_num_namespaces);
            glob_ns_stat_add_reuse_slot_cnt++;
            break;
        }
        num_entries++;
    }
    NS_HASH_WUNLOCK(lock_flag);

    DBG_LOG(MSG1, MOD_NAMESPACE, "Namespace (%s) stat added (index %lu) "
            "(num_ns %lu) (state %d)", ns_uuid, old_index, ns_num_namespaces,
            state);
    ns->index = old_index;
    ns->token_curr_gen = 1;
    ns->rp_index = nth;	// MGMT promises that value never changes

    ns->state = state;

    strncpy(ns->ns_key, ns_uuid, NKN_MAX_UID_LENGTH-1);
    /* strncpy() does not NUL-terminate when the source fills the count,
     * and it never writes byte [NKN_MAX_UID_LENGTH-1]; terminate there
     * explicitly.  (The previous code wrote the terminator at index
     * NKN_MAX_UID_LENGTH, one past the copied region, and left byte
     * [NKN_MAX_UID_LENGTH-1] holding stale data on recycled slots.)
     */
    ns->ns_key[NKN_MAX_UID_LENGTH-1] = '\0';

    /* If ns_key stored is truncated, it would cause
     * issue during stat_delete. Added assert at the source of the issue.
     */
    if (strlen(ns->ns_key) != strlen(ns_uuid)) {
        DBG_LOG(SEVERE, MOD_NAMESPACE,
                "Namespace uid is longer than MAX_UID_LENGTH: %s",
                ns_uuid);
        NKN_ASSERT(0);
    }

    ns->cache_pin_max_obj_bytes = cache_pin_max_obj_bytes;
    ns->cache_pin_resv_bytes = cache_pin_resv_bytes;
    ns->cache_pin_enabled = cache_pin_enabled;

    /* Tier settings: max_disk_usage is supplied in MiB, stored in bytes. */
    ns->ssd.read_size = ssd->read_size;
    ns->ssd.block_free_threshold = ssd->block_free_threshold;
    ns->ssd.group_read = ssd->group_read;
    ns->ssd.max_disk_usage = ssd->max_disk_usage * 1024 * 1024;

    ns->sas.read_size = sas->read_size;
    ns->sas.block_free_threshold = sas->block_free_threshold;
    ns->sas.group_read = sas->group_read;
    ns->sas.max_disk_usage = sas->max_disk_usage * 1024 *1024;

    ns->sata.read_size = sata->read_size;
    ns->sata.block_free_threshold = sata->block_free_threshold;
    ns->sata.group_read = sata->group_read;
    ns->sata.max_disk_usage = sata->max_disk_usage * 1024 *1024;

    /* insert into hash table with key "namespace:uuid" */
    NS_HASH_WLOCK(lock_flag);
    nhash_insert(s_ns_stat_hash, keyhash, ns->ns_key, ns /* value */);
    NS_HASH_WUNLOCK(lock_flag);

    /* Seed the mirrored stats-array slots for this namespace. */
    AO_store(&ns_token_gen[ns->index], tmp_one);
    AO_store(&ns_stats[ns->index][NS_CACHE_PIN_SPACE_ENABLED],
             ns->cache_pin_enabled);
    AO_store(&ns_stats[ns->index][NS_CACHE_PIN_SPACE_RESV_TOTAL],
             ns->cache_pin_resv_bytes);
    AO_store(&ns_stats[ns->index][NS_CACHE_PIN_SPACE_OBJ_LIMIT],
             ns->cache_pin_max_obj_bytes);

    AO_store(&ns_stats[ns->index][NS_SSD_MAX_USAGE],
             ns->ssd.max_disk_usage);
    AO_store(&ns_stats[ns->index][NS_SAS_MAX_USAGE],
             ns->sas.max_disk_usage);
    AO_store(&ns_stats[ns->index][NS_SATA_MAX_USAGE],
             ns->sata.max_disk_usage);

    ns_stat_add_counters(ns->ns_key, ns->index);
    glob_ns_num_namespaces = AO_load(&ns_num_namespaces);
    return 0;
}	/* ns_stat_add_namespace */
Beispiel #11
0
/*
 * This may be called from DllMain, and hence operates under unusual
 * constraints.  In particular, it must be lock-free if GC_win32_dll_threads
 * is set.  Always called from the thread being added.
 * If GC_win32_dll_threads is not set, we already hold the allocation lock,
 * except possibly during single-threaded start-up code.
 */
/* Claim a thread-table entry for the current thread (lock-free when	*/
/* GC_win32_dll_threads), duplicate its handle, and record its stack	*/
/* base and ids so the collector can scan it.				*/
static GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb,
					     DWORD thread_id)
{
  GC_vthread me;

  /* The following should be a noop according to the win32	*/
  /* documentation.  There is empirical evidence that it	*/
  /* isn't.		- HB					*/
# if defined(MPROTECT_VDB)
#   if defined(GWW_VDB)
      if (GC_incremental && !GC_gww_dirty_init())
	SetUnhandledExceptionFilter(GC_write_fault_handler);
#   else
      if (GC_incremental) SetUnhandledExceptionFilter(GC_write_fault_handler);
#   endif
# endif

  if (GC_win32_dll_threads) {
    int i;
    /* It appears to be unsafe to acquire a lock here, since this	*/
    /* code is apparently not preeemptible on some systems.		*/
    /* (This is based on complaints, not on Microsoft's official	*/
    /* documentation, which says this should perform "only simple	*/
    /* initialization tasks".)						*/
    /* Hence we make do with nonblocking synchronization.		*/
    /* It has been claimed that DllMain is really only executed with	*/
    /* a particular system lock held, and thus careful use of locking	*/
    /* around code that doesn't call back into the system libraries	*/
    /* might be OK.  But this hasn't been tested across all win32	*/
    /* variants.							*/
                /* cast away volatile qualifier */
    /* Claim the first free slot by atomically swapping in_use to 1;	*/
    /* a nonzero result means the slot was already taken.		*/
    for (i = 0; InterlockedExchange((IE_t)&dll_thread_table[i].in_use,1) != 0;
	 i++) {
      /* Compare-and-swap would make this cleaner, but that's not 	*/
      /* supported before Windows 98 and NT 4.0.  In Windows 2000,	*/
      /* InterlockedExchange is supposed to be replaced by		*/
      /* InterlockedExchangePointer, but that's not really what I	*/
      /* want here.							*/
      /* FIXME: We should eventually declare Win95 dead and use AO_	*/
      /* primitives here.						*/
      if (i == MAX_THREADS - 1)
        ABORT("too many threads");
    }
    /* Update GC_max_thread_index if necessary.  The following is safe,	*/
    /* and unlike CompareExchange-based solutions seems to work on all	*/
    /* Windows95 and later platforms.					*/
    /* Unfortunately, GC_max_thread_index may be temporarily out of 	*/
    /* bounds, so readers have to compensate.				*/
    while (i > GC_max_thread_index) {
      InterlockedIncrement((IE_t)&GC_max_thread_index);
    }
    if (GC_max_thread_index >= MAX_THREADS) {
      /* We overshot due to simultaneous increments.	*/
      /* Setting it to MAX_THREADS-1 is always safe.	*/
      GC_max_thread_index = MAX_THREADS - 1;
    }
    me = dll_thread_table + i;
  } else /* Not using DllMain */ {
    GC_ASSERT(I_HOLD_LOCK());
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(thread_id);
    GC_in_thread_creation = FALSE;
  }
# ifdef GC_PTHREADS
    /* me can be NULL -> segfault */
    me -> pthread_id = pthread_self();
# endif

  /* GetCurrentThread() returns a pseudo-handle; duplicate it into a	*/
  /* real handle usable from other threads.				*/
  if (!DuplicateHandle(GetCurrentProcess(),
                 	GetCurrentThread(),
		        GetCurrentProcess(),
		        (HANDLE*)&(me -> handle),
		        0,
		        0,
		        DUPLICATE_SAME_ACCESS)) {
	DWORD last_error = GetLastError();
	GC_err_printf("Last error code: %d\n", last_error);
	ABORT("DuplicateHandle failed");
  }
  me -> stack_base = sb -> mem_base;
  /* Up until this point, GC_push_all_stacks considers this thread	*/
  /* invalid.								*/
  /* Up until this point, this entry is viewed as reserved but invalid	*/
  /* by GC_delete_thread.						*/
  me -> id = thread_id;
# if defined(THREAD_LOCAL_ALLOC)
      GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
# endif
  if (me -> stack_base == NULL) 
      ABORT("Bad stack base in GC_register_my_thread_inner");
  if (GC_win32_dll_threads) {
    if (GC_please_stop) {
      AO_store(&GC_attached_thread, TRUE);
      AO_nop_full();  // Later updates must become visible after this.
    }
    /* We'd like to wait here, but can't, since waiting in DllMain 	*/
    /* provokes deadlocks.						*/
    /* Thus we force marking to be restarted instead.			*/
  } else {
    GC_ASSERT(!GC_please_stop);
  	/* Otherwise both we and the thread stopping code would be	*/
  	/* holding the allocation lock.					*/
  }
  return (GC_thread)(me);
}