Example #1
// delete a block in the cache, and decrement the number of blocks.
// for use with external clients of this module only.
// return 0 on success
// return negative on error (see md_cache_evict_block_internal)
int md_cache_evict_block( struct md_syndicate_cache* cache, uint64_t file_id, int64_t file_version, uint64_t block_id, int64_t block_version ) {
   
   int rc = md_cache_evict_block_internal( cache, file_id, file_version, block_id, block_version );
   if( rc == 0 ) {
      __sync_fetch_and_sub( &cache->num_blocks_written, 1 );
   }
   
   return rc;
}
Example #2
static long m_interlocked_decrement(volatile long * pv) {
#ifdef WIN32
  return InterlockedDecrement(pv);
#elif defined(HAS_SYNC_FUNCTIONS)
  /* __sync_fetch_and_sub returns the old value; subtract 1 so this branch,
     like the others, returns the decremented value. */
  return __sync_fetch_and_sub(pv, 1L) - 1L;
#else
  return --(*pv);
#endif
}
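For reference, a minimal standalone sketch (not taken from any project above) contrasting the return values of the two GCC builtins; this is the semantic detail the wrapper above has to reconcile, since InterlockedDecrement and the pre-decrement fallback both return the new value:
#include <assert.h>

int main(void) {
    long counter = 5;
    long before = __sync_fetch_and_sub(&counter, 1);  /* returns the value BEFORE the subtraction: 5 */
    long after  = __sync_sub_and_fetch(&counter, 1);  /* returns the value AFTER the subtraction: 3 */
    assert(before == 5 && after == 3 && counter == 3);
    return 0;
}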
Example #3
int pthread_mutex_unlock(pthread_mutex_t *mutex) {
  if (NACL_UNLIKELY(mutex->mutex_type != PTHREAD_MUTEX_FAST_NP)) {
    if ((PTHREAD_MUTEX_RECURSIVE_NP == mutex->mutex_type) &&
        (0 != (--mutex->recursion_counter))) {
      /*
       * We assume that this thread owns the lock
       * (no verification for recursive locks),
       * so just decrement the counter, this thread is still the owner.
       */
      return 0;
    }
    if ((PTHREAD_MUTEX_ERRORCHECK_NP == mutex->mutex_type) &&
        (pthread_self() != mutex->owner_thread_id)) {
      /* Error - releasing a mutex that's free or owned by another thread. */
      return EPERM;
    }
    /* Writing to owner_thread_id here must be done atomically. */
    mutex->owner_thread_id = NACL_PTHREAD_ILLEGAL_THREAD_ID;
    mutex->recursion_counter = 0;
  }

  /*
   * Release the mutex.  This atomic decrement executes the full
   * memory barrier that pthread_mutex_unlock() is required to
   * execute.
   */
  int old_state = __sync_fetch_and_sub(&mutex->mutex_state, 1);
  if (NACL_UNLIKELY(old_state != LOCKED_WITHOUT_WAITERS)) {
    if (old_state == UNLOCKED) {
      /*
       * The mutex was not locked.  mutex_state is now -1 and the
       * mutex is likely unusable, but that is the caller's fault for
       * using the mutex interface incorrectly.
       */
      return EPERM;
    }
    /*
     * We decremented mutex_state from LOCKED_WITH_WAITERS to
     * LOCKED_WITHOUT_WAITERS.  We must now release the mutex fully.
     *
     * No further memory barrier is required for the following
     * modification of mutex_state.  The full memory barrier from the
     * atomic decrement acts as a release memory barrier for the
     * following modification.
     *
     * TODO(mseaborn): Change the following store to use an atomic
     * store builtin when this is available in all the NaCl
     * toolchains.  For now, PNaCl converts the volatile store to an
     * atomic store.
     */
    mutex->mutex_state = UNLOCKED;
    int woken;
    __nc_irt_futex.futex_wake(&mutex->mutex_state, 1, &woken);
  }
  return 0;
}
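The unlock above only makes sense together with a lock path that distinguishes LOCKED_WITHOUT_WAITERS from LOCKED_WITH_WAITERS. Below is a hedged sketch of such a lock, following the well-known futex-mutex design rather than the actual NaCl source; the numeric state values and the futex_wait() wrapper are assumptions chosen to be consistent with the decrements in the unlock path.
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

enum { UNLOCKED = 0, LOCKED_WITHOUT_WAITERS = 1, LOCKED_WITH_WAITERS = 2 };

/* Thin wrapper over the Linux futex syscall: sleep while *addr == expected. */
static void futex_wait(volatile int *addr, int expected) {
    syscall(SYS_futex, (int *) addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

void futex_mutex_lock_sketch(volatile int *state) {
    /* Fast path: take a free mutex without entering the kernel. */
    int old = __sync_val_compare_and_swap(state, UNLOCKED, LOCKED_WITHOUT_WAITERS);
    while (old != UNLOCKED) {
        /* Slow path: make sure the state records a waiter, then sleep until
         * the unlocker stores UNLOCKED and calls futex_wake. */
        if (old == LOCKED_WITH_WAITERS ||
            __sync_val_compare_and_swap(state, LOCKED_WITHOUT_WAITERS,
                                        LOCKED_WITH_WAITERS) != UNLOCKED) {
            futex_wait(state, LOCKED_WITH_WAITERS);
        }
        /* Retry the acquire; install LOCKED_WITH_WAITERS because other
         * waiters may still be queued behind us. */
        old = __sync_val_compare_and_swap(state, UNLOCKED, LOCKED_WITH_WAITERS);
    }
}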
Example #4
int cell_glocktree(struct cell *c) {

  struct cell *finger, *finger2;
  TIMER_TIC

  /* First of all, try to lock this cell. */
  if (c->ghold || lock_trylock(&c->glock) != 0) {
    TIMER_TOC(timer_locktree);
    return 1;
  }

  /* Did somebody hold this cell in the meantime? */
  if (c->ghold) {

    /* Unlock this cell. */
    if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");

    /* Admit defeat. */
    TIMER_TOC(timer_locktree);
    return 1;
  }

  /* Climb up the tree and lock/hold/unlock. */
  for (finger = c->parent; finger != NULL; finger = finger->parent) {

    /* Lock this cell. */
    if (lock_trylock(&finger->glock) != 0) break;

    /* Increment the hold. */
    __sync_fetch_and_add(&finger->ghold, 1);

    /* Unlock the cell. */
    if (lock_unlock(&finger->glock) != 0) error("Failed to unlock cell.");
  }

  /* If we reached the top of the tree, we're done. */
  if (finger == NULL) {
    TIMER_TOC(timer_locktree);
    return 0;
  }

  /* Otherwise, we hit a snag. */
  else {

    /* Undo the holds up to finger. */
    for (finger2 = c->parent; finger2 != finger; finger2 = finger2->parent)
      __sync_fetch_and_sub(&finger2->ghold, 1);

    /* Unlock this cell. */
    if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");

    /* Admit defeat. */
    TIMER_TOC(timer_locktree);
    return 1;
  }
}
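For context, here is a hedged sketch of the matching unlock, modelled on the undo path in the function above (the real cell_gunlocktree in SWIFT may differ in details such as timers):
void cell_gunlocktree_sketch(struct cell *c) {

  struct cell *finger;

  /* First unlock this cell. */
  if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");

  /* Then drop the hold we placed on every ancestor. */
  for (finger = c->parent; finger != NULL; finger = finger->parent)
    __sync_fetch_and_sub(&finger->ghold, 1);
}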
Example #5
static void complete(struct pfiled *pfiled, struct io *io)
{
	struct xseg_request *req = io->req;
	req->state |= XS_SERVED;
	if (cmdline_verbose)
		log_io("complete", io);
	xport p = xseg_respond(pfiled->xseg, req, pfiled->portno, X_ALLOC);
	xseg_signal(pfiled->xseg, p);
	__sync_fetch_and_sub(&pfiled->fdcache[io->fdcacheidx].ref, 1);
}
Example #6
// Keep a session count for stats gauge
static void update_session_count(bool add) {
    static int current_session_count = 0;

    if (add) __sync_fetch_and_add(&current_session_count, 1);
    else __sync_fetch_and_sub(&current_session_count, 1);
    log_print(LOG_INFO, SECTION_SESSION_DEFAULT, "update_session_count: %d", current_session_count);
    // We atomically update current_session_count, but don't atomically get its value for the stat.
    // That should be ok, it will always at least be a valid value for some point in recent time.
    stats_timer_cluster("sessions", current_session_count);
}
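If the reported gauge had to match the update it belongs to, a hypothetical variant could derive the new count from the builtin's return value instead of re-reading the variable (stats_timer_cluster is assumed to behave as in the example above):
static void update_session_count_consistent(bool add) {
    static int current_session_count = 0;
    int new_count;

    /* __sync_fetch_and_add/sub return the old value; adjust by the delta
       to get the count as it stood immediately after this update. */
    if (add) new_count = __sync_fetch_and_add(&current_session_count, 1) + 1;
    else new_count = __sync_fetch_and_sub(&current_session_count, 1) - 1;

    stats_timer_cluster("sessions", new_count);
}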
Example #7
static inline void dgInterlockedDecrement(dgInt32* Addend)
{
#ifdef _WIN32
  InterlockedDecrement((long*) Addend);
#elif defined (__APPLE__)
  OSAtomicAdd32 (-1, (int32_t*)Addend);
#else
  __sync_fetch_and_sub ((int32_t*)Addend, 1 );
#endif
}
Example #8
void BaseNativeWindow::_decRef(struct android_native_base_t* base)
{
	ANativeWindow* self = container_of(base, ANativeWindow, common);
	BaseNativeWindow* bnw = static_cast<BaseNativeWindow*>(self);

	if (__sync_fetch_and_sub(&bnw->refcount,1) == 1)
	{
		delete bnw;
	}
}
Example #9
void h2o_cache_release(h2o_cache_t *cache, h2o_cache_ref_t *ref)
{
    if (__sync_fetch_and_sub(&ref->_refcnt, 1) == 1) {
        assert(!h2o_linklist_is_linked(&ref->_lru_link));
        assert(!h2o_linklist_is_linked(&ref->_age_link));
        if (cache->destroy_cb != NULL)
            cache->destroy_cb(ref->value);
        free(ref->key.base);
        free(ref);
    }
}
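Examples #8 and #9 (and #22 below) all use the same release idiom: because __sync_fetch_and_sub returns the previous count, an old value of 1 means the caller just dropped the last reference. A minimal, hypothetical distillation of the pattern (the struct and names are illustrative, not from h2o or libhybris):
#include <stdlib.h>

struct refcounted {
    int refcnt;                              /* set to 1 on creation */
    void (*destroy)(struct refcounted *);    /* optional cleanup of owned resources */
};

static void refcounted_release(struct refcounted *obj) {
    if (__sync_fetch_and_sub(&obj->refcnt, 1) == 1) {
        /* The old count was 1, so the count is now 0: this was the last reference. */
        if (obj->destroy != NULL)
            obj->destroy(obj);
        free(obj);
    }
}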
Example #10
void comb_sense_sense(int num_of_threads, int* count, bool* local_sense, bool* global_sense, int num_proc, int rank, int thread_id, MPI_Status* status_array){
	*local_sense = !(*local_sense);
	if(__sync_fetch_and_sub(count,1) == 1){
		*count = num_of_threads;
		// mpi_sense_initialize(num_proc, status_array);
		mpi_sense( num_proc, status_array);
		// mpi_sense_finalize(status_array);
		*global_sense = *local_sense;
	}
	while(*local_sense != *global_sense){}	
}
Example #11
int main()
{
  int procs = 0;
  int i;
  pthread_t *thrs;

  // Getting number of CPUs
  procs = (int)sysconf( _SC_NPROCESSORS_ONLN );
  if (procs < 0)
  {
    perror( "sysconf" );
    return -1;
  }

  typedef char       tag[36];

  vsx_printf("This system has %d cores available\n", procs);

  thrs = (pthread_t*)malloc( sizeof( pthread_t ) * procs );
  if (thrs == NULL)
  {
    perror( "malloc" );
    return -1;
  }

  pthread_create(
    &thrs[0],
    NULL,
    thread_producer,
    0x0
  );

  pthread_create(
    &thrs[1],
    NULL,
    thread_producer2,
    0x0
  );


  sleep( 20 );

  __sync_fetch_and_sub( &run_threads, 1);


  for (i = 0; i < 2; i++)
  {
    pthread_join( thrs[i], NULL );
  }

  free( thrs );

  return 0;
}
Example #12
/*Takes in a cb_vector_array struct and an index and returns the element at
  that index or NULL if no element exists at that index.*/
void *cb_vector_array_get(struct cb_vector_array *array, int index){
    void *el = NULL;

    /*When getting the data, increment the number of threads accessing the
      array*/
    __sync_fetch_and_add(&(array->th), 1);
    el = index >= array->size ? NULL : array->data[index];
    __sync_fetch_and_sub(&(array->th), 1);

    return el;
}
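The th field acts as a count of in-flight readers. A hypothetical writer-side helper (not from the actual cb_vector_array code, which presumably does more than this) could read that counter atomically, for instance to wait for readers to drain before mutating the array:
/* Hypothetical helper: spin until no reader is inside cb_vector_array_get.
   __sync_fetch_and_add(&x, 0) is used here as an atomic read of x. */
static void cb_vector_array_wait_for_readers(struct cb_vector_array *array){
    while (__sync_fetch_and_add(&(array->th), 0) != 0);
}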
Example #13
int nc_sendmsg(net_client_t *nc,void *msg,uint32_t len)
{
	int rv;
	struct cpeer *p = nc->peer;
	struct msg_t *message = NULL;

	thread_mutex_lock(nc->peer_mutex);
	if(!p || p->status == CPEER_DISCONNECTED)
	{
		thread_mutex_unlock(nc->peer_mutex);
		return -1;
	}
	__sync_fetch_and_add(&p->refcount,1);
	thread_mutex_unlock(nc->peer_mutex);

	message = (struct msg_t *)mmalloc(p->allocator,
			sizeof(struct msg_t));
	message->buf = (uint8_t *)mmalloc(p->allocator,len);
	bcopy((char *)msg,message->buf,len);
	message->len = len;
	message->peer_id = p->id;

	thread_mutex_lock(p->sq_mutex);
	BTPDQ_INSERT_TAIL(&p->send_queue, message, msg_entry);  

	rv = fdev_enable(&p->ioev,EV_WRITE);
	thread_mutex_unlock(p->sq_mutex);

	if(rv != 0)
	{
		log_error(nc->log,"nc_sendmsg fdev_enable EV_WRITE failed!");
		__sync_fetch_and_sub(&p->refcount,1);
		cpeer_kill(p);
		return -1;
	}
	else
	{
		__sync_fetch_and_sub(&p->refcount,1);
		return 0;
	}
}
Example #14
void gtmp_barrier(){
    int thread_num = omp_get_thread_num();
    *localSense[thread_num] = !(*localSense[thread_num]);

    if (__sync_fetch_and_sub(&count, 1) == 1)
    {   /* if it is the last processor */ 
        count = P;
        sense = *localSense[thread_num];
    }
    else
        while (sense != *localSense[thread_num]);
}
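comb_sense_sense, gtmp_barrier and mp_senserevbarrier further down all implement the same sense-reversing barrier idiom. A self-contained sketch of the core (names and NTHREADS are illustrative): the last thread to arrive sees the old count reach 1, resets the count and flips the shared sense; everyone else spins until the shared sense matches their flipped local sense.
#include <stdbool.h>

#define NTHREADS 4

static volatile int  barrier_count = NTHREADS;
static volatile bool barrier_sense = false;

/* Each thread keeps its own local_sense, initialised to false. */
void sense_barrier_wait(bool *local_sense) {
    *local_sense = !*local_sense;
    if (__sync_fetch_and_sub(&barrier_count, 1) == 1) {
        barrier_count = NTHREADS;       /* last arrival: reset for the next round */
        barrier_sense = *local_sense;   /* and release the waiters */
    } else {
        while (barrier_sense != *local_sense)
            ;                           /* spin until released */
    }
}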
Example #15
int svrapp::on_close(app_connection *n, int reason){

	con * s = (con*)n->get_context();
	if(!s){
		__sync_fetch_and_sub(&g_sdr->u.ls_con.cur_connection, 1);	
		return -1;
	}
	
	if (s->get_type() == mgr_type){
		mgr_disconnect(n, reason);
	}
	else if (s->get_type() == cnt_type){
		cnt_disconnect(n, reason);
		__sync_fetch_and_sub(&g_sdr->u.ls_con.cur_connection, 1);	
	}
	else{
		
	}
	
	return 0;
}
Example #16
static void dec_capture_files(void)
{
#   ifdef __GNUC__
    unsigned prev = __sync_fetch_and_sub(&capture_files, 1);
    assert(prev > 0);
#   else
    mutex_lock(&capfiles_lock);
    assert(capture_files > 0);
    capture_files --;
    mutex_unlock(&capfiles_lock);
#   endif
}
Example #17
void ijkmp_dec_ref(IjkMediaPlayer *mp)
{
    if (!mp)
        return;

    /* __sync_fetch_and_sub returns the previous value, so the last
       reference has just been dropped when the old count was 1. */
    int ref_count = __sync_fetch_and_sub(&mp->ref_count, 1);
    if (ref_count == 1) {
        MPTRACE("ijkmp_dec_ref(): ref=0");
        ijkmp_shutdown(mp);
        ijkmp_destroy_p(&mp);
    }
}
Example #18
/* when an object is deleted */
void cache_destroy(struct giga_directory *dir)
{
    assert(dir->refcount > 1);

    /* once to release from the caller */
    __sync_fetch_and_sub(&dir->refcount, 1);

    HASH_DEL(dircache, dir);

    if (__sync_sub_and_fetch(&dir->refcount, 1) == 0)
        free(dir);
}
Example #19
  size_t dc_buffered_stream_send2::get_outgoing_data(circular_iovec_buffer& outdata) {
    if (writebuffer_totallen.value == 0) return 0;
    
    // swap the buffer
    size_t curid = bufid;
    bufid = !bufid;
    // decrement the reference count
    __sync_fetch_and_sub(&(buffer[curid].ref_count), 1);
    // wait till the reference count is negative
    while(buffer[curid].ref_count >= 0);
    
    // ok now we have exclusive access to the buffer
    size_t sendlen = buffer[curid].numbytes;
    size_t real_send_len = 0;
    if (sendlen > 0) {
      size_t oldbsize = buffer[curid].buf.size();
      size_t numel = std::min((size_t)(buffer[curid].numel.value), buffer[curid].buf.size());
      bool buffull = (numel == buffer[curid].buf.size());
      std::vector<iovec> &sendbuffer = buffer[curid].buf;
      
      writebuffer_totallen.dec(sendlen);    
      block_header_type* blockheader = new block_header_type;
      (*blockheader) = sendlen;
      
      // fill the first msg block
      sendbuffer[0].iov_base = reinterpret_cast<void*>(blockheader);
      sendbuffer[0].iov_len = sizeof(block_header_type);
      // give the buffer away
      for (size_t i = 0;i < numel; ++i) {
        real_send_len += sendbuffer[i].iov_len;
        outdata.write(sendbuffer[i]);
      }
      // reset the buffer;
      buffer[curid].numbytes = 0;
      buffer[curid].numel = 1;

      if (buffull) {
        sendbuffer.resize(2 * numel);
      }
      else {
        sendbuffer.resize(oldbsize);
      }
      __sync_fetch_and_add(&(buffer[curid].ref_count), 1);
      return real_send_len;
    }
    else {
      // reset the buffer;
      buffer[curid].numbytes = 0;
      buffer[curid].numel = 1;
      __sync_fetch_and_add(&(buffer[curid].ref_count), 1);
      return 0;
    }
  }
Example #20
void registry_perf_dec(struct registry_perf *p, uint64_t val) {

	if (p->type != registry_perf_type_gauge) {
		pomlog(POMLOG_ERR "Trying to decrease a perf item which is not of type gauge");
		return;
	} else if (p->update_hook) {
		pomlog(POMLOG_ERR "Trying to decrease a perf item with an update hook");
		return;
	}

	__sync_fetch_and_sub(&p->value, val);
}
Example #21
/**
 * connman_network_unref:
 * @network: network structure
 *
 * Decrease reference counter of network
 */
void connman_network_unref(struct connman_network *network)
{
	DBG("network %p name %s refcount %d", network, network->name,
		network->refcount - 1);

	if (__sync_fetch_and_sub(&network->refcount, 1) != 1)
		return;

	network_list = g_slist_remove(network_list, network);

	network_destruct(network);
}
Example #22
void BaseNativeWindowBuffer::_decRef(struct android_native_base_t* base)
{
	ANativeWindowBuffer* self = container_of(base, ANativeWindowBuffer, common);
	BaseNativeWindowBuffer* bnwb = static_cast<BaseNativeWindowBuffer*>(self) ;

	TRACE("%s %p refcount = %i\n", __PRETTY_FUNCTION__, bnwb, bnwb->refcount - 1);

	if (__sync_fetch_and_sub(&bnwb->refcount,1) == 1)
	{
		delete bnwb;
	}
}
Example #23
void *thread2(void *arg)
{
  if (__sync_fetch_and_sub(&threads, 1) == 1)
    qsim_magic_enable();

  while(threads);

  for (uint64_t i = 0; i < 10000; i++)
    __sync_fetch_and_add(&value, i);

  return 0;
}
Example #24
void mp_senserevbarrier(int num_threads) {
	int threadno = omp_get_thread_num();
	localsense_list[threadno].lsense = !localsense_list[threadno].lsense;

	if (__sync_fetch_and_sub (&count, 1) == 1) {
		count = num_threads;
		sense = localsense_list[threadno].lsense;
	}
	else {
		while(sense != localsense_list[threadno].lsense){};
	}
}
Example #25
int ot_insert(const char *id, void *data_ptr) {
    uint64_t slot, chunk_for_slot, slot_in_chunk;
    object_table_entry_t *entry = NULL;
    nvm_chunk_header_t *chunk_hdr = NULL;
    nvm_object_table_entry_t *nvm_entry = NULL;

    if (ot_get(id) != NULL) {
        return OT_DUPLICATE;
    }

    /* find and reserve a slot on NVM, check freed slots first */
    if (__sync_fetch_and_sub(&slot_buffer_n_free, 1) <= 0) {
        /* nothing was free after all: give the unit back atomically */
        __sync_fetch_and_add(&slot_buffer_n_free, 1);
        if ((slot = __sync_fetch_and_add(&next_nvm_slot, 1)) >= total_slots_available) {
            /* oops, we ran out of slots... */
            __sync_fetch_and_sub(&next_nvm_slot, 1);
            return OT_FAIL;
        }
    } else {
        slot = slot_buffer[__sync_fetch_and_add(&slot_buffer_head_idx, 1)];
    }

    /* determine NVM location for OT entry */
    chunk_for_slot = slot / 63;
    slot_in_chunk = slot % 63;
    chunk_hdr = (nvm_chunk_header_t*)((uintptr_t)first_chunk + chunk_for_slot*CHUNK_SIZE);
    nvm_entry = &chunk_hdr->object_table[slot_in_chunk];

    /* create the volatile OT entry */
    entry = (object_table_entry_t*) malloc(sizeof(object_table_entry_t));
    strncpy(entry->id, id, MAX_ID_LENGTH);
    entry->id[MAX_ID_LENGTH] = '\0';
    entry->slot = slot;
    entry->data_ptr = data_ptr;
    entry->nvm_entry = nvm_entry;

    /* insert into the hashmap */
    HASHMAP_INSERT(entry->id, entry);
    return OT_OK;
}
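The reservation logic above distils to a reusable pattern: atomically take one unit from a counted pool, and atomically give it back if the pool was already empty. A minimal sketch (pool_free and its initial size are illustrative):
static volatile long pool_free = 16;   /* units currently available */

/* Returns 1 if a unit was reserved, 0 if the pool was empty. */
static int pool_try_reserve(void) {
    if (__sync_fetch_and_sub(&pool_free, 1) <= 0) {
        /* Nothing was available: undo our decrement atomically. */
        __sync_fetch_and_add(&pool_free, 1);
        return 0;
    }
    return 1;
}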
Example #26
bool remove_hashlist(HashList_t * list, int key){
  if(!contains_hashlist(list,key))
    return false;
  
  HashItem_t * curr = list->head;
  
  if(curr == NULL)
    return false;
  else if (curr->key ==key){
    HashItem_t * temp = curr;
    if (curr->next != NULL) {
      list->head = list->head->next;
    }
    else {
      /* removing the only element: clear the tail as well so it does not
         point at the freed node */
      list->head = NULL;
      list->tail = NULL;
    }
    free(temp);
    __sync_fetch_and_sub(&(list->size), 1);
    //list->size--;
    return true;
  }else{
    while(curr->next != NULL) {
      if(curr->next->key == key){
	HashItem_t * temp = curr->next;
	curr->next = curr->next->next;
	if (curr->next == NULL) {
	  list->tail = curr;
	}
	free(temp);
	__sync_fetch_and_sub(&(list->size), 1);
	//list->size--;
	return true;
      }
      else
	curr = curr->next;
    }
  }
  return false;
}
Example #27
void machine_track_platform_idle(boolean_t entry) {
	cpu_data_t		*my_cpu		= current_cpu_datap();

	if (entry) {
		(void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);
	}
 	else {
 		uint32_t nidle = __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
 		if (nidle == topoParms.nLThreadsPerPackage) {
 			my_cpu->lcpu.package->package_idle_exits++;
 		}
 	}
}
Example #28
int counting_bloom_remove_with_hash(counting_bloom_t *bloom, unsigned int *hashes)
{
    unsigned int index, i, offset;
    for (i = 0; i < bloom->nfuncs; i++) {
        offset = i * bloom->counts_per_func;
        index = hashes[i] + offset;
        bitmap_decrement(bloom->bitmap, index, bloom->offset);
    }
    __sync_fetch_and_sub(&(bloom->header->count), 1);
    //bloom->header->count--;

    return 0;
}
Example #29
  long AtomicCounter::operator--()
  {
#ifdef WIN32
    return InterlockedDecrement(&m_lValue);
#elif defined STAFF_USE_GNUC_BUILTINS
    // __sync_fetch_and_sub returns the previous value; subtract 1 so every
    // branch returns the new value without re-reading m_lValue, which could
    // race with other threads.
    return __sync_fetch_and_sub(&m_lValue, 1) - 1;
#else
    m_tMutex.Lock();
    long lResult = --m_lValue;
    m_tMutex.Unlock();
    return lResult;
#endif
  }
Example #30
void myth_malloc_wrapper_fini_worker(int rank)
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* is it possible to come here before myth_malloc_wrapper_init is called? */
  if (!g_wrap_malloc) return;
#endif
  //Release freelist contents
  /*for (i=0;i<FREE_LIST_NUM;i++){
    }*/
  //Release the array
  real_free(g_myth_malloc_wrapper_fl[rank]);
  __sync_fetch_and_sub(&g_alloc_hook_ok,1);
}