Example #1
void RequestInjectionData::reset() {
  __sync_fetch_and_and(getConditionFlags(), 0);
  m_coverage = RuntimeOption::RecordCodeCoverage;
  m_debugger = false;
  m_debuggerIntr = false;
  updateJit();
  while (!interrupts.empty()) interrupts.pop();
}
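Note: this is the read-and-clear idiom: __sync_fetch_and_and(ptr, 0) atomically replaces the word with 0 and returns what it held before (here the return value is simply discarded). A minimal standalone sketch of the same idiom, with hypothetical names:

#include <stdio.h>

static volatile unsigned shared_flags = 0x5;   /* hypothetical flag word shared between threads */

int main(void) {
    /* Atomically fetch the previous value and replace it with 0. */
    unsigned old = __sync_fetch_and_and(&shared_flags, 0);
    printf("old=0x%x new=0x%x\n", old, shared_flags);
    return 0;
}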
Example #2
 //! Clear the bit returning the old value
 inline bool clear_bit(uint32_t b) {
     // atomically clear the bit; the builtin returns the previous value
     uint32_t arrpos, bitpos;
     bit_to_pos(b, arrpos, bitpos);
     const size_t test_mask(size_t(1) << size_t(bitpos)); 
     const size_t clear_mask(~test_mask); 
     return __sync_fetch_and_and(array + arrpos, clear_mask) & test_mask;
 }
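Note: clear_bit works because the builtin returns the value the word held before the AND, so the caller learns whether the bit was set at the moment it was cleared. The matching set operation uses __sync_fetch_and_or. A self-contained sketch of the pair on a single word (names are hypothetical, not part of the original class):

#include <stdbool.h>
#include <stdint.h>

/* Atomically set bit b and report whether it was already set. */
static inline bool test_and_set_bit(volatile uint32_t *word, unsigned b) {
    uint32_t mask = (uint32_t)1 << b;
    return (__sync_fetch_and_or(word, mask) & mask) != 0;
}

/* Atomically clear bit b and report whether it was set before. */
static inline bool test_and_clear_bit(volatile uint32_t *word, unsigned b) {
    uint32_t mask = (uint32_t)1 << b;
    return (__sync_fetch_and_and(word, ~mask) & mask) != 0;
}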
Example #3
static VALUE dispatcher_callback_enabled_set(VALUE self, VALUE enabled) {
    if (RTEST(enabled)) {
        __sync_fetch_and_or(&callback_enabled, 1);
    } else {
        __sync_fetch_and_and(&callback_enabled, 0);
    }
    return enabled;
}
Example #4
void test_fetch_and_op (void)
{
  sc = __sync_fetch_and_add (&sc, 11);
  uc = __sync_fetch_and_add (&uc, 11);
  ss = __sync_fetch_and_add (&ss, 11);
  us = __sync_fetch_and_add (&us, 11);
  si = __sync_fetch_and_add (&si, 11);
  ui = __sync_fetch_and_add (&ui, 11);
  sll = __sync_fetch_and_add (&sll, 11);
  ull = __sync_fetch_and_add (&ull, 11);

  sc = __sync_fetch_and_sub (&sc, 11);
  uc = __sync_fetch_and_sub (&uc, 11);
  ss = __sync_fetch_and_sub (&ss, 11);
  us = __sync_fetch_and_sub (&us, 11);
  si = __sync_fetch_and_sub (&si, 11);
  ui = __sync_fetch_and_sub (&ui, 11);
  sll = __sync_fetch_and_sub (&sll, 11);
  ull = __sync_fetch_and_sub (&ull, 11);

  sc = __sync_fetch_and_or (&sc, 11);
  uc = __sync_fetch_and_or (&uc, 11);
  ss = __sync_fetch_and_or (&ss, 11);
  us = __sync_fetch_and_or (&us, 11);
  si = __sync_fetch_and_or (&si, 11);
  ui = __sync_fetch_and_or (&ui, 11);
  sll = __sync_fetch_and_or (&sll, 11);
  ull = __sync_fetch_and_or (&ull, 11);

  sc = __sync_fetch_and_xor (&sc, 11);
  uc = __sync_fetch_and_xor (&uc, 11);
  ss = __sync_fetch_and_xor (&ss, 11);
  us = __sync_fetch_and_xor (&us, 11);
  si = __sync_fetch_and_xor (&si, 11);
  ui = __sync_fetch_and_xor (&ui, 11);
  sll = __sync_fetch_and_xor (&sll, 11);
  ull = __sync_fetch_and_xor (&ull, 11);

  sc = __sync_fetch_and_and (&sc, 11);
  uc = __sync_fetch_and_and (&uc, 11);
  ss = __sync_fetch_and_and (&ss, 11);
  us = __sync_fetch_and_and (&us, 11);
  si = __sync_fetch_and_and (&si, 11);
  ui = __sync_fetch_and_and (&ui, 11);
  sll = __sync_fetch_and_and (&sll, 11);
  ull = __sync_fetch_and_and (&ull, 11);

  sc = __sync_fetch_and_nand (&sc, 11);
  uc = __sync_fetch_and_nand (&uc, 11);
  ss = __sync_fetch_and_nand (&ss, 11);
  us = __sync_fetch_and_nand (&us, 11);
  si = __sync_fetch_and_nand (&si, 11);
  ui = __sync_fetch_and_nand (&ui, 11);
  sll = __sync_fetch_and_nand (&sll, 11);
  ull = __sync_fetch_and_nand (&ull, 11);
}
Example #5
void test_op_ignore (void)
{
  (void) __sync_fetch_and_add (&sc, 1);
  (void) __sync_fetch_and_add (&uc, 1);
  (void) __sync_fetch_and_add (&ss, 1);
  (void) __sync_fetch_and_add (&us, 1);
  (void) __sync_fetch_and_add (&si, 1);
  (void) __sync_fetch_and_add (&ui, 1);
  (void) __sync_fetch_and_add (&sll, 1);
  (void) __sync_fetch_and_add (&ull, 1);

  (void) __sync_fetch_and_sub (&sc, 1);
  (void) __sync_fetch_and_sub (&uc, 1);
  (void) __sync_fetch_and_sub (&ss, 1);
  (void) __sync_fetch_and_sub (&us, 1);
  (void) __sync_fetch_and_sub (&si, 1);
  (void) __sync_fetch_and_sub (&ui, 1);
  (void) __sync_fetch_and_sub (&sll, 1);
  (void) __sync_fetch_and_sub (&ull, 1);

  (void) __sync_fetch_and_or (&sc, 1);
  (void) __sync_fetch_and_or (&uc, 1);
  (void) __sync_fetch_and_or (&ss, 1);
  (void) __sync_fetch_and_or (&us, 1);
  (void) __sync_fetch_and_or (&si, 1);
  (void) __sync_fetch_and_or (&ui, 1);
  (void) __sync_fetch_and_or (&sll, 1);
  (void) __sync_fetch_and_or (&ull, 1);

  (void) __sync_fetch_and_xor (&sc, 1);
  (void) __sync_fetch_and_xor (&uc, 1);
  (void) __sync_fetch_and_xor (&ss, 1);
  (void) __sync_fetch_and_xor (&us, 1);
  (void) __sync_fetch_and_xor (&si, 1);
  (void) __sync_fetch_and_xor (&ui, 1);
  (void) __sync_fetch_and_xor (&sll, 1);
  (void) __sync_fetch_and_xor (&ull, 1);

  (void) __sync_fetch_and_and (&sc, 1);
  (void) __sync_fetch_and_and (&uc, 1);
  (void) __sync_fetch_and_and (&ss, 1);
  (void) __sync_fetch_and_and (&us, 1);
  (void) __sync_fetch_and_and (&si, 1);
  (void) __sync_fetch_and_and (&ui, 1);
  (void) __sync_fetch_and_and (&sll, 1);
  (void) __sync_fetch_and_and (&ull, 1);

  (void) __sync_fetch_and_nand (&sc, 1);
  (void) __sync_fetch_and_nand (&uc, 1);
  (void) __sync_fetch_and_nand (&ss, 1);
  (void) __sync_fetch_and_nand (&us, 1);
  (void) __sync_fetch_and_nand (&si, 1);
  (void) __sync_fetch_and_nand (&ui, 1);
  (void) __sync_fetch_and_nand (&sll, 1);
  (void) __sync_fetch_and_nand (&ull, 1);
}
Example #6
void mainserv_stats(uint64_t *bin,uint64_t *bout,uint32_t *hlopr,uint32_t *hlopw) {
#ifdef HAVE___SYNC_FETCH_AND_OP
	*bin = __sync_fetch_and_and(&stats_bytesin,0);
	*bout = __sync_fetch_and_and(&stats_bytesout,0);
	*hlopr = __sync_fetch_and_and(&stats_hlopr,0);
	*hlopw = __sync_fetch_and_and(&stats_hlopw,0);
#else
	zassert(pthread_mutex_lock(&statslock));
	*bin = stats_bytesin;
	*bout = stats_bytesout;
	*hlopr = stats_hlopr;
	*hlopw = stats_hlopw;
	stats_bytesin = 0;
	stats_bytesout = 0;
	stats_hlopr = 0;
	stats_hlopw = 0;
	zassert(pthread_mutex_unlock(&statslock));
#endif
}
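Note: the lock-free branch is a read-and-reset counter: __sync_fetch_and_and(&counter, 0) hands back the accumulated total and zeroes the counter in one atomic step, so no updates are lost between the read and the reset, which is exactly what the mutex in the fallback branch guarantees. A minimal sketch of one such counter (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static volatile uint64_t bytes_in;   /* updated by worker threads */

static void account_bytes(uint64_t n) {
    __sync_fetch_and_add(&bytes_in, n);          /* producer side */
}

static uint64_t drain_bytes(void) {
    return __sync_fetch_and_and(&bytes_in, 0);   /* consumer: read total and reset */
}

int main(void) {
    account_bytes(100);
    account_bytes(23);
    printf("%llu\n", (unsigned long long)drain_bytes());
    return 0;
}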
Example #7
File: io.c  Project: CPFL/gxen
static inline int send_notify(struct libxenvchan *ctrl, uint8_t bit)
{
	uint8_t *notify, prev;
	xen_mb(); /* caller updates indexes /before/ we decode to notify */
	notify = ctrl->is_server ? &ctrl->ring->srv_notify : &ctrl->ring->cli_notify;
	prev = __sync_fetch_and_and(notify, ~bit);
	if (prev & bit)
		return xc_evtchn_notify(ctrl->event, ctrl->event_port);
	else
		return 0;
}
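Note: send_notify is a test-and-clear: the fetch-and-AND removes the peer's notify bit, and the returned previous value tells whether the peer had actually requested a notification, so the event channel is only signalled when needed. Reduced to its core (hypothetical names, not the libxenvchan API):

#include <stdbool.h>
#include <stdint.h>

static volatile uint8_t notify_flags;   /* hypothetical byte shared with the peer */

/* Clear `bit` and report whether the peer had requested a notification. */
static bool consume_notify_request(uint8_t bit) {
    uint8_t prev = __sync_fetch_and_and(&notify_flags, (uint8_t)~bit);
    return (prev & bit) != 0;
}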
Example #8
CAMLprim value stub_atomic_fetch_and_uint8(value buf, value idx, value val)
{
  CAMLparam3(buf, idx, val);
  uint8_t c_val = (uint8_t)Int_val(val);
  uint8_t *ptr = Caml_ba_data_val(buf) + Int_val(idx);

  if (Int_val(idx) >= Caml_ba_array_val(buf)->dim[0])
    caml_invalid_argument("idx");

  CAMLreturn(Val_int((uint8_t)__sync_fetch_and_and(ptr, c_val)));
}
Example #9
//============================================================================
//		NTargetThread::AtomicAnd32 : Atomic 32-bit and.
//----------------------------------------------------------------------------
void NTargetThread::AtomicAnd32(UInt32 &theValue, UInt32 theMask)
{


	// Validate our parameters
	NN_ASSERT_ALIGNED_4(&theValue);



	// AND the value
	__sync_fetch_and_and(&theValue, theMask);
}
Example #10
File: vcore.c  Project: windyuuy/akaros
/* This can return, if you failed to yield due to a concurrent event.  Note
 * we're atomically setting the CAN_RCV flag, and aren't bothering with CASing
 * (either with the kernel or uthread's handle_indirs()).  We don't particularly
 * care what other code does - we intend to set those flags no matter what. */
void vcore_yield(bool preempt_pending)
{
    unsigned long old_nr;
    uint32_t vcoreid = vcore_id();
    struct preempt_data *vcpd = vcpd_of(vcoreid);
    __sync_fetch_and_and(&vcpd->flags, ~VC_CAN_RCV_MSG);
    /* no wrmb() necessary, handle_events() has an mb() if it is checking */
    /* Clears notif pending and tries to handle events.  This is an optimization
     * to avoid the yield syscall if we have an event pending.  If there is one,
     * we want to unwind and return to the 2LS loop, where we may not want to
     * yield anymore.
     * Note that the kernel only cares about CAN_RCV_MSG for the desired vcore,
     * not for a FALLBACK.  */
    if (handle_events(vcoreid)) {
        __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
        return;
    }
    /* If we are yielding since we don't want the core, tell the kernel we want
     * one less vcore (vc_yield assumes a dumb 2LS).
     *
     * If yield fails (slight race), we may end up having more vcores than
     * amt_wanted for a while, and might lose one later on (after a
     * preempt/timeslicing) - the 2LS will have to notice eventually if it
     * actually needs more vcores (which it already needs to do).  amt_wanted
     * could even be 0.
     *
     * In general, any time userspace decrements or sets to 0, it could get
     * preempted, so the kernel will still give us at least one, until the last
     * vcore properly yields without missing a message (and becomes a WAITING
     * proc, which the ksched will not give cores to).
     *
     * I think it's possible for userspace to do this (lock, read amt_wanted,
     * check all message queues for all vcores, subtract amt_wanted (not set to
     * 0), unlock) so long as every event handler +1s the amt wanted, but that's
     * a huge pain, and we already have event handling code making sure a
     * process can't sleep (transition to WAITING) if a message arrives (can't
     * yield if notif_pending, can't go WAITING without yielding, and the event
     * posting the notif_pending will find the online VC or be delayed by
     * spinlock til the proc is WAITING). */
    if (!preempt_pending) {
        do {
            old_nr = __procdata.res_req[RES_CORES].amt_wanted;
            if (old_nr == 0)
                break;
        } while (!__sync_bool_compare_and_swap(
                     &__procdata.res_req[RES_CORES].amt_wanted,
                     old_nr, old_nr - 1));
    }
    /* We can probably yield.  This may pop back up if notif_pending became set
     * by the kernel after we cleared it and we lost the race. */
    sys_yield(preempt_pending);
    __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
}
Example #11
unsigned
Atomic::AND(unsigned value)
{
#if defined(_OPENTHREADS_ATOMIC_USE_GCC_BUILTINS)
    return __sync_fetch_and_and(&_value, value);
#elif defined(_OPENTHREADS_ATOMIC_USE_WIN32_INTERLOCKED)
    return _InterlockedAnd(&_value, value);
#elif defined(_OPENTHREADS_ATOMIC_USE_BSD_ATOMIC)
    return OSAtomicAnd32((uint32_t)value, (uint32_t *)&_value);
#else
# error This implementation should happen inline in the include file
#endif
}
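Note: the first branch uses the legacy __sync interface; newer GCC and Clang also provide the __atomic builtins and C11 <stdatomic.h>, which expose the same fetch-and-AND with an explicit memory order. A rough C11 equivalent of that branch (a sketch, not the OpenThreads code):

#include <stdatomic.h>

/* Returns the previous value, like __sync_fetch_and_and(&_value, value). */
unsigned atomic_and_fetch_old(_Atomic unsigned *value, unsigned mask) {
    return atomic_fetch_and_explicit(value, mask, memory_order_seq_cst);
}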
Example #12
File: Atomics.c  Project: ADonut/LLVM-GPGPU
void test_op_ignore (void) // CHECK-LABEL: define void @test_op_ignore
{
  (void) __sync_fetch_and_add (&sc, 1); // CHECK: atomicrmw add i8
  (void) __sync_fetch_and_add (&uc, 1); // CHECK: atomicrmw add i8
  (void) __sync_fetch_and_add (&ss, 1); // CHECK: atomicrmw add i16
  (void) __sync_fetch_and_add (&us, 1); // CHECK: atomicrmw add i16
  (void) __sync_fetch_and_add (&si, 1); // CHECK: atomicrmw add i32
  (void) __sync_fetch_and_add (&ui, 1); // CHECK: atomicrmw add i32
  (void) __sync_fetch_and_add (&sll, 1); // CHECK: atomicrmw add i64
  (void) __sync_fetch_and_add (&ull, 1); // CHECK: atomicrmw add i64

  (void) __sync_fetch_and_sub (&sc, 1); // CHECK: atomicrmw sub i8
  (void) __sync_fetch_and_sub (&uc, 1); // CHECK: atomicrmw sub i8
  (void) __sync_fetch_and_sub (&ss, 1); // CHECK: atomicrmw sub i16
  (void) __sync_fetch_and_sub (&us, 1); // CHECK: atomicrmw sub i16
  (void) __sync_fetch_and_sub (&si, 1); // CHECK: atomicrmw sub i32
  (void) __sync_fetch_and_sub (&ui, 1); // CHECK: atomicrmw sub i32
  (void) __sync_fetch_and_sub (&sll, 1); // CHECK: atomicrmw sub i64
  (void) __sync_fetch_and_sub (&ull, 1); // CHECK: atomicrmw sub i64

  (void) __sync_fetch_and_or (&sc, 1); // CHECK: atomicrmw or i8
  (void) __sync_fetch_and_or (&uc, 1); // CHECK: atomicrmw or i8
  (void) __sync_fetch_and_or (&ss, 1); // CHECK: atomicrmw or i16
  (void) __sync_fetch_and_or (&us, 1); // CHECK: atomicrmw or i16
  (void) __sync_fetch_and_or (&si, 1); // CHECK: atomicrmw or i32
  (void) __sync_fetch_and_or (&ui, 1); // CHECK: atomicrmw or i32
  (void) __sync_fetch_and_or (&sll, 1); // CHECK: atomicrmw or i64
  (void) __sync_fetch_and_or (&ull, 1); // CHECK: atomicrmw or i64

  (void) __sync_fetch_and_xor (&sc, 1); // CHECK: atomicrmw xor i8
  (void) __sync_fetch_and_xor (&uc, 1); // CHECK: atomicrmw xor i8
  (void) __sync_fetch_and_xor (&ss, 1); // CHECK: atomicrmw xor i16
  (void) __sync_fetch_and_xor (&us, 1); // CHECK: atomicrmw xor i16
  (void) __sync_fetch_and_xor (&si, 1); // CHECK: atomicrmw xor i32
  (void) __sync_fetch_and_xor (&ui, 1); // CHECK: atomicrmw xor i32
  (void) __sync_fetch_and_xor (&sll, 1); // CHECK: atomicrmw xor i64
  (void) __sync_fetch_and_xor (&ull, 1); // CHECK: atomicrmw xor i64

  (void) __sync_fetch_and_and (&sc, 1); // CHECK: atomicrmw and i8
  (void) __sync_fetch_and_and (&uc, 1); // CHECK: atomicrmw and i8
  (void) __sync_fetch_and_and (&ss, 1); // CHECK: atomicrmw and i16
  (void) __sync_fetch_and_and (&us, 1); // CHECK: atomicrmw and i16
  (void) __sync_fetch_and_and (&si, 1); // CHECK: atomicrmw and i32
  (void) __sync_fetch_and_and (&ui, 1); // CHECK: atomicrmw and i32
  (void) __sync_fetch_and_and (&sll, 1); // CHECK: atomicrmw and i64
  (void) __sync_fetch_and_and (&ull, 1); // CHECK: atomicrmw and i64

}
Example #13
File: Atomics.c  Project: ADonut/LLVM-GPGPU
void test_fetch_and_op (void) // CHECK-LABEL: define void @test_fetch_and_op
{
  sc = __sync_fetch_and_add (&sc, 11); // CHECK: atomicrmw add
  uc = __sync_fetch_and_add (&uc, 11); // CHECK: atomicrmw add
  ss = __sync_fetch_and_add (&ss, 11); // CHECK: atomicrmw add
  us = __sync_fetch_and_add (&us, 11); // CHECK: atomicrmw add
  si = __sync_fetch_and_add (&si, 11); // CHECK: atomicrmw add
  ui = __sync_fetch_and_add (&ui, 11); // CHECK: atomicrmw add
  sll = __sync_fetch_and_add (&sll, 11); // CHECK: atomicrmw add
  ull = __sync_fetch_and_add (&ull, 11); // CHECK: atomicrmw add

  sc = __sync_fetch_and_sub (&sc, 11); // CHECK: atomicrmw sub
  uc = __sync_fetch_and_sub (&uc, 11); // CHECK: atomicrmw sub
  ss = __sync_fetch_and_sub (&ss, 11); // CHECK: atomicrmw sub
  us = __sync_fetch_and_sub (&us, 11); // CHECK: atomicrmw sub
  si = __sync_fetch_and_sub (&si, 11); // CHECK: atomicrmw sub
  ui = __sync_fetch_and_sub (&ui, 11); // CHECK: atomicrmw sub
  sll = __sync_fetch_and_sub (&sll, 11); // CHECK: atomicrmw sub
  ull = __sync_fetch_and_sub (&ull, 11); // CHECK: atomicrmw sub

  sc = __sync_fetch_and_or (&sc, 11); // CHECK: atomicrmw or
  uc = __sync_fetch_and_or (&uc, 11); // CHECK: atomicrmw or
  ss = __sync_fetch_and_or (&ss, 11); // CHECK: atomicrmw or
  us = __sync_fetch_and_or (&us, 11); // CHECK: atomicrmw or
  si = __sync_fetch_and_or (&si, 11); // CHECK: atomicrmw or
  ui = __sync_fetch_and_or (&ui, 11); // CHECK: atomicrmw or
  sll = __sync_fetch_and_or (&sll, 11); // CHECK: atomicrmw or
  ull = __sync_fetch_and_or (&ull, 11); // CHECK: atomicrmw or

  sc = __sync_fetch_and_xor (&sc, 11); // CHECK: atomicrmw xor
  uc = __sync_fetch_and_xor (&uc, 11); // CHECK: atomicrmw xor
  ss = __sync_fetch_and_xor (&ss, 11); // CHECK: atomicrmw xor
  us = __sync_fetch_and_xor (&us, 11); // CHECK: atomicrmw xor
  si = __sync_fetch_and_xor (&si, 11); // CHECK: atomicrmw xor
  ui = __sync_fetch_and_xor (&ui, 11); // CHECK: atomicrmw xor
  sll = __sync_fetch_and_xor (&sll, 11); // CHECK: atomicrmw xor
  ull = __sync_fetch_and_xor (&ull, 11); // CHECK: atomicrmw xor

  sc = __sync_fetch_and_and (&sc, 11); // CHECK: atomicrmw and
  uc = __sync_fetch_and_and (&uc, 11); // CHECK: atomicrmw and
  ss = __sync_fetch_and_and (&ss, 11); // CHECK: atomicrmw and
  us = __sync_fetch_and_and (&us, 11); // CHECK: atomicrmw and
  si = __sync_fetch_and_and (&si, 11); // CHECK: atomicrmw and
  ui = __sync_fetch_and_and (&ui, 11); // CHECK: atomicrmw and
  sll = __sync_fetch_and_and (&sll, 11); // CHECK: atomicrmw and
  ull = __sync_fetch_and_and (&ull, 11); // CHECK: atomicrmw and

}
Example #14
File: putBit.hpp  Project: gt1/libmaus2
		inline void putBit1Sync(iterator A, uint64_t const offset, uint8_t v)
		{
			static const uint8_t maskone[] = {
				static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<7)), static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<6)),
				static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<5)), static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<4)),
				static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<3)), static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<2)),
				static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<1)), static_cast<uint8_t>(~(static_cast<uint8_t>(1)<<0))
			};
			static const uint8_t insone[] = { 0u, 1u<<7,0u,1u<<6,0u,1u<<5,0u,1u<<4,0u,1u<<3,0u,1u<<2,0u,1u<<1,0u,1u<<0 };
			uint64_t const wordoffset = offset>>3;
			uint64_t const bitoffset = offset&0x7u;
			__sync_fetch_and_and(A+wordoffset,maskone[bitoffset]);
			__sync_fetch_and_or (A+wordoffset,insone[(bitoffset<<1)|v]);
		}
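Note: putBit1Sync writes the bit in two separate atomic steps (clear, then OR in the new value), so a concurrent reader can briefly see the bit as 0 even when v is 1. If that intermediate state matters, the update can be done in a single atomic step with a compare-and-swap loop; a sketch of that alternative (not the libmaus2 code), assuming the same MSB-first bit numbering:

#include <stdint.h>

/* Write bit `bitoffset` (0 = most significant) of *word to v in one atomic update. */
static inline void put_bit_cas(volatile uint8_t *word, unsigned bitoffset, uint8_t v) {
    uint8_t mask = (uint8_t)(0x80u >> bitoffset);
    uint8_t oldv, newv;
    do {
        oldv = *word;
        newv = v ? (uint8_t)(oldv | mask) : (uint8_t)(oldv & (uint8_t)~mask);
    } while (!__sync_bool_compare_and_swap(word, oldv, newv));
}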
Example #15
int process_signals (void) {
  if (pending_signals & ((1 << SIGINT) | (1 << SIGTERM))) {
    return 0;
  }
  if (last_cron_time != now) {
    last_cron_time = now;
    check_child_status ();
    if (__sync_fetch_and_and (&sighup_cnt, 0)) {
      sf.sighup ();
    }
    if (__sync_fetch_and_and (&sigusr1_cnt, 0)) {
      sf.sigusr1 ();
    }
    if (__sync_fetch_and_and (&sigrtmax_cnt, 0)) {
      fork_write_index ();
    }
    sf.cron ();
  }
  if (epoll_pre_event) {
    epoll_pre_event ();
  }
  return 1;
}
Example #16
    void AtomicRefCount::reset()
    {
#if defined( _WIN32 ) && !defined( __SYMBIAN32__ )
      ::InterlockedExchange( (volatile LONG*)&m_count, (volatile LONG)0 );
//#elif defined( __APPLE__ )
//      OSAtomicAnd32Barrier( (int32_t)0, (volatile int32_t*)&m_count );
#elif defined( HAVE_GCC_ATOMIC_BUILTINS )
      // Use the gcc intrinsic to clear the count atomically if supported.
      __sync_fetch_and_and( &m_count, 0 );
#else
      // Fallback to using a lock
      MutexGuard m( m_lock );
      m_count = 0;
#endif
    }
Example #17
/*++
Function:
    PAL_FreeExceptionRecords

    Free EXCEPTION_RECORD and CONTEXT structures of an exception that were allocated by
    AllocateExceptionRecords.
Parameters:
    exceptionRecord - exception record
    contextRecord - context record
--*/
VOID
PALAPI
PAL_FreeExceptionRecords(IN EXCEPTION_RECORD *exceptionRecord, IN CONTEXT *contextRecord)
{
    // Both records are allocated at once and the allocated memory starts at the contextRecord
    ExceptionRecords* records = (ExceptionRecords*)contextRecord;
    if ((records >= &s_fallbackContexts[0]) && (records < &s_fallbackContexts[MaxFallbackContexts]))
    {
        int index = records - &s_fallbackContexts[0];
        __sync_fetch_and_and(&s_allocatedContextsBitmap, ~((size_t)1 << index));
    }
    else
    {
        free(contextRecord);
    }
}
Example #18
int ngx_shmap_get_int64_and_clear(ngx_shm_zone_t* zone, ngx_str_t* key, int64_t* i)
{
	uint8_t value_type = VT_NULL;
	ngx_str_t data = ngx_null_string;
	int ret = ngx_shmap_get(zone, key, &data, &value_type,NULL,NULL);
	if(ret == 0){
		if(value_type != VT_INT64){
			ret = -1;
			//NLOG_ERROR("ngx_shmap_get_int64(key=%V) return invalid value_type=%d",key, value_type);
		}else{
			int64_t* p = (int64_t*)data.data;
			*i = __sync_fetch_and_and(p, 0);
		}
	}
	return ret;
}
Example #19
// Send ack for any unusual signal received.
BOOL ExLocalServerSendAckIfNecessary(void *ctx) {
  Exchanger *ex = (Exchanger *)ctx;
  // Clear the flag.
  unsigned char flag = get_flag(ex);
  // If the flag is not zero before cleanup, we need to send an ack (so that the client knows we have received it).
  BOOL clean_flag = FALSE;
  BOOL send_ack = TRUE;
  if (flag != 0) {
    if (flag & (1 << SIG_RESTART)) {
      // Clean up the message queues.
      int num_discarded = 0;
      MBoard mboard;
      while (PipeRead(&ex->channels[PIPE_BOARD], ARG(mboard)) == 0) num_discarded ++;
      printf("#Board Discarded = %d\n", num_discarded);
      clean_flag = TRUE;
    } else if (flag & (1 << SIG_FINISHSOON)) {
      clean_flag = TRUE;
      // Do not need to send ack for FINISHSOON (No one is going to receive it).
      send_ack = FALSE;
    }
  }

  if (clean_flag) {
    printf("Summary: Board received = %d, Move sent = %d\n", ex->board_received, ex->move_sent);
    ex->board_received = 0;
    ex->move_sent = 0;

    // All states are resumed, then we clear the flag. (If we clear the flag before that, sendmove and receiveboard might run before the stats are reset).
    __sync_fetch_and_and(&ex->ctrl_flag, 0);

    // Send message.
    if (send_ack) {
      MCtrl mctrl;
      mctrl.code = SIG_ACK;
      while (! ex->done) {
        if (PipeWrite(&ex->channels[PIPE_S2C], ARG(mctrl)) == 0) {
          printf("Ack sent with previous flag = %d\n", flag);

          // Sent.
          return TRUE;
        }
      }
    }
  }
  // Not sent.
  return FALSE;
}
Example #20
File: owned.hpp  Project: Adyoulike/mesos
T* Owned<T>::release()
{
  if (data.get() == NULL) {
    // The ownership of this pointer has already been lost.
    return NULL;
  }

  // Atomically set the pointer 'data->t' to NULL.
  T* t = __sync_fetch_and_and(&data->t, NULL);
  if (t == NULL) {
    // The ownership of this pointer has already been lost.
    return NULL;
  }

  data.reset();
  return t;
}
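Note: here the fetch-and-AND with NULL acts as an atomic exchange-to-NULL on the pointer: the builtin returns the old pointer and leaves NULL behind, so at most one caller can take ownership. The same release step is usually written as an explicit exchange; a sketch using the newer __atomic builtin (an assumption about available compiler support, not the Mesos code):

/* Atomically replace *slot with NULL and return the previous pointer. */
static void *steal_pointer(void * volatile *slot) {
    return __atomic_exchange_n(slot, (void *)0, __ATOMIC_SEQ_CST);
}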
Example #21
/* Now check return values.  */
static void
do_ret_di (void)
{
  if (__sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll) !=
	0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll) !=
	1) abort ();
  if (__sync_lock_test_and_set (AL+2, 1) != 0) abort ();
  __sync_lock_release (AL+3); /* no return value, but keep to match results.  */

  /* The following tests should not change the value since the
     original does NOT match.  */
  if (__sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll) !=
	0x100000002ll) abort ();
  if (__sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll) !=
	0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll) !=
	0) abort ();
  if (__sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll) !=
	0) abort ();

  if (__sync_fetch_and_add (AL+8, 1) != 0) abort ();
  if (__sync_fetch_and_add (AL+9, 0xb000e0000000ll) != 0x1000e0de0000ll) abort ();
  if (__sync_fetch_and_sub (AL+10, 22) != 42) abort ();
  if (__sync_fetch_and_sub (AL+11, 0xb000e0000000ll) != 0xc001c0de0000ll)
	abort ();

  if (__sync_fetch_and_and (AL+12, 0x300000007ll) != -1ll) abort ();
  if (__sync_fetch_and_or (AL+13, 0x500000009ll) != 0) abort ();
  if (__sync_fetch_and_xor (AL+14, 0xe00000001ll) != 0xff00ff0000ll) abort ();
  if (__sync_fetch_and_nand (AL+15, 0xa00000007ll) != -1ll) abort ();

  /* These should be the same as the fetch_and_* cases except for
     return value.  */
  if (__sync_add_and_fetch (AL+16, 1) != 1) abort ();
  if (__sync_add_and_fetch (AL+17, 0xb000e0000000ll) != 0xc001c0de0000ll)
	abort ();
  if (__sync_sub_and_fetch (AL+18, 22) != 20) abort ();
  if (__sync_sub_and_fetch (AL+19, 0xb000e0000000ll) != 0x1000e0de0000ll)
	abort ();

  if (__sync_and_and_fetch (AL+20, 0x300000007ll) != 0x300000007ll) abort ();
  if (__sync_or_and_fetch (AL+21, 0x500000009ll) != 0x500000009ll) abort ();
  if (__sync_xor_and_fetch (AL+22, 0xe00000001ll) != 0xf100ff0001ll) abort ();
  if (__sync_nand_and_fetch (AL+23, 0xa00000007ll) != ~0xa00000007ll) abort ();
}
Example #22
static void
do_noret_di (void)
{
  __sync_val_compare_and_swap(AL+0, 0, 1);
  __sync_bool_compare_and_swap(AL+1, 0, 1);
  __sync_lock_test_and_set(AL+2, 1);
  __sync_lock_release(AL+3);

  __sync_fetch_and_add(AL+4, 1);
  __sync_fetch_and_add(AL+5, 4);
  __sync_fetch_and_add(AL+6, 22);
  __sync_fetch_and_sub(AL+7, 12);
  __sync_fetch_and_and(AL+8, 7);
  __sync_fetch_and_or(AL+9, 8);
  __sync_fetch_and_xor(AL+10, 9);
  __sync_fetch_and_nand(AL+11, 7);
}
Example #23
static void
do_di (void)
{
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 0)
    abort ();
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 1)
    abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 1)
    abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 0)
    abort ();

  if (__sync_lock_test_and_set(AL+2, 1) != 0)
    abort ();

  if (__sync_fetch_and_add(AL+4, 1) != 0)
    abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0)
    abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0)
    abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0)
    abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1)
    abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0)
    abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0)
    abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != 0)
    abort ();

  if (__sync_add_and_fetch(AL+12, 1) != 1)
    abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12)
    abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7)
    abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8)
    abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9)
    abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != 7)
    abort ();
}
Example #24
File: algs.hpp  Project: hlitz/rstm_sitevm
  /*** unset a bit */
  inline void rrec_t::unsetbit(unsigned slot)
  {
      uint32_t bucket = slot / BITS;
      uintptr_t mask = 1lu<<(slot % BITS);
      uintptr_t unmask = ~mask;
      uintptr_t oldval = bits[bucket];
      if (!(oldval & mask))
          return;
      // NB: use this GCC-specific builtin when available; otherwise fall back to the CAS loop below
#if defined(STM_CPU_X86) && defined(STM_CC_GCC)
      __sync_fetch_and_and(&bits[bucket], unmask);
#else
      while (true) {
          if (bcasptr(&bits[bucket], oldval, (oldval & unmask)))
              return;
          oldval = bits[bucket];
      }
#endif
  }
Example #25
File: putBit.hpp  Project: gt1/libmaus2
		inline void putBit4Sync(iterator A, uint64_t const offset, uint32_t v)
		{
			static const uint32_t maskone[] = {
				~(static_cast<uint32_t>(1)<<31), ~(static_cast<uint32_t>(1)<<30), ~(static_cast<uint32_t>(1)<<29), ~(static_cast<uint32_t>(1)<<28), ~(static_cast<uint32_t>(1)<<27), ~(static_cast<uint32_t>(1)<<26), ~(static_cast<uint32_t>(1)<<25), ~(static_cast<uint32_t>(1)<<24),
				~(static_cast<uint32_t>(1)<<23), ~(static_cast<uint32_t>(1)<<22), ~(static_cast<uint32_t>(1)<<21), ~(static_cast<uint32_t>(1)<<20), ~(static_cast<uint32_t>(1)<<19), ~(static_cast<uint32_t>(1)<<18), ~(static_cast<uint32_t>(1)<<17), ~(static_cast<uint32_t>(1)<<16),
				~(static_cast<uint32_t>(1)<<15), ~(static_cast<uint32_t>(1)<<14), ~(static_cast<uint32_t>(1)<<13), ~(static_cast<uint32_t>(1)<<12), ~(static_cast<uint32_t>(1)<<11), ~(static_cast<uint32_t>(1)<<10), ~(static_cast<uint32_t>(1)<<9), ~(static_cast<uint32_t>(1)<<8),
				~(static_cast<uint32_t>(1)<<7), ~(static_cast<uint32_t>(1)<<6), ~(static_cast<uint32_t>(1)<<5), ~(static_cast<uint32_t>(1)<<4), ~(static_cast<uint32_t>(1)<<3), ~(static_cast<uint32_t>(1)<<2), ~(static_cast<uint32_t>(1)<<1), ~(static_cast<uint32_t>(1)<<0)
			};
			static const uint32_t insone[] = {
				0u, (static_cast<uint32_t>(1)<<31), 0u, (static_cast<uint32_t>(1)<<30), 0u, (static_cast<uint32_t>(1)<<29), 0u, (static_cast<uint32_t>(1)<<28), 0u, (static_cast<uint32_t>(1)<<27), 0u, (static_cast<uint32_t>(1)<<26), 0u, (static_cast<uint32_t>(1)<<25), 0u, (static_cast<uint32_t>(1)<<24),
				0u, (static_cast<uint32_t>(1)<<23), 0u, (static_cast<uint32_t>(1)<<22), 0u, (static_cast<uint32_t>(1)<<21), 0u, (static_cast<uint32_t>(1)<<20), 0u, (static_cast<uint32_t>(1)<<19), 0u, (static_cast<uint32_t>(1)<<18), 0u, (static_cast<uint32_t>(1)<<17), 0u, (static_cast<uint32_t>(1)<<16),
				0u, (static_cast<uint32_t>(1)<<15), 0u, (static_cast<uint32_t>(1)<<14), 0u, (static_cast<uint32_t>(1)<<13), 0u, (static_cast<uint32_t>(1)<<12), 0u, (static_cast<uint32_t>(1)<<11), 0u, (static_cast<uint32_t>(1)<<10), 0u, (static_cast<uint32_t>(1)<<9), 0u, (static_cast<uint32_t>(1)<<8),
				0u, (static_cast<uint32_t>(1)<<7),0u,(static_cast<uint32_t>(1)<<6),0u,(static_cast<uint32_t>(1)<<5),0u,(static_cast<uint32_t>(1)<<4),0u,(static_cast<uint32_t>(1)<<3),0u,(static_cast<uint32_t>(1)<<2),0u,(static_cast<uint32_t>(1)<<1),0u,(static_cast<uint32_t>(1)<<0)
			};
			uint64_t const wordoffset = offset>>5;
			uint64_t const bitoffset = offset&0x1fu;
			__sync_fetch_and_and(A+wordoffset,maskone[bitoffset]);
			__sync_fetch_and_or (A+wordoffset,insone[(bitoffset<<1)|v]);
		}
Example #26
File: putBit.hpp  Project: gt1/libmaus2
		inline bool eraseBit2Sync(iterator A, uint64_t const offset)
		{
			static const uint16_t setone[] = {
				static_cast<uint16_t>(1ull<<15),
				static_cast<uint16_t>(1ull<<14),
				static_cast<uint16_t>(1ull<<13),
				static_cast<uint16_t>(1ull<<12),
				static_cast<uint16_t>(1ull<<11),
				static_cast<uint16_t>(1ull<<10),
				static_cast<uint16_t>(1ull<<9),
				static_cast<uint16_t>(1ull<<8),
				static_cast<uint16_t>(1ull<<7),
				static_cast<uint16_t>(1ull<<6),
				static_cast<uint16_t>(1ull<<5),
				static_cast<uint16_t>(1ull<<4),
				static_cast<uint16_t>(1ull<<3),
				static_cast<uint16_t>(1ull<<2),
				static_cast<uint16_t>(1ull<<1),
				static_cast<uint16_t>(1ull<<0)
			};
			static const uint16_t maskone[] = {
				static_cast<uint16_t>(~(1ull<<15)),
				static_cast<uint16_t>(~(1ull<<14)),
				static_cast<uint16_t>(~(1ull<<13)),
				static_cast<uint16_t>(~(1ull<<12)),
				static_cast<uint16_t>(~(1ull<<11)),
				static_cast<uint16_t>(~(1ull<<10)),
				static_cast<uint16_t>(~(1ull<<9)),
				static_cast<uint16_t>(~(1ull<<8)),
				static_cast<uint16_t>(~(1ull<<7)),
				static_cast<uint16_t>(~(1ull<<6)),
				static_cast<uint16_t>(~(1ull<<5)),
				static_cast<uint16_t>(~(1ull<<4)),
				static_cast<uint16_t>(~(1ull<<3)),
				static_cast<uint16_t>(~(1ull<<2)),
				static_cast<uint16_t>(~(1ull<<1)),
				static_cast<uint16_t>(~(1ull<<0))
			};
			uint64_t const wordoffset = offset>>4;
			uint64_t const bitoffset = offset&0xFu;
			return __sync_fetch_and_and (A+wordoffset,maskone[bitoffset]) & setone[bitoffset];
		}
Example #27
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            ram_addr_t ram_addr;
            bit -= 1;
            ram_addr = section->offset_within_region + bit * VHOST_LOG_PAGE;
            memory_region_set_dirty(section->mr, ram_addr, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}
Example #28
File: pcm.c  Project: 33d/linux-2.6.21-hh20
int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
{
	unsigned long flags_old =
	    __sync_fetch_and_and(&line6pcm->flags, ~channels);
	unsigned long flags_new = flags_old & ~channels;

	if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
		line6_unlink_audio_in_urbs(line6pcm);

		if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
			line6_free_capture_buffer(line6pcm);
	}

	if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
		line6_unlink_audio_out_urbs(line6pcm);

		if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
			line6_free_playback_buffer(line6pcm);
	}

	return 0;
}
Example #29
/* Release the timeout.
 * Returns 0 if the timeout had not fired yet, 1 if it had,
 * or -1 if the timeout is invalid, unused, or not armed.
*/
int timeout_unset(const int timeout)
{
    if (timeout >= 0 && timeout < TIMEOUTS) {
        /* Only keep TIMEOUT_PASSED for the specified timer. */
        const int  state = __sync_fetch_and_and(&timeout_state[timeout], TIMEOUT_PASSED);

        /* Invalid timeout? */
        if (!(state & TIMEOUT_USED))
            return -1;

        /* Not armed? */
        if (!(state & TIMEOUT_ARMED))
            return -1;

        /* Return 1 if passed, 0 otherwise. */
        return (state & TIMEOUT_PASSED) ? 1 : 0;

    } else {
        /* Invalid timeout number. */
        return -1;
    }
}
Example #30
File: packet.c  Project: nomnom100/pom-ng
int packet_release(struct packet *p) {

    // Release the multipart
    struct packet_multipart *multipart = __sync_fetch_and_and(&p->multipart, 0);
    if (multipart && packet_multipart_cleanup(multipart) != POM_OK)
        return POM_ERR;

    // If other references remain, just drop ours; otherwise the refcount reaches 0
    // and we can clean up the buffer, if any
    if (p->refcount > 1) {
        __sync_fetch_and_sub(&p->refcount, 1);
        return POM_OK;
    }

    if (p->pkt_buff)
        packet_buffer_release(p->pkt_buff);

    registry_perf_dec(perf_pkt_in_use, 1);
    free(p);

    return POM_OK;
}