Example #1
/**
 * Handles any signal sent to the scheduler.
 *
 * Currently Handles:
 *   SIGCHLD: scheduler will handle the death of the child process or agent
 *   SIGALRM: scheduler will run agent updates and database updates
 *   SIGTERM: scheduler will gracefully shut down
 *   SIGQUIT: scheduler will forcefully shut down
 *   SIGHUP:  scheduler will reload configuration data
 *
 * @param signo  the number of the signal that was sent
 */
void scheduler_sig_handle(int signo)
{
  /* Anywhere you see a "#if __GNUC__" the code is checking if gcc is the
   * compiler. This is because the __sync... set of functions are the gcc
   * version of atomics.
   *
   * This means that if you aren't compiling with gcc, you can have a race
   * condition that results in a signal being lost during the
   * signal_scheduler() function.
   *
   * What could happen:
   *   1. signal_scheduler() reads value of sigmask
   *   2. scheduler receives a SIG**** and sets the correct bit in sigmask
   *   3. signal_scheduler() clears sigmask by setting it to 0
   *
   * In this sequence of events, a signal has been lost. If it was a SIGCHLD,
   * this could be very bad, as a job might never get marked as finished.
   */
  switch(signo)
  {
#if __GNUC__
    case SIGCHLD: __sync_fetch_and_or(&sigmask, MASK_SIGCHLD); break;
    case SIGALRM: __sync_fetch_and_or(&sigmask, MASK_SIGALRM); break;
    case SIGTERM: __sync_fetch_and_or(&sigmask, MASK_SIGTERM); break;
    case SIGQUIT: __sync_fetch_and_or(&sigmask, MASK_SIGQUIT); break;
    case SIGHUP:  __sync_fetch_and_or(&sigmask, MASK_SIGHUP);  break;
#else
    case SIGCHLD: sigmask |= MASK_SIGCHLD; break;
    case SIGALRM: sigmask |= MASK_SIGALRM; break;
    case SIGTERM: sigmask |= MASK_SIGTERM; break;
    case SIGQUIT: sigmask |= MASK_SIGQUIT; break;
    case SIGHUP:  sigmask |= MASK_SIGHUP ; break;
#endif
  }
}
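The comment above describes a lost-signal race when the consuming side reads sigmask and then clears it in two separate steps. A minimal consumer sketch, assuming a signal_scheduler()-style loop over the same MASK_* bits (the function name, the int type of sigmask, and the dispatch bodies are illustrative assumptions, not the project's actual code), fetches and clears the mask in one atomic step:

void signal_scheduler_sketch(void)
{
  /* Atomically fetch the pending bits and reset the mask to 0 in one step,
   * so a signal delivered between the read and the clear cannot be lost. */
  int pending = __sync_fetch_and_and(&sigmask, 0);

  if (pending & MASK_SIGCHLD) { /* handle death of child process or agent */ }
  if (pending & MASK_SIGALRM) { /* run agent and database updates */ }
  if (pending & MASK_SIGTERM) { /* graceful shutdown */ }
  if (pending & MASK_SIGQUIT) { /* forceful shutdown */ }
  if (pending & MASK_SIGHUP)  { /* reload configuration data */ }
}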
Example #2
/* This can return, if you failed to yield due to a concurrent event.  Note
 * we're atomically setting the CAN_RCV flag, and aren't bothering with CASing
 * (either with the kernel or uthread's handle_indirs()).  We don't particularly
 * care what other code does - we intend to set those flags no matter what. */
void vcore_yield(bool preempt_pending)
{
    unsigned long old_nr;
    uint32_t vcoreid = vcore_id();
    struct preempt_data *vcpd = vcpd_of(vcoreid);
    __sync_fetch_and_and(&vcpd->flags, ~VC_CAN_RCV_MSG);
    /* no wrmb() necessary, handle_events() has an mb() if it is checking */
    /* Clears notif pending and tries to handle events.  This is an optimization
     * to avoid the yield syscall if we have an event pending.  If there is one,
     * we want to unwind and return to the 2LS loop, where we may not want to
     * yield anymore.
     * Note that the kernel only cares about CAN_RCV_MSG for the desired vcore,
     * not for a FALLBACK.  */
    if (handle_events(vcoreid)) {
        __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
        return;
    }
    /* If we are yielding since we don't want the core, tell the kernel we want
     * one less vcore (vc_yield assumes a dumb 2LS).
     *
     * If yield fails (slight race), we may end up having more vcores than
     * amt_wanted for a while, and might lose one later on (after a
     * preempt/timeslicing) - the 2LS will have to notice eventually if it
     * actually needs more vcores (which it already needs to do).  amt_wanted
     * could even be 0.
     *
     * In general, any time userspace decrements or sets to 0, it could get
     * preempted, so the kernel will still give us at least one, until the last
     * vcore properly yields without missing a message (and becomes a WAITING
     * proc, which the ksched will not give cores to).
     *
     * I think it's possible for userspace to do this (lock, read amt_wanted,
     * check all message queues for all vcores, subtract amt_wanted (not set to
     * 0), unlock) so long as every event handler +1s the amt wanted, but that's
     * a huge pain, and we already have event handling code making sure a
     * process can't sleep (transition to WAITING) if a message arrives (can't
     * yield if notif_pending, can't go WAITING without yielding, and the event
     * posting the notif_pending will find the online VC or be delayed by
     * spinlock til the proc is WAITING). */
    if (!preempt_pending) {
        do {
            old_nr = __procdata.res_req[RES_CORES].amt_wanted;
            if (old_nr == 0)
                break;
        } while (!__sync_bool_compare_and_swap(
                     &__procdata.res_req[RES_CORES].amt_wanted,
                     old_nr, old_nr - 1));
    }
    /* We can probably yield.  This may pop back up if notif_pending became set
     * by the kernel after we cleared it and we lost the race. */
    sys_yield(preempt_pending);
    __sync_fetch_and_or(&vcpd->flags, VC_CAN_RCV_MSG);
}
Example #3
static void
do_hi (void)
{
  if (__sync_fetch_and_add(AL+4, 1) != 0)
    abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0)
    abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0)
    abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0)
    abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1)
    abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0)
    abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0)
    abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != -1)
    abort ();

  if (__sync_add_and_fetch(AL+12, 1) != 1)
    abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12)
    abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7)
    abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8)
    abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9)
    abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != ~7)
    abort ();
}
Example #4
static void
do_qi (void)
{
  if (__sync_fetch_and_add(AI+4, 1) != 0)
    abort ();
  if (__sync_fetch_and_add(AI+5, 4) != 0)
    abort ();
  if (__sync_fetch_and_add(AI+6, 22) != 0)
    abort ();
  if (__sync_fetch_and_sub(AI+7, 12) != 0)
    abort ();
  if (__sync_fetch_and_and(AI+8, 7) != (char)-1)
    abort ();
  if (__sync_fetch_and_or(AI+9, 8) != 0)
    abort ();
  if (__sync_fetch_and_xor(AI+10, 9) != 0)
    abort ();
  if (__sync_fetch_and_nand(AI+11, 7) != (char)-1)
    abort ();

  if (__sync_add_and_fetch(AI+12, 1) != 1)
    abort ();
  if (__sync_sub_and_fetch(AI+13, 12) != (char)-12)
    abort ();
  if (__sync_and_and_fetch(AI+14, 7) != 7)
    abort ();
  if (__sync_or_and_fetch(AI+15, 8) != 8)
    abort ();
  if (__sync_xor_and_fetch(AI+16, 9) != 9)
    abort ();
  if (__sync_nand_and_fetch(AI+17, 7) != (char)~7)
    abort ();
}
Example #5
 //! Atomically sets the bit at position b to true returning the old value
 inline bool set_bit(size_t b) {
   // use CAS to set the bit
   size_t arrpos, bitpos;
   bit_to_pos(b, arrpos, bitpos);
   const size_t mask(size_t(1) << size_t(bitpos)); 
   return __sync_fetch_and_or(array + arrpos, mask) & mask;
 }
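The same pattern with __sync_fetch_and_and gives the clearing counterpart. This is a sketch under the assumption that the class exposes the same bit_to_pos() helper and array member used by set_bit() above; it is not part of the original source:

 //! Sketch: atomically clears the bit at position b, returning the old value
 inline bool clear_bit(size_t b) {
   size_t arrpos, bitpos;
   bit_to_pos(b, arrpos, bitpos);
   const size_t mask(size_t(1) << size_t(bitpos));
   // AND with the inverted mask clears the bit; the fetched old word tells
   // us whether the bit was set before
   return __sync_fetch_and_and(array + arrpos, ~mask) & mask;
 }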
Example #6
//Free a cb_vector_array struct without freeing its data
void cb_vector_array_free_no_data(struct cb_vector_array *array){
    //Wait until no thread is accessing the array
    while (__sync_fetch_and_or(&(array->th), 0) > 0) {}

    free(array->data);
    free(array);
}
Example #7
int proto_expectation_add(struct proto_expectation *e) {

	if (!e || !e->tail || !e->tail->proto) {
		pomlog(POMLOG_ERR "Cannot add expectation as it's incomplete");
		return POM_ERR;
	}

	if (e->flags & PROTO_EXPECTATION_FLAG_QUEUED)
		return POM_ERR;

	struct proto *proto = e->tail->proto;
	pom_rwlock_wlock(&proto->expectation_lock);

	__sync_fetch_and_or(&e->flags, PROTO_EXPECTATION_FLAG_QUEUED);

	e->next = proto->expectations;
	if (e->next)
		e->next->prev = e;

	proto->expectations = e;

	pom_rwlock_unlock(&proto->expectation_lock);

	registry_perf_inc(e->proto->perf_expt_pending, 1);

	return POM_OK;
}
Example #8
bool anscheduler_socket_connect(socket_desc_t * socket, task_t * task) {
  if (__sync_fetch_and_or(&socket->socket->hasBeenConnected, 1)) {
    return false;
  }
  
  // generate another link
  socket_desc_t * link = _create_descriptor(socket->socket, task, false);
  if (!link) {
    return false;
  }
  
  anscheduler_task_dereference(task);
  anscheduler_socket_dereference(link);
  
  socket_msg_t * msg = anscheduler_alloc(sizeof(socket_msg_t));
  if (!msg) {
    anscheduler_abort("failed to allocate connect message");
  }
  msg->type = ANSCHEDULER_MSG_TYPE_CONNECT;
  msg->len = 0;
  
  // if we couldn't send the message, we'd need to release our resources
  if (!anscheduler_socket_msg(socket, msg)) {
    anscheduler_abort("failed to send connect message");
  }
  return true;
}
Example #9
int main(int argc, const char** args)
{
  static int completed = 0;
  static uint32_t job = 0;
  static const int TASKS = 8 * sizeof(job);
  
  // schedule tasks
  for (int i = 0; i < TASKS; i++)
  SMP::add_task(
  [i] {
    // the job
    __sync_fetch_and_or(&job, 1u << i); // unsigned literal keeps the shift well-defined for i == 31
  }, 
  [i] {
    // job completion
    completed++;
    
    if (completed == TASKS) {
      printf("All jobs are done now, compl = %d\n", completed);
      printf("bits = %#x\n", job);
      assert(job == 0xffffffff && "All 32 bits must be set");
    }
  });
  // start working on tasks
  SMP::start();
  
  printf("*** %s started *** \n", args[0]);
  return 0;
}
Example #10
int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
{
	unsigned long flags_old =
	    __sync_fetch_and_or(&line6pcm->flags, channels);
	unsigned long flags_new = flags_old | channels;
	int err = 0;
	
	line6pcm->prev_fbuf = NULL;

	if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
		/*
		   Waiting for completion of active URBs in the stop handler is
		   a bug, we therefore report an error if capturing is restarted
		   too soon.
		 */
		if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
			return -EBUSY;

		if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
			err = line6_alloc_capture_buffer(line6pcm);

			if (err < 0)
				goto pcm_start_error;
		}

		line6pcm->count_in = 0;
		line6pcm->prev_fsize = 0;
		err = line6_submit_audio_in_all_urbs(line6pcm);

		if (err < 0)
			goto pcm_start_error;
	}

	if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
		/*
		   See comment above regarding PCM restart.
		 */
		if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
			return -EBUSY;

		if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
			err = line6_alloc_playback_buffer(line6pcm);

			if (err < 0)
				goto pcm_start_error;
		}

		line6pcm->count_out = 0;
		err = line6_submit_audio_out_all_urbs(line6pcm);

		if (err < 0)
			goto pcm_start_error;
	}

	return 0;

pcm_start_error:
	__sync_fetch_and_and(&line6pcm->flags, ~channels);
	return err;
}
Example #11
		inline void putBit8Sync(iterator A, uint64_t const offset, uint64_t v)
		{
			static const uint64_t maskone[] = {
				~(1ULL<<63), ~(1ULL<<62), ~(1ULL<<61), ~(1ULL<<60), ~(1ULL<<59), ~(1ULL<<58), ~(1ULL<<57), ~(1ULL<<56),
				~(1ULL<<55), ~(1ULL<<54), ~(1ULL<<53), ~(1ULL<<52), ~(1ULL<<51), ~(1ULL<<50), ~(1ULL<<49), ~(1ULL<<48),
				~(1ULL<<47), ~(1ULL<<46), ~(1ULL<<45), ~(1ULL<<44), ~(1ULL<<43), ~(1ULL<<42), ~(1ULL<<41), ~(1ULL<<40),
				~(1ULL<<39), ~(1ULL<<38), ~(1ULL<<37), ~(1ULL<<36), ~(1ULL<<35), ~(1ULL<<34), ~(1ULL<<33), ~(1ULL<<32),
				~(1ULL<<31), ~(1ULL<<30), ~(1ULL<<29), ~(1ULL<<28), ~(1ULL<<27), ~(1ULL<<26), ~(1ULL<<25), ~(1ULL<<24),
				~(1ULL<<23), ~(1ULL<<22), ~(1ULL<<21), ~(1ULL<<20), ~(1ULL<<19), ~(1ULL<<18), ~(1ULL<<17), ~(1ULL<<16),
				~(1ULL<<15), ~(1ULL<<14), ~(1ULL<<13), ~(1ULL<<12), ~(1ULL<<11), ~(1ULL<<10), ~(1ULL<<9), ~(1ULL<<8),
				~(1ULL<<7), ~(1ULL<<6), ~(1ULL<<5), ~(1ULL<<4), ~(1ULL<<3), ~(1ULL<<2), ~(1ULL<<1), ~(1ULL<<0)
			};
			static const uint64_t insone[] = {
				0ULL, (1ULL<<63), 0ULL, (1ULL<<62), 0ULL, (1ULL<<61), 0ULL, (1ULL<<60), 0ULL, (1ULL<<59), 0ULL, (1ULL<<58), 0ULL, (1ULL<<57), 0ULL, (1ULL<<56),
				0ULL, (1ULL<<55), 0ULL, (1ULL<<54), 0ULL, (1ULL<<53), 0ULL, (1ULL<<52), 0ULL, (1ULL<<51), 0ULL, (1ULL<<50), 0ULL, (1ULL<<49), 0ULL, (1ULL<<48),
				0ULL, (1ULL<<47), 0ULL, (1ULL<<46), 0ULL, (1ULL<<45), 0ULL, (1ULL<<44), 0ULL, (1ULL<<43), 0ULL, (1ULL<<42), 0ULL, (1ULL<<41), 0ULL, (1ULL<<40),
				0ULL, (1ULL<<39), 0ULL, (1ULL<<38), 0ULL, (1ULL<<37), 0ULL, (1ULL<<36), 0ULL, (1ULL<<35), 0ULL, (1ULL<<34), 0ULL, (1ULL<<33), 0ULL, (1ULL<<32),
				0ULL, (1ULL<<31), 0ULL, (1ULL<<30), 0ULL, (1ULL<<29), 0ULL, (1ULL<<28), 0ULL, (1ULL<<27), 0ULL, (1ULL<<26), 0ULL, (1ULL<<25), 0ULL, (1ULL<<24),
				0ULL, (1ULL<<23), 0ULL, (1ULL<<22), 0ULL, (1ULL<<21), 0ULL, (1ULL<<20), 0ULL, (1ULL<<19), 0ULL, (1ULL<<18), 0ULL, (1ULL<<17), 0ULL, (1ULL<<16),
				0ULL, (1ULL<<15), 0ULL, (1ULL<<14), 0ULL, (1ULL<<13), 0ULL, (1ULL<<12), 0ULL, (1ULL<<11), 0ULL, (1ULL<<10), 0ULL, (1ULL<<9), 0ULL, (1ULL<<8),
				0ULL, 1ULL<<7,0ULL,1ULL<<6,0ULL,1ULL<<5,0ULL,1ULL<<4,0ULL,1ULL<<3,0ULL,1ULL<<2,0ULL,1ULL<<1,0ULL,1ULL<<0,0ULL
			};

			uint64_t const wordoffset = offset>>6;
			uint64_t const bitoffset = offset&0x3fu;

			__sync_fetch_and_and(A+wordoffset,maskone[bitoffset]);
			__sync_fetch_and_or (A+wordoffset,insone[(bitoffset<<1)|v]);
		}
Example #12
static inline void arch_perfAddBranch(uint64_t from, uint64_t to)
{
    /*
     * Kernel sometimes reports branches from the kernel (iret), we are not interested in that as it
     * makes the whole concept of unique branch counting less predictable
     */
    if (__builtin_expect(from > 0xFFFFFFFF00000000, false)
        || __builtin_expect(to > 0xFFFFFFFF00000000, false)) {
        LOG_D("Adding branch %#018" PRIx64 " - %#018" PRIx64, from, to);
        return;
    }
    if (from >= perfCutOffAddr || to >= perfCutOffAddr) {
        return;
    }

    register size_t pos = 0ULL;
    if (perfDynamicMethod == _HF_DYNFILE_UNIQUE_BLOCK_COUNT) {
        pos = from % (_HF_PERF_BLOOM_SZ * 8);
    } else if (perfDynamicMethod == _HF_DYNFILE_UNIQUE_EDGE_COUNT) {
        pos = (from * to) % (_HF_PERF_BLOOM_SZ * 8);
    }

    size_t byteOff = pos / 8;
    uint8_t bitSet = (uint8_t) (1 << (pos % 8));

    register uint8_t prev = __sync_fetch_and_or(&perfBloom[byteOff], bitSet);
    if (!(prev & bitSet)) {
        perfBranchesCnt++;
    }
}
Example #13
static long __syscall_dup(long fd, long cmd, unsigned long start_fd)
{
	int sysid;
	long dup_fd, dup_dfd, dfd;

	dfd = sclib_file_getid(&sclib_file, fd, &sysid);
	SCLIB_ERR_RET(dfd);
	dup_fd = sclib_file_add(&sclib_file, start_fd);
	SCLIB_VAL_RET(dup_fd, dup_dfd);

	__sync_fetch_and_or(&sclib_file.fds[fd].ectl_doms, 0xFF);

	switch (sysid)
	{
	default:
		dup_dfd = SCLIB_REMOTE_CALL(sysid, fcntl, 3, dfd, cmd, 0);
		break;

	case SYSCALL_SYSID_LOCAL:
		dup_dfd = SCLIB_LOCAL_CALL(fcntl, 3, dfd, cmd, 0);
		break;

	case SYSCALL_SYSID_ALL:
	{
		sclib_fd_t *aux, *dup_aux;
		size_t n, i;

		aux = sclib_file_aux(&sclib_file, fd);
		dup_aux = sclib_file_aux(&sclib_file, dup_fd);
		for (n = 0; n < SYSCALL_SYSIDS; n++) {
			dup_aux[n] = SCLIB_REMOTE_CALL(n, fcntl, 3, aux[n], cmd, 0);
			if (SCLIB_IS_ERR(dup_aux[n])) {
				dup_dfd = dup_aux[n];
				goto error_aux;
			}
		}
		dup_dfd = SCLIB_LOCAL_CALL(fcntl, 3, dfd, cmd, 0);
		if (SCLIB_IS_ERR(dup_dfd)) {
error_aux:
			for (i = 0; i < n; i++)
				SCLIB_REMOTE_CALL(i, close, 1, dup_aux[i]);
		}
		break;
	}
	}

	if (SCLIB_IS_ERR(dup_dfd)) {
		sclib_file_add_fail(&sclib_file, dup_fd);
	} else {
		uint8_t dup_dfd_flags = 0;
		if (cmd == F_DUPFD_CLOEXEC)
			dup_dfd_flags |= SCLIB_FD_EXEC;
		sclib_file_add_ok(&sclib_file, dup_fd, dup_dfd, sysid, dup_dfd_flags, 0xFF);
	}

error_val:
	sclib_file_put(&sclib_file, fd);	
	SCLIB_ERR_RET(dup_dfd);
	return dup_fd;
}
Example #14
		inline void putBit2Sync(iterator A, uint64_t const offset, uint16_t v)
		{
			static const uint16_t maskone[] = {
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<15)), static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<14)), static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<13)),
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<12)), static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<11)), static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<10)),
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<9)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<8)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<7)),
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<6)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<5)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<4)),
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<3)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<2)),  static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<1)),
				static_cast<uint16_t>(~(static_cast<uint16_t>(1)<<0))
			};
			static const uint16_t insone[] = {
				0u, (static_cast<uint16_t>(1)<<15), 0u, (static_cast<uint16_t>(1)<<14),
				0u, (static_cast<uint16_t>(1)<<13), 0u, (static_cast<uint16_t>(1)<<12),
				0u, (static_cast<uint16_t>(1)<<11), 0u, (static_cast<uint16_t>(1)<<10),
				0u, (static_cast<uint16_t>(1)<<9),  0u, (static_cast<uint16_t>(1)<<8),
				0u, (static_cast<uint16_t>(1)<<7), 0u, (static_cast<uint16_t>(1)<<6),
				0u, (static_cast<uint16_t>(1)<<5), 0u, (static_cast<uint16_t>(1)<<4),
				0u, (static_cast<uint16_t>(1)<<3), 0u, (static_cast<uint16_t>(1)<<2),
				0u, (static_cast<uint16_t>(1)<<1), 0u, (static_cast<uint16_t>(1)<<0)
			};
			uint64_t const wordoffset = offset>>4;
			uint64_t const bitoffset = offset&0xfu;
			__sync_fetch_and_and(A+wordoffset,maskone[bitoffset]);
			__sync_fetch_and_or (A+wordoffset,insone[(bitoffset<<1)|v]);
		}
Example #15
bool VInterlocked::TestAndSet( sLONG *inValue, sLONG inBitNumber)
{
    xbox_assert( inBitNumber >= 0 && inBitNumber <= 31);

#if VERSIONMAC

    // works on byte ((char*)theAddress + (n>>3))
    // and operate on bit (0x80>>(n&7))
    #if SMALLENDIAN
    return ::OSAtomicTestAndSetBarrier( inBitNumber ^ 7, inValue);
    #else
    return ::OSAtomicTestAndSetBarrier( 31 - inBitNumber, inValue);
    #endif

#elif VERSION_LINUX

    uLONG mask=0x1<<inBitNumber;
    sLONG val=__sync_fetch_and_or(inValue, mask);
    
    return (val & mask ? true : false);

#elif VERSIONWIN

    // always work on a long
    return InterlockedBitTestAndSet( (LONG*) inValue, inBitNumber) != 0;

#endif
}
Example #16
void bt_readlock(BtLatch *latch)
{
ushort prev;

  do {
	//	obtain latch mutex
#ifdef unix
	if( __sync_fetch_and_or((ushort *)latch, Mutex) & Mutex )
		continue;
#else
	if( prev = _InterlockedOr16((ushort *)latch, Mutex) & Mutex )
		continue;
#endif
	//  see if exclusive request is granted or pending

	if( prev = !(latch->exclusive | latch->pending) )
#ifdef unix
		__sync_fetch_and_add((ushort *)latch, Share);
#else
		_InterlockedExchangeAdd16 ((ushort *)latch, Share);
#endif

#ifdef unix
	__sync_fetch_and_and ((ushort *)latch, ~Mutex);
#else
	_InterlockedAnd16((ushort *)latch, ~Mutex);
#endif

	if( prev )
		return;
#ifdef  unix
  } while( sched_yield(), 1 );
#else
  } while( SwitchToThread(), 1 );
#endif
}
Example #17
  // Insert key with given hashes
  unsigned int insert__(const uint64_t *hashes) {
    // Prefetch memory locations
    // This static_assert make clang++ happy...
    static_assert(std::is_pod<typename super::prefetch_info>::value, "prefetch_info must be a POD");

    typename super::prefetch_info pinfo[super::k_];
    const size_t base    = super::d_.remainder(hashes[0]);
    const size_t inc     = super::d_.remainder(hashes[1]);
    for(unsigned long i = 0; i < super::k_; ++i) {
      const size_t pos   = super::d_.remainder(base + i * inc);
      const size_t elt_i = pos / 8;
      pinfo[i].boff      = pos % 8;
      pinfo[i].pos       = super::data_ + elt_i;
      __builtin_prefetch(pinfo[i].pos, 1, 0);
    }

    // Check if element present
    bool present = true;
    for(unsigned long i = 0; i < super::k_; ++i) {
      const char mask = (char)1 << pinfo[i].boff;
      const char prev = __sync_fetch_and_or(pinfo[i].pos, mask);
      present         = present && (prev & mask);
    }

    return present;
  }
Example #18
// Copy colour set bits from src to dst
void gpath_colset_or_mt(GPath *dst_gp, const GPath *src_gp, size_t ncols)
{
  uint8_t *dst = gpath_get_colset(dst_gp, ncols);
  const uint8_t *src = gpath_get_colset(src_gp, ncols);
  size_t i, nbytes = (ncols+7)/8;
  for(i = 0; i < nbytes; i++)
    __sync_fetch_and_or((volatile uint8_t*)&dst[i], src[i]);
}
Example #19
void test_fetch_and_op (void)
{
  sc = __sync_fetch_and_add (&sc, 11);
  uc = __sync_fetch_and_add (&uc, 11);
  ss = __sync_fetch_and_add (&ss, 11);
  us = __sync_fetch_and_add (&us, 11);
  si = __sync_fetch_and_add (&si, 11);
  ui = __sync_fetch_and_add (&ui, 11);
  sll = __sync_fetch_and_add (&sll, 11);
  ull = __sync_fetch_and_add (&ull, 11);

  sc = __sync_fetch_and_sub (&sc, 11);
  uc = __sync_fetch_and_sub (&uc, 11);
  ss = __sync_fetch_and_sub (&ss, 11);
  us = __sync_fetch_and_sub (&us, 11);
  si = __sync_fetch_and_sub (&si, 11);
  ui = __sync_fetch_and_sub (&ui, 11);
  sll = __sync_fetch_and_sub (&sll, 11);
  ull = __sync_fetch_and_sub (&ull, 11);

  sc = __sync_fetch_and_or (&sc, 11);
  uc = __sync_fetch_and_or (&uc, 11);
  ss = __sync_fetch_and_or (&ss, 11);
  us = __sync_fetch_and_or (&us, 11);
  si = __sync_fetch_and_or (&si, 11);
  ui = __sync_fetch_and_or (&ui, 11);
  sll = __sync_fetch_and_or (&sll, 11);
  ull = __sync_fetch_and_or (&ull, 11);

  sc = __sync_fetch_and_xor (&sc, 11);
  uc = __sync_fetch_and_xor (&uc, 11);
  ss = __sync_fetch_and_xor (&ss, 11);
  us = __sync_fetch_and_xor (&us, 11);
  si = __sync_fetch_and_xor (&si, 11);
  ui = __sync_fetch_and_xor (&ui, 11);
  sll = __sync_fetch_and_xor (&sll, 11);
  ull = __sync_fetch_and_xor (&ull, 11);

  sc = __sync_fetch_and_and (&sc, 11);
  uc = __sync_fetch_and_and (&uc, 11);
  ss = __sync_fetch_and_and (&ss, 11);
  us = __sync_fetch_and_and (&us, 11);
  si = __sync_fetch_and_and (&si, 11);
  ui = __sync_fetch_and_and (&ui, 11);
  sll = __sync_fetch_and_and (&sll, 11);
  ull = __sync_fetch_and_and (&ull, 11);

  sc = __sync_fetch_and_nand (&sc, 11);
  uc = __sync_fetch_and_nand (&uc, 11);
  ss = __sync_fetch_and_nand (&ss, 11);
  us = __sync_fetch_and_nand (&us, 11);
  si = __sync_fetch_and_nand (&si, 11);
  ui = __sync_fetch_and_nand (&ui, 11);
  sll = __sync_fetch_and_nand (&sll, 11);
  ull = __sync_fetch_and_nand (&ull, 11);
}
Example #20
static VALUE dispatcher_callback_enabled_set(VALUE self, VALUE enabled) {
    if (RTEST(enabled)) {
        __sync_fetch_and_or(&callback_enabled, 1);
    } else {
        __sync_fetch_and_and(&callback_enabled, 0);
    }
    return enabled;
}
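For completeness, a reader of this flag could use the same builtins. The getter below is a hypothetical sketch, not taken from the extension's source, using the fetch-and-or-with-zero idiom that several later examples rely on for an atomic read:

static int dispatcher_callback_enabled_get(void) {
    /* OR-ing with 0 leaves the value unchanged but returns it atomically,
     * with the full barrier the __sync builtins imply */
    return __sync_fetch_and_or(&callback_enabled, 0);
}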
Example #21
bool FlvMicrotome::SetDataRate(uint32_t datarate)
{
    data_rate_ = datarate; 
    //flv_info_buf_status_ |= FLV_DATARATE_OK;
    __sync_fetch_and_or(&flv_info_buf_status_, FLV_DATARATE_OK);
    INFO("SetDataRate ok status is FLV_DATARATE_OK");
    return true;
}
Example #22
void test_op_ignore (void)
{
  (void) __sync_fetch_and_add (&sc, 1);
  (void) __sync_fetch_and_add (&uc, 1);
  (void) __sync_fetch_and_add (&ss, 1);
  (void) __sync_fetch_and_add (&us, 1);
  (void) __sync_fetch_and_add (&si, 1);
  (void) __sync_fetch_and_add (&ui, 1);
  (void) __sync_fetch_and_add (&sll, 1);
  (void) __sync_fetch_and_add (&ull, 1);

  (void) __sync_fetch_and_sub (&sc, 1);
  (void) __sync_fetch_and_sub (&uc, 1);
  (void) __sync_fetch_and_sub (&ss, 1);
  (void) __sync_fetch_and_sub (&us, 1);
  (void) __sync_fetch_and_sub (&si, 1);
  (void) __sync_fetch_and_sub (&ui, 1);
  (void) __sync_fetch_and_sub (&sll, 1);
  (void) __sync_fetch_and_sub (&ull, 1);

  (void) __sync_fetch_and_or (&sc, 1);
  (void) __sync_fetch_and_or (&uc, 1);
  (void) __sync_fetch_and_or (&ss, 1);
  (void) __sync_fetch_and_or (&us, 1);
  (void) __sync_fetch_and_or (&si, 1);
  (void) __sync_fetch_and_or (&ui, 1);
  (void) __sync_fetch_and_or (&sll, 1);
  (void) __sync_fetch_and_or (&ull, 1);

  (void) __sync_fetch_and_xor (&sc, 1);
  (void) __sync_fetch_and_xor (&uc, 1);
  (void) __sync_fetch_and_xor (&ss, 1);
  (void) __sync_fetch_and_xor (&us, 1);
  (void) __sync_fetch_and_xor (&si, 1);
  (void) __sync_fetch_and_xor (&ui, 1);
  (void) __sync_fetch_and_xor (&sll, 1);
  (void) __sync_fetch_and_xor (&ull, 1);

  (void) __sync_fetch_and_and (&sc, 1);
  (void) __sync_fetch_and_and (&uc, 1);
  (void) __sync_fetch_and_and (&ss, 1);
  (void) __sync_fetch_and_and (&us, 1);
  (void) __sync_fetch_and_and (&si, 1);
  (void) __sync_fetch_and_and (&ui, 1);
  (void) __sync_fetch_and_and (&sll, 1);
  (void) __sync_fetch_and_and (&ull, 1);

  (void) __sync_fetch_and_nand (&sc, 1);
  (void) __sync_fetch_and_nand (&uc, 1);
  (void) __sync_fetch_and_nand (&ss, 1);
  (void) __sync_fetch_and_nand (&us, 1);
  (void) __sync_fetch_and_nand (&si, 1);
  (void) __sync_fetch_and_nand (&ui, 1);
  (void) __sync_fetch_and_nand (&sll, 1);
  (void) __sync_fetch_and_nand (&ull, 1);
}
Example #23
//Free a cb_vector_array struct and its data
void cb_vector_array_free(struct cb_vector_array *array){
    //Wait until no thread is accessing the array
    while (__sync_fetch_and_or(&(array->th), 0) > 0) {}

    for (int i = 0; i < array->size; i++)
        free(array->data[i]);
    free(array->data);
    free(array);
}
Example #24
  inline void set_union(Set<bitset> *A_in,Set<variant> *B_in){
    uint64_t* A = (uint64_t*)(A_in->data+sizeof(uint64_t));
    const uint64_t start_index = (A_in->number_of_bytes > 0) ? ((uint64_t*)A_in->data)[0]:0;

    B_in->foreach( [&A_in,&A,start_index] (uint32_t cur){
      const size_t word_index = bitset::word_index(cur);
      if(!(A[word_index-start_index] & ((uint64_t)1 << (cur % BITS_PER_WORD))))
        __sync_fetch_and_or(&A[word_index-start_index],((uint64_t) 1 << (cur % BITS_PER_WORD)));
    });
  }
Example #25
static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
    slabclass_t *p;

    assert(((item *)ptr)->slabs_clsid == 0);
    assert(id >= POWER_SMALLEST && id <= power_largest);
    if (id < POWER_SMALLEST || id > power_largest)
        return;

    MEMCACHED_SLABS_FREE(size, id, ptr);
    p = &slabclass[id];

#ifdef USE_SYSTEM_MALLOC
    mem_malloced -= size;
    free(ptr);
    return;
#endif

#ifdef TEST_LRU
    item *it = (item *)ptr;
    //it->it_flags |= ITEM_SLABBED;
    __sync_fetch_and_or(&it->it_flags, ITEM_SLABBED);
    it->prev = 0;
    it->next = p->slots;
    if (it->next) it->next->prev = it;
    p->slots = it;
#endif

#ifdef TEST_CLOCK
    slabbed_item *sl_it = (slabbed_item *)ptr;
    //sl_it->it_flags |= ITEM_SLABBED;
    __sync_fetch_and_or(&sl_it->it_flags, ITEM_SLABBED);
    sl_it->prev = 0;
    sl_it->next = p->slots;
    if (sl_it->next) sl_it->next->prev = sl_it;
    p->slots = sl_it;
#endif

    p->sl_curr++;
    p->requested -= size;
    return;
}
Example #26
//============================================================================
//		NTargetThread::AtomicOr32 : Atomic 32-bit or.
//----------------------------------------------------------------------------
void NTargetThread::AtomicOr32(UInt32 &theValue, UInt32 theMask)
{


	// Validate our parameters
	NN_ASSERT_ALIGNED_4(&theValue);



	// OR the value
	__sync_fetch_and_or(&theValue, theMask);
}
Example #27
RU64
    rInterlocked_get64
    (
        volatile RU64* pRu64
    )
{
#ifdef RPAL_PLATFORM_WINDOWS
    return InterlockedCompareExchange64( (LONG64*)pRu64, *pRu64, *pRu64 );
#elif defined( RPAL_PLATFORM_LINUX ) || defined( RPAL_PLATFORM_MACOSX )
    return __sync_fetch_and_or( pRu64, 0 );
#endif
}
Example #28
RU32
    rInterlocked_get32
    (
        volatile RU32* pRu32
    )
{
#ifdef RPAL_PLATFORM_WINDOWS
    return InterlockedCompareExchange( (LONG*)pRu32, *pRu32, *pRu32 );
#elif defined( RPAL_PLATFORM_LINUX ) || defined( RPAL_PLATFORM_MACOSX )
    return __sync_fetch_and_or( pRu32, 0 );
#endif
}
Example #29
int pthread_spin_trylock(pthread_spinlock_t* lock)
{
  int ret;

  if (__sync_fetch_and_or(lock, 1) == 0)
    ret = 0;
  else
    ret = EBUSY;

  __sync_synchronize();

  return ret;
}
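The example shows only the trylock path. A matching release, sketched here under the assumption that the lock word is 0 when free and non-zero when held (this function is not part of the example above), clears the word after a full barrier:

int pthread_spin_unlock_sketch(pthread_spinlock_t* lock)
{
  /* order the critical section's writes before the release */
  __sync_synchronize();
  /* atomically clear the lock word so the next trylock can succeed */
  __sync_fetch_and_and(lock, 0);
  return 0;
}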
Example #30
unsigned
Atomic::OR(unsigned value)
{
#if defined(_OPENTHREADS_ATOMIC_USE_GCC_BUILTINS)
    return __sync_fetch_and_or(&_value, value);
#elif defined(_OPENTHREADS_ATOMIC_USE_WIN32_INTERLOCKED)
    return _InterlockedOr(&_value, value);
#elif defined(_OPENTHREADS_ATOMIC_USE_BSD_ATOMIC)
    return OSAtomicOr32((uint32_t)value, (uint32_t *)&_value);
#else
# error This implementation should happen inline in the include file
#endif
}