Code Example #1
File: ReadWriteLock.cpp Project: minortones/kissur
	WriteGuard ReadWriteLock::Write()
	{
		const ThreadID threadID = GetCurrentThreadId();
		const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
		while (atomic_compare_and_swap(&mMutualExclusivityMask, exclusive_none, lock_key) != exclusive_none)
		{
			if (threadID == mWriteEntryThread)
			{
				atomic_increment((u32*)&mReentrancyCount);
				break;
			}
			cond_wait();
		}

		KS_ASSERT(mWriteEntryThread == 0 || mWriteEntryThread == threadID);
		mWriteEntryThread = threadID;

		return WriteGuard(this);
	}
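Across the examples collected on this page, atomic_increment(&x) is a thin wrapper over an atomic fetch-and-add of 1; the exact signature differs per project (some take a raw pointer, some take an extra lock argument, some take an amount, some return the old or the new value). A minimal C++11 stand-in, shown only for orientation and not taken from any of the projects on this page:

#include <atomic>

// Hypothetical equivalent of the project-specific atomic_increment helpers:
// atomically adds 1 to *mem and returns the resulting value.
template <typename T>
T atomic_increment(std::atomic<T> *mem)
{
    return mem->fetch_add(1, std::memory_order_seq_cst) + 1;
}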
Code Example #2
File: lock.cpp Project: GridOPTICS/FNCS-gridlab-d
/** Check a lock trace
 **/
void check_lock(unsigned int *lock, bool write, bool unlock)
{
	LOCKLIST *item;

	// lock locklist
	static unsigned int check_lock=0;
	unsigned int timeout = MAXSPIN;
	unsigned int value;
	do {
		value = check_lock;
		if ( timeout--==0 ) 
			throw_exception("check lock timeout");
	} while ((value&1) || !atomic_compare_and_swap(&check_lock, value, value + 1));

	for ( item=locklist ; item!=NULL ; item=item->next )
	{
		if ( item->lock==lock )
			break;
	}
	if ( item==NULL )
	{
		printf("%s %slock(%p) = %d (unregistered)\n", 
			write?"write":"read ", 
			unlock?"un":"  ",
			lock,
			*lock);
		register_lock("unregistered",lock);
	}
	else 
	{
		bool no_lock = unlock&&((*lock&1)!=1);
//		bool damage = abs(item->last_value-*lock)>1;
//		if ( damage ) // found a registered lock that was damaged
//			printf("%s %slock(%p) = %d (%s) - possible damage (last=%d)\n", write?"write":"read ", unlock?"un":"  ",lock,*lock,item->name, item->last_value);
		if ( no_lock ) // found a registered lock that was not locked
			printf("%s %slock(%p) = %d (%s) - no lock detected (last=%d)\n", write?"write":"read ", unlock?"un":"  ",lock,*lock,item->name, item->last_value);
		item->last_value = *lock;
	}

	// unlock locklist
	atomic_increment(&check_lock);
}
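The check_lock routine above protects the lock list with an even/odd spin lock: the counter is claimed by a compare-and-swap from an even value to the next odd value, and released at the end of the function by a further atomic_increment that makes it even again. A minimal self-contained sketch of that pattern with C++11 std::atomic, using assumed names rather than the gridlab-d implementation:

#include <atomic>

// Even value = unlocked, odd value = locked; the counter doubles as a
// version number that grows by 2 per lock/unlock cycle.
static std::atomic<unsigned int> locklist_lock{0};

static void locklist_acquire()
{
    unsigned int value;
    do {
        value = locklist_lock.load(std::memory_order_relaxed);
    } while ((value & 1) ||                       // currently locked, spin
             !locklist_lock.compare_exchange_weak(value, value + 1,
                                                  std::memory_order_acquire));
}

static void locklist_release()
{
    // Incrementing the odd value makes it even again, releasing the lock.
    locklist_lock.fetch_add(1, std::memory_order_release);
}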
Code Example #3
File: ticketlock_cap.c Project: amintimany/verifast
void ticket_lock_unlock(struct ticketlock* l)
  //@ requires is_locked(l, ?f);
  //@ ensures [f]ticketlock(l);
{
;
  //@ open is_locked(l, f);
  //@ assert holds_lock(?ha, ?id, ?ticket);
  /*@
  consuming_box_predicate ticketlock_box(id, l)
  consuming_handle_predicate holds_lock(ha, ticket)
  perform_action next(ticket)
  {
    @*/
    int i = atomic_increment(&l->owner);    
    /*@
    close ticketlock_owner(l, _);
  };
  @*/
  //@close [f]ticketlock(l);
  //@ leak ticketlock_box_next(id, _);
}
Code Example #4
File: sp_counted_base.cpp Project: melpon/cppcms
void sp_counted_base::weak_add_ref() // nothrow
{
    atomic_increment( &weak_count_,&lock_ );
}
Code Example #5
void __pthread_restart_old(pthread_descr th)
{
    if (atomic_increment(&th->p_resume_count) == -1)
	kill(th->p_pid, __pthread_sig_restart);
}
Code Example #6
 void weak_add_ref() // nothrow
 {
     atomic_increment( &weak_count_ );
 }
Code Example #7
File: createthread.c Project: jameshilliard/20-4-4
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (stopped)
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  if (ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		  pd, &pd->tid, TLS_VALUE, &pd->tid) == -1)
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      return errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  sizeof (cpu_set_t), attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Code Example #8
File: tst-atomic.c Project: AubrCool/glibc
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem, expected;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_acq (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add_rel (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  /* Tests for C11-like atomics.  */
  mem = 11;
  if (atomic_load_relaxed (&mem) != 11 || atomic_load_acquire (&mem) != 11)
    {
      puts ("atomic_load_{relaxed,acquire} test failed");
      ret = 1;
    }

  atomic_store_relaxed (&mem, 12);
  if (mem != 12)
    {
      puts ("atomic_store_relaxed test failed");
      ret = 1;
    }
  atomic_store_release (&mem, 13);
  if (mem != 13)
    {
      puts ("atomic_store_release test failed");
      ret = 1;
    }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_relaxed (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_relaxed (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_relaxed test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_acquire (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_acquire test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_acquire (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_acquire test 2 failed");
      ret = 1;
    }
  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_release (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    {
      puts ("atomic_compare_exchange_weak_release test 1 failed");
      ret = 1;
    }
  if (atomic_compare_exchange_weak_release (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    {
      puts ("atomic_compare_exchange_weak_release test 2 failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_exchange_acquire (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_exchange_release (&mem, 42) != 23 || mem != 42)
    {
      puts ("atomic_exchange_release test failed");
      ret = 1;
    }

  mem = 23;
  if (atomic_fetch_add_relaxed (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_relaxed test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acquire (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acquire test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_release (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_release test failed");
      ret = 1;
    }
  mem = 23;
  if (atomic_fetch_add_acq_rel (&mem, 1) != 23 || mem != 24)
    {
      puts ("atomic_fetch_add_acq_rel test failed");
      ret = 1;
    }

  mem = 3;
  if (atomic_fetch_and_acquire (&mem, 2) != 3 || mem != 2)
    {
      puts ("atomic_fetch_and_acquire test failed");
      ret = 1;
    }

  mem = 4;
  if (atomic_fetch_or_relaxed (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_relaxed test failed");
      ret = 1;
    }
  mem = 4;
  if (atomic_fetch_or_acquire (&mem, 2) != 4 || mem != 6)
    {
      puts ("atomic_fetch_or_acquire test failed");
      ret = 1;
    }

  /* This is a single-threaded test, so we can't test the effects of the
     fences.  */
  atomic_thread_fence_acquire ();
  atomic_thread_fence_release ();
  atomic_thread_fence_seq_cst ();

  return ret;
}
Code Example #9
 void weak_add_ref() {
     atomic_increment(&weak_count_);
 }
Code Example #10
File: shared_data.cpp Project: ropez/pieces
bool SharedData::ref()
{
    return atomic_increment(&m_count);
}
Code Example #11
File: createthread.c Project: Drakey83/steamlink-sdk
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		       pd, &pd->tid, TLS_VALUE, &pd->tid);

  if (__builtin_expect (rc == -1, 0))
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Perhaps a thread wants to change the IDs and is waiting
	 for this stillborn thread.  */
      if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
			    == -2, 0))
	lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

      /* Free the resources.  */
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);

	      /* We do not free the stack here because the canceled thread
		 itself will do this.  */

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Code Example #12
File: nullclient.c Project: kristopolous/taim
void *taim_server(void*stupid) {
  unsigned int addrlen;

  int server,
      client,
      ret,
      ix,
      yes=1;

  struct 	sockaddr_in 	name;
  struct	sockaddr	addr;
  struct	hostent 	*gethostbyaddr();

  // End of Declaration
  atomic_increment();

  addr.sa_family = AF_INET;
  strcpy(addr.sa_data, "somename");

  name.sin_family = AF_INET;
  name.sin_port = htons(19091);
  name.sin_addr.s_addr = INADDR_ANY;

  server = socket(PF_INET, SOCK_STREAM, 0);
  handle_register(server);

  setsockopt(server, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes));

  // setsockopt
  if(bind(server, (struct sockaddr*)&name, sizeof(name))<0) {
    printf("Could not bind to port: ");
    while(bind(server, (struct sockaddr*)&name, sizeof(name))<0) {	
      printf(".");
      usleep(990000);
      fflush(0);
    }
    printf("\n");
  }

  printf("Running on port:\t%d\n", ntohs(name.sin_port));
  addrlen = sizeof(addr);

  getsockname(server, &addr, &addrlen);
  listen(server, 10);
  for(;;) {

    client = accept(server,0,0);
    if(client == -1) {

      if (g_die) {
        break;
      }

      continue;
    }

    handle_register(client);
    for(ix = 0; ix < MAX_CONNECT; ix++) {

      if(g_client_inuse[ix] == 0) {
        client_struct toPass;

        toPass.client = client;
        toPass.thread = ix;
        ret = pthread_create(&g_client_thread[ix], 0, client_chat, (void*)&toPass);
        pthread_detach(g_client_thread[ix]);
        break;
      }
    }
    handle_deregister(client);
  }

  fcntl(server,F_SETFL,O_NONBLOCK);

  atomic_decrement();
  return 0;
}
Code Example #13
File: nullclient.c Project: kristopolous/taim
void *client_chat(void*in) {	
  int ret = 0;

  taim_buddy *tbuddy = 0;

  taim_session *pses = 0;
  taim_pipe *ptofree = 0;

  char buffer[BUFFER_SIZE] = {0},
       ret_buffer[BUFFER_SIZE] = {0},
       *uid;

  client_struct*pin = ((client_struct*)in);

  // End declaration

  // Raise the lock count
  atomic_increment();
  g_client_inuse[pin->thread] = 1;

  ret = read(pin->client, buffer, BUFFER_SIZE);

  // No data found, return
  if(ret <= 0) {
    atomic_decrement();
    return 0;
  }

  d(buffer);
  // Get the uid from the input string
  ret = parse(buffer, ret_buffer, &uid);
  if(ret == RET_ERROR) {
    atomic_decrement();
    return 0;
  }

  // Find the uid structure
  pses = uid_find(uid);
  if(pses != NULL) {

    if(ret == RET_DATA) {
      // clear the outgoing buffers
      buddy_ret_clear(pses);

      // lock the pipe mutex and fill the pipe
      pthread_mutex_lock(&pses->pses->pipe_mutex);
      for(;;) {

        if(pses->pses->cpipe->next == 0) {
          break;
        }

        // if a buddy was talking, set it as active
        tbuddy = buddy_get(pses, pses->pses->cpipe->user);
        if(tbuddy != NULL) {
          buddy_set_active(pses, tbuddy);

          buddy_ret_add(
              pses,
              tbuddy,
              pses->pses->cpipe->data,
              strlen(pses->pses->cpipe->data));
        }

        // clear this member of the linked list and move on
        ptofree = pses->pses->cpipe;
        pses->pses->cpipe = pses->pses->cpipe->next;
        free(ptofree);
      }

      // generate the list of buddies to return
      buddy_get_list(pses);

      // fill the buffer with this list
      buddy_ret_print(pses, ret_buffer);

      // unlock the pipes
      pthread_mutex_unlock(&pses->pses->pipe_mutex);
    }	
  }

  ret = write(pin->client, ret_buffer, strlen(ret_buffer));

  // drop the connection
  close(pin->client);
  g_client_inuse[pin->thread] = 0;

  free(uid);
  atomic_decrement();

  return 0;
}	
Code Example #14
int
__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
{
  sigset_t unblock, initial_mask;
  int was_signalled = 0;
  sigjmp_buf jmpbuf;

  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      while (1) {
	struct timeval now;
	struct timespec reltime;

	/* Compute a time offset relative to now.  */
	__gettimeofday (&now, NULL);
	reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
	reltime.tv_sec = abstime->tv_sec - now.tv_sec;
	if (reltime.tv_nsec < 0) {
	  reltime.tv_nsec += 1000000000;
	  reltime.tv_sec -= 1;
	}

	/* Sleep for the required duration. If woken by a signal,
	   resume waiting as required by Single Unix Specification.  */
	if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)
	  break;
      }

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);
      was_signalled = 0;
    } else {
      was_signalled = 1;
    }
    THREAD_SETMEM(self, p_signal_jmp, NULL);
  }

  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal.  In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced.  Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */

  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
      return 1;
    }
    /* woke spontaneously but did not consume restart---caller must resolve */
    return 0;
  }
  /* woken due to restart signal */
  return 1;
}
Code Example #15
File: main.cpp Project: gaoxiaojun/task-schedulers
void child1(void* data) {
    //std::cout << "child1\n";
    atomic_increment(static_cast< child1_context* >(data)->counter);
}
Code Example #16
File: fork.c Project: JamesLinus/uClibc-or1k
pid_t
__libc_fork (void)
{
  pid_t pid;
  struct used_handler
  {
    struct fork_handler *handler;
    struct used_handler *next;
  } *allp = NULL;

  /* Run all the registered preparation handlers.  In reverse order.
     While doing this we build up a list of all the entries.  */
  struct fork_handler *runp;
  while ((runp = __fork_handlers) != NULL)
    {
      /* Make sure we read from the current RUNP pointer.  */
      atomic_full_barrier ();

      unsigned int oldval = runp->refcntr;

      if (oldval == 0)
	/* This means some other thread removed the list just after
	   the pointer has been loaded.  Try again.  Either the list
	   is empty or we can retry it.  */
	continue;

      /* Bump the reference counter.  */
      if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
						oldval + 1, oldval))
	/* The value changed, try again.  */
	continue;

      /* We bumped the reference counter for the first entry in the
	 list.  That means that none of the following entries will
	 just go away.  The unloading code works in the order of the
	 list.

         While executing the registered handlers we are building a
         list of all the entries so that we can go backward later on.  */
      while (1)
	{
	  /* Execute the handler if there is one.  */
	  if (runp->prepare_handler != NULL)
	    runp->prepare_handler ();

	  /* Create a new element for the list.  */
	  struct used_handler *newp
	    = (struct used_handler *) alloca (sizeof (*newp));
	  newp->handler = runp;
	  newp->next = allp;
	  allp = newp;

	  /* Advance to the next handler.  */
	  runp = runp->next;
	  if (runp == NULL)
	    break;

	  /* Bump the reference counter for the next entry.  */
	  atomic_increment (&runp->refcntr);
	}

      /* We are done.  */
      break;
    }

  __UCLIBC_IO_MUTEX_LOCK_CANCEL_UNSAFE(_stdio_openlist_add_lock);

#ifndef NDEBUG
  pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

  /* We need to prevent the getpid() code to update the PID field so
     that, if a signal arrives in the child very early and the signal
     handler uses getpid(), the value returned is correct.  */
  pid_t parentpid = THREAD_GETMEM (THREAD_SELF, pid);
  THREAD_SETMEM (THREAD_SELF, pid, -parentpid);

#ifdef ARCH_FORK
  pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
  pid = INLINE_SYSCALL (fork, 0);
#endif


  if (pid == 0)
    {
      struct pthread *self = THREAD_SELF;

      assert (THREAD_GETMEM (self, tid) != ppid);

      if (__fork_generation_pointer != NULL)
	*__fork_generation_pointer += 4;

      /* Adjust the PID field for the new process.  */
      THREAD_SETMEM (self, pid, THREAD_GETMEM (self, tid));

#if HP_TIMING_AVAIL
      /* The CPU clock of the thread and process have to be set to zero.  */
      hp_timing_t now;
      HP_TIMING_NOW (now);
      THREAD_SETMEM (self, cpuclock_offset, now);
      GL(dl_cpuclock_offset) = now;
#endif

      /* Reset the file list.  These are recursive mutexes.  */
      fresetlockfiles ();

      /* Reset locks in the I/O code.  */
      STDIO_INIT_MUTEX(_stdio_openlist_add_lock);

      /* XXX reset any locks in dynamic loader */

      /* Run the handlers registered for the child.  */
      while (allp != NULL)
	{
	  if (allp->handler->child_handler != NULL)
	    allp->handler->child_handler ();

	  /* Note that we do not have to wake any possible waiter.
 	     This is the only thread in the new process.  The count
 	     may have been bumped up by other threads doing a fork.
 	     We reset it to 1, to avoid waiting for non-existing
 	     thread(s) to release the count.  */
	  allp->handler->refcntr = 1;

	  /* XXX We could at this point look through the object pool
	     and mark all objects not on the __fork_handlers list as
	     unused.  This is necessary in case the fork() happened
	     while another thread called dlclose() and that call had
	     to create a new list.  */

	  allp = allp->next;
	}

      /* Initialize the fork lock.  */
      __fork_lock = LLL_LOCK_INITIALIZER;
    }
  else
    {
      assert (THREAD_GETMEM (THREAD_SELF, tid) == ppid);

      /* Restore the PID value.  */
      THREAD_SETMEM (THREAD_SELF, pid, parentpid);

      /* We execute this even if the 'fork' call failed.  */
      __UCLIBC_IO_MUTEX_UNLOCK_CANCEL_UNSAFE(_stdio_openlist_add_lock);

      /* Run the handlers registered for the parent.  */
      while (allp != NULL)
	{
	  if (allp->handler->parent_handler != NULL)
	    allp->handler->parent_handler ();

	  if (atomic_decrement_and_test (&allp->handler->refcntr)
	      && allp->handler->need_signal)
	    lll_futex_wake (allp->handler->refcntr, 1, LLL_PRIVATE);

	  allp = allp->next;
	}
    }

  return pid;
}
Code Example #17
	/**
	 * increment the current progress.
	 *
	 * Iff called by the master thread, it will also update the display if needed
	 *
	 * @param amount the number of newly performed tasks to report
	 *
	 * @return false iff the computation is aborted
	 */
	bool increment(unsigned long amount=1) {
		if ( is_aborted() )
			return false;
		return is_master() ? update_master(_current + amount) : atomic_increment(amount);
	}
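The documented contract above (atomically add the number of newly finished tasks, return false once the computation is aborted) can be captured in a small self-contained sketch; the class and member names here are assumptions for illustration, not the original project's types:

#include <atomic>

// Hypothetical worker-side progress counter matching the contract described
// in the comment above.
struct Progress {
    std::atomic<unsigned long> current{0};
    std::atomic<bool> aborted{false};

    // Returns false iff the computation has been aborted.
    bool increment(unsigned long amount = 1) {
        if (aborted.load(std::memory_order_relaxed))
            return false;
        current.fetch_add(amount, std::memory_order_relaxed);
        return true;
    }
};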
Code Example #18
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
			!= iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
						       MSG_NOSIGNAL))
			!= keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
			!= CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head), 0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
						     sizeof (head), 0))
			!= sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
	 thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
			   && head.timestamp + MAPPING_TIMEOUT < time (NULL),
			   0))
    goto out_close;

  size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
		 + head.data_size);

  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	{
	  /* Ugh, after all we went through the memory allocation failed.  */
	  __munmap (mapping, size);
	  goto out_close;
	}

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
		    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
	return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
	cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
	{
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_increment (&cur->counter);
	}
    }

  mapptr->lock = 0;

  return cur;
}


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
	  && memcmp (key, mapped->data + here->key, keylen) == 0)
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  const struct datahead *dh
	    = (struct datahead *) (mapped->data + here->packet);

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable && ((char *) dh + dh->allocsize
			     <= (char *) mapped->head + mapped->mapsize))
	    return dh;
	}

      work = here->next;
    }

  return NULL;
}
Code Example #19
File: tst-atomic.c Project: Drakey83/steamlink-sdk
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("atomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64
      || mem != 31)
    {
      puts ("atomic_exchange_acq test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("atomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("atomic_add test failed");
      ret = 1;
    }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    {
      puts ("atomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    {
      puts ("atomic_increment_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_increment_and_test (&mem)
      || mem != 1)
    {
      puts ("atomic_increment_and_test test 1 failed");
      ret = 1;
    }

  mem = 35;
  if (atomic_increment_and_test (&mem)
      || mem != 36)
    {
      puts ("atomic_increment_and_test test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (! atomic_increment_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_increment_and_test test 3 failed");
      ret = 1;
    }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("atomic_decrement test failed");
      ret = 1;
    }

  if (atomic_decrement_val (&mem) != 15)
    {
      puts ("atomic_decrement_val test failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_and_test (&mem)
      || mem != -1)
    {
      puts ("atomic_decrement_and_test test 1 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_decrement_and_test (&mem)
      || mem != 14)
    {
      puts ("atomic_decrement_and_test test 2 failed");
      ret = 1;
    }

  mem = 1;
  if (! atomic_decrement_and_test (&mem)
      || mem != 0)
    {
      puts ("atomic_decrement_and_test test 3 failed");
      ret = 1;
    }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0
      || mem != 0)
    {
      puts ("atomic_decrement_if_positive test 2 failed");
      ret = 1;
    }

  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1
      || mem != -1)
    {
      puts ("atomic_decrement_if_positive test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (! atomic_add_negative (&mem, 10)
      || mem != -2)
    {
      puts ("atomic_add_negative test 1 failed");
      ret = 1;
    }

  mem = 0;
  if (atomic_add_negative (&mem, 100)
      || mem != 100)
    {
      puts ("atomic_add_negative test 2 failed");
      ret = 1;
    }

  mem = 15;
  if (atomic_add_negative (&mem, -10)
      || mem != 5)
    {
      puts ("atomic_add_negative test 3 failed");
      ret = 1;
    }

  mem = -12;
  if (atomic_add_negative (&mem, 14)
      || mem != 2)
    {
      puts ("atomic_add_negative test 4 failed");
      ret = 1;
    }

  mem = 0;
  if (! atomic_add_negative (&mem, -1)
      || mem != -1)
    {
      puts ("atomic_add_negative test 5 failed");
      ret = 1;
    }

  mem = -31;
  if (atomic_add_negative (&mem, 31)
      || mem != 0)
    {
      puts ("atomic_add_negative test 6 failed");
      ret = 1;
    }

  mem = -34;
  if (atomic_add_zero (&mem, 31)
      || mem != -3)
    {
      puts ("atomic_add_zero test 1 failed");
      ret = 1;
    }

  mem = -36;
  if (! atomic_add_zero (&mem, 36)
      || mem != 0)
    {
      puts ("atomic_add_zero test 2 failed");
      ret = 1;
    }

  mem = 113;
  if (atomic_add_zero (&mem, -13)
      || mem != 100)
    {
      puts ("atomic_add_zero test 3 failed");
      ret = 1;
    }

  mem = -18;
  if (atomic_add_zero (&mem, 20)
      || mem != 2)
    {
      puts ("atomic_add_zero test 4 failed");
      ret = 1;
    }

  mem = 10;
  if (atomic_add_zero (&mem, -20)
      || mem != -10)
    {
      puts ("atomic_add_zero test 5 failed");
      ret = 1;
    }

  mem = 10;
  if (! atomic_add_zero (&mem, -10)
      || mem != 0)
    {
      puts ("atomic_add_zero test 6 failed");
      ret = 1;
    }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    {
      puts ("atomic_bit_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    {
      puts ("atomic_bit_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    {
      puts ("atomic_bit_set test 3 failed");
      ret = 1;
    }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1)
      || mem != 2)
    {
      puts ("atomic_bit_test_set test 1 failed");
      ret = 1;
    }

  mem = 8;
  if (! atomic_bit_test_set (&mem, 3)
      || mem != 8)
    {
      puts ("atomic_bit_test_set test 2 failed");
      ret = 1;
    }

#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35)
      || mem != 0x800000010LL)
    {
      puts ("atomic_bit_test_set test 3 failed");
      ret = 1;
    }

  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32)
      || mem != 0x100000000LL)
    {
      puts ("atomic_bit_test_set test 4 failed");
      ret = 1;
    }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_val_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_val_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_val_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_val_acq test 4 failed");
      ret = 1;
    }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24)
      || mem != 35)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 1 failed");
      ret = 1;
    }

  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15)
      || mem != 12)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 2 failed");
      ret = 1;
    }

  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15)
      || mem != -56)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 3 failed");
      ret = 1;
    }

  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0)
      || mem != -1)
    {
      puts ("catomic_compare_and_exchange_bool_acq test 4 failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2
      || mem != 13)
    {
      puts ("catomic_exchange_and_add test failed");
      ret = 1;
    }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    {
      puts ("catomic_add test failed");
      ret = 1;
    }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    {
      puts ("catomic_increment test failed");
      ret = 1;
    }

  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    {
      puts ("catomic_increment_val test failed");
      ret = 1;
    }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    {
      puts ("catomic_decrement test failed");
      ret = 1;
    }

  if (catomic_decrement_val (&mem) != 15)
    {
      puts ("catomic_decrement_val test failed");
      ret = 1;
    }

  return ret;
}
Code Example #20
File: fork.c Project: KubaKaszycki/kklibc
pid_t
__libc_fork (void)
{
    pid_t pid;
    struct used_handler
    {
        struct fork_handler *handler;
        struct used_handler *next;
    } *allp = NULL;

    /* Run all the registered preparation handlers.  In reverse order.
       While doing this we build up a list of all the entries.  */
    struct fork_handler *runp;
    while ((runp = __fork_handlers) != NULL)
    {
        /* Make sure we read from the current RUNP pointer.  */
        atomic_full_barrier ();

        unsigned int oldval = runp->refcntr;

        if (oldval == 0)
            /* This means some other thread removed the list just after
               the pointer has been loaded.  Try again.  Either the list
               is empty or we can retry it.  */
            continue;

        /* Bump the reference counter.  */
        if (atomic_compare_and_exchange_bool_acq (&__fork_handlers->refcntr,
                oldval + 1, oldval))
            /* The value changed, try again.  */
            continue;

        /* We bumped the reference counter for the first entry in the
        list.  That means that none of the following entries will
         just go away.  The unloading code works in the order of the
         list.

         While executing the registered handlers we are building a
         list of all the entries so that we can go backward later on.  */
        while (1)
        {
            /* Execute the handler if there is one.  */
            if (runp->prepare_handler != NULL)
                runp->prepare_handler ();

            /* Create a new element for the list.  */
            struct used_handler *newp
                = (struct used_handler *) alloca (sizeof (*newp));
            newp->handler = runp;
            newp->next = allp;
            allp = newp;

            /* Advance to the next handler.  */
            runp = runp->next;
            if (runp == NULL)
                break;

            /* Bump the reference counter for the next entry.  */
            atomic_increment (&runp->refcntr);
        }

        /* We are done.  */
        break;
    }

    _IO_list_lock ();

#ifndef NDEBUG
    pid_t ppid = THREAD_GETMEM (THREAD_SELF, tid);
#endif

    /* We need to prevent the getpid() code to update the PID field so
       that, if a signal arrives in the child very early and the signal
       handler uses getpid(), the value returned is correct.  */
    pid_t parentpid = THREAD_GETMEM (THREAD_SELF, pid);
    THREAD_SETMEM (THREAD_SELF, pid, -parentpid);

#ifdef ARCH_FORK
    pid = ARCH_FORK ();
#else
# error "ARCH_FORK must be defined so that the CLONE_SETTID flag is used"
    pid = INLINE_SYSCALL (fork, 0);
#endif


    if (pid == 0)
    {
        struct pthread *self = THREAD_SELF;

        assert (THREAD_GETMEM (self, tid) != ppid);

        /* See __pthread_once.  */
        if (__fork_generation_pointer != NULL)
            *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;

        /* Adjust the PID field for the new process.  */
        THREAD_SETMEM (self, pid, THREAD_GETMEM (self, tid));

#if HP_TIMING_AVAIL
        /* The CPU clock of the thread and process have to be set to zero.  */
        hp_timing_t now;
        HP_TIMING_NOW (now);
        THREAD_SETMEM (self, cpuclock_offset, now);
        GL(dl_cpuclock_offset) = now;
#endif

#ifdef __NR_set_robust_list
        /* Initialize the robust mutex list which has been reset during
        the fork.  We do not check for errors since if it fails here
         it failed at process start as well and no one could have used
         robust mutexes.  We also do not have to set
         self->robust_head.futex_offset since we inherit the correct
         value from the parent.  */
# ifdef SHARED
        if (__builtin_expect (__libc_pthread_functions_init, 0))
            PTHFCT_CALL (ptr_set_robust, (self));
# else
        extern __typeof (__nptl_set_robust) __nptl_set_robust
        __attribute__((weak));
        if (__builtin_expect (__nptl_set_robust != NULL, 0))
            __nptl_set_robust (self);
# endif
#endif

        /* Reset the file list.  These are recursive mutexes.  */
        fresetlockfiles ();

        /* Reset locks in the I/O code.  */
        _IO_list_resetlock ();

        /* Reset the lock the dynamic loader uses to protect its data.  */
        __rtld_lock_initialize (GL(dl_load_lock));

        /* Run the handlers registered for the child.  */
        while (allp != NULL)
        {
            if (allp->handler->child_handler != NULL)
                allp->handler->child_handler ();

            /* Note that we do not have to wake any possible waiter.
               This is the only thread in the new process.  The count
               may have been bumped up by other threads doing a fork.
               We reset it to 1, to avoid waiting for non-existing
               thread(s) to release the count.  */
            allp->handler->refcntr = 1;

            /* XXX We could at this point look through the object pool
               and mark all objects not on the __fork_handlers list as
               unused.  This is necessary in case the fork() happened
               while another thread called dlclose() and that call had
               to create a new list.  */

            allp = allp->next;
        }

        /* Initialize the fork lock.  */
        __fork_lock = LLL_LOCK_INITIALIZER;
    }
    else
    {
Code Example #21
File: tasksTests.cpp Project: Bewolf2/LoomSDK
static task_t *doAtomicInc(void *payload, task_t *t)
{
    atomic_increment(&gWorkDone);
    return NULL;
}
Code Example #22
File: lock.cpp Project: GridOPTICS/FNCS-gridlab-d
/** Write unlock
 **/
extern "C" void wunlock(unsigned int *lock)
{
	unsigned int value = *lock;
	check_lock(lock,true,true);
	atomic_increment(lock);
}
Code Example #23
File: tasksTests.cpp Project: Bewolf2/LoomSDK
task_t *dummyChildTask(void *payload, task_t *task)
{
    atomic_increment(&gChildWorkCount);
    loom_thread_sleep(2);
    return NULL;
}
Code Example #24
 void add_ref_copy()
 {
     atomic_increment( &use_count_ );
 }
Code Example #25
File: sp_counted_base.cpp Project: melpon/cppcms
void sp_counted_base::add_ref_copy()
{
    atomic_increment( &use_count_ ,&lock_);
}
Code Example #26
File: cache.c Project: mseaborn/plash-glibc
/* Add a new entry to the cache.  The return value is zero if the function
   call was successful.

   This function must be called with the read-lock held.

   We modify the table but we nevertheless only acquire a read-lock.
   This is ok since we use operations which would be safe even without
   locking, given that the `prune_cache' function never runs.  Using
   the readlock reduces the chance of conflicts.  */
int
cache_add (int type, const void *key, size_t len, struct datahead *packet,
	   bool first, struct database_dyn *table,
	   uid_t owner, bool prune_wakeup)
{
  if (__builtin_expect (debug_level >= 2, 0))
    {
      const char *str;
      char buf[INET6_ADDRSTRLEN + 1];
      if (type == GETHOSTBYADDR || type == GETHOSTBYADDRv6)
	str = inet_ntop (type == GETHOSTBYADDR ? AF_INET : AF_INET6,
			 key, buf, sizeof (buf));
      else
	str = key;

      dbg_log (_("add new entry \"%s\" of type %s for %s to cache%s"),
	       str, serv2str[type], dbnames[table - dbs],
	       first ? _(" (first)") : "");
    }

  unsigned long int hash = __nis_hash (key, len) % table->head->module;
  struct hashentry *newp;

  newp = mempool_alloc (table, sizeof (struct hashentry), 0);
  /* If we cannot allocate memory, just do not do anything.  */
  if (newp == NULL)
    {
      /* If necessary mark the entry as unusable so that lookups will
	 not use it.  */
      if (first)
	packet->usable = false;

      return -1;
    }

  newp->type = type;
  newp->first = first;
  newp->len = len;
  newp->key = (char *) key - table->data;
  assert (newp->key + newp->len <= table->head->first_free);
  newp->owner = owner;
  newp->packet = (char *) packet - table->data;
  assert ((newp->packet & BLOCK_ALIGN_M1) == 0);

  /* Put the new entry in the first position.  */
  do
    newp->next = table->head->array[hash];
  while (atomic_compare_and_exchange_bool_acq (&table->head->array[hash],
					       (ref_t) ((char *) newp
							- table->data),
					       (ref_t) newp->next));

  /* Update the statistics.  */
  if (packet->notfound)
    ++table->head->negmiss;
  else if (first)
    ++table->head->posmiss;

  /* We depend on this value being correct and at least as high as the
     real number of entries.  */
  atomic_increment (&table->head->nentries);

  /* It does not matter that we are not loading the just incremented
     value, this is just for statistics.  */
  unsigned long int nentries = table->head->nentries;
  if (nentries > table->head->maxnentries)
    table->head->maxnentries = nentries;

  if (table->persistent)
    // XXX async OK?
    msync ((void *) table->head,
	   (char *) &table->head->array[hash] - (char *) table->head
	   + sizeof (ref_t), MS_ASYNC);

  /* We do not have to worry about the pruning thread if we are
     re-adding the data since this is done by the pruning thread.  We
     also do not have to do anything in case this is not the first
     time the data is entered since different data heads all have the
     same timeout.  */
  if (first && prune_wakeup)
    {
      /* Perhaps the prune thread for the table is not running in a long
	 time.  Wake it if necessary.  */
      pthread_mutex_lock (&table->prune_lock);
      time_t next_wakeup = table->wakeup_time;
      bool do_wakeup = false;
      if (next_wakeup > packet->timeout + CACHE_PRUNE_INTERVAL)
	{
	  table->wakeup_time = packet->timeout;
	  do_wakeup = true;
	}
      pthread_mutex_unlock (&table->prune_lock);
      if (do_wakeup)
	pthread_cond_signal (&table->prune_cond);
    }

  return 0;
}
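The do/while loop in cache_add publishes the new hash entry without a write lock: the new node's next link is pointed at the current bucket head and a compare-and-swap installs the node, retrying whenever another thread won the race. A minimal sketch of the same lock-free prepend with C++11 atomics, using a plain pointer-linked node instead of nscd's ref_t offsets:

#include <atomic>

struct Node {
    int value;
    Node *next;
};

// Lock-free prepend: keep retrying the CAS until the new node is published.
// On failure, compare_exchange_weak reloads the current head into newp->next,
// so the retry automatically links against the latest list state.
static void push_front(std::atomic<Node *> &head, Node *newp)
{
    newp->next = head.load(std::memory_order_relaxed);
    while (!head.compare_exchange_weak(newp->next, newp,
                                       std::memory_order_release,
                                       std::memory_order_relaxed))
        ;  // retry
}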
Code Example #27
File: createthread.c Project: VanL/glibc
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#if 0
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

#if !defined(__native_client__) && !defined(__ZRT_HOST)
#error "This code was changed to work only in Native Client"
#endif

  /* Native Client does not have a notion of a thread ID, so we make
     one up.  This must be small enough to leave space for number identifying
     the clock.  Use CLOCK_IDFIELD_SIZE to guarantee that.  */
  pd->tid = ((unsigned int) pd) >> CLOCK_IDFIELD_SIZE;

  /* Native Client syscall thread_create does not push return address onto stack
     as opposed to the kernel.  We emulate this behavior on x86-64 to meet the
     ABI requirement ((%rsp + 8) mod 16 == 0).  On x86-32 the attribute
     'force_align_arg_pointer' does the same for start_thread ().  */
#ifdef __x86_64__
  STACK_VARIABLES_ARGS -= 8;
#endif

  if (__nacl_irt_thread_create (fct, STACK_VARIABLES_ARGS, pd) != 0)
    {
      pd->tid = 0;
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}