Example #1
    bool watch(void *p, ArrayElement *a){
        // Announce interest in p by bumping the reference counter first.
        rc_count.fetch_add(1);

        // If the slot no longer holds p, roll the counter back and report failure.
        if (a->load() != p){
            rc_count.fetch_add(-1);
            return false;
        }
        return true;
    };
Example #2
File: main.cpp Project: CCJY/coliru
void test(std::atomic<int> n)
{
    ___________________________________________();
    ___________________________________________();
    
    ++n;
    ___________________________________________();
    
    n.fetch_add(1, std::memory_order_relaxed);
    ___________________________________________();
    
    n.fetch_add(1, std::memory_order_seq_cst);
    
    ___________________________________________();
    ___________________________________________();
}
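For reference, a minimal, self-contained sketch (not taken from the CCJY/coliru project; the thread count and loop bounds are arbitrary) of the two fetch_add orderings exercised above: relaxed guarantees only atomicity of the increment, while seq_cst (the default) also participates in the single global order of sequentially consistent operations.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

int main() {
    std::atomic<int> counter{0};
    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back([&counter] {
            for (int i = 0; i < 1000; ++i) {
                // Relaxed ordering is enough for a plain counter: only atomicity is needed.
                counter.fetch_add(1, std::memory_order_relaxed);
            }
            // Sequentially consistent ordering additionally orders this increment
            // against all other seq_cst operations in the program.
            counter.fetch_add(1, std::memory_order_seq_cst);
        });
    }
    for (auto& w : workers) w.join();
    std::cout << counter.load() << '\n';  // Always prints 4004.
    return 0;
}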
Example #3
bool just_spinning_barrier(int tid, int gn)
{
    // gn is the group size; exactly one thread per round returns true.
    unsigned int step = step_.load();
    if (nwait_.fetch_add(1) == gn - 1) {
      // Last thread to arrive: reset the arrival counter and advance the generation.
      nwait_.store(0);
      step_.fetch_add(1);
      return true;
    } else {
      // Spin until the generation changes, yielding to avoid burning the core.
      while (step_.load() == step) {
        std::this_thread::yield();
      }
      return false;
    }
}
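A hypothetical, self-contained harness showing how such a spinning barrier is typically driven. The globals step_ and nwait_, the group size of 4 and the three rounds are assumptions made purely for illustration, not part of the original project.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<unsigned int> step_{0};
static std::atomic<int> nwait_{0};

// Same logic as the example above: the last thread to arrive resets the arrival
// counter, advances the generation and returns true; the rest spin until the
// generation changes.
bool just_spinning_barrier(int /*tid*/, int gn) {
    unsigned int step = step_.load();
    if (nwait_.fetch_add(1) == gn - 1) {
        nwait_.store(0);
        step_.fetch_add(1);
        return true;
    }
    while (step_.load() == step) {
        std::this_thread::yield();
    }
    return false;
}

int main() {
    const int gn = 4;  // group size
    std::vector<std::thread> threads;
    for (int tid = 0; tid < gn; ++tid) {
        threads.emplace_back([tid, gn] {
            for (int round = 0; round < 3; ++round) {
                if (just_spinning_barrier(tid, gn)) {
                    std::printf("thread %d released round %d\n", tid, round);
                }
            }
        });
    }
    for (auto& t : threads) t.join();
    return 0;
}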
Example #4
void runSimulationThread(UCTNode* root, int millaSecondsToThink, Patterns* patterns) {
  struct timespec start, end;
  clock_gettime(CLOCK_MONOTONIC, &start);
  clock_gettime(CLOCK_MONOTONIC, &end);

  int i = 0;

  std::default_random_engine engine(time(nullptr));

  while (diffclock(end, start) < millaSecondsToThink) {
    i++;

    UCTNode* v = TreePolicy(root, engine);

    int reward = DefaultPolicy(engine, v, patterns);

    backup(v, reward);

    /*if (i % 100 == 0 && i != 0) {
      char buffer[100];
      sprintf(buffer, "#%d - %d", getpid(), i);
      Log(buffer);
      sprintf(buffer, "  %f %d", diffclock(end, start), millaSecondsToThink);
      Log(buffer);
    }*/
    // end = clock();
    clock_gettime(CLOCK_MONOTONIC, &end);
  }

  simulationCount.fetch_add(i);
  Log(std::to_string(i).c_str());
}
Example #5
	void Schedule(const char * name, task_fn fn, uint32_t threadid)
	{
		BASIS_ASSERT(Scheduler != nullptr);
		
		if (threadid >=0 && threadid < ThreadCount)
		{
			scheduler_data * s = SchedulerList + threadid;
			s->privateTasks.push_back<task_entry>({ fn, basis::stralloc(name) });
			s->privateTaskCount.fetch_add(1, std::memory_order_relaxed);

			if (s != Scheduler)
			{
				SignalScheduler(s);
			}
		}
		else
		{
			BASIS_ASSERT(threadid == TACO_INVALID_THREAD_ID);
			
			Scheduler->sharedTasks.push_back<task_entry>({ fn, basis::stralloc(name) });
			uint32_t count = GlobalSharedTaskCount.fetch_add(1, std::memory_order_relaxed) + 1;
			if (count > 1 || !Scheduler->isActive)
			{
				AskForHelp(count);
			}
		}
	}
Example #6
void* fhBaseRenderList::AllocateBytes(uint32 bytes) {
	uint32 offset = allocated.fetch_add(bytes);
	assert(offset + bytes < renderlistMaxSize);
	assert(renderlistMemory);

	return &static_cast<char*>(renderlistMemory)[offset];
}
Example #7
    void threadFunc(int threadNum)
    {
        std::random_device rd;
        std::mt19937 randomEngine(rd());
        int writes = 0;
        volatile int accumulator = 0;   // Prevent compiler from eliminating this variable

        for (int i = 0; i < m_iterationCount; i++)
        {
            // Choose randomly whether to read or write.
            if (std::uniform_int_distribution<>(0, 30)(randomEngine) == 0)
            {
                WriteLockGuard<NonRecursiveRWLock> guard(m_rwLock);
                m_sharedInt++;
                writes++;
            }
            else
            {
                ReadLockGuard<NonRecursiveRWLock> guard(m_rwLock);
                accumulator += m_sharedInt;
            }
        }

        m_totalWrites.fetch_add(writes, std::memory_order_relaxed);
    }
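A reduced sketch of the tally pattern used at the end of threadFunc above: each worker accumulates a private count and publishes it with a single relaxed fetch_add once it is done. The names total_writes and worker are hypothetical.

#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

std::atomic<long> total_writes{0};

void worker(int iterations) {
    long writes = 0;
    for (int i = 0; i < iterations; ++i) {
        ++writes;  // cheap thread-local bookkeeping, no synchronization needed
    }
    // One atomic add per thread instead of one per iteration; relaxed ordering is
    // sufficient because the aggregate is only read after all threads have joined.
    total_writes.fetch_add(writes, std::memory_order_relaxed);
}

int main() {
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; ++t) threads.emplace_back(worker, 1000);
    for (auto& t : threads) t.join();
    std::cout << total_writes.load() << '\n';  // 4000
    return 0;
}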
Example #8
	/**
	*	@brief	Allocates a new slot for a texture and returns a shared pointer to the new slot.
	*			Slot lifetime is tied to the pointer's lifetime.
	*
	*	@param	tex		Texture to insert
	*/
	auto allocate_slot(texture_t &&tex,
					   image_layout layout = default_layout) {
		// Find a location for the slot: If there are tombstones, replace one of them with new element, if possible
		optional<std::uint32_t> location;
		{
			std::unique_lock<std::mutex> l(tombstones_mutex);

			if (tombstones.size()) {
				location = std::prev(tombstones.end())->start;
				tombstones.pop_back();
			}
		}

		// If no tombstones, use a location past the vector's end.
		if (!location)
			location = count.fetch_add(1);

		// Create slot and pipeline image, we can do that without a lock
		value_type val = lib::allocate_shared<slot_t>(slot_t::token(),
													  std::move(tex),
													  *this,
													  location.get());
		auto img = image_t(val->tex, layout);
		
		// Update changes data
		{
			std::unique_lock<std::mutex> l(general_mutex);
			add_change(location.get(),
					   std::move(img));
		}

		return val;
	}
Example #9
Socket* SocketPool::GetSocket() {
  for ( ; ; ) {
    Socket* sock = nullptr;
    {
      std::unique_lock<std::mutex> lock(mtx_);
      if (pool_.empty()) {
        break;
      }

      sock = pool_.back();
      pool_.pop_back();
    }

    pool_count_.fetch_sub(1, std::memory_order_relaxed);

    if (sock && sock->IsValid()) {
      return sock;
    }

    delete sock;
  }

  if (pool_count_.load(std::memory_order_relaxed) < pool_size_) {
    pool_count_.fetch_add(1, std::memory_order_relaxed);
    return new Socket(remote_side_.ip, remote_side_.port);
  }

  return nullptr;
}
Example #10
DWORD DNNModelThreadForward(ThreadLayerState *tl)
{
    SetTrainingThreadAffinity(tl->_threadNum);

#ifdef PREALLOCATE_THREAD_BUFFERS
//	tl->PrintSparsity();
#else
    float **inputActivation = new float *[tl->_numLayers];
    float **outputActivation = new float *[tl->_numLayers];
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        inputActivation[i] = new float[tl->_LayerState[i]._InputSize];
        outputActivation[i] = new float[tl->_LayerState[i]._OutputSize];
    }
#endif

    while (true)
    {
        INT64 sampleId = g_CurrentSamplePos.fetch_add(1);
        if (sampleId >= G_SAMPLE_COUNT) break;
        int numLayers = ((sampleId % G_WORKER_COUNT) == 0) ? tl->_numLayers : tl->_numLayers-1;
        for (int l = tl->_startLayer; l < numLayers; l++) {
            
#ifdef PREPARE_COMPUTE_DATA
            std::vector<std::vector<float>>& layerActivations = g_Activations[l];
            int activationId = sampleId % layerActivations.size();
            std::vector<float>& activationVector = layerActivations[activationId];
            Sparsify(layerActivations[activationId], G_FORWARD_SPARSITY);
            const float* inpACT = &activationVector[0];
#elif PREALLOCATE_THREAD_BUFFERS
            float *inpACT = tl->_inputActivation[l];
#else
            float *inpACT = inputActivation[l];
            Sparsify(inpACT, tl->_LayerState[l]._InputSize, G_FORWARD_SPARSITY, G_ACTIVATION_CACHELINE_SPARSITY);
#endif
            float *outACT = tl->_outputActivation[l];
            Layer *layer = (tl->_LayerState + l);

            DECLARE_TIMER(timer);
            START_TIMER(timer);
            g_DNNKernels._feedForward(layer, inpACT, outACT);           
            STOP_TIMER(timer);
            tl->_FLOPTime[l] += ELAPSED_USEC_TIME(timer);

            tl->_SampleCount[l]++;
        }
    }

#ifndef PREALLOCATE_THREAD_BUFFERS
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        delete [] inputActivation[i];
        delete [] outputActivation[i];
    }
        
    delete []inputActivation;
    delete []outputActivation;
#endif 
    return 0;
}
Example #11
 virtual ~JavaBBinder()
 {
     ALOGV("Destroying JavaBBinder %p\n", this);
     gNumLocalRefsDeleted.fetch_add(1, std::memory_order_relaxed);
     JNIEnv* env = javavm_to_jnienv(mVM);
     env->DeleteGlobalRef(mObject);
 }
Example #12
 JavaBBinder(JNIEnv* env, jobject /* Java Binder */ object)
     : mVM(jnienv_to_javavm(env)), mObject(env->NewGlobalRef(object))
 {
     ALOGV("Creating JavaBBinder %p\n", this);
     gNumLocalRefsCreated.fetch_add(1, std::memory_order_relaxed);
     gcIfManyNewRefs(env);
 }
Example #13
/* This function checks the emulated CPU - GPU distance and may wake up the GPU,
 * or block the CPU if required. It should be called by the CPU thread regularly.
 * @ticks The amount of emulated CPU time that has passed.
 * @return A good time to call WaitForGpuThread() next.
 */
static int WaitForGpuThread(int ticks)
{
  const SConfig& param = SConfig::GetInstance();

  int old = s_sync_ticks.fetch_add(ticks);
  int now = old + ticks;

  // GPU is idle, so stop polling.
  if (old >= 0 && s_gpu_mainloop.IsDone())
    return -1;

  // Wakeup GPU
  if (old < param.iSyncGpuMinDistance && now >= param.iSyncGpuMinDistance)
    RunGpu();

  // If the GPU is still sleeping, wait for a longer time
  if (now < param.iSyncGpuMinDistance)
    return GPU_TIME_SLOT_SIZE + param.iSyncGpuMinDistance - now;

  // Wait for GPU
  if (now >= param.iSyncGpuMaxDistance)
    s_sync_wakeup_event.Wait();

  return GPU_TIME_SLOT_SIZE;
}
Example #14
std::shared_ptr<HiresTexture>
HiresTexture::Search(const std::string& basename,
                     std::function<u8*(size_t)> request_buffer_delegate)
{
  if (g_ActiveConfig.bCacheHiresTextures)
  {
    std::unique_lock<std::mutex> lk(s_textureCacheMutex);

    auto iter = s_textureCache.find(basename);
    if (iter != s_textureCache.end())
    {
      HiresTexture* current = iter->second.get();
      u8* dst = request_buffer_delegate(current->m_cached_data_size);
      memcpy(dst, current->m_cached_data.get(), current->m_cached_data_size);
      return iter->second;
    }
    lk.unlock();
    if (size_sum.load() < max_mem)
    {
      std::shared_ptr<HiresTexture> ptr(
          Load(basename, [](size_t requested_size) { return new u8[requested_size]; }, true));
      lk.lock();
      if (ptr)
      {
        s_textureCache[basename] = ptr;
        HiresTexture* current = ptr.get();
        size_sum.fetch_add(current->m_cached_data_size);
        u8* dst = request_buffer_delegate(current->m_cached_data_size);
        memcpy(dst, current->m_cached_data.get(), current->m_cached_data_size);
      }
      return ptr;
    }
  }
  return std::shared_ptr<HiresTexture>(Load(basename, request_buffer_delegate, false));
}
Example #15
int GrMockGpu::NextInternalTextureID() {
    static std::atomic<int> nextID{1};
    int id;
    do {
        id = nextID.fetch_add(1);
    } while (0 == id);  // Reserve 0 for an invalid ID.
    return id;
}
Example #16
 void operator()(BlockingQueue<int> &queue) {
   for (int i = 0; i < size; ++i) {
     int value = product_item.fetch_add(1);
     if (!queue.Push(value)) {
       std::cout << "failed to push item " << value << " into queue\n";
     }
   }
 }
Example #17
 MemoryCheck(const MemoryCheck& x)
 {
     // We have to do this to make sure that destructor calls are paired
     //
     // Really, copy constructor should be deletable, but CCheckQueue breaks
     // if it is deleted because of internal push_back.
     fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
 };
Example #18
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}
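The same reserve-an-invalid-ID idiom as a generic, self-contained sketch. The names are hypothetical, and relaxed ordering is used here (the Skia snippet above uses acquire) since only the counter value itself matters.

#include <atomic>
#include <cstdint>
#include <iostream>

constexpr uint32_t kInvalidID = 0;

uint32_t next_id() {
    static std::atomic<uint32_t> next{1};
    for (;;) {
        uint32_t id = next.fetch_add(1, std::memory_order_relaxed);
        if (id != kInvalidID) {  // only hit after the 32-bit counter wraps around
            return id;
        }
    }
}

int main() {
    std::cout << next_id() << ' ' << next_id() << '\n';  // 1 2
    return 0;
}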
Example #19
	void unsafeFree(){
		this->rc_next=tl_safe_pool;
		tl_safe_pool=this;

        #ifdef DEBUG
            freecount.fetch_add(1);
            assert(freecount.load() == allcount.load());
        #endif
	};
Example #20
void FileMap::getRawBlock(BinaryDataRef& bdr, uint64_t offset, uint32_t size,
   std::atomic<uint64_t>& lastSeenCumulative) const
{
   bdr.setRef(getMapPtr(offset), size);

   lastSeenCumulated_.store(
      lastSeenCumulative.fetch_add(size, std::memory_order_relaxed) + size,
      std::memory_order_relaxed);
}
Example #21
 void Push(int index)
 {
     // Standard lock-free push: link the new node to the current head, then try to
     // swing head to the new index. On CAS failure head1 is reloaded and the link redone.
     int head1 = head.load(std::memory_order_acquire);
     do
     {
         array[index].Next.store(head1, std::memory_order_release);
     } while (!head.compare_exchange_strong(head1, index, std::memory_order_seq_cst));
     count.fetch_add(1, std::memory_order_seq_cst);
 }
Example #22
/**
 * Run a timed experiment
 */
void run(uintptr_t id)
{
    // wait until all threads created, then set alarm and read timer
    b0.fetch_add(1); while (b0 != Config::CFG.threads) { std::this_thread::yield(); }

    if (id == 0) {
        // we use the timer
        if (Config::CFG.opcount == 0) {
            signal(SIGALRM, catch_SIGALRM);
            alarm(Config::CFG.duration);
        }
        Config::CFG.totaltime = getElapsedTime();
    }

    // wait until read of start timer finishes, then start transactions
    b1.fetch_add(1); while (b1 != Config::CFG.threads) { std::this_thread::yield(); }
    uint32_t count = 0;
    uint32_t seed = id;

    // we do a constant number of operations
    if (Config::CFG.opcount > 0) {
        while (count < Config::CFG.opcount) {
            bench_test(id, &seed);
            ++count;
        }
    } else {
        // run until the alarm fires
        while (Config::CFG.running) {
            bench_test(id, &seed);
            ++count;
        }
    }

    // wait until all operations finish, then get time
    b2.fetch_add(1); while (b2 != Config::CFG.threads) { std::this_thread::yield(); }
    if (id == 0)
        Config::CFG.totaltime = getElapsedTime() - Config::CFG.totaltime;

    // we use the timer, so fix opcount
    if (Config::CFG.opcount == 0)
        // add this thread's count to an accumulator
        __sync_fetch_and_add(&Config::CFG.opcount, count);
}
Example #23
 virtual ~JavaDeathRecipient()
 {
     //ALOGI("Removing death ref: recipient=%p\n", mObject);
     gNumDeathRefsDeleted.fetch_add(1, std::memory_order_relaxed);
     JNIEnv* env = javavm_to_jnienv(mVM);
     if (mObject != NULL) {
         env->DeleteGlobalRef(mObject);
     } else {
         env->DeleteWeakGlobalRef(mObjectWeak);
     }
 }
Example #24
void Profiler::addProfilePoint(const char* tag, thread_id_type threadId, u64 startTime, u64 endTime) {
	
	// Spin until writing is enabled.
	while (!m_canWrite);
	
	u32 pos = m_writeIndex.fetch_add(1);
	
	ProfilerSample & sample = m_samples[pos % NB_SAMPLES];
	sample.tag = tag;
	sample.threadId = threadId;
	sample.startTime = startTime;
	sample.endTime = endTime;
}
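A minimal sketch of the ring-buffer indexing idiom addProfilePoint relies on: fetch_add hands every writer a unique, monotonically increasing position, and the modulo wraps it into a fixed-size buffer so old entries are overwritten. All names here are hypothetical.

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::size_t kSlots = 8;
std::array<int, kSlots> buffer{};
std::atomic<std::uint32_t> write_index{0};

void record(int value) {
    std::uint32_t pos = write_index.fetch_add(1);  // unique position per call
    buffer[pos % kSlots] = value;                  // wraps, overwriting older entries
}

int main() {
    for (int i = 0; i < 12; ++i) record(i);
    for (int v : buffer) std::printf("%d ", v);    // slots 0..3 hold 8..11, slots 4..7 hold 4..7
    std::printf("\n");
    return 0;
}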
Example #25
    JavaDeathRecipient(JNIEnv* env, jobject object, const sp<DeathRecipientList>& list)
        : mVM(jnienv_to_javavm(env)), mObject(env->NewGlobalRef(object)),
          mObjectWeak(NULL), mList(list)
    {
        // These objects manage their own lifetimes so are responsible for final bookkeeping.
        // The list holds a strong reference to this object.
        LOGDEATH("Adding JDR %p to DRL %p", this, list.get());
        list->add(this);

        gNumDeathRefsCreated.fetch_add(1, std::memory_order_relaxed);
        gcIfManyNewRefs(env);
    }
Example #26
DWORD DNNModelThreadBackward(ThreadLayerState *tl)
{
    SetTrainingThreadAffinity(tl->_threadNum);

#ifndef PREALLOCATE_THREAD_BUFFERS
    float **outputError = new float *[tl->_numLayers];
    float **inputError = new float *[tl->_numLayers];
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        outputError[i] = new float[tl->_LayerState[i]._InputSize];
        inputError[i] = new float[tl->_LayerState[i]._OutputSize];
    }
#endif

    while (true)
    {
        INT64 sampleId = g_CurrentSamplePos.fetch_add(1);
        if (sampleId >= G_SAMPLE_COUNT) break;
        int numLayers = ((sampleId % G_WORKER_COUNT) == 0) ? tl->_numLayers : tl->_numLayers-1;
        for (int l = tl->_startLayer; l < numLayers; l++)
        {
#ifdef PREPARE_COMPUTE_DATA
#elif PREALLOCATE_THREAD_BUFFERS
#else
            Sparsify(tl->_inputError[l], tl->_LayerState[l]._OutputSize, G_BACKPROP_SPARSITY, G_DELTA_CACHELINE_SPARSITY);
#endif
            
            Layer *layer = (tl->_LayerState + l);
 
            DECLARE_TIMER(timer);
            START_TIMER(timer);
            g_DNNKernels._backPropagate(layer, tl->_inputError[l], tl->_outputError[l]);
            STOP_TIMER(timer);
            tl->_FLOPTime[l] += ELAPSED_USEC_TIME(timer);

            tl->_SampleCount[l]++;
        }
    }

#ifdef PREPARE_COMPUTE_DATA
#elif PREALLOCATE_THREAD_BUFFERS
#else
    for (int i = tl->_startLayer; i < tl->_numLayers; i++)
    {
        delete [] outputError[i];
        delete [] inputError[i];
    }
    delete []outputError;
    delete []inputError;
#endif 

    return 0;
}
Example #27
    void executeImpl(Block & block, const ColumnNumbers &, size_t result, size_t input_rows_count) override
    {
        size_t current_row_number = rows.fetch_add(input_rows_count);

        auto column = ColumnUInt64::create();
        auto & data = column->getData();
        data.resize(input_rows_count);
        for (size_t i = 0; i < input_rows_count; ++i)
            data[i] = current_row_number + i;

        block.getByPosition(result).column = std::move(column);
    }
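A generic sketch of the block-reservation idiom executeImpl uses: a single fetch_add claims a contiguous range of input_rows_count row numbers, so concurrent callers never receive overlapping ranges. The names next_row and number_rows are hypothetical.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

std::atomic<std::uint64_t> next_row{0};

std::vector<std::uint64_t> number_rows(std::size_t count) {
    std::uint64_t first = next_row.fetch_add(count);  // claims [first, first + count)
    std::vector<std::uint64_t> out(count);
    for (std::size_t i = 0; i < count; ++i)
        out[i] = first + i;
    return out;
}

int main() {
    auto a = number_rows(3);  // 0 1 2
    auto b = number_rows(2);  // 3 4
    for (auto v : a) std::cout << v << ' ';
    for (auto v : b) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}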
Example #28
static void CopyHeaderVariables(Array& server,
                                const HeaderMap& headers) {
    static std::atomic<int> badRequests(-1);

    std::vector<std::string> badHeaders;
    for (auto const& header : headers) {
        auto const& key = header.first;
        auto const& values = header.second;
        auto normalizedKey = s_HTTP_ +
                             string_replace(f_strtoupper(key), s_dash,
                                            s_underscore);

        // Detect suspicious headers.  We are about to modify header names for
        // the SERVER variable.  This means that it is possible to deliberately
        // cause a header collision, which an attacker could use to sneak a
        // header past a proxy that would either overwrite or filter it
        // otherwise.  Client code should use apache_request_headers() to
        // retrieve the original headers if they are security-critical.
        if (RuntimeOption::LogHeaderMangle != 0 && server.exists(normalizedKey)) {
            badHeaders.push_back(key);
        }

        if (!values.empty()) {
            // When a header has multiple values, we always take the last one.
            server.set(normalizedKey, String(values.back()));
        }
    }

    if (!badHeaders.empty()) {
        auto reqId = badRequests.fetch_add(1, std::memory_order_acq_rel) + 1;
        if (!(reqId % RuntimeOption::LogHeaderMangle)) {
            std::string badNames = folly::join(", ", badHeaders);
            std::string allHeaders;

            const char* separator = "";
            for (auto const& header : headers) {
                for (auto const& value : header.second) {
                    folly::toAppend(separator, header.first, ": ", value,
                                    &allHeaders);
                    separator = "\n";
                }
            }

            Logger::Warning(
                "HeaderMangle warning: "
                "The header(s) [%s] overwrote other headers which mapped to the same "
                "key. This happens because PHP normalises - to _, ie AN_EXAMPLE "
                "and AN-EXAMPLE are equivalent. You should treat this as "
                "malicious. All headers from this request:\n%s",
                badNames.c_str(), allHeaders.c_str());
        }
    }
}
Example #29
void SocketPool::ReturnSocket(Socket* sock) {
  if (sock == nullptr) {
    return;
  }

  {
    std::unique_lock<std::mutex> lock(mtx_);
    pool_.push_back(sock);
  }

  pool_count_.fetch_add(1, std::memory_order_relaxed);
}
Example #30
    void call_void()
    {
        ++count_call_void;

        // make sure this function is not concurrently invoked
        HPX_TEST_EQ(count_active_call_void.fetch_add(1) + 1, 1);

        hpx::this_thread::suspend(std::chrono::microseconds(100));

        --count_active_call_void;
        HPX_TEST_EQ(count_active_call_void.load(), 0);
    }