Example #1
void consumer()
{
    std::cout << "Consumer start.\n";
    std::string *p2;
    while( !(p2 = ptr.load(std::memory_order_acquire)) ) 
    {} // spin

    assert(*p2 == "Hello");
    assert(data == 42); 
    std::cout << "Consumer end.\n";
}
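A consumer like this is normally paired with a producer that publishes the pointer last. A minimal sketch of that counterpart, assuming the globals std::atomic<std::string*> ptr and int data used above:

#include <atomic>
#include <string>

std::atomic<std::string*> ptr{nullptr}; // assumed globals
int data = 0;

void producer()
{
    std::string* p = new std::string("Hello");
    data = 42;
    // the release store makes the writes above visible to any thread
    // whose acquire load observes the new pointer value
    ptr.store(p, std::memory_order_release);
}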
Example #2
unsigned lookup(const StringData* name) {
  assert(g_initFlag.load(std::memory_order_acquire) ||
         pthread_equal(s_initThread.load(std::memory_order_acquire),
                       pthread_self()));

  if (auto const ptr = folly::get_ptr(s_instanceBitsMap, name)) {
    assert(*ptr >= 1 && *ptr < kNumInstanceBits);
    return *ptr;
  }
  return 0;
}
Example #3
bool getMask(const StringData* name, int& offset, uint8_t& mask) {
  assert(g_initFlag.load(std::memory_order_acquire));

  unsigned bit = lookup(name);
  if (!bit) return false;

  const size_t bitWidth = sizeof(mask) * CHAR_BIT;
  offset = Class::instanceBitsOff() + bit / bitWidth * sizeof(mask);
  mask = 1u << (bit % bitWidth);
  return true;
}
Example #4
void SWRenderer::DrawTexture(u8 *texture, int width, int height)
{
	// FIXME: This should add black bars when the game has set the VI to render less than the full xfb.

	// Save screenshot
	if (s_bScreenshot.load())
	{
		std::lock_guard<std::mutex> lk(s_criticalScreenshot);
		TextureToPng(texture, width * 4, s_sScreenshotName, width, height, false);
		// Reset settings
		s_sScreenshotName.clear();
		s_bScreenshot.store(false);
	}

	GLsizei glWidth = (GLsizei)GLInterface->GetBackBufferWidth();
	GLsizei glHeight = (GLsizei)GLInterface->GetBackBufferHeight();


	// Update GLViewPort
	glViewport(0, 0, glWidth, glHeight);
	glScissor(0, 0, glWidth, glHeight);

	glBindTexture(GL_TEXTURE_2D, s_RenderTarget);

	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, (GLsizei)width, (GLsizei)height, 0, GL_RGBA, GL_UNSIGNED_BYTE, texture);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

	glUseProgram(program);
	static const GLfloat verts[4][2] = {
		{ -1, -1}, // left top
		{ -1,  1}, // left bottom
		{  1,  1}, // right bottom
		{  1, -1}  // right top
	};
	static const GLfloat texverts[4][2] = {
		{0, 1},
		{0, 0},
		{1, 0},
		{1, 1}
	};

	glVertexAttribPointer(attr_pos, 2, GL_FLOAT, GL_FALSE, 0, verts);
	glVertexAttribPointer(attr_tex, 2, GL_FLOAT, GL_FALSE, 0, texverts);
	glEnableVertexAttribArray(attr_pos);
	glEnableVertexAttribArray(attr_tex);
	glUniform1i(uni_tex, 0);
	glActiveTexture(GL_TEXTURE0);
	glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
	glDisableVertexAttribArray(attr_pos);
	glDisableVertexAttribArray(attr_tex);

	glBindTexture(GL_TEXTURE_2D, 0);
}
Example #5
File: main.cpp Project: CCJY/coliru
 // close the thread pool - this will wait for all queued up jobs to finish
 // additional jobs will not be allowed to be posted to the queue for scheduling
 void close() {
     if (isActive.load()){
         // only stop the threads if transitioning 
         // from active to inactive
         isActive.store(false);
         condition.notify_all();
         for (std::thread& t : mWorkers) {
             t.join();
         }
     }
 }
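For this close() to behave as its comment promises, the worker loop must re-check isActive after every wakeup and drain the queue before exiting. A sketch under those assumptions (mQueue and mMutex are hypothetical names; only isActive, condition, and mWorkers appear above):

 void workerLoop() {
     while (true) {
         std::function<void()> job;
         {
             std::unique_lock<std::mutex> lk(mMutex); // hypothetical queue mutex
             condition.wait(lk, [this] {
                 return !mQueue.empty() || !isActive.load();
             });
             // keep draining queued jobs during shutdown, so close()
             // really does wait for all queued work to finish
             if (mQueue.empty()) return; // inactive and fully drained
             job = std::move(mQueue.front());
             mQueue.pop();
         }
         job();
     }
 }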
Example #6
 channel_op_status try_value_pop_( slot *& s, std::size_t & idx) {
     idx = consumer_idx_.load( std::memory_order_relaxed);
     for (;;) {
         s = & slots_[idx & (capacity_ - 1)];
         std::size_t cycle = s->cycle.load( std::memory_order_acquire);
         std::intptr_t diff{ static_cast< std::intptr_t >( cycle) - static_cast< std::intptr_t >( idx + 1) };
         if ( 0 == diff) {
             if ( consumer_idx_.compare_exchange_weak( idx, idx + 1, std::memory_order_relaxed) ) {
                 break;
             }
         } else if ( 0 > diff) {
             return channel_op_status::empty;
         } else {
             idx = consumer_idx_.load( std::memory_order_relaxed);
         }
     }
     // incrementing the slot cycle must be deferred till the value has been consumed
     // the slot cycle tells producers that the cell can be re-used (store a new value)
     return channel_op_status::success;
 }
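The caller honors the comment above: the value is moved out of the slot first, and only then is the cycle advanced so producers may reuse the cell. Roughly, following the pattern in Boost.Fiber's buffered_channel (a sketch, not verbatim library code):

 channel_op_status try_pop( value_type & value) {
     slot * s{ nullptr };
     std::size_t idx{ 0 };
     channel_op_status status{ try_value_pop_( s, idx) };
     if ( channel_op_status::success == status) {
         value_type * p{ reinterpret_cast< value_type * >( std::addressof( s->storage) ) };
         value = std::move( * p);
         p->~value_type();
         // now publish the slot for re-use by producers
         s->cycle.store( idx + capacity_, std::memory_order_release);
     }
     return status;
 }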
Example #7
int main()
{
    x=false;
    y=false;
    z=0;
    std::thread a(write_x_then_y);
    std::thread b(read_y_then_x);
    a.join();
    b.join();
    assert(z.load()!=0);
}
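This main() drives the classic release/acquire litmus test (cf. C++ Concurrency in Action); the two thread functions are not shown here. A sketch consistent with the final assertion, assuming the usual globals:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<bool> x, y; // assumed globals
std::atomic<int> z;

void write_x_then_y()
{
    x.store(true, std::memory_order_relaxed);
    y.store(true, std::memory_order_release); // publishes the write to x
}

void read_y_then_x()
{
    while (!y.load(std::memory_order_acquire)) // spin until y is set
        ;
    if (x.load(std::memory_order_relaxed)) // guaranteed to see true here
        ++z;
}

With this pairing, the acquire load that finally observes y == true synchronizes-with the release store, so the subsequent read of x must also see true and z ends up nonzero; the assert cannot fire.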
Example #8
    void lock() noexcept {
        std::int32_t collisions = 0, tests = 0, expected = 0;
        // after max. spins or collisions suspend via futex
        while ( BOOST_FIBERS_SPIN_MAX_TESTS > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'value_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test the shared variable 'value_'
            // first access to 'value_' -> cache miss
            // successive access to 'value_' -> cache hit
            // if 'value_' was released by another fiber,
            // the cached 'value_' is invalidated -> cache miss
            if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
                ++tests;
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                // give CPU a hint that this thread is in a "spin-wait" loop
                // delays the next instruction's execution for a finite period of time (depends on processor family)
                // the CPU is not under demand, parts of the pipeline are no longer being used
                // -> reduces the power consumed by the CPU
                cpu_relax();
#else
                // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                // but only to another thread on the same processor
                // instead of constant checking, a thread only checks if no other useful work is pending
                std::this_thread::yield();
#endif
            } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed) ) {
                // note: the CAS failure ordering must not be memory_order_release
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                static thread_local std::minstd_rand generator;
                const std::int32_t z = std::uniform_int_distribution< std::int32_t >{
                    0, static_cast< std::int32_t >( 1) << collisions }( generator);
                ++collisions;
                for ( std::int32_t i = 0; i < z; ++i) {
                    cpu_relax();
                }
            } else {
                // success, lock acquired
                return;
            }
        }
        // failure, lock not acquired
        // pause via futex
        if ( 2 != expected) {
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
        while ( 0 != expected) {
            futex_wait( & value_, 2);
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
    }
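The lock() above encodes three states in value_: 0 (unlocked), 1 (locked, uncontended), 2 (locked, possibly with sleepers). A matching unlock in the style of Drepper's "Futexes Are Tricky" (a sketch, not necessarily the library's exact code; the futex_wake helper and its wake-one signature are assumed):

    void unlock() noexcept {
        // only a contended lock (2) can have sleepers that need waking
        if ( 2 == value_.exchange( 0, std::memory_order_release) ) {
            futex_wake( & value_, 1); // assumed helper: wake one waiter
        }
    }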
Example #9
void reader() {
    while (true) {
        folly::rcu_reader r;
        std::ostringstream oss;
        IntArray* cur_ptr = data.load();
        for (int i = 0; i < ELE_NUM; ++i) {
            oss << cur_ptr->data[i] << " ";
        }
        printf("%s\n", oss.str().c_str());
    }
}
Example #10
		/**
		 * @brief Default Destructor
		 */
		~SharedWriteLock()
		{
			if (reference == nullptr || reference->operator--() > 0)
				return;

			if (locked->load() == true)
				rw_mutex->writeUnlock();

			delete reference;
			delete locked;
		};
Example #11
 pc_region* acquire()
 {
     pc_sys_anchor cmp (head.load(rl::memory_order_relaxed));
     pc_sys_anchor xchg;
     do
     {
         xchg.refcnt = cmp.refcnt + 2;
         xchg.region = cmp.region;
     }
     while (false == head.compare_exchange_weak(cmp, xchg, rl::memory_order_acquire));
     return cmp.region;
 }
Example #12
JNIEnv* get_jvm_env()
{
    abort_if_no_jvm();
    JNIEnv* env = nullptr;
    auto result = JVM.load()->AttachCurrentThread(&env, nullptr);
    if (result != JNI_OK)
    {
        throw std::runtime_error("Could not attach to JVM");
    }

    return env;
}
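The JVM global used above is evidently a std::atomic<JavaVM*> populated during initialization; a plausible sketch of the assumed helper (hypothetical, for illustration only):

static std::atomic<JavaVM*> JVM{nullptr}; // assumed global, set at init time

void abort_if_no_jvm()
{
    if (JVM.load() == nullptr)
    {
        std::abort(); // cannot attach a thread without a registered JVM
    }
}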
Example #13
/*! Perform an atomic read-acquire to check whether the message is ready. */
bool TryReceiveMessage(Message& result) {
    int ready = g_guard.load(std::memory_order_acquire);
    if (ready != 0) {
        // Yes. Copy from shared memory using non-atomic loads.
        result.tick  = g_payload.tick;
        result.str   = g_msg_str;
        result.param = g_payload.param;

        return true;
    }
    return false;               // no
}
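The matching sender performs the non-atomic writes first and then publishes them with a write-release on the guard. A sketch assuming the same globals (g_payload, g_msg_str, and a std::atomic<int> g_guard):

void SendTestMessage(Message& msg)
{
    // copy to shared memory using non-atomic stores...
    g_payload.tick  = msg.tick;
    g_msg_str       = msg.str;
    g_payload.param = msg.param;

    // ...then perform an atomic write-release to set the ready flag
    g_guard.store(1, std::memory_order_release);
}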
Example #14
File: smr.cpp Project: arssivka/2rc
 void push(unsigned index, int data)
 {
     node* n = new node ();
     n->VAR(data_) = data;
     node* next = head_.load(std::memory_order_relaxed);
     for (;;)
     {
         n->next_.store(next, rl::memory_order_relaxed);
         if (head_.compare_exchange_weak(next, n, rl::memory_order_release))
             break;
     }
 }
Example #15
    void call_void()
    {
        ++count_call_void;

        // make sure this function is not concurrently invoked
        HPX_TEST_EQ(count_active_call_void.fetch_add(1) + 1, 1);

        hpx::this_thread::suspend(std::chrono::microseconds(100));

        --count_active_call_void;
        HPX_TEST_EQ(count_active_call_void.load(), 0);
    }
Example #16
static int RunGpuOnCpu(int ticks)
{
  SCPFifoStruct& fifo = CommandProcessor::fifo;
  bool reset_simd_state = false;
  int available_ticks = int(ticks * SConfig::GetInstance().fSyncGpuOverclock) + s_sync_ticks.load();
  while (fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint() &&
         available_ticks >= 0)
  {
    if (s_use_deterministic_gpu_thread)
    {
      ReadDataFromFifoOnCPU(fifo.CPReadPointer);
      s_gpu_mainloop.Wakeup();
    }
    else
    {
      if (!reset_simd_state)
      {
        FPURoundMode::SaveSIMDState();
        FPURoundMode::LoadDefaultSIMDState();
        reset_simd_state = true;
      }
      ReadDataFromFifo(fifo.CPReadPointer);
      u32 cycles = 0;
      s_video_buffer_read_ptr = OpcodeDecoder::Run(
          DataReader(s_video_buffer_read_ptr, s_video_buffer_write_ptr), &cycles, false);
      available_ticks -= cycles;
    }

    if (fifo.CPReadPointer == fifo.CPEnd)
      fifo.CPReadPointer = fifo.CPBase;
    else
      fifo.CPReadPointer += 32;

    fifo.CPReadWriteDistance -= 32;
  }

  CommandProcessor::SetCPStatusFromGPU();

  if (reset_simd_state)
  {
    FPURoundMode::LoadSIMDState();
  }

  // Discard all available ticks as there is nothing to do any more.
  s_sync_ticks.store(std::min(available_ticks, 0));

  // If the GPU is idle, drop the handler.
  if (available_ticks >= 0)
    return -1;

  // Always wait at least for GPU_TIME_SLOT_SIZE cycles.
  return -available_ticks + GPU_TIME_SLOT_SIZE;
}
Example #17
/////// Participant is where most of the interesting stuff happens
bool Participant::quickEnter() noexcept {
    uintptr_t new_count = in_critical_.load(mo_rlx) + 1;
    in_critical_.store(new_count, mo_rlx);
    // Nothing to do if we were already in a critical section
    if (new_count > 1) return false;

    // Copy the global epoch to the local one
    uintptr_t global_epoch = global_epoch_.load(mo_rlx);
    remote_thread_fence::placeholder(mo_sc);
    epoch_.store(global_epoch, mo_rlx);
    return true;
}
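A plausible exit path (a hypothetical sketch; the project's real code may differ) simply undoes the increment. Once in_critical_ drops back to zero, reclaimers are free to ignore this participant's epoch_:

void Participant::quickExit() noexcept {
    uintptr_t new_count = in_critical_.load(mo_rlx) - 1;
    // order the critical-section accesses before the count is published
    remote_thread_fence::placeholder(mo_sc);
    in_critical_.store(new_count, mo_rlx);
}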
Example #18
 static void dispose(std::atomic<T*>& ptr) {
   for (;;) {
     auto p = ptr.load();
     if (p == nullptr) {
       return;
     } else if (ptr.compare_exchange_weak(p, nullptr)) {
       p->dispose();
       ptr = nullptr; // redundant: the successful CAS already stored nullptr
       return;
     }
   }
 }
Example #19
 int wait(long* old_val = NULL) {
     if (old_val) {
         long cur_val = m_count.load(std::memory_order_relaxed);
         if (*old_val != cur_val) {
             *old_val = cur_val;
             return 0;
         }
     }
     std::unique_lock<std::mutex> g(m_lock);
     try { m_cond.wait(g); } catch(...) { return -1; }
     return 0;
 }
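A matching signal operation (a hypothetical sketch using the same members) bumps the generation counter and wakes all waiters; notifying while holding m_lock guarantees the wakeup reaches threads already blocked inside m_cond.wait():

 int signal() {
     std::unique_lock<std::mutex> g(m_lock);
     m_count.fetch_add(1, std::memory_order_relaxed); // start a new generation
     m_cond.notify_all();
     return 0;
 }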
Example #20
 channel_op_status try_push_( ValueType && value) {
     slot * s{ nullptr };
     std::size_t idx{ producer_idx_.load( std::memory_order_relaxed) };
     for (;;) {
         s = & slots_[idx & (capacity_ - 1)];
         std::size_t cycle{ s->cycle.load( std::memory_order_acquire) };
         std::intptr_t diff{ static_cast< std::intptr_t >( cycle) - static_cast< std::intptr_t >( idx) };
         if ( 0 == diff) {
             if ( producer_idx_.compare_exchange_weak( idx, idx + 1, std::memory_order_relaxed) ) {
                 break;
             }
         } else if ( 0 > diff) {
             return channel_op_status::full;
         } else {
             idx = producer_idx_.load( std::memory_order_relaxed);
         }
     }
     ::new ( static_cast< void * >( std::addressof( s->storage) ) ) value_type( std::forward< ValueType >( value) );
     s->cycle.store( idx + 1, std::memory_order_release);
     return channel_op_status::success;
 }
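Both this push path and the pop path in Example #6 presuppose a slot type that pairs a cycle counter with raw storage for the value, each slot's cycle being initialized to its own index (the Vyukov bounded-MPMC scheme). Roughly (a sketch):

 struct slot {
     std::atomic< std::size_t > cycle{ 0 }; // initialized to the slot's index
     typename std::aligned_storage< sizeof( value_type), alignof( value_type) >::type storage;
 };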
Example #21
static void gcIfManyNewRefs(JNIEnv* env)
{
    uint32_t totalRefs = gNumLocalRefsCreated.load(std::memory_order_relaxed)
            + gNumDeathRefsCreated.load(std::memory_order_relaxed);
    uint32_t collectedAtRefs = gCollectedAtRefs.load(std::memory_order_relaxed);
    // A bound on the number of threads that can have incremented gNum...RefsCreated before the
    // following check is executed. Effectively a bound on #threads. Almost any value will do.
    static constexpr uint32_t MAX_RACING = 100000;

    if (totalRefs - (collectedAtRefs + GC_INTERVAL) /* modular arithmetic! */ < MAX_RACING) {
        // Recently passed next GC interval.
        if (gCollectedAtRefs.compare_exchange_strong(collectedAtRefs,
                collectedAtRefs + GC_INTERVAL, std::memory_order_relaxed)) {
            ALOGV("Binder forcing GC at %u created refs", totalRefs);
            env->CallStaticVoidMethod(gBinderInternalOffsets.mClass,
                    gBinderInternalOffsets.mForceGc);
        }  // otherwise somebody else beat us to it.
    } else {
        ALOGV("Now have %d binder ops", totalRefs - collectedAtRefs);
    }
}
Example #22
    std::shared_ptr<T> pop()
    {
        // get the hazard pointers for the current thread
        std::atomic<void*>& hp=get_hazard_pointer_for_current_thread();
        node* old_head=head.load();
        do
        {
            node* temp;
            do
            {
                temp=old_head;
                // store old_head in the hazard pointer
                hp.store(old_head);
                old_head=head.load();
            } while(old_head!=temp);
        }
        // check old_head before deleting.
        while(old_head &&
              !head.compare_exchange_strong(old_head,old_head->next));
        hp.store(nullptr);
        std::shared_ptr<T> res;
        if(old_head)
        {
            res.swap(old_head->data);

            if(outstanding_hazard_pointers_for(old_head))
            {
                // old head being accessed by other threads.
                // add to reclaim later list
                reclaim_later(old_head);
            }
            else
            {
                delete old_head;
            }
            // traverse through the reclaim later list and delete nodes which are not hazardous.
            delete_nodes_with_no_hazards();
        }
        return res;
    }
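The outstanding_hazard_pointers_for() helper assumed above scans every thread's hazard-pointer slot; in the well-known formulation from C++ Concurrency in Action (a sketch assuming its hazard_pointers array), it is roughly:

    // true if any thread's hazard pointer still references p,
    // in which case p must not be deleted yet
    bool outstanding_hazard_pointers_for(void* p)
    {
        for (unsigned i = 0; i < max_hazard_pointers; ++i)
        {
            if (hazard_pointers[i].pointer.load() == p)
                return true;
        }
        return false;
    }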
Example #23
    bool s_execute(WFVector *vec){
        fcount=0;
        controlWord=(std::atomic<void *> *)&(this->isComplete);

        complete(vec, pos);
        if (next.load() != (ShiftHelper<InsertAt> *)(0x1)) {
            return true;
        }
        else {
            return false; // TODO: throw out-of-bounds exception
        }
    };
Example #24
TEST(sysdeps_mutex, mutex_smoke) {
    static std::atomic<bool> finished(false);
    static std::mutex &m = *new std::mutex();
    m.lock();
    ASSERT_FALSE(m.try_lock());
    adb_thread_create([](void*) {
        ASSERT_FALSE(m.try_lock());
        m.lock();
        finished.store(true);
        adb_sleep_ms(200);
        m.unlock();
    }, nullptr);

    ASSERT_FALSE(finished.load());
    adb_sleep_ms(100);
    ASSERT_FALSE(finished.load());
    m.unlock();
    adb_sleep_ms(100);
    m.lock();
    ASSERT_TRUE(finished.load());
    m.unlock();
}
Example #25
    void resolve (
        std::vector <std::string> const& names,
        HandlerType const& handler)
    {
        check_precondition (m_called_stop.load () == 0);
        check_precondition (!names.empty());

        // TODO NIKB use rvalue references to construct and move
        //           reducing cost.
        m_io_service.dispatch (m_strand.wrap (boost::bind (
            &NameResolverImpl::do_resolve, this,
            names, handler, CompletionCounter(this))));
    }
Example #26
// Trace start of perform sema call, returns OpId
uint64_t trace::startOperation(trace::OperationKind OpKind,
                               const trace::SwiftInvocation &Inv,
                               const trace::StringPairs &OpArgs) {
  auto OpId = ++operation_id;
  if (trace::enabled()) {
    auto Node = consumers.load(std::memory_order_acquire);
    while (Node) {
      Node->Consumer->operationStarted(OpId, OpKind, Inv, OpArgs);
      Node = Node->Next;
    }
  }
  return OpId;
}
Example #27
DEF_TEST(TLS, reporter) {
    // TODO: Disabled for now to work around
    // http://code.google.com/p/skia/issues/detail?id=619
    // ('flaky segfault in TLS test on Shuttle_Ubuntu12 buildbots')
    if( false ) test_threads(&thread_main);

    // Test to ensure that at thread destruction, TLS destructors
    // have been called.
    test_threads([] {
        SkTLS::Get(fake_create_TLS, fake_delete_TLS);
    });
    REPORTER_ASSERT(reporter, 0 == gCounter.load());
}
Example #28
	   /** setter for file state
	    * @param state - file state to mark the file with
	    */
	   inline void state(State state) {
		   // do not change file state when it is marked for deletion:
		   if(m_state.load(std::memory_order_acquire) == State::FILE_IS_MARKED_FOR_DELETION)
			   return;

		   if(state == State::FILE_IS_IN_USE_BY_SYNC)
			   m_lastsyncattempt = boost::posix_time::microsec_clock::local_time();

		   // fire the condition variable for whoever waits for file status to be changed:
		   m_state.exchange(state, std::memory_order_release);
		   boost::mutex::scoped_lock lock(m_state_changed_mux);
		   m_state_changed_condition.notify_all();
	   }
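The condition variable fired here implies a waiter of roughly this shape (a hypothetical sketch using the same members):

	   /** block the calling thread until the file state changes away from 'current' */
	   inline void wait_for_state_change(State current) {
		   boost::mutex::scoped_lock lock(m_state_changed_mux);
		   while (m_state.load(std::memory_order_acquire) == current)
			   m_state_changed_condition.wait(lock);
	   }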
Example #29
BENCHMARK_RELATIVE(Fib_Sum_Gen, iters) {
  int s = 0;
  while (iters--) {
    auto fib = GENERATOR(int) {
      int a = 0;
      int b = 1;
      for (;;) {
        yield(a += b);
        yield(b += a);
      }
    };
    s += fib | take(testSize.load()) | sum;
  }
}
Example #30
	void startSim(int* common,int hold1,int hold2,int playerNum,SimType type)
	{
		stopAndGetRes();
		while(running.load())
		{
			this_thread::sleep_for(std::chrono::milliseconds(2));
		}
		std::lock_guard<std::mutex> lck (mtx);
		sum=win=0;
		stop=false;
		simThread=move(thread(work,this,common,(int)type,hold1,hold2,playerNum));
		simThread.detach();
	}