Example #1
bool try_lock() {
   // size_t(-1) is the "unowned" sentinel; a hashed thread id is assumed never to collide with it
   size_t bad_id = size_t(-1);
   auto ThisID = _hash(std::this_thread::get_id());
   // acquire if unowned, or re-enter if this thread already holds the lock
   if (!_lock.compare_exchange_strong(bad_id, ThisID) && !_lock.compare_exchange_strong(ThisID, ThisID)) {
     return false;
   }
   ++_lock_count; // recursion depth
   return true;
 }
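The matching release is not shown in this snippet. A minimal unlock() sketch, assuming (from try_lock above, not from the original source) that _lock stores the owner's hashed thread id, _lock_count tracks recursion depth, and size_t(-1) means "unowned":

void unlock() {
   // drop one recursion level; on the last one, publish the "unowned"
   // sentinel so another thread's first compare_exchange can succeed
   if (--_lock_count == 0) {
     _lock.store(size_t(-1));
   }
}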
Example #2
	   /**
	    * Try to mark the file for deletion. Only a few file states permit this operation.
	    *
	    * @return true if the file was marked for deletion and has no subscribers.
	    *         No one should reference this file once it is marked for deletion.
	    */
	   inline bool mark_for_deletion(){
		   boost::mutex::scoped_lock lock(m_state_changed_mux);

		   bool marked = false;

		   LOG (INFO) << "Managed file OTO \"" << fqp() << "\" with state \"" << state() << "\" is requested for deletion. " <<
				   "subscribers # = " << m_subscribers.load(std::memory_order_acquire) << "\n";
		   // check all states that allow the file to be marked for deletion:
		   State expected = State::FILE_IS_IDLE;

		   // look for the "idle" marker
		   marked = m_state.compare_exchange_strong(expected, State::FILE_IS_MARKED_FOR_DELETION);

		   if(marked){
			   // wait for all detaching clients to finish:
			   boost::unique_lock<boost::mutex> detaching_lock(m_detaching_mux_guard);
			   m_detaching_condition.wait(detaching_lock, [&]{ return m_detaching_clients.size() == 0; });
			   detaching_lock.unlock();

			   // go ahead, no detaching clients are in progress any more
			   m_state_changed_condition.notify_all();
			   LOG (INFO) << "Managed file OTO \"" << fqp() << "\" with state \"" << state() <<
					   "\" is successfully marked for deletion." << "\n";
			   return m_subscribers.load(std::memory_order_acquire) == 0;
		   }

		   expected = State::FILE_IS_FORBIDDEN;
		   marked   = m_state.compare_exchange_strong(expected, State::FILE_IS_MARKED_FOR_DELETION);

		   if(marked){
			   m_state_changed_condition.notify_all();
			   LOG (INFO) << "Managed file OTO \"" << fqp() << "\" with state \"" << state() <<
					   "\" is successfully marked for deletion." << "\n";
			   return m_subscribers.load(std::memory_order_acquire) == 0;
		   }

		   expected = State::FILE_IS_AMORPHOUS;
		   marked   = m_state.compare_exchange_strong(expected, State::FILE_IS_MARKED_FOR_DELETION);

		   m_state_changed_condition.notify_all();
		   marked = marked && (m_subscribers.load(std::memory_order_acquire) == 0);
		   std::string marked_str = marked ? "successfully" : "NOT";
		   LOG (INFO) << "Managed file OTO \"" << fqp() << "\" with state \"" << state() <<
				   "\" is " << marked_str << " marked for deletion." << "\n";

		   return marked;
	   }
Example #3
    bool s_execute(WFVector *vec, void *&v){
        ArrayElement *spot = vec->getSpot(pos);

        CasHelper *cah = new CasHelper(this);

        void *ahelper = assoc.load();
        while (ahelper == NULL){
            void *cvalue = spot->load(std::memory_order_relaxed);

            if (Helper::isHelper(cvalue)){
                // another operation's helper occupies the spot:
                // help it complete, then re-examine the association
                Helper *tHelper = Helper::unmark(cvalue);
                if (tHelper->watch(cvalue, spot)){
                    Helper::remove(vec, pos, cvalue);
                    tHelper->unwatch();
                }
                ahelper = assoc.load();
                continue;
            }
            else if (cvalue == o_value){
                // the expected value is present: publish our marked helper,
                // then try to associate it with this operation
                void *tempMarked = Helper::mark(cah);
                if (spot->compare_exchange_strong(cvalue, tempMarked)){
                    if (assoc.compare_exchange_strong(ahelper, tempMarked) || ahelper == tempMarked){
                        // we won the association: install the new value
                        spot->compare_exchange_strong(tempMarked, n_value);
                    }
                    else{
                        // another thread decided the outcome first: roll back
                        spot->compare_exchange_strong(tempMarked, o_value);
                        cah->safeFree();
                    }
                    break;
                }
            }
            else{
                // spot holds a different value: record it as the outcome
                assoc.compare_exchange_strong(ahelper, cvalue);
                cah->unsafeFree();
                break;
            }
        }//End while

        ahelper = assoc.load();
        if (Helper::isHelper(ahelper)){
            // associated with a helper: the CAS operation succeeded
            return true;
        }
        else{
            // associated with the observed value: the CAS operation failed
            v = ahelper;
            return false;
        }
    }
Example #4
// assumed supporting definitions, not shown in the original snippet:
#include <atomic>
#include <iostream>
using namespace std;

std::atomic<int> ai;
int  tst_val   = 4;
int  new_val   = 5;
bool exchanged = false;

void valsout()
{
    cout << "ai= " << ai << "  tst_val= " << tst_val << "  new_val= " << new_val
         << "  exchanged= " << boolalpha << exchanged << endl;
}

int main()
{
    ai = 3;
    valsout();
    cout << "hello world" << endl;

    // tst_val != ai   ==>  CAS fails and tst_val is overwritten with ai's value
    exchanged = ai.compare_exchange_strong( tst_val, new_val );
    valsout();

    // tst_val == ai   ==>  CAS succeeds and ai is set to new_val
    exchanged = ai.compare_exchange_strong( tst_val, new_val );
    valsout();

    return 0;
}
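With the assumed initial values (ai = 3, tst_val = 4, new_val = 5), this would print roughly:

ai= 3  tst_val= 4  new_val= 5  exchanged= false
hello world
ai= 3  tst_val= 3  new_val= 5  exchanged= false
ai= 5  tst_val= 3  new_val= 5  exchanged= true

The failed CAS copies ai's value into tst_val, which is exactly what lets the second attempt succeed.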
Example #5
void thread_2()
{
    int expected = 1;
    // spin until flag is observed at 1, then advance it to 2
    while (!flag.compare_exchange_strong(expected, 2, std::memory_order_acq_rel)) {
        expected = 1; // a failed CAS overwrites 'expected' with the current value
    }
}
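For context, a plausible counterpart performing the preceding hand-off of the same flag (0 -> 1); this is a hypothetical companion, not the original code:

void thread_1()
{
    int expected = 0;
    while (!flag.compare_exchange_strong(expected, 1, std::memory_order_acq_rel)) {
        expected = 0; // reset: a failed CAS loads the current value into 'expected'
    }
}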
Example #6
 void unlock ()
 {
     bool expected = true;
     POMAGMA_ASSERT(m_flag.compare_exchange_strong(expected, false),
             "unlock contention");
     store_barrier();
 }
Example #7
 void lock() {
     Node::Ptr unlocked(nullptr, 0);
     if (!tail_.compare_exchange_strong(unlocked, Node::Ptr(nullptr, 1),
                                        std::memory_order_acquire)) {
         slowpathLock(unlocked);
     }
 }
Example #8
 std::shared_ptr<T> pop()
 {
     std::atomic<void*>& hp = get_hazard_pointer_for_current_thread();
     node* old_head = head.load();
     do {
         node* temp;
         do { // loop until you've set the hazard pointer to head
             temp = old_head;
             hp.store(old_head);
             old_head = head.load();
         } while (old_head != temp);
     }
     while (old_head && 
            !head.compare_exchange_strong(old_head, old_head->next));
     hp.store(nullptr); // clear hazard pointer once you're finished
     std::shared_ptr<T> res;
     if (old_head) {
         res.swap(old_head->data);
         if (outstanding_hazard_pointers_for(old_head)) { 
             // check for hazard pointers referencing
             // a node before you delete it
             reclaim_later(old_head);
         }
         else {
             delete old_head;
         }
         delete_nodes_with_no_hazards();
     }
     return res;
 }
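get_hazard_pointer_for_current_thread(), outstanding_hazard_pointers_for() and the reclaim helpers are not shown. A minimal sketch of the per-thread slot acquisition in the style of the classic fixed-size owner table (all names and the table size are assumptions; requires <atomic>, <thread>, <stdexcept>):

const unsigned max_hazard_pointers = 100;

struct hazard_pointer {
    std::atomic<std::thread::id> id;
    std::atomic<void*> pointer;
};
hazard_pointer hazard_pointers[max_hazard_pointers];

class hp_owner {
    hazard_pointer* hp;
public:
    hp_owner() : hp(nullptr) {
        for (unsigned i = 0; i < max_hazard_pointers; ++i) {
            std::thread::id old_id;
            // claim a free slot (default-constructed id) via CAS
            if (hazard_pointers[i].id.compare_exchange_strong(
                    old_id, std::this_thread::get_id())) {
                hp = &hazard_pointers[i];
                break;
            }
        }
        if (!hp) throw std::runtime_error("no hazard pointers available");
    }
    ~hp_owner() {
        hp->pointer.store(nullptr);
        hp->id.store(std::thread::id());
    }
    std::atomic<void*>& get_pointer() { return hp->pointer; }
};

std::atomic<void*>& get_hazard_pointer_for_current_thread() {
    thread_local static hp_owner hazard; // one slot per thread, released on thread exit
    return hazard.get_pointer();
}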
Example #9
 void lock ()
 {
     load_barrier();
     bool expected = false;
     POMAGMA_ASSERT(m_flag.compare_exchange_strong(expected, true),
             "lock contention");
 }
Example #10
  int Pop()
  {
      while (count.load(std::memory_order_acquire) > 1)
      {
          int head1 = head.load(std::memory_order_acquire);
          int next1 = array[head1].Next.exchange(-1, std::memory_order_seq_cst);

          if (next1 >= 0)
          {
              int head2 = head1;
              if (head.compare_exchange_strong(head2, next1, std::memory_order_seq_cst))
              {
                  count.fetch_sub(1, std::memory_order_seq_cst);
                  return head1;
              }
              else
              {
                  array[head1].Next.exchange(next1, std::memory_order_seq_cst);
              }
          }
          else
          {
            sched_yield();
          }
      }

      return -1;
  }
Example #11
JNIEXPORT void JNICALL Java_JNITest_print0
  (JNIEnv * jenv, jobject jobj, jstring jstr) {

      struct Malloc* defMalloc = defaultMalloc.load();
    //   if (defaultMalloc == nullptr) {
          std::cout << "lookup... " << std::endl;
          jclass klass = jenv->GetObjectClass(jobj);
          jmethodID mid = jenv->GetMethodID(klass, "multiply", "(JI)J");

          struct Malloc* nullObj = nullptr;

          jclass globalClass = reinterpret_cast<jclass>(jenv->NewGlobalRef(klass));
          jobject globalObj = reinterpret_cast<jobject>(jenv->NewGlobalRef(jobj));

          defMalloc = (struct Malloc*) malloc(sizeof(struct Malloc));
          defMalloc->mallocClass = globalClass;
          defMalloc->mallocObj = globalObj;
          defMalloc->mallocMethodId = mid;

          if (!defaultMalloc.compare_exchange_strong(nullObj, defMalloc)) {
              free(defMalloc);
              jenv->DeleteGlobalRef(globalObj);
              jenv->DeleteGlobalRef(globalClass);
              std::cout << "ERROR" << std::endl;
              return;
          }
    //   }

      jlong address = jenv->CallLongMethod(defMalloc->mallocObj, defMalloc->mallocMethodId, 123, 10);
      std::cout << "hello" << ": " << address << std::endl;
  }
Example #12
 void synchronize_rcu() noexcept {
     const uint64_t waitForVersion = updaterVersion.load()+1;
     auto tmp = waitForVersion-1;
     updaterVersion.compare_exchange_strong(tmp, waitForVersion);
     for (int i=0; i < maxThreads; i++) {
         while (readersVersion[i*CLPAD].load() < waitForVersion) { } // spin
     }
 }
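The reader side that synchronize_rcu() waits on is not shown. A plausible counterpart (a sketch; the slot layout follows the code above, but the function names and the UINT64_MAX "not reading" convention are assumptions): each reader publishes the updater version it observed on entry, and resets its slot on exit so writers stop waiting for it.

 void rcu_read_lock(const int tid) noexcept {
     // announce the version we read under; synchronize_rcu() spins until
     // this slot reaches waitForVersion
     readersVersion[tid*CLPAD].store(updaterVersion.load());
 }

 void rcu_read_unlock(const int tid) noexcept {
     // UINT64_MAX compares >= any waitForVersion, so writers skip idle readers
     readersVersion[tid*CLPAD].store(UINT64_MAX, std::memory_order_release);
 }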
Example #13
bool Participant::tryCollect() {
    remote_thread_fence::trigger();

    uintptr_t cur_epoch = epoch_.load(mo_rlx);

    // Check whether all active threads are in the current epoch so we
    // can advance it.
    // As we do it, we lazily clean up exited threads.
try_again:
    std::atomic<Participant::Ptr> *prevp = &participants_;
    Participant::Ptr cur = prevp->load(mo_acq);
    while (cur) {
        Participant::Ptr next = cur->next_.load(mo_rlx);
        if (next.tag()) {
            // This node has exited. Try to unlink it from the
            // list. This will fail if it's already been unlinked or
            // the previous node has exited; in those cases, we start
            // back over at the head of the list.
            next = Ptr(next, 0); // clear next's tag
            if (prevp->compare_exchange_strong(cur, next, mo_rlx)) {
                Guard g(this);
                g.unlinked(cur.ptr());
            } else {
                goto try_again;
            }
        } else {
            // We can only advance the epoch if every thread in a critical
            // section is in the current epoch.
            if (cur->in_critical_.load(mo_rlx) &&
                cur->epoch_.load(mo_rlx) != cur_epoch) {
                return false;
            }
            prevp = &cur->next_;
        }

        cur = next;
    }

    // We want everything visible to the reads in the loop above to
    // happen-before the epoch update.
    std::atomic_thread_fence(mo_acq);
    // Try to advance the global epoch
    uintptr_t new_epoch = cur_epoch + 1;
    if (!global_epoch_.compare_exchange_strong(cur_epoch, new_epoch,
                                               mo_acq_rel)) {
        return false;
    }

    // Garbage collect
    global_garbage_[(new_epoch+1) % kNumEpochs].collect();
    garbage_.collect();
    // Now that the collection is done, we can safely update our
    // local epoch.
    epoch_ = new_epoch;

    return true;
}
Example #14
    void lock() noexcept {
        std::int32_t collisions = 0, tests = 0, expected = 0;
        // after max. spins or collisions suspend via futex
        while ( BOOST_FIBERS_SPIN_MAX_TESTS > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'value_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test shared variable 'value_'
            // first access to 'value_' -> cache miss
            // successive access to 'value_' -> cache hit
            // if 'value_' was released by another fiber
            // the cached 'value_' is invalidated -> cache miss
            if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
                ++tests;
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                // give CPU a hint that this thread is in a "spin-wait" loop
                // delays the next instruction's execution for a finite period of time (depends on processor family)
                // the CPU is not under demand, parts of the pipeline are no longer being used
                // -> reduces the power consumed by the CPU
                // -> prevent pipeline stalls
                cpu_relax();
#else
                // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                // but only to another thread on the same processor
                // instead of constant checking, a thread only checks if no other useful work is pending
                std::this_thread::yield();
#endif
            } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                static thread_local std::minstd_rand generator;
                // the distribution must be rebuilt on each collision so its
                // upper bound actually grows with the collision count
                std::uniform_int_distribution< std::int32_t > distribution{ 0, static_cast< std::int32_t >( 1) << collisions };
                const std::int32_t z = distribution( generator);
                ++collisions;
                for ( std::int32_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, lock acquired
                return;
            }
        }
        // failure, lock not acquired
        // pause via futex
        if ( 2 != expected) {
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
        while ( 0 != expected) {
            futex_wait( & value_, 2);
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
    }
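The matching unlock is not shown. Given the encoding implied above (0 = unlocked, 1 = locked, 2 = locked with waiters parked on the futex), a release sketch in the style of the classic futex mutex (an assumption, not the original code):

    void unlock() noexcept {
        // only state 2 can have parked waiters; wake one after releasing
        if ( 2 == value_.exchange( 0, std::memory_order_release) ) {
            futex_wake( & value_);
        }
    }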
Example #15
	static void increase_external_count(std::atomic<counted_node_ptr> &counter, counted_node_ptr &old_counter) {
		counted_node_ptr new_counter;
		do {
			new_counter = old_counter;
			new_counter.inc();
		} while (!counter.compare_exchange_strong(old_counter, new_counter, std::memory_order_acquire, std::memory_order_relaxed));

		old_counter.set_counter(new_counter.get_counter());
	}
Example #16
		// Returns false if node needs to be deallocated.
		bool release_ref() {
			node_counter old_counter = count.load(std::memory_order_relaxed);
			node_counter new_counter;
			do {
				new_counter = old_counter;
				--new_counter.internal_count;
			} while (!count.compare_exchange_strong(old_counter, new_counter, std::memory_order_acquire, std::memory_order_relaxed));

			return new_counter.internal_count || new_counter.external_counters;
		}
Example #17
 void Push(int index)
 {
     int head1 = head.load(std::memory_order_acquire);
     do
     {
         array[index].Next.store(head1, std::memory_order_release);
        
     } while (!head.compare_exchange_strong(head1, index, std::memory_order_seq_cst));
     count.fetch_add(1, std::memory_order_seq_cst);
 }
Example #18
inline bool Carrier::set_and_merge(std::atomic<Ob>& destin, Ob source) const {
    POMAGMA_ASSERT_RANGE_(5, source, item_dim());

    Ob old = 0;
    while (not destin.compare_exchange_strong(
        old, source, std::memory_order_acq_rel, std::memory_order_acquire)) {
        source = ensure_equal(source, old);
        if (old == source) return false;
    }
    return old == 0;
}
Example #19
 void wait ()
 {
     bool expected = true;
     if (m_accepting.compare_exchange_strong(expected, false)) {
         m_condition.notify_all();
         for (auto & worker : m_pool) {
             worker.join();
         }
         POMAGMA_DEBUG("Stopped pool of " << m_pool.size() << " workers");
     }
 }
Example #20
inline bool Carrier::set_or_merge(std::atomic<Ob>& destin, Ob source) const {
    POMAGMA_ASSERT_RANGE_(5, source, item_dim());

    Ob old = 0;
    if (destin.compare_exchange_strong(old, source, std::memory_order_acq_rel,
                                       std::memory_order_acquire)) {
        return true;
    } else {
        ensure_equal(source, old);
        return false;
    }
}
Example #21
    bool s_execute(WFVector *vec, void *&v){

        ArrayElement *spot=vec->getSpot(pos);

        void * aValue=value.load();
        while (aValue == NULL){
            void *cvalue=spot->load(std::memory_order_relaxed);

            if (Helper::isHelper(cvalue)){
                Helper *tHelper=Helper::unmark(cvalue);
                if (tHelper->watch(cvalue, spot)){
                    Helper::remove(vec, pos, cvalue);
                    tHelper->unwatch();
                }
                aValue=value.load();
            }
            else if (cvalue== NOT_VALUE){
                value.compare_exchange_strong(aValue, (void *)(0x1));
                break;
            }
            else{
                assert(Value::isValid(cvalue));
                value.compare_exchange_strong(aValue, cvalue);
                break;
            }
        }

        aValue=value.load();
        if (aValue == (void *)0x1){
            return false;
        }
        else{
            assert(aValue != NOT_VALUE);
            v=aValue;
            return true;
        }

    };
Example #22
void foo()
{
    int expected = 11;
    while(!x.compare_exchange_strong(expected, 42, std::memory_order_seq_cst))
    {
        expected = 11;
    }
    
    std::cout << y.load(std::memory_order_relaxed) << " ";
    y.store(21, std::memory_order_relaxed);
    
    std::atomic<int> z;
    z.store(0, std::memory_order_seq_cst);
    
    x.store(11, std::memory_order_relaxed); // can this be moved above z.store?
}
Example #23
void post_thread_1( void ) {
	for ( int i = 0 ; i < post_count ; ++i ) {
		task* pt = new task( &run );
		{
			_lock->lock();
			_queue.add_tail( pt );
			_lock->unlock();
		}
		int expected = 0;
		if ( _posted.compare_exchange_strong( expected , 1 )) {
			if ( PostQueuedCompletionStatus( _iocp , 0 , 0 , 0 ) == FALSE ) {
				_posted.exchange(0);
			}
		}
	}
}
Example #24
 bool tryAndRun(cybozu::Socket& client)
 {
     int expected = Sleep;
     if (!state_.compare_exchange_strong(expected, Ready)) return false;
     try {
         s_[0].moveFrom(client);
         s_[1].connect(opt_.serverAddr, opt_.serverPort, opt_.socketTimeoutS * 1000);
         setSocketTimeout(s_[1], opt_.socketTimeoutS);
         state_ = Running2;
     } catch (std::exception& e) {
         s_[0].close();
         s_[1].close();
         state_ = Sleep;
     }
     return true;
 }
Example #25
    context * steal() {
		int64_t top = top_.load( std::memory_order_acquire);
		std::atomic_thread_fence( std::memory_order_seq_cst);
		int64_t bottom = bottom_.load( std::memory_order_acquire);
        context * ctx = nullptr;
		if ( top < bottom) {
            // queue is not empty
			circular_buffer * buffer = buffer_.load( std::memory_order_consume);
            ctx = buffer->get( top);
			if ( ! top_.compare_exchange_strong( top, top + 1,
                                                 std::memory_order_seq_cst, std::memory_order_relaxed) ) {
				return nullptr;
            }
		}
        return ctx;
	}
Example #26
EventBaseManager* EventBaseManager::get() {
  EventBaseManager* mgr = globalManager;
  if (mgr) {
    return mgr;
  }

  EventBaseManager* new_mgr = new EventBaseManager;
  bool exchanged = globalManager.compare_exchange_strong(mgr, new_mgr);
  if (!exchanged) {
    delete new_mgr;
    return mgr;
  } else {
    return new_mgr;
  }

}
Example #27
		// thread-safe
		static unsigned long long next_dispatch(int id, int){
			assert(sizeof(unsigned long long) == 8);
			assert(id <= MAX_CLASS_ID);
			static std::atomic<int>	s_circle_seq(0);
			static std::atomic<uint32_t> s_sec_timestamp(0);
			int cmp_seq_value = MAX_HZ_SEQN;
			s_circle_seq.compare_exchange_strong(cmp_seq_value, 0);
			int seq = s_circle_seq.fetch_add(1);
			if (seq == 1 || s_sec_timestamp == 0){
				s_sec_timestamp = dcs::time_unixtime_s();
			}
			unsigned long long nseq = s_sec_timestamp;
			nseq <<= max_class_2e;
			nseq |= id;
			nseq <<= max_hz_2e;
			nseq |= seq;
			return nseq;
		}
Example #28
	stored_ptr pop() {
		counted_node_ptr old_head = head.load(std::memory_order_relaxed);
		for (;;) {
			increase_external_count(head, old_head);
			node * const ptr = old_head.get();
			if (ptr == tail.load().get())
				return nullptr;

			counted_node_ptr next = ptr->next.load();
			if (head.compare_exchange_strong(old_head, next)) {
				T * const res = ptr->data.exchange(nullptr);
				free_external_counter(old_head);
				return stored_ptr(res);
			}

			if (!ptr->release_ref())
				deallocate_node(ptr);
		}
	}
Example #29
	bool tryAndRun(cybozu::Socket& client)
	{
		int expected = Sleep;
		if (!state_.compare_exchange_strong(expected, Ready)) return false;
		if (opt_.verbose) cybozu::PutLog(cybozu::LogInfo, "tryAndRun:in");
		try {
			s_[0].moveFrom(client);
			s_[1].connect(opt_.serverAddr, opt_.serverPort);
			state_ = Running;
			return true;
		} catch (std::exception& e) {
			cybozu::PutLog(cybozu::LogInfo, "tryAndRun::connect err %s", e.what());
			s_[0].close();
			s_[1].close();
			state_ = Sleep;
			waitMsec(100);
			return true;
		}
	}
Example #30
const BreakIterator* getMaster() {
  if (auto master = kMaster.load(std::memory_order_acquire)) {
    return master;
  }
  UParseError parseError;
  UErrorCode errorCode = U_ZERO_ERROR;
  const BreakIterator* bi
    = new icu::RuleBasedBreakIterator(icu::UnicodeString(strRules),
                                      parseError,
                                      errorCode);
  // Atomically swap in bi, but delete it if this thread loses the
  // initialization race.
  static const BreakIterator* expectedNull = nullptr;
  if (!kMaster.compare_exchange_strong(expectedNull, bi,
                                       std::memory_order_acq_rel)) {
    delete bi;
  }
  return kMaster.load(std::memory_order_acquire);
}