Example No. 1
    void lock() noexcept {
        std::int32_t collisions = 0, tests = 0, expected = 0;
        // after max. spins or collisions suspend via futex
        while ( BOOST_FIBERS_SPIN_MAX_TESTS > tests && BOOST_FIBERS_SPIN_MAX_COLLISIONS > collisions) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'value_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            // test shared variable 'value_'
            // first access to 'value_' -> cache miss
            // successive access to 'value_' -> cache hit
            // if 'value_' was released by other fiber
            // cached 'value_' is invalidated -> cache miss
            if ( 0 != ( expected = value_.load( std::memory_order_relaxed) ) ) {
                ++tests;
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                // give CPU a hint that this thread is in a "spin-wait" loop
                // delays the next instruction's execution for a finite period of time (depends on processor family)
                // the CPU is not under demand, parts of the pipeline are no longer being used
                // -> reduces the power consumed by the CPU
                // -> prevent pipeline stalls
                cpu_relax();
#else
                // std::this_thread::yield() allows this_thread to give up the remaining part of its time slice,
                // but only to another thread on the same processor
                // instead of constant checking, a thread only checks if no other useful work is pending
                std::this_thread::yield();
#endif
            } else if ( ! value_.compare_exchange_strong( expected, 1, std::memory_order_acquire, std::memory_order_relaxed) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                static thread_local std::minstd_rand generator;
                std::uniform_int_distribution< std::int32_t > distribution{ 0, static_cast< std::int32_t >( 1) << collisions };
                const std::int32_t z = distribution( generator);
                ++collisions;
                for ( std::int32_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, lock acquired
                return;
            }
        }
        // failure, lock not acquired
        // pause via futex
        if ( 2 != expected) {
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
        while ( 0 != expected) {
            futex_wait( & value_, 2);
            expected = value_.exchange( 2, std::memory_order_acquire);
        }
    }
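The matching unlock() is not part of this example. In this futex-based scheme (0 = unlocked, 1 = locked/uncontended, 2 = locked/contended), release typically stores 0 and wakes a waiter only if the lock was contended; a minimal sketch, assuming the same 'value_' member and a futex_wake() helper analogous to the futex_wait() used above:

    void unlock() noexcept {
        // if the previous value was 2, at least one fiber may be parked on the futex
        if ( 1 != value_.exchange( 0, std::memory_order_release) ) {
            // wake one waiter; it re-acquires via exchange( 2, ...) as in lock()
            futex_wake( & value_, 1);
        }
    }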
Example No. 2
void print_stats(deadline_timer& stats_timer, error_code const& ec)
{
	if (ec) return;

	time_point now = steady_clock::now();
	float interval = duration_cast<milliseconds>(now - stats_start).count() / 1000.f;
	if (interval <= 0.f) interval = 0.001f;
	stats_start = now;

	printf(
#ifdef DEBUG_STATS
		"node-buf: [%s %s %s %s]"
#endif

		" in: %.1f"
		" invalid_enc: %.1f"
		" invalid_src: %.1f"
		" id_failure: %.1f"
		" out_ping: %.1f"
		" short_tid_pong: %.1f"
		" invalid_pong: %.1f"
		" added: %.1f"
		" backup: %.1f\n"
#ifdef DEBUG_STATS
		, suffix(nodebuf_size[0].load()).c_str()
		, suffix(nodebuf_size[1].load()).c_str()
		, suffix(nodebuf_size[2].load()).c_str()
		, suffix(nodebuf_size[3].load()).c_str()
#endif
		, incoming_queries.exchange(0) / interval
		, invalid_encoding.exchange(0) / interval
		, invalid_src_address.exchange(0) / interval
		, failed_nodeid_queries.exchange(0) / interval
		, outgoing_pings.exchange(0) / interval
		, short_tid_pongs.exchange(0) / interval
		, invalid_pongs.exchange(0) / interval
		, added_nodes.exchange(0) / interval
		, backup_nodes_returned.exchange(0) / interval
		);

#ifdef CLIENTS_STAT
	std::lock_guard<std::mutex> l(client_mutex);
	std::vector<std::pair<int, uint16_t>> ordered;
	for (auto i : client_histogram) {
		ordered.emplace_back(i.second, i.first);
	}
	std::sort(ordered.begin(), ordered.end());
	for (auto i : ordered) {
		printf("[%c%c: %d] ", (i.second >> 8) & 0xff, i.second & 0xff, i.first);
	}
	printf("\n");
	client_histogram.clear();
#endif

	fflush(stdout);

	stats_timer.expires_from_now(boost::posix_time::seconds(print_stats_interval));
	stats_timer.async_wait(std::bind(&print_stats, std::ref(stats_timer), _1));
}
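The counters drained above follow a read-and-reset pattern: each one is an atomic that is incremented on the hot path and swapped to zero with exchange(0) when statistics are printed, so dividing by the elapsed interval yields a rate. A minimal sketch of that pattern (the declaration, types, and helper names here are assumptions; only the exchange(0)-per-interval idea comes from the code above):

std::atomic<uint64_t> incoming_queries{0};

// hot path: cheap increment, no ordering requirements beyond atomicity
void count_incoming_query()
{
	incoming_queries.fetch_add(1, std::memory_order_relaxed);
}

// stats path: atomically read and reset, then convert to a per-second rate
float incoming_rate(float interval_seconds)
{
	return incoming_queries.exchange(0) / interval_seconds;
}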
Example No. 3
    /// Atomically check and reset the internal counter.
    /// @return CHANGED  when the value differs from \a old_value
    ///                  or someone updated it during the wait_fast call.
    ///         SIGNALED when the counter was signaled and matches the value read on entry.
    ///         TIMEDOUT when the thread should wait for another signal.
    wakeup_result wait_fast(int* old_value = NULL) {
        int val = value();

        if (old_value && *old_value != val) {
            *old_value = val;

            #ifdef PERF_STATS
            ++m_wait_fast_count;
            #endif

            return wakeup_result::CHANGED;
        }

        int res = m_count.exchange(0, std::memory_order_acq_rel);

        if (res == 0)
            return wakeup_result::TIMEDOUT;

        #ifdef PERF_STATS
        ++m_wait_fast_count;
        #endif

        return res == val ? wakeup_result::SIGNALED : wakeup_result::CHANGED;
    }
Example No. 4
 // returns nullptr on failure
 pointer try_pop() {
     pointer result = nullptr;
     while (m_consumer_lock.exchange(true)) {
         std::this_thread::yield();
     }
     // only one consumer allowed
     node* first = m_first;
     node* next = m_first->next;
     if (next) {
         // queue is not empty
         result = next->value; // take it out of the node
         next->value = nullptr;
         // swing first forward
         m_first = next;
         // release exclusivity
         m_consumer_lock = false;
         // delete old dummy
         //first->value = nullptr;
         delete first;
         return result;
     }
     else {
         // release exclusivity
         m_consumer_lock = false;
         return nullptr;
     }
 }
Example No. 5
    void try_reclaim(node* old_head)
    {
        if (threads_in_pop == 1) {
            // claim list of to-be-deleted nodes
            node* nodes_to_delete = to_be_deleted.exchange(nullptr);

            // are you the only thread in pop()?
            if (!--threads_in_pop) {
                // no other thread can be accessing this list of pending nodes.
                // There may be new pending nodes, but you're not bothered 
                // about them for now, as long as it's safe to reclaim your 
                // list.
                delete_nodes(nodes_to_delete);
            }
            else if (nodes_to_delete) {
                // not safe to reclaim the nodes, so if there are any,
                // you must chain them back onto the list of nodes 
                // pending deletion. 
                // This can happen if there are multiple threads accessing the
                // data structure concurrently. Other threads might have 
                // called pop() in between the first test of threads_in_pop and
                // the "claiming" of the list, potentially adding new nodes to
                // the list that are still being accessed by one or more of
                // those other threads.
                chain_pending_nodes(nodes_to_delete);
            }
            delete old_head;
        }
        else {
            // not safe to delete any nodes, add the node to the pending list
            chain_pending_node(old_head);
            --threads_in_pop;
        }
    }
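The chain_pending_nodes()/chain_pending_node() helpers used above are not shown in this example. In this reclamation scheme they push the nodes back onto the 'to_be_deleted' list with a CAS loop; a sketch, assuming the same atomic 'to_be_deleted' head and 'node' type as above:

    void chain_pending_nodes(node* first, node* last)
    {
        // splice the whole chain onto the head of the pending list
        last->next = to_be_deleted;
        while (!to_be_deleted.compare_exchange_weak(last->next, first));
    }

    void chain_pending_nodes(node* nodes)
    {
        node* last = nodes;
        while (node* const next = last->next)   // find the tail of the chain
            last = next;
        chain_pending_nodes(nodes, last);
    }

    void chain_pending_node(node* n)
    {
        chain_pending_nodes(n, n);
    }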
Example No. 6
/*
 * If the jit maturity counter is enabled, update it with the current amount of
 * emitted code.
 */
void reportJitMaturity(const CodeCache& code) {
  auto static jitMaturityCounter = ServiceData::createCounter("jit.maturity");

  // Optimized translations are faster than profiling translations, which are
  // faster than the interpreter.  But when optimized translations are
  // generated, some profiling translations will become dead.  We assume the
  // incremental value of an optimized translation over the corresponding
  // profiling translations is comparable to the incremental value of a
  // profiling translation of similar size; thus we don't have to apply
  // different weights to code in different regions.
  auto const codeSize =
    code.hot().used() + code.main().used() + code.prof().used();
  if (jitMaturityCounter) {
    // EvalJitMatureSize is supposed to be set to approximately 20% of the
    // code that will give us full performance, so recover the "fully mature"
    // size with some math.
    auto const fullSize = RuntimeOption::EvalJitMatureSize * 5;
    auto const after = codeSize >= fullSize ? 100
                                            : (codeSize * 100 / fullSize);
    auto const before = jitMaturityCounter->getValue();
    if (after > before) jitMaturityCounter->setValue(after);
  }

  if (!s_loggedJitMature.load(std::memory_order_relaxed) &&
      StructuredLog::enabled() &&
      codeSize >= RuntimeOption::EvalJitMatureSize &&
      !s_loggedJitMature.exchange(true, std::memory_order_relaxed)) {
    StructuredLogEntry cols;
    cols.setInt("jit_mature_sec", time(nullptr) - HttpServer::StartTime);
    StructuredLog::log("hhvm_warmup", cols);
  }
}
Example No. 7
void delete_nodes_with_no_hazards()
{
    // first claims the entire list of nodes to be reclaimed;
    // ensures that this is the only thread trying to reclaim
    // this particular set of nodes; other threads are now free
    // to add further nodes to the list or even try to reclaim
    // them without impacting the operation of this thread.
    data_to_reclaim* current = nodes_to_reclaim.exchange(nullptr);

    while (current) {
        data_to_reclaim* const next = current->next;

        // check each node in turn to see if there are any outstanding
        // hazard pointers.
        if (!outstanding_hazard_pointers_for(current->data)) {
            // if there aren't, delete the entry
            delete current;
        }
        else {
            // otherwise, just add the item back on the list for 
            // reclaiming later
            add_to_reclaim_list(current);
        }
        current=next;
    }
}
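outstanding_hazard_pointers_for() is not shown here. In the classic hazard-pointer scheme this snippet follows, it scans a fixed-size global table of hazard-pointer slots for the address in question; a sketch, assuming such a table (the 'hazard_pointers' array and 'max_hazard_pointers' constant are illustrative names, not from the original):

    bool outstanding_hazard_pointers_for(void* p)
    {
        for (unsigned i = 0; i < max_hazard_pointers; ++i) {
            // a match means some thread may still be dereferencing this node
            if (hazard_pointers[i].pointer.load() == p)
                return true;
        }
        return false;
    }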
Example No. 8
void States_FreezeCurrentSlot()
{
	// FIXME : Use of the IsSavingOrLoading flag is mostly a hack until we implement a
	// complete thread to manage queuing savestate tasks, and zipping states to disk.  --air
	if (!SysHasValidState())
	{
		Console.WriteLn("Save state: Aborting (VM is not active).");
		return;
	}

	if (wxGetApp().HasPendingSaves() || IsSavingOrLoading.exchange(true))
	{
		Console.WriteLn("Load or save action is already pending.");
		return;
	}
	Sstates_updateLoadBackupMenuItem(true);

	GSchangeSaveState(StatesC, SaveStateBase::GetFilename(StatesC).ToUTF8());
	StateCopy_SaveToSlot(StatesC);

	// Hack: Update the saveslot saying it's filled *right now* because it's still writing the file and we don't have a timestamp.
	saveslot_cache[StatesC].empty = false;
	saveslot_cache[StatesC].updated = wxDateTime::Now();
	saveslot_cache[StatesC].crc = ElfCRC;

	GetSysExecutorThread().PostIdleEvent(SysExecEvent_ClearSavingLoadingFlag());
}
Example No. 9
static void UpdateCallback(u64 userdata, int cycles_late) {
    SharedMem* mem = reinterpret_cast<SharedMem*>(shared_memory->GetPointer());

    if (is_device_reload_pending.exchange(false))
        LoadInputDevices();

    PadState state;
    state.zl.Assign(zl_button->GetStatus());
    state.zr.Assign(zr_button->GetStatus());

    // Get current c-stick position and update c-stick direction
    float c_stick_x_f, c_stick_y_f;
    std::tie(c_stick_x_f, c_stick_y_f) = c_stick->GetStatus();
    constexpr int MAX_CSTICK_RADIUS = 0x9C; // Max value for a c-stick radius
    const s16 c_stick_x = static_cast<s16>(c_stick_x_f * MAX_CSTICK_RADIUS);
    const s16 c_stick_y = static_cast<s16>(c_stick_y_f * MAX_CSTICK_RADIUS);

    if (!raw_c_stick) {
        const HID::DirectionState direction = HID::GetStickDirectionState(c_stick_x, c_stick_y);
        state.c_stick_up.Assign(direction.up);
        state.c_stick_down.Assign(direction.down);
        state.c_stick_left.Assign(direction.left);
        state.c_stick_right.Assign(direction.right);
    }

    // TODO (wwylele): implement raw C-stick data for raw_c_stick = true

    const u32 last_entry_index = mem->index;
    mem->index = next_pad_index;
    next_pad_index = (next_pad_index + 1) % mem->entries.size();

    // Get the previous Pad state
    PadState old_state{mem->entries[last_entry_index].current_state};

    // Compute bitmask with 1s for bits different from the old state
    PadState changed = {state.hex ^ old_state.hex};

    // Get the current Pad entry
    PadDataEntry& pad_entry = mem->entries[mem->index];

    // Update entry properties
    pad_entry.current_state.hex = state.hex;
    pad_entry.delta_additions.hex = changed.hex & state.hex;
    pad_entry.delta_removals.hex = changed.hex & old_state.hex;
    pad_entry.c_stick_x = c_stick_x;
    pad_entry.c_stick_y = c_stick_y;

    // If we just updated index 0, provide a new timestamp
    if (mem->index == 0) {
        mem->index_reset_ticks_previous = mem->index_reset_ticks;
        mem->index_reset_ticks = CoreTiming::GetTicks();
    }

    update_event->Signal();

    // Reschedule recurrent event
    CoreTiming::ScheduleEvent(msToCycles(update_period) - cycles_late, update_callback_id);
}
Example No. 10
    void lock() noexcept {
        std::size_t collisions = 0 ;
        for (;;) {
            // avoid using multiple pause instructions for a delay of a specific cycle count
            // the delay of cpu_relax() (pause on Intel) depends on the processor family
            // the cycle count cannot be guaranteed from one system to the next
            // -> check the shared variable 'state_' in between each cpu_relax() to prevent
            //    unnecessarily long delays on some systems
            std::size_t tests = 0;
            // test shared variable 'state_'
            // first access to 'state_' -> cache miss
            // successive access to 'state_' -> cache hit
            // if 'state_' was released by other fiber
            // cached 'state_' is invalidated -> cache miss
            while ( spinlock_status::locked == state_.load( std::memory_order_relaxed) ) {
#if !defined(BOOST_FIBERS_SPIN_SINGLE_CORE)
                if ( BOOST_FIBERS_SPIN_MAX_TESTS > tests) {
                    ++tests;
                    // give CPU a hint that this thread is in a "spin-wait" loop
                    // delays the next instruction's execution for a finite period of time (depends on processor family)
                    // the CPU is not under demand, parts of the pipeline are no longer being used
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                } else {
                    // std::this_thread::sleep_for( 0us) has a fairly long instruction path length,
                    // combined with an expensive ring-3 to ring-0 transition costing about 1000 cycles
                    // std::this_thread::sleep_for( 0us) allows this_thread to give up the remaining part of its time slice
                    // if and only if a thread of equal or greater priority is ready to run
                    static constexpr std::chrono::microseconds us0{ 0 };
                    std::this_thread::sleep_for( us0);
                }
#else
                std::this_thread::yield();
#endif
            }
            // test-and-set shared variable 'state_'
            // every time 'state_' is exchanged it is signalled over the bus, even if the test fails
            if ( spinlock_status::locked == state_.exchange( spinlock_status::locked, std::memory_order_acquire) ) {
                // spinlock now contended
                // utilize 'Binary Exponential Backoff' algorithm
                // linear_congruential_engine is a random number engine based on Linear congruential generator (LCG)
                static thread_local std::minstd_rand generator;
                std::uniform_int_distribution< std::size_t > distribution{ 0, static_cast< std::size_t >( 1) << collisions };
                const std::size_t z = distribution( generator);
                ++collisions;
                for ( std::size_t i = 0; i < z; ++i) {
                    // -> reduces the power consumed by the CPU
                    // -> prevent pipeline stalls
                    cpu_relax();
                }
            } else {
                // success, thread has acquired the lock
                break;
            }
        }
    }
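The matching unlock() for this spinlock is not part of the example; it is just a release store of the unlocked state, which pairs with the acquire exchange in lock(). A sketch, assuming the same 'state_' member and 'spinlock_status' enum used above:

    void unlock() noexcept {
        // release the lock; the release store pairs with the acquire exchange in lock()
        state_.store( spinlock_status::unlocked, std::memory_order_release);
    }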
Example No. 11
void bench_thread(B* b) {
	while (!b->should_stop()) {
		lock();
		assert(tester.exchange(1) == 0);
		tester.store(0);
		unlock();
		b->inc();
	}
}
Example No. 12
template <typename T, typename U>
[[gnu::always_inline]]
inline T exchange_consume(std::atomic<T> &x, U &&desired)
{
    // Wrong for DEC Alpha 21264, but OK on other contemporary processors:
    // relaxed and consume ordering behave the same on them, provided the compiler
    // does not break the dependency chain (true for a pointer dereference).
    T res = x.exchange(std::forward<U>(desired), std::memory_order_relaxed);
    asm volatile("" ::: "memory");
    return res;
}
Example No. 13
 void push_impl(node* tmp) {
     // acquire exclusivity
     while (m_producer_lock.exchange(true)) {
         std::this_thread::yield();
     }
     // publish & swing last forward
     m_last->next = tmp;
     m_last = tmp;
     // release exclusivity
     m_producer_lock = false;
 }
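push_impl() above and the try_pop() shown earlier appear to belong to the same two-lock queue: one spin flag per end, plus a dummy node so producer and consumer never touch the same node. A minimal sketch of the surrounding class, with member names taken from the two snippets and everything else assumed:

 template <typename T>
 class two_lock_queue {
     struct node {
         T* value = nullptr;
         node* next = nullptr;
     };

     node* m_first;                              // dummy node, owned by consumers
     node* m_last;                               // tail, owned by producers
     std::atomic<bool> m_consumer_lock{false};
     std::atomic<bool> m_producer_lock{false};

 public:
     two_lock_queue() : m_first(new node), m_last(m_first) {}

     void push(T* value) {
         node* tmp = new node;
         tmp->value = value;
         push_impl(tmp);                         // appends under m_producer_lock, as above
     }
     // try_pop() as shown earlier returns the value after the dummy node, or nullptr
 };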
Example No. 14
    void TextureSystem::Details::CollectGarbage()
    {
        Texture::Details* garbage = g_textureGarbage.exchange(nullptr, std::memory_order_relaxed);

        while (garbage != nullptr)
        {
            Texture::Details* next = garbage->m_next;
            g_texturePool.Destroy(garbage);
            garbage = next;
        }
    }
Example No. 15
    void start ()
    {
        assert (m_stopped == true);
        assert (m_stop_called == false);

        if (m_stopped.exchange (false) == true)
        {
            m_stop_complete.reset ();
            addReference ();
        }
    }
Example No. 16
            /// Attempts to acquire ownership of the \a recursive_mutex.
            /// Suspends the current HPX-thread until \a timeout if ownership cannot
            /// be obtained immediately.
            ///
            /// \returns \a true if ownership was acquired; otherwise, \a false.
            ///
            /// \throws Throws \a hpx#bad_parameter if an error occurs while
            ///         suspending. Throws \a hpx#yield_aborted if the mutex is
            ///         destroyed while suspended. Throws \a hpx#null_thread_id if
            ///         called outside of a HPX-thread.
//             template<typename Duration>
//             bool timed_lock(Duration const& timeout)
//             {
//                 return timed_lock(boost::get_system_time() + timeout);
//             }
//
//             bool timed_lock(boost::xtime const& timeout)
//             {
//                 return timed_lock(boost::posix_time::ptime(timeout));
//             }
//
            /// Release ownership of the \a recursive_mutex.
            ///
            /// \throws Throws \a hpx#bad_parameter if an error occurs while
            ///         releasing the mutex. Throws \a hpx#null_thread_id if called
            ///         outside of a HPX-thread.
            void unlock()
            {
                if (0 == --recursion_count)
                {
                    locking_thread_id.exchange(
                        thread_id_from_mutex<Mutex>::invalid_id());
                    util::unregister_lock(this);
                    util::reset_ignored(&mtx);
                    mtx.unlock();
                }
            }
Example No. 17
    void stop_async ()
    {
        if (m_called_stop.exchange (1) == 0)
        {
            m_io_service.dispatch (m_strand.wrap (boost::bind (
                &NameResolverImpl::do_stop, 
                    this, CompletionCounter (this))));

            m_journal.debug << "Stopping";
        }
    }
Example No. 18
    void stop_async () override
    {
        if (m_stop_called.exchange (true) == false)
        {
            m_io_service.dispatch (m_strand.wrap (std::bind (
                &ResolverAsioImpl::do_stop,
                    this, CompletionCounter (this))));

            JLOG(m_journal.debug()) << "Queued a stop request";
        }
    }
Example No. 19
void print_stats(deadline_timer& stats_timer, error_code const& ec)
{
	if (ec) return;

	printf(
#ifdef DEBUG_STATS
		"ping-queue: %" PRId64 "m"
		" node-buf: [%dk %dk %dk %dk]"
#endif

		" in: %.1f"
		" invalid_enc: %.1f"
		" invalid_src: %.1f"
		" id_failure: %.1f"
		" out_ping: %.1f"
		" short_tid_pong: %.1f"
		" invalid_pong: %.1f"
		" added: %.1f\n"
#ifdef DEBUG_STATS
		, queue_time.load()
		, nodebuf_size[0].load() / 1000
		, nodebuf_size[1].load() / 1000
		, nodebuf_size[2].load() / 1000
		, nodebuf_size[3].load() / 1000
#endif
		, incoming_queries.exchange(0) / float(print_stats_interval)
		, invalid_encoding.exchange(0) / float(print_stats_interval)
		, invalid_src_address.exchange(0) / float(print_stats_interval)
		, failed_nodeid_queries.exchange(0) / float(print_stats_interval)
		, outgoing_pings.exchange(0) / float(print_stats_interval)
		, short_tid_pongs.exchange(0) / float(print_stats_interval)
		, invalid_pongs.exchange(0) / float(print_stats_interval)
		, added_nodes.exchange(0) / float(print_stats_interval)
		);

#ifdef CLIENTS_STAT
	std::lock_guard<std::mutex> l(client_mutex);
	std::vector<std::pair<int, uint16_t>> ordered;
	for (auto i : client_histogram) {
		ordered.emplace_back(i.second, i.first);
	}
	std::sort(ordered.begin(), ordered.end());
	for (auto i : ordered) {
		printf("[%c%c: %d] ", (i.second >> 8) & 0xff, i.second & 0xff, i.first);
	}
	printf("\n");
	client_histogram.clear();
#endif
	stats_timer.expires_from_now(boost::posix_time::seconds(print_stats_interval));
	stats_timer.async_wait(std::bind(&print_stats, std::ref(stats_timer), _1));
}
Example No. 20
 bool try_basic_lock(thread_id_type current_thread_id)
 {
     if (mtx.try_lock())
     {
         locking_thread_id.exchange(current_thread_id);
         util::ignore_lock(&mtx);
         util::register_lock(this);
         recursion_count.store(1);
         return true;
     }
     return false;
 }
Example No. 21
    //-------------------------------------------------------------------------
    // Resolver
    void do_stop (CompletionCounter)
    {
        assert (m_stop_called == true);

        if (m_stopped.exchange (true) == false)
        {
            m_work.clear ();
            m_resolver.cancel ();

            removeReference ();
        }
    }
Example No. 22
	   /** setter for file state
	    * @param state - file state to mark the file with
	    */
	   inline void state(State state) {
		   // do not change file state when it is marked for deletion:
		   if(m_state.load(std::memory_order_acquire) == State::FILE_IS_MARKED_FOR_DELETION)
			   return;

		   if(state == State::FILE_IS_IN_USE_BY_SYNC)
			   m_lastsyncattempt = boost::posix_time::microsec_clock::local_time();

		   // notify the condition variable for whoever is waiting for the file state to change:
		   m_state.exchange(state, std::memory_order_release);
		   boost::mutex::scoped_lock lock(m_state_changed_mux);
		   m_state_changed_condition.notify_all();
	   }
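The waiting side that this setter notifies is not shown in the example. Under the same members it would typically block on the condition variable until the state matches; a sketch (the wait_for_state name is illustrative, not from the original):

	   /** block until the file reaches the given state */
	   inline void wait_for_state(State expected) {
		   boost::mutex::scoped_lock lock(m_state_changed_mux);
		   while(m_state.load(std::memory_order_acquire) != expected)
			   m_state_changed_condition.wait(lock);
	   }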
Example No. 23
    void start () override
    {
        assert (m_stopped == true);
        assert (m_stop_called == false);

        if (m_stopped.exchange (false) == true)
        {
            {
                std::lock_guard<std::mutex> lk{m_mut};
                m_asyncHandlersCompleted = false;
            }
            addReference ();
        }
    }
Example No. 24
inline void
LogCatReader::EndOfFile()
{
  Wait(pid.exchange(0));

  const int pid = FindCrash(data.c_str());
  if (pid > 0)
    Save(pid);
  else
    LogFormat("No crash found in logcat");

  StopLogCat();
  OnLogCatFinished(pid > 0);
}
Example No. 25
            /// Acquires ownership of the \a recursive_mutex. Suspends the
            /// current HPX-thread if ownership cannot be obtained immediately.
            ///
            /// \throws Throws \a hpx#bad_parameter if an error occurs while
            ///         suspending. Throws \a hpx#yield_aborted if the mutex is
            ///         destroyed while suspended. Throws \a hpx#null_thread_id if
            ///         called outside of a HPX-thread.
            void lock()
            {
                thread_id_type const id = thread_id_from_mutex<Mutex>::call();
                HPX_ASSERT(id != thread_id_from_mutex<Mutex>::invalid_id());

                if (!try_recursive_lock(id))
                {
                    mtx.lock();
                    locking_thread_id.exchange(id);
                    util::ignore_lock(&mtx);
                    util::register_lock(this);
                    recursion_count.store(1);
                }
            }
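            try_recursive_lock() is not part of this snippet. In this recursive-mutex pattern it succeeds only when the calling thread already owns the mutex, in which case it just bumps the recursion count; a sketch using the members referenced above:

            bool try_recursive_lock(thread_id_type current_thread_id)
            {
                if (locking_thread_id.load(std::memory_order_acquire) ==
                    current_thread_id)
                {
                    ++recursion_count;
                    return true;
                }
                return false;
            }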
Example No. 26
	void onTimer(ev::timer&, int)
	{
		if (filename_.empty())
			return; // not properly configured

		char format[128];
		snprintf(format, sizeof(format), "N:%zu:%zu:%zu",
				numRequests_.exchange(0),
				bytesIn_.exchange(0),
				bytesOut_.exchange(0));

		const char *args[4] = {
			"update",
			filename_.c_str(),
			format,
			nullptr
		};

		rrd_clear_error();
		int rv = rrd_update(3, (char **) args);
		if (rv < 0) {
			log(x0::Severity::error, "Could not update RRD statistics: %s", rrd_get_error());
		}
	}
Example No. 27
// walk the reclaim list and check whether it is hazardous to delete each node;
// if not, delete it
// This portion is computation-intensive and needs to be optimized.
void delete_nodes_with_no_hazards() {
    data_to_reclaim *current = nodes_to_reclaim.exchange(nullptr);
    while (current) {
        data_to_reclaim *const next = current->next;
        if (!outstanding_hazard_pointers_for(current->data)) {
            // not a hazard pointer, delete node
            delete current;
        }
        else {
            // chain to another list to delete later
            add_to_reclaim_list(current);
        }
        current = next;
    }
}
Example No. 28
void post_thread_1( void ) {
	for ( int i = 0 ; i < post_count ; ++i ) {
		task* pt = new task( &run );
		{
			_lock->lock();
			_queue.add_tail( pt );
			_lock->unlock();
		}
		int expected = 0;
		if ( _posted.compare_exchange_strong( expected , 1 )) {
			if ( PostQueuedCompletionStatus( _iocp , 0 , 0 , 0 ) == FALSE ) {
				_posted.exchange(0);
			}
		}
	}
}
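The _posted flag above coalesces wakeups: only the producer that flips it from 0 to 1 posts a completion packet, so the IOCP never fills with redundant wakeups. The consumer side is not shown; it would clear the flag before draining the queue, roughly like this sketch (GetQueuedCompletionStatus is the standard Win32 call; the draining step is elided because the queue/task API is not shown in the example):

void consumer_thread( void ) {
	for ( ;; ) {
		DWORD bytes = 0;
		ULONG_PTR key = 0;
		OVERLAPPED* ov = nullptr;
		if ( GetQueuedCompletionStatus( _iocp , &bytes , &key , &ov , INFINITE ) == FALSE && ov == nullptr ) {
			break; // completion port closed or fatal error
		}
		// clear the flag first so a later post_thread_1() can post another wakeup
		_posted.exchange( 0 );
		// ... drain _queue under _lock and run the tasks ...
	}
}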
Example No. 29
    void try_reclaim(node* old_head)
    {
        if (threads_in_pop == 1) {
            node* nodes_to_delete = to_be_deleted.exchange(nullptr);

            if (!--threads_in_pop) {
                delete_nodes(nodes_to_delete);
            } else if (nodes_to_delete) {
                chain_pending_nodes(nodes_to_delete);
            }

            delete old_head;
        } else {
            chain_pending_node(old_head);
            --threads_in_pop;
        }
    }
Example No. 30
void _States_DefrostCurrentSlot( bool isFromBackup )
{
	if( !SysHasValidState() )
	{
		Console.WriteLn( "Load state: Aborting (VM is not active)." );
		return;
	}

	if( IsSavingOrLoading.exchange(true) )
	{
		Console.WriteLn( "Load or save action is already pending." );
		return;
	}

	GSchangeSaveState( StatesC, SaveStateBase::GetFilename( StatesC ).ToUTF8() );
	StateCopy_LoadFromSlot( StatesC, isFromBackup );

	GetSysExecutorThread().PostIdleEvent( SysExecEvent_ClearSavingLoadingFlag() );

	Sstates_updateLoadBackupMenuItem();
}