Example #1
void
recursive_timed_mutex::unlock() {
    context * active_ctx = context::active();
    detail::spinlock_lock lk{ wait_queue_splk_ };
    if ( BOOST_UNLIKELY( active_ctx != owner_) ) {
        throw lock_error{
                std::make_error_code( std::errc::operation_not_permitted),
                "boost fiber: no  privilege to perform the operation" };
    }
    if ( 0 == --count_) {
        owner_ = nullptr;
        if ( ! wait_queue_.empty() ) {
            context * ctx = & wait_queue_.front();
            wait_queue_.pop_front();
            std::intptr_t expected = reinterpret_cast< std::intptr_t >( this);
            if ( ctx->twstatus.compare_exchange_strong( expected, static_cast< std::intptr_t >( -1), std::memory_order_acq_rel) ) {
                // notify before timeout
                intrusive_ptr_release( ctx);
                // notify context
                active_ctx->schedule( ctx);
            } else if ( static_cast< std::intptr_t >( 0) == expected) {
                // no timed-wait op.
                // notify context
                active_ctx->schedule( ctx);
            } else {
                // timed-wait op.
                // expected == -1: notify after timeout, same timed-wait op.
                // expected == <any>: notify after timeout, another timed-wait op. was already started
                intrusive_ptr_release( ctx);
                // re-schedule next
            }
        }
    }
}
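The twstatus handshake above is a race between this notifier and the waiter's timeout path: a timed waiter arms twstatus with the address of the mutex, and whichever side CAS-es the field first owns the wakeup. A stripped-down model of just that handshake, with contexts, scheduling, and the rest of Boost.Fiber omitted and all names hypothetical:

#include <atomic>
#include <cstdint>

// Model of the twstatus claim protocol from Example #1.
//  0    : the waiter never armed a timeout (plain wait)
//  addr : a timed wait is armed on the mutex at `addr`
//  -1   : one side has already claimed the wakeup
std::atomic<std::intptr_t> twstatus{0};

bool notifier_claims(void* mutex_addr) {
    std::intptr_t expected = reinterpret_cast<std::intptr_t>(mutex_addr);
    if (twstatus.compare_exchange_strong(expected, -1,
                                         std::memory_order_acq_rel)) {
        return true;  // we won: wake the fiber before its timeout fires
    }
    if (expected == 0) {
        return true;  // no timeout was armed: plain wakeup
    }
    return false;     // the timeout path (or a newer timed wait) owns the wakeup
}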
Example #2
void private_thread::run() {
  auto job = const_cast<scheduled_actor*>(self_);
  CAF_PUSH_AID(job->id());
  CAF_LOG_TRACE("");
  scoped_execution_unit ctx{&job->system()};
  auto max_throughput = std::numeric_limits<size_t>::max();
  bool resume_later;
  for (;;) {
    state_ = await_resume_or_shutdown;
    do {
      resume_later = false;
      switch (job->resume(&ctx, max_throughput)) {
        case resumable::resume_later:
          resume_later = true;
          break;
        case resumable::done:
          intrusive_ptr_release(job->ctrl());
          return;
        case resumable::awaiting_message:
          intrusive_ptr_release(job->ctrl());
          break;
        case resumable::shutdown_execution_unit:
          return;
      }
    } while (resume_later);
    // wait until actor becomes ready again or was destroyed
    if (!await_resume())
      return;
  }
}
Example #3
void variant::release()
{
	switch(type_) {
	case TYPE_LIST:
		if(--list_->refcount == 0) {
			delete list_;
		}
		break;
	case TYPE_STRING:
		if(--string_->refcount == 0) {
			delete string_;
		}
		break;
	case TYPE_MAP:
		if(--map_->refcount == 0) {
			delete map_;
		}
		break;
	case TYPE_CALLABLE:
		intrusive_ptr_release(callable_);
		break;

	// These are not used here; they are listed to silence a compiler warning.
	case TYPE_NULL:
	case TYPE_DECIMAL:
	case TYPE_INT :
		break;
	}
}
Example #4
int DedupFS::Release(const char *path, struct fuse_file_info *fileInfo) {
  printf("release(path=%s)\n", path);
  intrusive_ptr_release((file_info*)fileInfo->fh);
  bc->sync();
  std::cout << "Sqlite memory used: " << sqlite3_memory_highwater(true) << std::endl;
  return 0;
}
Example #5
template <typename T>
T* default_intrusive_cow_ptr_unshare(T*& ptr) {
  if (!ptr->unique()) {
    auto new_ptr = ptr->copy();
    intrusive_ptr_release(ptr);
    ptr = new_ptr;
  }
  return ptr;
}
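The helper above is the write barrier of a copy-on-write scheme: callers invoke it before every mutation, so shared state is cloned at most once per writer. A minimal usage sketch, assuming a hypothetical cow_data payload that supplies the unique()/copy()/refcount protocol the template expects:

#include <atomic>

// Hypothetical payload type satisfying the template's requirements.
struct cow_data {
    std::atomic<int> refs{1};
    int value = 0;

    bool unique() const { return refs.load(std::memory_order_acquire) == 1; }
    cow_data* copy() const {
        cow_data* c = new cow_data;
        c->value = value;
        return c;
    }
};

void intrusive_ptr_release(cow_data* p) {
    if (p->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete p;
}

int main() {
    cow_data* a = new cow_data;                       // one owner: already unique
    cow_data* b = a;
    a->refs.fetch_add(1, std::memory_order_relaxed);  // simulate a second owner
    default_intrusive_cow_ptr_unshare(a);             // `a` now points at a private copy
    a->value = 42;                                    // `b` is unaffected
    intrusive_ptr_release(a);
    intrusive_ptr_release(b);
}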
Example #6
void SIMIX_cond_unref(smx_cond_t cond)
{
  XBT_IN("(%p)",cond);
  XBT_DEBUG("Destroy condition %p", cond);
  if (cond != nullptr) {
    intrusive_ptr_release(cond);
  }
  XBT_OUT();
}
Example #7
 /** Move assignment */
 intrusive_ptr &operator=(intrusive_ptr &&rhs)
 {
   if (m_ptr != 0) {
     intrusive_ptr_release(m_ptr);
   }
   m_ptr = rhs.m_ptr;
   rhs.m_ptr = 0;
   return *this;
 }
Example #8
	bool compare_exchange(T* expected, T* desired, T** old = NULL) {
		bool success = ptr.compare_exchange_strong(expected, desired);
		if(success && expected != desired) {
			intrusive_ptr_add_ref(desired);   // the slot takes its own reference
			intrusive_ptr_release(expected);  // ...and drops the old one
		}
		if(old)
			*old = expected;  // on failure, the value actually observed
		return success;
	}
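On success this wrapper takes a reference to the newly stored value and drops the slot's reference to the replaced one; the caller keeps its own references throughout. A self-contained sketch of the same bookkeeping on a bare std::atomic<Node*>, where Node and the free functions are hypothetical stand-ins for the usual protocol:

#include <atomic>

struct Node {
    std::atomic<int> refs{1};  // a new node starts owned by its creator
};

void intrusive_ptr_add_ref(Node* p) { p->refs.fetch_add(1, std::memory_order_relaxed); }
void intrusive_ptr_release(Node* p) {
    if (p->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete p;
}

bool swap_in(std::atomic<Node*>& slot, Node* expected, Node* desired) {
    if (slot.compare_exchange_strong(expected, desired)) {
        intrusive_ptr_add_ref(desired);   // the slot now owns `desired` too
        intrusive_ptr_release(expected);  // ...and no longer owns the old node
        return true;
    }
    return false;
}

int main() {
    Node* a = new Node;
    Node* b = new Node;
    std::atomic<Node*> slot{a};
    intrusive_ptr_add_ref(a);           // the slot holds its own reference to `a`
    swap_in(slot, a, b);                // slot: a -> b, both counts adjusted
    intrusive_ptr_release(a);           // drop our local references
    intrusive_ptr_release(b);
    intrusive_ptr_release(slot.load()); // tear down the slot's reference to `b`
}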
Example #9
  /** Assignment from raw memory_block pointer */
  intrusive_ptr &operator=(T *rhs)
  {
    if (m_ptr != nullptr) {
      intrusive_ptr_release(m_ptr);
    }

    m_ptr = rhs;
    if (m_ptr != nullptr) {
      intrusive_ptr_retain(m_ptr);
    }

    return *this;
  }
Example #10
 /** Assignment */
 intrusive_ptr &operator=(const intrusive_ptr &rhs)
 {
   if (m_ptr != 0) {
     intrusive_ptr_release(m_ptr);
   }
   if (rhs.m_ptr != 0) {
     m_ptr = rhs.m_ptr;
     intrusive_ptr_retain(m_ptr);
   } else {
     m_ptr = 0;
   }
   return *this;
 }
Example #11
 void run() {
   CAF_SET_LOGGER_SYS(&system());
   CAF_LOG_TRACE(CAF_ARG(id_));
   // scheduling loop
   for (;;) {
     auto job = policy_.dequeue(this);
     CAF_ASSERT(job != nullptr);
     CAF_ASSERT(job->subtype() != resumable::io_actor);
     CAF_LOG_DEBUG("resume actor:" << CAF_ARG(id_of(job)));
     CAF_PUSH_AID_FROM_PTR(dynamic_cast<abstract_actor*>(job));
     policy_.before_resume(this, job);
     auto res = job->resume(this, max_throughput_);
     policy_.after_resume(this, job);
     switch (res) {
       case resumable::resume_later: {
         // keep reference to this actor, as it remains in the "loop"
         policy_.resume_job_later(this, job);
         break;
       }
       case resumable::done: {
         policy_.after_completion(this, job);
         intrusive_ptr_release(job);
         break;
       }
       case resumable::awaiting_message: {
         // resumable will maybe be enqueued again later, deref it for now
         intrusive_ptr_release(job);
         break;
       }
       case resumable::shutdown_execution_unit: {
         policy_.after_completion(this, job);
         policy_.before_shutdown(this);
         return;
       }
     }
   }
 }
Example #12
 void barrier::detach()
 {
     if (node_)
     {
         if (hpx::get_runtime_ptr() != nullptr &&
             hpx::threads::threadmanager_is(state_running) &&
             !hpx::is_stopped_or_shutting_down())
         {
             if ((*node_)->num_ >= (*node_)->cut_off_ || (*node_)->rank_ == 0)
                 hpx::unregister_with_basename(
                     (*node_)->base_name_, (*node_)->rank_);
         }
         intrusive_ptr_release(node_->get());
         node_.reset();
     }
 }
Example #13
void variant::release()
{
	switch(type_) {
	case TYPE_LIST:
		if(--list_->refcount == 0) {
			delete list_;
		}
		break;
	case TYPE_STRING:
		if(--string_->refcount == 0) {
			delete string_;
		}
		break;
	case TYPE_CALLABLE:
		intrusive_ptr_release(callable_);
		break;
	}
}
Example #14
void variant::release()
{
	switch(type_) {
	case VARIANT_TYPE_LIST:
		if(--list_->refcount == 0) {
			delete list_;
		}
		break;
	case VARIANT_TYPE_STRING:
		if(--string_->refcount == 0) {
			delete string_;
		}
		break;
	case VARIANT_TYPE_MAP:
		if(--map_->refcount == 0) {
			delete map_;
		}
		break;
	case VARIANT_TYPE_CALLABLE:
		intrusive_ptr_release(callable_);
		break;
	case VARIANT_TYPE_CALLABLE_LOADING:
		callable_variants_loading.erase(this);
		break;
	case VARIANT_TYPE_FUNCTION:
		if(--fn_->refcount == 0) {
			delete fn_;
		}
		break;
	case VARIANT_TYPE_DELAYED:
		delayed_variants_loading.erase(this);
		if(--delayed_->refcount == 0) {
			delete delayed_;
		}
		break;

	// These are not used here; they are listed to silence a compiler warning.
	case VARIANT_TYPE_NULL:
	case VARIANT_TYPE_INT:
	case VARIANT_TYPE_BOOL:
	case VARIANT_TYPE_DECIMAL:
		break;
	}
}
Example #15
  friend void push(sink& s, buffer p) {
    v4l2_buffer b = {0};
    b.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
    b.memory = V4L2_MEMORY_MMAP;
    b.index = p.get() - s.p->buffers;

    if(ioctl(s.p->fd.native_handle(), VIDIOC_QBUF, &b)) throw std::system_error(errno, std::system_category());
    intrusive_ptr_add_ref(p.get());

    if(atomic_exchange(&s.p->streaming, true)) {
      auto dqbuf = std::make_unique<v4l2_buffer>();
      memset(dqbuf.get(), 0, sizeof(*dqbuf.get()));
      dqbuf->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
      dqbuf->memory = V4L2_MEMORY_MMAP;
      auto dqp = dqbuf.get();

      s.p->fd.async_read_some(utils::make_ioctl_read_buffer<VIDIOC_DQBUF>(dqp), [&s, buffer = utils::move_on_copy(std::move(dqbuf))](std::error_code const& ec, std::size_t) {
        if(!ec) 
          intrusive_ptr_release(s.p->buffers + unwrap(buffer)->index);
      });
    }
  }
Example #16
    void barrier::release()
    {
        if (node_)
        {
            if (hpx::get_runtime_ptr() != nullptr &&
                hpx::threads::threadmanager_is(state_running) &&
                !hpx::is_stopped_or_shutting_down())
            {
                // make sure this runs as an HPX thread
                if (hpx::threads::get_self_ptr() == nullptr)
                {
                    hpx::threads::run_as_hpx_thread(&barrier::release, this);
                }

                hpx::future<void> f;
                if ((*node_)->num_ >= (*node_)->cut_off_ || (*node_)->rank_ == 0)
                {
                    f = hpx::unregister_with_basename(
                        (*node_)->base_name_, (*node_)->rank_);
                }

                // we need to wait on everyone to have its name unregistered,
                // and hold on to our node long enough...
                boost::intrusive_ptr<wrapping_type> node = node_;
                hpx::when_all(f, wait(hpx::launch::async)).then(
                    hpx::launch::sync,
                    [HPX_CAPTURE_MOVE(node)](hpx::future<void> f)
                    {
                        HPX_UNUSED(node);
                        f.get();
                    }
                ).get();
            }
            intrusive_ptr_release(node_->get());
            node_.reset();
        }
    }
Example #17
void
scheduler::release_terminated_() noexcept {
    while ( ! terminated_queue_.empty() ) {
        context * ctx = & terminated_queue_.front();
        terminated_queue_.pop_front();
        BOOST_ASSERT( ctx->is_context( type::worker_context) );
        BOOST_ASSERT( ! ctx->is_context( type::pinned_context) );
        BOOST_ASSERT( this == ctx->get_scheduler() );
        BOOST_ASSERT( ctx->is_resumable() );
        BOOST_ASSERT( ! ctx->worker_is_linked() );
        BOOST_ASSERT( ! ctx->ready_is_linked() );
#if ! defined(BOOST_FIBERS_NO_ATOMICS)
        BOOST_ASSERT( ! ctx->remote_ready_is_linked() );
#endif
        BOOST_ASSERT( ! ctx->sleep_is_linked() );
        BOOST_ASSERT( ! ctx->wait_is_linked() );
        BOOST_ASSERT( ctx->wait_queue_.empty() );
        BOOST_ASSERT( ctx->terminated_);
        // if this is the last reference, e.g. because fiber::join() or
        // fiber::detach() has already been called, this will call ~context();
        // the context is automatically removed from the worker queue
        intrusive_ptr_release( ctx);
    }
}
Example #18
	~IntrusivePtr() {
		T* _p = ptr.exchange(NULL);
		if(_p)
			intrusive_ptr_release(_p);
	}
Example #19
void multiplexer::runnable::intrusive_ptr_release_impl() {
  intrusive_ptr_release(this);
}
Example #20
 void operator()(T * p)
 {
   if(p) intrusive_ptr_release(p);
 }
Example #21
void PyContentEntry_dealloc( PyContentEntry * self )
{
    if ( self->ptr )
        intrusive_ptr_release( self->ptr );
    Py_TYPE(self)->tp_free( (PyObject *)self );
}
Example #22
 ~IntrusivePtr ()
 {
     if (m_ptr)
         intrusive_ptr_release (m_ptr);
 }
Example #23
void intrusive_ptr_release(AnnotationValue * p)
{
	intrusive_ptr_release(&(p->name));
}
Example #24
/** Decrease the refcount for this mutex */
void SIMIX_mutex_unref(smx_mutex_t mutex)
{
  if (mutex != nullptr)
    intrusive_ptr_release(mutex);
}
Example #25
 static void release(Component* component)
 {
     intrusive_ptr_release(component);
 }
Example #26
 value_counted::~value_counted()
 {
     intrusive_ptr_release(value_);
 }
Example #27
 friend void intrusive_ptr_release(buf* b) {
   if(--(b->refs) == 0)
     b->base->queue.push(b);
   intrusive_ptr_release(b->base);
 }
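This release() is notable because a zero count recycles the buffer into its owner's free queue instead of deleting it, and every buffer reference also pins the owning pool. A minimal sketch of that ownership scheme, with hypothetical names and single-threaded buffer counts for brevity:

#include <atomic>
#include <queue>

struct pool;

struct pooled_buf {
    int refs = 0;
    pool* base = nullptr;
};

// The pool owns the buffer storage and frees it in its destructor (omitted).
struct pool {
    std::atomic<int> refs{1};
    std::queue<pooled_buf*> free_list;
};

void intrusive_ptr_add_ref(pool* p) { p->refs.fetch_add(1, std::memory_order_relaxed); }
void intrusive_ptr_release(pool* p) {
    if (p->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete p;
}

void intrusive_ptr_add_ref(pooled_buf* b) {
    ++b->refs;
    intrusive_ptr_add_ref(b->base);  // every buffer reference pins the pool
}

void intrusive_ptr_release(pooled_buf* b) {
    if (--b->refs == 0)
        b->base->free_list.push(b);  // recycle rather than delete
    intrusive_ptr_release(b->base);  // unpin the pool
}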
Example #28
void intrusive_ptr_release(AnnotationDefinition * p)
{
	intrusive_ptr_release((Element *)p);
}
Example #29
 //!Destructor. If internal pointer is not 0, calls
 //!intrusive_ptr_release(get_pointer(m_ptr)). Does not throw
 ~intrusive_ptr()
 {
    if(m_ptr != 0) intrusive_ptr_release(ipcdetail::get_pointer(m_ptr));
 }
Example #30
 /** Destructor */
 ~intrusive_ptr()
 {
   if (m_ptr != 0) {
     intrusive_ptr_release(m_ptr);
   }
 }
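Every example on this page relies on the same two-function protocol: intrusive_ptr_add_ref() increments a counter stored in the object itself, and intrusive_ptr_release() decrements it, destroying (or, as in Example #27, recycling) the object when the count reaches zero. Both functions are found by argument-dependent lookup, which is why each library supplies its own overloads. A minimal thread-safe base class sketch; the name ref_counted is hypothetical, and boost::intrusive_ref_counter is a production-ready base class for the same protocol:

#include <atomic>
#include <cstddef>

class ref_counted {
public:
    virtual ~ref_counted() = default;

    friend void intrusive_ptr_add_ref(ref_counted* p) noexcept {
        p->refs_.fetch_add(1, std::memory_order_relaxed);
    }

    friend void intrusive_ptr_release(ref_counted* p) noexcept {
        // acq_rel on the final decrement ensures every write to the object
        // happens-before its destruction, whichever thread drops last.
        if (p->refs_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
            delete p;
        }
    }

private:
    std::atomic<std::size_t> refs_{1};  // a new object starts with one owner
};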